patch (string, 17-31.2k chars)|y (int64, 1-1)|oldf (string, 0-2.21M chars)|idx (int64, 1-1)|id (int64, 4.29k-68.4k)|msg (string, 8-843 chars)|proj (string, 212 classes)|lang (string, 9 classes)|
---|---|---|---|---|---|---|---|
@@ -139,6 +139,8 @@ static const char* generate_multi_dot_name(ast_t* ast, ast_t** def_found) {
default:
{
+ if (def == NULL)
+ return stringtab("");
pony_assert(0);
}
} | 1 | #include "refer.h"
#include "../ast/id.h"
#include "../pass/expr.h"
#include "../../libponyrt/mem/pool.h"
#include "ponyassert.h"
#include <string.h>
enum lvalue_t {
NOT_LVALUE, // when the left value is something that cannot be assigned to
LVALUE, // when the left value is something that can be assigned to
ERR_LVALUE // when there is an issue with the left value (undefined, consumed)
};
typedef enum lvalue_t lvalue_t;
/**
* Make sure the definition of something occurs before its use. This is for
* both fields and local variables.
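*
* For example (illustrative Pony snippet), referring to `x` on a line before
* the declaration `let x: U32 = 0` is rejected with "declaration of 'x'
* appears after use".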
*/
bool def_before_use(pass_opt_t* opt, ast_t* def, ast_t* use, const char* name)
{
if((ast_line(def) > ast_line(use)) ||
((ast_line(def) == ast_line(use)) &&
(ast_pos(def) > ast_pos(use))))
{
ast_error(opt->check.errors, use,
"declaration of '%s' appears after use", name);
ast_error_continue(opt->check.errors, def,
"declaration of '%s' appears here", name);
return false;
}
return true;
}
static bool is_this_incomplete(pass_opt_t* opt, ast_t* ast)
{
// If we're in a default field initialiser, we're incomplete by definition.
if(opt->check.frame->method == NULL)
return true;
// If we're not in a constructor, we're complete by definition.
if(ast_id(opt->check.frame->method) != TK_NEW)
return false;
// Check if all fields have been marked as defined.
ast_t* members = ast_childidx(opt->check.frame->type, 4);
ast_t* member = ast_child(members);
while(member != NULL)
{
switch(ast_id(member))
{
case TK_FLET:
case TK_FVAR:
case TK_EMBED:
{
sym_status_t status;
ast_t* id = ast_child(member);
ast_get(ast, ast_name(id), &status);
if(status != SYM_DEFINED)
return true;
break;
}
default: {}
}
member = ast_sibling(member);
}
return false;
}
// This function generates the fully qualified string (without the `this`) for a
// reference (i.e. `a.b.c.d.e`) and ensures it is part of the compiler
// `stringtab`. It is used to generate the fully qualified string for a field
// being consumed, so that its `consume`d status can be tracked via the ast
// `symtab`. It is also used to generate the fully qualified string needed to
// verify that the same field is assigned to as part of the same expression as
// the consume. Lastly, it is used to get the definition for the type based on
// the `ast_data`, ensuring that at some point the field is tied to a real type
// even if we haven't quite fully determined the type of each field/subfield
// reference yet.
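// For example, given the AST for `this.a.b.c`, the returned stringtab entry
// is "a.b.c" (the leading `this` is dropped).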
static const char* generate_multi_dot_name(ast_t* ast, ast_t** def_found) {
pony_assert(ast_id(ast) == TK_DOT);
ast_t* def = NULL;
size_t len = 0;
ast_t* temp_ast = ast;
do {
if(ast_id(temp_ast) != TK_DOT)
break;
AST_GET_CHILDREN(temp_ast, left, right);
def = (ast_t*) ast_data(left);
temp_ast = left;
// the `+ 1` is for the '.' needed in the string
len += strlen(ast_name(right)) + 1;
} while(def == NULL);
switch(ast_id(temp_ast))
{
case TK_DOT:
{
AST_GET_CHILDREN(temp_ast, left, right);
if(ast_id(left) == TK_THIS)
{
temp_ast = right;
len += strlen(ast_name(temp_ast));
}
else
pony_assert(0);
break;
}
case TK_LETREF:
case TK_VARREF:
case TK_REFERENCE:
case TK_PARAMREF:
{
temp_ast = ast_child(temp_ast);
len += strlen(ast_name(temp_ast));
break;
}
case TK_THIS:
{
temp_ast = ast_sibling(temp_ast);
// string len already added by loop above
// subtract 1 because we don't have to add '.'
// since we're ignoring the `this`
len -= 1;
break;
}
default:
{
pony_assert(0);
}
}
if(def_found != NULL)
{
*def_found = def;
if(def == NULL)
return stringtab("");
}
// for the \0 at the end
len = len + 1;
char* buf = (char*)ponyint_pool_alloc_size(len);
size_t offset = 0;
const char* name = ast_name(temp_ast);
size_t slen = strlen(name);
memcpy(buf + offset, name, slen);
offset += slen;
temp_ast = ast_parent(temp_ast);
while(temp_ast != ast)
{
buf[offset] = '.';
offset += 1;
temp_ast = ast_sibling(temp_ast);
name = ast_name(temp_ast);
slen = strlen(name);
memcpy(buf + offset, name, slen);
offset += slen;
temp_ast = ast_parent(temp_ast);
}
pony_assert((offset + 1) == len);
buf[offset] = '\0';
return stringtab_consume(buf, len);
}
static bool is_matching_assign_lhs(ast_t* a, ast_t* b)
{
// Has to be the left hand side of an assignment (the first child).
if(a == b)
return true;
// or two subfield references that match
if((ast_id(a) == TK_DOT) && (ast_id(b) == TK_DOT))
{
// get fully qualified string identifier without `this`
const char* a_name = generate_multi_dot_name(a, NULL);
const char* b_name = generate_multi_dot_name(b, NULL);
if(a_name == b_name)
return true;
}
return false;
}
static bool is_assigned_to(ast_t* ast, bool check_result_needed)
{
while(true)
{
ast_t* parent = ast_parent(ast);
switch(ast_id(parent))
{
case TK_ASSIGN:
{
if(!is_matching_assign_lhs(ast_child(parent), ast))
return false;
if(!check_result_needed)
return true;
// The result of that assignment can't be used.
return !is_result_needed(parent);
}
case TK_SEQ:
{
// Might be in a tuple on the left hand side.
if(ast_childcount(parent) > 1)
return false;
break;
}
case TK_TUPLE:
break;
default:
return false;
}
ast = parent;
}
}
/**
* Return true if this reference is just being used to call a constructor
* on the type, in which case we don't care if its status is SYM_DEFINED yet.
*
* Example:
* let a: Array[U8] = a.create()
*/
static bool is_constructed_from(ast_t* ast)
{
ast_t* parent = ast_parent(ast);
if(ast_id(parent) != TK_DOT)
return false;
AST_GET_CHILDREN(parent, left, right);
if(left != ast)
return false;
ast_t* def = (ast_t*)ast_data(ast);
// no definition found because it's a TK_DOT
if(def == NULL)
{
pony_assert(ast_id(ast) == TK_DOT);
return false;
}
// TK_LET and TK_VAR have their symtable point to the TK_ID child,
// so if we encounter that here, we move up to the parent node.
if(ast_id(def) == TK_ID)
def = ast_parent(def);
switch(ast_id(def))
{
case TK_VAR:
case TK_LET:
case TK_FVAR:
case TK_FLET:
case TK_EMBED:
{
ast_t* typeref = ast_childidx(def, 1);
if((typeref == NULL) || (ast_data(typeref) == NULL))
return false;
ast_t* typedefn = (ast_t*)ast_data(typeref);
ast_t* find = ast_get(typedefn, ast_name(right), NULL);
return (find != NULL) && (ast_id(find) == TK_NEW);
}
default: {}
}
return false;
}
static bool valid_reference(pass_opt_t* opt, ast_t* ast, sym_status_t status)
{
if(is_constructed_from(ast))
return true;
switch(status)
{
case SYM_DEFINED:
return true;
case SYM_CONSUMED:
case SYM_CONSUMED_SAME_EXPR:
if(is_assigned_to(ast, true))
return true;
ast_error(opt->check.errors, ast,
"can't use a consumed local or field in an expression");
return false;
case SYM_UNDEFINED:
if(is_assigned_to(ast, true))
return true;
ast_error(opt->check.errors, ast,
"can't use an undefined variable in an expression");
return false;
case SYM_NONE:
pony_assert(ast_id(ast) == TK_DOT);
return true;
default: {}
}
pony_assert(0);
return false;
}
static const char* suggest_alt_name(ast_t* ast, const char* name)
{
pony_assert(ast != NULL);
pony_assert(name != NULL);
size_t name_len = strlen(name);
if(is_name_private(name))
{
// Try without leading underscore
const char* try_name = stringtab(name + 1);
if(ast_get(ast, try_name, NULL) != NULL)
return try_name;
}
else
{
// Try with a leading underscore
char* buf = (char*)ponyint_pool_alloc_size(name_len + 2);
buf[0] = '_';
memcpy(buf + 1, name, name_len + 1);
const char* try_name = stringtab_consume(buf, name_len + 2);
if(ast_get(ast, try_name, NULL) != NULL)
return try_name;
}
// Try with a different case (without crossing type/value boundary)
ast_t* case_ast = ast_get_case(ast, name, NULL);
if(case_ast != NULL)
{
ast_t* id = case_ast;
int tk = ast_id(id);
if(tk != TK_ID)
{
AST_GET_CHILDREN(case_ast, first, second);
if((tk = ast_id(first)) == TK_ID)
{
// First is a TK_ID, give it as a suggestion
id = first;
} else if((tk = ast_id(second)) == TK_ID) {
// Second is a TK_ID, give it as a suggestion
id = second;
} else {
// Giving up on different case as tk != TK_ID
}
}
if(tk == TK_ID)
{
const char* try_name = ast_name(id);
if(ast_get(ast, try_name, NULL) != NULL)
return try_name;
}
}
// Give up
return NULL;
}
static bool refer_this(pass_opt_t* opt, ast_t* ast)
{
pony_assert(ast_id(ast) == TK_THIS);
// Can only use a this reference if it hasn't been consumed yet.
sym_status_t status;
ast_get(ast, stringtab("this"), &status);
if((status == SYM_CONSUMED) || (status == SYM_CONSUMED_SAME_EXPR))
{
ast_error(opt->check.errors, ast,
"can't use a consumed 'this' in an expression");
return false;
}
pony_assert(status == SYM_NONE);
// Mark the this reference as incomplete if not all fields are defined yet.
if(is_this_incomplete(opt, ast))
ast_setflag(ast, AST_FLAG_INCOMPLETE);
return true;
}
bool refer_reference(pass_opt_t* opt, ast_t** astp)
{
ast_t* ast = *astp;
const char* name = ast_name(ast_child(ast));
// Handle the special case of the "don't care" reference (`_`)
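// (e.g. `_ = foo()` is fine, but reading from `_` is rejected later in
// refer_assign with "can't read from '_'")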
if(is_name_dontcare(name))
{
ast_setid(ast, TK_DONTCAREREF);
return true;
}
// Everything we reference must be in scope, so we can use ast_get for lookup.
sym_status_t status;
ast_t* def = ast_get(ast, ast_name(ast_child(ast)), &status);
// If nothing was found, we fail, but also try to suggest an alternate name.
if(def == NULL)
{
const char* alt_name = suggest_alt_name(ast, name);
if(alt_name == NULL)
ast_error(opt->check.errors, ast, "can't find declaration of '%s'", name);
else
ast_error(opt->check.errors, ast,
"can't find declaration of '%s', did you mean '%s'?", name, alt_name);
return false;
}
// Save the found definition in the AST, so we don't need to look it up again.
ast_setdata(ast, (void*)def);
switch(ast_id(def))
{
case TK_PACKAGE:
{
// Only allowed if in a TK_DOT with a type.
if(ast_id(ast_parent(ast)) != TK_DOT)
{
ast_error(opt->check.errors, ast,
"a package can only appear as a prefix to a type");
return false;
}
ast_setid(ast, TK_PACKAGEREF);
return true;
}
case TK_INTERFACE:
case TK_TRAIT:
case TK_TYPE:
case TK_TYPEPARAM:
case TK_PRIMITIVE:
case TK_STRUCT:
case TK_CLASS:
case TK_ACTOR:
{
ast_setid(ast, TK_TYPEREF);
ast_add(ast, ast_from(ast, TK_NONE)); // 1st child: package reference
ast_append(ast, ast_from(ast, TK_NONE)); // 3rd child: type args
return true;
}
case TK_FVAR:
case TK_FLET:
case TK_EMBED:
case TK_NEW:
case TK_BE:
case TK_FUN:
{
// Transform to "this.f".
ast_t* dot = ast_from(ast, TK_DOT);
ast_add(dot, ast_child(ast));
ast_t* self = ast_from(ast, TK_THIS);
ast_add(dot, self);
ast_replace(astp, dot);
ast = *astp;
return refer_this(opt, self) && refer_dot(opt, ast);
}
case TK_PARAM:
{
if(opt->check.frame->def_arg != NULL)
{
ast_error(opt->check.errors, ast,
"can't reference a parameter in a default argument");
return false;
}
if(!def_before_use(opt, def, ast, name))
return false;
if(!valid_reference(opt, ast, status))
return false;
ast_setid(ast, TK_PARAMREF);
return true;
}
case TK_VAR:
case TK_LET:
case TK_MATCH_CAPTURE:
{
if(!def_before_use(opt, def, ast, name))
return false;
if(!valid_reference(opt, ast, status))
return false;
if(ast_id(def) == TK_VAR)
ast_setid(ast, TK_VARREF);
else
ast_setid(ast, TK_LETREF);
return true;
}
default: {}
}
pony_assert(0);
return false;
}
static bool refer_packageref_dot(pass_opt_t* opt, ast_t* ast)
{
pony_assert(ast_id(ast) == TK_DOT);
AST_GET_CHILDREN(ast, left, right);
pony_assert(ast_id(left) == TK_PACKAGEREF);
pony_assert(ast_id(right) == TK_ID);
// Must be a type in a package.
const char* package_name = ast_name(ast_child(left));
ast_t* package = ast_get(left, package_name, NULL);
if(package == NULL)
{
ast_error(opt->check.errors, right, "can't access package '%s'",
package_name);
return false;
}
pony_assert(ast_id(package) == TK_PACKAGE);
const char* type_name = ast_name(right);
ast_t* def = ast_get(package, type_name, NULL);
if(def == NULL)
{
ast_error(opt->check.errors, right, "can't find type '%s' in package '%s'",
type_name, package_name);
return false;
}
ast_setdata(ast, (void*)def);
ast_setid(ast, TK_TYPEREF);
ast_append(ast, ast_from(ast, TK_NONE)); // 3rd child: type args
return true;
}
static bool refer_this_dot(pass_opt_t* opt, ast_t* ast)
{
pony_assert(ast_id(ast) == TK_DOT);
AST_GET_CHILDREN(ast, left, right);
pony_assert(ast_id(left) == TK_THIS);
pony_assert(ast_id(right) == TK_ID);
const char* name = ast_name(right);
sym_status_t status;
ast_t* def = ast_get(ast, name, &status);
ast_setdata(ast, (void*)def);
// If nothing was found, we fail, but also try to suggest an alternate name.
if(def == NULL)
{
const char* alt_name = suggest_alt_name(ast, name);
if(alt_name == NULL)
ast_error(opt->check.errors, ast, "can't find declaration of '%s'", name);
else
ast_error(opt->check.errors, ast,
"can't find declaration of '%s', did you mean '%s'?", name, alt_name);
return false;
}
switch(ast_id(def))
{
case TK_FVAR:
case TK_FLET:
case TK_EMBED:
if(!valid_reference(opt, ast, status))
return false;
break;
default: {}
}
return true;
}
static bool refer_multi_dot(pass_opt_t* opt, ast_t* ast)
{
pony_assert(ast_id(ast) == TK_DOT);
AST_GET_CHILDREN(ast, left, right);
// get fully qualified string identifier without `this`
const char* name = generate_multi_dot_name(ast, NULL);
// use this string to check status via the `valid_reference` function.
sym_status_t status;
ast_get(ast, name, &status);
if(!valid_reference(opt, ast, status))
return false;
return true;
}
bool refer_dot(pass_opt_t* opt, ast_t* ast)
{
pony_assert(ast_id(ast) == TK_DOT);
AST_GET_CHILDREN(ast, left, right);
switch(ast_id(left))
{
case TK_PACKAGEREF: return refer_packageref_dot(opt, ast);
case TK_THIS: return refer_this_dot(opt, ast);
case TK_PARAMREF:
case TK_VARREF:
case TK_LETREF:
case TK_DOT:
{
// check multi_dot reference if it's not a function call
// only if we had a field consume/reassign
if(ast_checkflag(ast, AST_FLAG_FCNSM_REASGN)
&& (ast_id(ast_parent(ast)) != TK_CALL)
&& (ast_id(ast_parent(ast)) != TK_QUALIFY))
return refer_multi_dot(opt, ast);
}
default: {}
}
return true;
}
static bool qualify_typeref(pass_opt_t* opt, ast_t* ast)
{
(void)opt;
ast_t* typeref = ast_child(ast);
// If the typeref already has type args, it can't get any more, so we'll
// leave as TK_QUALIFY, so expr pass will sugar as qualified call to apply.
if(ast_id(ast_childidx(typeref, 2)) == TK_TYPEARGS)
return true;
ast_t* def = (ast_t*)ast_data(typeref);
pony_assert(def != NULL);
// If the type isn't polymorphic it can't get type args at all, so we'll
// leave as TK_QUALIFY, so expr pass will sugar as qualified call to apply.
ast_t* typeparams = ast_childidx(def, 1);
if(ast_id(typeparams) == TK_NONE)
return true;
// Now, we want to get rid of the inner typeref, take its children, and
// convert this TK_QUALIFY node into a TK_TYPEREF node with type args.
ast_setdata(ast, (void*)def);
ast_setid(ast, TK_TYPEREF);
pony_assert(typeref == ast_pop(ast));
ast_t* package = ast_pop(typeref);
ast_t* type_name = ast_pop(typeref);
ast_free(typeref);
ast_add(ast, type_name);
ast_add(ast, package);
return true;
}
bool refer_qualify(pass_opt_t* opt, ast_t* ast)
{
pony_assert(ast_id(ast) == TK_QUALIFY);
if(ast_id(ast_child(ast)) == TK_TYPEREF)
return qualify_typeref(opt, ast);
return true;
}
static void error_check_used_decl(errorframe_t* frame, ast_t* ast)
{
// Prints info about why the lvalue is needed
ast_t* parent = ast_parent(ast);
pony_assert(parent != NULL);
token_id parent_id = ast_id(parent);
if(parent_id == TK_VAR || parent_id == TK_LET)
{
ast_error_frame(frame, parent, "the previous value of '%s' is used because"
" you are trying to use the resulting value of this %s declaration",
ast_print_type(ast), ast_print_type(parent));
}
}
static void error_consumed_but_used(pass_opt_t* opt, ast_t* ast)
{
// Prints an error about an lvalue's old value being needed, but consumed (it
// is unknown whether this code can be reached in any practical case)
errorframe_t frame = NULL;
ast_error_frame(&frame, ast,
"the left side is consumed but its value is used");
error_check_used_decl(&frame, ast);
errorframe_report(&frame, opt->check.errors);
}
static void error_undefined_but_used(pass_opt_t* opt, ast_t* ast)
{
// Prints an error about an lvalue's old value being needed, but undefined
errorframe_t frame = NULL;
ast_error_frame(&frame, ast,
"the left side is undefined but its value is used");
error_check_used_decl(&frame, ast);
errorframe_report(&frame, opt->check.errors);
}
static lvalue_t assign_multi_dot(pass_opt_t* opt, ast_t* ast, bool need_value)
{
pony_assert(ast_id(ast) == TK_DOT);
// get fully qualified string identifier without `this`
const char* name = generate_multi_dot_name(ast, NULL);
sym_status_t status;
ast_get(ast, name, &status);
switch(status)
{
case SYM_UNDEFINED:
if(need_value)
{
error_undefined_but_used(opt, ast);
return ERR_LVALUE;
}
ast_setstatus(ast, name, SYM_DEFINED);
return LVALUE;
case SYM_DEFINED:
return LVALUE;
case SYM_CONSUMED:
case SYM_CONSUMED_SAME_EXPR:
{
lvalue_t ok = LVALUE;
if(need_value)
{
error_consumed_but_used(opt, ast);
ok = ERR_LVALUE;
}
if(opt->check.frame->try_expr != NULL)
{
if(status == SYM_CONSUMED)
{
ast_error(opt->check.errors, ast,
"can't reassign to a consumed identifier in a try expression unless"
" it is reassigned in the same expression");
ok = ok == ERR_LVALUE ? ERR_LVALUE : NOT_LVALUE;
}
// SYM_CONSUMED_SAME_EXPR is allowed to pass; verify pass will check if
// there are any partial calls/errors and throw an error if necessary
}
if(ok == LVALUE)
ast_setstatus(ast, name, SYM_DEFINED);
return ok;
}
case SYM_NONE:
pony_assert(ast_id(ast) == TK_DOT);
return LVALUE;
default: {}
}
pony_assert(0);
return NOT_LVALUE;
}
static lvalue_t assign_id(pass_opt_t* opt, ast_t* ast, bool let, bool need_value)
{
pony_assert(ast_id(ast) == TK_ID);
const char* name = ast_name(ast);
sym_status_t status;
ast_get(ast, name, &status);
switch(status)
{
case SYM_UNDEFINED:
if(need_value)
{
error_undefined_but_used(opt, ast);
return ERR_LVALUE;
}
ast_setstatus(ast, name, SYM_DEFINED);
return LVALUE;
case SYM_DEFINED:
if(let)
{
ast_error(opt->check.errors, ast,
"can't assign to a let or embed definition more than once");
return NOT_LVALUE;
}
return LVALUE;
case SYM_CONSUMED:
case SYM_CONSUMED_SAME_EXPR:
{
lvalue_t ok = LVALUE;
if(need_value)
{
error_consumed_but_used(opt, ast);
ok = ERR_LVALUE;
}
if(let)
{
ast_error(opt->check.errors, ast,
"can't assign to a let or embed definition more than once");
ok = ok == ERR_LVALUE ? ERR_LVALUE : NOT_LVALUE;
}
if(opt->check.frame->try_expr != NULL)
{
if(status == SYM_CONSUMED)
{
ast_error(opt->check.errors, ast,
"can't reassign to a consumed identifier in a try expression unless"
" it is reassigned in the same expression");
ok = ok == ERR_LVALUE ? ERR_LVALUE : NOT_LVALUE;
}
// SYM_CONSUMED_SAME_EXPR is allowed to pass; verify pass will check if
// there are any partial calls/errors and throw an error if necessary
}
if(ok == LVALUE)
ast_setstatus(ast, name, SYM_DEFINED);
return ok;
}
default: {}
}
pony_assert(0);
return NOT_LVALUE;
}
static lvalue_t is_lvalue(pass_opt_t* opt, ast_t* ast, bool need_value)
{
switch(ast_id(ast))
{
case TK_DONTCARE:
return LVALUE;
case TK_DONTCAREREF:
// Can only assign to it if we don't need the value.
return need_value ? NOT_LVALUE : LVALUE;
case TK_VAR:
case TK_LET:
return assign_id(opt, ast_child(ast), ast_id(ast) == TK_LET, need_value);
case TK_VARREF:
{
ast_t* id = ast_child(ast);
return assign_id(opt, id, false, need_value);
}
case TK_LETREF:
{
ast_error(opt->check.errors, ast, "can't reassign to a let local");
return NOT_LVALUE;
}
case TK_DOT:
{
AST_GET_CHILDREN(ast, left, right);
switch(ast_id(left))
{
case TK_THIS:
{
ast_t* def = (ast_t*)ast_data(ast);
if(def == NULL)
return NOT_LVALUE;
switch(ast_id(def))
{
case TK_FVAR: return assign_id(opt, right, false, need_value) ? LVALUE : NOT_LVALUE;
case TK_FLET:
case TK_EMBED: return assign_id(opt, right, true, need_value) ? LVALUE : NOT_LVALUE;
default: return NOT_LVALUE;
}
}
case TK_VARREF:
case TK_LETREF:
case TK_DOT:
{
return assign_multi_dot(opt, ast, need_value);
}
default: {}
}
return LVALUE;
}
case TK_TUPLE:
{
// A tuple is an lvalue if every component expression is an lvalue.
ast_t* child = ast_child(ast);
while(child != NULL)
{
switch (is_lvalue(opt, child, need_value)) {
case LVALUE: break;
case ERR_LVALUE: return ERR_LVALUE;
case NOT_LVALUE: return NOT_LVALUE;
}
child = ast_sibling(child);
}
return LVALUE;
}
case TK_SEQ:
{
// A sequence is an lvalue if it has a single child that is an lvalue.
// This is used because the components of a tuple are sequences.
ast_t* child = ast_child(ast);
if(ast_sibling(child) != NULL)
return NOT_LVALUE;
return is_lvalue(opt, child, need_value);
}
default: {}
}
return NOT_LVALUE;
}
static bool refer_pre_call(pass_opt_t* opt, ast_t* ast)
{
pony_assert(ast_id(ast) == TK_CALL);
AST_GET_CHILDREN(ast, lhs, positional, named, question);
// Run the args before the receiver, so that symbol status tracking
// will see things like consumes in the args first.
if(!ast_passes_subtree(&positional, opt, PASS_REFER) ||
!ast_passes_subtree(&named, opt, PASS_REFER))
return false;
return true;
}
static bool refer_pre_assign(pass_opt_t* opt, ast_t* ast)
{
pony_assert(ast_id(ast) == TK_ASSIGN);
AST_GET_CHILDREN(ast, left, right);
// Run the right side before the left side, so that symbol status tracking
// will see things like consumes in the right side first.
if(!ast_passes_subtree(&right, opt, PASS_REFER))
return false;
return true;
}
static bool refer_assign(pass_opt_t* opt, ast_t* ast)
{
pony_assert(ast_id(ast) == TK_ASSIGN);
AST_GET_CHILDREN(ast, left, right);
switch(is_lvalue(opt, left, is_result_needed(ast)))
{
case NOT_LVALUE:
if(ast_id(left) == TK_DONTCAREREF)
{
ast_error(opt->check.errors, left,
"can't read from '_'");
} else {
ast_error(opt->check.errors, ast,
"left side must be something that can be assigned to");
}
return false;
case ERR_LVALUE: return false;
case LVALUE: break;
}
return true;
}
static bool ast_get_child(ast_t* ast, const char* name)
{
const char* assign_name = NULL;
switch(ast_id(ast))
{
case TK_ID:
{
assign_name = ast_name(ast);
break;
}
case TK_DOT:
{
// get fully qualified string identifier without `this`
assign_name = generate_multi_dot_name(ast, NULL);
break;
}
default: {}
}
if(assign_name == name)
return true;
ast_t* child = ast_child(ast);
while(child != NULL)
{
if(ast_get_child(child, name))
return true;
child = ast_sibling(child);
}
return false;
}
static bool check_assigned_same_expression(ast_t* ast, const char* name,
ast_t** ret_assign_ast)
{
ast_t* assign_ast = ast;
while((assign_ast != NULL) && (ast_id(assign_ast) != TK_ASSIGN))
assign_ast = ast_parent(assign_ast);
*ret_assign_ast = assign_ast;
if(assign_ast == NULL)
return false;
ast_t* assign_left = ast_child(assign_ast);
return ast_get_child(assign_left, name);
}
static void set_flag_recursive(ast_t* outer, uint32_t flag)
{
pony_assert(outer != NULL);
ast_setflag(outer, flag);
ast_t* child = ast_child(outer);
while(child != NULL)
{
set_flag_recursive(child, flag);
child = ast_sibling(child);
}
}
static bool refer_consume(pass_opt_t* opt, ast_t* ast)
{
pony_assert(ast_id(ast) == TK_CONSUME);
AST_GET_CHILDREN(ast, cap, term);
const char* name = NULL;
bool consumed_same_expr = false;
switch(ast_id(term))
{
case TK_VARREF:
case TK_LETREF:
case TK_PARAMREF:
{
ast_t* id = ast_child(term);
name = ast_name(id);
ast_t* assign_ast = NULL;
if(check_assigned_same_expression(id, name, &assign_ast))
{
consumed_same_expr = true;
ast_setflag(assign_ast, AST_FLAG_CNSM_REASGN);
}
break;
}
case TK_THIS:
{
name = stringtab("this");
break;
}
case TK_DOT:
{
AST_GET_CHILDREN(term, left, right);
ast_t* def = NULL;
if(ast_id(left) == TK_THIS)
{
def = (ast_t*)ast_data(term);
name = ast_name(right);
// check it's not a let or embed if it's a this variable
if((ast_id(def) == TK_FLET) || (ast_id(def) == TK_EMBED))
{
ast_error(opt->check.errors, ast,
"can't consume a let or embed field");
return false;
}
} else if (ast_id(left) == TK_CALL) {
ast_error(opt->check.errors, ast,
"consume expressions must specify a single identifier");
return false;
}
else
{
// get fully qualified string identifier without `this`
// and def of the root object
name = generate_multi_dot_name(term, &def);
// defer checking it's not a let or embed if it's not a `this` variable
// because we don't have the type info available. The expr pass will
// catch it in the `expr_consume` function.
}
if(def == NULL)
{
ast_error(opt->check.errors, ast,
"cannot consume an unknown field type");
return false;
}
ast_t* assign_ast = NULL;
if(!check_assigned_same_expression(ast, name, &assign_ast))
{
ast_error(opt->check.errors, ast,
"consuming a field is only allowed if it is reassigned in the same"
" expression");
return false;
}
consumed_same_expr = true;
// assign flag to assign_ast and all children
set_flag_recursive(assign_ast, AST_FLAG_FCNSM_REASGN);
break;
}
default:
ast_error(opt->check.errors, ast,
"consume must take 'this', a local, or a parameter");
return false;
}
// Can't consume from an outer scope while in a loop condition.
if((opt->check.frame->loop_cond != NULL) &&
!ast_within_scope(opt->check.frame->loop_cond, ast, name))
{
ast_error(opt->check.errors, ast,
"can't consume from an outer scope in a loop condition");
return false;
}
if(consumed_same_expr)
ast_setstatus(ast, name, SYM_CONSUMED_SAME_EXPR);
else
ast_setstatus(ast, name, SYM_CONSUMED);
return true;
}
static bool refer_pre_new(pass_opt_t* opt, ast_t* ast)
{
(void)opt;
pony_assert(ast_id(ast) == TK_NEW);
// Set all fields to undefined at the start of this scope.
ast_t* members = ast_parent(ast);
ast_t* member = ast_child(members);
while(member != NULL)
{
switch(ast_id(member))
{
case TK_FVAR:
case TK_FLET:
case TK_EMBED:
{
// Mark this field as SYM_UNDEFINED.
AST_GET_CHILDREN(member, id, type, expr);
ast_setstatus(ast, ast_name(id), SYM_UNDEFINED);
break;
}
default: {}
}
member = ast_sibling(member);
}
return true;
}
static bool refer_new(pass_opt_t* opt, ast_t* ast)
{
pony_assert(ast_id(ast) == TK_NEW);
ast_t* members = ast_parent(ast);
ast_t* member = ast_child(members);
bool result = true;
while(member != NULL)
{
switch(ast_id(member))
{
case TK_FVAR:
case TK_FLET:
case TK_EMBED:
{
sym_status_t status;
ast_t* id = ast_child(member);
ast_t* def = ast_get(ast, ast_name(id), &status);
if((def != member) || (status != SYM_DEFINED))
{
ast_error(opt->check.errors, def,
"field left undefined in constructor");
result = false;
}
break;
}
default: {}
}
member = ast_sibling(member);
}
if(!result)
ast_error(opt->check.errors, ast,
"constructor with undefined fields is here");
return result;
}
static bool refer_local(pass_opt_t* opt, ast_t* ast)
{
pony_assert(ast != NULL);
pony_assert(ast_type(ast) != NULL);
AST_GET_CHILDREN(ast, id, type);
pony_assert(type != NULL);
bool is_dontcare = is_name_dontcare(ast_name(id));
if(ast_id(type) == TK_NONE)
{
// No type specified, infer it later
if(!is_dontcare && !is_assigned_to(ast, false))
{
ast_error(opt->check.errors, ast,
"locals must specify a type or be assigned a value");
return false;
}
}
else if(ast_id(ast) == TK_LET)
{
// Let, check we have a value assigned
if(!is_assigned_to(ast, false))
{
ast_error(opt->check.errors, ast,
"can't declare a let local without assigning to it");
return false;
}
}
if(is_dontcare)
ast_setid(ast, TK_DONTCARE);
return true;
}
static bool refer_seq(pass_opt_t* opt, ast_t* ast)
{
(void)opt;
pony_assert(ast_id(ast) == TK_SEQ);
// If the last expression jumps away with no value, then we do too.
if(ast_checkflag(ast_childlast(ast), AST_FLAG_JUMPS_AWAY))
ast_setflag(ast, AST_FLAG_JUMPS_AWAY);
// Propagate symbol status forward in some cases where control flow branches
// always precede other branches of the same control flow structure.
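// For example, in a `try` expression the body always runs before the else
// clause, so consumes (but not defines) made in the body are pushed forward
// to it, as handled below.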
if(ast_has_scope(ast))
{
ast_t* parent = ast_parent(ast);
switch(ast_id(parent))
{
case TK_TRY:
case TK_TRY_NO_CHECK:
{
AST_GET_CHILDREN(parent, body, else_clause, then_clause);
if(body == ast)
{
// Push our consumes, but not defines, to the else clause.
ast_inheritbranch(else_clause, body);
ast_consolidate_branches(else_clause, 2);
} else if(else_clause == ast) {
// Push our consumes, but not defines, to the then clause. This
// includes the consumes from the body.
ast_inheritbranch(then_clause, else_clause);
ast_consolidate_branches(then_clause, 2);
}
}
break;
case TK_REPEAT:
{
AST_GET_CHILDREN(parent, body, cond, else_clause);
if(body == ast)
{
// Push our consumes and our defines to the cond clause.
ast_inheritstatus(cond, body);
} else if(cond == ast) {
// Push our consumes, but not defines, to the else clause. This
// includes the consumes from the body.
ast_inheritbranch(else_clause, cond);
ast_consolidate_branches(else_clause, 2);
}
}
break;
default: {}
}
}
return true;
}
static bool valid_is_comparand(pass_opt_t* opt, ast_t* ast)
{
ast_t* type;
switch(ast_id(ast))
{
case TK_TYPEREF:
type = (ast_t*) ast_data(ast);
if(ast_id(type) != TK_PRIMITIVE)
{
ast_error(opt->check.errors, ast, "identity comparison with a new object"
" will always be false");
return false;
}
return true;
case TK_SEQ:
type = ast_child(ast);
while(type != NULL)
{
if(ast_sibling(type) == NULL)
return valid_is_comparand(opt, type);
type = ast_sibling(type);
}
return true;
case TK_TUPLE:
type = ast_child(ast);
while(type != NULL)
{
if (!valid_is_comparand(opt, type))
return false;
type = ast_sibling(type);
}
return true;
default:
return true;
}
}
static bool refer_is(pass_opt_t* opt, ast_t* ast)
{
(void)opt;
pony_assert((ast_id(ast) == TK_IS) || (ast_id(ast) == TK_ISNT));
AST_GET_CHILDREN(ast, left, right);
return valid_is_comparand(opt, right) && valid_is_comparand(opt, left);
}
static bool refer_if(pass_opt_t* opt, ast_t* ast)
{
(void)opt;
pony_assert((ast_id(ast) == TK_IF) || (ast_id(ast) == TK_IFDEF));
AST_GET_CHILDREN(ast, cond, left, right);
size_t branch_count = 0;
if(!ast_checkflag(left, AST_FLAG_JUMPS_AWAY))
{
branch_count++;
ast_inheritbranch(ast, left);
}
if(!ast_checkflag(right, AST_FLAG_JUMPS_AWAY))
{
branch_count++;
ast_inheritbranch(ast, right);
}
ast_consolidate_branches(ast, branch_count);
// If all branches jump away with no value, then we do too.
if(branch_count == 0)
ast_setflag(ast, AST_FLAG_JUMPS_AWAY);
// Push our symbol status to our parent scope.
ast_inheritstatus(ast_parent(ast), ast);
return true;
}
static bool refer_iftype(pass_opt_t* opt, ast_t* ast)
{
(void)opt;
pony_assert(ast_id(ast) == TK_IFTYPE_SET);
AST_GET_CHILDREN(ast, left_clause, right);
AST_GET_CHILDREN(left_clause, sub, super, left);
size_t branch_count = 0;
if(!ast_checkflag(left, AST_FLAG_JUMPS_AWAY))
{
branch_count++;
ast_inheritbranch(ast, left);
}
if(!ast_checkflag(right, AST_FLAG_JUMPS_AWAY))
{
branch_count++;
ast_inheritbranch(ast, right);
}
ast_consolidate_branches(ast, branch_count);
// If all branches jump away with no value, then we do too.
if(branch_count == 0)
ast_setflag(ast, AST_FLAG_JUMPS_AWAY);
// Push our symbol status to our parent scope.
ast_inheritstatus(ast_parent(ast), ast);
return true;
}
static bool refer_while(pass_opt_t* opt, ast_t* ast)
{
pony_assert(ast_id(ast) == TK_WHILE);
AST_GET_CHILDREN(ast, cond, body, else_clause);
// All consumes have to be in scope when the loop body finishes.
errorframe_t errorf = NULL;
if(!ast_all_consumes_in_scope(body, body, &errorf))
{
errorframe_report(&errorf, opt->check.errors);
return false;
}
size_t branch_count = 0;
// No symbol status is inherited from the loop body. Nothing from outside the
// loop body can be consumed, and definitions in the body may not occur.
if(!ast_checkflag(body, AST_FLAG_JUMPS_AWAY))
branch_count++;
if(!ast_checkflag(else_clause, AST_FLAG_JUMPS_AWAY))
{
branch_count++;
ast_inheritbranch(ast, body);
// Use a branch count of two instead of one. This means we will pick up any
// consumes, but not any definitions, since definitions may not occur.
ast_consolidate_branches(ast, 2);
}
// If all branches jump away with no value, then we do too.
if(branch_count == 0)
ast_setflag(ast, AST_FLAG_JUMPS_AWAY);
// Push our symbol status to our parent scope.
ast_inheritstatus(ast_parent(ast), ast);
return true;
}
static bool refer_repeat(pass_opt_t* opt, ast_t* ast)
{
pony_assert(ast_id(ast) == TK_REPEAT);
AST_GET_CHILDREN(ast, body, cond, else_clause);
// All consumes have to be in scope when the loop body finishes.
errorframe_t errorf = NULL;
if(!ast_all_consumes_in_scope(body, body, &errorf))
{
errorframe_report(&errorf, opt->check.errors);
return false;
}
size_t branch_count = 0;
// No symbol status is inherited from the loop body. Nothing from outside the
// loop body can be consumed, and definitions in the body may not occur.
if(!ast_checkflag(body, AST_FLAG_JUMPS_AWAY))
{
branch_count++;
ast_inheritbranch(ast, body);
}
if(!ast_checkflag(else_clause, AST_FLAG_JUMPS_AWAY))
{
// Only include the else clause in the branch analysis
// if the loop has a break statement somewhere in it.
// This allows us to treat the entire loop body as being
// sure to execute in every case, at least for the purposes
// of analyzing variables being defined in its scope.
// For the case of errors and return statements that may
// exit the loop, they do not affect our analysis here
// because they will skip past more than just the loop itself.
if(ast_checkflag(body, AST_FLAG_MAY_BREAK))
{
branch_count++;
ast_inheritbranch(ast, else_clause);
}
}
ast_consolidate_branches(ast, branch_count);
// If all branches jump away with no value, then we do too.
if(branch_count == 0)
ast_setflag(ast, AST_FLAG_JUMPS_AWAY);
// Push our symbol status to our parent scope.
ast_inheritstatus(ast_parent(ast), ast);
return true;
}
static bool refer_match(pass_opt_t* opt, ast_t* ast)
{
(void)opt;
pony_assert(ast_id(ast) == TK_MATCH);
AST_GET_CHILDREN(ast, expr, cases, else_clause);
size_t branch_count = 0;
if(!ast_checkflag(cases, AST_FLAG_JUMPS_AWAY))
{
branch_count++;
ast_inheritbranch(ast, cases);
}
if(ast_id(else_clause) == TK_NONE)
{
branch_count++;
}
else if(!ast_checkflag(else_clause, AST_FLAG_JUMPS_AWAY))
{
branch_count++;
ast_inheritbranch(ast, else_clause);
}
// If all branches jump away with no value, then we do too.
if(branch_count == 0)
ast_setflag(ast, AST_FLAG_JUMPS_AWAY);
ast_consolidate_branches(ast, branch_count);
// Push our symbol status to our parent scope.
ast_inheritstatus(ast_parent(ast), ast);
return true;
}
static bool refer_cases(pass_opt_t* opt, ast_t* ast)
{
pony_assert(ast_id(ast) == TK_CASES);
ast_t* the_case = ast_child(ast);
if(the_case == NULL)
{
ast_error(opt->check.errors, ast, "match must have at least one case");
return false;
}
size_t branch_count = 0;
while(the_case != NULL)
{
AST_GET_CHILDREN(the_case, pattern, guard, body);
if(!ast_checkflag(body, AST_FLAG_JUMPS_AWAY))
{
branch_count++;
ast_inheritbranch(ast, the_case);
}
the_case = ast_sibling(the_case);
}
if(branch_count == 0)
ast_setflag(ast, AST_FLAG_JUMPS_AWAY);
ast_consolidate_branches(ast, branch_count);
return true;
}
static bool refer_try(pass_opt_t* opt, ast_t* ast)
{
pony_assert((ast_id(ast) == TK_TRY) || (ast_id(ast) == TK_TRY_NO_CHECK));
AST_GET_CHILDREN(ast, body, else_clause, then_clause);
size_t branch_count = 0;
if(!ast_checkflag(body, AST_FLAG_JUMPS_AWAY))
branch_count++;
if(!ast_checkflag(else_clause, AST_FLAG_JUMPS_AWAY))
branch_count++;
if(ast_checkflag(then_clause, AST_FLAG_JUMPS_AWAY))
{
ast_error(opt->check.errors, then_clause,
"then clause always terminates the function");
return false;
}
// If all branches jump away with no value, then we do too.
if(branch_count == 0)
ast_setflag(ast, AST_FLAG_JUMPS_AWAY);
// Push the symbol status from the then clause to our parent scope.
ast_inheritstatus(ast_parent(ast), then_clause);
return true;
}
static bool refer_recover(pass_opt_t* opt, ast_t* ast)
{
(void)opt;
pony_assert(ast_id(ast) == TK_RECOVER);
AST_GET_CHILDREN(ast, cap, expr);
// Push our symbol status to our parent scope.
ast_inheritstatus(ast_parent(ast), expr);
return true;
}
static bool refer_break(pass_opt_t* opt, ast_t* ast)
{
pony_assert(ast_id(ast) == TK_BREAK);
if(opt->check.frame->loop_body == NULL)
{
ast_error(opt->check.errors, ast, "must be in a loop");
return false;
}
ast_setflag(opt->check.frame->loop_body, AST_FLAG_MAY_BREAK);
errorframe_t errorf = NULL;
if(!ast_all_consumes_in_scope(opt->check.frame->loop_body, ast, &errorf))
{
errorframe_report(&errorf, opt->check.errors);
return false;
}
// break is always the last expression in a sequence
pony_assert(ast_sibling(ast) == NULL);
ast_setflag(ast, AST_FLAG_JUMPS_AWAY);
return true;
}
static bool refer_continue(pass_opt_t* opt, ast_t* ast)
{
pony_assert(ast_id(ast) == TK_CONTINUE);
if(opt->check.frame->loop_body == NULL)
{
ast_error(opt->check.errors, ast, "must be in a loop");
return false;
}
errorframe_t errorf = NULL;
if(!ast_all_consumes_in_scope(opt->check.frame->loop_body, ast, &errorf))
{
errorframe_report(&errorf, opt->check.errors);
return false;
}
// continue is always the last expression in a sequence
pony_assert(ast_sibling(ast) == NULL);
ast_setflag(ast, AST_FLAG_JUMPS_AWAY);
return true;
}
static bool refer_return(pass_opt_t* opt, ast_t* ast)
{
pony_assert(ast_id(ast) == TK_RETURN);
// return is always the last expression in a sequence
pony_assert(ast_sibling(ast) == NULL);
ast_setflag(ast, AST_FLAG_JUMPS_AWAY);
if((ast_id(opt->check.frame->method) == TK_NEW) &&
is_this_incomplete(opt, ast))
{
ast_error(opt->check.errors, ast,
"all fields must be defined before constructor returns");
return false;
}
return true;
}
static bool refer_error(pass_opt_t* opt, ast_t* ast)
{
(void)opt;
// error is always the last expression in a sequence
pony_assert(ast_sibling(ast) == NULL);
ast_setflag(ast, AST_FLAG_JUMPS_AWAY);
return true;
}
static bool refer_compile_error(pass_opt_t* opt, ast_t* ast)
{
(void)opt;
// compile_error is always the last expression in a sequence
pony_assert(ast_sibling(ast) == NULL);
ast_setflag(ast, AST_FLAG_JUMPS_AWAY);
return true;
}
ast_result_t pass_pre_refer(ast_t** astp, pass_opt_t* options)
{
ast_t* ast = *astp;
bool r = true;
switch(ast_id(ast))
{
case TK_NEW: r = refer_pre_new(options, ast); break;
case TK_CALL: r = refer_pre_call(options, ast); break;
case TK_ASSIGN: r = refer_pre_assign(options, ast); break;
default: {}
}
if(!r)
{
pony_assert(errors_get_count(options->check.errors) > 0);
return AST_ERROR;
}
return AST_OK;
}
ast_result_t pass_refer(ast_t** astp, pass_opt_t* options)
{
ast_t* ast = *astp;
bool r = true;
switch(ast_id(ast))
{
case TK_REFERENCE: r = refer_reference(options, astp); break;
case TK_DOT: r = refer_dot(options, ast); break;
case TK_QUALIFY: r = refer_qualify(options, ast); break;
case TK_ASSIGN: r = refer_assign(options, ast); break;
case TK_CONSUME: r = refer_consume(options, ast); break;
case TK_THIS: r = refer_this(options, ast); break;
case TK_NEW: r = refer_new(options, ast); break;
case TK_VAR:
case TK_LET: r = refer_local(options, ast); break;
case TK_SEQ: r = refer_seq(options, ast); break;
case TK_IFDEF:
case TK_IF: r = refer_if(options, ast); break;
case TK_IFTYPE_SET:
r = refer_iftype(options, ast); break;
case TK_WHILE: r = refer_while(options, ast); break;
case TK_REPEAT: r = refer_repeat(options, ast); break;
case TK_MATCH: r = refer_match(options, ast); break;
case TK_CASES: r = refer_cases(options, ast); break;
case TK_TRY_NO_CHECK:
case TK_TRY: r = refer_try(options, ast); break;
case TK_RECOVER: r = refer_recover(options, ast); break;
case TK_BREAK: r = refer_break(options, ast); break;
case TK_CONTINUE: r = refer_continue(options, ast); break;
case TK_RETURN: r = refer_return(options, ast); break;
case TK_ERROR: r = refer_error(options, ast); break;
case TK_COMPILE_ERROR:
r = refer_compile_error(options, ast); break;
case TK_IS:
case TK_ISNT:
r = refer_is(options, ast); break;
default: {}
}
if(!r)
{
pony_assert(errors_get_count(options->check.errors) > 0);
return AST_ERROR;
}
return AST_OK;
}
| 1 | 14,469 | @jemc, this is one change needed in `generate_multi_dot_name`. I believe this change (1) will do no harm to working Pony code: the new code was added in a place that was throwing an assert, so no working code is affected; and (2) is in sync with the rest of the method: if the parent ast node has null `data`, we are supposed to return an empty string, which is possible when we have expressions wrapped in braces. | ponylang-ponyc | c |
@@ -88,6 +88,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.common.params.CommonParams.SORT;
+import static org.apache.solr.common.params.QueryElevationParams.ONLY_ELEVATED_REPRESENTATIVE;
/**
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.carrotsearch.hppc.FloatArrayList;
import com.carrotsearch.hppc.IntArrayList;
import com.carrotsearch.hppc.IntIntHashMap;
import com.carrotsearch.hppc.IntLongHashMap;
import com.carrotsearch.hppc.cursors.IntIntCursor;
import com.carrotsearch.hppc.cursors.IntLongCursor;
import com.carrotsearch.hppc.procedures.IntProcedure;
import org.apache.commons.lang3.StringUtils;
import org.apache.lucene.codecs.DocValuesProducer;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.EmptyDocValuesProducer;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiDocValues;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.OrdinalMap;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.queries.function.FunctionQuery;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LeafFieldComparator;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryVisitor;
import org.apache.lucene.search.Scorable;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BitSetIterator;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.LongValues;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.GroupParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.handler.component.QueryElevationComponent;
import org.apache.solr.handler.component.ResponseBuilder;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.request.SolrRequestInfo;
import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.NumberType;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.schema.StrField;
import org.apache.solr.uninverting.UninvertingReader;
import org.apache.solr.util.IntFloatDynamicMap;
import org.apache.solr.util.IntIntDynamicMap;
import org.apache.solr.util.IntLongDynamicMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.common.params.CommonParams.SORT;
/**
The <b>CollapsingQParserPlugin</b> is a PostFilter that performs field collapsing.
This is a high performance alternative to standard Solr
field collapsing (with ngroups) when the number of distinct groups
in the result set is high.
<p>
Sample syntax:
<p>
Collapse based on the highest scoring document:
<p>
fq={!collapse field=field_name}
<p>
Collapse based on the min value of a numeric field:
<p>
fq={!collapse field=field_name min=field_name}
<p>
Collapse based on the max value of a numeric field:
<p>
fq={!collapse field=field_name max=field_name}
<p>
Collapse with a null policy:
<p>
fq={!collapse field=field_name nullPolicy=nullPolicy}
<p>
There are three null policies: <br>
ignore : removes docs with a null value in the collapse field (default).<br>
expand : treats each doc with a null value in the collapse field as a separate group.<br>
collapse : collapses all docs with a null value into a single group using either highest score, or min/max.
<p>
The CollapsingQParserPlugin fully supports the QueryElevationComponent
**/
public class CollapsingQParserPlugin extends QParserPlugin {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
public static final String NAME = "collapse";
public static final String HINT_TOP_FC = "top_fc";
/**
* <p>
* Indicates that values in the collapse field are unique per contiguous block, and a single pass "block based"
* collapse algorithm can be used. This behavior is the default for collapsing on the <code>_root_</code> field,
* but may also be enabled for other fields that have the same characteristics. This hint will be ignored if
other options prevent the use of this single pass approach (notably: nullPolicy=collapse)
* </p>
* <p>
* <em>Do <strong>NOT</strong> use this hint if the index is not laid out such that each unique value in the
* collapse field is guaranteed to only exist in one contiguous block, otherwise the results of the collapse
* filter will include more than one document per collapse value.</em>
* </p>
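* <p>
* Sample syntax (illustrative): <code>fq={!collapse field=_root_ hint=block}</code>
* </p>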
*/
public static final String HINT_BLOCK = "block";
/**
* @deprecated use {@link NullPolicy} instead.
*/
@Deprecated
public static final String NULL_COLLAPSE = "collapse";
@Deprecated
public static final String NULL_IGNORE = "ignore";
@Deprecated
public static final String NULL_EXPAND = "expand";
@Deprecated
public static final String HINT_MULTI_DOCVALUES = "multi_docvalues";
public enum NullPolicy {
IGNORE("ignore", 0),
COLLAPSE("collapse", 1),
EXPAND("expand", 2);
private final String name;
private final int code;
NullPolicy(String name, int code) {
this.name = name;
this.code = code;
}
public String getName() {
return name;
}
public int getCode() {
return code;
}
public static NullPolicy fromString(String nullPolicy) {
if (StringUtils.isEmpty(nullPolicy)) {
return DEFAULT_POLICY;
}
switch (nullPolicy) {
case "ignore": return IGNORE;
case "collapse": return COLLAPSE;
case "expand": return EXPAND;
default:
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Invalid nullPolicy: " + nullPolicy);
}
}
static NullPolicy DEFAULT_POLICY = IGNORE;
}
public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest request) {
return new CollapsingQParser(qstr, localParams, params, request);
}
private static class CollapsingQParser extends QParser {
public CollapsingQParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest request) {
super(qstr, localParams, params, request);
}
public Query parse() throws SyntaxError {
try {
return new CollapsingPostFilter(localParams, params, req);
} catch (Exception e) {
throw new SyntaxError(e.getMessage(), e);
}
}
}
public static enum GroupHeadSelectorType {
MIN, MAX, SORT, SCORE;
public static EnumSet<GroupHeadSelectorType> MIN_MAX = EnumSet.of(MIN, MAX);
}
/**
* Models all the information about how group head documents should be selected
*/
public static final class GroupHeadSelector {
/**
* The param value for this selector whose meaning depends on type.
* (ie: a field or valuesource for MIN/MAX, a sort string for SORT, "score" for SCORE).
* Will never be null.
*/
public final String selectorText;
/** The type for this selector, will never be null */
public final GroupHeadSelectorType type;
private GroupHeadSelector(String s, GroupHeadSelectorType type) {
assert null != s;
assert null != type;
this.selectorText = s;
this.type = type;
}
@Override
public boolean equals(final Object other) {
if (other instanceof GroupHeadSelector) {
final GroupHeadSelector that = (GroupHeadSelector) other;
return (this.type == that.type) && this.selectorText.equals(that.selectorText);
}
return false;
}
@Override
public int hashCode() {
return 17 * (31 + selectorText.hashCode()) * (31 + type.hashCode());
}
@Override
public String toString(){
return "GroupHeadSelector(selectorText=" + this.selectorText + ", type=" +this.type + ")";
}
/**
* returns a new GroupHeadSelector based on the specified local params
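* (e.g. the local params {@code {!collapse field=x min=price}} yield
* {@code GroupHeadSelector(selectorText=price, type=MIN)})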
*/
public static GroupHeadSelector build(final SolrParams localParams) {
final String sortString = StringUtils.defaultIfBlank(localParams.get(SORT), null);
final String max = StringUtils.defaultIfBlank(localParams.get("max"), null);
final String min = StringUtils.defaultIfBlank(localParams.get("min"), null);
if (1 < numNotNull(min, max, sortString)) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"At most one localparam for selecting documents (min, max, sort) may be specified: " + localParams.toString());
}
if (null != sortString) {
return new GroupHeadSelector(sortString, GroupHeadSelectorType.SORT);
} else if (null != min) {
return new GroupHeadSelector(min, GroupHeadSelectorType.MIN);
} else if (null != max) {
return new GroupHeadSelector(max, GroupHeadSelectorType.MAX);
}
// default
return new GroupHeadSelector("score", GroupHeadSelectorType.SCORE);
}
}
public static class CollapsingPostFilter extends ExtendedQueryBase implements PostFilter {
private String collapseField;
private final GroupHeadSelector groupHeadSelector;
private final SortSpec sortSpec; // may be null, parsed at most once from groupHeadSelector
public String hint;
private boolean needsScores = true;
private boolean needsScores4Collapsing = false;
private NullPolicy nullPolicy;
private Set<BytesRef> boosted; // ordered by "priority"
private int size;
public String getField(){
return this.collapseField;
}
public void setCache(boolean cache) {
}
public void setCacheSep(boolean cacheSep) {
}
public boolean getCacheSep() {
return false;
}
public boolean getCache() {
return false;
}
// Only a subset of fields in hashCode/equals?
public int hashCode() {
int hashCode = classHash();
hashCode = 31 * hashCode + collapseField.hashCode();
hashCode = 31 * hashCode + groupHeadSelector.hashCode();
hashCode = 31 * hashCode + nullPolicy.hashCode();
return hashCode;
}
public boolean equals(Object other) {
return sameClassAs(other) &&
equalsTo(getClass().cast(other));
}
private boolean equalsTo(CollapsingPostFilter other) {
return collapseField.equals(other.collapseField) &&
groupHeadSelector.equals(other.groupHeadSelector) &&
nullPolicy == other.nullPolicy;
}
@Override
public void visit(QueryVisitor visitor) {
visitor.visitLeaf(this);
}
public int getCost() {
return Math.max(super.getCost(), 100);
}
public String toString(String s) {
return "CollapsingPostFilter(field=" + this.collapseField +
", nullPolicy=" + this.nullPolicy.getName() + ", " +
this.groupHeadSelector +
(hint == null ? "": ", hint=" + this.hint) +
", size=" + this.size
+ ")";
}
public CollapsingPostFilter(SolrParams localParams, SolrParams params, SolrQueryRequest request) {
// Don't allow collapsing if grouping is being used.
if (request.getParams().getBool(GroupParams.GROUP, false)) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Can not use collapse with Grouping enabled");
}
this.collapseField = localParams.get("field");
if (this.collapseField == null) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Required 'field' param is missing.");
}
// if unknown field, this would fail fast
SchemaField collapseFieldSf = request.getSchema().getField(this.collapseField);
if (!(collapseFieldSf.isUninvertible() || collapseFieldSf.hasDocValues())) {
// uninvertible=false and docvalues=false
// field can't be indexed=false and uninvertible=true
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collapsing field '" + collapseField +
"' should be either docValues enabled or indexed with uninvertible enabled");
} else if (collapseFieldSf.multiValued()) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collapsing not supported on multivalued fields");
}
this.groupHeadSelector = GroupHeadSelector.build(localParams);
if (groupHeadSelector.type.equals(GroupHeadSelectorType.SORT) &&
CollapseScore.wantsCScore(groupHeadSelector.selectorText)) {
// we can't support Sorts that wrap functions that include "cscore()" because
// the abstraction layer for Sort/SortField rewriting gives each clause its own
// context Map which we don't have access to -- so for now, give a useful error
// (as early as possible) if attempted
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Using cscore() as a function in the 'sort' local "+
"param of the collapse parser is not supported");
}
this.sortSpec = GroupHeadSelectorType.SORT.equals(groupHeadSelector.type)
? SortSpecParsing.parseSortSpec(groupHeadSelector.selectorText, request)
: null;
this.hint = localParams.get("hint");
this.size = localParams.getInt("size", 100000); //Only used for collapsing on int fields.
{
final SolrRequestInfo info = SolrRequestInfo.getRequestInfo();
assert null != info;
// may be null in some esoteric corner usages
final ResponseBuilder rb = info.getResponseBuilder();
final SortSpec topSort = null == rb ? null : rb.getSortSpec();
this.needsScores4Collapsing = GroupHeadSelectorType.SCORE.equals(groupHeadSelector.type) ||
(GroupHeadSelectorType.SORT.equals(groupHeadSelector.type)
&& this.sortSpec.includesScore()) ||
(GroupHeadSelectorType.MIN_MAX.contains(groupHeadSelector.type)
&& CollapseScore.wantsCScore(groupHeadSelector.selectorText));
this.needsScores = needsScores4Collapsing ||
(info.getRsp().getReturnFields().wantsScore() ||
(null != topSort && topSort.includesScore()) ||
(this.boosted != null));
if (this.needsScores && null != rb) {
// regardless of why we need scores ensure the IndexSearcher will compute them
// for the "real" docs. (ie: maybe we need them because we were
// asked to compute them for the collapsed docs, maybe we need them because in
// order to find the groupHead we need them computed for us.)
rb.setFieldFlags( rb.getFieldFlags() | SolrIndexSearcher.GET_SCORES);
}
}
this.nullPolicy = NullPolicy.fromString(localParams.get("nullPolicy"));
}
@SuppressWarnings({"unchecked"})
public DelegatingCollector getFilterCollector(IndexSearcher indexSearcher) {
try {
SolrIndexSearcher searcher = (SolrIndexSearcher)indexSearcher;
CollectorFactory collectorFactory = new CollectorFactory();
//Deal with boosted docs.
//We have to deal with it here rather than in the constructor
//because the QueryElevationComponent runs after the Queries are constructed.
IntIntHashMap boostDocsMap = null;
@SuppressWarnings({"rawtypes"})
Map context = null;
SolrRequestInfo info = SolrRequestInfo.getRequestInfo();
if(info != null) {
context = info.getReq().getContext();
}
if(this.boosted == null && context != null) {
this.boosted = (Set<BytesRef>)context.get(QueryElevationComponent.BOOSTED);
}
boostDocsMap = QueryElevationComponent.getBoostDocs(searcher, this.boosted, context);
return collectorFactory.getCollector(this.collapseField,
this.groupHeadSelector,
this.sortSpec,
this.nullPolicy.getCode(),
this.hint,
this.needsScores4Collapsing,
this.needsScores,
this.size,
boostDocsMap,
searcher);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
/**
* This forces the use of the top level field cache for String fields.
* This is VERY fast at query time but slower to warm and causes FieldCache insanity.
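* (Illustrative usage: selected at query time via the top_fc hint, e.g.
* <code>fq={!collapse field=field_name hint=top_fc}</code>.)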
*/
public static LeafReader getTopFieldCacheReader(SolrIndexSearcher searcher, String collapseField) {
UninvertingReader.Type type = null;
final SchemaField f = searcher.getSchema().getFieldOrNull(collapseField);
assert null != f; // should already be enforced higher up
assert !f.multiValued(); // should already be enforced higher up
assert f.getType() instanceof StrField; // this method shouldn't be called otherwise
if (f.indexed() && f.isUninvertible()) {
type = UninvertingReader.Type.SORTED;
}
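// if the field isn't indexed (or isn't uninvertible) 'type' stays null, so the mapping
// function below returns null for it -- UninvertingReader then simply exposes no
// docValues for the field rather than attempting to uninvert it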
return UninvertingReader.wrap(
new ReaderWrapper(searcher.getSlowAtomicReader(), collapseField),
Collections.singletonMap(collapseField, type)::get);
}
private static class ReaderWrapper extends FilterLeafReader {
private final FieldInfos fieldInfos;
ReaderWrapper(LeafReader leafReader, String field) {
super(leafReader);
// TODO can we just do "field" and not bother with the other fields?
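// For the collapse field we clone its FieldInfo but advertise DocValuesType.NONE, so the
// UninvertingReader wrapped around this reader will synthesize top-level SortedDocValues
// for it (via the FieldCache) instead of any per-segment docValues being visible here.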
List<FieldInfo> newInfos = new ArrayList<>(in.getFieldInfos().size());
for (FieldInfo fieldInfo : in.getFieldInfos()) {
if (fieldInfo.name.equals(field)) {
FieldInfo f = new FieldInfo(fieldInfo.name,
fieldInfo.number,
fieldInfo.hasVectors(),
fieldInfo.hasNorms(),
fieldInfo.hasPayloads(),
fieldInfo.getIndexOptions(),
DocValuesType.NONE,
fieldInfo.getDocValuesGen(),
fieldInfo.attributes(),
fieldInfo.getPointDimensionCount(),
fieldInfo.getPointIndexDimensionCount(),
fieldInfo.getPointNumBytes(),
fieldInfo.getVectorDimension(),
fieldInfo.getVectorSearchStrategy(),
fieldInfo.isSoftDeletesField());
newInfos.add(f);
} else {
newInfos.add(fieldInfo);
}
}
FieldInfos infos = new FieldInfos(newInfos.toArray(new FieldInfo[newInfos.size()]));
this.fieldInfos = infos;
}
@Override
public FieldInfos getFieldInfos() {
return fieldInfos;
}
@Override
public SortedDocValues getSortedDocValues(String field) {
return null;
}
// NOTE: delegating the caches is wrong here as we are altering the content
// of the reader, this should ONLY be used under an uninvertingreader which
// will restore doc values back using uninversion, otherwise all sorts of
// crazy things could happen.
@Override
public CacheHelper getCoreCacheHelper() {
return in.getCoreCacheHelper();
}
@Override
public CacheHelper getReaderCacheHelper() {
return in.getReaderCacheHelper();
}
}
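/** Trivial mutable Scorable used to replay pre-computed scores/docIds to delegate collectors. */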
private static class ScoreAndDoc extends Scorable {
public float score;
public int docId;
@Override
public float score() {
return score;
}
@Override
public int docID() {
return docId;
}
}
/**
* Collapses on Ordinal Values using Score to select the group head.
* @lucene.internal
*/
static class OrdScoreCollector extends DelegatingCollector {
private LeafReaderContext[] contexts;
private final DocValuesProducer collapseValuesProducer;
private FixedBitSet collapsedSet;
private SortedDocValues collapseValues;
private OrdinalMap ordinalMap;
private SortedDocValues segmentValues;
private LongValues segmentOrdinalMap;
private MultiDocValues.MultiSortedDocValues multiSortedDocValues;
private IntIntDynamicMap ords;
private IntFloatDynamicMap scores;
private int maxDoc;
private int nullPolicy;
private float nullScore = -Float.MAX_VALUE;
private int nullDoc = -1;
private FloatArrayList nullScores;
private final BoostedDocsCollector boostedDocsCollector;
public OrdScoreCollector(int maxDoc,
int segments,
DocValuesProducer collapseValuesProducer,
int nullPolicy,
IntIntHashMap boostDocsMap,
IndexSearcher searcher) throws IOException {
this.maxDoc = maxDoc;
this.contexts = new LeafReaderContext[segments];
List<LeafReaderContext> con = searcher.getTopReaderContext().leaves();
for(int i=0; i<con.size(); i++) {
contexts[i] = con.get(i);
}
this.collapsedSet = new FixedBitSet(maxDoc);
this.collapseValuesProducer = collapseValuesProducer;
this.collapseValues = collapseValuesProducer.getSorted(null);
int valueCount = collapseValues.getValueCount();
if(collapseValues instanceof MultiDocValues.MultiSortedDocValues) {
this.multiSortedDocValues = (MultiDocValues.MultiSortedDocValues)collapseValues;
this.ordinalMap = multiSortedDocValues.mapping;
}
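// per-group state keyed by global ordinal: the best docId seen so far and its score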
this.ords = new IntIntDynamicMap(valueCount, -1);
this.scores = new IntFloatDynamicMap(valueCount, -Float.MAX_VALUE);
this.nullPolicy = nullPolicy;
if(nullPolicy == NullPolicy.EXPAND.getCode()) {
nullScores = new FloatArrayList();
}
this.boostedDocsCollector = BoostedDocsCollector.build(boostDocsMap);
}
@Override public ScoreMode scoreMode() { return ScoreMode.COMPLETE; }
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
this.contexts[context.ord] = context;
this.docBase = context.docBase;
if(ordinalMap != null) {
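// multi-segment case: use this segment's values plus a LongValues map from this
// segment's ords into the global ord space shared by the ords/scores maps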
this.segmentValues = this.multiSortedDocValues.values[context.ord];
this.segmentOrdinalMap = ordinalMap.getGlobalOrds(context.ord);
} else {
this.segmentValues = collapseValues;
}
}
@Override
public void collect(int contextDoc) throws IOException {
int globalDoc = contextDoc+this.docBase;
int ord = -1;
if(this.ordinalMap != null) {
//Handle ordinalMapping case
if (segmentValues.advanceExact(contextDoc)) {
ord = (int)segmentOrdinalMap.get(segmentValues.ordValue());
} else {
ord = -1;
}
} else {
//Handle top Level FieldCache or Single Segment Case
if (segmentValues.advanceExact(globalDoc)) {
ord = segmentValues.ordValue();
} else {
ord = -1;
}
}
// Check to see if we have documents boosted by the QueryElevationComponent
if (0 <= ord) {
if (boostedDocsCollector.collectIfBoosted(ord, globalDoc)) return;
} else {
if (boostedDocsCollector.collectInNullGroupIfBoosted(globalDoc)) return;
}
if(ord > -1) {
float score = scorer.score();
if(score > scores.get(ord)) {
ords.put(ord, globalDoc);
scores.put(ord, score);
}
} else if(nullPolicy == NullPolicy.COLLAPSE.getCode()) {
float score = scorer.score();
if(score > nullScore) {
nullScore = score;
nullDoc = globalDoc;
}
} else if(nullPolicy == NullPolicy.EXPAND.getCode()) {
collapsedSet.set(globalDoc);
nullScores.add(scorer.score());
}
}
@Override
public void finish() throws IOException {
if(contexts.length == 0) {
return;
}
// Handle the boosted docs.
boostedDocsCollector.purgeGroupsThatHaveBoostedDocs(collapsedSet,
(ord) -> { ords.remove(ord); },
() -> { nullDoc = -1; });
//Build the sorted DocSet of group heads.
if(nullDoc > -1) {
collapsedSet.set(nullDoc);
}
ords.forEachValue(doc -> collapsedSet.set(doc));
int currentContext = 0;
int currentDocBase = 0;
collapseValues = collapseValuesProducer.getSorted(null);
if(collapseValues instanceof MultiDocValues.MultiSortedDocValues) {
this.multiSortedDocValues = (MultiDocValues.MultiSortedDocValues)collapseValues;
this.ordinalMap = multiSortedDocValues.mapping;
}
if(ordinalMap != null) {
this.segmentValues = this.multiSortedDocValues.values[currentContext];
this.segmentOrdinalMap = this.ordinalMap.getGlobalOrds(currentContext);
} else {
this.segmentValues = collapseValues;
}
int nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
ScoreAndDoc dummy = new ScoreAndDoc();
leafDelegate.setScorer(dummy);
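// Replay phase: walk the surviving group heads in increasing docId order, feeding each
// head's pre-computed score to the delegate through 'dummy' (the real Scorer is no
// longer positioned/valid by the time finish() runs).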
DocIdSetIterator it = new BitSetIterator(collapsedSet, 0L); // cost is not useful here
final MergeBoost mergeBoost = boostedDocsCollector.getMergeBoost();
int docId = -1;
int index = -1;
while((docId = it.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
while(docId >= nextDocBase) {
currentContext++;
currentDocBase = contexts[currentContext].docBase;
nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
leafDelegate.setScorer(dummy);
if(ordinalMap != null) {
this.segmentValues = this.multiSortedDocValues.values[currentContext];
this.segmentOrdinalMap = this.ordinalMap.getGlobalOrds(currentContext);
}
}
int contextDoc = docId-currentDocBase;
int ord = -1;
if(this.ordinalMap != null) {
//Handle ordinalMapping case
if (segmentValues.advanceExact(contextDoc)) {
ord = (int)segmentOrdinalMap.get(segmentValues.ordValue());
}
} else {
//Handle top Level FieldCache or Single Segment Case
if (segmentValues.advanceExact(docId)) {
ord = segmentValues.ordValue();
}
}
if(ord > -1) {
dummy.score = scores.get(ord);
} else if(mergeBoost.boost(docId)) {
//Ignore so it doesn't mess up the null scoring.
} else if(this.nullPolicy == NullPolicy.COLLAPSE.getCode()) {
dummy.score = nullScore;
} else if(this.nullPolicy == NullPolicy.EXPAND.getCode()) {
dummy.score = nullScores.get(++index);
}
dummy.docId = contextDoc;
leafDelegate.collect(contextDoc);
}
if(delegate instanceof DelegatingCollector) {
((DelegatingCollector) delegate).finish();
}
}
}
/**
* Collapses on an integer field using the score to select the group head.
* @lucene.internal
*/
static class IntScoreCollector extends DelegatingCollector {
private LeafReaderContext[] contexts;
private FixedBitSet collapsedSet;
private NumericDocValues collapseValues;
private IntLongHashMap cmap;
private int maxDoc;
private int nullPolicy;
private float nullScore = -Float.MAX_VALUE;
private int nullDoc = -1;
private FloatArrayList nullScores;
private String field;
private final BoostedDocsCollector boostedDocsCollector;
public IntScoreCollector(int maxDoc,
int segments,
int nullPolicy,
int size,
String field,
IntIntHashMap boostDocsMap,
IndexSearcher searcher) {
this.maxDoc = maxDoc;
this.contexts = new LeafReaderContext[segments];
List<LeafReaderContext> con = searcher.getTopReaderContext().leaves();
for(int i=0; i<con.size(); i++) {
contexts[i] = con.get(i);
}
this.collapsedSet = new FixedBitSet(maxDoc);
this.nullPolicy = nullPolicy;
if(nullPolicy == NullPolicy.EXPAND.getCode()) {
nullScores = new FloatArrayList();
}
this.cmap = new IntLongHashMap(size);
this.field = field;
this.boostedDocsCollector = BoostedDocsCollector.build(boostDocsMap);
}
@Override public ScoreMode scoreMode() { return ScoreMode.COMPLETE; }
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
this.contexts[context.ord] = context;
this.docBase = context.docBase;
this.collapseValues = DocValues.getNumeric(context.reader(), this.field);
}
@Override
public void collect(int contextDoc) throws IOException {
final int globalDoc = docBase+contextDoc;
if (collapseValues.advanceExact(contextDoc)) {
final int collapseValue = (int) collapseValues.longValue();
// Check to see if we have documents boosted by the QueryElevationComponent (skip normal strategy based collection)
if (boostedDocsCollector.collectIfBoosted(collapseValue, globalDoc)) return;
float score = scorer.score();
final int idx;
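// HPPC idiom: indexOf returns the slot of an existing key (>= 0), or a negative
// "insertion slot" when absent; indexGet/indexReplace/indexInsert reuse that slot to
// avoid a second hash lookup.
// Each group's best (score, doc) pair is packed into a single long: the score's raw
// IEEE-754 bits in the upper 32 bits, the global docId in the lower 32. Since Lucene
// scores are non-negative, comparing the upper halves as ints orders the same as
// comparing the floats. e.g. score=1.5f (bits 0x3FC00000) with doc=7 packs to
// 0x3FC0000000000007L.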
if((idx = cmap.indexOf(collapseValue)) >= 0) {
long scoreDoc = cmap.indexGet(idx);
int testScore = (int)(scoreDoc>>32);
int currentScore = Float.floatToRawIntBits(score);
if(currentScore > testScore) {
//Current score is higher so replace the old scoreDoc with the current scoreDoc
cmap.indexReplace(idx, (((long)currentScore)<<32)+globalDoc);
}
} else {
//Combine the score and document into a long.
long scoreDoc = (((long)Float.floatToRawIntBits(score))<<32)+globalDoc;
cmap.indexInsert(idx, collapseValue, scoreDoc);
}
} else { // Null Group...
// Check to see if we have documents boosted by the QueryElevationComponent (skip normal strategy based collection)
if (boostedDocsCollector.collectInNullGroupIfBoosted(globalDoc)) return;
if(nullPolicy == NullPolicy.COLLAPSE.getCode()) {
float score = scorer.score();
if(score > this.nullScore) {
this.nullScore = score;
this.nullDoc = globalDoc;
}
} else if(nullPolicy == NullPolicy.EXPAND.getCode()) {
collapsedSet.set(globalDoc);
nullScores.add(scorer.score());
}
}
}
@Override
public void finish() throws IOException {
if(contexts.length == 0) {
return;
}
// Handle the boosted docs.
boostedDocsCollector.purgeGroupsThatHaveBoostedDocs(collapsedSet,
(key) -> { cmap.remove(key); },
() -> { nullDoc = -1; });
//Build the sorted DocSet of group heads.
if(nullDoc > -1) {
collapsedSet.set(nullDoc);
}
Iterator<IntLongCursor> it1 = cmap.iterator();
while(it1.hasNext()) {
IntLongCursor cursor = it1.next();
int doc = (int)cursor.value;
collapsedSet.set(doc);
}
int currentContext = 0;
int currentDocBase = 0;
collapseValues = DocValues.getNumeric(contexts[currentContext].reader(), this.field);
int nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
ScoreAndDoc dummy = new ScoreAndDoc();
leafDelegate.setScorer(dummy);
DocIdSetIterator it = new BitSetIterator(collapsedSet, 0L); // cost is not useful here
final MergeBoost mergeBoost = boostedDocsCollector.getMergeBoost();
int globalDoc = -1;
int nullScoreIndex = 0;
while((globalDoc = it.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
while(globalDoc >= nextDocBase) {
currentContext++;
currentDocBase = contexts[currentContext].docBase;
nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
leafDelegate.setScorer(dummy);
collapseValues = DocValues.getNumeric(contexts[currentContext].reader(), this.field);
}
final int contextDoc = globalDoc-currentDocBase;
if (collapseValues.advanceExact(contextDoc)) {
final int collapseValue = (int) collapseValues.longValue();
final long scoreDoc = cmap.get(collapseValue);
dummy.score = Float.intBitsToFloat((int)(scoreDoc>>32));
} else { // Null Group...
if(mergeBoost.boost(globalDoc)) {
//It's an elevated doc so no score is needed (and should not have been populated)
dummy.score = 0F;
} else if (nullPolicy == NullPolicy.COLLAPSE.getCode()) {
dummy.score = nullScore;
} else if(nullPolicy == NullPolicy.EXPAND.getCode()) {
dummy.score = nullScores.get(nullScoreIndex++);
}
}
dummy.docId = contextDoc;
leafDelegate.collect(contextDoc);
}
if(delegate instanceof DelegatingCollector) {
((DelegatingCollector) delegate).finish();
}
}
}
/**
* Collapse on Ordinal value field.
* @lucene.internal
*/
static class OrdFieldValueCollector extends DelegatingCollector {
private LeafReaderContext[] contexts;
private DocValuesProducer collapseValuesProducer;
private SortedDocValues collapseValues;
protected OrdinalMap ordinalMap;
protected SortedDocValues segmentValues;
protected LongValues segmentOrdinalMap;
protected MultiDocValues.MultiSortedDocValues multiSortedDocValues;
private int maxDoc;
private int nullPolicy;
private OrdFieldValueStrategy collapseStrategy;
private boolean needsScores4Collapsing;
private boolean needsScores;
private final BoostedDocsCollector boostedDocsCollector;
public OrdFieldValueCollector(int maxDoc,
int segments,
DocValuesProducer collapseValuesProducer,
int nullPolicy,
GroupHeadSelector groupHeadSelector,
SortSpec sortSpec,
boolean needsScores4Collapsing,
boolean needsScores,
FieldType fieldType,
IntIntHashMap boostDocsMap,
FunctionQuery funcQuery, IndexSearcher searcher) throws IOException{
assert ! GroupHeadSelectorType.SCORE.equals(groupHeadSelector.type);
this.maxDoc = maxDoc;
this.contexts = new LeafReaderContext[segments];
List<LeafReaderContext> con = searcher.getTopReaderContext().leaves();
for(int i=0; i<con.size(); i++) {
contexts[i] = con.get(i);
}
this.collapseValuesProducer = collapseValuesProducer;
this.collapseValues = collapseValuesProducer.getSorted(null);
if(collapseValues instanceof MultiDocValues.MultiSortedDocValues) {
this.multiSortedDocValues = (MultiDocValues.MultiSortedDocValues)collapseValues;
this.ordinalMap = multiSortedDocValues.mapping;
}
this.boostedDocsCollector = BoostedDocsCollector.build(boostDocsMap);
int valueCount = collapseValues.getValueCount();
this.nullPolicy = nullPolicy;
this.needsScores4Collapsing = needsScores4Collapsing;
this.needsScores = needsScores;
if (null != sortSpec) {
this.collapseStrategy = new OrdSortSpecStrategy(maxDoc, nullPolicy, valueCount, groupHeadSelector, this.needsScores4Collapsing, this.needsScores, boostedDocsCollector, sortSpec, searcher, collapseValues);
} else if (funcQuery != null) {
this.collapseStrategy = new OrdValueSourceStrategy(maxDoc, nullPolicy, valueCount, groupHeadSelector, this.needsScores4Collapsing, this.needsScores, boostedDocsCollector, funcQuery, searcher, collapseValues);
} else {
NumberType numType = fieldType.getNumberType();
if (null == numType) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "min/max must be either Int/Long/Float based field types");
}
switch (numType) {
case INTEGER: {
this.collapseStrategy = new OrdIntStrategy(maxDoc, nullPolicy, valueCount, groupHeadSelector, this.needsScores, boostedDocsCollector, collapseValues);
break;
}
case FLOAT: {
this.collapseStrategy = new OrdFloatStrategy(maxDoc, nullPolicy, valueCount, groupHeadSelector, this.needsScores, boostedDocsCollector, collapseValues);
break;
}
case LONG: {
this.collapseStrategy = new OrdLongStrategy(maxDoc, nullPolicy, valueCount, groupHeadSelector, this.needsScores, boostedDocsCollector, collapseValues);
break;
}
default: {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "min/max must be either Int/Long/Float field types");
}
}
}
}
@Override public ScoreMode scoreMode() { return needsScores ? ScoreMode.COMPLETE : super.scoreMode(); }
public void setScorer(Scorable scorer) throws IOException {
this.collapseStrategy.setScorer(scorer);
}
public void doSetNextReader(LeafReaderContext context) throws IOException {
this.contexts[context.ord] = context;
this.docBase = context.docBase;
this.collapseStrategy.setNextReader(context);
if(ordinalMap != null) {
this.segmentValues = this.multiSortedDocValues.values[context.ord];
this.segmentOrdinalMap = ordinalMap.getGlobalOrds(context.ord);
} else {
this.segmentValues = collapseValues;
}
}
public void collect(int contextDoc) throws IOException {
int globalDoc = contextDoc+this.docBase;
int ord = -1;
if(this.ordinalMap != null) {
if (segmentValues.advanceExact(contextDoc)) {
ord = (int)segmentOrdinalMap.get(segmentValues.ordValue());
}
} else {
if (segmentValues.advanceExact(globalDoc)) {
ord = segmentValues.ordValue();
}
}
// Check to see if we have documents boosted by the QueryElevationComponent (skip normal strategy based collection)
if (-1 == ord) {
if (boostedDocsCollector.collectInNullGroupIfBoosted(globalDoc)) return;
} else {
if (boostedDocsCollector.collectIfBoosted(ord, globalDoc)) return;
}
collapseStrategy.collapse(ord, contextDoc, globalDoc);
}
public void finish() throws IOException {
if(contexts.length == 0) {
return;
}
int currentContext = 0;
int currentDocBase = 0;
this.collapseValues = collapseValuesProducer.getSorted(null);
if(collapseValues instanceof MultiDocValues.MultiSortedDocValues) {
this.multiSortedDocValues = (MultiDocValues.MultiSortedDocValues)collapseValues;
this.ordinalMap = multiSortedDocValues.mapping;
}
if(ordinalMap != null) {
this.segmentValues = this.multiSortedDocValues.values[currentContext];
this.segmentOrdinalMap = this.ordinalMap.getGlobalOrds(currentContext);
} else {
this.segmentValues = collapseValues;
}
int nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
ScoreAndDoc dummy = new ScoreAndDoc();
leafDelegate.setScorer(dummy);
DocIdSetIterator it = new BitSetIterator(collapseStrategy.getCollapsedSet(), 0); // cost is not useful here
int globalDoc = -1;
int nullScoreIndex = 0;
IntFloatDynamicMap scores = collapseStrategy.getScores();
FloatArrayList nullScores = collapseStrategy.getNullScores();
float nullScore = collapseStrategy.getNullScore();
final MergeBoost mergeBoost = boostedDocsCollector.getMergeBoost();
while((globalDoc = it.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
while(globalDoc >= nextDocBase) {
currentContext++;
currentDocBase = contexts[currentContext].docBase;
nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
leafDelegate.setScorer(dummy);
if(ordinalMap != null) {
this.segmentValues = this.multiSortedDocValues.values[currentContext];
this.segmentOrdinalMap = this.ordinalMap.getGlobalOrds(currentContext);
}
}
int contextDoc = globalDoc-currentDocBase;
if(this.needsScores){
int ord = -1;
if(this.ordinalMap != null) {
//Handle ordinalMapping case
if (segmentValues.advanceExact(contextDoc)) {
ord = (int) segmentOrdinalMap.get(segmentValues.ordValue());
}
} else {
//Handle top Level FieldCache or Single Segment Case
if (segmentValues.advanceExact(globalDoc)) {
ord = segmentValues.ordValue();
}
}
if(ord > -1) {
dummy.score = scores.get(ord);
} else if (mergeBoost.boost(globalDoc)) {
//It's an elevated doc so no score is needed (and should not have been populated)
dummy.score = 0F;
} else if (nullPolicy == NullPolicy.COLLAPSE.getCode()) {
dummy.score = nullScore;
} else if(nullPolicy == NullPolicy.EXPAND.getCode()) {
dummy.score = nullScores.get(nullScoreIndex++);
}
}
dummy.docId = contextDoc;
leafDelegate.collect(contextDoc);
}
if(delegate instanceof DelegatingCollector) {
((DelegatingCollector) delegate).finish();
}
}
}
/**
* Collapses on an integer field.
* @lucene.internal
*/
static class IntFieldValueCollector extends DelegatingCollector {
private LeafReaderContext[] contexts;
private NumericDocValues collapseValues;
private int maxDoc;
private int nullPolicy;
private IntFieldValueStrategy collapseStrategy;
private boolean needsScores4Collapsing;
private boolean needsScores;
private String collapseField;
private final BoostedDocsCollector boostedDocsCollector;
public IntFieldValueCollector(int maxDoc,
int size,
int segments,
int nullPolicy,
String collapseField,
GroupHeadSelector groupHeadSelector,
SortSpec sortSpec,
boolean needsScores4Collapsing,
boolean needsScores,
FieldType fieldType,
IntIntHashMap boostDocsMap,
FunctionQuery funcQuery,
IndexSearcher searcher) throws IOException{
assert ! GroupHeadSelectorType.SCORE.equals(groupHeadSelector.type);
this.maxDoc = maxDoc;
this.contexts = new LeafReaderContext[segments];
List<LeafReaderContext> con = searcher.getTopReaderContext().leaves();
for(int i=0; i<con.size(); i++) {
contexts[i] = con.get(i);
}
this.collapseField = collapseField;
this.nullPolicy = nullPolicy;
this.needsScores4Collapsing = needsScores4Collapsing;
this.needsScores = needsScores;
this.boostedDocsCollector = BoostedDocsCollector.build(boostDocsMap);
if (null != sortSpec) {
this.collapseStrategy = new IntSortSpecStrategy(maxDoc, size, collapseField, nullPolicy, groupHeadSelector, this.needsScores4Collapsing, this.needsScores, boostedDocsCollector, sortSpec, searcher);
} else if (funcQuery != null) {
this.collapseStrategy = new IntValueSourceStrategy(maxDoc, size, collapseField, nullPolicy, groupHeadSelector, this.needsScores4Collapsing, this.needsScores, boostedDocsCollector, funcQuery, searcher);
} else {
NumberType numType = fieldType.getNumberType();
assert null != numType; // shouldn't make it here for non-numeric types
switch (numType) {
case INTEGER: {
this.collapseStrategy = new IntIntStrategy(maxDoc, size, collapseField, nullPolicy, groupHeadSelector, this.needsScores, boostedDocsCollector);
break;
}
case FLOAT: {
this.collapseStrategy = new IntFloatStrategy(maxDoc, size, collapseField, nullPolicy, groupHeadSelector, this.needsScores, boostedDocsCollector);
break;
}
default: {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"min/max must be Int or Float field types when collapsing on numeric fields");
}
}
}
}
@Override public ScoreMode scoreMode() { return needsScores ? ScoreMode.COMPLETE : super.scoreMode(); }
@Override
public void setScorer(Scorable scorer) throws IOException {
this.collapseStrategy.setScorer(scorer);
}
public void doSetNextReader(LeafReaderContext context) throws IOException {
this.contexts[context.ord] = context;
this.docBase = context.docBase;
this.collapseStrategy.setNextReader(context);
this.collapseValues = DocValues.getNumeric(context.reader(), this.collapseField);
}
public void collect(int contextDoc) throws IOException {
final int globalDoc = contextDoc+this.docBase;
if (collapseValues.advanceExact(contextDoc)) {
final int collapseKey = (int) collapseValues.longValue();
// Check to see if we have documents boosted by the QueryElevationComponent (skip normal strategy based collection)
if (boostedDocsCollector.collectIfBoosted(collapseKey, globalDoc)) return;
collapseStrategy.collapse(collapseKey, contextDoc, globalDoc);
} else { // Null Group...
// Check to see if we have documents boosted by the QueryElevationComponent (skip normal strategy based collection)
if (boostedDocsCollector.collectInNullGroupIfBoosted(globalDoc)) return;
if (NullPolicy.IGNORE.getCode() != nullPolicy) {
collapseStrategy.collapseNullGroup(contextDoc, globalDoc);
}
}
}
public void finish() throws IOException {
if(contexts.length == 0) {
return;
}
int currentContext = 0;
int currentDocBase = 0;
this.collapseValues = DocValues.getNumeric(contexts[currentContext].reader(), this.collapseField);
int nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
ScoreAndDoc dummy = new ScoreAndDoc();
leafDelegate.setScorer(dummy);
DocIdSetIterator it = new BitSetIterator(collapseStrategy.getCollapsedSet(), 0); // cost is not useful here
int globalDoc = -1;
int nullScoreIndex = 0;
IntIntHashMap cmap = collapseStrategy.getCollapseMap();
IntFloatDynamicMap scores = collapseStrategy.getScores();
FloatArrayList nullScores = collapseStrategy.getNullScores();
float nullScore = collapseStrategy.getNullScore();
final MergeBoost mergeBoost = boostedDocsCollector.getMergeBoost();
while((globalDoc = it.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
while(globalDoc >= nextDocBase) {
currentContext++;
currentDocBase = contexts[currentContext].docBase;
nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
leafDelegate.setScorer(dummy);
this.collapseValues = DocValues.getNumeric(contexts[currentContext].reader(), this.collapseField);
}
final int contextDoc = globalDoc-currentDocBase;
if(this.needsScores){
if (collapseValues.advanceExact(contextDoc)) {
final int collapseValue = (int) collapseValues.longValue();
final int pointer = cmap.get(collapseValue);
dummy.score = scores.get(pointer);
} else { // Null Group...
if (mergeBoost.boost(globalDoc)) {
//It's an elevated doc so no score is needed (and should not have been populated)
dummy.score = 0F;
} else if (nullPolicy == NullPolicy.COLLAPSE.getCode()) {
dummy.score = nullScore;
} else if(nullPolicy == NullPolicy.EXPAND.getCode()) {
dummy.score = nullScores.get(nullScoreIndex++);
}
}
}
dummy.docId = contextDoc;
leafDelegate.collect(contextDoc);
}
if(delegate instanceof DelegatingCollector) {
((DelegatingCollector) delegate).finish();
}
}
}
/**
* Base class for collectors that will do collapsing using "block indexed" documents
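* Assumes each group's documents are contiguous within a segment (i.e. "block indexed",
* as with nested parent/child documents), so collapsing happens in one streaming pass
* with only constant per-group state.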
*
* @lucene.internal
*/
private static abstract class AbstractBlockCollector extends DelegatingCollector {
protected final BlockGroupState currentGroupState = new BlockGroupState();
protected final String collapseField;
protected final boolean needsScores;
protected final boolean expandNulls;
private final MergeBoost boostDocs;
private int docBase = 0;
protected AbstractBlockCollector(final String collapseField,
final int nullPolicy,
final IntIntHashMap boostDocsMap,
final boolean needsScores) {
this.collapseField = collapseField;
this.needsScores = needsScores;
assert nullPolicy != NullPolicy.COLLAPSE.getCode();
assert nullPolicy == NullPolicy.IGNORE.getCode() || nullPolicy == NullPolicy.EXPAND.getCode();
this.expandNulls = (NullPolicy.EXPAND.getCode() == nullPolicy);
this.boostDocs = BoostedDocsCollector.build(boostDocsMap).getMergeBoost();
currentGroupState.resetForNewGroup();
}
@Override public ScoreMode scoreMode() { return needsScores ? ScoreMode.COMPLETE : super.scoreMode(); }
/**
* If we have a candidate match, delegate the collection of that match.
*/
protected void maybeDelegateCollect() throws IOException {
if (currentGroupState.isCurrentDocCollectable()) {
delegateCollect();
}
}
/**
* Immediately delegate the collection of the current doc
*/
protected void delegateCollect() throws IOException {
// ensure we have the 'correct' scorer
// (our super class may have set the "real" scorer on our leafDelegate
// and it may have an incorrect docID)
leafDelegate.setScorer(currentGroupState);
leafDelegate.collect(currentGroupState.docID());
}
/**
* NOTE: collects the best doc for the last group in the previous segment.
* Subclasses must call super <em>BEFORE</em> they make any changes to their own state that might influence
* collection
*/
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
maybeDelegateCollect();
// Now setup for the next segment.
currentGroupState.resetForNewGroup();
this.docBase = context.docBase;
super.doSetNextReader(context);
}
/**
* Acts as an id iterator over the boosted docs
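* NOTE: callers are expected to check docs in increasing docId order; the underlying
* MergeBoost only iterates forward through the (sorted) boosted doc ids.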
*
* @param contextDoc the context specific docId to check for, iterator is advanced to this id
* @return true if the contextDoc is boosted, false otherwise.
*/
protected boolean isBoostedAdvanceExact(final int contextDoc) {
return boostDocs.boost(contextDoc + docBase);
}
@Override
public void finish() throws IOException {
// Deal with last group (if any)...
maybeDelegateCollect();
super.finish();
}
/**
* Encapsulates basic state information about the current group, and the "best matching" document in that group (so far)
*/
protected static final class BlockGroupState extends ScoreAndDoc {
/**
* Specific values have no intrinsic meaning, but can <em>only</em>
* be considered if the current docID in {@link #docID} is non-negative
*/
private int currentGroup = 0;
private boolean groupHasBoostedDocs;
public void setCurrentGroup(final int groupId) {
this.currentGroup = groupId;
}
public int getCurrentGroup() {
assert -1 < docID();
return this.currentGroup;
}
public void setBestDocForCurrentGroup(final int contextDoc, final boolean isBoosted) {
this.docId = contextDoc;
this.groupHasBoostedDocs |= isBoosted;
}
public void resetForNewGroup() {
this.docId = -1;
this.score = Float.MIN_VALUE;
this.groupHasBoostedDocs = false;
}
public boolean hasBoostedDocs() {
assert -1 < docID();
return groupHasBoostedDocs;
}
/**
* Returns true if we have a valid ("best match") docId for the current group and there are no boosted docs
* for this group (If the current doc was boosted, it should have already been collected)
*/
public boolean isCurrentDocCollectable() {
return (-1 < docID() && ! groupHasBoostedDocs);
}
}
}
/**
* Collapses groups on a block using a field that has values unique to that block (example: <code>_root_</code>)
* choosing the group head based on score
*
* @lucene.internal
*/
static abstract class AbstractBlockScoreCollector extends AbstractBlockCollector {
public AbstractBlockScoreCollector(final String collapseField, final int nullPolicy, final IntIntHashMap boostDocsMap) {
super(collapseField, nullPolicy, boostDocsMap, true);
}
private void setCurrentGroupBestMatch(final int contextDocId, final float score, final boolean isBoosted) {
currentGroupState.setBestDocForCurrentGroup(contextDocId, isBoosted);
currentGroupState.score = score;
}
/**
* This method should be called by subclasses for each doc + group encountered
* @param contextDoc a valid doc id relative to the current reader context
* @param docGroup some unique identifier for the group - the base class makes no assumptions about its meaning
* @see #collectDocWithNullGroup
*/
protected void collectDocWithGroup(int contextDoc, int docGroup) throws IOException {
assert 0 <= contextDoc;
final boolean isBoosted = isBoostedAdvanceExact(contextDoc);
if (-1 < currentGroupState.docID() && docGroup == currentGroupState.getCurrentGroup()) {
// we have an existing group, and contextDoc is in that group.
if (isBoosted) {
// this doc is the best and should be immediately collected regardless of score
setCurrentGroupBestMatch(contextDoc, scorer.score(), isBoosted);
delegateCollect();
} else if (currentGroupState.hasBoostedDocs()) {
// No-Op: nothing about this doc matters since we've already collected boosted docs in this group
} else {
// check if this doc the new 'best' doc in this group...
final float score = scorer.score();
if (score > currentGroupState.score) {
setCurrentGroupBestMatch(contextDoc, scorer.score(), isBoosted);
}
}
} else {
// We have a document that starts a new group (or may be the first doc+group we've collected this segment)
// first collect the prior group if needed...
maybeDelegateCollect();
// then setup the new group and current best match
currentGroupState.resetForNewGroup();
currentGroupState.setCurrentGroup(docGroup);
setCurrentGroupBestMatch(contextDoc, scorer.score(), isBoosted);
if (isBoosted) { // collect immediately
delegateCollect();
}
}
}
/**
* This method should be called by subclasses for each doc encountered that is not in a group (ie: null group)
* @param contextDoc a valid doc id relative to the current reader context
* @see #collectDocWithGroup
*/
protected void collectDocWithNullGroup(int contextDoc) throws IOException {
assert 0 <= contextDoc;
// NOTE: with 'null group' docs, it doesn't matter if they are boosted since we don't support collapsing nulls
// this doc is definitely not part of any prior group, so collect if needed...
maybeDelegateCollect();
if (expandNulls) {
// set & immediately collect our current doc...
setCurrentGroupBestMatch(contextDoc, scorer.score(), false);
delegateCollect();
} else {
// we're ignoring nulls, so: No-Op.
}
// either way re-set for the next doc / group
currentGroupState.resetForNewGroup();
}
}
/**
* A block based score collector that uses a field's "ord" as the group ids
* @lucene.internal
*/
static class BlockOrdScoreCollector extends AbstractBlockScoreCollector {
private SortedDocValues segmentValues;
public BlockOrdScoreCollector(final String collapseField, final int nullPolicy, final IntIntHashMap boostDocsMap) throws IOException {
super(collapseField, nullPolicy, boostDocsMap);
}
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
super.doSetNextReader(context);
this.segmentValues = DocValues.getSorted(context.reader(), collapseField);
}
@Override
public void collect(int contextDoc) throws IOException {
if (segmentValues.advanceExact(contextDoc)) {
int ord = segmentValues.ordValue();
collectDocWithGroup(contextDoc, ord);
} else {
collectDocWithNullGroup(contextDoc);
}
}
}
/**
* A block based score collector that uses a field's numeric value as the group ids
* @lucene.internal
*/
static class BlockIntScoreCollector extends AbstractBlockScoreCollector {
private NumericDocValues segmentValues;
public BlockIntScoreCollector(final String collapseField, final int nullPolicy, final IntIntHashMap boostDocsMap) throws IOException {
super(collapseField, nullPolicy, boostDocsMap);
}
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
super.doSetNextReader(context);
this.segmentValues = DocValues.getNumeric(context.reader(), collapseField);
}
@Override
public void collect(int contextDoc) throws IOException {
if (segmentValues.advanceExact(contextDoc)) {
int group = (int) segmentValues.longValue();
collectDocWithGroup(contextDoc, group);
} else {
collectDocWithNullGroup(contextDoc);
}
}
}
/**
* <p>
* Collapses groups on a block using a field that has values unique to that block (example: <code>_root_</code>)
* choosing the group head based on a {@link SortSpec}
* (which can be synthetically created for min/max group head selectors using {@link #getSort})
* </p>
* <p>
* Note that since this collector does a single pass, and unlike other collectors doesn't need to maintain a large data
* structure of scores (for all matching docs) when they might be needed for the response, it has no need to distinguish
* between the concepts of <code>needsScores4Collapsing</code> vs <code>needsScores</code>
* </p>
* @lucene.internal
*/
static abstract class AbstractBlockSortSpecCollector extends AbstractBlockCollector {
/**
* Helper method for extracting a {@link Sort} out of a {@link SortSpec} <em>or</em> creating one synthetically for
* "min/max" {@link GroupHeadSelector} against a {@link FunctionQuery} <em>or</em> simple field name.
*
* @return appropriate (already re-written) Sort to use with an AbstractBlockSortSpecCollector
*/
public static Sort getSort(final GroupHeadSelector groupHeadSelector,
final SortSpec sortSpec,
final FunctionQuery funcQuery,
final SolrIndexSearcher searcher) throws IOException {
if (null != sortSpec) {
assert GroupHeadSelectorType.SORT.equals(groupHeadSelector.type);
// a "feature" of SortSpec is that getSort() is null if we're just using 'score desc'
if (null == sortSpec.getSort()) {
return Sort.RELEVANCE.rewrite(searcher);
}
return sortSpec.getSort().rewrite(searcher);
} // else: min/max on field or value source...
assert GroupHeadSelectorType.MIN_MAX.contains(groupHeadSelector.type);
assert ! CollapseScore.wantsCScore(groupHeadSelector.selectorText);
final boolean reverse = GroupHeadSelectorType.MAX.equals(groupHeadSelector.type);
final SortField sf = (null != funcQuery)
? funcQuery.getValueSource().getSortField(reverse)
: searcher.getSchema().getField(groupHeadSelector.selectorText).getSortField(reverse);
return (new Sort(sf)).rewrite(searcher);
}
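/** Compares each candidate doc against the current group head using the (rewritten) sort fields. */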
private final BlockBasedSortFieldsCompare sortsCompare;
public AbstractBlockSortSpecCollector(final String collapseField,
final int nullPolicy,
final IntIntHashMap boostDocsMap,
final Sort sort,
final boolean needsScores) {
super(collapseField, nullPolicy, boostDocsMap, needsScores);
this.sortsCompare = new BlockBasedSortFieldsCompare(sort.getSort());
}
@Override
public void setScorer(Scorable scorer) throws IOException {
sortsCompare.setScorer(scorer);
super.setScorer(scorer);
}
private void setCurrentGroupBestMatch(final int contextDocId, final boolean isBoosted) throws IOException {
currentGroupState.setBestDocForCurrentGroup(contextDocId, isBoosted);
if (needsScores) {
currentGroupState.score = scorer.score();
}
}
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
super.doSetNextReader(context);
this.sortsCompare.setNextReader(context);
}
/**
* This method should be called by subclasses for each doc + group encountered
* @param contextDoc a valid doc id relative to the current reader context
* @param docGroup some unique identifier for the group - the base class makes no assumptions about its meaning
* @see #collectDocWithNullGroup
*/
protected void collectDocWithGroup(int contextDoc, int docGroup) throws IOException {
assert 0 <= contextDoc;
final boolean isBoosted = isBoostedAdvanceExact(contextDoc);
if (-1 < currentGroupState.docID() && docGroup == currentGroupState.getCurrentGroup()) {
// we have an existing group, and contextDoc is in that group.
if (isBoosted) {
// this doc is the best and should be immediately collected regardless of sort values
setCurrentGroupBestMatch(contextDoc, isBoosted);
delegateCollect();
} else if (currentGroupState.hasBoostedDocs()) {
// No-Op: nothing about this doc matters since we've already collected boosted docs in this group
} else {
// check if it's the new 'best' doc in this group...
if (sortsCompare.testAndSetGroupValues(contextDoc)) {
setCurrentGroupBestMatch(contextDoc, isBoosted);
}
}
} else {
// We have a document that starts a new group (or may be the first doc+group we've collected this segment)
// first collect the prior group if needed...
maybeDelegateCollect();
// then setup the new group and current best match
currentGroupState.resetForNewGroup();
currentGroupState.setCurrentGroup(docGroup);
sortsCompare.setGroupValues(contextDoc);
setCurrentGroupBestMatch(contextDoc, isBoosted);
if (isBoosted) { // collect immediately
delegateCollect();
}
}
}
/**
* This method should be called by subclasses for each doc encountered that is not in a group (ie: null group)
* @param contextDoc a valid doc id relative to the current reader context
* @see #collectDocWithGroup
*/
protected void collectDocWithNullGroup(int contextDoc) throws IOException {
assert 0 <= contextDoc;
// NOTE: with 'null group' docs, it doesn't matter if they are boosted since we don't support collapsing nulls
// this doc is definitely not part of any prior group, so collect if needed...
maybeDelegateCollect();
if (expandNulls) {
// set & immediately collect our current doc...
setCurrentGroupBestMatch(contextDoc, false);
// NOTE: sort values don't matter
delegateCollect();
} else {
// we're ignoring nulls, so: No-Op.
}
// either way re-set for the next doc / group
currentGroupState.resetForNewGroup();
}
}
/**
* A block based score collector that uses a field's "ord" as the group ids
* @lucene.internal
*/
static class BlockOrdSortSpecCollector extends AbstractBlockSortSpecCollector {
private SortedDocValues segmentValues;
public BlockOrdSortSpecCollector(final String collapseField,
final int nullPolicy,
final IntIntHashMap boostDocsMap,
final Sort sort,
final boolean needsScores) throws IOException {
super(collapseField, nullPolicy, boostDocsMap, sort, needsScores);
}
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
super.doSetNextReader(context);
this.segmentValues = DocValues.getSorted(context.reader(), collapseField);
}
@Override
public void collect(int contextDoc) throws IOException {
if (segmentValues.advanceExact(contextDoc)) {
int ord = segmentValues.ordValue();
collectDocWithGroup(contextDoc, ord);
} else {
collectDocWithNullGroup(contextDoc);
}
}
}
/**
* A block based score collector that uses a field's numeric value as the group ids
* @lucene.internal
*/
static class BlockIntSortSpecCollector extends AbstractBlockSortSpecCollector {
private NumericDocValues segmentValues;
public BlockIntSortSpecCollector(final String collapseField,
final int nullPolicy,
final IntIntHashMap boostDocsMap,
final Sort sort,
final boolean needsScores) throws IOException {
super(collapseField, nullPolicy, boostDocsMap, sort, needsScores);
}
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
super.doSetNextReader(context);
this.segmentValues = DocValues.getNumeric(context.reader(), collapseField);
}
@Override
public void collect(int contextDoc) throws IOException {
if (segmentValues.advanceExact(contextDoc)) {
int group = (int) segmentValues.longValue();
collectDocWithGroup(contextDoc, group);
} else {
collectDocWithNullGroup(contextDoc);
}
}
}
private static class CollectorFactory {
/** @see #isNumericCollapsible */
private final static EnumSet<NumberType> NUMERIC_COLLAPSIBLE_TYPES = EnumSet.of(NumberType.INTEGER,
NumberType.FLOAT);
private boolean isNumericCollapsible(FieldType collapseFieldType) {
return NUMERIC_COLLAPSIBLE_TYPES.contains(collapseFieldType.getNumberType());
}
public DelegatingCollector getCollector(String collapseField,
GroupHeadSelector groupHeadSelector,
SortSpec sortSpec,
int nullPolicy,
String hint,
boolean needsScores4Collapsing,
boolean needsScores,
int size,
IntIntHashMap boostDocs,
SolrIndexSearcher searcher) throws IOException {
DocValuesProducer docValuesProducer = null;
FunctionQuery funcQuery = null;
// block collapsing logic is much simpler and uses less memory, but is only viable in specific situations
final boolean blockCollapse = (("_root_".equals(collapseField) || HINT_BLOCK.equals(hint))
// because we currently handle all min/max cases using
// AbstractBlockSortSpecCollector, we can't handle functions wrapping cscore()
// (for the same reason cscore() isn't supported in 'sort' local param)
&& ( ! CollapseScore.wantsCScore(groupHeadSelector.selectorText) )
// and block collapse can't choose a group head for the null group, so nullPolicy=collapse is also out
&& NullPolicy.COLLAPSE.getCode() != nullPolicy);
if (HINT_BLOCK.equals(hint) && ! blockCollapse) {
log.debug("Query specifies hint={} but other local params prevent the use of block based collapse", HINT_BLOCK);
}
FieldType collapseFieldType = searcher.getSchema().getField(collapseField).getType();
if(collapseFieldType instanceof StrField) {
// if we are using blockCollapse, then there is no need to bother with TOP_FC
if(HINT_TOP_FC.equals(hint) && ! blockCollapse) {
@SuppressWarnings("resource")
final LeafReader uninvertingReader = getTopFieldCacheReader(searcher, collapseField);
docValuesProducer = new EmptyDocValuesProducer() {
@Override
public SortedDocValues getSorted(FieldInfo ignored) throws IOException {
return uninvertingReader.getSortedDocValues(collapseField);
}
};
} else {
docValuesProducer = new EmptyDocValuesProducer() {
@Override
public SortedDocValues getSorted(FieldInfo ignored) throws IOException {
return DocValues.getSorted(searcher.getSlowAtomicReader(), collapseField);
}
};
}
} else {
if(HINT_TOP_FC.equals(hint)) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"top_fc hint is only supported when collapsing on String Fields");
}
}
FieldType minMaxFieldType = null;
if (GroupHeadSelectorType.MIN_MAX.contains(groupHeadSelector.type)) {
final String text = groupHeadSelector.selectorText;
if (text.indexOf("(") == -1) {
minMaxFieldType = searcher.getSchema().getField(text).getType();
} else {
SolrParams params = new ModifiableSolrParams();
try (SolrQueryRequest request = new LocalSolrQueryRequest(searcher.getCore(), params)) {
FunctionQParser functionQParser = new FunctionQParser(text, null, null,request);
funcQuery = (FunctionQuery)functionQParser.parse();
} catch (SyntaxError e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
}
}
}
int maxDoc = searcher.maxDoc();
int leafCount = searcher.getTopReaderContext().leaves().size();
if (GroupHeadSelectorType.SCORE.equals(groupHeadSelector.type)) {
if (collapseFieldType instanceof StrField) {
if (blockCollapse) {
return new BlockOrdScoreCollector(collapseField, nullPolicy, boostDocs);
}
return new OrdScoreCollector(maxDoc, leafCount, docValuesProducer, nullPolicy, boostDocs, searcher);
} else if (isNumericCollapsible(collapseFieldType)) {
if (blockCollapse) {
return new BlockIntScoreCollector(collapseField, nullPolicy, boostDocs);
}
return new IntScoreCollector(maxDoc, leafCount, nullPolicy, size, collapseField, boostDocs, searcher);
} else {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Collapsing field should be of either String, Int or Float type");
}
} else { // min, max, sort, etc... something other than just "score"
if (collapseFieldType instanceof StrField) {
if (blockCollapse) {
// NOTE: for now we don't worry about whether this is a sortSpec or min/max groupHeadSelector,
// we use a 'sort spec' based block collector unless/until there is some (performance?) reason to specialize
return new BlockOrdSortSpecCollector(collapseField, nullPolicy, boostDocs,
BlockOrdSortSpecCollector.getSort(groupHeadSelector,
sortSpec, funcQuery, searcher),
needsScores || needsScores4Collapsing);
}
return new OrdFieldValueCollector(maxDoc,
leafCount,
docValuesProducer,
nullPolicy,
groupHeadSelector,
sortSpec,
needsScores4Collapsing,
needsScores,
minMaxFieldType,
boostDocs,
funcQuery,
searcher);
} else if (isNumericCollapsible(collapseFieldType)) {
if (blockCollapse) {
// NOTE: for now we don't worry about whether this is a sortSpec or min/max groupHeadSelector,
// we use a 'sort spec' based block collector unless/until there is some (performance?) reason to specialize
return new BlockIntSortSpecCollector(collapseField, nullPolicy, boostDocs,
BlockOrdSortSpecCollector.getSort(groupHeadSelector,
sortSpec, funcQuery, searcher),
needsScores || needsScores4Collapsing);
}
return new IntFieldValueCollector(maxDoc,
size,
leafCount,
nullPolicy,
collapseField,
groupHeadSelector,
sortSpec,
needsScores4Collapsing,
needsScores,
minMaxFieldType,
boostDocs,
funcQuery,
searcher);
} else {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Collapsing field should be of either String, Int or Float type");
}
}
}
}
public static final class CollapseScore {
/**
* Inspects the GroupHeadSelector to determine if this CollapseScore is needed.
* If it is, then "this" will be added to the readerContext
* using the "CSCORE" key, and true will be returned. If not, returns false.
*/
@SuppressWarnings({"unchecked"})
public boolean setupIfNeeded(final GroupHeadSelector groupHeadSelector,
@SuppressWarnings({"rawtypes"})final Map readerContext) {
// HACK, but not really any better options until/unless we can recursively
// ask value sources if they depend on score
if (wantsCScore(groupHeadSelector.selectorText)) {
readerContext.put("CSCORE", this);
return true;
}
return false;
}
/**
* Huge HACK, but not really any better options until/unless we can recursively
* ask value sources if they depend on score
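* (this is a simple substring check, so any selector text embedding the literal
* "cscore()" is detected -- e.g. a hypothetical min=sum(cscore(),field(x)))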
*/
public static boolean wantsCScore(final String text) {
return (0 <= text.indexOf("cscore()"));
}
private CollapseScore() {
// No-Op
}
public float score;
}
/*
* Collapse Strategies
*/
/**
* The abstract base Strategy for collapse strategies that collapse on an ordinal
* using min/max field value to select the group head.
*
*/
private static abstract class OrdFieldValueStrategy {
protected int nullPolicy;
protected IntIntDynamicMap ords;
protected Scorable scorer;
protected FloatArrayList nullScores;
protected float nullScore;
protected IntFloatDynamicMap scores;
protected FixedBitSet collapsedSet;
protected int nullDoc = -1;
protected boolean needsScores;
private final BoostedDocsCollector boostedDocsCollector;
public abstract void collapse(int ord, int contextDoc, int globalDoc) throws IOException;
public abstract void setNextReader(LeafReaderContext context) throws IOException;
public OrdFieldValueStrategy(int maxDoc,
int valueCount,
int nullPolicy,
boolean needsScores,
BoostedDocsCollector boostedDocsCollector,
SortedDocValues values) {
this.ords = new IntIntDynamicMap(valueCount, -1);
this.nullPolicy = nullPolicy;
this.needsScores = needsScores;
this.collapsedSet = new FixedBitSet(maxDoc);
this.boostedDocsCollector = boostedDocsCollector;
if (this.needsScores) {
this.scores = new IntFloatDynamicMap(valueCount, 0.0f);
if(nullPolicy == NullPolicy.EXPAND.getCode()) {
nullScores = new FloatArrayList();
}
}
}
public FixedBitSet getCollapsedSet() {
// Handle the boosted docs.
boostedDocsCollector.purgeGroupsThatHaveBoostedDocs(collapsedSet,
(ord) -> { ords.remove(ord); },
() -> { nullDoc = -1; });
//Build the sorted DocSet of group heads.
if(nullDoc > -1) {
this.collapsedSet.set(nullDoc);
}
ords.forEachValue(doc -> collapsedSet.set(doc));
return collapsedSet;
}
public void setScorer(Scorable scorer) throws IOException {
this.scorer = scorer;
}
public FloatArrayList getNullScores() {
return nullScores;
}
public float getNullScore() {
return this.nullScore;
}
public IntFloatDynamicMap getScores() {
return scores;
}
}
/*
* Strategy for collapsing on ordinal using min/max of an int field to select the group head.
*/
private static class OrdIntStrategy extends OrdFieldValueStrategy {
private final String field;
private NumericDocValues minMaxValues;
private IntCompare comp;
private int nullVal;
private IntIntDynamicMap ordVals;
public OrdIntStrategy(int maxDoc,
int nullPolicy,
int valueCount,
GroupHeadSelector groupHeadSelector,
boolean needsScores,
BoostedDocsCollector boostedDocsCollector,
SortedDocValues values) throws IOException {
super(maxDoc, valueCount, nullPolicy, needsScores, boostedDocsCollector, values);
this.field = groupHeadSelector.selectorText;
assert GroupHeadSelectorType.MIN_MAX.contains(groupHeadSelector.type);
if (GroupHeadSelectorType.MAX.equals(groupHeadSelector.type)) {
comp = new MaxIntComp();
this.ordVals = new IntIntDynamicMap(valueCount, Integer.MIN_VALUE);
} else {
comp = new MinIntComp();
this.ordVals = new IntIntDynamicMap(valueCount, Integer.MAX_VALUE);
this.nullVal = Integer.MAX_VALUE;
}
}
public void setNextReader(LeafReaderContext context) throws IOException {
this.minMaxValues = DocValues.getNumeric(context.reader(), this.field);
}
public void collapse(int ord, int contextDoc, int globalDoc) throws IOException {
int currentVal;
if (minMaxValues.advanceExact(contextDoc)) {
currentVal = (int) minMaxValues.longValue();
} else {
currentVal = 0;
}
if(ord > -1) {
if(comp.test(currentVal, ordVals.get(ord))) {
ords.put(ord, globalDoc);
ordVals.put(ord, currentVal);
if(needsScores) {
scores.put(ord, scorer.score());
}
}
} else if(this.nullPolicy == NullPolicy.COLLAPSE.getCode()) {
if(comp.test(currentVal, nullVal)) {
nullVal = currentVal;
nullDoc = globalDoc;
if(needsScores) {
nullScore = scorer.score();
}
}
} else if(this.nullPolicy == NullPolicy.EXPAND.getCode()) {
this.collapsedSet.set(globalDoc);
if(needsScores) {
nullScores.add(scorer.score());
}
}
}
}
/**
* Strategy for collapsing on ordinal and using the min/max value of a float
* field to select the group head
*/
private static class OrdFloatStrategy extends OrdFieldValueStrategy {
private final String field;
private NumericDocValues minMaxValues;
private FloatCompare comp;
private float nullVal;
private IntFloatDynamicMap ordVals;
public OrdFloatStrategy(int maxDoc,
int nullPolicy,
int valueCount,
GroupHeadSelector groupHeadSelector,
boolean needsScores,
BoostedDocsCollector boostedDocsCollector,
SortedDocValues values) throws IOException {
super(maxDoc, valueCount, nullPolicy, needsScores, boostedDocsCollector, values);
this.field = groupHeadSelector.selectorText;
assert GroupHeadSelectorType.MIN_MAX.contains(groupHeadSelector.type);
if (GroupHeadSelectorType.MAX.equals(groupHeadSelector.type)) {
comp = new MaxFloatComp();
this.ordVals = new IntFloatDynamicMap(valueCount, -Float.MAX_VALUE);
this.nullVal = -Float.MAX_VALUE;
} else {
comp = new MinFloatComp();
this.ordVals = new IntFloatDynamicMap(valueCount, Float.MAX_VALUE);
this.nullVal = Float.MAX_VALUE;
}
}
public void setNextReader(LeafReaderContext context) throws IOException {
this.minMaxValues = DocValues.getNumeric(context.reader(), this.field);
}
public void collapse(int ord, int contextDoc, int globalDoc) throws IOException {
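// the numeric docValues for a float field hold the float's raw int bits, so the value
// is reinterpreted via Float.intBitsToFloat below rather than cast numerically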
int currentMinMax;
if (minMaxValues.advanceExact(contextDoc)) {
currentMinMax = (int) minMaxValues.longValue();
} else {
currentMinMax = 0;
}
float currentVal = Float.intBitsToFloat(currentMinMax);
if(ord > -1) {
if(comp.test(currentVal, ordVals.get(ord))) {
ords.put(ord, globalDoc);
ordVals.put(ord, currentVal);
if(needsScores) {
scores.put(ord, scorer.score());
}
}
} else if(this.nullPolicy == NullPolicy.COLLAPSE.getCode()) {
if(comp.test(currentVal, nullVal)) {
nullVal = currentVal;
nullDoc = globalDoc;
if(needsScores) {
nullScore = scorer.score();
}
}
} else if(this.nullPolicy == NullPolicy.EXPAND.getCode()) {
this.collapsedSet.set(globalDoc);
if(needsScores) {
nullScores.add(scorer.score());
}
}
}
}
/*
* Strategy for collapsing on ordinal and using the min/max value of a long
* field to select the group head
*/
private static class OrdLongStrategy extends OrdFieldValueStrategy {
private final String field;
private NumericDocValues minMaxVals;
private LongCompare comp;
private long nullVal;
private IntLongDynamicMap ordVals;
public OrdLongStrategy(int maxDoc,
int nullPolicy,
int valueCount,
GroupHeadSelector groupHeadSelector,
boolean needsScores,
BoostedDocsCollector boostedDocsCollector,
SortedDocValues values) throws IOException {
super(maxDoc, valueCount, nullPolicy, needsScores, boostedDocsCollector, values);
this.field = groupHeadSelector.selectorText;
assert GroupHeadSelectorType.MIN_MAX.contains(groupHeadSelector.type);
if (GroupHeadSelectorType.MAX.equals(groupHeadSelector.type)) {
comp = new MaxLongComp();
this.ordVals = new IntLongDynamicMap(valueCount, Long.MIN_VALUE);
} else {
this.nullVal = Long.MAX_VALUE;
comp = new MinLongComp();
this.ordVals = new IntLongDynamicMap(valueCount, Long.MAX_VALUE);
}
}
public void setNextReader(LeafReaderContext context) throws IOException {
this.minMaxVals = DocValues.getNumeric(context.reader(), this.field);
}
public void collapse(int ord, int contextDoc, int globalDoc) throws IOException {
long currentVal;
if (minMaxVals.advanceExact(contextDoc)) {
currentVal = minMaxVals.longValue();
} else {
currentVal = 0;
}
if(ord > -1) {
if(comp.test(currentVal, ordVals.get(ord))) {
ords.put(ord, globalDoc);
ordVals.put(ord, currentVal);
if(needsScores) {
scores.put(ord, scorer.score());
}
}
} else if(this.nullPolicy == NullPolicy.COLLAPSE.getCode()) {
if(comp.test(currentVal, nullVal)) {
nullVal = currentVal;
nullDoc = globalDoc;
if(needsScores) {
nullScore = scorer.score();
}
}
} else if(this.nullPolicy == NullPolicy.EXPAND.getCode()) {
this.collapsedSet.set(globalDoc);
if(needsScores) {
nullScores.add(scorer.score());
}
}
}
}
/*
* Strategy for collapsing on ordinal and using the min/max value of a value source function
* to select the group head
*/
private static class OrdValueSourceStrategy extends OrdFieldValueStrategy {
private FloatCompare comp;
private float nullVal;
private ValueSource valueSource;
private FunctionValues functionValues;
private IntFloatDynamicMap ordVals;
@SuppressWarnings({"rawtypes"})
private Map rcontext;
private final CollapseScore collapseScore = new CollapseScore();
private boolean needsScores4Collapsing;
public OrdValueSourceStrategy(int maxDoc,
int nullPolicy,
int valueCount,
GroupHeadSelector groupHeadSelector,
boolean needsScores4Collapsing,
boolean needsScores,
BoostedDocsCollector boostedDocsCollector,
FunctionQuery funcQuery,
IndexSearcher searcher,
SortedDocValues values) throws IOException {
super(maxDoc, valueCount, nullPolicy, needsScores, boostedDocsCollector, values);
this.needsScores4Collapsing = needsScores4Collapsing;
this.valueSource = funcQuery.getValueSource();
this.rcontext = ValueSource.newContext(searcher);
assert GroupHeadSelectorType.MIN_MAX.contains(groupHeadSelector.type);
if (GroupHeadSelectorType.MAX.equals(groupHeadSelector.type)) {
comp = new MaxFloatComp();
this.ordVals = new IntFloatDynamicMap(valueCount, -Float.MAX_VALUE);
} else {
this.nullVal = Float.MAX_VALUE;
comp = new MinFloatComp();
this.ordVals = new IntFloatDynamicMap(valueCount, Float.MAX_VALUE);
}
collapseScore.setupIfNeeded(groupHeadSelector, rcontext);
}
@SuppressWarnings({"unchecked"})
public void setNextReader(LeafReaderContext context) throws IOException {
functionValues = this.valueSource.getValues(rcontext, context);
}
public void collapse(int ord, int contextDoc, int globalDoc) throws IOException {
float score = 0;
if (needsScores4Collapsing) {
score = scorer.score();
this.collapseScore.score = score;
}
float currentVal = functionValues.floatVal(contextDoc);
if(ord > -1) {
if(comp.test(currentVal, ordVals.get(ord))) {
ords.put(ord, globalDoc);
ordVals.put(ord, currentVal);
if(needsScores) {
if (!needsScores4Collapsing) {
score = scorer.score();
}
scores.put(ord, score);
}
}
} else if(this.nullPolicy == NullPolicy.COLLAPSE.getCode()) {
if(comp.test(currentVal, nullVal)) {
nullVal = currentVal;
nullDoc = globalDoc;
if(needsScores) {
if (!needsScores4Collapsing) {
score = scorer.score();
}
nullScore = score;
}
}
} else if(this.nullPolicy == NullPolicy.EXPAND.getCode()) {
this.collapsedSet.set(globalDoc);
if(needsScores) {
if (!needsScores4Collapsing) {
score = scorer.score();
}
nullScores.add(score);
}
}
}
}
/*
* Strategy for collapsing on ordinal and using the first document according to a complex sort
* as the group head
*/
private static class OrdSortSpecStrategy extends OrdFieldValueStrategy {
private final SortFieldsCompare compareState;
private final Sort sort;
private float score;
private boolean needsScores4Collapsing;
public OrdSortSpecStrategy(int maxDoc,
int nullPolicy,
int valueCount,
GroupHeadSelector groupHeadSelector,
boolean needsScores4Collapsing,
boolean needsScores,
BoostedDocsCollector boostedDocsCollector,
SortSpec sortSpec,
IndexSearcher searcher,
SortedDocValues values) throws IOException {
super(maxDoc, valueCount, nullPolicy, needsScores, boostedDocsCollector, values);
this.needsScores4Collapsing = needsScores4Collapsing;
assert GroupHeadSelectorType.SORT.equals(groupHeadSelector.type);
this.sort = rewriteSort(sortSpec, searcher);
this.compareState = new SortFieldsCompare(sort.getSort(), valueCount);
}
@Override
public void setNextReader(LeafReaderContext context) throws IOException {
compareState.setNextReader(context);
}
@Override
public void setScorer(Scorable s) throws IOException {
super.setScorer(s);
this.compareState.setScorer(s);
}
@Override
public void collapse(int ord, int contextDoc, int globalDoc) throws IOException {
if (needsScores4Collapsing) {
this.score = scorer.score();
}
if (ord > -1) { // real collapseKey
if (-1 == ords.get(ord)) {
// we've never seen this ord (aka: collapseKey) before, treat it as group head for now
compareState.setGroupValues(ord, contextDoc);
ords.put(ord, globalDoc);
if (needsScores) {
if (!needsScores4Collapsing) {
this.score = scorer.score();
}
scores.put(ord, score);
}
} else {
// test this ord to see if it's a new group leader
if (compareState.testAndSetGroupValues(ord, contextDoc)) {//TODO X
ords.put(ord, globalDoc);
if (needsScores) {
if (!needsScores4Collapsing) {
this.score = scorer.score();
}
scores.put(ord, score);
}
}
}
} else if (this.nullPolicy == NullPolicy.COLLAPSE.getCode()) {
if (-1 == nullDoc) {
// we've never seen a doc with null collapse key yet, treat it as the null group head for now
compareState.setNullGroupValues(contextDoc);
nullDoc = globalDoc;
if (needsScores) {
if (!needsScores4Collapsing) {
this.score = scorer.score();
}
nullScore = score;
}
} else {
// test this doc to see if it's the new null leader
if (compareState.testAndSetNullGroupValues(contextDoc)) {
nullDoc = globalDoc;
if (needsScores) {
if (!needsScores4Collapsing) {
this.score = scorer.score();
}
nullScore = score;
}
}
}
} else if(this.nullPolicy == NullPolicy.EXPAND.getCode()) {
this.collapsedSet.set(globalDoc);
if (needsScores) {
if (!needsScores4Collapsing) {
this.score = scorer.score();
}
nullScores.add(score);
}
}
}
}
/*
* Base strategy for collapsing on a 32 bit numeric field and selecting a group head
* based on min/max value of a 32 bit numeric field.
*/
private static abstract class IntFieldValueStrategy {
protected int nullPolicy;
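    // cmap maps each collapse key to a dense slot index (the "pointer" used by
    // the subclasses) into the parallel docs/scores/value maps.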
protected IntIntHashMap cmap;
protected Scorable scorer;
protected FloatArrayList nullScores;
protected float nullScore;
protected IntFloatDynamicMap scores;
protected FixedBitSet collapsedSet;
protected int nullDoc = -1;
protected boolean needsScores;
protected String collapseField;
protected IntIntDynamicMap docs;
private final BoostedDocsCollector boostedDocsCollector;
public abstract void collapseNullGroup(int contextDoc, int globalDoc) throws IOException;
public abstract void collapse(int collapseKey, int contextDoc, int globalDoc) throws IOException;
public abstract void setNextReader(LeafReaderContext context) throws IOException;
public IntFieldValueStrategy(int maxDoc,
int size,
String collapseField,
int nullPolicy,
boolean needsScores,
BoostedDocsCollector boostedDocsCollector) {
this.collapseField = collapseField;
this.nullPolicy = nullPolicy;
this.needsScores = needsScores;
this.collapsedSet = new FixedBitSet(maxDoc);
this.cmap = new IntIntHashMap(size);
this.docs = new IntIntDynamicMap(size, 0);
this.boostedDocsCollector = boostedDocsCollector;
if(needsScores) {
this.scores = new IntFloatDynamicMap(size, 0.0f);
if(nullPolicy == NullPolicy.EXPAND.getCode()) {
nullScores = new FloatArrayList();
}
}
}
public FixedBitSet getCollapsedSet() {
// Handle the boosted docs.
boostedDocsCollector.purgeGroupsThatHaveBoostedDocs(collapsedSet,
(key) -> { cmap.remove(key); },
() -> { nullDoc = -1; });
//Build the sorted DocSet of group heads.
if(nullDoc > -1) {
this.collapsedSet.set(nullDoc);
}
Iterator<IntIntCursor> it1 = cmap.iterator();
while(it1.hasNext()) {
IntIntCursor cursor = it1.next();
int pointer = cursor.value;
collapsedSet.set(docs.get(pointer));
}
return collapsedSet;
}
public void setScorer(Scorable scorer) throws IOException {
this.scorer = scorer;
}
public FloatArrayList getNullScores() {
return nullScores;
}
public IntIntHashMap getCollapseMap() {
return cmap;
}
public float getNullScore() {
return this.nullScore;
}
public IntFloatDynamicMap getScores() {
return scores;
}
public IntIntDynamicMap getDocs() { return docs;}
}
/*
* Strategy for collapsing on a 32 bit numeric field and selecting the group head based
  * on the min/max value of a 32 bit numeric field.
*/
private static class IntIntStrategy extends IntFieldValueStrategy {
private final String field;
private NumericDocValues minMaxVals;
private IntIntDynamicMap testValues;
private IntCompare comp;
private int nullCompVal;
private int index=-1;
public IntIntStrategy(int maxDoc,
int size,
String collapseField,
int nullPolicy,
GroupHeadSelector groupHeadSelector,
boolean needsScores,
BoostedDocsCollector boostedDocsCollector) throws IOException {
super(maxDoc, size, collapseField, nullPolicy, needsScores, boostedDocsCollector);
this.field = groupHeadSelector.selectorText;
this.testValues = new IntIntDynamicMap(size, 0);
assert GroupHeadSelectorType.MIN_MAX.contains(groupHeadSelector.type);
if (GroupHeadSelectorType.MAX.equals(groupHeadSelector.type)) {
comp = new MaxIntComp();
this.nullCompVal = Integer.MIN_VALUE;
} else {
comp = new MinIntComp();
this.nullCompVal = Integer.MAX_VALUE;
}
}
public void setNextReader(LeafReaderContext context) throws IOException {
this.minMaxVals = DocValues.getNumeric(context.reader(), this.field);
}
private int advanceAndGetCurrentVal(int contextDoc) throws IOException {
if (minMaxVals.advanceExact(contextDoc)) {
return (int) minMaxVals.longValue();
} // else...
return 0;
}
public void collapse(int collapseKey, int contextDoc, int globalDoc) throws IOException {
final int currentVal = advanceAndGetCurrentVal(contextDoc);
final int idx;
if((idx = cmap.indexOf(collapseKey)) >= 0) {
int pointer = cmap.indexGet(idx);
if(comp.test(currentVal, testValues.get(pointer))) {
testValues.put(pointer, currentVal);
docs.put(pointer, globalDoc);
if(needsScores) {
scores.put(pointer, scorer.score());
}
}
} else {
++index;
cmap.put(collapseKey, index);
testValues.put(index, currentVal);
docs.put(index, globalDoc);
if(needsScores) {
scores.put(index, scorer.score());
}
}
}
public void collapseNullGroup(int contextDoc, int globalDoc) throws IOException {
assert NullPolicy.IGNORE.getCode() != this.nullPolicy;
final int currentVal = advanceAndGetCurrentVal(contextDoc);
if (this.nullPolicy == NullPolicy.COLLAPSE.getCode()) {
if(comp.test(currentVal, nullCompVal)) {
nullCompVal = currentVal;
nullDoc = globalDoc;
if(needsScores) {
nullScore = scorer.score();
}
}
} else if(this.nullPolicy == NullPolicy.EXPAND.getCode()) {
this.collapsedSet.set(globalDoc);
if(needsScores) {
nullScores.add(scorer.score());
}
}
}
}
private static class IntFloatStrategy extends IntFieldValueStrategy {
private final String field;
private NumericDocValues minMaxVals;
private IntFloatDynamicMap testValues;
private FloatCompare comp;
private float nullCompVal;
private int index=-1;
public IntFloatStrategy(int maxDoc,
int size,
String collapseField,
int nullPolicy,
GroupHeadSelector groupHeadSelector,
boolean needsScores,
BoostedDocsCollector boostedDocsCollector) throws IOException {
super(maxDoc, size, collapseField, nullPolicy, needsScores, boostedDocsCollector);
this.field = groupHeadSelector.selectorText;
this.testValues = new IntFloatDynamicMap(size, 0.0f);
assert GroupHeadSelectorType.MIN_MAX.contains(groupHeadSelector.type);
if (GroupHeadSelectorType.MAX.equals(groupHeadSelector.type)) {
comp = new MaxFloatComp();
this.nullCompVal = -Float.MAX_VALUE;
} else {
comp = new MinFloatComp();
this.nullCompVal = Float.MAX_VALUE;
}
}
public void setNextReader(LeafReaderContext context) throws IOException {
this.minMaxVals = DocValues.getNumeric(context.reader(), this.field);
}
private float advanceAndGetCurrentVal(int contextDoc) throws IOException {
if (minMaxVals.advanceExact(contextDoc)) {
return Float.intBitsToFloat((int) minMaxVals.longValue());
} // else...
return Float.intBitsToFloat(0);
}
public void collapse(int collapseKey, int contextDoc, int globalDoc) throws IOException {
final float currentVal = advanceAndGetCurrentVal(contextDoc);
final int idx;
if((idx = cmap.indexOf(collapseKey)) >= 0) {
int pointer = cmap.indexGet(idx);
if(comp.test(currentVal, testValues.get(pointer))) {
testValues.put(pointer, currentVal);
docs.put(pointer, globalDoc);
if(needsScores) {
scores.put(pointer, scorer.score());
}
}
} else {
++index;
cmap.put(collapseKey, index);
testValues.put(index, currentVal);
docs.put(index, globalDoc);
if(needsScores) {
scores.put(index, scorer.score());
}
}
}
public void collapseNullGroup(int contextDoc, int globalDoc) throws IOException {
assert NullPolicy.IGNORE.getCode() != this.nullPolicy;
final float currentVal = advanceAndGetCurrentVal(contextDoc);
if(this.nullPolicy == NullPolicy.COLLAPSE.getCode()) {
if(comp.test(currentVal, nullCompVal)) {
nullCompVal = currentVal;
nullDoc = globalDoc;
if(needsScores) {
nullScore = scorer.score();
}
}
} else if(this.nullPolicy == NullPolicy.EXPAND.getCode()) {
this.collapsedSet.set(globalDoc);
if(needsScores) {
nullScores.add(scorer.score());
}
}
}
}
/*
* Strategy for collapsing on a 32 bit numeric field and selecting the group head based
* on the min/max value of a Value Source Function.
*/
private static class IntValueSourceStrategy extends IntFieldValueStrategy {
private FloatCompare comp;
private IntFloatDynamicMap testValues;
private float nullCompVal;
private ValueSource valueSource;
private FunctionValues functionValues;
@SuppressWarnings({"rawtypes"})
private Map rcontext;
private final CollapseScore collapseScore = new CollapseScore();
private int index=-1;
private boolean needsScores4Collapsing;
public IntValueSourceStrategy(int maxDoc,
int size,
String collapseField,
int nullPolicy,
GroupHeadSelector groupHeadSelector,
boolean needsScores4Collapsing,
boolean needsScores,
BoostedDocsCollector boostedDocsCollector,
FunctionQuery funcQuery,
IndexSearcher searcher) throws IOException {
super(maxDoc, size, collapseField, nullPolicy, needsScores, boostedDocsCollector);
this.needsScores4Collapsing = needsScores4Collapsing;
this.testValues = new IntFloatDynamicMap(size, 0.0f);
this.valueSource = funcQuery.getValueSource();
this.rcontext = ValueSource.newContext(searcher);
assert GroupHeadSelectorType.MIN_MAX.contains(groupHeadSelector.type);
if (GroupHeadSelectorType.MAX.equals(groupHeadSelector.type)) {
this.nullCompVal = -Float.MAX_VALUE;
comp = new MaxFloatComp();
} else {
this.nullCompVal = Float.MAX_VALUE;
comp = new MinFloatComp();
}
collapseScore.setupIfNeeded(groupHeadSelector, rcontext);
}
@SuppressWarnings({"unchecked"})
public void setNextReader(LeafReaderContext context) throws IOException {
functionValues = this.valueSource.getValues(rcontext, context);
}
private float computeScoreIfNeeded4Collapse() throws IOException {
if (needsScores4Collapsing) {
this.collapseScore.score = scorer.score();
return this.collapseScore.score;
} // else...
return 0F;
}
public void collapse(int collapseKey, int contextDoc, int globalDoc) throws IOException {
float score = computeScoreIfNeeded4Collapse();
final float currentVal = functionValues.floatVal(contextDoc);
final int idx;
if((idx = cmap.indexOf(collapseKey)) >= 0) {
int pointer = cmap.indexGet(idx);
if(comp.test(currentVal, testValues.get(pointer))) {
testValues.put(pointer, currentVal);
docs.put(pointer, globalDoc);
if(needsScores){
if (!needsScores4Collapsing) {
score = scorer.score();
}
scores.put(pointer, score);
}
}
} else {
++index;
cmap.put(collapseKey, index);
docs.put(index, globalDoc);
testValues.put(index, currentVal);
if(needsScores) {
if (!needsScores4Collapsing) {
score = scorer.score();
}
scores.put(index, score);
}
}
}
public void collapseNullGroup(int contextDoc, int globalDoc) throws IOException {
assert NullPolicy.IGNORE.getCode() != this.nullPolicy;
float score = computeScoreIfNeeded4Collapse();
final float currentVal = functionValues.floatVal(contextDoc);
if(this.nullPolicy == NullPolicy.COLLAPSE.getCode()) {
if(comp.test(currentVal, nullCompVal)) {
nullCompVal = currentVal;
nullDoc = globalDoc;
if(needsScores) {
if (!needsScores4Collapsing) {
score = scorer.score();
}
nullScore = score;
}
}
} else if(this.nullPolicy == NullPolicy.EXPAND.getCode()) {
this.collapsedSet.set(globalDoc);
if(needsScores) {
if (!needsScores4Collapsing) {
score = scorer.score();
}
nullScores.add(score);
}
}
}
}
/*
* Strategy for collapsing on a 32 bit numeric field and using the first document according
* to a complex sort as the group head
*/
private static class IntSortSpecStrategy extends IntFieldValueStrategy {
private final SortFieldsCompare compareState;
private final SortSpec sortSpec;
private final Sort sort;
private int index=-1;
private boolean needsScores4Collapsing;
public IntSortSpecStrategy(int maxDoc,
int size,
String collapseField,
int nullPolicy,
GroupHeadSelector groupHeadSelector,
boolean needsScores4Collapsing,
boolean needsScores,
BoostedDocsCollector boostedDocsCollector,
SortSpec sortSpec,
IndexSearcher searcher) throws IOException {
super(maxDoc, size, collapseField, nullPolicy, needsScores, boostedDocsCollector);
this.needsScores4Collapsing = needsScores4Collapsing;
assert GroupHeadSelectorType.SORT.equals(groupHeadSelector.type);
this.sortSpec = sortSpec;
this.sort = rewriteSort(sortSpec, searcher);
this.compareState = new SortFieldsCompare(sort.getSort(), size);
}
@Override
public void setNextReader(LeafReaderContext context) throws IOException {
compareState.setNextReader(context);
}
@Override
public void setScorer(Scorable s) throws IOException {
super.setScorer(s);
this.compareState.setScorer(s);
}
private float computeScoreIfNeeded4Collapse() throws IOException {
return needsScores4Collapsing ? scorer.score() : 0F;
}
public void collapse(int collapseKey, int contextDoc, int globalDoc) throws IOException {
float score = computeScoreIfNeeded4Collapse();
final int idx;
if ((idx = cmap.indexOf(collapseKey)) >= 0) {
// we've seen this collapseKey before, test to see if it's a new group leader
int pointer = cmap.indexGet(idx);
if (compareState.testAndSetGroupValues(pointer, contextDoc)) {
docs.put(pointer, globalDoc);
if (needsScores) {
if (!needsScores4Collapsing) {
score = scorer.score();
}
scores.put(pointer, score);
}
}
} else {
// we've never seen this collapseKey before, treat it as group head for now
++index;
cmap.put(collapseKey, index);
docs.put(index, globalDoc);
compareState.setGroupValues(index, contextDoc);
if(needsScores) {
if (!needsScores4Collapsing) {
score = scorer.score();
}
scores.put(index, score);
}
}
}
public void collapseNullGroup(int contextDoc, int globalDoc) throws IOException {
assert NullPolicy.IGNORE.getCode() != this.nullPolicy;
float score = computeScoreIfNeeded4Collapse();
if(this.nullPolicy == NullPolicy.COLLAPSE.getCode()) {
if (-1 == nullDoc) {
// we've never seen a doc with null collapse key yet, treat it as the null group head for now
compareState.setNullGroupValues(contextDoc);
nullDoc = globalDoc;
if (needsScores) {
if (!needsScores4Collapsing) {
score = scorer.score();
}
nullScore = score;
}
} else {
// test this doc to see if it's the new null leader
if (compareState.testAndSetNullGroupValues(contextDoc)) {
nullDoc = globalDoc;
if (needsScores) {
if (!needsScores4Collapsing) {
score = scorer.score();
}
nullScore = score;
}
}
}
} else if(this.nullPolicy == NullPolicy.EXPAND.getCode()) {
this.collapsedSet.set(globalDoc);
if (needsScores) {
if (!needsScores4Collapsing) {
score = scorer.score();
}
nullScores.add(score);
}
}
}
}
/**
* Helper class for dealing with boosted docs, which always get collected
   * (even if there is more than one in a group) and suppress any non-boosted
* docs from being collected from their group (even if they should be based
* on the group head selectors)
*
* NOTE: collect methods must be called in increasing globalDoc order
*/
private static class BoostedDocsCollector {
private final IntIntHashMap boostDocsMap;
private final int[] sortedGlobalDocIds;
private final boolean hasBoosts;
private final IntArrayList boostedKeys = new IntArrayList();
    private final IntArrayList boostedDocs = new IntArrayList();
private boolean boostedNullGroup = false;
private final MergeBoost boostedDocsIdsIter;
public static BoostedDocsCollector build(final IntIntHashMap boostDocsMap) {
if (null != boostDocsMap && ! boostDocsMap.isEmpty()) {
return new BoostedDocsCollector(boostDocsMap);
}
      // else: No-Op impl (short circuit default impl)....
return new BoostedDocsCollector(new IntIntHashMap()) {
@Override
public boolean collectIfBoosted(int groupKey, int globalDoc) {
return false;
}
@Override
public boolean collectInNullGroupIfBoosted(int globalDoc) {
return false;
}
@Override
public void purgeGroupsThatHaveBoostedDocs(final FixedBitSet collapsedSet,
final IntProcedure removeGroupKey,
final Runnable resetNullGroupHead) {
return;
}
};
}
private BoostedDocsCollector(final IntIntHashMap boostDocsMap) {
this.boostDocsMap = boostDocsMap;
this.hasBoosts = ! boostDocsMap.isEmpty();
sortedGlobalDocIds = new int[boostDocsMap.size()];
Iterator<IntIntCursor> it = boostDocsMap.iterator();
int index = -1;
while(it.hasNext()) {
IntIntCursor cursor = it.next();
sortedGlobalDocIds[++index] = cursor.key;
}
Arrays.sort(sortedGlobalDocIds);
boostedDocsIdsIter = getMergeBoost();
}
    /** True if there are any requested boosts (regardless of whether any have been collected) */
public boolean hasBoosts() {
return hasBoosts;
}
/**
* Returns a brand new MergeBoost instance listing all requested boosted docs
*/
public MergeBoost getMergeBoost() {
return new MergeBoost(sortedGlobalDocIds);
}
/**
* @return true if doc is boosted and has (now) been collected
*/
public boolean collectIfBoosted(int groupKey, int globalDoc) {
if (boostedDocsIdsIter.boost(globalDoc)) {
this.boostedDocs.add(globalDoc);
this.boostedKeys.add(groupKey);
return true;
}
return false;
}
/**
* @return true if doc is boosted and has (now) been collected
*/
public boolean collectInNullGroupIfBoosted(int globalDoc) {
if (boostedDocsIdsIter.boost(globalDoc)) {
this.boostedDocs.add(globalDoc);
this.boostedNullGroup = true;
return true;
}
return false;
}
/**
     * Kludgy API necessary to deal with diff collectors/strategies using diff
* data structs for tracking collapse keys...
*/
public void purgeGroupsThatHaveBoostedDocs(final FixedBitSet collapsedSet,
final IntProcedure removeGroupKey,
final Runnable resetNullGroupHead) {
// Add the (collected) boosted docs to the collapsedSet
boostedDocs.forEach(new IntProcedure() {
public void apply(int globalDoc) {
collapsedSet.set(globalDoc);
}
});
// Remove any group heads that are in the same groups as (collected) boosted documents.
boostedKeys.forEach(removeGroupKey);
if (boostedNullGroup) {
// If we're using IGNORE then no (matching) null docs were collected (by caller)
// If we're using EXPAND then all (matching) null docs were already collected (by us)
        // ...and that's *good* because each is treated like its own group, our boosts don't matter
        // We only have to worry about removing null docs when using COLLAPSE, in which case any boosted null doc
        // means we clear the group head of the null group.
resetNullGroupHead.run();
}
}
}
static class MergeBoost {
private int[] boostDocs;
private int index = 0;
public MergeBoost(int[] boostDocs) {
this.boostDocs = boostDocs;
}
public void reset() {
this.index = 0;
}
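    // NOTE: boost() must be called with globalDoc values in increasing order;
    // once the sorted boostDocs array has been exhausted, index is parked at
    // Integer.MIN_VALUE so every later call short-circuits to false.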
public boolean boost(int globalDoc) {
if(index == Integer.MIN_VALUE) {
return false;
} else {
while(true) {
if(index >= boostDocs.length) {
index = Integer.MIN_VALUE;
return false;
} else {
int comp = boostDocs[index];
if(comp == globalDoc) {
++index;
return true;
} else if(comp < globalDoc) {
++index;
} else {
return false;
}
}
}
}
}
}
/**
* This structure wraps (and semi-emulates) the {@link SortFieldsCompare} functionality/API
* for "block" based group collection, where we only ever need a single group in memory at a time
   * As a result, its API has a smaller surface area...
*/
private static class BlockBasedSortFieldsCompare {
/**
* this will always have a numGroups of '0' and we will (ab)use the 'null' group methods for tracking
* and comparison as we collect docs (since we only ever consider one group at a time)
*/
final private SortFieldsCompare inner;
public BlockBasedSortFieldsCompare(final SortField[] sorts) {
this.inner = new SortFieldsCompare(sorts, 0);
}
public void setNextReader(LeafReaderContext context) throws IOException {
inner.setNextReader(context);
}
public void setScorer(Scorable s) throws IOException {
inner.setScorer(s);
}
/** @see SortFieldsCompare#setGroupValues */
public void setGroupValues(int contextDoc) throws IOException {
inner.setNullGroupValues(contextDoc);
}
/** @see SortFieldsCompare#testAndSetGroupValues */
public boolean testAndSetGroupValues(int contextDoc) throws IOException {
return inner.testAndSetNullGroupValues(contextDoc);
}
}
/**
* Class for comparing documents according to a list of SortField clauses and
   * tracking the groupHeadLeaders and their sort values. Groups will be identified
   * by int "contextKey" values, which may either be (encoded) 32bit numeric values, or
* ordinal values for Strings -- this class doesn't care, and doesn't assume any special
* meaning.
*/
private static class SortFieldsCompare {
final private int numClauses;
final private SortField[] sorts;
final private int[] reverseMul;
@SuppressWarnings({"rawtypes"})
final private FieldComparator[] fieldComparators;
final private LeafFieldComparator[] leafFieldComparators;
private Object[][] groupHeadValues; // growable
final private Object[] nullGroupValues;
/**
     * Constructs an instance based on the (raw, un-rewritten) SortFields to be used,
* and an initial number of expected groups (will grow as needed).
*/
@SuppressWarnings({"rawtypes"})
public SortFieldsCompare(SortField[] sorts, int initNumGroups) {
this.sorts = sorts;
numClauses = sorts.length;
fieldComparators = new FieldComparator[numClauses];
leafFieldComparators = new LeafFieldComparator[numClauses];
reverseMul = new int[numClauses];
for (int clause = 0; clause < numClauses; clause++) {
SortField sf = sorts[clause];
// we only need one slot for every comparator
fieldComparators[clause] = sf.getComparator(1, clause);
reverseMul[clause] = sf.getReverse() ? -1 : 1;
}
groupHeadValues = new Object[initNumGroups][];
nullGroupValues = new Object[numClauses];
}
public void setNextReader(LeafReaderContext context) throws IOException {
for (int clause = 0; clause < numClauses; clause++) {
leafFieldComparators[clause] = fieldComparators[clause].getLeafComparator(context);
}
}
public void setScorer(Scorable s) throws IOException {
for (int clause = 0; clause < numClauses; clause++) {
leafFieldComparators[clause].setScorer(s);
}
}
// LUCENE-6808 workaround
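    // (FieldComparator implementations may reuse the underlying BytesRef across
    // documents, so stash a deep copy rather than the live instance.)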
private static Object cloneIfBytesRef(Object val) {
if (val instanceof BytesRef) {
return BytesRef.deepCopyOf((BytesRef) val);
}
return val;
}
/**
* Returns the current SortField values for the specified collapseKey.
* If this collapseKey has never been seen before, then an array of null values is inited
* and tracked so that the caller may update it if needed.
*/
private Object[] getOrInitGroupHeadValues(int collapseKey) {
Object[] values = groupHeadValues[collapseKey];
if (null == values) {
values = new Object[numClauses];
groupHeadValues[collapseKey] = values;
}
return values;
}
/**
* Records the SortField values for the specified contextDoc as the "best" values
* for the group identified by the specified collapseKey.
*
* Should be called the first time a contextKey is encountered.
*/
public void setGroupValues(int collapseKey, int contextDoc) throws IOException {
assert 0 <= collapseKey : "negative collapseKey";
if (collapseKey >= groupHeadValues.length) {
grow(collapseKey + 1);
}
setGroupValues(getOrInitGroupHeadValues(collapseKey), contextDoc);
}
/**
* Records the SortField values for the specified contextDoc as the "best" values
* for the null group.
*
     * Should be called the first time a doc in the null group is encountered.
*/
public void setNullGroupValues(int contextDoc) throws IOException {
setGroupValues(nullGroupValues, contextDoc);
}
/**
* Records the SortField values for the specified contextDoc into the
* values array provided by the caller.
*/
private void setGroupValues(Object[] values, int contextDoc) throws IOException {
for (int clause = 0; clause < numClauses; clause++) {
leafFieldComparators[clause].copy(0, contextDoc);
values[clause] = cloneIfBytesRef(fieldComparators[clause].value(0));
}
}
/**
* Compares the SortField values of the specified contextDoc with the existing group head
* values for the group identified by the specified collapseKey, and overwrites them
* (and returns true) if this document should become the new group head in accordance
* with the SortFields
* (otherwise returns false)
*/
public boolean testAndSetGroupValues(int collapseKey, int contextDoc) throws IOException {
assert 0 <= collapseKey : "negative collapseKey";
if (collapseKey >= groupHeadValues.length) {
grow(collapseKey + 1);
}
return testAndSetGroupValues(getOrInitGroupHeadValues(collapseKey), contextDoc);
}
/**
* Compares the SortField values of the specified contextDoc with the existing group head
* values for the null group, and overwrites them (and returns true) if this document
* should become the new group head in accordance with the SortFields.
* (otherwise returns false)
*/
public boolean testAndSetNullGroupValues(int contextDoc) throws IOException {
return testAndSetGroupValues(nullGroupValues, contextDoc);
}
/**
* Compares the SortField values of the specified contextDoc with the existing values
* array, and overwrites them (and returns true) if this document is the new group head in
* accordance with the SortFields.
* (otherwise returns false)
*/
@SuppressWarnings({"unchecked", "rawtypes"})
private boolean testAndSetGroupValues(Object[] values, int contextDoc) throws IOException {
Object[] stash = new Object[numClauses];
int lastCompare = 0;
int testClause = 0;
for (/* testClause */; testClause < numClauses; testClause++) {
leafFieldComparators[testClause].copy(0, contextDoc);
FieldComparator fcomp = fieldComparators[testClause];
stash[testClause] = cloneIfBytesRef(fcomp.value(0));
lastCompare = reverseMul[testClause] * fcomp.compareValues(stash[testClause], values[testClause]);
if (0 != lastCompare) {
// no need to keep checking additional clauses
break;
}
}
if (0 <= lastCompare) {
      // we're either not competitive, or we're completely tied with another doc
      // that's already been selected as group head
return false;
} // else...
// this doc is our new group head, we've already read some of the values into our stash
testClause++;
System.arraycopy(stash, 0, values, 0, testClause);
// read the remaining values we didn't need to test
for (int copyClause = testClause; copyClause < numClauses; copyClause++) {
leafFieldComparators[copyClause].copy(0, contextDoc);
values[copyClause] = cloneIfBytesRef(fieldComparators[copyClause].value(0));
}
return true;
}
/**
* Grows all internal arrays to the specified minSize
*/
public void grow(int minSize) {
groupHeadValues = ArrayUtil.grow(groupHeadValues, minSize);
}
}
private static interface IntCompare {
public boolean test(int i1, int i2);
}
private static interface FloatCompare {
public boolean test(float i1, float i2);
}
private static interface LongCompare {
public boolean test(long i1, long i2);
}
private static class MaxIntComp implements IntCompare {
public boolean test(int i1, int i2) {
return i1 > i2;
}
}
private static class MinIntComp implements IntCompare {
public boolean test(int i1, int i2) {
return i1 < i2;
}
}
private static class MaxFloatComp implements FloatCompare {
public boolean test(float i1, float i2) {
return i1 > i2;
}
}
private static class MinFloatComp implements FloatCompare {
public boolean test(float i1, float i2) {
return i1 < i2;
}
}
private static class MaxLongComp implements LongCompare {
public boolean test(long i1, long i2) {
return i1 > i2;
}
}
private static class MinLongComp implements LongCompare {
public boolean test(long i1, long i2) {
return i1 < i2;
}
}
/** returns the number of arguments that are non null */
private static final int numNotNull(final Object... args) {
int r = 0;
for (final Object o : args) {
if (null != o) {
r++;
}
}
return r;
}
/**
* Helper method for rewriting the Sort associated with a SortSpec.
* Handles the special case default of relevancy sort (ie: a SortSpec w/null Sort object)
*/
public static Sort rewriteSort(SortSpec sortSpec, IndexSearcher searcher) throws IOException {
assert null != sortSpec : "SortSpec must not be null";
assert null != searcher : "Searcher must not be null";
Sort orig = sortSpec.getSort();
if (null == orig) {
orig = Sort.RELEVANCE;
}
return orig.rewrite(searcher);
}
}
| 1 | 38,870 | Should not be a static import. | apache-lucene-solr | java |
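The review above asks for a plain import with a qualified reference instead of a static import. The flagged import itself is not visible in this excerpt, so the Java sketch below is only a hypothetical illustration of the requested change (all names are placeholders):

// Before: a static import lets the member appear unqualified at the use site.
//   import static org.example.search.NullPolicy.COLLAPSE;
//   ... if (policy == COLLAPSE.getCode()) { ... }
//
// After: import the enclosing type and qualify the member where it is used,
// which keeps the owning type visible at the call site.
//   import org.example.search.NullPolicy;
//   ... if (policy == NullPolicy.COLLAPSE.getCode()) { ... }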
@@ -288,13 +288,13 @@ func (client *APIECSClient) getCustomAttributes() []*ecs.Attribute {
func (client *APIECSClient) SubmitTaskStateChange(change api.TaskStateChange) error {
// Submit attachment state change
- if change.Attachments != nil {
+ if change.Attachment != nil {
var attachments []*ecs.AttachmentStateChange
- eniStatus := change.Attachments.Status.String()
+ eniStatus := change.Attachment.Status.String()
attachments = []*ecs.AttachmentStateChange{
{
- AttachmentArn: &change.Attachments.AttachmentArn,
+ AttachmentArn: &change.Attachment.AttachmentARN,
Status: &eniStatus,
},
} | 1 | // Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package ecsclient
import (
"errors"
"fmt"
"runtime"
"strings"
"time"
"github.com/aws/amazon-ecs-agent/agent/api"
"github.com/aws/amazon-ecs-agent/agent/async"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/ec2"
"github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs"
"github.com/aws/amazon-ecs-agent/agent/httpclient"
"github.com/aws/amazon-ecs-agent/agent/utils"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/cihub/seelog"
"github.com/docker/docker/pkg/system"
)
const (
ecsMaxReasonLength = 255
pollEndpointCacheSize = 1
pollEndpointCacheTTL = 20 * time.Minute
roundtripTimeout = 5 * time.Second
)
// APIECSClient implements ECSClient
type APIECSClient struct {
credentialProvider *credentials.Credentials
config *config.Config
standardClient api.ECSSDK
submitStateChangeClient api.ECSSubmitStateSDK
ec2metadata ec2.EC2MetadataClient
	pollEndpointCache       async.Cache
}
// NewECSClient creates a new ECSClient interface object
func NewECSClient(
credentialProvider *credentials.Credentials,
config *config.Config,
ec2MetadataClient ec2.EC2MetadataClient) api.ECSClient {
var ecsConfig aws.Config
ecsConfig.Credentials = credentialProvider
ecsConfig.Region = &config.AWSRegion
ecsConfig.HTTPClient = httpclient.New(roundtripTimeout, config.AcceptInsecureCert)
if config.APIEndpoint != "" {
ecsConfig.Endpoint = &config.APIEndpoint
}
standardClient := ecs.New(session.New(&ecsConfig))
submitStateChangeClient := newSubmitStateChangeClient(&ecsConfig)
	pollEndpointCache := async.NewLRUCache(pollEndpointCacheSize, pollEndpointCacheTTL)
return &APIECSClient{
credentialProvider: credentialProvider,
config: config,
standardClient: standardClient,
submitStateChangeClient: submitStateChangeClient,
ec2metadata: ec2MetadataClient,
		pollEndpointCache:       pollEndpointCache,
}
}
// SetSDK overrides the SDK to the given one. This is useful for injecting a
// test implementation
func (client *APIECSClient) SetSDK(sdk api.ECSSDK) {
client.standardClient = sdk
}
// SetSubmitStateChangeSDK overrides the SDK to the given one. This is useful
// for injecting a test implementation
func (client *APIECSClient) SetSubmitStateChangeSDK(sdk api.ECSSubmitStateSDK) {
client.submitStateChangeClient = sdk
}
// CreateCluster creates a cluster from a given name and returns its arn
func (client *APIECSClient) CreateCluster(clusterName string) (string, error) {
resp, err := client.standardClient.CreateCluster(&ecs.CreateClusterInput{ClusterName: &clusterName})
if err != nil {
seelog.Criticalf("Could not create cluster: %v", err)
return "", err
}
seelog.Infof("Created a cluster named: %s", clusterName)
return *resp.Cluster.ClusterName, nil
}
// RegisterContainerInstance calculates the appropriate resources, creates
// the default cluster if necessary, and returns the registered
// ContainerInstanceARN if successful. Supplying a non-empty container
// instance ARN allows a container instance to update its registered
// resources.
func (client *APIECSClient) RegisterContainerInstance(containerInstanceArn string, attributes []*ecs.Attribute) (string, error) {
clusterRef := client.config.Cluster
// If our clusterRef is empty, we should try to create the default
if clusterRef == "" {
clusterRef = config.DefaultClusterName
defer func() {
// Update the config value to reflect the cluster we end up in
client.config.Cluster = clusterRef
}()
// Attempt to register without checking existence of the cluster so we don't require
// excess permissions in the case where the cluster already exists and is active
containerInstanceArn, err := client.registerContainerInstance(clusterRef, containerInstanceArn, attributes)
if err == nil {
return containerInstanceArn, nil
}
// If trying to register fails, try to create the cluster before calling
// register again
clusterRef, err = client.CreateCluster(clusterRef)
if err != nil {
return "", err
}
}
return client.registerContainerInstance(clusterRef, containerInstanceArn, attributes)
}
func (client *APIECSClient) registerContainerInstance(clusterRef string, containerInstanceArn string, attributes []*ecs.Attribute) (string, error) {
registerRequest := ecs.RegisterContainerInstanceInput{Cluster: &clusterRef}
var registrationAttributes []*ecs.Attribute
if containerInstanceArn != "" {
// We are re-connecting a previously registered instance, restored from snapshot.
registerRequest.ContainerInstanceArn = &containerInstanceArn
} else {
// This is a new instance, not previously registered.
// Custom attribute registration only happens on initial instance registration.
for _, attribute := range client.getCustomAttributes() {
seelog.Debugf("Added a new custom attribute %v=%v",
aws.StringValue(attribute.Name),
aws.StringValue(attribute.Value),
)
registrationAttributes = append(registrationAttributes, attribute)
}
}
// Standard attributes are included with all registrations.
registrationAttributes = append(registrationAttributes, attributes...)
// Add additional attributes such as the os type
registrationAttributes = append(registrationAttributes, client.getAdditionalAttributes()...)
registerRequest.Attributes = registrationAttributes
iidRetrieved := true
instanceIdentityDoc, err := client.ec2metadata.GetDynamicData(ec2.InstanceIdentityDocumentResource)
if err != nil {
seelog.Errorf("Unable to get instance identity document: %v", err)
iidRetrieved = false
instanceIdentityDoc = ""
}
registerRequest.InstanceIdentityDocument = &instanceIdentityDoc
instanceIdentitySignature := ""
if iidRetrieved {
instanceIdentitySignature, err = client.ec2metadata.GetDynamicData(ec2.InstanceIdentityDocumentSignatureResource)
if err != nil {
seelog.Errorf("Unable to get instance identity signature: %v", err)
}
}
registerRequest.InstanceIdentityDocumentSignature = &instanceIdentitySignature
// Micro-optimization, the pointer to this is used multiple times below
integerStr := "INTEGER"
cpu, mem := getCpuAndMemory()
remainingMem := mem - int64(client.config.ReservedMemory)
if remainingMem < 0 {
return "", fmt.Errorf(
"api register-container-instance: reserved memory is higher than available memory on the host, total memory: %d, reserved: %d",
mem, client.config.ReservedMemory)
}
cpuResource := ecs.Resource{
Name: utils.Strptr("CPU"),
Type: &integerStr,
IntegerValue: &cpu,
}
memResource := ecs.Resource{
Name: utils.Strptr("MEMORY"),
Type: &integerStr,
IntegerValue: &remainingMem,
}
portResource := ecs.Resource{
Name: utils.Strptr("PORTS"),
Type: utils.Strptr("STRINGSET"),
StringSetValue: utils.Uint16SliceToStringSlice(client.config.ReservedPorts),
}
udpPortResource := ecs.Resource{
Name: utils.Strptr("PORTS_UDP"),
Type: utils.Strptr("STRINGSET"),
StringSetValue: utils.Uint16SliceToStringSlice(client.config.ReservedPortsUDP),
}
resources := []*ecs.Resource{&cpuResource, &memResource, &portResource, &udpPortResource}
registerRequest.TotalResources = resources
resp, err := client.standardClient.RegisterContainerInstance(®isterRequest)
if err != nil {
seelog.Errorf("Could not register: %v", err)
return "", err
}
seelog.Info("Registered container instance with cluster!")
err = validateRegisteredAttributes(registerRequest.Attributes, resp.ContainerInstance.Attributes)
return aws.StringValue(resp.ContainerInstance.ContainerInstanceArn), err
}
func attributesToMap(attributes []*ecs.Attribute) map[string]string {
attributeMap := make(map[string]string)
attribs := attributes
for _, attribute := range attribs {
attributeMap[aws.StringValue(attribute.Name)] = aws.StringValue(attribute.Value)
}
return attributeMap
}
func findMissingAttributes(expectedAttributes, actualAttributes map[string]string) ([]string, error) {
missingAttributes := make([]string, 0)
var err error
for key, val := range expectedAttributes {
if actualAttributes[key] != val {
missingAttributes = append(missingAttributes, key)
} else {
seelog.Tracef("Response contained expected value for attribute %v", key)
}
}
if len(missingAttributes) > 0 {
err = utils.NewAttributeError("Attribute validation failed")
}
return missingAttributes, err
}
func validateRegisteredAttributes(expectedAttributes, actualAttributes []*ecs.Attribute) error {
var err error
expectedAttributesMap := attributesToMap(expectedAttributes)
actualAttributesMap := attributesToMap(actualAttributes)
missingAttributes, err := findMissingAttributes(expectedAttributesMap, actualAttributesMap)
if err != nil {
msg := strings.Join(missingAttributes, ",")
seelog.Errorf("Error registering attributes: %v", msg)
}
return err
}
func getCpuAndMemory() (int64, int64) {
memInfo, err := system.ReadMemInfo()
mem := int64(0)
if err == nil {
mem = memInfo.MemTotal / 1024 / 1024 // MiB
} else {
seelog.Errorf("Unable getting memory info: %v", err)
}
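	// ECS expresses CPU capacity in "CPU units": 1024 units per vCPU.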
cpu := runtime.NumCPU() * 1024
return int64(cpu), mem
}
func (client *APIECSClient) getAdditionalAttributes() []*ecs.Attribute {
return []*ecs.Attribute{&ecs.Attribute{
Name: aws.String("ecs.os-type"),
Value: aws.String(api.OSType),
}}
}
func (client *APIECSClient) getCustomAttributes() []*ecs.Attribute {
var attributes []*ecs.Attribute
for attribute, value := range client.config.InstanceAttributes {
attributes = append(attributes, &ecs.Attribute{
Name: aws.String(attribute),
Value: aws.String(value),
})
}
return attributes
}
func (client *APIECSClient) SubmitTaskStateChange(change api.TaskStateChange) error {
// Submit attachment state change
if change.Attachments != nil {
var attachments []*ecs.AttachmentStateChange
eniStatus := change.Attachments.Status.String()
attachments = []*ecs.AttachmentStateChange{
{
AttachmentArn: &change.Attachments.AttachmentArn,
Status: &eniStatus,
},
}
_, err := client.submitStateChangeClient.SubmitTaskStateChange(&ecs.SubmitTaskStateChangeInput{
Cluster: &client.config.Cluster,
Task: &change.TaskArn,
Attachments: attachments,
})
if err != nil {
seelog.Warnf("Could not submit an attachment state change: %v", err)
return err
}
return nil
}
// Submit task state change
if change.Status == api.TaskStatusNone {
seelog.Warnf("SubmitTaskStateChange called with an invalid change: %s", change.String())
return errors.New("ecs api client: SubmitTaskStateChange called with an invalid change")
}
if change.Status != api.TaskRunning && change.Status != api.TaskStopped {
seelog.Debugf("Not submitting unsupported upstream task state: %s", change.Status.String())
// Not really an error
return nil
}
status := change.Status.String()
req := ecs.SubmitTaskStateChangeInput{
Cluster: aws.String(client.config.Cluster),
Task: aws.String(change.TaskArn),
Status: aws.String(status),
Reason: aws.String(change.Reason),
}
containerEvents := make([]*ecs.ContainerStateChange, len(change.Containers))
for i, containerEvent := range change.Containers {
containerEvents[i] = client.buildContainerStateChangePayload(containerEvent)
}
req.Containers = containerEvents
_, err := client.submitStateChangeClient.SubmitTaskStateChange(&req)
if err != nil {
seelog.Warnf("Could not submit task state change: [%s]: %v", change.String(), err)
return err
}
return nil
}
func (client *APIECSClient) buildContainerStateChangePayload(change api.ContainerStateChange) *ecs.ContainerStateChange {
statechange := &ecs.ContainerStateChange{
ContainerName: aws.String(change.ContainerName),
}
if change.Reason != "" {
if len(change.Reason) > ecsMaxReasonLength {
trimmed := change.Reason[0:ecsMaxReasonLength]
statechange.Reason = aws.String(trimmed)
} else {
statechange.Reason = aws.String(change.Reason)
}
}
status := change.Status
if status != api.ContainerStopped && status != api.ContainerRunning {
seelog.Warnf("Not submitting unsupported upstream container state %s for container %s in task %s",
status.String(), change.ContainerName, change.TaskArn)
return nil
}
statechange.Status = aws.String(status.String())
if change.ExitCode != nil {
exitCode := int64(aws.IntValue(change.ExitCode))
statechange.ExitCode = aws.Int64(exitCode)
}
networkBindings := make([]*ecs.NetworkBinding, len(change.PortBindings))
for i, binding := range change.PortBindings {
hostPort := int64(binding.HostPort)
containerPort := int64(binding.ContainerPort)
bindIP := binding.BindIP
protocol := binding.Protocol.String()
networkBindings[i] = &ecs.NetworkBinding{
BindIP: aws.String(bindIP),
ContainerPort: aws.Int64(containerPort),
HostPort: aws.Int64(hostPort),
Protocol: aws.String(protocol),
}
}
statechange.NetworkBindings = networkBindings
return statechange
}
func (client *APIECSClient) SubmitContainerStateChange(change api.ContainerStateChange) error {
req := ecs.SubmitContainerStateChangeInput{
Cluster: &client.config.Cluster,
Task: &change.TaskArn,
ContainerName: &change.ContainerName,
}
if change.Reason != "" {
if len(change.Reason) > ecsMaxReasonLength {
trimmed := change.Reason[0:ecsMaxReasonLength]
req.Reason = &trimmed
} else {
req.Reason = &change.Reason
}
}
stat := change.Status.String()
if stat == "DEAD" {
stat = "STOPPED"
}
if stat != "STOPPED" && stat != "RUNNING" {
seelog.Infof("Not submitting unsupported upstream container state: %s", stat)
return nil
}
req.Status = &stat
if change.ExitCode != nil {
exitCode := int64(*change.ExitCode)
req.ExitCode = &exitCode
}
networkBindings := make([]*ecs.NetworkBinding, len(change.PortBindings))
for i, binding := range change.PortBindings {
hostPort := int64(binding.HostPort)
containerPort := int64(binding.ContainerPort)
bindIP := binding.BindIP
protocol := binding.Protocol.String()
networkBindings[i] = &ecs.NetworkBinding{
BindIP: &bindIP,
ContainerPort: &containerPort,
HostPort: &hostPort,
Protocol: &protocol,
}
}
req.NetworkBindings = networkBindings
_, err := client.submitStateChangeClient.SubmitContainerStateChange(&req)
if err != nil {
seelog.Warnf("Could not submit container state change: [%s]: %v", change.String(), err)
return err
}
return nil
}
func (client *APIECSClient) DiscoverPollEndpoint(containerInstanceArn string) (string, error) {
resp, err := client.discoverPollEndpoint(containerInstanceArn)
if err != nil {
return "", err
}
return aws.StringValue(resp.Endpoint), nil
}
func (client *APIECSClient) DiscoverTelemetryEndpoint(containerInstanceArn string) (string, error) {
resp, err := client.discoverPollEndpoint(containerInstanceArn)
if err != nil {
return "", err
}
if resp.TelemetryEndpoint == nil {
return "", errors.New("No telemetry endpoint returned; nil")
}
return aws.StringValue(resp.TelemetryEndpoint), nil
}
func (client *APIECSClient) discoverPollEndpoint(containerInstanceArn string) (*ecs.DiscoverPollEndpointOutput, error) {
// Try getting an entry from the cache
	cachedEndpoint, found := client.pollEndpointCache.Get(containerInstanceArn)
if found {
// Cache hit. Return the output.
if output, ok := cachedEndpoint.(*ecs.DiscoverPollEndpointOutput); ok {
return output, nil
}
}
// Cache miss, invoke the ECS DiscoverPollEndpoint API.
seelog.Debugf("Invoking DiscoverPollEndpoint for '%s'", containerInstanceArn)
output, err := client.standardClient.DiscoverPollEndpoint(&ecs.DiscoverPollEndpointInput{
ContainerInstance: &containerInstanceArn,
Cluster: &client.config.Cluster,
})
if err != nil {
return nil, err
}
// Cache the response from ECS.
	client.pollEndpointCache.Set(containerInstanceArn, output)
return output, nil
}
| 1 | 16,872 | should we be using `aws.String` here? it seems there are a few other spots we could use `aws.String` in the `if change.Attachment != nil { ...` block. | aws-amazon-ecs-agent | go |
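The helper mentioned above is `aws.String` from aws-sdk-go (`func String(v string) *string`), which returns a pointer to a copy of its argument. Below is a minimal sketch of how the reviewed block could look with the helper applied throughout, using the patched field names from the diff above; this is an illustration of the suggestion, not the committed change:

	if change.Attachment != nil {
		// aws.String copies the value and returns its address, so the
		// intermediate eniStatus local and the manual &-of-field go away.
		attachments := []*ecs.AttachmentStateChange{
			{
				AttachmentArn: aws.String(change.Attachment.AttachmentARN),
				Status:        aws.String(change.Attachment.Status.String()),
			},
		}
		_, err := client.submitStateChangeClient.SubmitTaskStateChange(&ecs.SubmitTaskStateChangeInput{
			Cluster:     aws.String(client.config.Cluster),
			Task:        aws.String(change.TaskArn),
			Attachments: attachments,
		})
		if err != nil {
			seelog.Warnf("Could not submit an attachment state change: %v", err)
			return err
		}
		return nil
	}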
@@ -66,7 +66,9 @@ function DashboardTopEarningPagesWidget( { Widget, WidgetReportZero, WidgetRepor
return {
isAdSenseLinked: select( MODULES_ANALYTICS ).getAdsenseLinked(),
- analyticsMainURL: select( MODULES_ANALYTICS ).getServiceURL(),
+ analyticsMainURL: select( MODULES_ANALYTICS ).getServiceReportURL( 'content-publisher-overview', {
+ '_u.date00': startDate, '_u.date01': endDate,
+ } ),
data: select( MODULES_ANALYTICS ).getReport( args ),
error: select( MODULES_ANALYTICS ).getErrorForSelector( 'getReport', [ args ] ),
loading: ! select( MODULES_ANALYTICS ).hasFinishedResolution( 'getReport', [ args ] ), | 1 | /**
* DashboardTopEarningPagesWidget component.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* WordPress dependencies
*/
import { __, _x } from '@wordpress/i18n';
import { compose } from '@wordpress/compose';
/**
* Internal dependencies
*/
import Data from 'googlesitekit-data';
import { MODULES_ANALYTICS, DATE_RANGE_OFFSET } from '../../../analytics/datastore/constants';
import { CORE_USER } from '../../../../googlesitekit/datastore/user/constants';
import whenActive from '../../../../util/when-active';
import PreviewTable from '../../../../components/PreviewTable';
import { getDataTableFromData } from '../../../../components/data-table';
import SourceLink from '../../../../components/SourceLink';
import AdSenseLinkCTA from '../../../analytics/components/common/AdSenseLinkCTA';
import { isZeroReport } from '../../../analytics/util';
import TableOverflowContainer from '../../../../components/TableOverflowContainer';
const { useSelect } = Data;
function DashboardTopEarningPagesWidget( { Widget, WidgetReportZero, WidgetReportError } ) {
const {
isAdSenseLinked,
analyticsMainURL,
data,
error,
loading,
} = useSelect( ( select ) => {
const { startDate, endDate } = select( CORE_USER ).getDateRangeDates( {
offsetDays: DATE_RANGE_OFFSET,
} );
const args = {
startDate,
endDate,
dimensions: [ 'ga:pageTitle', 'ga:pagePath' ],
metrics: [
{ expression: 'ga:adsenseRevenue', alias: 'Earnings' },
{ expression: 'ga:adsenseECPM', alias: 'Page RPM' },
{ expression: 'ga:adsensePageImpressions', alias: 'Impressions' },
],
orderby: {
fieldName: 'ga:adsenseRevenue',
sortOrder: 'DESCENDING',
},
limit: 10,
};
return {
isAdSenseLinked: select( MODULES_ANALYTICS ).getAdsenseLinked(),
analyticsMainURL: select( MODULES_ANALYTICS ).getServiceURL(),
data: select( MODULES_ANALYTICS ).getReport( args ),
error: select( MODULES_ANALYTICS ).getErrorForSelector( 'getReport', [ args ] ),
loading: ! select( MODULES_ANALYTICS ).hasFinishedResolution( 'getReport', [ args ] ),
};
} );
if ( loading ) {
return (
<PreviewTable rows={ 5 } padding />
);
}
// A restricted metrics error will cause this value to change in the resolver
// so this check should happen before an error, which is only relevant if they are linked.
if ( ! isAdSenseLinked ) {
return <AdSenseLinkCTA />;
}
if ( error ) {
return <WidgetReportError moduleSlug="analytics" error={ error } />;
}
if ( isZeroReport( data ) ) {
return <WidgetReportZero moduleSlug="analytics" />;
}
const headers = [
{
title: __( 'Top Earning Pages', 'google-site-kit' ),
tooltip: __( 'Top Earning Pages', 'google-site-kit' ),
primary: true,
},
{
title: __( 'Revenue', 'google-site-kit' ),
tooltip: __( 'Revenue', 'google-site-kit' ),
},
];
const links = [];
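	// Pair each table row (by index) with its page path so the data table
	// can render the page title as a link.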
const dataMapped = data[ 0 ].data.rows.map( ( row, i ) => {
links[ i ] = row.dimensions[ 1 ];
return [
row.dimensions[ 0 ],
Number( row.metrics[ 0 ].values[ 0 ] ).toFixed( 2 ),
];
} );
const options = {
hideHeader: false,
chartsEnabled: false,
cap: 5,
links,
};
const dataTable = getDataTableFromData( dataMapped, headers, options );
return (
<Widget
noPadding
Footer={ () => (
<SourceLink
className="googlesitekit-data-block__source"
name={ _x( 'Analytics', 'Service name', 'google-site-kit' ) }
href={ analyticsMainURL }
external
/>
) }
>
<TableOverflowContainer>
{ dataTable }
</TableOverflowContainer>
</Widget>
);
}
export default compose(
whenActive( { moduleName: 'adsense' } ),
whenActive( { moduleName: 'analytics' } ),
)( DashboardTopEarningPagesWidget );
 | 1 | 36,219 | We can't pass raw dates like this because they need to be formatted as `YYYYMMDD` as noted in the IB. We added the `generateDateRangeArgs` utilities to handle this for us, as well as abstracting the relationship between arg name and specific dates which is not obvious just by looking at it. Let's update it to use `generateDateRangeArgs( { startDate, endDate } )` instead. Use care to make sure we're sourcing the utility from Analytics utils though since AdSense has its own version by the same name, which is different. | google-site-kit-wp | js |
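For reference, a sketch of the selector call with the suggested utility applied. The relative import path shown is an assumption based on the Analytics module layout and would need to match the actual location of the Analytics `generateDateRangeArgs` (not the same-named AdSense helper):

import { generateDateRangeArgs } from '../../../analytics/util/report-date-range-args';

	analyticsMainURL: select( MODULES_ANALYTICS ).getServiceReportURL(
		'content-publisher-overview',
		generateDateRangeArgs( { startDate, endDate } )
	),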
@@ -313,6 +313,8 @@ class worker(Config):
no_install_shutdown_handler = BoolParameter(default=False,
description='If true, the SIGUSR1 shutdown handler will'
'NOT be install on the worker')
+ save_summary_data = BoolParameter(default=True, description='If true, save data required for '
+ 'execution summary')
class KeepAliveThread(threading.Thread): | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The worker communicates with the scheduler and does two things:
1. Sends all tasks that have to be run
2. Gets tasks from the scheduler that should be run
When running in local mode, the worker talks directly to a :py:class:`~luigi.scheduler.CentralPlannerScheduler` instance.
When you run a central server, the worker will talk to the scheduler using a :py:class:`~luigi.rpc.RemoteScheduler` instance.
Everything in this module is private to luigi and may change in incompatible
ways between versions. The exception is the exception types and the
:py:class:`worker` config class.
"""
import collections
import getpass
import logging
import multiprocessing # Note: this seems to have some stability issues: https://github.com/spotify/luigi/pull/438
import os
import signal
try:
import Queue
except ImportError:
import queue as Queue
import random
import socket
import threading
import time
import traceback
import types
from luigi import six
from luigi import notifications
from luigi.event import Event
from luigi.task_register import load_task
from luigi.scheduler import DISABLED, DONE, FAILED, PENDING, CentralPlannerScheduler
from luigi.target import Target
from luigi.task import Task, flatten, getpaths, Config
from luigi.task_register import TaskClassException
from luigi.task_status import RUNNING
from luigi.parameter import FloatParameter, IntParameter, BoolParameter
try:
import simplejson as json
except ImportError:
import json
logger = logging.getLogger('luigi-interface')
# Prevent fork() from being called during a C-level getaddrinfo() which uses a process-global mutex,
# that may not be unlocked in child process, resulting in the process being locked indefinitely.
fork_lock = threading.Lock()
# Why we assert on _WAIT_INTERVAL_EPS:
# multiprocessing.Queue.get() is undefined for timeout=0 it seems:
# https://docs.python.org/3.4/library/multiprocessing.html#multiprocessing.Queue.get.
# I also tried with really low epsilon, but then ran into the same issue where
# the test case "test_external_dependency_worker_is_patient" got stuck. So I
# unscientifically just set the final value to a floating point number that
# "worked for me".
_WAIT_INTERVAL_EPS = 0.00001
class TaskException(Exception):
pass
class TaskProcess(multiprocessing.Process):
""" Wrap all task execution in this class.
Mainly for convenience since this is run in a separate process. """
def __init__(self, task, worker_id, result_queue, random_seed=False, worker_timeout=0,
tracking_url_callback=None):
super(TaskProcess, self).__init__()
self.task = task
self.worker_id = worker_id
self.result_queue = result_queue
self.random_seed = random_seed
self.tracking_url_callback = tracking_url_callback
if task.worker_timeout is not None:
worker_timeout = task.worker_timeout
self.timeout_time = time.time() + worker_timeout if worker_timeout else None
def _run_get_new_deps(self):
run_again = False
try:
task_gen = self.task.run(tracking_url_callback=self.tracking_url_callback)
except TypeError as ex:
if 'unexpected keyword argument' not in str(ex):
raise
run_again = True
if run_again:
task_gen = self.task.run()
if not isinstance(task_gen, types.GeneratorType):
return None
next_send = None
while True:
try:
if next_send is None:
requires = six.next(task_gen)
else:
requires = task_gen.send(next_send)
except StopIteration:
return None
new_req = flatten(requires)
new_deps = [(t.task_module, t.task_family, t.to_str_params())
for t in new_req]
if all(t.complete() for t in new_req):
next_send = getpaths(requires)
else:
return new_deps
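    # Sketch of the task shape this method consumes (hypothetical task, not part
    # of this module): a run() that yields requirements becomes a generator, and
    # _run_get_new_deps() drives it until it finishes or yields a requirement
    # that is not yet complete.
    #
    #     class MyTask(luigi.Task):
    #         def run(self):
    #             dep = SomeOtherTask()  # hypothetical dependency
    #             yield dep              # suspends until dep is complete
    #             with dep.output().open() as f:
    #                 ...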
def run(self):
logger.info('[pid %s] Worker %s running %s', os.getpid(), self.worker_id, self.task)
if self.random_seed:
# Need to have different random seeds if running in separate processes
random.seed((os.getpid(), time.time()))
status = FAILED
expl = ''
missing = []
new_deps = []
try:
# Verify that all the tasks are fulfilled! For external tasks we
# don't care about unfulfilled dependencies, because we are just
# checking completeness of self.task so outputs of dependencies are
# irrelevant.
if self.task.run != NotImplemented:
missing = [dep.task_id for dep in self.task.deps() if not dep.complete()]
if missing:
deps = 'dependency' if len(missing) == 1 else 'dependencies'
raise RuntimeError('Unfulfilled %s at run time: %s' % (deps, ', '.join(missing)))
self.task.trigger_event(Event.START, self.task)
t0 = time.time()
status = None
if self.task.run == NotImplemented:
# External task
# TODO(erikbern): We should check for task completeness after non-external tasks too!
# This will resolve #814 and make things a lot more consistent
if self.task.complete():
status = DONE
else:
status = FAILED
expl = 'Task is an external data dependency ' \
'and data does not exist (yet?).'
else:
new_deps = self._run_get_new_deps()
status = DONE if not new_deps else PENDING
if new_deps:
logger.info(
'[pid %s] Worker %s new requirements %s',
os.getpid(), self.worker_id, self.task)
elif status == DONE:
self.task.trigger_event(
Event.PROCESSING_TIME, self.task, time.time() - t0)
expl = self.task.on_success()
logger.info('[pid %s] Worker %s done %s', os.getpid(),
self.worker_id, self.task)
self.task.trigger_event(Event.SUCCESS, self.task)
except KeyboardInterrupt:
raise
except BaseException as ex:
status = FAILED
logger.exception("[pid %s] Worker %s failed %s", os.getpid(), self.worker_id, self.task)
self.task.trigger_event(Event.FAILURE, self.task, ex)
raw_error_message = self.task.on_failure(ex)
expl = raw_error_message
finally:
self.result_queue.put(
(self.task.task_id, status, expl, missing, new_deps))
def _recursive_terminate(self):
import psutil
try:
parent = psutil.Process(self.pid)
children = parent.children(recursive=True)
# terminate parent. Give it a chance to clean up
super(TaskProcess, self).terminate()
parent.wait()
# terminate children
for child in children:
try:
child.terminate()
except psutil.NoSuchProcess:
continue
except psutil.NoSuchProcess:
return
def terminate(self):
"""Terminate this process and its subprocesses."""
# default terminate() doesn't cleanup child processes, it orphans them.
try:
return self._recursive_terminate()
except ImportError:
return super(TaskProcess, self).terminate()
class SingleProcessPool(object):
"""
Dummy process pool for using a single processor.
Imitates the api of multiprocessing.Pool using single-processor equivalents.
"""
def apply_async(self, function, args):
return function(*args)
def close(self):
pass
def join(self):
pass
class DequeQueue(collections.deque):
"""
deque wrapper implementing the Queue interface.
"""
def put(self, obj, block=None, timeout=None):
return self.append(obj)
def get(self, block=None, timeout=None):
return self.pop()
class AsyncCompletionException(Exception):
"""
Exception indicating that something went wrong with checking complete.
"""
def __init__(self, trace):
self.trace = trace
class TracebackWrapper(object):
"""
Class to wrap tracebacks so we can know they're not just strings.
"""
def __init__(self, trace):
self.trace = trace
def check_complete(task, out_queue):
"""
Checks if task is complete, puts the result to out_queue.
"""
logger.debug("Checking if %s is complete", task)
try:
is_complete = task.complete()
except Exception:
is_complete = TracebackWrapper(traceback.format_exc())
out_queue.put((task, is_complete))
class worker(Config):
# NOTE: `section.config-variable` in the config_path argument is deprecated in favor of `worker.config_variable`
ping_interval = FloatParameter(default=1.0,
config_path=dict(section='core', name='worker-ping-interval'))
keep_alive = BoolParameter(default=False,
config_path=dict(section='core', name='worker-keep-alive'))
count_uniques = BoolParameter(default=False,
config_path=dict(section='core', name='worker-count-uniques'),
description='worker-count-uniques means that we will keep a '
'worker alive only if it has a unique pending task, as '
'well as having keep-alive true')
wait_interval = FloatParameter(default=1.0,
config_path=dict(section='core', name='worker-wait-interval'))
wait_jitter = FloatParameter(default=5.0)
max_reschedules = IntParameter(default=1,
config_path=dict(section='core', name='worker-max-reschedules'))
timeout = IntParameter(default=0,
config_path=dict(section='core', name='worker-timeout'))
task_limit = IntParameter(default=None,
config_path=dict(section='core', name='worker-task-limit'))
retry_external_tasks = BoolParameter(default=False,
config_path=dict(section='core', name='retry-external-tasks'),
description='If true, incomplete external tasks will be '
'retested for completion while Luigi is running.')
no_install_shutdown_handler = BoolParameter(default=False,
                                                description='If true, the SIGUSR1 shutdown handler will '
                                                            'NOT be installed on the worker')
class KeepAliveThread(threading.Thread):
"""
Periodically tell the scheduler that the worker still lives.
"""
def __init__(self, scheduler, worker_id, ping_interval):
super(KeepAliveThread, self).__init__()
self._should_stop = threading.Event()
self._scheduler = scheduler
self._worker_id = worker_id
self._ping_interval = ping_interval
def stop(self):
self._should_stop.set()
def run(self):
while True:
self._should_stop.wait(self._ping_interval)
if self._should_stop.is_set():
logger.info("Worker %s was stopped. Shutting down Keep-Alive thread" % self._worker_id)
break
with fork_lock:
try:
self._scheduler.ping(worker=self._worker_id)
except: # httplib.BadStatusLine:
logger.warning('Failed pinging scheduler')
class Worker(object):
"""
Worker object communicates with a scheduler.
Simple class that talks to a scheduler and:
* tells the scheduler what it has to do + its dependencies
* asks for stuff to do (pulls it in a loop and runs it)
"""
def __init__(self, scheduler=None, worker_id=None, worker_processes=1, assistant=False, **kwargs):
if scheduler is None:
scheduler = CentralPlannerScheduler()
self.worker_processes = int(worker_processes)
self._worker_info = self._generate_worker_info()
if not worker_id:
worker_id = 'Worker(%s)' % ', '.join(['%s=%s' % (k, v) for k, v in self._worker_info])
self._config = worker(**kwargs)
assert self._config.wait_interval >= _WAIT_INTERVAL_EPS, "[worker] wait_interval must be positive"
assert self._config.wait_jitter >= 0.0, "[worker] wait_jitter must be equal or greater than zero"
self._id = worker_id
self._scheduler = scheduler
self._assistant = assistant
self._stop_requesting_work = False
self.host = socket.gethostname()
self._scheduled_tasks = {}
self._suspended_tasks = {}
self._first_task = None
self.add_succeeded = True
self.run_succeeded = True
self.unfulfilled_counts = collections.defaultdict(int)
# note that ``signal.signal(signal.SIGUSR1, fn)`` only works inside the main execution thread, which is why we
# provide the ability to conditionally install the hook.
if not self._config.no_install_shutdown_handler:
try:
signal.signal(signal.SIGUSR1, self.handle_interrupt)
except AttributeError:
pass
# Keep info about what tasks are running (could be in other processes)
if worker_processes == 1:
self._task_result_queue = DequeQueue()
else:
self._task_result_queue = multiprocessing.Queue()
self._running_tasks = {}
# Stuff for execution_summary
self._add_task_history = []
self._get_work_response_history = []
def _add_task(self, *args, **kwargs):
"""
Call ``self._scheduler.add_task``, but store the values too so we can
implement :py:func:`luigi.execution_summary.summary`.
"""
task_id = kwargs['task_id']
status = kwargs['status']
runnable = kwargs['runnable']
task = self._scheduled_tasks.get(task_id)
if task:
msg = (task, status, runnable)
self._add_task_history.append(msg)
self._scheduler.add_task(*args, **kwargs)
logger.info('Informed scheduler that task %s has status %s', task_id, status)
def __enter__(self):
"""
Start the KeepAliveThread.
"""
self._keep_alive_thread = KeepAliveThread(self._scheduler, self._id, self._config.ping_interval)
self._keep_alive_thread.daemon = True
self._keep_alive_thread.start()
return self
def __exit__(self, type, value, traceback):
"""
Stop the KeepAliveThread and kill still running tasks.
"""
self._keep_alive_thread.stop()
self._keep_alive_thread.join()
for task in self._running_tasks.values():
if task.is_alive():
task.terminate()
return False # Don't suppress exception
def _generate_worker_info(self):
# Generate as much info as possible about the worker
# Some of these calls might not be available on all OS's
args = [('salt', '%09d' % random.randrange(0, 999999999)),
('workers', self.worker_processes)]
try:
args += [('host', socket.gethostname())]
except BaseException:
pass
try:
args += [('username', getpass.getuser())]
except BaseException:
pass
try:
args += [('pid', os.getpid())]
except BaseException:
pass
try:
sudo_user = os.getenv("SUDO_USER")
if sudo_user:
args.append(('sudo_user', sudo_user))
except BaseException:
pass
return args
def _validate_task(self, task):
if not isinstance(task, Task):
raise TaskException('Can not schedule non-task %s' % task)
if not task.initialized():
# we can't get the repr of it since it's not initialized...
raise TaskException('Task of class %s not initialized. Did you override __init__ and forget to call super(...).__init__?' % task.__class__.__name__)
def _log_complete_error(self, task, tb):
log_msg = "Will not schedule {task} or any dependencies due to error in complete() method:\n{tb}".format(task=task, tb=tb)
logger.warning(log_msg)
def _log_dependency_error(self, task, tb):
log_msg = "Will not schedule {task} or any dependencies due to error in deps() method:\n{tb}".format(task=task, tb=tb)
logger.warning(log_msg)
def _log_unexpected_error(self, task):
logger.exception("Luigi unexpected framework error while scheduling %s", task) # needs to be called from within except clause
def _email_complete_error(self, task, formatted_traceback):
self._email_error(task, formatted_traceback,
subject="Luigi: {task} failed scheduling. Host: {host}",
headline="Will not schedule task or any dependencies due to error in complete() method",
)
def _email_dependency_error(self, task, formatted_traceback):
self._email_error(task, formatted_traceback,
subject="Luigi: {task} failed scheduling. Host: {host}",
headline="Will not schedule task or any dependencies due to error in deps() method",
)
def _email_unexpected_error(self, task, formatted_traceback):
self._email_error(task, formatted_traceback,
subject="Luigi: Framework error while scheduling {task}. Host: {host}",
headline="Luigi framework error",
)
def _email_task_failure(self, task, formatted_traceback):
self._email_error(task, formatted_traceback,
subject="Luigi: {task} FAILED. Host: {host}",
headline="A task failed when running. Most likely run() raised an exception.",
)
def _email_error(self, task, formatted_traceback, subject, headline):
formatted_subject = subject.format(task=task, host=self.host)
message = notifications.format_task_error(headline, task, formatted_traceback)
notifications.send_error_email(formatted_subject, message, task.owner_email)
def add(self, task, multiprocess=False):
"""
Add a Task for the worker to check and possibly schedule and run.
Returns True if task and its dependencies were successfully scheduled or completed before.
"""
if self._first_task is None and hasattr(task, 'task_id'):
self._first_task = task.task_id
self.add_succeeded = True
if multiprocess:
queue = multiprocessing.Manager().Queue()
pool = multiprocessing.Pool()
else:
queue = DequeQueue()
pool = SingleProcessPool()
self._validate_task(task)
pool.apply_async(check_complete, [task, queue])
# we track queue size ourselves because len(queue) won't work for multiprocessing
queue_size = 1
try:
seen = set([task.task_id])
while queue_size:
current = queue.get()
queue_size -= 1
item, is_complete = current
for next in self._add(item, is_complete):
if next.task_id not in seen:
self._validate_task(next)
seen.add(next.task_id)
pool.apply_async(check_complete, [next, queue])
queue_size += 1
except (KeyboardInterrupt, TaskException):
raise
except Exception as ex:
self.add_succeeded = False
formatted_traceback = traceback.format_exc()
self._log_unexpected_error(task)
task.trigger_event(Event.BROKEN_TASK, task, ex)
self._email_unexpected_error(task, formatted_traceback)
finally:
pool.close()
pool.join()
return self.add_succeeded
def _add(self, task, is_complete):
if self._config.task_limit is not None and len(self._scheduled_tasks) >= self._config.task_limit:
logger.warning('Will not schedule %s or any dependencies due to exceeded task-limit of %d', task, self._config.task_limit)
return
formatted_traceback = None
try:
self._check_complete_value(is_complete)
except KeyboardInterrupt:
raise
except AsyncCompletionException as ex:
formatted_traceback = ex.trace
except BaseException:
formatted_traceback = traceback.format_exc()
if formatted_traceback is not None:
self.add_succeeded = False
self._log_complete_error(task, formatted_traceback)
task.trigger_event(Event.DEPENDENCY_MISSING, task)
self._email_complete_error(task, formatted_traceback)
# abort, i.e. don't schedule any subtasks of a task with
# failing complete()-method since we don't know if the task
# is complete and subtasks might not be desirable to run if
# they have already ran before
return
if is_complete:
deps = None
status = DONE
runnable = False
task.trigger_event(Event.DEPENDENCY_PRESENT, task)
elif task.run == NotImplemented:
deps = None
status = PENDING
runnable = worker().retry_external_tasks
task.trigger_event(Event.DEPENDENCY_MISSING, task)
logger.warning('Data for %s does not exist (yet?). The task is an '
                           'external data dependency, so it cannot be run from'
' this luigi process.', task)
else:
try:
deps = task.deps()
except Exception as ex:
formatted_traceback = traceback.format_exc()
self.add_succeeded = False
self._log_dependency_error(task, formatted_traceback)
task.trigger_event(Event.BROKEN_TASK, task, ex)
self._email_dependency_error(task, formatted_traceback)
return
status = PENDING
runnable = True
if task.disabled:
status = DISABLED
if deps:
for d in deps:
self._validate_dependency(d)
task.trigger_event(Event.DEPENDENCY_DISCOVERED, task, d)
yield d # return additional tasks to add
deps = [d.task_id for d in deps]
self._scheduled_tasks[task.task_id] = task
self._add_task(worker=self._id, task_id=task.task_id, status=status,
deps=deps, runnable=runnable, priority=task.priority,
resources=task.process_resources(),
params=task.to_str_params(),
family=task.task_family,
module=task.task_module)
def _validate_dependency(self, dependency):
if isinstance(dependency, Target):
raise Exception('requires() can not return Target objects. Wrap it in an ExternalTask class')
elif not isinstance(dependency, Task):
raise Exception('requires() must return Task objects')
def _check_complete_value(self, is_complete):
if is_complete not in (True, False):
if isinstance(is_complete, TracebackWrapper):
raise AsyncCompletionException(is_complete.trace)
raise Exception("Return value of Task.complete() must be boolean (was %r)" % is_complete)
def _add_worker(self):
self._worker_info.append(('first_task', self._first_task))
self._scheduler.add_worker(self._id, self._worker_info)
def _log_remote_tasks(self, running_tasks, n_pending_tasks, n_unique_pending):
logger.debug("Done")
logger.debug("There are no more tasks to run at this time")
if running_tasks:
for r in running_tasks:
logger.debug('%s is currently run by worker %s', r['task_id'], r['worker'])
elif n_pending_tasks:
logger.debug("There are %s pending tasks possibly being run by other workers", n_pending_tasks)
if n_unique_pending:
logger.debug("There are %i pending tasks unique to this worker", n_unique_pending)
def _get_work(self):
if self._stop_requesting_work:
return None, 0, 0, 0
logger.debug("Asking scheduler for work...")
r = self._scheduler.get_work(
worker=self._id,
host=self.host,
assistant=self._assistant,
current_tasks=list(self._running_tasks.keys()),
)
n_pending_tasks = r['n_pending_tasks']
task_id = r['task_id']
running_tasks = r['running_tasks']
n_unique_pending = r['n_unique_pending']
self._get_work_response_history.append(dict(
task_id=task_id,
running_tasks=running_tasks,
))
if task_id is not None and task_id not in self._scheduled_tasks:
logger.info('Did not schedule %s, will load it dynamically', task_id)
try:
# TODO: we should obtain the module name from the server!
self._scheduled_tasks[task_id] = \
load_task(module=r.get('task_module'),
task_name=r['task_family'],
params_str=r['task_params'])
except TaskClassException as ex:
msg = 'Cannot find task for %s' % task_id
logger.exception(msg)
subject = 'Luigi: %s' % msg
error_message = notifications.wrap_traceback(ex)
notifications.send_error_email(subject, error_message)
self._add_task(worker=self._id, task_id=task_id, status=FAILED, runnable=False,
assistant=self._assistant)
task_id = None
self.run_succeeded = False
return task_id, running_tasks, n_pending_tasks, n_unique_pending
def _run_task(self, task_id):
task = self._scheduled_tasks[task_id]
p = self._create_task_process(task)
self._running_tasks[task_id] = p
if self.worker_processes > 1:
with fork_lock:
p.start()
else:
# Run in the same process
p.run()
def _create_task_process(self, task):
def update_tracking_url(tracking_url):
self._scheduler.add_task(
task_id=task.task_id,
worker=self._id,
status=RUNNING,
tracking_url=tracking_url,
)
return TaskProcess(
task, self._id, self._task_result_queue,
random_seed=bool(self.worker_processes > 1),
worker_timeout=self._config.timeout,
tracking_url_callback=update_tracking_url,
)
def _purge_children(self):
"""
Find dead children and put a response on the result queue.
"""
for task_id, p in six.iteritems(self._running_tasks):
if not p.is_alive() and p.exitcode:
error_msg = 'Task %s died unexpectedly with exit code %s' % (task_id, p.exitcode)
elif p.timeout_time is not None and time.time() > float(p.timeout_time) and p.is_alive():
p.terminate()
error_msg = 'Task %s timed out and was terminated.' % task_id
else:
continue
logger.info(error_msg)
self._task_result_queue.put((task_id, FAILED, error_msg, [], []))
def _handle_next_task(self):
"""
We have to catch three ways a task can be "done":
1. normal execution: the task runs/fails and puts a result back on the queue,
2. new dependencies: the task yielded new deps that were not complete and
will be rescheduled and dependencies added,
3. child process dies: we need to catch this separately.
"""
while True:
self._purge_children() # Deal with subprocess failures
try:
task_id, status, expl, missing, new_requirements = (
self._task_result_queue.get(
timeout=self._config.wait_interval))
except Queue.Empty:
return
task = self._scheduled_tasks[task_id]
            if not task or task_id not in self._running_tasks:
                # Not a running task. Probably already removed.
                # Maybe it yielded something?
                continue
# external task if run not implemented, retry-able if config option is enabled.
external_task_retryable = task.run == NotImplemented and self._config.retry_external_tasks
if status == FAILED and not external_task_retryable:
self._email_task_failure(task, expl)
new_deps = []
if new_requirements:
new_req = [load_task(module, name, params)
for module, name, params in new_requirements]
for t in new_req:
self.add(t)
new_deps = [t.task_id for t in new_req]
self._add_task(worker=self._id,
task_id=task_id,
status=status,
expl=json.dumps(expl),
resources=task.process_resources(),
runnable=None,
params=task.to_str_params(),
family=task.task_family,
module=task.task_module,
new_deps=new_deps,
assistant=self._assistant)
self._running_tasks.pop(task_id)
# re-add task to reschedule missing dependencies
if missing:
reschedule = True
# keep out of infinite loops by not rescheduling too many times
for task_id in missing:
self.unfulfilled_counts[task_id] += 1
if (self.unfulfilled_counts[task_id] >
self._config.max_reschedules):
reschedule = False
if reschedule:
self.add(task)
self.run_succeeded &= (status == DONE) or (len(new_deps) > 0)
return
def _sleeper(self):
# TODO is exponential backoff necessary?
while True:
jitter = self._config.wait_jitter
wait_interval = self._config.wait_interval + random.uniform(0, jitter)
logger.debug('Sleeping for %f seconds', wait_interval)
time.sleep(wait_interval)
yield
def _keep_alive(self, n_pending_tasks, n_unique_pending):
"""
        Returns true if a worker should stay alive given the pending task counts.
If worker-keep-alive is not set, this will always return false.
For an assistant, it will always return the value of worker-keep-alive.
Otherwise, it will return true for nonzero n_pending_tasks.
If worker-count-uniques is true, it will also
require that one of the tasks is unique to this worker.
"""
if not self._config.keep_alive:
return False
elif self._assistant:
return True
else:
return n_pending_tasks and (n_unique_pending or not self._config.count_uniques)
def handle_interrupt(self, signum, _):
"""
Stops the assistant from asking for more work on SIGUSR1
"""
if signum == signal.SIGUSR1:
self._config.keep_alive = False
self._stop_requesting_work = True
def run(self):
"""
Returns True if all scheduled tasks were executed successfully.
"""
logger.info('Running Worker with %d processes', self.worker_processes)
sleeper = self._sleeper()
self.run_succeeded = True
self._add_worker()
while True:
while len(self._running_tasks) >= self.worker_processes:
logger.debug('%d running tasks, waiting for next task to finish', len(self._running_tasks))
self._handle_next_task()
task_id, running_tasks, n_pending_tasks, n_unique_pending = self._get_work()
if task_id is None:
if not self._stop_requesting_work:
self._log_remote_tasks(running_tasks, n_pending_tasks, n_unique_pending)
if len(self._running_tasks) == 0:
if self._keep_alive(n_pending_tasks, n_unique_pending):
six.next(sleeper)
continue
else:
break
else:
self._handle_next_task()
continue
# task_id is not None:
logger.debug("Pending tasks: %s", n_pending_tasks)
self._run_task(task_id)
while len(self._running_tasks):
logger.debug('Shut down Worker, %d more tasks to go', len(self._running_tasks))
self._handle_next_task()
return self.run_succeeded
| 1 | 14,202 | `default=True` shouldn't be used for BoolParameters iirc. | spotify-luigi | py |
@@ -35,6 +35,10 @@ func parsePrettyFormatLog(repo *Repository, logByts []byte) (*list.List, error)
}
func RefEndName(refStr string) string {
+ if strings.HasPrefix(refStr, "refs/heads/") {
+ return strings.TrimPrefix(refStr, "refs/heads/")
+ }
+
index := strings.LastIndex(refStr, "/")
if index != -1 {
return refStr[index+1:] | 1 | // Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package git
import (
"bytes"
"container/list"
"fmt"
"os"
"path/filepath"
"strings"
)
const prettyLogFormat = `--pretty=format:%H`
func parsePrettyFormatLog(repo *Repository, logByts []byte) (*list.List, error) {
l := list.New()
if len(logByts) == 0 {
return l, nil
}
parts := bytes.Split(logByts, []byte{'\n'})
for _, commitId := range parts {
commit, err := repo.GetCommit(string(commitId))
if err != nil {
return nil, err
}
l.PushBack(commit)
}
return l, nil
}
func RefEndName(refStr string) string {
index := strings.LastIndex(refStr, "/")
if index != -1 {
return refStr[index+1:]
}
return refStr
}
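// Worked examples for RefEndName (illustrative):
//
//	RefEndName("refs/heads/master")      // "master"
//	RefEndName("refs/heads/feature/foo") // "foo"; the patch above instead trims
//	                                     // the "refs/heads/" prefix (11 bytes),
//	                                     // so branch names containing '/' come
//	                                     // back intact as "feature/foo".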
// If the object is stored in its own file (i.e. not in a pack file),
// this function returns the full path to the object file.
// It does not test if the file exists.
func filepathFromSHA1(rootdir, sha1 string) string {
return filepath.Join(rootdir, "objects", sha1[:2], sha1[2:])
}
// isDir returns true if given path is a directory,
// or returns false when it's a file or does not exist.
func isDir(dir string) bool {
f, e := os.Stat(dir)
if e != nil {
return false
}
return f.IsDir()
}
// isFile returns true if given path is a file,
// or returns false when it's a directory or does not exist.
func isFile(filePath string) bool {
f, e := os.Stat(filePath)
if e != nil {
return false
}
return !f.IsDir()
}
func concatenateError(err error, stderr string) error {
if len(stderr) == 0 {
return err
}
return fmt.Errorf("%v: %s", err, stderr)
}
| 1 | 9,602 | It would be better to use `return refStr[12:]`. or `11`... I have problem with counting.. | gogs-gogs | go |
@@ -211,9 +211,7 @@ func ruleHash(state *core.BuildState, target *core.BuildTarget, runtime bool) []
for _, licence := range target.Licences {
h.Write([]byte(licence))
}
- for _, output := range target.TestOutputs {
- h.Write([]byte(output))
- }
+
for _, output := range target.OptionalOutputs {
h.Write([]byte(output))
} | 1 | // Utilities to help with incremental builds.
//
// There are four things we consider for each rule:
// - the global config, some parts of which affect all rules
// - the rule definition itself (the command to run, etc)
// - any input files it might have
// - any dependencies.
//
// If all of those are the same as the last time the rule was run,
// we can safely assume that the output will be the same this time
// and so we don't have to re-run it again.
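//
// Roughly, as an illustrative sketch rather than the exact control flow below:
//
//	rebuild = configChanged || ruleHashChanged || sourceHashChanged ||
//		secretHashChanged || anyDependencyChanged || outputsMissing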
package build
import (
"bytes"
"crypto/sha1"
"encoding/base64"
"encoding/gob"
"fmt"
"hash"
"os"
"path"
"sort"
"github.com/thought-machine/please/src/core"
"github.com/thought-machine/please/src/fs"
)
const hashLength = sha1.Size
// Tag that we attach for xattrs to store hashes against files.
// Note that we are required to provide the user namespace; that seems to be set implicitly
// by the attr utility, but that is not done for us here.
const xattrName = "user.plz_build"
// Length of the full hash we write, which has multiple parts.
const fullHashLength = 5 * hashLength
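// Layout of that full hash, as assembled by targetHash/writeRuleHash and sliced
// apart again by readRuleHashFromXattrs below (h = hashLength):
//
//	[0h:1h] rule hash (pre-build)
//	[1h:2h] rule hash (post-build)
//	[2h:3h] config hash
//	[3h:4h] source hash
//	[4h:5h] secret hash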
// noSecrets is the thing we write when a rule doesn't have any secrets defined.
var noSecrets = []byte{45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45}
// Used to write something when we need to indicate a boolean in a hash. Can be essentially
// any value as long as they're different from one another.
var boolTrueHashValue = []byte{2}
var boolFalseHashValue = []byte{1}
// Return true if the rule needs building, false if the existing outputs are OK.
func needsBuilding(state *core.BuildState, target *core.BuildTarget, postBuild bool) bool {
// Check the dependencies first, because they don't need any disk I/O.
if target.NeedsTransitiveDependencies {
if anyDependencyHasChanged(target) {
return true // one of the transitive deps has changed, need to rebuild
}
} else {
for _, dep := range target.Dependencies() {
if dep.State() < core.Unchanged {
log.Debug("Need to rebuild %s, %s has changed", target.Label, dep.Label)
return true // dependency has just been rebuilt, do this too.
}
}
}
// If the metadata file containing the std-out and additional outputs doesn't exist, rebuild
if !fs.FileExists(targetBuildMetadataFileName(target)) {
log.Debug("Need to rebuild %s, metadata file is missing", target.Label)
return true
}
oldHashes := readRuleHashFromXattrs(state, target, postBuild)
if !bytes.Equal(oldHashes.config, state.Hashes.Config) {
if len(oldHashes.config) == 0 {
// Small nicety to make it a bit clearer what's going on.
log.Debug("Need to build %s, outputs aren't there", target.Label)
} else {
log.Debug("Need to rebuild %s, config has changed (was %s, need %s)", target.Label, b64(oldHashes.config), b64(state.Hashes.Config))
}
return true
}
newRuleHash := RuleHash(state, target, false, postBuild)
if !bytes.Equal(oldHashes.rule, newRuleHash) {
log.Debug("Need to rebuild %s, rule has changed (was %s, need %s)", target.Label, b64(oldHashes.rule), b64(newRuleHash))
return true
}
newSourceHash, err := sourceHash(state, target)
if err != nil || !bytes.Equal(oldHashes.source, newSourceHash) {
log.Debug("Need to rebuild %s, sources have changed (was %s, need %s)", target.Label, b64(oldHashes.source), b64(newSourceHash))
return true
}
newSecretHash, err := secretHash(state, target)
if err != nil || !bytes.Equal(oldHashes.secret, newSecretHash) {
log.Debug("Need to rebuild %s, secrets have changed (was %s, need %s)", target.Label, b64(oldHashes.secret), b64(newSecretHash))
return true
}
// Check the outputs of this rule exist. This would only happen if the user had
// removed them but it's incredibly aggravating if you remove an output and the
// rule won't rebuild itself.
for _, output := range target.Outputs() {
realOutput := path.Join(target.OutDir(), output)
if !core.PathExists(realOutput) {
log.Debug("Output %s doesn't exist for rule %s; will rebuild.", realOutput, target.Label)
return true
}
}
// Maybe we've forced a rebuild. Do this last; might be interesting to see if it needed building anyway.
return state.ShouldRebuild(target)
}
// b64 base64 encodes a string of bytes for printing.
func b64(b []byte) string {
if len(b) == 0 {
return "<not found>"
}
return base64.RawStdEncoding.EncodeToString(b)
}
// Returns true if any transitive dependency of this target has changed.
func anyDependencyHasChanged(target *core.BuildTarget) bool {
done := map[core.BuildLabel]bool{}
var inner func(*core.BuildTarget) bool
inner = func(dependency *core.BuildTarget) bool {
done[dependency.Label] = true
if dependency != target && dependency.State() < core.Unchanged {
return true
} else if !dependency.OutputIsComplete || dependency == target {
for _, dep := range dependency.Dependencies() {
if !done[dep.Label] {
if inner(dep) {
log.Debug("Need to rebuild %s, %s has changed", target.Label, dep.Label)
return true
}
}
}
}
return false
}
return inner(target)
}
func mustSourceHash(state *core.BuildState, target *core.BuildTarget) []byte {
b, err := sourceHash(state, target)
if err != nil {
log.Fatalf("%s", err)
}
return b
}
// Calculate the hash of all sources of this rule
func sourceHash(state *core.BuildState, target *core.BuildTarget) ([]byte, error) {
h := sha1.New()
for source := range core.IterSources(state.Graph, target, false) {
result, err := state.PathHasher.Hash(source.Src, false, true)
if err != nil {
return nil, err
}
h.Write(result)
h.Write([]byte(source.Src))
}
for _, tool := range target.AllTools() {
for _, path := range tool.FullPaths(state.Graph) {
result, err := state.PathHasher.Hash(path, false, true)
if err != nil {
return nil, err
}
h.Write(result)
}
}
return h.Sum(nil), nil
}
// RuleHash calculates a hash for the relevant bits of this rule that affect its output.
// Optionally it can include parts of the rule that affect runtime (most obviously test-time).
// Note that we have to hash on the declared fields, we obviously can't hash pointers etc.
// incrementality_test will warn if new fields are added to the struct but not here.
func RuleHash(state *core.BuildState, target *core.BuildTarget, runtime, postBuild bool) []byte {
if runtime || (postBuild && target.BuildCouldModifyTarget()) {
return ruleHash(state, target, runtime)
}
// Non-post-build hashes get stored on the target itself.
if len(target.RuleHash) != 0 {
return target.RuleHash
}
target.RuleHash = ruleHash(state, target, false) // This is never a runtime hash.
return target.RuleHash
}
func ruleHash(state *core.BuildState, target *core.BuildTarget, runtime bool) []byte {
h := sha1.New()
h.Write([]byte(target.Label.String()))
for _, dep := range target.DeclaredDependencies() {
h.Write([]byte(dep.String()))
}
for _, vis := range target.Visibility {
h.Write([]byte(vis.String())) // Doesn't strictly affect the output, but best to be safe.
}
for _, hsh := range target.Hashes {
h.Write([]byte(hsh))
}
for _, source := range target.AllSources() {
h.Write([]byte(source.String()))
}
for _, out := range target.DeclaredOutputs() {
h.Write([]byte(out))
}
outs := target.DeclaredNamedOutputs()
for _, name := range target.DeclaredOutputNames() {
h.Write([]byte(name))
for _, out := range outs[name] {
h.Write([]byte(out))
}
}
for _, licence := range target.Licences {
h.Write([]byte(licence))
}
for _, output := range target.TestOutputs {
h.Write([]byte(output))
}
for _, output := range target.OptionalOutputs {
h.Write([]byte(output))
}
for _, label := range target.Labels {
h.Write([]byte(label))
}
for _, secret := range target.Secrets {
h.Write([]byte(secret))
}
hashBool(h, target.IsBinary)
hashBool(h, target.IsTest)
hashOptionalBool(h, target.Sandbox)
// Note that we only hash the current command here; whatever's set in commands that we're not going
// to run is uninteresting to us.
h.Write([]byte(target.GetCommand(state)))
if runtime {
// Similarly, we only hash the current command here again.
h.Write([]byte(target.GetTestCommand(state)))
for _, datum := range target.AllData() {
h.Write([]byte(datum.String()))
}
hashOptionalBool(h, target.TestSandbox)
}
hashBool(h, target.NeedsTransitiveDependencies)
hashBool(h, target.OutputIsComplete)
hashBool(h, target.Stamp)
hashBool(h, target.IsFilegroup)
hashBool(h, target.IsTextFile)
hashBool(h, target.IsRemoteFile)
hashBool(h, target.Local)
hashOptionalBool(h, target.ExitOnError)
for _, require := range target.Requires {
h.Write([]byte(require))
}
// Indeterminate iteration order, yay...
languages := []string{}
for k := range target.Provides {
languages = append(languages, k)
}
sort.Strings(languages)
for _, lang := range languages {
h.Write([]byte(lang))
h.Write([]byte(target.Provides[lang].String()))
}
// We don't need to hash the functions themselves because they get rerun every time -
// we just need to check whether one is added or removed, which is good since it's
// nigh impossible to really verify whether it's changed or not (since it may call
// any amount of other stuff).
hashBool(h, target.PreBuildFunction != nil)
hashBool(h, target.PostBuildFunction != nil)
if target.PassEnv != nil {
for _, env := range *target.PassEnv {
h.Write([]byte(env))
h.Write([]byte{'='})
h.Write([]byte(os.Getenv(env)))
}
}
for _, o := range target.OutputDirectories {
h.Write([]byte(o))
}
hashMap(h, target.EntryPoints)
hashMap(h, target.Env)
h.Write([]byte(target.FileContent))
return h.Sum(nil)
}
func hashMap(writer hash.Hash, eps map[string]string) {
	// Pre-size with zero length so append doesn't leave empty-string keys up front.
	keys := make([]string, 0, len(eps))
for ep := range eps {
keys = append(keys, ep)
}
sort.Strings(keys)
for _, ep := range keys {
writer.Write([]byte(ep + "=" + eps[ep]))
}
}
func hashBool(writer hash.Hash, b bool) {
if b {
writer.Write(boolTrueHashValue)
} else {
writer.Write(boolFalseHashValue)
}
}
func hashOptionalBool(writer hash.Hash, b bool) {
if b {
hashBool(writer, b)
}
}
type ruleHashes struct {
rule, config, source, secret []byte
postBuildHash bool
}
// readRuleHashFromXattrs reads the hash of a file using xattrs.
// If postBuild is true then the rule hash will be the post-build one if present.
func readRuleHashFromXattrs(state *core.BuildState, target *core.BuildTarget, postBuild bool) ruleHashes {
var h []byte
for _, output := range target.FullOutputs() {
b := fs.ReadAttr(output, xattrName, state.XattrsSupported)
if b == nil {
return ruleHashes{}
} else if h != nil && !bytes.Equal(h, b) {
// Not an error; we could warn but it's possible to get here legitimately so
// just return nothing.
return ruleHashes{}
}
h = b
}
if h == nil {
// If the target could be modified during build, we might have written the hash on the build MD file.
// Only works for pre-build, though.
if target.BuildCouldModifyTarget() && !postBuild {
h = fs.ReadAttr(targetBuildMetadataFileName(target), xattrName, state.XattrsSupported)
if h == nil {
return ruleHashes{}
}
} else {
// Try the fallback file; target might not have had any outputs, for example.
h = fs.ReadAttrFile(path.Join(target.OutDir(), target.Label.Name))
if h == nil {
return ruleHashes{}
}
}
}
if postBuild {
return ruleHashes{
rule: h[hashLength : 2*hashLength],
config: h[2*hashLength : 3*hashLength],
source: h[3*hashLength : 4*hashLength],
secret: h[4*hashLength : fullHashLength],
postBuildHash: true,
}
}
return ruleHashes{
rule: h[0:hashLength],
config: h[2*hashLength : 3*hashLength],
source: h[3*hashLength : 4*hashLength],
secret: h[4*hashLength : fullHashLength],
}
}
// writeRuleHash attaches the rule hash to the file to its outputs using xattrs.
func writeRuleHash(state *core.BuildState, target *core.BuildTarget) error {
hash, err := targetHash(state, target)
if err != nil {
return err
}
secretHash, err := secretHash(state, target)
if err != nil {
return err
}
hash = append(hash, secretHash...)
outputs := target.FullOutputs()
if len(outputs) == 0 {
// Target has no outputs, have to use the fallback file.
return fs.RecordAttrFile(path.Join(target.OutDir(), target.Label.Name), hash)
}
for _, output := range outputs {
if err := fs.RecordAttr(output, hash, xattrName, state.XattrsSupported); err != nil {
return err
}
}
if fs.FileExists(targetBuildMetadataFileName(target)) {
return fs.RecordAttr(targetBuildMetadataFileName(target), hash, xattrName, state.XattrsSupported)
}
return nil
}
func targetBuildMetadataFileName(target *core.BuildTarget) string {
return path.Join(target.OutDir(), target.TargetBuildMetadataFileName())
}
// loadTargetMetadata retrieves the target metadata from a file in the output directory of this target
func loadTargetMetadata(target *core.BuildTarget) (*core.BuildMetadata, error) {
file, err := os.Open(targetBuildMetadataFileName(target))
if err != nil {
return nil, err
}
defer file.Close()
md := new(core.BuildMetadata)
reader := gob.NewDecoder(file)
if err := reader.Decode(&md); err != nil {
return nil, err
}
return md, nil
}
// StoreTargetMetadata stores the target metadata into a file in the output directory of the target.
func StoreTargetMetadata(target *core.BuildTarget, md *core.BuildMetadata) error {
filename := targetBuildMetadataFileName(target)
if err := os.RemoveAll(filename); err != nil {
return fmt.Errorf("failed to remove existing %s build metadata file: %w", target.Label, err)
} else if err := os.MkdirAll(path.Dir(filename), core.DirPermissions); err != nil {
return fmt.Errorf("Failed to create directory for build metadata file for %s: %w", target, err)
}
mdFile, err := os.Create(filename)
if err != nil {
return fmt.Errorf("failed to create new %s build metadata file: %w", target.Label, err)
}
defer mdFile.Close()
writer := gob.NewEncoder(mdFile)
if err := writer.Encode(md); err != nil {
return fmt.Errorf("failed to encode %s build metadata file: %w", target.Label, err)
}
return nil
}
// targetHash returns the hash for a target and any error encountered while calculating it.
func targetHash(state *core.BuildState, target *core.BuildTarget) ([]byte, error) {
hash := append(RuleHash(state, target, false, false), RuleHash(state, target, false, true)...)
hash = append(hash, state.Hashes.Config...)
hash2, err := sourceHash(state, target)
if err != nil {
return nil, err
}
return append(hash, hash2...), nil
}
// mustTargetHash returns the hash for a target and panics if it can't be calculated.
func mustTargetHash(state *core.BuildState, target *core.BuildTarget) []byte {
hash, err := targetHash(state, target)
if err != nil {
panic(err)
}
return hash
}
// mustShortTargetHash returns the hash for a target, shortened to 1/4 length.
func mustShortTargetHash(state *core.BuildState, target *core.BuildTarget) []byte {
return core.CollapseHash(mustTargetHash(state, target))
}
// RuntimeHash returns the target hash, config hash & runtime file hash,
// all rolled into one. Essentially this is one hash needed to determine if the runtime
// state is consistent.
func RuntimeHash(state *core.BuildState, target *core.BuildTarget, testRun int) ([]byte, error) {
hash := append(RuleHash(state, target, true, false), RuleHash(state, target, true, true)...)
hash = append(hash, state.Hashes.Config...)
h := sha1.New()
for source := range core.IterRuntimeFiles(state.Graph, target, true, target.TestDir(testRun)) {
result, err := state.PathHasher.Hash(source.Src, false, true)
if err != nil {
return result, err
}
h.Write(result)
}
return append(hash, h.Sum(nil)...), nil
}
// PrintHashes prints the various hashes for a target to stdout.
// It's used by plz hash --detailed to show a breakdown of the input hashes of a target.
func PrintHashes(state *core.BuildState, target *core.BuildTarget) {
if state.RemoteClient != nil && !target.Local {
state.RemoteClient.PrintHashes(target, false)
return
}
fmt.Printf("%s:\n", target.Label)
fmt.Printf(" Config: %s\n", b64(state.Hashes.Config))
fmt.Printf(" Rule: %s (pre-build)\n", b64(RuleHash(state, target, false, false)))
fmt.Printf(" Rule: %s (post-build)\n", b64(RuleHash(state, target, false, true)))
fmt.Printf(" Source: %s\n", b64(mustSourceHash(state, target)))
// Note that the logic here mimics sourceHash, but I don't want to pollute that with
// optional printing nonsense since it's on our hot path.
for source := range core.IterSources(state.Graph, target, false) {
fmt.Printf(" Source: %s: %s\n", source.Src, b64(state.PathHasher.MustHash(source.Src)))
}
for _, tool := range target.AllTools() {
if label, ok := tool.Label(); ok {
fmt.Printf(" Tool: %s: %s\n", label, b64(mustShortTargetHash(state, state.Graph.TargetOrDie(label))))
} else {
fmt.Printf(" Tool: %s: %s\n", tool, b64(state.PathHasher.MustHash(tool.FullPaths(state.Graph)[0])))
}
}
}
// secretHash calculates a hash for any secrets of a target.
func secretHash(state *core.BuildState, target *core.BuildTarget) ([]byte, error) {
if len(target.Secrets) == 0 {
return noSecrets, nil
}
h := sha1.New()
for _, secret := range target.Secrets {
ph, err := state.PathHasher.Hash(secret, false, false)
if err != nil && os.IsNotExist(err) {
return noSecrets, nil // Not having the secrets is not an error yet.
} else if err != nil {
return nil, err
}
h.Write(ph)
}
return h.Sum(nil), nil
}
| 1 | 10,047 | Pretty sure these should only contribute to the runtime hash. | thought-machine-please | go |
@@ -1,4 +1,4 @@
-<div id="ncr-layout">
+<div id="gsa18f-layout">
<table class="w-container main-container" width='800'>
<tr>
<td> | 1 | <div id="ncr-layout">
<table class="w-container main-container" width='800'>
<tr>
<td>
<h1 class="communicart_header"><%= title %>: <%= cart.proposal.name %></h1>
<div class="communicart_description">
<p>
Requested by:
<strong><%= cart.requester.full_name %></strong>
</p>
</div>
</td>
</tr>
<%= render partial: "shared/email_status" %>
<tr>
<td>
<%= render partial: 'shared/proposal_properties', locals: {proposal: cart.proposal} %>
</td>
</tr>
</table>
</div>
| 1 | 12,755 | Just verifying: this change doesn't break the 18f layout, right? | 18F-C2 | rb |
@@ -120,14 +120,6 @@ func (c call) endStats(elapsed time.Duration, err error, isApplicationError bool
counter.Inc()
}
return
- case yarpcerrors.CodeOK:
- // If we got "CodeOK" it really means that this is not a yarpcError, in
- // which case this is another level of "unknown" error.
- c.edge.serverErrLatencies.Observe(elapsed)
- if counter, err := c.edge.serverFailures.Get("unknown_internal_yarpc"); err == nil {
- counter.Inc()
- }
- return
}
// If this code is executed we've hit an error code outside the usual error
// code range, so we'll just log the string representation of that code. | 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package observability
import (
"context"
"time"
"go.uber.org/yarpc/api/transport"
"go.uber.org/yarpc/yarpcerrors"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// A call represents a single RPC along an edge.
//
// To prevent allocating on the heap on the request path, it's a value instead
// of a pointer.
type call struct {
edge *edge
extract ContextExtractor
fields [5]zapcore.Field
started time.Time
ctx context.Context
req *transport.Request
rpcType transport.Type
inbound bool
}
func (c call) End(err error, isApplicationError bool) {
elapsed := _timeNow().Sub(c.started)
c.endLogs(elapsed, err, isApplicationError)
c.endStats(elapsed, err, isApplicationError)
}
func (c call) endLogs(elapsed time.Duration, err error, isApplicationError bool) {
msg := "Handled inbound request."
if !c.inbound {
msg = "Made outbound call."
}
ce := c.edge.logger.Check(zap.DebugLevel, msg)
if ce == nil {
return
}
fields := c.fields[:0]
fields = append(fields, zap.String("rpcType", c.rpcType.String()))
fields = append(fields, zap.Duration("latency", elapsed))
fields = append(fields, zap.Bool("successful", err == nil && !isApplicationError))
fields = append(fields, c.extract(c.ctx))
if isApplicationError {
fields = append(fields, zap.String("error", "application_error"))
} else {
fields = append(fields, zap.Error(err))
}
ce.Write(fields...)
}
func (c call) endStats(elapsed time.Duration, err error, isApplicationError bool) {
// TODO: We need a much better way to distinguish between caller and server
// errors. See T855583.
c.edge.calls.Inc()
if err == nil && !isApplicationError {
c.edge.successes.Inc()
c.edge.latencies.Observe(elapsed)
return
}
// For now, assume that all application errors are the caller's fault.
if isApplicationError {
c.edge.callerErrLatencies.Observe(elapsed)
if counter, err := c.edge.callerFailures.Get("application_error"); err == nil {
counter.Inc()
}
return
}
errCode := yarpcerrors.ErrorCode(err)
switch errCode {
case yarpcerrors.CodeCancelled,
yarpcerrors.CodeInvalidArgument,
yarpcerrors.CodeNotFound,
yarpcerrors.CodeAlreadyExists,
yarpcerrors.CodePermissionDenied,
yarpcerrors.CodeFailedPrecondition,
yarpcerrors.CodeAborted,
yarpcerrors.CodeOutOfRange,
yarpcerrors.CodeUnimplemented,
yarpcerrors.CodeUnauthenticated:
c.edge.callerErrLatencies.Observe(elapsed)
if counter, err := c.edge.callerFailures.Get(errCode.String()); err == nil {
counter.Inc()
}
return
case yarpcerrors.CodeUnknown,
yarpcerrors.CodeDeadlineExceeded,
yarpcerrors.CodeResourceExhausted,
yarpcerrors.CodeInternal,
yarpcerrors.CodeUnavailable,
yarpcerrors.CodeDataLoss:
c.edge.serverErrLatencies.Observe(elapsed)
if counter, err := c.edge.serverFailures.Get(errCode.String()); err == nil {
counter.Inc()
}
return
case yarpcerrors.CodeOK:
// If we got "CodeOK" it really means that this is not a yarpcError, in
// which case this is another level of "unknown" error.
c.edge.serverErrLatencies.Observe(elapsed)
if counter, err := c.edge.serverFailures.Get("unknown_internal_yarpc"); err == nil {
counter.Inc()
}
return
}
// If this code is executed we've hit an error code outside the usual error
// code range, so we'll just log the string representation of that code.
c.edge.serverErrLatencies.Observe(elapsed)
if counter, err := c.edge.serverFailures.Get(errCode.String()); err == nil {
counter.Inc()
}
}
| 1 | 15,268 | we should keep this counter using the "isYarpcError" api | yarpc-yarpc-go | go |
@@ -28,16 +28,16 @@ import org.apache.iceberg.Schema;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.relocated.com.google.common.base.Objects;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
-import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.types.Types.StructType;
+import org.apache.iceberg.util.CaseInsensitiveMap;
public class GenericRecord implements Record, StructLike {
private static final LoadingCache<StructType, Map<String, Integer>> NAME_MAP_CACHE =
Caffeine.newBuilder()
.weakKeys()
.build(struct -> {
- Map<String, Integer> idToPos = Maps.newHashMap();
+ Map<String, Integer> idToPos = new CaseInsensitiveMap<>();
List<Types.NestedField> fields = struct.fields();
for (int i = 0; i < fields.size(); i += 1) {
idToPos.put(fields.get(i).name(), i); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.data;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.apache.iceberg.Schema;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.relocated.com.google.common.base.Objects;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.types.Types.StructType;
public class GenericRecord implements Record, StructLike {
private static final LoadingCache<StructType, Map<String, Integer>> NAME_MAP_CACHE =
Caffeine.newBuilder()
.weakKeys()
.build(struct -> {
Map<String, Integer> idToPos = Maps.newHashMap();
List<Types.NestedField> fields = struct.fields();
for (int i = 0; i < fields.size(); i += 1) {
idToPos.put(fields.get(i).name(), i);
}
return idToPos;
});
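  // A JDK-only alternative for case-insensitive lookup (illustrative sketch,
  // not what this class does):
  //
  //     Map<String, Integer> idToPos = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);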
public static GenericRecord create(Schema schema) {
return new GenericRecord(schema.asStruct());
}
public static GenericRecord create(StructType struct) {
return new GenericRecord(struct);
}
private final StructType struct;
private final int size;
private final Object[] values;
private final Map<String, Integer> nameToPos;
private GenericRecord(StructType struct) {
this.struct = struct;
this.size = struct.fields().size();
this.values = new Object[size];
this.nameToPos = NAME_MAP_CACHE.get(struct);
}
private GenericRecord(GenericRecord toCopy) {
this.struct = toCopy.struct;
this.size = toCopy.size;
this.values = Arrays.copyOf(toCopy.values, toCopy.values.length);
this.nameToPos = toCopy.nameToPos;
}
private GenericRecord(GenericRecord toCopy, Map<String, Object> overwrite) {
this.struct = toCopy.struct;
this.size = toCopy.size;
this.values = Arrays.copyOf(toCopy.values, toCopy.values.length);
this.nameToPos = toCopy.nameToPos;
for (Map.Entry<String, Object> entry : overwrite.entrySet()) {
setField(entry.getKey(), entry.getValue());
}
}
@Override
public StructType struct() {
return struct;
}
@Override
public Object getField(String name) {
Integer pos = nameToPos.get(name);
if (pos != null) {
return values[pos];
}
return null;
}
@Override
public void setField(String name, Object value) {
Integer pos = nameToPos.get(name);
Preconditions.checkArgument(pos != null, "Cannot set unknown field named: %s", name);
values[pos] = value;
}
@Override
public int size() {
return size;
}
@Override
public Object get(int pos) {
return values[pos];
}
@Override
public <T> T get(int pos, Class<T> javaClass) {
Object value = get(pos);
if (value == null || javaClass.isInstance(value)) {
return javaClass.cast(value);
} else {
throw new IllegalStateException("Not an instance of " + javaClass.getName() + ": " + value);
}
}
@Override
public <T> void set(int pos, T value) {
values[pos] = value;
}
@Override
public GenericRecord copy() {
return new GenericRecord(this);
}
@Override
public GenericRecord copy(Map<String, Object> overwriteValues) {
return new GenericRecord(this, overwriteValues);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("Record(");
for (int i = 0; i < values.length; i += 1) {
if (i != 0) {
sb.append(", ");
}
sb.append(values[i]);
}
sb.append(")");
return sb.toString();
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
} else if (!(other instanceof GenericRecord)) {
return false;
}
GenericRecord that = (GenericRecord) other;
return Arrays.deepEquals(this.values, that.values);
}
@Override
public int hashCode() {
return Objects.hashCode(values);
}
}
| 1 | 38,583 | Could we just use case insensitive strings as keys instead of creating a new map implementation? e.g. simply using a treemap with a comparator `Map<String, Integer> idToPos = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);` could work I think | apache-iceberg | java |
@@ -67,7 +67,14 @@ namespace OpenTelemetry.Context
/// <returns>The slot previously registered.</returns>
public static RuntimeContextSlot<T> GetSlot<T>(string name)
{
- return (RuntimeContextSlot<T>)Slots[name];
+ if (Slots.TryGetValue(name, out var slot) && slot is RuntimeContextSlot<T> expectedSlot)
+ {
+ return expectedSlot;
+ }
+ else
+ {
+ return null;
+ }
}
/* | 1 | // <copyright file="RuntimeContext.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Concurrent;
using System.Runtime.CompilerServices;
namespace OpenTelemetry.Context
{
/// <summary>
/// Generic runtime context management API.
/// </summary>
public static class RuntimeContext
{
private static readonly ConcurrentDictionary<string, object> Slots = new ConcurrentDictionary<string, object>();
/// <summary>
/// Gets or sets the actual context carrier implementation.
/// </summary>
#if !NET452
public static Type ContextSlotType { get; set; } = typeof(AsyncLocalRuntimeContextSlot<>);
#else
public static Type ContextSlotType { get; set; } = typeof(RemotingRuntimeContextSlot<>);
#endif
/// <summary>
/// Register a named context slot.
/// </summary>
/// <param name="name">The name of the context slot.</param>
/// <typeparam name="T">The type of the underlying value.</typeparam>
/// <returns>The slot registered.</returns>
public static RuntimeContextSlot<T> RegisterSlot<T>(string name)
{
lock (Slots)
{
if (Slots.ContainsKey(name))
{
throw new InvalidOperationException($"The context slot {name} is already registered.");
}
var type = ContextSlotType.MakeGenericType(typeof(T));
var ctor = type.GetConstructor(new Type[] { typeof(string) });
var slot = (RuntimeContextSlot<T>)ctor.Invoke(new object[] { name });
Slots[name] = slot;
return slot;
}
}
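        // Illustrative usage (a sketch; the slot name is hypothetical):
        //
        //     var slot = RuntimeContext.RegisterSlot<string>("my-slot");
        //     RuntimeContext.SetValue("my-slot", "some-value");
        //     var value = RuntimeContext.GetValue<string>("my-slot"); // "some-value"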
/// <summary>
/// Get a registered slot from a given name.
/// </summary>
/// <param name="name">The name of the context slot.</param>
/// <typeparam name="T">The type of the underlying value.</typeparam>
/// <returns>The slot previously registered.</returns>
public static RuntimeContextSlot<T> GetSlot<T>(string name)
{
return (RuntimeContextSlot<T>)Slots[name];
}
/*
public static void Apply(IDictionary<string, object> snapshot)
{
foreach (var entry in snapshot)
{
// TODO: revisit this part if we want Snapshot() to be used on critical paths
dynamic value = entry.Value;
SetValue(entry.Key, value);
}
}
public static IDictionary<string, object> Snapshot()
{
var retval = new Dictionary<string, object>();
foreach (var entry in Slots)
{
// TODO: revisit this part if we want Snapshot() to be used on critical paths
dynamic slot = entry.Value;
retval[entry.Key] = slot.Get();
}
return retval;
}
*/
/// <summary>
/// Sets the value to a registered slot.
/// </summary>
/// <param name="name">The name of the context slot.</param>
/// <param name="value">The value to be set.</param>
/// <typeparam name="T">The type of the value.</typeparam>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static void SetValue<T>(string name, T value)
{
var slot = (RuntimeContextSlot<T>)Slots[name];
slot.Set(value);
}
/// <summary>
/// Gets the value from a registered slot.
/// </summary>
/// <param name="name">The name of the context slot.</param>
/// <typeparam name="T">The type of the value.</typeparam>
/// <returns>The value retrieved from the context slot.</returns>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static T GetValue<T>(string name)
{
var slot = (RuntimeContextSlot<T>)Slots[name];
return slot.Get();
}
// For testing purpose
// private static Clear
}
}
| 1 | 16,328 | I think this should be a throw. Or should the method be named TryGetSlot? | open-telemetry-opentelemetry-dotnet | .cs |
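A sketch of the Try-pattern the reviewer floats, assuming it would sit next to GetSlot<T> inside RuntimeContext (this is not part of the actual patch):

/// <summary>
/// Try to get a registered slot by name without throwing or returning null.
/// </summary>
public static bool TryGetSlot<T>(string name, out RuntimeContextSlot<T> slot)
{
    if (Slots.TryGetValue(name, out var value) && value is RuntimeContextSlot<T> typedSlot)
    {
        slot = typedSlot;
        return true;
    }

    slot = null;
    return false;
}

With this shape, GetSlot<T> could keep its original throwing behavior (the removed cast already threw KeyNotFoundException on a missing name and InvalidCastException on a type mismatch), while callers that expect absence use the Try variant.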
@@ -3991,7 +3991,15 @@ int LuaScriptInterface::luaGameLoadMap(lua_State* L)
{
// Game.loadMap(path)
const std::string& path = getString(L, 1);
- g_dispatcher.addTask(createTask(std::bind(&Game::loadMap, &g_game, path)));
+ g_dispatcher.addTask(createTask( [path]() {
+ try {
+ g_game.loadMap(path);
+ } catch (const std::exception& e) {
+ // FIXME: Should only catch some exceptions
+ std::cout << "[Error - LuaScriptInterface::luaGameLoadMap] Failed to load map: "
+ << e.what() << std::endl;
+ }
+ }));
return 0;
}
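A sketch of what the FIXME in the hunk above might resolve to: catch only the specific exception types the map loader can raise rather than a blanket std::exception. Which types Game::loadMap actually throws is an assumption here; the loader's real error paths would need checking before narrowing the handler like this.

g_dispatcher.addTask(createTask([path]() {
    try {
        g_game.loadMap(path);
    } catch (const std::invalid_argument& e) {
        // e.g. a malformed or missing map path (assumed error path)
        std::cout << "[Error - LuaScriptInterface::luaGameLoadMap] Bad map path: "
                  << e.what() << std::endl;
    } catch (const std::ios_base::failure& e) {
        // e.g. the file disappeared or became unreadable mid-load (assumed error path)
        std::cout << "[Error - LuaScriptInterface::luaGameLoadMap] I/O failure while loading map: "
                  << e.what() << std::endl;
    }
}));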
| 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2017 Mark Samman <mark.samman@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include <boost/range/adaptor/reversed.hpp>
#include "luascript.h"
#include "chat.h"
#include "player.h"
#include "game.h"
#include "protocolstatus.h"
#include "spells.h"
#include "iologindata.h"
#include "configmanager.h"
#include "teleport.h"
#include "databasemanager.h"
#include "bed.h"
#include "monster.h"
#include "scheduler.h"
#include "databasetasks.h"
extern Chat* g_chat;
extern Game g_game;
extern Monsters g_monsters;
extern ConfigManager g_config;
extern Vocations g_vocations;
extern Spells* g_spells;
ScriptEnvironment::DBResultMap ScriptEnvironment::tempResults;
uint32_t ScriptEnvironment::lastResultId = 0;
std::multimap<ScriptEnvironment*, Item*> ScriptEnvironment::tempItems;
LuaEnvironment g_luaEnvironment;
ScriptEnvironment::ScriptEnvironment()
{
resetEnv();
}
ScriptEnvironment::~ScriptEnvironment()
{
resetEnv();
}
void ScriptEnvironment::resetEnv()
{
scriptId = 0;
callbackId = 0;
timerEvent = false;
interface = nullptr;
localMap.clear();
tempResults.clear();
auto pair = tempItems.equal_range(this);
auto it = pair.first;
while (it != pair.second) {
Item* item = it->second;
if (item->getParent() == VirtualCylinder::virtualCylinder) {
g_game.ReleaseItem(item);
}
it = tempItems.erase(it);
}
}
bool ScriptEnvironment::setCallbackId(int32_t callbackId, LuaScriptInterface* scriptInterface)
{
if (this->callbackId != 0) {
//nested callbacks are not allowed
if (interface) {
interface->reportErrorFunc("Nested callbacks!");
}
return false;
}
this->callbackId = callbackId;
interface = scriptInterface;
return true;
}
void ScriptEnvironment::getEventInfo(int32_t& scriptId, LuaScriptInterface*& scriptInterface, int32_t& callbackId, bool& timerEvent) const
{
scriptId = this->scriptId;
scriptInterface = interface;
callbackId = this->callbackId;
timerEvent = this->timerEvent;
}
uint32_t ScriptEnvironment::addThing(Thing* thing)
{
if (!thing || thing->isRemoved()) {
return 0;
}
Creature* creature = thing->getCreature();
if (creature) {
return creature->getID();
}
Item* item = thing->getItem();
if (item && item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
return item->getUniqueId();
}
for (const auto& it : localMap) {
if (it.second == item) {
return it.first;
}
}
localMap[++lastUID] = item;
return lastUID;
}
void ScriptEnvironment::insertItem(uint32_t uid, Item* item)
{
auto result = localMap.emplace(uid, item);
if (!result.second) {
std::cout << std::endl << "Lua Script Error: Thing uid already taken.";
}
}
Thing* ScriptEnvironment::getThingByUID(uint32_t uid)
{
if (uid >= 0x10000000) {
return g_game.getCreatureByID(uid);
}
if (uid <= std::numeric_limits<uint16_t>::max()) {
Item* item = g_game.getUniqueItem(uid);
if (item && !item->isRemoved()) {
return item;
}
return nullptr;
}
auto it = localMap.find(uid);
if (it != localMap.end()) {
Item* item = it->second;
if (!item->isRemoved()) {
return item;
}
}
return nullptr;
}
Item* ScriptEnvironment::getItemByUID(uint32_t uid)
{
Thing* thing = getThingByUID(uid);
if (!thing) {
return nullptr;
}
return thing->getItem();
}
Container* ScriptEnvironment::getContainerByUID(uint32_t uid)
{
Item* item = getItemByUID(uid);
if (!item) {
return nullptr;
}
return item->getContainer();
}
void ScriptEnvironment::removeItemByUID(uint32_t uid)
{
if (uid <= std::numeric_limits<uint16_t>::max()) {
g_game.removeUniqueItem(uid);
return;
}
auto it = localMap.find(uid);
if (it != localMap.end()) {
localMap.erase(it);
}
}
void ScriptEnvironment::addTempItem(Item* item)
{
tempItems.emplace(this, item);
}
void ScriptEnvironment::removeTempItem(Item* item)
{
for (auto it = tempItems.begin(), end = tempItems.end(); it != end; ++it) {
if (it->second == item) {
tempItems.erase(it);
break;
}
}
}
uint32_t ScriptEnvironment::addResult(DBResult_ptr res)
{
tempResults[++lastResultId] = res;
return lastResultId;
}
bool ScriptEnvironment::removeResult(uint32_t id)
{
auto it = tempResults.find(id);
if (it == tempResults.end()) {
return false;
}
tempResults.erase(it);
return true;
}
DBResult_ptr ScriptEnvironment::getResultByID(uint32_t id)
{
auto it = tempResults.find(id);
if (it == tempResults.end()) {
return nullptr;
}
return it->second;
}
std::string LuaScriptInterface::getErrorDesc(ErrorCode_t code)
{
switch (code) {
case LUA_ERROR_PLAYER_NOT_FOUND: return "Player not found";
case LUA_ERROR_CREATURE_NOT_FOUND: return "Creature not found";
case LUA_ERROR_ITEM_NOT_FOUND: return "Item not found";
case LUA_ERROR_THING_NOT_FOUND: return "Thing not found";
case LUA_ERROR_TILE_NOT_FOUND: return "Tile not found";
case LUA_ERROR_HOUSE_NOT_FOUND: return "House not found";
case LUA_ERROR_COMBAT_NOT_FOUND: return "Combat not found";
case LUA_ERROR_CONDITION_NOT_FOUND: return "Condition not found";
case LUA_ERROR_AREA_NOT_FOUND: return "Area not found";
case LUA_ERROR_CONTAINER_NOT_FOUND: return "Container not found";
case LUA_ERROR_VARIANT_NOT_FOUND: return "Variant not found";
case LUA_ERROR_VARIANT_UNKNOWN: return "Unknown variant type";
case LUA_ERROR_SPELL_NOT_FOUND: return "Spell not found";
default: return "Bad error code";
}
}
ScriptEnvironment LuaScriptInterface::scriptEnv[16];
int32_t LuaScriptInterface::scriptEnvIndex = -1;
LuaScriptInterface::LuaScriptInterface(std::string interfaceName) : interfaceName(std::move(interfaceName))
{
if (!g_luaEnvironment.getLuaState()) {
g_luaEnvironment.initState();
}
}
LuaScriptInterface::~LuaScriptInterface()
{
closeState();
}
bool LuaScriptInterface::reInitState()
{
g_luaEnvironment.clearCombatObjects(this);
g_luaEnvironment.clearAreaObjects(this);
closeState();
return initState();
}
/// Same as lua_pcall, but adds stack trace to error strings in called function.
int LuaScriptInterface::protectedCall(lua_State* L, int nargs, int nresults)
{
int error_index = lua_gettop(L) - nargs;
lua_pushcfunction(L, luaErrorHandler);
lua_insert(L, error_index);
int ret = lua_pcall(L, nargs, nresults, error_index);
lua_remove(L, error_index);
return ret;
}
int32_t LuaScriptInterface::loadFile(const std::string& file, Npc* npc /* = nullptr*/)
{
//loads file as a chunk at stack top
int ret = luaL_loadfile(luaState, file.c_str());
if (ret != 0) {
lastLuaError = popString(luaState);
return -1;
}
//check that it is loaded as a function
if (!isFunction(luaState, -1)) {
return -1;
}
loadingFile = file;
if (!reserveScriptEnv()) {
return -1;
}
ScriptEnvironment* env = getScriptEnv();
env->setScriptId(EVENT_ID_LOADING, this);
env->setNpc(npc);
//execute it
ret = protectedCall(luaState, 0, 0);
if (ret != 0) {
reportError(nullptr, popString(luaState));
resetScriptEnv();
return -1;
}
resetScriptEnv();
return 0;
}
int32_t LuaScriptInterface::getEvent(const std::string& eventName)
{
//get our events table
lua_rawgeti(luaState, LUA_REGISTRYINDEX, eventTableRef);
if (!isTable(luaState, -1)) {
lua_pop(luaState, 1);
return -1;
}
//get current event function pointer
lua_getglobal(luaState, eventName.c_str());
if (!isFunction(luaState, -1)) {
lua_pop(luaState, 2);
return -1;
}
//save in our events table
lua_pushvalue(luaState, -1);
lua_rawseti(luaState, -3, runningEventId);
lua_pop(luaState, 2);
//reset global value of this event
lua_pushnil(luaState);
lua_setglobal(luaState, eventName.c_str());
cacheFiles[runningEventId] = loadingFile + ":" + eventName;
return runningEventId++;
}
int32_t LuaScriptInterface::getMetaEvent(const std::string& globalName, const std::string& eventName)
{
//get our events table
lua_rawgeti(luaState, LUA_REGISTRYINDEX, eventTableRef);
if (!isTable(luaState, -1)) {
lua_pop(luaState, 1);
return -1;
}
//get current event function pointer
lua_getglobal(luaState, globalName.c_str());
lua_getfield(luaState, -1, eventName.c_str());
if (!isFunction(luaState, -1)) {
lua_pop(luaState, 3);
return -1;
}
//save in our events table
lua_pushvalue(luaState, -1);
lua_rawseti(luaState, -4, runningEventId);
lua_pop(luaState, 1);
//reset global value of this event
lua_pushnil(luaState);
lua_setfield(luaState, -2, eventName.c_str());
lua_pop(luaState, 2);
cacheFiles[runningEventId] = loadingFile + ":" + globalName + "@" + eventName;
return runningEventId++;
}
const std::string& LuaScriptInterface::getFileById(int32_t scriptId)
{
if (scriptId == EVENT_ID_LOADING) {
return loadingFile;
}
auto it = cacheFiles.find(scriptId);
if (it == cacheFiles.end()) {
static const std::string& unk = "(Unknown scriptfile)";
return unk;
}
return it->second;
}
std::string LuaScriptInterface::getStackTrace(const std::string& error_desc)
{
lua_getglobal(luaState, "debug");
if (!isTable(luaState, -1)) {
lua_pop(luaState, 1);
return error_desc;
}
lua_getfield(luaState, -1, "traceback");
if (!isFunction(luaState, -1)) {
lua_pop(luaState, 2);
return error_desc;
}
lua_replace(luaState, -2);
pushString(luaState, error_desc);
lua_call(luaState, 1, 1);
return popString(luaState);
}
void LuaScriptInterface::reportError(const char* function, const std::string& error_desc, bool stack_trace/* = false*/)
{
int32_t scriptId;
int32_t callbackId;
bool timerEvent;
LuaScriptInterface* scriptInterface;
getScriptEnv()->getEventInfo(scriptId, scriptInterface, callbackId, timerEvent);
std::cout << std::endl << "Lua Script Error: ";
if (scriptInterface) {
std::cout << '[' << scriptInterface->getInterfaceName() << "] " << std::endl;
if (timerEvent) {
std::cout << "in a timer event called from: " << std::endl;
}
if (callbackId) {
std::cout << "in callback: " << scriptInterface->getFileById(callbackId) << std::endl;
}
std::cout << scriptInterface->getFileById(scriptId) << std::endl;
}
if (function) {
std::cout << function << "(). ";
}
if (stack_trace && scriptInterface) {
std::cout << scriptInterface->getStackTrace(error_desc) << std::endl;
} else {
std::cout << error_desc << std::endl;
}
}
bool LuaScriptInterface::pushFunction(int32_t functionId)
{
lua_rawgeti(luaState, LUA_REGISTRYINDEX, eventTableRef);
if (!isTable(luaState, -1)) {
return false;
}
lua_rawgeti(luaState, -1, functionId);
lua_replace(luaState, -2);
return isFunction(luaState, -1);
}
bool LuaScriptInterface::initState()
{
luaState = g_luaEnvironment.getLuaState();
if (!luaState) {
return false;
}
lua_newtable(luaState);
eventTableRef = luaL_ref(luaState, LUA_REGISTRYINDEX);
runningEventId = EVENT_ID_USER;
return true;
}
bool LuaScriptInterface::closeState()
{
if (!g_luaEnvironment.getLuaState() || !luaState) {
return false;
}
cacheFiles.clear();
if (eventTableRef != -1) {
luaL_unref(luaState, LUA_REGISTRYINDEX, eventTableRef);
eventTableRef = -1;
}
luaState = nullptr;
return true;
}
int LuaScriptInterface::luaErrorHandler(lua_State* L)
{
const std::string& errorMessage = popString(L);
auto interface = getScriptEnv()->getScriptInterface();
assert(interface); //This fires if the ScriptEnvironment hasn't been setup
pushString(L, interface->getStackTrace(errorMessage));
return 1;
}
bool LuaScriptInterface::callFunction(int params)
{
bool result = false;
int size = lua_gettop(luaState);
if (protectedCall(luaState, params, 1) != 0) {
LuaScriptInterface::reportError(nullptr, LuaScriptInterface::getString(luaState, -1));
} else {
result = LuaScriptInterface::getBoolean(luaState, -1);
}
lua_pop(luaState, 1);
if ((lua_gettop(luaState) + params + 1) != size) {
LuaScriptInterface::reportError(nullptr, "Stack size changed!");
}
resetScriptEnv();
return result;
}
void LuaScriptInterface::callVoidFunction(int params)
{
int size = lua_gettop(luaState);
if (protectedCall(luaState, params, 0) != 0) {
LuaScriptInterface::reportError(nullptr, LuaScriptInterface::popString(luaState));
}
if ((lua_gettop(luaState) + params + 1) != size) {
LuaScriptInterface::reportError(nullptr, "Stack size changed!");
}
resetScriptEnv();
}
void LuaScriptInterface::pushVariant(lua_State* L, const LuaVariant& var)
{
lua_createtable(L, 0, 2);
setField(L, "type", var.type);
switch (var.type) {
case VARIANT_NUMBER:
setField(L, "number", var.number);
break;
case VARIANT_STRING:
setField(L, "string", var.text);
break;
case VARIANT_TARGETPOSITION:
case VARIANT_POSITION: {
pushPosition(L, var.pos);
lua_setfield(L, -2, "pos");
break;
}
default:
break;
}
setMetatable(L, -1, "Variant");
}
void LuaScriptInterface::pushThing(lua_State* L, Thing* thing)
{
if (!thing) {
lua_createtable(L, 0, 4);
setField(L, "uid", 0);
setField(L, "itemid", 0);
setField(L, "actionid", 0);
setField(L, "type", 0);
return;
}
if (Item* item = thing->getItem()) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else if (Creature* creature = thing->getCreature()) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
} else {
lua_pushnil(L);
}
}
void LuaScriptInterface::pushCylinder(lua_State* L, Cylinder* cylinder)
{
if (Creature* creature = cylinder->getCreature()) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
} else if (Item* parentItem = cylinder->getItem()) {
pushUserdata<Item>(L, parentItem);
setItemMetatable(L, -1, parentItem);
} else if (Tile* tile = cylinder->getTile()) {
pushUserdata<Tile>(L, tile);
setMetatable(L, -1, "Tile");
} else if (cylinder == VirtualCylinder::virtualCylinder) {
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
}
void LuaScriptInterface::pushString(lua_State* L, const std::string& value)
{
lua_pushlstring(L, value.c_str(), value.length());
}
void LuaScriptInterface::pushCallback(lua_State* L, int32_t callback)
{
lua_rawgeti(L, LUA_REGISTRYINDEX, callback);
}
std::string LuaScriptInterface::popString(lua_State* L)
{
if (lua_gettop(L) == 0) {
return std::string();
}
std::string str(getString(L, -1));
lua_pop(L, 1);
return str;
}
int32_t LuaScriptInterface::popCallback(lua_State* L)
{
return luaL_ref(L, LUA_REGISTRYINDEX);
}
// Metatables
void LuaScriptInterface::setMetatable(lua_State* L, int32_t index, const std::string& name)
{
luaL_getmetatable(L, name.c_str());
lua_setmetatable(L, index - 1);
}
void LuaScriptInterface::setWeakMetatable(lua_State* L, int32_t index, const std::string& name)
{
static std::set<std::string> weakObjectTypes;
const std::string& weakName = name + "_weak";
auto result = weakObjectTypes.emplace(name);
if (result.second) {
luaL_getmetatable(L, name.c_str());
int childMetatable = lua_gettop(L);
luaL_newmetatable(L, weakName.c_str());
int metatable = lua_gettop(L);
static const std::vector<std::string> methodKeys = {"__index", "__metatable", "__eq"};
for (const std::string& metaKey : methodKeys) {
lua_getfield(L, childMetatable, metaKey.c_str());
lua_setfield(L, metatable, metaKey.c_str());
}
static const std::vector<int> methodIndexes = {'h', 'p', 't'};
for (int metaIndex : methodIndexes) {
lua_rawgeti(L, childMetatable, metaIndex);
lua_rawseti(L, metatable, metaIndex);
}
lua_pushnil(L);
lua_setfield(L, metatable, "__gc");
lua_remove(L, childMetatable);
} else {
luaL_getmetatable(L, weakName.c_str());
}
lua_setmetatable(L, index - 1);
}
void LuaScriptInterface::setItemMetatable(lua_State* L, int32_t index, const Item* item)
{
if (item->getContainer()) {
luaL_getmetatable(L, "Container");
} else if (item->getTeleport()) {
luaL_getmetatable(L, "Teleport");
} else {
luaL_getmetatable(L, "Item");
}
lua_setmetatable(L, index - 1);
}
void LuaScriptInterface::setCreatureMetatable(lua_State* L, int32_t index, const Creature* creature)
{
if (creature->getPlayer()) {
luaL_getmetatable(L, "Player");
} else if (creature->getMonster()) {
luaL_getmetatable(L, "Monster");
} else {
luaL_getmetatable(L, "Npc");
}
lua_setmetatable(L, index - 1);
}
// Get
CombatDamage LuaScriptInterface::getCombatDamage(lua_State* L)
{
CombatDamage damage;
damage.primary.value = getNumber<int32_t>(L, -4);
damage.primary.type = getNumber<CombatType_t>(L, -3);
damage.secondary.value = getNumber<int32_t>(L, -2);
damage.secondary.type = getNumber<CombatType_t>(L, -1);
lua_pop(L, 4);
return damage;
}
std::string LuaScriptInterface::getString(lua_State* L, int32_t arg)
{
size_t len;
const char* c_str = lua_tolstring(L, arg, &len);
if (!c_str || len == 0) {
return std::string();
}
return std::string(c_str, len);
}
Position LuaScriptInterface::getPosition(lua_State* L, int32_t arg, int32_t& stackpos)
{
Position position;
position.x = getField<uint16_t>(L, arg, "x");
position.y = getField<uint16_t>(L, arg, "y");
position.z = getField<uint8_t>(L, arg, "z");
lua_getfield(L, arg, "stackpos");
if (lua_isnil(L, -1) == 1) {
stackpos = 0;
} else {
stackpos = getNumber<int32_t>(L, -1);
}
lua_pop(L, 4);
return position;
}
Position LuaScriptInterface::getPosition(lua_State* L, int32_t arg)
{
Position position;
position.x = getField<uint16_t>(L, arg, "x");
position.y = getField<uint16_t>(L, arg, "y");
position.z = getField<uint8_t>(L, arg, "z");
lua_pop(L, 3);
return position;
}
Outfit_t LuaScriptInterface::getOutfit(lua_State* L, int32_t arg)
{
Outfit_t outfit;
outfit.lookMount = getField<uint16_t>(L, arg, "lookMount");
outfit.lookAddons = getField<uint8_t>(L, arg, "lookAddons");
outfit.lookFeet = getField<uint8_t>(L, arg, "lookFeet");
outfit.lookLegs = getField<uint8_t>(L, arg, "lookLegs");
outfit.lookBody = getField<uint8_t>(L, arg, "lookBody");
outfit.lookHead = getField<uint8_t>(L, arg, "lookHead");
outfit.lookTypeEx = getField<uint16_t>(L, arg, "lookTypeEx");
outfit.lookType = getField<uint16_t>(L, arg, "lookType");
lua_pop(L, 8);
return outfit;
}
LuaVariant LuaScriptInterface::getVariant(lua_State* L, int32_t arg)
{
LuaVariant var;
switch (var.type = getField<LuaVariantType_t>(L, arg, "type")) {
case VARIANT_NUMBER: {
var.number = getField<uint32_t>(L, arg, "number");
lua_pop(L, 2);
break;
}
case VARIANT_STRING: {
var.text = getFieldString(L, arg, "string");
lua_pop(L, 2);
break;
}
case VARIANT_POSITION:
case VARIANT_TARGETPOSITION: {
lua_getfield(L, arg, "pos");
var.pos = getPosition(L, lua_gettop(L));
lua_pop(L, 2);
break;
}
default: {
var.type = VARIANT_NONE;
lua_pop(L, 1);
break;
}
}
return var;
}
Thing* LuaScriptInterface::getThing(lua_State* L, int32_t arg)
{
Thing* thing;
if (lua_getmetatable(L, arg) != 0) {
lua_rawgeti(L, -1, 't');
switch(getNumber<uint32_t>(L, -1)) {
case LuaData_Item:
thing = getUserdata<Item>(L, arg);
break;
case LuaData_Container:
thing = getUserdata<Container>(L, arg);
break;
case LuaData_Teleport:
thing = getUserdata<Teleport>(L, arg);
break;
case LuaData_Player:
thing = getUserdata<Player>(L, arg);
break;
case LuaData_Monster:
thing = getUserdata<Monster>(L, arg);
break;
case LuaData_Npc:
thing = getUserdata<Npc>(L, arg);
break;
default:
thing = nullptr;
break;
}
lua_pop(L, 2);
} else {
thing = getScriptEnv()->getThingByUID(getNumber<uint32_t>(L, arg));
}
return thing;
}
Creature* LuaScriptInterface::getCreature(lua_State* L, int32_t arg)
{
if (isUserdata(L, arg)) {
return getUserdata<Creature>(L, arg);
}
return g_game.getCreatureByID(getNumber<uint32_t>(L, arg));
}
Player* LuaScriptInterface::getPlayer(lua_State* L, int32_t arg)
{
if (isUserdata(L, arg)) {
return getUserdata<Player>(L, arg);
}
return g_game.getPlayerByID(getNumber<uint32_t>(L, arg));
}
std::string LuaScriptInterface::getFieldString(lua_State* L, int32_t arg, const std::string& key)
{
lua_getfield(L, arg, key.c_str());
return getString(L, -1);
}
LuaDataType LuaScriptInterface::getUserdataType(lua_State* L, int32_t arg)
{
if (lua_getmetatable(L, arg) == 0) {
return LuaData_Unknown;
}
lua_rawgeti(L, -1, 't');
LuaDataType type = getNumber<LuaDataType>(L, -1);
lua_pop(L, 2);
return type;
}
// Push
void LuaScriptInterface::pushBoolean(lua_State* L, bool value)
{
lua_pushboolean(L, value ? 1 : 0);
}
void LuaScriptInterface::pushCombatDamage(lua_State* L, const CombatDamage& damage)
{
lua_pushnumber(L, damage.primary.value);
lua_pushnumber(L, damage.primary.type);
lua_pushnumber(L, damage.secondary.value);
lua_pushnumber(L, damage.secondary.type);
lua_pushnumber(L, damage.origin);
}
void LuaScriptInterface::pushInstantSpell(lua_State* L, const InstantSpell& spell)
{
lua_createtable(L, 0, 6);
setField(L, "name", spell.getName());
setField(L, "words", spell.getWords());
setField(L, "level", spell.getLevel());
setField(L, "mlevel", spell.getMagicLevel());
setField(L, "mana", spell.getMana());
setField(L, "manapercent", spell.getManaPercent());
setMetatable(L, -1, "Spell");
}
void LuaScriptInterface::pushPosition(lua_State* L, const Position& position, int32_t stackpos/* = 0*/)
{
lua_createtable(L, 0, 4);
setField(L, "x", position.x);
setField(L, "y", position.y);
setField(L, "z", position.z);
setField(L, "stackpos", stackpos);
setMetatable(L, -1, "Position");
}
void LuaScriptInterface::pushOutfit(lua_State* L, const Outfit_t& outfit)
{
lua_createtable(L, 0, 8);
setField(L, "lookType", outfit.lookType);
setField(L, "lookTypeEx", outfit.lookTypeEx);
setField(L, "lookHead", outfit.lookHead);
setField(L, "lookBody", outfit.lookBody);
setField(L, "lookLegs", outfit.lookLegs);
setField(L, "lookFeet", outfit.lookFeet);
setField(L, "lookAddons", outfit.lookAddons);
setField(L, "lookMount", outfit.lookMount);
}
#define registerEnum(value) { std::string enumName = #value; registerGlobalVariable(enumName.substr(enumName.find_last_of(':') + 1), value); }
#define registerEnumIn(tableName, value) { std::string enumName = #value; registerVariable(tableName, enumName.substr(enumName.find_last_of(':') + 1), value); }
void LuaScriptInterface::registerFunctions()
{
//doPlayerAddItem(uid, itemid, <optional: default: 1> count/subtype)
//doPlayerAddItem(cid, itemid, <optional: default: 1> count, <optional: default: 1> canDropOnMap, <optional: default: 1>subtype)
//Returns uid of the created item
lua_register(luaState, "doPlayerAddItem", LuaScriptInterface::luaDoPlayerAddItem);
//doTileAddItemEx(pos, uid)
lua_register(luaState, "doTileAddItemEx", LuaScriptInterface::luaDoTileAddItemEx);
//doSetCreatureLight(cid, lightLevel, lightColor, time)
lua_register(luaState, "doSetCreatureLight", LuaScriptInterface::luaDoSetCreatureLight);
//isValidUID(uid)
lua_register(luaState, "isValidUID", LuaScriptInterface::luaIsValidUID);
//isDepot(uid)
lua_register(luaState, "isDepot", LuaScriptInterface::luaIsDepot);
//isMovable(uid)
lua_register(luaState, "isMovable", LuaScriptInterface::luaIsMoveable);
//doAddContainerItem(uid, itemid, <optional> count/subtype)
lua_register(luaState, "doAddContainerItem", LuaScriptInterface::luaDoAddContainerItem);
//getDepotId(uid)
lua_register(luaState, "getDepotId", LuaScriptInterface::luaGetDepotId);
//getWorldTime()
lua_register(luaState, "getWorldTime", LuaScriptInterface::luaGetWorldTime);
//getWorldLight()
lua_register(luaState, "getWorldLight", LuaScriptInterface::luaGetWorldLight);
//getWorldUpTime()
lua_register(luaState, "getWorldUpTime", LuaScriptInterface::luaGetWorldUpTime);
//createCombatArea( {area}, <optional> {extArea} )
lua_register(luaState, "createCombatArea", LuaScriptInterface::luaCreateCombatArea);
//doAreaCombatHealth(cid, type, pos, area, min, max, effect)
lua_register(luaState, "doAreaCombatHealth", LuaScriptInterface::luaDoAreaCombatHealth);
//doTargetCombatHealth(cid, target, type, min, max, effect)
lua_register(luaState, "doTargetCombatHealth", LuaScriptInterface::luaDoTargetCombatHealth);
//doAreaCombatMana(cid, pos, area, min, max, effect)
lua_register(luaState, "doAreaCombatMana", LuaScriptInterface::luaDoAreaCombatMana);
//doTargetCombatMana(cid, target, min, max, effect)
lua_register(luaState, "doTargetCombatMana", LuaScriptInterface::luaDoTargetCombatMana);
//doAreaCombatCondition(cid, pos, area, condition, effect)
lua_register(luaState, "doAreaCombatCondition", LuaScriptInterface::luaDoAreaCombatCondition);
//doTargetCombatCondition(cid, target, condition, effect)
lua_register(luaState, "doTargetCombatCondition", LuaScriptInterface::luaDoTargetCombatCondition);
//doAreaCombatDispel(cid, pos, area, type, effect)
lua_register(luaState, "doAreaCombatDispel", LuaScriptInterface::luaDoAreaCombatDispel);
//doTargetCombatDispel(cid, target, type, effect)
lua_register(luaState, "doTargetCombatDispel", LuaScriptInterface::luaDoTargetCombatDispel);
//doChallengeCreature(cid, target)
lua_register(luaState, "doChallengeCreature", LuaScriptInterface::luaDoChallengeCreature);
//addEvent(callback, delay, ...)
lua_register(luaState, "addEvent", LuaScriptInterface::luaAddEvent);
//stopEvent(eventid)
lua_register(luaState, "stopEvent", LuaScriptInterface::luaStopEvent);
//saveServer()
lua_register(luaState, "saveServer", LuaScriptInterface::luaSaveServer);
//cleanMap()
lua_register(luaState, "cleanMap", LuaScriptInterface::luaCleanMap);
//debugPrint(text)
lua_register(luaState, "debugPrint", LuaScriptInterface::luaDebugPrint);
//isInWar(cid, target)
lua_register(luaState, "isInWar", LuaScriptInterface::luaIsInWar);
//getWaypointPosition(name)
lua_register(luaState, "getWaypointPositionByName", LuaScriptInterface::luaGetWaypointPositionByName);
//sendChannelMessage(channelId, type, message)
lua_register(luaState, "sendChannelMessage", LuaScriptInterface::luaSendChannelMessage);
//sendGuildChannelMessage(guildId, type, message)
lua_register(luaState, "sendGuildChannelMessage", LuaScriptInterface::luaSendGuildChannelMessage);
#ifndef LUAJIT_VERSION
//bit operations for Lua, based on bitlib project release 24
//bit.bnot, bit.band, bit.bor, bit.bxor, bit.lshift, bit.rshift
luaL_register(luaState, "bit", LuaScriptInterface::luaBitReg);
#endif
//configManager table
luaL_register(luaState, "configManager", LuaScriptInterface::luaConfigManagerTable);
//db table
luaL_register(luaState, "db", LuaScriptInterface::luaDatabaseTable);
//result table
luaL_register(luaState, "result", LuaScriptInterface::luaResultTable);
/* New functions */
//registerClass(className, baseClass, newFunction)
//registerTable(tableName)
//registerMethod(className, functionName, function)
//registerMetaMethod(className, functionName, function)
//registerGlobalMethod(functionName, function)
//registerVariable(tableName, name, value)
//registerGlobalVariable(name, value)
//registerEnum(value)
//registerEnumIn(tableName, value)
// Enums
registerEnum(ACCOUNT_TYPE_NORMAL)
registerEnum(ACCOUNT_TYPE_TUTOR)
registerEnum(ACCOUNT_TYPE_SENIORTUTOR)
registerEnum(ACCOUNT_TYPE_GAMEMASTER)
registerEnum(ACCOUNT_TYPE_GOD)
registerEnum(BUG_CATEGORY_MAP)
registerEnum(BUG_CATEGORY_TYPO)
registerEnum(BUG_CATEGORY_TECHNICAL)
registerEnum(BUG_CATEGORY_OTHER)
registerEnum(CALLBACK_PARAM_LEVELMAGICVALUE)
registerEnum(CALLBACK_PARAM_SKILLVALUE)
registerEnum(CALLBACK_PARAM_TARGETTILE)
registerEnum(CALLBACK_PARAM_TARGETCREATURE)
registerEnum(COMBAT_FORMULA_UNDEFINED)
registerEnum(COMBAT_FORMULA_LEVELMAGIC)
registerEnum(COMBAT_FORMULA_SKILL)
registerEnum(COMBAT_FORMULA_DAMAGE)
registerEnum(DIRECTION_NORTH)
registerEnum(DIRECTION_EAST)
registerEnum(DIRECTION_SOUTH)
registerEnum(DIRECTION_WEST)
registerEnum(DIRECTION_SOUTHWEST)
registerEnum(DIRECTION_SOUTHEAST)
registerEnum(DIRECTION_NORTHWEST)
registerEnum(DIRECTION_NORTHEAST)
registerEnum(COMBAT_NONE)
registerEnum(COMBAT_PHYSICALDAMAGE)
registerEnum(COMBAT_ENERGYDAMAGE)
registerEnum(COMBAT_EARTHDAMAGE)
registerEnum(COMBAT_FIREDAMAGE)
registerEnum(COMBAT_UNDEFINEDDAMAGE)
registerEnum(COMBAT_LIFEDRAIN)
registerEnum(COMBAT_MANADRAIN)
registerEnum(COMBAT_HEALING)
registerEnum(COMBAT_DROWNDAMAGE)
registerEnum(COMBAT_ICEDAMAGE)
registerEnum(COMBAT_HOLYDAMAGE)
registerEnum(COMBAT_DEATHDAMAGE)
registerEnum(COMBAT_PARAM_TYPE)
registerEnum(COMBAT_PARAM_EFFECT)
registerEnum(COMBAT_PARAM_DISTANCEEFFECT)
registerEnum(COMBAT_PARAM_BLOCKSHIELD)
registerEnum(COMBAT_PARAM_BLOCKARMOR)
registerEnum(COMBAT_PARAM_TARGETCASTERORTOPMOST)
registerEnum(COMBAT_PARAM_CREATEITEM)
registerEnum(COMBAT_PARAM_AGGRESSIVE)
registerEnum(COMBAT_PARAM_DISPEL)
registerEnum(COMBAT_PARAM_USECHARGES)
registerEnum(CONDITION_NONE)
registerEnum(CONDITION_POISON)
registerEnum(CONDITION_FIRE)
registerEnum(CONDITION_ENERGY)
registerEnum(CONDITION_BLEEDING)
registerEnum(CONDITION_HASTE)
registerEnum(CONDITION_PARALYZE)
registerEnum(CONDITION_OUTFIT)
registerEnum(CONDITION_INVISIBLE)
registerEnum(CONDITION_LIGHT)
registerEnum(CONDITION_MANASHIELD)
registerEnum(CONDITION_INFIGHT)
registerEnum(CONDITION_DRUNK)
registerEnum(CONDITION_EXHAUST_WEAPON)
registerEnum(CONDITION_REGENERATION)
registerEnum(CONDITION_SOUL)
registerEnum(CONDITION_DROWN)
registerEnum(CONDITION_MUTED)
registerEnum(CONDITION_CHANNELMUTEDTICKS)
registerEnum(CONDITION_YELLTICKS)
registerEnum(CONDITION_ATTRIBUTES)
registerEnum(CONDITION_FREEZING)
registerEnum(CONDITION_DAZZLED)
registerEnum(CONDITION_CURSED)
registerEnum(CONDITION_EXHAUST_COMBAT)
registerEnum(CONDITION_EXHAUST_HEAL)
registerEnum(CONDITION_PACIFIED)
registerEnum(CONDITION_SPELLCOOLDOWN)
registerEnum(CONDITION_SPELLGROUPCOOLDOWN)
registerEnum(CONDITIONID_DEFAULT)
registerEnum(CONDITIONID_COMBAT)
registerEnum(CONDITIONID_HEAD)
registerEnum(CONDITIONID_NECKLACE)
registerEnum(CONDITIONID_BACKPACK)
registerEnum(CONDITIONID_ARMOR)
registerEnum(CONDITIONID_RIGHT)
registerEnum(CONDITIONID_LEFT)
registerEnum(CONDITIONID_LEGS)
registerEnum(CONDITIONID_FEET)
registerEnum(CONDITIONID_RING)
registerEnum(CONDITIONID_AMMO)
registerEnum(CONDITION_PARAM_OWNER)
registerEnum(CONDITION_PARAM_TICKS)
registerEnum(CONDITION_PARAM_HEALTHGAIN)
registerEnum(CONDITION_PARAM_HEALTHTICKS)
registerEnum(CONDITION_PARAM_MANAGAIN)
registerEnum(CONDITION_PARAM_MANATICKS)
registerEnum(CONDITION_PARAM_DELAYED)
registerEnum(CONDITION_PARAM_SPEED)
registerEnum(CONDITION_PARAM_LIGHT_LEVEL)
registerEnum(CONDITION_PARAM_LIGHT_COLOR)
registerEnum(CONDITION_PARAM_SOULGAIN)
registerEnum(CONDITION_PARAM_SOULTICKS)
registerEnum(CONDITION_PARAM_MINVALUE)
registerEnum(CONDITION_PARAM_MAXVALUE)
registerEnum(CONDITION_PARAM_STARTVALUE)
registerEnum(CONDITION_PARAM_TICKINTERVAL)
registerEnum(CONDITION_PARAM_FORCEUPDATE)
registerEnum(CONDITION_PARAM_SKILL_MELEE)
registerEnum(CONDITION_PARAM_SKILL_FIST)
registerEnum(CONDITION_PARAM_SKILL_CLUB)
registerEnum(CONDITION_PARAM_SKILL_SWORD)
registerEnum(CONDITION_PARAM_SKILL_AXE)
registerEnum(CONDITION_PARAM_SKILL_DISTANCE)
registerEnum(CONDITION_PARAM_SKILL_SHIELD)
registerEnum(CONDITION_PARAM_SKILL_FISHING)
registerEnum(CONDITION_PARAM_STAT_MAXHITPOINTS)
registerEnum(CONDITION_PARAM_STAT_MAXMANAPOINTS)
registerEnum(CONDITION_PARAM_STAT_MAGICPOINTS)
registerEnum(CONDITION_PARAM_STAT_MAXHITPOINTSPERCENT)
registerEnum(CONDITION_PARAM_STAT_MAXMANAPOINTSPERCENT)
registerEnum(CONDITION_PARAM_STAT_MAGICPOINTSPERCENT)
registerEnum(CONDITION_PARAM_PERIODICDAMAGE)
registerEnum(CONDITION_PARAM_SKILL_MELEEPERCENT)
registerEnum(CONDITION_PARAM_SKILL_FISTPERCENT)
registerEnum(CONDITION_PARAM_SKILL_CLUBPERCENT)
registerEnum(CONDITION_PARAM_SKILL_SWORDPERCENT)
registerEnum(CONDITION_PARAM_SKILL_AXEPERCENT)
registerEnum(CONDITION_PARAM_SKILL_DISTANCEPERCENT)
registerEnum(CONDITION_PARAM_SKILL_SHIELDPERCENT)
registerEnum(CONDITION_PARAM_SKILL_FISHINGPERCENT)
registerEnum(CONDITION_PARAM_BUFF_SPELL)
registerEnum(CONDITION_PARAM_SUBID)
registerEnum(CONDITION_PARAM_FIELD)
registerEnum(CONDITION_PARAM_DISABLE_DEFENSE)
registerEnum(CONST_ME_NONE)
registerEnum(CONST_ME_DRAWBLOOD)
registerEnum(CONST_ME_LOSEENERGY)
registerEnum(CONST_ME_POFF)
registerEnum(CONST_ME_BLOCKHIT)
registerEnum(CONST_ME_EXPLOSIONAREA)
registerEnum(CONST_ME_EXPLOSIONHIT)
registerEnum(CONST_ME_FIREAREA)
registerEnum(CONST_ME_YELLOW_RINGS)
registerEnum(CONST_ME_GREEN_RINGS)
registerEnum(CONST_ME_HITAREA)
registerEnum(CONST_ME_TELEPORT)
registerEnum(CONST_ME_ENERGYHIT)
registerEnum(CONST_ME_MAGIC_BLUE)
registerEnum(CONST_ME_MAGIC_RED)
registerEnum(CONST_ME_MAGIC_GREEN)
registerEnum(CONST_ME_HITBYFIRE)
registerEnum(CONST_ME_HITBYPOISON)
registerEnum(CONST_ME_MORTAREA)
registerEnum(CONST_ME_SOUND_GREEN)
registerEnum(CONST_ME_SOUND_RED)
registerEnum(CONST_ME_POISONAREA)
registerEnum(CONST_ME_SOUND_YELLOW)
registerEnum(CONST_ME_SOUND_PURPLE)
registerEnum(CONST_ME_SOUND_BLUE)
registerEnum(CONST_ME_SOUND_WHITE)
registerEnum(CONST_ME_BUBBLES)
registerEnum(CONST_ME_CRAPS)
registerEnum(CONST_ME_GIFT_WRAPS)
registerEnum(CONST_ME_FIREWORK_YELLOW)
registerEnum(CONST_ME_FIREWORK_RED)
registerEnum(CONST_ME_FIREWORK_BLUE)
registerEnum(CONST_ME_STUN)
registerEnum(CONST_ME_SLEEP)
registerEnum(CONST_ME_WATERCREATURE)
registerEnum(CONST_ME_GROUNDSHAKER)
registerEnum(CONST_ME_HEARTS)
registerEnum(CONST_ME_FIREATTACK)
registerEnum(CONST_ME_ENERGYAREA)
registerEnum(CONST_ME_SMALLCLOUDS)
registerEnum(CONST_ME_HOLYDAMAGE)
registerEnum(CONST_ME_BIGCLOUDS)
registerEnum(CONST_ME_ICEAREA)
registerEnum(CONST_ME_ICETORNADO)
registerEnum(CONST_ME_ICEATTACK)
registerEnum(CONST_ME_STONES)
registerEnum(CONST_ME_SMALLPLANTS)
registerEnum(CONST_ME_CARNIPHILA)
registerEnum(CONST_ME_PURPLEENERGY)
registerEnum(CONST_ME_YELLOWENERGY)
registerEnum(CONST_ME_HOLYAREA)
registerEnum(CONST_ME_BIGPLANTS)
registerEnum(CONST_ME_CAKE)
registerEnum(CONST_ME_GIANTICE)
registerEnum(CONST_ME_WATERSPLASH)
registerEnum(CONST_ME_PLANTATTACK)
registerEnum(CONST_ME_TUTORIALARROW)
registerEnum(CONST_ME_TUTORIALSQUARE)
registerEnum(CONST_ME_MIRRORHORIZONTAL)
registerEnum(CONST_ME_MIRRORVERTICAL)
registerEnum(CONST_ME_SKULLHORIZONTAL)
registerEnum(CONST_ME_SKULLVERTICAL)
registerEnum(CONST_ME_ASSASSIN)
registerEnum(CONST_ME_STEPSHORIZONTAL)
registerEnum(CONST_ME_BLOODYSTEPS)
registerEnum(CONST_ME_STEPSVERTICAL)
registerEnum(CONST_ME_YALAHARIGHOST)
registerEnum(CONST_ME_BATS)
registerEnum(CONST_ME_SMOKE)
registerEnum(CONST_ME_INSECTS)
registerEnum(CONST_ME_DRAGONHEAD)
registerEnum(CONST_ME_ORCSHAMAN)
registerEnum(CONST_ME_ORCSHAMAN_FIRE)
registerEnum(CONST_ME_THUNDER)
registerEnum(CONST_ME_FERUMBRAS)
registerEnum(CONST_ME_CONFETTI_HORIZONTAL)
registerEnum(CONST_ME_CONFETTI_VERTICAL)
registerEnum(CONST_ME_BLACKSMOKE)
registerEnum(CONST_ME_REDSMOKE)
registerEnum(CONST_ME_YELLOWSMOKE)
registerEnum(CONST_ME_GREENSMOKE)
registerEnum(CONST_ME_PURPLESMOKE)
registerEnum(CONST_ME_EARLY_THUNDER)
registerEnum(CONST_ME_RAGIAZ_BONECAPSULE)
registerEnum(CONST_ME_CRITICAL_DAMAGE)
registerEnum(CONST_ME_PLUNGING_FISH)
registerEnum(CONST_ANI_NONE)
registerEnum(CONST_ANI_SPEAR)
registerEnum(CONST_ANI_BOLT)
registerEnum(CONST_ANI_ARROW)
registerEnum(CONST_ANI_FIRE)
registerEnum(CONST_ANI_ENERGY)
registerEnum(CONST_ANI_POISONARROW)
registerEnum(CONST_ANI_BURSTARROW)
registerEnum(CONST_ANI_THROWINGSTAR)
registerEnum(CONST_ANI_THROWINGKNIFE)
registerEnum(CONST_ANI_SMALLSTONE)
registerEnum(CONST_ANI_DEATH)
registerEnum(CONST_ANI_LARGEROCK)
registerEnum(CONST_ANI_SNOWBALL)
registerEnum(CONST_ANI_POWERBOLT)
registerEnum(CONST_ANI_POISON)
registerEnum(CONST_ANI_INFERNALBOLT)
registerEnum(CONST_ANI_HUNTINGSPEAR)
registerEnum(CONST_ANI_ENCHANTEDSPEAR)
registerEnum(CONST_ANI_REDSTAR)
registerEnum(CONST_ANI_GREENSTAR)
registerEnum(CONST_ANI_ROYALSPEAR)
registerEnum(CONST_ANI_SNIPERARROW)
registerEnum(CONST_ANI_ONYXARROW)
registerEnum(CONST_ANI_PIERCINGBOLT)
registerEnum(CONST_ANI_WHIRLWINDSWORD)
registerEnum(CONST_ANI_WHIRLWINDAXE)
registerEnum(CONST_ANI_WHIRLWINDCLUB)
registerEnum(CONST_ANI_ETHEREALSPEAR)
registerEnum(CONST_ANI_ICE)
registerEnum(CONST_ANI_EARTH)
registerEnum(CONST_ANI_HOLY)
registerEnum(CONST_ANI_SUDDENDEATH)
registerEnum(CONST_ANI_FLASHARROW)
registerEnum(CONST_ANI_FLAMMINGARROW)
registerEnum(CONST_ANI_SHIVERARROW)
registerEnum(CONST_ANI_ENERGYBALL)
registerEnum(CONST_ANI_SMALLICE)
registerEnum(CONST_ANI_SMALLHOLY)
registerEnum(CONST_ANI_SMALLEARTH)
registerEnum(CONST_ANI_EARTHARROW)
registerEnum(CONST_ANI_EXPLOSION)
registerEnum(CONST_ANI_CAKE)
registerEnum(CONST_ANI_TARSALARROW)
registerEnum(CONST_ANI_VORTEXBOLT)
registerEnum(CONST_ANI_PRISMATICBOLT)
registerEnum(CONST_ANI_CRYSTALLINEARROW)
registerEnum(CONST_ANI_DRILLBOLT)
registerEnum(CONST_ANI_ENVENOMEDARROW)
registerEnum(CONST_ANI_GLOOTHSPEAR)
registerEnum(CONST_ANI_SIMPLEARROW)
registerEnum(CONST_ANI_WEAPONTYPE)
registerEnum(CONST_PROP_BLOCKSOLID)
registerEnum(CONST_PROP_HASHEIGHT)
registerEnum(CONST_PROP_BLOCKPROJECTILE)
registerEnum(CONST_PROP_BLOCKPATH)
registerEnum(CONST_PROP_ISVERTICAL)
registerEnum(CONST_PROP_ISHORIZONTAL)
registerEnum(CONST_PROP_MOVEABLE)
registerEnum(CONST_PROP_IMMOVABLEBLOCKSOLID)
registerEnum(CONST_PROP_IMMOVABLEBLOCKPATH)
registerEnum(CONST_PROP_IMMOVABLENOFIELDBLOCKPATH)
registerEnum(CONST_PROP_NOFIELDBLOCKPATH)
registerEnum(CONST_PROP_SUPPORTHANGABLE)
registerEnum(CONST_SLOT_HEAD)
registerEnum(CONST_SLOT_NECKLACE)
registerEnum(CONST_SLOT_BACKPACK)
registerEnum(CONST_SLOT_ARMOR)
registerEnum(CONST_SLOT_RIGHT)
registerEnum(CONST_SLOT_LEFT)
registerEnum(CONST_SLOT_LEGS)
registerEnum(CONST_SLOT_FEET)
registerEnum(CONST_SLOT_RING)
registerEnum(CONST_SLOT_AMMO)
registerEnum(CREATURE_EVENT_NONE)
registerEnum(CREATURE_EVENT_LOGIN)
registerEnum(CREATURE_EVENT_LOGOUT)
registerEnum(CREATURE_EVENT_THINK)
registerEnum(CREATURE_EVENT_PREPAREDEATH)
registerEnum(CREATURE_EVENT_DEATH)
registerEnum(CREATURE_EVENT_KILL)
registerEnum(CREATURE_EVENT_ADVANCE)
registerEnum(CREATURE_EVENT_MODALWINDOW)
registerEnum(CREATURE_EVENT_TEXTEDIT)
registerEnum(CREATURE_EVENT_HEALTHCHANGE)
registerEnum(CREATURE_EVENT_MANACHANGE)
registerEnum(CREATURE_EVENT_EXTENDED_OPCODE)
registerEnum(GAME_STATE_STARTUP)
registerEnum(GAME_STATE_INIT)
registerEnum(GAME_STATE_NORMAL)
registerEnum(GAME_STATE_CLOSED)
registerEnum(GAME_STATE_SHUTDOWN)
registerEnum(GAME_STATE_CLOSING)
registerEnum(GAME_STATE_MAINTAIN)
registerEnum(MESSAGE_STATUS_CONSOLE_BLUE)
registerEnum(MESSAGE_STATUS_CONSOLE_RED)
registerEnum(MESSAGE_STATUS_DEFAULT)
registerEnum(MESSAGE_STATUS_WARNING)
registerEnum(MESSAGE_EVENT_ADVANCE)
registerEnum(MESSAGE_STATUS_SMALL)
registerEnum(MESSAGE_INFO_DESCR)
registerEnum(MESSAGE_DAMAGE_DEALT)
registerEnum(MESSAGE_DAMAGE_RECEIVED)
registerEnum(MESSAGE_HEALED)
registerEnum(MESSAGE_EXPERIENCE)
registerEnum(MESSAGE_DAMAGE_OTHERS)
registerEnum(MESSAGE_HEALED_OTHERS)
registerEnum(MESSAGE_EXPERIENCE_OTHERS)
registerEnum(MESSAGE_EVENT_DEFAULT)
registerEnum(MESSAGE_GUILD)
registerEnum(MESSAGE_PARTY_MANAGEMENT)
registerEnum(MESSAGE_PARTY)
registerEnum(MESSAGE_EVENT_ORANGE)
registerEnum(MESSAGE_STATUS_CONSOLE_ORANGE)
registerEnum(CREATURETYPE_PLAYER)
registerEnum(CREATURETYPE_MONSTER)
registerEnum(CREATURETYPE_NPC)
registerEnum(CREATURETYPE_SUMMON_OWN)
registerEnum(CREATURETYPE_SUMMON_OTHERS)
registerEnum(CLIENTOS_LINUX)
registerEnum(CLIENTOS_WINDOWS)
registerEnum(CLIENTOS_FLASH)
registerEnum(CLIENTOS_OTCLIENT_LINUX)
registerEnum(CLIENTOS_OTCLIENT_WINDOWS)
registerEnum(CLIENTOS_OTCLIENT_MAC)
registerEnum(FIGHTMODE_ATTACK)
registerEnum(FIGHTMODE_BALANCED)
registerEnum(FIGHTMODE_DEFENSE)
registerEnum(ITEM_ATTRIBUTE_NONE)
registerEnum(ITEM_ATTRIBUTE_ACTIONID)
registerEnum(ITEM_ATTRIBUTE_UNIQUEID)
registerEnum(ITEM_ATTRIBUTE_DESCRIPTION)
registerEnum(ITEM_ATTRIBUTE_TEXT)
registerEnum(ITEM_ATTRIBUTE_DATE)
registerEnum(ITEM_ATTRIBUTE_WRITER)
registerEnum(ITEM_ATTRIBUTE_NAME)
registerEnum(ITEM_ATTRIBUTE_ARTICLE)
registerEnum(ITEM_ATTRIBUTE_PLURALNAME)
registerEnum(ITEM_ATTRIBUTE_WEIGHT)
registerEnum(ITEM_ATTRIBUTE_ATTACK)
registerEnum(ITEM_ATTRIBUTE_DEFENSE)
registerEnum(ITEM_ATTRIBUTE_EXTRADEFENSE)
registerEnum(ITEM_ATTRIBUTE_ARMOR)
registerEnum(ITEM_ATTRIBUTE_HITCHANCE)
registerEnum(ITEM_ATTRIBUTE_SHOOTRANGE)
registerEnum(ITEM_ATTRIBUTE_OWNER)
registerEnum(ITEM_ATTRIBUTE_DURATION)
registerEnum(ITEM_ATTRIBUTE_DECAYSTATE)
registerEnum(ITEM_ATTRIBUTE_CORPSEOWNER)
registerEnum(ITEM_ATTRIBUTE_CHARGES)
registerEnum(ITEM_ATTRIBUTE_FLUIDTYPE)
registerEnum(ITEM_ATTRIBUTE_DOORID)
registerEnum(ITEM_TYPE_DEPOT)
registerEnum(ITEM_TYPE_MAILBOX)
registerEnum(ITEM_TYPE_TRASHHOLDER)
registerEnum(ITEM_TYPE_CONTAINER)
registerEnum(ITEM_TYPE_DOOR)
registerEnum(ITEM_TYPE_MAGICFIELD)
registerEnum(ITEM_TYPE_TELEPORT)
registerEnum(ITEM_TYPE_BED)
registerEnum(ITEM_TYPE_KEY)
registerEnum(ITEM_TYPE_RUNE)
registerEnum(ITEM_BAG)
registerEnum(ITEM_GOLD_COIN)
registerEnum(ITEM_PLATINUM_COIN)
registerEnum(ITEM_CRYSTAL_COIN)
registerEnum(ITEM_AMULETOFLOSS)
registerEnum(ITEM_PARCEL)
registerEnum(ITEM_LABEL)
registerEnum(ITEM_FIREFIELD_PVP_FULL)
registerEnum(ITEM_FIREFIELD_PVP_MEDIUM)
registerEnum(ITEM_FIREFIELD_PVP_SMALL)
registerEnum(ITEM_FIREFIELD_PERSISTENT_FULL)
registerEnum(ITEM_FIREFIELD_PERSISTENT_MEDIUM)
registerEnum(ITEM_FIREFIELD_PERSISTENT_SMALL)
registerEnum(ITEM_FIREFIELD_NOPVP)
registerEnum(ITEM_POISONFIELD_PVP)
registerEnum(ITEM_POISONFIELD_PERSISTENT)
registerEnum(ITEM_POISONFIELD_NOPVP)
registerEnum(ITEM_ENERGYFIELD_PVP)
registerEnum(ITEM_ENERGYFIELD_PERSISTENT)
registerEnum(ITEM_ENERGYFIELD_NOPVP)
registerEnum(ITEM_MAGICWALL)
registerEnum(ITEM_MAGICWALL_PERSISTENT)
registerEnum(ITEM_MAGICWALL_SAFE)
registerEnum(ITEM_WILDGROWTH)
registerEnum(ITEM_WILDGROWTH_PERSISTENT)
registerEnum(ITEM_WILDGROWTH_SAFE)
registerEnum(PlayerFlag_CannotUseCombat)
registerEnum(PlayerFlag_CannotAttackPlayer)
registerEnum(PlayerFlag_CannotAttackMonster)
registerEnum(PlayerFlag_CannotBeAttacked)
registerEnum(PlayerFlag_CanConvinceAll)
registerEnum(PlayerFlag_CanSummonAll)
registerEnum(PlayerFlag_CanIllusionAll)
registerEnum(PlayerFlag_CanSenseInvisibility)
registerEnum(PlayerFlag_IgnoredByMonsters)
registerEnum(PlayerFlag_NotGainInFight)
registerEnum(PlayerFlag_HasInfiniteMana)
registerEnum(PlayerFlag_HasInfiniteSoul)
registerEnum(PlayerFlag_HasNoExhaustion)
registerEnum(PlayerFlag_CannotUseSpells)
registerEnum(PlayerFlag_CannotPickupItem)
registerEnum(PlayerFlag_CanAlwaysLogin)
registerEnum(PlayerFlag_CanBroadcast)
registerEnum(PlayerFlag_CanEditHouses)
registerEnum(PlayerFlag_CannotBeBanned)
registerEnum(PlayerFlag_CannotBePushed)
registerEnum(PlayerFlag_HasInfiniteCapacity)
registerEnum(PlayerFlag_CanPushAllCreatures)
registerEnum(PlayerFlag_CanTalkRedPrivate)
registerEnum(PlayerFlag_CanTalkRedChannel)
registerEnum(PlayerFlag_TalkOrangeHelpChannel)
registerEnum(PlayerFlag_NotGainExperience)
registerEnum(PlayerFlag_NotGainMana)
registerEnum(PlayerFlag_NotGainHealth)
registerEnum(PlayerFlag_NotGainSkill)
registerEnum(PlayerFlag_SetMaxSpeed)
registerEnum(PlayerFlag_SpecialVIP)
registerEnum(PlayerFlag_NotGenerateLoot)
registerEnum(PlayerFlag_CanTalkRedChannelAnonymous)
registerEnum(PlayerFlag_IgnoreProtectionZone)
registerEnum(PlayerFlag_IgnoreSpellCheck)
registerEnum(PlayerFlag_IgnoreWeaponCheck)
registerEnum(PlayerFlag_CannotBeMuted)
registerEnum(PlayerFlag_IsAlwaysPremium)
registerEnum(PLAYERSEX_FEMALE)
registerEnum(PLAYERSEX_MALE)
registerEnum(REPORT_REASON_NAMEINAPPROPRIATE)
registerEnum(REPORT_REASON_NAMEPOORFORMATTED)
registerEnum(REPORT_REASON_NAMEADVERTISING)
registerEnum(REPORT_REASON_NAMEUNFITTING)
registerEnum(REPORT_REASON_NAMERULEVIOLATION)
registerEnum(REPORT_REASON_INSULTINGSTATEMENT)
registerEnum(REPORT_REASON_SPAMMING)
registerEnum(REPORT_REASON_ADVERTISINGSTATEMENT)
registerEnum(REPORT_REASON_UNFITTINGSTATEMENT)
registerEnum(REPORT_REASON_LANGUAGESTATEMENT)
registerEnum(REPORT_REASON_DISCLOSURE)
registerEnum(REPORT_REASON_RULEVIOLATION)
registerEnum(REPORT_REASON_STATEMENT_BUGABUSE)
registerEnum(REPORT_REASON_UNOFFICIALSOFTWARE)
registerEnum(REPORT_REASON_PRETENDING)
registerEnum(REPORT_REASON_HARASSINGOWNERS)
registerEnum(REPORT_REASON_FALSEINFO)
registerEnum(REPORT_REASON_ACCOUNTSHARING)
registerEnum(REPORT_REASON_STEALINGDATA)
registerEnum(REPORT_REASON_SERVICEATTACKING)
registerEnum(REPORT_REASON_SERVICEAGREEMENT)
registerEnum(REPORT_TYPE_NAME)
registerEnum(REPORT_TYPE_STATEMENT)
registerEnum(REPORT_TYPE_BOT)
registerEnum(VOCATION_NONE)
registerEnum(SKILL_FIST)
registerEnum(SKILL_CLUB)
registerEnum(SKILL_SWORD)
registerEnum(SKILL_AXE)
registerEnum(SKILL_DISTANCE)
registerEnum(SKILL_SHIELD)
registerEnum(SKILL_FISHING)
registerEnum(SKILL_MAGLEVEL)
registerEnum(SKILL_LEVEL)
registerEnum(SKULL_NONE)
registerEnum(SKULL_YELLOW)
registerEnum(SKULL_GREEN)
registerEnum(SKULL_WHITE)
registerEnum(SKULL_RED)
registerEnum(SKULL_BLACK)
registerEnum(SKULL_ORANGE)
registerEnum(TALKTYPE_SAY)
registerEnum(TALKTYPE_WHISPER)
registerEnum(TALKTYPE_YELL)
registerEnum(TALKTYPE_PRIVATE_FROM)
registerEnum(TALKTYPE_PRIVATE_TO)
registerEnum(TALKTYPE_CHANNEL_Y)
registerEnum(TALKTYPE_CHANNEL_O)
registerEnum(TALKTYPE_PRIVATE_NP)
registerEnum(TALKTYPE_PRIVATE_PN)
registerEnum(TALKTYPE_BROADCAST)
registerEnum(TALKTYPE_CHANNEL_R1)
registerEnum(TALKTYPE_PRIVATE_RED_FROM)
registerEnum(TALKTYPE_PRIVATE_RED_TO)
registerEnum(TALKTYPE_MONSTER_SAY)
registerEnum(TALKTYPE_MONSTER_YELL)
registerEnum(TALKTYPE_CHANNEL_R2)
registerEnum(TEXTCOLOR_BLUE)
registerEnum(TEXTCOLOR_LIGHTGREEN)
registerEnum(TEXTCOLOR_LIGHTBLUE)
registerEnum(TEXTCOLOR_MAYABLUE)
registerEnum(TEXTCOLOR_DARKRED)
registerEnum(TEXTCOLOR_LIGHTGREY)
registerEnum(TEXTCOLOR_SKYBLUE)
registerEnum(TEXTCOLOR_PURPLE)
registerEnum(TEXTCOLOR_ELECTRICPURPLE)
registerEnum(TEXTCOLOR_RED)
registerEnum(TEXTCOLOR_PASTELRED)
registerEnum(TEXTCOLOR_ORANGE)
registerEnum(TEXTCOLOR_YELLOW)
registerEnum(TEXTCOLOR_WHITE_EXP)
registerEnum(TEXTCOLOR_NONE)
registerEnum(TILESTATE_NONE)
registerEnum(TILESTATE_PROTECTIONZONE)
registerEnum(TILESTATE_NOPVPZONE)
registerEnum(TILESTATE_NOLOGOUT)
registerEnum(TILESTATE_PVPZONE)
registerEnum(TILESTATE_FLOORCHANGE)
registerEnum(TILESTATE_FLOORCHANGE_DOWN)
registerEnum(TILESTATE_FLOORCHANGE_NORTH)
registerEnum(TILESTATE_FLOORCHANGE_SOUTH)
registerEnum(TILESTATE_FLOORCHANGE_EAST)
registerEnum(TILESTATE_FLOORCHANGE_WEST)
registerEnum(TILESTATE_TELEPORT)
registerEnum(TILESTATE_MAGICFIELD)
registerEnum(TILESTATE_MAILBOX)
registerEnum(TILESTATE_TRASHHOLDER)
registerEnum(TILESTATE_BED)
registerEnum(TILESTATE_DEPOT)
registerEnum(TILESTATE_BLOCKSOLID)
registerEnum(TILESTATE_BLOCKPATH)
registerEnum(TILESTATE_IMMOVABLEBLOCKSOLID)
registerEnum(TILESTATE_IMMOVABLEBLOCKPATH)
registerEnum(TILESTATE_IMMOVABLENOFIELDBLOCKPATH)
registerEnum(TILESTATE_NOFIELDBLOCKPATH)
registerEnum(TILESTATE_FLOORCHANGE_SOUTH_ALT)
registerEnum(TILESTATE_FLOORCHANGE_EAST_ALT)
registerEnum(TILESTATE_SUPPORTS_HANGABLE)
registerEnum(WEAPON_NONE)
registerEnum(WEAPON_SWORD)
registerEnum(WEAPON_CLUB)
registerEnum(WEAPON_AXE)
registerEnum(WEAPON_SHIELD)
registerEnum(WEAPON_DISTANCE)
registerEnum(WEAPON_WAND)
registerEnum(WEAPON_AMMO)
registerEnum(WORLD_TYPE_NO_PVP)
registerEnum(WORLD_TYPE_PVP)
registerEnum(WORLD_TYPE_PVP_ENFORCED)
// Use with container:addItem, container:addItemEx and possibly other functions.
registerEnum(FLAG_NOLIMIT)
registerEnum(FLAG_IGNOREBLOCKITEM)
registerEnum(FLAG_IGNOREBLOCKCREATURE)
registerEnum(FLAG_CHILDISOWNER)
registerEnum(FLAG_PATHFINDING)
registerEnum(FLAG_IGNOREFIELDDAMAGE)
registerEnum(FLAG_IGNORENOTMOVEABLE)
registerEnum(FLAG_IGNOREAUTOSTACK)
// Use with itemType:getSlotPosition
registerEnum(SLOTP_WHEREEVER)
registerEnum(SLOTP_HEAD)
registerEnum(SLOTP_NECKLACE)
registerEnum(SLOTP_BACKPACK)
registerEnum(SLOTP_ARMOR)
registerEnum(SLOTP_RIGHT)
registerEnum(SLOTP_LEFT)
registerEnum(SLOTP_LEGS)
registerEnum(SLOTP_FEET)
registerEnum(SLOTP_RING)
registerEnum(SLOTP_AMMO)
registerEnum(SLOTP_DEPOT)
registerEnum(SLOTP_TWO_HAND)
// Use with combat functions
registerEnum(ORIGIN_NONE)
registerEnum(ORIGIN_CONDITION)
registerEnum(ORIGIN_SPELL)
registerEnum(ORIGIN_MELEE)
registerEnum(ORIGIN_RANGED)
// Use with house:getAccessList, house:setAccessList
registerEnum(GUEST_LIST)
registerEnum(SUBOWNER_LIST)
// Use with npc:setSpeechBubble
registerEnum(SPEECHBUBBLE_NONE)
registerEnum(SPEECHBUBBLE_NORMAL)
registerEnum(SPEECHBUBBLE_TRADE)
registerEnum(SPEECHBUBBLE_QUEST)
registerEnum(SPEECHBUBBLE_QUESTTRADER)
// Use with player:addMapMark
registerEnum(MAPMARK_TICK)
registerEnum(MAPMARK_QUESTION)
registerEnum(MAPMARK_EXCLAMATION)
registerEnum(MAPMARK_STAR)
registerEnum(MAPMARK_CROSS)
registerEnum(MAPMARK_TEMPLE)
registerEnum(MAPMARK_KISS)
registerEnum(MAPMARK_SHOVEL)
registerEnum(MAPMARK_SWORD)
registerEnum(MAPMARK_FLAG)
registerEnum(MAPMARK_LOCK)
registerEnum(MAPMARK_BAG)
registerEnum(MAPMARK_SKULL)
registerEnum(MAPMARK_DOLLAR)
registerEnum(MAPMARK_REDNORTH)
registerEnum(MAPMARK_REDSOUTH)
registerEnum(MAPMARK_REDEAST)
registerEnum(MAPMARK_REDWEST)
registerEnum(MAPMARK_GREENNORTH)
registerEnum(MAPMARK_GREENSOUTH)
// Use with Game.getReturnMessage
registerEnum(RETURNVALUE_NOERROR)
registerEnum(RETURNVALUE_NOTPOSSIBLE)
registerEnum(RETURNVALUE_NOTENOUGHROOM)
registerEnum(RETURNVALUE_PLAYERISPZLOCKED)
registerEnum(RETURNVALUE_PLAYERISNOTINVITED)
registerEnum(RETURNVALUE_CANNOTTHROW)
registerEnum(RETURNVALUE_THEREISNOWAY)
registerEnum(RETURNVALUE_DESTINATIONOUTOFREACH)
registerEnum(RETURNVALUE_CREATUREBLOCK)
registerEnum(RETURNVALUE_NOTMOVEABLE)
registerEnum(RETURNVALUE_DROPTWOHANDEDITEM)
registerEnum(RETURNVALUE_BOTHHANDSNEEDTOBEFREE)
registerEnum(RETURNVALUE_CANONLYUSEONEWEAPON)
registerEnum(RETURNVALUE_NEEDEXCHANGE)
registerEnum(RETURNVALUE_CANNOTBEDRESSED)
registerEnum(RETURNVALUE_PUTTHISOBJECTINYOURHAND)
registerEnum(RETURNVALUE_PUTTHISOBJECTINBOTHHANDS)
registerEnum(RETURNVALUE_TOOFARAWAY)
registerEnum(RETURNVALUE_FIRSTGODOWNSTAIRS)
registerEnum(RETURNVALUE_FIRSTGOUPSTAIRS)
registerEnum(RETURNVALUE_CONTAINERNOTENOUGHROOM)
registerEnum(RETURNVALUE_NOTENOUGHCAPACITY)
registerEnum(RETURNVALUE_CANNOTPICKUP)
registerEnum(RETURNVALUE_THISISIMPOSSIBLE)
registerEnum(RETURNVALUE_DEPOTISFULL)
registerEnum(RETURNVALUE_CREATUREDOESNOTEXIST)
registerEnum(RETURNVALUE_CANNOTUSETHISOBJECT)
registerEnum(RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE)
registerEnum(RETURNVALUE_NOTREQUIREDLEVELTOUSERUNE)
registerEnum(RETURNVALUE_YOUAREALREADYTRADING)
registerEnum(RETURNVALUE_THISPLAYERISALREADYTRADING)
registerEnum(RETURNVALUE_YOUMAYNOTLOGOUTDURINGAFIGHT)
registerEnum(RETURNVALUE_DIRECTPLAYERSHOOT)
registerEnum(RETURNVALUE_NOTENOUGHLEVEL)
registerEnum(RETURNVALUE_NOTENOUGHMAGICLEVEL)
registerEnum(RETURNVALUE_NOTENOUGHMANA)
registerEnum(RETURNVALUE_NOTENOUGHSOUL)
registerEnum(RETURNVALUE_YOUAREEXHAUSTED)
registerEnum(RETURNVALUE_PLAYERISNOTREACHABLE)
registerEnum(RETURNVALUE_CANONLYUSETHISRUNEONCREATURES)
registerEnum(RETURNVALUE_ACTIONNOTPERMITTEDINPROTECTIONZONE)
registerEnum(RETURNVALUE_YOUMAYNOTATTACKTHISPLAYER)
registerEnum(RETURNVALUE_YOUMAYNOTATTACKAPERSONINPROTECTIONZONE)
registerEnum(RETURNVALUE_YOUMAYNOTATTACKAPERSONWHILEINPROTECTIONZONE)
registerEnum(RETURNVALUE_YOUMAYNOTATTACKTHISCREATURE)
registerEnum(RETURNVALUE_YOUCANONLYUSEITONCREATURES)
registerEnum(RETURNVALUE_CREATUREISNOTREACHABLE)
registerEnum(RETURNVALUE_TURNSECUREMODETOATTACKUNMARKEDPLAYERS)
registerEnum(RETURNVALUE_YOUNEEDPREMIUMACCOUNT)
registerEnum(RETURNVALUE_YOUNEEDTOLEARNTHISSPELL)
registerEnum(RETURNVALUE_YOURVOCATIONCANNOTUSETHISSPELL)
registerEnum(RETURNVALUE_YOUNEEDAWEAPONTOUSETHISSPELL)
registerEnum(RETURNVALUE_PLAYERISPZLOCKEDLEAVEPVPZONE)
registerEnum(RETURNVALUE_PLAYERISPZLOCKEDENTERPVPZONE)
registerEnum(RETURNVALUE_ACTIONNOTPERMITTEDINANOPVPZONE)
registerEnum(RETURNVALUE_YOUCANNOTLOGOUTHERE)
registerEnum(RETURNVALUE_YOUNEEDAMAGICITEMTOCASTSPELL)
registerEnum(RETURNVALUE_CANNOTCONJUREITEMHERE)
registerEnum(RETURNVALUE_YOUNEEDTOSPLITYOURSPEARS)
registerEnum(RETURNVALUE_NAMEISTOOAMBIGUOUS)
registerEnum(RETURNVALUE_CANONLYUSEONESHIELD)
registerEnum(RETURNVALUE_NOPARTYMEMBERSINRANGE)
registerEnum(RETURNVALUE_YOUARENOTTHEOWNER)
registerEnum(RETURNVALUE_TRADEPLAYERFARAWAY)
registerEnum(RETURNVALUE_YOUDONTOWNTHISHOUSE)
registerEnum(RETURNVALUE_TRADEPLAYERALREADYOWNSAHOUSE)
registerEnum(RETURNVALUE_TRADEPLAYERHIGHESTBIDDER)
registerEnum(RETURNVALUE_YOUCANNOTTRADETHISHOUSE)
registerEnum(RELOAD_TYPE_ALL)
registerEnum(RELOAD_TYPE_ACTIONS)
registerEnum(RELOAD_TYPE_CHAT)
registerEnum(RELOAD_TYPE_CONFIG)
registerEnum(RELOAD_TYPE_CREATURESCRIPTS)
registerEnum(RELOAD_TYPE_EVENTS)
registerEnum(RELOAD_TYPE_GLOBAL)
registerEnum(RELOAD_TYPE_GLOBALEVENTS)
registerEnum(RELOAD_TYPE_ITEMS)
registerEnum(RELOAD_TYPE_MONSTERS)
registerEnum(RELOAD_TYPE_MOUNTS)
registerEnum(RELOAD_TYPE_MOVEMENTS)
registerEnum(RELOAD_TYPE_NPCS)
registerEnum(RELOAD_TYPE_QUESTS)
registerEnum(RELOAD_TYPE_RAIDS)
registerEnum(RELOAD_TYPE_SPELLS)
registerEnum(RELOAD_TYPE_TALKACTIONS)
registerEnum(RELOAD_TYPE_WEAPONS)
// _G
registerGlobalVariable("INDEX_WHEREEVER", INDEX_WHEREEVER);
registerGlobalBoolean("VIRTUAL_PARENT", true);
registerGlobalMethod("isType", LuaScriptInterface::luaIsType);
registerGlobalMethod("rawgetmetatable", LuaScriptInterface::luaRawGetMetatable);
// configKeys
registerTable("configKeys");
registerEnumIn("configKeys", ConfigManager::ALLOW_CHANGEOUTFIT)
registerEnumIn("configKeys", ConfigManager::ONE_PLAYER_ON_ACCOUNT)
registerEnumIn("configKeys", ConfigManager::AIMBOT_HOTKEY_ENABLED)
registerEnumIn("configKeys", ConfigManager::REMOVE_RUNE_CHARGES)
registerEnumIn("configKeys", ConfigManager::EXPERIENCE_FROM_PLAYERS)
registerEnumIn("configKeys", ConfigManager::FREE_PREMIUM)
registerEnumIn("configKeys", ConfigManager::REPLACE_KICK_ON_LOGIN)
registerEnumIn("configKeys", ConfigManager::ALLOW_CLONES)
registerEnumIn("configKeys", ConfigManager::BIND_ONLY_GLOBAL_ADDRESS)
registerEnumIn("configKeys", ConfigManager::OPTIMIZE_DATABASE)
registerEnumIn("configKeys", ConfigManager::MARKET_PREMIUM)
registerEnumIn("configKeys", ConfigManager::EMOTE_SPELLS)
registerEnumIn("configKeys", ConfigManager::STAMINA_SYSTEM)
registerEnumIn("configKeys", ConfigManager::WARN_UNSAFE_SCRIPTS)
registerEnumIn("configKeys", ConfigManager::CONVERT_UNSAFE_SCRIPTS)
registerEnumIn("configKeys", ConfigManager::CLASSIC_EQUIPMENT_SLOTS)
registerEnumIn("configKeys", ConfigManager::CLASSIC_ATTACK_SPEED)
registerEnumIn("configKeys", ConfigManager::MAP_NAME)
registerEnumIn("configKeys", ConfigManager::HOUSE_RENT_PERIOD)
registerEnumIn("configKeys", ConfigManager::SERVER_NAME)
registerEnumIn("configKeys", ConfigManager::OWNER_NAME)
registerEnumIn("configKeys", ConfigManager::OWNER_EMAIL)
registerEnumIn("configKeys", ConfigManager::URL)
registerEnumIn("configKeys", ConfigManager::LOCATION)
registerEnumIn("configKeys", ConfigManager::IP)
registerEnumIn("configKeys", ConfigManager::MOTD)
registerEnumIn("configKeys", ConfigManager::WORLD_TYPE)
registerEnumIn("configKeys", ConfigManager::MYSQL_HOST)
registerEnumIn("configKeys", ConfigManager::MYSQL_USER)
registerEnumIn("configKeys", ConfigManager::MYSQL_PASS)
registerEnumIn("configKeys", ConfigManager::MYSQL_DB)
registerEnumIn("configKeys", ConfigManager::MYSQL_SOCK)
registerEnumIn("configKeys", ConfigManager::DEFAULT_PRIORITY)
registerEnumIn("configKeys", ConfigManager::MAP_AUTHOR)
registerEnumIn("configKeys", ConfigManager::SQL_PORT)
registerEnumIn("configKeys", ConfigManager::MAX_PLAYERS)
registerEnumIn("configKeys", ConfigManager::PZ_LOCKED)
registerEnumIn("configKeys", ConfigManager::DEFAULT_DESPAWNRANGE)
registerEnumIn("configKeys", ConfigManager::DEFAULT_DESPAWNRADIUS)
registerEnumIn("configKeys", ConfigManager::RATE_EXPERIENCE)
registerEnumIn("configKeys", ConfigManager::RATE_SKILL)
registerEnumIn("configKeys", ConfigManager::RATE_LOOT)
registerEnumIn("configKeys", ConfigManager::RATE_MAGIC)
registerEnumIn("configKeys", ConfigManager::RATE_SPAWN)
registerEnumIn("configKeys", ConfigManager::HOUSE_PRICE)
registerEnumIn("configKeys", ConfigManager::KILLS_TO_RED)
registerEnumIn("configKeys", ConfigManager::KILLS_TO_BLACK)
registerEnumIn("configKeys", ConfigManager::MAX_MESSAGEBUFFER)
registerEnumIn("configKeys", ConfigManager::ACTIONS_DELAY_INTERVAL)
registerEnumIn("configKeys", ConfigManager::EX_ACTIONS_DELAY_INTERVAL)
registerEnumIn("configKeys", ConfigManager::KICK_AFTER_MINUTES)
registerEnumIn("configKeys", ConfigManager::PROTECTION_LEVEL)
registerEnumIn("configKeys", ConfigManager::DEATH_LOSE_PERCENT)
registerEnumIn("configKeys", ConfigManager::STATUSQUERY_TIMEOUT)
registerEnumIn("configKeys", ConfigManager::FRAG_TIME)
registerEnumIn("configKeys", ConfigManager::WHITE_SKULL_TIME)
registerEnumIn("configKeys", ConfigManager::GAME_PORT)
registerEnumIn("configKeys", ConfigManager::LOGIN_PORT)
registerEnumIn("configKeys", ConfigManager::STATUS_PORT)
registerEnumIn("configKeys", ConfigManager::STAIRHOP_DELAY)
registerEnumIn("configKeys", ConfigManager::MARKET_OFFER_DURATION)
registerEnumIn("configKeys", ConfigManager::CHECK_EXPIRED_MARKET_OFFERS_EACH_MINUTES)
registerEnumIn("configKeys", ConfigManager::MAX_MARKET_OFFERS_AT_A_TIME_PER_PLAYER)
registerEnumIn("configKeys", ConfigManager::EXP_FROM_PLAYERS_LEVEL_RANGE)
registerEnumIn("configKeys", ConfigManager::MAX_PACKETS_PER_SECOND)
// os
registerMethod("os", "mtime", LuaScriptInterface::luaSystemTime);
// table
registerMethod("table", "create", LuaScriptInterface::luaTableCreate);
// Game
registerTable("Game");
registerMethod("Game", "getSpectators", LuaScriptInterface::luaGameGetSpectators);
registerMethod("Game", "getPlayers", LuaScriptInterface::luaGameGetPlayers);
registerMethod("Game", "loadMap", LuaScriptInterface::luaGameLoadMap);
registerMethod("Game", "getExperienceStage", LuaScriptInterface::luaGameGetExperienceStage);
registerMethod("Game", "getMonsterCount", LuaScriptInterface::luaGameGetMonsterCount);
registerMethod("Game", "getPlayerCount", LuaScriptInterface::luaGameGetPlayerCount);
registerMethod("Game", "getNpcCount", LuaScriptInterface::luaGameGetNpcCount);
registerMethod("Game", "getTowns", LuaScriptInterface::luaGameGetTowns);
registerMethod("Game", "getHouses", LuaScriptInterface::luaGameGetHouses);
registerMethod("Game", "getGameState", LuaScriptInterface::luaGameGetGameState);
registerMethod("Game", "setGameState", LuaScriptInterface::luaGameSetGameState);
registerMethod("Game", "getWorldType", LuaScriptInterface::luaGameGetWorldType);
registerMethod("Game", "setWorldType", LuaScriptInterface::luaGameSetWorldType);
registerMethod("Game", "getReturnMessage", LuaScriptInterface::luaGameGetReturnMessage);
registerMethod("Game", "createItem", LuaScriptInterface::luaGameCreateItem);
registerMethod("Game", "createContainer", LuaScriptInterface::luaGameCreateContainer);
registerMethod("Game", "createMonster", LuaScriptInterface::luaGameCreateMonster);
registerMethod("Game", "createNpc", LuaScriptInterface::luaGameCreateNpc);
registerMethod("Game", "createTile", LuaScriptInterface::luaGameCreateTile);
registerMethod("Game", "startRaid", LuaScriptInterface::luaGameStartRaid);
registerMethod("Game", "getClientVersion", LuaScriptInterface::luaGameGetClientVersion);
registerMethod("Game", "reload", LuaScriptInterface::luaGameReload);
// Variant
registerClass("Variant", "", LuaScriptInterface::luaVariantCreate);
registerMethod("Variant", "getNumber", LuaScriptInterface::luaVariantGetNumber);
registerMethod("Variant", "getString", LuaScriptInterface::luaVariantGetString);
registerMethod("Variant", "getPosition", LuaScriptInterface::luaVariantGetPosition);
// Position
registerClass("Position", "", LuaScriptInterface::luaPositionCreate);
registerMetaMethod("Position", "__add", LuaScriptInterface::luaPositionAdd);
registerMetaMethod("Position", "__sub", LuaScriptInterface::luaPositionSub);
registerMetaMethod("Position", "__eq", LuaScriptInterface::luaPositionCompare);
registerMethod("Position", "getDistance", LuaScriptInterface::luaPositionGetDistance);
registerMethod("Position", "isSightClear", LuaScriptInterface::luaPositionIsSightClear);
registerMethod("Position", "sendMagicEffect", LuaScriptInterface::luaPositionSendMagicEffect);
registerMethod("Position", "sendDistanceEffect", LuaScriptInterface::luaPositionSendDistanceEffect);
// Tile
registerClass("Tile", "", LuaScriptInterface::luaTileCreate);
registerMetaMethod("Tile", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Tile", "getPosition", LuaScriptInterface::luaTileGetPosition);
registerMethod("Tile", "getGround", LuaScriptInterface::luaTileGetGround);
registerMethod("Tile", "getThing", LuaScriptInterface::luaTileGetThing);
registerMethod("Tile", "getThingCount", LuaScriptInterface::luaTileGetThingCount);
registerMethod("Tile", "getTopVisibleThing", LuaScriptInterface::luaTileGetTopVisibleThing);
registerMethod("Tile", "getTopTopItem", LuaScriptInterface::luaTileGetTopTopItem);
registerMethod("Tile", "getTopDownItem", LuaScriptInterface::luaTileGetTopDownItem);
registerMethod("Tile", "getFieldItem", LuaScriptInterface::luaTileGetFieldItem);
registerMethod("Tile", "getItemById", LuaScriptInterface::luaTileGetItemById);
registerMethod("Tile", "getItemByType", LuaScriptInterface::luaTileGetItemByType);
registerMethod("Tile", "getItemByTopOrder", LuaScriptInterface::luaTileGetItemByTopOrder);
registerMethod("Tile", "getItemCountById", LuaScriptInterface::luaTileGetItemCountById);
registerMethod("Tile", "getBottomCreature", LuaScriptInterface::luaTileGetBottomCreature);
registerMethod("Tile", "getTopCreature", LuaScriptInterface::luaTileGetTopCreature);
registerMethod("Tile", "getBottomVisibleCreature", LuaScriptInterface::luaTileGetBottomVisibleCreature);
registerMethod("Tile", "getTopVisibleCreature", LuaScriptInterface::luaTileGetTopVisibleCreature);
registerMethod("Tile", "getItems", LuaScriptInterface::luaTileGetItems);
registerMethod("Tile", "getItemCount", LuaScriptInterface::luaTileGetItemCount);
registerMethod("Tile", "getDownItemCount", LuaScriptInterface::luaTileGetDownItemCount);
registerMethod("Tile", "getTopItemCount", LuaScriptInterface::luaTileGetTopItemCount);
registerMethod("Tile", "getCreatures", LuaScriptInterface::luaTileGetCreatures);
registerMethod("Tile", "getCreatureCount", LuaScriptInterface::luaTileGetCreatureCount);
registerMethod("Tile", "getThingIndex", LuaScriptInterface::luaTileGetThingIndex);
registerMethod("Tile", "hasProperty", LuaScriptInterface::luaTileHasProperty);
registerMethod("Tile", "hasFlag", LuaScriptInterface::luaTileHasFlag);
registerMethod("Tile", "queryAdd", LuaScriptInterface::luaTileQueryAdd);
registerMethod("Tile", "getHouse", LuaScriptInterface::luaTileGetHouse);
// NetworkMessage
registerClass("NetworkMessage", "", LuaScriptInterface::luaNetworkMessageCreate);
registerMetaMethod("NetworkMessage", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMetaMethod("NetworkMessage", "__gc", LuaScriptInterface::luaNetworkMessageDelete);
registerMethod("NetworkMessage", "delete", LuaScriptInterface::luaNetworkMessageDelete);
registerMethod("NetworkMessage", "getByte", LuaScriptInterface::luaNetworkMessageGetByte);
registerMethod("NetworkMessage", "getU16", LuaScriptInterface::luaNetworkMessageGetU16);
registerMethod("NetworkMessage", "getU32", LuaScriptInterface::luaNetworkMessageGetU32);
registerMethod("NetworkMessage", "getU64", LuaScriptInterface::luaNetworkMessageGetU64);
registerMethod("NetworkMessage", "getString", LuaScriptInterface::luaNetworkMessageGetString);
registerMethod("NetworkMessage", "getPosition", LuaScriptInterface::luaNetworkMessageGetPosition);
registerMethod("NetworkMessage", "addByte", LuaScriptInterface::luaNetworkMessageAddByte);
registerMethod("NetworkMessage", "addU16", LuaScriptInterface::luaNetworkMessageAddU16);
registerMethod("NetworkMessage", "addU32", LuaScriptInterface::luaNetworkMessageAddU32);
registerMethod("NetworkMessage", "addU64", LuaScriptInterface::luaNetworkMessageAddU64);
registerMethod("NetworkMessage", "addString", LuaScriptInterface::luaNetworkMessageAddString);
registerMethod("NetworkMessage", "addPosition", LuaScriptInterface::luaNetworkMessageAddPosition);
registerMethod("NetworkMessage", "addDouble", LuaScriptInterface::luaNetworkMessageAddDouble);
registerMethod("NetworkMessage", "addItem", LuaScriptInterface::luaNetworkMessageAddItem);
registerMethod("NetworkMessage", "addItemId", LuaScriptInterface::luaNetworkMessageAddItemId);
registerMethod("NetworkMessage", "reset", LuaScriptInterface::luaNetworkMessageReset);
registerMethod("NetworkMessage", "skipBytes", LuaScriptInterface::luaNetworkMessageSkipBytes);
registerMethod("NetworkMessage", "sendToPlayer", LuaScriptInterface::luaNetworkMessageSendToPlayer);
// ModalWindow
registerClass("ModalWindow", "", LuaScriptInterface::luaModalWindowCreate);
registerMetaMethod("ModalWindow", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMetaMethod("ModalWindow", "__gc", LuaScriptInterface::luaModalWindowDelete);
registerMethod("ModalWindow", "delete", LuaScriptInterface::luaModalWindowDelete);
registerMethod("ModalWindow", "getId", LuaScriptInterface::luaModalWindowGetId);
registerMethod("ModalWindow", "getTitle", LuaScriptInterface::luaModalWindowGetTitle);
registerMethod("ModalWindow", "getMessage", LuaScriptInterface::luaModalWindowGetMessage);
registerMethod("ModalWindow", "setTitle", LuaScriptInterface::luaModalWindowSetTitle);
registerMethod("ModalWindow", "setMessage", LuaScriptInterface::luaModalWindowSetMessage);
registerMethod("ModalWindow", "getButtonCount", LuaScriptInterface::luaModalWindowGetButtonCount);
registerMethod("ModalWindow", "getChoiceCount", LuaScriptInterface::luaModalWindowGetChoiceCount);
registerMethod("ModalWindow", "addButton", LuaScriptInterface::luaModalWindowAddButton);
registerMethod("ModalWindow", "addChoice", LuaScriptInterface::luaModalWindowAddChoice);
registerMethod("ModalWindow", "getDefaultEnterButton", LuaScriptInterface::luaModalWindowGetDefaultEnterButton);
registerMethod("ModalWindow", "setDefaultEnterButton", LuaScriptInterface::luaModalWindowSetDefaultEnterButton);
registerMethod("ModalWindow", "getDefaultEscapeButton", LuaScriptInterface::luaModalWindowGetDefaultEscapeButton);
registerMethod("ModalWindow", "setDefaultEscapeButton", LuaScriptInterface::luaModalWindowSetDefaultEscapeButton);
registerMethod("ModalWindow", "hasPriority", LuaScriptInterface::luaModalWindowHasPriority);
registerMethod("ModalWindow", "setPriority", LuaScriptInterface::luaModalWindowSetPriority);
registerMethod("ModalWindow", "sendToPlayer", LuaScriptInterface::luaModalWindowSendToPlayer);
// Item
registerClass("Item", "", LuaScriptInterface::luaItemCreate);
registerMetaMethod("Item", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Item", "isItem", LuaScriptInterface::luaItemIsItem);
registerMethod("Item", "getParent", LuaScriptInterface::luaItemGetParent);
registerMethod("Item", "getTopParent", LuaScriptInterface::luaItemGetTopParent);
registerMethod("Item", "getId", LuaScriptInterface::luaItemGetId);
registerMethod("Item", "clone", LuaScriptInterface::luaItemClone);
registerMethod("Item", "split", LuaScriptInterface::luaItemSplit);
registerMethod("Item", "remove", LuaScriptInterface::luaItemRemove);
registerMethod("Item", "getUniqueId", LuaScriptInterface::luaItemGetUniqueId);
registerMethod("Item", "getActionId", LuaScriptInterface::luaItemGetActionId);
registerMethod("Item", "setActionId", LuaScriptInterface::luaItemSetActionId);
registerMethod("Item", "getCount", LuaScriptInterface::luaItemGetCount);
registerMethod("Item", "getCharges", LuaScriptInterface::luaItemGetCharges);
registerMethod("Item", "getFluidType", LuaScriptInterface::luaItemGetFluidType);
registerMethod("Item", "getWeight", LuaScriptInterface::luaItemGetWeight);
registerMethod("Item", "getSubType", LuaScriptInterface::luaItemGetSubType);
registerMethod("Item", "getName", LuaScriptInterface::luaItemGetName);
registerMethod("Item", "getPluralName", LuaScriptInterface::luaItemGetPluralName);
registerMethod("Item", "getArticle", LuaScriptInterface::luaItemGetArticle);
registerMethod("Item", "getPosition", LuaScriptInterface::luaItemGetPosition);
registerMethod("Item", "getTile", LuaScriptInterface::luaItemGetTile);
registerMethod("Item", "hasAttribute", LuaScriptInterface::luaItemHasAttribute);
registerMethod("Item", "getAttribute", LuaScriptInterface::luaItemGetAttribute);
registerMethod("Item", "setAttribute", LuaScriptInterface::luaItemSetAttribute);
registerMethod("Item", "removeAttribute", LuaScriptInterface::luaItemRemoveAttribute);
registerMethod("Item", "moveTo", LuaScriptInterface::luaItemMoveTo);
registerMethod("Item", "transform", LuaScriptInterface::luaItemTransform);
registerMethod("Item", "decay", LuaScriptInterface::luaItemDecay);
registerMethod("Item", "getDescription", LuaScriptInterface::luaItemGetDescription);
registerMethod("Item", "hasProperty", LuaScriptInterface::luaItemHasProperty);
registerMethod("Item", "isLoadedFromMap", LuaScriptInterface::luaItemIsLoadedFromMap);
// Container
registerClass("Container", "Item", LuaScriptInterface::luaContainerCreate);
registerMetaMethod("Container", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Container", "getSize", LuaScriptInterface::luaContainerGetSize);
registerMethod("Container", "getCapacity", LuaScriptInterface::luaContainerGetCapacity);
registerMethod("Container", "getEmptySlots", LuaScriptInterface::luaContainerGetEmptySlots);
registerMethod("Container", "getItemHoldingCount", LuaScriptInterface::luaContainerGetItemHoldingCount);
registerMethod("Container", "getItemCountById", LuaScriptInterface::luaContainerGetItemCountById);
registerMethod("Container", "getItem", LuaScriptInterface::luaContainerGetItem);
registerMethod("Container", "hasItem", LuaScriptInterface::luaContainerHasItem);
registerMethod("Container", "addItem", LuaScriptInterface::luaContainerAddItem);
registerMethod("Container", "addItemEx", LuaScriptInterface::luaContainerAddItemEx);
// Teleport
registerClass("Teleport", "Item", LuaScriptInterface::luaTeleportCreate);
registerMetaMethod("Teleport", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Teleport", "getDestination", LuaScriptInterface::luaTeleportGetDestination);
registerMethod("Teleport", "setDestination", LuaScriptInterface::luaTeleportSetDestination);
// Creature
registerClass("Creature", "", LuaScriptInterface::luaCreatureCreate);
registerMetaMethod("Creature", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Creature", "getEvents", LuaScriptInterface::luaCreatureGetEvents);
registerMethod("Creature", "registerEvent", LuaScriptInterface::luaCreatureRegisterEvent);
registerMethod("Creature", "unregisterEvent", LuaScriptInterface::luaCreatureUnregisterEvent);
registerMethod("Creature", "isRemoved", LuaScriptInterface::luaCreatureIsRemoved);
registerMethod("Creature", "isCreature", LuaScriptInterface::luaCreatureIsCreature);
registerMethod("Creature", "isInGhostMode", LuaScriptInterface::luaCreatureIsInGhostMode);
registerMethod("Creature", "isHealthHidden", LuaScriptInterface::luaCreatureIsHealthHidden);
registerMethod("Creature", "isImmune", LuaScriptInterface::luaCreatureIsImmune);
registerMethod("Creature", "canSee", LuaScriptInterface::luaCreatureCanSee);
registerMethod("Creature", "canSeeCreature", LuaScriptInterface::luaCreatureCanSeeCreature);
registerMethod("Creature", "getParent", LuaScriptInterface::luaCreatureGetParent);
registerMethod("Creature", "getId", LuaScriptInterface::luaCreatureGetId);
registerMethod("Creature", "getName", LuaScriptInterface::luaCreatureGetName);
registerMethod("Creature", "getTarget", LuaScriptInterface::luaCreatureGetTarget);
registerMethod("Creature", "setTarget", LuaScriptInterface::luaCreatureSetTarget);
registerMethod("Creature", "getFollowCreature", LuaScriptInterface::luaCreatureGetFollowCreature);
registerMethod("Creature", "setFollowCreature", LuaScriptInterface::luaCreatureSetFollowCreature);
registerMethod("Creature", "getMaster", LuaScriptInterface::luaCreatureGetMaster);
registerMethod("Creature", "setMaster", LuaScriptInterface::luaCreatureSetMaster);
registerMethod("Creature", "getLight", LuaScriptInterface::luaCreatureGetLight);
registerMethod("Creature", "setLight", LuaScriptInterface::luaCreatureSetLight);
registerMethod("Creature", "getSpeed", LuaScriptInterface::luaCreatureGetSpeed);
registerMethod("Creature", "getBaseSpeed", LuaScriptInterface::luaCreatureGetBaseSpeed);
registerMethod("Creature", "changeSpeed", LuaScriptInterface::luaCreatureChangeSpeed);
registerMethod("Creature", "setDropLoot", LuaScriptInterface::luaCreatureSetDropLoot);
registerMethod("Creature", "setSkillLoss", LuaScriptInterface::luaCreatureSetSkillLoss);
registerMethod("Creature", "getPosition", LuaScriptInterface::luaCreatureGetPosition);
registerMethod("Creature", "getTile", LuaScriptInterface::luaCreatureGetTile);
registerMethod("Creature", "getDirection", LuaScriptInterface::luaCreatureGetDirection);
registerMethod("Creature", "setDirection", LuaScriptInterface::luaCreatureSetDirection);
registerMethod("Creature", "getHealth", LuaScriptInterface::luaCreatureGetHealth);
registerMethod("Creature", "addHealth", LuaScriptInterface::luaCreatureAddHealth);
registerMethod("Creature", "getMaxHealth", LuaScriptInterface::luaCreatureGetMaxHealth);
registerMethod("Creature", "setMaxHealth", LuaScriptInterface::luaCreatureSetMaxHealth);
registerMethod("Creature", "setHiddenHealth", LuaScriptInterface::luaCreatureSetHiddenHealth);
registerMethod("Creature", "getSkull", LuaScriptInterface::luaCreatureGetSkull);
registerMethod("Creature", "setSkull", LuaScriptInterface::luaCreatureSetSkull);
registerMethod("Creature", "getOutfit", LuaScriptInterface::luaCreatureGetOutfit);
registerMethod("Creature", "setOutfit", LuaScriptInterface::luaCreatureSetOutfit);
registerMethod("Creature", "getCondition", LuaScriptInterface::luaCreatureGetCondition);
registerMethod("Creature", "addCondition", LuaScriptInterface::luaCreatureAddCondition);
registerMethod("Creature", "removeCondition", LuaScriptInterface::luaCreatureRemoveCondition);
registerMethod("Creature", "hasCondition", LuaScriptInterface::luaCreatureHasCondition);
registerMethod("Creature", "remove", LuaScriptInterface::luaCreatureRemove);
registerMethod("Creature", "teleportTo", LuaScriptInterface::luaCreatureTeleportTo);
registerMethod("Creature", "say", LuaScriptInterface::luaCreatureSay);
registerMethod("Creature", "getDamageMap", LuaScriptInterface::luaCreatureGetDamageMap);
registerMethod("Creature", "getSummons", LuaScriptInterface::luaCreatureGetSummons);
registerMethod("Creature", "getDescription", LuaScriptInterface::luaCreatureGetDescription);
registerMethod("Creature", "getPathTo", LuaScriptInterface::luaCreatureGetPathTo);
registerMethod("Creature", "move", LuaScriptInterface::luaCreatureMove);
// Player
registerClass("Player", "Creature", LuaScriptInterface::luaPlayerCreate);
registerMetaMethod("Player", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Player", "isPlayer", LuaScriptInterface::luaPlayerIsPlayer);
registerMethod("Player", "getGuid", LuaScriptInterface::luaPlayerGetGuid);
registerMethod("Player", "getIp", LuaScriptInterface::luaPlayerGetIp);
registerMethod("Player", "getAccountId", LuaScriptInterface::luaPlayerGetAccountId);
registerMethod("Player", "getLastLoginSaved", LuaScriptInterface::luaPlayerGetLastLoginSaved);
registerMethod("Player", "getLastLogout", LuaScriptInterface::luaPlayerGetLastLogout);
registerMethod("Player", "getAccountType", LuaScriptInterface::luaPlayerGetAccountType);
registerMethod("Player", "setAccountType", LuaScriptInterface::luaPlayerSetAccountType);
registerMethod("Player", "getCapacity", LuaScriptInterface::luaPlayerGetCapacity);
registerMethod("Player", "setCapacity", LuaScriptInterface::luaPlayerSetCapacity);
registerMethod("Player", "getFreeCapacity", LuaScriptInterface::luaPlayerGetFreeCapacity);
registerMethod("Player", "getDepotChest", LuaScriptInterface::luaPlayerGetDepotChest);
registerMethod("Player", "getInbox", LuaScriptInterface::luaPlayerGetInbox);
registerMethod("Player", "getSkullTime", LuaScriptInterface::luaPlayerGetSkullTime);
registerMethod("Player", "setSkullTime", LuaScriptInterface::luaPlayerSetSkullTime);
registerMethod("Player", "getDeathPenalty", LuaScriptInterface::luaPlayerGetDeathPenalty);
registerMethod("Player", "getExperience", LuaScriptInterface::luaPlayerGetExperience);
registerMethod("Player", "addExperience", LuaScriptInterface::luaPlayerAddExperience);
registerMethod("Player", "removeExperience", LuaScriptInterface::luaPlayerRemoveExperience);
registerMethod("Player", "getLevel", LuaScriptInterface::luaPlayerGetLevel);
registerMethod("Player", "getMagicLevel", LuaScriptInterface::luaPlayerGetMagicLevel);
registerMethod("Player", "getBaseMagicLevel", LuaScriptInterface::luaPlayerGetBaseMagicLevel);
registerMethod("Player", "getMana", LuaScriptInterface::luaPlayerGetMana);
registerMethod("Player", "addMana", LuaScriptInterface::luaPlayerAddMana);
registerMethod("Player", "getMaxMana", LuaScriptInterface::luaPlayerGetMaxMana);
registerMethod("Player", "setMaxMana", LuaScriptInterface::luaPlayerSetMaxMana);
registerMethod("Player", "getManaSpent", LuaScriptInterface::luaPlayerGetManaSpent);
registerMethod("Player", "addManaSpent", LuaScriptInterface::luaPlayerAddManaSpent);
registerMethod("Player", "getBaseMaxHealth", LuaScriptInterface::luaPlayerGetBaseMaxHealth);
registerMethod("Player", "getBaseMaxMana", LuaScriptInterface::luaPlayerGetBaseMaxMana);
registerMethod("Player", "getSkillLevel", LuaScriptInterface::luaPlayerGetSkillLevel);
registerMethod("Player", "getEffectiveSkillLevel", LuaScriptInterface::luaPlayerGetEffectiveSkillLevel);
registerMethod("Player", "getSkillPercent", LuaScriptInterface::luaPlayerGetSkillPercent);
registerMethod("Player", "getSkillTries", LuaScriptInterface::luaPlayerGetSkillTries);
registerMethod("Player", "addSkillTries", LuaScriptInterface::luaPlayerAddSkillTries);
registerMethod("Player", "addOfflineTrainingTime", LuaScriptInterface::luaPlayerAddOfflineTrainingTime);
registerMethod("Player", "getOfflineTrainingTime", LuaScriptInterface::luaPlayerGetOfflineTrainingTime);
registerMethod("Player", "removeOfflineTrainingTime", LuaScriptInterface::luaPlayerRemoveOfflineTrainingTime);
registerMethod("Player", "addOfflineTrainingTries", LuaScriptInterface::luaPlayerAddOfflineTrainingTries);
registerMethod("Player", "getOfflineTrainingSkill", LuaScriptInterface::luaPlayerGetOfflineTrainingSkill);
registerMethod("Player", "setOfflineTrainingSkill", LuaScriptInterface::luaPlayerSetOfflineTrainingSkill);
registerMethod("Player", "getItemCount", LuaScriptInterface::luaPlayerGetItemCount);
registerMethod("Player", "getItemById", LuaScriptInterface::luaPlayerGetItemById);
registerMethod("Player", "getVocation", LuaScriptInterface::luaPlayerGetVocation);
registerMethod("Player", "setVocation", LuaScriptInterface::luaPlayerSetVocation);
registerMethod("Player", "getSex", LuaScriptInterface::luaPlayerGetSex);
registerMethod("Player", "setSex", LuaScriptInterface::luaPlayerSetSex);
registerMethod("Player", "getTown", LuaScriptInterface::luaPlayerGetTown);
registerMethod("Player", "setTown", LuaScriptInterface::luaPlayerSetTown);
registerMethod("Player", "getGuild", LuaScriptInterface::luaPlayerGetGuild);
registerMethod("Player", "setGuild", LuaScriptInterface::luaPlayerSetGuild);
registerMethod("Player", "getGuildLevel", LuaScriptInterface::luaPlayerGetGuildLevel);
registerMethod("Player", "setGuildLevel", LuaScriptInterface::luaPlayerSetGuildLevel);
registerMethod("Player", "getGuildNick", LuaScriptInterface::luaPlayerGetGuildNick);
registerMethod("Player", "setGuildNick", LuaScriptInterface::luaPlayerSetGuildNick);
registerMethod("Player", "getGroup", LuaScriptInterface::luaPlayerGetGroup);
registerMethod("Player", "setGroup", LuaScriptInterface::luaPlayerSetGroup);
registerMethod("Player", "getStamina", LuaScriptInterface::luaPlayerGetStamina);
registerMethod("Player", "setStamina", LuaScriptInterface::luaPlayerSetStamina);
registerMethod("Player", "getSoul", LuaScriptInterface::luaPlayerGetSoul);
registerMethod("Player", "addSoul", LuaScriptInterface::luaPlayerAddSoul);
registerMethod("Player", "getMaxSoul", LuaScriptInterface::luaPlayerGetMaxSoul);
registerMethod("Player", "getBankBalance", LuaScriptInterface::luaPlayerGetBankBalance);
registerMethod("Player", "setBankBalance", LuaScriptInterface::luaPlayerSetBankBalance);
registerMethod("Player", "getStorageValue", LuaScriptInterface::luaPlayerGetStorageValue);
registerMethod("Player", "setStorageValue", LuaScriptInterface::luaPlayerSetStorageValue);
registerMethod("Player", "addItem", LuaScriptInterface::luaPlayerAddItem);
registerMethod("Player", "addItemEx", LuaScriptInterface::luaPlayerAddItemEx);
registerMethod("Player", "removeItem", LuaScriptInterface::luaPlayerRemoveItem);
registerMethod("Player", "getMoney", LuaScriptInterface::luaPlayerGetMoney);
registerMethod("Player", "addMoney", LuaScriptInterface::luaPlayerAddMoney);
registerMethod("Player", "removeMoney", LuaScriptInterface::luaPlayerRemoveMoney);
registerMethod("Player", "showTextDialog", LuaScriptInterface::luaPlayerShowTextDialog);
registerMethod("Player", "sendTextMessage", LuaScriptInterface::luaPlayerSendTextMessage);
registerMethod("Player", "sendChannelMessage", LuaScriptInterface::luaPlayerSendChannelMessage);
registerMethod("Player", "sendPrivateMessage", LuaScriptInterface::luaPlayerSendPrivateMessage);
registerMethod("Player", "channelSay", LuaScriptInterface::luaPlayerChannelSay);
registerMethod("Player", "openChannel", LuaScriptInterface::luaPlayerOpenChannel);
registerMethod("Player", "getSlotItem", LuaScriptInterface::luaPlayerGetSlotItem);
registerMethod("Player", "getParty", LuaScriptInterface::luaPlayerGetParty);
registerMethod("Player", "addOutfit", LuaScriptInterface::luaPlayerAddOutfit);
registerMethod("Player", "addOutfitAddon", LuaScriptInterface::luaPlayerAddOutfitAddon);
registerMethod("Player", "removeOutfit", LuaScriptInterface::luaPlayerRemoveOutfit);
registerMethod("Player", "removeOutfitAddon", LuaScriptInterface::luaPlayerRemoveOutfitAddon);
registerMethod("Player", "hasOutfit", LuaScriptInterface::luaPlayerHasOutfit);
registerMethod("Player", "sendOutfitWindow", LuaScriptInterface::luaPlayerSendOutfitWindow);
registerMethod("Player", "addMount", LuaScriptInterface::luaPlayerAddMount);
registerMethod("Player", "removeMount", LuaScriptInterface::luaPlayerRemoveMount);
registerMethod("Player", "hasMount", LuaScriptInterface::luaPlayerHasMount);
registerMethod("Player", "getPremiumDays", LuaScriptInterface::luaPlayerGetPremiumDays);
registerMethod("Player", "addPremiumDays", LuaScriptInterface::luaPlayerAddPremiumDays);
registerMethod("Player", "removePremiumDays", LuaScriptInterface::luaPlayerRemovePremiumDays);
registerMethod("Player", "hasBlessing", LuaScriptInterface::luaPlayerHasBlessing);
registerMethod("Player", "addBlessing", LuaScriptInterface::luaPlayerAddBlessing);
registerMethod("Player", "removeBlessing", LuaScriptInterface::luaPlayerRemoveBlessing);
registerMethod("Player", "canLearnSpell", LuaScriptInterface::luaPlayerCanLearnSpell);
registerMethod("Player", "learnSpell", LuaScriptInterface::luaPlayerLearnSpell);
registerMethod("Player", "forgetSpell", LuaScriptInterface::luaPlayerForgetSpell);
registerMethod("Player", "hasLearnedSpell", LuaScriptInterface::luaPlayerHasLearnedSpell);
registerMethod("Player", "sendTutorial", LuaScriptInterface::luaPlayerSendTutorial);
registerMethod("Player", "addMapMark", LuaScriptInterface::luaPlayerAddMapMark);
registerMethod("Player", "save", LuaScriptInterface::luaPlayerSave);
registerMethod("Player", "popupFYI", LuaScriptInterface::luaPlayerPopupFYI);
registerMethod("Player", "isPzLocked", LuaScriptInterface::luaPlayerIsPzLocked);
registerMethod("Player", "getClient", LuaScriptInterface::luaPlayerGetClient);
registerMethod("Player", "getHouse", LuaScriptInterface::luaPlayerGetHouse);
registerMethod("Player", "sendHouseWindow", LuaScriptInterface::luaPlayerSendHouseWindow);
registerMethod("Player", "setEditHouse", LuaScriptInterface::luaPlayerSetEditHouse);
registerMethod("Player", "setGhostMode", LuaScriptInterface::luaPlayerSetGhostMode);
registerMethod("Player", "getContainerId", LuaScriptInterface::luaPlayerGetContainerId);
registerMethod("Player", "getContainerById", LuaScriptInterface::luaPlayerGetContainerById);
registerMethod("Player", "getContainerIndex", LuaScriptInterface::luaPlayerGetContainerIndex);
registerMethod("Player", "getInstantSpells", LuaScriptInterface::luaPlayerGetInstantSpells);
registerMethod("Player", "canCast", LuaScriptInterface::luaPlayerCanCast);
registerMethod("Player", "hasChaseMode", LuaScriptInterface::luaPlayerHasChaseMode);
registerMethod("Player", "hasSecureMode", LuaScriptInterface::luaPlayerHasSecureMode);
registerMethod("Player", "getFightMode", LuaScriptInterface::luaPlayerGetFightMode);
// Monster
registerClass("Monster", "Creature", LuaScriptInterface::luaMonsterCreate);
registerMetaMethod("Monster", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Monster", "isMonster", LuaScriptInterface::luaMonsterIsMonster);
registerMethod("Monster", "getType", LuaScriptInterface::luaMonsterGetType);
registerMethod("Monster", "getSpawnPosition", LuaScriptInterface::luaMonsterGetSpawnPosition);
registerMethod("Monster", "isInSpawnRange", LuaScriptInterface::luaMonsterIsInSpawnRange);
registerMethod("Monster", "isIdle", LuaScriptInterface::luaMonsterIsIdle);
registerMethod("Monster", "setIdle", LuaScriptInterface::luaMonsterSetIdle);
registerMethod("Monster", "isTarget", LuaScriptInterface::luaMonsterIsTarget);
registerMethod("Monster", "isOpponent", LuaScriptInterface::luaMonsterIsOpponent);
registerMethod("Monster", "isFriend", LuaScriptInterface::luaMonsterIsFriend);
registerMethod("Monster", "addFriend", LuaScriptInterface::luaMonsterAddFriend);
registerMethod("Monster", "removeFriend", LuaScriptInterface::luaMonsterRemoveFriend);
registerMethod("Monster", "getFriendList", LuaScriptInterface::luaMonsterGetFriendList);
registerMethod("Monster", "getFriendCount", LuaScriptInterface::luaMonsterGetFriendCount);
registerMethod("Monster", "addTarget", LuaScriptInterface::luaMonsterAddTarget);
registerMethod("Monster", "removeTarget", LuaScriptInterface::luaMonsterRemoveTarget);
registerMethod("Monster", "getTargetList", LuaScriptInterface::luaMonsterGetTargetList);
registerMethod("Monster", "getTargetCount", LuaScriptInterface::luaMonsterGetTargetCount);
registerMethod("Monster", "selectTarget", LuaScriptInterface::luaMonsterSelectTarget);
registerMethod("Monster", "searchTarget", LuaScriptInterface::luaMonsterSearchTarget);
// Npc
registerClass("Npc", "Creature", LuaScriptInterface::luaNpcCreate);
registerMetaMethod("Npc", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Npc", "isNpc", LuaScriptInterface::luaNpcIsNpc);
registerMethod("Npc", "setMasterPos", LuaScriptInterface::luaNpcSetMasterPos);
registerMethod("Npc", "getSpeechBubble", LuaScriptInterface::luaNpcGetSpeechBubble);
registerMethod("Npc", "setSpeechBubble", LuaScriptInterface::luaNpcSetSpeechBubble);
// Guild
registerClass("Guild", "", LuaScriptInterface::luaGuildCreate);
registerMetaMethod("Guild", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Guild", "getId", LuaScriptInterface::luaGuildGetId);
registerMethod("Guild", "getName", LuaScriptInterface::luaGuildGetName);
registerMethod("Guild", "getMembersOnline", LuaScriptInterface::luaGuildGetMembersOnline);
registerMethod("Guild", "addRank", LuaScriptInterface::luaGuildAddRank);
registerMethod("Guild", "getRankById", LuaScriptInterface::luaGuildGetRankById);
registerMethod("Guild", "getRankByLevel", LuaScriptInterface::luaGuildGetRankByLevel);
registerMethod("Guild", "getMotd", LuaScriptInterface::luaGuildGetMotd);
registerMethod("Guild", "setMotd", LuaScriptInterface::luaGuildSetMotd);
// Group
registerClass("Group", "", LuaScriptInterface::luaGroupCreate);
registerMetaMethod("Group", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Group", "getId", LuaScriptInterface::luaGroupGetId);
registerMethod("Group", "getName", LuaScriptInterface::luaGroupGetName);
registerMethod("Group", "getFlags", LuaScriptInterface::luaGroupGetFlags);
registerMethod("Group", "getAccess", LuaScriptInterface::luaGroupGetAccess);
registerMethod("Group", "getMaxDepotItems", LuaScriptInterface::luaGroupGetMaxDepotItems);
registerMethod("Group", "getMaxVipEntries", LuaScriptInterface::luaGroupGetMaxVipEntries);
registerMethod("Group", "hasFlag", LuaScriptInterface::luaGroupHasFlag);
// Vocation
registerClass("Vocation", "", LuaScriptInterface::luaVocationCreate);
registerMetaMethod("Vocation", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Vocation", "getId", LuaScriptInterface::luaVocationGetId);
registerMethod("Vocation", "getClientId", LuaScriptInterface::luaVocationGetClientId);
registerMethod("Vocation", "getName", LuaScriptInterface::luaVocationGetName);
registerMethod("Vocation", "getDescription", LuaScriptInterface::luaVocationGetDescription);
registerMethod("Vocation", "getRequiredSkillTries", LuaScriptInterface::luaVocationGetRequiredSkillTries);
registerMethod("Vocation", "getRequiredManaSpent", LuaScriptInterface::luaVocationGetRequiredManaSpent);
registerMethod("Vocation", "getCapacityGain", LuaScriptInterface::luaVocationGetCapacityGain);
registerMethod("Vocation", "getHealthGain", LuaScriptInterface::luaVocationGetHealthGain);
registerMethod("Vocation", "getHealthGainTicks", LuaScriptInterface::luaVocationGetHealthGainTicks);
registerMethod("Vocation", "getHealthGainAmount", LuaScriptInterface::luaVocationGetHealthGainAmount);
registerMethod("Vocation", "getManaGain", LuaScriptInterface::luaVocationGetManaGain);
registerMethod("Vocation", "getManaGainTicks", LuaScriptInterface::luaVocationGetManaGainTicks);
registerMethod("Vocation", "getManaGainAmount", LuaScriptInterface::luaVocationGetManaGainAmount);
registerMethod("Vocation", "getMaxSoul", LuaScriptInterface::luaVocationGetMaxSoul);
registerMethod("Vocation", "getSoulGainTicks", LuaScriptInterface::luaVocationGetSoulGainTicks);
registerMethod("Vocation", "getAttackSpeed", LuaScriptInterface::luaVocationGetAttackSpeed);
registerMethod("Vocation", "getBaseSpeed", LuaScriptInterface::luaVocationGetBaseSpeed);
registerMethod("Vocation", "getDemotion", LuaScriptInterface::luaVocationGetDemotion);
registerMethod("Vocation", "getPromotion", LuaScriptInterface::luaVocationGetPromotion);
// Town
registerClass("Town", "", LuaScriptInterface::luaTownCreate);
registerMetaMethod("Town", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Town", "getId", LuaScriptInterface::luaTownGetId);
registerMethod("Town", "getName", LuaScriptInterface::luaTownGetName);
registerMethod("Town", "getTemplePosition", LuaScriptInterface::luaTownGetTemplePosition);
// House
registerClass("House", "", LuaScriptInterface::luaHouseCreate);
registerMetaMethod("House", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("House", "getId", LuaScriptInterface::luaHouseGetId);
registerMethod("House", "getName", LuaScriptInterface::luaHouseGetName);
registerMethod("House", "getTown", LuaScriptInterface::luaHouseGetTown);
registerMethod("House", "getExitPosition", LuaScriptInterface::luaHouseGetExitPosition);
registerMethod("House", "getRent", LuaScriptInterface::luaHouseGetRent);
registerMethod("House", "getOwnerGuid", LuaScriptInterface::luaHouseGetOwnerGuid);
registerMethod("House", "setOwnerGuid", LuaScriptInterface::luaHouseSetOwnerGuid);
registerMethod("House", "startTrade", LuaScriptInterface::luaHouseStartTrade);
registerMethod("House", "getBeds", LuaScriptInterface::luaHouseGetBeds);
registerMethod("House", "getBedCount", LuaScriptInterface::luaHouseGetBedCount);
registerMethod("House", "getDoors", LuaScriptInterface::luaHouseGetDoors);
registerMethod("House", "getDoorCount", LuaScriptInterface::luaHouseGetDoorCount);
registerMethod("House", "getDoorIdByPosition", LuaScriptInterface::luaHouseGetDoorIdByPosition);
registerMethod("House", "getTiles", LuaScriptInterface::luaHouseGetTiles);
registerMethod("House", "getTileCount", LuaScriptInterface::luaHouseGetTileCount);
registerMethod("House", "canEditAccessList", LuaScriptInterface::luaHouseCanEditAccessList);
registerMethod("House", "getAccessList", LuaScriptInterface::luaHouseGetAccessList);
registerMethod("House", "setAccessList", LuaScriptInterface::luaHouseSetAccessList);
registerMethod("House", "kickPlayer", LuaScriptInterface::luaHouseKickPlayer);
// ItemType
registerClass("ItemType", "", LuaScriptInterface::luaItemTypeCreate);
registerMetaMethod("ItemType", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("ItemType", "isCorpse", LuaScriptInterface::luaItemTypeIsCorpse);
registerMethod("ItemType", "isDoor", LuaScriptInterface::luaItemTypeIsDoor);
registerMethod("ItemType", "isContainer", LuaScriptInterface::luaItemTypeIsContainer);
registerMethod("ItemType", "isFluidContainer", LuaScriptInterface::luaItemTypeIsFluidContainer);
registerMethod("ItemType", "isMovable", LuaScriptInterface::luaItemTypeIsMovable);
registerMethod("ItemType", "isRune", LuaScriptInterface::luaItemTypeIsRune);
registerMethod("ItemType", "isStackable", LuaScriptInterface::luaItemTypeIsStackable);
registerMethod("ItemType", "isReadable", LuaScriptInterface::luaItemTypeIsReadable);
registerMethod("ItemType", "isWritable", LuaScriptInterface::luaItemTypeIsWritable);
registerMethod("ItemType", "isBlocking", LuaScriptInterface::luaItemTypeIsBlocking);
registerMethod("ItemType", "isGroundTile", LuaScriptInterface::luaItemTypeIsGroundTile);
registerMethod("ItemType", "isMagicField", LuaScriptInterface::luaItemTypeIsMagicField);
registerMethod("ItemType", "isUseable", LuaScriptInterface::luaItemTypeIsUseable);
registerMethod("ItemType", "isPickupable", LuaScriptInterface::luaItemTypeIsPickupable);
registerMethod("ItemType", "getType", LuaScriptInterface::luaItemTypeGetType);
registerMethod("ItemType", "getId", LuaScriptInterface::luaItemTypeGetId);
registerMethod("ItemType", "getClientId", LuaScriptInterface::luaItemTypeGetClientId);
registerMethod("ItemType", "getName", LuaScriptInterface::luaItemTypeGetName);
registerMethod("ItemType", "getPluralName", LuaScriptInterface::luaItemTypeGetPluralName);
registerMethod("ItemType", "getArticle", LuaScriptInterface::luaItemTypeGetArticle);
registerMethod("ItemType", "getDescription", LuaScriptInterface::luaItemTypeGetDescription);
registerMethod("ItemType", "getSlotPosition", LuaScriptInterface::luaItemTypeGetSlotPosition);
registerMethod("ItemType", "getCharges", LuaScriptInterface::luaItemTypeGetCharges);
registerMethod("ItemType", "getFluidSource", LuaScriptInterface::luaItemTypeGetFluidSource);
registerMethod("ItemType", "getCapacity", LuaScriptInterface::luaItemTypeGetCapacity);
registerMethod("ItemType", "getWeight", LuaScriptInterface::luaItemTypeGetWeight);
registerMethod("ItemType", "getHitChance", LuaScriptInterface::luaItemTypeGetHitChance);
registerMethod("ItemType", "getShootRange", LuaScriptInterface::luaItemTypeGetShootRange);
registerMethod("ItemType", "getAttack", LuaScriptInterface::luaItemTypeGetAttack);
registerMethod("ItemType", "getDefense", LuaScriptInterface::luaItemTypeGetDefense);
registerMethod("ItemType", "getExtraDefense", LuaScriptInterface::luaItemTypeGetExtraDefense);
registerMethod("ItemType", "getArmor", LuaScriptInterface::luaItemTypeGetArmor);
registerMethod("ItemType", "getWeaponType", LuaScriptInterface::luaItemTypeGetWeaponType);
registerMethod("ItemType", "getElementType", LuaScriptInterface::luaItemTypeGetElementType);
registerMethod("ItemType", "getElementDamage", LuaScriptInterface::luaItemTypeGetElementDamage);
registerMethod("ItemType", "getTransformEquipId", LuaScriptInterface::luaItemTypeGetTransformEquipId);
registerMethod("ItemType", "getTransformDeEquipId", LuaScriptInterface::luaItemTypeGetTransformDeEquipId);
registerMethod("ItemType", "getDestroyId", LuaScriptInterface::luaItemTypeGetDestroyId);
registerMethod("ItemType", "getDecayId", LuaScriptInterface::luaItemTypeGetDecayId);
registerMethod("ItemType", "getRequiredLevel", LuaScriptInterface::luaItemTypeGetRequiredLevel);
registerMethod("ItemType", "getAmmoType", LuaScriptInterface::luaItemTypeGetAmmoType);
registerMethod("ItemType", "getCorpseType", LuaScriptInterface::luaItemTypeGetCorpseType);
registerMethod("ItemType", "hasSubType", LuaScriptInterface::luaItemTypeHasSubType);
// Combat
registerClass("Combat", "", LuaScriptInterface::luaCombatCreate);
registerMetaMethod("Combat", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Combat", "setParameter", LuaScriptInterface::luaCombatSetParameter);
registerMethod("Combat", "setFormula", LuaScriptInterface::luaCombatSetFormula);
registerMethod("Combat", "setArea", LuaScriptInterface::luaCombatSetArea);
registerMethod("Combat", "addCondition", LuaScriptInterface::luaCombatAddCondition);
registerMethod("Combat", "setCallback", LuaScriptInterface::luaCombatSetCallback);
registerMethod("Combat", "setOrigin", LuaScriptInterface::luaCombatSetOrigin);
registerMethod("Combat", "execute", LuaScriptInterface::luaCombatExecute);
// Condition
registerClass("Condition", "", LuaScriptInterface::luaConditionCreate);
registerMetaMethod("Condition", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMetaMethod("Condition", "__gc", LuaScriptInterface::luaConditionDelete);
registerMethod("Condition", "delete", LuaScriptInterface::luaConditionDelete);
registerMethod("Condition", "getId", LuaScriptInterface::luaConditionGetId);
registerMethod("Condition", "getSubId", LuaScriptInterface::luaConditionGetSubId);
registerMethod("Condition", "getType", LuaScriptInterface::luaConditionGetType);
registerMethod("Condition", "getIcons", LuaScriptInterface::luaConditionGetIcons);
registerMethod("Condition", "getEndTime", LuaScriptInterface::luaConditionGetEndTime);
registerMethod("Condition", "clone", LuaScriptInterface::luaConditionClone);
registerMethod("Condition", "getTicks", LuaScriptInterface::luaConditionGetTicks);
registerMethod("Condition", "setTicks", LuaScriptInterface::luaConditionSetTicks);
registerMethod("Condition", "setParameter", LuaScriptInterface::luaConditionSetParameter);
registerMethod("Condition", "setFormula", LuaScriptInterface::luaConditionSetFormula);
registerMethod("Condition", "setOutfit", LuaScriptInterface::luaConditionSetOutfit);
registerMethod("Condition", "addDamage", LuaScriptInterface::luaConditionAddDamage);
// MonsterType
registerClass("MonsterType", "", LuaScriptInterface::luaMonsterTypeCreate);
registerMetaMethod("MonsterType", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("MonsterType", "isAttackable", LuaScriptInterface::luaMonsterTypeIsAttackable);
registerMethod("MonsterType", "isConvinceable", LuaScriptInterface::luaMonsterTypeIsConvinceable);
registerMethod("MonsterType", "isSummonable", LuaScriptInterface::luaMonsterTypeIsSummonable);
registerMethod("MonsterType", "isIllusionable", LuaScriptInterface::luaMonsterTypeIsIllusionable);
registerMethod("MonsterType", "isHostile", LuaScriptInterface::luaMonsterTypeIsHostile);
registerMethod("MonsterType", "isPushable", LuaScriptInterface::luaMonsterTypeIsPushable);
registerMethod("MonsterType", "isHealthShown", LuaScriptInterface::luaMonsterTypeIsHealthShown);
registerMethod("MonsterType", "canPushItems", LuaScriptInterface::luaMonsterTypeCanPushItems);
registerMethod("MonsterType", "canPushCreatures", LuaScriptInterface::luaMonsterTypeCanPushCreatures);
registerMethod("MonsterType", "getName", LuaScriptInterface::luaMonsterTypeGetName);
registerMethod("MonsterType", "getNameDescription", LuaScriptInterface::luaMonsterTypeGetNameDescription);
registerMethod("MonsterType", "getHealth", LuaScriptInterface::luaMonsterTypeGetHealth);
registerMethod("MonsterType", "getMaxHealth", LuaScriptInterface::luaMonsterTypeGetMaxHealth);
registerMethod("MonsterType", "getRunHealth", LuaScriptInterface::luaMonsterTypeGetRunHealth);
registerMethod("MonsterType", "getExperience", LuaScriptInterface::luaMonsterTypeGetExperience);
registerMethod("MonsterType", "getCombatImmunities", LuaScriptInterface::luaMonsterTypeGetCombatImmunities);
registerMethod("MonsterType", "getConditionImmunities", LuaScriptInterface::luaMonsterTypeGetConditionImmunities);
registerMethod("MonsterType", "getAttackList", LuaScriptInterface::luaMonsterTypeGetAttackList);
registerMethod("MonsterType", "getDefenseList", LuaScriptInterface::luaMonsterTypeGetDefenseList);
registerMethod("MonsterType", "getElementList", LuaScriptInterface::luaMonsterTypeGetElementList);
registerMethod("MonsterType", "getVoices", LuaScriptInterface::luaMonsterTypeGetVoices);
registerMethod("MonsterType", "getLoot", LuaScriptInterface::luaMonsterTypeGetLoot);
registerMethod("MonsterType", "getCreatureEvents", LuaScriptInterface::luaMonsterTypeGetCreatureEvents);
registerMethod("MonsterType", "getSummonList", LuaScriptInterface::luaMonsterTypeGetSummonList);
registerMethod("MonsterType", "getMaxSummons", LuaScriptInterface::luaMonsterTypeGetMaxSummons);
registerMethod("MonsterType", "getArmor", LuaScriptInterface::luaMonsterTypeGetArmor);
registerMethod("MonsterType", "getDefense", LuaScriptInterface::luaMonsterTypeGetDefense);
registerMethod("MonsterType", "getOutfit", LuaScriptInterface::luaMonsterTypeGetOutfit);
registerMethod("MonsterType", "getRace", LuaScriptInterface::luaMonsterTypeGetRace);
registerMethod("MonsterType", "getCorpseId", LuaScriptInterface::luaMonsterTypeGetCorpseId);
registerMethod("MonsterType", "getManaCost", LuaScriptInterface::luaMonsterTypeGetManaCost);
registerMethod("MonsterType", "getBaseSpeed", LuaScriptInterface::luaMonsterTypeGetBaseSpeed);
registerMethod("MonsterType", "getLight", LuaScriptInterface::luaMonsterTypeGetLight);
registerMethod("MonsterType", "getStaticAttackChance", LuaScriptInterface::luaMonsterTypeGetStaticAttackChance);
registerMethod("MonsterType", "getTargetDistance", LuaScriptInterface::luaMonsterTypeGetTargetDistance);
registerMethod("MonsterType", "getYellChance", LuaScriptInterface::luaMonsterTypeGetYellChance);
registerMethod("MonsterType", "getYellSpeedTicks", LuaScriptInterface::luaMonsterTypeGetYellSpeedTicks);
registerMethod("MonsterType", "getChangeTargetChance", LuaScriptInterface::luaMonsterTypeGetChangeTargetChance);
registerMethod("MonsterType", "getChangeTargetSpeed", LuaScriptInterface::luaMonsterTypeGetChangeTargetSpeed);
// Party
registerClass("Party", "", nullptr);
registerMetaMethod("Party", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Party", "disband", LuaScriptInterface::luaPartyDisband);
registerMethod("Party", "getLeader", LuaScriptInterface::luaPartyGetLeader);
registerMethod("Party", "setLeader", LuaScriptInterface::luaPartySetLeader);
registerMethod("Party", "getMembers", LuaScriptInterface::luaPartyGetMembers);
registerMethod("Party", "getMemberCount", LuaScriptInterface::luaPartyGetMemberCount);
registerMethod("Party", "getInvitees", LuaScriptInterface::luaPartyGetInvitees);
registerMethod("Party", "getInviteeCount", LuaScriptInterface::luaPartyGetInviteeCount);
registerMethod("Party", "addInvite", LuaScriptInterface::luaPartyAddInvite);
registerMethod("Party", "removeInvite", LuaScriptInterface::luaPartyRemoveInvite);
registerMethod("Party", "addMember", LuaScriptInterface::luaPartyAddMember);
registerMethod("Party", "removeMember", LuaScriptInterface::luaPartyRemoveMember);
registerMethod("Party", "isSharedExperienceActive", LuaScriptInterface::luaPartyIsSharedExperienceActive);
registerMethod("Party", "isSharedExperienceEnabled", LuaScriptInterface::luaPartyIsSharedExperienceEnabled);
registerMethod("Party", "shareExperience", LuaScriptInterface::luaPartyShareExperience);
registerMethod("Party", "setSharedExperience", LuaScriptInterface::luaPartySetSharedExperience);
// Spells
registerClass("Spell", "", LuaScriptInterface::luaSpellCreate);
registerMetaMethod("Spell", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Spell", "getManaCost", LuaScriptInterface::luaSpellGetManaCost);
registerMethod("Spell", "getSoulCost", LuaScriptInterface::luaSpellGetSoulCost);
registerMethod("Spell", "isPremium", LuaScriptInterface::luaSpellIsPremium);
registerMethod("Spell", "isLearnable", LuaScriptInterface::luaSpellIsLearnable);
}

#undef registerEnum
#undef registerEnumIn
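
// The helpers below build each scripting class out of two tables: a global
// method table (what scripts index) and a registry metatable (set on pushed
// userdata). In rough Lua terms, registerClass("Player", "Creature", f) does:
//
//   Player = setmetatable({}, { __call = f, __index = Creature })
//   registry["Player"] = {
//       __metatable = Player, __index = Player,
//       [string.byte('h')] = hash,           -- class-name hash, compared by isType()
//       [string.byte('p')] = depth,          -- inheritance depth
//       [string.byte('t')] = LuaData_Player, -- userdata type tag
//   }
//
// so scripts can write e.g. local p = Player(cid) and resolve methods through
// the __index chain up to the base class.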
void LuaScriptInterface::registerClass(const std::string& className, const std::string& baseClass, lua_CFunction newFunction/* = nullptr*/)
{
// className = {}
lua_newtable(luaState);
lua_pushvalue(luaState, -1);
lua_setglobal(luaState, className.c_str());
int methods = lua_gettop(luaState);
// methodsTable = {}
lua_newtable(luaState);
int methodsTable = lua_gettop(luaState);
if (newFunction) {
// className.__call = newFunction
lua_pushcfunction(luaState, newFunction);
lua_setfield(luaState, methodsTable, "__call");
}
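// If there is a base class, its global method table becomes this class'
// __index fallback, and the parent counter feeds the 'p' depth tag below.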
uint32_t parents = 0;
if (!baseClass.empty()) {
lua_getglobal(luaState, baseClass.c_str());
lua_rawgeti(luaState, -1, 'p');
parents = getNumber<uint32_t>(luaState, -1) + 1;
lua_pop(luaState, 1);
lua_setfield(luaState, methodsTable, "__index");
}
// setmetatable(className, methodsTable)
lua_setmetatable(luaState, methods);
// className.metatable = {}
luaL_newmetatable(luaState, className.c_str());
int metatable = lua_gettop(luaState);
// className.metatable.__metatable = className
lua_pushvalue(luaState, methods);
lua_setfield(luaState, metatable, "__metatable");
// className.metatable.__index = className
lua_pushvalue(luaState, methods);
lua_setfield(luaState, metatable, "__index");
// className.metatable['h'] = hash
lua_pushnumber(luaState, std::hash<std::string>()(className));
lua_rawseti(luaState, metatable, 'h');
// className.metatable['p'] = parents
lua_pushnumber(luaState, parents);
lua_rawseti(luaState, metatable, 'p');
// className.metatable['t'] = type
if (className == "Item") {
lua_pushnumber(luaState, LuaData_Item);
} else if (className == "Container") {
lua_pushnumber(luaState, LuaData_Container);
} else if (className == "Teleport") {
lua_pushnumber(luaState, LuaData_Teleport);
} else if (className == "Player") {
lua_pushnumber(luaState, LuaData_Player);
} else if (className == "Monster") {
lua_pushnumber(luaState, LuaData_Monster);
} else if (className == "Npc") {
lua_pushnumber(luaState, LuaData_Npc);
} else if (className == "Tile") {
lua_pushnumber(luaState, LuaData_Tile);
} else {
lua_pushnumber(luaState, LuaData_Unknown);
}
lua_rawseti(luaState, metatable, 't');
// pop className, className.metatable
lua_pop(luaState, 2);
}

void LuaScriptInterface::registerTable(const std::string& tableName)
{
// _G[tableName] = {}
lua_newtable(luaState);
lua_setglobal(luaState, tableName.c_str());
}

void LuaScriptInterface::registerMethod(const std::string& globalName, const std::string& methodName, lua_CFunction func)
{
// globalName.methodName = func
lua_getglobal(luaState, globalName.c_str());
lua_pushcfunction(luaState, func);
lua_setfield(luaState, -2, methodName.c_str());
// pop globalName
lua_pop(luaState, 1);
}

void LuaScriptInterface::registerMetaMethod(const std::string& className, const std::string& methodName, lua_CFunction func)
{
// className.metatable.methodName = func
luaL_getmetatable(luaState, className.c_str());
lua_pushcfunction(luaState, func);
lua_setfield(luaState, -2, methodName.c_str());
// pop className.metatable
lua_pop(luaState, 1);
}

void LuaScriptInterface::registerGlobalMethod(const std::string& functionName, lua_CFunction func)
{
// _G[functionName] = func
lua_pushcfunction(luaState, func);
lua_setglobal(luaState, functionName.c_str());
}

void LuaScriptInterface::registerVariable(const std::string& tableName, const std::string& name, lua_Number value)
{
// tableName.name = value
lua_getglobal(luaState, tableName.c_str());
setField(luaState, name.c_str(), value);
// pop tableName
lua_pop(luaState, 1);
}

void LuaScriptInterface::registerGlobalVariable(const std::string& name, lua_Number value)
{
// _G[name] = value
lua_pushnumber(luaState, value);
lua_setglobal(luaState, name.c_str());
}

void LuaScriptInterface::registerGlobalBoolean(const std::string& name, bool value)
{
// _G[name] = value
pushBoolean(luaState, value);
lua_setglobal(luaState, name.c_str());
}

int LuaScriptInterface::luaDoPlayerAddItem(lua_State* L)
{
//doPlayerAddItem(cid, itemid, <optional: default: 1> count/subtype, <optional: default: 1> canDropOnMap)
//doPlayerAddItem(cid, itemid, <optional: default: 1> count, <optional: default: 1> canDropOnMap, <optional: default: 1>subtype)
Player* player = getPlayer(L, 1);
if (!player) {
reportErrorFunc(getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
uint16_t itemId = getNumber<uint16_t>(L, 2);
int32_t count = getNumber<int32_t>(L, 3, 1);
bool canDropOnMap = getBoolean(L, 4, true);
uint16_t subType = getNumber<uint16_t>(L, 5, 1);
const ItemType& it = Item::items[itemId];
int32_t itemCount;
auto parameters = lua_gettop(L);
if (parameters > 4) {
//subtype already supplied; count is the item amount
itemCount = std::max<int32_t>(1, count);
} else if (it.hasSubType()) {
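//no explicit subtype argument: count doubles as the subtype, so for
//stackables the item count becomes ceil(count / 100)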
if (it.stackable) {
itemCount = static_cast<int32_t>(std::ceil(static_cast<float>(count) / 100));
} else {
itemCount = 1;
}
subType = count;
} else {
itemCount = std::max<int32_t>(1, count);
}
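// Stackable items are created in slices of at most 100 units; subType holds
// the amount still to be created and is drained slice by slice.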
while (itemCount > 0) {
uint16_t stackCount = subType;
if (it.stackable && stackCount > 100) {
stackCount = 100;
}
Item* newItem = Item::CreateItem(itemId, stackCount);
if (!newItem) {
reportErrorFunc(getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
if (it.stackable) {
subType -= stackCount;
}
ReturnValue ret = g_game.internalPlayerAddItem(player, newItem, canDropOnMap);
if (ret != RETURNVALUE_NOERROR) {
delete newItem;
pushBoolean(L, false);
return 1;
}
if (--itemCount == 0) {
if (newItem->getParent()) {
uint32_t uid = getScriptEnv()->addThing(newItem);
lua_pushnumber(L, uid);
return 1;
} else {
//stackable item stacked with existing object, newItem will be released
pushBoolean(L, false);
return 1;
}
}
}
pushBoolean(L, false);
return 1;
}

int LuaScriptInterface::luaDoTileAddItemEx(lua_State* L)
{
//doTileAddItemEx(pos, uid)
const Position& pos = getPosition(L, 1);
Tile* tile = g_game.map.getTile(pos);
if (!tile) {
std::ostringstream ss;
ss << pos << ' ' << getErrorDesc(LUA_ERROR_TILE_NOT_FOUND);
reportErrorFunc(ss.str());
pushBoolean(L, false);
return 1;
}
uint32_t uid = getNumber<uint32_t>(L, 2);
Item* item = getScriptEnv()->getItemByUID(uid);
if (!item) {
reportErrorFunc(getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
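// Only an item still held by the virtual cylinder (created but not yet
// placed anywhere, e.g. via doCreateItemEx) can be added to the tile.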
if (item->getParent() != VirtualCylinder::virtualCylinder) {
reportErrorFunc("Item already has a parent");
pushBoolean(L, false);
return 1;
}
lua_pushnumber(L, g_game.internalAddItem(tile, item));
return 1;
}

int LuaScriptInterface::luaDebugPrint(lua_State* L)
{
//debugPrint(text)
reportErrorFunc(getString(L, -1));
return 0;
}

int LuaScriptInterface::luaGetWorldTime(lua_State* L)
{
//getWorldTime()
uint32_t time = g_game.getLightHour();
lua_pushnumber(L, time);
return 1;
}

int LuaScriptInterface::luaGetWorldLight(lua_State* L)
{
//getWorldLight()
LightInfo lightInfo = g_game.getWorldLightInfo();
lua_pushnumber(L, lightInfo.level);
lua_pushnumber(L, lightInfo.color);
return 2;
}

int LuaScriptInterface::luaGetWorldUpTime(lua_State* L)
{
//getWorldUpTime()
uint64_t uptime = (OTSYS_TIME() - ProtocolStatus::start) / 1000;
lua_pushnumber(L, uptime);
return 1;
}
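
// getArea expects a table of row-tables of numbers on top of the stack,
// e.g. { {1, 1, 1}, {1, 3, 1}, {1, 1, 1} } (3 conventionally marks the
// centre); it flattens the cells into 'list', counts the rows and pops
// the table it consumed.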
bool LuaScriptInterface::getArea(lua_State* L, std::list<uint32_t>& list, uint32_t& rows)
{
lua_pushnil(L);
for (rows = 0; lua_next(L, -2) != 0; ++rows) {
if (!isTable(L, -1)) {
return false;
}
lua_pushnil(L);
while (lua_next(L, -2) != 0) {
if (!isNumber(L, -1)) {
return false;
}
list.push_back(getNumber<uint32_t>(L, -1));
lua_pop(L, 1);
}
lua_pop(L, 1);
}
lua_pop(L, 1);
return (rows != 0);
}

int LuaScriptInterface::luaCreateCombatArea(lua_State* L)
{
//createCombatArea( {area}, <optional> {extArea} )
ScriptEnvironment* env = getScriptEnv();
if (env->getScriptId() != EVENT_ID_LOADING) {
reportErrorFunc("This function can only be used while loading the script.");
pushBoolean(L, false);
return 1;
}
uint32_t areaId = g_luaEnvironment.createAreaObject(env->getScriptInterface());
AreaCombat* area = g_luaEnvironment.getAreaObject(areaId);
int parameters = lua_gettop(L);
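// getArea() consumes the table on top of the stack, so the optional extended
// area (argument 2) must be parsed before the base area (argument 1).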
if (parameters >= 2) {
uint32_t rowsExtArea;
std::list<uint32_t> listExtArea;
if (!isTable(L, 2) || !getArea(L, listExtArea, rowsExtArea)) {
reportErrorFunc("Invalid extended area table.");
pushBoolean(L, false);
return 1;
}
area->setupExtArea(listExtArea, rowsExtArea);
}
uint32_t rowsArea = 0;
std::list<uint32_t> listArea;
if (!isTable(L, 1) || !getArea(L, listArea, rowsArea)) {
reportErrorFunc("Invalid area table.");
pushBoolean(L, false);
return 1;
}
area->setupArea(listArea, rowsArea);
lua_pushnumber(L, areaId);
return 1;
}
int LuaScriptInterface::luaDoAreaCombatHealth(lua_State* L)
{
//doAreaCombatHealth(cid, type, pos, area, min, max, effect[, origin = ORIGIN_SPELL])
Creature* creature = getCreature(L, 1);
if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
uint32_t areaId = getNumber<uint32_t>(L, 4);
const AreaCombat* area = g_luaEnvironment.getAreaObject(areaId);
if (area || areaId == 0) {
CombatType_t combatType = getNumber<CombatType_t>(L, 2);
CombatParams params;
params.combatType = combatType;
params.impactEffect = getNumber<uint8_t>(L, 7);
CombatDamage damage;
damage.origin = getNumber<CombatOrigin>(L, 8, ORIGIN_SPELL);
damage.primary.type = combatType;
damage.primary.value = normal_random(getNumber<int32_t>(L, 5), getNumber<int32_t>(L, 6));
Combat::doCombatHealth(creature, getPosition(L, 3), area, damage, params);
pushBoolean(L, true);
} else {
reportErrorFunc(getErrorDesc(LUA_ERROR_AREA_NOT_FOUND));
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaDoTargetCombatHealth(lua_State* L)
{
//doTargetCombatHealth(cid, target, type, min, max, effect[, origin = ORIGIN_SPELL])
Creature* creature = getCreature(L, 1);
if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
Creature* target = getCreature(L, 2);
if (!target) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
CombatType_t combatType = getNumber<CombatType_t>(L, 3);
CombatParams params;
params.combatType = combatType;
params.impactEffect = getNumber<uint8_t>(L, 6);
CombatDamage damage;
damage.origin = getNumber<CombatOrigin>(L, 7, ORIGIN_SPELL);
damage.primary.type = combatType;
damage.primary.value = normal_random(getNumber<int32_t>(L, 4), getNumber<int32_t>(L, 5));
Combat::doCombatHealth(creature, target, damage, params);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaDoAreaCombatMana(lua_State* L)
{
//doAreaCombatMana(cid, pos, area, min, max, effect[, origin = ORIGIN_SPELL])
Creature* creature = getCreature(L, 1);
if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
uint32_t areaId = getNumber<uint32_t>(L, 3);
const AreaCombat* area = g_luaEnvironment.getAreaObject(areaId);
if (area || areaId == 0) {
CombatParams params;
params.impactEffect = getNumber<uint8_t>(L, 6);
CombatDamage damage;
damage.origin = getNumber<CombatOrigin>(L, 7, ORIGIN_SPELL);
damage.primary.type = COMBAT_MANADRAIN;
damage.primary.value = normal_random(getNumber<int32_t>(L, 4), getNumber<int32_t>(L, 5));
Position pos = getPosition(L, 2);
Combat::doCombatMana(creature, pos, area, damage, params);
pushBoolean(L, true);
} else {
reportErrorFunc(getErrorDesc(LUA_ERROR_AREA_NOT_FOUND));
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaDoTargetCombatMana(lua_State* L)
{
//doTargetCombatMana(cid, target, min, max, effect[, origin = ORIGIN_SPELL])
Creature* creature = getCreature(L, 1);
if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
Creature* target = getCreature(L, 2);
if (!target) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
CombatParams params;
params.impactEffect = getNumber<uint8_t>(L, 5);
CombatDamage damage;
damage.origin = getNumber<CombatOrigin>(L, 6, ORIGIN_SPELL);
damage.primary.type = COMBAT_MANADRAIN;
damage.primary.value = normal_random(getNumber<int32_t>(L, 3), getNumber<int32_t>(L, 4));
Combat::doCombatMana(creature, target, damage, params);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaDoAreaCombatCondition(lua_State* L)
{
//doAreaCombatCondition(cid, pos, area, condition, effect)
Creature* creature = getCreature(L, 1);
if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
const Condition* condition = getUserdata<Condition>(L, 4);
if (!condition) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CONDITION_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
uint32_t areaId = getNumber<uint32_t>(L, 3);
const AreaCombat* area = g_luaEnvironment.getAreaObject(areaId);
if (area || areaId == 0) {
CombatParams params;
params.impactEffect = getNumber<uint8_t>(L, 5);
params.conditionList.emplace_front(condition->clone());
Combat::doCombatCondition(creature, getPosition(L, 2), area, params);
pushBoolean(L, true);
} else {
reportErrorFunc(getErrorDesc(LUA_ERROR_AREA_NOT_FOUND));
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaDoTargetCombatCondition(lua_State* L)
{
//doTargetCombatCondition(cid, target, condition, effect)
Creature* creature = getCreature(L, 1);
if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
Creature* target = getCreature(L, 2);
if (!target) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
const Condition* condition = getUserdata<Condition>(L, 3);
if (!condition) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CONDITION_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
CombatParams params;
params.impactEffect = getNumber<uint8_t>(L, 4);
params.conditionList.emplace_front(condition->clone());
Combat::doCombatCondition(creature, target, params);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaDoAreaCombatDispel(lua_State* L)
{
//doAreaCombatDispel(cid, pos, area, type, effect)
Creature* creature = getCreature(L, 1);
if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
uint32_t areaId = getNumber<uint32_t>(L, 3);
const AreaCombat* area = g_luaEnvironment.getAreaObject(areaId);
if (area || areaId == 0) {
CombatParams params;
params.impactEffect = getNumber<uint8_t>(L, 5);
params.dispelType = getNumber<ConditionType_t>(L, 4);
Combat::doCombatDispel(creature, getPosition(L, 2), area, params);
pushBoolean(L, true);
} else {
reportErrorFunc(getErrorDesc(LUA_ERROR_AREA_NOT_FOUND));
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaDoTargetCombatDispel(lua_State* L)
{
//doTargetCombatDispel(cid, target, type, effect)
Creature* creature = getCreature(L, 1);
if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
Creature* target = getCreature(L, 2);
if (!target) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
CombatParams params;
params.dispelType = getNumber<ConditionType_t>(L, 3);
params.impactEffect = getNumber<uint8_t>(L, 4);
Combat::doCombatDispel(creature, target, params);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaDoChallengeCreature(lua_State* L)
{
//doChallengeCreature(cid, target)
Creature* creature = getCreature(L, 1);
if (!creature) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
Creature* target = getCreature(L, 2);
if (!target) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
target->challengeCreature(creature);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaIsValidUID(lua_State* L)
{
//isValidUID(uid)
pushBoolean(L, getScriptEnv()->getThingByUID(getNumber<uint32_t>(L, -1)) != nullptr);
return 1;
}
int LuaScriptInterface::luaIsDepot(lua_State* L)
{
//isDepot(uid)
Container* container = getScriptEnv()->getContainerByUID(getNumber<uint32_t>(L, -1));
pushBoolean(L, container && container->getDepotLocker());
return 1;
}
int LuaScriptInterface::luaIsMoveable(lua_State* L)
{
//isMoveable(uid)
//isMovable(uid)
Thing* thing = getScriptEnv()->getThingByUID(getNumber<uint32_t>(L, -1));
pushBoolean(L, thing && thing->isPushable());
return 1;
}
int LuaScriptInterface::luaDoAddContainerItem(lua_State* L)
{
//doAddContainerItem(uid, itemid, <optional> count/subtype)
uint32_t uid = getNumber<uint32_t>(L, 1);
ScriptEnvironment* env = getScriptEnv();
Container* container = env->getContainerByUID(uid);
if (!container) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CONTAINER_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
uint16_t itemId = getNumber<uint16_t>(L, 2);
const ItemType& it = Item::items[itemId];
int32_t itemCount = 1;
int32_t subType = 1;
uint32_t count = getNumber<uint32_t>(L, 3, 1);
if (it.hasSubType()) {
if (it.stackable) {
itemCount = static_cast<int32_t>(std::ceil(static_cast<float>(count) / 100));
}
subType = count;
} else {
itemCount = std::max<int32_t>(1, count);
}
while (itemCount > 0) {
int32_t stackCount = std::min<int32_t>(100, subType);
Item* newItem = Item::CreateItem(itemId, stackCount);
if (!newItem) {
reportErrorFunc(getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
if (it.stackable) {
subType -= stackCount;
}
ReturnValue ret = g_game.internalAddItem(container, newItem);
if (ret != RETURNVALUE_NOERROR) {
delete newItem;
pushBoolean(L, false);
return 1;
}
if (--itemCount == 0) {
if (newItem->getParent()) {
lua_pushnumber(L, env->addThing(newItem));
} else {
//stackable item stacked with existing object, newItem will be released
pushBoolean(L, false);
}
return 1;
}
}
pushBoolean(L, false);
return 1;
}
int LuaScriptInterface::luaGetDepotId(lua_State* L)
{
//getDepotId(uid)
uint32_t uid = getNumber<uint32_t>(L, -1);
Container* container = getScriptEnv()->getContainerByUID(uid);
if (!container) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CONTAINER_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
DepotLocker* depotLocker = container->getDepotLocker();
if (!depotLocker) {
reportErrorFunc("Depot not found");
pushBoolean(L, false);
return 1;
}
lua_pushnumber(L, depotLocker->getDepotId());
return 1;
}
int LuaScriptInterface::luaDoSetCreatureLight(lua_State* L)
{
//doSetCreatureLight(cid, lightLevel, lightColor, time)
Creature* creature = getCreature(L, 1);
if (!creature) {
reportErrorFunc(getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
uint16_t level = getNumber<uint16_t>(L, 2);
uint16_t color = getNumber<uint16_t>(L, 3);
uint32_t time = getNumber<uint32_t>(L, 4);
Condition* condition = Condition::createCondition(CONDITIONID_COMBAT, CONDITION_LIGHT, time, level | (color << 8));
creature->addCondition(condition);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaAddEvent(lua_State* L)
{
//addEvent(callback, delay, ...)
lua_State* globalState = g_luaEnvironment.getLuaState();
if (!globalState) {
reportErrorFunc("No valid script interface!");
pushBoolean(L, false);
return 1;
} else if (globalState != L) {
lua_xmove(L, globalState, lua_gettop(L));
}
int parameters = lua_gettop(globalState);
if (!isFunction(globalState, -parameters)) { //-parameters addresses the first argument from the left (the callback)
reportErrorFunc("callback parameter should be a function.");
pushBoolean(L, false);
return 1;
}
if (g_config.getBoolean(ConfigManager::WARN_UNSAFE_SCRIPTS) || g_config.getBoolean(ConfigManager::CONVERT_UNSAFE_SCRIPTS)) {
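//metatables of registered classes are assumed to store their LuaDataType
//under the integer key 't'; any wrapped game object that is not a Tile may
//already be destroyed by the time the event fires, hence the warning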
std::vector<std::pair<int32_t, LuaDataType>> indexes;
for (int i = 3; i <= parameters; ++i) {
if (lua_getmetatable(globalState, i) == 0) {
continue;
}
lua_rawgeti(globalState, -1, 't');
LuaDataType type = getNumber<LuaDataType>(globalState, -1);
if (type != LuaData_Unknown && type != LuaData_Tile) {
indexes.push_back({i, type});
}
lua_pop(globalState, 2);
}
if (!indexes.empty()) {
if (g_config.getBoolean(ConfigManager::WARN_UNSAFE_SCRIPTS)) {
bool plural = indexes.size() > 1;
std::string warningString = "Argument";
if (plural) {
warningString += 's';
}
for (const auto& entry : indexes) {
if (entry == indexes.front()) {
warningString += ' ';
} else if (entry == indexes.back()) {
warningString += " and ";
} else {
warningString += ", ";
}
warningString += '#';
warningString += std::to_string(entry.first);
}
if (plural) {
warningString += " are unsafe";
} else {
warningString += " is unsafe";
}
reportErrorFunc(warningString);
}
if (g_config.getBoolean(ConfigManager::CONVERT_UNSAFE_SCRIPTS)) {
for (const auto& entry : indexes) {
switch (entry.second) {
case LuaData_Item:
case LuaData_Container:
case LuaData_Teleport: {
lua_getglobal(globalState, "Item");
lua_getfield(globalState, -1, "getUniqueId");
break;
}
case LuaData_Player:
case LuaData_Monster:
case LuaData_Npc: {
lua_getglobal(globalState, "Creature");
lua_getfield(globalState, -1, "getId");
break;
}
default:
break;
}
lua_replace(globalState, -2);
lua_pushvalue(globalState, entry.first);
lua_call(globalState, 1, 1);
lua_replace(globalState, entry.first);
}
}
}
}
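//luaL_ref pops the value on top of the stack into the registry, so the
//extra arguments are anchored right-to-left; the delay is then read and
//popped, leaving the callback on top for the final ref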
LuaTimerEventDesc eventDesc;
for (int i = 0; i < parameters - 2; ++i) { //-2 because addEvent needs at least two parameters
eventDesc.parameters.push_back(luaL_ref(globalState, LUA_REGISTRYINDEX));
}
uint32_t delay = std::max<uint32_t>(100, getNumber<uint32_t>(globalState, 2));
lua_pop(globalState, 1);
eventDesc.function = luaL_ref(globalState, LUA_REGISTRYINDEX);
eventDesc.scriptId = getScriptEnv()->getScriptId();
auto& lastTimerEventId = g_luaEnvironment.lastEventTimerId;
eventDesc.eventId = g_scheduler.addEvent(createSchedulerTask(
delay, std::bind(&LuaEnvironment::executeTimerEvent, &g_luaEnvironment, lastTimerEventId)
));
g_luaEnvironment.timerEvents.emplace(lastTimerEventId, std::move(eventDesc));
lua_pushnumber(L, lastTimerEventId++);
return 1;
}
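//example (Lua):
//  local eventId = addEvent(function(text) print(text) end, 1000, "tick")
//  stopEvent(eventId) --cancels the event if it has not fired yet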
int LuaScriptInterface::luaStopEvent(lua_State* L)
{
//stopEvent(eventid)
lua_State* globalState = g_luaEnvironment.getLuaState();
if (!globalState) {
reportErrorFunc("No valid script interface!");
pushBoolean(L, false);
return 1;
}
uint32_t eventId = getNumber<uint32_t>(L, 1);
auto& timerEvents = g_luaEnvironment.timerEvents;
auto it = timerEvents.find(eventId);
if (it == timerEvents.end()) {
pushBoolean(L, false);
return 1;
}
LuaTimerEventDesc timerEventDesc = std::move(it->second);
timerEvents.erase(it);
g_scheduler.stopEvent(timerEventDesc.eventId);
luaL_unref(globalState, LUA_REGISTRYINDEX, timerEventDesc.function);
for (auto parameter : timerEventDesc.parameters) {
luaL_unref(globalState, LUA_REGISTRYINDEX, parameter);
}
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaSaveServer(lua_State* L)
{
//saveServer()
g_game.saveGameState();
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCleanMap(lua_State* L)
{
//cleanMap()
lua_pushnumber(L, g_game.map.clean());
return 1;
}
int LuaScriptInterface::luaIsInWar(lua_State* L)
{
//isInWar(cid, target)
Player* player = getPlayer(L, 1);
if (!player) {
reportErrorFunc(getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
Player* targetPlayer = getPlayer(L, 2);
if (!targetPlayer) {
reportErrorFunc(getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
pushBoolean(L, player->isInWar(targetPlayer));
return 1;
}
int LuaScriptInterface::luaGetWaypointPositionByName(lua_State* L)
{
//getWaypointPositionByName(name)
auto& waypoints = g_game.map.waypoints;
auto it = waypoints.find(getString(L, -1));
if (it != waypoints.end()) {
pushPosition(L, it->second);
} else {
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaSendChannelMessage(lua_State* L)
{
//sendChannelMessage(channelId, type, message)
uint32_t channelId = getNumber<uint32_t>(L, 1);
ChatChannel* channel = g_chat->getChannelById(channelId);
if (!channel) {
pushBoolean(L, false);
return 1;
}
SpeakClasses type = getNumber<SpeakClasses>(L, 2);
std::string message = getString(L, 3);
channel->sendToAll(message, type);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaSendGuildChannelMessage(lua_State* L)
{
//sendGuildChannelMessage(guildId, type, message)
uint32_t guildId = getNumber<uint32_t>(L, 1);
ChatChannel* channel = g_chat->getGuildChannelById(guildId);
if (!channel) {
pushBoolean(L, false);
return 1;
}
SpeakClasses type = getNumber<SpeakClasses>(L, 2);
std::string message = getString(L, 3);
channel->sendToAll(message, type);
pushBoolean(L, true);
return 1;
}
std::string LuaScriptInterface::escapeString(const std::string& string)
{
std::string s = string;
replaceString(s, "\\", "\\\\");
replaceString(s, "\"", "\\\"");
replaceString(s, "'", "\\'");
replaceString(s, "[[", "\\[[");
return s;
}
#ifndef LUAJIT_VERSION
const luaL_Reg LuaScriptInterface::luaBitReg[] = {
//{"tobit", LuaScriptInterface::luaBitToBit},
{"bnot", LuaScriptInterface::luaBitNot},
{"band", LuaScriptInterface::luaBitAnd},
{"bor", LuaScriptInterface::luaBitOr},
{"bxor", LuaScriptInterface::luaBitXor},
{"lshift", LuaScriptInterface::luaBitLeftShift},
{"rshift", LuaScriptInterface::luaBitRightShift},
//{"arshift", LuaScriptInterface::luaBitArithmeticalRightShift},
//{"rol", LuaScriptInterface::luaBitRotateLeft},
//{"ror", LuaScriptInterface::luaBitRotateRight},
//{"bswap", LuaScriptInterface::luaBitSwapEndian},
//{"tohex", LuaScriptInterface::luaBitToHex},
{nullptr, nullptr}
};
int LuaScriptInterface::luaBitNot(lua_State* L)
{
lua_pushnumber(L, ~getNumber<uint32_t>(L, -1));
return 1;
}
#define MULTIOP(name, op) \
int LuaScriptInterface::luaBit##name(lua_State* L) \
{ \
int n = lua_gettop(L); \
uint32_t w = getNumber<uint32_t>(L, -1); \
for (int i = 1; i < n; ++i) \
w op getNumber<uint32_t>(L, i); \
lua_pushnumber(L, w); \
return 1; \
}
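//expands to luaBitAnd, luaBitOr and luaBitXor; each folds its operator over
//every argument, so a call such as bit.band(a, b, c) (assuming the table is
//registered under the name 'bit') works for any argument count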
MULTIOP(And, &= )
MULTIOP(Or, |= )
MULTIOP(Xor, ^= )
#define SHIFTOP(name, op) \
int LuaScriptInterface::luaBit##name(lua_State* L) \
{ \
uint32_t n1 = getNumber<uint32_t>(L, 1), n2 = getNumber<uint32_t>(L, 2); \
lua_pushnumber(L, (n1 op n2)); \
return 1; \
}
SHIFTOP(LeftShift, << )
SHIFTOP(RightShift, >> )
#endif
const luaL_Reg LuaScriptInterface::luaConfigManagerTable[] = {
{"getString", LuaScriptInterface::luaConfigManagerGetString},
{"getNumber", LuaScriptInterface::luaConfigManagerGetNumber},
{"getBoolean", LuaScriptInterface::luaConfigManagerGetBoolean},
{nullptr, nullptr}
};
int LuaScriptInterface::luaConfigManagerGetString(lua_State* L)
{
pushString(L, g_config.getString(getNumber<ConfigManager::string_config_t>(L, -1)));
return 1;
}
int LuaScriptInterface::luaConfigManagerGetNumber(lua_State* L)
{
lua_pushnumber(L, g_config.getNumber(getNumber<ConfigManager::integer_config_t>(L, -1)));
return 1;
}
int LuaScriptInterface::luaConfigManagerGetBoolean(lua_State* L)
{
pushBoolean(L, g_config.getBoolean(getNumber<ConfigManager::boolean_config_t>(L, -1)));
return 1;
}
const luaL_Reg LuaScriptInterface::luaDatabaseTable[] = {
{"query", LuaScriptInterface::luaDatabaseExecute},
{"asyncQuery", LuaScriptInterface::luaDatabaseAsyncExecute},
{"storeQuery", LuaScriptInterface::luaDatabaseStoreQuery},
{"asyncStoreQuery", LuaScriptInterface::luaDatabaseAsyncStoreQuery},
{"escapeString", LuaScriptInterface::luaDatabaseEscapeString},
{"escapeBlob", LuaScriptInterface::luaDatabaseEscapeBlob},
{"lastInsertId", LuaScriptInterface::luaDatabaseLastInsertId},
{"tableExists", LuaScriptInterface::luaDatabaseTableExists},
{nullptr, nullptr}
};
int LuaScriptInterface::luaDatabaseExecute(lua_State* L)
{
pushBoolean(L, Database::getInstance().executeQuery(getString(L, -1)));
return 1;
}
int LuaScriptInterface::luaDatabaseAsyncExecute(lua_State* L)
{
std::function<void(DBResult_ptr, bool)> callback;
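//if a callback was passed it sits on top of the stack; luaL_ref pops it
//into the registry so it survives until the query completes, and it is
//always unref'd afterwards to avoid leaking registry slots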
if (lua_gettop(L) > 1) {
int32_t ref = luaL_ref(L, LUA_REGISTRYINDEX);
auto scriptId = getScriptEnv()->getScriptId();
callback = [ref, scriptId](DBResult_ptr, bool success) {
lua_State* luaState = g_luaEnvironment.getLuaState();
if (!luaState) {
return;
}
if (!LuaScriptInterface::reserveScriptEnv()) {
luaL_unref(luaState, LUA_REGISTRYINDEX, ref);
return;
}
lua_rawgeti(luaState, LUA_REGISTRYINDEX, ref);
pushBoolean(luaState, success);
auto env = getScriptEnv();
env->setScriptId(scriptId, &g_luaEnvironment);
g_luaEnvironment.callFunction(1);
luaL_unref(luaState, LUA_REGISTRYINDEX, ref);
};
}
g_databaseTasks.addTask(getString(L, -1), callback);
return 0;
}
int LuaScriptInterface::luaDatabaseStoreQuery(lua_State* L)
{
if (DBResult_ptr res = Database::getInstance().storeQuery(getString(L, -1))) {
lua_pushnumber(L, ScriptEnvironment::addResult(res));
} else {
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaDatabaseAsyncStoreQuery(lua_State* L)
{
std::function<void(DBResult_ptr, bool)> callback;
if (lua_gettop(L) > 1) {
int32_t ref = luaL_ref(L, LUA_REGISTRYINDEX);
auto scriptId = getScriptEnv()->getScriptId();
callback = [ref, scriptId](DBResult_ptr result, bool) {
lua_State* luaState = g_luaEnvironment.getLuaState();
if (!luaState) {
return;
}
if (!LuaScriptInterface::reserveScriptEnv()) {
luaL_unref(luaState, LUA_REGISTRYINDEX, ref);
return;
}
lua_rawgeti(luaState, LUA_REGISTRYINDEX, ref);
if (result) {
lua_pushnumber(luaState, ScriptEnvironment::addResult(result));
} else {
pushBoolean(luaState, false);
}
auto env = getScriptEnv();
env->setScriptId(scriptId, &g_luaEnvironment);
g_luaEnvironment.callFunction(1);
luaL_unref(luaState, LUA_REGISTRYINDEX, ref);
};
}
g_databaseTasks.addTask(getString(L, -1), callback, true);
return 0;
}
int LuaScriptInterface::luaDatabaseEscapeString(lua_State* L)
{
pushString(L, Database::getInstance().escapeString(getString(L, -1)));
return 1;
}
int LuaScriptInterface::luaDatabaseEscapeBlob(lua_State* L)
{
uint32_t length = getNumber<uint32_t>(L, 2);
pushString(L, Database::getInstance().escapeBlob(getString(L, 1).c_str(), length));
return 1;
}
int LuaScriptInterface::luaDatabaseLastInsertId(lua_State* L)
{
lua_pushnumber(L, Database::getInstance().getLastInsertId());
return 1;
}
int LuaScriptInterface::luaDatabaseTableExists(lua_State* L)
{
pushBoolean(L, DatabaseManager::tableExists(getString(L, -1)));
return 1;
}
const luaL_Reg LuaScriptInterface::luaResultTable[] = {
{"getNumber", LuaScriptInterface::luaResultGetNumber},
{"getString", LuaScriptInterface::luaResultGetString},
{"getStream", LuaScriptInterface::luaResultGetStream},
{"next", LuaScriptInterface::luaResultNext},
{"free", LuaScriptInterface::luaResultFree},
{nullptr, nullptr}
};
int LuaScriptInterface::luaResultGetNumber(lua_State* L)
{
DBResult_ptr res = ScriptEnvironment::getResultByID(getNumber<uint32_t>(L, 1));
if (!res) {
pushBoolean(L, false);
return 1;
}
const std::string& s = getString(L, 2);
lua_pushnumber(L, res->getNumber<int64_t>(s));
return 1;
}
int LuaScriptInterface::luaResultGetString(lua_State* L)
{
DBResult_ptr res = ScriptEnvironment::getResultByID(getNumber<uint32_t>(L, 1));
if (!res) {
pushBoolean(L, false);
return 1;
}
const std::string& s = getString(L, 2);
pushString(L, res->getString(s));
return 1;
}
int LuaScriptInterface::luaResultGetStream(lua_State* L)
{
DBResult_ptr res = ScriptEnvironment::getResultByID(getNumber<uint32_t>(L, 1));
if (!res) {
pushBoolean(L, false);
return 1;
}
unsigned long length;
const char* stream = res->getStream(getString(L, 2), length);
lua_pushlstring(L, stream, length);
lua_pushnumber(L, length);
return 2;
}
int LuaScriptInterface::luaResultNext(lua_State* L)
{
DBResult_ptr res = ScriptEnvironment::getResultByID(getNumber<uint32_t>(L, -1));
if (!res) {
pushBoolean(L, false);
return 1;
}
pushBoolean(L, res->next());
return 1;
}
int LuaScriptInterface::luaResultFree(lua_State* L)
{
pushBoolean(L, ScriptEnvironment::removeResult(getNumber<uint32_t>(L, -1)));
return 1;
}
// Userdata
int LuaScriptInterface::luaUserdataCompare(lua_State* L)
{
// userdataA == userdataB
pushBoolean(L, getUserdata<void>(L, 1) == getUserdata<void>(L, 2));
return 1;
}
// _G
int LuaScriptInterface::luaIsType(lua_State* L)
{
// isType(derived, base)
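// Each class metatable is assumed to carry its parent-chain depth under the
// key 'p' and a per-class hash under 'h'; the loop below walks the __index
// chain until both sides are at the same depth, then compares the hashes.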
lua_getmetatable(L, -2);
lua_getmetatable(L, -2);
lua_rawgeti(L, -2, 'p');
uint_fast8_t parentsB = getNumber<uint_fast8_t>(L, 1);
lua_rawgeti(L, -3, 'h');
size_t hashB = getNumber<size_t>(L, 1);
lua_rawgeti(L, -3, 'p');
uint_fast8_t parentsA = getNumber<uint_fast8_t>(L, 1);
for (uint_fast8_t i = parentsA; i < parentsB; ++i) {
lua_getfield(L, -3, "__index");
lua_replace(L, -4);
}
lua_rawgeti(L, -4, 'h');
size_t hashA = getNumber<size_t>(L, 1);
pushBoolean(L, hashA == hashB);
return 1;
}
int LuaScriptInterface::luaRawGetMetatable(lua_State* L)
{
// rawgetmetatable(metatableName)
luaL_getmetatable(L, getString(L, 1).c_str());
return 1;
}
// os
int LuaScriptInterface::luaSystemTime(lua_State* L)
{
// os.mtime()
lua_pushnumber(L, OTSYS_TIME());
return 1;
}
// table
int LuaScriptInterface::luaTableCreate(lua_State* L)
{
// table.create(arrayLength, keyLength)
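// lua_createtable pre-sizes the array part and the hash part, which avoids
// rehashing while the table is being filled.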
lua_createtable(L, getNumber<int32_t>(L, 1), getNumber<int32_t>(L, 2));
return 1;
}
// Game
int LuaScriptInterface::luaGameGetSpectators(lua_State* L)
{
// Game.getSpectators(position[, multifloor = false[, onlyPlayers = false[, minRangeX = 0[, maxRangeX = 0[, minRangeY = 0[, maxRangeY = 0]]]]]])
const Position& position = getPosition(L, 1);
bool multifloor = getBoolean(L, 2, false);
bool onlyPlayers = getBoolean(L, 3, false);
int32_t minRangeX = getNumber<int32_t>(L, 4, 0);
int32_t maxRangeX = getNumber<int32_t>(L, 5, 0);
int32_t minRangeY = getNumber<int32_t>(L, 6, 0);
int32_t maxRangeY = getNumber<int32_t>(L, 7, 0);
SpectatorHashSet spectators;
g_game.map.getSpectators(spectators, position, multifloor, onlyPlayers, minRangeX, maxRangeX, minRangeY, maxRangeY);
lua_createtable(L, spectators.size(), 0);
int index = 0;
for (Creature* creature : spectators) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaGameGetPlayers(lua_State* L)
{
// Game.getPlayers()
lua_createtable(L, g_game.getPlayersOnline(), 0);
int index = 0;
for (const auto& playerEntry : g_game.getPlayers()) {
pushUserdata<Player>(L, playerEntry.second);
setMetatable(L, -1, "Player");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaGameLoadMap(lua_State* L)
{
// Game.loadMap(path)
const std::string& path = getString(L, 1);
g_dispatcher.addTask(createTask(std::bind(&Game::loadMap, &g_game, path)));
return 0;
}
int LuaScriptInterface::luaGameGetExperienceStage(lua_State* L)
{
// Game.getExperienceStage(level)
uint32_t level = getNumber<uint32_t>(L, 1);
lua_pushnumber(L, g_game.getExperienceStage(level));
return 1;
}
int LuaScriptInterface::luaGameGetMonsterCount(lua_State* L)
{
// Game.getMonsterCount()
lua_pushnumber(L, g_game.getMonstersOnline());
return 1;
}
int LuaScriptInterface::luaGameGetPlayerCount(lua_State* L)
{
// Game.getPlayerCount()
lua_pushnumber(L, g_game.getPlayersOnline());
return 1;
}
int LuaScriptInterface::luaGameGetNpcCount(lua_State* L)
{
// Game.getNpcCount()
lua_pushnumber(L, g_game.getNpcsOnline());
return 1;
}
int LuaScriptInterface::luaGameGetTowns(lua_State* L)
{
// Game.getTowns()
const auto& towns = g_game.map.towns.getTowns();
lua_createtable(L, towns.size(), 0);
int index = 0;
for (auto townEntry : towns) {
pushUserdata<Town>(L, townEntry.second);
setMetatable(L, -1, "Town");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaGameGetHouses(lua_State* L)
{
// Game.getHouses()
const auto& houses = g_game.map.houses.getHouses();
lua_createtable(L, houses.size(), 0);
int index = 0;
for (auto houseEntry : houses) {
pushUserdata<House>(L, houseEntry.second);
setMetatable(L, -1, "House");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaGameGetGameState(lua_State* L)
{
// Game.getGameState()
lua_pushnumber(L, g_game.getGameState());
return 1;
}
int LuaScriptInterface::luaGameSetGameState(lua_State* L)
{
// Game.setGameState(state)
GameState_t state = getNumber<GameState_t>(L, 1);
g_game.setGameState(state);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaGameGetWorldType(lua_State* L)
{
// Game.getWorldType()
lua_pushnumber(L, g_game.getWorldType());
return 1;
}
int LuaScriptInterface::luaGameSetWorldType(lua_State* L)
{
// Game.setWorldType(type)
WorldType_t type = getNumber<WorldType_t>(L, 1);
g_game.setWorldType(type);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaGameGetReturnMessage(lua_State* L)
{
// Game.getReturnMessage(value)
ReturnValue value = getNumber<ReturnValue>(L, 1);
pushString(L, getReturnMessage(value));
return 1;
}
int LuaScriptInterface::luaGameCreateItem(lua_State* L)
{
// Game.createItem(itemId[, count[, position]])
uint16_t count = getNumber<uint16_t>(L, 2, 1);
uint16_t id;
if (isNumber(L, 1)) {
id = getNumber<uint16_t>(L, 1);
} else {
id = Item::items.getItemIdByName(getString(L, 1));
if (id == 0) {
lua_pushnil(L);
return 1;
}
}
const ItemType& it = Item::items[id];
if (it.stackable) {
count = std::min<uint16_t>(count, 100);
}
Item* item = Item::CreateItem(id, count);
if (!item) {
lua_pushnil(L);
return 1;
}
if (lua_gettop(L) >= 3) {
const Position& position = getPosition(L, 3);
Tile* tile = g_game.map.getTile(position);
if (!tile) {
delete item;
lua_pushnil(L);
return 1;
}
g_game.internalAddItem(tile, item, INDEX_WHEREEVER, FLAG_NOLIMIT);
} else {
getScriptEnv()->addTempItem(item);
item->setParent(VirtualCylinder::virtualCylinder);
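// Without a position the item stays parented to the virtual cylinder and is
// tracked as a temporary: it is released with the script environment unless
// it is moved to a real parent first (assumption based on addTempItem).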
}
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
return 1;
}
int LuaScriptInterface::luaGameCreateContainer(lua_State* L)
{
// Game.createContainer(itemId, size[, position])
uint16_t size = getNumber<uint16_t>(L, 2);
uint16_t id;
if (isNumber(L, 1)) {
id = getNumber<uint16_t>(L, 1);
} else {
id = Item::items.getItemIdByName(getString(L, 1));
if (id == 0) {
lua_pushnil(L);
return 1;
}
}
Container* container = Item::CreateItemAsContainer(id, size);
if (!container) {
lua_pushnil(L);
return 1;
}
if (lua_gettop(L) >= 3) {
const Position& position = getPosition(L, 3);
Tile* tile = g_game.map.getTile(position);
if (!tile) {
delete container;
lua_pushnil(L);
return 1;
}
g_game.internalAddItem(tile, container, INDEX_WHEREEVER, FLAG_NOLIMIT);
} else {
getScriptEnv()->addTempItem(container);
container->setParent(VirtualCylinder::virtualCylinder);
}
pushUserdata<Container>(L, container);
setMetatable(L, -1, "Container");
return 1;
}
int LuaScriptInterface::luaGameCreateMonster(lua_State* L)
{
// Game.createMonster(monsterName, position[, extended = false[, force = false]])
Monster* monster = Monster::createMonster(getString(L, 1));
if (!monster) {
lua_pushnil(L);
return 1;
}
const Position& position = getPosition(L, 2);
bool extended = getBoolean(L, 3, false);
bool force = getBoolean(L, 4, false);
if (g_game.placeCreature(monster, position, extended, force)) {
pushUserdata<Monster>(L, monster);
setMetatable(L, -1, "Monster");
} else {
delete monster;
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGameCreateNpc(lua_State* L)
{
// Game.createNpc(npcName, position[, extended = false[, force = false]])
Npc* npc = Npc::createNpc(getString(L, 1));
if (!npc) {
lua_pushnil(L);
return 1;
}
const Position& position = getPosition(L, 2);
bool extended = getBoolean(L, 3, false);
bool force = getBoolean(L, 4, false);
if (g_game.placeCreature(npc, position, extended, force)) {
pushUserdata<Npc>(L, npc);
setMetatable(L, -1, "Npc");
} else {
delete npc;
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGameCreateTile(lua_State* L)
{
// Game.createTile(x, y, z[, isDynamic = false])
// Game.createTile(position[, isDynamic = false])
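// DynamicTile is assumed to keep a mutable item list suited to tiles that
// change at runtime, while StaticTile is the memory-optimized default.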
Position position;
bool isDynamic;
if (isTable(L, 1)) {
position = getPosition(L, 1);
isDynamic = getBoolean(L, 2, false);
} else {
position.x = getNumber<uint16_t>(L, 1);
position.y = getNumber<uint16_t>(L, 2);
position.z = getNumber<uint16_t>(L, 3);
isDynamic = getBoolean(L, 4, false);
}
Tile* tile = g_game.map.getTile(position);
if (!tile) {
if (isDynamic) {
tile = new DynamicTile(position.x, position.y, position.z);
} else {
tile = new StaticTile(position.x, position.y, position.z);
}
g_game.map.setTile(position, tile);
}
pushUserdata(L, tile);
setMetatable(L, -1, "Tile");
return 1;
}
int LuaScriptInterface::luaGameStartRaid(lua_State* L)
{
// Game.startRaid(raidName)
const std::string& raidName = getString(L, 1);
Raid* raid = g_game.raids.getRaidByName(raidName);
if (!raid || !raid->isLoaded()) {
lua_pushnumber(L, RETURNVALUE_NOSUCHRAIDEXISTS);
return 1;
}
if (g_game.raids.getRunning()) {
lua_pushnumber(L, RETURNVALUE_ANOTHERRAIDISALREADYEXECUTING);
return 1;
}
g_game.raids.setRunning(raid);
raid->startRaid();
lua_pushnumber(L, RETURNVALUE_NOERROR);
return 1;
}
int LuaScriptInterface::luaGameGetClientVersion(lua_State* L)
{
// Game.getClientVersion()
lua_createtable(L, 0, 3);
setField(L, "min", CLIENT_VERSION_MIN);
setField(L, "max", CLIENT_VERSION_MAX);
setField(L, "string", CLIENT_VERSION_STR);
return 1;
}
int LuaScriptInterface::luaGameReload(lua_State* L)
{
// Game.reload(reloadType)
ReloadTypes_t reloadType = getNumber<ReloadTypes_t>(L, 1);
if (reloadType == RELOAD_TYPE_GLOBAL) {
pushBoolean(L, g_luaEnvironment.loadFile("data/global.lua") == 0);
} else {
pushBoolean(L, g_game.reload(reloadType));
}
lua_gc(g_luaEnvironment.getLuaState(), LUA_GCCOLLECT, 0);
return 1;
}
// Variant
int LuaScriptInterface::luaVariantCreate(lua_State* L)
{
// Variant(number or string or position or thing)
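// Index 1 holds the Variant class table itself (constructors are assumed to
// be invoked through the class table's __call), so the value starts at index 2.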
LuaVariant variant;
if (isUserdata(L, 2)) {
if (Thing* thing = getThing(L, 2)) {
variant.type = VARIANT_TARGETPOSITION;
variant.pos = thing->getPosition();
}
} else if (isTable(L, 2)) {
variant.type = VARIANT_POSITION;
variant.pos = getPosition(L, 2);
} else if (isNumber(L, 2)) {
variant.type = VARIANT_NUMBER;
variant.number = getNumber<uint32_t>(L, 2);
} else if (isString(L, 2)) {
variant.type = VARIANT_STRING;
variant.text = getString(L, 2);
}
pushVariant(L, variant);
return 1;
}
int LuaScriptInterface::luaVariantGetNumber(lua_State* L)
{
// Variant:getNumber()
const LuaVariant& variant = getVariant(L, 1);
if (variant.type == VARIANT_NUMBER) {
lua_pushnumber(L, variant.number);
} else {
lua_pushnumber(L, 0);
}
return 1;
}
int LuaScriptInterface::luaVariantGetString(lua_State* L)
{
// Variant:getString()
const LuaVariant& variant = getVariant(L, 1);
if (variant.type == VARIANT_STRING) {
pushString(L, variant.text);
} else {
pushString(L, std::string());
}
return 1;
}
int LuaScriptInterface::luaVariantGetPosition(lua_State* L)
{
// Variant:getPosition()
const LuaVariant& variant = getVariant(L, 1);
if (variant.type == VARIANT_POSITION || variant.type == VARIANT_TARGETPOSITION) {
pushPosition(L, variant.pos);
} else {
pushPosition(L, Position());
}
return 1;
}
// Position
int LuaScriptInterface::luaPositionCreate(lua_State* L)
{
// Position([x = 0[, y = 0[, z = 0[, stackpos = 0]]]])
// Position([position])
if (lua_gettop(L) <= 1) {
pushPosition(L, Position());
return 1;
}
int32_t stackpos;
if (isTable(L, 2)) {
const Position& position = getPosition(L, 2, stackpos);
pushPosition(L, position, stackpos);
} else {
uint16_t x = getNumber<uint16_t>(L, 2, 0);
uint16_t y = getNumber<uint16_t>(L, 3, 0);
uint8_t z = getNumber<uint8_t>(L, 4, 0);
stackpos = getNumber<int32_t>(L, 5, 0);
pushPosition(L, Position(x, y, z), stackpos);
}
return 1;
}
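// example (Lua):
//   local pos = Position(100, 200, 7)
//   local copy = Position(pos)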
int LuaScriptInterface::luaPositionAdd(lua_State* L)
{
// positionValue = position + positionEx
int32_t stackpos;
const Position& position = getPosition(L, 1, stackpos);
Position positionEx;
if (stackpos == 0) {
positionEx = getPosition(L, 2, stackpos);
} else {
positionEx = getPosition(L, 2);
}
pushPosition(L, position + positionEx, stackpos);
return 1;
}
int LuaScriptInterface::luaPositionSub(lua_State* L)
{
// positionValue = position - positionEx
int32_t stackpos;
const Position& position = getPosition(L, 1, stackpos);
Position positionEx;
if (stackpos == 0) {
positionEx = getPosition(L, 2, stackpos);
} else {
positionEx = getPosition(L, 2);
}
pushPosition(L, position - positionEx, stackpos);
return 1;
}
int LuaScriptInterface::luaPositionCompare(lua_State* L)
{
// position == positionEx
const Position& positionEx = getPosition(L, 2);
const Position& position = getPosition(L, 1);
pushBoolean(L, position == positionEx);
return 1;
}
int LuaScriptInterface::luaPositionGetDistance(lua_State* L)
{
// position:getDistance(positionEx)
const Position& positionEx = getPosition(L, 2);
const Position& position = getPosition(L, 1);
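// The distance is the largest per-axis delta (Chebyshev distance, extended
// with the floor difference).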
lua_pushnumber(L, std::max<int32_t>(
std::max<int32_t>(
std::abs(Position::getDistanceX(position, positionEx)),
std::abs(Position::getDistanceY(position, positionEx))
),
std::abs(Position::getDistanceZ(position, positionEx))
));
return 1;
}
int LuaScriptInterface::luaPositionIsSightClear(lua_State* L)
{
// position:isSightClear(positionEx[, sameFloor = true])
bool sameFloor = getBoolean(L, 3, true);
const Position& positionEx = getPosition(L, 2);
const Position& position = getPosition(L, 1);
pushBoolean(L, g_game.isSightClear(position, positionEx, sameFloor));
return 1;
}
int LuaScriptInterface::luaPositionSendMagicEffect(lua_State* L)
{
// position:sendMagicEffect(magicEffect[, player = nullptr])
SpectatorHashSet spectators;
if (lua_gettop(L) >= 3) {
Player* player = getPlayer(L, 3);
if (player) {
spectators.insert(player);
}
}
MagicEffectClasses magicEffect = getNumber<MagicEffectClasses>(L, 2);
const Position& position = getPosition(L, 1);
if (!spectators.empty()) {
Game::addMagicEffect(spectators, position, magicEffect);
} else {
g_game.addMagicEffect(position, magicEffect);
}
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPositionSendDistanceEffect(lua_State* L)
{
// position:sendDistanceEffect(positionEx, distanceEffect[, player = nullptr])
SpectatorHashSet spectators;
if (lua_gettop(L) >= 4) {
Player* player = getPlayer(L, 4);
if (player) {
spectators.insert(player);
}
}
ShootType_t distanceEffect = getNumber<ShootType_t>(L, 3);
const Position& positionEx = getPosition(L, 2);
const Position& position = getPosition(L, 1);
if (!spectators.empty()) {
Game::addDistanceEffect(spectators, position, positionEx, distanceEffect);
} else {
g_game.addDistanceEffect(position, positionEx, distanceEffect);
}
pushBoolean(L, true);
return 1;
}
// Tile
int LuaScriptInterface::luaTileCreate(lua_State* L)
{
// Tile(x, y, z)
// Tile(position)
Tile* tile;
if (isTable(L, 2)) {
tile = g_game.map.getTile(getPosition(L, 2));
} else {
uint8_t z = getNumber<uint8_t>(L, 4);
uint16_t y = getNumber<uint16_t>(L, 3);
uint16_t x = getNumber<uint16_t>(L, 2);
tile = g_game.map.getTile(x, y, z);
}
if (tile) {
pushUserdata<Tile>(L, tile);
setMetatable(L, -1, "Tile");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetPosition(lua_State* L)
{
// tile:getPosition()
Tile* tile = getUserdata<Tile>(L, 1);
if (tile) {
pushPosition(L, tile->getPosition());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetGround(lua_State* L)
{
// tile:getGround()
Tile* tile = getUserdata<Tile>(L, 1);
if (tile && tile->getGround()) {
pushUserdata<Item>(L, tile->getGround());
setItemMetatable(L, -1, tile->getGround());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetThing(lua_State* L)
{
// tile:getThing(index)
int32_t index = getNumber<int32_t>(L, 2);
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Thing* thing = tile->getThing(index);
if (!thing) {
lua_pushnil(L);
return 1;
}
if (Creature* creature = thing->getCreature()) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
} else if (Item* item = thing->getItem()) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetThingCount(lua_State* L)
{
// tile:getThingCount()
Tile* tile = getUserdata<Tile>(L, 1);
if (tile) {
lua_pushnumber(L, tile->getThingCount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetTopVisibleThing(lua_State* L)
{
// tile:getTopVisibleThing(creature)
Creature* creature = getCreature(L, 2);
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Thing* thing = tile->getTopVisibleThing(creature);
if (!thing) {
lua_pushnil(L);
return 1;
}
if (Creature* visibleCreature = thing->getCreature()) {
pushUserdata<Creature>(L, visibleCreature);
setCreatureMetatable(L, -1, visibleCreature);
} else if (Item* visibleItem = thing->getItem()) {
pushUserdata<Item>(L, visibleItem);
setItemMetatable(L, -1, visibleItem);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetTopTopItem(lua_State* L)
{
// tile:getTopTopItem()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Item* item = tile->getTopTopItem();
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetTopDownItem(lua_State* L)
{
// tile:getTopDownItem()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Item* item = tile->getTopDownItem();
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetFieldItem(lua_State* L)
{
// tile:getFieldItem()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Item* item = tile->getFieldItem();
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetItemById(lua_State* L)
{
// tile:getItemById(itemId[, subType = -1])
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
int32_t subType = getNumber<int32_t>(L, 3, -1);
Item* item = g_game.findItemOfType(tile, itemId, false, subType);
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetItemByType(lua_State* L)
{
// tile:getItemByType(itemType)
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
bool found;
ItemTypes_t itemType = getNumber<ItemTypes_t>(L, 2);
switch (itemType) {
case ITEM_TYPE_TELEPORT:
found = tile->hasFlag(TILESTATE_TELEPORT);
break;
case ITEM_TYPE_MAGICFIELD:
found = tile->hasFlag(TILESTATE_MAGICFIELD);
break;
case ITEM_TYPE_MAILBOX:
found = tile->hasFlag(TILESTATE_MAILBOX);
break;
case ITEM_TYPE_TRASHHOLDER:
found = tile->hasFlag(TILESTATE_TRASHHOLDER);
break;
case ITEM_TYPE_BED:
found = tile->hasFlag(TILESTATE_BED);
break;
case ITEM_TYPE_DEPOT:
found = tile->hasFlag(TILESTATE_DEPOT);
break;
default:
found = true;
break;
}
if (!found) {
lua_pushnil(L);
return 1;
}
if (Item* item = tile->getGround()) {
const ItemType& it = Item::items[item->getID()];
if (it.type == itemType) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
return 1;
}
}
if (const TileItemVector* items = tile->getItemList()) {
for (Item* item : *items) {
const ItemType& it = Item::items[item->getID()];
if (it.type == itemType) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
return 1;
}
}
}
lua_pushnil(L);
return 1;
}
int LuaScriptInterface::luaTileGetItemByTopOrder(lua_State* L)
{
// tile:getItemByTopOrder(topOrder)
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
int32_t topOrder = getNumber<int32_t>(L, 2);
Item* item = tile->getItemByTopOrder(topOrder);
if (!item) {
lua_pushnil(L);
return 1;
}
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
return 1;
}
int LuaScriptInterface::luaTileGetItemCountById(lua_State* L)
{
// tile:getItemCountById(itemId[, subType = -1])
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
int32_t subType = getNumber<int32_t>(L, 3, -1);
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
lua_pushnumber(L, tile->getItemTypeCount(itemId, subType));
return 1;
}
int LuaScriptInterface::luaTileGetBottomCreature(lua_State* L)
{
// tile:getBottomCreature()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
const Creature* creature = tile->getBottomCreature();
if (!creature) {
lua_pushnil(L);
return 1;
}
pushUserdata<const Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
return 1;
}
int LuaScriptInterface::luaTileGetTopCreature(lua_State* L)
{
// tile:getTopCreature()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Creature* creature = tile->getTopCreature();
if (!creature) {
lua_pushnil(L);
return 1;
}
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
return 1;
}
int LuaScriptInterface::luaTileGetBottomVisibleCreature(lua_State* L)
{
// tile:getBottomVisibleCreature(creature)
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Creature* creature = getCreature(L, 2);
if (!creature) {
lua_pushnil(L);
return 1;
}
const Creature* visibleCreature = tile->getBottomVisibleCreature(creature);
if (visibleCreature) {
pushUserdata<const Creature>(L, visibleCreature);
setCreatureMetatable(L, -1, visibleCreature);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetTopVisibleCreature(lua_State* L)
{
// tile:getTopVisibleCreature(creature)
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Creature* creature = getCreature(L, 2);
if (!creature) {
lua_pushnil(L);
return 1;
}
Creature* visibleCreature = tile->getTopVisibleCreature(creature);
if (visibleCreature) {
pushUserdata<Creature>(L, visibleCreature);
setCreatureMetatable(L, -1, visibleCreature);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetItems(lua_State* L)
{
// tile:getItems()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
TileItemVector* itemVector = tile->getItemList();
if (!itemVector) {
lua_pushnil(L);
return 1;
}
lua_createtable(L, itemVector->size(), 0);
int index = 0;
for (Item* item : *itemVector) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaTileGetItemCount(lua_State* L)
{
// tile:getItemCount()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
lua_pushnumber(L, tile->getItemCount());
return 1;
}
int LuaScriptInterface::luaTileGetDownItemCount(lua_State* L)
{
// tile:getDownItemCount()
Tile* tile = getUserdata<Tile>(L, 1);
if (tile) {
lua_pushnumber(L, tile->getDownItemCount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetTopItemCount(lua_State* L)
{
// tile:getTopItemCount()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
lua_pushnumber(L, tile->getTopItemCount());
return 1;
}
int LuaScriptInterface::luaTileGetCreatures(lua_State* L)
{
// tile:getCreatures()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
CreatureVector* creatureVector = tile->getCreatures();
if (!creatureVector) {
lua_pushnil(L);
return 1;
}
lua_createtable(L, creatureVector->size(), 0);
int index = 0;
for (Creature* creature : *creatureVector) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaTileGetCreatureCount(lua_State* L)
{
// tile:getCreatureCount()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
lua_pushnumber(L, tile->getCreatureCount());
return 1;
}
int LuaScriptInterface::luaTileHasProperty(lua_State* L)
{
// tile:hasProperty(property[, item])
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Item* item;
if (lua_gettop(L) >= 3) {
item = getUserdata<Item>(L, 3);
} else {
item = nullptr;
}
ITEMPROPERTY property = getNumber<ITEMPROPERTY>(L, 2);
if (item) {
pushBoolean(L, tile->hasProperty(item, property));
} else {
pushBoolean(L, tile->hasProperty(property));
}
return 1;
}
int LuaScriptInterface::luaTileGetThingIndex(lua_State* L)
{
// tile:getThingIndex(thing)
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Thing* thing = getThing(L, 2);
if (thing) {
lua_pushnumber(L, tile->getThingIndex(thing));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileHasFlag(lua_State* L)
{
// tile:hasFlag(flag)
Tile* tile = getUserdata<Tile>(L, 1);
if (tile) {
tileflags_t flag = getNumber<tileflags_t>(L, 2);
pushBoolean(L, tile->hasFlag(flag));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileQueryAdd(lua_State* L)
{
// tile:queryAdd(thing[, flags])
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Thing* thing = getThing(L, 2);
if (thing) {
uint32_t flags = getNumber<uint32_t>(L, 3, 0);
lua_pushnumber(L, tile->queryAdd(0, *thing, 1, flags));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetHouse(lua_State* L)
{
// tile:getHouse()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
if (HouseTile* houseTile = dynamic_cast<HouseTile*>(tile)) {
pushUserdata<House>(L, houseTile->getHouse());
setMetatable(L, -1, "House");
} else {
lua_pushnil(L);
}
return 1;
}
// NetworkMessage
int LuaScriptInterface::luaNetworkMessageCreate(lua_State* L)
{
// NetworkMessage()
pushUserdata<NetworkMessage>(L, new NetworkMessage);
setMetatable(L, -1, "NetworkMessage");
return 1;
}
int LuaScriptInterface::luaNetworkMessageDelete(lua_State* L)
{
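// Assumed to be registered as the userdata's __gc/"delete" handler: frees
// the heap-allocated message once and nulls the pointer so a second call
// is harmless.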
NetworkMessage** messagePtr = getRawUserdata<NetworkMessage>(L, 1);
if (messagePtr && *messagePtr) {
delete *messagePtr;
*messagePtr = nullptr;
}
return 0;
}
int LuaScriptInterface::luaNetworkMessageGetByte(lua_State* L)
{
// networkMessage:getByte()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
lua_pushnumber(L, message->getByte());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageGetU16(lua_State* L)
{
// networkMessage:getU16()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
lua_pushnumber(L, message->get<uint16_t>());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageGetU32(lua_State* L)
{
// networkMessage:getU32()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
lua_pushnumber(L, message->get<uint32_t>());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageGetU64(lua_State* L)
{
// networkMessage:getU64()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
lua_pushnumber(L, message->get<uint64_t>());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageGetString(lua_State* L)
{
// networkMessage:getString()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
pushString(L, message->getString());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageGetPosition(lua_State* L)
{
// networkMessage:getPosition()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
pushPosition(L, message->getPosition());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddByte(lua_State* L)
{
// networkMessage:addByte(number)
uint8_t number = getNumber<uint8_t>(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->addByte(number);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddU16(lua_State* L)
{
// networkMessage:addU16(number)
uint16_t number = getNumber<uint16_t>(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->add<uint16_t>(number);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddU32(lua_State* L)
{
// networkMessage:addU32(number)
uint32_t number = getNumber<uint32_t>(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->add<uint32_t>(number);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddU64(lua_State* L)
{
// networkMessage:addU64(number)
uint64_t number = getNumber<uint64_t>(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->add<uint64_t>(number);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddString(lua_State* L)
{
// networkMessage:addString(string)
const std::string& string = getString(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->addString(string);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddPosition(lua_State* L)
{
// networkMessage:addPosition(position)
const Position& position = getPosition(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->addPosition(position);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddDouble(lua_State* L)
{
// networkMessage:addDouble(number)
double number = getNumber<double>(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->addDouble(number);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddItem(lua_State* L)
{
// networkMessage:addItem(item)
Item* item = getUserdata<Item>(L, 2);
if (!item) {
reportErrorFunc(getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
lua_pushnil(L);
return 1;
}
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->addItem(item);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddItemId(lua_State* L)
{
// networkMessage:addItemId(itemId)
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (!message) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
message->addItemId(itemId);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaNetworkMessageReset(lua_State* L)
{
// networkMessage:reset()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->reset();
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageSkipBytes(lua_State* L)
{
// networkMessage:skipBytes(number)
int16_t number = getNumber<int16_t>(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->skipBytes(number);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageSendToPlayer(lua_State* L)
{
// networkMessage:sendToPlayer(player)
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (!message) {
lua_pushnil(L);
return 1;
}
Player* player = getPlayer(L, 2);
if (player) {
player->sendNetworkMessage(*message);
pushBoolean(L, true);
} else {
reportErrorFunc(getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
lua_pushnil(L);
}
return 1;
}
// ModalWindow
int LuaScriptInterface::luaModalWindowCreate(lua_State* L)
{
// ModalWindow(id, title, message)
const std::string& message = getString(L, 4);
const std::string& title = getString(L, 3);
uint32_t id = getNumber<uint32_t>(L, 2);
pushUserdata<ModalWindow>(L, new ModalWindow(id, title, message));
setMetatable(L, -1, "ModalWindow");
return 1;
}
int LuaScriptInterface::luaModalWindowDelete(lua_State* L)
{
ModalWindow** windowPtr = getRawUserdata<ModalWindow>(L, 1);
if (windowPtr && *windowPtr) {
delete *windowPtr;
*windowPtr = nullptr;
}
return 0;
}
int LuaScriptInterface::luaModalWindowGetId(lua_State* L)
{
// modalWindow:getId()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
lua_pushnumber(L, window->id);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowGetTitle(lua_State* L)
{
// modalWindow:getTitle()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
pushString(L, window->title);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowGetMessage(lua_State* L)
{
// modalWindow:getMessage()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
pushString(L, window->message);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowSetTitle(lua_State* L)
{
// modalWindow:setTitle(text)
const std::string& text = getString(L, 2);
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
window->title = text;
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowSetMessage(lua_State* L)
{
// modalWindow:setMessage(text)
const std::string& text = getString(L, 2);
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
window->message = text;
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowGetButtonCount(lua_State* L)
{
// modalWindow:getButtonCount()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
lua_pushnumber(L, window->buttons.size());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowGetChoiceCount(lua_State* L)
{
// modalWindow:getChoiceCount()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
lua_pushnumber(L, window->choices.size());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowAddButton(lua_State* L)
{
// modalWindow:addButton(id, text)
const std::string& text = getString(L, 3);
uint8_t id = getNumber<uint8_t>(L, 2);
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
window->buttons.emplace_back(text, id);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowAddChoice(lua_State* L)
{
// modalWindow:addChoice(id, text)
const std::string& text = getString(L, 3);
uint8_t id = getNumber<uint8_t>(L, 2);
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
window->choices.emplace_back(text, id);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowGetDefaultEnterButton(lua_State* L)
{
// modalWindow:getDefaultEnterButton()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
lua_pushnumber(L, window->defaultEnterButton);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowSetDefaultEnterButton(lua_State* L)
{
// modalWindow:setDefaultEnterButton(buttonId)
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
window->defaultEnterButton = getNumber<uint8_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowGetDefaultEscapeButton(lua_State* L)
{
// modalWindow:getDefaultEscapeButton()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
lua_pushnumber(L, window->defaultEscapeButton);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowSetDefaultEscapeButton(lua_State* L)
{
// modalWindow:setDefaultEscapeButton(buttonId)
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
window->defaultEscapeButton = getNumber<uint8_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowHasPriority(lua_State* L)
{
// modalWindow:hasPriority()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
pushBoolean(L, window->priority);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowSetPriority(lua_State* L)
{
// modalWindow:setPriority(priority)
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
window->priority = getBoolean(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowSendToPlayer(lua_State* L)
{
// modalWindow:sendToPlayer(player)
Player* player = getPlayer(L, 2);
if (!player) {
lua_pushnil(L);
return 1;
}
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
if (!player->hasModalWindowOpen(window->id)) {
player->sendModalWindow(*window);
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
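// Usage sketch (Lua, illustrative only; `player` is assumed to be a Player
// userdata available in the calling script). Builds a window with the
// bindings above and sends it; sendToPlayer skips players that already have
// a window with the same id open:
//   local window = ModalWindow(1000, "Quest", "Accept the quest?")
//   window:addButton(1, "Yes")
//   window:addButton(2, "No")
//   window:setDefaultEnterButton(1)
//   window:setDefaultEscapeButton(2)
//   window:sendToPlayer(player)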
// Item
int LuaScriptInterface::luaItemCreate(lua_State* L)
{
// Item(uid)
uint32_t id = getNumber<uint32_t>(L, 2);
Item* item = getScriptEnv()->getItemByUID(id);
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemIsItem(lua_State* L)
{
// item:isItem()
pushBoolean(L, getUserdata<const Item>(L, 1) != nullptr);
return 1;
}
int LuaScriptInterface::luaItemGetParent(lua_State* L)
{
// item:getParent()
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
Cylinder* parent = item->getParent();
if (!parent) {
lua_pushnil(L);
return 1;
}
pushCylinder(L, parent);
return 1;
}
int LuaScriptInterface::luaItemGetTopParent(lua_State* L)
{
// item:getTopParent()
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
Cylinder* topParent = item->getTopParent();
if (!topParent) {
lua_pushnil(L);
return 1;
}
pushCylinder(L, topParent);
return 1;
}
int LuaScriptInterface::luaItemGetId(lua_State* L)
{
// item:getId()
Item* item = getUserdata<Item>(L, 1);
if (item) {
lua_pushnumber(L, item->getID());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemClone(lua_State* L)
{
// item:clone()
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
Item* clone = item->clone();
if (!clone) {
lua_pushnil(L);
return 1;
}
getScriptEnv()->addTempItem(clone);
clone->setParent(VirtualCylinder::virtualCylinder);
pushUserdata<Item>(L, clone);
setItemMetatable(L, -1, clone);
return 1;
}
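// item:split clones the stack, gives the clone `count` items and shrinks the
// original to the remainder via g_game.transformItem; the raw userdata is
// re-pointed at the transformed item so the Lua reference stays valid.
// Usage sketch (Lua, illustrative; assumes `item` is a stackable Item
// userdata and `somePosition` is a position supplied by the calling script):
//   local half = item:split(math.floor(item:getCount() / 2))
//   if half then half:moveTo(somePosition) end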
int LuaScriptInterface::luaItemSplit(lua_State* L)
{
// item:split([count = 1])
Item** itemPtr = getRawUserdata<Item>(L, 1);
if (!itemPtr) {
lua_pushnil(L);
return 1;
}
Item* item = *itemPtr;
if (!item || !item->isStackable()) {
lua_pushnil(L);
return 1;
}
uint16_t count = std::min<uint16_t>(getNumber<uint16_t>(L, 2, 1), item->getItemCount());
uint16_t diff = item->getItemCount() - count;
Item* splitItem = item->clone();
if (!splitItem) {
lua_pushnil(L);
return 1;
}
splitItem->setItemCount(count);
ScriptEnvironment* env = getScriptEnv();
uint32_t uid = env->addThing(item);
Item* newItem = g_game.transformItem(item, item->getID(), diff);
if (item->isRemoved()) {
env->removeItemByUID(uid);
}
if (newItem && newItem != item) {
env->insertItem(uid, newItem);
}
*itemPtr = newItem;
splitItem->setParent(VirtualCylinder::virtualCylinder);
env->addTempItem(splitItem);
pushUserdata<Item>(L, splitItem);
setItemMetatable(L, -1, splitItem);
return 1;
}
int LuaScriptInterface::luaItemRemove(lua_State* L)
{
// item:remove([count = -1])
Item* item = getUserdata<Item>(L, 1);
if (item) {
int32_t count = getNumber<int32_t>(L, 2, -1);
pushBoolean(L, g_game.internalRemoveItem(item, count) == RETURNVALUE_NOERROR);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetUniqueId(lua_State* L)
{
// item:getUniqueId()
Item* item = getUserdata<Item>(L, 1);
if (item) {
uint32_t uniqueId = item->getUniqueId();
if (uniqueId == 0) {
uniqueId = getScriptEnv()->addThing(item);
}
lua_pushnumber(L, uniqueId);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetActionId(lua_State* L)
{
// item:getActionId()
Item* item = getUserdata<Item>(L, 1);
if (item) {
lua_pushnumber(L, item->getActionId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemSetActionId(lua_State* L)
{
// item:setActionId(actionId)
uint16_t actionId = getNumber<uint16_t>(L, 2);
Item* item = getUserdata<Item>(L, 1);
if (item) {
item->setActionId(actionId);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetCount(lua_State* L)
{
// item:getCount()
Item* item = getUserdata<Item>(L, 1);
if (item) {
lua_pushnumber(L, item->getItemCount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetCharges(lua_State* L)
{
// item:getCharges()
Item* item = getUserdata<Item>(L, 1);
if (item) {
lua_pushnumber(L, item->getCharges());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetFluidType(lua_State* L)
{
// item:getFluidType()
Item* item = getUserdata<Item>(L, 1);
if (item) {
lua_pushnumber(L, item->getFluidType());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetWeight(lua_State* L)
{
// item:getWeight()
Item* item = getUserdata<Item>(L, 1);
if (item) {
lua_pushnumber(L, item->getWeight());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetSubType(lua_State* L)
{
// item:getSubType()
Item* item = getUserdata<Item>(L, 1);
if (item) {
lua_pushnumber(L, item->getSubType());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetName(lua_State* L)
{
// item:getName()
Item* item = getUserdata<Item>(L, 1);
if (item) {
pushString(L, item->getName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetPluralName(lua_State* L)
{
// item:getPluralName()
Item* item = getUserdata<Item>(L, 1);
if (item) {
pushString(L, item->getPluralName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetArticle(lua_State* L)
{
// item:getArticle()
Item* item = getUserdata<Item>(L, 1);
if (item) {
pushString(L, item->getArticle());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetPosition(lua_State* L)
{
// item:getPosition()
Item* item = getUserdata<Item>(L, 1);
if (item) {
pushPosition(L, item->getPosition());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetTile(lua_State* L)
{
// item:getTile()
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
Tile* tile = item->getTile();
if (tile) {
pushUserdata<Tile>(L, tile);
setMetatable(L, -1, "Tile");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemHasAttribute(lua_State* L)
{
// item:hasAttribute(key)
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
itemAttrTypes attribute;
if (isNumber(L, 2)) {
attribute = getNumber<itemAttrTypes>(L, 2);
} else if (isString(L, 2)) {
attribute = stringToItemAttribute(getString(L, 2));
} else {
attribute = ITEM_ATTRIBUTE_NONE;
}
pushBoolean(L, item->hasAttribute(attribute));
return 1;
}
int LuaScriptInterface::luaItemGetAttribute(lua_State* L)
{
// item:getAttribute(key)
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
itemAttrTypes attribute;
if (isNumber(L, 2)) {
attribute = getNumber<itemAttrTypes>(L, 2);
} else if (isString(L, 2)) {
attribute = stringToItemAttribute(getString(L, 2));
} else {
attribute = ITEM_ATTRIBUTE_NONE;
}
if (ItemAttributes::isIntAttrType(attribute)) {
lua_pushnumber(L, item->getIntAttr(attribute));
} else if (ItemAttributes::isStrAttrType(attribute)) {
pushString(L, item->getStrAttr(attribute));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemSetAttribute(lua_State* L)
{
// item:setAttribute(key, value)
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
itemAttrTypes attribute;
if (isNumber(L, 2)) {
attribute = getNumber<itemAttrTypes>(L, 2);
} else if (isString(L, 2)) {
attribute = stringToItemAttribute(getString(L, 2));
} else {
attribute = ITEM_ATTRIBUTE_NONE;
}
if (ItemAttributes::isIntAttrType(attribute)) {
if (attribute == ITEM_ATTRIBUTE_UNIQUEID) {
reportErrorFunc("Attempt to set protected key \"uid\"");
pushBoolean(L, false);
return 1;
}
item->setIntAttr(attribute, getNumber<int32_t>(L, 3));
pushBoolean(L, true);
} else if (ItemAttributes::isStrAttrType(attribute)) {
item->setStrAttr(attribute, getString(L, 3));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
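// Attribute access sketch (Lua, illustrative; `item` is assumed to be an
// Item userdata). Keys may be itemAttrTypes numbers or strings resolved by
// stringToItemAttribute, and ITEM_ATTRIBUTE_UNIQUEID is protected against
// writes and removal:
//   if not item:hasAttribute("description") then
//       item:setAttribute("description", "a freshly labeled item")
//   end
//   local text = item:getAttribute("description")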
int LuaScriptInterface::luaItemRemoveAttribute(lua_State* L)
{
// item:removeAttribute(key)
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
itemAttrTypes attribute;
if (isNumber(L, 2)) {
attribute = getNumber<itemAttrTypes>(L, 2);
} else if (isString(L, 2)) {
attribute = stringToItemAttribute(getString(L, 2));
} else {
attribute = ITEM_ATTRIBUTE_NONE;
}
bool ret = attribute != ITEM_ATTRIBUTE_UNIQUEID;
if (ret) {
item->removeAttribute(attribute);
} else {
reportErrorFunc("Attempt to erase protected key \"uid\"");
}
pushBoolean(L, ret);
return 1;
}
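// item:moveTo resolves the destination to a Cylinder: Container, Player and
// Tile userdata are used directly, anything else is treated as a position
// and looked up on the map. When no flags are given, movement defaults to
// FLAG_NOLIMIT | FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE |
// FLAG_IGNORENOTMOVEABLE, i.e. the move bypasses the usual placement checks.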
int LuaScriptInterface::luaItemMoveTo(lua_State* L)
{
// item:moveTo(position or cylinder[, flags])
Item** itemPtr = getRawUserdata<Item>(L, 1);
if (!itemPtr) {
lua_pushnil(L);
return 1;
}
Item* item = *itemPtr;
if (!item || item->isRemoved()) {
lua_pushnil(L);
return 1;
}
Cylinder* toCylinder;
if (isUserdata(L, 2)) {
const LuaDataType type = getUserdataType(L, 2);
switch (type) {
case LuaData_Container:
toCylinder = getUserdata<Container>(L, 2);
break;
case LuaData_Player:
toCylinder = getUserdata<Player>(L, 2);
break;
case LuaData_Tile:
toCylinder = getUserdata<Tile>(L, 2);
break;
default:
toCylinder = nullptr;
break;
}
} else {
toCylinder = g_game.map.getTile(getPosition(L, 2));
}
if (!toCylinder) {
lua_pushnil(L);
return 1;
}
if (item->getParent() == toCylinder) {
pushBoolean(L, true);
return 1;
}
uint32_t flags = getNumber<uint32_t>(L, 3, FLAG_NOLIMIT | FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE | FLAG_IGNORENOTMOVEABLE);
if (item->getParent() == VirtualCylinder::virtualCylinder) {
pushBoolean(L, g_game.internalAddItem(toCylinder, item, INDEX_WHEREEVER, flags) == RETURNVALUE_NOERROR);
} else {
Item* moveItem = nullptr;
ReturnValue ret = g_game.internalMoveItem(item->getParent(), toCylinder, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem, flags);
if (moveItem) {
*itemPtr = moveItem;
}
pushBoolean(L, ret == RETURNVALUE_NOERROR);
}
return 1;
}
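// item:transform preserves the script uid across the transformation: the
// item is registered with the environment before g_game.transformItem runs,
// the stale entry is dropped if the old item was removed, and the uid is
// re-bound to the replacement item so scripts holding the uid keep a valid
// handle.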
int LuaScriptInterface::luaItemTransform(lua_State* L)
{
// item:transform(itemId[, count/subType = -1])
Item** itemPtr = getRawUserdata<Item>(L, 1);
if (!itemPtr) {
lua_pushnil(L);
return 1;
}
Item*& item = *itemPtr;
if (!item) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
int32_t subType = getNumber<int32_t>(L, 3, -1);
if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) {
pushBoolean(L, true);
return 1;
}
const ItemType& it = Item::items[itemId];
if (it.stackable) {
subType = std::min<int32_t>(subType, 100);
}
ScriptEnvironment* env = getScriptEnv();
uint32_t uid = env->addThing(item);
Item* newItem = g_game.transformItem(item, itemId, subType);
if (item->isRemoved()) {
env->removeItemByUID(uid);
}
if (newItem && newItem != item) {
env->insertItem(uid, newItem);
}
item = newItem;
pushBoolean(L, true);
return 1;
}
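// Note: when a decayId is passed, item:decay writes it into the shared
// ItemType (Item::items.getItemType), so the new decay target affects every
// item with the same id, not just this instance.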
int LuaScriptInterface::luaItemDecay(lua_State* L)
{
// item:decay(decayId)
Item* item = getUserdata<Item>(L, 1);
if (item) {
if (isNumber(L, 2)) {
ItemType& it = Item::items.getItemType(item->getID());
it.decayTo = getNumber<int32_t>(L, 2);
}
g_game.startDecay(item);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetDescription(lua_State* L)
{
// item:getDescription(distance)
Item* item = getUserdata<Item>(L, 1);
if (item) {
int32_t distance = getNumber<int32_t>(L, 2);
pushString(L, item->getDescription(distance));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemHasProperty(lua_State* L)
{
// item:hasProperty(property)
Item* item = getUserdata<Item>(L, 1);
if (item) {
ITEMPROPERTY property = getNumber<ITEMPROPERTY>(L, 2);
pushBoolean(L, item->hasProperty(property));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemIsLoadedFromMap(lua_State* L)
{
// item:isLoadedFromMap()
Item* item = getUserdata<Item>(L, 1);
if (item) {
pushBoolean(L, item->isLoadedFromMap());
} else {
lua_pushnil(L);
}
return 1;
}
// Container
int LuaScriptInterface::luaContainerCreate(lua_State* L)
{
// Container(uid)
uint32_t id = getNumber<uint32_t>(L, 2);
Container* container = getScriptEnv()->getContainerByUID(id);
if (container) {
pushUserdata(L, container);
setMetatable(L, -1, "Container");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaContainerGetSize(lua_State* L)
{
// container:getSize()
Container* container = getUserdata<Container>(L, 1);
if (container) {
lua_pushnumber(L, container->size());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaContainerGetCapacity(lua_State* L)
{
// container:getCapacity()
Container* container = getUserdata<Container>(L, 1);
if (container) {
lua_pushnumber(L, container->capacity());
} else {
lua_pushnil(L);
}
return 1;
}
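// container:getEmptySlots counts the free slots of this container and, when
// `recursive` is true, walks every nested container through
// ContainerIterator and adds their free slots as well.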
int LuaScriptInterface::luaContainerGetEmptySlots(lua_State* L)
{
// container:getEmptySlots([recursive = false])
Container* container = getUserdata<Container>(L, 1);
if (!container) {
lua_pushnil(L);
return 1;
}
uint32_t slots = container->capacity() - container->size();
bool recursive = getBoolean(L, 2, false);
if (recursive) {
for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) {
if (Container* tmpContainer = (*it)->getContainer()) {
slots += tmpContainer->capacity() - tmpContainer->size();
}
}
}
lua_pushnumber(L, slots);
return 1;
}
int LuaScriptInterface::luaContainerGetItemHoldingCount(lua_State* L)
{
// container:getItemHoldingCount()
Container* container = getUserdata<Container>(L, 1);
if (container) {
lua_pushnumber(L, container->getItemHoldingCount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaContainerGetItem(lua_State* L)
{
// container:getItem(index)
Container* container = getUserdata<Container>(L, 1);
if (!container) {
lua_pushnil(L);
return 1;
}
uint32_t index = getNumber<uint32_t>(L, 2);
Item* item = container->getItemByIndex(index);
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaContainerHasItem(lua_State* L)
{
// container:hasItem(item)
Item* item = getUserdata<Item>(L, 2);
Container* container = getUserdata<Container>(L, 1);
if (container) {
pushBoolean(L, container->isHoldingItem(item));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaContainerAddItem(lua_State* L)
{
// container:addItem(itemId[, count/subType = 1[, index = INDEX_WHEREEVER[, flags = 0]]])
Container* container = getUserdata<Container>(L, 1);
if (!container) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
uint32_t count = getNumber<uint32_t>(L, 3, 1);
const ItemType& it = Item::items[itemId];
if (it.stackable) {
count = std::min<uint32_t>(count, 100); // clamp without first truncating count to 16 bits
}
Item* item = Item::CreateItem(itemId, count);
if (!item) {
lua_pushnil(L);
return 1;
}
int32_t index = getNumber<int32_t>(L, 4, INDEX_WHEREEVER);
uint32_t flags = getNumber<uint32_t>(L, 5, 0);
ReturnValue ret = g_game.internalAddItem(container, item, index, flags);
if (ret == RETURNVALUE_NOERROR) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
delete item;
lua_pushnil(L);
}
return 1;
}
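// container:addItemEx only accepts items whose parent is
// VirtualCylinder::virtualCylinder (e.g. fresh clones from item:clone above
// that have not been placed in the world yet). Unlike container:addItem, it
// pushes the numeric ReturnValue rather than the added item, so scripts
// should compare the result against RETURNVALUE_NOERROR.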
int LuaScriptInterface::luaContainerAddItemEx(lua_State* L)
{
// container:addItemEx(item[, index = INDEX_WHEREEVER[, flags = 0]])
Item* item = getUserdata<Item>(L, 2);
if (!item) {
lua_pushnil(L);
return 1;
}
Container* container = getUserdata<Container>(L, 1);
if (!container) {
lua_pushnil(L);
return 1;
}
if (item->getParent() != VirtualCylinder::virtualCylinder) {
reportErrorFunc("Item already has a parent");
lua_pushnil(L);
return 1;
}
int32_t index = getNumber<int32_t>(L, 3, INDEX_WHEREEVER);
uint32_t flags = getNumber<uint32_t>(L, 4, 0);
ReturnValue ret = g_game.internalAddItem(container, item, index, flags);
if (ret == RETURNVALUE_NOERROR) {
ScriptEnvironment::removeTempItem(item);
}
lua_pushnumber(L, ret);
return 1;
}
int LuaScriptInterface::luaContainerGetItemCountById(lua_State* L)
{
// container:getItemCountById(itemId[, subType = -1])
Container* container = getUserdata<Container>(L, 1);
if (!container) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
int32_t subType = getNumber<int32_t>(L, 3, -1);
lua_pushnumber(L, container->getItemTypeCount(itemId, subType));
return 1;
}
// Teleport
int LuaScriptInterface::luaTeleportCreate(lua_State* L)
{
// Teleport(uid)
uint32_t id = getNumber<uint32_t>(L, 2);
Item* item = getScriptEnv()->getItemByUID(id);
if (item && item->getTeleport()) {
pushUserdata(L, item);
setMetatable(L, -1, "Teleport");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTeleportGetDestination(lua_State* L)
{
// teleport:getDestination()
Teleport* teleport = getUserdata<Teleport>(L, 1);
if (teleport) {
pushPosition(L, teleport->getDestPos());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTeleportSetDestination(lua_State* L)
{
// teleport:setDestination(position)
Teleport* teleport = getUserdata<Teleport>(L, 1);
if (teleport) {
teleport->setDestPos(getPosition(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
// Creature
int LuaScriptInterface::luaCreatureCreate(lua_State* L)
{
// Creature(id or name or userdata)
Creature* creature;
if (isNumber(L, 2)) {
creature = g_game.getCreatureByID(getNumber<uint32_t>(L, 2));
} else if (isString(L, 2)) {
creature = g_game.getCreatureByName(getString(L, 2));
} else if (isUserdata(L, 2)) {
LuaDataType type = getUserdataType(L, 2);
if (type != LuaData_Player && type != LuaData_Monster && type != LuaData_Npc) {
lua_pushnil(L);
return 1;
}
creature = getUserdata<Creature>(L, 2);
} else {
creature = nullptr;
}
if (creature) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetEvents(lua_State* L)
{
// creature:getEvents(type)
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
CreatureEventType_t eventType = getNumber<CreatureEventType_t>(L, 2);
const auto& eventList = creature->getCreatureEvents(eventType);
lua_createtable(L, eventList.size(), 0);
int index = 0;
for (CreatureEvent* event : eventList) {
pushString(L, event->getName());
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaCreatureRegisterEvent(lua_State* L)
{
// creature:registerEvent(name)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
const std::string& name = getString(L, 2);
pushBoolean(L, creature->registerCreatureEvent(name));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureUnregisterEvent(lua_State* L)
{
// creature:unregisterEvent(name)
const std::string& name = getString(L, 2);
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
pushBoolean(L, creature->unregisterCreatureEvent(name));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureIsRemoved(lua_State* L)
{
// creature:isRemoved()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
pushBoolean(L, creature->isRemoved());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureIsCreature(lua_State* L)
{
// creature:isCreature()
pushBoolean(L, getUserdata<const Creature>(L, 1) != nullptr);
return 1;
}
int LuaScriptInterface::luaCreatureIsInGhostMode(lua_State* L)
{
// creature:isInGhostMode()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
pushBoolean(L, creature->isInGhostMode());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureIsHealthHidden(lua_State* L)
{
// creature:isHealthHidden()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
pushBoolean(L, creature->isHealthHidden());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureCanSee(lua_State* L)
{
// creature:canSee(position)
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
const Position& position = getPosition(L, 2);
pushBoolean(L, creature->canSee(position));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureCanSeeCreature(lua_State* L)
{
// creature:canSeeCreature(creature)
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
const Creature* otherCreature = getCreature(L, 2);
pushBoolean(L, creature->canSeeCreature(otherCreature));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetParent(lua_State* L)
{
// creature:getParent()
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
Cylinder* parent = creature->getParent();
if (!parent) {
lua_pushnil(L);
return 1;
}
pushCylinder(L, parent);
return 1;
}
int LuaScriptInterface::luaCreatureGetId(lua_State* L)
{
// creature:getId()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getID());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetName(lua_State* L)
{
// creature:getName()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
pushString(L, creature->getName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetTarget(lua_State* L)
{
// creature:getTarget()
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
Creature* target = creature->getAttackedCreature();
if (target) {
pushUserdata<Creature>(L, target);
setCreatureMetatable(L, -1, target);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetTarget(lua_State* L)
{
// creature:setTarget(target)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
Creature* target = getCreature(L, 2);
pushBoolean(L, creature->setAttackedCreature(target));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetFollowCreature(lua_State* L)
{
// creature:getFollowCreature()
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
Creature* followCreature = creature->getFollowCreature();
if (followCreature) {
pushUserdata<Creature>(L, followCreature);
setCreatureMetatable(L, -1, followCreature);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetFollowCreature(lua_State* L)
{
// creature:setFollowCreature(followedCreature)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
Creature* followCreature = getCreature(L, 2);
pushBoolean(L, creature->setFollowCreature(followCreature));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetMaster(lua_State* L)
{
// creature:getMaster()
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
Creature* master = creature->getMaster();
if (!master) {
lua_pushnil(L);
return 1;
}
pushUserdata<Creature>(L, master);
setCreatureMetatable(L, -1, master);
return 1;
}
int LuaScriptInterface::luaCreatureSetMaster(lua_State* L)
{
// creature:setMaster(master)
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
pushBoolean(L, creature->setMaster(getCreature(L, 2)));
g_game.updateCreatureType(creature);
return 1;
}
int LuaScriptInterface::luaCreatureGetLight(lua_State* L)
{
// creature:getLight()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
LightInfo lightInfo = creature->getCreatureLight();
lua_pushnumber(L, lightInfo.level);
lua_pushnumber(L, lightInfo.color);
return 2;
}
int LuaScriptInterface::luaCreatureSetLight(lua_State* L)
{
// creature:setLight(color, level)
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
LightInfo light;
light.color = getNumber<uint8_t>(L, 2);
light.level = getNumber<uint8_t>(L, 3);
creature->setCreatureLight(light);
g_game.changeLight(creature);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCreatureGetSpeed(lua_State* L)
{
// creature:getSpeed()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getSpeed());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetBaseSpeed(lua_State* L)
{
// creature:getBaseSpeed()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getBaseSpeed());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureChangeSpeed(lua_State* L)
{
// creature:changeSpeed(delta)
Creature* creature = getCreature(L, 1);
if (!creature) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
int32_t delta = getNumber<int32_t>(L, 2);
g_game.changeSpeed(creature, delta);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCreatureSetDropLoot(lua_State* L)
{
// creature:setDropLoot(doDrop)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
creature->setDropLoot(getBoolean(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetSkillLoss(lua_State* L)
{
// creature:setSkillLoss(skillLoss)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
creature->setSkillLoss(getBoolean(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetPosition(lua_State* L)
{
// creature:getPosition()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
pushPosition(L, creature->getPosition());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetTile(lua_State* L)
{
// creature:getTile()
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
Tile* tile = creature->getTile();
if (tile) {
pushUserdata<Tile>(L, tile);
setMetatable(L, -1, "Tile");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetDirection(lua_State* L)
{
// creature:getDirection()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getDirection());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetDirection(lua_State* L)
{
// creature:setDirection(direction)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
pushBoolean(L, g_game.internalCreatureTurn(creature, getNumber<Direction>(L, 2)));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetHealth(lua_State* L)
{
// creature:getHealth()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getHealth());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureAddHealth(lua_State* L)
{
// creature:addHealth(healthChange)
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
CombatDamage damage;
damage.primary.value = getNumber<int32_t>(L, 2);
if (damage.primary.value >= 0) {
damage.primary.type = COMBAT_HEALING;
} else {
damage.primary.type = COMBAT_UNDEFINEDDAMAGE;
}
pushBoolean(L, g_game.combatChangeHealth(nullptr, creature, damage));
return 1;
}
int LuaScriptInterface::luaCreatureGetMaxHealth(lua_State* L)
{
// creature:getMaxHealth()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getMaxHealth());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetMaxHealth(lua_State* L)
{
// creature:setMaxHealth(maxHealth)
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
creature->healthMax = getNumber<uint32_t>(L, 2);
creature->health = std::min<int32_t>(creature->health, creature->healthMax);
g_game.addCreatureHealth(creature);
Player* player = creature->getPlayer();
if (player) {
player->sendStats();
}
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCreatureSetHiddenHealth(lua_State* L)
{
// creature:setHiddenHealth(hide)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
creature->setHiddenHealth(getBoolean(L, 2));
g_game.addCreatureHealth(creature);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetSkull(lua_State* L)
{
// creature:getSkull()
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getSkull());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetSkull(lua_State* L)
{
// creature:setSkull(skull)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
creature->setSkull(getNumber<Skulls_t>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetOutfit(lua_State* L)
{
// creature:getOutfit()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
pushOutfit(L, creature->getCurrentOutfit());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetOutfit(lua_State* L)
{
// creature:setOutfit(outfit)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
creature->defaultOutfit = getOutfit(L, 2);
g_game.internalCreatureChangeOutfit(creature, creature->defaultOutfit);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetCondition(lua_State* L)
{
// creature:getCondition(conditionType[, conditionId = CONDITIONID_COMBAT[, subId = 0]])
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
ConditionType_t conditionType = getNumber<ConditionType_t>(L, 2);
ConditionId_t conditionId = getNumber<ConditionId_t>(L, 3, CONDITIONID_COMBAT);
uint32_t subId = getNumber<uint32_t>(L, 4, 0);
Condition* condition = creature->getCondition(conditionType, conditionId, subId);
if (condition) {
pushUserdata<Condition>(L, condition);
setWeakMetatable(L, -1, "Condition");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureAddCondition(lua_State* L)
{
// creature:addCondition(condition[, force = false])
Creature* creature = getUserdata<Creature>(L, 1);
Condition* condition = getUserdata<Condition>(L, 2);
if (creature && condition) {
bool force = getBoolean(L, 3, false);
pushBoolean(L, creature->addCondition(condition->clone(), force));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureRemoveCondition(lua_State* L)
{
// creature:removeCondition(conditionType[, conditionId = CONDITIONID_COMBAT[, subId = 0[, force = false]]])
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
ConditionType_t conditionType = getNumber<ConditionType_t>(L, 2);
ConditionId_t conditionId = getNumber<ConditionId_t>(L, 3, CONDITIONID_COMBAT);
uint32_t subId = getNumber<uint32_t>(L, 4, 0);
Condition* condition = creature->getCondition(conditionType, conditionId, subId);
if (condition) {
bool force = getBoolean(L, 5, false);
creature->removeCondition(condition, force);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureHasCondition(lua_State* L)
{
// creature:hasCondition(conditionType[, subId = 0])
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
ConditionType_t conditionType = getNumber<ConditionType_t>(L, 2);
uint32_t subId = getNumber<uint32_t>(L, 3, 0);
pushBoolean(L, creature->hasCondition(conditionType, subId));
return 1;
}
int LuaScriptInterface::luaCreatureIsImmune(lua_State* L)
{
// creature:isImmune(condition or conditionType)
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
if (isNumber(L, 2)) {
pushBoolean(L, creature->isImmune(getNumber<ConditionType_t>(L, 2)));
} else if (Condition* condition = getUserdata<Condition>(L, 2)) {
pushBoolean(L, creature->isImmune(condition->getType()));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureRemove(lua_State* L)
{
// creature:remove()
Creature** creaturePtr = getRawUserdata<Creature>(L, 1);
if (!creaturePtr) {
lua_pushnil(L);
return 1;
}
Creature* creature = *creaturePtr;
if (!creature) {
lua_pushnil(L);
return 1;
}
Player* player = creature->getPlayer();
if (player) {
player->kickPlayer(true);
} else {
g_game.removeCreature(creature);
}
*creaturePtr = nullptr;
pushBoolean(L, true);
return 1;
}
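// creature:teleportTo optionally emulates a walk: when pushMovement is true,
// the creature is turned to face the direction of travel after a successful
// teleport (south/north for a purely vertical move, west/east otherwise).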
int LuaScriptInterface::luaCreatureTeleportTo(lua_State* L)
{
// creature:teleportTo(position[, pushMovement = false])
bool pushMovement = getBoolean(L, 3, false);
const Position& position = getPosition(L, 2);
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
const Position oldPosition = creature->getPosition();
if (g_game.internalTeleport(creature, position, pushMovement) != RETURNVALUE_NOERROR) {
pushBoolean(L, false);
return 1;
}
if (pushMovement) {
if (oldPosition.x == position.x) {
if (oldPosition.y < position.y) {
g_game.internalCreatureTurn(creature, DIRECTION_SOUTH);
} else {
g_game.internalCreatureTurn(creature, DIRECTION_NORTH);
}
} else if (oldPosition.x > position.x) {
g_game.internalCreatureTurn(creature, DIRECTION_WEST);
} else if (oldPosition.x < position.x) {
g_game.internalCreatureTurn(creature, DIRECTION_EAST);
}
}
pushBoolean(L, true);
return 1;
}
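// creature:say broadcasts through g_game.internalCreatureSay. A target
// creature narrows the spectator set to that single receiver, and an
// explicit position (parameter 6) makes the message originate there instead
// of at the creature; a position with a zero coordinate is rejected as
// invalid.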
int LuaScriptInterface::luaCreatureSay(lua_State* L)
{
// creature:say(text, type[, ghost = false[, target = nullptr[, position]]])
int parameters = lua_gettop(L);
Position position;
if (parameters >= 6) {
position = getPosition(L, 6);
if (!position.x || !position.y) {
reportErrorFunc("Invalid position specified.");
pushBoolean(L, false);
return 1;
}
}
Creature* target = nullptr;
if (parameters >= 5) {
target = getCreature(L, 5);
}
bool ghost = getBoolean(L, 4, false);
SpeakClasses type = getNumber<SpeakClasses>(L, 3);
const std::string& text = getString(L, 2);
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
SpectatorHashSet spectators;
if (target) {
spectators.insert(target);
}
if (position.x != 0) {
pushBoolean(L, g_game.internalCreatureSay(creature, type, text, ghost, &spectators, &position));
} else {
pushBoolean(L, g_game.internalCreatureSay(creature, type, text, ghost, &spectators));
}
return 1;
}
int LuaScriptInterface::luaCreatureGetDamageMap(lua_State* L)
{
// creature:getDamageMap()
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
lua_createtable(L, creature->damageMap.size(), 0);
for (const auto& damageEntry : creature->damageMap) {
lua_createtable(L, 0, 2);
setField(L, "total", damageEntry.second.total);
setField(L, "ticks", damageEntry.second.ticks);
lua_rawseti(L, -2, damageEntry.first);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetSummons(lua_State* L)
{
// creature:getSummons()
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
lua_createtable(L, creature->getSummonCount(), 0);
int index = 0;
for (Creature* summon : creature->getSummons()) {
pushUserdata<Creature>(L, summon);
setCreatureMetatable(L, -1, summon);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetDescription(lua_State* L)
{
// creature:getDescription(distance)
int32_t distance = getNumber<int32_t>(L, 2);
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
pushString(L, creature->getDescription(distance));
} else {
lua_pushnil(L);
}
return 1;
}
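// creature:getPathTo wraps Creature::getPathTo with FindPathParams defaults
// (minTargetDist 0, maxTargetDist 1) and returns a 1-based array of
// Direction values on success, or false when no path exists.
// Usage sketch (Lua, illustrative; `creature` and `targetPos` are assumed to
// come from the calling script):
//   local dirs = creature:getPathTo(targetPos, 0, 1)
//   if dirs then
//       for _, dir in ipairs(dirs) do creature:move(dir) end
//   end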
int LuaScriptInterface::luaCreatureGetPathTo(lua_State* L)
{
// creature:getPathTo(pos[, minTargetDist = 0[, maxTargetDist = 1[, fullPathSearch = true[, clearSight = true[, maxSearchDist = 0]]]]])
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
const Position& position = getPosition(L, 2);
FindPathParams fpp;
fpp.minTargetDist = getNumber<int32_t>(L, 3, 0);
fpp.maxTargetDist = getNumber<int32_t>(L, 4, 1);
fpp.fullPathSearch = getBoolean(L, 5, fpp.fullPathSearch);
fpp.clearSight = getBoolean(L, 6, fpp.clearSight);
fpp.maxSearchDist = getNumber<int32_t>(L, 7, fpp.maxSearchDist);
std::forward_list<Direction> dirList;
if (creature->getPathTo(position, dirList, fpp)) {
lua_newtable(L);
int index = 0;
for (Direction dir : dirList) {
lua_pushnumber(L, dir);
lua_rawseti(L, -2, ++index);
}
} else {
pushBoolean(L, false);
}
return 1;
}
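// creature:move has two forms: with a numeric Direction it performs a forced
// step (FLAG_NOLIMIT) and rejects values above DIRECTION_LAST; with a Tile
// userdata it moves the creature onto that tile using the optional flags.
// Both forms push the ReturnValue from g_game.internalMoveCreature.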
int LuaScriptInterface::luaCreatureMove(lua_State* L)
{
// creature:move(direction)
// creature:move(tile[, flags = 0])
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
if (isNumber(L, 2)) {
Direction direction = getNumber<Direction>(L, 2);
if (direction > DIRECTION_LAST) {
lua_pushnil(L);
return 1;
}
lua_pushnumber(L, g_game.internalMoveCreature(creature, direction, FLAG_NOLIMIT));
} else {
Tile* tile = getUserdata<Tile>(L, 2);
if (!tile) {
lua_pushnil(L);
return 1;
}
lua_pushnumber(L, g_game.internalMoveCreature(*creature, *tile, getNumber<uint32_t>(L, 3)));
}
return 1;
}
// Player
int LuaScriptInterface::luaPlayerCreate(lua_State* L)
{
// Player(id or name or userdata)
Player* player;
if (isNumber(L, 2)) {
player = g_game.getPlayerByID(getNumber<uint32_t>(L, 2));
} else if (isString(L, 2)) {
ReturnValue ret = g_game.getPlayerByNameWildcard(getString(L, 2), player);
if (ret != RETURNVALUE_NOERROR) {
lua_pushnil(L);
lua_pushnumber(L, ret);
return 2;
}
} else if (isUserdata(L, 2)) {
if (getUserdataType(L, 2) != LuaData_Player) {
lua_pushnil(L);
return 1;
}
player = getUserdata<Player>(L, 2);
} else {
player = nullptr;
}
if (player) {
pushUserdata<Player>(L, player);
setMetatable(L, -1, "Player");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerIsPlayer(lua_State* L)
{
// player:isPlayer()
pushBoolean(L, getUserdata<const Player>(L, 1) != nullptr);
return 1;
}
int LuaScriptInterface::luaPlayerGetGuid(lua_State* L)
{
// player:getGuid()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getGUID());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetIp(lua_State* L)
{
// player:getIp()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getIP());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetAccountId(lua_State* L)
{
// player:getAccountId()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getAccount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetLastLoginSaved(lua_State* L)
{
// player:getLastLoginSaved()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getLastLoginSaved());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetLastLogout(lua_State* L)
{
// player:getLastLogout()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getLastLogout());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetAccountType(lua_State* L)
{
// player:getAccountType()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getAccountType());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetAccountType(lua_State* L)
{
// player:setAccountType(accountType)
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->accountType = getNumber<AccountType_t>(L, 2);
IOLoginData::setAccountType(player->getAccount(), player->accountType);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetCapacity(lua_State* L)
{
// player:getCapacity()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getCapacity());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetCapacity(lua_State* L)
{
// player:setCapacity(capacity)
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->capacity = getNumber<uint32_t>(L, 2);
player->sendStats();
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetFreeCapacity(lua_State* L)
{
// player:getFreeCapacity()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getFreeCapacity());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetDepotChest(lua_State* L)
{
// player:getDepotChest(depotId[, autoCreate = false])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint32_t depotId = getNumber<uint32_t>(L, 2);
bool autoCreate = getBoolean(L, 3, false);
DepotChest* depotChest = player->getDepotChest(depotId, autoCreate);
if (depotChest) {
pushUserdata<Item>(L, depotChest);
setItemMetatable(L, -1, depotChest);
} else {
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetInbox(lua_State* L)
{
// player:getInbox()
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Inbox* inbox = player->getInbox();
if (inbox) {
pushUserdata<Item>(L, inbox);
setItemMetatable(L, -1, inbox);
} else {
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetSkullTime(lua_State* L)
{
// player:getSkullTime()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getSkullTicks());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetSkullTime(lua_State* L)
{
// player:setSkullTime(skullTime)
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->setSkullTicks(getNumber<int64_t>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetDeathPenalty(lua_State* L)
{
// player:getDeathPenalty()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, static_cast<uint32_t>(player->getLostPercent() * 100));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetExperience(lua_State* L)
{
// player:getExperience()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getExperience());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddExperience(lua_State* L)
{
// player:addExperience(experience[, sendText = false])
Player* player = getUserdata<Player>(L, 1);
if (player) {
int64_t experience = getNumber<int64_t>(L, 2);
bool sendText = getBoolean(L, 3, false);
player->addExperience(nullptr, experience, sendText);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerRemoveExperience(lua_State* L)
{
// player:removeExperience(experience[, sendText = false])
Player* player = getUserdata<Player>(L, 1);
if (player) {
int64_t experience = getNumber<int64_t>(L, 2);
bool sendText = getBoolean(L, 3, false);
player->removeExperience(experience, sendText);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetLevel(lua_State* L)
{
// player:getLevel()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getLevel());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetMagicLevel(lua_State* L)
{
// player:getMagicLevel()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getMagicLevel());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetBaseMagicLevel(lua_State* L)
{
// player:getBaseMagicLevel()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getBaseMagicLevel());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetMana(lua_State* L)
{
// player:getMana()
const Player* player = getUserdata<const Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getMana());
} else {
lua_pushnil(L);
}
return 1;
}
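// player:addMana applies silent mana drain directly through changeMana when
// the change is negative and animationOnLoss is false; otherwise it routes
// the change through g_game.combatChangeMana so the client gets the usual
// combat feedback.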
int LuaScriptInterface::luaPlayerAddMana(lua_State* L)
{
// player:addMana(manaChange[, animationOnLoss = false])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
int32_t manaChange = getNumber<int32_t>(L, 2);
bool animationOnLoss = getBoolean(L, 3, false);
if (!animationOnLoss && manaChange < 0) {
player->changeMana(manaChange);
} else {
CombatDamage damage;
damage.primary.value = manaChange;
damage.origin = ORIGIN_NONE;
g_game.combatChangeMana(nullptr, player, damage);
}
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerGetMaxMana(lua_State* L)
{
// player:getMaxMana()
const Player* player = getUserdata<const Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getMaxMana());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetMaxMana(lua_State* L)
{
// player:setMaxMana(maxMana)
Player* player = getPlayer(L, 1);
if (player) {
player->manaMax = getNumber<int32_t>(L, 2);
player->mana = std::min<int32_t>(player->mana, player->manaMax);
player->sendStats();
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetManaSpent(lua_State* L)
{
// player:getManaSpent()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getSpentMana());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddManaSpent(lua_State* L)
{
// player:addManaSpent(amount)
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->addManaSpent(getNumber<uint64_t>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetBaseMaxHealth(lua_State* L)
{
// player:getBaseMaxHealth()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->healthMax);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetBaseMaxMana(lua_State* L)
{
// player:getBaseMaxMana()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->manaMax);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetSkillLevel(lua_State* L)
{
// player:getSkillLevel(skillType)
skills_t skillType = getNumber<skills_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player && skillType <= SKILL_LAST) {
lua_pushnumber(L, player->skills[skillType].level);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetEffectiveSkillLevel(lua_State* L)
{
// player:getEffectiveSkillLevel(skillType)
skills_t skillType = getNumber<skills_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player && skillType <= SKILL_LAST) {
lua_pushnumber(L, player->getSkillLevel(skillType));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetSkillPercent(lua_State* L)
{
// player:getSkillPercent(skillType)
skills_t skillType = getNumber<skills_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player && skillType <= SKILL_LAST) {
lua_pushnumber(L, player->skills[skillType].percent);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetSkillTries(lua_State* L)
{
// player:getSkillTries(skillType)
skills_t skillType = getNumber<skills_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player && skillType <= SKILL_LAST) {
lua_pushnumber(L, player->skills[skillType].tries);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddSkillTries(lua_State* L)
{
// player:addSkillTries(skillType, tries)
Player* player = getUserdata<Player>(L, 1);
if (player) {
skills_t skillType = getNumber<skills_t>(L, 2);
uint64_t tries = getNumber<uint64_t>(L, 3);
player->addSkillAdvance(skillType, tries);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddOfflineTrainingTime(lua_State* L)
{
// player:addOfflineTrainingTime(time)
Player* player = getUserdata<Player>(L, 1);
if (player) {
int32_t time = getNumber<int32_t>(L, 2);
player->addOfflineTrainingTime(time);
player->sendStats();
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetOfflineTrainingTime(lua_State* L)
{
// player:getOfflineTrainingTime()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getOfflineTrainingTime());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerRemoveOfflineTrainingTime(lua_State* L)
{
// player:removeOfflineTrainingTime(time)
Player* player = getUserdata<Player>(L, 1);
if (player) {
int32_t time = getNumber<int32_t>(L, 2);
player->removeOfflineTrainingTime(time);
player->sendStats();
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddOfflineTrainingTries(lua_State* L)
{
// player:addOfflineTrainingTries(skillType, tries)
Player* player = getUserdata<Player>(L, 1);
if (player) {
skills_t skillType = getNumber<skills_t>(L, 2);
uint64_t tries = getNumber<uint64_t>(L, 3);
pushBoolean(L, player->addOfflineTrainingTries(skillType, tries));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetOfflineTrainingSkill(lua_State* L)
{
// player:getOfflineTrainingSkill()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getOfflineTrainingSkill());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetOfflineTrainingSkill(lua_State* L)
{
// player:setOfflineTrainingSkill(skillId)
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint32_t skillId = getNumber<uint32_t>(L, 2);
player->setOfflineTrainingSkill(skillId);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetItemCount(lua_State* L)
{
// player:getItemCount(itemId[, subType = -1])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
int32_t subType = getNumber<int32_t>(L, 3, -1);
lua_pushnumber(L, player->getItemTypeCount(itemId, subType));
return 1;
}
int LuaScriptInterface::luaPlayerGetItemById(lua_State* L)
{
// player:getItemById(itemId, deepSearch[, subType = -1])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
bool deepSearch = getBoolean(L, 3);
int32_t subType = getNumber<int32_t>(L, 4, -1);
Item* item = g_game.findItemOfType(player, itemId, deepSearch, subType);
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetVocation(lua_State* L)
{
// player:getVocation()
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushUserdata<Vocation>(L, player->getVocation());
setMetatable(L, -1, "Vocation");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetVocation(lua_State* L)
{
// player:setVocation(id or name or userdata)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Vocation* vocation;
if (isNumber(L, 2)) {
vocation = g_vocations.getVocation(getNumber<uint16_t>(L, 2));
} else if (isString(L, 2)) {
vocation = g_vocations.getVocation(g_vocations.getVocationId(getString(L, 2)));
} else if (isUserdata(L, 2)) {
vocation = getUserdata<Vocation>(L, 2);
} else {
vocation = nullptr;
}
if (!vocation) {
pushBoolean(L, false);
return 1;
}
player->setVocation(vocation->getId());
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerGetSex(lua_State* L)
{
// player:getSex()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getSex());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetSex(lua_State* L)
{
// player:setSex(newSex)
Player* player = getUserdata<Player>(L, 1);
if (player) {
PlayerSex_t newSex = getNumber<PlayerSex_t>(L, 2);
player->setSex(newSex);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetTown(lua_State* L)
{
// player:getTown()
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushUserdata<Town>(L, player->getTown());
setMetatable(L, -1, "Town");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetTown(lua_State* L)
{
// player:setTown(town)
Town* town = getUserdata<Town>(L, 2);
if (!town) {
pushBoolean(L, false);
return 1;
}
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->setTown(town);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetGuild(lua_State* L)
{
// player:getGuild()
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Guild* guild = player->getGuild();
if (!guild) {
lua_pushnil(L);
return 1;
}
pushUserdata<Guild>(L, guild);
setMetatable(L, -1, "Guild");
return 1;
}
int LuaScriptInterface::luaPlayerSetGuild(lua_State* L)
{
// player:setGuild(guild)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
player->setGuild(getUserdata<Guild>(L, 2));
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerGetGuildLevel(lua_State* L)
{
// player:getGuildLevel()
Player* player = getUserdata<Player>(L, 1);
if (player && player->getGuild()) {
lua_pushnumber(L, player->getGuildRank()->level);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetGuildLevel(lua_State* L)
{
// player:setGuildLevel(level)
uint8_t level = getNumber<uint8_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (!player || !player->getGuild()) {
lua_pushnil(L);
return 1;
}
const GuildRank* rank = player->getGuild()->getRankByLevel(level);
if (!rank) {
pushBoolean(L, false);
} else {
player->setGuildRank(rank);
pushBoolean(L, true);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetGuildNick(lua_State* L)
{
// player:getGuildNick()
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushString(L, player->getGuildNick());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetGuildNick(lua_State* L)
{
// player:setGuildNick(nick)
const std::string& nick = getString(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->setGuildNick(nick);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetGroup(lua_State* L)
{
// player:getGroup()
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushUserdata<Group>(L, player->getGroup());
setMetatable(L, -1, "Group");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetGroup(lua_State* L)
{
// player:setGroup(group)
Group* group = getUserdata<Group>(L, 2);
if (!group) {
pushBoolean(L, false);
return 1;
}
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->setGroup(group);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetStamina(lua_State* L)
{
// player:getStamina()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getStaminaMinutes());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetStamina(lua_State* L)
{
// player:setStamina(stamina)
uint16_t stamina = getNumber<uint16_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->staminaMinutes = std::min<uint16_t>(2520, stamina);
player->sendStats();
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetSoul(lua_State* L)
{
// player:getSoul()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getSoul());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddSoul(lua_State* L)
{
// player:addSoul(soulChange)
int32_t soulChange = getNumber<int32_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->changeSoul(soulChange);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetMaxSoul(lua_State* L)
{
// player:getMaxSoul()
Player* player = getUserdata<Player>(L, 1);
if (player && player->vocation) {
lua_pushnumber(L, player->vocation->getSoulMax());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetBankBalance(lua_State* L)
{
// player:getBankBalance()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getBankBalance());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetBankBalance(lua_State* L)
{
// player:setBankBalance(bankBalance)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
int64_t balance = getNumber<int64_t>(L, 2);
if (balance < 0) {
reportErrorFunc("Invalid bank balance value.");
lua_pushnil(L);
return 1;
}
player->setBankBalance(balance);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerGetStorageValue(lua_State* L)
{
// player:getStorageValue(key)
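// Note: pushes -1 when the key has never been set, so scripts can treat -1
// as "unset" instead of checking for nil.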
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint32_t key = getNumber<uint32_t>(L, 2);
int32_t value;
if (player->getStorageValue(key, value)) {
lua_pushnumber(L, value);
} else {
lua_pushnumber(L, -1);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetStorageValue(lua_State* L)
{
// player:setStorageValue(key, value)
int32_t value = getNumber<int32_t>(L, 3);
uint32_t key = getNumber<uint32_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (IS_IN_KEYRANGE(key, RESERVED_RANGE)) {
std::ostringstream ss;
ss << "Accessing reserved range: " << key;
reportErrorFunc(ss.str());
pushBoolean(L, false);
return 1;
}
if (player) {
player->addStorageValue(key, value);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddItem(lua_State* L)
{
// player:addItem(itemId[, count = 1[, canDropOnMap = true[, subType = 1[, slot = CONST_SLOT_WHEREEVER]]]])
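// Behavior note: for a stackable item with no explicit subType argument, the
// count is treated as a total amount and split into stacks of up to 100; when
// more than one item results, a table of the created items is returned.
// Example (Lua, illustrative):
//   local stacks = player:addItem(2152, 250) -- assumed stackable id; yields 100/100/50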
Player* player = getUserdata<Player>(L, 1);
if (!player) {
pushBoolean(L, false);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
int32_t count = getNumber<int32_t>(L, 3, 1);
int32_t subType = getNumber<int32_t>(L, 5, 1);
const ItemType& it = Item::items[itemId];
int32_t itemCount = 1;
int parameters = lua_gettop(L);
if (parameters >= 4) {
itemCount = std::max<int32_t>(1, count);
} else if (it.hasSubType()) {
if (it.stackable) {
itemCount = std::ceil(count / 100.f);
}
subType = count;
} else {
itemCount = std::max<int32_t>(1, count);
}
bool hasTable = itemCount > 1;
if (hasTable) {
lua_newtable(L);
} else if (itemCount == 0) {
lua_pushnil(L);
return 1;
}
bool canDropOnMap = getBoolean(L, 4, true);
slots_t slot = getNumber<slots_t>(L, 6, CONST_SLOT_WHEREEVER);
for (int32_t i = 1; i <= itemCount; ++i) {
int32_t stackCount = subType;
if (it.stackable) {
stackCount = std::min<int32_t>(stackCount, 100);
subType -= stackCount;
}
Item* item = Item::CreateItem(itemId, stackCount);
if (!item) {
if (!hasTable) {
lua_pushnil(L);
}
return 1;
}
ReturnValue ret = g_game.internalPlayerAddItem(player, item, canDropOnMap, slot);
if (ret != RETURNVALUE_NOERROR) {
delete item;
if (!hasTable) {
lua_pushnil(L);
}
return 1;
}
if (hasTable) {
lua_pushnumber(L, i);
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
lua_settable(L, -3);
} else {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
}
}
return 1;
}
int LuaScriptInterface::luaPlayerAddItemEx(lua_State* L)
{
// player:addItemEx(item[, canDropOnMap = false[, index = INDEX_WHEREEVER[, flags = 0]]])
// player:addItemEx(item[, canDropOnMap = true[, slot = CONST_SLOT_WHEREEVER]])
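// The boolean at argument 3 selects the overload: true routes through
// g_game.internalPlayerAddItem with a slot, false through g_game.internalAddItem
// with index/flags. Either way the raw ReturnValue is pushed, not a boolean.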
Item* item = getUserdata<Item>(L, 2);
if (!item) {
reportErrorFunc(getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
if (item->getParent() != VirtualCylinder::virtualCylinder) {
reportErrorFunc("Item already has a parent");
pushBoolean(L, false);
return 1;
}
bool canDropOnMap = getBoolean(L, 3, false);
ReturnValue returnValue;
if (canDropOnMap) {
slots_t slot = getNumber<slots_t>(L, 4, CONST_SLOT_WHEREEVER);
returnValue = g_game.internalPlayerAddItem(player, item, true, slot);
} else {
int32_t index = getNumber<int32_t>(L, 4, INDEX_WHEREEVER);
uint32_t flags = getNumber<uint32_t>(L, 5, 0);
returnValue = g_game.internalAddItem(player, item, index, flags);
}
if (returnValue == RETURNVALUE_NOERROR) {
ScriptEnvironment::removeTempItem(item);
}
lua_pushnumber(L, returnValue);
return 1;
}
int LuaScriptInterface::luaPlayerRemoveItem(lua_State* L)
{
// player:removeItem(itemId, count[, subType = -1[, ignoreEquipped = false]])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
uint32_t count = getNumber<uint32_t>(L, 3);
int32_t subType = getNumber<int32_t>(L, 4, -1);
bool ignoreEquipped = getBoolean(L, 5, false);
pushBoolean(L, player->removeItemOfType(itemId, count, subType, ignoreEquipped));
return 1;
}
int LuaScriptInterface::luaPlayerGetMoney(lua_State* L)
{
// player:getMoney()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getMoney());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddMoney(lua_State* L)
{
// player:addMoney(money)
uint64_t money = getNumber<uint64_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player) {
g_game.addMoney(player, money);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerRemoveMoney(lua_State* L)
{
// player:removeMoney(money)
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint64_t money = getNumber<uint64_t>(L, 2);
pushBoolean(L, g_game.removeMoney(player, money));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerShowTextDialog(lua_State* L)
{
// player:showTextDialog(itemId[, text[, canWrite[, length]]])
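// Example (Lua, illustrative): open a writable text window backed by a fresh
// temporary item of the given type:
//   player:showTextDialog(itemId, "", true) -- itemId: any writable item id (assumed)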
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
int32_t length = getNumber<int32_t>(L, 5, -1);
bool canWrite = getBoolean(L, 4, false);
std::string text;
int parameters = lua_gettop(L);
if (parameters >= 3) {
text = getString(L, 3);
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
Item* item = Item::CreateItem(itemId);
if (!item) {
reportErrorFunc(getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
if (length < 0) {
length = Item::items[item->getID()].maxTextLen;
}
if (!text.empty()) {
item->setText(text);
length = std::max<int32_t>(text.size(), length);
}
item->setParent(player);
player->setWriteItem(item, length);
player->sendTextWindow(item, length, canWrite);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerSendTextMessage(lua_State* L)
{
// player:sendTextMessage(type, text[, position, primaryValue = 0, primaryColor = TEXTCOLOR_NONE[, secondaryValue = 0, secondaryColor = TEXTCOLOR_NONE]])
// player:sendTextMessage(type, text, channelId)
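// Example (Lua, illustrative; constant names assumed from the server's enum
// registrations): exactly four arguments select the channel form, while the
// longer forms attach an animated value at a position:
//   player:sendTextMessage(MESSAGE_INFO_DESCR, "Hello!")
//   player:sendTextMessage(MESSAGE_EXPERIENCE, "+100 XP", pos, 100, TEXTCOLOR_WHITE_EXP)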
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
int parameters = lua_gettop(L);
TextMessage message(getNumber<MessageClasses>(L, 2), getString(L, 3));
if (parameters == 4) {
uint16_t channelId = getNumber<uint16_t>(L, 4);
ChatChannel* channel = g_chat->getChannel(*player, channelId);
if (!channel || !channel->hasUser(*player)) {
pushBoolean(L, false);
return 1;
}
message.channelId = channelId;
} else {
if (parameters >= 6) {
message.position = getPosition(L, 4);
message.primary.value = getNumber<int32_t>(L, 5);
message.primary.color = getNumber<TextColor_t>(L, 6);
}
if (parameters >= 8) {
message.secondary.value = getNumber<int32_t>(L, 7);
message.secondary.color = getNumber<TextColor_t>(L, 8);
}
}
player->sendTextMessage(message);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerSendChannelMessage(lua_State* L)
{
// player:sendChannelMessage(author, text, type, channelId)
uint16_t channelId = getNumber<uint16_t>(L, 5);
SpeakClasses type = getNumber<SpeakClasses>(L, 4);
const std::string& text = getString(L, 3);
const std::string& author = getString(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->sendChannelMessage(author, text, type, channelId);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSendPrivateMessage(lua_State* L)
{
// player:sendPrivateMessage(speaker, text[, type])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
const Player* speaker = getUserdata<const Player>(L, 2);
const std::string& text = getString(L, 3);
SpeakClasses type = getNumber<SpeakClasses>(L, 4, TALKTYPE_PRIVATE_FROM);
player->sendPrivateMessage(speaker, type, text);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerChannelSay(lua_State* L)
{
// player:channelSay(speaker, type, text, channelId)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Creature* speaker = getCreature(L, 2);
SpeakClasses type = getNumber<SpeakClasses>(L, 3);
const std::string& text = getString(L, 4);
uint16_t channelId = getNumber<uint16_t>(L, 5);
player->sendToChannel(speaker, type, text, channelId);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerOpenChannel(lua_State* L)
{
// player:openChannel(channelId)
uint16_t channelId = getNumber<uint16_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player) {
g_game.playerOpenChannel(player->getID(), channelId);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetSlotItem(lua_State* L)
{
// player:getSlotItem(slot)
const Player* player = getUserdata<const Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint32_t slot = getNumber<uint32_t>(L, 2);
Thing* thing = player->getThing(slot);
if (!thing) {
lua_pushnil(L);
return 1;
}
Item* item = thing->getItem();
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetParty(lua_State* L)
{
// player:getParty()
const Player* player = getUserdata<const Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Party* party = player->getParty();
if (party) {
pushUserdata<Party>(L, party);
setMetatable(L, -1, "Party");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddOutfit(lua_State* L)
{
// player:addOutfit(lookType)
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->addOutfit(getNumber<uint16_t>(L, 2), 0);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddOutfitAddon(lua_State* L)
{
// player:addOutfitAddon(lookType, addon)
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint16_t lookType = getNumber<uint16_t>(L, 2);
uint8_t addon = getNumber<uint8_t>(L, 3);
player->addOutfit(lookType, addon);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerRemoveOutfit(lua_State* L)
{
// player:removeOutfit(lookType)
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint16_t lookType = getNumber<uint16_t>(L, 2);
pushBoolean(L, player->removeOutfit(lookType));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerRemoveOutfitAddon(lua_State* L)
{
// player:removeOutfitAddon(lookType, addon)
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint16_t lookType = getNumber<uint16_t>(L, 2);
uint8_t addon = getNumber<uint8_t>(L, 3);
pushBoolean(L, player->removeOutfitAddon(lookType, addon));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerHasOutfit(lua_State* L)
{
// player:hasOutfit(lookType[, addon = 0])
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint16_t lookType = getNumber<uint16_t>(L, 2);
uint8_t addon = getNumber<uint8_t>(L, 3, 0);
pushBoolean(L, player->canWear(lookType, addon));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSendOutfitWindow(lua_State* L)
{
// player:sendOutfitWindow()
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->sendOutfitWindow();
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddMount(lua_State* L)
{
// player:addMount(mountId or mountName)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint8_t mountId;
if (isNumber(L, 2)) {
mountId = getNumber<uint8_t>(L, 2);
} else {
Mount* mount = g_game.mounts.getMountByName(getString(L, 2));
if (!mount) {
lua_pushnil(L);
return 1;
}
mountId = mount->id;
}
pushBoolean(L, player->tameMount(mountId));
return 1;
}
int LuaScriptInterface::luaPlayerRemoveMount(lua_State* L)
{
// player:removeMount(mountId or mountName)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint8_t mountId;
if (isNumber(L, 2)) {
mountId = getNumber<uint8_t>(L, 2);
} else {
Mount* mount = g_game.mounts.getMountByName(getString(L, 2));
if (!mount) {
lua_pushnil(L);
return 1;
}
mountId = mount->id;
}
pushBoolean(L, player->untameMount(mountId));
return 1;
}
int LuaScriptInterface::luaPlayerHasMount(lua_State* L)
{
// player:hasMount(mountId or mountName)
const Player* player = getUserdata<const Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Mount* mount = nullptr;
if (isNumber(L, 2)) {
mount = g_game.mounts.getMountByID(getNumber<uint8_t>(L, 2));
} else {
mount = g_game.mounts.getMountByName(getString(L, 2));
}
if (mount) {
pushBoolean(L, player->hasMount(mount));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetPremiumDays(lua_State* L)
{
// player:getPremiumDays()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->premiumDays);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddPremiumDays(lua_State* L)
{
// player:addPremiumDays(days)
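// Note: a premiumDays value of 0xFFFF (uint16 max) is treated as permanent
// premium and left untouched; otherwise the added days are clamped so the
// total never exceeds 0xFFFE.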
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
if (player->premiumDays != std::numeric_limits<uint16_t>::max()) {
uint16_t days = getNumber<uint16_t>(L, 2);
int32_t addDays = std::min<int32_t>(0xFFFE - player->premiumDays, days);
if (addDays > 0) {
player->setPremiumDays(player->premiumDays + addDays);
IOLoginData::addPremiumDays(player->getAccount(), addDays);
}
}
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerRemovePremiumDays(lua_State* L)
{
// player:removePremiumDays(days)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
if (player->premiumDays != std::numeric_limits<uint16_t>::max()) {
uint16_t days = getNumber<uint16_t>(L, 2);
int32_t removeDays = std::min<int32_t>(player->premiumDays, days);
if (removeDays > 0) {
player->setPremiumDays(player->premiumDays - removeDays);
IOLoginData::removePremiumDays(player->getAccount(), removeDays);
}
}
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerHasBlessing(lua_State* L)
{
// player:hasBlessing(blessing)
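// Blessings are 1-based on the Lua side and stored as a bit field internally,
// hence the `- 1` below and the `1 << blessing` in add/remove.
// Example (Lua, illustrative):
//   if not player:hasBlessing(1) then player:addBlessing(1) end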
uint8_t blessing = getNumber<uint8_t>(L, 2) - 1;
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushBoolean(L, player->hasBlessing(blessing));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddBlessing(lua_State* L)
{
// player:addBlessing(blessing)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint8_t blessing = getNumber<uint8_t>(L, 2) - 1;
if (player->hasBlessing(blessing)) {
pushBoolean(L, false);
return 1;
}
player->addBlessing(1 << blessing);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerRemoveBlessing(lua_State* L)
{
// player:removeBlessing(blessing)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint8_t blessing = getNumber<uint8_t>(L, 2) - 1;
if (!player->hasBlessing(blessing)) {
pushBoolean(L, false);
return 1;
}
player->removeBlessing(1 << blessing);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerCanLearnSpell(lua_State* L)
{
// player:canLearnSpell(spellName)
const Player* player = getUserdata<const Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
const std::string& spellName = getString(L, 2);
InstantSpell* spell = g_spells->getInstantSpellByName(spellName);
if (!spell) {
reportErrorFunc("Spell \"" + spellName + "\" not found");
pushBoolean(L, false);
return 1;
}
if (player->hasFlag(PlayerFlag_IgnoreSpellCheck)) {
pushBoolean(L, true);
return 1;
}
const auto& vocMap = spell->getVocMap();
if (vocMap.count(player->getVocationId()) == 0) {
pushBoolean(L, false);
} else if (player->getLevel() < spell->getLevel()) {
pushBoolean(L, false);
} else if (player->getMagicLevel() < spell->getMagicLevel()) {
pushBoolean(L, false);
} else {
pushBoolean(L, true);
}
return 1;
}
int LuaScriptInterface::luaPlayerLearnSpell(lua_State* L)
{
// player:learnSpell(spellName)
Player* player = getUserdata<Player>(L, 1);
if (player) {
const std::string& spellName = getString(L, 2);
player->learnInstantSpell(spellName);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerForgetSpell(lua_State* L)
{
// player:forgetSpell(spellName)
Player* player = getUserdata<Player>(L, 1);
if (player) {
const std::string& spellName = getString(L, 2);
player->forgetInstantSpell(spellName);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerHasLearnedSpell(lua_State* L)
{
// player:hasLearnedSpell(spellName)
Player* player = getUserdata<Player>(L, 1);
if (player) {
const std::string& spellName = getString(L, 2);
pushBoolean(L, player->hasLearnedInstantSpell(spellName));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSendTutorial(lua_State* L)
{
// player:sendTutorial(tutorialId)
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint8_t tutorialId = getNumber<uint8_t>(L, 2);
player->sendTutorial(tutorialId);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddMapMark(lua_State* L)
{
// player:addMapMark(position, type, description)
Player* player = getUserdata<Player>(L, 1);
if (player) {
const Position& position = getPosition(L, 2);
uint8_t type = getNumber<uint8_t>(L, 3);
const std::string& description = getString(L, 4);
player->sendAddMarker(position, type, description);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSave(lua_State* L)
{
// player:save()
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->loginPosition = player->getPosition();
pushBoolean(L, IOLoginData::savePlayer(player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerPopupFYI(lua_State* L)
{
// player:popupFYI(message)
Player* player = getUserdata<Player>(L, 1);
if (player) {
const std::string& message = getString(L, 2);
player->sendFYIBox(message);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerIsPzLocked(lua_State* L)
{
// player:isPzLocked()
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushBoolean(L, player->isPzLocked());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetClient(lua_State* L)
{
// player:getClient()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_createtable(L, 0, 2);
setField(L, "version", player->getProtocolVersion());
setField(L, "os", player->getOperatingSystem());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetHouse(lua_State* L)
{
// player:getHouse()
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
House* house = g_game.map.houses.getHouseByPlayerId(player->getGUID());
if (house) {
pushUserdata<House>(L, house);
setMetatable(L, -1, "House");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSendHouseWindow(lua_State* L)
{
// player:sendHouseWindow(house, listId)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
House* house = getUserdata<House>(L, 2);
if (!house) {
lua_pushnil(L);
return 1;
}
uint32_t listId = getNumber<uint32_t>(L, 3);
player->sendHouseWindow(house, listId);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerSetEditHouse(lua_State* L)
{
// player:setEditHouse(house, listId)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
House* house = getUserdata<House>(L, 2);
if (!house) {
lua_pushnil(L);
return 1;
}
uint32_t listId = getNumber<uint32_t>(L, 3);
player->setEditHouse(house, listId);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerSetGhostMode(lua_State* L)
{
// player:setGhostMode(enabled[, showEffect = true])
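// Toggling ghost mode hides the player from spectators without access
// rights (tile removal/appearance below) and flips their VIP status to
// offline/online for regular players.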
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
bool enabled = getBoolean(L, 2);
if (player->isInGhostMode() == enabled) {
pushBoolean(L, true);
return 1;
}
bool showEffect = getBoolean(L, 3, true);
player->switchGhostMode();
Tile* tile = player->getTile();
const Position& position = player->getPosition();
SpectatorHashSet spectators;
g_game.map.getSpectators(spectators, position, true, true);
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer != player && !tmpPlayer->isAccessPlayer()) {
if (enabled) {
tmpPlayer->sendRemoveTileThing(position, tile->getStackposOfCreature(tmpPlayer, player));
} else {
tmpPlayer->sendCreatureAppear(player, position, showEffect);
}
} else {
tmpPlayer->sendCreatureChangeVisible(player, !enabled);
}
}
if (player->isInGhostMode()) {
for (const auto& it : g_game.getPlayers()) {
if (!it.second->isAccessPlayer()) {
it.second->notifyStatusChange(player, VIPSTATUS_OFFLINE);
}
}
IOLoginData::updateOnlineStatus(player->getGUID(), false);
} else {
for (const auto& it : g_game.getPlayers()) {
if (!it.second->isAccessPlayer()) {
it.second->notifyStatusChange(player, VIPSTATUS_ONLINE);
}
}
IOLoginData::updateOnlineStatus(player->getGUID(), true);
}
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerGetContainerId(lua_State* L)
{
// player:getContainerId(container)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Container* container = getUserdata<Container>(L, 2);
if (container) {
lua_pushnumber(L, player->getContainerID(container));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetContainerById(lua_State* L)
{
// player:getContainerById(id)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Container* container = player->getContainerByID(getNumber<uint8_t>(L, 2));
if (container) {
pushUserdata<Container>(L, container);
setMetatable(L, -1, "Container");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetContainerIndex(lua_State* L)
{
// player:getContainerIndex(id)
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getContainerIndex(getNumber<uint8_t>(L, 2)));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetInstantSpells(lua_State* L)
{
// player:getInstantSpells()
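// Example (Lua, illustrative), assuming pushInstantSpell exposes a `name` field:
//   for _, spell in ipairs(player:getInstantSpells()) do print(spell.name) end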
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
std::vector<InstantSpell*> spells;
for (auto spell : g_spells->getInstantSpells()) {
if (spell.second->canCast(player)) {
spells.push_back(spell.second);
}
}
lua_createtable(L, spells.size(), 0);
int index = 0;
for (auto spell : spells) {
pushInstantSpell(L, *spell);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaPlayerCanCast(lua_State* L)
{
// player:canCast(spell)
Player* player = getUserdata<Player>(L, 1);
InstantSpell* spell = getUserdata<InstantSpell>(L, 2);
if (player && spell) {
pushBoolean(L, spell->canCast(player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerHasChaseMode(lua_State* L)
{
// player:hasChaseMode()
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushBoolean(L, player->chaseMode);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerHasSecureMode(lua_State* L)
{
// player:hasSecureMode()
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushBoolean(L, player->secureMode);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetFightMode(lua_State* L)
{
// player:getFightMode()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->fightMode);
} else {
lua_pushnil(L);
}
return 1;
}
// Monster
int LuaScriptInterface::luaMonsterCreate(lua_State* L)
{
// Monster(id or userdata)
Monster* monster;
if (isNumber(L, 2)) {
monster = g_game.getMonsterByID(getNumber<uint32_t>(L, 2));
} else if (isUserdata(L, 2)) {
if (getUserdataType(L, 2) != LuaData_Monster) {
lua_pushnil(L);
return 1;
}
monster = getUserdata<Monster>(L, 2);
} else {
monster = nullptr;
}
if (monster) {
pushUserdata<Monster>(L, monster);
setMetatable(L, -1, "Monster");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterIsMonster(lua_State* L)
{
// monster:isMonster()
pushBoolean(L, getUserdata<const Monster>(L, 1) != nullptr);
return 1;
}
int LuaScriptInterface::luaMonsterGetType(lua_State* L)
{
// monster:getType()
const Monster* monster = getUserdata<const Monster>(L, 1);
if (monster) {
pushUserdata<MonsterType>(L, monster->mType);
setMetatable(L, -1, "MonsterType");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterGetSpawnPosition(lua_State* L)
{
// monster:getSpawnPosition()
const Monster* monster = getUserdata<const Monster>(L, 1);
if (monster) {
pushPosition(L, monster->getMasterPos());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterIsInSpawnRange(lua_State* L)
{
// monster:isInSpawnRange([position])
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
pushBoolean(L, monster->isInSpawnRange(lua_gettop(L) >= 2 ? getPosition(L, 2) : monster->getPosition()));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterIsIdle(lua_State* L)
{
// monster:isIdle()
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
pushBoolean(L, monster->getIdleStatus());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSetIdle(lua_State* L)
{
// monster:setIdle(idle)
Monster* monster = getUserdata<Monster>(L, 1);
if (!monster) {
lua_pushnil(L);
return 1;
}
monster->setIdle(getBoolean(L, 2));
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaMonsterIsTarget(lua_State* L)
{
// monster:isTarget(creature)
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
const Creature* creature = getCreature(L, 2);
pushBoolean(L, monster->isTarget(creature));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterIsOpponent(lua_State* L)
{
// monster:isOpponent(creature)
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
const Creature* creature = getCreature(L, 2);
pushBoolean(L, monster->isOpponent(creature));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterIsFriend(lua_State* L)
{
// monster:isFriend(creature)
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
const Creature* creature = getCreature(L, 2);
pushBoolean(L, monster->isFriend(creature));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterAddFriend(lua_State* L)
{
// monster:addFriend(creature)
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
Creature* creature = getCreature(L, 2);
monster->addFriend(creature);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterRemoveFriend(lua_State* L)
{
// monster:removeFriend(creature)
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
Creature* creature = getCreature(L, 2);
monster->removeFriend(creature);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterGetFriendList(lua_State* L)
{
// monster:getFriendList()
Monster* monster = getUserdata<Monster>(L, 1);
if (!monster) {
lua_pushnil(L);
return 1;
}
const auto& friendList = monster->getFriendList();
lua_createtable(L, friendList.size(), 0);
int index = 0;
for (Creature* creature : friendList) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaMonsterGetFriendCount(lua_State* L)
{
// monster:getFriendCount()
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
lua_pushnumber(L, monster->getFriendList().size());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterAddTarget(lua_State* L)
{
// monster:addTarget(creature[, pushFront = false])
Monster* monster = getUserdata<Monster>(L, 1);
if (!monster) {
lua_pushnil(L);
return 1;
}
Creature* creature = getCreature(L, 2);
bool pushFront = getBoolean(L, 3, false);
monster->addTarget(creature, pushFront);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaMonsterRemoveTarget(lua_State* L)
{
// monster:removeTarget(creature)
Monster* monster = getUserdata<Monster>(L, 1);
if (!monster) {
lua_pushnil(L);
return 1;
}
monster->removeTarget(getCreature(L, 2));
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaMonsterGetTargetList(lua_State* L)
{
// monster:getTargetList()
Monster* monster = getUserdata<Monster>(L, 1);
if (!monster) {
lua_pushnil(L);
return 1;
}
const auto& targetList = monster->getTargetList();
lua_createtable(L, targetList.size(), 0);
int index = 0;
for (Creature* creature : targetList) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaMonsterGetTargetCount(lua_State* L)
{
// monster:getTargetCount()
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
lua_pushnumber(L, monster->getTargetList().size());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSelectTarget(lua_State* L)
{
// monster:selectTarget(creature)
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
Creature* creature = getCreature(L, 2);
pushBoolean(L, monster->selectTarget(creature));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSearchTarget(lua_State* L)
{
// monster:searchTarget([searchType = TARGETSEARCH_DEFAULT])
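// searchType is one of the TARGETSEARCH_* constants; pushes true if a
// target was found and selected.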
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
TargetSearchType_t searchType = getNumber<TargetSearchType_t>(L, 2, TARGETSEARCH_DEFAULT);
pushBoolean(L, monster->searchTarget(searchType));
} else {
lua_pushnil(L);
}
return 1;
}
// Npc
int LuaScriptInterface::luaNpcCreate(lua_State* L)
{
// Npc([id or name or userdata])
Npc* npc;
if (lua_gettop(L) >= 2) {
if (isNumber(L, 2)) {
npc = g_game.getNpcByID(getNumber<uint32_t>(L, 2));
} else if (isString(L, 2)) {
npc = g_game.getNpcByName(getString(L, 2));
} else if (isUserdata(L, 2)) {
if (getUserdataType(L, 2) != LuaData_Npc) {
lua_pushnil(L);
return 1;
}
npc = getUserdata<Npc>(L, 2);
} else {
npc = nullptr;
}
} else {
npc = getScriptEnv()->getNpc();
}
if (npc) {
pushUserdata<Npc>(L, npc);
setMetatable(L, -1, "Npc");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNpcIsNpc(lua_State* L)
{
// npc:isNpc()
pushBoolean(L, getUserdata<const Npc>(L, 1) != nullptr);
return 1;
}
int LuaScriptInterface::luaNpcSetMasterPos(lua_State* L)
{
// npc:setMasterPos(pos[, radius])
Npc* npc = getUserdata<Npc>(L, 1);
if (!npc) {
lua_pushnil(L);
return 1;
}
const Position& pos = getPosition(L, 2);
int32_t radius = getNumber<int32_t>(L, 3, 1);
npc->setMasterPos(pos, radius);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaNpcGetSpeechBubble(lua_State* L)
{
// npc:getSpeechBubble()
Npc* npc = getUserdata<Npc>(L, 1);
if (npc) {
lua_pushnumber(L, npc->getSpeechBubble());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNpcSetSpeechBubble(lua_State* L)
{
// npc:setSpeechBubble(speechBubble)
Npc* npc = getUserdata<Npc>(L, 1);
if (npc) {
npc->setSpeechBubble(getNumber<uint8_t>(L, 2));
}
return 0;
}
// Guild
int LuaScriptInterface::luaGuildCreate(lua_State* L)
{
// Guild(id)
uint32_t id = getNumber<uint32_t>(L, 2);
Guild* guild = g_game.getGuild(id);
if (guild) {
pushUserdata<Guild>(L, guild);
setMetatable(L, -1, "Guild");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGuildGetId(lua_State* L)
{
// guild:getId()
Guild* guild = getUserdata<Guild>(L, 1);
if (guild) {
lua_pushnumber(L, guild->getId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGuildGetName(lua_State* L)
{
// guild:getName()
Guild* guild = getUserdata<Guild>(L, 1);
if (guild) {
pushString(L, guild->getName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGuildGetMembersOnline(lua_State* L)
{
// guild:getMembersOnline()
const Guild* guild = getUserdata<const Guild>(L, 1);
if (!guild) {
lua_pushnil(L);
return 1;
}
const auto& members = guild->getMembersOnline();
lua_createtable(L, members.size(), 0);
int index = 0;
for (Player* player : members) {
pushUserdata<Player>(L, player);
setMetatable(L, -1, "Player");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaGuildAddRank(lua_State* L)
{
// guild:addRank(id, name, level)
Guild* guild = getUserdata<Guild>(L, 1);
if (guild) {
uint32_t id = getNumber<uint32_t>(L, 2);
const std::string& name = getString(L, 3);
uint8_t level = getNumber<uint8_t>(L, 4);
guild->addRank(id, name, level);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGuildGetRankById(lua_State* L)
{
// guild:getRankById(id)
Guild* guild = getUserdata<Guild>(L, 1);
if (!guild) {
lua_pushnil(L);
return 1;
}
uint32_t id = getNumber<uint32_t>(L, 2);
GuildRank* rank = guild->getRankById(id);
if (rank) {
lua_createtable(L, 0, 3);
setField(L, "id", rank->id);
setField(L, "name", rank->name);
setField(L, "level", rank->level);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGuildGetRankByLevel(lua_State* L)
{
// guild:getRankByLevel(level)
const Guild* guild = getUserdata<const Guild>(L, 1);
if (!guild) {
lua_pushnil(L);
return 1;
}
uint8_t level = getNumber<uint8_t>(L, 2);
const GuildRank* rank = guild->getRankByLevel(level);
if (rank) {
lua_createtable(L, 0, 3);
setField(L, "id", rank->id);
setField(L, "name", rank->name);
setField(L, "level", rank->level);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGuildGetMotd(lua_State* L)
{
// guild:getMotd()
Guild* guild = getUserdata<Guild>(L, 1);
if (guild) {
pushString(L, guild->getMotd());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGuildSetMotd(lua_State* L)
{
// guild:setMotd(motd)
const std::string& motd = getString(L, 2);
Guild* guild = getUserdata<Guild>(L, 1);
if (guild) {
guild->setMotd(motd);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
// Group
int LuaScriptInterface::luaGroupCreate(lua_State* L)
{
// Group(id)
uint32_t id = getNumber<uint32_t>(L, 2);
Group* group = g_game.groups.getGroup(id);
if (group) {
pushUserdata<Group>(L, group);
setMetatable(L, -1, "Group");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGroupGetId(lua_State* L)
{
// group:getId()
Group* group = getUserdata<Group>(L, 1);
if (group) {
lua_pushnumber(L, group->id);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGroupGetName(lua_State* L)
{
// group:getName()
Group* group = getUserdata<Group>(L, 1);
if (group) {
pushString(L, group->name);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGroupGetFlags(lua_State* L)
{
// group:getFlags()
Group* group = getUserdata<Group>(L, 1);
if (group) {
lua_pushnumber(L, group->flags);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGroupGetAccess(lua_State* L)
{
// group:getAccess()
Group* group = getUserdata<Group>(L, 1);
if (group) {
pushBoolean(L, group->access);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGroupGetMaxDepotItems(lua_State* L)
{
// group:getMaxDepotItems()
Group* group = getUserdata<Group>(L, 1);
if (group) {
lua_pushnumber(L, group->maxDepotItems);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGroupGetMaxVipEntries(lua_State* L)
{
// group:getMaxVipEntries()
Group* group = getUserdata<Group>(L, 1);
if (group) {
lua_pushnumber(L, group->maxVipEntries);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGroupHasFlag(lua_State* L)
{
// group:hasFlag(flag)
Group* group = getUserdata<Group>(L, 1);
if (group) {
PlayerFlags flag = getNumber<PlayerFlags>(L, 2);
pushBoolean(L, (group->flags & flag) != 0);
} else {
lua_pushnil(L);
}
return 1;
}
// Vocation
int LuaScriptInterface::luaVocationCreate(lua_State* L)
{
// Vocation(id or name)
uint32_t id;
if (isNumber(L, 2)) {
id = getNumber<uint32_t>(L, 2);
} else {
id = g_vocations.getVocationId(getString(L, 2));
}
Vocation* vocation = g_vocations.getVocation(id);
if (vocation) {
pushUserdata<Vocation>(L, vocation);
setMetatable(L, -1, "Vocation");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetId(lua_State* L)
{
// vocation:getId()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetClientId(lua_State* L)
{
// vocation:getClientId()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getClientId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetName(lua_State* L)
{
// vocation:getName()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
pushString(L, vocation->getVocName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetDescription(lua_State* L)
{
// vocation:getDescription()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
pushString(L, vocation->getVocDescription());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetRequiredSkillTries(lua_State* L)
{
// vocation:getRequiredSkillTries(skillType, skillLevel)
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
skills_t skillType = getNumber<skills_t>(L, 2);
uint16_t skillLevel = getNumber<uint16_t>(L, 3);
lua_pushnumber(L, vocation->getReqSkillTries(skillType, skillLevel));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetRequiredManaSpent(lua_State* L)
{
// vocation:getRequiredManaSpent(magicLevel)
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
uint32_t magicLevel = getNumber<uint32_t>(L, 2);
lua_pushnumber(L, vocation->getReqMana(magicLevel));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetCapacityGain(lua_State* L)
{
// vocation:getCapacityGain()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getCapGain());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetHealthGain(lua_State* L)
{
// vocation:getHealthGain()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getHPGain());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetHealthGainTicks(lua_State* L)
{
// vocation:getHealthGainTicks()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getHealthGainTicks());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetHealthGainAmount(lua_State* L)
{
// vocation:getHealthGainAmount()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getHealthGainAmount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetManaGain(lua_State* L)
{
// vocation:getManaGain()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getManaGain());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetManaGainTicks(lua_State* L)
{
// vocation:getManaGainTicks()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getManaGainTicks());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetManaGainAmount(lua_State* L)
{
// vocation:getManaGainAmount()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getManaGainAmount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetMaxSoul(lua_State* L)
{
// vocation:getMaxSoul()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getSoulMax());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetSoulGainTicks(lua_State* L)
{
// vocation:getSoulGainTicks()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getSoulGainTicks());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetAttackSpeed(lua_State* L)
{
// vocation:getAttackSpeed()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getAttackSpeed());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetBaseSpeed(lua_State* L)
{
// vocation:getBaseSpeed()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getBaseSpeed());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetDemotion(lua_State* L)
{
// vocation:getDemotion()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (!vocation) {
lua_pushnil(L);
return 1;
}
uint16_t fromId = vocation->getFromVocation();
if (fromId == VOCATION_NONE) {
lua_pushnil(L);
return 1;
}
Vocation* demotedVocation = g_vocations.getVocation(fromId);
if (demotedVocation && demotedVocation != vocation) {
pushUserdata<Vocation>(L, demotedVocation);
setMetatable(L, -1, "Vocation");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetPromotion(lua_State* L)
{
// vocation:getPromotion()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (!vocation) {
lua_pushnil(L);
return 1;
}
uint16_t promotedId = g_vocations.getPromotedVocation(vocation->getId());
if (promotedId == VOCATION_NONE) {
lua_pushnil(L);
return 1;
}
Vocation* promotedVocation = g_vocations.getVocation(promotedId);
if (promotedVocation && promotedVocation != vocation) {
pushUserdata<Vocation>(L, promotedVocation);
setMetatable(L, -1, "Vocation");
} else {
lua_pushnil(L);
}
return 1;
}
// Town
int LuaScriptInterface::luaTownCreate(lua_State* L)
{
// Town(id or name)
Town* town;
if (isNumber(L, 2)) {
town = g_game.map.towns.getTown(getNumber<uint32_t>(L, 2));
} else if (isString(L, 2)) {
town = g_game.map.towns.getTown(getString(L, 2));
} else {
town = nullptr;
}
if (town) {
pushUserdata<Town>(L, town);
setMetatable(L, -1, "Town");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTownGetId(lua_State* L)
{
// town:getId()
Town* town = getUserdata<Town>(L, 1);
if (town) {
lua_pushnumber(L, town->getID());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTownGetName(lua_State* L)
{
// town:getName()
Town* town = getUserdata<Town>(L, 1);
if (town) {
pushString(L, town->getName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTownGetTemplePosition(lua_State* L)
{
// town:getTemplePosition()
Town* town = getUserdata<Town>(L, 1);
if (town) {
pushPosition(L, town->getTemplePosition());
} else {
lua_pushnil(L);
}
return 1;
}
// House
int LuaScriptInterface::luaHouseCreate(lua_State* L)
{
// House(id)
House* house = g_game.map.houses.getHouse(getNumber<uint32_t>(L, 2));
if (house) {
pushUserdata<House>(L, house);
setMetatable(L, -1, "House");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetId(lua_State* L)
{
// house:getId()
House* house = getUserdata<House>(L, 1);
if (house) {
lua_pushnumber(L, house->getId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetName(lua_State* L)
{
// house:getName()
House* house = getUserdata<House>(L, 1);
if (house) {
pushString(L, house->getName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetTown(lua_State* L)
{
// house:getTown()
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
Town* town = g_game.map.towns.getTown(house->getTownId());
if (town) {
pushUserdata<Town>(L, town);
setMetatable(L, -1, "Town");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetExitPosition(lua_State* L)
{
// house:getExitPosition()
House* house = getUserdata<House>(L, 1);
if (house) {
pushPosition(L, house->getEntryPosition());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetRent(lua_State* L)
{
// house:getRent()
House* house = getUserdata<House>(L, 1);
if (house) {
lua_pushnumber(L, house->getRent());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetOwnerGuid(lua_State* L)
{
// house:getOwnerGuid()
House* house = getUserdata<House>(L, 1);
if (house) {
lua_pushnumber(L, house->getOwner());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseSetOwnerGuid(lua_State* L)
{
// house:setOwnerGuid(guid[, updateDatabase = true])
House* house = getUserdata<House>(L, 1);
if (house) {
uint32_t guid = getNumber<uint32_t>(L, 2);
bool updateDatabase = getBoolean(L, 3, true);
house->setOwner(guid, updateDatabase);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseStartTrade(lua_State* L)
{
// house:startTrade(player, tradePartner)
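// Pushes a RETURNVALUE_* constant rather than a boolean.
// Example (Lua, illustrative):
//   if house:startTrade(player, partner) ~= RETURNVALUE_NOERROR then ... end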
House* house = getUserdata<House>(L, 1);
Player* player = getUserdata<Player>(L, 2);
Player* tradePartner = getUserdata<Player>(L, 3);
if (!player || !tradePartner || !house) {
lua_pushnil(L);
return 1;
}
if (!Position::areInRange<2, 2, 0>(tradePartner->getPosition(), player->getPosition())) {
lua_pushnumber(L, RETURNVALUE_TRADEPLAYERFARAWAY);
return 1;
}
if (house->getOwner() != player->getGUID()) {
lua_pushnumber(L, RETURNVALUE_YOUDONTOWNTHISHOUSE);
return 1;
}
if (g_game.map.houses.getHouseByPlayerId(tradePartner->getGUID())) {
lua_pushnumber(L, RETURNVALUE_TRADEPLAYERALREADYOWNSAHOUSE);
return 1;
}
if (IOLoginData::hasBiddedOnHouse(tradePartner->getGUID())) {
lua_pushnumber(L, RETURNVALUE_TRADEPLAYERHIGHESTBIDDER);
return 1;
}
Item* transferItem = house->getTransferItem();
if (!transferItem) {
lua_pushnumber(L, RETURNVALUE_YOUCANNOTTRADETHISHOUSE);
return 1;
}
transferItem->getParent()->setParent(player);
if (!g_game.internalStartTrade(player, tradePartner, transferItem)) {
house->resetTransferItem();
}
lua_pushnumber(L, RETURNVALUE_NOERROR);
return 1;
}
int LuaScriptInterface::luaHouseGetBeds(lua_State* L)
{
// house:getBeds()
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
const auto& beds = house->getBeds();
lua_createtable(L, beds.size(), 0);
int index = 0;
for (BedItem* bedItem : beds) {
pushUserdata<Item>(L, bedItem);
setItemMetatable(L, -1, bedItem);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaHouseGetBedCount(lua_State* L)
{
// house:getBedCount()
House* house = getUserdata<House>(L, 1);
if (house) {
lua_pushnumber(L, house->getBedCount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetDoors(lua_State* L)
{
// house:getDoors()
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
const auto& doors = house->getDoors();
lua_createtable(L, doors.size(), 0);
int index = 0;
for (Door* door : doors) {
pushUserdata<Item>(L, door);
setItemMetatable(L, -1, door);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaHouseGetDoorCount(lua_State* L)
{
// house:getDoorCount()
House* house = getUserdata<House>(L, 1);
if (house) {
lua_pushnumber(L, house->getDoors().size());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetDoorIdByPosition(lua_State* L)
{
// house:getDoorIdByPosition(position)
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
Door* door = house->getDoorByPosition(getPosition(L, 2));
if (door) {
lua_pushnumber(L, door->getDoorId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetTiles(lua_State* L)
{
// house:getTiles()
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
const auto& tiles = house->getTiles();
lua_createtable(L, tiles.size(), 0);
int index = 0;
for (Tile* tile : tiles) {
pushUserdata<Tile>(L, tile);
setMetatable(L, -1, "Tile");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaHouseGetTileCount(lua_State* L)
{
// house:getTileCount()
House* house = getUserdata<House>(L, 1);
if (house) {
lua_pushnumber(L, house->getTiles().size());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseCanEditAccessList(lua_State* L)
{
// house:canEditAccessList(listId, player)
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
uint32_t listId = getNumber<uint32_t>(L, 2);
Player* player = getPlayer(L, 3);
pushBoolean(L, house->canEditAccessList(listId, player));
return 1;
}
int LuaScriptInterface::luaHouseGetAccessList(lua_State* L)
{
// house:getAccessList(listId)
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
std::string list;
uint32_t listId = getNumber<uint32_t>(L, 2);
if (house->getAccessList(listId, list)) {
pushString(L, list);
} else {
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaHouseSetAccessList(lua_State* L)
{
// house:setAccessList(listId, list)
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
uint32_t listId = getNumber<uint32_t>(L, 2);
const std::string& list = getString(L, 3);
house->setAccessList(listId, list);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaHouseKickPlayer(lua_State* L)
{
// house:kickPlayer(player, targetPlayer)
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
pushBoolean(L, house->kickPlayer(getPlayer(L, 2), getPlayer(L, 3)));
return 1;
}
// ItemType
int LuaScriptInterface::luaItemTypeCreate(lua_State* L)
{
// ItemType(id or name)
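// Note: an unrecognized name resolves to id 0, so the pushed userdata then
// refers to the reserved empty item type instead of nil.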
uint32_t id;
if (isNumber(L, 2)) {
id = getNumber<uint32_t>(L, 2);
} else {
id = Item::items.getItemIdByName(getString(L, 2));
}
const ItemType& itemType = Item::items[id];
pushUserdata<const ItemType>(L, &itemType);
setMetatable(L, -1, "ItemType");
return 1;
}
int LuaScriptInterface::luaItemTypeIsCorpse(lua_State* L)
{
// itemType:isCorpse()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->corpseType != RACE_NONE);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsDoor(lua_State* L)
{
// itemType:isDoor()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->isDoor());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsContainer(lua_State* L)
{
// itemType:isContainer()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->isContainer());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsFluidContainer(lua_State* L)
{
// itemType:isFluidContainer()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->isFluidContainer());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsMovable(lua_State* L)
{
// itemType:isMovable()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->moveable);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsRune(lua_State* L)
{
// itemType:isRune()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->isRune());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsStackable(lua_State* L)
{
// itemType:isStackable()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->stackable);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsReadable(lua_State* L)
{
// itemType:isReadable()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->canReadText);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsWritable(lua_State* L)
{
// itemType:isWritable()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->canWriteText);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsBlocking(lua_State* L)
{
// itemType:isBlocking()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->blockProjectile || itemType->blockSolid);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsGroundTile(lua_State* L)
{
// itemType:isGroundTile()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->isGroundTile());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsMagicField(lua_State* L)
{
// itemType:isMagicField()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->isMagicField());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsUseable(lua_State* L)
{
// itemType:isUseable()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->isUseable());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsPickupable(lua_State* L)
{
// itemType:isPickupable()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->isPickupable());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetType(lua_State* L)
{
// itemType:getType()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->type);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetId(lua_State* L)
{
// itemType:getId()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->id);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetClientId(lua_State* L)
{
// itemType:getClientId()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->clientId);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetName(lua_State* L)
{
// itemType:getName()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushString(L, itemType->name);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetPluralName(lua_State* L)
{
// itemType:getPluralName()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushString(L, itemType->getPluralName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetArticle(lua_State* L)
{
// itemType:getArticle()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushString(L, itemType->article);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetDescription(lua_State* L)
{
// itemType:getDescription()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushString(L, itemType->description);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetSlotPosition(lua_State* L)
{
// itemType:getSlotPosition()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->slotPosition);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetCharges(lua_State* L)
{
// itemType:getCharges()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->charges);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetFluidSource(lua_State* L)
{
// itemType:getFluidSource()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->fluidSource);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetCapacity(lua_State* L)
{
// itemType:getCapacity()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->maxItems);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetWeight(lua_State* L)
{
// itemType:getWeight([count = 1])
uint16_t count = getNumber<uint16_t>(L, 2, 1);
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (!itemType) {
lua_pushnil(L);
return 1;
}
uint64_t weight = static_cast<uint64_t>(itemType->weight) * std::max<int32_t>(1, count);
lua_pushnumber(L, weight);
return 1;
}
int LuaScriptInterface::luaItemTypeGetHitChance(lua_State* L)
{
// itemType:getHitChance()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->hitChance);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetShootRange(lua_State* L)
{
// itemType:getShootRange()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->shootRange);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetAttack(lua_State* L)
{
// itemType:getAttack()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->attack);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetDefense(lua_State* L)
{
// itemType:getDefense()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->defense);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetExtraDefense(lua_State* L)
{
// itemType:getExtraDefense()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->extraDefense);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetArmor(lua_State* L)
{
// itemType:getArmor()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->armor);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetWeaponType(lua_State* L)
{
// itemType:getWeaponType()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->weaponType);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetAmmoType(lua_State* L)
{
// itemType:getAmmoType()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->ammoType);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetCorpseType(lua_State* L)
{
// itemType:getCorpseType()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->corpseType);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetElementType(lua_State* L)
{
// itemType:getElementType()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (!itemType) {
lua_pushnil(L);
return 1;
}
auto& abilities = itemType->abilities;
if (abilities) {
lua_pushnumber(L, abilities->elementType);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetElementDamage(lua_State* L)
{
// itemType:getElementDamage()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (!itemType) {
lua_pushnil(L);
return 1;
}
auto& abilities = itemType->abilities;
if (abilities) {
lua_pushnumber(L, abilities->elementDamage);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetTransformEquipId(lua_State* L)
{
// itemType:getTransformEquipId()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->transformEquipTo);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetTransformDeEquipId(lua_State* L)
{
// itemType:getTransformDeEquipId()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->transformDeEquipTo);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetDestroyId(lua_State* L)
{
// itemType:getDestroyId()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->destroyTo);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetDecayId(lua_State* L)
{
// itemType:getDecayId()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->decayTo);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetRequiredLevel(lua_State* L)
{
// itemType:getRequiredLevel()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->minReqLevel);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeHasSubType(lua_State* L)
{
// itemType:hasSubType()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->hasSubType());
} else {
lua_pushnil(L);
}
return 1;
}
// Combat
int LuaScriptInterface::luaCombatCreate(lua_State* L)
{
// Combat()
pushUserdata<Combat>(L, g_luaEnvironment.createCombatObject(getScriptEnv()->getScriptInterface()));
setMetatable(L, -1, "Combat");
return 1;
}
int LuaScriptInterface::luaCombatSetParameter(lua_State* L)
{
// combat:setParameter(key, value)
Combat* combat = getUserdata<Combat>(L, 1);
if (!combat) {
lua_pushnil(L);
return 1;
}
CombatParam_t key = getNumber<CombatParam_t>(L, 2);
uint32_t value;
if (isBoolean(L, 3)) {
value = getBoolean(L, 3) ? 1 : 0;
} else {
value = getNumber<uint32_t>(L, 3);
}
combat->setParam(key, value);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCombatSetFormula(lua_State* L)
{
// combat:setFormula(type, mina, minb, maxa, maxb)
Combat* combat = getUserdata<Combat>(L, 1);
if (!combat) {
lua_pushnil(L);
return 1;
}
formulaType_t type = getNumber<formulaType_t>(L, 2);
double mina = getNumber<double>(L, 3);
double minb = getNumber<double>(L, 4);
double maxa = getNumber<double>(L, 5);
double maxb = getNumber<double>(L, 6);
combat->setPlayerCombatValues(type, mina, minb, maxa, maxb);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCombatSetArea(lua_State* L)
{
// combat:setArea(area)
if (getScriptEnv()->getScriptId() != EVENT_ID_LOADING) {
reportErrorFunc("This function can only be used while loading the script.");
lua_pushnil(L);
return 1;
}
const AreaCombat* area = g_luaEnvironment.getAreaObject(getNumber<uint32_t>(L, 2));
if (!area) {
reportErrorFunc(getErrorDesc(LUA_ERROR_AREA_NOT_FOUND));
lua_pushnil(L);
return 1;
}
Combat* combat = getUserdata<Combat>(L, 1);
if (combat) {
combat->setArea(new AreaCombat(*area));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCombatAddCondition(lua_State* L)
{
// combat:addCondition(condition)
Condition* condition = getUserdata<Condition>(L, 2);
Combat* combat = getUserdata<Combat>(L, 1);
if (combat && condition) {
combat->addCondition(condition->clone());
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCombatSetCallback(lua_State* L)
{
// combat:setCallback(key, function)
Combat* combat = getUserdata<Combat>(L, 1);
if (!combat) {
lua_pushnil(L);
return 1;
}
CallBackParam_t key = getNumber<CallBackParam_t>(L, 2);
if (!combat->setCallback(key)) {
lua_pushnil(L);
return 1;
}
CallBack* callback = combat->getCallback(key);
if (!callback) {
lua_pushnil(L);
return 1;
}
const std::string& function = getString(L, 3);
pushBoolean(L, callback->loadCallBack(getScriptEnv()->getScriptInterface(), function));
return 1;
}
int LuaScriptInterface::luaCombatSetOrigin(lua_State* L)
{
// combat:setOrigin(origin)
Combat* combat = getUserdata<Combat>(L, 1);
if (combat) {
combat->setOrigin(getNumber<CombatOrigin>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCombatExecute(lua_State* L)
{
// combat:execute(creature, variant)
Combat* combat = getUserdata<Combat>(L, 1);
if (!combat) {
pushBoolean(L, false);
return 1;
}
Creature* creature = getCreature(L, 2);
const LuaVariant& variant = getVariant(L, 3);
switch (variant.type) {
case VARIANT_NUMBER: {
Creature* target = g_game.getCreatureByID(variant.number);
if (!target) {
pushBoolean(L, false);
return 1;
}
if (combat->hasArea()) {
combat->doCombat(creature, target->getPosition());
} else {
combat->doCombat(creature, target);
}
break;
}
case VARIANT_POSITION: {
combat->doCombat(creature, variant.pos);
break;
}
case VARIANT_TARGETPOSITION: {
if (combat->hasArea()) {
combat->doCombat(creature, variant.pos);
} else {
combat->postCombatEffects(creature, variant.pos);
g_game.addMagicEffect(variant.pos, CONST_ME_POFF);
}
break;
}
case VARIANT_STRING: {
Player* target = g_game.getPlayerByName(variant.text);
if (!target) {
pushBoolean(L, false);
return 1;
}
combat->doCombat(creature, target);
break;
}
case VARIANT_NONE: {
reportErrorFunc(getErrorDesc(LUA_ERROR_VARIANT_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
default: {
break;
}
}
pushBoolean(L, true);
return 1;
}
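// Illustration only (not part of the original file): a hypothetical way a
// LuaVariant reaching combat:execute() might be assembled on the C++ side,
// assuming the TFS-style LuaVariant struct with `type` and `pos` members.
static LuaVariant makeTargetPositionVariantExample()
{
	LuaVariant variant;
	variant.type = VARIANT_TARGETPOSITION; // routed to doCombat/postCombatEffects above
	variant.pos = Position(100, 100, 7);
	return variant;
}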
// Condition
int LuaScriptInterface::luaConditionCreate(lua_State* L)
{
// Condition(conditionType[, conditionId = CONDITIONID_COMBAT])
ConditionType_t conditionType = getNumber<ConditionType_t>(L, 2);
ConditionId_t conditionId = getNumber<ConditionId_t>(L, 3, CONDITIONID_COMBAT);
Condition* condition = Condition::createCondition(conditionId, conditionType, 0, 0);
if (condition) {
pushUserdata<Condition>(L, condition);
setMetatable(L, -1, "Condition");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionDelete(lua_State* L)
{
// condition:delete()
Condition** conditionPtr = getRawUserdata<Condition>(L, 1);
if (conditionPtr && *conditionPtr) {
delete *conditionPtr;
*conditionPtr = nullptr;
}
return 0;
}
int LuaScriptInterface::luaConditionGetId(lua_State* L)
{
// condition:getId()
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
lua_pushnumber(L, condition->getId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionGetSubId(lua_State* L)
{
// condition:getSubId()
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
lua_pushnumber(L, condition->getSubId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionGetType(lua_State* L)
{
// condition:getType()
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
lua_pushnumber(L, condition->getType());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionGetIcons(lua_State* L)
{
// condition:getIcons()
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
lua_pushnumber(L, condition->getIcons());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionGetEndTime(lua_State* L)
{
// condition:getEndTime()
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
lua_pushnumber(L, condition->getEndTime());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionClone(lua_State* L)
{
// condition:clone()
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
pushUserdata<Condition>(L, condition->clone());
setMetatable(L, -1, "Condition");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionGetTicks(lua_State* L)
{
// condition:getTicks()
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
lua_pushnumber(L, condition->getTicks());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionSetTicks(lua_State* L)
{
// condition:setTicks(ticks)
int32_t ticks = getNumber<int32_t>(L, 2);
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
condition->setTicks(ticks);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionSetParameter(lua_State* L)
{
// condition:setParameter(key, value)
Condition* condition = getUserdata<Condition>(L, 1);
if (!condition) {
lua_pushnil(L);
return 1;
}
ConditionParam_t key = getNumber<ConditionParam_t>(L, 2);
int32_t value;
if (isBoolean(L, 3)) {
value = getBoolean(L, 3) ? 1 : 0;
} else {
value = getNumber<int32_t>(L, 3);
}
condition->setParam(key, value);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaConditionSetFormula(lua_State* L)
{
// condition:setFormula(mina, minb, maxa, maxb)
double maxb = getNumber<double>(L, 5);
double maxa = getNumber<double>(L, 4);
double minb = getNumber<double>(L, 3);
double mina = getNumber<double>(L, 2);
ConditionSpeed* condition = dynamic_cast<ConditionSpeed*>(getUserdata<Condition>(L, 1));
if (condition) {
condition->setFormulaVars(mina, minb, maxa, maxb);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionSetOutfit(lua_State* L)
{
// condition:setOutfit(outfit)
// condition:setOutfit(lookTypeEx, lookType, lookHead, lookBody, lookLegs, lookFeet[, lookAddons[, lookMount]])
Outfit_t outfit;
if (isTable(L, 2)) {
outfit = getOutfit(L, 2);
} else {
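		// Arguments are read from the last (optional) one down; lookMount and
		// lookAddons fall back to the struct's defaults when omitted.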
outfit.lookMount = getNumber<uint16_t>(L, 9, outfit.lookMount);
outfit.lookAddons = getNumber<uint8_t>(L, 8, outfit.lookAddons);
outfit.lookFeet = getNumber<uint8_t>(L, 7);
outfit.lookLegs = getNumber<uint8_t>(L, 6);
outfit.lookBody = getNumber<uint8_t>(L, 5);
outfit.lookHead = getNumber<uint8_t>(L, 4);
outfit.lookType = getNumber<uint16_t>(L, 3);
outfit.lookTypeEx = getNumber<uint16_t>(L, 2);
}
ConditionOutfit* condition = dynamic_cast<ConditionOutfit*>(getUserdata<Condition>(L, 1));
if (condition) {
condition->setOutfit(outfit);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionAddDamage(lua_State* L)
{
// condition:addDamage(rounds, time, value)
int32_t value = getNumber<int32_t>(L, 4);
int32_t time = getNumber<int32_t>(L, 3);
int32_t rounds = getNumber<int32_t>(L, 2);
ConditionDamage* condition = dynamic_cast<ConditionDamage*>(getUserdata<Condition>(L, 1));
if (condition) {
pushBoolean(L, condition->addDamage(rounds, time, value));
} else {
lua_pushnil(L);
}
return 1;
}
// MonsterType
int LuaScriptInterface::luaMonsterTypeCreate(lua_State* L)
{
// MonsterType(name)
MonsterType* monsterType = g_monsters.getMonsterType(getString(L, 2));
if (monsterType) {
pushUserdata<MonsterType>(L, monsterType);
setMetatable(L, -1, "MonsterType");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsAttackable(lua_State* L)
{
// monsterType:isAttackable()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushBoolean(L, monsterType->info.isAttackable);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsConvinceable(lua_State* L)
{
// monsterType:isConvinceable()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushBoolean(L, monsterType->info.isConvinceable);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsSummonable(lua_State* L)
{
// monsterType:isSummonable()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushBoolean(L, monsterType->info.isSummonable);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsIllusionable(lua_State* L)
{
// monsterType:isIllusionable()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushBoolean(L, monsterType->info.isIllusionable);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsHostile(lua_State* L)
{
// monsterType:isHostile()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushBoolean(L, monsterType->info.isHostile);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsPushable(lua_State* L)
{
// monsterType:isPushable()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushBoolean(L, monsterType->info.pushable);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsHealthShown(lua_State* L)
{
// monsterType:isHealthShown()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushBoolean(L, !monsterType->info.hiddenHealth);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeCanPushItems(lua_State* L)
{
// monsterType:canPushItems()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushBoolean(L, monsterType->info.canPushItems);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeCanPushCreatures(lua_State* L)
{
// monsterType:canPushCreatures()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushBoolean(L, monsterType->info.canPushCreatures);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetName(lua_State* L)
{
// monsterType:getName()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushString(L, monsterType->name);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetNameDescription(lua_State* L)
{
// monsterType:getNameDescription()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushString(L, monsterType->nameDescription);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetHealth(lua_State* L)
{
// monsterType:getHealth()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.health);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetMaxHealth(lua_State* L)
{
// monsterType:getMaxHealth()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.healthMax);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetRunHealth(lua_State* L)
{
// monsterType:getRunHealth()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.runAwayHealth);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetExperience(lua_State* L)
{
// monsterType:getExperience()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.experience);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetCombatImmunities(lua_State* L)
{
// monsterType:getCombatImmunities()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.damageImmunities);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetConditionImmunities(lua_State* L)
{
// monsterType:getConditionImmunities()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.conditionImmunities);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetAttackList(lua_State* L)
{
// monsterType:getAttackList()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
lua_createtable(L, monsterType->info.attackSpells.size(), 0);
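	// Pre-sized array part; entries are appended 1-based via lua_rawseti below.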
int index = 0;
for (const auto& spellBlock : monsterType->info.attackSpells) {
lua_createtable(L, 0, 8);
setField(L, "chance", spellBlock.chance);
setField(L, "isCombatSpell", spellBlock.combatSpell ? 1 : 0);
setField(L, "isMelee", spellBlock.isMelee ? 1 : 0);
setField(L, "minCombatValue", spellBlock.minCombatValue);
setField(L, "maxCombatValue", spellBlock.maxCombatValue);
setField(L, "range", spellBlock.range);
setField(L, "speed", spellBlock.speed);
pushUserdata<CombatSpell>(L, static_cast<CombatSpell*>(spellBlock.spell));
lua_setfield(L, -2, "spell");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetDefenseList(lua_State* L)
{
// monsterType:getDefenseList()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
lua_createtable(L, monsterType->info.defenseSpells.size(), 0);
int index = 0;
for (const auto& spellBlock : monsterType->info.defenseSpells) {
lua_createtable(L, 0, 8);
setField(L, "chance", spellBlock.chance);
setField(L, "isCombatSpell", spellBlock.combatSpell ? 1 : 0);
setField(L, "isMelee", spellBlock.isMelee ? 1 : 0);
setField(L, "minCombatValue", spellBlock.minCombatValue);
setField(L, "maxCombatValue", spellBlock.maxCombatValue);
setField(L, "range", spellBlock.range);
setField(L, "speed", spellBlock.speed);
pushUserdata<CombatSpell>(L, static_cast<CombatSpell*>(spellBlock.spell));
lua_setfield(L, -2, "spell");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetElementList(lua_State* L)
{
// monsterType:getElementList()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
lua_createtable(L, monsterType->info.elementMap.size(), 0);
for (const auto& elementEntry : monsterType->info.elementMap) {
lua_pushnumber(L, elementEntry.second);
lua_rawseti(L, -2, elementEntry.first);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetVoices(lua_State* L)
{
// monsterType:getVoices()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
int index = 0;
lua_createtable(L, monsterType->info.voiceVector.size(), 0);
for (const auto& voiceBlock : monsterType->info.voiceVector) {
lua_createtable(L, 0, 2);
setField(L, "text", voiceBlock.text);
setField(L, "yellText", voiceBlock.yellText);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetLoot(lua_State* L)
{
// monsterType:getLoot()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
	// Not static: the lambda captures L by reference, so caching it across
	// calls would leave it bound to a stale lua_State after the first call.
	const std::function<void(const std::vector<LootBlock>&)> parseLoot = [&](const std::vector<LootBlock>& lootList) {
lua_createtable(L, lootList.size(), 0);
int index = 0;
for (const auto& lootBlock : lootList) {
lua_createtable(L, 0, 7);
setField(L, "itemId", lootBlock.id);
setField(L, "chance", lootBlock.chance);
setField(L, "subType", lootBlock.subType);
setField(L, "maxCount", lootBlock.countmax);
setField(L, "actionId", lootBlock.actionId);
setField(L, "text", lootBlock.text);
parseLoot(lootBlock.childLoot);
lua_setfield(L, -2, "childLoot");
lua_rawseti(L, -2, ++index);
}
};
parseLoot(monsterType->info.lootItems);
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetCreatureEvents(lua_State* L)
{
// monsterType:getCreatureEvents()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
int index = 0;
lua_createtable(L, monsterType->info.scripts.size(), 0);
for (const std::string& creatureEvent : monsterType->info.scripts) {
pushString(L, creatureEvent);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetSummonList(lua_State* L)
{
// monsterType:getSummonList()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
int index = 0;
lua_createtable(L, monsterType->info.summons.size(), 0);
for (const auto& summonBlock : monsterType->info.summons) {
lua_createtable(L, 0, 3);
setField(L, "name", summonBlock.name);
setField(L, "speed", summonBlock.speed);
setField(L, "chance", summonBlock.chance);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetMaxSummons(lua_State* L)
{
// monsterType:getMaxSummons()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.maxSummons);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetArmor(lua_State* L)
{
// monsterType:getArmor()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.armor);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetDefense(lua_State* L)
{
// monsterType:getDefense()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.defense);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetOutfit(lua_State* L)
{
// monsterType:getOutfit()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushOutfit(L, monsterType->info.outfit);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetRace(lua_State* L)
{
// monsterType:getRace()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.race);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetCorpseId(lua_State* L)
{
// monsterType:getCorpseId()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.lookcorpse);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetManaCost(lua_State* L)
{
// monsterType:getManaCost()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.manaCost);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetBaseSpeed(lua_State* L)
{
// monsterType:getBaseSpeed()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.baseSpeed);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetLight(lua_State* L)
{
// monsterType:getLight()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
lua_pushnumber(L, monsterType->info.light.level);
lua_pushnumber(L, monsterType->info.light.color);
return 2;
}
int LuaScriptInterface::luaMonsterTypeGetStaticAttackChance(lua_State* L)
{
// monsterType:getStaticAttackChance()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.staticAttackChance);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetTargetDistance(lua_State* L)
{
// monsterType:getTargetDistance()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.targetDistance);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetYellChance(lua_State* L)
{
// monsterType:getYellChance()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.yellChance);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetYellSpeedTicks(lua_State* L)
{
// monsterType:getYellSpeedTicks()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.yellSpeedTicks);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetChangeTargetChance(lua_State* L)
{
// monsterType:getChangeTargetChance()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.changeTargetChance);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetChangeTargetSpeed(lua_State* L)
{
// monsterType:getChangeTargetSpeed()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.changeTargetSpeed);
} else {
lua_pushnil(L);
}
return 1;
}
// Party
int LuaScriptInterface::luaPartyDisband(lua_State* L)
{
// party:disband()
Party** partyPtr = getRawUserdata<Party>(L, 1);
if (partyPtr && *partyPtr) {
Party*& party = *partyPtr;
party->disband();
party = nullptr;
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyGetLeader(lua_State* L)
{
// party:getLeader()
Party* party = getUserdata<Party>(L, 1);
if (!party) {
lua_pushnil(L);
return 1;
}
Player* leader = party->getLeader();
if (leader) {
pushUserdata<Player>(L, leader);
setMetatable(L, -1, "Player");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartySetLeader(lua_State* L)
{
// party:setLeader(player)
Player* player = getPlayer(L, 2);
Party* party = getUserdata<Party>(L, 1);
if (party && player) {
pushBoolean(L, party->passPartyLeadership(player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyGetMembers(lua_State* L)
{
// party:getMembers()
Party* party = getUserdata<Party>(L, 1);
if (!party) {
lua_pushnil(L);
return 1;
}
int index = 0;
lua_createtable(L, party->getMemberCount(), 0);
for (Player* player : party->getMembers()) {
pushUserdata<Player>(L, player);
setMetatable(L, -1, "Player");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaPartyGetMemberCount(lua_State* L)
{
// party:getMemberCount()
Party* party = getUserdata<Party>(L, 1);
if (party) {
lua_pushnumber(L, party->getMemberCount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyGetInvitees(lua_State* L)
{
// party:getInvitees()
Party* party = getUserdata<Party>(L, 1);
if (party) {
lua_createtable(L, party->getInvitationCount(), 0);
int index = 0;
for (Player* player : party->getInvitees()) {
pushUserdata<Player>(L, player);
setMetatable(L, -1, "Player");
lua_rawseti(L, -2, ++index);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyGetInviteeCount(lua_State* L)
{
// party:getInviteeCount()
Party* party = getUserdata<Party>(L, 1);
if (party) {
lua_pushnumber(L, party->getInvitationCount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyAddInvite(lua_State* L)
{
// party:addInvite(player)
Player* player = getPlayer(L, 2);
Party* party = getUserdata<Party>(L, 1);
if (party && player) {
pushBoolean(L, party->invitePlayer(*player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyRemoveInvite(lua_State* L)
{
// party:removeInvite(player)
Player* player = getPlayer(L, 2);
Party* party = getUserdata<Party>(L, 1);
if (party && player) {
pushBoolean(L, party->removeInvite(*player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyAddMember(lua_State* L)
{
// party:addMember(player)
Player* player = getPlayer(L, 2);
Party* party = getUserdata<Party>(L, 1);
if (party && player) {
pushBoolean(L, party->joinParty(*player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyRemoveMember(lua_State* L)
{
// party:removeMember(player)
Player* player = getPlayer(L, 2);
Party* party = getUserdata<Party>(L, 1);
if (party && player) {
pushBoolean(L, party->leaveParty(player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyIsSharedExperienceActive(lua_State* L)
{
// party:isSharedExperienceActive()
Party* party = getUserdata<Party>(L, 1);
if (party) {
pushBoolean(L, party->isSharedExperienceActive());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyIsSharedExperienceEnabled(lua_State* L)
{
// party:isSharedExperienceEnabled()
Party* party = getUserdata<Party>(L, 1);
if (party) {
pushBoolean(L, party->isSharedExperienceEnabled());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyShareExperience(lua_State* L)
{
// party:shareExperience(experience)
uint64_t experience = getNumber<uint64_t>(L, 2);
Party* party = getUserdata<Party>(L, 1);
if (party) {
party->shareExperience(experience);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartySetSharedExperience(lua_State* L)
{
// party:setSharedExperience(active)
bool active = getBoolean(L, 2);
Party* party = getUserdata<Party>(L, 1);
if (party) {
pushBoolean(L, party->setSharedExperience(party->getLeader(), active));
} else {
lua_pushnil(L);
}
return 1;
}
// Spells
int LuaScriptInterface::luaSpellCreate(lua_State* L)
{
// Spell(words, name or id)
InstantSpell* spell = nullptr;
if (isNumber(L, 2)) {
spell = g_spells->getInstantSpellById(getNumber<uint32_t>(L, 2));
} else {
std::string stringArgument = getString(L, 2);
spell = g_spells->getInstantSpellByName(stringArgument);
if (!spell) {
spell = g_spells->getInstantSpell(stringArgument);
}
}
if (spell) {
pushInstantSpell(L, *spell);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellGetManaCost(lua_State* L)
{
// spell:getManaCost(player)
InstantSpell* spell = getUserdata<InstantSpell>(L, 1);
Player* player = getUserdata<Player>(L, 2);
if (spell && player) {
lua_pushnumber(L, spell->getManaCost(player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellGetSoulCost(lua_State* L)
{
// spell:getSoulCost()
if (InstantSpell* spell = getUserdata<InstantSpell>(L, 1)) {
lua_pushnumber(L, spell->getSoulCost());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellIsPremium(lua_State* L)
{
// spell:isPremium()
if (InstantSpell* spell = getUserdata<InstantSpell>(L, 1)) {
pushBoolean(L, spell->isPremium());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellIsLearnable(lua_State* L)
{
// spell:isLearnable()
if (InstantSpell* spell = getUserdata<InstantSpell>(L, 1)) {
pushBoolean(L, spell->isLearnable());
} else {
lua_pushnil(L);
}
return 1;
}
// LuaEnvironment
LuaEnvironment::LuaEnvironment() : LuaScriptInterface("Main Interface") {}
LuaEnvironment::~LuaEnvironment()
{
delete testInterface;
closeState();
}
bool LuaEnvironment::initState()
{
luaState = luaL_newstate();
if (!luaState) {
return false;
}
luaL_openlibs(luaState);
registerFunctions();
runningEventId = EVENT_ID_USER;
return true;
}
bool LuaEnvironment::reInitState()
{
// TODO: get children, reload children
closeState();
return initState();
}
bool LuaEnvironment::closeState()
{
if (!luaState) {
return false;
}
for (const auto& combatEntry : combatIdMap) {
clearCombatObjects(combatEntry.first);
}
for (const auto& areaEntry : areaIdMap) {
clearAreaObjects(areaEntry.first);
}
for (auto& timerEntry : timerEvents) {
LuaTimerEventDesc timerEventDesc = std::move(timerEntry.second);
for (int32_t parameter : timerEventDesc.parameters) {
luaL_unref(luaState, LUA_REGISTRYINDEX, parameter);
}
luaL_unref(luaState, LUA_REGISTRYINDEX, timerEventDesc.function);
}
combatIdMap.clear();
areaIdMap.clear();
timerEvents.clear();
cacheFiles.clear();
lua_close(luaState);
luaState = nullptr;
return true;
}
LuaScriptInterface* LuaEnvironment::getTestInterface()
{
if (!testInterface) {
testInterface = new LuaScriptInterface("Test Interface");
testInterface->initState();
}
return testInterface;
}
Combat* LuaEnvironment::getCombatObject(uint32_t id) const
{
auto it = combatMap.find(id);
if (it == combatMap.end()) {
return nullptr;
}
return it->second;
}
Combat* LuaEnvironment::createCombatObject(LuaScriptInterface* interface)
{
Combat* combat = new Combat;
combatMap[++lastCombatId] = combat;
combatIdMap[interface].push_back(lastCombatId);
return combat;
}
void LuaEnvironment::clearCombatObjects(LuaScriptInterface* interface)
{
auto it = combatIdMap.find(interface);
if (it == combatIdMap.end()) {
return;
}
for (uint32_t id : it->second) {
auto itt = combatMap.find(id);
if (itt != combatMap.end()) {
delete itt->second;
combatMap.erase(itt);
}
}
it->second.clear();
}
AreaCombat* LuaEnvironment::getAreaObject(uint32_t id) const
{
auto it = areaMap.find(id);
if (it == areaMap.end()) {
return nullptr;
}
return it->second;
}
uint32_t LuaEnvironment::createAreaObject(LuaScriptInterface* interface)
{
areaMap[++lastAreaId] = new AreaCombat;
areaIdMap[interface].push_back(lastAreaId);
return lastAreaId;
}
void LuaEnvironment::clearAreaObjects(LuaScriptInterface* interface)
{
auto it = areaIdMap.find(interface);
if (it == areaIdMap.end()) {
return;
}
for (uint32_t id : it->second) {
auto itt = areaMap.find(id);
if (itt != areaMap.end()) {
delete itt->second;
areaMap.erase(itt);
}
}
it->second.clear();
}
void LuaEnvironment::executeTimerEvent(uint32_t eventIndex)
{
auto it = timerEvents.find(eventIndex);
if (it == timerEvents.end()) {
return;
}
LuaTimerEventDesc timerEventDesc = std::move(it->second);
timerEvents.erase(it);
//push function
lua_rawgeti(luaState, LUA_REGISTRYINDEX, timerEventDesc.function);
//push parameters
for (auto parameter : boost::adaptors::reverse(timerEventDesc.parameters)) {
lua_rawgeti(luaState, LUA_REGISTRYINDEX, parameter);
}
//call the function
if (reserveScriptEnv()) {
ScriptEnvironment* env = getScriptEnv();
env->setTimerEvent();
env->setScriptId(timerEventDesc.scriptId, this);
callFunction(timerEventDesc.parameters.size());
} else {
std::cout << "[Error - LuaScriptInterface::executeTimerEvent] Call stack overflow" << std::endl;
}
//free resources
luaL_unref(luaState, LUA_REGISTRYINDEX, timerEventDesc.function);
for (auto parameter : timerEventDesc.parameters) {
luaL_unref(luaState, LUA_REGISTRYINDEX, parameter);
}
}
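// Illustration only (not part of the original file): the Lua registry
// reference pattern executeTimerEvent relies on, assuming a valid lua_State.
static void registryRefExample(lua_State* L)
{
	lua_pushcfunction(L, [](lua_State*) -> int { return 0; });
	int ref = luaL_ref(L, LUA_REGISTRYINDEX); // pops the value, returns a handle
	lua_rawgeti(L, LUA_REGISTRYINDEX, ref);   // pushes it back when the timer fires
	lua_pop(L, 1);
	luaL_unref(L, LUA_REGISTRYINDEX, ref);    // releases the handle afterwards
}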
| 1 | 15,180 | We don't usually put spaces between ( and [, and I think `path` should be passed by reference. | otland-forgottenserver | cpp |
@@ -419,12 +419,11 @@ int Zone::SaveTempItem(uint32 merchantid, uint32 npcid, uint32 item, int32 charg
if (!ml.origslot) {
ml.origslot = ml.slot;
}
-
- if (charges > 0) {
+ bool IsStackable = database.GetItem(item)->Stackable;
+ if ((IsStackable && charges > 0) || (!IsStackable && sold)) {
database.SaveMerchantTemp(npcid, ml.origslot, item, ml.charges);
tmp_merlist.push_back(ml);
- }
- else {
+ } else {
database.DeleteMerchantTemp(npcid, ml.origslot);
}
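A minimal restatement of the rule this hunk introduces (hypothetical helper for
illustration; note the patch assumes database.GetItem(item) returns a valid pointer):

static bool KeepTempMerchantEntry(bool stackable, int32_t charges, bool sold)
{
	// Stackable items stay listed while charges remain; non-stackable items
	// stay listed only when they were actually sold to the merchant.
	return (stackable && charges > 0) || (!stackable && sold);
}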
} | 1 | /* EQEMu: Everquest Server Emulator
Copyright (C) 2001-2003 EQEMu Development Team (http://eqemulator.net)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY except by those people which sell it, which
are required to give you total support for your newly bought product;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <float.h>
#include <iostream>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#ifdef _WINDOWS
#define snprintf _snprintf
#define vsnprintf _vsnprintf
#else
#include <pthread.h>
#include "../common/unix.h"
#endif
#include "../common/global_define.h"
#include "../common/features.h"
#include "../common/rulesys.h"
#include "../common/seperator.h"
#include "../common/string_util.h"
#include "../common/eqemu_logsys.h"
#include "expedition.h"
#include "guild_mgr.h"
#include "map.h"
#include "npc.h"
#include "object.h"
#include "pathfinder_null.h"
#include "pathfinder_nav_mesh.h"
#include "pathfinder_waypoint.h"
#include "petitions.h"
#include "quest_parser_collection.h"
#include "spawn2.h"
#include "spawngroup.h"
#include "water_map.h"
#include "worldserver.h"
#include "zone.h"
#include "zone_config.h"
#include "mob_movement_manager.h"
#include "npc_scale_manager.h"
#include "../common/data_verification.h"
#include "zone_reload.h"
#include "../common/repositories/criteria/content_filter_criteria.h"
#include "../common/repositories/content_flags_repository.h"
#include "../common/repositories/zone_points_repository.h"
#include <time.h>
#include <ctime>
#ifdef _WINDOWS
#define strncasecmp _strnicmp
#define strcasecmp _stricmp
#endif
extern bool staticzone;
extern PetitionList petition_list;
extern QuestParserCollection* parse;
extern uint32 numclients;
extern WorldServer worldserver;
extern Zone* zone;
extern NpcScaleManager* npc_scale_manager;
Mutex MZoneShutdown;
volatile bool is_zone_loaded = false;
Zone* zone = 0;
void UpdateWindowTitle(char* iNewTitle);
bool Zone::Bootup(uint32 iZoneID, uint32 iInstanceID, bool iStaticZone) {
const char* zonename = ZoneName(iZoneID);
if (iZoneID == 0 || zonename == 0)
return false;
if (zone != 0 || is_zone_loaded) {
std::cerr << "Error: Zone::Bootup call when zone already booted!" << std::endl;
worldserver.SetZoneData(0);
return false;
}
LogInfo("Booting [{}] ([{}]:[{}])", zonename, iZoneID, iInstanceID);
numclients = 0;
zone = new Zone(iZoneID, iInstanceID, zonename);
//init the zone, loads all the data, etc
if (!zone->Init(iStaticZone)) {
safe_delete(zone);
std::cerr << "Zone->Init failed" << std::endl;
worldserver.SetZoneData(0);
return false;
}
std::string tmp;
if (database.GetVariable("loglevel", tmp)) {
int log_levels[4];
int tmp_i = atoi(tmp.c_str());
		if (tmp_i > 9) { // Server is using the new code
			for (int i = 0; i < 4; i++) {
				if (tmp[i] >= '0' && tmp[i] <= '9')
					log_levels[i] = tmp[i] - '0'; // convert the ASCII digit to its value
				else
					log_levels[i] = 0; // set to zero on a bogus char
			}
zone->loglevelvar = log_levels[0];
LogInfo("General logging level: [{}]", zone->loglevelvar);
zone->merchantvar = log_levels[1];
LogInfo("Merchant logging level: [{}]", zone->merchantvar);
zone->tradevar = log_levels[2];
LogInfo("Trade logging level: [{}]", zone->tradevar);
zone->lootvar = log_levels[3];
LogInfo("Loot logging level: [{}]", zone->lootvar);
}
else {
zone->loglevelvar = uint8(tmp_i); //continue supporting only command logging (for now)
zone->merchantvar = 0;
zone->tradevar = 0;
zone->lootvar = 0;
}
}
is_zone_loaded = true;
worldserver.SetZoneData(iZoneID, iInstanceID);
if(iInstanceID != 0)
{
auto pack = new ServerPacket(ServerOP_AdventureZoneData, sizeof(uint16));
*((uint16*)pack->pBuffer) = iInstanceID;
worldserver.SendPacket(pack);
delete pack;
}
LogInfo("---- Zone server [{}], listening on port:[{}] ----", zonename, ZoneConfig::get()->ZonePort);
LogInfo("Zone Bootup: [{}] ([{}]: [{}])", zonename, iZoneID, iInstanceID);
parse->Init();
UpdateWindowTitle(nullptr);
zone->GetTimeSync();
zone->RequestUCSServerStatus();
/**
* Set Shutdown timer
*/
uint32 shutdown_timer = static_cast<uint32>(content_db.getZoneShutDownDelay(zone->GetZoneID(), zone->GetInstanceVersion()));
zone->StartShutdownTimer(shutdown_timer);
/*
* Set Logging
*/
LogSys.StartFileLogs(StringFormat("%s_version_%u_inst_id_%u_port_%u", zone->GetShortName(), zone->GetInstanceVersion(), zone->GetInstanceID(), ZoneConfig::get()->ZonePort));
return true;
}
//this really loads the objects into entity_list
bool Zone::LoadZoneObjects()
{
std::string query = StringFormat(
"SELECT id, zoneid, xpos, ypos, zpos, heading, itemid, charges, objectname, type, icon, "
"unknown08, unknown10, unknown20, unknown24, unknown76, size, tilt_x, tilt_y, display_name "
"FROM object WHERE zoneid = %i AND (version = %u OR version = -1) %s",
zoneid,
instanceversion,
ContentFilterCriteria::apply().c_str()
);
auto results = content_db.QueryDatabase(query);
if (!results.Success()) {
LogError("Error Loading Objects from DB: [{}]",
results.ErrorMessage().c_str());
return false;
}
LogInfo("Loading Objects from DB");
for (auto row = results.begin(); row != results.end(); ++row) {
if (atoi(row[9]) == 0) {
// Type == 0 - Static Object
const char *shortname = ZoneName(atoi(row[1]), false); // zoneid -> zone_shortname
if (!shortname)
continue;
Door d;
memset(&d, 0, sizeof(d));
strn0cpy(d.zone_name, shortname, sizeof(d.zone_name));
d.db_id = 1000000000 + atoi(row[0]); // Out of range of normal use for doors.id
d.door_id = -1; // Client doesn't care if these are all the same door_id
d.pos_x = atof(row[2]); // xpos
d.pos_y = atof(row[3]); // ypos
d.pos_z = atof(row[4]); // zpos
d.heading = atof(row[5]); // heading
strn0cpy(d.door_name, row[8], sizeof(d.door_name)); // objectname
// Strip trailing "_ACTORDEF" if present. Client won't accept it for doors.
int len = strlen(d.door_name);
if ((len > 9) && (memcmp(&d.door_name[len - 9], "_ACTORDEF", 10) == 0))
d.door_name[len - 9] = '\0';
memcpy(d.dest_zone, "NONE", 5);
if ((d.size = atoi(row[11])) == 0) // unknown08 = optional size percentage
d.size = 100;
switch (d.opentype = atoi(row[12])) // unknown10 = optional request_nonsolid (0 or 1 or experimental number)
{
case 0:
d.opentype = 31;
break;
case 1:
d.opentype = 9;
break;
}
d.incline = atoi(row[13]); // unknown20 = optional model incline value
d.client_version_mask = 0xFFFFFFFF; // We should load the mask from the zone.
auto door = new Doors(&d);
entity_list.AddDoor(door);
}
Object_Struct data = {0};
uint32 id = 0;
uint32 icon = 0;
uint32 type = 0;
uint32 itemid = 0;
uint32 idx = 0;
int16 charges = 0;
id = (uint32)atoi(row[0]);
data.zone_id = atoi(row[1]);
data.x = atof(row[2]);
data.y = atof(row[3]);
data.z = atof(row[4]);
data.heading = atof(row[5]);
itemid = (uint32)atoi(row[6]);
charges = (int16)atoi(row[7]);
strcpy(data.object_name, row[8]);
type = (uint8)atoi(row[9]);
icon = (uint32)atoi(row[10]);
data.object_type = type;
data.linked_list_addr[0] = 0;
data.linked_list_addr[1] = 0;
data.solidtype = (uint32)atoi(row[12]);
data.unknown020 = (uint32)atoi(row[13]);
data.unknown024 = (uint32)atoi(row[14]);
data.unknown076 = (uint32)atoi(row[15]);
data.size = atof(row[16]);
data.tilt_x = atof(row[17]);
data.tilt_y = atof(row[18]);
data.unknown084 = 0;
glm::vec3 position;
position.x = data.x;
position.y = data.y;
position.z = data.z;
if (zone->HasMap()) {
data.z = zone->zonemap->FindBestZ(position, nullptr);
}
EQ::ItemInstance *inst = nullptr;
		// FatherNitwit: this doesn't seem to work...
		// tradeskill containers do not have an itemid of 0... at least from what I am seeing
if (itemid == 0) {
// Generic tradeskill container
inst = new EQ::ItemInstance(ItemInstWorldContainer);
} else {
// Groundspawn object
inst = database.CreateItem(itemid);
}
// Father Nitwit's fix... not perfect...
if (inst == nullptr && type != OT_DROPPEDITEM) {
inst = new EQ::ItemInstance(ItemInstWorldContainer);
}
// Load child objects if container
if (inst && inst->IsType(EQ::item::ItemClassBag)) {
database.LoadWorldContainer(id, inst);
}
auto object = new Object(id, type, icon, data, inst);
object->SetDisplayName(row[19]);
entity_list.AddObject(object, false);
if (type == OT_DROPPEDITEM && itemid != 0)
entity_list.RemoveObject(object->GetID());
safe_delete(inst);
}
return true;
}
//this also just loads into entity_list, not really into zone
bool Zone::LoadGroundSpawns() {
Ground_Spawns groundspawn;
memset(&groundspawn, 0, sizeof(groundspawn));
int gsindex=0;
LogInfo("Loading Ground Spawns from DB");
content_db.LoadGroundSpawns(zoneid, GetInstanceVersion(), &groundspawn);
uint32 ix=0;
char* name = nullptr;
uint32 gsnumber=0;
for(gsindex=0;gsindex<50;gsindex++){
if(groundspawn.spawn[gsindex].item>0 && groundspawn.spawn[gsindex].item<SAYLINK_ITEM_ID){
EQ::ItemInstance* inst = nullptr;
inst = database.CreateItem(groundspawn.spawn[gsindex].item);
gsnumber=groundspawn.spawn[gsindex].max_allowed;
ix=0;
if(inst){
name = groundspawn.spawn[gsindex].name;
for(ix=0;ix<gsnumber;ix++){
auto object = new Object(
inst, name, groundspawn.spawn[gsindex].max_x,
groundspawn.spawn[gsindex].min_x, groundspawn.spawn[gsindex].max_y,
groundspawn.spawn[gsindex].min_y, groundspawn.spawn[gsindex].max_z,
groundspawn.spawn[gsindex].heading,
groundspawn.spawn[gsindex].respawntimer); // new object with id of 10000+
entity_list.AddObject(object, false);
}
safe_delete(inst);
}
}
}
return(true);
}
void Zone::DumpMerchantList(uint32 npcid) {
std::list<TempMerchantList> tmp_merlist = tmpmerchanttable[npcid];
std::list<TempMerchantList>::const_iterator tmp_itr;
TempMerchantList ml;
for (tmp_itr = tmp_merlist.begin(); tmp_itr != tmp_merlist.end(); ++tmp_itr) {
ml = *tmp_itr;
LogInventory("slot[{}] Orig[{}] Item[{}] Charges[{}]", ml.slot, ml.origslot, ml.item, ml.charges);
}
}
int Zone::SaveTempItem(uint32 merchantid, uint32 npcid, uint32 item, int32 charges, bool sold) {
LogInventory("Transaction of [{}] [{}]", charges, item);
//DumpMerchantList(npcid);
// Iterate past main items.
// If the item being transacted is in this list, return 0;
std::list<MerchantList> merlist = merchanttable[merchantid];
std::list<MerchantList>::const_iterator itr;
uint32 temp_slot_index = 1;
for (itr = merlist.begin(); itr != merlist.end(); ++itr) {
MerchantList ml = *itr;
if (ml.item == item) {
return 0;
}
// Account for merchant lists with gaps in them.
if (ml.slot >= temp_slot_index) {
temp_slot_index = ml.slot + 1;
}
}
LogInventory("Searching Temporary List. Main list ended at [{}]", temp_slot_index-1);
// Now search the temporary list.
std::list<TempMerchantList> tmp_merlist = tmpmerchanttable[npcid];
std::list<TempMerchantList>::const_iterator tmp_itr;
TempMerchantList ml;
uint32 first_empty_slot = 0; // Save 1st vacant slot while searching..
bool found = false;
for (tmp_itr = tmp_merlist.begin(); tmp_itr != tmp_merlist.end(); ++tmp_itr) {
ml = *tmp_itr;
if (ml.item == item) {
found = true;
LogInventory("Item found in temp list at [{}] with [{}] charges", ml.origslot, ml.charges);
break;
}
}
if (found) {
tmp_merlist.clear();
std::list<TempMerchantList> oldtmp_merlist = tmpmerchanttable[npcid];
for (tmp_itr = oldtmp_merlist.begin(); tmp_itr != oldtmp_merlist.end(); ++tmp_itr) {
TempMerchantList ml2 = *tmp_itr;
if(ml2.item != item)
tmp_merlist.push_back(ml2);
else {
if (sold) {
LogInventory("Total charges is [{}] + [{}] charges", ml.charges, charges);
ml.charges = ml.charges + charges;
}
else {
ml.charges = charges;
LogInventory("new charges is [{}] charges", ml.charges);
}
if (!ml.origslot) {
ml.origslot = ml.slot;
}
if (charges > 0) {
database.SaveMerchantTemp(npcid, ml.origslot, item, ml.charges);
tmp_merlist.push_back(ml);
}
else {
database.DeleteMerchantTemp(npcid, ml.origslot);
}
}
}
tmpmerchanttable[npcid] = tmp_merlist;
//DumpMerchantList(npcid);
return ml.slot;
}
else {
		if (charges < 0) { // sanity check only, shouldn't happen
			charges = 0x7FFF;
		}
		// Find an unused db slot #
std::list<int> slots;
TempMerchantList ml3;
for (tmp_itr = tmp_merlist.begin(); tmp_itr != tmp_merlist.end(); ++tmp_itr) {
ml3 = *tmp_itr;
slots.push_back(ml3.origslot);
}
slots.sort();
std::list<int>::const_iterator slots_itr;
uint32 first_empty_slot = 0;
uint32 idx = temp_slot_index;
for (slots_itr = slots.begin(); slots_itr != slots.end(); ++slots_itr) {
if (!first_empty_slot && *slots_itr > idx) {
LogInventory("Popped [{}]", *slots_itr);
LogInventory("First Gap Found at [{}]", idx);
break;
}
++idx;
}
first_empty_slot = idx;
		// Find an unused mslot
slots.clear();
for (tmp_itr = tmp_merlist.begin(); tmp_itr != tmp_merlist.end(); ++tmp_itr) {
ml3 = *tmp_itr;
slots.push_back(ml3.slot);
}
slots.sort();
uint32 first_empty_mslot=0;
idx = temp_slot_index;
for (slots_itr = slots.begin(); slots_itr != slots.end(); ++slots_itr) {
if (!first_empty_mslot && *slots_itr > idx) {
LogInventory("Popped [{}]", *slots_itr);
LogInventory("First Gap Found at [{}]", idx);
break;
}
++idx;
}
first_empty_mslot = idx;
database.SaveMerchantTemp(npcid, first_empty_slot, item, charges);
tmp_merlist = tmpmerchanttable[npcid];
TempMerchantList ml2;
ml2.charges = charges;
LogInventory("Adding slot [{}] with [{}] charges.", first_empty_mslot, charges);
ml2.item = item;
ml2.npcid = npcid;
ml2.slot = first_empty_mslot;
ml2.origslot = first_empty_slot;
tmp_merlist.push_back(ml2);
tmpmerchanttable[npcid] = tmp_merlist;
//DumpMerchantList(npcid);
return ml2.slot;
}
}
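// Illustration only (not part of the original file): the first-gap search
// used twice above, distilled. Assumes `occupied` is sorted ascending and
// duplicate-free; returns the first index >= start that is not taken.
static uint32 FirstFreeSlotExample(const std::list<int>& occupied, uint32 start)
{
	uint32 idx = start;
	for (int slot : occupied) {
		if (static_cast<uint32>(slot) > idx) {
			break; // gap found: idx is unused
		}
		++idx;
	}
	return idx;
}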
uint32 Zone::GetTempMerchantQuantity(uint32 NPCID, uint32 Slot) {
std::list<TempMerchantList> TmpMerchantList = tmpmerchanttable[NPCID];
std::list<TempMerchantList>::const_iterator Iterator;
for (Iterator = TmpMerchantList.begin(); Iterator != TmpMerchantList.end(); ++Iterator)
if ((*Iterator).slot == Slot) {
LogInventory("Slot [{}] has [{}] charges.", Slot, (*Iterator).charges);
return (*Iterator).charges;
}
return 0;
}
void Zone::LoadTempMerchantData()
{
LogInfo("Loading Temporary Merchant Lists");
auto results = content_db.QueryDatabase(
fmt::format(
SQL(
SELECT
DISTINCT npc_types.id
FROM
npc_types
JOIN spawnentry ON spawnentry.npcID = npc_types.id
JOIN spawn2 ON spawn2.spawngroupID = spawnentry.spawngroupID
WHERE
spawn2.zone = '{}'
AND spawn2.version = {}
),
GetShortName(),
GetInstanceVersion()
)
);
std::vector<std::string> npc_ids;
for (auto row = results.begin(); row != results.end(); ++row) {
npc_ids.push_back(row[0]);
	}

	if (npc_ids.empty()) {
		return; // an empty IN () clause is invalid SQL
	}
results = database.QueryDatabase(
fmt::format(
SQL(
SELECT
npcid,
slot,
charges,
itemid
FROM merchantlist_temp
WHERE npcid IN ({})
),
implode(", ", npc_ids)
)
);
std::map<uint32, std::list<TempMerchantList> >::iterator temp_merchant_table_entry;
uint32 npc_id = 0;
for (auto row = results.begin(); row != results.end(); ++row) {
TempMerchantList temp_merchant_list;
temp_merchant_list.npcid = atoul(row[0]);
if (npc_id != temp_merchant_list.npcid) {
temp_merchant_table_entry = tmpmerchanttable.find(temp_merchant_list.npcid);
if (temp_merchant_table_entry == tmpmerchanttable.end()) {
std::list<TempMerchantList> empty;
tmpmerchanttable[temp_merchant_list.npcid] = empty;
temp_merchant_table_entry = tmpmerchanttable.find(temp_merchant_list.npcid);
}
npc_id = temp_merchant_list.npcid;
}
temp_merchant_list.slot = atoul(row[1]);
temp_merchant_list.charges = atoul(row[2]);
temp_merchant_list.item = atoul(row[3]);
temp_merchant_list.origslot = temp_merchant_list.slot;
LogMerchants(
"[LoadTempMerchantData] Loading merchant temp items npc_id [{}] slot [{}] charges [{}] item [{}] origslot [{}]",
npc_id,
temp_merchant_list.slot,
temp_merchant_list.charges,
temp_merchant_list.item,
temp_merchant_list.origslot
);
temp_merchant_table_entry->second.push_back(temp_merchant_list);
}
}
void Zone::LoadNewMerchantData(uint32 merchantid) {
std::list<MerchantList> merlist;
std::string query = fmt::format(
SQL(
SELECT
item,
slot,
faction_required,
level_required,
alt_currency_cost,
classes_required,
probability
FROM
merchantlist
WHERE
merchantid = {}
{}
ORDER BY
slot
),
merchantid,
ContentFilterCriteria::apply()
);
auto results = content_db.QueryDatabase(query);
if (!results.Success()) {
return;
}
for (auto row = results.begin(); row != results.end(); ++row) {
MerchantList ml;
ml.id = merchantid;
ml.item = atoul(row[0]);
ml.slot = atoul(row[1]);
ml.faction_required = atoul(row[2]);
ml.level_required = atoul(row[3]);
ml.alt_currency_cost = atoul(row[4]);
ml.classes_required = atoul(row[5]);
ml.probability = atoul(row[6]);
merlist.push_back(ml);
}
merchanttable[merchantid] = merlist;
}
void Zone::GetMerchantDataForZoneLoad() {
LogInfo("Loading Merchant Lists");
std::string query = fmt::format(
SQL (
SELECT
DISTINCT merchantlist.merchantid,
merchantlist.slot,
merchantlist.item,
merchantlist.faction_required,
merchantlist.level_required,
merchantlist.alt_currency_cost,
merchantlist.classes_required,
merchantlist.probability
FROM
merchantlist,
npc_types,
spawnentry,
spawn2
WHERE
npc_types.merchant_id = merchantlist.merchantid
AND npc_types.id = spawnentry.npcid
AND spawnentry.spawngroupid = spawn2.spawngroupid
AND spawn2.zone = '{}'
AND spawn2.version = {}
{}
ORDER BY
merchantlist.slot
),
GetShortName(),
GetInstanceVersion(),
ContentFilterCriteria::apply("merchantlist")
);
auto results = content_db.QueryDatabase(query);
std::map<uint32, std::list<MerchantList> >::iterator merchant_list;
uint32 npc_id = 0;
if (results.RowCount() == 0) {
LogDebug("No Merchant Data found for [{}]", GetShortName());
return;
}
for (auto row = results.begin(); row != results.end(); ++row) {
MerchantList merchant_list_entry{};
merchant_list_entry.id = atoul(row[0]);
if (npc_id != merchant_list_entry.id) {
merchant_list = merchanttable.find(merchant_list_entry.id);
if (merchant_list == merchanttable.end()) {
std::list<MerchantList> empty;
merchanttable[merchant_list_entry.id] = empty;
merchant_list = merchanttable.find(merchant_list_entry.id);
}
npc_id = merchant_list_entry.id;
}
auto iter = merchant_list->second.begin();
bool found = false;
while (iter != merchant_list->second.end()) {
if ((*iter).item == atoul(row[2])) { // dedup by this row's item id; the original compared item against the merchant id, which can never match
found = true;
break;
}
++iter;
}
if (found) {
continue;
}
merchant_list_entry.slot = atoul(row[1]);
merchant_list_entry.item = atoul(row[2]);
merchant_list_entry.faction_required = atoul(row[3]);
merchant_list_entry.level_required = atoul(row[4]);
merchant_list_entry.alt_currency_cost = atoul(row[5]);
merchant_list_entry.classes_required = atoul(row[6]);
merchant_list_entry.probability = atoul(row[7]);
merchant_list->second.push_back(merchant_list_entry);
}
}
void Zone::LoadMercTemplates(){
std::list<MercStanceInfo> merc_stances;
merc_templates.clear();
std::string query = "SELECT `class_id`, `proficiency_id`, `stance_id`, `isdefault` FROM "
"`merc_stance_entries` ORDER BY `class_id`, `proficiency_id`, `stance_id`";
auto results = database.QueryDatabase(query);
if (!results.Success()) {
LogError("Error in ZoneDatabase::LoadMercTemplates()");
}
else {
for (auto row = results.begin(); row != results.end(); ++row) {
MercStanceInfo tempMercStanceInfo;
tempMercStanceInfo.ClassID = atoi(row[0]);
tempMercStanceInfo.ProficiencyID = atoi(row[1]);
tempMercStanceInfo.StanceID = atoi(row[2]);
tempMercStanceInfo.IsDefault = atoi(row[3]);
merc_stances.push_back(tempMercStanceInfo);
}
}
query = "SELECT DISTINCT MTem.merc_template_id, MTyp.dbstring "
"AS merc_type_id, MTem.dbstring "
"AS merc_subtype_id, MTyp.race_id, MS.class_id, MTyp.proficiency_id, MS.tier_id, 0 "
"AS CostFormula, MTem.clientversion, MTem.merc_npc_type_id "
"FROM merc_types MTyp, merc_templates MTem, merc_subtypes MS "
"WHERE MTem.merc_type_id = MTyp.merc_type_id AND MTem.merc_subtype_id = MS.merc_subtype_id "
"ORDER BY MTyp.race_id, MS.class_id, MTyp.proficiency_id;";
results = database.QueryDatabase(query);
if (!results.Success()) {
LogError("Error in ZoneDatabase::LoadMercTemplates()");
return;
}
for (auto row = results.begin(); row != results.end(); ++row) {
MercTemplate tempMercTemplate;
tempMercTemplate.MercTemplateID = atoi(row[0]);
tempMercTemplate.MercType = atoi(row[1]);
tempMercTemplate.MercSubType = atoi(row[2]);
tempMercTemplate.RaceID = atoi(row[3]);
tempMercTemplate.ClassID = atoi(row[4]);
tempMercTemplate.ProficiencyID = atoi(row[5]);
tempMercTemplate.TierID = atoi(row[6]);
tempMercTemplate.CostFormula = atoi(row[7]);
tempMercTemplate.ClientVersion = atoi(row[8]);
tempMercTemplate.MercNPCID = atoi(row[9]);
for(int i = 0; i < MaxMercStanceID; i++)
tempMercTemplate.Stances[i] = 0;
int stanceIndex = 0;
for (auto mercStanceListItr = merc_stances.begin(); mercStanceListItr != merc_stances.end(); ++mercStanceListItr) {
if(mercStanceListItr->ClassID != tempMercTemplate.ClassID || mercStanceListItr->ProficiencyID != tempMercTemplate.ProficiencyID)
continue;
zone->merc_stance_list[tempMercTemplate.MercTemplateID].push_back((*mercStanceListItr));
tempMercTemplate.Stances[stanceIndex] = mercStanceListItr->StanceID;
++stanceIndex;
}
merc_templates[tempMercTemplate.MercTemplateID] = tempMercTemplate;
}
}
void Zone::LoadLevelEXPMods(){
level_exp_mod.clear();
const std::string query = "SELECT level, exp_mod, aa_exp_mod FROM level_exp_mods";
auto results = database.QueryDatabase(query);
if (!results.Success()) {
LogError("Error in ZoneDatabase::LoadEXPLevelMods()");
return;
}
for (auto row = results.begin(); row != results.end(); ++row) {
uint32 index = atoi(row[0]);
float exp_mod = atof(row[1]);
float aa_exp_mod = atof(row[2]);
level_exp_mod[index].ExpMod = exp_mod;
level_exp_mod[index].AAExpMod = aa_exp_mod;
}
}
void Zone::LoadMercSpells(){
merc_spells_list.clear();
const std::string query = "SELECT msl.class_id, msl.proficiency_id, msle.spell_id, msle.spell_type, "
"msle.stance_id, msle.minlevel, msle.maxlevel, msle.slot, msle.procChance "
"FROM merc_spell_lists msl, merc_spell_list_entries msle "
"WHERE msle.merc_spell_list_id = msl.merc_spell_list_id "
"ORDER BY msl.class_id, msl.proficiency_id, msle.spell_type, msle.minlevel, msle.slot;";
auto results = database.QueryDatabase(query);
if (!results.Success()) {
LogError("Error in Zone::LoadMercSpells()");
return;
}
for (auto row = results.begin(); row != results.end(); ++row) {
uint32 classid;
MercSpellEntry tempMercSpellEntry;
classid = atoi(row[0]);
tempMercSpellEntry.proficiencyid = atoi(row[1]);
tempMercSpellEntry.spellid = atoi(row[2]);
tempMercSpellEntry.type = atoi(row[3]);
tempMercSpellEntry.stance = atoi(row[4]);
tempMercSpellEntry.minlevel = atoi(row[5]);
tempMercSpellEntry.maxlevel = atoi(row[6]);
tempMercSpellEntry.slot = atoi(row[7]);
tempMercSpellEntry.proc_chance = atoi(row[8]);
merc_spells_list[classid].push_back(tempMercSpellEntry);
}
Log(Logs::General, Logs::Mercenaries, "Loaded %i merc spells.", (int)(merc_spells_list[1].size() + merc_spells_list[2].size() + merc_spells_list[9].size() + merc_spells_list[12].size()));
}
bool Zone::IsLoaded() {
return is_zone_loaded;
}
void Zone::Shutdown(bool quiet)
{
if (!is_zone_loaded) {
return;
}
entity_list.StopMobAI();
std::map<uint32, NPCType *>::iterator itr;
while (!zone->npctable.empty()) {
itr = zone->npctable.begin();
delete itr->second;
itr->second = nullptr;
zone->npctable.erase(itr);
}
while (!zone->merctable.empty()) {
itr = zone->merctable.begin();
delete itr->second;
itr->second = nullptr;
zone->merctable.erase(itr);
}
zone->adventure_entry_list_flavor.clear();
std::map<uint32, LDoNTrapTemplate *>::iterator itr4;
while (!zone->ldon_trap_list.empty()) {
itr4 = zone->ldon_trap_list.begin();
delete itr4->second;
itr4->second = nullptr;
zone->ldon_trap_list.erase(itr4);
}
zone->ldon_trap_entry_list.clear();
LogInfo("Zone Shutdown: [{}] ([{}])", zone->GetShortName(), zone->GetZoneID());
petition_list.ClearPetitions();
zone->SetZoneHasCurrentTime(false);
if (!quiet) {
LogInfo("Zone Shutdown: Going to sleep");
}
is_zone_loaded = false;
zone->ResetAuth();
safe_delete(zone);
entity_list.ClearAreas();
parse->ReloadQuests(true);
UpdateWindowTitle(nullptr);
LogSys.CloseFileLogs();
if (RuleB(Zone, KillProcessOnDynamicShutdown)) {
LogInfo("[KillProcessOnDynamicShutdown] Shutting down");
EQ::EventLoop::Get().Shutdown();
}
}
void Zone::LoadZoneDoors(const char* zone, int16 version)
{
LogInfo("Loading doors for [{}] ", zone);
uint32 maxid;
int32 count = content_db.GetDoorsCount(&maxid, zone, version);
if(count < 1) {
LogInfo("No doors loaded");
return;
}
auto dlist = new Door[count];
if(!content_db.LoadDoors(count, dlist, zone, version)) {
LogError("Failed to load doors");
delete[] dlist;
return;
}
int r;
Door *d = dlist;
for(r = 0; r < count; r++, d++) {
auto newdoor = new Doors(d);
entity_list.AddDoor(newdoor);
Log(Logs::Detail, Logs::Doors, "Door Add to Entity List, index: %u db id: %u, door_id %u", r, dlist[r].db_id, dlist[r].door_id);
}
delete[] dlist;
}
Zone::Zone(uint32 in_zoneid, uint32 in_instanceid, const char* in_short_name)
: initgrids_timer(10000),
autoshutdown_timer((RuleI(Zone, AutoShutdownDelay))),
clientauth_timer(AUTHENTICATION_TIMEOUT * 1000),
spawn2_timer(1000),
hot_reload_timer(1000),
qglobal_purge_timer(30000),
m_SafePoint(0.0f,0.0f,0.0f,0.0f),
m_Graveyard(0.0f,0.0f,0.0f,0.0f)
{
zoneid = in_zoneid;
instanceid = in_instanceid;
instanceversion = database.GetInstanceVersion(instanceid);
pers_instance = false;
zonemap = nullptr;
watermap = nullptr;
pathing = nullptr;
qGlobals = nullptr;
default_ruleset = 0;
is_zone_time_localized = false;
process_mobs_while_empty = false;
loglevelvar = 0;
merchantvar = 0;
tradevar = 0;
lootvar = 0;
if(RuleB(TaskSystem, EnableTaskSystem)) {
task_manager->LoadProximities(zoneid);
}
short_name = strcpy(new char[strlen(in_short_name)+1], in_short_name);
strlwr(short_name);
memset(file_name, 0, sizeof(file_name));
long_name = 0;
aggroedmobs =0;
pgraveyard_id = 0;
pgraveyard_zoneid = 0;
pMaxClients = 0;
pvpzone = false;
if(database.GetServerType() == 1)
pvpzone = true;
content_db.GetZoneLongName(short_name, &long_name, file_name, &m_SafePoint.x, &m_SafePoint.y, &m_SafePoint.z, &pgraveyard_id, &pMaxClients);
if(graveyard_id() > 0)
{
LogDebug("Graveyard ID is [{}]", graveyard_id());
bool GraveYardLoaded = content_db.GetZoneGraveyard(graveyard_id(), &pgraveyard_zoneid, &m_Graveyard.x, &m_Graveyard.y, &m_Graveyard.z, &m_Graveyard.w);
if (GraveYardLoaded) {
LogDebug("Loaded a graveyard for zone [{}]: graveyard zoneid is [{}] at [{}]", short_name, graveyard_zoneid(), to_string(m_Graveyard).c_str());
}
else {
LogError("Unable to load the graveyard id [{}] for zone [{}]", graveyard_id(), short_name);
}
}
if (long_name == 0) {
long_name = strcpy(new char[18], "Long zone missing");
}
autoshutdown_timer.Start(AUTHENTICATION_TIMEOUT * 1000, false);
Weather_Timer = new Timer(60000);
Weather_Timer->Start();
LogDebug("The next weather check for zone: [{}] will be in [{}] seconds", short_name, Weather_Timer->GetRemainingTime()/1000);
zone_weather = 0;
weather_intensity = 0;
blocked_spells = nullptr;
zone_total_blocked_spells = 0;
zone_has_current_time = false;
Instance_Shutdown_Timer = nullptr;
bool is_perma = false;
if(instanceid > 0)
{
uint32 rem = database.GetTimeRemainingInstance(instanceid, is_perma);
if(!is_perma)
{
if(rem < 150) //give some leeway to people who are zoning in 2.5 minutes to finish zoning in and get ported out
rem = 150;
Instance_Timer = new Timer(rem * 1000);
}
else
{
pers_instance = true;
Instance_Timer = nullptr;
}
}
else
{
Instance_Timer = nullptr;
}
adv_data = nullptr;
map_name = nullptr;
Instance_Warning_timer = nullptr;
did_adventure_actions = false;
database.QGlobalPurge();
if(zoneid == RuleI(World, GuildBankZoneID))
GuildBanks = new GuildBankManager;
else
GuildBanks = nullptr;
m_ucss_available = false;
m_last_ucss_update = 0;
mMovementManager = &MobMovementManager::Get();
SetNpcPositionUpdateDistance(0);
SetQuestHotReloadQueued(false);
}
Zone::~Zone() {
spawn2_list.Clear();
safe_delete(zonemap);
safe_delete(watermap);
safe_delete(pathing);
if (worldserver.Connected()) {
worldserver.SetZoneData(0);
}
safe_delete_array(short_name);
safe_delete_array(long_name);
safe_delete(Weather_Timer);
NPCEmoteList.Clear();
zone_point_list.Clear();
entity_list.Clear();
ClearBlockedSpells();
safe_delete(Instance_Timer);
safe_delete(Instance_Shutdown_Timer);
safe_delete(Instance_Warning_timer);
safe_delete(qGlobals);
safe_delete_array(adv_data);
safe_delete_array(map_name);
safe_delete(GuildBanks);
}
//Modified for timezones.
bool Zone::Init(bool iStaticZone) {
SetStaticZone(iStaticZone);
//load the zone config file.
if (!LoadZoneCFG(zone->GetShortName(), zone->GetInstanceVersion())) { // try loading the zone name...
LoadZoneCFG(
zone->GetFileName(),
zone->GetInstanceVersion()
);
} // if that fails, try the file name, then load defaults
if (RuleManager::Instance()->GetActiveRulesetID() != default_ruleset) {
std::string r_name = RuleManager::Instance()->GetRulesetName(&database, default_ruleset);
if (r_name.size() > 0) {
RuleManager::Instance()->LoadRules(&database, r_name.c_str(), false);
}
}
zone->zonemap = Map::LoadMapFile(zone->map_name);
zone->watermap = WaterMap::LoadWaterMapfile(zone->map_name);
zone->pathing = IPathfinder::Load(zone->map_name);
LogInfo("Loading spawn conditions");
if(!spawn_conditions.LoadSpawnConditions(short_name, instanceid)) {
LogError("Loading spawn conditions failed, continuing without them");
}
LogInfo("Loading static zone points");
if (!content_db.LoadStaticZonePoints(&zone_point_list, short_name, GetInstanceVersion())) {
LogError("Loading static zone points failed");
return false;
}
LogInfo("Loading spawn groups");
if (!content_db.LoadSpawnGroups(short_name, GetInstanceVersion(), &spawn_group_list)) {
LogError("Loading spawn groups failed");
return false;
}
LogInfo("Loading spawn2 points");
if (!content_db.PopulateZoneSpawnList(zoneid, spawn2_list, GetInstanceVersion()))
{
LogError("Loading spawn2 points failed");
return false;
}
LogInfo("Loading player corpses");
if (!database.LoadCharacterCorpses(zoneid, instanceid)) {
LogError("Loading player corpses failed");
return false;
}
LogInfo("Loading traps");
if (!content_db.LoadTraps(short_name, GetInstanceVersion()))
{
LogError("Loading traps failed");
return false;
}
LogInfo("Loading adventure flavor text");
LoadAdventureFlavor();
LogInfo("Loading ground spawns");
if (!LoadGroundSpawns())
{
LogError("Loading ground spawns failed. continuing");
}
LogInfo("Loading World Objects from DB");
if (!LoadZoneObjects())
{
LogError("Loading World Objects failed. continuing");
}
LogInfo("Flushing old respawn timers");
database.QueryDatabase("DELETE FROM `respawn_times` WHERE (`start` + `duration`) < UNIX_TIMESTAMP(NOW())");
zone->LoadZoneDoors(zone->GetShortName(), zone->GetInstanceVersion());
zone->LoadZoneBlockedSpells(zone->GetZoneID());
//clear trader items if we are loading the bazaar
if (strncasecmp(short_name, "bazaar", 6) == 0) {
database.DeleteTraderItem(0);
database.DeleteBuyLines(0);
}
zone->LoadLDoNTraps();
zone->LoadLDoNTrapEntries();
zone->LoadVeteranRewards();
zone->LoadAlternateCurrencies();
zone->LoadNPCEmotes(&NPCEmoteList);
LoadAlternateAdvancement();
content_db.LoadGlobalLoot();
//Load merchant data
zone->GetMerchantDataForZoneLoad();
//Load temporary merchant data
zone->LoadTempMerchantData();
// Merc data
if (RuleB(Mercs, AllowMercs)) {
zone->LoadMercTemplates();
zone->LoadMercSpells();
}
if (RuleB(Zone, LevelBasedEXPMods))
zone->LoadLevelEXPMods();
petition_list.ClearPetitions();
petition_list.ReadDatabase();
LogInfo("Loading active Expeditions");
Expedition::CacheAllFromDatabase();
LogInfo("Loading timezone data");
zone->zone_time.setEQTimeZone(content_db.GetZoneTZ(zoneid, GetInstanceVersion()));
LogInfo("Init Finished: ZoneID = [{}], Time Offset = [{}]", zoneid, zone->zone_time.getEQTimeZone());
LoadGrids();
LoadTickItems();
//MODDING HOOK FOR ZONE INIT
mod_init();
return true;
}
void Zone::ReloadStaticData() {
LogInfo("Reloading Zone Static Data");
LogInfo("Reloading static zone points");
zone_point_list.Clear();
if (!content_db.LoadStaticZonePoints(&zone_point_list, GetShortName(), GetInstanceVersion())) {
LogError("Loading static zone points failed");
}
LogInfo("Reloading traps");
entity_list.RemoveAllTraps();
if (!content_db.LoadTraps(GetShortName(), GetInstanceVersion()))
{
LogError("Reloading traps failed");
}
LogInfo("Reloading ground spawns");
if (!LoadGroundSpawns())
{
LogError("Reloading ground spawns failed. continuing");
}
entity_list.RemoveAllObjects();
LogInfo("Reloading World Objects from DB");
if (!LoadZoneObjects())
{
LogError("Reloading World Objects failed. continuing");
}
entity_list.RemoveAllDoors();
zone->LoadZoneDoors(zone->GetShortName(), zone->GetInstanceVersion());
entity_list.RespawnAllDoors();
zone->LoadVeteranRewards();
zone->LoadAlternateCurrencies();
NPCEmoteList.Clear();
zone->LoadNPCEmotes(&NPCEmoteList);
//load the zone config file.
if (!LoadZoneCFG(zone->GetShortName(), zone->GetInstanceVersion())) { // try loading the zone name...
LoadZoneCFG(
zone->GetFileName(),
zone->GetInstanceVersion()
);
} // if that fails, try the file name, then load defaults
content_service.SetExpansionContext();
ZoneStore::LoadContentFlags();
LogInfo("Zone Static Data Reloaded");
}
bool Zone::LoadZoneCFG(const char* filename, uint16 instance_id)
{
memset(&newzone_data, 0, sizeof(NewZone_Struct));
map_name = nullptr;
if (!content_db.GetZoneCFG(
ZoneID(filename),
instance_id,
&newzone_data,
can_bind,
can_combat,
can_levitate,
can_castoutdoor,
is_city,
is_hotzone,
allow_mercs,
max_movement_update_range,
zone_type,
default_ruleset,
&map_name
)) {
// If loading a non-zero instance failed, try loading the default
if (instance_id != 0) {
safe_delete_array(map_name);
if (!content_db.GetZoneCFG(
ZoneID(filename),
0,
&newzone_data,
can_bind,
can_combat,
can_levitate,
can_castoutdoor,
is_city,
is_hotzone,
allow_mercs,
max_movement_update_range,
zone_type,
default_ruleset,
&map_name
)) {
LogError("Error loading the Zone Config");
return false;
}
}
}
//overwrite with our internal variables
strcpy(newzone_data.zone_short_name, GetShortName());
strcpy(newzone_data.zone_long_name, GetLongName());
strcpy(newzone_data.zone_short_name2, GetShortName());
LogInfo(
"Successfully loaded Zone Config for Zone [{}] ({}) Version [{}] Instance ID [{}]",
GetShortName(),
GetLongName(),
GetInstanceVersion(),
instance_id
);
return true;
}
bool Zone::SaveZoneCFG()
{
return content_db.SaveZoneCFG(GetZoneID(), GetInstanceVersion(), &newzone_data);
}
void Zone::AddAuth(ServerZoneIncomingClient_Struct* szic) {
auto zca = new ZoneClientAuth_Struct;
memset(zca, 0, sizeof(ZoneClientAuth_Struct));
zca->ip = szic->ip;
zca->wid = szic->wid;
zca->accid = szic->accid;
zca->admin = szic->admin;
zca->charid = szic->charid;
zca->lsid = szic->lsid;
zca->tellsoff = szic->tellsoff;
strn0cpy(zca->charname, szic->charname, sizeof(zca->charname));
strn0cpy(zca->lskey, szic->lskey, sizeof(zca->lskey));
zca->stale = false;
client_auth_list.Insert(zca);
}
void Zone::RemoveAuth(const char* iCharName, const char* iLSKey)
{
LinkedListIterator<ZoneClientAuth_Struct*> iterator(client_auth_list);
iterator.Reset();
while (iterator.MoreElements()) {
ZoneClientAuth_Struct* zca = iterator.GetData();
if (strcasecmp(zca->charname, iCharName) == 0 && strcasecmp(zca->lskey, iLSKey) == 0) {
iterator.RemoveCurrent();
return;
}
iterator.Advance();
}
}
void Zone::RemoveAuth(uint32 lsid)
{
LinkedListIterator<ZoneClientAuth_Struct*> iterator(client_auth_list);
iterator.Reset();
while (iterator.MoreElements()) {
ZoneClientAuth_Struct* zca = iterator.GetData();
if (zca->lsid == lsid) {
iterator.RemoveCurrent();
continue;
}
iterator.Advance();
}
}
void Zone::ResetAuth()
{
LinkedListIterator<ZoneClientAuth_Struct*> iterator(client_auth_list);
iterator.Reset();
while (iterator.MoreElements()) {
iterator.RemoveCurrent();
}
}
bool Zone::GetAuth(uint32 iIP, const char* iCharName, uint32* oWID, uint32* oAccID, uint32* oCharID, int16* oStatus, char* oLSKey, bool* oTellsOff) {
LinkedListIterator<ZoneClientAuth_Struct*> iterator(client_auth_list);
iterator.Reset();
while (iterator.MoreElements()) {
ZoneClientAuth_Struct* zca = iterator.GetData();
if (strcasecmp(zca->charname, iCharName) == 0) {
if(oWID)
*oWID = zca->wid;
if(oAccID)
*oAccID = zca->accid;
if(oCharID)
*oCharID = zca->charid;
if(oStatus)
*oStatus = zca->admin;
if(oTellsOff)
*oTellsOff = zca->tellsoff;
zca->stale = true;
return true;
}
iterator.Advance();
}
return false;
}
uint32 Zone::CountAuth() {
LinkedListIterator<ZoneClientAuth_Struct*> iterator(client_auth_list);
int x = 0;
iterator.Reset();
while (iterator.MoreElements()) {
x++;
iterator.Advance();
}
return x;
}
bool Zone::Process() {
spawn_conditions.Process();
if (spawn2_timer.Check()) {
LinkedListIterator<Spawn2 *> iterator(spawn2_list);
EQ::InventoryProfile::CleanDirty();
iterator.Reset();
while (iterator.MoreElements()) {
if (iterator.GetData()->Process()) {
iterator.Advance();
}
else {
iterator.RemoveCurrent();
}
}
if (adv_data && !did_adventure_actions) {
DoAdventureActions();
}
if (GetNpcPositionUpdateDistance() == 0) {
CalculateNpcUpdateDistanceSpread();
}
}
if (hot_reload_timer.Check() && IsQuestHotReloadQueued()) {
LogHotReloadDetail("Hot reload timer check...");
bool perform_reload = true;
if (RuleB(HotReload, QuestsRepopWhenPlayersNotInCombat)) {
for (auto &it : entity_list.GetClientList()) {
auto client = it.second;
if (client->GetAggroCount() > 0) {
perform_reload = false;
break;
}
}
}
if (perform_reload) {
ZoneReload::HotReloadQuests();
}
}
if(initgrids_timer.Check()) {
//delayed grid loading stuff.
initgrids_timer.Disable();
LinkedListIterator<Spawn2*> iterator(spawn2_list);
iterator.Reset();
while (iterator.MoreElements()) {
iterator.GetData()->LoadGrid();
iterator.Advance();
}
}
if(!staticzone) {
if (autoshutdown_timer.Check()) {
StartShutdownTimer();
if (numclients == 0) {
return false;
}
}
}
if(GetInstanceID() > 0)
{
if(Instance_Timer != nullptr && Instance_Shutdown_Timer == nullptr)
{
if(Instance_Timer->Check())
{
// if this is a dynamic zone instance notify system associated with it
auto expedition = Expedition::FindCachedExpeditionByZoneInstance(GetZoneID(), GetInstanceID());
if (expedition)
{
expedition->RemoveAllMembers(false); // entity list will teleport clients out immediately
}
// instance shutting down, move corpses to graveyard or non-instanced zone at same coords
entity_list.MovePlayerCorpsesToGraveyard(true);
entity_list.GateAllClientsToSafeReturn();
database.DeleteInstance(GetInstanceID());
Instance_Shutdown_Timer = new Timer(20000); //20 seconds
}
if(adv_data == nullptr)
{
if(Instance_Warning_timer == nullptr)
{
uint32 rem_time = Instance_Timer->GetRemainingTime();
uint32_t minutes_warning = 0;
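// 5-second windows (55-60s, 295-300s, 895-900s) so each threshold warning fires at most once as the timer counts down (assumption: Process() runs at least once per window)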
if(rem_time < 60000 && rem_time > 55000)
{
minutes_warning = 1;
}
else if(rem_time < 300000 && rem_time > 295000)
{
minutes_warning = 5;
}
else if(rem_time < 900000 && rem_time > 895000)
{
minutes_warning = 15;
}
if (minutes_warning > 0)
{
// expedition expire warnings are handled by world
auto expedition = Expedition::FindCachedExpeditionByZoneInstance(GetZoneID(), GetInstanceID());
if (!expedition)
{
entity_list.ExpeditionWarning(minutes_warning);
Instance_Warning_timer = new Timer(10000);
}
}
}
else if(Instance_Warning_timer->Check())
{
safe_delete(Instance_Warning_timer);
}
}
}
else if(Instance_Shutdown_Timer != nullptr)
{
if(Instance_Shutdown_Timer->Check())
{
StartShutdownTimer();
return false;
}
}
}
if(Weather_Timer->Check())
{
Weather_Timer->Disable();
this->ChangeWeather();
}
if(qGlobals)
{
if(qglobal_purge_timer.Check())
{
qGlobals->PurgeExpiredGlobals();
}
}
if (clientauth_timer.Check()) {
LinkedListIterator<ZoneClientAuth_Struct*> iterator2(client_auth_list);
iterator2.Reset();
while (iterator2.MoreElements()) {
if (iterator2.GetData()->stale)
iterator2.RemoveCurrent();
else {
iterator2.GetData()->stale = true;
iterator2.Advance();
}
}
}
mMovementManager->Process();
return true;
}
void Zone::ChangeWeather()
{
if(!HasWeather())
{
Weather_Timer->Disable();
return;
}
int chance = zone->random.Int(0, 3);
uint8 rainchance = zone->newzone_data.rain_chance[chance];
uint8 rainduration = zone->newzone_data.rain_duration[chance];
uint8 snowchance = zone->newzone_data.snow_chance[chance];
uint8 snowduration = zone->newzone_data.snow_duration[chance];
uint32 weathertimer = 0;
uint16 tmpweather = zone->random.Int(0, 100);
uint8 duration = 0;
uint8 tmpOldWeather = zone->zone_weather;
bool changed = false;
if(tmpOldWeather == 0)
{
if(rainchance > 0 || snowchance > 0)
{
uint8 intensity = zone->random.Int(1, 10);
if((rainchance > snowchance) || (rainchance == snowchance))
{
//It's gunna rain!
if(rainchance >= tmpweather)
{
if(rainduration == 0)
duration = 1;
else
duration = rainduration*3; //Duration is 1 EQ hour which is 3 earth minutes.
weathertimer = (duration*60)*1000;
Weather_Timer->Start(weathertimer);
zone->zone_weather = 1;
zone->weather_intensity = intensity;
changed = true;
}
}
else
{
//It's gunna snow!
if(snowchance >= tmpweather)
{
if(snowduration == 0)
duration = 1;
else
duration = snowduration*3;
weathertimer = (duration*60)*1000;
Weather_Timer->Start(weathertimer);
zone->zone_weather = 2;
zone->weather_intensity = intensity;
changed = true;
}
}
}
}
else
{
changed = true;
//We've had weather, now taking a break
if(tmpOldWeather == 1)
{
if(rainduration == 0)
duration = 1;
else
duration = rainduration*3; //Duration is 1 EQ hour which is 3 earth minutes.
weathertimer = (duration*60)*1000;
Weather_Timer->Start(weathertimer);
zone->weather_intensity = 0;
}
else if(tmpOldWeather == 2)
{
if(snowduration == 0)
duration = 1;
else
duration = snowduration*3; //Duration is 1 EQ hour which is 3 earth minutes.
weathertimer = (duration*60)*1000;
Weather_Timer->Start(weathertimer);
zone->weather_intensity = 0;
}
}
if(changed == false)
{
if(weathertimer == 0)
{
uint32 weatherTimerRule = RuleI(Zone, WeatherTimer);
weathertimer = weatherTimerRule*1000;
Weather_Timer->Start(weathertimer);
}
LogDebug("The next weather check for zone: [{}] will be in [{}] seconds", zone->GetShortName(), Weather_Timer->GetRemainingTime()/1000);
}
else
{
LogDebug("The weather for zone: [{}] has changed. Old weather was = [{}]. New weather is = [{}] The next check will be in [{}] seconds. Rain chance: [{}], Rain duration: [{}], Snow chance [{}], Snow duration: [{}]", zone->GetShortName(), tmpOldWeather, zone_weather,Weather_Timer->GetRemainingTime()/1000,rainchance,rainduration,snowchance,snowduration);
this->weatherSend();
if (zone->weather_intensity == 0)
{
zone->zone_weather = 0;
}
}
}
bool Zone::HasWeather()
{
uint8 rain1 = zone->newzone_data.rain_chance[0];
uint8 rain2 = zone->newzone_data.rain_chance[1];
uint8 rain3 = zone->newzone_data.rain_chance[2];
uint8 rain4 = zone->newzone_data.rain_chance[3];
uint8 snow1 = zone->newzone_data.snow_chance[0];
uint8 snow2 = zone->newzone_data.snow_chance[1];
uint8 snow3 = zone->newzone_data.snow_chance[2];
uint8 snow4 = zone->newzone_data.snow_chance[3];
if(rain1 == 0 && rain2 == 0 && rain3 == 0 && rain4 == 0 && snow1 == 0 && snow2 == 0 && snow3 == 0 && snow4 == 0)
return false;
else
return true;
}
void Zone::StartShutdownTimer(uint32 set_time) {
if (set_time > autoshutdown_timer.GetRemainingTime()) {
if (set_time == (RuleI(Zone, AutoShutdownDelay))) {
set_time = static_cast<uint32>(content_db.getZoneShutDownDelay(GetZoneID(), GetInstanceVersion()));
}
autoshutdown_timer.SetTimer(set_time);
LogDebug("Zone::StartShutdownTimer set to {}", set_time);
}
LogDebug("Zone::StartShutdownTimer trigger - set_time: [{}] remaining_time: [{}] diff: [{}]", set_time, autoshutdown_timer.GetRemainingTime(), (set_time - autoshutdown_timer.GetRemainingTime()));
}
bool Zone::Depop(bool StartSpawnTimer) {
std::map<uint32,NPCType *>::iterator itr;
entity_list.Depop(StartSpawnTimer);
entity_list.ClearTrapPointers();
entity_list.UpdateAllTraps(false);
/* Refresh npctable (cache), getting current info from database. */
while(!npctable.empty()) {
itr = npctable.begin();
delete itr->second;
itr->second = nullptr;
npctable.erase(itr);
}
// clear spell cache
database.ClearNPCSpells();
zone->spawn_group_list.ReloadSpawnGroups();
return true;
}
void Zone::ClearNPCTypeCache(int id) {
if (id <= 0) {
auto iter = npctable.begin();
while (iter != npctable.end()) {
delete iter->second;
iter->second = nullptr;
++iter;
}
npctable.clear();
}
else {
auto iter = npctable.begin();
while (iter != npctable.end()) {
if (iter->first == (uint32)id) {
delete iter->second;
iter->second = nullptr;
npctable.erase(iter);
return;
}
++iter;
}
}
}
void Zone::Repop(uint32 delay)
{
if (!Depop()) {
return;
}
LinkedListIterator<Spawn2 *> iterator(spawn2_list);
iterator.Reset();
while (iterator.MoreElements()) {
iterator.RemoveCurrent();
}
npc_scale_manager->LoadScaleData();
entity_list.ClearTrapPointers();
quest_manager.ClearAllTimers();
LogInfo("Loading spawn groups");
if (!content_db.LoadSpawnGroups(short_name, GetInstanceVersion(), &spawn_group_list)) {
LogError("Loading spawn groups failed");
}
LogInfo("Loading spawn conditions");
if (!spawn_conditions.LoadSpawnConditions(short_name, instanceid)) {
LogError("Loading spawn conditions failed, continuing without them");
}
if (!content_db.PopulateZoneSpawnList(zoneid, spawn2_list, GetInstanceVersion(), delay)) {
LogDebug("Error in Zone::Repop: database.PopulateZoneSpawnList failed");
}
LoadGrids();
initgrids_timer.Start();
entity_list.UpdateAllTraps(true, true);
//MODDING HOOK FOR REPOP
mod_repop();
}
void Zone::GetTimeSync()
{
if (worldserver.Connected() && !zone_has_current_time) {
auto pack = new ServerPacket(ServerOP_GetWorldTime, 1);
worldserver.SendPacket(pack);
safe_delete(pack);
}
}
void Zone::SetDate(uint16 year, uint8 month, uint8 day, uint8 hour, uint8 minute)
{
if (worldserver.Connected()) {
auto pack = new ServerPacket(ServerOP_SetWorldTime, sizeof(eqTimeOfDay));
eqTimeOfDay* eqtod = (eqTimeOfDay*)pack->pBuffer;
eqtod->start_eqtime.minute=minute;
eqtod->start_eqtime.hour=hour;
eqtod->start_eqtime.day=day;
eqtod->start_eqtime.month=month;
eqtod->start_eqtime.year=year;
eqtod->start_realtime=time(0);
printf("Setting master date on world server to: %d-%d-%d %d:%d (%d)\n", year, month, day, hour, minute, (int)eqtod->start_realtime);
worldserver.SendPacket(pack);
safe_delete(pack);
}
}
void Zone::SetTime(uint8 hour, uint8 minute, bool update_world /*= true*/)
{
if (worldserver.Connected()) {
auto pack = new ServerPacket(ServerOP_SetWorldTime, sizeof(eqTimeOfDay));
eqTimeOfDay* eq_time_of_day = (eqTimeOfDay*)pack->pBuffer;
zone_time.GetCurrentEQTimeOfDay(time(0), &eq_time_of_day->start_eqtime);
eq_time_of_day->start_eqtime.minute = minute;
eq_time_of_day->start_eqtime.hour = hour;
eq_time_of_day->start_realtime = time(0);
/* By default we update the world's time, which in turn updates the rest of the zone servers; we can optionally skip the world update */
if (update_world){
LogInfo("Setting master time on world server to: {}:{} ({})\n", hour, minute, (int)eq_time_of_day->start_realtime);
worldserver.SendPacket(pack);
/* Set Time Localization Flag */
zone->is_zone_time_localized = false;
}
/* When we don't update the world we localize ourselves: this zone becomes disjointed from normal syncs and sets its time locally */
else{
LogInfo("Setting zone localized time...");
zone->zone_time.SetCurrentEQTimeOfDay(eq_time_of_day->start_eqtime, eq_time_of_day->start_realtime);
auto outapp = new EQApplicationPacket(OP_TimeOfDay, sizeof(TimeOfDay_Struct));
TimeOfDay_Struct* time_of_day = (TimeOfDay_Struct*)outapp->pBuffer;
zone->zone_time.GetCurrentEQTimeOfDay(time(0), time_of_day);
entity_list.QueueClients(0, outapp, false);
safe_delete(outapp);
/* Set Time Localization Flag */
zone->is_zone_time_localized = true;
}
safe_delete(pack);
}
}
ZonePoint* Zone::GetClosestZonePoint(const glm::vec3& location, uint32 to, Client* client, float max_distance) {
LinkedListIterator<ZonePoint*> iterator(zone_point_list);
ZonePoint* closest_zp = nullptr;
float closest_dist = FLT_MAX;
float max_distance2 = max_distance * max_distance;
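// note: dist below comes from Distance() (a linear distance), yet it is compared against max_distance2 (a squared value); kept as-is from the source, but the units look inconsistent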
iterator.Reset();
while(iterator.MoreElements())
{
ZonePoint* zp = iterator.GetData();
uint32 mask_test = client->ClientVersionBit();
if (!(zp->client_version_mask & mask_test)) {
iterator.Advance();
continue;
}
if (zp->target_zone_id == to)
{
auto dist = Distance(glm::vec2(zp->x, zp->y), glm::vec2(location));
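// 999999/-999999 appear to be wildcard coordinates: such zone points match from anywhere (distance forced to 0)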
if ((zp->x == 999999 || zp->x == -999999) && (zp->y == 999999 || zp->y == -999999))
dist = 0;
if (dist < closest_dist)
{
closest_zp = zp;
closest_dist = dist;
}
}
iterator.Advance();
}
// if we have a water map and it says we're in a zoneline, lets assume it's just a really big zone line
// this shouldn't open up any exploits since those situations are detected later on
if ((zone->HasWaterMap() && !zone->watermap->InZoneLine(glm::vec3(client->GetPosition()))) || (!zone->HasWaterMap() && closest_dist > 400.0f && closest_dist < max_distance2))
{
//TODO cheat detection
LogInfo("WARNING: Closest zone point for zone id [{}] is [{}], you might need to update your zone_points table if you dont arrive at the right spot", to, closest_dist);
LogInfo("<Real Zone Points>. [{}]", to_string(location).c_str());
}
if(closest_dist > max_distance2)
closest_zp = nullptr;
if(!closest_zp)
closest_zp = GetClosestZonePointWithoutZone(location.x, location.y, location.z, client);
return closest_zp;
}
ZonePoint* Zone::GetClosestZonePoint(const glm::vec3& location, const char* to_name, Client* client, float max_distance) {
if(to_name == nullptr)
return GetClosestZonePointWithoutZone(location.x, location.y, location.z, client, max_distance);
return GetClosestZonePoint(location, ZoneID(to_name), client, max_distance);
}
ZonePoint* Zone::GetClosestZonePointWithoutZone(float x, float y, float z, Client* client, float max_distance) {
LinkedListIterator<ZonePoint*> iterator(zone_point_list);
ZonePoint* closest_zp = nullptr;
float closest_dist = FLT_MAX;
float max_distance2 = max_distance*max_distance;
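// distances are kept squared here (dx*dx + dy*dy below) and compared against max_distance2, avoiding sqrt calls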
iterator.Reset();
while(iterator.MoreElements())
{
ZonePoint* zp = iterator.GetData();
uint32 mask_test = client->ClientVersionBit();
if(!(zp->client_version_mask & mask_test))
{
iterator.Advance();
continue;
}
float delta_x = zp->x - x;
float delta_y = zp->y - y;
if(zp->x == 999999 || zp->x == -999999)
delta_x = 0;
if(zp->y == 999999 || zp->y == -999999)
delta_y = 0;
float dist = delta_x*delta_x+delta_y*delta_y;///*+(zp->z-z)*(zp->z-z)*/;
if (dist < closest_dist)
{
closest_zp = zp;
closest_dist = dist;
}
iterator.Advance();
}
if(closest_dist > max_distance2)
closest_zp = nullptr;
return closest_zp;
}
bool ZoneDatabase::LoadStaticZonePoints(LinkedList<ZonePoint *> *zone_point_list, const char *zonename, uint32 version)
{
zone_point_list->Clear();
zone->numzonepoints = 0;
zone->virtual_zone_point_list.clear();
auto zone_points = ZonePointsRepository::GetWhere(content_db,
fmt::format(
"zone = '{}' AND (version = {} OR version = -1) {} ORDER BY number",
zonename,
version,
ContentFilterCriteria::apply()
)
);
for (auto &zone_point : zone_points) {
auto zp = new ZonePoint;
zp->x = zone_point.x;
zp->y = zone_point.y;
zp->z = zone_point.z;
zp->target_x = zone_point.target_x;
zp->target_y = zone_point.target_y;
zp->target_z = zone_point.target_z;
zp->target_zone_id = zone_point.target_zone_id;
zp->heading = zone_point.heading;
zp->target_heading = zone_point.target_heading;
zp->number = zone_point.number;
zp->target_zone_instance = zone_point.target_instance;
zp->client_version_mask = zone_point.client_version_mask;
zp->is_virtual = zone_point.is_virtual > 0;
zp->height = zone_point.height;
zp->width = zone_point.width;
LogZonePoints(
"Loading ZP x [{}] y [{}] z [{}] heading [{}] target x y z zone_id instance_id [{}] [{}] [{}] [{}] [{}] number [{}] is_virtual [{}] height [{}] width [{}]",
zp->x,
zp->y,
zp->z,
zp->heading,
zp->target_x,
zp->target_y,
zp->target_z,
zp->target_zone_id,
zp->target_zone_instance,
zp->number,
zp->is_virtual ? "true" : "false",
zp->height,
zp->width
);
if (zone_point.is_virtual) {
zone->virtual_zone_point_list.emplace_back(zone_point);
safe_delete(zp);
continue;
}
zone_point_list->Insert(zp);
zone->numzonepoints++;
}
return true;
}
void Zone::SpawnStatus(Mob* client) {
LinkedListIterator<Spawn2*> iterator(spawn2_list);
uint32 x = 0;
iterator.Reset();
while(iterator.MoreElements())
{
if (iterator.GetData()->timer.GetRemainingTime() == 0xFFFFFFFF)
client->Message(Chat::White, " %d: %1.1f, %1.1f, %1.1f: disabled", iterator.GetData()->GetID(), iterator.GetData()->GetX(), iterator.GetData()->GetY(), iterator.GetData()->GetZ());
else
client->Message(Chat::White, " %d: %1.1f, %1.1f, %1.1f: %1.2f", iterator.GetData()->GetID(), iterator.GetData()->GetX(), iterator.GetData()->GetY(), iterator.GetData()->GetZ(), (float)iterator.GetData()->timer.GetRemainingTime() / 1000);
x++;
iterator.Advance();
}
client->Message(Chat::White, "%i spawns listed.", x);
}
void Zone::ShowEnabledSpawnStatus(Mob* client)
{
LinkedListIterator<Spawn2*> iterator(spawn2_list);
int x = 0;
int iEnabledCount = 0;
iterator.Reset();
while(iterator.MoreElements())
{
if (iterator.GetData()->timer.GetRemainingTime() != 0xFFFFFFFF)
{
client->Message(Chat::White, " %d: %1.1f, %1.1f, %1.1f: %1.2f", iterator.GetData()->GetID(), iterator.GetData()->GetX(), iterator.GetData()->GetY(), iterator.GetData()->GetZ(), (float)iterator.GetData()->timer.GetRemainingTime() / 1000);
iEnabledCount++;
}
x++;
iterator.Advance();
}
client->Message(Chat::White, "%i of %i spawns listed.", iEnabledCount, x);
}
void Zone::ShowDisabledSpawnStatus(Mob* client)
{
LinkedListIterator<Spawn2*> iterator(spawn2_list);
int x = 0;
int iDisabledCount = 0;
iterator.Reset();
while(iterator.MoreElements())
{
if (iterator.GetData()->timer.GetRemainingTime() == 0xFFFFFFFF)
{
client->Message(Chat::White, " %d: %1.1f, %1.1f, %1.1f: disabled", iterator.GetData()->GetID(), iterator.GetData()->GetX(), iterator.GetData()->GetY(), iterator.GetData()->GetZ());
iDisabledCount++;
}
x++;
iterator.Advance();
}
client->Message(Chat::White, "%i of %i spawns listed.", iDisabledCount, x);
}
void Zone::ShowSpawnStatusByID(Mob* client, uint32 spawnid)
{
LinkedListIterator<Spawn2*> iterator(spawn2_list);
int x = 0;
int iSpawnIDCount = 0;
iterator.Reset();
while(iterator.MoreElements())
{
if (iterator.GetData()->GetID() == spawnid)
{
if (iterator.GetData()->timer.GetRemainingTime() == 0xFFFFFFFF)
client->Message(Chat::White, " %d: %1.1f, %1.1f, %1.1f: disabled", iterator.GetData()->GetID(), iterator.GetData()->GetX(), iterator.GetData()->GetY(), iterator.GetData()->GetZ());
else
client->Message(Chat::White, " %d: %1.1f, %1.1f, %1.1f: %1.2f", iterator.GetData()->GetID(), iterator.GetData()->GetX(), iterator.GetData()->GetY(), iterator.GetData()->GetZ(), (float)iterator.GetData()->timer.GetRemainingTime() / 1000);
iSpawnIDCount++;
break;
}
x++;
iterator.Advance();
}
if(iSpawnIDCount > 0)
client->Message(Chat::White, "%i of %i spawns listed.", iSpawnIDCount, x);
else
client->Message(Chat::White, "No matching spawn id was found in this zone.");
}
bool ZoneDatabase::GetDecayTimes(npcDecayTimes_Struct *npcCorpseDecayTimes)
{
const std::string query =
"SELECT varname, value FROM variables WHERE varname LIKE 'decaytime%%' ORDER BY varname";
auto results = QueryDatabase(query);
if (!results.Success())
return false;
int index = 0;
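// assumed variable format: varname = "decaytime <minlvl> <maxlvl>" (split by Seperator), value = decay time in seconds, capped at 24 hours below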
for (auto row = results.begin(); row != results.end(); ++row, ++index) {
Seperator sep(row[0]);
npcCorpseDecayTimes[index].minlvl = atoi(sep.arg[1]);
npcCorpseDecayTimes[index].maxlvl = atoi(sep.arg[2]);
npcCorpseDecayTimes[index].seconds = std::min(24 * 60 * 60, atoi(row[1]));
}
return true;
}
void Zone::weatherSend(Client *client)
{
auto outapp = new EQApplicationPacket(OP_Weather, 8);
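// wire format (inferred from usage): pBuffer[0] = weather type - 1 (0 = rain, 1 = snow), pBuffer[4] = intensity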
if (zone_weather > 0) {
outapp->pBuffer[0] = zone_weather - 1;
outapp->pBuffer[4] = zone->weather_intensity;
}
if (client) {
client->QueuePacket(outapp);
}
else {
entity_list.QueueClients(0, outapp);
}
safe_delete(outapp);
}
bool Zone::HasGraveyard() {
return graveyard_zoneid() > 0;
}
void Zone::SetGraveyard(uint32 zoneid, const glm::vec4& graveyardPosition) {
pgraveyard_zoneid = zoneid;
m_Graveyard = graveyardPosition;
}
void Zone::LoadZoneBlockedSpells(uint32 zone_id)
{
if (!blocked_spells) {
zone_total_blocked_spells = content_db.GetBlockedSpellsCount(zone_id);
if (zone_total_blocked_spells > 0) {
blocked_spells = new ZoneSpellsBlocked[zone_total_blocked_spells];
if (!content_db.LoadBlockedSpells(zone_total_blocked_spells, blocked_spells, zone_id)) {
LogError(" Failed to load blocked spells");
ClearBlockedSpells();
}
}
}
}
void Zone::ClearBlockedSpells()
{
if (blocked_spells) {
safe_delete_array(blocked_spells);
zone_total_blocked_spells = 0;
}
}
bool Zone::IsSpellBlocked(uint32 spell_id, const glm::vec3 &location)
{
if (blocked_spells) {
bool exception = false;
bool block_all = false;
for (int x = 0; x < GetZoneTotalBlockedSpells(); x++) {
if (blocked_spells[x].spellid == spell_id) {
exception = true;
}
if (blocked_spells[x].spellid == 0) {
block_all = true;
}
}
// If all spells are blocked and this is an exception, it is not blocked
if (block_all && exception) {
return false;
}
for (int x = 0; x < GetZoneTotalBlockedSpells(); x++) {
// Spellid of 0 matches all spells
if (0 != blocked_spells[x].spellid && spell_id != blocked_spells[x].spellid) {
continue;
}
switch (blocked_spells[x].type) {
case ZoneBlockedSpellTypes::ZoneWide: {
return true;
break;
}
case ZoneBlockedSpellTypes::Region: {
if (IsWithinAxisAlignedBox(
location,
blocked_spells[x].m_Location - blocked_spells[x].m_Difference,
blocked_spells[x].m_Location + blocked_spells[x].m_Difference
)) {
return true;
}
break;
}
default: {
continue;
break;
}
}
}
}
return false;
}
const char *Zone::GetSpellBlockedMessage(uint32 spell_id, const glm::vec3 &location)
{
if (blocked_spells) {
for (int x = 0; x < GetZoneTotalBlockedSpells(); x++) {
if (spell_id != blocked_spells[x].spellid && blocked_spells[x].spellid != 0) {
continue;
}
switch (blocked_spells[x].type) {
case ZoneBlockedSpellTypes::ZoneWide: {
return blocked_spells[x].message;
break;
}
case ZoneBlockedSpellTypes::Region: {
if (IsWithinAxisAlignedBox(
location,
blocked_spells[x].m_Location - blocked_spells[x].m_Difference,
blocked_spells[x].m_Location + blocked_spells[x].m_Difference
)) {
return blocked_spells[x].message;
}
break;
}
default: {
continue;
break;
}
}
}
}
return "Error: Message String Not Found\0";
}
void Zone::SetInstanceTimer(uint32 new_duration)
{
if(Instance_Timer)
{
Instance_Timer->Start(new_duration * 1000);
}
}
void Zone::LoadLDoNTraps()
{
const std::string query = "SELECT id, type, spell_id, skill, locked FROM ldon_trap_templates";
auto results = content_db.QueryDatabase(query);
if (!results.Success()) {
return;
}
for (auto row = results.begin(); row != results.end(); ++row) {
auto lt = new LDoNTrapTemplate;
lt->id = atoi(row[0]);
lt->type = (LDoNChestTypes) atoi(row[1]);
lt->spell_id = atoi(row[2]);
lt->skill = atoi(row[3]);
lt->locked = atoi(row[4]);
ldon_trap_list[lt->id] = lt;
}
}
void Zone::LoadLDoNTrapEntries()
{
const std::string query = "SELECT id, trap_id FROM ldon_trap_entries";
auto results = content_db.QueryDatabase(query);
if (!results.Success()) {
return;
}
for (auto row = results.begin(); row != results.end(); ++row)
{
uint32 id = atoi(row[0]);
uint32 trap_id = atoi(row[1]);
LDoNTrapTemplate *trapTemplate = nullptr;
auto it = ldon_trap_list.find(trap_id);
if(it == ldon_trap_list.end())
continue;
trapTemplate = ldon_trap_list[trap_id];
std::list<LDoNTrapTemplate*> temp;
auto iter = ldon_trap_entry_list.find(id);
if(iter != ldon_trap_entry_list.end())
temp = ldon_trap_entry_list[id];
temp.push_back(trapTemplate);
ldon_trap_entry_list[id] = temp;
}
}
void Zone::LoadVeteranRewards()
{
VeteranRewards.clear();
InternalVeteranReward current_reward;
current_reward.claim_id = 0;
const std::string query = "SELECT claim_id, name, item_id, charges "
"FROM veteran_reward_templates "
"WHERE reward_slot < 8 and claim_id > 0 "
"ORDER by claim_id, reward_slot";
auto results = content_db.QueryDatabase(query);
if (!results.Success()) {
return;
}
int index = 0;
for (auto row = results.begin(); row != results.end(); ++row, ++index)
{
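// rows arrive ordered by claim_id, reward_slot; when claim_id changes, flush the completed reward and start a new one (index counts items within the current claim)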
uint32 claim = atoi(row[0]);
if(claim != current_reward.claim_id)
{
if(current_reward.claim_id != 0)
{
current_reward.claim_count = index;
current_reward.number_available = 1;
VeteranRewards.push_back(current_reward);
}
index = 0;
memset(&current_reward, 0, sizeof(InternalVeteranReward));
current_reward.claim_id = claim;
}
strcpy(current_reward.items[index].item_name, row[1]);
current_reward.items[index].item_id = atoi(row[2]);
current_reward.items[index].charges = atoi(row[3]);
}
if(current_reward.claim_id != 0)
{
current_reward.claim_count = index;
current_reward.number_available = 1;
VeteranRewards.push_back(current_reward);
}
}
void Zone::LoadAlternateCurrencies()
{
AlternateCurrencies.clear();
AltCurrencyDefinition_Struct current_currency;
const std::string query = "SELECT id, item_id FROM alternate_currency";
auto results = content_db.QueryDatabase(query);
if (!results.Success()) {
return;
}
for (auto row = results.begin(); row != results.end(); ++row)
{
current_currency.id = atoi(row[0]);
current_currency.item_id = atoi(row[1]);
AlternateCurrencies.push_back(current_currency);
}
}
void Zone::UpdateQGlobal(uint32 qid, QGlobal newGlobal)
{
if(newGlobal.npc_id != 0)
return;
if(newGlobal.char_id != 0)
return;
if(newGlobal.zone_id == GetZoneID() || newGlobal.zone_id == 0)
{
if(qGlobals)
{
qGlobals->AddGlobal(qid, newGlobal);
}
else
{
qGlobals = new QGlobalCache();
qGlobals->AddGlobal(qid, newGlobal);
}
}
}
void Zone::DeleteQGlobal(std::string name, uint32 npcID, uint32 charID, uint32 zoneID)
{
if(qGlobals)
{
qGlobals->RemoveGlobal(name, npcID, charID, zoneID);
}
}
void Zone::LoadAdventureFlavor()
{
const std::string query = "SELECT id, text FROM adventure_template_entry_flavor";
auto results = content_db.QueryDatabase(query);
if (!results.Success()) {
return;
}
for (auto row = results.begin(); row != results.end(); ++row) {
uint32 id = atoi(row[0]);
adventure_entry_list_flavor[id] = row[1];
}
}
void Zone::DoAdventureCountIncrease()
{
ServerZoneAdventureDataReply_Struct *sr = (ServerZoneAdventureDataReply_Struct*)adv_data;
if(sr->count < sr->total)
{
sr->count++;
auto pack = new ServerPacket(ServerOP_AdventureCountUpdate, sizeof(uint16));
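// note: instanceid (uint32) is narrowed into a uint16 payload here, mirroring the packet size above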
*((uint16*)pack->pBuffer) = instanceid;
worldserver.SendPacket(pack);
delete pack;
}
}
void Zone::DoAdventureAssassinationCountIncrease()
{
ServerZoneAdventureDataReply_Struct *sr = (ServerZoneAdventureDataReply_Struct*)adv_data;
if(sr->assa_count < RuleI(Adventure, NumberKillsForBossSpawn))
{
sr->assa_count++;
auto pack = new ServerPacket(ServerOP_AdventureAssaCountUpdate, sizeof(uint16));
*((uint16*)pack->pBuffer) = instanceid;
worldserver.SendPacket(pack);
delete pack;
}
}
void Zone::DoAdventureActions()
{
ServerZoneAdventureDataReply_Struct* ds = (ServerZoneAdventureDataReply_Struct*)adv_data;
if(ds->type == Adventure_Collect)
{
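// loot count formula: 2.5x the remaining collectibles, in integer math (* 25 / 10); intent inferred from the expression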
int count = (ds->total - ds->count) * 25 / 10;
entity_list.AddLootToNPCS(ds->data_id, count);
did_adventure_actions = true;
}
else if(ds->type == Adventure_Assassinate)
{
if(ds->assa_count >= RuleI(Adventure, NumberKillsForBossSpawn))
{
const NPCType* tmp = content_db.LoadNPCTypesData(ds->data_id);
if(tmp)
{
NPC* npc = new NPC(tmp, nullptr, glm::vec4(ds->assa_x, ds->assa_y, ds->assa_z, ds->assa_h), GravityBehavior::Water);
npc->AddLootTable();
if (npc->DropsGlobalLoot())
npc->CheckGlobalLootTables();
entity_list.AddNPC(npc);
npc->Shout("Rarrrgh!");
did_adventure_actions = true;
}
}
}
else
{
did_adventure_actions = true;
}
}
void Zone::LoadNPCEmotes(LinkedList<NPC_Emote_Struct*>* NPCEmoteList)
{
NPCEmoteList->Clear();
const std::string query = "SELECT emoteid, event_, type, text FROM npc_emotes";
auto results = content_db.QueryDatabase(query);
if (!results.Success()) {
return;
}
for (auto row = results.begin(); row != results.end(); ++row)
{
auto nes = new NPC_Emote_Struct;
nes->emoteid = atoi(row[0]);
nes->event_ = atoi(row[1]);
nes->type = atoi(row[2]);
strn0cpy(nes->text, row[3], sizeof(nes->text));
NPCEmoteList->Insert(nes);
}
}
void Zone::ReloadWorld(uint32 Option){
if (Option == 0) {
entity_list.ClearAreas();
parse->ReloadQuests();
} else if(Option == 1) {
entity_list.ClearAreas();
parse->ReloadQuests();
zone->Repop(0);
}
}
void Zone::LoadTickItems()
{
tick_items.clear();
const std::string query = "SELECT it_itemid, it_chance, it_level, it_qglobal, it_bagslot FROM item_tick";
auto results = database.QueryDatabase(query);
if (!results.Success()) {
return;
}
for (auto row = results.begin(); row != results.end(); ++row) {
if(atoi(row[0]) == 0)
continue;
item_tick_struct ti_tmp;
ti_tmp.itemid = atoi(row[0]);
ti_tmp.chance = atoi(row[1]);
ti_tmp.level = atoi(row[2]);
ti_tmp.bagslot = (int16)atoi(row[4]);
ti_tmp.qglobal = std::string(row[3]);
tick_items[atoi(row[0])] = ti_tmp;
}
}
uint32 Zone::GetSpawnKillCount(uint32 in_spawnid) {
LinkedListIterator<Spawn2*> iterator(spawn2_list);
iterator.Reset();
while(iterator.MoreElements())
{
if(iterator.GetData()->GetID() == in_spawnid)
{
return(iterator.GetData()->killcount);
}
iterator.Advance();
}
return 0;
}
void Zone::SetIsHotzone(bool is_hotzone)
{
Zone::is_hotzone = is_hotzone;
}
void Zone::RequestUCSServerStatus() {
auto outapp = new ServerPacket(ServerOP_UCSServerStatusRequest, sizeof(UCSServerStatus_Struct));
auto ucsss = (UCSServerStatus_Struct*)outapp->pBuffer;
ucsss->available = 0;
ucsss->port = Config->ZonePort;
ucsss->unused = 0;
worldserver.SendPacket(outapp);
safe_delete(outapp);
}
void Zone::SetUCSServerAvailable(bool ucss_available, uint32 update_timestamp) {
if (m_last_ucss_update == update_timestamp && m_ucss_available != ucss_available) {
m_ucss_available = false;
RequestUCSServerStatus();
return;
}
if (m_last_ucss_update < update_timestamp)
m_ucss_available = ucss_available;
}
int Zone::GetNpcPositionUpdateDistance() const
{
return npc_position_update_distance;
}
void Zone::SetNpcPositionUpdateDistance(int in_npc_position_update_distance)
{
Zone::npc_position_update_distance = in_npc_position_update_distance;
}
void Zone::CalculateNpcUpdateDistanceSpread()
{
float max_x = 0;
float max_y = 0;
float min_x = 0;
float min_y = 0;
auto &mob_list = entity_list.GetMobList();
for (auto &it : mob_list) {
Mob *entity = it.second;
if (!entity->IsNPC()) {
continue;
}
if (entity->GetX() <= min_x) {
min_x = entity->GetX();
}
if (entity->GetY() <= min_y) {
min_y = entity->GetY();
}
if (entity->GetX() >= max_x) {
max_x = entity->GetX();
}
if (entity->GetY() >= max_y) {
max_y = entity->GetY();
}
}
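// spread heuristic: average the NPC bounding-box extents, take a quarter of that, and keep it at least the zone's max movement update range (assuming EQ::ClampLower(v, min) == max(v, min))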
int x_spread = int(abs(max_x - min_x));
int y_spread = int(abs(max_y - min_y));
int combined_spread = int(abs((x_spread + y_spread) / 2));
int update_distance = EQ::ClampLower(int(combined_spread / 4), int(zone->GetMaxMovementUpdateRange()));
SetNpcPositionUpdateDistance(update_distance);
Log(Logs::General, Logs::Debug,
"NPC update spread distance set to [%i] combined_spread [%i]",
update_distance,
combined_spread
);
}
bool Zone::IsQuestHotReloadQueued() const
{
return quest_hot_reload_queued;
}
void Zone::SetQuestHotReloadQueued(bool in_quest_hot_reload_queued)
{
quest_hot_reload_queued = in_quest_hot_reload_queued;
}
void Zone::LoadGrids()
{
zone_grids = GridRepository::GetZoneGrids(GetZoneID());
zone_grid_entries = GridEntriesRepository::GetZoneGridEntries(GetZoneID());
}
Timer Zone::GetInitgridsTimer()
{
return initgrids_timer;
}
uint32 Zone::GetInstanceTimeRemaining() const
{
return instance_time_remaining;
}
void Zone::SetInstanceTimeRemaining(uint32 instance_time_remaining)
{
Zone::instance_time_remaining = instance_time_remaining;
}
bool Zone::IsZone(uint32 zone_id, uint16 instance_id) const
{
return (zoneid == zone_id && instanceid == instance_id);
}
DynamicZone* Zone::GetDynamicZone()
{
if (GetInstanceID() == 0)
{
return nullptr;
}
auto expedition = Expedition::FindCachedExpeditionByZoneInstance(GetZoneID(), GetInstanceID());
if (expedition)
{
return &expedition->GetDynamicZone();
}
// todo: tasks, missions, and quests with an associated dz for this instance id
return nullptr;
}
| 1 | 10,352 | Make sure to `snake_case` locally scoped variable names in the future | EQEmu-Server | cpp |
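A minimal sketch of the suggested convention, applied to a local from the file above (hypothetical rename, not part of the upstream source):
// before: camelCase local
MercTemplate tempMercTemplate;
// after: snake_case for locally scoped variables
MercTemplate temp_merc_template;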
@@ -56,6 +56,7 @@ type LogsConfig struct {
// RoutingRule holds the path to route requests to the service.
type RoutingRule struct {
Path string `yaml:"path"`
+ HealthCheckPath string `yaml:"healthcheck"`
}
// AutoScalingConfig is the configuration to scale the service with target tracking scaling policies. | 1 | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/template"
)
const (
lbFargateManifestPath = "lb-fargate-service/manifest.yml"
// LogRetentionInDays is the default log retention time in days.
LogRetentionInDays = 30
)
// LBFargateManifest holds the configuration to build a container image with an exposed port that receives
// requests through a load balancer with AWS Fargate as the compute engine.
type LBFargateManifest struct {
AppManifest `yaml:",inline"`
Image ImageWithPort `yaml:",flow"`
LBFargateConfig `yaml:",inline"`
Environments map[string]LBFargateConfig `yaml:",flow"` // Fields to override per environment.
parser template.Parser
}
// ImageWithPort represents a container image with an exposed port.
type ImageWithPort struct {
AppImage `yaml:",inline"`
Port uint16 `yaml:"port"`
}
// LBFargateConfig represents a load balanced web application with AWS Fargate as compute.
type LBFargateConfig struct {
RoutingRule `yaml:"http,flow"`
ContainersConfig `yaml:",inline"`
Scaling *AutoScalingConfig `yaml:",flow"`
LogsConfig `yaml:",flow"`
}
// ContainersConfig represents the resource boundaries and environment variables for the containers in the service.
type ContainersConfig struct {
CPU int `yaml:"cpu"`
Memory int `yaml:"memory"`
Count int `yaml:"count"`
Variables map[string]string `yaml:"variables"`
Secrets map[string]string `yaml:"secrets"`
}
// LogsConfig is the configuration to the ECS logs.
type LogsConfig struct {
LogRetention int `yaml:"logRetention"`
}
// RoutingRule holds the path to route requests to the service.
type RoutingRule struct {
Path string `yaml:"path"`
}
// AutoScalingConfig is the configuration to scale the service with target tracking scaling policies.
type AutoScalingConfig struct {
MinCount int `yaml:"minCount"`
MaxCount int `yaml:"maxCount"`
TargetCPU float64 `yaml:"targetCPU"`
TargetMemory float64 `yaml:"targetMemory"`
}
// LBFargateManifestProps contains properties for creating a new load balanced fargate application manifest.
type LBFargateManifestProps struct {
*AppManifestProps
Path string
Port uint16
}
// NewLoadBalancedFargateManifest creates a new public load balanced web service manifest with the given exposed port; the service receives
// all the requests from the load balancer and has a single task with minimal CPU and Memory thresholds.
func NewLoadBalancedFargateManifest(input *LBFargateManifestProps) *LBFargateManifest {
return &LBFargateManifest{
AppManifest: AppManifest{
Name: input.AppName,
Type: LoadBalancedWebApplication,
},
Image: ImageWithPort{
AppImage: AppImage{
Build: input.Dockerfile,
},
Port: input.Port,
},
LBFargateConfig: LBFargateConfig{
RoutingRule: RoutingRule{
Path: input.Path,
},
ContainersConfig: ContainersConfig{
CPU: 256,
Memory: 512,
Count: 1,
},
LogsConfig: LogsConfig{},
},
parser: template.New(),
}
}
// MarshalBinary serializes the manifest object into a binary YAML document.
func (m *LBFargateManifest) MarshalBinary() ([]byte, error) {
content, err := m.parser.Parse(lbFargateManifestPath, *m)
if err != nil {
return nil, err
}
return content.Bytes(), nil
}
// DockerfilePath returns the image build path.
func (m LBFargateManifest) DockerfilePath() string {
return m.Image.Build
}
// EnvConf returns the application configuration with environment overrides.
// If the environment passed in does not have any overrides then we return the default values.
func (m *LBFargateManifest) EnvConf(envName string) LBFargateConfig {
if _, ok := m.Environments[envName]; !ok {
return m.LBFargateConfig
}
// We don't want to modify the default settings, so deep copy into a "conf" variable.
envVars := make(map[string]string, len(m.Variables))
for k, v := range m.Variables {
envVars[k] = v
}
secrets := make(map[string]string, len(m.Secrets))
for k, v := range m.Secrets {
secrets[k] = v
}
var scaling *AutoScalingConfig
if m.Scaling != nil {
scaling = &AutoScalingConfig{
MinCount: m.Scaling.MinCount,
MaxCount: m.Scaling.MaxCount,
TargetCPU: m.Scaling.TargetCPU,
TargetMemory: m.Scaling.TargetMemory,
}
}
conf := LBFargateConfig{
RoutingRule: RoutingRule{
Path: m.Path,
},
ContainersConfig: ContainersConfig{
CPU: m.CPU,
Memory: m.Memory,
Count: m.Count,
Variables: envVars,
Secrets: secrets,
},
Scaling: scaling,
}
// Override with fields set in the environment.
target := m.Environments[envName]
if target.RoutingRule.Path != "" {
conf.RoutingRule.Path = target.RoutingRule.Path
}
if target.CPU != 0 {
conf.CPU = target.CPU
}
if target.Memory != 0 {
conf.Memory = target.Memory
}
if target.Count != 0 {
conf.Count = target.Count
}
for k, v := range target.Variables {
conf.Variables[k] = v
}
for k, v := range target.Secrets {
conf.Secrets[k] = v
}
if target.Scaling != nil {
if conf.Scaling == nil {
conf.Scaling = &AutoScalingConfig{}
}
if target.Scaling.MinCount != 0 {
conf.Scaling.MinCount = target.Scaling.MinCount
}
if target.Scaling.MaxCount != 0 {
conf.Scaling.MaxCount = target.Scaling.MaxCount
}
if target.Scaling.TargetCPU != 0 {
conf.Scaling.TargetCPU = target.Scaling.TargetCPU
}
if target.Scaling.TargetMemory != 0 {
conf.Scaling.TargetMemory = target.Scaling.TargetMemory
}
}
return conf
}
// CFNTemplate serializes the manifest object into a CloudFormation template.
func (m *LBFargateManifest) CFNTemplate() (string, error) {
return "", nil
}
| 1 | 12,299 | Maybe we should use underscores like `health_check` - what do you think? | aws-copilot-cli | go |
@@ -139,6 +139,12 @@ func (v *volumeAPIOpsV1alpha1) read(volumeName string) (*v1alpha1.CASVolume, err
vol.Namespace = hdrNS
}
+ // use sc name from header if present
+ scName := v.req.Header.Get(string(v1alpha1.StorageClassKey))
+ if scName != "" {
+ vol.Annotations[string(v1alpha1.StorageClassKey)] = scName
+ }
+
vOps, err := volume.NewVolumeOperation(vol)
if err != nil {
return nil, CodedError(400, err.Error()) | 1 | package server
import (
"fmt"
"net/http"
"strings"
"github.com/golang/glog"
"github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
"github.com/openebs/maya/pkg/template"
"github.com/openebs/maya/pkg/volume"
)
type volumeAPIOpsV1alpha1 struct {
req *http.Request
resp http.ResponseWriter
}
// volumeV1alpha1SpecificRequest is an HTTP handler that handles
// requests to an OpenEBS volume.
func (s *HTTPServer) volumeV1alpha1SpecificRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
glog.Infof("cas template based volume request was received: method '%s'", req.Method)
if req == nil {
return nil, CodedError(400, "nil http request was received")
}
volOp := &volumeAPIOpsV1alpha1{
req: req,
resp: resp,
}
switch req.Method {
case "POST":
return volOp.create()
case "GET":
return volOp.httpGet()
case "DELETE":
return volOp.httpDelete()
default:
return nil, CodedError(405, ErrInvalidMethod)
}
}
// httpGet deals with HTTP GET requests
func (v *volumeAPIOpsV1alpha1) httpGet() (interface{}, error) {
// Extract name of volume from path after trimming
path := strings.TrimSpace(strings.TrimPrefix(v.req.URL.Path, "/latest/volumes"))
// list cas volumes
if path == "/" {
return v.list()
}
// read a cas volume
volName := strings.TrimPrefix(path, "/")
return v.read(volName)
}
// httpDelete deals with HTTP DELETE requests
func (v *volumeAPIOpsV1alpha1) httpDelete() (interface{}, error) {
// Extract name of volume from path after trimming
volName := strings.TrimSpace(strings.TrimPrefix(v.req.URL.Path, "/latest/volumes/"))
// check if req url has volume name
if len(volName) == 0 {
return nil, CodedError(405, ErrInvalidMethod)
}
return v.delete(volName)
}
func (v *volumeAPIOpsV1alpha1) create() (*v1alpha1.CASVolume, error) {
glog.Infof("cas template based volume create request was received")
vol := &v1alpha1.CASVolume{}
err := decodeBody(v.req, vol)
if err != nil {
return nil, CodedError(400, err.Error())
}
// volume name is expected
if len(vol.Name) == 0 {
return nil, CodedError(400, fmt.Sprintf("failed to create volume: missing volume name '%v'", vol))
}
// use run namespace from labels if volume's namespace is not set
if len(vol.Namespace) == 0 {
vol.Namespace = vol.Labels[string(v1alpha1.NamespaceKey)]
}
// use run namespace from http request header if volume's namespace is still not set
if len(vol.Namespace) == 0 {
vol.Namespace = v.req.Header.Get(NamespaceKey)
}
vOps, err := volume.NewVolumeOperation(vol)
if err != nil {
return nil, CodedError(400, err.Error())
}
cvol, err := vOps.Create()
if err != nil {
glog.Errorf("failed to create cas template based volume: error '%s'", err.Error())
return nil, CodedError(500, err.Error())
}
glog.Infof("cas template based volume created successfully: name '%s'", cvol.Name)
return cvol, nil
}
func (v *volumeAPIOpsV1alpha1) read(volumeName string) (*v1alpha1.CASVolume, error) {
glog.Infof("cas template based volume read request was received")
vol := &v1alpha1.CASVolume{}
// hdrNS will store namespace from http header
hdrNS := ""
// get volume related details from http request
if v.req != nil {
decodeBody(v.req, vol)
hdrNS = v.req.Header.Get(NamespaceKey)
}
vol.Name = volumeName
// volume name is expected
if len(vol.Name) == 0 {
return nil, CodedError(400, fmt.Sprintf("failed to read volume: missing volume name '%v'", vol))
}
// use namespace from labels if volume ns is not set
if len(vol.Namespace) == 0 {
vol.Namespace = vol.Labels[string(v1alpha1.NamespaceKey)]
}
// use namespace from req headers if volume ns is still not set
if len(vol.Namespace) == 0 {
vol.Namespace = hdrNS
}
vOps, err := volume.NewVolumeOperation(vol)
if err != nil {
return nil, CodedError(400, err.Error())
}
cvol, err := vOps.Read()
if err != nil {
glog.Errorf("failed to read cas template based volume: error '%s'", err.Error())
if _, ok := err.(*template.NotFoundError); ok {
return nil, CodedError(404, fmt.Sprintf("volume '%s' not found at namespace '%s'", vol.Name, vol.Namespace))
}
return nil, CodedError(500, err.Error())
}
glog.Infof("cas template based volume was read successfully: name '%s'", cvol.Name)
return cvol, nil
}
func (v *volumeAPIOpsV1alpha1) delete(volumeName string) (*v1alpha1.CASVolume, error) {
glog.Infof("cas template based volume delete request was received")
vol := &v1alpha1.CASVolume{}
// hdrNS will store namespace from http header
hdrNS := ""
// get volume related details from http request
if v.req != nil {
decodeBody(v.req, vol)
hdrNS = v.req.Header.Get(NamespaceKey)
}
vol.Name = volumeName
// volume name is expected
if len(vol.Name) == 0 {
return nil, CodedError(400, fmt.Sprintf("failed to delete volume: missing volume name '%v'", vol))
}
// use namespace from labels if volume ns is not set
if len(vol.Namespace) == 0 {
vol.Namespace = vol.Labels[string(v1alpha1.NamespaceKey)]
}
// use namespace from req headers if volume ns is still not set
if len(vol.Namespace) == 0 {
vol.Namespace = hdrNS
}
vOps, err := volume.NewVolumeOperation(vol)
if err != nil {
return nil, CodedError(400, err.Error())
}
cvol, err := vOps.Delete()
if err != nil {
glog.Errorf("failed to delete cas template based volume: error '%s'", err.Error())
if _, ok := err.(*template.NotFoundError); ok {
return nil, CodedError(404, fmt.Sprintf("volume '%s' not found at namespace '%s'", vol.Name, vol.Namespace))
}
return nil, CodedError(500, err.Error())
}
glog.Infof("cas template based volume was deleted successfully: name '%s'", cvol.Name)
return cvol, nil
}
func (v *volumeAPIOpsV1alpha1) list() (*v1alpha1.CASVolumeList, error) {
glog.Infof("cas template based volume list request was received")
vols := &v1alpha1.CASVolumeList{}
// hdrNS will store namespace from http header
hdrNS := ""
// extract volume list details from http request
if v.req != nil {
decodeBody(v.req, vols)
hdrNS = v.req.Header.Get(NamespaceKey)
}
// use namespace from labels if volume ns is not set
if len(vols.Namespace) == 0 {
vols.Namespace = vols.Labels[string(v1alpha1.NamespaceKey)]
}
// use namespace from req headers if volume ns is still not set
if len(vols.Namespace) == 0 {
vols.Namespace = hdrNS
}
vOps, err := volume.NewVolumeListOperation(vols)
if err != nil {
return nil, CodedError(400, err.Error())
}
cvols, err := vOps.List()
if err != nil {
glog.Errorf("failed to list cas template based volumes at namespaces '%s': error '%s'", vols.Namespace, err.Error())
return nil, CodedError(500, err.Error())
}
glog.Infof("cas template based volumes were listed successfully: namespaces '%s'", vols.Namespace)
return cvols, nil
}
| 1 | 8,828 | Do a TrimSpace before setting. | openebs-maya | go |
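A minimal sketch of the reviewer's suggestion, trimming the header value before storing it. The key literal below is a placeholder for string(v1alpha1.StorageClassKey), which is not reproduced here, and callers would still need to ensure the annotations map is non-nil:

package server

import (
	"net/http"
	"strings"
)

// storageClassKey is a placeholder for string(v1alpha1.StorageClassKey);
// the real constant lives in the openebs API package.
const storageClassKey = "openebs.io/storage-class"

// setStorageClassAnnotation trims the header value before deciding
// whether to store it, so " my-sc " and an all-whitespace value are
// both handled sanely.
func setStorageClassAnnotation(req *http.Request, annotations map[string]string) {
	scName := strings.TrimSpace(req.Header.Get(storageClassKey))
	if scName != "" {
		annotations[storageClassKey] = scName
	}
}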
@@ -41,6 +41,9 @@ type ENI struct {
// PrivateDNSName is the dns name assigned by the vpc to this eni
PrivateDNSName string `json:",omitempty"`
+ // SubnetGatewayIPV4Address is the address to the subnet gateway for
+ // the eni
+ SubnetGatewayIPV4Address string `json:",omitempty"`
}
// GetIPV4Addresses returns a list of ipv4 addresses allocated to the ENI | 1 | // Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package api
import (
"fmt"
"strings"
"github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs"
"github.com/aws/aws-sdk-go/aws"
"github.com/pkg/errors"
)
// ENI contains information of the eni
type ENI struct {
// ID is the id of eni
ID string `json:"ec2Id"`
// IPV4Addresses is the ipv4 address associated with the eni
IPV4Addresses []*ENIIPV4Address
// IPV6Addresses is the ipv6 address associated with the eni
IPV6Addresses []*ENIIPV6Address
// MacAddress is the mac address of the eni
MacAddress string
// DomainNameServers specifies the nameserver IP addresses for
// the eni
DomainNameServers []string `json:",omitempty"`
// DomainNameSearchList specifies the search list for the domain
// name lookup, for the eni
DomainNameSearchList []string `json:",omitempty"`
// PrivateDNSName is the dns name assigned by the vpc to this eni
PrivateDNSName string `json:",omitempty"`
}
// GetIPV4Addresses returns a list of ipv4 addresses allocated to the ENI
func (eni *ENI) GetIPV4Addresses() []string {
var addresses []string
for _, addr := range eni.IPV4Addresses {
addresses = append(addresses, addr.Address)
}
return addresses
}
// GetIPV6Addresses returns a list of ipv6 addresses allocated to the ENI
func (eni *ENI) GetIPV6Addresses() []string {
var addresses []string
for _, addr := range eni.IPV6Addresses {
addresses = append(addresses, addr.Address)
}
return addresses
}
// GetHostname returns the hostname assigned to the ENI
func (eni *ENI) GetHostname() string {
return eni.PrivateDNSName
}
// String returns a human readable version of the ENI object
func (eni *ENI) String() string {
var ipv4Addresses []string
for _, addr := range eni.IPV4Addresses {
ipv4Addresses = append(ipv4Addresses, addr.Address)
}
var ipv6Addresses []string
for _, addr := range eni.IPV6Addresses {
ipv6Addresses = append(ipv6Addresses, addr.Address)
}
return fmt.Sprintf(
"eni id:%s, mac: %s, hostname: %s, ipv4addresses: [%s], ipv6addresses: [%s], dns: [%s], dns search: [%s]",
eni.ID, eni.MacAddress, eni.GetHostname(), strings.Join(ipv4Addresses, ","), strings.Join(ipv6Addresses, ","),
strings.Join(eni.DomainNameServers, ","), strings.Join(eni.DomainNameSearchList, ","))
}
// ENIIPV4Address is the ipv4 information of the eni
type ENIIPV4Address struct {
// Primary indicates whether the ip address is primary
Primary bool
// Address is the ipv4 address associated with eni
Address string
}
// ENIIPV6Address is the ipv6 information of the eni
type ENIIPV6Address struct {
// Address is the ipv6 address associated with eni
Address string
}
// ENIFromACS validates the information from acs message and create the ENI object
func ENIFromACS(acsenis []*ecsacs.ElasticNetworkInterface) (*ENI, error) {
err := ValidateTaskENI(acsenis)
if err != nil {
return nil, err
}
var ipv4 []*ENIIPV4Address
var ipv6 []*ENIIPV6Address
// Read ipv4 address information of the eni
for _, ec2Ipv4 := range acsenis[0].Ipv4Addresses {
ipv4 = append(ipv4, &ENIIPV4Address{
Primary: aws.BoolValue(ec2Ipv4.Primary),
Address: aws.StringValue(ec2Ipv4.PrivateAddress),
})
}
// Read ipv6 address information of the eni
for _, ec2Ipv6 := range acsenis[0].Ipv6Addresses {
ipv6 = append(ipv6, &ENIIPV6Address{
Address: aws.StringValue(ec2Ipv6.Address),
})
}
eni := &ENI{
ID: aws.StringValue(acsenis[0].Ec2Id),
IPV4Addresses: ipv4,
IPV6Addresses: ipv6,
MacAddress: aws.StringValue(acsenis[0].MacAddress),
PrivateDNSName: aws.StringValue(acsenis[0].PrivateDnsName),
}
for _, nameserverIP := range acsenis[0].DomainNameServers {
eni.DomainNameServers = append(eni.DomainNameServers, aws.StringValue(nameserverIP))
}
for _, nameserverDomain := range acsenis[0].DomainName {
eni.DomainNameSearchList = append(eni.DomainNameSearchList, aws.StringValue(nameserverDomain))
}
return eni, nil
}
// ValidateTaskENI validates the eni information sent from acs
func ValidateTaskENI(acsenis []*ecsacs.ElasticNetworkInterface) error {
// Only one eni should be associated with the task
// Only one ipv4 should be associated with the eni
// No more than one ipv6 should be associated with the eni
if len(acsenis) != 1 {
return errors.Errorf("eni message validation: more than one ENIs in the message(%d)", len(acsenis))
} else if len(acsenis[0].Ipv4Addresses) != 1 {
return errors.Errorf("eni message validation: more than one ipv4 addresses in the message(%d)", len(acsenis[0].Ipv4Addresses))
} else if len(acsenis[0].Ipv6Addresses) > 1 {
return errors.Errorf("eni message validation: more than one ipv6 addresses in the message(%d)", len(acsenis[0].Ipv6Addresses))
}
if acsenis[0].MacAddress == nil {
return errors.Errorf("eni message validation: empty eni mac address in the message")
}
if acsenis[0].Ec2Id == nil {
return errors.Errorf("eni message validation: empty eni id in the message")
}
return nil
}
| 1 | 20,052 | Are the fields in this struct grouped by IP address family or by function? There are separate fields for IPv4Addresses and IPv6Addresses, but a single field for DomainNameServers. Should this new field be named SubnetGatewayAddress, typed as an array containing both the IPv4 and IPv6 gateways? | aws-amazon-ecs-agent | go |
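A rough sketch of the alternative shape the reviewer is asking about, grouping gateways by function rather than by address family. These types are illustrative only and are not the agent's actual model:

package api

// SubnetGatewayAddress pairs a gateway address with its family, so a
// single field can carry both the IPv4 and the IPv6 gateway.
type SubnetGatewayAddress struct {
	// Family is "ipv4" or "ipv6".
	Family  string
	Address string `json:",omitempty"`
}

// eniGateways is a hypothetical fragment of ENI showing the field
// grouped by function instead of one field per address family.
type eniGateways struct {
	SubnetGatewayAddresses []SubnetGatewayAddress `json:",omitempty"`
}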
@@ -38,7 +38,7 @@ class LinkTest < ActiveSupport::TestCase
new_link.revive_or_create
end
- deleted_link = Link.find(link)
+ deleted_link = Link.where(id: link.id).first
deleted_link.title.must_equal new_title
deleted_link.link_category_id.must_equal Link::CATEGORIES[:Forums]
end | 1 | require 'test_helper'
class LinkTest < ActiveSupport::TestCase
it 'must raise an error when no editor' do
skip 'Integrate alongwith acts_as_editable'
-> { create(:link) }.must_raise(ActiveRecord::Acts::Editable::MissingEditorError)
end
it 'must create a link' do
link = create(:link, project: projects(:linux))
projects(:linux).links.must_include link
end
it 'must prevent blank url' do
link = build(:link, url: '')
link.save
link.errors.must_include(:url)
end
it 'must prevent duplicate url' do
link = create(:link, project: projects(:linux))
link.errors.must_be :empty?
link = build(:link, project: projects(:linux), url: link.url)
link.save
link.errors.must_include(:url)
end
it 'must revive or create deleted links' do
link = create(:link, project: projects(:linux))
link.destroy
new_title = 'new title'
new_link = build(:link, url: link.url, project_id: link.project_id,
title: new_title, link_category_id: Link::CATEGORIES[:Forums])
new_link.editor_account = create(:account)
assert_no_difference('Link.count') do
new_link.revive_or_create
end
deleted_link = Link.find(link)
deleted_link.title.must_equal new_title
deleted_link.link_category_id.must_equal Link::CATEGORIES[:Forums]
end
it 'must receive or create for new links' do
new_title = 'new title'
new_url = 'http://www.domain.com'
new_link = build(:link, url: new_url, project: projects(:linux),
title: new_title, link_category_id: Link::CATEGORIES[:Forums])
new_link.editor_account = create(:account)
assert_difference('Link.count', 1) do
new_link.revive_or_create.must_equal true
end
link = Link.find(new_link)
link.title.must_equal new_title
link.url.must_equal new_url
link.link_category_id.must_equal Link::CATEGORIES[:Forums]
end
describe 'explain_yourself' do
let(:edit) { stub(value: 5, key: 'some_attribute', is_a?: false) }
let(:link) { Link.new }
before { link.stubs(:id).returns(1) }
it 'test value when not property edit' do
skip('FIXME: Integrate alongwith acts_as_editable')
information = link.explain_yourself(edit)
information.must_equal 'Created link 1'
end
it 'test value when property edit without link category id' do
skip('FIXME: Integrate alongwith acts_as_editable')
edit.stubs(:is_a?).returns(true)
information = link.explain_yourself(edit)
information.must_equal "Changed link 1's some_attribute to be '5'"
end
it 'test value when property edit with link category id' do
skip('FIXME: Integrate alongwith acts_as_editable')
edit = stub(value: Link::CATEGORIES[:Forums], key: 'link_category_id', is_a?: true)
information = link.explain_yourself(edit)
information.must_equal "Changed link 1's link_category_id to be 'Forums'"
end
end
it 'test url' do
# TODO: Uncomment after integrating acts_as_editable.
# -> { projects(:linux).update(url: 'linux.com') }.must_raise(ActiveRecord::Acts::Editable::MissingEditorError)
[ # test for basic validity failures.
'bad url', 'http://\"$', 'ftp://booasd', 'http://',
'http://;', "http://www.oh.net'", 'http://www.oh.net`'
].each do |url|
link = build(:link, url: url, project: projects(:linux))
link.save
link.wont_be :valid?
link.errors[:url].must_be :present?
end
[ # test for basic validity successes.
'http://www.domain.com', 'https://www.domain.com',
'http://www.google.com:8080/some/other/path.php', 'http://www.freshvanilla.org:8080/'
].each do |url|
link = build(:link, url: url, project: projects(:linux))
link.editor_account = create(:account)
link.save
link.must_be :valid?
end
end
end
| 1 | 6,974 | .find(id) is being deprecated in Rails 5. | blackducksoftware-ohloh-ui | rb |
@@ -4,6 +4,9 @@ import datetime
from itertools import product
import iris
+from iris.coords import DimCoord
+from iris.cube import CubeList
+from iris.experimental.equalise_cubes import equalise_attributes
from iris.util import guess_coord_axis
import numpy as np | 1 | from __future__ import absolute_import
import datetime
from itertools import product
import iris
from iris.util import guess_coord_axis
import numpy as np
from .interface import Interface, DataError
from .grid import GridInterface
from ..dimension import Dimension
from ..element import Element
from ..ndmapping import (NdMapping, item_check, sorted_context)
from ..spaces import HoloMap
from .. import util
def get_date_format(coord):
def date_formatter(val, pos=None):
date = coord.units.num2date(val)
date_format = Dimension.type_formatters.get(datetime.datetime, None)
if date_format:
return date.strftime(date_format)
else:
return date
return date_formatter
def coord_to_dimension(coord):
"""
Converts an iris coordinate to a HoloViews dimension.
"""
kwargs = {}
if coord.units.is_time_reference():
kwargs['value_format'] = get_date_format(coord)
else:
kwargs['unit'] = str(coord.units)
return Dimension(coord.name(), **kwargs)
def sort_coords(coord):
"""
Sorts a list of DimCoords trying to ensure that
dates and pressure levels appear first and the
longitude and latitude appear last in the correct
order.
"""
order = {'T': -2, 'Z': -1, 'X': 1, 'Y': 2}
axis = guess_coord_axis(coord)
return (order.get(axis, 0), coord and coord.name())
class CubeInterface(GridInterface):
"""
The CubeInterface allows HoloViews to interact with iris
Cube data. When passing an iris Cube to a HoloViews Element the
init method will infer the dimensions of the Cube from its
coordinates. Currently the interface only provides the basic
methods required for HoloViews to work with an object.
"""
types = (iris.cube.Cube,)
datatype = 'cube'
@classmethod
def init(cls, eltype, data, kdims, vdims):
if kdims:
kdim_names = [kd.name if isinstance(kd, Dimension) else kd for kd in kdims]
else:
kdims = eltype.kdims
kdim_names = [kd.name for kd in eltype.kdims]
if not isinstance(data, iris.cube.Cube):
if vdims is None:
vdims = eltype.vdims
ndims = len(kdim_names)
kdims = [kd if isinstance(kd, Dimension) else Dimension(kd)
for kd in kdims]
vdim = vdims[0] if isinstance(vdims[0], Dimension) else Dimension(vdims[0])
if isinstance(data, tuple):
value_array = data[-1]
data = {d: vals for d, vals in zip(kdim_names + [vdim.name], data)}
elif isinstance(data, list) and data == []:
ndims = len(kdims)
dimensions = [d.name if isinstance(d, Dimension) else
d for d in kdims + vdims]
data = {d: np.array([]) for d in dimensions[:ndims]}
data.update({d: np.empty((0,) * ndims) for d in dimensions[ndims:]})
if isinstance(data, dict):
value_array = data[vdim.name]
coords = [(iris.coords.DimCoord(data[kd.name], long_name=kd.name,
units=kd.unit), ndims-n-1)
for n, kd in enumerate(kdims)]
try:
data = iris.cube.Cube(value_array, long_name=vdim.name,
dim_coords_and_dims=coords)
except Exception:
# if the cube cannot be constructed from the supplied coordinates,
# fall through to the type check below instead of swallowing
# BaseException subclasses like KeyboardInterrupt
pass
if not isinstance(data, iris.cube.Cube):
raise TypeError('Data must be be an iris Cube type.')
if kdims:
coords = []
for kd in kdims:
coord = data.coords(kd.name if isinstance(kd, Dimension) else kd)
if len(coord) == 0:
raise ValueError('Key dimension %s not found in '
'Iris cube.' % kd)
coords.append(kd if isinstance(kd, Dimension) else coord[0])
else:
coords = data.dim_coords
coords = sorted(coords, key=sort_coords)
kdims = [crd if isinstance(crd, Dimension) else coord_to_dimension(crd)
for crd in coords]
if vdims is None:
vdims = [Dimension(data.name(), unit=str(data.units))]
return data, {'kdims':kdims, 'vdims':vdims}, {}
@classmethod
def validate(cls, dataset, vdims=True):
if vdims and len(dataset.vdims) > 1:
raise DataError("Iris cubes do not support more than one value dimension", cls)
@classmethod
def irregular(cls, dataset, dim):
"CubeInterface does not support irregular data"
return False
@classmethod
def shape(cls, dataset, gridded=False):
if gridded:
return dataset.data.shape
else:
return (cls.length(dataset), len(dataset.dimensions()))
@classmethod
def coords(cls, dataset, dim, ordered=False, expanded=False):
if expanded:
return util.expand_grid_coords(dataset, dim)
data = dataset.data.coords(dim)[0].points
if ordered and np.all(data[1:] < data[:-1]):
data = data[::-1]
return data
@classmethod
def values(cls, dataset, dim, expanded=True, flat=True, compute=True):
"""
Returns an array of the values along the supplied dimension.
"""
dim = dataset.get_dimension(dim, strict=True)
if dim in dataset.vdims:
coord_names = [c.name() for c in dataset.data.dim_coords]
data = dataset.data.copy().data
data = cls.canonicalize(dataset, data, coord_names)
return data.T.flatten() if flat else data
elif expanded:
data = cls.coords(dataset, dim.name, expanded=True)
return data.T.flatten() if flat else data
else:
return cls.coords(dataset, dim.name, ordered=True)
@classmethod
def reindex(cls, dataset, kdims=None, vdims=None):
dropped_kdims = [kd for kd in dataset.kdims if kd not in kdims]
constant = {}
for kd in dropped_kdims:
vals = cls.values(dataset, kd.name, expanded=False)
if len(vals) == 1:
constant[kd.name] = vals[0]
if len(constant) == len(dropped_kdims):
constraints = iris.Constraint(**constant)
return dataset.data.extract(constraints)
elif dropped_kdims:
return tuple(dataset.columns(kdims+vdims).values())
return dataset.data
@classmethod
def groupby(cls, dataset, dims, container_type=HoloMap, group_type=None, **kwargs):
"""
Groups the data by one or more dimensions returning a container
indexed by the grouped dimensions containing slices of the
cube wrapped in the group_type. This makes it very easy to
break up a high-dimensional dataset into smaller viewable chunks.
"""
if not isinstance(dims, list): dims = [dims]
dims = [dataset.get_dimension(d, strict=True) for d in dims]
constraints = [d.name for d in dims]
slice_dims = [d for d in dataset.kdims if d not in dims]
# Update the kwargs appropriately for Element group types
group_kwargs = {}
group_type = dict if group_type == 'raw' else group_type
if issubclass(group_type, Element):
group_kwargs.update(util.get_param_values(dataset))
group_kwargs['kdims'] = slice_dims
group_kwargs.update(kwargs)
drop_dim = any(d not in group_kwargs['kdims'] for d in slice_dims)
unique_coords = product(*[cls.values(dataset, d, expanded=False)
for d in dims])
data = []
for key in unique_coords:
constraint = iris.Constraint(**dict(zip(constraints, key)))
extracted = dataset.data.extract(constraint)
if drop_dim:
extracted = group_type(extracted, kdims=slice_dims,
vdims=dataset.vdims).columns()
cube = group_type(extracted, **group_kwargs)
data.append((key, cube))
if issubclass(container_type, NdMapping):
with item_check(False), sorted_context(False):
return container_type(data, kdims=dims)
else:
return container_type(data)
@classmethod
def range(cls, dataset, dimension):
"""
Computes the range along a particular dimension.
"""
dim = dataset.get_dimension(dimension, strict=True)
values = dataset.dimension_values(dim.name, False)
return (np.nanmin(values), np.nanmax(values))
@classmethod
def redim(cls, dataset, dimensions):
"""
Rename coords on the Cube.
"""
new_dataset = dataset.data.copy()
for name, new_dim in dimensions.items():
if name == new_dataset.name():
new_dataset.rename(new_dim.name)
for coord in new_dataset.dim_coords:
if name == coord.name():
coord.rename(new_dim.name)
return new_dataset
@classmethod
def length(cls, dataset):
"""
Returns the total number of samples in the dataset.
"""
return np.product([len(d.points) for d in dataset.data.coords()])
@classmethod
def sort(cls, columns, by=[], reverse=False):
"""
Cubes are assumed to be sorted by default.
"""
return columns
@classmethod
def aggregate(cls, columns, kdims, function, **kwargs):
"""
Aggregation currently not implemented.
"""
raise NotImplementedError
@classmethod
def sample(cls, dataset, samples=[]):
"""
Sampling currently not implemented.
"""
raise NotImplementedError
@classmethod
def add_dimension(cls, columns, dimension, dim_pos, values, vdim):
"""
Adding value dimensions not currently supported by iris interface.
Adding key dimensions not possible on dense interfaces.
"""
if not vdim:
raise Exception("Cannot add key dimension to a dense representation.")
raise NotImplementedError
@classmethod
def select_to_constraint(cls, dataset, selection):
"""
Transform a selection dictionary to an iris Constraint.
"""
def get_slicer(start, end):
def slicer(cell):
return start <= cell.point < end
return slicer
constraint_kwargs = {}
for dim, constraint in selection.items():
if isinstance(constraint, slice):
constraint = (constraint.start, constraint.stop)
if isinstance(constraint, tuple):
if constraint == (None, None):
continue
constraint = get_slicer(*constraint)
dim = dataset.get_dimension(dim, strict=True)
constraint_kwargs[dim.name] = constraint
return iris.Constraint(**constraint_kwargs)
@classmethod
def select(cls, dataset, selection_mask=None, **selection):
"""
Apply a selection to the data.
"""
constraint = cls.select_to_constraint(dataset, selection)
pre_dim_coords = [c.name() for c in dataset.data.dim_coords]
indexed = cls.indexed(dataset, selection)
extracted = dataset.data.extract(constraint)
if indexed and not extracted.dim_coords:
return extracted.data.item()
post_dim_coords = [c.name() for c in extracted.dim_coords]
dropped = [c for c in pre_dim_coords if c not in post_dim_coords]
for d in dropped:
extracted = iris.util.new_axis(extracted, d)
return extracted
Interface.register(CubeInterface)
| 1 | 21,185 | It would be good to have the iris interface moved to geoviews. Could this be done for 1.10.6? | holoviz-holoviews | py |
@@ -152,7 +152,10 @@ func (cs *ClientServerImpl) Connect() error {
request, _ := http.NewRequest("GET", parsedURL.String(), nil)
// Sign the request; we'll send its headers via the websocket client which includes the signature
- utils.SignHTTPRequest(request, cs.AgentConfig.AWSRegion, ServiceName, cs.CredentialProvider, nil)
+ err = utils.SignHTTPRequest(request, cs.AgentConfig.AWSRegion, ServiceName, cs.CredentialProvider, nil)
+ if err != nil {
+ return err
+ }
timeoutDialer := &net.Dialer{Timeout: wsConnectTimeout}
tlsConfig := &tls.Config{ServerName: parsedURL.Host, InsecureSkipVerify: cs.AgentConfig.AcceptInsecureCert} | 1 | // Copyright 2014-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
// Package wsclient wraps the generated aws-sdk-go client to provide marshalling
// and unmarshalling of data over a websocket connection in the format expected
// by backend. It allows for bidirectional communication and acts as both a
// client-and-server in terms of requests, but only as a client in terms of
// connecting.
package wsclient
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"reflect"
"strings"
"sync"
"time"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/utils"
"github.com/aws/amazon-ecs-agent/agent/wsclient/wsconn"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
"github.com/cihub/seelog"
"github.com/gorilla/websocket"
"github.com/pkg/errors"
)
const (
// ServiceName defines the service name for the agent. This is used to sign messages
// that are sent to the backend.
ServiceName = "ecs"
// wsConnectTimeout specifies the default connection timeout to the backend.
wsConnectTimeout = 30 * time.Second
// wsHandshakeTimeout specifies the default handshake timeout for the websocket client
wsHandshakeTimeout = wsConnectTimeout
// readBufSize is the size of the read buffer for the ws connection.
readBufSize = 4096
// writeBufSize is the size of the write buffer for the ws connection.
writeBufSize = 32768
// Default NO_PROXY env var IP addresses
defaultNoProxyIP = "169.254.169.254,169.254.170.2"
errClosed = "use of closed network connection"
)
// ReceivedMessage is the intermediate message used to unmarshal a
// message from backend
type ReceivedMessage struct {
Type string `json:"type"`
Message json.RawMessage `json:"message"`
}
// RequestMessage is the intermediate message marshalled to send to backend.
type RequestMessage struct {
Type string `json:"type"`
Message json.RawMessage `json:"message"`
}
// RequestHandler would be func(*ecsacs.T for T in ecsacs.*) to be more proper, but it needs
// to be interface{} to properly capture that
type RequestHandler interface{}
// ClientServer is a combined client and server for the backend websocket connection
type ClientServer interface {
AddRequestHandler(RequestHandler)
// SetAnyRequestHandler takes a function with the signature 'func(i
// interface{})' and calls it with every message the server passes down.
// Only a single 'AnyRequestHandler' will be active at a given time for a
// ClientServer
SetAnyRequestHandler(RequestHandler)
MakeRequest(input interface{}) error
WriteMessage(input []byte) error
Connect() error
IsConnected() bool
SetConnection(conn wsconn.WebsocketConn)
Disconnect(...interface{}) error
Serve() error
SetReadDeadline(t time.Time) error
io.Closer
}
// ClientServerImpl wraps commonly used methods defined in ClientServer interface.
type ClientServerImpl struct {
// AgentConfig is the user-specified runtime configuration
AgentConfig *config.Config
// conn holds the underlying low-level websocket connection
conn wsconn.WebsocketConn
// CredentialProvider is used to retrieve AWS credentials
CredentialProvider *credentials.Credentials
// RequestHandlers is a map from message types to handler functions of the
// form:
// "FooMessage": func(message *ecsacs.FooMessage)
RequestHandlers map[string]RequestHandler
// AnyRequestHandler is a request handler that, if set, is called on every
// message with said message. It will be called before a RequestHandler is
// called. It must take a single interface{} argument.
AnyRequestHandler RequestHandler
// URL is the full url to the backend, including path, querystring, and so on.
URL string
// RWTimeout is the duration used for setting read and write deadlines
// for the websocket connection
RWTimeout time.Duration
// writeLock needed to ensure that only one routine is writing to the socket
writeLock sync.RWMutex
ClientServer
ServiceError
TypeDecoder
}
// Connect opens a connection to the backend and upgrades it to a websocket. Calls to
// 'MakeRequest' can be made after calling this, but responss will not be
// receivable until 'Serve' is also called.
func (cs *ClientServerImpl) Connect() error {
seelog.Debugf("Establishing a Websocket connection to %s", cs.URL)
parsedURL, err := url.Parse(cs.URL)
if err != nil {
return err
}
wsScheme, err := websocketScheme(parsedURL.Scheme)
if err != nil {
return err
}
parsedURL.Scheme = wsScheme
// NewRequest never returns an error if the url parses and we just verified
// it did above
request, _ := http.NewRequest("GET", parsedURL.String(), nil)
// Sign the request; we'll send its headers via the websocket client which includes the signature
utils.SignHTTPRequest(request, cs.AgentConfig.AWSRegion, ServiceName, cs.CredentialProvider, nil)
timeoutDialer := &net.Dialer{Timeout: wsConnectTimeout}
tlsConfig := &tls.Config{ServerName: parsedURL.Host, InsecureSkipVerify: cs.AgentConfig.AcceptInsecureCert}
// Ensure that NO_PROXY gets set
noProxy := os.Getenv("NO_PROXY")
if noProxy == "" {
dockerHost, err := url.Parse(cs.AgentConfig.DockerEndpoint)
if err == nil {
dockerHost.Scheme = ""
os.Setenv("NO_PROXY", fmt.Sprintf("%s,%s", defaultNoProxyIP, dockerHost.String()))
seelog.Info("NO_PROXY set:", os.Getenv("NO_PROXY"))
} else {
seelog.Errorf("NO_PROXY unable to be set: the configured Docker endpoint is invalid.")
}
}
dialer := websocket.Dialer{
ReadBufferSize: readBufSize,
WriteBufferSize: writeBufSize,
TLSClientConfig: tlsConfig,
Proxy: http.ProxyFromEnvironment,
NetDial: timeoutDialer.Dial,
HandshakeTimeout: wsHandshakeTimeout,
}
websocketConn, httpResponse, err := dialer.Dial(parsedURL.String(), request.Header)
if httpResponse != nil {
defer httpResponse.Body.Close()
}
if err != nil {
var resp []byte
if httpResponse != nil {
var readErr error
resp, readErr = ioutil.ReadAll(httpResponse.Body)
if readErr != nil {
return fmt.Errorf("Unable to read websocket connection: " + readErr.Error() + ", " + err.Error())
}
// If there's a response, we can try to unmarshal it into one of the
// modeled error types
possibleError, _, decodeErr := DecodeData(resp, cs.TypeDecoder)
if decodeErr == nil {
return cs.NewError(possibleError)
}
}
seelog.Warnf("Error creating a websocket client: %v", err)
return errors.Wrapf(err, "websocket client: unable to dial %s response: %s",
parsedURL.Host, string(resp))
}
cs.writeLock.Lock()
defer cs.writeLock.Unlock()
cs.conn = websocketConn
seelog.Debugf("Established a Websocket connection to %s", cs.URL)
return nil
}
// IsReady gives a boolean response that informs the caller if the websocket
// connection is fully established.
func (cs *ClientServerImpl) IsReady() bool {
cs.writeLock.RLock()
defer cs.writeLock.RUnlock()
return cs.conn != nil
}
// SetConnection passes a websocket connection object into the client. This is used only in
// testing and should be avoided in non-test code.
func (cs *ClientServerImpl) SetConnection(conn wsconn.WebsocketConn) {
cs.conn = conn
}
// SetReadDeadline sets the read deadline for the websocket connection
// A read timeout results in an io error if there are any outstanding reads
// that exceed the deadline
func (cs *ClientServerImpl) SetReadDeadline(t time.Time) error {
err := cs.conn.SetReadDeadline(t)
if err == nil {
return nil
}
seelog.Warnf("Unable to set read deadline for websocket connection: %v for %s", err, cs.URL)
// If we get connection closed error from SetReadDeadline, break out of the for loop and
// return an error
if opErr, ok := err.(*net.OpError); ok && strings.Contains(opErr.Err.Error(), errClosed) {
seelog.Errorf("Stopping redundant reads on closed network connection: %s", cs.URL)
return opErr
}
// An unhandled error has occurred while trying to extend read deadline.
// Try asynchronously closing the connection. We don't want to be blocked on stale connections
// taking too long to close. The flip side is that we might start accumulating stale connections.
// But, that still seems more desirable than waiting for ever for the connection to close
cs.forceCloseConnection()
return err
}
func (cs *ClientServerImpl) forceCloseConnection() {
closeChan := make(chan error)
go func() {
closeChan <- cs.Close()
}()
ctx, cancel := context.WithTimeout(context.TODO(), wsConnectTimeout)
defer cancel()
select {
case closeErr := <-closeChan:
if closeErr != nil {
seelog.Warnf("Unable to close websocket connection: %v for %s",
closeErr, cs.URL)
}
case <-ctx.Done():
if ctx.Err() != nil {
seelog.Warnf("Context canceled waiting for termination of websocket connection: %v for %s",
ctx.Err(), cs.URL)
}
}
}
// Disconnect disconnects the connection
func (cs *ClientServerImpl) Disconnect(...interface{}) error {
cs.writeLock.Lock()
defer cs.writeLock.Unlock()
if cs.conn == nil {
return fmt.Errorf("websocker client: no connection to close")
}
// Close() in turn results in an internal flushFrame() call in gorilla
// as the close frame needs to be sent to the server. Set the deadline
// for that as well.
if err := cs.conn.SetWriteDeadline(time.Now().Add(cs.RWTimeout)); err != nil {
seelog.Warnf("Unable to set write deadline for websocket connection: %v for %s", err, cs.URL)
}
return cs.conn.Close()
}
// AddRequestHandler adds a request handler to this client.
// A request handler *must* be a function taking a single argument, and that
// argument *must* be a pointer to a recognized 'ecsacs' struct.
// E.g. if you desired to handle messages from acs of type 'FooMessage', you
// would pass the following handler in:
// func(message *ecsacs.FooMessage)
// This function will panic if the passed in function does not have one pointer
// argument or the argument is not a recognized type.
// Additionally, the request handler will block processing of further messages
// on this connection so it's important that it return quickly.
func (cs *ClientServerImpl) AddRequestHandler(f RequestHandler) {
firstArg := reflect.TypeOf(f).In(0)
firstArgTypeStr := firstArg.Elem().Name()
recognizedTypes := cs.GetRecognizedTypes()
_, ok := recognizedTypes[firstArgTypeStr]
if !ok {
panic("AddRequestHandler called with invalid function; argument type not recognized: " + firstArgTypeStr)
}
cs.RequestHandlers[firstArgTypeStr] = f
}
// SetAnyRequestHandler passes a RequestHandler object into the client.
func (cs *ClientServerImpl) SetAnyRequestHandler(f RequestHandler) {
cs.AnyRequestHandler = f
}
// MakeRequest makes a request using the given input. Note, the input *MUST* be
// a pointer to a valid backend type that this client recognises
func (cs *ClientServerImpl) MakeRequest(input interface{}) error {
send, err := cs.CreateRequestMessage(input)
if err != nil {
return err
}
// Over the wire we send something like
// {"type":"AckRequest","message":{"messageId":"xyz"}}
return cs.WriteMessage(send)
}
// WriteMessage wraps the low level websocket write method with a lock
func (cs *ClientServerImpl) WriteMessage(send []byte) error {
cs.writeLock.Lock()
defer cs.writeLock.Unlock()
// This is just future proofing. Ignore the error as the gorilla websocket
// library returns 'nil' anyway for SetWriteDeadline
// https://github.com/gorilla/websocket/blob/4201258b820c74ac8e6922fc9e6b52f71fe46f8d/conn.go#L761
if err := cs.conn.SetWriteDeadline(time.Now().Add(cs.RWTimeout)); err != nil {
seelog.Warnf("Unable to set write deadline for websocket connection: %v for %s", err, cs.URL)
}
return cs.conn.WriteMessage(websocket.TextMessage, send)
}
// ConsumeMessages reads messages from the websocket connection and handles read
// messages from an active connection.
func (cs *ClientServerImpl) ConsumeMessages() error {
for {
if err := cs.SetReadDeadline(time.Now().Add(cs.RWTimeout)); err != nil {
return err
}
messageType, message, err := cs.conn.ReadMessage()
switch {
case err == nil:
if messageType != websocket.TextMessage {
// maybe not fatal though, we'll try to process it anyways
seelog.Errorf("Unexpected messageType: %v", messageType)
}
cs.handleMessage(message)
case permissibleCloseCode(err):
seelog.Debugf("Connection closed for a valid reason: %s", err)
return io.EOF
default:
// Unexpected error occurred
seelog.Errorf("Error getting message from ws backend: error: [%v], messageType: [%v] ",
err, messageType)
return err
}
}
}
// CreateRequestMessage creates the request json message using the given input.
// Note, the input *MUST* be a pointer to a valid backend type that this
// client recognises.
func (cs *ClientServerImpl) CreateRequestMessage(input interface{}) ([]byte, error) {
msg := &RequestMessage{}
recognizedTypes := cs.GetRecognizedTypes()
for typeStr, typeVal := range recognizedTypes {
if reflect.TypeOf(input) == reflect.PtrTo(typeVal) {
msg.Type = typeStr
break
}
}
if msg.Type == "" {
return nil, &UnrecognizedWSRequestType{reflect.TypeOf(input).String()}
}
messageData, err := jsonutil.BuildJSON(input)
if err != nil {
return nil, &NotMarshallableWSRequest{msg.Type, err}
}
msg.Message = json.RawMessage(messageData)
send, err := json.Marshal(msg)
if err != nil {
return nil, &NotMarshallableWSRequest{msg.Type, err}
}
return send, nil
}
// handleMessage dispatches a message to the correct 'requestHandler' for its
// type. If no request handler is found, the message is discarded.
func (cs *ClientServerImpl) handleMessage(data []byte) {
typedMessage, typeStr, err := DecodeData(data, cs.TypeDecoder)
if err != nil {
seelog.Warnf("Unable to handle message from backend: %v", err)
return
}
seelog.Debugf("Received message of type: %s", typeStr)
if cs.AnyRequestHandler != nil {
reflect.ValueOf(cs.AnyRequestHandler).Call([]reflect.Value{reflect.ValueOf(typedMessage)})
}
if handler, ok := cs.RequestHandlers[typeStr]; ok {
reflect.ValueOf(handler).Call([]reflect.Value{reflect.ValueOf(typedMessage)})
} else {
seelog.Infof("No handler for message type: %s", typeStr)
}
}
func websocketScheme(httpScheme string) (string, error) {
// gorilla/websocket expects the websocket scheme (ws[s]://)
var wsScheme string
switch httpScheme {
case "http":
wsScheme = "ws"
case "https":
wsScheme = "wss"
default:
return "", fmt.Errorf("wsclient: unknown scheme %s", httpScheme)
}
return wsScheme, nil
}
// See https://github.com/gorilla/websocket/blob/87f6f6a22ebfbc3f89b9ccdc7fddd1b914c095f9/conn.go#L650
func permissibleCloseCode(err error) bool {
return websocket.IsCloseError(err,
websocket.CloseNormalClosure, // websocket error code 1000
websocket.CloseAbnormalClosure, // websocket error code 1006
websocket.CloseGoingAway, // websocket error code 1001
websocket.CloseInternalServerErr) // websocket error code 1011
}
| 1 | 20,065 | does this get wrapped in the calling method? | aws-amazon-ecs-agent | go |
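On the reviewer's question about wrapping: a minimal sketch of annotating the signing error with context inside Connect itself, using the github.com/pkg/errors package the file already imports. signFn stands in for utils.SignHTTPRequest so the sketch is self-contained:

package wsclient

import (
	"net/http"

	"github.com/pkg/errors"
)

// signFn abstracts the signature of the signing helper; in practice
// utils.SignHTTPRequest would be passed in (via a small closure).
type signFn func(req *http.Request) error

// signRequest wraps a signing failure with context, so the caller of
// Connect can tell which step failed instead of seeing a bare error.
func signRequest(req *http.Request, sign signFn) error {
	if err := sign(req); err != nil {
		return errors.Wrap(err, "websocket client: unable to sign request")
	}
	return nil
}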
@@ -195,6 +195,8 @@ class DictAuth(unittest.TestCase):
subprocess.check_output(
store_cmd, encoding="utf-8", errors="ignore")
+ # TODO: This test should not test case insensitivity, only the successful
+ # group authorization. The permission tests should do that.
def test_group_auth(self):
"""
Test for case insensitive group comparison at authorization. | 1 | #
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
Authentication tests.
"""
import json
import os
import subprocess
import unittest
from codechecker_api_shared.ttypes import RequestFailed, Permission
from codechecker_client.credential_manager import UserCredentials
from libtest import codechecker
from libtest import env
class DictAuth(unittest.TestCase):
"""
Dictionary based authentication tests.
"""
def setUp(self):
# Get the test workspace used to authentication tests.
self._test_workspace = os.environ['TEST_WORKSPACE']
test_class = self.__class__.__name__
print('Running ' + test_class + ' tests in ' + self._test_workspace)
self._test_cfg = env.import_test_cfg(self._test_workspace)
def test_privileged_access(self):
"""
Tests that initially, a non-authenticating server is accessible,
but an authenticating one is not.
"""
auth_client = env.setup_auth_client(self._test_workspace,
session_token='_PROHIBIT')
handshake = auth_client.getAuthParameters()
self.assertTrue(handshake.requiresAuthentication,
"Privileged server " +
"did not report that it requires authentication.")
self.assertFalse(handshake.sessionStillActive, "Empty session was " +
"reported to be still active.")
with self.assertRaises(RequestFailed):
auth_client.performLogin("Username:Password", "invalid:invalid")
print("Invalid credentials gave us a token!")
with self.assertRaises(RequestFailed):
auth_client.performLogin("Username:Password", None)
# A non-authenticated session should return an empty user.
user = auth_client.getLoggedInUser()
self.assertEqual(user, "")
self.sessionToken = auth_client.performLogin("Username:Password",
"cc:test")
self.assertIsNotNone(self.sessionToken,
"Valid credentials didn't give us a token!")
handshake = auth_client.getAuthParameters()
self.assertTrue(handshake.requiresAuthentication,
"Privileged server " +
"did not report that it requires authentication.")
self.assertFalse(handshake.sessionStillActive,
"Valid session was " + "reported not to be active.")
client = env.setup_viewer_client(self._test_workspace,
session_token=self.sessionToken)
self.assertIsNotNone(client.getPackageVersion(),
"Privileged server didn't respond properly.")
authd_auth_client = \
env.setup_auth_client(self._test_workspace,
session_token=self.sessionToken)
user = authd_auth_client.getLoggedInUser()
self.assertEqual(user, "cc")
# No personal token in the database.
personal_tokens = authd_auth_client.getTokens()
self.assertEqual(len(personal_tokens), 0)
# Create a new personal token.
description = "description"
personal_token = authd_auth_client.newToken(description)
token = personal_token.token
self.assertEqual(personal_token.description, description)
# Check whether the new token has been added.
personal_tokens = authd_auth_client.getTokens()
self.assertEqual(len(personal_tokens), 1)
self.assertEqual(personal_tokens[0].token, token)
self.assertEqual(personal_tokens[0].description, description)
auth_client = env.setup_auth_client(self._test_workspace,
session_token=self.sessionToken)
result = auth_client.destroySession()
self.assertTrue(result, "Server did not allow us to destroy session.")
self.sessionToken = auth_client.performLogin("Username:Password",
"colon:my:password")
self.assertIsNotNone(self.sessionToken,
"Valid credentials didn't give us a token!")
result = auth_client.destroySession()
self.assertTrue(result, "Server did not allow us to destroy session.")
# Kill the session token that was created by login() too.
codechecker.logout(self._test_cfg['codechecker_cfg'],
self._test_workspace)
auth_token_client = \
env.setup_auth_client(self._test_workspace,
session_token=token)
# Log-in by using an already generated personal token.
self.sessionToken = auth_token_client.performLogin("Username:Password",
"cc:" + token)
self.assertIsNotNone(self.sessionToken,
"Valid credentials didn't give us a token!")
user = auth_token_client.getLoggedInUser()
self.assertEqual(user, "cc")
result = auth_token_client.destroySession()
self.assertTrue(result, "Server did not allow us to destroy session.")
# Kill the session token that was created by login() too.
codechecker.logout(self._test_cfg['codechecker_cfg'],
self._test_workspace)
self.sessionToken = auth_client.performLogin("Username:Password",
"cc:test")
self.assertIsNotNone(self.sessionToken,
"Valid credentials didn't give us a token!")
auth_client = env.setup_auth_client(self._test_workspace,
session_token=self.sessionToken)
# Remove the generated personal token.
ret = auth_client.removeToken(token)
self.assertTrue(ret)
# Check whether no more personal token in the database.
personal_tokens = auth_client.getTokens()
self.assertEqual(len(personal_tokens), 0)
result = auth_client.destroySession()
self.assertTrue(result, "Server did not allow us to destroy session.")
# The server reports a HTTP 401 error which is not a valid
# Thrift response. But if it does so, it passes the test!
version = client.getPackageVersion()
self.assertIsNone(version,
"Privileged client allowed access after logout.")
handshake = auth_client.getAuthParameters()
self.assertFalse(handshake.sessionStillActive,
"Destroyed session was " +
"reported to be still active.")
def test_nonauth_storage(self):
"""
Storing the result should fail.
Authentication is required by the server but before the
store command there was no login so storing the report should fail.
"""
test_dir = os.path.dirname(os.path.realpath(__file__))
report_file = os.path.join(test_dir, 'clang-5.0-trunk.plist')
codechecker_cfg = self._test_cfg['codechecker_cfg']
store_cmd = [env.codechecker_cmd(), 'store', '--name', 'auth',
# Use the 'Default' product.
'--url', env.parts_to_url(codechecker_cfg),
report_file]
with self.assertRaises(subprocess.CalledProcessError):
subprocess.check_output(
store_cmd, encoding="utf-8", errors="ignore")
def test_group_auth(self):
"""
Test for case insensitive group comparison at authorization.
"""
auth_client = env.setup_auth_client(self._test_workspace,
session_token='_PROHIBIT')
# A non-authenticated session should return an empty user.
user = auth_client.getLoggedInUser()
self.assertEqual(user, "")
# Create a SUPERUSER login.
self.sessionToken = auth_client.performLogin("Username:Password",
"root:root")
self.assertIsNotNone(self.sessionToken,
"Valid credentials didn't give us a token!")
authd_auth_client = \
env.setup_auth_client(self._test_workspace,
session_token=self.sessionToken)
user = authd_auth_client.getLoggedInUser()
self.assertEqual(user, "root")
product_name = self._test_cfg['codechecker_cfg']['viewer_product']
pr_client = env.setup_product_client(
self._test_workspace, product=product_name)
product_id = pr_client.getCurrentProduct().id
extra_params = {'productID': product_id}
ret = authd_auth_client.addPermission(Permission.PRODUCT_ADMIN,
"ADMIN_group",
True,
json.dumps(extra_params))
self.assertTrue(ret)
result = auth_client.destroySession()
self.assertTrue(result, "Server did not allow us to destroy session.")
# Perform login with a user who is in ADMIN_GROUP and check that
# he has permission to perform operations.
self.sessionToken = \
auth_client.performLogin("Username:Password",
"admin_group_user:admin123")
self.assertIsNotNone(self.sessionToken,
"Valid credentials didn't give us a token!")
client = env.setup_viewer_client(self._test_workspace,
session_token=self.sessionToken)
self.assertIsNotNone(client.allowsStoringAnalysisStatistics(),
"Privileged server didn't respond properly.")
result = auth_client.destroySession()
self.assertTrue(result, "Server did not allow us to destroy session.")
def test_regex_groups(self):
auth_client = env.setup_auth_client(self._test_workspace,
session_token='_PROHIBIT')
# First login as root.
self.sessionToken = auth_client.performLogin("Username:Password",
"root:root")
self.assertIsNotNone(self.sessionToken,
"root was unable to login!")
# Then give SUPERUSER privs to admins_custom_group.
authd_auth_client = \
env.setup_auth_client(self._test_workspace,
session_token=self.sessionToken)
ret = authd_auth_client.addPermission(Permission.SUPERUSER,
"admins_custom_group",
True, None)
self.assertTrue(ret)
result = auth_client.destroySession()
self.assertTrue(result, "Server did not allow us to destroy session.")
# Login as a user who is in admins_custom_group.
sessionToken = auth_client.performLogin("Username:Password",
"regex_admin:blah")
self.assertIsNotNone(sessionToken,
"Valid credentials didn't give us a token!")
# Do something privileged.
client = env.setup_viewer_client(self._test_workspace,
session_token=sessionToken)
self.assertIsNotNone(client.allowsStoringAnalysisStatistics(),
"Privileged call failed.")
result = auth_client.destroySession()
self.assertTrue(result, "Server did not allow us to destroy session.")
# Finally try to do the same with an unprivileged user.
sessionToken = auth_client.performLogin("Username:Password",
"john:doe")
self.assertIsNotNone(sessionToken,
"Valid credentials didn't give us a token!")
client = env.setup_viewer_client(self._test_workspace,
session_token=sessionToken)
self.assertFalse(client.allowsStoringAnalysisStatistics(),
"Privileged call from unprivileged user"
" did not fail!")
result = auth_client.destroySession()
self.assertTrue(result, "Server did not allow us to destroy session.")
def test_personal_access_tokens(self):
""" Test personal access token commands. """
codechecker_cfg = self._test_cfg['codechecker_cfg']
host = codechecker_cfg['viewer_host']
port = codechecker_cfg['viewer_port']
new_token_cmd = [env.codechecker_cmd(), 'cmd', 'token', 'new',
'--url', env.parts_to_url(codechecker_cfg)]
with self.assertRaises(subprocess.CalledProcessError):
subprocess.check_output(
new_token_cmd,
encoding="utf-8",
errors="ignore")
# Login to the server.
auth_client = env.setup_auth_client(self._test_workspace,
session_token='_PROHIBIT')
# A non-authenticated session should return an empty user.
user = auth_client.getLoggedInUser()
self.assertEqual(user, "")
# Create a SUPERUSER login.
session_token = auth_client.performLogin("Username:Password",
"cc:test")
self.assertIsNotNone(session_token,
"Valid credentials didn't give us a token!")
cred_manager = UserCredentials()
cred_manager.save_token(host, port, session_token)
# Run the new token command after login.
subprocess.check_output(
new_token_cmd,
encoding="utf-8",
errors="ignore")
# List personal access tokens.
list_token_cmd = [env.codechecker_cmd(), 'cmd', 'token', 'list',
'--url', env.parts_to_url(codechecker_cfg),
'-o', 'json']
out_json = subprocess.check_output(
list_token_cmd, encoding="utf-8", errors="ignore")
tokens = json.loads(out_json)
self.assertEqual(len(tokens), 1)
# Remove personal access token.
del_token_cmd = [env.codechecker_cmd(), 'cmd', 'token', 'del',
'--url', env.parts_to_url(codechecker_cfg),
tokens[0]['token']]
subprocess.check_output(
del_token_cmd,
encoding="utf-8",
errors="ignore")
cred_manager.save_token(host, port, session_token, True)
| 1 | 13,568 | Do we still need this test case? If your new test cases test this, then we can remove it. | Ericsson-codechecker | c |
@@ -641,6 +641,15 @@ class Plan < ActiveRecord::Base
User.joins(:roles).where('roles.plan_id = ? AND roles.access IN (?)', self.id, vals).first
end
+ ##
+ # returns the shared roles of a plan, excluding the creator
+ def shared
+ role_values = Role.access_values_for(:commenter)
+ .concat(Role.access_values_for(:editor))
+ .concat(Role.access_values_for(:administrator)).uniq - Role.access_values_for(:creator)
+ Role.where(plan: self, access: role_values)
+ end
+
##
# the owner and co-owners of the project
# | 1 | class Plan < ActiveRecord::Base
include ConditionalUserMailer
before_validation :set_creation_defaults
##
# Associations
belongs_to :template
has_many :phases, through: :template
has_many :sections, through: :phases
has_many :questions, through: :sections
has_many :themes, through: :questions
has_many :answers, dependent: :destroy
has_many :notes, through: :answers
has_many :roles, dependent: :destroy
has_many :users, through: :roles
has_and_belongs_to_many :guidance_groups, join_table: :plans_guidance_groups
accepts_nested_attributes_for :template
has_many :exported_plans
# Active Record Callbacks
# Creates answers for plan question and persists them whenever a new plan is created and successfully saved
after_create do
Answer.create(
self.questions.map{ |q| { lock_version: 1, text: q.default_value, plan_id: self.id, question_id: q.id }})
end
# COMMENTED OUT THE DIRECT CONNECTION HERE TO Users to prevent assignment of users without an access_level specified (currently defaults to creator)
# has_many :users, through: :roles
##
# Possibly needed for active_admin
# -relies on protected_attributes gem as syntax depricated in rails 4.2
attr_accessible :locked, :project_id, :version_id, :version, :plan_sections,
:exported_plans, :project, :title, :template, :grant_number,
:identifier, :principal_investigator, :principal_investigator_identifier,
:description, :data_contact, :funder_name, :visibility, :exported_plans,
:roles, :users, :org, :data_contact_email, :data_contact_phone, :feedback_requested,
:principal_investigator_email, :as => [:default, :admin]
accepts_nested_attributes_for :roles
# public is a Ruby keyword so using publicly
enum visibility: [:organisationally_visible, :publicly_visible, :is_test, :privately_visible]
#TODO: work out why this messes up plan creation:
# briley: Removed reliance on :users, it's really on :roles (shouldn't have a plan without at least a creator, right?) It should be ok like this though now
# validates :template, :title, presence: true
##
# Constants
A4_PAGE_HEIGHT = 297 #(in mm)
A4_PAGE_WIDTH = 210 #(in mm)
ROUNDING = 5 #round estimate up to nearest 5%
FONT_HEIGHT_CONVERSION_FACTOR = 0.35278 #convert font point size to mm
FONT_WIDTH_HEIGHT_RATIO = 0.4 #Assume glyph width averages 2/5 the height
# Scope queries
# Note that in ActiveRecord::Enum the mappings are exposed through a class method with the pluralized attribute name (e.g. visibilities rather than visibility)
scope :publicly_visible, -> { where(:visibility => visibilities[:publicly_visible]).order(:title => :asc) }
# Retrieves any plan organisationally or publicly visible for a given org id
scope :organisationally_or_publicly_visible, -> (user) {
Plan.includes(:template)
.where({
visibility: [visibilities[:organisationally_visible], visibilities[:publicly_visible]],
"templates.org_id": user.org_id})
.where(['NOT EXISTS (SELECT 1 FROM roles WHERE plan_id = plans.id AND user_id = ?)', user.id])
.order(:title => :asc)
}
##
# Settings for the template
has_settings :export, class_name: 'Settings::Template' do |s|
s.key :export, defaults: Settings::Template::DEFAULT_SETTINGS
end
alias_method :super_settings, :settings
##
# Proxy through to the template settings (or defaults if this plan doesn't have
# an associated template) if there are no settings stored for this plan.
# `key` is required by rails-settings, so it's required here, too.
#
# @param key [Key] a key required by rails
# @return [Settings] settings for this plan's template
def settings(key)
self_settings = self.super_settings(key)
return self_settings if self_settings.value?
# self.dmptemplate.settings(key)
self.template.settings(key) unless self.template.nil?
end
##
# returns the template for this plan, or generates an empty template and returns that
#
# @return [Dmptemplate] the template associated with this plan
def dmptemplate
#self.project.try(:dmptemplate) || Dmptemplate.new
self.template
end
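  ##
  # returns the template this plan's template was customized from, at the most
  # recent version created before this plan, or nil if the plan's template is
  # not a customization
  #
  # @return [Template, nil] the base template, or nil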
def base_template
base = nil
t = self.template
if t.customization_of.present?
base = Template.where("dmptemplate_id = ? and created_at < ?", t.customization_of, self.created_at).order(version: :desc).first
end
return base
end
##
# returns the most recent answer to the given question id
# optionally can create an answer if none exists
#
# @param qid [Integer] the id for the question to find the answer for
  # @param create_if_missing [Boolean] if true, will generate a default answer to the question
# @return [Answer,nil] the most recent answer to the question, or a new question with default value, or nil
def answer(qid, create_if_missing = true)
answer = answers.where(:question_id => qid).order("created_at DESC").first
question = Question.find(qid)
if answer.nil? && create_if_missing then
answer = Answer.new
answer.plan_id = id
answer.question_id = qid
answer.text = question.default_value
default_options = Array.new
question.question_options.each do |option|
if option.is_default
default_options << option
end
end
answer.question_options = default_options
end
return answer
end
# TODO: This just retrieves all of the guidance associated with the themes within the template
# so why are we transferring it here to the plan?
##
  # gathers all published guidance groups for the themes used in this plan's
  # template and assigns them as the plan's guidance groups
  #
  # @return [Array<GuidanceGroup>] the guidance groups assigned to the plan
def set_possible_guidance_groups
# find all the themes in this plan
# and get the guidance groups they belong to
ggroups = []
self.template.phases.each do |phase|
phase.sections.each do |section|
section.questions.each do |question|
question.themes.each do |theme|
theme.guidances.each do |guidance|
ggroups << guidance.guidance_group if guidance.guidance_group.published
# only show published guidance groups
end
end
end
end
end
self.guidance_groups = ggroups.uniq
end
##
# returns all of the possible guidance groups for the plan (all options to
# be selected by the user to display)
#
# @return Array<Guidance>
def get_guidance_group_options
# find all the themes in this plan
# and get the guidance groups they belong to
ggroups = []
Template.includes(phases: [sections: [questions: [themes: [guidances: [guidance_group: :org]]]]]).find(self.template_id).phases.each do |phase|
phase.sections.each do |section|
section.questions.each do |question|
question.themes.each do |theme|
theme.guidances.each do |guidance|
ggroups << guidance.guidance_group if guidance.guidance_group.published
# only show published guidance groups
end
end
end
end
end
return ggroups.uniq
end
##
# Sets up the plan for feedback:
# emails confirmation messages to owners
# emails org admins and org contact
# adds org admins to plan with the 'reviewer' Role
def request_feedback(user)
Plan.transaction do
begin
val = Role.access_values_for(:reviewer, :commenter).min
self.feedback_requested = true
# Share the plan with each org admin as the reviewer role
admins = user.org.org_admins
admins.each do |admin|
self.roles << Role.new(user: admin, access: val)
end
if self.save!
# Send an email confirmation to the owners and co-owners
deliver_if(recipients: self.owner_and_coowners, key: 'users.feedback_requested') do |r|
UserMailer.feedback_confirmation(r, self, user).deliver_now
end
# Send an email to all of the org admins as well as the Org's administrator email
if user.org.contact_email.present?
admins << User.new(email: user.org.contact_email, firstname: user.org.contact_name)
end
deliver_if(recipients: admins, key: 'admins.feedback_requested') do |r|
UserMailer.feedback_notification(r, self, user).deliver_now
end
true
else
false
end
rescue Exception => e
Rails.logger.error e
false
end
end
end
##
# Finalizes the feedback for the plan:
# emails confirmation messages to owners
# sets flag on plans.feedback_requested to false
# removes org admins from the 'reviewer' Role for the Plan
def complete_feedback(org_admin)
Plan.transaction do
begin
self.feedback_requested = false
# Remove the org admins reviewer role from the plan
vals = Role.access_values_for(:reviewer)
self.roles.delete(Role.where(plan: self, access: vals))
if self.save!
# Send an email confirmation to the owners and co-owners
deliver_if(recipients: self.owner_and_coowners, key: 'users.feedback_provided') do |r|
UserMailer.feedback_notification(r, self, org_admin).deliver_now
end
true
else
false
end
rescue Exception => e
Rails.logger.error e
false
end
end
end
##
# returns the guidances associated with the project's organisation, for a specified question
#
# @param question [Question] the question to find guidance for
# @return array of hashes with orgname, themes and the guidance itself
def guidance_for_question(question)
guidances = []
# add in the guidance for the template org
unless self.template.org.nil? then
self.template.org.guidance_groups.each do |group|
group.guidances.each do |guidance|
common_themes = guidance.themes.all & question.themes.all
if common_themes.length > 0
guidances << { orgname: self.template.org.name, theme: common_themes.join(','), guidance: guidance }
end
end
end
end
# add in the guidance for the user's org
unless self.owner.nil?
unless self.owner.org.nil? then
self.owner.org.guidance_groups.each do |group|
group.guidances.each do |guidance|
common_themes = guidance.themes.all & question.themes.all
if common_themes.length > 0
guidances << { orgname: self.template.org.name, theme: common_themes.join(','), guidance: guidance }
end
end
end
end
end
# Get guidance by theme from any guidance groups currently selected
self.guidance_groups.each do |group|
group.guidances.each do |guidance|
common_themes = guidance.themes.all & question.themes.all
if common_themes.length > 0
guidances << { orgname: self.template.org.name, theme: common_themes.join(','), guidance: guidance }
end
end
end
return guidances
end
##
# adds the given guidance to a hash indexed by a passed guidance group and theme
#
  # @param guidance_array [{GuidanceGroup => {Theme => Array<Guidance>}}] the passed hash of arrays of guidances. Indexed by GuidanceGroup and Theme.
# @param guidance_group [GuidanceGroup] the guidance_group index of the hash
# @param theme [Theme] the theme object for the GuidanceGroup
# @param guidance [Guidance] the guidance object to be appended to the correct section of the array
# @return [{GuidanceGroup => {Theme => Array<Guidance>}}] the updated object which was passed in
def add_guidance_to_array(guidance_array, guidance_group, theme, guidance)
if guidance_array[guidance_group].nil? then
guidance_array[guidance_group] = {}
end
if theme.nil? then
if guidance_array[guidance_group]["no_theme"].nil? then
guidance_array[guidance_group]["no_theme"] = []
end
if !guidance_array[guidance_group]["no_theme"].include?(guidance) then
guidance_array[guidance_group]["no_theme"].push(guidance)
end
else
if guidance_array[guidance_group][theme].nil? then
guidance_array[guidance_group][theme] = []
end
if !guidance_array[guidance_group][theme].include?(guidance) then
guidance_array[guidance_group][theme].push(guidance)
end
end
return guidance_array
end
##
# determines if the plan is editable by the specified user
#
# @param user_id [Integer] the id for a user
# @return [Boolean] true if user can edit the plan
def editable_by?(user_id)
user_id = user_id.id if user_id.is_a?(User)
has_role(user_id, :editor)
end
##
# determines if the plan is readable by the specified user
# TODO: introduce explicit readable rather than implicit
# currently role with no flags = readable
#
# @param user_id [Integer] the id for a user
# @return [Boolean] true if the user can read the plan
def readable_by?(user_id)
user_id = user_id.id if user_id.is_a?(User)
has_role(user_id, :commenter)
end
##
# determines if the plan is administerable by the specified user
#
# @param user_id [Integer] the id for the user
# @return [Boolean] true if the user can administer the plan
def administerable_by?(user_id)
user_id = user_id.id if user_id.is_a?(User)
has_role(user_id, :administrator)
end
##
# determines if the plan is owned by the specified user
#
# @param user_id [Integer] the id for the user
  # @return [Boolean] true if the user is the creator of the plan
def owned_by?(user_id)
user_id = user_id.id if user_id.is_a?(User)
has_role(user_id, :creator)
end
##
# determines if the plan is reviewable by the specified user
#
# @param user_id [Integer] the id for the user
  # @return [Boolean] true if the user can review the plan
def reviewable_by?(user_id)
user_id = user_id.id if user_id.is_a?(User)
has_role(user_id, :reviewer)
end
##
  # determines whether or not the specified user has any role on the plan
  #
  # @param user [User] the user to check
  # @return [Boolean] true if the user has any role
def any_role?(user)
user_id = user.id if user.is_a?(User)
!self.roles.index{ |rol| rol.user_id == user_id }.nil?
end
##
# defines and returns the status of the plan
  # status consists of a hash of the num_questions, num_answers, sections, questions, and space used.
  # For each section, it contains the ids of each of the questions
  # for each question, it contains the format, answer_id, answer_updated_at, answer_text, answer_option_ids, and answered_by
#
# @return [Status]
def status
status = {
"num_questions" => 0,
"num_answers" => 0,
"sections" => {},
"questions" => {},
"space_used" => 0 # percentage of available space in pdf used
}
space_used = height_of_text(self.title, 2, 2)
section_ids = sections.map {|s| s.id}
    # we retrieve this in 2 joins:
    # 1. sections and questions
    # 2. questions and answers
    # why? because Rails 4 doesn't have any sensible left outer join.
    # when we change to Rails 5, which is meant to have one, this can be fixed
records = Section.joins(questions: :question_format)
.select('sections.id as sectionid,
sections.title as stitle,
questions.id as questionid,
questions.text as questiontext,
question_formats.title as qformat')
.where("sections.id in (?) ", section_ids)
.to_a
# extract question ids to get answers
question_ids = records.map {|r| r.questionid}.uniq
status["num_questions"] = question_ids.count
arecords = Question.joins(answers: :user)
.select('questions.id as questionid,
answers.id as answerid,
answers.plan_id as plan_id,
answers.text as answertext,
answers.updated_at as updated,
users.email as username')
.where("questions.id in (?) and answers.plan_id = ?",question_ids, self.id)
.to_a
# we want answerids to extract options later
answer_ids = arecords.map {|r| r.answerid}.uniq
status["num_answers"] = answer_ids.count
# create map from questionid to answer structure
qa_map = {}
arecords.each do |rec|
qa_map[rec.questionid] = {
plan: rec.plan_id,
id: rec.answerid,
text: rec.answertext,
updated: rec.updated,
user: rec.username
}
end
# build main status structure
records.each do |rec|
sid = rec.sectionid
stitle = rec.stitle
qid = rec.questionid
qtext = rec.questiontext
format = rec.qformat
answer = nil
if qa_map.has_key?(qid)
answer = qa_map[qid]
end
aid = answer.nil? ? nil : answer[:id]
atext = answer.nil? ? nil : answer[:text]
updated = answer.nil? ? nil : answer[:updated]
uname = answer.nil? ? nil : answer[:user]
space_used += height_of_text(stitle, 1, 1)
shash = status["sections"]
if !shash.has_key?(sid)
shash[sid] = {}
shash[sid]["num_questions"] = 0
shash[sid]["num_answers"] = 0
shash[sid]["questions"] = Array.new
end
shash[sid]["questions"] << qid
shash[sid]["num_questions"] += 1
space_used += height_of_text(qtext) unless qtext == stitle
if atext.present?
space_used += height_of_text(atext)
else
space_used += height_of_text(_('Question not answered.'))
end
if answer.present? then
shash[sid]["num_answers"] += 1
end
status["questions"][qid] = {
"format" => format,
"answer_id" => aid,
"answer_updated_at" => updated.to_i,
"answer_text" => atext,
"answered_by" => uname
}
end
records = Answer.joins(:question_options).select('answers.id as answerid, question_options.id as optid').where(id: answer_ids).to_a
opt_hash = {}
records.each do |rec|
aid = rec.answerid
optid = rec.optid
if !opt_hash.has_key?(aid)
opt_hash[aid] = Array.new
end
opt_hash[aid] << optid
end
status["questions"].each_key do |questionid|
answerid = status["questions"][questionid]["answer_id"]
status["questions"][questionid]["answer_option_ids"] = opt_hash[answerid]
end
status['space_used'] = estimate_space_used(space_used)
return status
end
##
  # assigns the passed user_id to the creator role for the project
  # gives the user rights to read, edit, administrate, and defines them as creator
  #
  # @param user_id [Integer] the id of the user to be given privileges
def assign_creator(user_id)
user_id = user_id.id if user_id.is_a?(User)
add_user(user_id, true, true, true)
end
##
# returns the funder id for the plan
#
  # @return [Org, nil] the funder organisation for the plan's template (note: the org itself, not its id)
def funder_id
if self.template.nil? then
return nil
end
return self.template.org
end
##
# returns the funder organisation for the project or nil if none is specified
#
# @return [Organisation, nil] the funder for project, or nil if none exists
def funder
template = self.template
if template.nil? then
return nil
end
if template.customization_of
return template.customization_of.org
else
return template.org
end
end
##
# assigns the passed user_id as an editor for the project
# gives the user rights to read and edit
#
  # @param user_id [Integer] the id of the user to be given privileges
def assign_editor(user_id)
add_user(user_id, true)
end
##
# assigns the passed user_id as a reader for the project
# gives the user rights to read
#
  # @param user_id [Integer] the id of the user to be given privileges
def assign_reader(user_id)
add_user(user_id)
end
##
# assigns the passed user_id as an administrator for the project
  # gives the user rights to read, edit, and administrate the project
  #
  # @param user_id [Integer] the id of the user to be given privileges
def assign_administrator(user_id)
add_user(user_id, true, true)
end
##
# the datetime for the latest update of this plan
#
# @return [DateTime] the time of latest update
def latest_update
latest_update = updated_at
phases.each do |phase|
if phase.updated_at > latest_update then
latest_update = phase.updated_at
end
end
return latest_update
end
# Getters to match 'My plans' columns
##
# the title of the project
#
# @return [String] the title of the project
def name
self.title
end
##
# the owner of the project
#
  # @return [User] the creator of the project
def owner
vals = Role.access_values_for(:creator)
User.joins(:roles).where('roles.plan_id = ? AND roles.access IN (?)', self.id, vals).first
end
##
# the owner and co-owners of the project
#
# @return [Users]
def owner_and_coowners
vals = Role.access_values_for(:creator).concat(Role.access_values_for(:administrator))
User.joins(:roles).where("roles.plan_id = ? AND roles.access IN (?)", self.id, vals)
end
##
# the time the project was last updated, formatted as a date
#
# @return [Date] last update as a date
def last_edited
self.latest_update.to_date
end
# Returns the number of answered questions from the entire plan
def num_answered_questions
n = 0
self.sections.each do |s|
n+= s.num_answered_questions(self.id)
end
return n
end
# Returns a section given its id or nil if does not exist for the current plan
def get_section(section_id)
self.sections.find { |s| s.id == section_id }
end
# Returns the number of questions for a plan. Note, this method becomes useful
  # for when sections and their questions are eager loaded, as that avoids SQL queries.
def num_questions
n = 0
self.sections.each do |s|
n+= s.questions.size()
end
return n
end
# the following two methods are for eager loading. One gets used for the plan/show
  # page and the other for the plan/edit. The difference is just that one pulls in more than
  # the other.
  # TODO: revisit this and work out for sure that maintaining the difference is worthwhile.
  # it may not be. Also make sure neither is doing more than it needs to.
#
def self.eager_load(id)
Plan.includes(
[{template: [
{phases: {sections: {questions: :answers}}},
{customizations: :org}
]},
{plans_guidance_groups: {guidance_group: :guidances}}
]).find(id)
end
def self.load_for_phase(id, phase_id)
Plan.includes(
[template: [
{phases: {sections: {questions: [{answers: :notes}, :annotations, :question_format, :themes]}}},
{customizations: :org},
:org
],
plans_guidance_groups: {guidance_group: {guidances: :themes}}
]).where(id: id, phases: { id: phase_id }).first
end
  # deep copy the given plan and all of its associations
#
# @params [Plan] plan to be deep copied
# @return [Plan] saved copied plan
def self.deep_copy(plan)
plan_copy = plan.dup
plan_copy.title = "Copy of " + plan.title
plan_copy.save!
plan.answers.each do |answer|
answer_copy = Answer.deep_copy(answer)
answer_copy.plan_id = plan_copy.id
answer_copy.save!
end
plan.guidance_groups.each do |guidance_group|
if guidance_group.present?
plan_copy.guidance_groups << GuidanceGroup.where(id: guidance_group.id).first
end
end
return plan_copy
end
# Returns visibility message given a Symbol type visibility passed, otherwise nil
def self.visibility_message(type)
message = {
:organisationally_visible => _('institutional'),
:publicly_visible => _('public'),
:is_test => _('test'),
:privately_visible => _('private')
}
message[type]
end
# Determines whether or not visibility changes are permitted according to the
  # percentage of the plan answered with respect to a threshold defined in application.config
def visibility_allowed?
value=(self.num_answered_questions().to_f/self.num_questions()*100).round(2)
!self.is_test? && value >= Rails.application.config.default_plan_percentage_answered
end
private
# Returns whether or not the user has the specified role for the plan
def has_role(user_id, role_as_sym)
if user_id.is_a?(Integer) && role_as_sym.is_a?(Symbol)
vals = Role.access_values_for(role_as_sym)
self.roles.where(user_id: user_id, access: vals).first.present?
else
false
end
end
##
# adds a user to the project
  # if no flags are specified, the user is given read privileges
  #
  # @param user_id [Integer] the id of the user to be given privileges
  # @param is_editor [Boolean] whether or not the user can edit the project
  # @param is_administrator [Boolean] whether or not the user can administrate the project
  # @param is_creator [Boolean] whether or not the user created the project
# @return [Array<ProjectGroup>]
#
# TODO: change this to specifying uniqueness of user/plan association and handle
# that way
#
def add_user(user_id, is_editor = false, is_administrator = false, is_creator = false)
Role.where(plan_id: self.id, user_id: user_id).each do |r|
r.destroy
end
role = Role.new
role.user_id = user_id
role.plan_id = id
# if you get assigned a role you can comment
    role.commenter = true
    # the rest of the roles are inclusive, so creator => administrator => editor
if is_creator
role.creator = true
role.administrator = true
role.editor = true
end
if is_administrator
role.administrator = true
role.editor = true
end
if is_editor
role.editor = true
end
role.save
# This is necessary because we're creating the associated record but not assigning it
# to roles. Auto-saving like this may be confusing when coding upstream in a controller,
# view or api. Should probably change this to:
# self.roles << role
# and then let the save be called manually via:
# plan.save!
#self.reload
end
##
# creates a plan for each phase in the dmptemplate associated with this project
  # unless the phase is unpublished, it creates a new plan and a new version of the plan, and adds them to the project's plans
#
# @return [Array<Plan>]
def create_plans
dmptemplate.phases.each do |phase|
latest_published_version = phase.latest_published_version
unless latest_published_version.nil?
new_plan = Plan.new
new_plan.version = latest_published_version
plans << new_plan
end
end
end
##
# Based on the height of the text gathered so far and the available vertical
# space of the pdf, estimate a percentage of how much space has been used.
# This is highly dependent on the layout in the pdf. A more accurate approach
# would be to render the pdf and check how much space had been used, but that
# could be very slow.
# NOTE: This is only an estimate, rounded up to the nearest 5%; it is intended
# for guidance when editing plan data, not to be 100% accurate.
#
# @param used_height [Integer] an estimate of the height used so far
  # @return [Integer] the estimate of space used of an A4 portrait page
def estimate_space_used(used_height)
@formatting ||= self.settings(:export).formatting
return 0 unless @formatting[:font_size] > 0
margin_height = @formatting[:margin][:top].to_i + @formatting[:margin][:bottom].to_i
page_height = A4_PAGE_HEIGHT - margin_height # 297mm for A4 portrait
available_height = page_height * self.dmptemplate.settings(:export).max_pages
percentage = (used_height / available_height) * 100
(percentage / ROUNDING).ceil * ROUNDING # round up to nearest five
end
##
# Take a guess at the vertical height (in mm) of the given text based on the
# font-size and left/right margins stored in the plan's settings.
# This assumes a fixed-width for each glyph, which is obviously
# incorrect for the font-face choices available; the idea is that
# they'll hopefully average out to that in the long-run.
# Allows for hinting different font sizes (offset from base via font_size_inc)
# and vertical margins (i.e. for heading text)
#
# @param text [String] the text to estimate size of
# @param font_size_inc [Integer] the size of the font of the text, defaults to 0
# @param vertical_margin [Integer] the top margin above the text, defaults to 0
def height_of_text(text, font_size_inc = 0, vertical_margin = 0)
@formatting ||= self.settings(:export).formatting
@margin_width ||= @formatting[:margin][:left].to_i + @formatting[:margin][:right].to_i
@base_font_size ||= @formatting[:font_size]
return 0 unless @base_font_size > 0
font_height = FONT_HEIGHT_CONVERSION_FACTOR * (@base_font_size + font_size_inc)
font_width = font_height * FONT_WIDTH_HEIGHT_RATIO # Assume glyph width averages at 2/5s the height
leading = font_height / 2
chars_in_line = (A4_PAGE_WIDTH - @margin_width) / font_width # 210mm for A4 portrait
num_lines = (text.length / chars_in_line).ceil
(num_lines * font_height) + vertical_margin + leading
end
  # Initialize the title and dirty flags for new plans
# --------------------------------------------------------
def set_creation_defaults
# Only run this before_validation because rails fires this before save/create
if self.id.nil?
self.title = "My plan (#{self.template.title})" if self.title.nil? && !self.template.nil?
end
end
end
| 1 | 17,122 | If you want to express "any role that is not creator" you could use the following statement: Role.where(plan: self).where(Role.not_creator_condition).any? which would be less verbose and a bit more efficient if it is used for Yes/No shared? | DMPRoadmap-roadmap | rb |
@@ -188,14 +188,12 @@ func migrateJSONFile(from, to string) (bool, error) {
if len(chain) == 0 {
return nil, nil
}
- // The chain is in one of three states:
+ // The chain is in one of two states:
// 1) single self-signed cert
- // 2) single upstream-signed cert, implying upstream_bundle=false
- // 3) an upstream-signed cert followed by any intermediates and a root
+ // 2) an upstream-signed cert followed by any intermediates and a root
//
-	// The CA should only be considered an "intermediate" in case #3, so a
-	// check for more than one cert should be sufficient for that. However,
-	// we don't want the root in the chain anymore, so remove it.
+ // In the case where the CA is an intermediate, we don't want to store
+ // the root, so remove it.
certificate := chain[0].Raw
var upstreamChain [][]byte
if len(chain) > 1 { | 1 | package ca
import (
"crypto/x509"
"encoding/json"
"encoding/pem"
"io/ioutil"
"os"
"sort"
"sync"
"time"
"github.com/golang/protobuf/proto"
"github.com/spiffe/spire/pkg/common/diskutil"
"github.com/spiffe/spire/proto/private/server/journal"
"github.com/spiffe/spire/proto/spire/common"
"github.com/zeebo/errs"
)
const (
// journalCap is the maximum number of entries per type that we'll
// hold onto.
journalCap = 10
// journalPEMType is the type in the PEM header
journalPEMType = "SPIRE CA JOURNAL"
)
type JournalEntries = journal.Entries
type X509CAEntry = journal.X509CAEntry
type JWTKeyEntry = journal.JWTKeyEntry
// Journal stores X509 CAs and JWT keys on disk as they are rotated by the
// manager. The data format on disk is a PEM encoded protocol buffer.
type Journal struct {
path string
mu sync.RWMutex
entries *JournalEntries
}
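// LoadJournal loads the journal from the given path. If the file does not
// exist yet, an empty journal is returned.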
func LoadJournal(path string) (*Journal, error) {
j := &Journal{
path: path,
entries: new(JournalEntries),
}
pemBytes, err := ioutil.ReadFile(path)
if err != nil {
if os.IsNotExist(err) {
return j, nil
}
return nil, errs.Wrap(err)
}
pemBlock, _ := pem.Decode(pemBytes)
if pemBlock == nil {
return nil, errs.New("invalid PEM block")
}
if pemBlock.Type != journalPEMType {
return nil, errs.New("invalid PEM block type %q", pemBlock.Type)
}
if err := proto.Unmarshal(pemBlock.Bytes, j.entries); err != nil {
return nil, errs.New("unable to unmarshal entries: %v", err)
}
return j, nil
}
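// Entries returns a copy of the current journal entries, made under the
// read lock so callers may use the result freely.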
func (j *Journal) Entries() *JournalEntries {
j.mu.RLock()
defer j.mu.RUnlock()
return proto.Clone(j.entries).(*JournalEntries)
}
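// AppendX509CA appends an X509 CA entry for the given slot and persists the
// journal to disk, capping the number of retained entries at journalCap.
// On a failed save, the in-memory entries are rolled back.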
func (j *Journal) AppendX509CA(slotID string, issuedAt time.Time, x509CA *X509CA) error {
j.mu.Lock()
defer j.mu.Unlock()
backup := j.entries.X509CAs
j.entries.X509CAs = append(j.entries.X509CAs, &X509CAEntry{
SlotId: slotID,
IssuedAt: issuedAt.Unix(),
Certificate: x509CA.Certificate.Raw,
UpstreamChain: chainDER(x509CA.UpstreamChain),
})
exceeded := len(j.entries.X509CAs) - journalCap
if exceeded > 0 {
		// copy into a new slice so the backing array doesn't keep growing as
		// we drop the oldest entries
x509CAs := make([]*X509CAEntry, journalCap)
copy(x509CAs, j.entries.X509CAs[exceeded:])
j.entries.X509CAs = x509CAs
}
if err := j.save(); err != nil {
j.entries.X509CAs = backup
return err
}
return nil
}
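// AppendJWTKey appends a JWT key entry for the given slot and persists the
// journal to disk, capping the number of retained entries at journalCap.
// On a failed save, the in-memory entries are rolled back.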
func (j *Journal) AppendJWTKey(slotID string, issuedAt time.Time, jwtKey *JWTKey) error {
j.mu.Lock()
defer j.mu.Unlock()
pkixBytes, err := x509.MarshalPKIXPublicKey(jwtKey.Signer.Public())
if err != nil {
return errs.Wrap(err)
}
backup := j.entries.JwtKeys
j.entries.JwtKeys = append(j.entries.JwtKeys, &JWTKeyEntry{
SlotId: slotID,
IssuedAt: issuedAt.Unix(),
Kid: jwtKey.Kid,
PublicKey: pkixBytes,
NotAfter: jwtKey.NotAfter.Unix(),
})
exceeded := len(j.entries.JwtKeys) - journalCap
if exceeded > 0 {
		// copy into a new slice so the backing array doesn't keep growing as
		// we drop the oldest entries
jwtKeys := make([]*JWTKeyEntry, journalCap)
copy(jwtKeys, j.entries.JwtKeys[exceeded:])
j.entries.JwtKeys = jwtKeys
}
if err := j.save(); err != nil {
j.entries.JwtKeys = backup
return err
}
return nil
}
func (j *Journal) save() error {
return saveJournalEntries(j.path, j.entries)
}
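// saveJournalEntries marshals the entries into a PEM-encoded protocol
// buffer and atomically writes them to the given path.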
func saveJournalEntries(path string, entries *JournalEntries) error {
entriesBytes, err := proto.Marshal(entries)
if err != nil {
return errs.Wrap(err)
}
pemBytes := pem.EncodeToMemory(&pem.Block{
Type: journalPEMType,
Bytes: entriesBytes,
})
if err := diskutil.AtomicWriteFile(path, pemBytes, 0644); err != nil {
return errs.Wrap(err)
}
return nil
}
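// migrateJSONFile migrates the legacy JSON keypair file at "from" into the
// PEM journal format at "to", removing the JSON file afterwards. It returns
// true if a migration took place, or false if there was nothing to migrate.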
func migrateJSONFile(from, to string) (bool, error) {
type keypairData struct {
CAs map[string][]byte `json:"cas"`
PublicKeys map[string][]byte `json:"public_keys"`
}
jsonBytes, err := ioutil.ReadFile(from)
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, errs.New("error reading JSON file: %v", err)
}
data := new(keypairData)
if err := json.Unmarshal(jsonBytes, data); err != nil {
return false, errs.New("unable to decode JSON: %v", err)
}
parseX509CA := func(slotID string) (*X509CAEntry, error) {
certsBytes := data.CAs[x509CAKmKeyID(slotID)]
if len(certsBytes) == 0 {
return nil, nil
}
chain, err := x509.ParseCertificates(certsBytes)
if err != nil {
return nil, errs.New("failed to parse slot %q certificates: %v", slotID, err)
}
if len(chain) == 0 {
return nil, nil
}
// The chain is in one of three states:
// 1) single self-signed cert
// 2) single upstream-signed cert, implying upstream_bundle=false
// 3) an upstream-signed cert followed by any intermediates and a root
//
		// The CA should only be considered an "intermediate" in case #3, so a
		// check for more than one cert should be sufficient for that. However,
		// we don't want the root in the chain anymore, so remove it.
certificate := chain[0].Raw
var upstreamChain [][]byte
if len(chain) > 1 {
upstreamChain = chainDER(chain[:len(chain)-1])
}
return &X509CAEntry{
SlotId: slotID,
// Using NotBefore as IssuedAt is a close enough estimation.
IssuedAt: chain[0].NotBefore.Unix(),
Certificate: certificate,
UpstreamChain: upstreamChain,
}, nil
}
parseJWTKey := func(slotID string) (*JWTKeyEntry, error) {
entryData := data.PublicKeys[jwtKeyKmKeyID(slotID)]
if len(entryData) == 0 {
return nil, nil
}
publicKey := new(common.PublicKey)
if err := proto.Unmarshal(entryData, publicKey); err != nil {
return nil, errs.New("failed to parse slot %q public key: %v", slotID, err)
}
// Return a JWTKeyEntry w/o the IssuedAt. The CA and JWT key used to
// rotate at the same time, so the code below will estimate it based
		// on the CA for the same slot.
return &JWTKeyEntry{
SlotId: slotID,
PublicKey: publicKey.PkixBytes,
Kid: publicKey.Kid,
NotAfter: publicKey.NotAfter,
}, nil
}
aX509CA, err := parseX509CA("A")
if err != nil {
return false, err
}
bX509CA, err := parseX509CA("B")
if err != nil {
return false, err
}
aJWTKey, err := parseJWTKey("A")
if err != nil {
return false, err
}
bJWTKey, err := parseJWTKey("B")
if err != nil {
return false, err
}
	// for each slot, both the X509CA and JWTKey must be valid or we should
	// discard whichever is present, since the old rotation code rotated them
	// together and we need the X509CA to estimate the JWTKey "issued at" time.
entries := new(JournalEntries)
if aX509CA != nil && aJWTKey != nil {
aJWTKey.IssuedAt = aX509CA.IssuedAt
entries.X509CAs = append(entries.X509CAs, aX509CA)
entries.JwtKeys = append(entries.JwtKeys, aJWTKey)
}
if bX509CA != nil && bJWTKey != nil {
bJWTKey.IssuedAt = bX509CA.IssuedAt
entries.X509CAs = append(entries.X509CAs, bX509CA)
entries.JwtKeys = append(entries.JwtKeys, bJWTKey)
}
// sort in ascending "issued at" order
sort.Slice(entries.X509CAs, func(a, b int) bool {
return entries.X509CAs[a].IssuedAt < entries.X509CAs[b].IssuedAt
})
sort.Slice(entries.JwtKeys, func(a, b int) bool {
return entries.JwtKeys[a].IssuedAt < entries.JwtKeys[b].IssuedAt
})
// save the journal and remove the JSON file
if err := saveJournalEntries(to, entries); err != nil {
return false, err
}
if err := os.Remove(from); err != nil {
return false, errs.New("unable to remove old JSON file: %v", err)
}
return true, nil
}
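// chainDER returns the raw DER encoding of each certificate in the chain.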
func chainDER(chain []*x509.Certificate) [][]byte {
var der [][]byte
for _, cert := range chain {
der = append(der, cert.Raw)
}
return der
}
| 1 | 14,290 | Do we need any update on the test side? | spiffe-spire | go |
@@ -4663,6 +4663,10 @@ func (fbo *folderBranchOps) notifyOneOpLocked(ctx context.Context,
return nil
}
}
+ // Cancel any block prefetches for unreferenced blocks.
+ for _, ptr := range op.Unrefs() {
+ fbo.config.BlockOps().Prefetcher().CancelPrefetch(ptr.ID)
+ }
fbo.observers.batchChanges(ctx, changes)
return nil | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"fmt"
"os"
"reflect"
"strings"
"sync"
"time"
"github.com/keybase/backoff"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/go-framed-msgpack-rpc/rpc"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/kbfsmd"
"github.com/keybase/kbfs/kbfssync"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// mdReadType indicates whether a read needs an identify to be performed.
type mdReadType int
const (
// A read request that doesn't need an identify to be
// performed.
mdReadNoIdentify mdReadType = iota
// A read request that needs an identify to be performed (if
// it hasn't been already).
mdReadNeedIdentify
)
// mdUpdateType indicates update type.
type mdUpdateType int
const (
mdWrite mdUpdateType = iota
// A rekey request. Doesn't need an identify to be performed, as
// a rekey does its own (finer-grained) identifies.
mdRekey
)
type branchType int
const (
standard branchType = iota // an online, read-write branch
archive // an online, read-only branch
offline // an offline, read-write branch
archiveOffline // an offline, read-only branch
)
// Constants used in this file. TODO: Make these configurable?
const (
// MaxBlockSizeBytesDefault is the default maximum block size for KBFS.
// 512K blocks by default, block changes embedded max == 8K.
// Block size was chosen somewhat arbitrarily by trying to
// minimize the overall size of the history written by a user when
// appending 1KB writes to a file, up to a 1GB total file. Here
// is the output of a simple script that approximates that
// calculation:
//
// Total history size for 0065536-byte blocks: 1134341128192 bytes
// Total history size for 0131072-byte blocks: 618945052672 bytes
// Total history size for 0262144-byte blocks: 412786622464 bytes
// Total history size for 0524288-byte blocks: 412786622464 bytes
// Total history size for 1048576-byte blocks: 618945052672 bytes
// Total history size for 2097152-byte blocks: 1134341128192 bytes
// Total history size for 4194304-byte blocks: 2216672886784 bytes
MaxBlockSizeBytesDefault = 512 << 10
// Maximum number of blocks that can be sent in parallel
maxParallelBlockPuts = 100
// Maximum number of blocks that can be fetched in parallel
maxParallelBlockGets = 10
// Max response size for a single DynamoDB query is 1MB.
maxMDsAtATime = 10
// Cap the number of times we retry after a recoverable error
maxRetriesOnRecoverableErrors = 10
// When the number of dirty bytes exceeds this level, force a sync.
dirtyBytesThreshold = maxParallelBlockPuts * MaxBlockSizeBytesDefault
// The timeout for any background task.
backgroundTaskTimeout = 1 * time.Minute
// If it's been more than this long since our last update, check
// the current head before downloading all of the new revisions.
fastForwardTimeThresh = 15 * time.Minute
// If there are more than this many new revisions, fast forward
// rather than downloading them all.
fastForwardRevThresh = 50
)
type fboMutexLevel mutexLevel
const (
fboMDWriter fboMutexLevel = 1
fboHead fboMutexLevel = 2
fboBlock fboMutexLevel = 3
)
func (o fboMutexLevel) String() string {
switch o {
case fboMDWriter:
return "mdWriterLock"
case fboHead:
return "headLock"
case fboBlock:
return "blockLock"
default:
return fmt.Sprintf("Invalid fboMutexLevel %d", int(o))
}
}
func fboMutexLevelToString(o mutexLevel) string {
return (fboMutexLevel(o)).String()
}
// Rules for working with lockState in FBO:
//
// - Every "execution flow" (i.e., program flow that happens
// sequentially) needs its own lockState object. This usually means
// that each "public" FBO method does:
//
// lState := makeFBOLockState()
//
// near the top.
//
// - Plumb lState through to all functions that hold any of the
// relevant locks, or are called under those locks.
//
// This way, violations of the lock hierarchy will be detected at
// runtime.
func makeFBOLockState() *lockState {
return makeLevelState(fboMutexLevelToString)
}
// blockLock is just like a sync.RWMutex, but with an extra operation
// (DoRUnlockedIfPossible).
type blockLock struct {
leveledRWMutex
locked bool
}
func (bl *blockLock) Lock(lState *lockState) {
bl.leveledRWMutex.Lock(lState)
bl.locked = true
}
func (bl *blockLock) Unlock(lState *lockState) {
bl.locked = false
bl.leveledRWMutex.Unlock(lState)
}
// DoRUnlockedIfPossible must be called when r- or w-locked. If
// r-locked, r-unlocks, runs the given function, and r-locks after
// it's done. Otherwise, just runs the given function.
func (bl *blockLock) DoRUnlockedIfPossible(lState *lockState, f func(*lockState)) {
if !bl.locked {
bl.RUnlock(lState)
defer bl.RLock(lState)
}
f(lState)
}
// headTrustStatus marks whether the head is from a trusted or
// untrusted source. When rekeying we get the head MD by folder id
// and do not check the tlf handle
type headTrustStatus int
const (
headUntrusted headTrustStatus = iota
headTrusted
)
type cachedDirOp struct {
dirOp op
nodes []Node
}
// folderBranchOps implements the KBFSOps interface for a specific
// branch of a specific folder. It is go-routine safe for operations
// within the folder.
//
// We use locks to protect against multiple goroutines accessing the
// same folder-branch. The goal with our locking strategy is to maximize
// concurrent access whenever possible. See design/state_machine.md
// for more details. There are three important locks:
//
// 1) mdWriterLock: Any "remote-sync" operation (one which modifies the
// folder's metadata) must take this lock during the entirety of
// its operation, to avoid forking the MD.
//
// 2) headLock: This is a read/write mutex. It must be taken for
// reading before accessing any part of the current head MD. It
// should be taken for the shortest time possible -- that means in
// general that it should be taken, and the MD copied to a
// goroutine-local variable, and then it can be released.
// Remote-sync operations should take it for writing after pushing
// all of the blocks and MD to the KBFS servers (i.e., all network
// accesses), and then hold it until after all notifications have
// been fired, to ensure that no concurrent "local" operations ever
// see inconsistent state locally.
//
// 3) blockLock: This too is a read/write mutex. It must be taken for
// reading before accessing any blocks in the block cache that
// belong to this folder/branch. This includes checking their
// dirty status. It should be taken for the shortest time possible
// -- that means in general it should be taken, and then the blocks
// that will be modified should be copied to local variables in the
// goroutine, and then it should be released. The blocks should
// then be modified locally, and then readied and pushed out
// remotely. Only after the blocks have been pushed to the server
// should a remote-sync operation take the lock again (this time
// for writing) and put/finalize the blocks. Write and Truncate
// should take blockLock for their entire lifetime, since they
// don't involve writes over the network. Furthermore, if a block
// is not in the cache and needs to be fetched, we should release
// the mutex before doing the network operation, and lock it again
// before writing the block back to the cache.
//
// We want to allow writes and truncates to a file that's currently
// being sync'd, like any good networked file system. The tricky part
// is making sure the changes can both: a) be read while the sync is
// happening, and b) be applied to the new file path after the sync is
// done.
//
// For now, we just do the dumb, brute force thing: if a block
// is currently being sync'd, it copies the block and puts it back
// into the cache as modified. Then, when the sync finishes, it
// throws away the modified blocks and re-applies the change to the
// new file path (which might have a completely different set of
// blocks, so we can't just reuse the blocks that were modified during
// the sync.)
type folderBranchOps struct {
config Config
folderBranch FolderBranch
bid kbfsmd.BranchID // protected by mdWriterLock
bType branchType
observers *observerList
// these locks, when locked concurrently by the same goroutine,
// should only be taken in the following order to avoid deadlock:
mdWriterLock leveledMutex // taken by any method making MD modifications
dirOps []cachedDirOp
// protects access to head, headStatus, latestMergedRevision,
// and hasBeenCleared.
headLock leveledRWMutex
head ImmutableRootMetadata
headStatus headTrustStatus
// latestMergedRevision tracks the latest heard merged revision on server
latestMergedRevision kbfsmd.Revision
// Has this folder ever been cleared?
hasBeenCleared bool
blocks folderBlockOps
prepper folderUpdatePrepper
// nodeCache itself is goroutine-safe, but this object's use
// of it has special requirements:
//
// - Reads can call PathFromNode() unlocked, since there are
// no guarantees with concurrent reads.
//
	//   - Operations that take mdWriterLock always need the
	//     most up-to-date paths, so those must call
// PathFromNode() under mdWriterLock.
//
// - Block write operations (write/truncate/sync) need to
// coordinate. Specifically, sync must make sure that
// blocks referenced in a path (including all of the child
// blocks) must exist in the cache during calls to
// PathFromNode from write/truncate. This means that sync
// must modify dirty file blocks only under blockLock, and
// write/truncate must call PathFromNode() under
// blockLock.
//
// Furthermore, calls to UpdatePointer() must happen
// before the copy-on-write mode induced by Sync() is
// finished.
nodeCache NodeCache
// Whether we've identified this TLF or not.
identifyLock sync.Mutex
identifyDone bool
identifyTime time.Time
// The current status summary for this folder
status *folderBranchStatusKeeper
// How to log
log traceLogger
deferLog traceLogger
// Closed on shutdown
shutdownChan chan struct{}
// Can be used to turn off notifications for a while (e.g., for testing)
updatePauseChan chan (<-chan struct{})
cancelUpdatesLock sync.Mutex
// Cancels the goroutine currently waiting on TLF MD updates.
cancelUpdates context.CancelFunc
// After a shutdown, this channel will be closed when the register
// goroutine completes.
updateDoneChan chan struct{}
// forceSyncChan is read from by the background sync process
// to know when it should sync immediately.
forceSyncChan <-chan struct{}
// syncNeededChan is signalled when a buffered write happens, and
// lets the background syncer wait rather than waking up all the
// time.
syncNeededChan chan struct{}
// How to resolve conflicts
cr *ConflictResolver
// Helper class for archiving and cleaning up the blocks for this TLF
fbm *folderBlockManager
rekeyFSM RekeyFSM
editHistory *TlfEditHistory
branchChanges kbfssync.RepeatedWaitGroup
mdFlushes kbfssync.RepeatedWaitGroup
forcedFastForwards kbfssync.RepeatedWaitGroup
merkleFetches kbfssync.RepeatedWaitGroup
muLastGetHead sync.Mutex
	// We record a timestamp every time getHead or getTrustedHead is called, and
	// use this as a heuristic for whether the user is actively using KBFS. If
	// the user has been generating KBFS activity recently, it makes sense to
	// try to reconnect as soon as possible in case a deployment causes a
	// disconnection.
lastGetHead time.Time
}
var _ KBFSOps = (*folderBranchOps)(nil)
var _ fbmHelper = (*folderBranchOps)(nil)
// newFolderBranchOps constructs a new folderBranchOps object.
func newFolderBranchOps(ctx context.Context, config Config, fb FolderBranch,
bType branchType) *folderBranchOps {
var nodeCache NodeCache
if config.Mode() == InitMinimal {
// If we're in minimal mode, let the node cache remain nil to
// ensure that the user doesn't try any data reads or writes.
} else {
nodeCache = newNodeCacheStandard(fb)
}
// make logger
branchSuffix := ""
if fb.Branch != MasterBranch {
branchSuffix = " " + string(fb.Branch)
}
tlfStringFull := fb.Tlf.String()
// Shorten the TLF ID for the module name. 8 characters should be
// unique enough for a local node.
log := config.MakeLogger(fmt.Sprintf("FBO %s%s", tlfStringFull[:8],
branchSuffix))
// But print it out once in full, just in case.
log.CInfof(ctx, "Created new folder-branch for %s", tlfStringFull)
observers := newObserverList()
mdWriterLock := makeLeveledMutex(mutexLevel(fboMDWriter), &sync.Mutex{})
headLock := makeLeveledRWMutex(mutexLevel(fboHead), &sync.RWMutex{})
blockLockMu := makeLeveledRWMutex(mutexLevel(fboBlock), &sync.RWMutex{})
forceSyncChan := make(chan struct{})
fbo := &folderBranchOps{
config: config,
folderBranch: fb,
bid: kbfsmd.BranchID{},
bType: bType,
observers: observers,
status: newFolderBranchStatusKeeper(config, nodeCache),
mdWriterLock: mdWriterLock,
headLock: headLock,
blocks: folderBlockOps{
config: config,
log: log,
folderBranch: fb,
observers: observers,
forceSyncChan: forceSyncChan,
blockLock: blockLock{
leveledRWMutex: blockLockMu,
},
dirtyFiles: make(map[BlockPointer]*dirtyFile),
deferred: make(map[BlockRef]deferredState),
unrefCache: make(map[BlockRef]*syncInfo),
deCache: make(map[BlockRef]deCacheEntry),
nodeCache: nodeCache,
},
nodeCache: nodeCache,
log: traceLogger{log},
deferLog: traceLogger{log.CloneWithAddedDepth(1)},
shutdownChan: make(chan struct{}),
updatePauseChan: make(chan (<-chan struct{})),
forceSyncChan: forceSyncChan,
syncNeededChan: make(chan struct{}, 1),
}
fbo.prepper = folderUpdatePrepper{
config: config,
folderBranch: fb,
blocks: &fbo.blocks,
log: log,
}
fbo.cr = NewConflictResolver(config, fbo)
fbo.fbm = newFolderBlockManager(config, fb, fbo)
fbo.editHistory = NewTlfEditHistory(config, fbo, log)
fbo.rekeyFSM = NewRekeyFSM(fbo)
if config.DoBackgroundFlushes() {
go fbo.backgroundFlusher()
}
return fbo
}
// markForReIdentifyIfNeeded checks whether this tlf is identified and marks
// it for lazy reidentification if it exceeds time limits.
func (fbo *folderBranchOps) markForReIdentifyIfNeeded(now time.Time, maxValid time.Duration) {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
if fbo.identifyDone && (now.Before(fbo.identifyTime) || fbo.identifyTime.Add(maxValid).Before(now)) {
fbo.log.CDebugf(nil, "Expiring identify from %v", fbo.identifyTime)
fbo.identifyDone = false
}
}
// Shutdown safely shuts down any background goroutines that may have
// been launched by folderBranchOps.
func (fbo *folderBranchOps) Shutdown(ctx context.Context) error {
if fbo.config.CheckStateOnShutdown() {
lState := makeFBOLockState()
if fbo.blocks.GetState(lState) == dirtyState {
fbo.log.CDebugf(ctx, "Skipping state-checking due to dirty state")
} else if !fbo.isMasterBranch(lState) {
fbo.log.CDebugf(ctx, "Skipping state-checking due to being staged")
} else {
// Make sure we're up to date first
if err := fbo.SyncFromServerForTesting(ctx,
fbo.folderBranch, nil); err != nil {
return err
}
// Check the state for consistency before shutting down.
sc := NewStateChecker(fbo.config)
if err := sc.CheckMergedState(ctx, fbo.id()); err != nil {
return err
}
}
}
close(fbo.shutdownChan)
fbo.merkleFetches.Wait(ctx)
fbo.cr.Shutdown()
fbo.fbm.shutdown()
fbo.editHistory.Shutdown()
fbo.rekeyFSM.Shutdown()
// Wait for the update goroutine to finish, so that we don't have
// any races with logging during test reporting.
if fbo.updateDoneChan != nil {
<-fbo.updateDoneChan
}
return nil
}
func (fbo *folderBranchOps) id() tlf.ID {
return fbo.folderBranch.Tlf
}
func (fbo *folderBranchOps) branch() BranchName {
return fbo.folderBranch.Branch
}
func (fbo *folderBranchOps) GetFavorites(ctx context.Context) (
[]Favorite, error) {
return nil, errors.New("GetFavorites is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) RefreshCachedFavorites(ctx context.Context) {
// no-op
}
func (fbo *folderBranchOps) DeleteFavorite(ctx context.Context,
fav Favorite) error {
return errors.New("DeleteFavorite is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) AddFavorite(ctx context.Context,
fav Favorite) error {
return errors.New("AddFavorite is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) addToFavorites(ctx context.Context,
favorites *Favorites, created bool) (err error) {
lState := makeFBOLockState()
head := fbo.getTrustedHead(lState)
if head == (ImmutableRootMetadata{}) {
return OpsCantHandleFavorite{"Can't add a favorite without a handle"}
}
return fbo.addToFavoritesByHandle(ctx, favorites, head.GetTlfHandle(), created)
}
func (fbo *folderBranchOps) addToFavoritesByHandle(ctx context.Context,
favorites *Favorites, handle *TlfHandle, created bool) (err error) {
if _, err := fbo.config.KBPKI().GetCurrentSession(ctx); err != nil {
// Can't favorite while not logged in
return nil
}
favorites.AddAsync(ctx, handle.toFavToAdd(created))
return nil
}
func (fbo *folderBranchOps) deleteFromFavorites(ctx context.Context,
favorites *Favorites) error {
if _, err := fbo.config.KBPKI().GetCurrentSession(ctx); err != nil {
// Can't unfavorite while not logged in
return nil
}
lState := makeFBOLockState()
head := fbo.getTrustedHead(lState)
if head == (ImmutableRootMetadata{}) {
// This can happen when identifies fail and the head is never set.
return OpsCantHandleFavorite{"Can't delete a favorite without a handle"}
}
h := head.GetTlfHandle()
return favorites.Delete(ctx, h.ToFavorite())
}
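// doFavoritesOp applies the given favorites operation, preferring the
// passed-in handle (when non-nil) over the current head's handle.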
func (fbo *folderBranchOps) doFavoritesOp(ctx context.Context,
favs *Favorites, fop FavoritesOp, handle *TlfHandle) error {
switch fop {
case FavoritesOpNoChange:
return nil
case FavoritesOpAdd:
if handle != nil {
return fbo.addToFavoritesByHandle(ctx, favs, handle, false)
}
return fbo.addToFavorites(ctx, favs, false)
case FavoritesOpAddNewlyCreated:
if handle != nil {
return fbo.addToFavoritesByHandle(ctx, favs, handle, true)
}
return fbo.addToFavorites(ctx, favs, true)
case FavoritesOpRemove:
return fbo.deleteFromFavorites(ctx, favs)
default:
return InvalidFavoritesOpError{}
}
}
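// updateLastGetHeadTimestamp records the current time as the most recent
// head access, for use in the active-use reconnection heuristic.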
func (fbo *folderBranchOps) updateLastGetHeadTimestamp() {
fbo.muLastGetHead.Lock()
defer fbo.muLastGetHead.Unlock()
fbo.lastGetHead = fbo.config.Clock().Now()
}
// getTrustedHead should not be called outside of folder_branch_ops.go.
// Returns ImmutableRootMetadata{} when the head is not trusted.
// See the comment on headTrustStatus for more information.
func (fbo *folderBranchOps) getTrustedHead(lState *lockState) ImmutableRootMetadata {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
if fbo.headStatus == headUntrusted {
return ImmutableRootMetadata{}
}
	// This triggers any mdserver backoff timer to fast forward. In case of a
	// deployment, this causes the KBFS client to try to reconnect to the
	// mdserver immediately rather than waiting until the random backoff timer
	// is up. Note that this doesn't necessarily guarantee that the fbo handler
	// that called this method would get the latest MD.
fbo.config.MDServer().FastForwardBackoff()
fbo.updateLastGetHeadTimestamp()
return fbo.head
}
// getHead should not be called outside of folder_branch_ops.go.
func (fbo *folderBranchOps) getHead(lState *lockState) (
ImmutableRootMetadata, headTrustStatus) {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
// See getTrustedHead for explanation.
fbo.config.MDServer().FastForwardBackoff()
fbo.updateLastGetHeadTimestamp()
return fbo.head, fbo.headStatus
}
// isMasterBranch should not be called if mdWriterLock is already taken.
func (fbo *folderBranchOps) isMasterBranch(lState *lockState) bool {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.bid == kbfsmd.NullBranchID
}
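// isMasterBranchLocked is like isMasterBranch, but assumes mdWriterLock is
// already taken.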
func (fbo *folderBranchOps) isMasterBranchLocked(lState *lockState) bool {
fbo.mdWriterLock.AssertLocked(lState)
return fbo.bid == kbfsmd.NullBranchID
}
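// setBranchIDLocked sets the branch ID, resetting conflict resolution
// state whenever the branch changes.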
func (fbo *folderBranchOps) setBranchIDLocked(lState *lockState, bid kbfsmd.BranchID) {
fbo.mdWriterLock.AssertLocked(lState)
if fbo.bid != bid {
fbo.cr.BeginNewBranch()
}
fbo.bid = bid
if bid == kbfsmd.NullBranchID {
fbo.status.setCRSummary(nil, nil)
}
}
var errNoFlushedRevisions = errors.New("No flushed MDs yet")
var errNoMergedRevWhileStaged = errors.New(
"Cannot find most recent merged revision while staged")
// getJournalPredecessorRevision returns the revision that precedes
// the current journal head if journaling is enabled and there are
// unflushed MD updates; otherwise it returns
// kbfsmd.RevisionUninitialized. If there aren't any flushed MD
// revisions, it returns errNoFlushedRevisions.
func (fbo *folderBranchOps) getJournalPredecessorRevision(ctx context.Context) (
kbfsmd.Revision, error) {
jServer, err := GetJournalServer(fbo.config)
if err != nil {
// Journaling is disabled entirely.
return kbfsmd.RevisionUninitialized, nil
}
jStatus, err := jServer.JournalStatus(fbo.id())
if err != nil {
// Journaling is disabled for this TLF, so use the local head.
// TODO: JournalStatus could return other errors (likely
// file/disk corruption) that indicate a real problem, so it
// might be nice to type those errors so we can distinguish
// them.
return kbfsmd.RevisionUninitialized, nil
}
if jStatus.BranchID != kbfsmd.NullBranchID.String() {
return kbfsmd.RevisionUninitialized, errNoMergedRevWhileStaged
}
if jStatus.RevisionStart == kbfsmd.RevisionUninitialized {
// The journal is empty, so the local head must be the most recent.
return kbfsmd.RevisionUninitialized, nil
} else if jStatus.RevisionStart == kbfsmd.RevisionInitial {
// Nothing has been flushed to the servers yet, so don't
// return anything.
return kbfsmd.RevisionUninitialized, errNoFlushedRevisions
}
return jStatus.RevisionStart - 1, nil
}
// validateHeadLocked validates an untrusted head and sets it as trusted.
// see the headTrustStatus comment for more information.
func (fbo *folderBranchOps) validateHeadLocked(
ctx context.Context, lState *lockState, md ImmutableRootMetadata) error {
fbo.headLock.AssertLocked(lState)
// Validate fbo against fetched md and discard the fetched one.
if fbo.head.TlfID() != md.TlfID() {
fbo.log.CCriticalf(ctx, "Fake untrusted TLF encountered %v %v %v %v", fbo.head.TlfID(), md.TlfID(), fbo.head.mdID, md.mdID)
return kbfsmd.MDTlfIDMismatch{CurrID: fbo.head.TlfID(), NextID: md.TlfID()}
}
fbo.headStatus = headTrusted
return nil
}
func (fbo *folderBranchOps) setHeadLocked(
ctx context.Context, lState *lockState,
md ImmutableRootMetadata, headStatus headTrustStatus) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
isFirstHead := fbo.head == ImmutableRootMetadata{}
wasReadable := false
if !isFirstHead {
if headStatus == headUntrusted {
panic("setHeadLocked: Trying to set an untrusted head over an existing head")
}
wasReadable = fbo.head.IsReadable()
if fbo.headStatus == headUntrusted {
err := fbo.validateHeadLocked(ctx, lState, md)
if err != nil {
return err
}
if fbo.head.mdID == md.mdID {
return nil
}
}
if fbo.head.mdID == md.mdID {
panic(errors.Errorf("Re-putting the same MD: %s", md.mdID))
}
}
fbo.log.CDebugf(ctx, "Setting head revision to %d", md.Revision())
// If this is the first time the MD is being set, and we are
// operating on unmerged data, initialize the state properly and
// kick off conflict resolution.
if isFirstHead && md.MergedStatus() == kbfsmd.Unmerged {
fbo.setBranchIDLocked(lState, md.BID())
// Use uninitialized for the merged branch; the unmerged
// revision is enough to trigger conflict resolution.
fbo.cr.Resolve(ctx, md.Revision(), kbfsmd.RevisionUninitialized)
} else if md.MergedStatus() == kbfsmd.Merged {
journalEnabled := TLFJournalEnabled(fbo.config, fbo.id())
if journalEnabled {
if isFirstHead {
// If journaling is on, and this is the first head
// we're setting, we have to make sure we use the
// server's notion of the latest MD, not the one
// potentially coming from our journal. If there are
// no flushed revisions, it's not a hard error, and we
// just leave the latest merged revision
// uninitialized.
journalPred, err := fbo.getJournalPredecessorRevision(ctx)
switch err {
case nil:
// journalPred will be
// kbfsmd.RevisionUninitialized when the journal
// is empty.
if journalPred >= kbfsmd.RevisionInitial {
fbo.setLatestMergedRevisionLocked(
ctx, lState, journalPred, false)
} else {
fbo.setLatestMergedRevisionLocked(ctx, lState,
md.Revision(), false)
}
case errNoFlushedRevisions:
// The server has no revisions, so leave the
// latest merged revision uninitialized.
default:
return err
}
} else {
// If this isn't the first head, then this is either
// an update from the server, or an update just
// written by the client. But since journaling is on,
				// the latter case will be handled by onMDFlush when
// the update is properly flushed to the server. So
// ignore updates that haven't yet been put to the
// server.
if md.putToServer {
fbo.setLatestMergedRevisionLocked(
ctx, lState, md.Revision(), false)
}
}
} else {
// This is a merged revision, and journaling is disabled,
// so it's definitely the latest revision on the server as
// well.
fbo.setLatestMergedRevisionLocked(ctx, lState, md.Revision(), false)
}
}
// Make sure that any unembedded block changes have been swapped
// back in.
if fbo.config.Mode() != InitMinimal &&
md.data.Changes.Info.BlockPointer != zeroPtr &&
len(md.data.Changes.Ops) == 0 {
return errors.New("Must swap in block changes before setting head")
}
fbo.head = md
if isFirstHead && headStatus == headTrusted {
fbo.headStatus = headTrusted
}
fbo.status.setRootMetadata(md)
if isFirstHead {
// Start registering for updates right away, using this MD
// as a starting point. For now only the master branch can
// get updates.
if fbo.branch() == MasterBranch && fbo.config.Mode() != InitSingleOp {
fbo.updateDoneChan = make(chan struct{})
go fbo.registerAndWaitForUpdates()
}
// If journaling is enabled, we should make sure to enable it
// for this TLF. That's because we may have received the TLF
// ID from the service, rather than via a GetIDForHandle call,
// and so we might have skipped the journal.
if jServer, err := GetJournalServer(fbo.config); err == nil {
_, _ = jServer.getTLFJournal(fbo.id(), md.GetTlfHandle())
}
}
if !wasReadable && md.IsReadable() {
// Let any listeners know that this folder is now readable,
// which may indicate that a rekey successfully took place.
fbo.config.Reporter().Notify(ctx, mdReadSuccessNotification(
md.GetTlfHandle(), md.TlfID().Type() == tlf.Public))
}
return nil
}
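// The set*Head* wrappers below all funnel into setHeadLocked for
// specific transitions: initial heads (untrusted, trusted, or
// brand-new), successors (applying updates), predecessors (unstaging),
// and conflict resolution. All of them require both mdWriterLock and
// headLock to be held.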
// setInitialHeadUntrustedLocked is for when the given RootMetadata
// was fetched not due to a user action, i.e. via a Rekey
// notification, and we don't have a TLF name to check against.
func (fbo *folderBranchOps) setInitialHeadUntrustedLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.New("Unexpected non-nil head in setInitialHeadUntrustedLocked")
}
return fbo.setHeadLocked(ctx, lState, md, headUntrusted)
}
// setNewInitialHeadLocked is for when we're creating a brand-new TLF.
// This is trusted.
func (fbo *folderBranchOps) setNewInitialHeadLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.New("Unexpected non-nil head in setNewInitialHeadLocked")
}
if md.Revision() != kbfsmd.RevisionInitial {
return errors.Errorf("setNewInitialHeadLocked unexpectedly called with revision %d", md.Revision())
}
return fbo.setHeadLocked(ctx, lState, md, headTrusted)
}
// setInitialHeadTrustedLocked is for when the given RootMetadata
// was fetched due to a user action, and will be checked against the
// TLF name.
func (fbo *folderBranchOps) setInitialHeadTrustedLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.New("Unexpected non-nil head in setInitialHeadUntrustedLocked")
}
return fbo.setHeadLocked(ctx, lState, md, headTrusted)
}
// setHeadSuccessorLocked is for when we're applying updates from the
// server or when we're applying new updates we created ourselves.
func (fbo *folderBranchOps) setHeadSuccessorLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata, rebased bool) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head == (ImmutableRootMetadata{}) {
// This can happen in tests via SyncFromServerForTesting().
return fbo.setInitialHeadTrustedLocked(ctx, lState, md)
}
if !rebased {
err := fbo.head.CheckValidSuccessor(fbo.head.mdID, md.ReadOnly())
if err != nil {
return err
}
}
oldHandle := fbo.head.GetTlfHandle()
newHandle := md.GetTlfHandle()
// Newer handles should be equal or more resolved over time.
//
// TODO: In some cases, they shouldn't, e.g. if we're on an
// unmerged branch. Add checks for this.
resolvesTo, partialResolvedOldHandle, err :=
oldHandle.ResolvesTo(
ctx, fbo.config.Codec(), fbo.config.KBPKI(), fbo.config.MDOps(),
*newHandle)
if err != nil {
return err
}
oldName := oldHandle.GetCanonicalName()
newName := newHandle.GetCanonicalName()
if !resolvesTo {
return IncompatibleHandleError{
oldName,
partialResolvedOldHandle.GetCanonicalName(),
newName,
}
}
err = fbo.setHeadLocked(ctx, lState, md, headTrusted)
if err != nil {
return err
}
if oldName != newName {
fbo.log.CDebugf(ctx, "Handle changed (%s -> %s)",
oldName, newName)
fbo.config.MDCache().ChangeHandleForID(oldHandle, newHandle)
// If the handle has changed, send out a notification.
fbo.observers.tlfHandleChange(ctx, fbo.head.GetTlfHandle())
// Also the folder should be re-identified given the
// newly-resolved assertions.
func() {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
fbo.identifyDone = false
}()
}
return nil
}
// setHeadPredecessorLocked is for when we're unstaging updates.
func (fbo *folderBranchOps) setHeadPredecessorLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head == (ImmutableRootMetadata{}) {
return errors.New("Unexpected nil head in setHeadPredecessorLocked")
}
if fbo.head.Revision() <= kbfsmd.RevisionInitial {
return errors.Errorf("setHeadPredecessorLocked unexpectedly called with revision %d", fbo.head.Revision())
}
if fbo.head.MergedStatus() != kbfsmd.Unmerged {
return errors.New("Unexpected merged head in setHeadPredecessorLocked")
}
err := md.CheckValidSuccessor(md.mdID, fbo.head.ReadOnly())
if err != nil {
return err
}
oldHandle := fbo.head.GetTlfHandle()
newHandle := md.GetTlfHandle()
// The two handles must be the same, since no rekeying is done
// while unmerged.
eq, err := oldHandle.Equals(fbo.config.Codec(), *newHandle)
if err != nil {
return err
}
if !eq {
return errors.Errorf(
"head handle %v unexpectedly not equal to new handle = %v",
oldHandle, newHandle)
}
return fbo.setHeadLocked(ctx, lState, md, headTrusted)
}
// setHeadConflictResolvedLocked is for when we're setting the merged
// update with resolved conflicts.
func (fbo *folderBranchOps) setHeadConflictResolvedLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head.MergedStatus() != kbfsmd.Unmerged {
return errors.New("Unexpected merged head in setHeadConflictResolvedLocked")
}
if md.MergedStatus() != kbfsmd.Merged {
return errors.New("Unexpected unmerged update in setHeadConflictResolvedLocked")
}
return fbo.setHeadLocked(ctx, lState, md, headTrusted)
}
func (fbo *folderBranchOps) identifyOnce(
ctx context.Context, md ReadOnlyRootMetadata) error {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
ei := getExtendedIdentify(ctx)
if fbo.identifyDone && !ei.behavior.AlwaysRunIdentify() {
// TODO: provide a way for the service to break this cache when identify
// state changes on a TLF. For now, we do it this way to make chat work.
return nil
}
h := md.GetTlfHandle()
fbo.log.CDebugf(ctx, "Running identifies on %s", h.GetCanonicalPath())
kbpki := fbo.config.KBPKI()
err := identifyHandle(ctx, kbpki, kbpki, h)
if err != nil {
fbo.log.CDebugf(ctx, "Identify finished with error: %v", err)
// For now, if the identify fails, let the
// next function to hit this code path retry.
return err
}
if ei.behavior.WarningInsteadOfErrorOnBrokenTracks() &&
len(ei.getTlfBreakAndClose().Breaks) > 0 {
fbo.log.CDebugf(ctx,
"Identify finished with no error but broken proof warnings")
} else if ei.behavior == keybase1.TLFIdentifyBehavior_CHAT_SKIP {
fbo.log.CDebugf(ctx, "Identify skipped")
} else {
fbo.log.CDebugf(ctx, "Identify finished successfully")
fbo.identifyDone = true
fbo.identifyTime = fbo.config.Clock().Now()
}
return nil
}
// getMDForRead returns an existing md for a read operation. Note that
// MDs will not be fetched here.
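// Callers that are able to write typically handle the
// MDWriteNeededInRequest error by taking mdWriterLock and retrying the
// fetch with getMDForWriteOrRekeyLocked, roughly:
//
//	md, err := fbo.getMDForRead(ctx, lState, mdReadNeedIdentify)
//	if _, ok := err.(MDWriteNeededInRequest); ok {
//		fbo.mdWriterLock.Lock(lState)
//		defer fbo.mdWriterLock.Unlock(lState)
//		md, err = fbo.getMDForWriteOrRekeyLocked(ctx, lState, mdWrite)
//	}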
func (fbo *folderBranchOps) getMDForRead(
ctx context.Context, lState *lockState, rtype mdReadType) (
md ImmutableRootMetadata, err error) {
if rtype != mdReadNeedIdentify && rtype != mdReadNoIdentify {
panic("Invalid rtype in getMDLockedForRead")
}
md = fbo.getTrustedHead(lState)
if md != (ImmutableRootMetadata{}) {
if rtype != mdReadNoIdentify {
err = fbo.identifyOnce(ctx, md.ReadOnly())
}
return md, err
}
return ImmutableRootMetadata{}, MDWriteNeededInRequest{}
}
// getMDForWriteOrRekeyLocked can fetch MDs and identify them, and
// contains the fancier logic. For reading, use getMDForRead.
// Here we actually can fetch things from the server.
// Rekeys are untrusted.
func (fbo *folderBranchOps) getMDForWriteOrRekeyLocked(
ctx context.Context, lState *lockState, mdType mdUpdateType) (
md ImmutableRootMetadata, err error) {
defer func() {
if err != nil || mdType == mdRekey {
return
}
err = fbo.identifyOnce(ctx, md.ReadOnly())
}()
md = fbo.getTrustedHead(lState)
if md != (ImmutableRootMetadata{}) {
return md, nil
}
// MDs coming from rekey notifications are marked untrusted.
//
// TODO: Make tests not take this code path.
fbo.mdWriterLock.AssertLocked(lState)
// Not in cache, fetch from server and add to cache. First, see
// if this device has any unmerged commits -- take the latest one.
mdops := fbo.config.MDOps()
// get the head of the unmerged branch for this device (if any)
md, err = mdops.GetUnmergedForTLF(ctx, fbo.id(), kbfsmd.NullBranchID)
if err != nil {
return ImmutableRootMetadata{}, err
}
mergedMD, err := mdops.GetForTLF(ctx, fbo.id(), nil)
if err != nil {
return ImmutableRootMetadata{}, err
}
if mergedMD == (ImmutableRootMetadata{}) {
return ImmutableRootMetadata{},
errors.WithStack(NoMergedMDError{fbo.id()})
}
if md == (ImmutableRootMetadata{}) {
// There are no unmerged MDs for this device, so just use the current head.
md = mergedMD
} else {
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// We don't need to do this for the merged head,
// because setHeadLocked() already does that
// anyway.
fbo.setLatestMergedRevisionLocked(ctx, lState, mergedMD.Revision(), false)
}()
}
if md.data.Dir.Type != Dir && (!md.IsInitialized() || md.IsReadable()) {
return ImmutableRootMetadata{}, errors.Errorf("Got undecryptable RMD for %s: initialized=%t, readable=%t", fbo.id(), md.IsInitialized(), md.IsReadable())
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
headStatus := headTrusted
if mdType == mdRekey {
// If we already have a head (that has been filled after the initial
// check, but before we acquired the lock), then just return it.
if fbo.head != (ImmutableRootMetadata{}) {
return fbo.head, nil
}
headStatus = headUntrusted
}
err = fbo.setHeadLocked(ctx, lState, md, headStatus)
if err != nil {
return ImmutableRootMetadata{}, err
}
return md, nil
}
func (fbo *folderBranchOps) getMDForReadHelper(
ctx context.Context, lState *lockState, rtype mdReadType) (ImmutableRootMetadata, error) {
md, err := fbo.getMDForRead(ctx, lState, rtype)
if err != nil {
return ImmutableRootMetadata{}, err
}
if md.TlfID().Type() != tlf.Public {
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return ImmutableRootMetadata{}, err
}
isReader, err := md.IsReader(ctx, fbo.config.KBPKI(), session.UID)
if err != nil {
return ImmutableRootMetadata{}, err
}
if !isReader {
return ImmutableRootMetadata{}, NewReadAccessError(
md.GetTlfHandle(), session.Name, md.GetTlfHandle().GetCanonicalPath())
}
}
return md, nil
}
// getMostRecentFullyMergedMD is a helper method that returns the most
// recent merged MD that has been flushed to the server. This could
// be different from the current local head if journaling is on. If
// the journal is on a branch, it returns an error.
func (fbo *folderBranchOps) getMostRecentFullyMergedMD(ctx context.Context) (
ImmutableRootMetadata, error) {
mergedRev, err := fbo.getJournalPredecessorRevision(ctx)
if err != nil {
return ImmutableRootMetadata{}, err
}
if mergedRev == kbfsmd.RevisionUninitialized {
// No unflushed journal entries, so use the local head.
lState := makeFBOLockState()
return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify)
}
// Otherwise, use the specified revision.
rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), kbfsmd.NullBranchID,
mergedRev, kbfsmd.Merged, nil)
if err != nil {
return ImmutableRootMetadata{}, err
}
fbo.log.CDebugf(ctx, "Most recent fully merged revision is %d", mergedRev)
return rmd, nil
}
func (fbo *folderBranchOps) getMDForReadNoIdentify(
ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify)
}
func (fbo *folderBranchOps) getMDForReadNeedIdentify(
ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
return fbo.getMDForReadHelper(ctx, lState, mdReadNeedIdentify)
}
// getMDForReadNeedIdentifyOnMaybeFirstAccess should be called by a
// code path (like chat) that might be accessing this folder for the
// first time. Other folderBranchOps methods like Lookup which know
// the folder has already been accessed at least once (to get the root
// node, for example) do not need to call this. Unlike other getMD
// calls, this one may return a zero-valued ImmutableRootMetadata along
// with a nil error, to indicate that there isn't any MD for this TLF
// yet and one must be created by the caller.
func (fbo *folderBranchOps) getMDForReadNeedIdentifyOnMaybeFirstAccess(
ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
md, err := fbo.getMDForRead(ctx, lState, mdReadNeedIdentify)
if _, ok := err.(MDWriteNeededInRequest); ok {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
md, err = fbo.getMDForWriteOrRekeyLocked(ctx, lState, mdWrite)
}
if _, noMD := errors.Cause(err).(NoMergedMDError); noMD {
return ImmutableRootMetadata{}, nil
}
if err != nil {
return ImmutableRootMetadata{}, err
}
if md.TlfID().Type() != tlf.Public {
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return ImmutableRootMetadata{}, err
}
isReader, err := md.IsReader(ctx, fbo.config.KBPKI(), session.UID)
if err != nil {
return ImmutableRootMetadata{}, err
}
if !isReader {
return ImmutableRootMetadata{}, NewReadAccessError(
md.GetTlfHandle(), session.Name, md.GetTlfHandle().GetCanonicalPath())
}
}
return md, nil
}
func (fbo *folderBranchOps) getMDForWriteLockedForFilename(
ctx context.Context, lState *lockState, filename string) (
ImmutableRootMetadata, error) {
fbo.mdWriterLock.AssertLocked(lState)
md, err := fbo.getMDForWriteOrRekeyLocked(ctx, lState, mdWrite)
if err != nil {
return ImmutableRootMetadata{}, err
}
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return ImmutableRootMetadata{}, err
}
isWriter, err := md.IsWriter(
ctx, fbo.config.KBPKI(), session.UID, session.VerifyingKey)
if err != nil {
return ImmutableRootMetadata{}, err
}
if !isWriter {
return ImmutableRootMetadata{}, NewWriteAccessError(
md.GetTlfHandle(), session.Name, filename)
}
return md, nil
}
func (fbo *folderBranchOps) getSuccessorMDForWriteLockedForFilename(
ctx context.Context, lState *lockState, filename string) (
*RootMetadata, error) {
fbo.mdWriterLock.AssertLocked(lState)
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, filename)
if err != nil {
return nil, err
}
// Make a new successor of the current MD to hold the coming
// writes. The caller must pass this into `finalizeMDWriteLocked`
// or the changes will be lost.
return md.MakeSuccessor(ctx, fbo.config.MetadataVersion(),
fbo.config.Codec(),
fbo.config.KeyManager(), fbo.config.KBPKI(), fbo.config.KBPKI(),
md.mdID, true)
}
// getSuccessorMDForWriteLocked returns a new RootMetadata object with
// an incremented version number for modification. If the returned
// object is put to the MDServer (via MDOps), mdWriterLock must be
// held until then. (See comments for mdWriterLock above.)
func (fbo *folderBranchOps) getSuccessorMDForWriteLocked(
ctx context.Context, lState *lockState) (*RootMetadata, error) {
return fbo.getSuccessorMDForWriteLockedForFilename(ctx, lState, "")
}
func (fbo *folderBranchOps) getMDForRekeyWriteLocked(
ctx context.Context, lState *lockState) (
rmd *RootMetadata, lastWriterVerifyingKey kbfscrypto.VerifyingKey,
wasRekeySet bool, err error) {
fbo.mdWriterLock.AssertLocked(lState)
md, err := fbo.getMDForWriteOrRekeyLocked(ctx, lState, mdRekey)
if err != nil {
return nil, kbfscrypto.VerifyingKey{}, false, err
}
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return nil, kbfscrypto.VerifyingKey{}, false, err
}
handle := md.GetTlfHandle()
// Must be a reader or a writer (IsReader returns true for writers too).
if !handle.IsReader(session.UID) {
return nil, kbfscrypto.VerifyingKey{}, false,
NewRekeyPermissionError(md.GetTlfHandle(), session.Name)
}
newMd, err := md.MakeSuccessor(ctx, fbo.config.MetadataVersion(),
fbo.config.Codec(),
fbo.config.KeyManager(), fbo.config.KBPKI(), fbo.config.KBPKI(),
md.mdID, handle.IsWriter(session.UID))
if err != nil {
return nil, kbfscrypto.VerifyingKey{}, false, err
}
// readers shouldn't modify writer metadata
if !handle.IsWriter(session.UID) && !newMd.IsWriterMetadataCopiedSet() {
return nil, kbfscrypto.VerifyingKey{}, false,
NewRekeyPermissionError(handle, session.Name)
}
return newMd, md.LastModifyingWriterVerifyingKey(), md.IsRekeySet(), nil
}
func (fbo *folderBranchOps) nowUnixNano() int64 {
return fbo.config.Clock().Now().UnixNano()
}
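// maybeUnembedAndPutBlocks moves the MD's block changes out into
// separate blocks, and puts them to the block server, when the block
// splitter decides they are too large to embed in the MD object
// itself. It returns a nil blockPutState when the changes stay
// embedded.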
func (fbo *folderBranchOps) maybeUnembedAndPutBlocks(ctx context.Context,
md *RootMetadata) (*blockPutState, error) {
if fbo.config.BlockSplitter().ShouldEmbedBlockChanges(&md.data.Changes) {
return nil, nil
}
chargedTo, err := chargedToForTLF(
ctx, fbo.config.KBPKI(), fbo.config.KBPKI(), md.GetTlfHandle())
if err != nil {
return nil, err
}
bps := newBlockPutState(1)
err = fbo.prepper.unembedBlockChanges(
ctx, bps, md, &md.data.Changes, chargedTo)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
fbo.fbm.cleanUpBlockState(md.ReadOnly(), bps, blockDeleteOnMDFail)
}
}()
ptrsToDelete, err := doBlockPuts(ctx, fbo.config.BlockServer(),
fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, fbo.deferLog, md.TlfID(),
md.GetTlfHandle().GetCanonicalName(), *bps)
if err != nil {
return nil, err
}
if len(ptrsToDelete) > 0 {
return nil, errors.Errorf("Unexpected pointers to delete after "+
"unembedding block changes in gc op: %v", ptrsToDelete)
}
return bps, nil
}
// ResetRootBlock creates a new empty dir block and sets the given
// metadata's root block to it.
func ResetRootBlock(ctx context.Context, config Config,
rmd *RootMetadata) (Block, BlockInfo, ReadyBlockData, error) {
newDblock := NewDirBlock()
chargedTo, err := chargedToForTLF(
ctx, config.KBPKI(), config.KBPKI(), rmd.GetTlfHandle())
if err != nil {
return nil, BlockInfo{}, ReadyBlockData{}, err
}
info, plainSize, readyBlockData, err :=
ReadyBlock(ctx, config.BlockCache(), config.BlockOps(),
config.Crypto(), rmd.ReadOnly(), newDblock, chargedTo,
config.DefaultBlockType())
if err != nil {
return nil, BlockInfo{}, ReadyBlockData{}, err
}
now := config.Clock().Now().UnixNano()
rmd.data.Dir = DirEntry{
BlockInfo: info,
EntryInfo: EntryInfo{
Type: Dir,
Size: uint64(plainSize),
Mtime: now,
Ctime: now,
},
}
prevDiskUsage := rmd.DiskUsage()
rmd.SetDiskUsage(0)
// Redundant, since this is called only for brand-new or
// successor RMDs, but leave it in to be defensive.
rmd.ClearBlockChanges()
co := newCreateOpForRootDir()
rmd.AddOp(co)
rmd.AddRefBlock(rmd.data.Dir.BlockInfo)
// Set unref bytes to the previous disk usage, so that the
// accounting works out.
rmd.AddUnrefBytes(prevDiskUsage)
return newDblock, info, readyBlockData, nil
}
func (fbo *folderBranchOps) initMDLocked(
ctx context.Context, lState *lockState, md *RootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
handle := md.GetTlfHandle()
// make sure we're a writer before rekeying or putting any blocks.
isWriter, err := md.IsWriter(
ctx, fbo.config.KBPKI(), session.UID, session.VerifyingKey)
if err != nil {
return err
}
if !isWriter {
return NewWriteAccessError(
handle, session.Name, handle.GetCanonicalPath())
}
var expectedKeyGen kbfsmd.KeyGen
var tlfCryptKey *kbfscrypto.TLFCryptKey
switch md.TypeForKeying() {
case tlf.PublicKeying:
expectedKeyGen = kbfsmd.PublicKeyGen
case tlf.PrivateKeying:
var rekeyDone bool
// create a new set of keys for this metadata
rekeyDone, tlfCryptKey, err = fbo.config.KeyManager().Rekey(ctx, md, false)
if err != nil {
return err
}
if !rekeyDone {
return errors.Errorf("Initial rekey unexpectedly not done for "+
"private TLF %v", md.TlfID())
}
expectedKeyGen = kbfsmd.FirstValidKeyGen
case tlf.TeamKeying:
// Teams get their crypt key from the service, no need to
// rekey in KBFS.
tid, err := handle.FirstResolvedWriter().AsTeam()
if err != nil {
return err
}
keys, keyGen, err := fbo.config.KBPKI().GetTeamTLFCryptKeys(
ctx, tid, kbfsmd.UnspecifiedKeyGen)
if err != nil {
return err
}
if keyGen < kbfsmd.FirstValidKeyGen {
return errors.WithStack(
kbfsmd.InvalidKeyGenerationError{TlfID: md.TlfID(), KeyGen: keyGen})
}
expectedKeyGen = keyGen
md.bareMd.SetLatestKeyGenerationForTeamTLF(keyGen)
key, ok := keys[keyGen]
if !ok {
return errors.WithStack(
kbfsmd.InvalidKeyGenerationError{TlfID: md.TlfID(), KeyGen: keyGen})
}
tlfCryptKey = &key
}
keyGen := md.LatestKeyGeneration()
if keyGen != expectedKeyGen {
return kbfsmd.InvalidKeyGenerationError{TlfID: md.TlfID(), KeyGen: keyGen}
}
// create a dblock since one doesn't exist yet
newDblock, info, readyBlockData, err := ResetRootBlock(ctx, fbo.config, md)
if err != nil {
return err
}
// Some other thread got here first, so give up and let it go
// before we push anything to the servers.
if h, _ := fbo.getHead(lState); h != (ImmutableRootMetadata{}) {
fbo.log.CDebugf(ctx, "Head was already set, aborting")
return nil
}
if err = PutBlockCheckLimitErrs(ctx, fbo.config.BlockServer(),
fbo.config.Reporter(), md.TlfID(), info.BlockPointer, readyBlockData,
md.GetTlfHandle().GetCanonicalName()); err != nil {
return err
}
if err = fbo.config.BlockCache().Put(
info.BlockPointer, fbo.id(), newDblock, TransientEntry); err != nil {
return err
}
bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md)
if err != nil {
return err
}
err = fbo.finalizeBlocks(bps)
if err != nil {
return err
}
// Write out the new metadata. If journaling is enabled, we don't
// want the rekey to hit the journal and possibly end up on a
// conflict branch, so push straight to the server.
mdOps := fbo.config.MDOps()
if jServer, err := GetJournalServer(fbo.config); err == nil {
mdOps = jServer.delegateMDOps
}
irmd, err := mdOps.Put(
ctx, md, session.VerifyingKey, nil, keybase1.MDPriorityNormal)
isConflict := isRevisionConflict(err)
if err != nil && !isConflict {
return err
} else if isConflict {
return RekeyConflictError{err}
}
md.loadCachedBlockChanges(ctx, bps, fbo.log)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.Errorf(
"%v: Unexpected MD ID during new MD initialization: %v",
md.TlfID(), fbo.head.mdID)
}
err = fbo.setNewInitialHeadLocked(ctx, lState, irmd)
if err != nil {
return err
}
// cache any new TLF crypt key
if tlfCryptKey != nil {
err = fbo.config.KeyCache().PutTLFCryptKey(
md.TlfID(), keyGen, *tlfCryptKey)
if err != nil {
return err
}
}
return nil
}
func (fbo *folderBranchOps) GetTLFCryptKeys(ctx context.Context,
h *TlfHandle) (keys []kbfscrypto.TLFCryptKey, id tlf.ID, err error) {
return nil, tlf.ID{}, errors.New("GetTLFCryptKeys is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) GetTLFID(ctx context.Context, h *TlfHandle) (tlf.ID, error) {
return tlf.ID{}, errors.New("GetTLFID is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) GetOrCreateRootNode(
ctx context.Context, h *TlfHandle, branch BranchName) (
node Node, ei EntryInfo, err error) {
return nil, EntryInfo{}, errors.New("GetOrCreateRootNode is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) GetRootNode(
ctx context.Context, h *TlfHandle, branch BranchName) (
node Node, ei EntryInfo, err error) {
return nil, EntryInfo{}, errors.New("GetRootNode is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) checkNode(node Node) error {
fb := node.GetFolderBranch()
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
return nil
}
// SetInitialHeadFromServer sets the head to the given
// ImmutableRootMetadata, which must be retrieved from the MD server.
func (fbo *folderBranchOps) SetInitialHeadFromServer(
ctx context.Context, md ImmutableRootMetadata) (err error) {
fbo.log.CDebugf(ctx, "SetInitialHeadFromServer, revision=%d (%s)",
md.Revision(), md.MergedStatus())
defer func() {
fbo.deferLog.CDebugf(ctx,
"SetInitialHeadFromServer, revision=%d (%s) done: %+v",
md.Revision(), md.MergedStatus(), err)
}()
if md.IsReadable() && fbo.config.Mode() != InitMinimal {
// Request the root block, to ensure downstream prefetches occur.
_ = fbo.config.BlockOps().BlockRetriever().Request(ctx,
defaultOnDemandRequestPriority, md, md.data.Dir.BlockPointer,
&DirBlock{}, TransientEntry)
} else {
fbo.log.CDebugf(ctx,
"Setting an unreadable head with revision=%d", md.Revision())
}
// Return early if the head is already set. This avoids taking
// mdWriterLock for no reason, and it also avoids any side effects
// (e.g., calling `identifyOnce` and downloading the merged
// head) if head is already set.
lState := makeFBOLockState()
head, headStatus := fbo.getHead(lState)
if headStatus == headTrusted && head != (ImmutableRootMetadata{}) && head.mdID == md.mdID {
fbo.log.CDebugf(ctx, "Head MD already set to revision %d (%s), no "+
"need to set initial head again", md.Revision(), md.MergedStatus())
return nil
}
return runUnlessCanceled(ctx, func() error {
fb := FolderBranch{md.TlfID(), MasterBranch}
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
// Always identify first when trying to initialize the folder,
// even if we turn out not to be a writer. (We can't rely on
// the identifyOnce call in getMDForRead, because that isn't
// called from the initialization code path when the local
// user is not a valid writer.) Also, we want to make sure we
// fail before we set the head, otherwise future calls will
// succeed incorrectly.
err = fbo.identifyOnce(ctx, md.ReadOnly())
if err != nil {
return err
}
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
if md.MergedStatus() == kbfsmd.Unmerged {
mdops := fbo.config.MDOps()
mergedMD, err := mdops.GetForTLF(ctx, fbo.id(), nil)
if err != nil {
return err
}
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
fbo.setLatestMergedRevisionLocked(ctx, lState,
mergedMD.Revision(), false)
}()
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// Only update the head the first time; later it will be
// updated either directly via writes or through the
// background update processor.
if fbo.head == (ImmutableRootMetadata{}) {
err = fbo.setInitialHeadTrustedLocked(ctx, lState, md)
if err != nil {
return err
}
} else if headStatus == headUntrusted {
err = fbo.validateHeadLocked(ctx, lState, md)
if err != nil {
return err
}
}
return nil
})
}
// SetInitialHeadToNew creates a brand-new ImmutableRootMetadata
// object and sets the head to that. This is trusted.
func (fbo *folderBranchOps) SetInitialHeadToNew(
ctx context.Context, id tlf.ID, handle *TlfHandle) (err error) {
fbo.log.CDebugf(ctx, "SetInitialHeadToNew %s", id)
defer func() {
fbo.deferLog.CDebugf(ctx, "SetInitialHeadToNew %s done: %+v",
id, err)
}()
rmd, err := makeInitialRootMetadata(
fbo.config.MetadataVersion(), id, handle)
if err != nil {
return err
}
return runUnlessCanceled(ctx, func() error {
fb := FolderBranch{rmd.TlfID(), MasterBranch}
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
// Always identify first when trying to initialize the folder,
// even if we turn out not to be a writer. (We can't rely on
// the identifyOnce call in getMDForRead, because that isn't
// called from the initialization code path when the local
// user is not a valid writer.) Also, we want to make sure we
// fail before we set the head, otherwise future calls will
// succeed incorrectly.
err = fbo.identifyOnce(ctx, rmd.ReadOnly())
if err != nil {
return err
}
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.initMDLocked(ctx, lState, rmd)
})
}
func getNodeIDStr(n Node) string {
if n == nil {
return "NodeID(nil)"
}
return fmt.Sprintf("NodeID(%v)", n.GetID())
}
func (fbo *folderBranchOps) getRootNode(ctx context.Context) (
node Node, ei EntryInfo, handle *TlfHandle, err error) {
fbo.log.CDebugf(ctx, "getRootNode")
defer func() {
fbo.deferLog.CDebugf(ctx, "getRootNode done: %s %+v",
getNodeIDStr(node), err)
}()
lState := makeFBOLockState()
var md ImmutableRootMetadata
md, err = fbo.getMDForRead(ctx, lState, mdReadNoIdentify)
if _, ok := err.(MDWriteNeededInRequest); ok {
func() {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
md, err = fbo.getMDForWriteOrRekeyLocked(ctx, lState, mdWrite)
}()
}
if err != nil {
return nil, EntryInfo{}, nil, err
}
// we may be an unkeyed client
if err := isReadableOrError(ctx, fbo.config.KBPKI(), md.ReadOnly()); err != nil {
return nil, EntryInfo{}, nil, err
}
handle = md.GetTlfHandle()
node, err = fbo.nodeCache.GetOrCreate(md.data.Dir.BlockPointer,
string(handle.GetCanonicalName()), nil)
if err != nil {
return nil, EntryInfo{}, nil, err
}
return node, md.Data().Dir.EntryInfo, handle, nil
}
type makeNewBlock func() Block
// pathFromNodeHelper() shouldn't be called except by the helper
// functions below.
func (fbo *folderBranchOps) pathFromNodeHelper(n Node) (path, error) {
p := fbo.nodeCache.PathFromNode(n)
if !p.isValid() {
return path{}, InvalidPathError{p}
}
return p, nil
}
// Helper functions to clarify uses of pathFromNodeHelper() (see
// nodeCache comments).
func (fbo *folderBranchOps) pathFromNodeForRead(n Node) (path, error) {
return fbo.pathFromNodeHelper(n)
}
func (fbo *folderBranchOps) pathFromNodeForMDWriteLocked(
lState *lockState, n Node) (path, error) {
fbo.mdWriterLock.AssertLocked(lState)
return fbo.pathFromNodeHelper(n)
}
func (fbo *folderBranchOps) GetDirChildren(ctx context.Context, dir Node) (
children map[string]EntryInfo, err error) {
fbo.log.CDebugf(ctx, "GetDirChildren %s", getNodeIDStr(dir))
defer func() {
fbo.deferLog.CDebugf(ctx, "GetDirChildren %s done, %d entries: %+v",
getNodeIDStr(dir), len(children), err)
}()
err = fbo.checkNode(dir)
if err != nil {
return nil, err
}
err = runUnlessCanceled(ctx, func() error {
var err error
lState := makeFBOLockState()
dirPath, err := fbo.pathFromNodeForRead(dir)
if err != nil {
return err
}
if fbo.nodeCache.IsUnlinked(dir) {
fbo.log.CDebugf(ctx, "Returning an empty children set for "+
"unlinked directory %v", dirPath.tailPointer())
return nil
}
md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return err
}
children, err = fbo.blocks.GetDirtyDirChildren(
ctx, lState, md.ReadOnly(), dirPath)
if err != nil {
return err
}
return nil
})
if err != nil {
return nil, err
}
return children, nil
}
func (fbo *folderBranchOps) Lookup(ctx context.Context, dir Node, name string) (
node Node, ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "Lookup %s %s", getNodeIDStr(dir), name)
defer func() {
fbo.deferLog.CDebugf(ctx, "Lookup %s %s done: %v %+v",
getNodeIDStr(dir), name, getNodeIDStr(node), err)
}()
err = fbo.checkNode(dir)
if err != nil {
return nil, EntryInfo{}, err
}
// It's racy for the goroutine to write directly to return param
// `node`, so use a new param for that.
var n Node
var de DirEntry
err = runUnlessCanceled(ctx, func() error {
if fbo.nodeCache.IsUnlinked(dir) {
fbo.log.CDebugf(ctx, "Refusing a lookup for unlinked directory %v",
fbo.nodeCache.PathFromNode(dir).tailPointer())
return NoSuchNameError{name}
}
lState := makeFBOLockState()
md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return err
}
n, de, err = fbo.blocks.Lookup(ctx, lState, md.ReadOnly(), dir, name)
if err != nil {
return err
}
return nil
})
if err != nil {
return nil, EntryInfo{}, err
}
return n, de.EntryInfo, nil
}
// statEntry is like Stat, but it returns a DirEntry. This is used by
// tests.
func (fbo *folderBranchOps) statEntry(ctx context.Context, node Node) (
de DirEntry, err error) {
err = fbo.checkNode(node)
if err != nil {
return DirEntry{}, err
}
lState := makeFBOLockState()
nodePath, err := fbo.pathFromNodeForRead(node)
if err != nil {
return DirEntry{}, err
}
var md ImmutableRootMetadata
if nodePath.hasValidParent() {
md, err = fbo.getMDForReadNeedIdentify(ctx, lState)
} else {
// If nodePath has no valid parent, it's just the TLF
// root, so we don't need an identify in this case.
md, err = fbo.getMDForReadNoIdentify(ctx, lState)
}
if err != nil {
return DirEntry{}, err
}
if nodePath.hasValidParent() {
de, err = fbo.blocks.GetDirtyEntryEvenIfDeleted(
ctx, lState, md.ReadOnly(), nodePath)
if err != nil {
return DirEntry{}, err
}
} else {
// nodePath is just the root.
de = md.data.Dir
de = fbo.blocks.UpdateDirtyEntry(ctx, lState, de)
}
return de, nil
}
var zeroPtr BlockPointer
type blockState struct {
blockPtr       BlockPointer   // pointer under which the block will be put
block          Block          // the block data itself
readyBlockData ReadyBlockData // encoded/encrypted data, ready for the server
syncedCb       func() error   // optional callback run when the put completes
oldPtr         BlockPointer   // pre-readied pointer, if saved via saveOldPtr
}
func (fbo *folderBranchOps) Stat(ctx context.Context, node Node) (
ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "Stat %s", getNodeIDStr(node))
defer func() {
fbo.deferLog.CDebugf(ctx, "Stat %s done: %+v",
getNodeIDStr(node), err)
}()
var de DirEntry
err = runUnlessCanceled(ctx, func() error {
de, err = fbo.statEntry(ctx, node)
return err
})
if err != nil {
return EntryInfo{}, err
}
return de.EntryInfo, nil
}
func (fbo *folderBranchOps) GetNodeMetadata(ctx context.Context, node Node) (
res NodeMetadata, err error) {
fbo.log.CDebugf(ctx, "GetNodeMetadata %s", getNodeIDStr(node))
defer func() {
fbo.deferLog.CDebugf(ctx, "GetNodeMetadata %s done: %+v",
getNodeIDStr(node), err)
}()
var de DirEntry
err = runUnlessCanceled(ctx, func() error {
de, err = fbo.statEntry(ctx, node)
return err
})
if err != nil {
return res, err
}
res.BlockInfo = de.BlockInfo
id := de.TeamWriter.AsUserOrTeam()
if id.IsNil() {
id = de.Writer
}
if id.IsNil() {
id = de.Creator
}
res.LastWriterUnverified, err =
fbo.config.KBPKI().GetNormalizedUsername(ctx, id)
if err != nil {
return res, err
}
prefetchStatus := fbo.config.PrefetchStatus(ctx, fbo.id(),
res.BlockInfo.BlockPointer)
res.PrefetchStatus = prefetchStatus.String()
return res, nil
}
// blockPutState is an internal structure to track data when putting blocks
type blockPutState struct {
blockStates []blockState
}
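// A rough usage sketch (mirroring the call in maybeUnembedAndPutBlocks
// above): create a state sized for the expected number of blocks, add
// each readied block, then hand the whole batch to doBlockPuts. Here
// ptr, block, rbd, bserv, bcache, etc. stand in for the caller's
// values:
//
//	bps := newBlockPutState(1)
//	bps.addNewBlock(ptr, block, rbd, nil /* no syncedCb */)
//	ptrsToDelete, err := doBlockPuts(ctx, bserv, bcache, reporter, log,
//		deferLog, tlfID, tlfName, *bps)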
func newBlockPutState(length int) *blockPutState {
bps := &blockPutState{}
bps.blockStates = make([]blockState, 0, length)
return bps
}
// addNewBlock tracks a new block that will be put. If syncedCb is
// non-nil, it will be called whenever the put for that block is
// complete (whether or not the put resulted in an error). Currently
// it will not be called if the block is never put (due to an earlier
// error).
func (bps *blockPutState) addNewBlock(
blockPtr BlockPointer, block Block,
readyBlockData ReadyBlockData, syncedCb func() error) {
bps.blockStates = append(bps.blockStates,
blockState{blockPtr, block, readyBlockData, syncedCb, zeroPtr})
}
// saveOldPtr stores the given BlockPointer as the old (pre-readied)
// pointer for the most recent blockState.
func (bps *blockPutState) saveOldPtr(oldPtr BlockPointer) {
bps.blockStates[len(bps.blockStates)-1].oldPtr = oldPtr
}
func (bps *blockPutState) mergeOtherBps(other *blockPutState) {
bps.blockStates = append(bps.blockStates, other.blockStates...)
}
func (bps *blockPutState) removeOtherBps(other *blockPutState) {
if len(other.blockStates) == 0 {
return
}
otherPtrs := make(map[BlockPointer]bool, len(other.blockStates))
for _, bs := range other.blockStates {
otherPtrs[bs.blockPtr] = true
}
// Assume that `other` is a subset of `bps` when initializing the
// slice length.
newLen := len(bps.blockStates) - len(other.blockStates)
if newLen < 0 {
newLen = 0
}
// Remove any blocks that appear in `other`.
newBlockStates := make([]blockState, 0, newLen)
for _, bs := range bps.blockStates {
if otherPtrs[bs.blockPtr] {
continue
}
newBlockStates = append(newBlockStates, bs)
}
bps.blockStates = newBlockStates
}
func (bps *blockPutState) DeepCopy() *blockPutState {
newBps := &blockPutState{}
newBps.blockStates = make([]blockState, len(bps.blockStates))
copy(newBps.blockStates, bps.blockStates)
return newBps
}
// localBcache is a local, in-memory cache of directory blocks, keyed
// by block pointer.
type localBcache map[BlockPointer]*DirBlock
// Returns whether the given error is one that shouldn't block the
// removal of a file or directory.
//
// TODO: Consider other errors recoverable, e.g. ones that arise from
// present but corrupted blocks?
func isRecoverableBlockErrorForRemoval(err error) bool {
return isRecoverableBlockError(err)
}
func isRetriableError(err error, retries int) bool {
_, isExclOnUnmergedError := err.(ExclOnUnmergedError)
_, isUnmergedSelfConflictError := err.(UnmergedSelfConflictError)
recoverable := isExclOnUnmergedError || isUnmergedSelfConflictError ||
isRecoverableBlockError(err)
return recoverable && retries < maxRetriesOnRecoverableErrors
}
func (fbo *folderBranchOps) finalizeBlocks(bps *blockPutState) error {
if bps == nil {
return nil
}
bcache := fbo.config.BlockCache()
for _, blockState := range bps.blockStates {
newPtr := blockState.blockPtr
// only cache this block if we made a brand new block, not if
// we just incref'd some other block.
if !newPtr.IsFirstRef() {
continue
}
if err := bcache.Put(newPtr, fbo.id(), blockState.block,
TransientEntry); err != nil {
return err
}
}
return nil
}
// Returns true if the passed error indicates a revision conflict.
func isRevisionConflict(err error) bool {
if err == nil {
return false
}
_, isConflictRevision := err.(kbfsmd.ServerErrorConflictRevision)
_, isConflictPrevRoot := err.(kbfsmd.ServerErrorConflictPrevRoot)
_, isConflictDiskUsage := err.(kbfsmd.ServerErrorConflictDiskUsage)
_, isConditionFailed := err.(kbfsmd.ServerErrorConditionFailed)
_, isConflictFolderMapping := err.(kbfsmd.ServerErrorConflictFolderMapping)
_, isJournal := err.(MDJournalConflictError)
return isConflictRevision || isConflictPrevRoot ||
isConflictDiskUsage || isConditionFailed ||
isConflictFolderMapping || isJournal
}
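// finalizeMDWriteLocked writes out the given MD: it tries a normal
// (merged) Put when we're on the master branch, falls back to an
// unmerged put on a revision conflict, updates the local head, and
// kicks off conflict resolution when we end up on a branch.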
func (fbo *folderBranchOps) finalizeMDWriteLocked(ctx context.Context,
lState *lockState, md *RootMetadata, bps *blockPutState, excl Excl,
notifyFn func(ImmutableRootMetadata) error) (
err error) {
fbo.mdWriterLock.AssertLocked(lState)
// finally, write out the new metadata
mdops := fbo.config.MDOps()
doUnmergedPut := true
mergedRev := kbfsmd.RevisionUninitialized
oldPrevRoot := md.PrevRoot()
var irmd ImmutableRootMetadata
// This puts a delay on any cancellations arriving on ctx. It is intended
// to work sort of like a critical section, except that there isn't an
// explicit call to exit the critical section. The cancellation, if any, is
// triggered after a timeout (i.e.
// fbo.config.DelayedCancellationGracePeriod()).
//
// The purpose of trying to avoid cancellation once we start the MD
// write is to avoid an unpredictable perceived MD state. That is, when
// runUnlessCanceled returns Canceled on cancellation, the application
// receives an EINTR and assumes the operation didn't succeed. But the
// MD write continues, and there's a chance it will succeed, meaning
// the operation actually succeeds. This contradicts the application's
// perception via the error code and can lead to horrible situations.
// An easily caught example: the application calls Create with O_EXCL
// set, gets an EINTR while the MD write succeeds, retries, and gets an
// EEXIST error. If users hit Ctrl-C, this might not be a big deal.
// However, it also happens for other interrupts. For applications that
// use signals to communicate, e.g. SIGALRM and SIGUSR1, this can
// happen pretty often, leaving them effectively broken.
if err = EnableDelayedCancellationWithGracePeriod(
ctx, fbo.config.DelayedCancellationGracePeriod()); err != nil {
return err
}
// We don't explicitly clean up the CancellationDelayer here (with a
// defer) because sometimes FUSE makes another call using the same ctx.
// For example, in FUSE's Create call handler, a dir.Create is followed
// by an Attr call. If we did a deferred cleanup here and an interrupt
// had been received, ctx could be canceled before the Attr call
// finishes, causing FUSE to return EINTR for the Create request. But
// at that point the request may have already succeeded. Returning
// EINTR makes the application think the file was not created
// successfully.
err = fbo.finalizeBlocks(bps)
if err != nil {
return err
}
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
if fbo.isMasterBranchLocked(lState) {
// only do a normal Put if we're not already staged.
irmd, err = mdops.Put(
ctx, md, session.VerifyingKey, nil, keybase1.MDPriorityNormal)
if doUnmergedPut = isRevisionConflict(err); doUnmergedPut {
fbo.log.CDebugf(ctx, "Conflict: %v", err)
mergedRev = md.Revision()
if excl == WithExcl {
// If this was caused by an exclusive create, we shouldn't do an
// unmerged put, but rather try to get the newest update from the
// server, and retry afterwards.
err = fbo.getAndApplyMDUpdates(ctx,
lState, nil, fbo.applyMDUpdatesLocked)
if err != nil {
return err
}
return ExclOnUnmergedError{}
}
} else if err != nil {
return err
}
} else if excl == WithExcl {
return ExclOnUnmergedError{}
}
doResolve := false
resolveMergedRev := mergedRev
if doUnmergedPut {
// We're out of date, and this is not an exclusive write, so put it as an
// unmerged MD.
irmd, err = mdops.PutUnmerged(ctx, md, session.VerifyingKey)
if isRevisionConflict(err) {
// Self-conflicts are retried in `doMDWriteWithRetry`.
return UnmergedSelfConflictError{err}
} else if err != nil {
// If a PutUnmerged fails, we are in a bad situation: if
// we fail, but the put succeeded, then dirty data will
// remain cached locally and will be re-tried
// (non-idempotently) on the next sync call. This should
// be a very rare situation when journaling is enabled, so
// instead let's pretend it succeeded so that the cached
// data is cleared and the nodeCache is updated. If we're
// wrong, and the update didn't make it to the server,
// then the next call will get an
// UnmergedSelfConflictError but fail to find any new
// updates and fail the operation, but things will get
// fixed up once conflict resolution finally completes.
//
// TODO: how confused will the kernel cache get if the
// pointers are updated but the file system operation
// still gets an error returned by the wrapper function
// that calls us (in the event of a user cancellation)?
fbo.log.CInfof(ctx, "Ignoring a PutUnmerged error: %+v", err)
err = encryptMDPrivateData(
ctx, fbo.config.Codec(), fbo.config.Crypto(),
fbo.config.Crypto(), fbo.config.KeyManager(), session.UID, md)
if err != nil {
return err
}
mdID, err := kbfsmd.MakeID(fbo.config.Codec(), md.bareMd)
if err != nil {
return err
}
irmd = MakeImmutableRootMetadata(
md, session.VerifyingKey, mdID, fbo.config.Clock().Now(), true)
err = fbo.config.MDCache().Put(irmd)
if err != nil {
return err
}
}
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
doResolve = true
} else {
fbo.setBranchIDLocked(lState, kbfsmd.NullBranchID)
if md.IsRekeySet() && !md.IsWriterMetadataCopiedSet() {
// Queue this folder for rekey if the bit was set and it's not a copy.
// This is for the case where we're coming out of conflict resolution.
// So why don't we do this in finalizeResolution? Well, we do but we don't
// want to block on a rekey so we queue it. Because of that it may fail
// due to a conflict with some subsequent write. By also handling it here
// we'll always retry if we notice we haven't been successful in clearing
// the bit yet. Note that I haven't actually seen this happen but it seems
// theoretically possible.
defer fbo.config.RekeyQueue().Enqueue(md.TlfID())
}
}
md.loadCachedBlockChanges(ctx, bps, fbo.log)
rebased := (oldPrevRoot != md.PrevRoot())
if rebased {
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
doResolve = true
resolveMergedRev = kbfsmd.RevisionUninitialized
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased)
if err != nil {
return err
}
// Archive the old, unref'd blocks if journaling is off.
if !TLFJournalEnabled(fbo.config, fbo.id()) {
fbo.fbm.archiveUnrefBlocks(irmd.ReadOnly())
}
// Call Resolve() after the head is set, to make sure it fetches
// the correct unmerged MD range during resolution.
if doResolve {
fbo.cr.Resolve(ctx, md.Revision(), resolveMergedRev)
}
if notifyFn != nil {
err := notifyFn(irmd)
if err != nil {
return err
}
}
return nil
}
func (fbo *folderBranchOps) waitForJournalLocked(ctx context.Context,
lState *lockState, jServer *JournalServer) error {
fbo.mdWriterLock.AssertLocked(lState)
if !TLFJournalEnabled(fbo.config, fbo.id()) {
// Nothing to do.
return nil
}
if err := jServer.Wait(ctx, fbo.id()); err != nil {
return err
}
// Make sure everything flushed successfully; since we're holding
// the writer lock, no other revisions could have snuck in.
jStatus, err := jServer.JournalStatus(fbo.id())
if err != nil {
return err
}
if jStatus.RevisionEnd != kbfsmd.RevisionUninitialized {
return errors.Errorf("Couldn't flush all MD revisions; current "+
"revision end for the journal is %d", jStatus.RevisionEnd)
}
if jStatus.LastFlushErr != "" {
return errors.Errorf("Couldn't flush the journal: %s",
jStatus.LastFlushErr)
}
return nil
}
func (fbo *folderBranchOps) finalizeMDRekeyWriteLocked(ctx context.Context,
lState *lockState, md *RootMetadata,
lastWriterVerifyingKey kbfscrypto.VerifyingKey) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
oldPrevRoot := md.PrevRoot()
// Write out the new metadata. If journaling is enabled, we don't
// want the rekey to hit the journal and possibly end up on a
// conflict branch, so wait for the journal to flush and then push
// straight to the server. TODO: we're holding the writer lock
// while flushing the journal here (just like for exclusive
// writes), which may end up blocking incoming writes for a long
// time. Rekeys are pretty rare, but if this becomes an issue
// maybe we should consider letting these hit the journal and
// scrubbing them when converting it to a branch.
mdOps := fbo.config.MDOps()
if jServer, err := GetJournalServer(fbo.config); err == nil {
if err = fbo.waitForJournalLocked(ctx, lState, jServer); err != nil {
return err
}
mdOps = jServer.delegateMDOps
}
var key kbfscrypto.VerifyingKey
if md.IsWriterMetadataCopiedSet() {
key = lastWriterVerifyingKey
} else {
var err error
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
key = session.VerifyingKey
}
irmd, err := mdOps.Put(ctx, md, key, nil, keybase1.MDPriorityNormal)
isConflict := isRevisionConflict(err)
if err != nil && !isConflict {
return err
}
if isConflict {
// Drop this block. We've probably collided with someone also
// trying to rekey the same folder but that's not necessarily
// the case. We'll queue another rekey just in case. It should
// be safe as it's idempotent. We don't want any rekeys present
// in unmerged history; that would just make a mess.
fbo.config.RekeyQueue().Enqueue(md.TlfID())
return RekeyConflictError{err}
}
fbo.setBranchIDLocked(lState, kbfsmd.NullBranchID)
rebased := (oldPrevRoot != md.PrevRoot())
if rebased {
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
fbo.cr.Resolve(ctx, md.Revision(), kbfsmd.RevisionUninitialized)
}
md.loadCachedBlockChanges(ctx, nil, fbo.log)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased)
if err != nil {
return err
}
// Explicitly set the latest merged revision, since if journaling
// is on, `setHeadLocked` will not do it for us (even though
// rekeys bypass the journal).
fbo.setLatestMergedRevisionLocked(ctx, lState, md.Revision(), false)
return nil
}
func (fbo *folderBranchOps) finalizeGCOp(ctx context.Context, gco *GCOp) (
err error) {
lState := makeFBOLockState()
// Lock the folder so we can get an internally-consistent MD
// revision number.
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
md, err := fbo.getSuccessorMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
if md.MergedStatus() == kbfsmd.Unmerged {
return UnexpectedUnmergedPutError{}
}
md.AddOp(gco)
// TODO: if the revision number of this new commit is sequential
// with `LatestRev`, we can probably change this to
// `gco.LatestRev+1`.
md.SetLastGCRevision(gco.LatestRev)
bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md)
if err != nil {
return err
}
oldPrevRoot := md.PrevRoot()
err = fbo.finalizeBlocks(bps)
if err != nil {
return err
}
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
// finally, write out the new metadata
irmd, err := fbo.config.MDOps().Put(
ctx, md, session.VerifyingKey, nil, keybase1.MDPriorityNormal)
if err != nil {
// Don't allow garbage collection to put us into a conflicting
// state; just wait for the next period.
return err
}
fbo.setBranchIDLocked(lState, kbfsmd.NullBranchID)
md.loadCachedBlockChanges(ctx, bps, fbo.log)
rebased := (oldPrevRoot != md.PrevRoot())
if rebased {
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
fbo.cr.Resolve(ctx, md.Revision(), kbfsmd.RevisionUninitialized)
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased)
if err != nil {
return err
}
return fbo.notifyBatchLocked(ctx, lState, irmd)
}
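// checkDisallowedPrefixes rejects names that start with a reserved
// prefix, except in single-op mode. For example (a sketch, assuming
// ".kbfs" is among disallowedPrefixes):
//
//	err := checkDisallowedPrefixes(".kbfs_status", mode)
//	// err is a DisallowedPrefixError unless mode == InitSingleOp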
func checkDisallowedPrefixes(name string, mode InitMode) error {
if mode == InitSingleOp {
// Allow specialized, single-op KBFS programs (like the kbgit
// remote helper) to bypass the disallowed prefix check.
return nil
}
for _, prefix := range disallowedPrefixes {
if strings.HasPrefix(name, prefix) {
return DisallowedPrefixError{name, prefix}
}
}
return nil
}
func (fbo *folderBranchOps) checkNewDirSize(ctx context.Context,
lState *lockState, md ReadOnlyRootMetadata,
dirPath path, newName string) error {
// Check that the directory isn't past capacity already.
var currSize uint64
if dirPath.hasValidParent() {
de, err := fbo.blocks.GetDirtyEntry(ctx, lState, md, dirPath)
if err != nil {
return err
}
currSize = de.Size
} else {
// dirPath is just the root.
currSize = md.data.Dir.Size
}
// Just an approximation since it doesn't include the size of the
// directory entry itself, but that's ok -- at worst it'll be an
// off-by-one-entry error, and since there's a maximum name length
// we can't get in too much trouble.
if currSize+uint64(len(newName)) > fbo.config.MaxDirBytes() {
return DirTooBigError{dirPath, currSize + uint64(len(newName)),
fbo.config.MaxDirBytes()}
}
return nil
}
// PathType returns path type
func (fbo *folderBranchOps) PathType() PathType {
switch fbo.folderBranch.Tlf.Type() {
case tlf.Public:
return PublicPathType
case tlf.Private:
return PrivatePathType
case tlf.SingleTeam:
return SingleTeamPathType
default:
panic(fmt.Sprintf("Unknown TLF type: %s", fbo.folderBranch.Tlf.Type()))
}
}
// canonicalPath returns full canonical path for dir node and name.
func (fbo *folderBranchOps) canonicalPath(ctx context.Context, dir Node, name string) (string, error) {
dirPath, err := fbo.pathFromNodeForRead(dir)
if err != nil {
return "", err
}
return BuildCanonicalPath(fbo.PathType(), dirPath.String(), name), nil
}
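// signalWrite wakes the background flusher via a non-blocking send on
// syncNeededChan (if a signal is already pending, the default case
// simply drops this one). It also kicks off a background merkle root
// fetch so the root is warm by the time the flusher runs SyncAll, and
// cancels any ongoing conflict resolution.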
func (fbo *folderBranchOps) signalWrite() {
select {
case fbo.syncNeededChan <- struct{}{}:
// Kick off a merkle root fetch in the background, so that it's
// ready by the time we do the SyncAll.
fbo.merkleFetches.Add(1)
go func() {
defer fbo.merkleFetches.Done()
newCtx := fbo.ctxWithFBOID(context.Background())
_, err := fbo.config.KBPKI().GetCurrentMerkleRoot(newCtx)
if err != nil {
fbo.log.CDebugf(newCtx, "Couldn't fetch merkle root: %+v", err)
}
}()
default:
}
// A local write always means any ongoing CR should be canceled,
// because the set of unmerged writes has changed.
fbo.cr.ForceCancel()
}
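// syncDirUpdateOrSignal either syncs the buffered directory update
// immediately (when the batch size is 1, i.e. batching is effectively
// disabled) or signals the background flusher to pick it up later.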
func (fbo *folderBranchOps) syncDirUpdateOrSignal(
ctx context.Context, lState *lockState) error {
if fbo.config.BGFlushDirOpBatchSize() == 1 {
return fbo.syncAllLocked(ctx, lState, NoExcl)
}
fbo.signalWrite()
return nil
}
func (fbo *folderBranchOps) checkForUnlinkedDir(dir Node) error {
// Disallow directory operations within an unlinked directory.
// Shells don't seem to allow it, and it will just pollute the dir
// entry cache with unsyncable entries.
if fbo.nodeCache.IsUnlinked(dir) {
dirPath := fbo.nodeCache.PathFromNode(dir).String()
return errors.WithStack(UnsupportedOpInUnlinkedDirError{dirPath})
}
return nil
}
// entryType must not be Sym.
func (fbo *folderBranchOps) createEntryLocked(
ctx context.Context, lState *lockState, dir Node, name string,
entryType EntryType, excl Excl) (childNode Node, de DirEntry, err error) {
fbo.mdWriterLock.AssertLocked(lState)
if err := checkDisallowedPrefixes(name, fbo.config.Mode()); err != nil {
return nil, DirEntry{}, err
}
if uint32(len(name)) > fbo.config.MaxNameBytes() {
return nil, DirEntry{},
NameTooLongError{name, fbo.config.MaxNameBytes()}
}
if err := fbo.checkForUnlinkedDir(dir); err != nil {
return nil, DirEntry{}, err
}
filename, err := fbo.canonicalPath(ctx, dir, name)
if err != nil {
return nil, DirEntry{}, err
}
// Verify we have permission to write (but don't make a successor yet).
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, filename)
if err != nil {
return nil, DirEntry{}, err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return nil, DirEntry{}, err
}
// We're not going to modify this copy of the dirblock, so just
// fetch it for reading.
dblock, err := fbo.blocks.GetDirtyDir(
ctx, lState, md.ReadOnly(), dirPath, blockRead)
if err != nil {
return nil, DirEntry{}, err
}
// does name already exist?
if _, ok := dblock.Children[name]; ok {
return nil, DirEntry{}, NameExistsError{name}
}
if err := fbo.checkNewDirSize(
ctx, lState, md.ReadOnly(), dirPath, name); err != nil {
return nil, DirEntry{}, err
}
parentPtr := dirPath.tailPointer()
co, err := newCreateOp(name, parentPtr, entryType)
if err != nil {
return nil, DirEntry{}, err
}
co.setFinalPath(dirPath)
// create new data block
var newBlock Block
if entryType == Dir {
newBlock = &DirBlock{
Children: make(map[string]DirEntry),
}
} else {
newBlock = &FileBlock{}
}
// Cache update and operations until batch happens. Make a new
// temporary ID and directory entry.
newID, err := fbo.config.cryptoPure().MakeTemporaryBlockID()
if err != nil {
return nil, DirEntry{}, err
}
chargedTo, err := chargedToForTLF(
ctx, fbo.config.KBPKI(), fbo.config.KBPKI(), md.GetTlfHandle())
if err != nil {
return nil, DirEntry{}, err
}
newPtr := BlockPointer{
ID: newID,
KeyGen: md.LatestKeyGeneration(),
DataVer: fbo.config.DataVersion(),
DirectType: DirectBlock,
Context: kbfsblock.MakeFirstContext(
chargedTo, fbo.config.DefaultBlockType()),
}
co.AddRefBlock(newPtr)
co.AddSelfUpdate(parentPtr)
node, err := fbo.nodeCache.GetOrCreate(newPtr, name, dir)
if err != nil {
return nil, DirEntry{}, err
}
err = fbo.config.DirtyBlockCache().Put(
fbo.id(), newPtr, fbo.branch(), newBlock)
if err != nil {
return nil, DirEntry{}, err
}
now := fbo.nowUnixNano()
de = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: newPtr,
EncodedSize: 0,
},
EntryInfo: EntryInfo{
Type: entryType,
Size: 0,
Mtime: now,
Ctime: now,
},
}
dirCacheUndoFn := fbo.blocks.AddDirEntryInCache(lState, dirPath, name, de)
fbo.dirOps = append(fbo.dirOps, cachedDirOp{co, []Node{dir, node}})
added := fbo.status.addDirtyNode(dir)
cleanupFn := func() {
if added {
fbo.status.rmDirtyNode(dir)
}
fbo.dirOps = fbo.dirOps[:len(fbo.dirOps)-1]
if dirCacheUndoFn != nil {
dirCacheUndoFn(lState)
}
// Delete should never fail.
_ = fbo.config.DirtyBlockCache().Delete(fbo.id(), newPtr, fbo.branch())
}
defer func() {
if err != nil && cleanupFn != nil {
cleanupFn()
}
}()
if entryType != Dir {
// Dirty the file with a zero-byte write, to ensure the new
// block is synced in SyncAll. TODO: remove this if we ever
// embed 0-byte files in the directory entry itself.
err = fbo.blocks.Write(
ctx, lState, md.ReadOnly(), node, []byte{}, 0)
if err != nil {
return nil, DirEntry{}, err
}
oldCleanupFn := cleanupFn
cleanupFn = func() {
fbo.blocks.ClearCacheInfo(lState, fbo.nodeCache.PathFromNode(node))
oldCleanupFn()
}
}
// It's safe to notify before we've synced, since it is only
// sending invalidation notifications. At worst the upper layer
// will just have to refresh its cache needlessly.
err = fbo.notifyOneOp(ctx, lState, co, md.ReadOnly(), false)
if err != nil {
return nil, DirEntry{}, err
}
if excl == WithExcl {
// Sync this change to the server.
err := fbo.syncAllLocked(ctx, lState, WithExcl)
_, isNoUpdatesWhileDirty := errors.Cause(err).(NoUpdatesWhileDirtyError)
if isNoUpdatesWhileDirty {
// If an exclusive write hits a conflict, it will try to
// update, but won't be able to because of the dirty
// directory entries. We need to clean up the dirty
// entries here first before trying to apply the updates
// again. By returning `ExclOnUnmergedError` below, we
// force the caller to retry the whole operation again.
fbo.log.CDebugf(ctx, "Clearing dirty entry before applying new "+
"updates for exclusive write")
cleanupFn()
cleanupFn = nil
// Sync anything else that might be buffered (non-exclusively).
err = fbo.syncAllLocked(ctx, lState, NoExcl)
if err != nil {
return nil, DirEntry{}, err
}
// Now we should be in a clean state, so this should work.
err = fbo.getAndApplyMDUpdates(
ctx, lState, nil, fbo.applyMDUpdatesLocked)
if err != nil {
return nil, DirEntry{}, err
}
return nil, DirEntry{}, ExclOnUnmergedError{}
} else if err != nil {
return nil, DirEntry{}, err
}
} else {
err = fbo.syncDirUpdateOrSignal(ctx, lState)
if err != nil {
return nil, DirEntry{}, err
}
}
return node, de, nil
}
func (fbo *folderBranchOps) maybeWaitForSquash(
ctx context.Context, bid kbfsmd.BranchID) {
if bid != kbfsmd.PendingLocalSquashBranchID {
return
}
fbo.log.CDebugf(ctx, "Blocking until squash finishes")
// Limit the time we wait to just under the ctx deadline if there
// is one, or 10s if there isn't.
deadline, ok := ctx.Deadline()
if ok {
deadline = deadline.Add(-1 * time.Second)
} else {
// Can't use config.Clock() since context doesn't respect it.
deadline = time.Now().Add(10 * time.Second)
}
ctx, cancel := context.WithDeadline(ctx, deadline)
defer cancel()
// Wait for CR to finish. Note that if the user is issuing
// concurrent writes, the current CR could be canceled, and when
// the call below returns, the branch still won't be squashed.
// That's ok, this is just an optimization.
err := fbo.cr.Wait(ctx)
if err != nil {
fbo.log.CDebugf(ctx, "Error while waiting for CR: %+v", err)
}
}
func (fbo *folderBranchOps) doMDWriteWithRetry(ctx context.Context,
lState *lockState, fn func(lState *lockState) error) error {
doUnlock := false
defer func() {
if doUnlock {
bid := fbo.bid
fbo.mdWriterLock.Unlock(lState)
// Don't let a pending squash get too big.
fbo.maybeWaitForSquash(ctx, bid)
}
}()
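// Retry the write under the MD writer lock until fn either
// succeeds or fails with a non-retriable error.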
for i := 0; ; i++ {
fbo.mdWriterLock.Lock(lState)
doUnlock = true
// Make sure we haven't been canceled before doing anything
// too serious.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
err := fn(lState)
if isRetriableError(err, i) {
fbo.log.CDebugf(ctx, "Trying again after retriable error: %v", err)
// Release the lock to give someone else a chance
doUnlock = false
fbo.mdWriterLock.Unlock(lState)
if _, ok := err.(ExclOnUnmergedError); ok {
if err = fbo.cr.Wait(ctx); err != nil {
return err
}
} else if _, ok := err.(UnmergedSelfConflictError); ok {
// We can only get here if we are already on an
// unmerged branch and an errored PutUnmerged did make
// it to the mdserver. Let's force sync, with a fresh
// context so the observer doesn't ignore the updates
// (but tie the cancels together).
newCtx := fbo.ctxWithFBOID(context.Background())
newCtx, cancel := context.WithCancel(newCtx)
defer cancel()
go func() {
select {
case <-ctx.Done():
cancel()
case <-newCtx.Done():
}
}()
fbo.log.CDebugf(ctx, "Got a revision conflict while unmerged "+
"(%v); forcing a sync", err)
err = fbo.getAndApplyNewestUnmergedHead(newCtx, lState)
if err != nil {
// TODO: we might be stuck at this point if we're
// ahead of the unmerged branch on the server, in
// which case we might want to just abandon any
// cached updates and force a sync to the head.
return err
}
cancel()
}
continue
} else if err != nil {
return err
}
return nil
}
}
func (fbo *folderBranchOps) doMDWriteWithRetryUnlessCanceled(
ctx context.Context, fn func(lState *lockState) error) error {
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
return fbo.doMDWriteWithRetry(ctx, lState, fn)
})
}
func (fbo *folderBranchOps) CreateDir(
ctx context.Context, dir Node, path string) (
n Node, ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "CreateDir %s %s", getNodeIDStr(dir), path)
defer func() {
fbo.deferLog.CDebugf(ctx, "CreateDir %s %s done: %v %+v",
getNodeIDStr(dir), path, getNodeIDStr(n), err)
}()
err = fbo.checkNode(dir)
if err != nil {
return nil, EntryInfo{}, err
}
var retNode Node
var retEntryInfo EntryInfo
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
node, de, err :=
fbo.createEntryLocked(ctx, lState, dir, path, Dir, NoExcl)
// Don't set node and ei directly, as that can cause a
// race when the Create is canceled.
retNode = node
retEntryInfo = de.EntryInfo
return err
})
if err != nil {
return nil, EntryInfo{}, err
}
return retNode, retEntryInfo, nil
}
func (fbo *folderBranchOps) CreateFile(
ctx context.Context, dir Node, path string, isExec bool, excl Excl) (
n Node, ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "CreateFile %s %s isExec=%v Excl=%s",
getNodeIDStr(dir), path, isExec, excl)
defer func() {
fbo.deferLog.CDebugf(ctx,
"CreateFile %s %s isExec=%v Excl=%s done: %v %+v",
getNodeIDStr(dir), path, isExec, excl,
getNodeIDStr(n), err)
}()
err = fbo.checkNode(dir)
if err != nil {
return nil, EntryInfo{}, err
}
var entryType EntryType
if isExec {
entryType = Exec
} else {
entryType = File
}
// If journaling is turned on, an exclusive create may end up on a
// conflict branch.
if excl == WithExcl && TLFJournalEnabled(fbo.config, fbo.id()) {
fbo.log.CDebugf(ctx, "Exclusive create status is being discarded.")
excl = NoExcl
}
if excl == WithExcl {
if err = fbo.cr.Wait(ctx); err != nil {
return nil, EntryInfo{}, err
}
}
var retNode Node
var retEntryInfo EntryInfo
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
// Don't set node and ei directly, as that can cause a
// race when the Create is canceled.
node, de, err :=
fbo.createEntryLocked(ctx, lState, dir, path, entryType, excl)
retNode = node
retEntryInfo = de.EntryInfo
return err
})
if err != nil {
return nil, EntryInfo{}, err
}
return retNode, retEntryInfo, nil
}
// notifyAndSyncOrSignal caches an op in memory and dirties the
// relevant node, and then sends a notification for it. If batching
// is on, it signals the write; otherwise it syncs the change. It
// should only be called as the final instruction that can fail in a
// method.
func (fbo *folderBranchOps) notifyAndSyncOrSignal(
ctx context.Context, lState *lockState, undoFn dirCacheUndoFn,
nodesToDirty []Node, op op, md ReadOnlyRootMetadata) (err error) {
fbo.dirOps = append(fbo.dirOps, cachedDirOp{op, nodesToDirty})
var addedNodes []Node
for _, n := range nodesToDirty {
added := fbo.status.addDirtyNode(n)
if added {
addedNodes = append(addedNodes, n)
}
}
defer func() {
if err != nil {
for _, n := range addedNodes {
fbo.status.rmDirtyNode(n)
}
fbo.dirOps = fbo.dirOps[:len(fbo.dirOps)-1]
if undoFn != nil {
undoFn(lState)
}
}
}()
// It's safe to notify before we've synced, since it is only
// sending invalidation notifications. At worst the upper layer
// will just have to refresh its cache needlessly.
err = fbo.notifyOneOp(ctx, lState, op, md, false)
if err != nil {
return err
}
return fbo.syncDirUpdateOrSignal(ctx, lState)
}
func (fbo *folderBranchOps) createLinkLocked(
ctx context.Context, lState *lockState, dir Node, fromName string,
toPath string) (DirEntry, error) {
fbo.mdWriterLock.AssertLocked(lState)
if err := checkDisallowedPrefixes(fromName, fbo.config.Mode()); err != nil {
return DirEntry{}, err
}
if uint32(len(fromName)) > fbo.config.MaxNameBytes() {
return DirEntry{},
NameTooLongError{fromName, fbo.config.MaxNameBytes()}
}
if err := fbo.checkForUnlinkedDir(dir); err != nil {
return DirEntry{}, err
}
// Verify we have permission to write (but don't make a successor yet).
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "")
if err != nil {
return DirEntry{}, err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return DirEntry{}, err
}
// We're not going to modify this copy of the dirblock, so just
// fetch it for reading.
dblock, err := fbo.blocks.GetDirtyDir(
ctx, lState, md.ReadOnly(), dirPath, blockRead)
if err != nil {
return DirEntry{}, err
}
// TODO: validate inputs
// does name already exist?
if _, ok := dblock.Children[fromName]; ok {
return DirEntry{}, NameExistsError{fromName}
}
if err := fbo.checkNewDirSize(ctx, lState, md.ReadOnly(),
dirPath, fromName); err != nil {
return DirEntry{}, err
}
parentPtr := dirPath.tailPointer()
co, err := newCreateOp(fromName, parentPtr, Sym)
if err != nil {
return DirEntry{}, err
}
co.setFinalPath(dirPath)
co.AddSelfUpdate(parentPtr)
// Nothing below here can fail, so no need to clean up the dir
// entry cache on a failure. If this ever panics, we need to add
// cleanup code.
// Create a direntry for the link, and then sync
now := fbo.nowUnixNano()
de := DirEntry{
EntryInfo: EntryInfo{
Type: Sym,
Size: uint64(len(toPath)),
SymPath: toPath,
Mtime: now,
Ctime: now,
},
}
dirCacheUndoFn := fbo.blocks.AddDirEntryInCache(
lState, dirPath, fromName, de)
err = fbo.notifyAndSyncOrSignal(
ctx, lState, dirCacheUndoFn, []Node{dir}, co, md.ReadOnly())
if err != nil {
return DirEntry{}, err
}
return de, nil
}
func (fbo *folderBranchOps) CreateLink(
ctx context.Context, dir Node, fromName string, toPath string) (
ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "CreateLink %s %s -> %s",
getNodeIDStr(dir), fromName, toPath)
defer func() {
fbo.deferLog.CDebugf(ctx, "CreateLink %s %s -> %s done: %+v",
getNodeIDStr(dir), fromName, toPath, err)
}()
err = fbo.checkNode(dir)
if err != nil {
return EntryInfo{}, err
}
var retEntryInfo EntryInfo
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
// Don't set ei directly, as that can cause a race when
// the Create is canceled.
de, err := fbo.createLinkLocked(ctx, lState, dir, fromName, toPath)
retEntryInfo = de.EntryInfo
return err
})
if err != nil {
return EntryInfo{}, err
}
return retEntryInfo, nil
}
// unrefEntry modifies md to unreference all relevant blocks for the
// given entry.
func (fbo *folderBranchOps) unrefEntryLocked(ctx context.Context,
lState *lockState, kmd KeyMetadata, ro op, dir path, de DirEntry,
name string) error {
fbo.mdWriterLock.AssertLocked(lState)
if de.Type == Sym {
return nil
}
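// Collect every block pointer that needs unreferencing, starting
// with the entry's own top block.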
unrefsToAdd := make(map[BlockPointer]bool)
fbo.prepper.cacheBlockInfos([]BlockInfo{de.BlockInfo})
unrefsToAdd[de.BlockPointer] = true
// construct a path for the child so we can unlink with it.
childPath := dir.ChildPath(name, de.BlockPointer)
// If this is an indirect block, we need to delete all of its
// children as well. NOTE: non-empty directories can't be
// removed, so no need to check for indirect directory blocks
// here.
if de.Type == File || de.Type == Exec {
blockInfos, err := fbo.blocks.GetIndirectFileBlockInfos(
ctx, lState, kmd, childPath)
if isRecoverableBlockErrorForRemoval(err) {
msg := fmt.Sprintf("Recoverable block error encountered for unrefEntry(%v); continuing", childPath)
fbo.log.CWarningf(ctx, "%s", msg)
fbo.log.CDebugf(ctx, "%s (err=%v)", msg, err)
} else if err != nil {
return err
}
fbo.prepper.cacheBlockInfos(blockInfos)
for _, blockInfo := range blockInfos {
unrefsToAdd[blockInfo.BlockPointer] = true
}
}
// Any referenced blocks that were unreferenced since the last
// sync can just be forgotten about. Note that any updated
// pointers that are unreferenced will be fixed up during syncing.
for _, dirOp := range fbo.dirOps {
for i := len(dirOp.dirOp.Refs()) - 1; i >= 0; i-- {
ref := dirOp.dirOp.Refs()[i]
if _, ok := unrefsToAdd[ref]; ok {
dirOp.dirOp.DelRefBlock(ref)
delete(unrefsToAdd, ref)
}
}
}
for unref := range unrefsToAdd {
ro.AddUnrefBlock(unref)
}
return nil
}
func (fbo *folderBranchOps) removeEntryLocked(ctx context.Context,
lState *lockState, md ReadOnlyRootMetadata, dir Node, dirPath path,
name string) error {
fbo.mdWriterLock.AssertLocked(lState)
if err := fbo.checkForUnlinkedDir(dir); err != nil {
return err
}
// We're not going to modify this copy of the dirblock, so just
// fetch it for reading.
pblock, err := fbo.blocks.GetDirtyDir(ctx, lState, md, dirPath, blockRead)
if err != nil {
return err
}
// make sure the entry exists
de, ok := pblock.Children[name]
if !ok {
return NoSuchNameError{name}
}
parentPtr := dirPath.tailPointer()
ro, err := newRmOp(name, parentPtr)
if err != nil {
return err
}
ro.setFinalPath(dirPath)
ro.AddSelfUpdate(parentPtr)
err = fbo.unrefEntryLocked(ctx, lState, md, ro, dirPath, de, name)
if err != nil {
return err
}
dirCacheUndoFn := fbo.blocks.RemoveDirEntryInCache(
lState, dirPath, name, de)
if de.Type == Dir {
removedNode := fbo.nodeCache.Get(de.BlockPointer.Ref())
if removedNode != nil {
// If it was a dirty directory, the removed node no longer
// counts as dirty (it will never be sync'd). Note that
// removed files will still be synced since any data
// written to them via a handle stays in memory until the
// sync actually happens.
removed := fbo.status.rmDirtyNode(removedNode)
if removed {
oldUndoFn := dirCacheUndoFn
dirCacheUndoFn = func(lState *lockState) {
oldUndoFn(lState)
fbo.status.addDirtyNode(removedNode)
}
}
}
}
return fbo.notifyAndSyncOrSignal(
ctx, lState, dirCacheUndoFn, []Node{dir}, ro, md.ReadOnly())
}
func (fbo *folderBranchOps) removeDirLocked(ctx context.Context,
lState *lockState, dir Node, dirName string) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// Verify we have permission to write (but don't make a successor yet).
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "")
if err != nil {
return err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return err
}
pblock, err := fbo.blocks.GetDirtyDir(
ctx, lState, md.ReadOnly(), dirPath, blockRead)
if err != nil {
return err
}
de, ok := pblock.Children[dirName]
if !ok {
return NoSuchNameError{dirName}
}
// construct a path for the child so we can check for an empty dir
childPath := dirPath.ChildPath(dirName, de.BlockPointer)
childBlock, err := fbo.blocks.GetDirtyDir(
ctx, lState, md.ReadOnly(), childPath, blockRead)
if isRecoverableBlockErrorForRemoval(err) {
msg := fmt.Sprintf("Recoverable block error encountered for removeDirLocked(%v); continuing", childPath)
fbo.log.CWarningf(ctx, "%s", msg)
fbo.log.CDebugf(ctx, "%s (err=%v)", msg, err)
} else if err != nil {
return err
} else if len(childBlock.Children) > 0 {
return DirNotEmptyError{dirName}
}
return fbo.removeEntryLocked(
ctx, lState, md.ReadOnly(), dir, dirPath, dirName)
}
func (fbo *folderBranchOps) RemoveDir(
ctx context.Context, dir Node, dirName string) (err error) {
fbo.log.CDebugf(ctx, "RemoveDir %s %s", getNodeIDStr(dir), dirName)
defer func() {
fbo.deferLog.CDebugf(ctx, "RemoveDir %s %s done: %+v",
getNodeIDStr(dir), dirName, err)
}()
err = fbo.checkNode(dir)
if err != nil {
return
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
return fbo.removeDirLocked(ctx, lState, dir, dirName)
})
}
func (fbo *folderBranchOps) RemoveEntry(ctx context.Context, dir Node,
name string) (err error) {
fbo.log.CDebugf(ctx, "RemoveEntry %s %s", getNodeIDStr(dir), name)
defer func() {
fbo.deferLog.CDebugf(ctx, "RemoveEntry %s %s done: %+v",
getNodeIDStr(dir), name, err)
}()
err = fbo.checkNode(dir)
if err != nil {
return err
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
// Verify we have permission to write (but no need to make
// a successor yet).
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "")
if err != nil {
return err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return err
}
return fbo.removeEntryLocked(
ctx, lState, md.ReadOnly(), dir, dirPath, name)
})
}
func (fbo *folderBranchOps) renameLocked(
ctx context.Context, lState *lockState, oldParent Node, oldName string,
newParent Node, newName string) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
if err := fbo.checkForUnlinkedDir(oldParent); err != nil {
return err
}
if err := fbo.checkForUnlinkedDir(newParent); err != nil {
return err
}
if err := checkDisallowedPrefixes(newName, fbo.config.Mode()); err != nil {
return err
}
oldParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, oldParent)
if err != nil {
return err
}
newParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, newParent)
if err != nil {
return err
}
// Verify we have permission to write (but no need to make a
// successor yet).
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "")
if err != nil {
return err
}
_, newPBlock, newDe, ro, err := fbo.blocks.PrepRename(
ctx, lState, md.ReadOnly(), oldParentPath, oldName, newParentPath,
newName)
if err != nil {
return err
}
// does name exist?
replacedDe, ok := newPBlock.Children[newName]
if ok {
// Usually higher-level programs check these, but just in case.
if replacedDe.Type == Dir && newDe.Type != Dir {
return NotDirError{newParentPath.ChildPathNoPtr(newName)}
} else if replacedDe.Type != Dir && newDe.Type == Dir {
return NotFileError{newParentPath.ChildPathNoPtr(newName)}
}
if replacedDe.Type == Dir {
// The directory must be empty.
oldTargetDir, err := fbo.blocks.GetDirBlockForReading(ctx, lState,
md.ReadOnly(), replacedDe.BlockPointer, newParentPath.Branch,
newParentPath.ChildPathNoPtr(newName))
if err != nil {
return err
}
if len(oldTargetDir.Children) != 0 {
fbo.log.CWarningf(ctx, "Renaming over a non-empty directory "+
" (%s/%s) not allowed.", newParentPath, newName)
return DirNotEmptyError{newName}
}
}
// Delete the old block pointed to by this direntry.
err := fbo.unrefEntryLocked(
ctx, lState, md.ReadOnly(), ro, newParentPath, replacedDe, newName)
if err != nil {
return err
}
} else {
// If the entry doesn't exist yet, see if the new name will
// make the new parent directory too big. If the entry is
// remaining in the same directory, only check the size
// difference.
checkName := newName
if oldParent == newParent {
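// For a same-directory rename, the parent only grows by the
// difference in name lengths, so it's enough to size-check a
// prefix of the new name that's exactly that many bytes long.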
if extra := len(newName) - len(oldName); extra <= 0 {
checkName = ""
} else {
checkName = newName[:extra]
}
}
if len(checkName) > 0 {
if err := fbo.checkNewDirSize(
ctx, lState, md.ReadOnly(), newParentPath,
checkName); err != nil {
return err
}
}
}
// Only the ctime changes on the directory entry itself.
newDe.Ctime = fbo.nowUnixNano()
dirCacheUndoFn, err := fbo.blocks.RenameDirEntryInCache(
lState, oldParentPath, oldName, newParentPath, newName, newDe,
replacedDe)
if err != nil {
return err
}
nodesToDirty := []Node{oldParent}
if oldParent.GetID() != newParent.GetID() {
nodesToDirty = append(nodesToDirty, newParent)
}
return fbo.notifyAndSyncOrSignal(
ctx, lState, dirCacheUndoFn, nodesToDirty, ro, md.ReadOnly())
}
func (fbo *folderBranchOps) Rename(
ctx context.Context, oldParent Node, oldName string, newParent Node,
newName string) (err error) {
fbo.log.CDebugf(ctx, "Rename %s/%s -> %s/%s", getNodeIDStr(oldParent),
oldName, getNodeIDStr(newParent), newName)
defer func() {
fbo.deferLog.CDebugf(ctx, "Rename %s/%s -> %s/%s done: %+v",
getNodeIDStr(oldParent), oldName,
getNodeIDStr(newParent), newName, err)
}()
err = fbo.checkNode(newParent)
if err != nil {
return err
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
// only works for paths within the same topdir
if oldParent.GetFolderBranch() != newParent.GetFolderBranch() {
return RenameAcrossDirsError{}
}
return fbo.renameLocked(ctx, lState, oldParent, oldName,
newParent, newName)
})
}
func (fbo *folderBranchOps) Read(
ctx context.Context, file Node, dest []byte, off int64) (
n int64, err error) {
fbo.log.CDebugf(ctx, "Read %s %d %d", getNodeIDStr(file),
len(dest), off)
defer func() {
fbo.deferLog.CDebugf(ctx, "Read %s %d %d (n=%d) done: %+v",
getNodeIDStr(file), len(dest), off, n, err)
}()
err = fbo.checkNode(file)
if err != nil {
return 0, err
}
{
filePath, err := fbo.pathFromNodeForRead(file)
if err != nil {
return 0, err
}
// It seems git isn't handling EINTR from some of its read calls (likely
// fread), which causes it to get corrupted data (which leads to coredumps
// later) when a read system call on pack files gets interrupted. This
// enables delayed cancellation for Read if the file path contains `.git`.
//
// TODO: get a patch in git, wait for sufficiently long time for people to
// upgrade, and remove this.
// Allow turning this feature off via an env var, to make life
// easier when we try to fix git.
if _, isSet := os.LookupEnv("KBFS_DISABLE_GIT_SPECIAL_CASE"); !isSet {
for _, n := range filePath.path {
if n.Name == ".git" {
EnableDelayedCancellationWithGracePeriod(ctx, fbo.config.DelayedCancellationGracePeriod())
break
}
}
}
}
// Don't let the goroutine below write directly to the return
// variable, since if the context is canceled the goroutine might
// outlast this function call, and end up in a read/write race
// with the caller.
var bytesRead int64
err = runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
// verify we have permission to read
md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return err
}
// Read using the `file` Node, not `filePath`, since the path
// could change until we take `blockLock` for reading.
bytesRead, err = fbo.blocks.Read(
ctx, lState, md.ReadOnly(), file, dest, off)
return err
})
if err != nil {
return 0, err
}
return bytesRead, nil
}
func (fbo *folderBranchOps) Write(
ctx context.Context, file Node, data []byte, off int64) (err error) {
fbo.log.CDebugf(ctx, "Write %s %d %d", getNodeIDStr(file),
len(data), off)
defer func() {
fbo.deferLog.CDebugf(ctx, "Write %s %d %d done: %+v",
getNodeIDStr(file), len(data), off, err)
}()
err = fbo.checkNode(file)
if err != nil {
return err
}
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
// Get the MD for reading. We won't modify it; we'll track the
// unref changes on the side, and put them into the MD during the
// sync.
md, err := fbo.getMDForRead(ctx, lState, mdReadNeedIdentify)
if err != nil {
return err
}
err = fbo.blocks.Write(
ctx, lState, md.ReadOnly(), file, data, off)
if err != nil {
return err
}
fbo.status.addDirtyNode(file)
fbo.signalWrite()
return nil
})
}
func (fbo *folderBranchOps) Truncate(
ctx context.Context, file Node, size uint64) (err error) {
fbo.log.CDebugf(ctx, "Truncate %s %d", getNodeIDStr(file), size)
defer func() {
fbo.deferLog.CDebugf(ctx, "Truncate %s %d done: %+v",
getNodeIDStr(file), size, err)
}()
err = fbo.checkNode(file)
if err != nil {
return err
}
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
// Get the MD for reading. We won't modify it; we'll track the
// unref changes on the side, and put them into the MD during the
// sync.
md, err := fbo.getMDForRead(ctx, lState, mdReadNeedIdentify)
if err != nil {
return err
}
err = fbo.blocks.Truncate(
ctx, lState, md.ReadOnly(), file, size)
if err != nil {
return err
}
fbo.status.addDirtyNode(file)
fbo.signalWrite()
return nil
})
}
func (fbo *folderBranchOps) setExLocked(
ctx context.Context, lState *lockState, file Node, ex bool) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
if err != nil {
return err
}
// Verify we have permission to write (no need to make a successor yet).
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "")
if err != nil {
return err
}
de, err := fbo.blocks.GetDirtyEntryEvenIfDeleted(
ctx, lState, md.ReadOnly(), filePath)
if err != nil {
return err
}
// If the file is a symlink, do nothing (to match ext4
// behavior).
if de.Type == Sym || de.Type == Dir {
fbo.log.CDebugf(ctx, "Ignoring setex on type %s", de.Type)
return nil
}
if ex && (de.Type == File) {
de.Type = Exec
} else if !ex && (de.Type == Exec) {
de.Type = File
} else {
// Treating this as a no-op, without updating the ctime, is a
// POSIX violation, but it's an important optimization to keep
// permissions-preserving rsyncs fast.
fbo.log.CDebugf(ctx, "Ignoring no-op setex")
return nil
}
de.Ctime = fbo.nowUnixNano()
parentPtr := filePath.parentPath().tailPointer()
sao, err := newSetAttrOp(filePath.tailName(), parentPtr,
exAttr, filePath.tailPointer())
if err != nil {
return err
}
sao.AddSelfUpdate(parentPtr)
// If the node has been unlinked, we can safely ignore this setex.
if fbo.nodeCache.IsUnlinked(file) {
fbo.log.CDebugf(ctx, "Skipping setex for a removed file %v",
filePath.tailPointer())
fbo.blocks.UpdateCachedEntryAttributesOnRemovedFile(
ctx, lState, sao, de)
return nil
}
sao.setFinalPath(filePath)
dirCacheUndoFn := fbo.blocks.SetAttrInDirEntryInCache(
lState, filePath, de, sao.Attr)
return fbo.notifyAndSyncOrSignal(
ctx, lState, dirCacheUndoFn, []Node{file}, sao, md.ReadOnly())
}
func (fbo *folderBranchOps) SetEx(
ctx context.Context, file Node, ex bool) (err error) {
fbo.log.CDebugf(ctx, "SetEx %s %t", getNodeIDStr(file), ex)
defer func() {
fbo.deferLog.CDebugf(ctx, "SetEx %s %t done: %+v",
getNodeIDStr(file), ex, err)
}()
err = fbo.checkNode(file)
if err != nil {
return
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
return fbo.setExLocked(ctx, lState, file, ex)
})
}
func (fbo *folderBranchOps) setMtimeLocked(
ctx context.Context, lState *lockState, file Node,
mtime *time.Time) error {
fbo.mdWriterLock.AssertLocked(lState)
filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
if err != nil {
return err
}
// Verify we have permission to write (no need to make a successor yet).
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "")
if err != nil {
return err
}
de, err := fbo.blocks.GetDirtyEntryEvenIfDeleted(
ctx, lState, md.ReadOnly(), filePath)
if err != nil {
return err
}
de.Mtime = mtime.UnixNano()
// setting the mtime counts as changing the file MD, so must set ctime too
de.Ctime = fbo.nowUnixNano()
parentPtr := filePath.parentPath().tailPointer()
sao, err := newSetAttrOp(filePath.tailName(), parentPtr,
mtimeAttr, filePath.tailPointer())
if err != nil {
return err
}
sao.AddSelfUpdate(parentPtr)
// If the node has been unlinked, we can safely ignore this
// setmtime.
if fbo.nodeCache.IsUnlinked(file) {
fbo.log.CDebugf(ctx, "Skipping setmtime for a removed file %v",
filePath.tailPointer())
fbo.blocks.UpdateCachedEntryAttributesOnRemovedFile(
ctx, lState, sao, de)
return nil
}
sao.setFinalPath(filePath)
dirCacheUndoFn := fbo.blocks.SetAttrInDirEntryInCache(
lState, filePath, de, sao.Attr)
return fbo.notifyAndSyncOrSignal(
ctx, lState, dirCacheUndoFn, []Node{file}, sao, md.ReadOnly())
}
func (fbo *folderBranchOps) SetMtime(
ctx context.Context, file Node, mtime *time.Time) (err error) {
fbo.log.CDebugf(ctx, "SetMtime %s %v", getNodeIDStr(file), mtime)
defer func() {
fbo.deferLog.CDebugf(ctx, "SetMtime %s %v done: %+v",
getNodeIDStr(file), mtime, err)
}()
if mtime == nil {
// Can happen on some OSes (e.g. OSX) when trying to set the atime only
return nil
}
err = fbo.checkNode(file)
if err != nil {
return
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
return fbo.setMtimeLocked(ctx, lState, file, mtime)
})
}
type cleanupFn func(context.Context, *lockState, []BlockPointer, error)
// startSyncLocked readies the blocks and other state needed to sync a
// single file. It returns:
//
// * `doSync`: Whether or not the sync should actually happen.
// * `stillDirty`: Whether the file should still be considered dirty when
// this function returns. (That is, if `doSync` is false, and `stillDirty`
// is true, then the file has outstanding changes but the sync was vetoed for
// some other reason.)
// * `fblock`: the root file block for the file being sync'd.
// * `lbc`: A local block cache consisting of a dirtied version of the parent
// directory for this file.
// * `bps`: All the blocks that need to be put to the server.
// * `syncState`: Must be passed to the `FinishSyncLocked` call after the
// update completes.
// * `cleanupFn`: A function that, if non-nil, must be called after the sync
// is done. `cleanupFn` should be passed the set of bad blocks that couldn't
// be sync'd (if any), and the error.
// * `err`: The best, greatest return value, everyone says it's absolutely
// stunning.
func (fbo *folderBranchOps) startSyncLocked(ctx context.Context,
lState *lockState, md *RootMetadata, node Node, file path) (
doSync, stillDirty bool, fblock *FileBlock, lbc localBcache,
bps *blockPutState, syncState fileSyncState,
cleanup cleanupFn, err error) {
fbo.mdWriterLock.AssertLocked(lState)
// if the cache for this file isn't dirty, we're done
if !fbo.blocks.IsDirty(lState, file) {
return false, false, nil, nil, nil, fileSyncState{}, nil, nil
}
// If the MD doesn't match the MD expected by the path, that
// implies we are using a cached path, which implies the node has
// been unlinked. In that case, we can safely ignore this sync.
if fbo.nodeCache.IsUnlinked(node) {
fbo.log.CDebugf(ctx, "Skipping sync for a removed file %v",
file.tailPointer())
// Removing the cached info here is a little sketchy,
// since there's no guarantee that this sync comes
// from closing the file, and we still want to serve
// stat calls accurately if the user still has an open
// handle to this file.
//
// Note in particular that if a file just had a dirty
// directory entry cached (due to an attribute change on a
// removed file, for example), this will clear that attribute
// change. If there's still an open file handle, the user
// won't be able to see the change anymore.
//
// TODO: Hook this in with the node cache GC logic to be
// perfectly accurate (but at the same time, we'd then have to
// fix up the intentional panic in the background flusher to
// be more tolerant of long-lived dirty, removed files).
err := fbo.blocks.ClearCacheInfo(lState, file)
if err != nil {
return false, false, nil, nil, nil, fileSyncState{}, nil, err
}
fbo.status.rmDirtyNode(node)
return false, true, nil, nil, nil, fileSyncState{}, nil, nil
}
if file.isValidForNotification() {
// notify the daemon that a write is being performed
fbo.config.Reporter().Notify(ctx, writeNotification(file, false))
defer fbo.config.Reporter().Notify(ctx, writeNotification(file, true))
}
fblock, bps, lbc, syncState, err =
fbo.blocks.StartSync(ctx, lState, md, file)
cleanup = func(ctx context.Context, lState *lockState,
blocksToRemove []BlockPointer, err error) {
fbo.blocks.CleanupSyncState(
ctx, lState, md.ReadOnly(), file, blocksToRemove, syncState, err)
}
if err != nil {
return false, true, nil, nil, nil, fileSyncState{}, cleanup, err
}
return true, true, fblock, lbc, bps, syncState, cleanup, nil
}
func addSelfUpdatesAndParent(
p path, op op, parentsToAddChainsFor map[BlockPointer]bool) {
for i, pn := range p.path {
if i == len(p.path)-1 {
op.AddSelfUpdate(pn.BlockPointer)
} else {
parentsToAddChainsFor[pn.BlockPointer] = true
}
}
}
func (fbo *folderBranchOps) syncAllLocked(
ctx context.Context, lState *lockState, excl Excl) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
dirtyFiles := fbo.blocks.GetDirtyFileBlockRefs(lState)
dirtyDirs := fbo.blocks.GetDirtyDirBlockRefs(lState)
if len(dirtyFiles) == 0 && len(dirtyDirs) == 0 {
return nil
}
ctx = fbo.config.MaybeStartTrace(ctx, "FBO.SyncAll",
fmt.Sprintf("%d files, %d dirs", len(dirtyFiles), len(dirtyDirs)))
defer func() { fbo.config.MaybeFinishTrace(ctx, err) }()
// Verify we have permission to write. We do this after the dirty
// check because otherwise readers who call syncAll would get an
// error.
md, err := fbo.getSuccessorMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
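// Accumulators for the whole batch: blocks to put to the server,
// resolved paths for dirtied directories, and a local cache of
// dirty directory blocks.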
bps := newBlockPutState(0)
resolvedPaths := make(map[BlockPointer]path)
lbc := make(localBcache)
var cleanups []func(context.Context, *lockState, error)
defer func() {
for _, cf := range cleanups {
cf(ctx, lState, err)
}
}()
fbo.log.LazyTrace(ctx, "Syncing %d dir(s)", len(dirtyDirs))
// First prep all the directories.
fbo.log.CDebugf(ctx, "Syncing %d dir(s)", len(dirtyDirs))
for _, ref := range dirtyDirs {
node := fbo.nodeCache.Get(ref)
if node == nil {
continue
}
dir := fbo.nodeCache.PathFromNode(node)
dblock, err := fbo.blocks.GetDirtyDir(ctx, lState, md, dir, blockWrite)
if err != nil {
return err
}
lbc[dir.tailPointer()] = dblock
if !fbo.nodeCache.IsUnlinked(node) {
resolvedPaths[dir.tailPointer()] = dir
}
// On a successful sync, clean up the cached entries and the
// dirty blocks.
cleanups = append(cleanups,
func(ctx context.Context, lState *lockState, err error) {
if err != nil {
return
}
fbo.blocks.ClearCachedDirEntry(lState, dir)
fbo.status.rmDirtyNode(node)
})
}
defer func() {
// If the sync is successful, we can clear out all buffered
// directory operations.
if err == nil {
fbo.dirOps = nil
}
}()
fbo.log.LazyTrace(ctx, "Processing %d op(s)", len(fbo.dirOps))
newBlocks := make(map[BlockPointer]bool)
fileBlocks := make(fileBlockMap)
parentsToAddChainsFor := make(map[BlockPointer]bool)
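// Replay each buffered directory op into the MD, recording which
// blocks are brand new (and so will need explicit pointer updates
// after the sync) and which parent dirs need no-op chains.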
for _, dop := range fbo.dirOps {
// Copy the op before modifying it, in case there's an error
// and we have to retry with the original ops.
newOp := dop.dirOp.deepCopy()
md.AddOp(newOp)
// Add "updates" for all the op updates, and make chains for
// the rest of the parent directories, so they're treated like
// updates during the prepping.
for _, n := range dop.nodes {
p := fbo.nodeCache.PathFromNode(n)
if _, ok := newOp.(*setAttrOp); ok {
// For a setattr, the node is the file, but that
// doesn't get updated, so use the current parent
// node.
p = *p.parentPath()
}
addSelfUpdatesAndParent(p, newOp, parentsToAddChainsFor)
}
var ref BlockRef
switch realOp := newOp.(type) {
case *createOp:
if realOp.Type == Sym {
continue
}
// New files and directories explicitly need
// pointer-updating, because the sync process will turn
// them into simple refs and will forget about the local,
// temporary ID.
newNode := dop.nodes[1]
newPath := fbo.nodeCache.PathFromNode(newNode)
newPointer := newPath.tailPointer()
newBlocks[newPointer] = true
if realOp.Type != Dir {
continue
}
dblock, ok := lbc[newPointer]
if !ok {
// New directories that aren't otherwise dirty need to
// be added to both the `lbc` and `resolvedPaths` so
// they are properly synced.
dblock, err = fbo.blocks.GetDirtyDir(
ctx, lState, md, newPath, blockWrite)
if err != nil {
return err
}
lbc[newPointer] = dblock
if !fbo.nodeCache.IsUnlinked(newNode) {
resolvedPaths[newPointer] = newPath
}
}
if len(dblock.Children) > 0 {
continue
}
// If the directory is empty, we need to explicitly clean
// up its entry after syncing.
ref = newPath.tailRef()
case *renameOp:
ref = realOp.Renamed.Ref()
case *setAttrOp:
ref = realOp.File.Ref()
default:
continue
}
// For create, rename and setattr ops, the target will have a
// dirty entry, but may not have any outstanding operations on
// it, so it needs to be cleaned up manually.
defer func() {
if err != nil {
return
}
wasCleared := fbo.blocks.ClearCachedRef(lState, ref)
if wasCleared {
node := fbo.nodeCache.Get(ref)
if node != nil {
fbo.status.rmDirtyNode(node)
}
}
}()
}
var blocksToRemove []BlockPointer
// TODO: find a way to avoid so many dynamic closure dispatches.
var afterUpdateFns []func() error
afterUpdateFns = append(afterUpdateFns, func() error {
// Any new files or directories need their pointers explicitly
// updated, because the sync will be treating them as a new
// ref, and not an update.
for _, bs := range bps.blockStates {
if newBlocks[bs.oldPtr] {
fbo.blocks.updatePointer(
md.ReadOnly(), bs.oldPtr, bs.blockPtr, false)
}
}
return nil
})
fbo.log.LazyTrace(ctx, "Syncing %d file(s)", len(dirtyFiles))
fbo.log.CDebugf(ctx, "Syncing %d file(s)", len(dirtyFiles))
fileSyncBlocks := newBlockPutState(1)
for _, ref := range dirtyFiles {
node := fbo.nodeCache.Get(ref)
if node == nil {
continue
}
file := fbo.nodeCache.PathFromNode(node)
fbo.log.CDebugf(ctx, "Syncing file %v (%s)", ref, file)
// Start the sync for this dirty file.
doSync, stillDirty, fblock, newLbc, newBps, syncState, cleanup, err :=
fbo.startSyncLocked(ctx, lState, md, node, file)
if cleanup != nil {
// Note: This passes the same `blocksToRemove` into each
// cleanup function. That's ok, as only the ones
// pertaining to a particular syncing file will be acted
// on.
cleanups = append(cleanups,
func(ctx context.Context, lState *lockState, err error) {
cleanup(ctx, lState, blocksToRemove, err)
})
}
if err != nil {
return err
}
if !doSync {
if !stillDirty {
fbo.status.rmDirtyNode(node)
}
continue
}
// Merge the per-file sync info into the batch sync info.
bps.mergeOtherBps(newBps)
fileSyncBlocks.mergeOtherBps(newBps)
resolvedPaths[file.tailPointer()] = file
parent := file.parentPath().tailPointer()
if _, ok := fileBlocks[parent]; !ok {
fileBlocks[parent] = make(map[string]*FileBlock)
}
fileBlocks[parent][file.tailName()] = fblock
// Collect its `afterUpdateFn` along with all the others, so
// they all get invoked under the same lock, to avoid any
// weird races.
afterUpdateFns = append(afterUpdateFns, func() error {
// This will be called after the node cache is updated, so
// this newPath will be correct.
newPath := fbo.nodeCache.PathFromNode(node)
stillDirty, err := fbo.blocks.FinishSyncLocked(
ctx, lState, file, newPath, md.ReadOnly(), syncState, fbo.fbm)
if !stillDirty {
fbo.status.rmDirtyNode(node)
}
return err
})
// Add an "update" for all the parent directory updates, and
// make a chain for the file itself, so they're treated like
// updates during the prepping.
lastOp := md.Data().Changes.Ops[len(md.Data().Changes.Ops)-1]
addSelfUpdatesAndParent(file, lastOp, parentsToAddChainsFor)
// Update the combined local block cache with this file's
// dirty entry.
parentPtr := file.parentPath().tailPointer()
if _, ok := lbc[parentPtr]; ok {
lbc[parentPtr].Children[file.tailName()] =
newLbc[parentPtr].Children[file.tailName()]
} else {
lbc[parentPtr] = newLbc[parentPtr]
}
}
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
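// Wrap the MD in a temporary immutable view, stamped with the
// current session's verifying key, so it can be fed to
// newCRChains below.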
tempIRMD := ImmutableRootMetadata{
ReadOnlyRootMetadata: md.ReadOnly(),
lastWriterVerifyingKey: session.VerifyingKey,
}
fbo.log.LazyTrace(ctx, "Prepping update")
// Create a set of chains for this batch, a succinct summary of
// the file and directory blocks that need to change during this
// sync.
syncChains, err := newCRChains(
ctx, fbo.config.Codec(), []chainMetadata{tempIRMD}, &fbo.blocks, false)
if err != nil {
return err
}
for ptr := range parentsToAddChainsFor {
syncChains.addNoopChain(ptr)
}
// None of the originals ever made it to the server, so don't
// unref them.
syncChains.doNotUnrefPointers = syncChains.createdOriginals
head, _ := fbo.getHead(lState)
dummyHeadChains := newCRChainsEmpty()
dummyHeadChains.mostRecentChainMDInfo = mostRecentChainMetadataInfo{
head, head.Data().Dir.BlockInfo}
// Squash the batch of updates together into a set of blocks and
// ready `md` for putting to the server.
md.AddOp(newResolutionOp())
_, newBps, blocksToDelete, err := fbo.prepper.prepUpdateForPaths(
ctx, lState, md, syncChains, dummyHeadChains, tempIRMD, head,
resolvedPaths, lbc, fileBlocks, fbo.config.DirtyBlockCache(),
prepFolderDontCopyIndirectFileBlocks)
if err != nil {
return err
}
if len(blocksToDelete) > 0 {
return errors.Errorf("Unexpectedly found unflushed blocks to delete "+
"during syncAllLocked: %v", blocksToDelete)
}
bps.mergeOtherBps(newBps)
defer func() {
if err != nil {
// Remove any blocks that are covered by file syncs --
// those might get reused upon sync retry. All other
// blocks are fair game for cleanup though.
bps.removeOtherBps(fileSyncBlocks)
fbo.fbm.cleanUpBlockState(md.ReadOnly(), bps, blockDeleteOnMDFail)
}
}()
// Put all the blocks.
blocksToRemove, err = doBlockPuts(ctx, fbo.config.BlockServer(),
fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, fbo.deferLog, md.TlfID(),
md.GetTlfHandle().GetCanonicalName(), *bps)
if err != nil {
return err
}
// Call this under the same blockLock as when the pointers are
// updated, so there's never any point in time where a read or
// write might slip in after the pointers are updated, but before
// the deferred writes are re-applied.
afterUpdateFn := func() error {
var errs []error
for _, auf := range afterUpdateFns {
err := auf()
if err != nil {
errs = append(errs, err)
}
}
if len(errs) == 1 {
return errs[0]
} else if len(errs) > 1 {
return errors.Errorf("Got errors %+v", errs)
}
return nil
}
return fbo.finalizeMDWriteLocked(ctx, lState, md, bps, excl,
func(md ImmutableRootMetadata) error {
// Just update the pointers using the resolutionOp, all
// the ops have already been notified.
err = fbo.blocks.UpdatePointers(
md, lState, md.data.Changes.Ops[0], false, afterUpdateFn)
if err != nil {
return err
}
fbo.editHistory.UpdateHistory(ctx, []ImmutableRootMetadata{md})
return nil
})
}
func (fbo *folderBranchOps) syncAllUnlocked(
ctx context.Context, lState *lockState) error {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
select {
case <-ctx.Done():
// We've already been canceled, possibly because we're a CR
// and a write just called cr.ForceCancel. Don't allow the
// SyncAll to complete, because if no other writes happen
// we'll get stuck forever (see KBFS-2505). Instead, wait for
// the next `SyncAll` to trigger.
return ctx.Err()
default:
}
return fbo.syncAllLocked(ctx, lState, NoExcl)
}
// SyncAll implements the KBFSOps interface for folderBranchOps.
func (fbo *folderBranchOps) SyncAll(
ctx context.Context, folderBranch FolderBranch) (err error) {
fbo.log.CDebugf(ctx, "SyncAll")
defer func() { fbo.deferLog.CDebugf(ctx, "SyncAll done: %+v", err) }()
if folderBranch != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, folderBranch}
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
return fbo.syncAllLocked(ctx, lState, NoExcl)
})
}
func (fbo *folderBranchOps) FolderStatus(
ctx context.Context, folderBranch FolderBranch) (
fbs FolderBranchStatus, updateChan <-chan StatusUpdate, err error) {
fbo.log.CDebugf(ctx, "Status")
defer func() { fbo.deferLog.CDebugf(ctx, "Status done: %+v", err) }()
if folderBranch != fbo.folderBranch {
return FolderBranchStatus{}, nil,
WrongOpsError{fbo.folderBranch, folderBranch}
}
return fbo.status.getStatus(ctx, &fbo.blocks)
}
func (fbo *folderBranchOps) Status(
ctx context.Context) (
fbs KBFSStatus, updateChan <-chan StatusUpdate, err error) {
return KBFSStatus{}, nil, InvalidOpError{}
}
// RegisterForChanges registers a single Observer to receive
// notifications about this folder/branch.
func (fbo *folderBranchOps) RegisterForChanges(obs Observer) error {
// It's the caller's responsibility to make sure
// RegisterForChanges isn't called twice for the same Observer
fbo.observers.add(obs)
return nil
}
// UnregisterFromChanges stops an Observer from getting notifications
// about the folder/branch.
func (fbo *folderBranchOps) UnregisterFromChanges(obs Observer) error {
fbo.observers.remove(obs)
return nil
}
// notifyBatchLocked sends out a notification for all the ops in md.
func (fbo *folderBranchOps) notifyBatchLocked(
ctx context.Context, lState *lockState, md ImmutableRootMetadata) error {
fbo.headLock.AssertLocked(lState)
for _, op := range md.data.Changes.Ops {
err := fbo.notifyOneOpLocked(ctx, lState, op, md.ReadOnly(), false)
if err != nil {
return err
}
}
fbo.editHistory.UpdateHistory(ctx, []ImmutableRootMetadata{md})
return nil
}
// searchForNode tries to figure out the path to the given
// blockPointer, using only the block updates that happened as part of
// a given MD update operation.
func (fbo *folderBranchOps) searchForNode(ctx context.Context,
ptr BlockPointer, md ReadOnlyRootMetadata) (Node, error) {
// Record which pointers are new to this update, and thus worth
// searching.
newPtrs := make(map[BlockPointer]bool)
for _, op := range md.data.Changes.Ops {
for _, update := range op.allUpdates() {
newPtrs[update.Ref] = true
}
for _, ref := range op.Refs() {
newPtrs[ref] = true
}
}
nodeMap, _, err := fbo.blocks.SearchForNodes(ctx, fbo.nodeCache,
[]BlockPointer{ptr}, newPtrs, md, md.data.Dir.BlockPointer)
if err != nil {
return nil, err
}
n, ok := nodeMap[ptr]
if !ok {
return nil, NodeNotFoundError{ptr}
}
return n, nil
}
func (fbo *folderBranchOps) getUnlinkPathBeforeUpdatingPointers(
ctx context.Context, lState *lockState, md ReadOnlyRootMetadata, op op) (
unlinkPath path, unlinkDe DirEntry, toUnlink bool, err error) {
fbo.mdWriterLock.AssertLocked(lState)
if len(md.data.Changes.Ops) == 0 {
return path{}, DirEntry{}, false, errors.New("md needs at least one op")
}
var node Node
var childName string
requireResFix := false
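// A parent update whose Ref still equals its Unref hasn't been
// fixed up by a resolutionOp; remember that, since in that case
// (see below) the dir was likely just created and no unlinking is
// needed.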
switch realOp := op.(type) {
case *rmOp:
if realOp.Dir.Ref == realOp.Dir.Unref {
requireResFix = true
}
node = fbo.nodeCache.Get(realOp.Dir.Unref.Ref())
childName = realOp.OldName
case *renameOp:
if realOp.NewDir.Unref != zeroPtr {
// moving to a new dir
if realOp.NewDir.Ref == realOp.NewDir.Unref {
requireResFix = true
}
node = fbo.nodeCache.Get(realOp.NewDir.Unref.Ref())
} else {
// moving to the same dir
if realOp.OldDir.Ref == realOp.OldDir.Unref {
requireResFix = true
}
node = fbo.nodeCache.Get(realOp.OldDir.Unref.Ref())
}
childName = realOp.NewName
}
if node == nil {
return path{}, DirEntry{}, false, nil
}
p, err := fbo.pathFromNodeForRead(node)
if err != nil {
return path{}, DirEntry{}, false, err
}
// If the first op in this MD update is a resolutionOp, we need to
// inspect it to look for the *real* original pointer for this
// node. But only do that if the op we're processing is actually
// part of this MD object; if it's the latest cached dirOp, then
// the resOp we're looking at belongs to a previous revision.
if resOp, ok := md.data.Changes.Ops[0].(*resolutionOp); ok &&
(len(fbo.dirOps) == 0 || op != fbo.dirOps[len(fbo.dirOps)-1].dirOp) {
for _, update := range resOp.allUpdates() {
if update.Ref == p.tailPointer() {
fbo.log.CDebugf(ctx,
"Backing up ptr %v in op %s to original pointer %v",
p.tailPointer(), op, update.Unref)
p.path[len(p.path)-1].BlockPointer = update.Unref
requireResFix = false
break
}
}
}
if requireResFix {
// If we didn't fix up the pointer using a resolutionOp, the
// directory was likely created during this md update, and so
// no unlinking is needed.
fbo.log.CDebugf(ctx,
"Ignoring unlink when resolutionOp never fixed up %v",
p.tailPointer())
return path{}, DirEntry{}, false, nil
}
// If the original (clean) parent block is already GC'd from the
// server, this might not work, but hopefully we'd be
// fast-forwarding in that case anyway.
dblock, err := fbo.blocks.GetDir(ctx, lState, md, p, blockRead)
if err != nil {
fbo.log.CDebugf(ctx, "Couldn't get the dir entry for %s in %v: %+v",
childName, p.tailPointer(), err)
return path{}, DirEntry{}, false, nil
}
de, ok := dblock.Children[childName]
if !ok {
return path{}, DirEntry{}, false, nil
}
childPath := p.ChildPath(childName, de.BlockPointer)
return childPath, de, true, nil
}
func (fbo *folderBranchOps) notifyOneOpLocked(ctx context.Context,
lState *lockState, op op, md ReadOnlyRootMetadata,
shouldPrefetch bool) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.config.Mode() == InitMinimal {
// There is no node cache in minimal mode, so there's nothing
// to update.
return nil
}
// We need to get unlinkPath before calling UpdatePointers so that
// nodeCache.Unlink can properly update cachedPath.
unlinkPath, unlinkDe, toUnlink, err :=
fbo.getUnlinkPathBeforeUpdatingPointers(ctx, lState, md, op)
if err != nil {
return err
}
err = fbo.blocks.UpdatePointers(md, lState, op, shouldPrefetch, nil)
if err != nil {
return err
}
var changes []NodeChange
switch realOp := op.(type) {
default:
fbo.log.CDebugf(ctx, "Unknown op: %s", op)
return nil
case *createOp:
node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref())
if node == nil {
return nil // Nothing to do.
}
fbo.log.CDebugf(ctx, "notifyOneOp: create %s in node %s",
realOp.NewName, getNodeIDStr(node))
changes = append(changes, NodeChange{
Node: node,
DirUpdated: []string{realOp.NewName},
})
case *rmOp:
node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref())
if node == nil {
return nil // Nothing to do.
}
fbo.log.CDebugf(ctx, "notifyOneOp: remove %s in node %s",
realOp.OldName, getNodeIDStr(node))
changes = append(changes, NodeChange{
Node: node,
DirUpdated: []string{realOp.OldName},
})
// If this node exists, then the child node might exist too,
// and we need to unlink it in the node cache.
if toUnlink {
_ = fbo.nodeCache.Unlink(unlinkDe.Ref(), unlinkPath, unlinkDe)
}
case *renameOp:
oldNode := fbo.nodeCache.Get(realOp.OldDir.Ref.Ref())
if oldNode != nil {
changes = append(changes, NodeChange{
Node: oldNode,
DirUpdated: []string{realOp.OldName},
})
}
var newNode Node
if realOp.NewDir.Ref != zeroPtr {
newNode = fbo.nodeCache.Get(realOp.NewDir.Ref.Ref())
if newNode != nil {
changes = append(changes, NodeChange{
Node: newNode,
DirUpdated: []string{realOp.NewName},
})
}
} else {
newNode = oldNode
if oldNode != nil {
// Add another name to the existing NodeChange.
changes[len(changes)-1].DirUpdated =
append(changes[len(changes)-1].DirUpdated, realOp.NewName)
}
}
if oldNode != nil {
fbo.log.CDebugf(ctx, "notifyOneOp: rename %v from %s/%s to %s/%s",
realOp.Renamed, realOp.OldName, getNodeIDStr(oldNode),
realOp.NewName, getNodeIDStr(newNode))
if newNode == nil {
if childNode :=
fbo.nodeCache.Get(realOp.Renamed.Ref()); childNode != nil {
// if the childNode exists, we still have to update
// its path to go through the new node. That means
// creating nodes for all the intervening paths.
// Unfortunately we don't have enough information to
// know what the newPath is; we have to guess it from
// the updates.
var err error
newNode, err =
fbo.searchForNode(ctx, realOp.NewDir.Ref, md)
if newNode == nil {
fbo.log.CErrorf(ctx, "Couldn't find the new node: %v",
err)
}
}
}
if newNode != nil {
if toUnlink {
_ = fbo.nodeCache.Unlink(
unlinkDe.Ref(), unlinkPath, unlinkDe)
}
_, err := fbo.nodeCache.Move(
realOp.Renamed.Ref(), newNode, realOp.NewName)
if err != nil {
return err
}
}
}
case *syncOp:
node := fbo.nodeCache.Get(realOp.File.Ref.Ref())
if node == nil {
return nil // Nothing to do.
}
fbo.log.CDebugf(ctx, "notifyOneOp: sync %d writes in node %s",
len(realOp.Writes), getNodeIDStr(node))
changes = append(changes, NodeChange{
Node: node,
FileUpdated: realOp.Writes,
})
case *setAttrOp:
node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref())
if node == nil {
return nil // Nothing to do.
}
fbo.log.CDebugf(ctx, "notifyOneOp: setAttr %s for file %s in node %s",
realOp.Attr, realOp.Name, getNodeIDStr(node))
p, err := fbo.pathFromNodeForRead(node)
if err != nil {
return err
}
childNode, err := fbo.blocks.UpdateCachedEntryAttributes(
ctx, lState, md, p, realOp)
if err != nil {
return err
}
if childNode == nil {
return nil // Nothing to do.
}
changes = append(changes, NodeChange{
Node: childNode,
})
case *GCOp:
// Unreferenced blocks in a GCOp mean that we shouldn't cache
// them anymore.
fbo.log.CDebugf(ctx, "notifyOneOp: GCOp with latest rev %d and %d unref'd blocks", realOp.LatestRev, len(realOp.Unrefs()))
bcache := fbo.config.BlockCache()
idsToDelete := make([]kbfsblock.ID, 0, len(realOp.Unrefs()))
for _, ptr := range realOp.Unrefs() {
idsToDelete = append(idsToDelete, ptr.ID)
if err := bcache.DeleteTransient(ptr, fbo.id()); err != nil {
fbo.log.CDebugf(ctx,
"Couldn't delete transient entry for %v: %v", ptr, err)
}
}
diskCache := fbo.config.DiskBlockCache()
if diskCache != nil {
go diskCache.Delete(ctx, idsToDelete)
}
case *resolutionOp:
// If there are any unrefs of blocks that have a node, this is an
// implied rmOp (see KBFS-1424).
reverseUpdates := make(map[BlockPointer]BlockPointer)
for _, unref := range op.Unrefs() {
node := fbo.nodeCache.Get(unref.Ref())
if node == nil {
// TODO: even if we don't have the node that was
// unreferenced, we might have its parent, and that
// parent might need an invalidation.
continue
}
// If there is a node, unlink and invalidate.
p, err := fbo.pathFromNodeForRead(node)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't get path: %v", err)
continue
}
if !p.hasValidParent() {
fbo.log.CErrorf(ctx, "Removed node %s has no parent", p)
continue
}
parentPath := p.parentPath()
parentNode := fbo.nodeCache.Get(parentPath.tailRef())
if parentNode != nil {
changes = append(changes, NodeChange{
Node: parentNode,
DirUpdated: []string{p.tailName()},
})
}
fbo.log.CDebugf(ctx, "resolutionOp: remove %s, node %s",
p.tailPointer(), getNodeIDStr(node))
// Revert the path back to the original BlockPointers,
// before the updates were applied.
if len(reverseUpdates) == 0 {
for _, update := range op.allUpdates() {
reverseUpdates[update.Ref] = update.Unref
}
}
for i, pNode := range p.path {
if oldPtr, ok := reverseUpdates[pNode.BlockPointer]; ok {
p.path[i].BlockPointer = oldPtr
}
}
de, err := fbo.blocks.GetDirtyEntry(ctx, lState, md, p)
if err != nil {
fbo.log.CDebugf(ctx,
"Couldn't get the dir entry for %s/%v: %+v",
p, p.tailPointer(), err)
}
_ = fbo.nodeCache.Unlink(p.tailRef(), p, de)
}
if len(changes) == 0 {
return nil
}
}
fbo.observers.batchChanges(ctx, changes)
return nil
}
func (fbo *folderBranchOps) notifyOneOp(ctx context.Context,
lState *lockState, op op, md ReadOnlyRootMetadata,
shouldPrefetch bool) error {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
return fbo.notifyOneOpLocked(ctx, lState, op, md, shouldPrefetch)
}
func (fbo *folderBranchOps) getCurrMDRevisionLocked(lState *lockState) kbfsmd.Revision {
fbo.headLock.AssertAnyLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return fbo.head.Revision()
}
return kbfsmd.RevisionUninitialized
}
func (fbo *folderBranchOps) getCurrMDRevision(
lState *lockState) kbfsmd.Revision {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
return fbo.getCurrMDRevisionLocked(lState)
}
type applyMDUpdatesFunc func(context.Context, *lockState, []ImmutableRootMetadata) error
func (fbo *folderBranchOps) applyMDUpdatesLocked(ctx context.Context,
lState *lockState, rmds []ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
// If there's anything in the journal, don't apply these MDs.
// Wait for CR to happen.
if fbo.isMasterBranchLocked(lState) {
mergedRev, err := fbo.getJournalPredecessorRevision(ctx)
if err == errNoFlushedRevisions {
// If the journal is still on the initial revision, ignore
// the error and fall through to ignore CR.
mergedRev = kbfsmd.RevisionInitial
} else if err != nil {
return err
}
if mergedRev != kbfsmd.RevisionUninitialized {
if len(rmds) > 0 {
// We should update our view of the merged master though,
// to avoid re-registering for the same updates again.
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
fbo.setLatestMergedRevisionLocked(
ctx, lState, rmds[len(rmds)-1].Revision(), false)
}()
}
fbo.log.CDebugf(ctx,
"Ignoring fetched revisions while MDs are in journal")
return nil
}
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// if we have staged changes, ignore all updates until conflict
// resolution kicks in. TODO: cache these for future use.
if !fbo.isMasterBranchLocked(lState) {
if len(rmds) > 0 {
latestMerged := rmds[len(rmds)-1]
// Don't trust un-put updates here because they might have
// come from our own journal before the conflict was
// detected. Assume we'll hear about the conflict via
// callbacks from the journal.
if !latestMerged.putToServer {
return UnmergedError{}
}
// setHeadLocked takes care of merged case
fbo.setLatestMergedRevisionLocked(
ctx, lState, latestMerged.Revision(), false)
unmergedRev := kbfsmd.RevisionUninitialized
if fbo.head != (ImmutableRootMetadata{}) {
unmergedRev = fbo.head.Revision()
}
fbo.cr.Resolve(ctx, unmergedRev, latestMerged.Revision())
}
return UnmergedError{}
}
// Don't allow updates while we're in the dirty state; the next
// sync will put us into an unmerged state anyway and we'll
// require conflict resolution.
if fbo.blocks.GetState(lState) != cleanState {
return errors.WithStack(NoUpdatesWhileDirtyError{})
}
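// Apply each newer revision in order, updating the head and
// notifying observers of its ops.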
appliedRevs := make([]ImmutableRootMetadata, 0, len(rmds))
for _, rmd := range rmds {
// check that we're applying the expected MD revision
if rmd.Revision() <= fbo.getCurrMDRevisionLocked(lState) {
// Already caught up!
continue
}
if err := isReadableOrError(ctx, fbo.config.KBPKI(), rmd.ReadOnly()); err != nil {
return err
}
err := fbo.setHeadSuccessorLocked(ctx, lState, rmd, false)
if err != nil {
return err
}
// No new operations in these.
if rmd.IsWriterMetadataCopiedSet() {
continue
}
for _, op := range rmd.data.Changes.Ops {
err := fbo.notifyOneOpLocked(ctx, lState, op, rmd.ReadOnly(), true)
if err != nil {
return err
}
}
if rmd.IsRekeySet() {
// One might worry that an MD update written by this device itself
// could slip in here (for example, during the rekey after setting
// the paper prompt) and that the event might cause the paper
// prompt to be unset. This is not a problem, because 1) the
// revision check above shouldn't allow an MD update written by
// this device to reach here; and 2) the rekey FSM doesn't touch
// anything if it has the paper prompt set and is in the scheduled
// state.
fbo.rekeyFSM.Event(NewRekeyRequestEvent())
} else {
fbo.rekeyFSM.Event(NewRekeyNotNeededEvent())
}
appliedRevs = append(appliedRevs, rmd)
}
if len(appliedRevs) > 0 {
fbo.editHistory.UpdateHistory(ctx, appliedRevs)
}
return nil
}
func (fbo *folderBranchOps) undoMDUpdatesLocked(ctx context.Context,
lState *lockState, rmds []ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// Don't allow updates while we're in the dirty state; the next
// sync will put us into an unmerged state anyway and we'll
// require conflict resolution.
if fbo.blocks.GetState(lState) != cleanState {
return NotPermittedWhileDirtyError{}
}
// go backwards through the updates
for i := len(rmds) - 1; i >= 0; i-- {
rmd := rmds[i]
// on undo, it's ok to re-apply the current revision since you
// need to invert all of its ops.
//
// This duplicates a check in
// fbo.setHeadPredecessorLocked. TODO: Remove this
// duplication.
if rmd.Revision() != fbo.getCurrMDRevisionLocked(lState) &&
rmd.Revision() != fbo.getCurrMDRevisionLocked(lState)-1 {
return MDUpdateInvertError{rmd.Revision(),
fbo.getCurrMDRevisionLocked(lState)}
}
// TODO: Check that the revisions are equal only for
// the first iteration.
if rmd.Revision() < fbo.getCurrMDRevisionLocked(lState) {
err := fbo.setHeadPredecessorLocked(ctx, lState, rmd)
if err != nil {
return err
}
}
// iterate the ops in reverse and invert each one
ops := rmd.data.Changes.Ops
for j := len(ops) - 1; j >= 0; j-- {
io, err := invertOpForLocalNotifications(ops[j])
if err != nil {
fbo.log.CWarningf(ctx,
"got error %v when invert op %v; "+
"skipping. Open file handles "+
"may now be in an invalid "+
"state, which can be fixed by "+
"either closing them all or "+
"restarting KBFS.",
err, ops[j])
continue
}
err = fbo.notifyOneOpLocked(ctx, lState, io, rmd.ReadOnly(), false)
if err != nil {
return err
}
}
}
// TODO: update the edit history?
return nil
}
func (fbo *folderBranchOps) applyMDUpdates(ctx context.Context,
lState *lockState, rmds []ImmutableRootMetadata) error {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.applyMDUpdatesLocked(ctx, lState, rmds)
}
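// getLatestMergedRevision returns the most recent merged revision
// that this folder has seen, under a read lock on headLock.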
func (fbo *folderBranchOps) getLatestMergedRevision(lState *lockState) kbfsmd.Revision {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
return fbo.latestMergedRevision
}
// caller should have held fbo.headLock
func (fbo *folderBranchOps) setLatestMergedRevisionLocked(ctx context.Context, lState *lockState, rev kbfsmd.Revision, allowBackward bool) {
fbo.headLock.AssertLocked(lState)
if rev == kbfsmd.RevisionUninitialized {
panic("Cannot set latest merged revision to an uninitialized value")
}
if fbo.latestMergedRevision < rev || allowBackward {
fbo.latestMergedRevision = rev
fbo.log.CDebugf(ctx, "Updated latestMergedRevision to %d.", rev)
} else {
fbo.log.CDebugf(ctx, "Local latestMergedRevision (%d) is higher than "+
"the new revision (%d); won't update.", fbo.latestMergedRevision, rev)
}
}
// Assumes all necessary locking is either already done by caller, or
// is done by applyFunc.
func (fbo *folderBranchOps) getAndApplyMDUpdates(ctx context.Context,
lState *lockState, lockBeforeGet *keybase1.LockID,
applyFunc applyMDUpdatesFunc) error {
// first look up all MD revisions newer than my current head
start := fbo.getLatestMergedRevision(lState) + 1
rmds, err := getMergedMDUpdates(ctx,
fbo.config, fbo.id(), start, lockBeforeGet)
if err != nil {
return err
}
err = applyFunc(ctx, lState, rmds)
if err != nil {
return err
}
return nil
}
func (fbo *folderBranchOps) getAndApplyNewestUnmergedHead(ctx context.Context,
lState *lockState) error {
fbo.log.CDebugf(ctx, "Fetching the newest unmerged head")
bid := func() kbfsmd.BranchID {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.bid
}()
// We can only ever be at most one revision behind, so fetch the
// latest unmerged revision and apply it as a successor.
md, err := fbo.config.MDOps().GetUnmergedForTLF(ctx, fbo.id(), bid)
if err != nil {
return err
}
if md == (ImmutableRootMetadata{}) {
// There is no unmerged revision, oops!
return errors.New("Couldn't find an unmerged head")
}
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
if fbo.bid != bid {
// The branches switched (apparently CR completed), so just
// try again.
fbo.log.CDebugf(ctx, "Branches switched while fetching unmerged head")
return nil
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if err := fbo.setHeadSuccessorLocked(ctx, lState, md, false); err != nil {
return err
}
if err := fbo.notifyBatchLocked(ctx, lState, md); err != nil {
return err
}
return fbo.config.MDCache().Put(md)
}
// getUnmergedMDUpdates returns a slice of the unmerged MDs for this
// TLF's current unmerged branch, between the merge point for the
// branch and the current head. The returned MDs
// are the same instances that are stored in the MD cache, so they
// should be modified with care.
func (fbo *folderBranchOps) getUnmergedMDUpdates(
ctx context.Context, lState *lockState) (
kbfsmd.Revision, []ImmutableRootMetadata, error) {
// acquire mdWriterLock to read the current branch ID.
bid := func() kbfsmd.BranchID {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.bid
}()
return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(),
bid, fbo.getCurrMDRevision(lState))
}
func (fbo *folderBranchOps) getUnmergedMDUpdatesLocked(
ctx context.Context, lState *lockState) (
kbfsmd.Revision, []ImmutableRootMetadata, error) {
fbo.mdWriterLock.AssertLocked(lState)
return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(),
fbo.bid, fbo.getCurrMDRevision(lState))
}
// Returns a list of block pointers that were created during the
// staged era.
func (fbo *folderBranchOps) undoUnmergedMDUpdatesLocked(
ctx context.Context, lState *lockState) ([]BlockPointer, error) {
fbo.mdWriterLock.AssertLocked(lState)
currHead, unmergedRmds, err := fbo.getUnmergedMDUpdatesLocked(ctx, lState)
if err != nil {
return nil, err
}
err = fbo.undoMDUpdatesLocked(ctx, lState, unmergedRmds)
if err != nil {
return nil, err
}
// We have arrived at the branch point. The new root is
// the previous revision from the current head. Find it
// and apply. TODO: somehow fake the current head into
// being currHead-1, so that future calls to
// applyMDUpdates will fetch this along with the rest of
// the updates.
fbo.setBranchIDLocked(lState, kbfsmd.NullBranchID)
rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), kbfsmd.NullBranchID,
currHead, kbfsmd.Merged, nil)
if err != nil {
return nil, err
}
err = func() error {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadPredecessorLocked(ctx, lState, rmd)
if err != nil {
return err
}
fbo.setLatestMergedRevisionLocked(ctx, lState, rmd.Revision(), true)
return nil
}()
if err != nil {
return nil, err
}
// Return all new refs
var unmergedPtrs []BlockPointer
for _, rmd := range unmergedRmds {
for _, op := range rmd.data.Changes.Ops {
for _, ptr := range op.Refs() {
if ptr != zeroPtr {
unmergedPtrs = append(unmergedPtrs, ptr)
}
}
for _, update := range op.allUpdates() {
if update.Ref != zeroPtr {
unmergedPtrs = append(unmergedPtrs, update.Ref)
}
}
}
}
return unmergedPtrs, nil
}
func (fbo *folderBranchOps) unstageLocked(ctx context.Context,
lState *lockState) error {
fbo.mdWriterLock.AssertLocked(lState)
// fetch all of my unstaged updates, and undo them one at a time
bid, wasMasterBranch := fbo.bid, fbo.isMasterBranchLocked(lState)
unmergedPtrs, err := fbo.undoUnmergedMDUpdatesLocked(ctx, lState)
if err != nil {
return err
}
// let the server know we no longer need the unmerged branch
if !wasMasterBranch {
err = fbo.config.MDOps().PruneBranch(ctx, fbo.id(), bid)
if err != nil {
return err
}
}
// now go forward in time, if possible
err = fbo.getAndApplyMDUpdates(ctx, lState, nil,
fbo.applyMDUpdatesLocked)
if err != nil {
return err
}
md, err := fbo.getSuccessorMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
// Finally, create a resolutionOp with the newly-unref'd pointers.
resOp := newResolutionOp()
for _, ptr := range unmergedPtrs {
resOp.AddUnrefBlock(ptr)
}
md.AddOp(resOp)
bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md)
if err != nil {
return err
}
return fbo.finalizeMDWriteLocked(ctx, lState, md, bps, NoExcl,
func(md ImmutableRootMetadata) error {
return fbo.notifyBatchLocked(ctx, lState, md)
})
}
// TODO: remove once we have automatic conflict resolution
func (fbo *folderBranchOps) UnstageForTesting(
ctx context.Context, folderBranch FolderBranch) (err error) {
fbo.log.CDebugf(ctx, "UnstageForTesting")
defer func() {
fbo.deferLog.CDebugf(ctx, "UnstageForTesting done: %+v", err)
}()
if folderBranch != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, folderBranch}
}
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
if fbo.isMasterBranch(lState) {
// no-op
return nil
}
if fbo.blocks.GetState(lState) != cleanState {
return NotPermittedWhileDirtyError{}
}
// launch unstaging in a new goroutine, because we don't want to
// use the provided context, since upper layers might ignore our
// notifications if we do. But we still want to wait for the
// context to cancel.
c := make(chan error, 1)
freshCtx, cancel := fbo.newCtxWithFBOID()
defer cancel()
fbo.log.CDebugf(freshCtx, "Launching new context for UnstageForTesting")
go func() {
lState := makeFBOLockState()
c <- fbo.doMDWriteWithRetry(ctx, lState,
func(lState *lockState) error {
return fbo.unstageLocked(freshCtx, lState)
})
}()
select {
case err := <-c:
return err
case <-ctx.Done():
return ctx.Err()
}
})
}
// mdWriterLock must be taken by the caller.
func (fbo *folderBranchOps) rekeyLocked(ctx context.Context,
lState *lockState, promptPaper bool) (res RekeyResult, err error) {
fbo.log.CDebugf(ctx, "rekeyLocked")
defer func() {
fbo.deferLog.CDebugf(ctx, "rekeyLocked done: %+v %+v", res, err)
}()
fbo.mdWriterLock.AssertLocked(lState)
if !fbo.isMasterBranchLocked(lState) {
return RekeyResult{}, errors.New("can't rekey while staged")
}
// untrusted head is ok here.
head, _ := fbo.getHead(lState)
if head != (ImmutableRootMetadata{}) {
// If we already have a cached revision, make sure we're
// up-to-date with the latest revision before inspecting the
// metadata, since Rekey doesn't let us go into CR mode, and
// we don't actually get folder update notifications when the
// rekey bit is set, just a "folder needs rekey" update.
if err := fbo.getAndApplyMDUpdates(
ctx, lState, nil, fbo.applyMDUpdatesLocked); err != nil {
if applyErr, ok := err.(kbfsmd.MDRevisionMismatch); !ok ||
applyErr.Rev != applyErr.Curr {
return RekeyResult{}, err
}
}
}
md, lastWriterVerifyingKey, rekeyWasSet, err :=
fbo.getMDForRekeyWriteLocked(ctx, lState)
if err != nil {
return RekeyResult{}, err
}
currKeyGen := md.LatestKeyGeneration()
rekeyDone, tlfCryptKey, err := fbo.config.KeyManager().
Rekey(ctx, md, promptPaper)
stillNeedsRekey := false
switch err.(type) {
case nil:
// TODO: implement a "forced" option that rekeys even when the
// devices haven't changed?
if !rekeyDone {
fbo.log.CDebugf(ctx, "No rekey necessary")
return RekeyResult{
DidRekey: false,
NeedsPaperKey: false,
}, nil
}
// Clear the rekey bit if any.
md.clearRekeyBit()
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return RekeyResult{}, err
}
// Readers can't clear the last revision, because:
// 1) They don't have access to the writer metadata, so can't clear the
// block changes.
// 2) Readers need the kbfsmd.MetadataFlagWriterMetadataCopied bit set for
// MDServer to authorize the write.
// Without this check, MDServer returns an Unauthorized error.
if md.GetTlfHandle().IsWriter(session.UID) {
md.clearLastRevision()
}
case RekeyIncompleteError:
if !rekeyDone && rekeyWasSet {
// The rekey bit was already set, and there's nothing else
// we can do, so don't put any new revisions.
fbo.log.CDebugf(ctx, "No further rekey possible by this user.")
return RekeyResult{
DidRekey: false,
NeedsPaperKey: false,
}, nil
}
// Rekey incomplete, fallthrough without early exit, to ensure
// we write the metadata with any potential changes
fbo.log.CDebugf(ctx,
"Rekeyed reader devices, but still need writer rekey")
case NeedOtherRekeyError, NeedSelfRekeyError:
stillNeedsRekey = true
default:
if err == context.DeadlineExceeded {
fbo.log.CDebugf(ctx, "Paper key prompt timed out")
// Reschedule the prompt in the timeout case.
stillNeedsRekey = true
} else {
return RekeyResult{}, err
}
}
if stillNeedsRekey {
fbo.log.CDebugf(ctx, "Device doesn't have access to rekey")
// If we didn't have read access, then we don't have any
// unlocked paper keys. Wait for some time, and then if we
// still aren't rekeyed, try again but this time prompt the
// user for any known paper keys. We do this even if the
// rekey bit is already set, since we may have restarted since
// the previous rekey attempt, before prompting for the paper
// key. Only schedule this as a one-time event, since direct
// folder accesses from the user will also cause a
// rekeyWithPrompt.
if rekeyWasSet {
// Devices not yet keyed shouldn't set the rekey bit again
fbo.log.CDebugf(ctx, "Rekey bit already set")
return RekeyResult{
DidRekey: rekeyDone,
NeedsPaperKey: true,
}, nil
}
// This device hasn't been keyed yet, fall through to set the rekey bit
}
// add an empty operation to satisfy assumptions elsewhere
md.AddOp(newRekeyOp())
// we still let readers push a new md block that we validate against reader
// permissions
err = fbo.finalizeMDRekeyWriteLocked(
ctx, lState, md, lastWriterVerifyingKey)
if err != nil {
return RekeyResult{
DidRekey: rekeyDone,
NeedsPaperKey: stillNeedsRekey,
}, err
}
// cache any new TLF crypt key
if tlfCryptKey != nil {
keyGen := md.LatestKeyGeneration()
err = fbo.config.KeyCache().PutTLFCryptKey(md.TlfID(), keyGen, *tlfCryptKey)
if err != nil {
return RekeyResult{
DidRekey: rekeyDone,
NeedsPaperKey: stillNeedsRekey,
}, err
}
}
// send rekey finish notification
handle := md.GetTlfHandle()
if currKeyGen >= kbfsmd.FirstValidKeyGen && rekeyDone {
fbo.config.Reporter().Notify(ctx,
rekeyNotification(ctx, fbo.config, handle, true))
}
return RekeyResult{
DidRekey: rekeyDone,
NeedsPaperKey: stillNeedsRekey,
}, nil
}
func (fbo *folderBranchOps) RequestRekey(_ context.Context, tlf tlf.ID) {
fb := FolderBranch{tlf, MasterBranch}
if fb != fbo.folderBranch {
// TODO: log instead of panic?
panic(WrongOpsError{fbo.folderBranch, fb})
}
fbo.rekeyFSM.Event(NewRekeyRequestEvent())
}
func (fbo *folderBranchOps) SyncFromServerForTesting(ctx context.Context,
folderBranch FolderBranch, lockBeforeGet *keybase1.LockID) (err error) {
fbo.log.CDebugf(ctx, "SyncFromServerForTesting")
defer func() {
fbo.deferLog.CDebugf(ctx,
"SyncFromServerForTesting done: %+v", err)
}()
if folderBranch != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, folderBranch}
}
lState := makeFBOLockState()
// Make sure everything outstanding syncs to disk at least.
if err := fbo.syncAllUnlocked(ctx, lState); err != nil {
return err
}
// A journal flush before CR, if needed.
if err := WaitForTLFJournal(ctx, fbo.config, fbo.id(),
fbo.log); err != nil {
return err
}
if err := fbo.mdFlushes.Wait(ctx); err != nil {
return err
}
if err := fbo.branchChanges.Wait(ctx); err != nil {
return err
}
// Loop until we're fully updated on the master branch.
for {
if !fbo.isMasterBranch(lState) {
if err := fbo.cr.Wait(ctx); err != nil {
return err
}
// If we are still staged after the wait, then we have a problem.
if !fbo.isMasterBranch(lState) {
return errors.Errorf("Conflict resolution didn't take us out " +
"of staging.")
}
}
dirtyFiles := fbo.blocks.GetDirtyFileBlockRefs(lState)
if len(dirtyFiles) > 0 {
for _, ref := range dirtyFiles {
fbo.log.CDebugf(ctx, "DeCache entry left: %v", ref)
}
return errors.New("can't sync from server while dirty")
}
// A journal flush after CR, if needed.
if err := WaitForTLFJournal(ctx, fbo.config, fbo.id(),
fbo.log); err != nil {
return err
}
if err := fbo.mdFlushes.Wait(ctx); err != nil {
return err
}
if err := fbo.branchChanges.Wait(ctx); err != nil {
return err
}
if err := fbo.getAndApplyMDUpdates(
ctx, lState, lockBeforeGet, fbo.applyMDUpdates); err != nil {
if applyErr, ok := err.(kbfsmd.MDRevisionMismatch); ok {
if applyErr.Rev == applyErr.Curr {
fbo.log.CDebugf(ctx, "Already up-to-date with server")
return nil
}
}
if _, isUnmerged := err.(UnmergedError); isUnmerged {
continue
} else if err == errNoMergedRevWhileStaged {
continue
}
return err
}
break
}
// Wait for all the asynchronous block archiving and quota
// reclamation to hit the block server.
if err := fbo.fbm.waitForArchives(ctx); err != nil {
return err
}
if err := fbo.fbm.waitForDeletingBlocks(ctx); err != nil {
return err
}
if err := fbo.editHistory.Wait(ctx); err != nil {
return err
}
if err := fbo.fbm.waitForQuotaReclamations(ctx); err != nil {
return err
}
// A second journal flush if needed, to clear out any
// archive/remove calls caused by the above operations.
return WaitForTLFJournal(ctx, fbo.config, fbo.id(), fbo.log)
}
// CtxFBOTagKey is the type used for unique context tags within folderBranchOps
type CtxFBOTagKey int
const (
// CtxFBOIDKey is the type of the tag for unique operation IDs
// within folderBranchOps.
CtxFBOIDKey CtxFBOTagKey = iota
)
// CtxFBOOpID is the display name for the unique operation
// folderBranchOps ID tag.
const CtxFBOOpID = "FBOID"
func (fbo *folderBranchOps) ctxWithFBOID(ctx context.Context) context.Context {
return CtxWithRandomIDReplayable(ctx, CtxFBOIDKey, CtxFBOOpID, fbo.log)
}
func (fbo *folderBranchOps) newCtxWithFBOID() (context.Context, context.CancelFunc) {
// No need to call NewContextReplayable since ctxWithFBOID calls
// ctxWithRandomIDReplayable, which attaches replayably.
ctx := fbo.ctxWithFBOID(context.Background())
ctx, cancelFunc := context.WithCancel(ctx)
ctx, err := NewContextWithCancellationDelayer(ctx)
if err != nil {
panic(err)
}
return ctx, cancelFunc
}
// Run the passed function with a context that's canceled on shutdown.
func (fbo *folderBranchOps) runUnlessShutdown(fn func(ctx context.Context) error) error {
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
errChan := make(chan error, 1)
go func() {
errChan <- fn(ctx)
}()
select {
case err := <-errChan:
return err
case <-fbo.shutdownChan:
return ShutdownHappenedError{}
}
}
func (fbo *folderBranchOps) doFastForwardLocked(ctx context.Context,
lState *lockState, currHead ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
fbo.log.CDebugf(ctx, "Fast-forwarding from rev %d to rev %d",
fbo.latestMergedRevision, currHead.Revision())
changes, err := fbo.blocks.FastForwardAllNodes(
ctx, lState, currHead.ReadOnly())
if err != nil {
return err
}
err = fbo.setHeadSuccessorLocked(ctx, lState, currHead, true /*rebase*/)
if err != nil {
return err
}
// Invalidate all the affected nodes.
if len(changes) > 0 {
fbo.observers.batchChanges(ctx, changes)
}
// Reset the edit history. TODO: notify any listeners that we've
// done this.
fbo.editHistory.Shutdown()
fbo.editHistory = NewTlfEditHistory(fbo.config, fbo, fbo.log)
return nil
}
func (fbo *folderBranchOps) maybeFastForward(ctx context.Context,
lState *lockState, lastUpdate time.Time, currUpdate time.Time) (
fastForwardDone bool, err error) {
// Has it been long enough to try fast-forwarding?
if currUpdate.Before(lastUpdate.Add(fastForwardTimeThresh)) ||
!fbo.isMasterBranch(lState) {
return false, nil
}
fbo.log.CDebugf(ctx, "Checking head for possible "+
"fast-forwarding (last update time=%s)", lastUpdate)
currHead, err := fbo.config.MDOps().GetForTLF(ctx, fbo.id(), nil)
if err != nil {
return false, err
}
fbo.log.CDebugf(ctx, "Current head is revision %d", currHead.Revision())
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
// Don't update while the in-memory state is dirty.
if fbo.blocks.GetState(lState) != cleanState {
return false, nil
}
// If the journal has anything in it, don't fast-forward since we
// haven't finished flushing yet. If there was really a remote
// update on the server, we'll end up in CR eventually.
mergedRev, err := fbo.getJournalPredecessorRevision(ctx)
if err != nil {
return false, err
}
if mergedRev != kbfsmd.RevisionUninitialized {
return false, nil
}
if !fbo.isMasterBranchLocked(lState) {
// Don't update if we're staged.
return false, nil
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if currHead.Revision() < fbo.latestMergedRevision+fastForwardRevThresh {
// Might as well fetch all the revisions.
return false, nil
}
err = fbo.doFastForwardLocked(ctx, lState, currHead)
if err != nil {
return false, err
}
return true, nil
}
func (fbo *folderBranchOps) locallyFinalizeTLF(ctx context.Context) {
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if fbo.head == (ImmutableRootMetadata{}) {
return
}
// It's safe to give this a finalized number of 1 and a fake user
// name. The whole point here is to move the old finalized TLF
// name away to a new name, where the user won't be able to access
// it anymore, and if there's a conflict with a previously-moved
// TLF that shouldn't matter.
now := fbo.config.Clock().Now()
finalizedInfo, err := tlf.NewHandleExtension(
tlf.HandleExtensionFinalized, 1, libkb.NormalizedUsername("<unknown>"),
now)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't make finalized info: %+v", err)
return
}
fakeSignedHead := &RootMetadataSigned{RootMetadataSigned: kbfsmd.RootMetadataSigned{MD: fbo.head.bareMd}}
finalRmd, err := fakeSignedHead.MakeFinalCopy(
fbo.config.Codec(), now, finalizedInfo)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't finalize MD: %+v", err)
return
}
// Construct the data needed to fake a new head.
mdID, err := kbfsmd.MakeID(fbo.config.Codec(), finalRmd.MD)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't get finalized MD ID: %+v", err)
return
}
bareHandle, err := finalRmd.MD.MakeBareTlfHandle(fbo.head.Extra())
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't get finalized bare handle: %+v", err)
return
}
handle, err := MakeTlfHandle(
ctx, bareHandle, fbo.id().Type(), fbo.config.KBPKI(),
fbo.config.KBPKI(), fbo.config.MDOps())
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't get finalized handle: %+v", err)
return
}
finalBrmd, ok := finalRmd.MD.(kbfsmd.MutableRootMetadata)
if !ok {
fbo.log.CErrorf(ctx, "Couldn't get finalized mutable bare MD: %+v", err)
return
}
// We don't have a way to sign this with a valid key (and we might
// be logged out anyway), so just directly make the md immutable.
finalIrmd := ImmutableRootMetadata{
ReadOnlyRootMetadata: makeRootMetadata(
finalBrmd, fbo.head.Extra(), handle).ReadOnly(),
mdID: mdID,
}
// This will trigger the handle change notification to observers.
err = fbo.setHeadSuccessorLocked(ctx, lState, finalIrmd, false)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't set finalized MD: %+v", err)
return
}
}
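// registerAndWaitForUpdates runs for the lifetime of the folder,
// registering with the MD server for updates and processing them as
// they arrive, retrying with exponential backoff on failures.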
func (fbo *folderBranchOps) registerAndWaitForUpdates() {
defer close(fbo.updateDoneChan)
childDone := make(chan struct{})
var lastUpdate time.Time
err := fbo.runUnlessShutdown(func(ctx context.Context) error {
defer close(childDone)
// If we fail to register for or process updates, try again
// with an exponential backoff, so we don't overwhelm the
// server or ourselves with too many attempts in a hopeless
// situation.
expBackoff := backoff.NewExponentialBackOff()
// Never give up hope until we shut down
expBackoff.MaxElapsedTime = 0
// Register and wait in a loop unless we hit an unrecoverable error
fbo.cancelUpdatesLock.Lock()
if fbo.cancelUpdates != nil {
// It should be impossible to get here without having
// already called the cancel function, but just in case
// call it here again.
fbo.cancelUpdates()
}
ctx, fbo.cancelUpdates = context.WithCancel(ctx)
fbo.cancelUpdatesLock.Unlock()
for {
err := backoff.RetryNotifyWithContext(ctx, func() error {
// Replace the FBOID one with a fresh id for every attempt
newCtx := fbo.ctxWithFBOID(ctx)
updateChan, err := fbo.registerForUpdates(newCtx)
if err != nil {
select {
case <-ctx.Done():
// Shortcut the retry, we're done.
return nil
default:
return err
}
}
currUpdate, err := fbo.waitForAndProcessUpdates(
newCtx, lastUpdate, updateChan)
switch errors.Cause(err).(type) {
case UnmergedError:
// skip the back-off timer and continue directly to next
// registerForUpdates
return nil
case kbfsmd.NewMetadataVersionError:
fbo.log.CDebugf(ctx, "Abandoning updates since we can't "+
"read the newest metadata: %+v", err)
fbo.status.setPermErr(err)
// No need to lock here, since `cancelUpdates` is
// only set within this same goroutine.
fbo.cancelUpdates()
return context.Canceled
case kbfsmd.ServerErrorCannotReadFinalizedTLF:
fbo.log.CDebugf(ctx, "Abandoning updates since we can't "+
"read the finalized metadata for this TLF: %+v", err)
fbo.status.setPermErr(err)
// Locally finalize the TLF so new accesses
// through to the old folder name will find the
// new folder.
fbo.locallyFinalizeTLF(newCtx)
// No need to lock here, since `cancelUpdates` is
// only set within this same goroutine.
fbo.cancelUpdates()
return context.Canceled
}
select {
case <-ctx.Done():
// Shortcut the retry, we're done.
return nil
default:
if err == nil {
lastUpdate = currUpdate
}
return err
}
},
expBackoff,
func(err error, nextTime time.Duration) {
fbo.log.CDebugf(ctx,
"Retrying registerForUpdates in %s due to err: %v",
nextTime, err)
})
if err != nil {
return err
}
}
})
if err != nil && err != context.Canceled {
fbo.log.CWarningf(context.Background(),
"registerAndWaitForUpdates failed unexpectedly with an error: %v",
err)
}
<-childDone
}
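// registerForUpdatesShouldFireNow returns true if this folder's head
// was fetched recently enough that the update registration should use
// the fire-now option (see rpc.WithFireNow in registerForUpdates).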
func (fbo *folderBranchOps) registerForUpdatesShouldFireNow() bool {
fbo.muLastGetHead.Lock()
defer fbo.muLastGetHead.Unlock()
return fbo.config.Clock().Now().Sub(fbo.lastGetHead) < registerForUpdatesFireNowThreshold
}
func (fbo *folderBranchOps) registerForUpdates(ctx context.Context) (
updateChan <-chan error, err error) {
lState := makeFBOLockState()
currRev := fbo.getLatestMergedRevision(lState)
fireNow := false
if fbo.registerForUpdatesShouldFireNow() {
ctx = rpc.WithFireNow(ctx)
fireNow = true
}
fbo.log.CDebugf(ctx,
"Registering for updates (curr rev = %d, fire now = %v)",
currRev, fireNow)
defer func() {
fbo.deferLog.CDebugf(ctx,
"Registering for updates (curr rev = %d, fire now = %v) done: %+v",
currRev, fireNow, err)
}()
// RegisterForUpdate will itself retry on connectivity issues
return fbo.config.MDServer().RegisterForUpdate(ctx, fbo.id(), currRev)
}
func (fbo *folderBranchOps) waitForAndProcessUpdates(
ctx context.Context, lastUpdate time.Time,
updateChan <-chan error) (currUpdate time.Time, err error) {
// successful registration; now, wait for an update or a shutdown
fbo.log.CDebugf(ctx, "Waiting for updates")
defer func() {
fbo.deferLog.CDebugf(ctx, "Waiting for updates done: %+v", err)
}()
lState := makeFBOLockState()
for {
select {
case err := <-updateChan:
fbo.log.CDebugf(ctx, "Got an update: %v", err)
if err != nil {
return time.Time{}, err
}
// Getting and applying the updates requires holding
// locks, so make sure it doesn't take too long.
ctx, cancel := context.WithTimeout(ctx, backgroundTaskTimeout)
defer cancel()
currUpdate := fbo.config.Clock().Now()
ffDone, err :=
fbo.maybeFastForward(ctx, lState, lastUpdate, currUpdate)
if err != nil {
return time.Time{}, err
}
if ffDone {
return currUpdate, nil
}
err = fbo.getAndApplyMDUpdates(ctx, lState, nil, fbo.applyMDUpdates)
if err != nil {
fbo.log.CDebugf(ctx, "Got an error while applying "+
"updates: %v", err)
return time.Time{}, err
}
return currUpdate, nil
case unpause := <-fbo.updatePauseChan:
fbo.log.CInfof(ctx, "Updates paused")
// wait to be unpaused
select {
case <-unpause:
fbo.log.CInfof(ctx, "Updates unpaused")
case <-ctx.Done():
return time.Time{}, ctx.Err()
}
case <-ctx.Done():
return time.Time{}, ctx.Err()
}
}
}
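// getCachedDirOpsCount returns the number of directory operations
// currently batched locally for the next sync.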
func (fbo *folderBranchOps) getCachedDirOpsCount(lState *lockState) int {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return len(fbo.dirOps)
}
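// backgroundFlusher runs in its own goroutine, syncing dirty files
// and batched directory operations either periodically, or sooner
// when a sync is forced or the dir-op batch fills up.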
func (fbo *folderBranchOps) backgroundFlusher() {
lState := makeFBOLockState()
var prevDirtyFileMap map[BlockRef]bool
sameDirtyFileCount := 0
for {
doSelect := true
if fbo.blocks.GetState(lState) == dirtyState &&
fbo.config.DirtyBlockCache().ShouldForceSync(fbo.id()) &&
sameDirtyFileCount < 10 {
// We have dirty files, and the system has a full buffer,
// so don't bother waiting for a signal, just get right to
// the main attraction.
doSelect = false
} else if fbo.getCachedDirOpsCount(lState) >=
fbo.config.BGFlushDirOpBatchSize() {
doSelect = false
}
if doSelect {
// Wait until we really have a write waiting.
doWait := true
select {
case <-fbo.syncNeededChan:
if fbo.getCachedDirOpsCount(lState) >=
fbo.config.BGFlushDirOpBatchSize() {
doWait = false
}
case <-fbo.forceSyncChan:
doWait = false
case <-fbo.shutdownChan:
return
}
if doWait {
timer := time.NewTimer(fbo.config.BGFlushPeriod())
// Loop until either a tick's worth of time passes,
// the batch size of directory ops is full, a sync is
// forced, or a shutdown happens.
loop:
for {
select {
case <-timer.C:
break loop
case <-fbo.syncNeededChan:
if fbo.getCachedDirOpsCount(lState) >=
fbo.config.BGFlushDirOpBatchSize() {
break loop
}
case <-fbo.forceSyncChan:
break loop
case <-fbo.shutdownChan:
return
}
}
}
}
dirtyFiles := fbo.blocks.GetDirtyFileBlockRefs(lState)
dirOpsCount := fbo.getCachedDirOpsCount(lState)
if len(dirtyFiles) == 0 && dirOpsCount == 0 {
sameDirtyFileCount = 0
continue
}
// Make sure we are making some progress
currDirtyFileMap := make(map[BlockRef]bool)
for _, ref := range dirtyFiles {
currDirtyFileMap[ref] = true
}
if reflect.DeepEqual(currDirtyFileMap, prevDirtyFileMap) {
sameDirtyFileCount++
} else {
sameDirtyFileCount = 0
}
prevDirtyFileMap = currDirtyFileMap
fbo.runUnlessShutdown(func(ctx context.Context) (err error) {
// Denote that these are coming from a background
// goroutine, not directly from any user.
ctx = NewContextReplayable(ctx,
func(ctx context.Context) context.Context {
return context.WithValue(ctx, CtxBackgroundSyncKey, "1")
})
fbo.log.CDebugf(ctx, "Background sync triggered: %d dirty files, "+
"%d dir ops in batch", len(dirtyFiles), dirOpsCount)
if sameDirtyFileCount >= 100 {
// If the local journal is full, we might not be able to
// make progress until more data is flushed to the
// servers, so just warn here rather than panicking outright.
fbo.log.CWarningf(ctx, "Making no Sync progress on dirty "+
"files after %d attempts: %v", sameDirtyFileCount,
dirtyFiles)
}
// Just in case network access or a bug gets stuck for a
// long time, time out the sync eventually.
longCtx, longCancel :=
context.WithTimeout(ctx, backgroundTaskTimeout)
defer longCancel()
err = fbo.SyncAll(longCtx, fbo.folderBranch)
if err != nil {
// Just log the warning and keep trying to
// sync the rest of the dirty files.
fbo.log.CWarningf(ctx, "Couldn't sync all: %+v", err)
}
return nil
})
}
}
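// blockUnmergedWrites holds mdWriterLock so that no new writes can
// start; unblockUnmergedWrites releases it.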
func (fbo *folderBranchOps) blockUnmergedWrites(lState *lockState) {
fbo.mdWriterLock.Lock(lState)
}
func (fbo *folderBranchOps) unblockUnmergedWrites(lState *lockState) {
fbo.mdWriterLock.Unlock(lState)
}
func (fbo *folderBranchOps) finalizeResolutionLocked(ctx context.Context,
lState *lockState, md *RootMetadata, bps *blockPutState,
newOps []op, blocksToDelete []kbfsblock.ID) error {
fbo.mdWriterLock.AssertLocked(lState)
// Put the blocks into the cache so that, even if we fail below,
// future attempts may reuse the blocks.
err := fbo.finalizeBlocks(bps)
if err != nil {
return err
}
// Last chance to get pre-empted.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
irmd, err := fbo.config.MDOps().ResolveBranch(ctx, fbo.id(), fbo.bid,
blocksToDelete, md, session.VerifyingKey)
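// If the resolution put hit a revision conflict on the server, abort
// this CR attempt.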
doUnmergedPut := isRevisionConflict(err)
if doUnmergedPut {
fbo.log.CDebugf(ctx, "Got a conflict after resolution; aborting CR")
return err
}
if err != nil {
return err
}
// Queue a rekey if the bit was set.
if md.IsRekeySet() {
defer fbo.config.RekeyQueue().Enqueue(md.TlfID())
}
md.loadCachedBlockChanges(ctx, bps, fbo.log)
// Set the head to the new MD.
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadConflictResolvedLocked(ctx, lState, irmd)
if err != nil {
fbo.log.CWarningf(ctx, "Couldn't set local MD head after a "+
"successful put: %v", err)
return err
}
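// Resolution succeeded, so move back to the merged master branch.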
fbo.setBranchIDLocked(lState, kbfsmd.NullBranchID)
// Archive the old, unref'd blocks if journaling is off.
if !TLFJournalEnabled(fbo.config, fbo.id()) {
fbo.fbm.archiveUnrefBlocks(irmd.ReadOnly())
}
mdCopyWithLocalOps, err := md.deepCopy(fbo.config.Codec())
if err != nil {
return err
}
mdCopyWithLocalOps.data.Changes.Ops = newOps
// notifyOneOp for every fixed-up merged op.
for _, op := range newOps {
err := fbo.notifyOneOpLocked(
ctx, lState, op, mdCopyWithLocalOps.ReadOnly(), false)
if err != nil {
return err
}
}
fbo.editHistory.UpdateHistory(ctx, []ImmutableRootMetadata{irmd})
return nil
}
// finalizeResolution caches all the blocks, and writes the new MD to
// the merged branch, failing if there is a conflict. It also sends
// out the given newOps notifications locally. This is used for
// completing conflict resolution.
func (fbo *folderBranchOps) finalizeResolution(ctx context.Context,
lState *lockState, md *RootMetadata, bps *blockPutState,
newOps []op, blocksToDelete []kbfsblock.ID) error {
// Take the writer lock.
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.finalizeResolutionLocked(
ctx, lState, md, bps, newOps, blocksToDelete)
}
func (fbo *folderBranchOps) unstageAfterFailedResolution(ctx context.Context,
lState *lockState) error {
// Take the writer lock.
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
// Last chance to get pre-empted.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
// We don't want context cancellation after this point, so use a linked
// context. There is no race since the linked context has an independent
// Done channel.
//
// Generally we don't want to have any errors in unstageLocked, and
// this solution is chosen because:
// * If the error is caused by a cancelled context then the recovery (archiving)
// would need to use a separate context anyways.
// * In such cases we would have to be very careful where the error occurs
// and what to archive, making that solution much more complicated.
// * The other "common" error case is losing server connection and after
// detecting that we won't have much luck archiving things anyways.
ctx = newLinkedContext(ctx)
fbo.log.CWarningf(ctx, "Unstaging branch %s after a resolution failure",
fbo.bid)
return fbo.unstageLocked(ctx, lState)
}
func (fbo *folderBranchOps) handleTLFBranchChange(ctx context.Context,
newBID kbfsmd.BranchID) {
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
fbo.log.CDebugf(ctx, "Journal branch change: %s", newBID)
if !fbo.isMasterBranchLocked(lState) {
if fbo.bid == newBID {
fbo.log.CDebugf(ctx, "Already on branch %s", newBID)
return
}
panic(fmt.Sprintf("Cannot switch to branch %s while on branch %s",
newBID, fbo.bid))
}
md, err := fbo.config.MDOps().GetUnmergedForTLF(ctx, fbo.id(), newBID)
if err != nil {
fbo.log.CWarningf(ctx,
"No unmerged head on journal branch change (bid=%s)", newBID)
return
}
if md == (ImmutableRootMetadata{}) || md.MergedStatus() != kbfsmd.Unmerged ||
md.BID() != newBID {
// This can happen if CR got kicked off in some other way and
// completed before we took the lock to process this
// notification.
fbo.log.CDebugf(ctx, "Ignoring stale branch change: md=%v, newBID=%d",
md, newBID)
return
}
// Everything we thought we knew about quota reclamation is now
// called into question.
fbo.fbm.clearLastQRData()
// Kick off conflict resolution and set the head to the correct branch.
fbo.setBranchIDLocked(lState, newBID)
fbo.cr.Resolve(ctx, md.Revision(), kbfsmd.RevisionUninitialized)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadSuccessorLocked(ctx, lState, md, true /*rebased*/)
if err != nil {
fbo.log.CWarningf(ctx,
"Could not set head on journal branch change: %v", err)
return
}
}
func (fbo *folderBranchOps) onTLFBranchChange(newBID kbfsmd.BranchID) {
fbo.branchChanges.Add(1)
go func() {
defer fbo.branchChanges.Done()
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
// This only happens on a `PruneBranch` call, in which case we
// would have already updated fbo's local view of the branch/head.
if newBID == kbfsmd.NullBranchID {
fbo.log.CDebugf(ctx, "Ignoring branch change back to master")
return
}
fbo.handleTLFBranchChange(ctx, newBID)
}()
}
func (fbo *folderBranchOps) handleMDFlush(ctx context.Context, bid kbfsmd.BranchID,
rev kbfsmd.Revision) {
fbo.log.CDebugf(ctx, "Considering archiving references for flushed MD revision %d", rev)
lState := makeFBOLockState()
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
fbo.setLatestMergedRevisionLocked(ctx, lState, rev, false)
}()
// Get that revision.
rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), kbfsmd.NullBranchID,
rev, kbfsmd.Merged, nil)
if err != nil {
fbo.log.CWarningf(ctx, "Couldn't get revision %d for archiving: %v",
rev, err)
return
}
if err := isArchivableMDOrError(rmd.ReadOnly()); err != nil {
fbo.log.CDebugf(
ctx, "Skipping archiving references for flushed MD revision %d: %s", rev, err)
return
}
fbo.fbm.archiveUnrefBlocks(rmd.ReadOnly())
}
func (fbo *folderBranchOps) onMDFlush(bid kbfsmd.BranchID, rev kbfsmd.Revision) {
fbo.mdFlushes.Add(1)
go func() {
defer fbo.mdFlushes.Done()
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
if bid != kbfsmd.NullBranchID {
fbo.log.CDebugf(ctx, "Ignoring MD flush on branch %v for "+
"revision %d", bid, rev)
return
}
fbo.handleMDFlush(ctx, bid, rev)
}()
}
// TeamNameChanged implements the KBFSOps interface for folderBranchOps
func (fbo *folderBranchOps) TeamNameChanged(
ctx context.Context, tid keybase1.TeamID) {
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
fbo.log.CDebugf(ctx, "Starting name change for team %s", tid)
// First check if this is an implicit team.
var newName libkb.NormalizedUsername
if fbo.id().Type() != tlf.SingleTeam {
iteamInfo, err := fbo.config.KBPKI().ResolveImplicitTeamByID(
ctx, tid, fbo.id().Type())
if err == nil {
newName = iteamInfo.Name
}
}
if newName == "" {
var err error
newName, err = fbo.config.KBPKI().GetNormalizedUsername(
ctx, tid.AsUserOrTeam())
if err != nil {
fbo.log.CWarningf(ctx, "Error getting new team name: %+v", err)
return
}
}
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if fbo.head == (ImmutableRootMetadata{}) {
fbo.log.CWarningf(ctx, "No head to update")
return
}
oldHandle := fbo.head.GetTlfHandle()
if string(oldHandle.GetCanonicalName()) == string(newName) {
fbo.log.CDebugf(ctx, "Name didn't change: %s", newName)
return
}
if oldHandle.FirstResolvedWriter() != tid.AsUserOrTeam() {
fbo.log.CWarningf(ctx,
"Old handle doesn't include changed team ID: %s",
oldHandle.FirstResolvedWriter())
return
}
// Make a copy of `head` with the new handle.
newHandle := oldHandle.deepCopy()
newHandle.name = tlf.CanonicalName(newName)
newHandle.resolvedWriters[tid.AsUserOrTeam()] = newName
newHead, err := fbo.head.deepCopy(fbo.config.Codec())
if err != nil {
fbo.log.CWarningf(ctx, "Error copying head: %+v", err)
return
}
newHead.tlfHandle = newHandle
fbo.log.CDebugf(ctx, "Team name changed from %s to %s",
oldHandle.GetCanonicalName(), newHandle.GetCanonicalName())
fbo.head = MakeImmutableRootMetadata(
newHead, fbo.head.lastWriterVerifyingKey, fbo.head.mdID,
fbo.head.localTimestamp, fbo.head.putToServer)
fbo.config.MDCache().ChangeHandleForID(oldHandle, newHandle)
fbo.observers.tlfHandleChange(ctx, newHandle)
}
// GetUpdateHistory implements the KBFSOps interface for folderBranchOps
func (fbo *folderBranchOps) GetUpdateHistory(ctx context.Context,
folderBranch FolderBranch) (history TLFUpdateHistory, err error) {
fbo.log.CDebugf(ctx, "GetUpdateHistory")
defer func() {
fbo.deferLog.CDebugf(ctx, "GetUpdateHistory done: %+v", err)
}()
if folderBranch != fbo.folderBranch {
return TLFUpdateHistory{}, WrongOpsError{fbo.folderBranch, folderBranch}
}
rmds, err := getMergedMDUpdates(ctx, fbo.config, fbo.id(),
kbfsmd.RevisionInitial, nil)
if err != nil {
return TLFUpdateHistory{}, err
}
if len(rmds) > 0 {
rmd := rmds[len(rmds)-1]
history.ID = rmd.TlfID().String()
history.Name = rmd.GetTlfHandle().GetCanonicalPath()
}
history.Updates = make([]UpdateSummary, 0, len(rmds))
writerNames := make(map[keybase1.UID]string)
for _, rmd := range rmds {
writer, ok := writerNames[rmd.LastModifyingWriter()]
if !ok {
name, err := fbo.config.KBPKI().GetNormalizedUsername(
ctx, rmd.LastModifyingWriter().AsUserOrTeam())
if err != nil {
return TLFUpdateHistory{}, err
}
writer = string(name)
writerNames[rmd.LastModifyingWriter()] = writer
}
updateSummary := UpdateSummary{
Revision: rmd.Revision(),
Date: rmd.localTimestamp,
Writer: writer,
LiveBytes: rmd.DiskUsage(),
Ops: make([]OpSummary, 0, len(rmd.data.Changes.Ops)),
}
for _, op := range rmd.data.Changes.Ops {
opSummary := OpSummary{
Op: op.String(),
Refs: make([]string, 0, len(op.Refs())),
Unrefs: make([]string, 0, len(op.Unrefs())),
Updates: make(map[string]string),
}
for _, ptr := range op.Refs() {
opSummary.Refs = append(opSummary.Refs, ptr.String())
}
for _, ptr := range op.Unrefs() {
opSummary.Unrefs = append(opSummary.Unrefs, ptr.String())
}
for _, update := range op.allUpdates() {
opSummary.Updates[update.Unref.String()] = update.Ref.String()
}
updateSummary.Ops = append(updateSummary.Ops, opSummary)
}
history.Updates = append(history.Updates, updateSummary)
}
return history, nil
}
// GetEditHistory implements the KBFSOps interface for folderBranchOps
func (fbo *folderBranchOps) GetEditHistory(ctx context.Context,
folderBranch FolderBranch) (edits TlfWriterEdits, err error) {
fbo.log.CDebugf(ctx, "GetEditHistory")
defer func() {
fbo.deferLog.CDebugf(ctx, "GetEditHistory done: %+v", err)
}()
if folderBranch != fbo.folderBranch {
return nil, WrongOpsError{fbo.folderBranch, folderBranch}
}
lState := makeFBOLockState()
head, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return nil, err
}
return fbo.editHistory.GetComplete(ctx, head)
}
// PushStatusChange forces a new status to be fetched by status listeners.
func (fbo *folderBranchOps) PushStatusChange() {
fbo.config.KBFSOps().PushStatusChange()
}
// ClearPrivateFolderMD implements the KBFSOps interface for
// folderBranchOps.
func (fbo *folderBranchOps) ClearPrivateFolderMD(ctx context.Context) {
if fbo.folderBranch.Tlf.Type() == tlf.Public {
return
}
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if fbo.head == (ImmutableRootMetadata{}) {
// Nothing to clear.
return
}
fbo.log.CDebugf(ctx, "Clearing folder MD")
// First cancel the background goroutine that's registered for
// updates, because the next time we set the head in this FBO
// we'll launch another one.
fbo.cancelUpdatesLock.Lock()
defer fbo.cancelUpdatesLock.Unlock()
if fbo.cancelUpdates != nil {
fbo.cancelUpdates()
select {
case <-fbo.updateDoneChan:
case <-ctx.Done():
fbo.log.CDebugf(
ctx, "Context canceled before updater was canceled")
return
}
fbo.config.MDServer().CancelRegistration(ctx, fbo.id())
}
fbo.head = ImmutableRootMetadata{}
fbo.headStatus = headUntrusted
fbo.latestMergedRevision = kbfsmd.RevisionUninitialized
fbo.hasBeenCleared = true
}
// ForceFastForward implements the KBFSOps interface for
// folderBranchOps.
func (fbo *folderBranchOps) ForceFastForward(ctx context.Context) {
lState := makeFBOLockState()
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
if fbo.head != (ImmutableRootMetadata{}) {
// We're already up to date.
return
}
if !fbo.hasBeenCleared {
// No reason to fast-forward here if it hasn't ever been
// cleared.
return
}
fbo.forcedFastForwards.Add(1)
go func() {
defer fbo.forcedFastForwards.Done()
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
fbo.log.CDebugf(ctx, "Forcing a fast-forward")
currHead, err := fbo.config.MDOps().GetForTLF(ctx, fbo.id(), nil)
if err != nil {
fbo.log.CDebugf(ctx, "Fast-forward failed: %v", err)
return
}
if currHead == (ImmutableRootMetadata{}) {
fbo.log.CDebugf(ctx, "No MD yet")
return
}
fbo.log.CDebugf(ctx, "Current head is revision %d", currHead.Revision())
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if fbo.head != (ImmutableRootMetadata{}) {
// We're already up to date.
fbo.log.CDebugf(ctx, "Already up-to-date: %v", err)
return
}
err = fbo.doFastForwardLocked(ctx, lState, currHead)
if err != nil {
fbo.log.CDebugf(ctx, "Fast-forward failed: %v", err)
}
}()
}
// KickoffAllOutstandingRekeys (does not) implement the KBFSOps interface for
// KBFSOpsStandard.
func (fbo *folderBranchOps) KickoffAllOutstandingRekeys() error {
return errors.New(
"KickoffAllOutstandingRekeys is not supported on *folderBranchOps")
}
// PushConnectionStatusChange pushes human readable connection status changes.
func (fbo *folderBranchOps) PushConnectionStatusChange(service string, newStatus error) {
fbo.config.KBFSOps().PushConnectionStatusChange(service, newStatus)
}
| 1 | 18,605 | There's a few `return nil` cases above this -- we should probably move this above the big switch. | keybase-kbfs | go |
@@ -33,6 +33,19 @@ func (tile TransactionInLedgerError) Error() string {
return fmt.Sprintf("transaction already in ledger: %v", tile.Txid)
}
+// LeaseInLedgerError is returned when a transaction cannot be added because it has a lease that is already being used in the relevant rounds
+type LeaseInLedgerError struct {
+ txid transactions.Txid
+ lease txlease
+}
+
+// Error implements the error interface for the LeaseInLedgerError struct
+func (lile LeaseInLedgerError) Error() string {
+ // format the lease as address.
+ addr := basics.Address(lile.lease.lease)
+ return fmt.Sprintf("transaction %v using an overlapping lease %s", lile.txid, addr.String())
+}
+
// BlockInLedgerError is returned when a block cannot be added because it has already been done
type BlockInLedgerError struct {
LastRound basics.Round | 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package ledger
import (
"fmt"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
)
// TransactionInLedgerError is returned when a transaction cannot be added because it has already been done
type TransactionInLedgerError struct {
Txid transactions.Txid
}
// Error satisfies builtin interface `error`
func (tile TransactionInLedgerError) Error() string {
return fmt.Sprintf("transaction already in ledger: %v", tile.Txid)
}
// BlockInLedgerError is returned when a block cannot be added because it has already been done
type BlockInLedgerError struct {
LastRound basics.Round
NextRound basics.Round
}
// Error satisfies builtin interface `error`
func (bile BlockInLedgerError) Error() string {
return fmt.Sprintf("block number already in ledger: block %d < next Round %d", bile.LastRound, bile.NextRound)
}
// ErrNoEntry is used to indicate that a block is not present in the ledger.
type ErrNoEntry struct {
Round basics.Round
Latest basics.Round
Committed basics.Round
}
// Error satisfies builtin interface `error`
func (err ErrNoEntry) Error() string {
return fmt.Sprintf("ledger does not have entry %d (latest %d, committed %d)", err.Round, err.Latest, err.Committed)
}
| 1 | 40,589 | `lile *LeaseInLedgerError` to reduce copying? | algorand-go-algorand | go |
@@ -76,6 +76,8 @@ public abstract class NewSessionQueuer implements HasReadyState, Routable {
.with(requiresSecret),
get("/se/grid/newsessionqueuer/queue/size")
.to(() -> new GetNewSessionQueueSize(tracer, this)),
+ get("/se/grid/newsessionqueue")
+ .to(() -> new GetSessionQueue(tracer, this)),
delete("/se/grid/newsessionqueuer/queue")
.to(() -> new ClearSessionQueue(tracer, this))
.with(requiresSecret)); | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.sessionqueue;
import static org.openqa.selenium.remote.http.Contents.reader;
import static org.openqa.selenium.remote.http.Route.combine;
import static org.openqa.selenium.remote.http.Route.delete;
import static org.openqa.selenium.remote.http.Route.get;
import static org.openqa.selenium.remote.http.Route.post;
import static org.openqa.selenium.remote.tracing.Tags.EXCEPTION;
import org.openqa.selenium.Capabilities;
import org.openqa.selenium.SessionNotCreatedException;
import org.openqa.selenium.grid.data.RequestId;
import org.openqa.selenium.grid.security.RequiresSecretFilter;
import org.openqa.selenium.grid.security.Secret;
import org.openqa.selenium.internal.Require;
import org.openqa.selenium.remote.NewSessionPayload;
import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.HttpResponse;
import org.openqa.selenium.remote.http.Routable;
import org.openqa.selenium.remote.http.Route;
import org.openqa.selenium.remote.tracing.AttributeKey;
import org.openqa.selenium.remote.tracing.EventAttribute;
import org.openqa.selenium.remote.tracing.EventAttributeValue;
import org.openqa.selenium.remote.tracing.Span;
import org.openqa.selenium.remote.tracing.Tracer;
import org.openqa.selenium.status.HasReadyState;
import java.io.IOException;
import java.io.Reader;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.UUID;
public abstract class NewSessionQueuer implements HasReadyState, Routable {
private final Route routes;
protected final Tracer tracer;
protected NewSessionQueuer(Tracer tracer, Secret registrationSecret) {
this.tracer = Require.nonNull("Tracer", tracer);
Require.nonNull("Registration secret", registrationSecret);
RequiresSecretFilter requiresSecret = new RequiresSecretFilter(registrationSecret);
routes = combine(
post("/session")
.to(() -> this::addToQueue),
post("/se/grid/newsessionqueuer/session")
.to(() -> new AddToSessionQueue(tracer, this)),
post("/se/grid/newsessionqueuer/session/retry/{requestId}")
.to(params -> new AddBackToSessionQueue(tracer, this, requestIdFrom(params)))
.with(requiresSecret),
get("/se/grid/newsessionqueuer/session/{requestId}")
.to(params -> new RemoveFromSessionQueue(tracer, this, requestIdFrom(params)))
.with(requiresSecret),
get("/se/grid/newsessionqueuer/queue/size")
.to(() -> new GetNewSessionQueueSize(tracer, this)),
delete("/se/grid/newsessionqueuer/queue")
.to(() -> new ClearSessionQueue(tracer, this))
.with(requiresSecret));
}
private RequestId requestIdFrom(Map<String, String> params) {
return new RequestId(UUID.fromString(params.get("requestId")));
}
public void validateSessionRequest(HttpRequest request) {
try (Span span = tracer.getCurrentContext().createSpan("newsession_queuer.validate")) {
Map<String, EventAttributeValue> attributeMap = new HashMap<>();
try (
Reader reader = reader(request);
NewSessionPayload payload = NewSessionPayload.create(reader)) {
Objects.requireNonNull(payload, "Requests to process must be set.");
attributeMap.put("request.payload", EventAttribute.setValue(payload.toString()));
Iterator<Capabilities> iterator = payload.stream().iterator();
if (!iterator.hasNext()) {
SessionNotCreatedException exception =
new SessionNotCreatedException("No capabilities found");
EXCEPTION.accept(attributeMap, exception);
attributeMap.put(
AttributeKey.EXCEPTION_MESSAGE.getKey(), EventAttribute.setValue(exception.getMessage()));
span.addEvent(AttributeKey.EXCEPTION_EVENT.getKey(), attributeMap);
throw exception;
}
} catch (IOException e) {
SessionNotCreatedException exception = new SessionNotCreatedException(e.getMessage(), e);
EXCEPTION.accept(attributeMap, exception);
String errorMessage = "IOException while reading the request payload. " +
exception.getMessage();
attributeMap.put(
AttributeKey.EXCEPTION_MESSAGE.getKey(), EventAttribute.setValue(errorMessage));
span.addEvent(AttributeKey.EXCEPTION_EVENT.getKey(), attributeMap);
throw exception;
}
}
}
public abstract HttpResponse addToQueue(HttpRequest request);
public abstract boolean retryAddToQueue(HttpRequest request, RequestId reqId);
public abstract Optional<HttpRequest> remove(RequestId reqId);
public abstract int clearQueue();
public abstract int getQueueSize();
@Override
public boolean matches(HttpRequest req) {
return routes.matches(req);
}
@Override
public HttpResponse execute(HttpRequest req) {
return routes.execute(req);
}
}
| 1 | 18,614 | should it be `newsessionqueue` or `newsessionqueuer`? In case we'd like to be consistent | SeleniumHQ-selenium | java |
@@ -35,9 +35,9 @@ namespace Microsoft.VisualStudio.TestPlatform.Utilities.Helpers
}
/// <inheritdoc/>
- public Stream GetStream(string filePath, FileMode mode)
+ public Stream GetStream(string filePath, FileMode mode, FileAccess access = FileAccess.ReadWrite)
{
- return new FileStream(filePath, mode);
+ return new FileStream(filePath, mode, access);
}
/// <inheritdoc/> | 1 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace Microsoft.VisualStudio.TestPlatform.Utilities.Helpers
{
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text.RegularExpressions;
using Microsoft.VisualStudio.TestPlatform.Utilities.Helpers.Interfaces;
/// <summary>
/// The file helper.
/// </summary>
public class FileHelper : IFileHelper
{
/// <inheritdoc/>
public DirectoryInfo CreateDirectory(string path)
{
return Directory.CreateDirectory(path);
}
/// <inheritdoc/>
public bool Exists(string path)
{
return File.Exists(path);
}
/// <inheritdoc/>
public bool DirectoryExists(string path)
{
return Directory.Exists(path);
}
/// <inheritdoc/>
public Stream GetStream(string filePath, FileMode mode)
{
return new FileStream(filePath, mode);
}
/// <inheritdoc/>
public IEnumerable<string> EnumerateFiles(string directory, string pattern, SearchOption searchOption)
{
var regex = new Regex(pattern, RegexOptions.IgnoreCase);
return Directory.EnumerateFiles(directory, "*", searchOption).Where(f => regex.IsMatch(f));
}
/// <inheritdoc/>
public FileAttributes GetFileAttributes(string path)
{
return new FileInfo(path).Attributes;
}
}
}
| 1 | 11,368 | We don't have a requirement anywhere in Test Platform for GetStream() with write access. It is ok to directly change `return new FileStream(filePath, mode, FileAccess.Read)`. | microsoft-vstest | .cs |
@@ -22,12 +22,17 @@
import os
import os.path
import textwrap
+import string
+import functools
+import operator
import attr
import yaml
import pytest
import bs4
+import qutebrowser.browser.hints
+
def collect_tests():
basedir = os.path.dirname(__file__) | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Test hints based on html files with special comments."""
import os
import os.path
import textwrap
import attr
import yaml
import pytest
import bs4
def collect_tests():
basedir = os.path.dirname(__file__)
datadir = os.path.join(basedir, 'data', 'hints', 'html')
files = [f for f in os.listdir(datadir) if f != 'README.md']
return files
@attr.s
class ParsedFile:
target = attr.ib()
qtwebengine_todo = attr.ib()
class InvalidFile(Exception):
def __init__(self, test_name, msg):
super().__init__("Invalid comment found in {}, please read "
"tests/end2end/data/hints/html/README.md - {}".format(
test_name, msg))
def _parse_file(test_name):
"""Parse the given HTML file."""
file_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'data', 'hints', 'html', test_name)
with open(file_path, 'r', encoding='utf-8') as html:
soup = bs4.BeautifulSoup(html, 'html.parser')
comment = soup.find(text=lambda text: isinstance(text, bs4.Comment))
if comment is None:
raise InvalidFile(test_name, "no comment found")
data = yaml.load(comment)
if not isinstance(data, dict):
raise InvalidFile(test_name, "expected yaml dict but got {}".format(
type(data).__name__))
allowed_keys = {'target', 'qtwebengine_todo'}
if not set(data.keys()).issubset(allowed_keys):
raise InvalidFile(test_name, "expected keys {} but found {}".format(
', '.join(allowed_keys),
', '.join(set(data.keys()))))
if 'target' not in data:
raise InvalidFile(test_name, "'target' key not found")
qtwebengine_todo = data.get('qtwebengine_todo', None)
return ParsedFile(target=data['target'], qtwebengine_todo=qtwebengine_todo)
@pytest.mark.parametrize('test_name', collect_tests())
@pytest.mark.parametrize('zoom_text_only', [True, False])
@pytest.mark.parametrize('zoom_level', [100, 66, 33])
@pytest.mark.parametrize('find_implementation', ['javascript', 'python'])
def test_hints(test_name, zoom_text_only, zoom_level, find_implementation,
quteproc, request):
if zoom_text_only and request.config.webengine:
pytest.skip("QtWebEngine doesn't have zoom.text_only")
if find_implementation == 'python' and request.config.webengine:
pytest.skip("QtWebEngine doesn't have a python find implementation")
parsed = _parse_file(test_name)
if parsed.qtwebengine_todo is not None and request.config.webengine:
pytest.xfail("QtWebEngine TODO: {}".format(parsed.qtwebengine_todo))
url_path = 'data/hints/html/{}'.format(test_name)
quteproc.open_path(url_path)
# setup
if not request.config.webengine:
quteproc.set_setting('zoom.text_only', str(zoom_text_only))
quteproc.set_setting('hints.find_implementation', find_implementation)
quteproc.send_cmd(':zoom {}'.format(zoom_level))
# follow hint
quteproc.send_cmd(':hint links normal')
quteproc.wait_for(message='hints: a', category='hints')
quteproc.send_cmd(':follow-hint a')
quteproc.wait_for_load_finished('data/' + parsed.target)
# reset
quteproc.send_cmd(':zoom 100')
if not request.config.webengine:
quteproc.set_setting('zoom.text_only', 'false')
quteproc.set_setting('hints.find_implementation', 'javascript')
def test_word_hints_issue1393(quteproc, tmpdir):
dict_file = tmpdir / 'dict'
dict_file.write(textwrap.dedent("""
alph
beta
gamm
delt
epsi
"""))
targets = [
('words', 'words.txt'),
('smart', 'smart.txt'),
('hinting', 'hinting.txt'),
('alph', 'l33t.txt'),
('beta', 'l33t.txt'),
('gamm', 'l33t.txt'),
('delt', 'l33t.txt'),
('epsi', 'l33t.txt'),
]
quteproc.set_setting('hints.mode', 'word')
quteproc.set_setting('hints.dictionary', str(dict_file))
for hint, target in targets:
quteproc.open_path('data/hints/issue1393.html')
quteproc.send_cmd(':hint')
quteproc.wait_for(message='hints: *', category='hints')
quteproc.send_cmd(':follow-hint {}'.format(hint))
quteproc.wait_for_load_finished('data/{}'.format(target))
| 1 | 19,780 | FWIW `from qutebrowser.browser import hints` is okay - it's just things like `from qutebrowser.browser.hints import HintManager` I try to avoid. | qutebrowser-qutebrowser | py |
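A short Python sketch of the three import styles discussed above (HintManager is the example the reviewer names; running this requires qutebrowser to be importable):

# Module import with a namespace - the style the reviewer is fine with:
from qutebrowser.browser import hints
manager_cls = hints.HintManager

# Full-path module import - what the patch adds; also keeps the namespace:
import qutebrowser.browser.hints
manager_cls = qutebrowser.browser.hints.HintManager

# Direct member import - the style the reviewer tries to avoid:
# from qutebrowser.browser.hints import HintManager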
@@ -11,7 +11,12 @@
namespace HFG;
use HFG\Core\Builder\Header as HeaderBuilder;
+
+$transparent_header_class = '';
+if ( get_theme_mod( 'neve_transparent_header', false ) ) {
+ $transparent_header_class = ' neve-transparent-header';
+}
?>
-<div id="header-grid" class="<?php echo esc_attr( get_builder( HeaderBuilder::BUILDER_NAME )->get_property( 'panel' ) ); ?> site-header">
+<div id="header-grid" class="<?php echo esc_attr( get_builder( HeaderBuilder::BUILDER_NAME )->get_property( 'panel' ) ) . esc_attr( $transparent_header_class ); ?> site-header">
<?php render_builder( HeaderBuilder::BUILDER_NAME ); ?>
</div>
| 1 |
<?php
/**
* Template used for header rendering.
*
* Name: Header Footer Grid
*
* @version 1.0.0
* @package HFG
*/
namespace HFG;
use HFG\Core\Builder\Header as HeaderBuilder;
?>
<div id="header-grid" class="<?php echo esc_attr( get_builder( HeaderBuilder::BUILDER_NAME )->get_property( 'panel' ) ); ?> site-header">
<?php render_builder( HeaderBuilder::BUILDER_NAME ); ?>
</div>
| 1 | 19,234 | @preda-bogdan let's use a filter for the header classes, something like `hfg_header_classes`, where we hook this logic in Neve Pro. | Codeinwp-neve | php
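A PHP sketch of the filter-based approach suggested above. The hook name hfg_header_classes comes from the review comment; apply_filters, get_theme_mod, and esc_attr are standard WordPress APIs, and the rest mirrors the template in the record:

<?php
$header_classes = array(
	get_builder( HeaderBuilder::BUILDER_NAME )->get_property( 'panel' ),
	'site-header',
);
if ( get_theme_mod( 'neve_transparent_header', false ) ) {
	$header_classes[] = 'neve-transparent-header';
}
// Neve Pro (or any other extension) can hook this filter to adjust the classes.
$header_classes = apply_filters( 'hfg_header_classes', $header_classes );
?>
<div id="header-grid" class="<?php echo esc_attr( implode( ' ', $header_classes ) ); ?>">
	<?php render_builder( HeaderBuilder::BUILDER_NAME ); ?>
</div>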
@@ -21,7 +21,7 @@ import (
"time"
"github.com/containernetworking/plugins/pkg/ip"
- "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/client-go/informers"
| 1 |
// Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package noderoute
import (
"fmt"
"net"
"sync"
"time"
"github.com/containernetworking/plugins/pkg/ip"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
"github.com/vmware-tanzu/antrea/pkg/agent/config"
"github.com/vmware-tanzu/antrea/pkg/agent/interfacestore"
"github.com/vmware-tanzu/antrea/pkg/agent/openflow"
"github.com/vmware-tanzu/antrea/pkg/agent/route"
"github.com/vmware-tanzu/antrea/pkg/agent/util"
"github.com/vmware-tanzu/antrea/pkg/ovs/ovsconfig"
)
const (
controllerName = "AntreaAgentNodeRouteController"
// Interval of reprocessing every node.
nodeResyncPeriod = 60 * time.Second
// How long to wait before retrying the processing of a node change
minRetryDelay = 5 * time.Second
maxRetryDelay = 300 * time.Second
// Default number of workers processing a node change
defaultWorkers = 4
ovsExternalIDNodeName = "node-name"
)
// Controller is responsible for setting up necessary IP routes and Openflow entries for inter-node traffic.
type Controller struct {
kubeClient clientset.Interface
ovsBridgeClient ovsconfig.OVSBridgeClient
ofClient openflow.Client
routeClient route.Interface
interfaceStore interfacestore.InterfaceStore
networkConfig *config.NetworkConfig
nodeConfig *config.NodeConfig
nodeInformer coreinformers.NodeInformer
nodeLister corelisters.NodeLister
nodeListerSynced cache.InformerSynced
queue workqueue.RateLimitingInterface
// installedNodes records routes and flows installation states of Nodes.
// The key is the host name of the Node, the value is the podCIDR of the Node.
// A node will be in the map after its flows and routes are installed successfully.
installedNodes *sync.Map
}
// NewNodeRouteController instantiates a new Controller object which will process Node events
// and ensure connectivity between different Nodes.
func NewNodeRouteController(
kubeClient clientset.Interface,
informerFactory informers.SharedInformerFactory,
client openflow.Client,
ovsBridgeClient ovsconfig.OVSBridgeClient,
routeClient route.Interface,
interfaceStore interfacestore.InterfaceStore,
networkConfig *config.NetworkConfig,
nodeConfig *config.NodeConfig) *Controller {
nodeInformer := informerFactory.Core().V1().Nodes()
controller := &Controller{
kubeClient: kubeClient,
ovsBridgeClient: ovsBridgeClient,
ofClient: client,
routeClient: routeClient,
interfaceStore: interfaceStore,
networkConfig: networkConfig,
nodeConfig: nodeConfig,
nodeInformer: nodeInformer,
nodeLister: nodeInformer.Lister(),
nodeListerSynced: nodeInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "noderoute"),
installedNodes: &sync.Map{}}
nodeInformer.Informer().AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: func(cur interface{}) {
controller.enqueueNode(cur)
},
UpdateFunc: func(old, cur interface{}) {
controller.enqueueNode(cur)
},
DeleteFunc: func(old interface{}) {
controller.enqueueNode(old)
},
},
nodeResyncPeriod,
)
return controller
}
// enqueueNode adds an object to the controller work queue
// obj could be an *v1.Node, or a DeletionFinalStateUnknown item.
func (c *Controller) enqueueNode(obj interface{}) {
node, isNode := obj.(*v1.Node)
if !isNode {
deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
klog.Errorf("Received unexpected object: %v", obj)
return
}
node, ok = deletedState.Obj.(*v1.Node)
if !ok {
klog.Errorf("DeletedFinalStateUnknown contains non-Node object: %v", deletedState.Obj)
return
}
}
// Ignore notifications for this Node, no need to establish connectivity to itself.
if node.Name != c.nodeConfig.Name {
c.queue.Add(node.Name)
}
}
// removeStaleGatewayRoutes removes all the gateway routes which no longer correspond to a Node in
// the cluster. If the antrea agent restarts and Nodes have left the cluster, this function will
// take care of removing routes which are no longer valid.
func (c *Controller) removeStaleGatewayRoutes() error {
nodes, err := c.nodeLister.List(labels.Everything())
if err != nil {
return fmt.Errorf("error when listing Nodes: %v", err)
}
// We iterate over all current Nodes, including the Node on which this agent is
// running, so the route to local Pods will be desired as well.
var desiredPodCIDRs []string
for _, node := range nodes {
// PodCIDR is allocated by K8s NodeIpamController asynchronously so it's possible we see a Node
// with no PodCIDR set when it just joins the cluster.
if node.Spec.PodCIDR == "" {
continue
}
desiredPodCIDRs = append(desiredPodCIDRs, node.Spec.PodCIDR)
}
// routeClient will remove orphaned routes whose destinations are not in desiredPodCIDRs.
if err := c.routeClient.Reconcile(desiredPodCIDRs); err != nil {
return err
}
return nil
}
// removeStaleTunnelPorts removes all the tunnel ports which no longer correspond to a Node in the
// cluster. If the antrea agent restarts and Nodes have left the cluster, this function will take
// care of removing tunnel ports which are no longer valid. If the tunnel port configuration has
// changed, the tunnel port will also be deleted (the controller loop will later take care of
// re-creating the port with the correct configuration).
func (c *Controller) removeStaleTunnelPorts() error {
nodes, err := c.nodeLister.List(labels.Everything())
if err != nil {
return fmt.Errorf("error when listing Nodes: %v", err)
}
// desiredInterfaces is the set of interfaces we wish to have, based on the current list of
// Nodes. If a tunnel port corresponds to a valid Node but its configuration is wrong, we
// will not include it in the set.
desiredInterfaces := make(map[string]bool)
// knownInterfaces is the list of interfaces currently in the local cache.
knownInterfaces := c.interfaceStore.GetInterfaceKeysByType(interfacestore.TunnelInterface)
if c.networkConfig.EnableIPSecTunnel {
for _, node := range nodes {
interfaceConfig, found := c.interfaceStore.GetNodeTunnelInterface(node.Name)
if !found {
// Tunnel port not created for this Node, nothing to do.
continue
}
peerNodeIP, err := GetNodeAddr(node)
if err != nil {
klog.Errorf("Failed to retrieve IP address of Node %s: %v", node.Name, err)
continue
}
ifaceID := util.GenerateNodeTunnelInterfaceKey(node.Name)
validConfiguration := interfaceConfig.PSK == c.networkConfig.IPSecPSK &&
interfaceConfig.RemoteIP.Equal(peerNodeIP) &&
interfaceConfig.TunnelInterfaceConfig.Type == c.networkConfig.TunnelType
if validConfiguration {
desiredInterfaces[ifaceID] = true
}
}
}
// remove all ports which are no longer needed or for which the configuration is no longer
// valid.
for _, ifaceID := range knownInterfaces {
if _, found := desiredInterfaces[ifaceID]; found {
// this interface matches an existing Node, nothing to do.
continue
}
interfaceConfig, found := c.interfaceStore.GetInterface(ifaceID)
if !found {
// should not happen, nothing should have concurrent access to the interface
// store for tunnel interfaces.
klog.Errorf("Interface %s can no longer be found in the interface store", ifaceID)
continue
}
if interfaceConfig.InterfaceName == c.nodeConfig.DefaultTunName {
continue
}
if err := c.ovsBridgeClient.DeletePort(interfaceConfig.PortUUID); err != nil {
klog.Errorf("Failed to delete OVS tunnel port %s: %v", interfaceConfig.InterfaceName, err)
} else {
c.interfaceStore.DeleteInterface(interfaceConfig)
}
}
return nil
}
func (c *Controller) reconcile() error {
klog.Infof("Reconciliation for %s", controllerName)
// reconciliation consists of removing stale routes and stale / invalid tunnel ports:
// missing routes and tunnel ports will be added normally by processNextWorkItem, which will
// also take care of updating incorrect routes.
if err := c.removeStaleGatewayRoutes(); err != nil {
return fmt.Errorf("error when removing stale routes: %v", err)
}
if err := c.removeStaleTunnelPorts(); err != nil {
return fmt.Errorf("error when removing stale tunnel ports: %v", err)
}
return nil
}
// Run will create defaultWorkers workers (go routines) which will process the Node events from the
// workqueue.
func (c *Controller) Run(stopCh <-chan struct{}) {
defer c.queue.ShutDown()
// If agent is running policy-only mode, it delegates routing to
// underlying network. Therefore it need not know the routes to
// peer Pod CIDRs.
if c.networkConfig.TrafficEncapMode.IsNetworkPolicyOnly() {
<-stopCh
return
}
klog.Infof("Starting %s", controllerName)
defer klog.Infof("Shutting down %s", controllerName)
klog.Infof("Waiting for caches to sync for %s", controllerName)
if !cache.WaitForCacheSync(stopCh, c.nodeListerSynced) {
klog.Errorf("Unable to sync caches for %s", controllerName)
return
}
klog.Infof("Caches are synced for %s", controllerName)
if err := c.reconcile(); err != nil {
klog.Errorf("Error during %s reconciliation", controllerName)
}
for i := 0; i < defaultWorkers; i++ {
go wait.Until(c.worker, time.Second, stopCh)
}
<-stopCh
}
// worker is a long-running function that will continually call the processNextWorkItem function in
// order to read and process a message on the workqueue.
func (c *Controller) worker() {
for c.processNextWorkItem() {
}
}
// processNextWorkItem processes an item in the "node" work queue, by calling syncNodeRoute after
// casting the item to a string (Node name). If syncNodeRoute returns an error, this function
// handles it by requeueing the item so that it can be processed again later. If syncNodeRoute is
// successful, the Node is removed from the queue until we get notified of a new change. This
// function returns false if and only if the work queue was shutdown (no more items will be
// processed).
func (c *Controller) processNextWorkItem() bool {
obj, quit := c.queue.Get()
if quit {
return false
}
// We call Done here so the workqueue knows we have finished processing this item. We also
// must remember to call Forget if we do not want this work item being re-queued. For
// example, we do not call Forget if a transient error occurs, instead the item is put back
// on the workqueue and attempted again after a back-off period.
defer c.queue.Done(obj)
// We expect strings (Node name) to come off the workqueue.
if key, ok := obj.(string); !ok {
// As the item in the workqueue is actually invalid, we call Forget here else we'd
// go into a loop of attempting to process a work item that is invalid.
// This should not happen: enqueueNode only enqueues strings.
c.queue.Forget(obj)
klog.Errorf("Expected string in work queue but got %#v", obj)
return true
} else if err := c.syncNodeRoute(key); err == nil {
// If no error occurs we Forget this item so it does not get queued again until
// another change happens.
c.queue.Forget(key)
} else {
// Put the item back on the workqueue to handle any transient errors.
c.queue.AddRateLimited(key)
klog.Errorf("Error syncing Node %s, requeuing. Error: %v", key, err)
}
return true
}
// syncNodeRoute manages connectivity to "peer" Node with name nodeName
// If we have not established connectivity to the Node yet:
// * we install the appropriate Linux route:
// Destination Gateway Use Iface
// peerPodCIDR peerGatewayIP localGatewayIface (e.g antrea-gw0)
// * we install the appropriate OpenFlow flows to ensure that all the traffic destined to
// peerPodCIDR goes through the correct L3 tunnel.
// If the Node no longer exists (cannot be retrieved by name from nodeLister) we delete the route
// and OpenFlow flows associated with it.
func (c *Controller) syncNodeRoute(nodeName string) error {
startTime := time.Now()
defer func() {
klog.V(4).Infof("Finished syncing Node Route for %s. (%v)", nodeName, time.Since(startTime))
}()
// The work queue guarantees that concurrent goroutines cannot call syncNodeRoute on the
// same Node, which is required by the InstallNodeFlows / UninstallNodeFlows OF Client
// methods.
node, err := c.nodeLister.Get(nodeName)
if err != nil {
return c.deleteNodeRoute(nodeName)
}
return c.addNodeRoute(nodeName, node)
}
func (c *Controller) deleteNodeRoute(nodeName string) error {
klog.Infof("Deleting routes and flows to Node %s", nodeName)
podCIDR, installed := c.installedNodes.Load(nodeName)
if !installed {
// Route is not added for this Node.
return nil
}
if err := c.routeClient.DeleteRoutes(podCIDR.(*net.IPNet)); err != nil {
return fmt.Errorf("failed to delete the route to Node %s: %v", nodeName, err)
}
if err := c.ofClient.UninstallNodeFlows(nodeName); err != nil {
return fmt.Errorf("failed to uninstall flows to Node %s: %v", nodeName, err)
}
c.installedNodes.Delete(nodeName)
if c.networkConfig.EnableIPSecTunnel {
interfaceConfig, ok := c.interfaceStore.GetNodeTunnelInterface(nodeName)
if !ok {
// Tunnel port not created for this Node.
return nil
}
if err := c.ovsBridgeClient.DeletePort(interfaceConfig.PortUUID); err != nil {
klog.Errorf("Failed to delete OVS tunnel port %s for Node %s: %v",
interfaceConfig.InterfaceName, nodeName, err)
return fmt.Errorf("failed to delete OVS tunnel port for Node %s", nodeName)
}
c.interfaceStore.DeleteInterface(interfaceConfig)
}
return nil
}
func (c *Controller) addNodeRoute(nodeName string, node *v1.Node) error {
if _, installed := c.installedNodes.Load(nodeName); installed {
// Route is already added for this Node.
return nil
}
klog.Infof("Adding routes and flows to Node %s, podCIDR: %s, addresses: %v",
nodeName, node.Spec.PodCIDR, node.Status.Addresses)
if node.Spec.PodCIDR == "" {
klog.Errorf("PodCIDR is empty for Node %s", nodeName)
// Does not help to return an error and trigger controller retries.
return nil
}
peerPodCIDRAddr, peerPodCIDR, err := net.ParseCIDR(node.Spec.PodCIDR)
if err != nil {
klog.Errorf("Failed to parse PodCIDR %s for Node %s", node.Spec.PodCIDR, nodeName)
return nil
}
peerNodeIP, err := GetNodeAddr(node)
if err != nil {
klog.Errorf("Failed to retrieve IP address of Node %s: %v", nodeName, err)
return nil
}
peerGatewayIP := ip.NextIP(peerPodCIDRAddr)
ipsecTunOFPort := int32(0)
if c.networkConfig.EnableIPSecTunnel {
// Create a separate tunnel port for the Node, as OVS IPSec monitor needs to
// read PSK and remote IP from the Node's tunnel interface to create IPSec
// security policies.
if ipsecTunOFPort, err = c.createIPSecTunnelPort(nodeName, peerNodeIP); err != nil {
return err
}
}
err = c.ofClient.InstallNodeFlows(
nodeName,
c.nodeConfig.GatewayConfig.MAC,
*peerPodCIDR,
peerGatewayIP,
peerNodeIP,
config.DefaultTunOFPort,
uint32(ipsecTunOFPort))
if err != nil {
return fmt.Errorf("failed to install flows to Node %s: %v", nodeName, err)
}
if err := c.routeClient.AddRoutes(peerPodCIDR, peerNodeIP, peerGatewayIP); err != nil {
return err
}
c.installedNodes.Store(nodeName, peerPodCIDR)
return err
}
// createIPSecTunnelPort creates an IPSec tunnel port for the remote Node if the
// tunnel does not exist, and returns the ofport number.
func (c *Controller) createIPSecTunnelPort(nodeName string, nodeIP net.IP) (int32, error) {
interfaceConfig, ok := c.interfaceStore.GetNodeTunnelInterface(nodeName)
if ok {
// TODO: check if Node IP, PSK, or tunnel type changes. This can
// happen if removeStaleTunnelPorts fails to remove a "stale"
// tunnel port for which the configuration has changed.
if interfaceConfig.OFPort != 0 {
return interfaceConfig.OFPort, nil
}
} else {
portName := util.GenerateNodeTunnelInterfaceName(nodeName)
ovsExternalIDs := map[string]interface{}{ovsExternalIDNodeName: nodeName}
portUUID, err := c.ovsBridgeClient.CreateTunnelPortExt(
portName,
c.networkConfig.TunnelType,
0, // ofPortRequest - let OVS allocate OFPort number.
"",
nodeIP.String(),
c.networkConfig.IPSecPSK,
ovsExternalIDs)
if err != nil {
return 0, fmt.Errorf("failed to create IPSec tunnel port for Node %s", nodeName)
}
klog.Infof("Created IPSec tunnel port %s for Node %s", portName, nodeName)
ovsPortConfig := &interfacestore.OVSPortConfig{PortUUID: portUUID}
interfaceConfig = interfacestore.NewIPSecTunnelInterface(
portName,
c.networkConfig.TunnelType,
nodeName,
nodeIP,
c.networkConfig.IPSecPSK)
interfaceConfig.OVSPortConfig = ovsPortConfig
c.interfaceStore.AddInterface(interfaceConfig)
}
// GetOFPort will wait for up to 1 second for OVSDB to report the OFPort number.
ofPort, err := c.ovsBridgeClient.GetOFPort(interfaceConfig.InterfaceName)
if err != nil {
// Could be a temporary OVSDB connection failure or timeout.
// Let NodeRouteController retry at errors.
return 0, fmt.Errorf("failed to get of_port of IPSec tunnel port for Node %s", nodeName)
}
interfaceConfig.OFPort = ofPort
return ofPort, nil
}
// ParseTunnelInterfaceConfig initializes and returns an InterfaceConfig struct
// for a tunnel interface. It reads tunnel type, remote IP, IPSec PSK from the
// OVS interface options, and NodeName from the OVS port external_ids.
// nil is returned, if the OVS port and interface configurations are not valid
// for a tunnel interface.
func ParseTunnelInterfaceConfig(
portData *ovsconfig.OVSPortData,
portConfig *interfacestore.OVSPortConfig) *interfacestore.InterfaceConfig {
if portData.Options == nil {
klog.V(2).Infof("OVS port %s has no options", portData.Name)
return nil
}
remoteIP, localIP, psk := ovsconfig.ParseTunnelInterfaceOptions(portData)
var interfaceConfig *interfacestore.InterfaceConfig
var nodeName string
if portData.ExternalIDs != nil {
nodeName = portData.ExternalIDs[ovsExternalIDNodeName]
}
if psk != "" {
interfaceConfig = interfacestore.NewIPSecTunnelInterface(
portData.Name,
ovsconfig.TunnelType(portData.IFType),
nodeName,
remoteIP,
psk)
} else {
interfaceConfig = interfacestore.NewTunnelInterface(portData.Name, ovsconfig.TunnelType(portData.IFType), localIP)
}
interfaceConfig.OVSPortConfig = portConfig
return interfaceConfig
}
// GetNodeAddr gets the available IP address of a Node. GetNodeAddr will first try to get the
// NodeInternalIP, then try to get the NodeExternalIP.
func GetNodeAddr(node *v1.Node) (net.IP, error) {
addresses := make(map[v1.NodeAddressType]string)
for _, addr := range node.Status.Addresses {
addresses[addr.Type] = addr.Address
}
var ipAddrStr string
if internalIP, ok := addresses[v1.NodeInternalIP]; ok {
ipAddrStr = internalIP
} else if externalIP, ok := addresses[v1.NodeExternalIP]; ok {
ipAddrStr = externalIP
} else {
return nil, fmt.Errorf("node %s has neither external ip nor internal ip", node.Name)
}
ipAddr := net.ParseIP(ipAddrStr)
if ipAddr == nil {
return nil, fmt.Errorf("<%v> is not a valid ip address", ipAddrStr)
}
return ipAddr, nil
}
| 1 | 22,207 | Out of curiosity.. is there a rule to determine the package name as prevDirectory+currentDirectory. | antrea-io-antrea | go |
@@ -53,7 +53,9 @@ get_xmm_vals(priv_mcontext_t *mc)
#ifdef X86
if (preserve_xmm_caller_saved()) {
ASSERT(proc_has_feature(FEATURE_SSE));
- if (YMM_ENABLED())
+ if (ZMM_ENABLED())
+ get_zmm_caller_saved(&mc->simd[0]);
+ else if (YMM_ENABLED())
get_ymm_caller_saved(&mc->simd[0]);
else
        get_xmm_caller_saved(&mc->simd[0]);
| 1 |
/* **********************************************************
* Copyright (c) 2013-2019 Google, Inc. All rights reserved.
* Copyright (c) 2001-2010 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2001 Hewlett-Packard Company */
/*
* x86_code.c - auxiliary C routines to assembly routines in x86.asm
*/
#include "../globals.h"
#include "../fragment.h"
#include "../dispatch.h"
#include "../monitor.h"
#include "arch.h"
/* Helper routine for the x86.asm PUSH_DR_MCONTEXT, to fill in the xmm0-5 values
* (or all for linux) (or ymm) only if necessary.
*/
void
get_xmm_vals(priv_mcontext_t *mc)
{
#ifdef X86
if (preserve_xmm_caller_saved()) {
ASSERT(proc_has_feature(FEATURE_SSE));
if (YMM_ENABLED())
get_ymm_caller_saved(&mc->simd[0]);
else
get_xmm_caller_saved(&mc->simd[0]);
}
#elif defined(ARM)
/* FIXME i#1551: no xmm but SIMD regs on ARM */
ASSERT_NOT_REACHED();
#endif
}
/* Just calls dynamo_thread_under_dynamo. We used to initialize dcontext here,
* but that would end up initializing it twice.
*/
static void
thread_starting(dcontext_t *dcontext)
{
ASSERT(dcontext->initialized);
dynamo_thread_under_dynamo(dcontext);
}
/* Initializes a dcontext with the supplied state and calls d_r_dispatch */
void
dynamo_start(priv_mcontext_t *mc)
{
priv_mcontext_t *mcontext;
dcontext_t *dcontext = get_thread_private_dcontext();
if (dcontext == NULL) {
/* This may be an initialized thread that is currently native (which results
* in a NULL dcontext via i#2089).
*/
os_thread_re_take_over();
dcontext = get_thread_private_dcontext();
}
if (dcontext == NULL) {
/* If dr_app_start is called from a different thread than the one
* that called dr_app_setup, we'll need to initialize this thread here.
*/
dcontext = os_thread_take_over_secondary(mc);
ASSERT(dcontext != NULL);
ASSERT(dr_api_entry);
}
/* Signal other threads for take over.
* This routine now calls dynamo_thread_under_dynamo() for this thread as well.
*/
dynamorio_take_over_threads(dcontext);
/* Set return address */
mc->pc = canonicalize_pc_target(dcontext, mc->pc);
dcontext->next_tag = mc->pc;
ASSERT(dcontext->next_tag != NULL);
/* transfer exec state to mcontext */
mcontext = get_mcontext(dcontext);
*mcontext = *mc;
/* clear pc */
mcontext->pc = 0;
DOLOG(2, LOG_TOP, {
byte *cur_esp;
GET_STACK_PTR(cur_esp);
LOG(THREAD, LOG_TOP, 2,
"%s: next_tag=" PFX ", cur xsp=" PFX ", mc->xsp=" PFX "\n", __FUNCTION__,
dcontext->next_tag, cur_esp, mc->xsp);
});
/* Swap stacks so d_r_dispatch is invoked outside the application. */
call_switch_stack(dcontext, dcontext->dstack, (void (*)(void *))d_r_dispatch,
NULL /*not on d_r_initstack*/, true /*return on error*/);
/* In release builds, this will simply return and continue native
* execution. That's better than calling unexpected_return() which
* goes into an infinite loop.
*/
ASSERT_NOT_REACHED();
}
/* auto_setup: called by dynamo_auto_start for non-early follow children.
* This routine itself would be dynamo_auto_start except that we want
* our own go-native path separate from load_dynamo (we could still have
* this by dynamo_auto_start and jump to an asm routine for go-native,
* but keeping the entry in asm is more flexible).
* Assumptions: The saved priv_mcontext_t for the start of the app is on
* the stack, followed by a pointer to a region of memory to free
* (which can be NULL) and its size. If we decide not to take over
* this process, this routine returns; otherwise it does not return.
*/
void
auto_setup(ptr_uint_t appstack)
{
dcontext_t *dcontext;
priv_mcontext_t *mcontext;
byte *pappstack;
byte *addr;
pappstack = (byte *)appstack;
/* Our parameter points at a priv_mcontext_t struct, beyond which are
* two other fields:
pappstack --> +0 priv_mcontext_t struct
+x addr of memory to free (can be NULL)
+y sizeof memory to free
*/
automatic_startup = true;
/* we should control all threads */
control_all_threads = true;
dynamorio_app_init();
if (INTERNAL_OPTION(nullcalls)) {
dynamorio_app_exit();
return;
}
LOG(GLOBAL, LOG_TOP, 1, "taking over via late injection in %s\n", __FUNCTION__);
/* For apps injected using follow_children, this is where control should be
* allowed to go native for hotp_only & thin_client.
*/
if (RUNNING_WITHOUT_CODE_CACHE())
return;
/* useful to debug fork-following */
DOLOG(4, LOG_TOP, { SYSLOG_INTERNAL_INFO("dynamo auto start"); });
dcontext = get_thread_private_dcontext();
ASSERT(dcontext);
#ifdef WINDOWS
LOG(THREAD, LOG_INTERP, 2, "thread_starting: interpreting thread " TIDFMT "\n",
d_r_get_thread_id());
#endif
/* Despite what *should* happen, there can be other threads if a statically
* imported lib created one in its DllMain (Cygwin does this), or if a
* thread was injected from the outside. We go ahead and check for and
* take over any other threads at this time. Xref i#1304.
* This routine now calls dynamo_thread_under_dynamo() for this thread as well.
* XXX i#1305: we should really suspend all these other threads for DR init.
*/
dynamorio_take_over_threads(dcontext);
/* copy over the app state into mcontext */
mcontext = get_mcontext(dcontext);
*mcontext = *((priv_mcontext_t *)pappstack);
pappstack += sizeof(priv_mcontext_t);
dcontext->next_tag = mcontext->pc;
ASSERT(dcontext->next_tag != NULL);
/* free memory */
addr = (byte *)*((byte **)pappstack);
pappstack += sizeof(byte *);
if (addr != NULL) {
size_t size = *((size_t *)pappstack);
heap_error_code_t error_code;
/* since this is rx it was added to our exec list, remove now
* ASSUMPTION: no fragments in the region so no need to flush
*/
/* flushing would align for us but we have to do it ourselves here */
size_t alloc_size = ALIGN_FORWARD(size, PAGE_SIZE);
DODEBUG({
if (SHARED_FRAGMENTS_ENABLED())
ASSERT(!thread_vm_area_overlap(GLOBAL_DCONTEXT, addr, addr + alloc_size));
});
ASSERT(!thread_vm_area_overlap(dcontext, addr, addr + alloc_size));
remove_executable_region(addr, alloc_size, false /*do not have lock*/);
os_heap_free(addr, size, &error_code);
}
/* FIXME : for transparency should we zero out the appstack where we
* stored injection information? would be safe to do so here */
LOG(THREAD, LOG_INTERP, 1, "DynamoRIO auto start at 0x%08x\n", dcontext->next_tag);
DOLOG(LOG_INTERP, 2, { dump_mcontext(mcontext, THREAD, DUMP_NOT_XML); });
/* We didn't swap the stack ptr at loader init b/c we were on the app stack
* then. We do so now.
*/
IF_WINDOWS(os_swap_context(dcontext, false /*to priv*/, DR_STATE_STACK_BOUNDS));
call_switch_stack(dcontext, dcontext->dstack, (void (*)(void *))d_r_dispatch,
NULL /*not on d_r_initstack*/, false /*shouldn't return*/);
ASSERT_NOT_REACHED();
}
/* Get the retstack index from the app stack and reset the mcontext to the
* original app state. The retstub saved it like this in x86.asm:
* push $retidx
* jmp back_from_native
* back_from_native:
* push mcontext
* call return_from_native(mc)
*/
int
native_get_retstack_idx(priv_mcontext_t *mc)
{
int retidx = (int)*(ptr_int_t *)mc->xsp;
mc->xsp += sizeof(void *); /* Undo the push. */
return retidx;
}
/****************************************************************************/
#ifdef UNIX
/* Called by new_thread_dynamo_start to initialize the dcontext
* structure for the current thread and start executing at the
* the pc stored in the clone_record_t * stored at *mc->xsp.
* Assumes that it is called on the dstack.
*
* CAUTION: don't add a lot of stack variables in this routine or call a lot
* of functions before get_clone_record() because get_clone_record()
* makes assumptions about the usage of stack being less than a page.
*/
void
new_thread_setup(priv_mcontext_t *mc)
{
dcontext_t *dcontext;
void *crec;
int rc;
/* this is where a new thread first touches other than the dstack,
* so we "enter" DR here
*/
ENTERING_DR();
/* i#149/PR 403015: clone_record_t is passed via dstack. */
crec = get_clone_record(mc->xsp);
LOG(GLOBAL, LOG_INTERP, 1,
"new_thread_setup: thread " TIDFMT ", dstack " PFX " clone record " PFX "\n",
d_r_get_thread_id(), get_clone_record_dstack(crec), crec);
/* As we used dstack as app thread stack to pass clone record, we now need
* to switch back to the real app thread stack before continuing.
*/
mc->xsp = get_clone_record_app_xsp(crec);
/* clear xax/r0 (was used as scratch in gencode, and app expects 0) */
mc->IF_X86_ELSE(xax, r0) = 0;
/* clear pc */
mc->pc = 0;
# ifdef AARCHXX
/* set the stolen register's app value */
set_stolen_reg_val(mc, get_clone_record_stolen_value(crec));
/* set the thread register if necessary */
set_thread_register_from_clone_record(crec);
# endif
rc = dynamo_thread_init(get_clone_record_dstack(crec), mc,
crec _IF_CLIENT_INTERFACE(false));
ASSERT(rc != -1); /* this better be a new thread */
dcontext = get_thread_private_dcontext();
ASSERT(dcontext != NULL);
# ifdef AARCHXX
set_app_lib_tls_base_from_clone_record(dcontext, crec);
# endif
# ifdef ARM
dr_set_isa_mode(dcontext, get_clone_record_isa_mode(crec), NULL);
# endif
/* Restore the original stack parameter to the syscall, which we clobbered
* in create_clone_record(). Some apps examine it post-syscall (i#3171).
*/
restore_clone_param_from_clone_record(dcontext, crec);
thread_starting(dcontext);
call_switch_stack(dcontext, dcontext->dstack, (void (*)(void *))d_r_dispatch,
NULL /*not on d_r_initstack*/, false /*shouldn't return*/);
ASSERT_NOT_REACHED();
}
# ifdef MACOS
/* Called from new_bsdthread_intercept for targeting a bsd thread user function.
* new_bsdthread_intercept stored the arg to the user thread func in
* mc->xax. We're on the app stack -- but this is a temporary solution.
* i#1403 covers intercepting in an earlier and better manner.
*/
void
new_bsdthread_setup(priv_mcontext_t *mc)
{
dcontext_t *dcontext;
void *crec, *func_arg;
int rc;
/* this is where a new thread first touches other than the dstack,
* so we "enter" DR here
*/
ENTERING_DR();
crec = (void *)mc->xax; /* placed there by new_bsdthread_intercept */
func_arg = (void *)get_clone_record_thread_arg(crec);
LOG(GLOBAL, LOG_INTERP, 1,
"new_thread_setup: thread " TIDFMT ", dstack " PFX " clone record " PFX "\n",
d_r_get_thread_id(), get_clone_record_dstack(crec), crec);
rc = dynamo_thread_init(get_clone_record_dstack(crec), mc,
crec _IF_CLIENT_INTERFACE(false));
ASSERT(rc != -1); /* this better be a new thread */
dcontext = get_thread_private_dcontext();
ASSERT(dcontext != NULL);
crec = NULL; /* now freed */
thread_starting(dcontext);
/* We assume that the only state that matters is the arg to the function. */
# ifdef X64
mc->rdi = (reg_t)func_arg;
# else
*(reg_t *)(mc->xsp + sizeof(reg_t)) = (reg_t)func_arg;
# endif
call_switch_stack(dcontext, dcontext->dstack, (void (*)(void *))d_r_dispatch,
NULL /*not on d_r_initstack*/, false /*shouldn't return*/);
ASSERT_NOT_REACHED();
}
# endif /* MACOS */
#endif /* UNIX */
#ifdef WINDOWS
/* Called by nt_continue_dynamo_start when we're about to execute
* the continuation of an exception or APC: after NtContinue.
* next_pc is bogus, the real next pc has been stored in dcontext->next_tag.
* This routine is also used by NtSetContextThread.
*/
void
nt_continue_setup(priv_mcontext_t *mc)
{
app_pc next_pc;
dcontext_t *dcontext;
ENTERING_DR();
dcontext = get_thread_private_dcontext();
ASSERT(dcontext != NULL);
SELF_PROTECT_LOCAL(dcontext, WRITABLE);
/* save target in temp var during init of dcontext */
/* we have to use a different slot since next_tag ends up holding the do_syscall
* entry when entered from d_r_dispatch
*/
if (dcontext->asynch_target != NULL)
next_pc = dcontext->asynch_target;
else {
ASSERT(DYNAMO_OPTION(shared_syscalls));
next_pc = dcontext->next_tag;
}
LOG(THREAD, LOG_ASYNCH, 2, "nt_continue_setup: target is " PFX "\n", next_pc);
initialize_dynamo_context(dcontext);
dcontext->next_tag = next_pc;
ASSERT(dcontext->next_tag != NULL);
set_last_exit(dcontext, (linkstub_t *)get_asynch_linkstub());
dcontext->whereami = DR_WHERE_TRAMPOLINE;
*get_mcontext(dcontext) = *mc;
/* clear pc */
get_mcontext(dcontext)->pc = 0;
/* We came straight from fcache, so swap to priv now (i#25) */
IF_WINDOWS(swap_peb_pointer(dcontext, true /*to priv*/));
call_switch_stack(dcontext, dcontext->dstack, (void (*)(void *))d_r_dispatch,
NULL /*not on d_r_initstack*/, false /*shouldn't return*/);
ASSERT_NOT_REACHED();
}
#endif /* WINDOWS */
/****************************************************************************/
/* C-level wrapper around the asm implementation. Shuffles arguments and
* increments stats.
* We used to use try/except on Linux and NtReadVirtualMemory on Windows, but
* this is faster than both.
*/
bool
safe_read_fast(const void *base, size_t size, void *out_buf, size_t *bytes_read)
{
byte *stop_pc;
size_t nbytes;
stop_pc = safe_read_asm(out_buf, base, size);
nbytes = stop_pc - (byte *)base;
if (bytes_read != NULL)
*bytes_read = nbytes;
return (nbytes == size);
}
bool
is_safe_read_pc(app_pc pc)
{
return (pc == (app_pc)safe_read_asm_pre || pc == (app_pc)safe_read_asm_mid ||
pc == (app_pc)safe_read_asm_post);
}
app_pc
safe_read_resume_pc(void)
{
return (app_pc)&safe_read_asm_recover;
}
| 1 | 18,006 | It seems this needs to check the lazy cxt switching flag. It is used on the initial thread for early injection and in other places where the lazy switch should apply. | DynamoRIO-dynamorio | c |
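A C sketch of the control flow the reviewer asks for: take the ZMM path only when AVX-512 state is enabled and not lazily skipped. The predicate lazy_simd_switch_active() is hypothetical (DynamoRIO's actual lazy context-switch flag may be spelled differently); the other names come from the record:

static void
get_simd_vals_sketch(priv_mcontext_t *mc)
{
    /* Hypothetical lazy-switch check, per the review comment. */
    if (ZMM_ENABLED() && !lazy_simd_switch_active())
        get_zmm_caller_saved(&mc->simd[0]);
    else if (YMM_ENABLED())
        get_ymm_caller_saved(&mc->simd[0]);
    else
        get_xmm_caller_saved(&mc->simd[0]);
}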
@@ -21,9 +21,10 @@ import { isValidPropertyID } from './validation';
/**
* Parses the bits of a valid property ID into an object of its components.
*
+ * See: https://support.google.com/analytics/answer/7372977.
+ *
* @since 1.8.0
*
- * @see {@link https://support.google.com/analytics/answer/7372977}
* @param {string} propertyID Property ID to parse.
 * @return {(Object|boolean)} Object of property ID components if valid, otherwise false.
 */
| 1 |
/**
* Property ID parser.
*
* Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { isValidPropertyID } from './validation';
/**
* Parses the bits of a valid property ID into an object of its components.
*
* @since 1.8.0
*
* @see {@link https://support.google.com/analytics/answer/7372977}
* @param {string} propertyID Property ID to parse.
 * @return {(Object|boolean)} Object of property ID components if valid, otherwise false.
*/
export default function parsePropertyID( propertyID ) {
if ( ! isValidPropertyID( propertyID ) ) {
return false;
}
const [ , accountID, number ] = propertyID.match( /^UA-(\d+)-(\d+)/ );
return {
accountID,
propertyID,
number,
};
}
| 1 | 32,160 | Why not use the JSDoc annotation as it did before? | google-site-kit-wp | js
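A usage sketch for parsePropertyID, derived directly from the regex and return shape in the record (the import path is illustrative):

import parsePropertyID from './parse-property-id';

parsePropertyID( 'UA-12345-1' );
// => { accountID: '12345', propertyID: 'UA-12345-1', number: '1' }

parsePropertyID( 'not-a-property-id' );
// => false, because isValidPropertyID() rejects it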
@@ -108,7 +108,11 @@ public class ParseUtil {
String version = packageImplementationVersion;
String edition = ProcessEngineDetails.EDITION_COMMUNITY;
- if (version != null && version.contains("-ee")) {
+ if (version == null) {
+ version = ProductPropertiesUtil.getProductVersion();
+ }
+
+ if (version.contains("-ee")) {
edition = ProcessEngineDetails.EDITION_ENTERPRISE;
if (trimSuffixEE) {
version = version.replace("-ee", ""); // trim `-ee` suffix | 1 | /*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. Camunda licenses this file to you under the Apache License,
* Version 2.0; you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.engine.impl.util;
import java.util.Arrays;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.camunda.bpm.engine.ProcessEngineException;
import org.camunda.bpm.engine.exception.NotValidException;
import org.camunda.bpm.engine.impl.ProcessEngineLogger;
import org.camunda.bpm.engine.impl.bpmn.parser.FailedJobRetryConfiguration;
import org.camunda.bpm.engine.impl.calendar.DurationHelper;
import org.camunda.bpm.engine.impl.context.Context;
import org.camunda.bpm.engine.impl.el.Expression;
import org.camunda.bpm.engine.impl.el.ExpressionManager;
import org.camunda.bpm.engine.impl.telemetry.dto.Jdk;
public class ParseUtil {
private static final EngineUtilLogger LOG = ProcessEngineLogger.UTIL_LOGGER;
protected static final Pattern REGEX_TTL_ISO = Pattern.compile("^P(\\d+)D$");
/**
* Parse History Time To Live in ISO-8601 format to integer and set into the given entity
* @param historyTimeToLive
*/
public static Integer parseHistoryTimeToLive(String historyTimeToLive) {
Integer timeToLive = null;
if (historyTimeToLive != null && !historyTimeToLive.isEmpty()) {
Matcher matISO = REGEX_TTL_ISO.matcher(historyTimeToLive);
if (matISO.find()) {
historyTimeToLive = matISO.group(1);
}
timeToLive = parseIntegerAttribute("historyTimeToLive", historyTimeToLive);
}
if (timeToLive != null && timeToLive < 0) {
throw new NotValidException("Cannot parse historyTimeToLive: negative value is not allowed");
}
return timeToLive;
}
protected static Integer parseIntegerAttribute(String attributeName, String text) {
Integer result = null;
if (text != null && !text.isEmpty()) {
try {
result = Integer.parseInt(text);
}
catch (NumberFormatException e) {
throw new ProcessEngineException("Cannot parse " + attributeName + ": " + e.getMessage());
}
}
return result;
}
public static FailedJobRetryConfiguration parseRetryIntervals(String retryIntervals) {
if (retryIntervals != null && !retryIntervals.isEmpty()) {
if (StringUtil.isExpression(retryIntervals)) {
ExpressionManager expressionManager = Context.getProcessEngineConfiguration().getExpressionManager();
Expression expression = expressionManager.createExpression(retryIntervals);
return new FailedJobRetryConfiguration(expression);
}
String[] intervals = StringUtil.split(retryIntervals, ",");
int retries = intervals.length + 1;
if (intervals.length == 1) {
try {
DurationHelper durationHelper = new DurationHelper(intervals[0]);
if (durationHelper.isRepeat()) {
retries = durationHelper.getTimes();
}
} catch (Exception e) {
LOG.logParsingRetryIntervals(intervals[0], e);
return null;
}
}
return new FailedJobRetryConfiguration(retries, Arrays.asList(intervals));
} else {
return null;
}
}
public static ProcessEngineDetails parseProcessEngineVersion(String packageImplementationVersion, boolean trimSuffixEE) {
String version = packageImplementationVersion;
String edition = ProcessEngineDetails.EDITION_COMMUNITY;
if (version != null && version.contains("-ee")) {
edition = ProcessEngineDetails.EDITION_ENTERPRISE;
if (trimSuffixEE) {
version = version.replace("-ee", ""); // trim `-ee` suffix
}
}
return new ProcessEngineDetails(version, edition);
}
public static String parseServerVendor(String applicationServerInfo) {
String serverVendor = "";
Pattern pattern = Pattern.compile("[\\sA-Za-z]+");
Matcher matcher = pattern.matcher(applicationServerInfo);
if (matcher.find()) {
try {
serverVendor = matcher.group();
} catch (IllegalStateException ignored) {
}
serverVendor = serverVendor.trim();
if (serverVendor.contains("WildFly")) {
return "WildFly";
}
}
return serverVendor;
}
public static Jdk parseJdkDetails() {
String jdkVendor = System.getProperty("java.vm.vendor");
if (jdkVendor != null && jdkVendor.contains("Oracle")
&& System.getProperty("java.vm.name").contains("OpenJDK")) {
jdkVendor = "OpenJDK";
}
String jdkVersion = System.getProperty("java.version");
Jdk jdk = new Jdk(jdkVersion, jdkVendor);
return jdk;
}
}
| 1 | 11,663 | Why don't we drop the packageImplementationVersion in general? That way the version will always be fetched from the properties file, and it will be consistent. | camunda-camunda-bpm-platform | java
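A Java sketch of the reviewer's proposal above: drop the package implementation version entirely and always resolve the version from the product properties file. ProductPropertiesUtil.getProductVersion() is taken from the patch; everything else mirrors the existing method:

public static ProcessEngineDetails parseProcessEngineVersion(boolean trimSuffixEE) {
  // Single source of truth: the properties file shipped with the product.
  String version = ProductPropertiesUtil.getProductVersion();
  String edition = ProcessEngineDetails.EDITION_COMMUNITY;
  if (version.contains("-ee")) {
    edition = ProcessEngineDetails.EDITION_ENTERPRISE;
    if (trimSuffixEE) {
      version = version.replace("-ee", ""); // trim `-ee` suffix
    }
  }
  return new ProcessEngineDetails(version, edition);
}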
@@ -1076,6 +1076,9 @@ class CppGenerator : public BaseGenerator {
code_ += " };";
}
+ // Fields with union type, collected while generating the accessors below.
+ std::vector<const FieldDef*> union_fields;
+
// Generate the accessors.
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) { | 1 | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// independent from idl_parser, since this code is not needed for most clients
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/idl.h"
#include "flatbuffers/util.h"
#include "flatbuffers/code_generators.h"
namespace flatbuffers {
static std::string GeneratedFileName(const std::string &path,
const std::string &file_name) {
return path + file_name + "_generated.h";
}
namespace cpp {
class CppGenerator : public BaseGenerator {
public:
CppGenerator(const Parser &parser, const std::string &path,
const std::string &file_name)
: BaseGenerator(parser, path, file_name, "", "::"),
cur_name_space_(nullptr) {}
std::string GenIncludeGuard() const {
// Generate include guard.
std::string guard = file_name_;
// Remove any non-alpha-numeric characters that may appear in a filename.
struct IsAlnum {
bool operator()(char c) { return !isalnum(c); }
};
guard.erase(std::remove_if(guard.begin(), guard.end(), IsAlnum()),
guard.end());
guard = "FLATBUFFERS_GENERATED_" + guard;
guard += "_";
// For further uniqueness, also add the namespace.
auto name_space = parser_.namespaces_.back();
for (auto it = name_space->components.begin();
it != name_space->components.end(); ++it) {
guard += *it + "_";
}
guard += "H_";
std::transform(guard.begin(), guard.end(), guard.begin(), ::toupper);
return guard;
}
void GenIncludeDependencies() {
int num_includes = 0;
for (auto it = parser_.native_included_files_.begin();
it != parser_.native_included_files_.end(); ++it) {
code_ += "#include \"" + *it + "\"";
num_includes++;
}
for (auto it = parser_.included_files_.begin();
it != parser_.included_files_.end(); ++it) {
const auto basename =
flatbuffers::StripPath(flatbuffers::StripExtension(it->first));
if (basename != file_name_) {
code_ += "#include \"" + parser_.opts.include_prefix + basename +
"_generated.h\"";
num_includes++;
}
}
if (num_includes) code_ += "";
}
// Iterate through all definitions we haven't generate code for (enums,
// structs, and tables) and output them to a single file.
bool generate() {
if (IsEverythingGenerated()) return true;
code_.Clear();
code_ += "// " + std::string(FlatBuffersGeneratedWarning());
const auto include_guard = GenIncludeGuard();
code_ += "#ifndef " + include_guard;
code_ += "#define " + include_guard;
code_ += "";
code_ += "#include \"flatbuffers/flatbuffers.h\"";
code_ += "";
if (parser_.opts.include_dependence_headers) {
GenIncludeDependencies();
}
assert(!cur_name_space_);
// Generate forward declarations for all structs/tables, since they may
// have circular references.
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
code_ += "struct " + struct_def.name + ";";
if (parser_.opts.generate_object_based_api && !struct_def.fixed) {
code_ += "struct " + NativeName(struct_def.name) + ";";
}
code_ += "";
}
}
// Generate code for all the enum declarations.
for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end();
++it) {
const auto &enum_def = **it;
if (!enum_def.generated) {
SetNameSpace(enum_def.defined_namespace);
GenEnum(enum_def);
}
}
// Generate code for all structs, then all tables.
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (struct_def.fixed && !struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenStruct(struct_def);
}
}
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.fixed && !struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenTable(struct_def);
}
}
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.fixed && !struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenTablePost(struct_def);
}
}
// Generate code for union verifiers.
for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end();
++it) {
const auto &enum_def = **it;
if (enum_def.is_union && !enum_def.generated) {
SetNameSpace(enum_def.defined_namespace);
GenUnionPost(enum_def);
}
}
// Generate convenient global helper functions:
if (parser_.root_struct_def_) {
auto &struct_def = *parser_.root_struct_def_;
SetNameSpace(struct_def.defined_namespace);
const auto &name = struct_def.name;
const auto qualified_name =
parser_.namespaces_.back()->GetFullyQualifiedName(name);
const auto cpp_name = TranslateNameSpace(qualified_name);
code_.SetValue("STRUCT_NAME", name);
code_.SetValue("CPP_NAME", cpp_name);
// The root datatype accessor:
code_ += "inline \\";
code_ += "const {{CPP_NAME}} *Get{{STRUCT_NAME}}(const void *buf) {";
code_ += " return flatbuffers::GetRoot<{{CPP_NAME}}>(buf);";
code_ += "}";
code_ += "";
if (parser_.opts.mutable_buffer) {
code_ += "inline \\";
code_ += "{{STRUCT_NAME}} *GetMutable{{STRUCT_NAME}}(void *buf) {";
code_ += " return flatbuffers::GetMutableRoot<{{STRUCT_NAME}}>(buf);";
code_ += "}";
code_ += "";
}
if (parser_.file_identifier_.length()) {
// Return the identifier
code_ += "inline const char *{{STRUCT_NAME}}Identifier() {";
code_ += " return \"" + parser_.file_identifier_ + "\";";
code_ += "}";
code_ += "";
// Check if a buffer has the identifier.
code_ += "inline \\";
code_ += "bool {{STRUCT_NAME}}BufferHasIdentifier(const void *buf) {";
code_ += " return flatbuffers::BufferHasIdentifier(";
code_ += " buf, {{STRUCT_NAME}}Identifier());";
code_ += "}";
code_ += "";
}
// The root verifier.
if (parser_.file_identifier_.length()) {
code_.SetValue("ID", name + "Identifier()");
} else {
code_.SetValue("ID", "nullptr");
}
code_ += "inline bool Verify{{STRUCT_NAME}}Buffer(";
code_ += " flatbuffers::Verifier &verifier) {";
code_ += " return verifier.VerifyBuffer<{{CPP_NAME}}>({{ID}});";
code_ += "}";
code_ += "";
if (parser_.file_extension_.length()) {
// Return the extension
code_ += "inline const char *{{STRUCT_NAME}}Extension() {";
code_ += " return \"" + parser_.file_extension_ + "\";";
code_ += "}";
code_ += "";
}
// Finish a buffer with a given root object:
code_ += "inline void Finish{{STRUCT_NAME}}Buffer(";
code_ += " flatbuffers::FlatBufferBuilder &fbb,";
code_ += " flatbuffers::Offset<{{CPP_NAME}}> root) {";
if (parser_.file_identifier_.length())
code_ += " fbb.Finish(root, {{STRUCT_NAME}}Identifier());";
else
code_ += " fbb.Finish(root);";
code_ += "}";
code_ += "";
if (parser_.opts.generate_object_based_api) {
// A convenient root unpack function.
auto native_name =
NativeName(WrapInNameSpace(struct_def));
code_.SetValue("UNPACK_RETURN",
GenTypeNativePtr(native_name, nullptr, false));
code_.SetValue("UNPACK_TYPE",
GenTypeNativePtr(native_name, nullptr, true));
code_ += "inline {{UNPACK_RETURN}} UnPack{{STRUCT_NAME}}(";
code_ += " const void *buf,";
code_ += " const flatbuffers::resolver_function_t *res = nullptr) {";
code_ += " return {{UNPACK_TYPE}}\\";
code_ += "(Get{{STRUCT_NAME}}(buf)->UnPack(res));";
code_ += "}";
code_ += "";
}
}
assert(cur_name_space_);
SetNameSpace(nullptr);
// Close the include guard.
code_ += "#endif // " + include_guard;
const auto file_path = GeneratedFileName(path_, file_name_);
const auto final_code = code_.ToString();
return SaveFile(file_path.c_str(), final_code, false);
}
private:
CodeWriter code_;
// This tracks the current namespace so we can insert namespace declarations.
const Namespace *cur_name_space_;
const Namespace *CurrentNameSpace() const { return cur_name_space_; }
// Translates a qualified name in flatbuffer text format to the same name in
// the equivalent C++ namespace.
static std::string TranslateNameSpace(const std::string &qualified_name) {
std::string cpp_qualified_name = qualified_name;
size_t start_pos = 0;
while ((start_pos = cpp_qualified_name.find(".", start_pos)) !=
std::string::npos) {
cpp_qualified_name.replace(start_pos, 1, "::");
}
return cpp_qualified_name;
}
void GenComment(const std::vector<std::string> &dc, const char *prefix = "") {
std::string text;
::flatbuffers::GenComment(dc, &text, nullptr, prefix);
code_ += text + "\\";
}
// Return a C++ type from the table in idl.h
std::string GenTypeBasic(const Type &type, bool user_facing_type) const {
static const char *ctypename[] = {
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \
#CTYPE,
FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
};
if (user_facing_type) {
if (type.enum_def) return WrapInNameSpace(*type.enum_def);
if (type.base_type == BASE_TYPE_BOOL) return "bool";
}
return ctypename[type.base_type];
}
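  // Illustration for GenTypeBasic (field types are hypothetical): a schema
  // `short` yields "int16_t"; with user_facing_type set, an enum-typed
  // scalar instead yields the namespaced enum name, e.g. "MyGame::Color".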
// Return a C++ pointer type, specialized to the actual struct/table types,
// and vector element types.
std::string GenTypePointer(const Type &type) const {
switch (type.base_type) {
case BASE_TYPE_STRING: {
return "flatbuffers::String";
}
case BASE_TYPE_VECTOR: {
const auto type_name = GenTypeWire(type.VectorType(), "", false);
return "flatbuffers::Vector<" + type_name + ">";
}
case BASE_TYPE_STRUCT: {
return WrapInNameSpace(*type.struct_def);
}
case BASE_TYPE_UNION:
// fall through
default: {
return "void";
}
}
}
// Return a C++ type for any type (scalar/pointer) specifically for
// building a flatbuffer.
std::string GenTypeWire(const Type &type, const char *postfix,
bool user_facing_type) const {
if (IsScalar(type.base_type)) {
return GenTypeBasic(type, user_facing_type) + postfix;
} else if (IsStruct(type)) {
return "const " + GenTypePointer(type) + " *";
} else {
return "flatbuffers::Offset<" + GenTypePointer(type) + ">" + postfix;
}
}
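  // Illustration for GenTypeWire (type names are hypothetical): a scalar
  // `int` yields "int32_t" + postfix, a fixed struct Vec3 yields
  // "const Vec3 *", and a table Monster yields
  // "flatbuffers::Offset<Monster>" + postfix.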
// Return a C++ type for any type (scalar/pointer) that reflects its
// serialized size.
std::string GenTypeSize(const Type &type) const {
if (IsScalar(type.base_type)) {
return GenTypeBasic(type, false);
} else if (IsStruct(type)) {
return GenTypePointer(type);
} else {
return "flatbuffers::uoffset_t";
}
}
// TODO(wvo): make this configurable.
static std::string NativeName(const std::string &name) { return name + "T"; }
const std::string &PtrType(const FieldDef *field) {
auto attr = field ? field->attributes.Lookup("cpp_ptr_type") : nullptr;
return attr ? attr->constant : parser_.opts.cpp_object_api_pointer_type;
}
std::string GenTypeNativePtr(const std::string &type, const FieldDef *field,
bool is_constructor) {
auto &ptr_type = PtrType(field);
if (ptr_type != "naked") {
return ptr_type + "<" + type + ">";
} else if (is_constructor) {
return "";
} else {
return type + " *";
}
}
std::string GenPtrGet(const FieldDef &field) {
auto &ptr_type = PtrType(&field);
return ptr_type == "naked" ? "" : ".get()";
}
std::string GenTypeNative(const Type &type, bool invector,
const FieldDef &field) {
switch (type.base_type) {
case BASE_TYPE_STRING: {
return "std::string";
}
case BASE_TYPE_VECTOR: {
const auto type_name = GenTypeNative(type.VectorType(), true, field);
return "std::vector<" + type_name + ">";
}
case BASE_TYPE_STRUCT: {
auto type_name = WrapInNameSpace(*type.struct_def);
if (IsStruct(type)) {
auto native_type = type.struct_def->attributes.Lookup("native_type");
if (native_type) {
type_name = native_type->constant;
}
if (invector || field.native_inline) {
return type_name;
} else {
return GenTypeNativePtr(type_name, &field, false);
}
} else {
return GenTypeNativePtr(NativeName(type_name), &field, false);
}
}
case BASE_TYPE_UNION: {
return type.enum_def->name + "Union";
}
default: {
return GenTypeBasic(type, true);
}
}
}
// Return a C++ type for any type (scalar/pointer) specifically for
// using a flatbuffer.
std::string GenTypeGet(const Type &type, const char *afterbasic,
const char *beforeptr, const char *afterptr,
bool user_facing_type) {
if (IsScalar(type.base_type)) {
return GenTypeBasic(type, user_facing_type) + afterbasic;
} else {
return beforeptr + GenTypePointer(type) + afterptr;
}
}
std::string GenEnumDecl(const EnumDef &enum_def) const {
const IDLOptions &opts = parser_.opts;
return (opts.scoped_enums ? "enum class " : "enum ") + enum_def.name;
}
std::string GenEnumValDecl(const EnumDef &enum_def,
const std::string &enum_val) const {
const IDLOptions &opts = parser_.opts;
return opts.prefixed_enums ? enum_def.name + "_" + enum_val : enum_val;
}
std::string GetEnumValUse(const EnumDef &enum_def,
const EnumVal &enum_val) const {
const IDLOptions &opts = parser_.opts;
if (opts.scoped_enums) {
return enum_def.name + "::" + enum_val.name;
} else if (opts.prefixed_enums) {
return enum_def.name + "_" + enum_val.name;
} else {
return enum_val.name;
}
}
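  // Illustration for GetEnumValUse with a hypothetical enum value Color.Red:
  // "Color::Red" (scoped enums), "Color_Red" (prefixed enums), or plain
  // "Red" otherwise.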
static std::string UnionVerifySignature(const EnumDef &enum_def) {
return "bool Verify" + enum_def.name +
"(flatbuffers::Verifier &verifier, const void *obj, " +
enum_def.name + " type)";
}
static std::string UnionVectorVerifySignature(const EnumDef &enum_def) {
return "bool Verify" + enum_def.name + "Vector" +
"(flatbuffers::Verifier &verifier, " +
"const flatbuffers::Vector<flatbuffers::Offset<void>> *values, " +
"const flatbuffers::Vector<uint8_t> *types)";
}
static std::string UnionUnPackSignature(const EnumDef &enum_def,
bool inclass) {
return (inclass ? "static " : "") +
std::string("flatbuffers::NativeTable *") +
(inclass ? "" : enum_def.name + "Union::") +
"UnPack(const void *obj, " + enum_def.name +
" type, const flatbuffers::resolver_function_t *resolver)";
}
static std::string UnionPackSignature(const EnumDef &enum_def, bool inclass) {
return "flatbuffers::Offset<void> " +
(inclass ? "" : enum_def.name + "Union::") +
"Pack(flatbuffers::FlatBufferBuilder &_fbb, " +
"const flatbuffers::rehasher_function_t *_rehasher" +
(inclass ? " = nullptr" : "") + ") const";
}
static std::string TableCreateSignature(const StructDef &struct_def,
bool predecl) {
return "flatbuffers::Offset<" + struct_def.name + "> Create" +
struct_def.name +
"(flatbuffers::FlatBufferBuilder &_fbb, const " +
NativeName(struct_def.name) +
" *_o, const flatbuffers::rehasher_function_t *_rehasher" +
(predecl ? " = nullptr" : "") + ")";
}
static std::string TablePackSignature(const StructDef &struct_def,
bool inclass) {
return std::string(inclass ? "static " : "") +
"flatbuffers::Offset<" + struct_def.name + "> " +
(inclass ? "" : struct_def.name + "::") +
"Pack(flatbuffers::FlatBufferBuilder &_fbb, " +
"const " + NativeName(struct_def.name) + "* _o, " +
"const flatbuffers::rehasher_function_t *_rehasher" +
(inclass ? " = nullptr" : "") + ")";
}
static std::string TableUnPackSignature(const StructDef &struct_def,
bool inclass) {
return NativeName(struct_def.name) + " *" +
(inclass ? "" : struct_def.name + "::") +
"UnPack(const flatbuffers::resolver_function_t *_resolver" +
(inclass ? " = nullptr" : "") + ") const";
}
static std::string TableUnPackToSignature(const StructDef &struct_def,
bool inclass) {
return "void " + (inclass ? "" : struct_def.name + "::") +
"UnPackTo(" + NativeName(struct_def.name) + " *" + "_o, " +
"const flatbuffers::resolver_function_t *_resolver" +
(inclass ? " = nullptr" : "") + ") const";
}
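  // Illustration of the signature helpers above for a hypothetical table
  // Monster (out-of-class variants):
  //   MonsterT *Monster::UnPack(
  //       const flatbuffers::resolver_function_t *_resolver) const
  //   void Monster::UnPackTo(MonsterT *_o,
  //       const flatbuffers::resolver_function_t *_resolver) const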
// Generate an enum declaration and an enum string lookup table.
void GenEnum(const EnumDef &enum_def) {
code_.SetValue("ENUM_NAME", enum_def.name);
code_.SetValue("BASE_TYPE", GenTypeBasic(enum_def.underlying_type, false));
code_.SetValue("SEP", "");
GenComment(enum_def.doc_comment);
code_ += GenEnumDecl(enum_def) + "\\";
if (parser_.opts.scoped_enums)
code_ += " : {{BASE_TYPE}}\\";
code_ += " {";
int64_t anyv = 0;
const EnumVal *minv = nullptr, *maxv = nullptr;
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
GenComment(ev.doc_comment, " ");
code_.SetValue("KEY", GenEnumValDecl(enum_def, ev.name));
code_.SetValue("VALUE", NumToString(ev.value));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
code_.SetValue("SEP", ",\n");
minv = !minv || minv->value > ev.value ? &ev : minv;
maxv = !maxv || maxv->value < ev.value ? &ev : maxv;
anyv |= ev.value;
}
if (parser_.opts.scoped_enums || parser_.opts.prefixed_enums) {
assert(minv && maxv);
code_.SetValue("SEP", ",\n");
if (enum_def.attributes.Lookup("bit_flags")) {
code_.SetValue("KEY", GenEnumValDecl(enum_def, "NONE"));
code_.SetValue("VALUE", "0");
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
code_.SetValue("KEY", GenEnumValDecl(enum_def, "ANY"));
code_.SetValue("VALUE", NumToString(anyv));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
} else { // MIN & MAX are useless for bit_flags
code_.SetValue("KEY",GenEnumValDecl(enum_def, "MIN"));
code_.SetValue("VALUE", GenEnumValDecl(enum_def, minv->name));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
code_.SetValue("KEY",GenEnumValDecl(enum_def, "MAX"));
code_.SetValue("VALUE", GenEnumValDecl(enum_def, maxv->name));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
}
}
code_ += "";
code_ += "};";
if (parser_.opts.scoped_enums && enum_def.attributes.Lookup("bit_flags")) {
code_ += "DEFINE_BITMASK_OPERATORS({{ENUM_NAME}}, {{BASE_TYPE}})";
}
code_ += "";
    // Generate a string lookup table for enum values.
    // The problem is that if the values are very sparse, this could generate
    // a really big table. Ideally in that case we'd generate a map lookup
    // instead, but for the moment we simply don't output a table at all.
auto range =
enum_def.vals.vec.back()->value - enum_def.vals.vec.front()->value + 1;
// Average distance between values above which we consider a table
// "too sparse". Change at will.
static const int kMaxSparseness = 5;
if (range / static_cast<int64_t>(enum_def.vals.vec.size()) <
kMaxSparseness) {
code_ += "inline const char **EnumNames{{ENUM_NAME}}() {";
code_ += " static const char *names[] = {";
auto val = enum_def.vals.vec.front()->value;
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
while (val++ != ev.value) {
code_ += " \"\",";
}
code_ += " \"" + ev.name + "\",";
}
code_ += " nullptr";
code_ += " };";
code_ += " return names;";
code_ += "}";
code_ += "";
code_ += "inline const char *EnumName{{ENUM_NAME}}({{ENUM_NAME}} e) {";
code_ += " const size_t index = static_cast<int>(e)\\";
if (enum_def.vals.vec.front()->value) {
auto vals = GetEnumValUse(enum_def, *enum_def.vals.vec.front());
code_ += " - static_cast<int>(" + vals + ")\\";
}
code_ += ";";
code_ += " return EnumNames{{ENUM_NAME}}()[index];";
code_ += "}";
code_ += "";
}
// Generate type traits for unions to map from a type to union enum value.
if (enum_def.is_union) {
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (it == enum_def.vals.vec.begin()) {
code_ += "template<typename T> struct {{ENUM_NAME}}Traits {";
}
else {
auto name = WrapInNameSpace(*ev.struct_def);
code_ += "template<> struct {{ENUM_NAME}}Traits<" + name + "> {";
}
auto value = GetEnumValUse(enum_def, ev);
code_ += " static const {{ENUM_NAME}} enum_value = " + value + ";";
code_ += "};";
code_ += "";
}
}
if (parser_.opts.generate_object_based_api && enum_def.is_union) {
// Generate a union type
code_.SetValue("NAME", enum_def.name);
code_.SetValue("NONE",
GetEnumValUse(enum_def, *enum_def.vals.Lookup("NONE")));
code_ += "struct {{NAME}}Union {";
code_ += " {{NAME}} type;";
code_ += " flatbuffers::NativeTable *table;";
code_ += "";
code_ += " {{NAME}}Union() : type({{NONE}}), table(nullptr) {}";
code_ += " {{NAME}}Union({{NAME}}Union&& u):";
code_ += " type(std::move(u.type)), table(std::move(u.table)) {}";
code_ += " {{NAME}}Union(const {{NAME}}Union &);";
code_ += " {{NAME}}Union &operator=(const {{NAME}}Union &);";
code_ += " ~{{NAME}}Union() { Reset(); }";
code_ += "";
code_ += " void Reset();";
code_ += "";
code_ += " template <typename T>";
code_ += " void Set(T&& value) {";
code_ += " Reset();";
code_ += " type = {{NAME}}Traits<typename T::TableType>::enum_value;";
code_ += " if (type != {{NONE}}) {";
code_ += " table = new T(std::forward<T>(value));";
code_ += " }";
code_ += " }";
code_ += "";
code_ += " " + UnionUnPackSignature(enum_def, true) + ";";
code_ += " " + UnionPackSignature(enum_def, true) + ";";
code_ += "";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (!ev.value) {
continue;
}
const auto native_type = NativeName(WrapInNameSpace(*ev.struct_def));
code_.SetValue("NATIVE_TYPE", native_type);
code_.SetValue("NATIVE_NAME", ev.name);
code_.SetValue("NATIVE_ID", GetEnumValUse(enum_def, ev));
code_ += " {{NATIVE_TYPE}} *As{{NATIVE_NAME}}() {";
code_ += " return type == {{NATIVE_ID}} ?";
code_ += " reinterpret_cast<{{NATIVE_TYPE}} *>(table) : nullptr;";
code_ += " }";
}
code_ += "};";
code_ += "";
}
if (enum_def.is_union) {
code_ += UnionVerifySignature(enum_def) + ";";
code_ += UnionVectorVerifySignature(enum_def) + ";";
code_ += "";
}
}
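  // Sketch of GenEnum output for a hypothetical schema
  // `enum Color : byte { Red, Green, Blue }` with prefixed, non-scoped
  // enums; the EnumNamesColor()/EnumNameColor() lookups follow it:
  //
  //   enum Color {
  //     Color_Red = 0,
  //     Color_Green = 1,
  //     Color_Blue = 2,
  //     Color_MIN = Color_Red,
  //     Color_MAX = Color_Blue
  //   };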
void GenUnionPost(const EnumDef &enum_def) {
// Generate a verifier function for this union that can be called by the
// table verifier functions. It uses a switch case to select a specific
// verifier function to call, this should be safe even if the union type
// has been corrupted, since the verifiers will simply fail when called
// on the wrong type.
code_.SetValue("ENUM_NAME", enum_def.name);
code_ += "inline " + UnionVerifySignature(enum_def) + " {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
if (ev.value) {
code_.SetValue("TYPE", WrapInNameSpace(*ev.struct_def));
code_ += " case {{LABEL}}: {";
code_ += " auto ptr = reinterpret_cast<const {{TYPE}} *>(obj);";
code_ += " return verifier.VerifyTable(ptr);";
code_ += " }";
} else {
code_ += " case {{LABEL}}: {";
code_ += " return true;"; // "NONE" enum value.
code_ += " }";
}
}
code_ += " default: return false;";
code_ += " }";
code_ += "}";
code_ += "";
code_ += "inline " + UnionVectorVerifySignature(enum_def) + " {";
code_ += " if (values->size() != types->size()) return false;";
code_ += " for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {";
code_ += " if (!Verify" + enum_def.name + "(";
code_ += " verifier, values->Get(i), types->GetEnum<" + enum_def.name + ">(i))) {";
code_ += " return false;";
code_ += " }";
code_ += " }";
code_ += " return true;";
code_ += "}";
code_ += "";
if (parser_.opts.generate_object_based_api) {
// Generate union Unpack() and Pack() functions.
code_ += "inline " + UnionUnPackSignature(enum_def, false) + " {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (!ev.value) {
continue;
}
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
code_.SetValue("TYPE", WrapInNameSpace(*ev.struct_def));
code_ += " case {{LABEL}}: {";
code_ += " auto ptr = reinterpret_cast<const {{TYPE}} *>(obj);";
code_ += " return ptr->UnPack(resolver);";
code_ += " }";
}
code_ += " default: return nullptr;";
code_ += " }";
code_ += "}";
code_ += "";
code_ += "inline " + UnionPackSignature(enum_def, false) + " {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
auto &ev = **it;
if (!ev.value) {
continue;
}
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
code_.SetValue("TYPE", NativeName(WrapInNameSpace(*ev.struct_def)));
code_.SetValue("NAME", ev.struct_def->name);
code_ += " case {{LABEL}}: {";
code_ += " auto ptr = reinterpret_cast<const {{TYPE}} *>(table);";
code_ += " return Create{{NAME}}(_fbb, ptr, _rehasher).Union();";
code_ += " }";
}
code_ += " default: return 0;";
code_ += " }";
code_ += "}";
code_ += "";
// Union Reset() function.
code_.SetValue("NONE",
GetEnumValUse(enum_def, *enum_def.vals.Lookup("NONE")));
code_ += "inline void {{ENUM_NAME}}Union::Reset() {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (!ev.value) {
continue;
}
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
code_.SetValue("TYPE", NativeName(WrapInNameSpace(*ev.struct_def)));
code_ += " case {{LABEL}}: {";
code_ += " auto ptr = reinterpret_cast<{{TYPE}} *>(table);";
code_ += " delete ptr;";
code_ += " break;";
code_ += " }";
}
code_ += " default: break;";
code_ += " }";
code_ += " table = nullptr;";
code_ += " type = {{NONE}};";
code_ += "}";
code_ += "";
}
}
  // Generates a value, optionally with a cast applied, if the field has a
  // different underlying type from its interface type (currently only the
  // case for enums). "from" specifies the direction: true means from the
  // underlying type to the interface type.
std::string GenUnderlyingCast(const FieldDef &field, bool from,
const std::string &val) {
if (from && field.value.type.base_type == BASE_TYPE_BOOL) {
return val + " != 0";
} else if ((field.value.type.enum_def &&
IsScalar(field.value.type.base_type)) ||
field.value.type.base_type == BASE_TYPE_BOOL) {
return "static_cast<" + GenTypeBasic(field.value.type, from) + ">(" +
val + ")";
} else {
return val;
}
}
std::string GenFieldOffsetName(const FieldDef &field) {
std::string uname = field.name;
std::transform(uname.begin(), uname.end(), uname.begin(), ::toupper);
return "VT_" + uname;
}
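  // Illustration for GenFieldOffsetName: a hypothetical field "hp" yields
  // "VT_HP".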
void GenFullyQualifiedNameGetter(const std::string &name) {
if (!parser_.opts.generate_name_strings) {
return;
}
auto fullname = parser_.namespaces_.back()->GetFullyQualifiedName(name);
code_.SetValue("NAME", fullname);
code_.SetValue("CONSTEXPR", "FLATBUFFERS_CONSTEXPR");
code_ += " static {{CONSTEXPR}} const char *GetFullyQualifiedName() {";
code_ += " return \"{{NAME}}\";";
code_ += " }";
}
std::string GenDefaultConstant(const FieldDef &field) {
return field.value.type.base_type == BASE_TYPE_FLOAT
? field.value.constant + "f"
: field.value.constant;
}
std::string GetDefaultScalarValue(const FieldDef &field) {
if (field.value.type.enum_def && IsScalar(field.value.type.base_type)) {
auto ev = field.value.type.enum_def->ReverseLookup(
static_cast<int>(StringToInt(field.value.constant.c_str())), false);
if (ev) {
return WrapInNameSpace(
field.value.type.enum_def->defined_namespace,
GetEnumValUse(*field.value.type.enum_def, *ev));
} else {
return GenUnderlyingCast(field, true, field.value.constant);
}
} else if (field.value.type.base_type == BASE_TYPE_BOOL) {
return field.value.constant == "0" ? "false" : "true";
} else {
return GenDefaultConstant(field);
}
}
void GenParam(const FieldDef &field, bool direct, const char *prefix) {
code_.SetValue("PRE", prefix);
code_.SetValue("PARAM_NAME", field.name);
if (direct && field.value.type.base_type == BASE_TYPE_STRING) {
code_.SetValue("PARAM_TYPE", "const char *");
code_.SetValue("PARAM_VALUE", "nullptr");
} else if (direct && field.value.type.base_type == BASE_TYPE_VECTOR) {
auto type = GenTypeWire(field.value.type.VectorType(), "", false);
code_.SetValue("PARAM_TYPE", "const std::vector<" + type + "> *");
code_.SetValue("PARAM_VALUE", "nullptr");
} else {
code_.SetValue("PARAM_TYPE", GenTypeWire(field.value.type, " ", true));
code_.SetValue("PARAM_VALUE", GetDefaultScalarValue(field));
}
code_ += "{{PRE}}{{PARAM_TYPE}}{{PARAM_NAME}} = {{PARAM_VALUE}}\\";
}
// Generate a member, including a default value for scalars and raw pointers.
void GenMember(const FieldDef &field) {
if (!field.deprecated && // Deprecated fields won't be accessible.
field.value.type.base_type != BASE_TYPE_UTYPE) {
auto type = GenTypeNative(field.value.type, false, field);
auto cpp_type = field.attributes.Lookup("cpp_type");
auto full_type = (cpp_type ? cpp_type->constant + " *" : type + " ");
code_.SetValue("FIELD_TYPE", full_type);
code_.SetValue("FIELD_NAME", field.name);
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}};";
}
}
// Generate the default constructor for this struct. Properly initialize all
// scalar members with default values.
void GenDefaultConstructor(const StructDef& struct_def) {
std::string initializer_list;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated && // Deprecated fields won't be accessible.
field.value.type.base_type != BASE_TYPE_UTYPE) {
auto cpp_type = field.attributes.Lookup("cpp_type");
// Scalar types get parsed defaults, raw pointers get nullptrs.
if (IsScalar(field.value.type.base_type)) {
if (!initializer_list.empty()) {
initializer_list += ",\n ";
}
initializer_list += field.name;
initializer_list += "(" + GetDefaultScalarValue(field) + ")";
} else if (field.value.type.base_type == BASE_TYPE_STRUCT) {
if (IsStruct(field.value.type)) {
auto native_default = field.attributes.Lookup("native_default");
if (native_default) {
if (!initializer_list.empty()) {
initializer_list += ",\n ";
}
initializer_list +=
field.name + "(" + native_default->constant + ")";
}
}
} else if (cpp_type) {
if (!initializer_list.empty()) {
initializer_list += ",\n ";
}
initializer_list += field.name + "(0)";
}
}
}
if (!initializer_list.empty()) {
initializer_list = "\n : " + initializer_list;
}
code_.SetValue("NATIVE_NAME", NativeName(struct_def.name));
code_.SetValue("INIT_LIST", initializer_list);
code_ += " {{NATIVE_NAME}}(){{INIT_LIST}} {";
code_ += " }";
}
void GenNativeTable(const StructDef &struct_def) {
const auto native_name = NativeName(struct_def.name);
code_.SetValue("STRUCT_NAME", struct_def.name);
code_.SetValue("NATIVE_NAME", native_name);
// Generate a C++ object that can hold an unpacked version of this table.
code_ += "struct {{NATIVE_NAME}} : public flatbuffers::NativeTable {";
code_ += " typedef {{STRUCT_NAME}} TableType;";
GenFullyQualifiedNameGetter(native_name);
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
GenMember(**it);
}
GenDefaultConstructor(struct_def);
code_ += "};";
code_ += "";
}
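  // Sketch of GenNativeTable output for a hypothetical table
  // `Monster { hp: short = 100; }`:
  //
  //   struct MonsterT : public flatbuffers::NativeTable {
  //     typedef Monster TableType;
  //     int16_t hp;
  //     MonsterT()
  //         : hp(100) {
  //     }
  //   };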
// Generate the code to call the appropriate Verify function(s) for a field.
void GenVerifyCall(const FieldDef &field, const char* prefix) {
code_.SetValue("PRE", prefix);
code_.SetValue("NAME", field.name);
code_.SetValue("REQUIRED", field.required ? "Required" : "");
code_.SetValue("SIZE", GenTypeSize(field.value.type));
code_.SetValue("OFFSET", GenFieldOffsetName(field));
code_ += "{{PRE}}VerifyField{{REQUIRED}}<{{SIZE}}>(verifier, {{OFFSET}})\\";
switch (field.value.type.base_type) {
case BASE_TYPE_UNION: {
code_.SetValue("ENUM_NAME", field.value.type.enum_def->name);
code_.SetValue("SUFFIX", UnionTypeFieldSuffix());
code_ += "{{PRE}}Verify{{ENUM_NAME}}(verifier, {{NAME}}(), "
"{{NAME}}{{SUFFIX}}())\\";
break;
}
case BASE_TYPE_STRUCT: {
if (!field.value.type.struct_def->fixed) {
code_ += "{{PRE}}verifier.VerifyTable({{NAME}}())\\";
}
break;
}
case BASE_TYPE_STRING: {
code_ += "{{PRE}}verifier.Verify({{NAME}}())\\";
break;
}
case BASE_TYPE_VECTOR: {
code_ += "{{PRE}}verifier.Verify({{NAME}}())\\";
switch (field.value.type.element) {
case BASE_TYPE_STRING: {
code_ += "{{PRE}}verifier.VerifyVectorOfStrings({{NAME}}())\\";
break;
}
case BASE_TYPE_STRUCT: {
if (!field.value.type.struct_def->fixed) {
code_ += "{{PRE}}verifier.VerifyVectorOfTables({{NAME}}())\\";
}
break;
}
case BASE_TYPE_UNION: {
code_.SetValue("ENUM_NAME", field.value.type.enum_def->name);
code_ += "{{PRE}}Verify{{ENUM_NAME}}Vector(verifier, {{NAME}}(), {{NAME}}_type())\\";
break;
}
default:
break;
}
break;
}
default: {
break;
}
}
}
// Generate an accessor struct, builder structs & function for a table.
void GenTable(const StructDef &struct_def) {
if (parser_.opts.generate_object_based_api) {
GenNativeTable(struct_def);
}
// Generate an accessor struct, with methods of the form:
// type name() const { return GetField<type>(offset, defaultval); }
GenComment(struct_def.doc_comment);
code_.SetValue("STRUCT_NAME", struct_def.name);
code_ += "struct {{STRUCT_NAME}} FLATBUFFERS_FINAL_CLASS"
" : private flatbuffers::Table {";
if (parser_.opts.generate_object_based_api) {
code_ += " typedef {{NATIVE_NAME}} NativeTableType;";
}
GenFullyQualifiedNameGetter(struct_def.name);
// Generate field id constants.
if (struct_def.fields.vec.size() > 0) {
// We need to add a trailing comma to all elements except the last one as
// older versions of gcc complain about this.
code_.SetValue("SEP", "");
code_ += " enum {";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) {
// Deprecated fields won't be accessible.
continue;
}
code_.SetValue("OFFSET_NAME", GenFieldOffsetName(field));
code_.SetValue("OFFSET_VALUE", NumToString(field.value.offset));
code_ += "{{SEP}} {{OFFSET_NAME}} = {{OFFSET_VALUE}}\\";
code_.SetValue("SEP", ",\n");
}
code_ += "";
code_ += " };";
}
// Generate the accessors.
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) {
// Deprecated fields won't be accessible.
continue;
}
const bool is_struct = IsStruct(field.value.type);
const bool is_scalar = IsScalar(field.value.type.base_type);
code_.SetValue("FIELD_NAME", field.name);
// Call a different accessor for pointers, that indirects.
std::string accessor = "";
if (is_scalar) {
accessor = "GetField<";
} else if (is_struct) {
accessor = "GetStruct<";
} else {
accessor = "GetPointer<";
}
auto offset_str = GenFieldOffsetName(field);
auto offset_type =
GenTypeGet(field.value.type, "", "const ", " *", false);
auto call = accessor + offset_type + ">(" + offset_str;
// Default value as second arg for non-pointer types.
if (is_scalar) {
call += ", " + GenDefaultConstant(field);
}
call += ")";
GenComment(field.doc_comment, " ");
code_.SetValue("FIELD_TYPE",
GenTypeGet(field.value.type, " ", "const ", " *", true));
code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, true, call));
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}() const {";
code_ += " return {{FIELD_VALUE}};";
code_ += " }";
if (parser_.opts.mutable_buffer) {
if (is_scalar) {
code_.SetValue("OFFSET_NAME", offset_str);
code_.SetValue("FIELD_TYPE", GenTypeBasic(field.value.type, true));
code_.SetValue("FIELD_VALUE",
GenUnderlyingCast(field, false, "_" + field.name));
code_ += " bool mutate_{{FIELD_NAME}}({{FIELD_TYPE}} "
"_{{FIELD_NAME}}) {";
code_ += " return SetField({{OFFSET_NAME}}, {{FIELD_VALUE}});";
code_ += " }";
} else {
auto type = GenTypeGet(field.value.type, " ", "", " *", true);
auto underlying = accessor + type + ">(" + offset_str + ")";
code_.SetValue("FIELD_TYPE", type);
code_.SetValue("FIELD_VALUE",
GenUnderlyingCast(field, true, underlying));
code_ += " {{FIELD_TYPE}}mutable_{{FIELD_NAME}}() {";
code_ += " return {{FIELD_VALUE}};";
code_ += " }";
}
}
auto nested = field.attributes.Lookup("nested_flatbuffer");
if (nested) {
std::string qualified_name =
parser_.namespaces_.back()->GetFullyQualifiedName(
nested->constant);
auto nested_root = parser_.structs_.Lookup(qualified_name);
assert(nested_root); // Guaranteed to exist by parser.
(void)nested_root;
code_.SetValue("CPP_NAME", TranslateNameSpace(qualified_name));
code_ += " const {{CPP_NAME}} *{{FIELD_NAME}}_nested_root() const {";
code_ += " const uint8_t* data = {{FIELD_NAME}}()->Data();";
code_ += " return flatbuffers::GetRoot<{{CPP_NAME}}>(data);";
code_ += " }";
}
// Generate a comparison function for this field if it is a key.
if (field.key) {
const bool is_string = (field.value.type.base_type == BASE_TYPE_STRING);
code_ += " bool KeyCompareLessThan(const {{STRUCT_NAME}} *o) const {";
if (is_string) {
code_ += " return *{{FIELD_NAME}}() < *o->{{FIELD_NAME}}();";
} else {
code_ += " return {{FIELD_NAME}}() < o->{{FIELD_NAME}}();";
}
code_ += " }";
if (is_string) {
code_ += " int KeyCompareWithValue(const char *val) const {";
code_ += " return strcmp({{FIELD_NAME}}()->c_str(), val);";
code_ += " }";
} else {
auto type = GenTypeBasic(field.value.type, false);
if (parser_.opts.scoped_enums && field.value.type.enum_def &&
IsScalar(field.value.type.base_type)) {
type = GenTypeGet(field.value.type, " ", "const ", " *", true);
}
code_.SetValue("KEY_TYPE", type);
code_ += " int KeyCompareWithValue({{KEY_TYPE}} val) const {";
code_ += " const auto key = {{FIELD_NAME}}();";
code_ += " if (key < val) {";
code_ += " return -1;";
code_ += " } else if (key > val) {";
code_ += " return 1;";
code_ += " } else {";
code_ += " return 0;";
code_ += " }";
code_ += " }";
}
}
}
// Generate a verifier function that can check a buffer from an untrusted
// source will never cause reads outside the buffer.
code_ += " bool Verify(flatbuffers::Verifier &verifier) const {";
code_ += " return VerifyTableStart(verifier)\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) {
continue;
}
GenVerifyCall(field, " &&\n ");
}
code_ += " &&\n verifier.EndTable();";
code_ += " }";
if (parser_.opts.generate_object_based_api) {
// Generate the UnPack() pre declaration.
code_ += " " + TableUnPackSignature(struct_def, true) + ";";
code_ += " " + TableUnPackToSignature(struct_def, true) + ";";
code_ += " " + TablePackSignature(struct_def, true) + ";";
}
code_ += "};"; // End of table.
code_ += "";
GenBuilders(struct_def);
if (parser_.opts.generate_object_based_api) {
// Generate a pre-declaration for a CreateX method that works with an
// unpacked C++ object.
code_ += TableCreateSignature(struct_def, true) + ";";
code_ += "";
}
}
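  // Illustration of a generated accessor for a hypothetical scalar field
  // "hp" with default 100:
  //
  //   int16_t hp() const {
  //     return GetField<int16_t>(VT_HP, 100);
  //   }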
void GenBuilders(const StructDef &struct_def) {
code_.SetValue("STRUCT_NAME", struct_def.name);
// Generate a builder struct:
code_ += "struct {{STRUCT_NAME}}Builder {";
code_ += " flatbuffers::FlatBufferBuilder &fbb_;";
code_ += " flatbuffers::uoffset_t start_;";
bool has_string_or_vector_fields = false;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
const bool is_scalar = IsScalar(field.value.type.base_type);
const bool is_string = field.value.type.base_type == BASE_TYPE_STRING;
const bool is_vector = field.value.type.base_type == BASE_TYPE_VECTOR;
if (is_string || is_vector) {
has_string_or_vector_fields = true;
}
std::string offset = GenFieldOffsetName(field);
std::string name = GenUnderlyingCast(field, false, field.name);
std::string value = is_scalar ? GenDefaultConstant(field) : "";
// Generate accessor functions of the form:
// void add_name(type name) {
// fbb_.AddElement<type>(offset, name, default);
// }
code_.SetValue("FIELD_NAME", field.name);
code_.SetValue("FIELD_TYPE", GenTypeWire(field.value.type, " ", true));
code_.SetValue("ADD_OFFSET", struct_def.name + "::" + offset);
code_.SetValue("ADD_NAME", name);
code_.SetValue("ADD_VALUE", value);
if (is_scalar) {
const auto type = GenTypeWire(field.value.type, "", false);
code_.SetValue("ADD_FN", "AddElement<" + type + ">");
} else if (IsStruct(field.value.type)) {
code_.SetValue("ADD_FN", "AddStruct");
} else {
code_.SetValue("ADD_FN", "AddOffset");
}
code_ += " void add_{{FIELD_NAME}}({{FIELD_TYPE}}{{FIELD_NAME}}) {";
code_ += " fbb_.{{ADD_FN}}(\\";
if (is_scalar) {
code_ += "{{ADD_OFFSET}}, {{ADD_NAME}}, {{ADD_VALUE}});";
} else {
code_ += "{{ADD_OFFSET}}, {{ADD_NAME}});";
}
code_ += " }";
}
}
// Builder constructor
code_ += " {{STRUCT_NAME}}Builder(flatbuffers::FlatBufferBuilder &_fbb)";
code_ += " : fbb_(_fbb) {";
code_ += " start_ = fbb_.StartTable();";
code_ += " }";
    // Assignment operator: declared but not defined, to disallow assignment.
code_ += " {{STRUCT_NAME}}Builder &operator="
"(const {{STRUCT_NAME}}Builder &);";
// Finish() function.
auto num_fields = NumToString(struct_def.fields.vec.size());
code_ += " flatbuffers::Offset<{{STRUCT_NAME}}> Finish() {";
code_ += " const auto end = fbb_.EndTable(start_, " + num_fields + ");";
code_ += " auto o = flatbuffers::Offset<{{STRUCT_NAME}}>(end);";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated && field.required) {
code_.SetValue("FIELD_NAME", field.name);
code_.SetValue("OFFSET_NAME", GenFieldOffsetName(field));
code_ += " fbb_.Required(o, {{STRUCT_NAME}}::{{OFFSET_NAME}});";
}
}
code_ += " return o;";
code_ += " }";
code_ += "};";
code_ += "";
// Generate a convenient CreateX function that uses the above builder
// to create a table in one go.
code_ += "inline flatbuffers::Offset<{{STRUCT_NAME}}> "
"Create{{STRUCT_NAME}}(";
code_ += " flatbuffers::FlatBufferBuilder &_fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
GenParam(field, false, ",\n ");
}
}
code_ += ") {";
code_ += " {{STRUCT_NAME}}Builder builder_(_fbb);";
for (size_t size = struct_def.sortbysize ? sizeof(largest_scalar_t) : 1;
size; size /= 2) {
for (auto it = struct_def.fields.vec.rbegin();
it != struct_def.fields.vec.rend(); ++it) {
const auto &field = **it;
if (!field.deprecated && (!struct_def.sortbysize ||
size == SizeOf(field.value.type.base_type))) {
code_.SetValue("FIELD_NAME", field.name);
code_ += " builder_.add_{{FIELD_NAME}}({{FIELD_NAME}});";
}
}
}
code_ += " return builder_.Finish();";
code_ += "}";
code_ += "";
// Generate a CreateXDirect function with vector types as parameters
if (has_string_or_vector_fields) {
code_ += "inline flatbuffers::Offset<{{STRUCT_NAME}}> "
"Create{{STRUCT_NAME}}Direct(";
code_ += " flatbuffers::FlatBufferBuilder &_fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
GenParam(field, true, ",\n ");
}
}
// Need to call "Create" with the struct namespace.
const auto qualified_create_name = struct_def.defined_namespace->GetFullyQualifiedName("Create");
code_.SetValue("CREATE_NAME", TranslateNameSpace(qualified_create_name));
code_ += ") {";
code_ += " return {{CREATE_NAME}}{{STRUCT_NAME}}(";
code_ += " _fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
code_.SetValue("FIELD_NAME", field.name);
if (field.value.type.base_type == BASE_TYPE_STRING) {
code_ += ",\n {{FIELD_NAME}} ? "
"_fbb.CreateString({{FIELD_NAME}}) : 0\\";
} else if (field.value.type.base_type == BASE_TYPE_VECTOR) {
auto type = GenTypeWire(field.value.type.VectorType(), "", false);
code_ += ",\n {{FIELD_NAME}} ? "
"_fbb.CreateVector<" + type + ">(*{{FIELD_NAME}}) : 0\\";
} else {
code_ += ",\n {{FIELD_NAME}}\\";
}
}
}
code_ += ");";
code_ += "}";
code_ += "";
}
}
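  // Sketch of the convenience function GenBuilders emits for a hypothetical
  // table `Monster { hp: short = 100; name: string; }`; note the add calls
  // run in reverse field order:
  //
  //   inline flatbuffers::Offset<Monster> CreateMonster(
  //       flatbuffers::FlatBufferBuilder &_fbb,
  //       int16_t hp = 100,
  //       flatbuffers::Offset<flatbuffers::String> name = 0) {
  //     MonsterBuilder builder_(_fbb);
  //     builder_.add_name(name);
  //     builder_.add_hp(hp);
  //     return builder_.Finish();
  //   }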
std::string GenUnpackVal(const Type &type, const std::string &val,
bool invector, const FieldDef &afield) {
switch (type.base_type) {
case BASE_TYPE_STRING: {
return val + "->str()";
}
case BASE_TYPE_STRUCT: {
const auto name = WrapInNameSpace(*type.struct_def);
if (IsStruct(type)) {
auto native_type = type.struct_def->attributes.Lookup("native_type");
if (native_type) {
return "flatbuffers::UnPack(*" + val + ")";
} else if (invector || afield.native_inline) {
return "*" + val;
} else {
const auto ptype = GenTypeNativePtr(name, &afield, true);
return ptype + "(new " + name + "(*" + val + "))";
}
} else {
const auto ptype = GenTypeNativePtr(NativeName(name), &afield, true);
return ptype + "(" + val + "->UnPack(_resolver))";
}
}
default: {
return val;
}
}
  }
std::string GenUnpackFieldStatement(const FieldDef &field,
const FieldDef *union_field) {
std::string code;
switch (field.value.type.base_type) {
case BASE_TYPE_VECTOR: {
std::string indexing;
if (field.value.type.enum_def) {
indexing += "(" + field.value.type.enum_def->name + ")";
}
indexing += "_e->Get(_i)";
if (field.value.type.element == BASE_TYPE_BOOL) {
indexing += " != 0";
}
        // Generate code that pushes data from _e to _o in the form:
        //   for (uoffset_t _i = 0; _i < _e->size(); _i++) {
        //     _o->field.push_back(_e->Get(_i));
        //   }
code += "for (flatbuffers::uoffset_t _i = 0;";
code += " _i < _e->size(); _i++) { ";
code += "_o->" + field.name + ".push_back(";
code += GenUnpackVal(field.value.type.VectorType(),
indexing, true, field);
code += "); }";
break;
}
case BASE_TYPE_UTYPE: {
assert(union_field->value.type.base_type == BASE_TYPE_UNION);
// Generate code that sets the union type, of the form:
// _o->field.type = _e;
code += "_o->" + union_field->name + ".type = _e;";
break;
}
case BASE_TYPE_UNION: {
// Generate code that sets the union table, of the form:
// _o->field.table = Union::Unpack(_e, field_type(), resolver);
code += "_o->" + field.name + ".table = ";
code += field.value.type.enum_def->name + "Union::UnPack(";
code += "_e, " + field.name + UnionTypeFieldSuffix() + "(),";
code += "_resolver);";
break;
}
default: {
auto cpp_type = field.attributes.Lookup("cpp_type");
if (cpp_type) {
// Generate code that resolves the cpp pointer type, of the form:
// if (resolver)
// (*resolver)(&_o->field, (hash_value_t)(_e));
// else
// _o->field = nullptr;
code += "if (_resolver) ";
code += "(*_resolver)";
code += "(reinterpret_cast<void **>(&_o->" + field.name + "), ";
code += "static_cast<flatbuffers::hash_value_t>(_e));";
code += " else ";
code += "_o->" + field.name + " = nullptr;";
} else {
// Generate code for assigning the value, of the form:
// _o->field = value;
code += "_o->" + field.name + " = ";
code += GenUnpackVal(field.value.type, "_e", false, field) + ";";
}
break;
}
}
return code;
}
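  // Illustration for GenUnpackFieldStatement with hypothetical fields: a
  // string field "name" yields
  //   _o->name = _e->str();
  // and a vector-of-ubyte field "inventory" yields
  //   for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) {
  //     _o->inventory.push_back(_e->Get(_i)); }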
std::string GenCreateParam(const FieldDef &field) {
std::string value = "_o->";
if (field.value.type.base_type == BASE_TYPE_UTYPE) {
value += field.name.substr(0, field.name.size() -
strlen(UnionTypeFieldSuffix()));
value += ".type";
} else {
value += field.name;
}
if (field.attributes.Lookup("cpp_type")) {
auto type = GenTypeBasic(field.value.type, false);
value = "_rehasher ? "
"static_cast<" + type + ">((*_rehasher)(" + value + ")) : 0";
}
std::string code;
switch (field.value.type.base_type) {
// String fields are of the form:
// _fbb.CreateString(_o->field)
case BASE_TYPE_STRING: {
code += "_fbb.CreateString(" + value + ")";
// For optional fields, check to see if there actually is any data
// in _o->field before attempting to access it.
if (!field.required) {
code = value + ".size() ? " + code + " : 0";
}
break;
}
// Vector fields come in several flavours, of the forms:
// _fbb.CreateVector(_o->field);
// _fbb.CreateVector((const utype*)_o->field.data(), _o->field.size());
// _fbb.CreateVectorOfStrings(_o->field)
// _fbb.CreateVectorOfStructs(_o->field)
      //   _fbb.CreateVector<Offset<T>>(_o->field.size(), [&](size_t i) {
// return CreateT(_fbb, _o->Get(i), rehasher);
// });
case BASE_TYPE_VECTOR: {
auto vector_type = field.value.type.VectorType();
switch (vector_type.base_type) {
case BASE_TYPE_STRING: {
code += "_fbb.CreateVectorOfStrings(" + value + ")";
break;
}
case BASE_TYPE_STRUCT: {
if (IsStruct(vector_type)) {
code += "_fbb.CreateVectorOfStructs(" + value + ")";
} else {
code += "_fbb.CreateVector<flatbuffers::Offset<";
code += WrapInNameSpace(*vector_type.struct_def) + ">>";
code += "(" + value + ".size(), [&](size_t i) {";
code += " return Create" + vector_type.struct_def->name;
code += "(_fbb, " + value + "[i]" + GenPtrGet(field) + ", ";
code += "_rehasher); })";
}
break;
}
case BASE_TYPE_BOOL: {
code += "_fbb.CreateVector(" + value + ")";
break;
}
default: {
if (field.value.type.enum_def) {
// For enumerations, we need to get access to the array data for
// the underlying storage type (eg. uint8_t).
const auto basetype = GenTypeBasic(
field.value.type.enum_def->underlying_type, false);
code += "_fbb.CreateVector((const " + basetype + "*)" + value +
".data(), " + value + ".size())";
} else {
code += "_fbb.CreateVector(" + value + ")";
}
break;
}
}
// For optional fields, check to see if there actually is any data
// in _o->field before attempting to access it.
if (!field.required) {
code = value + ".size() ? " + code + " : 0";
}
break;
}
case BASE_TYPE_UNION: {
// _o->field.Pack(_fbb);
code += value + ".Pack(_fbb)";
break;
}
case BASE_TYPE_STRUCT: {
if (IsStruct(field.value.type)) {
auto native_type =
field.value.type.struct_def->attributes.Lookup("native_type");
if (native_type) {
code += "flatbuffers::Pack(" + value + ")";
} else if (field.native_inline) {
code += "&" + value;
} else {
code += value + " ? " + value + GenPtrGet(field) + " : 0";
}
} else {
          // _o->field ? CreateT(_fbb, _o->field.get(), _rehasher) : 0;
const auto type = field.value.type.struct_def->name;
code += value + " ? Create" + type;
code += "(_fbb, " + value + GenPtrGet(field) + ", _rehasher)";
code += " : 0";
}
break;
}
default: {
code += value;
break;
}
}
return code;
}
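  // Illustration for GenCreateParam: a hypothetical optional string field
  // "name" yields
  //   _o->name.size() ? _fbb.CreateString(_o->name) : 0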
// Generate code for tables that needs to come after the regular definition.
void GenTablePost(const StructDef &struct_def) {
code_.SetValue("STRUCT_NAME", struct_def.name);
code_.SetValue("NATIVE_NAME", NativeName(struct_def.name));
if (parser_.opts.generate_object_based_api) {
// Generate the X::UnPack() method.
code_ += "inline " + TableUnPackSignature(struct_def, false) + " {";
code_ += " auto _o = new {{NATIVE_NAME}}();";
code_ += " UnPackTo(_o, _resolver);";
code_ += " return _o;";
code_ += "}";
code_ += "";
code_ += "inline " + TableUnPackToSignature(struct_def, false) + " {";
code_ += " (void)_o;";
code_ += " (void)_resolver;";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) {
continue;
}
// Assign a value from |this| to |_o|. Values from |this| are stored
// in a variable |_e| by calling this->field_type(). The value is then
// assigned to |_o| using the GenUnpackFieldStatement.
const bool is_union = field.value.type.base_type == BASE_TYPE_UTYPE;
const auto statement =
GenUnpackFieldStatement(field, is_union ? *(it + 1) : nullptr);
code_.SetValue("FIELD_NAME", field.name);
auto prefix = " { auto _e = {{FIELD_NAME}}(); ";
auto check = IsScalar(field.value.type.base_type) ? "" : "if (_e) ";
auto postfix = " };";
code_ += std::string(prefix) + check + statement + postfix;
}
code_ += "}";
code_ += "";
// Generate the X::Pack member function that simply calls the global
// CreateX function.
code_ += "inline " + TablePackSignature(struct_def, false) + " {";
code_ += " return Create{{STRUCT_NAME}}(_fbb, _o, _rehasher);";
code_ += "}";
code_ += "";
// Generate a CreateX method that works with an unpacked C++ object.
code_ += "inline " + TableCreateSignature(struct_def, false) + " {";
code_ += " (void)_rehasher;";
code_ += " (void)_o;";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
auto &field = **it;
if (field.deprecated) {
continue;
}
code_ += " auto _" + field.name + " = " + GenCreateParam(field) + ";";
}
// Need to call "Create" with the struct namespace.
const auto qualified_create_name = struct_def.defined_namespace->GetFullyQualifiedName("Create");
code_.SetValue("CREATE_NAME", TranslateNameSpace(qualified_create_name));
code_ += " return {{CREATE_NAME}}{{STRUCT_NAME}}(";
code_ += " _fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
auto &field = **it;
if (field.deprecated) {
continue;
}
bool pass_by_address = false;
if (field.value.type.base_type == BASE_TYPE_STRUCT) {
if (IsStruct(field.value.type)) {
auto native_type =
field.value.type.struct_def->attributes.Lookup("native_type");
if (native_type) {
pass_by_address = true;
}
}
}
// Call the CreateX function using values from |_o|.
if (pass_by_address) {
code_ += ",\n &_" + field.name + "\\";
} else {
code_ += ",\n _" + field.name + "\\";
}
}
code_ += ");";
code_ += "}";
code_ += "";
}
}
static void GenPadding(
const FieldDef &field, std::string *code_ptr, int *id,
const std::function<void(int bits, std::string *code_ptr, int *id)> &f) {
if (field.padding) {
for (int i = 0; i < 4; i++) {
if (static_cast<int>(field.padding) & (1 << i)) {
f((1 << i) * 8, code_ptr, id);
}
}
assert(!(field.padding & ~0xF));
}
}
static void PaddingDefinition(int bits, std::string *code_ptr, int *id) {
*code_ptr += " int" + NumToString(bits) + "_t padding" +
NumToString((*id)++) + "__;";
}
static void PaddingInitializer(int bits, std::string *code_ptr, int *id) {
(void)bits;
*code_ptr += ",\n padding" + NumToString((*id)++) + "__(0)";
}
static void PaddingNoop(int bits, std::string *code_ptr, int *id) {
(void)bits;
*code_ptr += " (void)padding" + NumToString((*id)++) + "__;";
}
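  // Illustration of the padding helpers: a hypothetical field with padding
  // mask 3 triggers an 8-bit and then a 16-bit unit, so PaddingDefinition
  // appends
  //   int8_t padding0__;  int16_t padding1__;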
// Generate an accessor struct with constructor for a flatbuffers struct.
void GenStruct(const StructDef &struct_def) {
// Generate an accessor struct, with private variables of the form:
// type name_;
// Generates manual padding and alignment.
// Variables are private because they contain little endian data on all
// platforms.
GenComment(struct_def.doc_comment);
code_.SetValue("ALIGN", NumToString(struct_def.minalign));
code_.SetValue("STRUCT_NAME", struct_def.name);
code_ += "MANUALLY_ALIGNED_STRUCT({{ALIGN}}) "
"{{STRUCT_NAME}} FLATBUFFERS_FINAL_CLASS {";
code_ += " private:";
int padding_id = 0;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
code_.SetValue("FIELD_TYPE",
GenTypeGet(field.value.type, " ", "", " ", false));
code_.SetValue("FIELD_NAME", field.name);
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}_;";
if (field.padding) {
std::string padding;
GenPadding(field, &padding, &padding_id, PaddingDefinition);
code_ += padding;
}
}
// Generate GetFullyQualifiedName
code_ += "";
code_ += " public:";
GenFullyQualifiedNameGetter(struct_def.name);
// Generate a default constructor.
code_ += " {{STRUCT_NAME}}() {";
code_ += " memset(this, 0, sizeof({{STRUCT_NAME}}));";
code_ += " }";
// Generate a copy constructor.
code_ += " {{STRUCT_NAME}}(const {{STRUCT_NAME}} &_o) {";
code_ += " memcpy(this, &_o, sizeof({{STRUCT_NAME}}));";
code_ += " }";
// Generate a constructor that takes all fields as arguments.
std::string arg_list;
std::string init_list;
padding_id = 0;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
const auto member_name = field.name + "_";
const auto arg_name = "_" + field.name;
const auto arg_type =
GenTypeGet(field.value.type, " ", "const ", " &", true);
if (it != struct_def.fields.vec.begin()) {
arg_list += ", ";
init_list += ",\n ";
}
arg_list += arg_type;
arg_list += arg_name;
init_list += member_name;
if (IsScalar(field.value.type.base_type)) {
auto type = GenUnderlyingCast(field, false, arg_name);
init_list += "(flatbuffers::EndianScalar(" + type + "))";
} else {
init_list += "(" + arg_name + ")";
}
if (field.padding) {
GenPadding(field, &init_list, &padding_id, PaddingInitializer);
}
}
code_.SetValue("ARG_LIST", arg_list);
code_.SetValue("INIT_LIST", init_list);
code_ += " {{STRUCT_NAME}}({{ARG_LIST}})";
code_ += " : {{INIT_LIST}} {";
padding_id = 0;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.padding) {
std::string padding;
GenPadding(field, &padding, &padding_id, PaddingNoop);
code_ += padding;
}
}
code_ += " }";
// Generate accessor methods of the form:
// type name() const { return flatbuffers::EndianScalar(name_); }
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
auto field_type = GenTypeGet(field.value.type, " ", "const ", " &", true);
auto is_scalar = IsScalar(field.value.type.base_type);
auto member = field.name + "_";
auto value = is_scalar ? "flatbuffers::EndianScalar(" + member + ")"
: member;
code_.SetValue("FIELD_NAME", field.name);
code_.SetValue("FIELD_TYPE", field_type);
code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, true, value));
GenComment(field.doc_comment, " ");
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}() const {";
code_ += " return {{FIELD_VALUE}};";
code_ += " }";
if (parser_.opts.mutable_buffer) {
if (is_scalar) {
code_.SetValue("ARG", GenTypeBasic(field.value.type, true));
code_.SetValue("FIELD_VALUE",
GenUnderlyingCast(field, false, "_" + field.name));
code_ += " void mutate_{{FIELD_NAME}}({{ARG}} _{{FIELD_NAME}}) {";
code_ += " flatbuffers::WriteScalar(&{{FIELD_NAME}}_, "
"{{FIELD_VALUE}});";
code_ += " }";
} else {
code_ += " {{FIELD_TYPE}}mutable_{{FIELD_NAME}}() {";
code_ += " return {{FIELD_NAME}}_;";
code_ += " }";
}
}
}
code_ += "};";
code_.SetValue("STRUCT_BYTE_SIZE", NumToString(struct_def.bytesize));
code_ += "STRUCT_END({{STRUCT_NAME}}, {{STRUCT_BYTE_SIZE}});";
code_ += "";
}
// Set up the correct namespace. Only open a namespace if the existing one is
// different (closing/opening only what is necessary).
//
// The file must start and end with an empty (or null) namespace so that
// namespaces are properly opened and closed.
void SetNameSpace(const Namespace *ns) {
if (cur_name_space_ == ns) {
return;
}
// Compute the size of the longest common namespace prefix.
// If cur_name_space is A::B::C::D and ns is A::B::E::F::G,
// the common prefix is A::B:: and we have old_size = 4, new_size = 5
// and common_prefix_size = 2
size_t old_size = cur_name_space_ ? cur_name_space_->components.size() : 0;
size_t new_size = ns ? ns->components.size() : 0;
size_t common_prefix_size = 0;
while (common_prefix_size < old_size && common_prefix_size < new_size &&
ns->components[common_prefix_size] ==
cur_name_space_->components[common_prefix_size]) {
common_prefix_size++;
}
// Close cur_name_space in reverse order to reach the common prefix.
// In the previous example, D then C are closed.
for (size_t j = old_size; j > common_prefix_size; --j) {
code_ += "} // namespace " + cur_name_space_->components[j - 1];
}
if (old_size != common_prefix_size) {
code_ += "";
}
// open namespace parts to reach the ns namespace
// in the previous example, E, then F, then G are opened
for (auto j = common_prefix_size; j != new_size; ++j) {
code_ += "namespace " + ns->components[j] + " {";
}
if (new_size != common_prefix_size) {
code_ += "";
}
cur_name_space_ = ns;
}
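  // Illustration for SetNameSpace with hypothetical namespaces: moving from
  // A::B::C to A::D keeps the common prefix A, closes C then B, and opens D:
  //
  //   }  // namespace C
  //   }  // namespace B
  //
  //   namespace D {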
};
} // namespace cpp
bool GenerateCPP(const Parser &parser, const std::string &path,
const std::string &file_name) {
cpp::CppGenerator generator(parser, path, file_name);
return generator.generate();
}
std::string CPPMakeRule(const Parser &parser, const std::string &path,
const std::string &file_name) {
const auto filebase =
flatbuffers::StripPath(flatbuffers::StripExtension(file_name));
const auto included_files = parser.GetIncludedFilesRecursive(file_name);
std::string make_rule = GeneratedFileName(path, filebase) + ": ";
for (auto it = included_files.begin(); it != included_files.end(); ++it) {
make_rule += " " + *it;
}
return make_rule;
}
} // namespace flatbuffers
| 1 | 11,338 | Please use the style of the rest of the code, a space between the type and `*` | google-flatbuffers | java |
@@ -25,6 +25,8 @@ namespace Microsoft.TestPlatform.VsTestConsole.TranslationLayer.UnitTests
private MockProcessManager mockProcessManager;
+
+
[TestInitialize]
public void TestInit()
 {
| 1 |
// Copyright (c) Microsoft. All rights reserved.
namespace Microsoft.TestPlatform.VsTestConsole.TranslationLayer.UnitTests
{
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using Microsoft.TestPlatform.VsTestConsole.TranslationLayer.Interfaces;
using Microsoft.VisualStudio.TestPlatform.CommunicationUtilities.Interfaces;
using Microsoft.VisualStudio.TestPlatform.ObjectModel;
using Microsoft.VisualStudio.TestPlatform.ObjectModel.Client;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using Microsoft.VisualStudio.TestPlatform.ObjectModel.Client.Interfaces;
using Moq;
[TestClass]
public class VsTestConsoleWrapperTests
{
private IVsTestConsoleWrapper consoleWrapper;
private MockTranslationLayerSender mockSender;
private MockProcessManager mockProcessManager;
[TestInitialize]
public void TestInit()
{
this.mockSender = new MockTranslationLayerSender();
this.mockProcessManager = new MockProcessManager();
this.consoleWrapper = new VsTestConsoleWrapper(mockSender, mockProcessManager);
}
[TestMethod]
public void StartSessionShouldStartVsTestConsoleWithCorrectArguments()
{
var inputPort = 123;
int expectedParentProcessId = System.Diagnostics.Process.GetCurrentProcess().Id;
string actualParentProcessIdString = "";
string actualPortString = "";
this.mockSender.SetupPort(inputPort);
var startProcessCalled = false;
this.mockProcessManager.VerifyArgs = (args) =>
{
startProcessCalled = true;
actualParentProcessIdString = args.Length > 0 ? args[0] : "";
actualPortString = args.Length > 1 ? args[1] : "";
};
this.consoleWrapper.StartSession();
int actualPort = int.Parse(actualPortString.Split(':')[1]);
int actualParentProcessId = int.Parse(actualParentProcessIdString.Split(':')[1]);
Assert.IsTrue(startProcessCalled, "Start Process must be called");
Assert.AreEqual(expectedParentProcessId, actualParentProcessId, "Incorrect Parent Process Id fed to process args");
Assert.AreEqual(inputPort, actualPort, "Incorrect Port number fed to process args");
}
[TestMethod]
public void StartSessionShouldThrowExceptionOnBadPort()
{
var inputPort = -1;
this.mockSender.SetupPort(inputPort);
Assert.ThrowsException<TransationLayerException>(() => this.consoleWrapper.StartSession());
}
[TestMethod]
public void InitializeExtensionsShouldSucceed()
{
this.mockSender.SetConnectionResult(true);
bool initExtCalled = false;
Action<IEnumerable<string>, bool> assertPaths = (paths, loadOnlyWellKnownExtensions) =>
{
initExtCalled = true;
Assert.IsTrue(paths != null && paths.Count() == 2, "Extension Paths must be set correctly.");
};
this.mockSender.SetInitExtFunc(assertPaths);
this.consoleWrapper.InitializeExtensions(new List<string>() { "Hello", "World" });
Assert.IsTrue(initExtCalled, "Initialize Extensions must be called");
}
[TestMethod]
public void InitializeExtensionsShouldThrowExceptionOnBadConnection()
{
this.mockSender.SetConnectionResult(false);
bool initExtCalled = false;
Action<IEnumerable<string>, bool> assertPaths = (paths, loadOnlyWellKnownExtensions) =>
{
initExtCalled = true;
};
Assert.ThrowsException<TransationLayerException>(() => this.consoleWrapper.InitializeExtensions(new List<string>() { "Hello", "World" }));
Assert.IsFalse(initExtCalled, "Initialize Extensions must NOT be called if connection failed");
}
[TestMethod]
public void DiscoverTestsShouldSucceed()
{
this.mockSender.SetConnectionResult(true);
bool discoverTestsCalled = false;
Action<IEnumerable<string>, string, ITestDiscoveryEventsHandler> assertSources =
(paths, settings, handler) =>
{
discoverTestsCalled = true;
Assert.IsTrue(paths != null && paths.Count() == 2, "Sources must be set correctly.");
Assert.IsNotNull(handler, "TestDiscoveryEventsHandler must be set correctly.");
};
this.mockSender.SetupDiscoverTests(assertSources);
this.consoleWrapper.DiscoverTests(new List<string>() { "Hello", "World" }, null, new Mock<ITestDiscoveryEventsHandler>().Object);
Assert.IsTrue(discoverTestsCalled, "Discover Tests must be called on translation layer");
}
[TestMethod]
public void DiscoverTestsShouldThrowExceptionOnBadConnection()
{
this.mockSender.SetConnectionResult(false);
bool discoverTestsCalled = false;
Action<IEnumerable<string>, string, ITestDiscoveryEventsHandler> assertSources =
(paths, settings, handler) =>
{
discoverTestsCalled = true;
};
Assert.ThrowsException<TransationLayerException>(() => this.consoleWrapper.DiscoverTests(new List<string>() { "Hello", "World" }, null, new Mock<ITestDiscoveryEventsHandler>().Object));
Assert.IsFalse(discoverTestsCalled, "Discover Tests must NOT be called on translation layer when connection is bad.");
}
[TestMethod]
public void RunTestsWithSourcesShouldSucceed()
{
this.mockSender.SetConnectionResult(true);
bool runTestsCalled = false;
Action<IEnumerable<string>, string, ITestRunEventsHandler> assertSources =
(sources, settings, handler) =>
{
runTestsCalled = true;
Assert.IsTrue(sources != null && sources.Count() == 2, "Sources must be set correctly.");
Assert.IsTrue(!string.IsNullOrEmpty(settings), "RunSettings must be set correctly.");
Assert.IsNotNull(handler, "TestRunEventsHandler must be set correctly.");
};
this.mockSender.SetupRunTestsWithSources(assertSources);
this.consoleWrapper.RunTests(new List<string>() { "Hello", "World" }, "RunSettings", new Mock<ITestRunEventsHandler>().Object);
Assert.IsTrue(runTestsCalled, "Run Tests must be called on translation layer");
}
[TestMethod]
public void RunTestsWithSourcesAndCustomHostShouldSucceed()
{
this.mockSender.SetConnectionResult(true);
bool runTestsCalled = false;
Action<IEnumerable<string>, string, ITestRunEventsHandler, ITestHostLauncher> assertSources =
(sources, settings, handler, customLauncher) =>
{
runTestsCalled = true;
Assert.IsTrue(sources != null && sources.Count() == 2, "Sources must be set correctly.");
Assert.IsTrue(!string.IsNullOrEmpty(settings), "RunSettings must be set correctly.");
Assert.IsNotNull(handler, "TestRunEventsHandler must be set correctly.");
Assert.IsNotNull(customLauncher, "Custom Launcher must be set correctly.");
};
this.mockSender.SetupRunTestsWithSourcesAndCustomHost(assertSources);
this.consoleWrapper.RunTestsWithCustomTestHost(new List<string>() { "Hello", "World" }, "RunSettings",
new Mock<ITestRunEventsHandler>().Object, new Mock<ITestHostLauncher>().Object);
Assert.IsTrue(runTestsCalled, "Run Tests must be called on translation layer");
}
[TestMethod]
public void RunTestsWithSelectedTestsShouldSucceed()
{
this.mockSender.SetConnectionResult(true);
bool runTestsCalled = false;
Action<IEnumerable<TestCase>, string, ITestRunEventsHandler> assertTests =
(tests, settings, handler) =>
{
runTestsCalled = true;
Assert.IsTrue(tests != null && tests.Count() == 2, "TestCases must be set correctly.");
Assert.IsTrue(!string.IsNullOrEmpty(settings), "RunSettings must be set correctly.");
Assert.IsNotNull(handler, "TestRunEventsHandler must be set correctly.");
};
this.mockSender.SetupRunTestsWithSelectedTests(assertTests);
var testCases = new List<TestCase>();
testCases.Add(new TestCase("a.b.c", new Uri("d://uri"), "a.dll"));
testCases.Add(new TestCase("d.e.f", new Uri("g://uri"), "d.dll"));
this.consoleWrapper.RunTests(testCases, "RunSettings", new Mock<ITestRunEventsHandler>().Object);
Assert.IsTrue(runTestsCalled, "Run Tests must be called on translation layer");
}
[TestMethod]
public void RunTestsWithSelectedTestsAndCustomLauncherShouldSucceed()
{
this.mockSender.SetConnectionResult(true);
bool runTestsCalled = false;
Action<IEnumerable<TestCase>, string, ITestRunEventsHandler, ITestHostLauncher> assertTests =
(tests, settings, handler, customLauncher) =>
{
runTestsCalled = true;
Assert.IsTrue(tests != null && tests.Count() == 2, "TestCases must be set correctly.");
Assert.IsTrue(!string.IsNullOrEmpty(settings), "RunSettings must be set correctly.");
Assert.IsNotNull(handler, "TestRunEventsHandler must be set correctly.");
Assert.IsNotNull(customLauncher, "Custom Launcher must be set correctly.");
};
this.mockSender.SetupRunTestsWithSelectedTestsAndCustomHost(assertTests);
var testCases = new List<TestCase>();
testCases.Add(new TestCase("a.b.c", new Uri("d://uri"), "a.dll"));
testCases.Add(new TestCase("d.e.f", new Uri("g://uri"), "d.dll"));
this.consoleWrapper.RunTestsWithCustomTestHost(testCases, "RunSettings",
new Mock<ITestRunEventsHandler>().Object, new Mock<ITestHostLauncher>().Object);
Assert.IsTrue(runTestsCalled, "Run Tests must be called on translation layer");
}
[TestMethod]
public void EndSessionShouldSucceed()
{
this.consoleWrapper.EndSession();
Assert.IsTrue(this.mockSender.IsCloseCalled, "Close method must be called on sender");
Assert.IsTrue(this.mockSender.IsSessionEnded, "SessionEnd method must be called on sender");
}
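        // Editor's note (addition): the fake sender below records whether Close and
        // EndSession were invoked, and forwards each request to an assertion callback
        // registered through its Setup* helpers, so tests can inspect the arguments
        // that reach the translation layer.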
private class MockTranslationLayerSender : ITranslationLayerRequestSender
{
public bool IsCloseCalled = false;
public bool IsSessionEnded = false;
public void Close()
{
IsCloseCalled = true;
}
public void DiscoverTests(IEnumerable<string> sources, string runSettings, ITestDiscoveryEventsHandler discoveryEventsHandler)
{
this.discoverFunc(sources, runSettings, discoveryEventsHandler);
}
public void Dispose()
{
}
public void EndSession()
{
IsSessionEnded = true;
}
public int InitializeCommunication()
{
return port;
}
public void InitializeExtensions(IEnumerable<string> pathToAdditionalExtensions)
{
this.initExtFunc(pathToAdditionalExtensions, false);
}
public void StartTestRun(IEnumerable<TestCase> testCases, string runSettings, ITestRunEventsHandler runEventsHandler)
{
this.runTestsWithSelectedTestsFunc.Invoke(testCases, runSettings, runEventsHandler);
}
public void StartTestRun(IEnumerable<string> sources, string runSettings, ITestRunEventsHandler runEventsHandler)
{
this.runTestsWithSourcesFunc.Invoke(sources, runSettings, runEventsHandler);
}
public void StartTestRunWithCustomHost(IEnumerable<string> sources, string runSettings,
ITestRunEventsHandler runEventsHandler, ITestHostLauncher customTestHostLauncher)
{
this.runTestsWithSourcesAndCustomLauncherFunc(sources, runSettings, runEventsHandler, customTestHostLauncher);
}
public void StartTestRunWithCustomHost(IEnumerable<TestCase> testCases, string runSettings,
ITestRunEventsHandler runEventsHandler, ITestHostLauncher customTestHostLauncher)
{
this.runTestsWithSelectedTestsAndCustomHostFunc(testCases, runSettings, runEventsHandler, customTestHostLauncher);
}
public bool WaitForRequestHandlerConnection(int connectionTimeout)
{
return this.connectionResult;
}
private int port;
internal void SetupPort(int inputPort)
{
this.port = inputPort;
}
private bool connectionResult;
internal void SetConnectionResult(bool connectionResult)
{
this.connectionResult = connectionResult;
}
private Action<IEnumerable<string>, bool> initExtFunc;
internal void SetInitExtFunc(Action<IEnumerable<string>, bool> initExtFunc)
{
this.initExtFunc = initExtFunc;
}
private Action<IEnumerable<string>, string, ITestDiscoveryEventsHandler> discoverFunc;
internal void SetupDiscoverTests(Action<IEnumerable<string>, string, ITestDiscoveryEventsHandler> discoverFunc)
{
this.discoverFunc = discoverFunc;
}
private Action<IEnumerable<string>, string, ITestRunEventsHandler> runTestsWithSourcesFunc;
internal void SetupRunTestsWithSources(Action<IEnumerable<string>, string, ITestRunEventsHandler> runTestsFunc)
{
this.runTestsWithSourcesFunc = runTestsFunc;
}
private Action<IEnumerable<string>, string, ITestRunEventsHandler, ITestHostLauncher> runTestsWithSourcesAndCustomLauncherFunc;
internal void SetupRunTestsWithSourcesAndCustomHost(Action<IEnumerable<string>, string, ITestRunEventsHandler, ITestHostLauncher> runTestsFunc)
{
this.runTestsWithSourcesAndCustomLauncherFunc = runTestsFunc;
}
private Action<IEnumerable<TestCase>, string, ITestRunEventsHandler> runTestsWithSelectedTestsFunc;
internal void SetupRunTestsWithSelectedTests(Action<IEnumerable<TestCase>, string, ITestRunEventsHandler> runTestsFunc)
{
this.runTestsWithSelectedTestsFunc = runTestsFunc;
}
private Action<IEnumerable<TestCase>, string, ITestRunEventsHandler, ITestHostLauncher> runTestsWithSelectedTestsAndCustomHostFunc;
internal void SetupRunTestsWithSelectedTestsAndCustomHost(Action<IEnumerable<TestCase>, string, ITestRunEventsHandler, ITestHostLauncher> runTestsFunc)
{
this.runTestsWithSelectedTestsAndCustomHostFunc = runTestsFunc;
}
public void CancelTestRun()
{
throw new NotImplementedException();
}
public void AbortTestRun()
{
throw new NotImplementedException();
}
}
private class MockProcessManager : IProcessManager
{
public Action<string[]> VerifyArgs;
public bool IsProcessInitialized()
{
return true;
}
public void ShutdownProcess()
{
}
public void StartProcess(string[] args)
{
if(VerifyArgs != null)
{
VerifyArgs(args);
}
}
}
}
}
| 1 | 11,179 | nit: remove extra blank lines. | microsoft-vstest | .cs |
@@ -39,6 +39,7 @@ const (
appEnvOptionNone = "None (run in default VPC)"
defaultDockerfilePath = "Dockerfile"
imageTagLatest = "latest"
+ taskGroupNameDefault = "copilot-task"
)
const ( | 1 | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"errors"
"fmt"
"path/filepath"
awscloudformation "github.com/aws/copilot-cli/internal/pkg/aws/cloudformation"
"github.com/aws/copilot-cli/internal/pkg/aws/cloudwatchlogs"
"github.com/aws/copilot-cli/internal/pkg/aws/ec2"
"github.com/aws/copilot-cli/internal/pkg/aws/ecr"
"github.com/aws/copilot-cli/internal/pkg/aws/ecs"
"github.com/aws/copilot-cli/internal/pkg/aws/resourcegroups"
"github.com/aws/copilot-cli/internal/pkg/aws/sessions"
"github.com/aws/copilot-cli/internal/pkg/config"
"github.com/aws/copilot-cli/internal/pkg/deploy"
"github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation"
"github.com/aws/copilot-cli/internal/pkg/docker"
"github.com/aws/copilot-cli/internal/pkg/repository"
"github.com/aws/copilot-cli/internal/pkg/task"
"github.com/aws/copilot-cli/internal/pkg/term/color"
"github.com/aws/copilot-cli/internal/pkg/term/log"
termprogress "github.com/aws/copilot-cli/internal/pkg/term/progress"
"github.com/aws/copilot-cli/internal/pkg/term/prompt"
"github.com/aws/copilot-cli/internal/pkg/term/selector"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/dustin/go-humanize/english"
"github.com/spf13/afero"
"github.com/spf13/cobra"
)
const (
appEnvOptionNone = "None (run in default VPC)"
defaultDockerfilePath = "Dockerfile"
imageTagLatest = "latest"
)
const (
fmtRepoName = "copilot-%s"
fmtImageURI = "%s:%s"
fmtTaskLogGroupName = "/copilot/%s"
)
var (
errNumNotPositive = errors.New("number of tasks must be positive")
errCpuNotPositive = errors.New("CPU units must be positive")
errMemNotPositive = errors.New("memory must be positive")
)
var (
taskRunAppPrompt = fmt.Sprintf("In which %s would you like to run this %s?", color.Emphasize("application"), color.Emphasize("task"))
taskRunEnvPrompt = fmt.Sprintf("In which %s would you like to run this %s?", color.Emphasize("environment"), color.Emphasize("task"))
taskRunGroupNamePrompt = fmt.Sprintf("What would you like to %s your task group?", color.Emphasize("name"))
taskRunAppPromptHelp = fmt.Sprintf(`Task will be deployed to the selected application.
Select %s to run the task in your default VPC instead of any existing application.`, color.Emphasize(appEnvOptionNone))
taskRunEnvPromptHelp = fmt.Sprintf(`Task will be deployed to the selected environment.
Select %s to run the task in your default VPC instead of any existing environment.`, color.Emphasize(appEnvOptionNone))
taskRunGroupNamePromptHelp = `The group name of the task. Tasks with the same group name share the same
set of resources, including CloudFormation stack, CloudWatch log group,
task definition and ECR repository.`
)
type runTaskVars struct {
*GlobalOpts
count int
cpu int
memory int
groupName string
image string
dockerfilePath string
imageTag string
taskRole string
executionRole string
subnets []string
securityGroups []string
env string
useDefaultSubnets bool
envVars map[string]string
command string
follow bool
}
type runTaskOpts struct {
runTaskVars
isDockerfileSet bool
// Interfaces to interact with dependencies.
fs afero.Fs
store store
sel appEnvSelector
spinner progress
// Fields below are configured at runtime.
deployer taskDeployer
repository repositoryService
runner taskRunner
eventsWriter eventsWriter
defaultClusterGetter defaultClusterGetter
sess *session.Session
targetEnvironment *config.Environment
// Configurer methods.
configureRuntimeOpts func() error
configureRepository func() error
// NOTE: configureEventsWriter is only called when tailing logs (i.e. --follow is specified)
configureEventsWriter func(tasks []*task.Task)
}
func newTaskRunOpts(vars runTaskVars) (*runTaskOpts, error) {
store, err := config.NewStore()
if err != nil {
return nil, fmt.Errorf("new config store: %w", err)
}
opts := runTaskOpts{
runTaskVars: vars,
fs: &afero.Afero{Fs: afero.NewOsFs()},
store: store,
sel: selector.NewSelect(vars.prompt, store),
spinner: termprogress.NewSpinner(),
}
opts.configureRuntimeOpts = func() error {
opts.runner = opts.configureRunner()
opts.deployer = cloudformation.New(opts.sess)
opts.defaultClusterGetter = ecs.New(opts.sess)
return nil
}
opts.configureRepository = func() error {
repoName := fmt.Sprintf(fmtRepoName, opts.groupName)
registry := ecr.New(opts.sess)
repository, err := repository.New(repoName, registry)
if err != nil {
return fmt.Errorf("initialize repository %s: %w", repoName, err)
}
opts.repository = repository
return nil
}
opts.configureEventsWriter = func(tasks []*task.Task) {
logGroupName := fmt.Sprintf(fmtTaskLogGroupName, opts.groupName)
opts.eventsWriter = &task.EventsWriter{
GroupName: logGroupName,
Tasks: tasks,
Describer: ecs.New(opts.sess),
EventsLogger: cloudwatchlogs.New(opts.sess),
Writer: log.OutputWriter,
}
}
return &opts, nil
}
func (o *runTaskOpts) configureRunner() taskRunner {
vpcGetter := ec2.New(o.sess)
ecsService := ecs.New(o.sess)
if o.env != "" {
return &task.EnvRunner{
Count: o.count,
GroupName: o.groupName,
App: o.AppName(),
Env: o.env,
VPCGetter: vpcGetter,
ClusterGetter: resourcegroups.New(o.sess),
Starter: ecsService,
}
}
return &task.NetworkConfigRunner{
Count: o.count,
GroupName: o.groupName,
Subnets: o.subnets,
SecurityGroups: o.securityGroups,
VPCGetter: vpcGetter,
ClusterGetter: ecsService,
Starter: ecsService,
}
}
func (o *runTaskOpts) configureSessAndEnv() error {
var sess *session.Session
var env *config.Environment
provider := sessions.NewProvider()
if o.env != "" {
var err error
env, err = o.targetEnv()
if err != nil {
return err
}
sess, err = provider.FromRole(env.ManagerRoleARN, env.Region)
if err != nil {
return fmt.Errorf("get session from role %s and region %s: %w", env.ManagerRoleARN, env.Region, err)
}
} else {
var err error
sess, err = provider.Default()
if err != nil {
return fmt.Errorf("get default session: %w", err)
}
}
o.targetEnvironment = env
o.sess = sess
return nil
}
// Validate returns an error if the flag values passed by the user are invalid.
func (o *runTaskOpts) Validate() error {
if o.count <= 0 {
return errNumNotPositive
}
if o.cpu <= 0 {
return errCpuNotPositive
}
if o.memory <= 0 {
return errMemNotPositive
}
if o.groupName != "" {
if err := basicNameValidation(o.groupName); err != nil {
return err
}
}
if o.image != "" && o.isDockerfileSet {
return errors.New("cannot specify both `--image` and `--dockerfile`")
}
if o.isDockerfileSet {
if _, err := o.fs.Stat(o.dockerfilePath); err != nil {
return err
}
}
if err := o.validateFlagsWithDefaultCluster(); err != nil {
return err
}
if err := o.validateFlagsWithSubnets(); err != nil {
return err
}
if err := o.validateFlagsWithSecurityGroups(); err != nil {
return err
}
if o.appName != "" {
if err := o.validateAppName(); err != nil {
return err
}
}
if o.env != "" {
if err := o.validateEnvName(); err != nil {
return err
}
}
return nil
}
func (o *runTaskOpts) validateFlagsWithDefaultCluster() error {
if !o.useDefaultSubnets {
return nil
}
if o.subnets != nil {
return fmt.Errorf("cannot specify both `--subnets` and `--default`")
}
if o.appName != "" {
return fmt.Errorf("cannot specify both `--app` and `--default`")
}
if o.env != "" {
return fmt.Errorf("cannot specify both `--env` and `--default`")
}
return nil
}
func (o *runTaskOpts) validateFlagsWithSubnets() error {
if o.subnets == nil {
return nil
}
if o.useDefaultSubnets {
		return fmt.Errorf("cannot specify both `--subnets` and `--default`")
}
if o.appName != "" {
return fmt.Errorf("cannot specify both `--subnets` and `--app`")
}
if o.env != "" {
return fmt.Errorf("cannot specify both `--subnets` and `--env`")
}
return nil
}
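// Illustrative combinations rejected by the validators above (editor's
// sketch; the values are hypothetical, the flag names match those
// registered in BuildTaskRunCmd below):
//
//	copilot task run --default --subnets subnet-1        // --default vs. --subnets
//	copilot task run --subnets subnet-1 --env test       // --subnets vs. --env
//	copilot task run --security-groups sg-1 --app myapp  // --security-groups vs. --app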
func (o *runTaskOpts) validateFlagsWithSecurityGroups() error {
if o.securityGroups == nil {
return nil
}
if o.appName != "" {
return fmt.Errorf("cannot specify both `--security-groups` and `--app`")
}
if o.env != "" {
return fmt.Errorf("cannot specify both `--security-groups` and `--env`")
}
return nil
}
// Ask prompts the user for any required or important fields that are not provided.
func (o *runTaskOpts) Ask() error {
if err := o.askTaskGroupName(); err != nil {
return err
}
if o.shouldPromptForAppEnv() {
if err := o.askAppName(); err != nil {
return err
}
if err := o.askEnvName(); err != nil {
return err
}
}
return nil
}
func (o *runTaskOpts) shouldPromptForAppEnv() bool {
// NOTE: if security groups are specified but subnets are not, then we use the default subnets with the
// specified security groups.
useDefault := o.useDefaultSubnets || (o.securityGroups != nil && o.subnets == nil)
useConfig := o.subnets != nil
	// If the user hasn't asked to use the default subnets and hasn't provided
	// specific subnets to use, then we prompt.
return !useDefault && !useConfig
}
// Execute deploys and runs the task.
func (o *runTaskOpts) Execute() error {
// NOTE: all runtime options must be configured only after session is configured
if err := o.configureSessAndEnv(); err != nil {
return err
}
if err := o.configureRuntimeOpts(); err != nil {
return err
}
if o.env == "" {
hasDefaultCluster, err := o.defaultClusterGetter.HasDefaultCluster()
if err != nil {
return fmt.Errorf(`find "default" cluster to deploy the task to: %v`, err)
}
if !hasDefaultCluster {
return errors.New(`cannot find a "default" cluster to deploy the task to`)
}
}
if err := o.deployTaskResources(); err != nil {
return err
}
// NOTE: repository has to be configured only after task resources are deployed
if err := o.configureRepository(); err != nil {
return err
}
// NOTE: if image is not provided, then we build the image and push to ECR repo
if o.image == "" {
if err := o.buildAndPushImage(); err != nil {
return err
}
tag := imageTagLatest
if o.imageTag != "" {
tag = o.imageTag
}
o.image = fmt.Sprintf(fmtImageURI, o.repository.URI(), tag)
if err := o.updateTaskResources(); err != nil {
return err
}
}
tasks, err := o.runTask()
if err != nil {
return err
}
if o.follow {
o.configureEventsWriter(tasks)
if err := o.displayLogStream(); err != nil {
return err
}
}
return nil
}
func (o *runTaskOpts) displayLogStream() error {
if err := o.eventsWriter.WriteEventsUntilStopped(); err != nil {
return fmt.Errorf("write events: %w", err)
}
log.Infof("%s %s stopped.\n",
english.PluralWord(o.count, "Task", ""),
english.PluralWord(o.count, "has", "have"))
return nil
}
func (o *runTaskOpts) runTask() ([]*task.Task, error) {
o.spinner.Start(fmt.Sprintf("Waiting for %s to be running for %s.", english.Plural(o.count, "task", ""), o.groupName))
tasks, err := o.runner.Run()
if err != nil {
o.spinner.Stop(log.Serrorf("Failed to run %s.\n", o.groupName))
return nil, fmt.Errorf("run task %s: %w", o.groupName, err)
}
o.spinner.Stop(log.Ssuccessf("%s %s %s running.\n", english.PluralWord(o.count, "Task", ""), o.groupName, english.PluralWord(o.count, "is", "are")))
return tasks, nil
}
func (o *runTaskOpts) buildAndPushImage() error {
var additionalTags []string
if o.imageTag != "" {
additionalTags = append(additionalTags, o.imageTag)
}
if err := o.repository.BuildAndPush(docker.New(), &docker.BuildArguments{
Dockerfile: o.dockerfilePath,
Context: filepath.Dir(o.dockerfilePath),
ImageTag: imageTagLatest,
AdditionalTags: additionalTags,
}); err != nil {
return fmt.Errorf("build and push image: %w", err)
}
return nil
}
func (o *runTaskOpts) deployTaskResources() error {
o.spinner.Start(fmt.Sprintf("Provisioning resources and permissions for task %s.", color.HighlightUserInput(o.groupName)))
if err := o.deploy(); err != nil {
o.spinner.Stop(log.Serrorln("Failed to provision task resources."))
return fmt.Errorf("provision resources for task %s: %w", o.groupName, err)
}
o.spinner.Stop(log.Ssuccessln("Successfully provisioned task resources."))
return nil
}
func (o *runTaskOpts) updateTaskResources() error {
o.spinner.Start(fmt.Sprintf("Updating image to task %s.", color.HighlightUserInput(o.groupName)))
if err := o.deploy(); err != nil {
o.spinner.Stop(log.Serrorln("Failed to update task resources."))
return fmt.Errorf("update resources for task %s: %w", o.groupName, err)
}
o.spinner.Stop(log.Ssuccessln("Successfully updated image to task."))
return nil
}
func (o *runTaskOpts) deploy() error {
var deployOpts []awscloudformation.StackOption
if o.env != "" {
deployOpts = []awscloudformation.StackOption{awscloudformation.WithRoleARN(o.targetEnvironment.ExecutionRoleARN)}
}
input := &deploy.CreateTaskResourcesInput{
Name: o.groupName,
CPU: o.cpu,
Memory: o.memory,
Image: o.image,
TaskRole: o.taskRole,
ExecutionRole: o.executionRole,
Command: o.command,
EnvVars: o.envVars,
App: o.AppName(),
Env: o.env,
}
return o.deployer.DeployTask(input, deployOpts...)
}
func (o *runTaskOpts) validateAppName() error {
if _, err := o.store.GetApplication(o.appName); err != nil {
return fmt.Errorf("get application: %w", err)
}
return nil
}
func (o *runTaskOpts) validateEnvName() error {
if o.AppName() != "" {
if _, err := o.targetEnv(); err != nil {
return err
}
} else {
return errNoAppInWorkspace
}
return nil
}
func (o *runTaskOpts) askTaskGroupName() error {
if o.groupName != "" {
return nil
}
// TODO during Execute: list existing tasks like in ListApplications, ask whether to use existing tasks
groupName, err := o.prompt.Get(
taskRunGroupNamePrompt,
taskRunGroupNamePromptHelp,
basicNameValidation,
prompt.WithFinalMessage("Task group name:"))
if err != nil {
return fmt.Errorf("prompt get task group name: %w", err)
}
o.groupName = groupName
return nil
}
func (o *runTaskOpts) askAppName() error {
if o.AppName() != "" {
return nil
}
// If the application is empty then the user wants to run in the default VPC. Do not prompt for an environment name.
app, err := o.sel.Application(taskRunAppPrompt, taskRunAppPromptHelp, appEnvOptionNone)
if err != nil {
return fmt.Errorf("ask for application: %w", err)
}
if app == appEnvOptionNone {
return nil
}
o.appName = app
return nil
}
func (o *runTaskOpts) askEnvName() error {
if o.env != "" {
return nil
}
// If the application is empty then the user wants to run in the default VPC. Do not prompt for an environment name.
if o.AppName() == "" || o.subnets != nil {
return nil
}
env, err := o.sel.Environment(taskRunEnvPrompt, taskRunEnvPromptHelp, o.AppName(), appEnvOptionNone)
if err != nil {
return fmt.Errorf("ask for environment: %w", err)
}
if env == appEnvOptionNone {
return nil
}
o.env = env
return nil
}
func (o *runTaskOpts) targetEnv() (*config.Environment, error) {
env, err := o.store.GetEnvironment(o.AppName(), o.env)
if err != nil {
return nil, fmt.Errorf("get environment %s config: %w", o.env, err)
}
return env, nil
}
// BuildTaskRunCmd builds the command for running a new task.
func BuildTaskRunCmd() *cobra.Command {
vars := runTaskVars{
GlobalOpts: NewGlobalOpts(),
}
cmd := &cobra.Command{
Use: "run",
Short: "Run a one-off task on Amazon ECS.",
Example: `
Run a task using your local Dockerfile.
You will be prompted to specify a task group name and an environment for the tasks to run in.
/code $ copilot task run
Run a task named "db-migrate" in the "test" environment under the current workspace.
/code $ copilot task run -n db-migrate --env test
Run 4 tasks with 2GB memory, an existing image, and a custom task role.
/code $ copilot task run --num 4 --memory 2048 --image=rds-migrate --task-role migrate-role
Run a task with environment variables.
/code $ copilot task run --env-vars name=myName,user=myUser
Run a task using the current workspace with specific subnets and security groups.
/code $ copilot task run --subnets subnet-123,subnet-456 --security-groups sg-123,sg-456
Run a task with a command.
/code $ copilot task run --command "python migrate-script.py"`,
RunE: runCmdE(func(cmd *cobra.Command, args []string) error {
opts, err := newTaskRunOpts(vars)
if err != nil {
return err
}
if cmd.Flags().Changed(dockerFileFlag) {
opts.isDockerfileSet = true
}
if err := opts.Validate(); err != nil {
return err
}
if err := opts.Ask(); err != nil {
return err
}
if err := opts.Execute(); err != nil {
return err
}
return nil
}),
}
cmd.Flags().IntVar(&vars.count, countFlag, 1, countFlagDescription)
cmd.Flags().IntVar(&vars.cpu, cpuFlag, 256, cpuFlagDescription)
cmd.Flags().IntVar(&vars.memory, memoryFlag, 512, memoryFlagDescription)
cmd.Flags().StringVarP(&vars.groupName, taskGroupNameFlag, nameFlagShort, "", taskGroupFlagDescription)
cmd.Flags().StringVar(&vars.image, imageFlag, "", imageFlagDescription)
cmd.Flags().StringVar(&vars.dockerfilePath, dockerFileFlag, defaultDockerfilePath, dockerFileFlagDescription)
cmd.Flags().StringVar(&vars.imageTag, imageTagFlag, "", taskImageTagFlagDescription)
cmd.Flags().StringVar(&vars.taskRole, taskRoleFlag, "", taskRoleFlagDescription)
cmd.Flags().StringVar(&vars.executionRole, executionRoleFlag, "", executionRoleFlagDescription)
cmd.Flags().StringVar(&vars.appName, appFlag, "", taskAppFlagDescription)
cmd.Flags().StringVar(&vars.env, envFlag, "", taskEnvFlagDescription)
cmd.Flags().StringSliceVar(&vars.subnets, subnetsFlag, nil, subnetsFlagDescription)
cmd.Flags().StringSliceVar(&vars.securityGroups, securityGroupsFlag, nil, securityGroupsFlagDescription)
cmd.Flags().BoolVar(&vars.useDefaultSubnets, taskDefaultFlag, false, taskDefaultFlagDescription)
cmd.Flags().StringToStringVar(&vars.envVars, envVarsFlag, nil, envVarsFlagDescription)
cmd.Flags().StringVar(&vars.command, commandFlag, "", commandFlagDescription)
cmd.Flags().BoolVar(&vars.follow, followFlag, false, followFlagDescription)
return cmd
}
| 1 | 14,549 | nit: What do you think of `copilot-task-group`? | aws-copilot-cli | go |
@@ -156,6 +156,7 @@ export function setProperty(dom, name, value, oldValue, isSvg) {
*/
function eventProxy(e) {
this._listeners[e.type + false](options.event ? options.event(e) : e);
+ if (e.type === 'input' || e.type === 'change') this.value = this._prevValue || '';
}
function eventProxyCapture(e) { | 1 | import { IS_NON_DIMENSIONAL } from '../constants';
import options from '../options';
/**
* Diff the old and new properties of a VNode and apply changes to the DOM node
* @param {import('../internal').PreactElement} dom The DOM node to apply
* changes to
* @param {object} newProps The new props
* @param {object} oldProps The old props
* @param {boolean} isSvg Whether or not this node is an SVG node
* @param {boolean} hydrate Whether or not we are in hydration mode
*/
export function diffProps(dom, newProps, oldProps, isSvg, hydrate) {
let i;
for (i in oldProps) {
if (i !== 'children' && i !== 'key' && !(i in newProps)) {
setProperty(dom, i, null, oldProps[i], isSvg);
}
}
for (i in newProps) {
if (
(!hydrate || typeof newProps[i] == 'function') &&
i !== 'children' &&
i !== 'key' &&
i !== 'value' &&
i !== 'checked' &&
oldProps[i] !== newProps[i]
) {
setProperty(dom, i, newProps[i], oldProps[i], isSvg);
}
}
}
function setStyle(style, key, value) {
if (key[0] === '-') {
style.setProperty(key, value);
} else if (value == null) {
style[key] = '';
} else if (typeof value != 'number' || IS_NON_DIMENSIONAL.test(key)) {
style[key] = value;
} else {
style[key] = value + 'px';
}
}
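// A few illustrative calls (editor's sketch, not part of the original file;
// assumes 'opacity' matches IS_NON_DIMENSIONAL while 'width' does not):
//   setStyle(s, '--theme-color', 'tomato'); // custom property -> s.setProperty('--theme-color', 'tomato')
//   setStyle(s, 'width', 10);               // dimensional number -> s.width = '10px'
//   setStyle(s, 'opacity', 0.5);            // non-dimensional -> s.opacity = 0.5
//   setStyle(s, 'color', null);             // cleared -> s.color = ''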
/**
* Set a property value on a DOM node
* @param {import('../internal').PreactElement} dom The DOM node to modify
* @param {string} name The name of the property to set
* @param {*} value The value to set the property to
* @param {*} oldValue The old value the property had
* @param {boolean} isSvg Whether or not this DOM node is an SVG node or not
*/
export function setProperty(dom, name, value, oldValue, isSvg) {
let useCapture, nameLower, proxy;
if (isSvg && name == 'className') name = 'class';
// if (isSvg) {
// if (name === 'className') name = 'class';
// } else if (name === 'class') name += 'Name';
if (name === 'style') {
if (typeof value == 'string') {
dom.style.cssText = value;
} else {
if (typeof oldValue == 'string') {
dom.style.cssText = oldValue = '';
}
if (oldValue) {
for (name in oldValue) {
if (!(value && name in value)) {
setStyle(dom.style, name, '');
}
}
}
if (value) {
for (name in value) {
if (!oldValue || value[name] !== oldValue[name]) {
setStyle(dom.style, name, value[name]);
}
}
}
}
}
// Benchmark for comparison: https://esbench.com/bench/574c954bdb965b9a00965ac6
else if (name[0] === 'o' && name[1] === 'n') {
useCapture = name !== (name = name.replace(/Capture$/, ''));
nameLower = name.toLowerCase();
if (nameLower in dom) name = nameLower;
name = name.slice(2);
if (!dom._listeners) dom._listeners = {};
dom._listeners[name + useCapture] = value;
proxy = useCapture ? eventProxyCapture : eventProxy;
if (value) {
if (!oldValue) dom.addEventListener(name, proxy, useCapture);
} else {
dom.removeEventListener(name, proxy, useCapture);
}
} else if (
name !== 'list' &&
name !== 'tagName' &&
// HTMLButtonElement.form and HTMLInputElement.form are read-only but can be set using
// setAttribute
name !== 'form' &&
name !== 'type' &&
name !== 'size' &&
name !== 'download' &&
name !== 'href' &&
!isSvg &&
name in dom
) {
dom[name] = value == null ? '' : value;
} else if (typeof value != 'function' && name !== 'dangerouslySetInnerHTML') {
if (name !== (name = name.replace(/xlink:?/, ''))) {
if (value == null || value === false) {
dom.removeAttributeNS(
'http://www.w3.org/1999/xlink',
name.toLowerCase()
);
} else {
dom.setAttributeNS(
'http://www.w3.org/1999/xlink',
name.toLowerCase(),
value
);
}
} else if (
value == null ||
(value === false &&
// ARIA-attributes have a different notion of boolean values.
// The value `false` is different from the attribute not
// existing on the DOM, so we can't remove it. For non-boolean
// ARIA-attributes we could treat false as a removal, but the
// amount of exceptions would cost us too many bytes. On top of
// that other VDOM frameworks also always stringify `false`.
!/^ar/.test(name))
) {
dom.removeAttribute(name);
} else {
dom.setAttribute(name, value);
}
}
}
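// Illustrative outcomes of the branches above (editor's sketch, not part of
// the original file):
//   setProperty(dom, 'aria-hidden', false); // ARIA: kept, becomes setAttribute('aria-hidden', 'false')
//   setProperty(dom, 'data-foo', null);     // removed via removeAttribute('data-foo')
//   setProperty(dom, 'onClick', fn);        // wired as addEventListener('click', eventProxy, false)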
/**
* Proxy an event to hooked event handlers
* @param {Event} e The event object from the browser
* @private
*/
function eventProxy(e) {
this._listeners[e.type + false](options.event ? options.event(e) : e);
}
function eventProxyCapture(e) {
this._listeners[e.type + true](options.event ? options.event(e) : e);
}
| 1 | 16,269 | We should check whether or not the input node has a value since we can just have an onChange on an uncontrolled component as well. | preactjs-preact | js |
@@ -13,7 +13,7 @@ how the API has changed as well as the range of API versions supported by this b
"""
CURRENT = (buildVersion.version_year, buildVersion.version_major, buildVersion.version_minor)
-BACK_COMPAT_TO = (2019, 3, 0)
+BACK_COMPAT_TO = (2021, 1, 0)
"""
As BACK_COMPAT_TO is incremented, the changed / removed parts / or reasoning should be added below.
EG: (x, y, z): Large changes to speech.py | 1 | #A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2018 NV Access Limited
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import buildVersion
import re
from logHandler import log
"""
This module contains add-on API version information for this build of NVDA. This file provides information on
how the API has changed as well as the range of API versions supported by this build of NVDA
"""
CURRENT = (buildVersion.version_year, buildVersion.version_major, buildVersion.version_minor)
BACK_COMPAT_TO = (2019, 3, 0)
"""
As BACK_COMPAT_TO is incremented, the changed / removed parts / or reasoning should be added below.
EG: (x, y, z): Large changes to speech.py
---
(0, 0, 0): API version zero, used to signify addons released prior to API version checks.
(2019, 3, 0): speech refactor, Python 3
"""
#: Compiled regular expression to match an addon API version string.
#: Supports year.major.minor versions (e.g. 2018.1.1).
# Although year and major are mandatory, minor is optional.
#: Resulting match objects expose three groups reflecting release year, release major, and release minor version,
# respectively.
# As minor is optional, the final group in the resulting match object may be None if minor is not provided in the original string. In this case it should be treated as being 0.
#: @type: RegexObject
ADDON_API_VERSION_REGEX = re.compile(r"^(0|\d{4})\.(\d)(?:\.(\d))?$")
def getAPIVersionTupleFromString(version):
"""Converts a string containing an NVDA version to a tuple of the form (versionYear, versionMajor, versionMinor)"""
match = ADDON_API_VERSION_REGEX.match(version)
if not match:
raise ValueError(version)
return tuple(int(i) if i is not None else 0 for i in match.groups())
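# Illustrative conversions (editor's examples, not part of the original module):
# getAPIVersionTupleFromString("2019.1") == (2019, 1, 0) - minor defaults to 0
# getAPIVersionTupleFromString("2019.1.1") == (2019, 1, 1)
# getAPIVersionTupleFromString("0.0") == (0, 0, 0) - add-ons from before API version checks
# getAPIVersionTupleFromString("19.1") raises ValueError - the year must be four digits or 0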
def formatForGUI(versionTuple):
"""Converts a version tuple to a string for displaying in the GUI
Examples:
- (2018, 1, 1) becomes "2018.1.1"
- (2018, 1, 0) becomes "2018.1"
- (0, 0, 0) becomes "0.0"
"""
try:
year, major, minor = versionTuple
return buildVersion.formatVersionForGUI(year, major, minor)
except (
ValueError, # Too few/many values to unpack
TypeError # versionTuple is None or some other incorrect type
):
# This path should never be hit. But the appearance of "unknown" in the GUI is a better outcome
# than an exception and unusable dialog.
# Translators: shown when an addon API version string is unknown
default = _("unknown")
log.error("Unable to format versionTuple: {}".format(repr(versionTuple)), exc_info=True)
return default
| 1 | 31,429 | I think there should be some reasoning below this line about the version updating. | nvaccess-nvda | py |
@@ -371,6 +371,17 @@ static int on_body(h2o_http1client_t *client, const char *errstr)
return 0;
}
+static char compress_hint_to_enum(const char *val, size_t len)
+{
+ if (!strncasecmp("ON", val, len)) {
+ return H2O_COMPRESS_HINT_ENABLE;
+ }
+ if (!strncasecmp("OFF", val, len)) {
+ return H2O_COMPRESS_HINT_DISABLE;
+ }
+ return H2O_COMPRESS_HINT_AUTO;
+}
+
static h2o_http1client_body_cb on_head(h2o_http1client_t *client, const char *errstr, int minor_version, int status,
h2o_iovec_t msg, h2o_http1client_header_t *headers, size_t num_headers)
{ | 1 | /*
* Copyright (c) 2014,2015 DeNA Co., Ltd., Kazuho Oku, Masahiro Nagano
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <netdb.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include "picohttpparser.h"
#include "h2o.h"
#include "h2o/http1.h"
#include "h2o/http1client.h"
#include "h2o/tunnel.h"
struct rp_generator_t {
h2o_generator_t super;
h2o_req_t *src_req;
h2o_http1client_t *client;
struct {
h2o_iovec_t bufs[2]; /* first buf is the request line and headers, the second is the POST content */
int is_head;
} up_req;
h2o_buffer_t *last_content_before_send;
h2o_doublebuffer_t sending;
int is_websocket_handshake;
int had_body_error; /* set if an error happened while fetching the body so that we can propagate the error */
};
struct rp_ws_upgrade_info_t {
h2o_context_t *ctx;
h2o_timeout_t *timeout;
h2o_socket_t *upstream_sock;
};
static h2o_http1client_ctx_t *get_client_ctx(h2o_req_t *req)
{
h2o_req_overrides_t *overrides = req->overrides;
if (overrides != NULL && overrides->client_ctx != NULL)
return overrides->client_ctx;
return &req->conn->ctx->proxy.client_ctx;
}
static h2o_iovec_t rewrite_location(h2o_mem_pool_t *pool, const char *location, size_t location_len, h2o_url_t *match,
const h2o_url_scheme_t *req_scheme, h2o_iovec_t req_authority, h2o_iovec_t req_basepath)
{
h2o_url_t loc_parsed;
if (h2o_url_parse(location, location_len, &loc_parsed) != 0)
goto NoRewrite;
if (loc_parsed.scheme != &H2O_URL_SCHEME_HTTP)
goto NoRewrite;
if (!h2o_lcstris(loc_parsed.host.base, loc_parsed.host.len, match->host.base, match->host.len))
goto NoRewrite;
if (h2o_url_get_port(&loc_parsed) != h2o_url_get_port(match))
goto NoRewrite;
if (loc_parsed.path.len < match->path.len)
goto NoRewrite;
if (memcmp(loc_parsed.path.base, match->path.base, match->path.len) != 0)
goto NoRewrite;
return h2o_concat(pool, req_scheme->name, h2o_iovec_init(H2O_STRLIT("://")), req_authority, req_basepath,
h2o_iovec_init(loc_parsed.path.base + match->path.len, loc_parsed.path.len - match->path.len));
NoRewrite:
return (h2o_iovec_t){NULL};
}
static h2o_iovec_t build_request_merge_headers(h2o_mem_pool_t *pool, h2o_iovec_t merged, h2o_iovec_t added, int separator)
{
if (added.len == 0)
return merged;
if (merged.len == 0)
return added;
size_t newlen = merged.len + 2 + added.len;
char *buf = h2o_mem_alloc_pool(pool, newlen);
memcpy(buf, merged.base, merged.len);
    buf[merged.len] = separator;
buf[merged.len + 1] = ' ';
memcpy(buf + merged.len + 2, added.base, added.len);
merged.base = buf;
merged.len = newlen;
return merged;
}
/*
 * A request with neither a Content-Length nor a Transfer-Encoding header implies a zero-length request body (see the 6th rule of
 * RFC 7230 3.3.3).
* OTOH, section 3.3.3 states:
*
* A user agent SHOULD send a Content-Length in a request message when
* no Transfer-Encoding is sent and the request method defines a meaning
* for an enclosed payload body. For example, a Content-Length header
* field is normally sent in a POST request even when the value is 0
* (indicating an empty payload body). A user agent SHOULD NOT send a
* Content-Length header field when the request message does not contain
* a payload body and the method semantics do not anticipate such a
* body.
*
 * PUT and POST define a meaning for the payload body, so let's emit a
* Content-Length header if it doesn't exist already, since the server
* might send a '411 Length Required' response.
*
* see also: ML thread starting at https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0580.html
*/
static int req_requires_content_length(h2o_req_t *req)
{
int is_put_or_post =
(req->method.len >= 1 && req->method.base[0] == 'P' && (h2o_memis(req->method.base, req->method.len, H2O_STRLIT("POST")) ||
h2o_memis(req->method.base, req->method.len, H2O_STRLIT("PUT"))));
return is_put_or_post && h2o_find_header(&req->res.headers, H2O_TOKEN_TRANSFER_ENCODING, -1) == -1;
}
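/* Illustrative effect of the rule above (editor's sketch, not part of the
 * original source): a proxied "POST /form HTTP/1.1" carrying no body and no
 * Content-Length gains a "content-length: 0" header before being forwarded,
 * whereas a bodyless "GET /index HTTP/1.1" is forwarded without one. */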
static h2o_iovec_t build_request(h2o_req_t *req, int keepalive, int is_websocket_handshake, int use_proxy_protocol)
{
h2o_iovec_t buf;
size_t offset = 0, remote_addr_len = SIZE_MAX;
char remote_addr[NI_MAXHOST];
struct sockaddr_storage ss;
socklen_t sslen;
h2o_iovec_t cookie_buf = {NULL}, xff_buf = {NULL}, via_buf = {NULL};
int preserve_x_forwarded_proto = req->conn->ctx->globalconf->proxy.preserve_x_forwarded_proto;
int emit_x_forwarded_headers = req->conn->ctx->globalconf->proxy.emit_x_forwarded_headers;
/* for x-f-f */
if ((sslen = req->conn->callbacks->get_peername(req->conn, (void *)&ss)) != 0)
remote_addr_len = h2o_socket_getnumerichost((void *)&ss, sslen, remote_addr);
/* build response */
buf.len = req->method.len + req->path.len + req->authority.len + 512;
if (use_proxy_protocol)
buf.len += H2O_PROXY_HEADER_MAX_LENGTH;
buf.base = h2o_mem_alloc_pool(&req->pool, buf.len);
#define RESERVE(sz) \
do { \
size_t required = offset + sz + 4 /* for "\r\n\r\n" */; \
if (required > buf.len) { \
do { \
buf.len *= 2; \
} while (required > buf.len); \
char *newp = h2o_mem_alloc_pool(&req->pool, buf.len); \
memcpy(newp, buf.base, offset); \
buf.base = newp; \
} \
} while (0)
#define APPEND(s, l) \
do { \
memcpy(buf.base + offset, (s), (l)); \
offset += (l); \
} while (0)
#define APPEND_STRLIT(lit) APPEND((lit), sizeof(lit) - 1)
#define FLATTEN_PREFIXED_VALUE(prefix, value, add_size) \
do { \
RESERVE(sizeof(prefix) - 1 + value.len + 2 + add_size); \
APPEND_STRLIT(prefix); \
if (value.len != 0) { \
APPEND(value.base, value.len); \
if (add_size != 0) { \
buf.base[offset++] = ','; \
buf.base[offset++] = ' '; \
} \
} \
} while (0)
if (use_proxy_protocol)
offset += h2o_stringify_proxy_header(req->conn, buf.base + offset);
APPEND(req->method.base, req->method.len);
buf.base[offset++] = ' ';
APPEND(req->path.base, req->path.len);
APPEND_STRLIT(" HTTP/1.1\r\nconnection: ");
if (is_websocket_handshake) {
APPEND_STRLIT("upgrade\r\nupgrade: websocket\r\nhost: ");
} else if (keepalive) {
APPEND_STRLIT("keep-alive\r\nhost: ");
} else {
APPEND_STRLIT("close\r\nhost: ");
}
APPEND(req->authority.base, req->authority.len);
buf.base[offset++] = '\r';
buf.base[offset++] = '\n';
assert(offset <= buf.len);
if (req->entity.base != NULL || req_requires_content_length(req)) {
RESERVE(sizeof("content-length: " H2O_UINT64_LONGEST_STR) - 1);
offset += sprintf(buf.base + offset, "content-length: %zu\r\n", req->entity.len);
}
{
const h2o_header_t *h, *h_end;
for (h = req->headers.entries, h_end = h + req->headers.size; h != h_end; ++h) {
if (h2o_iovec_is_token(h->name)) {
const h2o_token_t *token = (void *)h->name;
if (token->proxy_should_drop) {
continue;
} else if (token == H2O_TOKEN_COOKIE) {
/* merge the cookie headers; see HTTP/2 8.1.2.5 and HTTP/1 (RFC6265 5.4) */
/* FIXME current algorithm is O(n^2) against the number of cookie headers */
cookie_buf = build_request_merge_headers(&req->pool, cookie_buf, h->value, ';');
continue;
} else if (token == H2O_TOKEN_VIA) {
via_buf = build_request_merge_headers(&req->pool, via_buf, h->value, ',');
continue;
} else if (token == H2O_TOKEN_X_FORWARDED_FOR) {
if (!emit_x_forwarded_headers) {
goto AddHeader;
}
xff_buf = build_request_merge_headers(&req->pool, xff_buf, h->value, ',');
continue;
}
}
if (!preserve_x_forwarded_proto && h2o_lcstris(h->name->base, h->name->len, H2O_STRLIT("x-forwarded-proto")))
continue;
AddHeader:
RESERVE(h->name->len + h->value.len + 2);
APPEND(h->name->base, h->name->len);
buf.base[offset++] = ':';
buf.base[offset++] = ' ';
APPEND(h->value.base, h->value.len);
buf.base[offset++] = '\r';
buf.base[offset++] = '\n';
}
}
if (cookie_buf.len != 0) {
FLATTEN_PREFIXED_VALUE("cookie: ", cookie_buf, 0);
buf.base[offset++] = '\r';
buf.base[offset++] = '\n';
}
if (emit_x_forwarded_headers) {
if (!preserve_x_forwarded_proto) {
FLATTEN_PREFIXED_VALUE("x-forwarded-proto: ", req->input.scheme->name, 0);
buf.base[offset++] = '\r';
buf.base[offset++] = '\n';
}
if (remote_addr_len != SIZE_MAX) {
FLATTEN_PREFIXED_VALUE("x-forwarded-for: ", xff_buf, remote_addr_len);
APPEND(remote_addr, remote_addr_len);
} else {
FLATTEN_PREFIXED_VALUE("x-forwarded-for: ", xff_buf, 0);
}
buf.base[offset++] = '\r';
buf.base[offset++] = '\n';
}
FLATTEN_PREFIXED_VALUE("via: ", via_buf, sizeof("1.1 ") - 1 + req->input.authority.len);
if (req->version < 0x200) {
buf.base[offset++] = '1';
buf.base[offset++] = '.';
buf.base[offset++] = '0' + (0x100 <= req->version && req->version <= 0x109 ? req->version - 0x100 : 0);
} else {
buf.base[offset++] = '2';
}
buf.base[offset++] = ' ';
APPEND(req->input.authority.base, req->input.authority.len);
APPEND_STRLIT("\r\n\r\n");
#undef RESERVE
#undef APPEND
#undef APPEND_STRLIT
#undef FLATTEN_PREFIXED_VALUE
/* set the length */
assert(offset <= buf.len);
buf.len = offset;
return buf;
}
static void do_close(h2o_generator_t *generator, h2o_req_t *req)
{
struct rp_generator_t *self = (void *)generator;
if (self->client != NULL) {
h2o_http1client_cancel(self->client);
self->client = NULL;
}
}
static void do_send(struct rp_generator_t *self)
{
h2o_iovec_t vecs[1];
size_t veccnt;
h2o_send_state_t ststate;
assert(self->sending.bytes_inflight == 0);
vecs[0] = h2o_doublebuffer_prepare(&self->sending,
self->client != NULL ? &self->client->sock->input : &self->last_content_before_send,
self->src_req->preferred_chunk_size);
if (self->client == NULL && vecs[0].len == self->sending.buf->size && self->last_content_before_send->size == 0) {
veccnt = vecs[0].len != 0 ? 1 : 0;
ststate = H2O_SEND_STATE_FINAL;
} else {
if (vecs[0].len == 0)
return;
veccnt = 1;
ststate = H2O_SEND_STATE_IN_PROGRESS;
}
if (self->had_body_error)
ststate = H2O_SEND_STATE_ERROR;
h2o_send(self->src_req, vecs, veccnt, ststate);
}
static void do_proceed(h2o_generator_t *generator, h2o_req_t *req)
{
struct rp_generator_t *self = (void *)generator;
h2o_doublebuffer_consume(&self->sending);
do_send(self);
}
static void on_websocket_upgrade_complete(void *_info, h2o_socket_t *sock, size_t reqsize)
{
struct rp_ws_upgrade_info_t *info = _info;
if (sock != NULL) {
h2o_tunnel_establish(info->ctx, sock, info->upstream_sock, info->timeout);
} else {
h2o_socket_close(info->upstream_sock);
}
free(info);
}
static inline void on_websocket_upgrade(struct rp_generator_t *self, h2o_timeout_t *timeout)
{
h2o_req_t *req = self->src_req;
h2o_socket_t *sock = h2o_http1client_steal_socket(self->client);
struct rp_ws_upgrade_info_t *info = h2o_mem_alloc(sizeof(*info));
info->upstream_sock = sock;
info->timeout = timeout;
info->ctx = req->conn->ctx;
h2o_http1_upgrade(req, NULL, 0, on_websocket_upgrade_complete, info);
}
static int on_body(h2o_http1client_t *client, const char *errstr)
{
struct rp_generator_t *self = client->data;
if (errstr != NULL) {
/* detach the content */
self->last_content_before_send = self->client->sock->input;
h2o_buffer_init(&self->client->sock->input, &h2o_socket_buffer_prototype);
self->client = NULL;
if (errstr != h2o_http1client_error_is_eos) {
h2o_req_log_error(self->src_req, "lib/core/proxy.c", "%s", errstr);
self->had_body_error = 1;
}
}
if (self->sending.bytes_inflight == 0)
do_send(self);
return 0;
}
static h2o_http1client_body_cb on_head(h2o_http1client_t *client, const char *errstr, int minor_version, int status,
h2o_iovec_t msg, h2o_http1client_header_t *headers, size_t num_headers)
{
struct rp_generator_t *self = client->data;
h2o_req_t *req = self->src_req;
size_t i;
if (errstr != NULL && errstr != h2o_http1client_error_is_eos) {
self->client = NULL;
h2o_req_log_error(req, "lib/core/proxy.c", "%s", errstr);
h2o_send_error_502(req, "Gateway Error", errstr, 0);
return NULL;
}
/* copy the response (note: all the headers must be copied; http1client discards the input once we return from this callback) */
req->res.status = status;
req->res.reason = h2o_strdup(&req->pool, msg.base, msg.len).base;
for (i = 0; i != num_headers; ++i) {
const h2o_token_t *token = h2o_lookup_token(headers[i].name, headers[i].name_len);
h2o_iovec_t value;
if (token != NULL) {
if (token->proxy_should_drop) {
goto Skip;
}
if (token == H2O_TOKEN_CONTENT_LENGTH) {
if (req->res.content_length != SIZE_MAX ||
(req->res.content_length = h2o_strtosize(headers[i].value, headers[i].value_len)) == SIZE_MAX) {
self->client = NULL;
h2o_req_log_error(req, "lib/core/proxy.c", "%s", "invalid response from upstream (malformed content-length)");
h2o_send_error_502(req, "Gateway Error", "invalid response from upstream", 0);
return NULL;
}
goto Skip;
} else if (token == H2O_TOKEN_LOCATION) {
if (req->res_is_delegated && (300 <= status && status <= 399) && status != 304) {
self->client = NULL;
h2o_iovec_t method = h2o_get_redirect_method(req->method, status);
h2o_send_redirect_internal(req, method, headers[i].value, headers[i].value_len, 1);
return NULL;
}
if (req->overrides != NULL && req->overrides->location_rewrite.match != NULL) {
value =
rewrite_location(&req->pool, headers[i].value, headers[i].value_len, req->overrides->location_rewrite.match,
req->input.scheme, req->input.authority, req->overrides->location_rewrite.path_prefix);
if (value.base != NULL)
goto AddHeader;
}
goto AddHeaderDuped;
} else if (token == H2O_TOKEN_LINK) {
h2o_push_path_in_link_header(req, headers[i].value, headers[i].value_len);
}
/* default behaviour, transfer the header downstream */
AddHeaderDuped:
value = h2o_strdup(&req->pool, headers[i].value, headers[i].value_len);
AddHeader:
h2o_add_header(&req->pool, &req->res.headers, token, value.base, value.len);
Skip:;
} else {
h2o_iovec_t name = h2o_strdup(&req->pool, headers[i].name, headers[i].name_len);
h2o_iovec_t value = h2o_strdup(&req->pool, headers[i].value, headers[i].value_len);
h2o_add_header_by_str(&req->pool, &req->res.headers, name.base, name.len, 0, value.base, value.len);
}
}
if (self->is_websocket_handshake && req->res.status == 101) {
h2o_http1client_ctx_t *client_ctx = get_client_ctx(req);
assert(client_ctx->websocket_timeout != NULL);
h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_UPGRADE, H2O_STRLIT("websocket"));
on_websocket_upgrade(self, client_ctx->websocket_timeout);
self->client = NULL;
return NULL;
}
/* declare the start of the response */
h2o_start_response(req, &self->super);
if (errstr == h2o_http1client_error_is_eos) {
self->client = NULL;
h2o_send(req, NULL, 0, H2O_SEND_STATE_FINAL);
return NULL;
}
return on_body;
}
static int on_1xx(h2o_http1client_t *client, int minor_version, int status, h2o_iovec_t msg, h2o_http1client_header_t *headers,
size_t num_headers)
{
struct rp_generator_t *self = client->data;
size_t i;
for (i = 0; i != num_headers; ++i) {
if (h2o_memis(headers[i].name, headers[i].name_len, H2O_STRLIT("link")))
h2o_push_path_in_link_header(self->src_req, headers[i].value, headers[i].value_len);
}
return 0;
}
static h2o_http1client_head_cb on_connect(h2o_http1client_t *client, const char *errstr, h2o_iovec_t **reqbufs, size_t *reqbufcnt,
int *method_is_head)
{
struct rp_generator_t *self = client->data;
if (errstr != NULL) {
self->client = NULL;
h2o_req_log_error(self->src_req, "lib/core/proxy.c", "%s", errstr);
h2o_send_error_502(self->src_req, "Gateway Error", errstr, 0);
return NULL;
}
*reqbufs = self->up_req.bufs;
*reqbufcnt = self->up_req.bufs[1].base != NULL ? 2 : 1;
*method_is_head = self->up_req.is_head;
self->client->informational_cb = on_1xx;
return on_head;
}
static void on_generator_dispose(void *_self)
{
struct rp_generator_t *self = _self;
if (self->client != NULL) {
h2o_http1client_cancel(self->client);
self->client = NULL;
}
h2o_buffer_dispose(&self->last_content_before_send);
h2o_doublebuffer_dispose(&self->sending);
}
static struct rp_generator_t *proxy_send_prepare(h2o_req_t *req, int keepalive, int use_proxy_protocol)
{
struct rp_generator_t *self = h2o_mem_alloc_shared(&req->pool, sizeof(*self), on_generator_dispose);
h2o_http1client_ctx_t *client_ctx = get_client_ctx(req);
self->super.proceed = do_proceed;
self->super.stop = do_close;
self->src_req = req;
if (client_ctx->websocket_timeout != NULL && h2o_lcstris(req->upgrade.base, req->upgrade.len, H2O_STRLIT("websocket"))) {
self->is_websocket_handshake = 1;
} else {
self->is_websocket_handshake = 0;
}
self->had_body_error = 0;
self->up_req.bufs[0] = build_request(req, keepalive, self->is_websocket_handshake, use_proxy_protocol);
self->up_req.bufs[1] = req->entity;
self->up_req.is_head = h2o_memis(req->method.base, req->method.len, H2O_STRLIT("HEAD"));
h2o_buffer_init(&self->last_content_before_send, &h2o_socket_buffer_prototype);
h2o_doublebuffer_init(&self->sending, &h2o_socket_buffer_prototype);
return self;
}
void h2o__proxy_process_request(h2o_req_t *req)
{
h2o_req_overrides_t *overrides = req->overrides;
h2o_http1client_ctx_t *client_ctx = get_client_ctx(req);
struct rp_generator_t *self;
if (overrides != NULL) {
if (overrides->socketpool != NULL) {
if (overrides->use_proxy_protocol)
assert(!"proxy protocol cannot be used for a persistent upstream connection");
self = proxy_send_prepare(req, 1, 0);
h2o_http1client_connect_with_pool(&self->client, self, client_ctx, overrides->socketpool, on_connect);
return;
} else if (overrides->hostport.host.base != NULL) {
self = proxy_send_prepare(req, 0, overrides->use_proxy_protocol);
h2o_http1client_connect(&self->client, self, client_ctx, req->overrides->hostport.host, req->overrides->hostport.port,
0, on_connect);
return;
}
}
{ /* default logic */
h2o_iovec_t host;
uint16_t port;
if (h2o_url_parse_hostport(req->authority.base, req->authority.len, &host, &port) == NULL) {
h2o_req_log_error(req, "lib/core/proxy.c", "invalid URL supplied for internal redirection:%s://%.*s%.*s",
req->scheme->name.base, (int)req->authority.len, req->authority.base, (int)req->path.len,
req->path.base);
h2o_send_error_502(req, "Gateway Error", "internal error", 0);
return;
}
if (port == 65535)
port = req->scheme->default_port;
self = proxy_send_prepare(req, 0, overrides != NULL && overrides->use_proxy_protocol);
h2o_http1client_connect(&self->client, self, client_ctx, host, port, req->scheme == &H2O_URL_SCHEME_HTTPS, on_connect);
return;
}
}
| 1 | 11,681 | Please use `h2o_lcstris` instead of `strncasecmp` so that the comparison would be a complete match (instead of a prefix match; current code would yield true if [val,len] is ["O", 1]) and that it would be locale-independent. | h2o-h2o | c |
@@ -333,7 +333,13 @@ var _ fbmHelper = (*folderBranchOps)(nil)
// newFolderBranchOps constructs a new folderBranchOps object.
func newFolderBranchOps(config Config, fb FolderBranch,
bType branchType) *folderBranchOps {
- nodeCache := newNodeCacheStandard(fb)
+ var nodeCache NodeCache
+ if config.Mode() != InitMinimal {
+ nodeCache = newNodeCacheStandard(fb)
+ } else {
+		// If we're in minimal mode, let the node cache remain nil to
+ // ensure that the user doesn't try any data reads or writes.
+ }
// make logger
branchSuffix := "" | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"fmt"
"os"
"reflect"
"strings"
"sync"
"time"
"github.com/keybase/backoff"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/kbfssync"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// mdReadType indicates whether a read needs an identify to be performed.
type mdReadType int
const (
// A read request that doesn't need an identify to be
// performed.
mdReadNoIdentify mdReadType = iota
// A read request that needs an identify to be performed (if
// it hasn't been already).
mdReadNeedIdentify
)
// mdUpdateType indicates update type.
type mdUpdateType int
const (
mdWrite mdUpdateType = iota
// A rekey request. Doesn't need an identify to be performed, as
// a rekey does its own (finer-grained) identifies.
mdRekey
)
type branchType int
const (
standard branchType = iota // an online, read-write branch
archive // an online, read-only branch
offline // an offline, read-write branch
archiveOffline // an offline, read-only branch
)
// Constants used in this file. TODO: Make these configurable?
const (
// MaxBlockSizeBytesDefault is the default maximum block size for KBFS.
// 512K blocks by default, block changes embedded max == 8K.
// Block size was chosen somewhat arbitrarily by trying to
// minimize the overall size of the history written by a user when
// appending 1KB writes to a file, up to a 1GB total file. Here
// is the output of a simple script that approximates that
// calculation:
//
// Total history size for 0065536-byte blocks: 1134341128192 bytes
// Total history size for 0131072-byte blocks: 618945052672 bytes
// Total history size for 0262144-byte blocks: 412786622464 bytes
// Total history size for 0524288-byte blocks: 412786622464 bytes
// Total history size for 1048576-byte blocks: 618945052672 bytes
// Total history size for 2097152-byte blocks: 1134341128192 bytes
// Total history size for 4194304-byte blocks: 2216672886784 bytes
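	// (Editor's note: the U-shape above follows from two opposing costs.
	// With 1KB appends, a B-byte tail block is rewritten roughly B/1KB
	// times before it fills, so larger blocks inflate the rewrite
	// history, while smaller blocks multiply the per-write
	// indirect-block updates; the minimum falls around 256K-512K.)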
MaxBlockSizeBytesDefault = 512 << 10
// Maximum number of blocks that can be sent in parallel
maxParallelBlockPuts = 100
// Maximum number of blocks that can be fetched in parallel
maxParallelBlockGets = 10
// Max response size for a single DynamoDB query is 1MB.
maxMDsAtATime = 10
// Time between checks for dirty files to flush, in case Sync is
// never called.
secondsBetweenBackgroundFlushes = 10
// Cap the number of times we retry after a recoverable error
maxRetriesOnRecoverableErrors = 10
// When the number of dirty bytes exceeds this level, force a sync.
dirtyBytesThreshold = maxParallelBlockPuts * MaxBlockSizeBytesDefault
// The timeout for any background task.
backgroundTaskTimeout = 1 * time.Minute
// If it's been more than this long since our last update, check
// the current head before downloading all of the new revisions.
fastForwardTimeThresh = 15 * time.Minute
// If there are more than this many new revisions, fast forward
// rather than downloading them all.
fastForwardRevThresh = 50
)
type fboMutexLevel mutexLevel
const (
fboMDWriter fboMutexLevel = 1
fboHead fboMutexLevel = 2
fboBlock fboMutexLevel = 3
)
func (o fboMutexLevel) String() string {
switch o {
case fboMDWriter:
return "mdWriterLock"
case fboHead:
return "headLock"
case fboBlock:
return "blockLock"
default:
return fmt.Sprintf("Invalid fboMutexLevel %d", int(o))
}
}
func fboMutexLevelToString(o mutexLevel) string {
return (fboMutexLevel(o)).String()
}
// Rules for working with lockState in FBO:
//
// - Every "execution flow" (i.e., program flow that happens
// sequentially) needs its own lockState object. This usually means
// that each "public" FBO method does:
//
// lState := makeFBOLockState()
//
// near the top.
//
// - Plumb lState through to all functions that hold any of the
// relevant locks, or are called under those locks.
//
// This way, violations of the lock hierarchy will be detected at
// runtime.
func makeFBOLockState() *lockState {
return makeLevelState(fboMutexLevelToString)
}
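// Illustrative call pattern for the rules above (editor's sketch, not part
// of the original source; somePublicOp is a hypothetical method name):
//
//	func (fbo *folderBranchOps) somePublicOp(ctx context.Context) error {
//		lState := makeFBOLockState()
//		fbo.mdWriterLock.Lock(lState)
//		defer fbo.mdWriterLock.Unlock(lState)
//		// ... copy state under headLock, do the work, fire notifications ...
//		return nil
//	}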
// blockLock is just like a sync.RWMutex, but with an extra operation
// (DoRUnlockedIfPossible).
type blockLock struct {
leveledRWMutex
locked bool
}
func (bl *blockLock) Lock(lState *lockState) {
bl.leveledRWMutex.Lock(lState)
bl.locked = true
}
func (bl *blockLock) Unlock(lState *lockState) {
bl.locked = false
bl.leveledRWMutex.Unlock(lState)
}
// DoRUnlockedIfPossible must be called when r- or w-locked. If
// r-locked, r-unlocks, runs the given function, and r-locks after
// it's done. Otherwise, just runs the given function.
func (bl *blockLock) DoRUnlockedIfPossible(lState *lockState, f func(*lockState)) {
if !bl.locked {
bl.RUnlock(lState)
defer bl.RLock(lState)
}
f(lState)
}
// headTrustStatus marks whether the head is from a trusted or
// untrusted source. When rekeying we get the head MD by folder id
// and do not check the tlf handle
type headTrustStatus int
const (
headUntrusted headTrustStatus = iota
headTrusted
)
// folderBranchOps implements the KBFSOps interface for a specific
// branch of a specific folder. It is go-routine safe for operations
// within the folder.
//
// We use locks to protect against multiple goroutines accessing the
// same folder-branch. The goal with our locking strategy is to
// maximize concurrent access whenever possible. See
// design/state_machine.md for more details. There are three
// important locks:
//
// 1) mdWriterLock: Any "remote-sync" operation (one which modifies the
// folder's metadata) must take this lock during the entirety of
// its operation, to avoid forking the MD.
//
// 2) headLock: This is a read/write mutex. It must be taken for
// reading before accessing any part of the current head MD. It
// should be taken for the shortest time possible -- that means in
// general that it should be taken, the MD copied to a
// goroutine-local variable, and then released (a minimal sketch of
// this pattern appears at the end of this comment).
// Remote-sync operations should take it for writing after pushing
// all of the blocks and MD to the KBFS servers (i.e., all network
// accesses), and then hold it until after all notifications have
// been fired, to ensure that no concurrent "local" operations ever
// see inconsistent state locally.
//
// 3) blockLock: This too is a read/write mutex. It must be taken for
// reading before accessing any blocks in the block cache that
// belong to this folder/branch. This includes checking their
// dirty status. It should be taken for the shortest time possible
// -- that means in general it should be taken, and then the blocks
// that will be modified should be copied to local variables in the
// goroutine, and then it should be released. The blocks should
// then be modified locally, and then readied and pushed out
// remotely. Only after the blocks have been pushed to the server
// should a remote-sync operation take the lock again (this time
// for writing) and put/finalize the blocks. Write and Truncate
// should take blockLock for their entire lifetime, since they
// don't involve writes over the network. Furthermore, if a block
// is not in the cache and needs to be fetched, we should release
// the mutex before doing the network operation, and lock it again
// before writing the block back to the cache.
//
// We want to allow writes and truncates to a file that's currently
// being sync'd, like any good networked file system. The tricky part
// is making sure the changes can both: a) be read while the sync is
// happening, and b) be applied to the new file path after the sync is
// done.
//
// For now, we just do the dumb, brute force thing: if a block
// is currently being sync'd, it copies the block and puts it back
// into the cache as modified. Then, when the sync finishes, it
// throws away the modified blocks and re-applies the change to the
// new file path (which might have a completely different set of
// blocks, so we can't just reuse the blocks that were modified during
// the sync.)
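//
// As a minimal sketch of the headLock pattern described in (2) above,
// using only names defined in this file:
//
//	lState := makeFBOLockState()
//	fbo.headLock.RLock(lState)
//	head := fbo.head // copy to a goroutine-local variable
//	fbo.headLock.RUnlock(lState)
//	// ... use head without holding the lock ...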
type folderBranchOps struct {
config Config
folderBranch FolderBranch
bid BranchID // protected by mdWriterLock
bType branchType
observers *observerList
// these locks, when locked concurrently by the same goroutine,
// should only be taken in the following order to avoid deadlock:
mdWriterLock leveledMutex // taken by any method making MD modifications
// protects access to head, headStatus, latestMergedRevision,
// and hasBeenCleared.
headLock leveledRWMutex
head ImmutableRootMetadata
headStatus headTrustStatus
// latestMergedRevision tracks the latest merged revision heard from the server
latestMergedRevision MetadataRevision
// Has this folder ever been cleared?
hasBeenCleared bool
blocks folderBlockOps
// nodeCache itself is goroutine-safe, but this object's use
// of it has special requirements:
//
// - Reads can call PathFromNode() unlocked, since there are
// no guarantees with concurrent reads.
//
// - Operations that take mdWriterLock always need the
// most up-to-date paths, so those must call
// PathFromNode() under mdWriterLock.
//
// - Block write operations (write/truncate/sync) need to
// coordinate. Specifically, sync must make sure that
// blocks referenced in a path (including all of the child
// blocks) exist in the cache during calls to
// PathFromNode from write/truncate. This means that sync
// must modify dirty file blocks only under blockLock, and
// write/truncate must call PathFromNode() under
// blockLock.
//
// Furthermore, calls to UpdatePointer() must happen
// before the copy-on-write mode induced by Sync() is
// finished.
nodeCache NodeCache
// Whether we've identified this TLF or not.
identifyLock sync.Mutex
identifyDone bool
identifyTime time.Time
// The current status summary for this folder
status *folderBranchStatusKeeper
// How to log
log logger.Logger
deferLog logger.Logger
// Closed on shutdown
shutdownChan chan struct{}
// Can be used to turn off notifications for a while (e.g., for testing)
updatePauseChan chan (<-chan struct{})
cancelUpdatesLock sync.Mutex
// Cancels the goroutine currently waiting on TLF MD updates.
cancelUpdates context.CancelFunc
// After a shutdown, this channel will be closed when the register
// goroutine completes.
updateDoneChan chan struct{}
// forceSyncChan is read from by the background sync process
// to know when it should sync immediately.
forceSyncChan <-chan struct{}
// How to resolve conflicts
cr *ConflictResolver
// Helper class for archiving and cleaning up the blocks for this TLF
fbm *folderBlockManager
rekeyFSM RekeyFSM
editHistory *TlfEditHistory
branchChanges kbfssync.RepeatedWaitGroup
mdFlushes kbfssync.RepeatedWaitGroup
forcedFastForwards kbfssync.RepeatedWaitGroup
}
var _ KBFSOps = (*folderBranchOps)(nil)
var _ fbmHelper = (*folderBranchOps)(nil)
// newFolderBranchOps constructs a new folderBranchOps object.
func newFolderBranchOps(config Config, fb FolderBranch,
bType branchType) *folderBranchOps {
nodeCache := newNodeCacheStandard(fb)
// make logger
branchSuffix := ""
if fb.Branch != MasterBranch {
branchSuffix = " " + string(fb.Branch)
}
tlfStringFull := fb.Tlf.String()
// Shorten the TLF ID for the module name. 8 characters should be
// unique enough for a local node.
log := config.MakeLogger(fmt.Sprintf("FBO %s%s", tlfStringFull[:8],
branchSuffix))
// But print it out once in full, just in case.
log.CInfof(nil, "Created new folder-branch for %s", tlfStringFull)
observers := newObserverList()
mdWriterLock := makeLeveledMutex(mutexLevel(fboMDWriter), &sync.Mutex{})
headLock := makeLeveledRWMutex(mutexLevel(fboHead), &sync.RWMutex{})
blockLockMu := makeLeveledRWMutex(mutexLevel(fboBlock), &sync.RWMutex{})
forceSyncChan := make(chan struct{})
fbo := &folderBranchOps{
config: config,
folderBranch: fb,
bid: BranchID{},
bType: bType,
observers: observers,
status: newFolderBranchStatusKeeper(config, nodeCache),
mdWriterLock: mdWriterLock,
headLock: headLock,
blocks: folderBlockOps{
config: config,
log: log,
folderBranch: fb,
observers: observers,
forceSyncChan: forceSyncChan,
blockLock: blockLock{
leveledRWMutex: blockLockMu,
},
dirtyFiles: make(map[BlockPointer]*dirtyFile),
unrefCache: make(map[BlockRef]*syncInfo),
deCache: make(map[BlockRef]DirEntry),
nodeCache: nodeCache,
},
nodeCache: nodeCache,
log: log,
deferLog: log.CloneWithAddedDepth(1),
shutdownChan: make(chan struct{}),
updatePauseChan: make(chan (<-chan struct{})),
forceSyncChan: forceSyncChan,
}
fbo.cr = NewConflictResolver(config, fbo)
fbo.fbm = newFolderBlockManager(config, fb, fbo)
fbo.editHistory = NewTlfEditHistory(config, fbo, log)
fbo.rekeyFSM = NewRekeyFSM(fbo)
if config.DoBackgroundFlushes() {
go fbo.backgroundFlusher(secondsBetweenBackgroundFlushes * time.Second)
}
return fbo
}
// markForReIdentifyIfNeeded checks whether this TLF is identified and marks
// it for lazy reidentification if it exceeds the time limits.
func (fbo *folderBranchOps) markForReIdentifyIfNeeded(now time.Time, maxValid time.Duration) {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
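// Expire the cached identify if the clock has moved backwards past
// identifyTime, or if more than maxValid has elapsed since then.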
if fbo.identifyDone && (now.Before(fbo.identifyTime) || fbo.identifyTime.Add(maxValid).Before(now)) {
fbo.log.CDebugf(nil, "Expiring identify from %v", fbo.identifyTime)
fbo.identifyDone = false
}
}
// Shutdown safely shuts down any background goroutines that may have
// been launched by folderBranchOps.
func (fbo *folderBranchOps) Shutdown(ctx context.Context) error {
if fbo.config.CheckStateOnShutdown() {
lState := makeFBOLockState()
if fbo.blocks.GetState(lState) == dirtyState {
fbo.log.CDebugf(ctx, "Skipping state-checking due to dirty state")
} else if !fbo.isMasterBranch(lState) {
fbo.log.CDebugf(ctx, "Skipping state-checking due to being staged")
} else {
// Make sure we're up to date first
if err := fbo.SyncFromServerForTesting(ctx, fbo.folderBranch); err != nil {
return err
}
// Check the state for consistency before shutting down.
sc := NewStateChecker(fbo.config)
if err := sc.CheckMergedState(ctx, fbo.id()); err != nil {
return err
}
}
}
close(fbo.shutdownChan)
fbo.cr.Shutdown()
fbo.fbm.shutdown()
fbo.editHistory.Shutdown()
fbo.rekeyFSM.Shutdown()
// Wait for the update goroutine to finish, so that we don't have
// any races with logging during test reporting.
if fbo.updateDoneChan != nil {
<-fbo.updateDoneChan
}
return nil
}
func (fbo *folderBranchOps) id() tlf.ID {
return fbo.folderBranch.Tlf
}
func (fbo *folderBranchOps) branch() BranchName {
return fbo.folderBranch.Branch
}
func (fbo *folderBranchOps) GetFavorites(ctx context.Context) (
[]Favorite, error) {
return nil, errors.New("GetFavorites is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) RefreshCachedFavorites(ctx context.Context) {
// no-op
}
func (fbo *folderBranchOps) DeleteFavorite(ctx context.Context,
fav Favorite) error {
return errors.New("DeleteFavorite is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) AddFavorite(ctx context.Context,
fav Favorite) error {
return errors.New("AddFavorite is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) addToFavorites(ctx context.Context,
favorites *Favorites, created bool) (err error) {
lState := makeFBOLockState()
head := fbo.getTrustedHead(lState)
if head == (ImmutableRootMetadata{}) {
return OpsCantHandleFavorite{"Can't add a favorite without a handle"}
}
return fbo.addToFavoritesByHandle(ctx, favorites, head.GetTlfHandle(), created)
}
func (fbo *folderBranchOps) addToFavoritesByHandle(ctx context.Context,
favorites *Favorites, handle *TlfHandle, created bool) (err error) {
if _, err := fbo.config.KBPKI().GetCurrentSession(ctx); err != nil {
// Can't favorite while not logged in
return nil
}
favorites.AddAsync(ctx, handle.toFavToAdd(created))
return nil
}
func (fbo *folderBranchOps) deleteFromFavorites(ctx context.Context,
favorites *Favorites) error {
if _, err := fbo.config.KBPKI().GetCurrentSession(ctx); err != nil {
// Can't unfavorite while not logged in
return nil
}
lState := makeFBOLockState()
head := fbo.getTrustedHead(lState)
if head == (ImmutableRootMetadata{}) {
// This can happen when identifies fail and the head is never set.
return OpsCantHandleFavorite{"Can't delete a favorite without a handle"}
}
h := head.GetTlfHandle()
return favorites.Delete(ctx, h.ToFavorite())
}
func (fbo *folderBranchOps) doFavoritesOp(ctx context.Context,
favs *Favorites, fop FavoritesOp, handle *TlfHandle) error {
switch fop {
case FavoritesOpNoChange:
return nil
case FavoritesOpAdd:
if handle != nil {
return fbo.addToFavoritesByHandle(ctx, favs, handle, false)
}
return fbo.addToFavorites(ctx, favs, false)
case FavoritesOpAddNewlyCreated:
if handle != nil {
return fbo.addToFavoritesByHandle(ctx, favs, handle, true)
}
return fbo.addToFavorites(ctx, favs, true)
case FavoritesOpRemove:
return fbo.deleteFromFavorites(ctx, favs)
default:
return InvalidFavoritesOpError{}
}
}
// getTrustedHead should not be called outside of folder_branch_ops.go.
// Returns ImmutableRootMetadata{} when the head is not trusted.
// See the comment on headTrustStatus for more information.
func (fbo *folderBranchOps) getTrustedHead(lState *lockState) ImmutableRootMetadata {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
if fbo.headStatus == headUntrusted {
return ImmutableRootMetadata{}
}
return fbo.head
}
// getHead should not be called outside of folder_branch_ops.go.
func (fbo *folderBranchOps) getHead(lState *lockState) (
ImmutableRootMetadata, headTrustStatus) {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
return fbo.head, fbo.headStatus
}
// isMasterBranch should not be called if mdWriterLock is already taken.
func (fbo *folderBranchOps) isMasterBranch(lState *lockState) bool {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.bid == NullBranchID
}
func (fbo *folderBranchOps) isMasterBranchLocked(lState *lockState) bool {
fbo.mdWriterLock.AssertLocked(lState)
return fbo.bid == NullBranchID
}
func (fbo *folderBranchOps) setBranchIDLocked(lState *lockState, bid BranchID) {
fbo.mdWriterLock.AssertLocked(lState)
if fbo.bid != bid {
fbo.cr.BeginNewBranch()
}
fbo.bid = bid
if bid == NullBranchID {
fbo.status.setCRSummary(nil, nil)
}
}
var errNoFlushedRevisions = errors.New("No flushed MDs yet")
var errNoMergedRevWhileStaged = errors.New(
"Cannot find most recent merged revision while staged")
// getJournalPredecessorRevision returns the revision that precedes
// the current journal head if journaling enabled and there are
// unflushed MD updates; otherwise it returns
// MetadataRevisionUninitialized. If there aren't any flushed MD
// revisions, it returns errNoFlushedRevisions.
func (fbo *folderBranchOps) getJournalPredecessorRevision(ctx context.Context) (
MetadataRevision, error) {
jServer, err := GetJournalServer(fbo.config)
if err != nil {
// Journaling is disabled entirely.
return MetadataRevisionUninitialized, nil
}
jStatus, err := jServer.JournalStatus(fbo.id())
if err != nil {
// Journaling is disabled for this TLF, so use the local head.
// TODO: JournalStatus could return other errors (likely
// file/disk corruption) that indicate a real problem, so it
// might be nice to type those errors so we can distinguish
// them.
return MetadataRevisionUninitialized, nil
}
if jStatus.BranchID != NullBranchID.String() {
return MetadataRevisionUninitialized, errNoMergedRevWhileStaged
}
if jStatus.RevisionStart == MetadataRevisionUninitialized {
// The journal is empty, so the local head must be the most recent.
return MetadataRevisionUninitialized, nil
} else if jStatus.RevisionStart == MetadataRevisionInitial {
// Nothing has been flushed to the servers yet, so don't
// return anything.
return MetadataRevisionUninitialized, errNoFlushedRevisions
}
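// For example, if the earliest unflushed journal revision is 100,
// then revision 99 is the most recent revision the server has.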
return jStatus.RevisionStart - 1, nil
}
// validateHeadLocked validates an untrusted head and sets it as trusted.
// See the headTrustStatus comment for more information.
func (fbo *folderBranchOps) validateHeadLocked(
ctx context.Context, lState *lockState, md ImmutableRootMetadata) error {
fbo.headLock.AssertLocked(lState)
// Validate fbo against fetched md and discard the fetched one.
if fbo.head.TlfID() != md.TlfID() {
fbo.log.CCriticalf(ctx, "Fake untrusted TLF encountered %v %v %v %v", fbo.head.TlfID(), md.TlfID(), fbo.head.mdID, md.mdID)
return MDTlfIDMismatch{fbo.head.TlfID(), md.TlfID()}
}
fbo.headStatus = headTrusted
return nil
}
func (fbo *folderBranchOps) setHeadLocked(
ctx context.Context, lState *lockState,
md ImmutableRootMetadata, headStatus headTrustStatus) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
isFirstHead := fbo.head == ImmutableRootMetadata{}
wasReadable := false
if !isFirstHead {
if headStatus == headUntrusted {
panic("setHeadLocked: Trying to set an untrusted head over an existing head")
}
wasReadable = fbo.head.IsReadable()
if fbo.headStatus == headUntrusted {
err := fbo.validateHeadLocked(ctx, lState, md)
if err != nil {
return err
}
if fbo.head.mdID == md.mdID {
return nil
}
}
if fbo.head.mdID == md.mdID {
panic(errors.Errorf("Re-putting the same MD: %s", md.mdID))
}
}
fbo.log.CDebugf(ctx, "Setting head revision to %d", md.Revision())
err := fbo.config.MDCache().Put(md)
if err != nil {
return err
}
// If this is the first time the MD is being set, and we are
// operating on unmerged data, initialize the state properly and
// kick off conflict resolution.
if isFirstHead && md.MergedStatus() == Unmerged {
fbo.setBranchIDLocked(lState, md.BID())
// Use uninitialized for the merged branch; the unmerged
// revision is enough to trigger conflict resolution.
fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
} else if md.MergedStatus() == Merged {
journalEnabled := TLFJournalEnabled(fbo.config, fbo.id())
if journalEnabled {
if isFirstHead {
// If journaling is on, and this is the first head
// we're setting, we have to make sure we use the
// server's notion of the latest MD, not the one
// potentially coming from our journal. If there are
// no flushed revisions, it's not a hard error, and we
// just leave the latest merged revision
// uninitialized.
journalPred, err := fbo.getJournalPredecessorRevision(ctx)
switch err {
case nil:
// journalPred will be
// MetadataRevisionUninitialized when the journal
// is empty.
if journalPred >= MetadataRevisionInitial {
fbo.setLatestMergedRevisionLocked(
ctx, lState, journalPred, false)
} else {
fbo.setLatestMergedRevisionLocked(ctx, lState,
md.Revision(), false)
}
case errNoFlushedRevisions:
// The server has no revisions, so leave the
// latest merged revision uninitialized.
default:
return err
}
} else {
// If this isn't the first head, then this is either
// an update from the server, or an update just
// written by the client. But since journaling is on,
// the latter case will be handled by onMDFlush when
// the update is properly flushed to the server. So
// ignore updates written by this device.
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
if session.VerifyingKey != md.LastModifyingWriterVerifyingKey() {
fbo.setLatestMergedRevisionLocked(
ctx, lState, md.Revision(), false)
}
}
} else {
// This is a merged revision, and journaling is disabled,
// so it's definitely the latest revision on the server as
// well.
fbo.setLatestMergedRevisionLocked(ctx, lState, md.Revision(), false)
}
}
// Make sure that any unembedded block changes have been swapped
// back in.
if md.data.Changes.Info.BlockPointer != zeroPtr &&
len(md.data.Changes.Ops) == 0 {
return errors.New("Must swap in block changes before setting head")
}
fbo.head = md
if isFirstHead && headStatus == headTrusted {
fbo.headStatus = headTrusted
}
fbo.status.setRootMetadata(md)
if isFirstHead {
// Start registering for updates right away, using this MD
// as a starting point. For now only the master branch can
// get updates
if fbo.branch() == MasterBranch {
fbo.updateDoneChan = make(chan struct{})
go fbo.registerAndWaitForUpdates()
}
}
if !wasReadable && md.IsReadable() {
// Let any listeners know that this folder is now readable,
// which may indicate that a rekey successfully took place.
fbo.config.Reporter().Notify(ctx, mdReadSuccessNotification(
md.GetTlfHandle(), md.TlfID().IsPublic()))
}
return nil
}
// setInitialHeadUntrustedLocked is for when the given RootMetadata
// was fetched not due to a user action (e.g., via a Rekey
// notification), and we don't have a TLF name to check against.
func (fbo *folderBranchOps) setInitialHeadUntrustedLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.New("Unexpected non-nil head in setInitialHeadUntrustedLocked")
}
return fbo.setHeadLocked(ctx, lState, md, headUntrusted)
}
// setNewInitialHeadLocked is for when we're creating a brand-new TLF.
// This is trusted.
func (fbo *folderBranchOps) setNewInitialHeadLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.New("Unexpected non-nil head in setNewInitialHeadLocked")
}
if md.Revision() != MetadataRevisionInitial {
return errors.Errorf("setNewInitialHeadLocked unexpectedly called with revision %d", md.Revision())
}
return fbo.setHeadLocked(ctx, lState, md, headTrusted)
}
// setInitialHeadTrustedLocked is for when the given RootMetadata
// was fetched due to a user action, and will be checked against the
// TLF name.
func (fbo *folderBranchOps) setInitialHeadTrustedLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.New("Unexpected non-nil head in setInitialHeadTrustedLocked")
}
return fbo.setHeadLocked(ctx, lState, md, headTrusted)
}
// setHeadSuccessorLocked is for when we're applying updates from the
// server or when we're applying new updates we created ourselves.
func (fbo *folderBranchOps) setHeadSuccessorLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata, rebased bool) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head == (ImmutableRootMetadata{}) {
// This can happen in tests via SyncFromServerForTesting().
return fbo.setInitialHeadTrustedLocked(ctx, lState, md)
}
if !rebased {
err := fbo.head.CheckValidSuccessor(fbo.head.mdID, md.ReadOnly())
if err != nil {
return err
}
}
oldHandle := fbo.head.GetTlfHandle()
newHandle := md.GetTlfHandle()
// Newer handles should be equal or more resolved over time.
//
// TODO: In some cases, they shouldn't, e.g. if we're on an
// unmerged branch. Add checks for this.
resolvesTo, partialResolvedOldHandle, err :=
oldHandle.ResolvesTo(
ctx, fbo.config.Codec(), fbo.config.KBPKI(),
*newHandle)
if err != nil {
return err
}
oldName := oldHandle.GetCanonicalName()
newName := newHandle.GetCanonicalName()
if !resolvesTo {
return IncompatibleHandleError{
oldName,
partialResolvedOldHandle.GetCanonicalName(),
newName,
}
}
err = fbo.setHeadLocked(ctx, lState, md, headTrusted)
if err != nil {
return err
}
if oldName != newName {
fbo.log.CDebugf(ctx, "Handle changed (%s -> %s)",
oldName, newName)
// If the handle has changed, send out a notification.
fbo.observers.tlfHandleChange(ctx, fbo.head.GetTlfHandle())
// Also the folder should be re-identified given the
// newly-resolved assertions.
func() {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
fbo.identifyDone = false
}()
}
return nil
}
// setHeadPredecessorLocked is for when we're unstaging updates.
func (fbo *folderBranchOps) setHeadPredecessorLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head == (ImmutableRootMetadata{}) {
return errors.New("Unexpected nil head in setHeadPredecessorLocked")
}
if fbo.head.Revision() <= MetadataRevisionInitial {
return errors.Errorf("setHeadPredecessorLocked unexpectedly called with revision %d", fbo.head.Revision())
}
if fbo.head.MergedStatus() != Unmerged {
return errors.New("Unexpected merged head in setHeadPredecessorLocked")
}
err := md.CheckValidSuccessor(md.mdID, fbo.head.ReadOnly())
if err != nil {
return err
}
oldHandle := fbo.head.GetTlfHandle()
newHandle := md.GetTlfHandle()
// The two handles must be the same, since no rekeying is done
// while unmerged.
eq, err := oldHandle.Equals(fbo.config.Codec(), *newHandle)
if err != nil {
return err
}
if !eq {
return errors.Errorf(
"head handle %v unexpectedly not equal to new handle = %v",
oldHandle, newHandle)
}
return fbo.setHeadLocked(ctx, lState, md, headTrusted)
}
// setHeadConflictResolvedLocked is for when we're setting the merged
// update with resolved conflicts.
func (fbo *folderBranchOps) setHeadConflictResolvedLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head.MergedStatus() != Unmerged {
return errors.New("Unexpected merged head in setHeadConflictResolvedLocked")
}
if md.MergedStatus() != Merged {
return errors.New("Unexpected unmerged update in setHeadConflictResolvedLocked")
}
return fbo.setHeadLocked(ctx, lState, md, headTrusted)
}
func (fbo *folderBranchOps) identifyOnce(
ctx context.Context, md ReadOnlyRootMetadata) error {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
ei := getExtendedIdentify(ctx)
if fbo.identifyDone && !ei.behavior.AlwaysRunIdentify() {
// TODO: provide a way for the service to break this cache when identify
// state changes on a TLF. For now, we do it this way to make chat work.
return nil
}
h := md.GetTlfHandle()
fbo.log.CDebugf(ctx, "Running identifies on %s", h.GetCanonicalPath())
kbpki := fbo.config.KBPKI()
err := identifyHandle(ctx, kbpki, kbpki, h)
if err != nil {
fbo.log.CDebugf(ctx, "Identify finished with error: %v", err)
// For now, if the identify fails, let the
// next function to hit this code path retry.
return err
}
if ei.behavior.WarningInsteadOfErrorOnBrokenTracks() &&
len(ei.getTlfBreakAndClose().Breaks) > 0 {
fbo.log.CDebugf(ctx,
"Identify finished with no error but broken proof warnings")
} else {
fbo.log.CDebugf(ctx, "Identify finished successfully")
fbo.identifyDone = true
fbo.identifyTime = fbo.config.Clock().Now()
}
return nil
}
// getMDForReadLocked returns an existing md for a read
// operation. Note that mds will not be fetched here.
func (fbo *folderBranchOps) getMDForReadLocked(
ctx context.Context, lState *lockState, rtype mdReadType) (
md ImmutableRootMetadata, err error) {
if rtype != mdReadNeedIdentify && rtype != mdReadNoIdentify {
panic("Invalid rtype in getMDForReadLocked")
}
md = fbo.getTrustedHead(lState)
if md != (ImmutableRootMetadata{}) {
if rtype != mdReadNoIdentify {
err = fbo.identifyOnce(ctx, md.ReadOnly())
}
return md, err
}
return ImmutableRootMetadata{}, MDWriteNeededInRequest{}
}
// getMDForWriteOrRekeyLocked can fetch MDs, identify them, and
// contains the fancy logic. For reading, use getMDForReadLocked.
// Here we actually can fetch things from the server.
// Rekeys are untrusted.
func (fbo *folderBranchOps) getMDForWriteOrRekeyLocked(
ctx context.Context, lState *lockState, mdType mdUpdateType) (
md ImmutableRootMetadata, err error) {
defer func() {
if err != nil || mdType == mdRekey {
return
}
err = fbo.identifyOnce(ctx, md.ReadOnly())
}()
md = fbo.getTrustedHead(lState)
if md != (ImmutableRootMetadata{}) {
return md, nil
}
// MDs coming from rekey notifications are marked untrusted.
//
// TODO: Make tests not take this code path.
fbo.mdWriterLock.AssertLocked(lState)
// Not in cache, fetch from server and add to cache. First, see
// if this device has any unmerged commits -- take the latest one.
mdops := fbo.config.MDOps()
// get the head of the unmerged branch for this device (if any)
md, err = mdops.GetUnmergedForTLF(ctx, fbo.id(), NullBranchID)
if err != nil {
return ImmutableRootMetadata{}, err
}
mergedMD, err := mdops.GetForTLF(ctx, fbo.id())
if err != nil {
return ImmutableRootMetadata{}, err
}
if mergedMD == (ImmutableRootMetadata{}) {
return ImmutableRootMetadata{},
errors.WithStack(NoMergedMDError{fbo.id()})
}
if md == (ImmutableRootMetadata{}) {
// There are no unmerged MDs for this device, so just use the current head.
md = mergedMD
} else {
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// We don't need to do this for the merged head
// because setHeadLocked() already does that
// anyway.
fbo.setLatestMergedRevisionLocked(ctx, lState, mergedMD.Revision(), false)
}()
}
if md.data.Dir.Type != Dir && (!md.IsInitialized() || md.IsReadable()) {
return ImmutableRootMetadata{}, errors.Errorf("Got undecryptable RMD for %s: initialized=%t, readable=%t", fbo.id(), md.IsInitialized(), md.IsReadable())
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
headStatus := headTrusted
if mdType == mdRekey {
// If we already have a head (that has been filled after the initial
// check, but before we acquired the lock), then just return it.
if fbo.head != (ImmutableRootMetadata{}) {
return fbo.head, nil
}
headStatus = headUntrusted
}
err = fbo.setHeadLocked(ctx, lState, md, headStatus)
if err != nil {
return ImmutableRootMetadata{}, err
}
return md, nil
}
func (fbo *folderBranchOps) getMDForReadHelper(
ctx context.Context, lState *lockState, rtype mdReadType) (ImmutableRootMetadata, error) {
md, err := fbo.getMDForReadLocked(ctx, lState, rtype)
if err != nil {
return ImmutableRootMetadata{}, err
}
if !md.TlfID().IsPublic() {
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return ImmutableRootMetadata{}, err
}
if !md.GetTlfHandle().IsReader(session.UID) {
return ImmutableRootMetadata{}, NewReadAccessError(
md.GetTlfHandle(), session.Name, md.GetTlfHandle().GetCanonicalPath())
}
}
return md, nil
}
// getMostRecentFullyMergedMD is a helper method that returns the most
// recent merged MD that has been flushed to the server. This could
// be different from the current local head if journaling is on. If
// the journal is on a branch, it returns an error.
func (fbo *folderBranchOps) getMostRecentFullyMergedMD(ctx context.Context) (
ImmutableRootMetadata, error) {
mergedRev, err := fbo.getJournalPredecessorRevision(ctx)
if err != nil {
return ImmutableRootMetadata{}, err
}
if mergedRev == MetadataRevisionUninitialized {
// No unflushed journal entries, so use the local head.
lState := makeFBOLockState()
return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify)
}
// Otherwise, use the specified revision.
rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), NullBranchID,
mergedRev, Merged)
if err != nil {
return ImmutableRootMetadata{}, err
}
fbo.log.CDebugf(ctx, "Most recent fully merged revision is %d", mergedRev)
return rmd, nil
}
func (fbo *folderBranchOps) getMDForReadNoIdentify(
ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify)
}
func (fbo *folderBranchOps) getMDForReadNeedIdentify(
ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
return fbo.getMDForReadHelper(ctx, lState, mdReadNeedIdentify)
}
// getMDForReadNeedIdentifyOnMaybeFirstAccess should be called by a
// code path (like chat) that might be accessing this folder for the
// first time. Other folderBranchOps methods like Lookup which know
// the folder has already been accessed at least once (to get the root
// node, for example) do not need to call this. Unlike other getMD
// calls, this one may return a zero-valued ImmutableRootMetadata along with a
// nil error, to indicate that there isn't any MD for this TLF yet and
// one must be created by the caller.
func (fbo *folderBranchOps) getMDForReadNeedIdentifyOnMaybeFirstAccess(
ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
md, err := fbo.getMDForReadLocked(ctx, lState, mdReadNeedIdentify)
if _, ok := err.(MDWriteNeededInRequest); ok {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
md, err = fbo.getMDForWriteOrRekeyLocked(ctx, lState, mdWrite)
}
if _, noMD := errors.Cause(err).(NoMergedMDError); noMD {
return ImmutableRootMetadata{}, nil
}
if err != nil {
return ImmutableRootMetadata{}, err
}
if !md.TlfID().IsPublic() {
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return ImmutableRootMetadata{}, err
}
if !md.GetTlfHandle().IsReader(session.UID) {
return ImmutableRootMetadata{}, NewReadAccessError(
md.GetTlfHandle(), session.Name, md.GetTlfHandle().GetCanonicalPath())
}
}
return md, nil
}
// getMDForWriteLocked returns a new RootMetadata object with an
// incremented version number for modification. If the returned object
// is put to the MDServer (via MDOps), mdWriterLock must be held until
// then. (See comments for mdWriterLock above.)
func (fbo *folderBranchOps) getMDForWriteLocked(
ctx context.Context, lState *lockState) (*RootMetadata, error) {
return fbo.getMDForWriteLockedForFilename(ctx, lState, "")
}
func (fbo *folderBranchOps) getMDForWriteLockedForFilename(
ctx context.Context, lState *lockState, filename string) (*RootMetadata, error) {
fbo.mdWriterLock.AssertLocked(lState)
md, err := fbo.getMDForWriteOrRekeyLocked(ctx, lState, mdWrite)
if err != nil {
return nil, err
}
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return nil, err
}
if !md.GetTlfHandle().IsWriter(session.UID) {
return nil, NewWriteAccessError(
md.GetTlfHandle(), session.Name, filename)
}
// Make a new successor of the current MD to hold the coming
// writes. The caller must pass this into
// syncBlockAndCheckEmbedLocked or the changes will be lost.
newMd, err := md.MakeSuccessor(ctx, fbo.config.MetadataVersion(),
fbo.config.Codec(), fbo.config.Crypto(),
fbo.config.KeyManager(), md.mdID, true)
if err != nil {
return nil, err
}
return newMd, nil
}
func (fbo *folderBranchOps) getMDForRekeyWriteLocked(
ctx context.Context, lState *lockState) (
rmd *RootMetadata, lastWriterVerifyingKey kbfscrypto.VerifyingKey,
wasRekeySet bool, err error) {
fbo.mdWriterLock.AssertLocked(lState)
md, err := fbo.getMDForWriteOrRekeyLocked(ctx, lState, mdRekey)
if err != nil {
return nil, kbfscrypto.VerifyingKey{}, false, err
}
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return nil, kbfscrypto.VerifyingKey{}, false, err
}
handle := md.GetTlfHandle()
// Must be a reader or writer (it checks both).
if !handle.IsReader(session.UID) {
return nil, kbfscrypto.VerifyingKey{}, false,
NewRekeyPermissionError(md.GetTlfHandle(), session.Name)
}
newMd, err := md.MakeSuccessor(ctx, fbo.config.MetadataVersion(),
fbo.config.Codec(), fbo.config.Crypto(),
fbo.config.KeyManager(), md.mdID, handle.IsWriter(session.UID))
if err != nil {
return nil, kbfscrypto.VerifyingKey{}, false, err
}
// readers shouldn't modify writer metadata
if !handle.IsWriter(session.UID) && !newMd.IsWriterMetadataCopiedSet() {
return nil, kbfscrypto.VerifyingKey{}, false,
NewRekeyPermissionError(handle, session.Name)
}
return newMd, md.LastModifyingWriterVerifyingKey(), md.IsRekeySet(), nil
}
func (fbo *folderBranchOps) nowUnixNano() int64 {
return fbo.config.Clock().Now().UnixNano()
}
func (fbo *folderBranchOps) maybeUnembedAndPutBlocks(ctx context.Context,
md *RootMetadata) (*blockPutState, error) {
if fbo.config.BlockSplitter().ShouldEmbedBlockChanges(&md.data.Changes) {
return nil, nil
}
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return nil, err
}
bps := newBlockPutState(1)
err = fbo.unembedBlockChanges(ctx, bps, md, &md.data.Changes, session.UID)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
fbo.fbm.cleanUpBlockState(md.ReadOnly(), bps, blockDeleteOnMDFail)
}
}()
ptrsToDelete, err := doBlockPuts(ctx, fbo.config.BlockServer(),
fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, md.TlfID(),
md.GetTlfHandle().GetCanonicalName(), *bps)
if err != nil {
return nil, err
}
if len(ptrsToDelete) > 0 {
return nil, errors.Errorf("Unexpected pointers to delete after "+
"unembedding block changes in gc op: %v", ptrsToDelete)
}
return bps, nil
}
// ResetRootBlock creates a new empty dir block and sets the given
// metadata's root block to it.
func ResetRootBlock(ctx context.Context, config Config,
currentUID keybase1.UID, rmd *RootMetadata) (
Block, BlockInfo, ReadyBlockData, error) {
newDblock := NewDirBlock()
info, plainSize, readyBlockData, err :=
ReadyBlock(ctx, config.BlockCache(), config.BlockOps(),
config.Crypto(), rmd.ReadOnly(), newDblock, currentUID,
keybase1.BlockType_DATA)
if err != nil {
return nil, BlockInfo{}, ReadyBlockData{}, err
}
now := config.Clock().Now().UnixNano()
rmd.data.Dir = DirEntry{
BlockInfo: info,
EntryInfo: EntryInfo{
Type: Dir,
Size: uint64(plainSize),
Mtime: now,
Ctime: now,
},
}
prevDiskUsage := rmd.DiskUsage()
rmd.SetDiskUsage(0)
// Redundant, since this is called only for brand-new or
// successor RMDs, but leave in to be defensive.
rmd.ClearBlockChanges()
co := newCreateOpForRootDir()
rmd.AddOp(co)
rmd.AddRefBlock(rmd.data.Dir.BlockInfo)
// Set unref bytes to the previous disk usage, so that the
// accounting works out.
rmd.AddUnrefBytes(prevDiskUsage)
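// Assuming AddRefBlock above also bumps disk usage (as it is used
// elsewhere in this file), DiskUsage now counts just the new root
// block, while UnrefBytes absorbs the previous usage.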
return newDblock, info, readyBlockData, nil
}
func (fbo *folderBranchOps) initMDLocked(
ctx context.Context, lState *lockState, md *RootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
handle := md.GetTlfHandle()
// make sure we're a writer before rekeying or putting any blocks.
if !handle.IsWriter(session.UID) {
return NewWriteAccessError(
handle, session.Name, handle.GetCanonicalPath())
}
var expectedKeyGen KeyGen
var tlfCryptKey *kbfscrypto.TLFCryptKey
if md.TlfID().IsPublic() {
expectedKeyGen = PublicKeyGen
} else {
var rekeyDone bool
// create a new set of keys for this metadata
rekeyDone, tlfCryptKey, err = fbo.config.KeyManager().Rekey(ctx, md, false)
if err != nil {
return err
}
if !rekeyDone {
return errors.Errorf("Initial rekey unexpectedly not done for "+
"private TLF %v", md.TlfID())
}
expectedKeyGen = FirstValidKeyGen
}
keyGen := md.LatestKeyGeneration()
if keyGen != expectedKeyGen {
return InvalidKeyGenerationError{md.TlfID(), keyGen}
}
// create a dblock since one doesn't exist yet
newDblock, info, readyBlockData, err :=
ResetRootBlock(ctx, fbo.config, session.UID, md)
if err != nil {
return err
}
// Some other thread got here first, so give up and let it go
// before we push anything to the servers.
if h, _ := fbo.getHead(lState); h != (ImmutableRootMetadata{}) {
fbo.log.CDebugf(ctx, "Head was already set, aborting")
return nil
}
if err = PutBlockCheckQuota(ctx, fbo.config.BlockServer(),
fbo.config.Reporter(), md.TlfID(), info.BlockPointer, readyBlockData,
md.GetTlfHandle().GetCanonicalName()); err != nil {
return err
}
if err = fbo.config.BlockCache().Put(
info.BlockPointer, fbo.id(), newDblock, TransientEntry); err != nil {
return err
}
bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md)
if err != nil {
return err
}
err = fbo.finalizeBlocks(bps)
if err != nil {
return err
}
// finally, write out the new metadata
mdID, err := fbo.config.MDOps().Put(ctx, md)
if err != nil {
return err
}
md.loadCachedBlockChanges(ctx, bps)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.Errorf(
"%v: Unexpected MD ID during new MD initialization: %v",
md.TlfID(), fbo.head.mdID)
}
err = fbo.setNewInitialHeadLocked(ctx, lState, MakeImmutableRootMetadata(
md, session.VerifyingKey, mdID, fbo.config.Clock().Now()))
if err != nil {
return err
}
// cache any new TLF crypt key
if tlfCryptKey != nil {
err = fbo.config.KeyCache().PutTLFCryptKey(md.TlfID(), keyGen, *tlfCryptKey)
if err != nil {
return err
}
}
return nil
}
func (fbo *folderBranchOps) GetTLFCryptKeys(ctx context.Context,
h *TlfHandle) (keys []kbfscrypto.TLFCryptKey, id tlf.ID, err error) {
return nil, tlf.ID{}, errors.New("GetTLFCryptKeys is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) GetTLFID(ctx context.Context, h *TlfHandle) (tlf.ID, error) {
return tlf.ID{}, errors.New("GetTLFID is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) GetOrCreateRootNode(
ctx context.Context, h *TlfHandle, branch BranchName) (
node Node, ei EntryInfo, err error) {
return nil, EntryInfo{}, errors.New("GetOrCreateRootNode is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) GetRootNode(
ctx context.Context, h *TlfHandle, branch BranchName) (
node Node, ei EntryInfo, err error) {
return nil, EntryInfo{}, errors.New("GetRootNode is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) checkNode(node Node) error {
fb := node.GetFolderBranch()
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
return nil
}
// SetInitialHeadFromServer sets the head to the given
// ImmutableRootMetadata, which must be retrieved from the MD server.
func (fbo *folderBranchOps) SetInitialHeadFromServer(
ctx context.Context, md ImmutableRootMetadata) (err error) {
fbo.log.CDebugf(ctx, "SetInitialHeadFromServer, revision=%d (%s)",
md.Revision(), md.MergedStatus())
defer func() {
fbo.deferLog.CDebugf(ctx,
"SetInitialHeadFromServer, revision=%d (%s) done: %+v",
md.Revision(), md.MergedStatus(), err)
}()
if md.IsReadable() {
// We will prefetch this as on-demand so that it triggers downstream
// prefetches.
fbo.config.BlockOps().Prefetcher().PrefetchBlock(
&DirBlock{}, md.data.Dir.BlockPointer, md,
defaultOnDemandRequestPriority)
} else {
fbo.log.CDebugf(ctx,
"Setting an unreadable head with revision=%d", md.Revision())
}
// Return early if the head is already set. This avoids taking
// mdWriterLock for no reason, and it also avoids any side effects
// (e.g., calling `identifyOnce` and downloading the merged
// head) if head is already set.
lState := makeFBOLockState()
head, headStatus := fbo.getHead(lState)
if headStatus == headTrusted && head != (ImmutableRootMetadata{}) && head.mdID == md.mdID {
fbo.log.CDebugf(ctx, "Head MD already set to revision %d (%s), no "+
"need to set initial head again", md.Revision(), md.MergedStatus())
return nil
}
return runUnlessCanceled(ctx, func() error {
fb := FolderBranch{md.TlfID(), MasterBranch}
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
// Always identify first when trying to initialize the folder,
// even if we turn out not to be a writer. (We can't rely on
// the identifyOnce call in getMDLocked, because that isn't
// called from the initialization code path when the local
// user is not a valid writer.) Also, we want to make sure we
// fail before we set the head, otherwise future calls will
// succeed incorrectly.
err = fbo.identifyOnce(ctx, md.ReadOnly())
if err != nil {
return err
}
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
if md.MergedStatus() == Unmerged {
mdops := fbo.config.MDOps()
mergedMD, err := mdops.GetForTLF(ctx, fbo.id())
if err != nil {
return err
}
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
fbo.setLatestMergedRevisionLocked(ctx, lState,
mergedMD.Revision(), false)
}()
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// Only update the head the first time; later it will be
// updated either directly via writes or through the
// background update processor.
if fbo.head == (ImmutableRootMetadata{}) {
err = fbo.setInitialHeadTrustedLocked(ctx, lState, md)
if err != nil {
return err
}
} else if headStatus == headUntrusted {
err = fbo.validateHeadLocked(ctx, lState, md)
if err != nil {
return err
}
}
return nil
})
}
// SetInitialHeadToNew creates a brand-new ImmutableRootMetadata
// object and sets the head to that. This is trusted.
func (fbo *folderBranchOps) SetInitialHeadToNew(
ctx context.Context, id tlf.ID, handle *TlfHandle) (err error) {
fbo.log.CDebugf(ctx, "SetInitialHeadToNew %s", id)
defer func() {
fbo.deferLog.CDebugf(ctx, "SetInitialHeadToNew %s done: %+v",
id, err)
}()
rmd, err := makeInitialRootMetadata(
fbo.config.MetadataVersion(), id, handle)
if err != nil {
return err
}
return runUnlessCanceled(ctx, func() error {
fb := FolderBranch{rmd.TlfID(), MasterBranch}
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
// Always identify first when trying to initialize the folder,
// even if we turn out not to be a writer. (We can't rely on
// the identifyOnce call in getMDLocked, because that isn't
// called from the initialization code path when the local
// user is not a valid writer.) Also, we want to make sure we
// fail before we set the head, otherwise future calls will
// succeed incorrectly.
err = fbo.identifyOnce(ctx, rmd.ReadOnly())
if err != nil {
return err
}
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.initMDLocked(ctx, lState, rmd)
})
}
func getNodeIDStr(n Node) string {
if n == nil {
return "NodeID(nil)"
}
return fmt.Sprintf("NodeID(%v)", n.GetID())
}
func (fbo *folderBranchOps) getRootNode(ctx context.Context) (
node Node, ei EntryInfo, handle *TlfHandle, err error) {
fbo.log.CDebugf(ctx, "getRootNode")
defer func() {
fbo.deferLog.CDebugf(ctx, "getRootNode done: %s %+v",
getNodeIDStr(node), err)
}()
lState := makeFBOLockState()
var md ImmutableRootMetadata
md, err = fbo.getMDForReadLocked(ctx, lState, mdReadNoIdentify)
if _, ok := err.(MDWriteNeededInRequest); ok {
func() {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
md, err = fbo.getMDForWriteOrRekeyLocked(ctx, lState, mdWrite)
}()
}
if err != nil {
return nil, EntryInfo{}, nil, err
}
// we may be an unkeyed client
if err := isReadableOrError(ctx, fbo.config.KBPKI(), md.ReadOnly()); err != nil {
return nil, EntryInfo{}, nil, err
}
handle = md.GetTlfHandle()
node, err = fbo.nodeCache.GetOrCreate(md.data.Dir.BlockPointer,
string(handle.GetCanonicalName()), nil)
if err != nil {
return nil, EntryInfo{}, nil, err
}
return node, md.Data().Dir.EntryInfo, handle, nil
}
type makeNewBlock func() Block
// pathFromNodeHelper() shouldn't be called except by the helper
// functions below.
func (fbo *folderBranchOps) pathFromNodeHelper(n Node) (path, error) {
p := fbo.nodeCache.PathFromNode(n)
if !p.isValid() {
return path{}, InvalidPathError{p}
}
return p, nil
}
// Helper functions to clarify uses of pathFromNodeHelper() (see
// nodeCache comments).
func (fbo *folderBranchOps) pathFromNodeForRead(n Node) (path, error) {
return fbo.pathFromNodeHelper(n)
}
func (fbo *folderBranchOps) pathFromNodeForMDWriteLocked(
lState *lockState, n Node) (path, error) {
fbo.mdWriterLock.AssertLocked(lState)
return fbo.pathFromNodeHelper(n)
}
func (fbo *folderBranchOps) GetDirChildren(ctx context.Context, dir Node) (
children map[string]EntryInfo, err error) {
fbo.log.CDebugf(ctx, "GetDirChildren %s", getNodeIDStr(dir))
defer func() {
fbo.deferLog.CDebugf(ctx, "GetDirChildren %s done: %+v",
getNodeIDStr(dir), err)
}()
err = fbo.checkNode(dir)
if err != nil {
return nil, err
}
err = runUnlessCanceled(ctx, func() error {
var err error
lState := makeFBOLockState()
md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return err
}
dirPath, err := fbo.pathFromNodeForRead(dir)
if err != nil {
return err
}
// If the MD doesn't match the MD expected by the path, that
// implies we are using a cached path, which implies the node
// has been unlinked. Probably we have fast-forwarded, and
// missed all the updates deleting the children in this
// directory. In that case, just return an empty set of
// children so we don't return an incorrect set from the
// cache.
if md.data.Dir.BlockPointer.ID != dirPath.path[0].BlockPointer.ID {
fbo.log.CDebugf(ctx, "Returning an empty children set for "+
"unlinked directory %v", dirPath.tailPointer())
return nil
}
children, err = fbo.blocks.GetDirtyDirChildren(
ctx, lState, md.ReadOnly(), dirPath)
if err != nil {
return err
}
return nil
})
if err != nil {
return nil, err
}
return children, nil
}
func (fbo *folderBranchOps) Lookup(ctx context.Context, dir Node, name string) (
node Node, ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "Lookup %s %s", getNodeIDStr(dir), name)
defer func() {
fbo.deferLog.CDebugf(ctx, "Lookup %s %s done: %v %+v",
getNodeIDStr(dir), name, getNodeIDStr(node), err)
}()
err = fbo.checkNode(dir)
if err != nil {
return nil, EntryInfo{}, err
}
var de DirEntry
err = runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return err
}
node, de, err = fbo.blocks.Lookup(ctx, lState, md.ReadOnly(), dir, name)
if err != nil {
return err
}
return nil
})
if err != nil {
return nil, EntryInfo{}, err
}
return node, de.EntryInfo, nil
}
// statEntry is like Stat, but it returns a DirEntry. This is used by
// tests.
func (fbo *folderBranchOps) statEntry(ctx context.Context, node Node) (
de DirEntry, err error) {
err = fbo.checkNode(node)
if err != nil {
return DirEntry{}, err
}
lState := makeFBOLockState()
nodePath, err := fbo.pathFromNodeForRead(node)
if err != nil {
return DirEntry{}, err
}
var md ImmutableRootMetadata
if nodePath.hasValidParent() {
md, err = fbo.getMDForReadNeedIdentify(ctx, lState)
} else {
// If nodePath has no valid parent, it's just the TLF
// root, so we don't need an identify in this case.
md, err = fbo.getMDForReadNoIdentify(ctx, lState)
}
if err != nil {
return DirEntry{}, err
}
if nodePath.hasValidParent() {
de, err = fbo.blocks.GetDirtyEntry(
ctx, lState, md.ReadOnly(), nodePath)
if err != nil {
return DirEntry{}, err
}
} else {
// nodePath is just the root.
de = md.data.Dir
}
return de, nil
}
var zeroPtr BlockPointer
type blockState struct {
blockPtr BlockPointer
block Block
readyBlockData ReadyBlockData
syncedCb func() error
}
func (fbo *folderBranchOps) Stat(ctx context.Context, node Node) (
ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "Stat %s", getNodeIDStr(node))
defer func() {
fbo.deferLog.CDebugf(ctx, "Stat %s done: %+v",
getNodeIDStr(node), err)
}()
var de DirEntry
err = runUnlessCanceled(ctx, func() error {
de, err = fbo.statEntry(ctx, node)
return err
})
if err != nil {
return EntryInfo{}, err
}
return de.EntryInfo, nil
}
func (fbo *folderBranchOps) GetNodeMetadata(ctx context.Context, node Node) (
ei NodeMetadata, err error) {
fbo.log.CDebugf(ctx, "GetNodeMetadata %s", getNodeIDStr(node))
defer func() {
fbo.deferLog.CDebugf(ctx, "GetNodeMetadata %s done: %+v",
getNodeIDStr(node), err)
}()
var de DirEntry
err = runUnlessCanceled(ctx, func() error {
de, err = fbo.statEntry(ctx, node)
return err
})
var res NodeMetadata
if err != nil {
return res, err
}
res.BlockInfo = de.BlockInfo
uid := de.Writer
if uid == keybase1.UID("") {
uid = de.Creator
}
res.LastWriterUnverified, err =
fbo.config.KBPKI().GetNormalizedUsername(ctx, uid)
if err != nil {
return res, err
}
return res, nil
}
// blockPutState is an internal structure to track data when putting blocks
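//
// A hedged sketch of typical usage in this file (cf.
// maybeUnembedAndPutBlocks above): collect readied blocks, then push
// them all with doBlockPuts.
//
//	bps := newBlockPutState(1)
//	bps.addNewBlock(info.BlockPointer, block, readyBlockData, nil)
//	ptrsToDelete, err := doBlockPuts(ctx, fbo.config.BlockServer(),
//		fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log,
//		md.TlfID(), md.GetTlfHandle().GetCanonicalName(), *bps)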
type blockPutState struct {
blockStates []blockState
}
func newBlockPutState(length int) *blockPutState {
bps := &blockPutState{}
bps.blockStates = make([]blockState, 0, length)
return bps
}
// addNewBlock tracks a new block that will be put. If syncedCb is
// non-nil, it will be called whenever the put for that block is
// complete (whether or not the put resulted in an error). Currently
// it will not be called if the block is never put (due to an earlier
// error).
func (bps *blockPutState) addNewBlock(blockPtr BlockPointer, block Block,
readyBlockData ReadyBlockData, syncedCb func() error) {
bps.blockStates = append(bps.blockStates,
blockState{blockPtr, block, readyBlockData, syncedCb})
}
func (bps *blockPutState) mergeOtherBps(other *blockPutState) {
bps.blockStates = append(bps.blockStates, other.blockStates...)
}
func (bps *blockPutState) DeepCopy() *blockPutState {
newBps := &blockPutState{}
newBps.blockStates = make([]blockState, len(bps.blockStates))
copy(newBps.blockStates, bps.blockStates)
return newBps
}
func (fbo *folderBranchOps) readyBlockMultiple(ctx context.Context,
kmd KeyMetadata, currBlock Block, uid keybase1.UID,
bps *blockPutState, bType keybase1.BlockType) (
info BlockInfo, plainSize int, err error) {
info, plainSize, readyBlockData, err :=
ReadyBlock(ctx, fbo.config.BlockCache(), fbo.config.BlockOps(),
fbo.config.Crypto(), kmd, currBlock, uid, bType)
if err != nil {
return
}
bps.addNewBlock(info.BlockPointer, currBlock, readyBlockData, nil)
return
}
func (fbo *folderBranchOps) unembedBlockChanges(
ctx context.Context, bps *blockPutState, md *RootMetadata,
changes *BlockChanges, uid keybase1.UID) error {
buf, err := fbo.config.Codec().Encode(changes)
if err != nil {
return err
}
// Treat the block change list as a file so we can reuse all the
// indirection code in fileData.
block := NewFileBlock().(*FileBlock)
bid, err := fbo.config.Crypto().MakeTemporaryBlockID()
if err != nil {
return err
}
ptr := BlockPointer{
ID: bid,
KeyGen: md.LatestKeyGeneration(),
DataVer: fbo.config.DataVersion(),
DirectType: DirectBlock,
Context: kbfsblock.MakeFirstContext(uid, keybase1.BlockType_MD),
}
file := path{fbo.folderBranch,
[]pathNode{{ptr, fmt.Sprintf("<MD rev %d>", md.Revision())}}}
dirtyBcache := simpleDirtyBlockCacheStandard()
// Simple dirty bcaches don't need to be shut down.
getter := func(_ context.Context, _ KeyMetadata, ptr BlockPointer,
_ path, _ blockReqType) (*FileBlock, bool, error) {
block, err := dirtyBcache.Get(fbo.id(), ptr, fbo.branch())
if err != nil {
return nil, false, err
}
fblock, ok := block.(*FileBlock)
if !ok {
return nil, false,
errors.Errorf("Block for %s is not a file block", ptr)
}
return fblock, true, nil
}
cacher := func(ptr BlockPointer, block Block) error {
return dirtyBcache.Put(fbo.id(), ptr, fbo.branch(), block)
}
// Start off the cache with the new block
err = cacher(ptr, block)
if err != nil {
return err
}
df := newDirtyFile(file, dirtyBcache)
fd := newFileData(file, uid, fbo.config.Crypto(),
fbo.config.BlockSplitter(), md.ReadOnly(), getter, cacher, fbo.log)
// Write all the data.
_, _, _, _, _, err = fd.write(ctx, buf, 0, block, DirEntry{}, df)
if err != nil {
return err
}
// There might be a new top block.
topBlock, err := dirtyBcache.Get(fbo.id(), ptr, fbo.branch())
if err != nil {
return err
}
block, ok := topBlock.(*FileBlock)
if !ok {
return errors.New("Top block change block no longer a file block")
}
// Ready all the child blocks.
infos, err := fd.ready(ctx, fbo.id(), fbo.config.BlockCache(),
dirtyBcache, fbo.config.BlockOps(), bps, block, df)
if err != nil {
return err
}
for info := range infos {
md.AddMDRefBytes(uint64(info.EncodedSize))
md.AddMDDiskUsage(uint64(info.EncodedSize))
}
fbo.log.CDebugf(ctx, "%d unembedded child blocks", len(infos))
// Ready the top block.
info, _, err := fbo.readyBlockMultiple(
ctx, md.ReadOnly(), block, uid, bps, keybase1.BlockType_MD)
if err != nil {
return err
}
md.AddMDRefBytes(uint64(info.EncodedSize))
md.AddMDDiskUsage(uint64(info.EncodedSize))
md.data.cachedChanges = *changes
changes.Info = info
changes.Ops = nil
return nil
}
type localBcache map[BlockPointer]*DirBlock
// syncBlock updates, and readies, the blocks along the path for the
// given write, up to the root of the tree or stopAt (if specified).
// When it updates the root of the tree, it also modifies the given
// head object with a new revision number and root block ID. It first
// checks the provided lbc for blocks that may have been modified by
// previous syncBlock calls or the FS calls themselves. It returns
// the updated path to the changed directory, the new or updated
// directory entry created as part of the call, and a summary of all
// the blocks that now must be put to the block server.
//
// This function is safe to use unlocked, but may modify MD to have
// the same revision number as another one. All functions in this file
// must call syncBlockLocked instead, which holds mdWriterLock and
// thus serializes the revision numbers. Conflict resolution may call
// syncBlockForConflictResolution, which doesn't hold the lock, since
// it already handles conflicts correctly.
//
// entryType must not be Sym.
//
// TODO: deal with multiple nodes for indirect blocks
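//
// For example, syncing a write to c in /a/b with stopAt set to b's
// BlockPointer readies c, updates b's entry for c, stores the
// still-dirty b in lbc, and leaves b and everything above it to a
// later syncBlock call.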
func (fbo *folderBranchOps) syncBlock(
ctx context.Context, lState *lockState, uid keybase1.UID,
md *RootMetadata, newBlock Block, dir path, name string,
entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer,
lbc localBcache) (path, DirEntry, *blockPutState, error) {
// now ready each dblock and write the DirEntry for the next one
// in the path
currBlock := newBlock
currName := name
newPath := path{
FolderBranch: dir.FolderBranch,
path: make([]pathNode, 0, len(dir.path)),
}
bps := newBlockPutState(len(dir.path))
refPath := dir.ChildPathNoPtr(name)
var newDe DirEntry
doSetTime := true
now := fbo.nowUnixNano()
for len(newPath.path) < len(dir.path)+1 {
info, plainSize, err := fbo.readyBlockMultiple(
ctx, md.ReadOnly(), currBlock, uid, bps, keybase1.BlockType_DATA)
if err != nil {
return path{}, DirEntry{}, nil, err
}
// prepend to path and setup next one
newPath.path = append([]pathNode{{info.BlockPointer, currName}},
newPath.path...)
// get the parent block
prevIdx := len(dir.path) - len(newPath.path)
var prevDblock *DirBlock
var de DirEntry
var nextName string
nextDoSetTime := false
if prevIdx < 0 {
// root dir, update the MD instead
de = md.data.Dir
} else {
prevDir := path{
FolderBranch: dir.FolderBranch,
path: dir.path[:prevIdx+1],
}
// First, check the localBcache, which could contain
// blocks that were modified across multiple calls to
// syncBlock.
var ok bool
prevDblock, ok = lbc[prevDir.tailPointer()]
if !ok {
// If the block isn't in the local bcache, we
// have to fetch it, possibly from the
// network. Directory blocks are only ever
// modified while holding mdWriterLock, so it's
// safe to fetch them one at a time.
prevDblock, err = fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(),
prevDir, blockWrite)
if err != nil {
return path{}, DirEntry{}, nil, err
}
}
// modify the direntry for currName; make one
// if it doesn't exist (which should only
// happen the first time around).
//
// TODO: Pull the creation out of here and
// into createEntryLocked().
if de, ok = prevDblock.Children[currName]; !ok {
// If this isn't the first time
// around, we have an error.
if len(newPath.path) > 1 {
return path{}, DirEntry{}, nil, NoSuchNameError{currName}
}
// If this is a file, the size should be 0. (TODO:
// Ensure this.) If this is a directory, the size will
// be filled in below. The times will be filled in
// below as well, since we should only be creating a
// new directory entry when doSetTime is true.
de = DirEntry{
EntryInfo: EntryInfo{
Type: entryType,
Size: 0,
},
}
// If we're creating a new directory entry, the
// parent's times must be set as well.
nextDoSetTime = true
}
currBlock = prevDblock
nextName = prevDir.tailName()
}
if de.Type == Dir {
// TODO: When we use indirect dir blocks,
// we'll have to calculate the size some other
// way.
de.Size = uint64(plainSize)
}
if prevIdx < 0 {
md.AddUpdate(md.data.Dir.BlockInfo, info)
} else if prevDe, ok := prevDblock.Children[currName]; ok {
md.AddUpdate(prevDe.BlockInfo, info)
} else {
// this is a new block
md.AddRefBlock(info)
}
if len(refPath.path) > 1 {
refPath = *refPath.parentPath()
}
de.BlockInfo = info
if doSetTime {
if mtime {
de.Mtime = now
}
if ctime {
de.Ctime = now
}
}
if !newDe.IsInitialized() {
newDe = de
}
if prevIdx < 0 {
md.data.Dir = de
} else {
prevDblock.Children[currName] = de
}
currName = nextName
// Stop before we get to the common ancestor; it will be taken care of
// on the next sync call
if prevIdx >= 0 && dir.path[prevIdx].BlockPointer == stopAt {
// Put this back into the cache as dirty -- the next
// syncBlock call will ready it.
dblock, ok := currBlock.(*DirBlock)
if !ok {
return path{}, DirEntry{}, nil, BadDataError{stopAt.ID}
}
lbc[stopAt] = dblock
break
}
doSetTime = nextDoSetTime
}
return newPath, newDe, bps, nil
}
// syncBlockLocked calls syncBlock under mdWriterLock.
func (fbo *folderBranchOps) syncBlockLocked(
ctx context.Context, lState *lockState, uid keybase1.UID,
md *RootMetadata, newBlock Block, dir path, name string,
entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer,
lbc localBcache) (path, DirEntry, *blockPutState, error) {
fbo.mdWriterLock.AssertLocked(lState)
return fbo.syncBlock(ctx, lState, uid, md, newBlock, dir, name,
entryType, mtime, ctime, stopAt, lbc)
}
// syncBlockForConflictResolution calls syncBlock unlocked, since
// conflict resolution can handle MD revision number conflicts
// correctly.
func (fbo *folderBranchOps) syncBlockForConflictResolution(
ctx context.Context, lState *lockState, uid keybase1.UID,
md *RootMetadata, newBlock Block, dir path, name string,
entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer,
lbc localBcache) (path, DirEntry, *blockPutState, error) {
return fbo.syncBlock(
ctx, lState, uid, md, newBlock, dir,
name, entryType, mtime, ctime, stopAt, lbc)
}
// entryType must not be Sym.
func (fbo *folderBranchOps) syncBlockAndCheckEmbedLocked(ctx context.Context,
lState *lockState, md *RootMetadata, newBlock Block, dir path,
name string, entryType EntryType, mtime bool, ctime bool,
stopAt BlockPointer, lbc localBcache) (
path, DirEntry, *blockPutState, error) {
fbo.mdWriterLock.AssertLocked(lState)
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return path{}, DirEntry{}, nil, err
}
newPath, newDe, bps, err := fbo.syncBlockLocked(
ctx, lState, session.UID, md, newBlock, dir, name, entryType,
mtime, ctime, stopAt, lbc)
if err != nil {
return path{}, DirEntry{}, nil, err
}
// Do the block changes need their own blocks? Unembed only if
// this is the final call to this function with this MD.
if stopAt == zeroPtr {
bsplit := fbo.config.BlockSplitter()
if !bsplit.ShouldEmbedBlockChanges(&md.data.Changes) {
err = fbo.unembedBlockChanges(
ctx, bps, md, &md.data.Changes, session.UID)
if err != nil {
return path{}, DirEntry{}, nil, err
}
}
}
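// (Sketch, for orientation: with a typical block splitter,
// ShouldEmbedBlockChanges returns false once the serialized block
// changes exceed its embedding threshold; unembedBlockChanges then
// writes them out as separate blocks, charging their sizes to the MD
// via AddMDRefBytes/AddMDDiskUsage, as seen above.)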
return newPath, newDe, bps, nil
}
// Returns whether the given error is one that shouldn't block the
// removal of a file or directory.
//
// TODO: Consider treating other errors as recoverable, e.g. ones that
// arise from present-but-corrupted blocks?
func isRecoverableBlockErrorForRemoval(err error) bool {
return isRecoverableBlockError(err)
}
func isRetriableError(err error, retries int) bool {
_, isExclOnUnmergedError := err.(ExclOnUnmergedError)
_, isUnmergedSelfConflictError := err.(UnmergedSelfConflictError)
recoverable := isExclOnUnmergedError || isUnmergedSelfConflictError ||
isRecoverableBlockError(err)
return recoverable && retries < maxRetriesOnRecoverableErrors
}
func (fbo *folderBranchOps) finalizeBlocks(bps *blockPutState) error {
if bps == nil {
return nil
}
bcache := fbo.config.BlockCache()
for _, blockState := range bps.blockStates {
newPtr := blockState.blockPtr
// only cache this block if we made a brand new block, not if
// we just incref'd some other block.
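// (Illustrative note, assuming the usual BlockPointer semantics:
// IsFirstRef is true when the pointer's ref nonce is the zero value,
// i.e. this put created the block ID rather than adding another
// reference to an existing block.)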
if !newPtr.IsFirstRef() {
continue
}
if err := bcache.Put(newPtr, fbo.id(), blockState.block,
TransientEntry); err != nil {
return err
}
}
return nil
}
// Returns true if the passed error indicates a revision conflict.
func isRevisionConflict(err error) bool {
if err == nil {
return false
}
_, isConflictRevision := err.(MDServerErrorConflictRevision)
_, isConflictPrevRoot := err.(MDServerErrorConflictPrevRoot)
_, isConflictDiskUsage := err.(MDServerErrorConflictDiskUsage)
_, isConditionFailed := err.(MDServerErrorConditionFailed)
_, isConflictFolderMapping := err.(MDServerErrorConflictFolderMapping)
_, isJournal := err.(MDJournalConflictError)
return isConflictRevision || isConflictPrevRoot ||
isConflictDiskUsage || isConditionFailed ||
isConflictFolderMapping || isJournal
}
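// For a quick illustration (hypothetical zero-value error literals):
//
//   isRevisionConflict(nil)                              // false
//   isRevisionConflict(MDServerErrorConflictRevision{})  // true
//   isRevisionConflict(context.Canceled)                 // false
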
func (fbo *folderBranchOps) finalizeMDWriteLocked(ctx context.Context,
lState *lockState, md *RootMetadata, bps *blockPutState, excl Excl,
afterUpdateFn func() error) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// finally, write out the new metadata
mdops := fbo.config.MDOps()
doUnmergedPut := true
mergedRev := MetadataRevisionUninitialized
oldPrevRoot := md.PrevRoot()
var mdID MdID
// This puts a delay on any cancellations arriving via ctx. It is intended
// to work sort of like a critical section, except that there isn't an
// explicit call to exit the critical section. The cancellation, if any, is
// triggered after a timeout (i.e.
// fbo.config.DelayedCancellationGracePeriod()).
//
// The purpose of trying to avoid cancellation once we start the MD write is
// to avoid having an unpredictable perceived MD state. That is, when
// runUnlessCanceled returns Canceled on cancellation, the application
// receives an EINTR and assumes the operation didn't succeed. But the MD
// write continues, and there's a chance it will succeed, meaning the
// operation succeeds. This contradicts the application's perception through
// the error code and can lead to horrible situations. An easily caught
// situation is when an application calls Create with O_EXCL set, gets an
// EINTR while the MD write succeeds, retries, and gets an EEXIST error. If
// users hit Ctrl-C, this might not be a big deal. However, it also happens
// for other interrupts. For applications that use signals to communicate,
// e.g. SIGALRM and SIGUSR1, this can happen pretty often, and it breaks
// them.
if err = EnableDelayedCancellationWithGracePeriod(
ctx, fbo.config.DelayedCancellationGracePeriod()); err != nil {
return err
}
// We don't explicitly clean up (via a defer) the CancellationDelayer here
// because sometimes fuse makes another call using the same ctx. For example,
// in fuse's Create call handler, a dir.Create is followed by an Attr call. If
// we did a deferred cleanup here and an interrupt had been received, ctx
// could be canceled before the Attr call finishes, causing FUSE to return
// EINTR for the Create request. But at that point, the request may have
// already succeeded. Returning EINTR makes the application think the file
// was not created successfully.
err = fbo.finalizeBlocks(bps)
if err != nil {
return err
}
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
if fbo.isMasterBranchLocked(lState) {
// only do a normal Put if we're not already staged.
mdID, err = mdops.Put(ctx, md)
if doUnmergedPut = isRevisionConflict(err); doUnmergedPut {
fbo.log.CDebugf(ctx, "Conflict: %v", err)
mergedRev = md.Revision()
if excl == WithExcl {
// If this was caused by an exclusive create, we shouldn't do an
// UnmergedPut, but rather try to get newest update from server, and
// retry afterwards.
err = fbo.getAndApplyMDUpdates(ctx, lState, fbo.applyMDUpdatesLocked)
if err != nil {
return err
}
return ExclOnUnmergedError{}
}
} else if err != nil {
return err
}
} else if excl == WithExcl {
return ExclOnUnmergedError{}
}
doResolve := false
resolveMergedRev := mergedRev
if doUnmergedPut {
// We're out of date, and this is not an exclusive write, so put it as an
// unmerged MD.
mdID, err = mdops.PutUnmerged(ctx, md)
if isRevisionConflict(err) {
// Self-conflicts are retried in `doMDWriteWithRetry`.
err = UnmergedSelfConflictError{err}
}
if err != nil {
return err
}
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
doResolve = true
} else {
fbo.setBranchIDLocked(lState, NullBranchID)
if md.IsRekeySet() && !md.IsWriterMetadataCopiedSet() {
// Queue this folder for rekey if the bit was set and it's not a copy.
// This is for the case where we're coming out of conflict resolution.
// So why don't we do this in finalizeResolution? Well, we do but we don't
// want to block on a rekey so we queue it. Because of that it may fail
// due to a conflict with some subsequent write. By also handling it here
// we'll always retry if we notice we haven't been successful in clearing
// the bit yet. Note that I haven't actually seen this happen but it seems
// theoretically possible.
defer fbo.config.RekeyQueue().Enqueue(md.TlfID())
}
}
md.loadCachedBlockChanges(ctx, bps)
rebased := (oldPrevRoot != md.PrevRoot())
if rebased {
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
doResolve = true
resolveMergedRev = MetadataRevisionUninitialized
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
irmd := MakeImmutableRootMetadata(
md, session.VerifyingKey, mdID, fbo.config.Clock().Now())
err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased)
if err != nil {
return err
}
// Archive the old, unref'd blocks if journaling is off.
if !TLFJournalEnabled(fbo.config, fbo.id()) {
fbo.fbm.archiveUnrefBlocks(irmd.ReadOnly())
}
err = fbo.notifyBatchLocked(ctx, lState, irmd, afterUpdateFn)
if err != nil {
return err
}
// Call Resolve() after the head is set, to make sure it fetches
// the correct unmerged MD range during resolution.
if doResolve {
fbo.cr.Resolve(md.Revision(), resolveMergedRev)
}
return nil
}
func (fbo *folderBranchOps) waitForJournalLocked(ctx context.Context,
lState *lockState, jServer *JournalServer) error {
fbo.mdWriterLock.AssertLocked(lState)
if !TLFJournalEnabled(fbo.config, fbo.id()) {
// Nothing to do.
return nil
}
if err := jServer.Wait(ctx, fbo.id()); err != nil {
return err
}
// Make sure everything flushed successfully; since we're holding
// the writer lock, no other revisions could have snuck in.
jStatus, err := jServer.JournalStatus(fbo.id())
if err != nil {
return err
}
if jStatus.RevisionEnd != MetadataRevisionUninitialized {
return errors.Errorf("Couldn't flush all MD revisions; current "+
"revision end for the journal is %d", jStatus.RevisionEnd)
}
if jStatus.LastFlushErr != "" {
return errors.Errorf("Couldn't flush the journal: %s",
jStatus.LastFlushErr)
}
return nil
}
func (fbo *folderBranchOps) finalizeMDRekeyWriteLocked(ctx context.Context,
lState *lockState, md *RootMetadata,
lastWriterVerifyingKey kbfscrypto.VerifyingKey) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
oldPrevRoot := md.PrevRoot()
// Write out the new metadata. If journaling is enabled, we don't
// want the rekey to hit the journal and possibly end up on a
// conflict branch, so wait for the journal to flush and then push
// straight to the server. TODO: we're holding the writer lock
// while flushing the journal here (just like for exclusive
// writes), which may end up blocking incoming writes for a long
// time. Rekeys are pretty rare, but if this becomes an issue
// maybe we should consider letting these hit the journal and
// scrubbing them when converting it to a branch.
mdOps := fbo.config.MDOps()
if jServer, err := GetJournalServer(fbo.config); err == nil {
if err = fbo.waitForJournalLocked(ctx, lState, jServer); err != nil {
return err
}
mdOps = jServer.delegateMDOps
}
mdID, err := mdOps.Put(ctx, md)
isConflict := isRevisionConflict(err)
if err != nil && !isConflict {
return err
}
if isConflict {
// Drop this block. We've probably collided with someone also
// trying to rekey the same folder, but that's not necessarily
// the case. We'll queue another rekey just in case; it should
// be safe, as rekeying is idempotent. We don't want any rekeys
// present in unmerged history, or they will just make a mess.
fbo.config.RekeyQueue().Enqueue(md.TlfID())
return RekeyConflictError{err}
}
fbo.setBranchIDLocked(lState, NullBranchID)
rebased := (oldPrevRoot != md.PrevRoot())
if rebased {
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
}
md.loadCachedBlockChanges(ctx, nil)
var key kbfscrypto.VerifyingKey
if md.IsWriterMetadataCopiedSet() {
key = lastWriterVerifyingKey
} else {
var err error
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
key = session.VerifyingKey
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadSuccessorLocked(ctx, lState,
MakeImmutableRootMetadata(md, key, mdID, fbo.config.Clock().Now()),
rebased)
if err != nil {
return err
}
// Explicitly set the latest merged revision, since if journaling
// is on, `setHeadLocked` will not do it for us (even though
// rekeys bypass the journal).
fbo.setLatestMergedRevisionLocked(ctx, lState, md.Revision(), false)
return nil
}
func (fbo *folderBranchOps) finalizeGCOp(ctx context.Context, gco *GCOp) (
err error) {
lState := makeFBOLockState()
// Lock the folder so we can get an internally-consistent MD
// revision number.
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
if md.MergedStatus() == Unmerged {
return UnexpectedUnmergedPutError{}
}
md.AddOp(gco)
// TODO: if the revision number of this new commit is sequential
// with `LatestRev`, we can probably change this to
// `gco.LatestRev+1`.
md.SetLastGCRevision(gco.LatestRev)
bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md)
if err != nil {
return err
}
oldPrevRoot := md.PrevRoot()
err = fbo.finalizeBlocks(bps)
if err != nil {
return err
}
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
// finally, write out the new metadata
mdID, err := fbo.config.MDOps().Put(ctx, md)
if err != nil {
// Don't allow garbage collection to put us into a conflicting
// state; just wait for the next period.
return err
}
fbo.setBranchIDLocked(lState, NullBranchID)
md.loadCachedBlockChanges(ctx, bps)
rebased := (oldPrevRoot != md.PrevRoot())
if rebased {
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
irmd := MakeImmutableRootMetadata(
md, session.VerifyingKey, mdID, fbo.config.Clock().Now())
err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased)
if err != nil {
return err
}
return fbo.notifyBatchLocked(ctx, lState, irmd, nil)
}
func (fbo *folderBranchOps) syncBlockAndFinalizeLocked(ctx context.Context,
lState *lockState, md *RootMetadata, newBlock Block, dir path,
name string, entryType EntryType, mtime bool, ctime bool,
stopAt BlockPointer, excl Excl) (de DirEntry, err error) {
fbo.mdWriterLock.AssertLocked(lState)
_, de, bps, err := fbo.syncBlockAndCheckEmbedLocked(
ctx, lState, md, newBlock, dir, name, entryType, mtime,
ctime, zeroPtr, nil)
if err != nil {
return DirEntry{}, err
}
defer func() {
if err != nil {
fbo.fbm.cleanUpBlockState(
md.ReadOnly(), bps, blockDeleteOnMDFail)
}
}()
_, err = doBlockPuts(ctx, fbo.config.BlockServer(),
fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, md.TlfID(),
md.GetTlfHandle().GetCanonicalName(), *bps)
if err != nil {
return DirEntry{}, err
}
err = fbo.finalizeMDWriteLocked(ctx, lState, md, bps, excl, nil)
if err != nil {
return DirEntry{}, err
}
return de, nil
}
func checkDisallowedPrefixes(name string) error {
for _, prefix := range disallowedPrefixes {
if strings.HasPrefix(name, prefix) {
return DisallowedPrefixError{name, prefix}
}
}
return nil
}
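// For example, if disallowedPrefixes contains ".kbfs" (a sketch; see
// the actual definition elsewhere in this package):
//
//   checkDisallowedPrefixes(".kbfs_status") // DisallowedPrefixError
//   checkDisallowedPrefixes("notes.txt")    // nil
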
func (fbo *folderBranchOps) checkNewDirSize(ctx context.Context,
lState *lockState, md ReadOnlyRootMetadata,
dirPath path, newName string) error {
// Check that the directory isn't past capacity already.
var currSize uint64
if dirPath.hasValidParent() {
de, err := fbo.blocks.GetDirtyEntry(ctx, lState, md, dirPath)
if err != nil {
return err
}
currSize = de.Size
} else {
// dirPath is just the root.
currSize = md.data.Dir.Size
}
// Just an approximation since it doesn't include the size of the
// directory entry itself, but that's ok -- at worst it'll be an
// off-by-one-entry error, and since there's a maximum name length
// we can't get in too much trouble.
if currSize+uint64(len(newName)) > fbo.config.MaxDirBytes() {
return DirTooBigError{dirPath, currSize + uint64(len(newName)),
fbo.config.MaxDirBytes()}
}
return nil
}
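// A worked example of the check above, with hypothetical numbers: if
// the directory's current size is 4090 bytes, the new name is
// "report.txt" (10 bytes), and MaxDirBytes() is 4096, then
// 4090+10 > 4096 and a DirTooBigError is returned.
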
// PathType returns the path type (public or private) for this folder.
func (fbo *folderBranchOps) PathType() PathType {
if fbo.folderBranch.Tlf.IsPublic() {
return PublicPathType
}
return PrivatePathType
}
// canonicalPath returns the full canonical path for the given dir node and name.
func (fbo *folderBranchOps) canonicalPath(ctx context.Context, dir Node, name string) (string, error) {
dirPath, err := fbo.pathFromNodeForRead(dir)
if err != nil {
return "", err
}
return BuildCanonicalPath(fbo.PathType(), dirPath.String(), name), nil
}
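// As a sketch (the exact shape depends on BuildCanonicalPath): for a
// private TLF where dirPath.String() is "u1,u2/docs" and name is "f",
// the result would look like "/keybase/private/u1,u2/docs/f".
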
// entryType must not be Sym.
func (fbo *folderBranchOps) createEntryLocked(
ctx context.Context, lState *lockState, dir Node, name string,
entryType EntryType, excl Excl) (Node, DirEntry, error) {
fbo.mdWriterLock.AssertLocked(lState)
if err := checkDisallowedPrefixes(name); err != nil {
return nil, DirEntry{}, err
}
if uint32(len(name)) > fbo.config.MaxNameBytes() {
return nil, DirEntry{},
NameTooLongError{name, fbo.config.MaxNameBytes()}
}
filename, err := fbo.canonicalPath(ctx, dir, name)
if err != nil {
return nil, DirEntry{}, err
}
// verify we have permission to write
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, filename)
if err != nil {
return nil, DirEntry{}, err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return nil, DirEntry{}, err
}
dblock, err := fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(), dirPath, blockWrite)
if err != nil {
return nil, DirEntry{}, err
}
// does name already exist?
if _, ok := dblock.Children[name]; ok {
return nil, DirEntry{}, NameExistsError{name}
}
if err := fbo.checkNewDirSize(
ctx, lState, md.ReadOnly(), dirPath, name); err != nil {
return nil, DirEntry{}, err
}
co, err := newCreateOp(name, dirPath.tailPointer(), entryType)
if err != nil {
return nil, DirEntry{}, err
}
co.setFinalPath(dirPath)
md.AddOp(co)
// create new data block
var newBlock Block
if entryType == Dir {
newBlock = &DirBlock{
Children: make(map[string]DirEntry),
}
} else {
newBlock = &FileBlock{}
}
de, err := fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, newBlock, dirPath, name, entryType,
true, true, zeroPtr, excl)
if err != nil {
return nil, DirEntry{}, err
}
node, err := fbo.nodeCache.GetOrCreate(de.BlockPointer, name, dir)
if err != nil {
return nil, DirEntry{}, err
}
return node, de, nil
}
func (fbo *folderBranchOps) maybeWaitForSquash(
ctx context.Context, bid BranchID) {
if bid != PendingLocalSquashBranchID {
return
}
fbo.log.CDebugf(ctx, "Blocking until squash finishes")
// Limit the time we wait to just under the ctx deadline if there
// is one, or 10s if there isn't.
deadline, ok := ctx.Deadline()
if ok {
deadline = deadline.Add(-1 * time.Second)
} else {
// Can't use config.Clock() since context doesn't respect it.
deadline = time.Now().Add(10 * time.Second)
}
ctx, cancel := context.WithDeadline(ctx, deadline)
defer cancel()
// Wait for CR to finish. Note that if the user is issuing
// concurrent writes, the current CR could be canceled, and when
// the call below returns, the branch still won't be squashed.
// That's ok, this is just an optimization.
err := fbo.cr.Wait(ctx)
if err != nil {
fbo.log.CDebugf(ctx, "Error while waiting for CR: %+v", err)
}
}
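// Illustrating the deadline math above with hypothetical times: given
// a ctx deadline of 12:00:10, CR gets until 12:00:09, leaving a second
// of slack before the caller's own deadline; with no ctx deadline, CR
// gets a flat 10s from time.Now().
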
func (fbo *folderBranchOps) doMDWriteWithRetry(ctx context.Context,
lState *lockState, fn func(lState *lockState) error) error {
doUnlock := false
defer func() {
if doUnlock {
bid := fbo.bid
fbo.mdWriterLock.Unlock(lState)
// Don't let a pending squash get too big.
fbo.maybeWaitForSquash(ctx, bid)
}
}()
for i := 0; ; i++ {
fbo.mdWriterLock.Lock(lState)
doUnlock = true
// Make sure we haven't been canceled before doing anything
// too serious.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
err := fn(lState)
if isRetriableError(err, i) {
fbo.log.CDebugf(ctx, "Trying again after retriable error: %v", err)
// Release the lock to give someone else a chance
doUnlock = false
fbo.mdWriterLock.Unlock(lState)
if _, ok := err.(ExclOnUnmergedError); ok {
if err = fbo.cr.Wait(ctx); err != nil {
return err
}
} else if _, ok := err.(UnmergedSelfConflictError); ok {
// We can only get here if we are already on an
// unmerged branch and an errored PutUnmerged did make
// it to the mdserver. Let's force sync, with a fresh
// context so the observer doesn't ignore the updates
// (but tie the cancels together).
newCtx := fbo.ctxWithFBOID(context.Background())
newCtx, cancel := context.WithCancel(newCtx)
defer cancel()
go func() {
select {
case <-ctx.Done():
cancel()
case <-newCtx.Done():
}
}()
fbo.log.CDebugf(ctx, "Got a revision conflict while unmerged "+
"(%v); forcing a sync", err)
err = fbo.getAndApplyNewestUnmergedHead(newCtx, lState)
if err != nil {
return err
}
cancel()
}
continue
} else if err != nil {
return err
}
return nil
}
}
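// A minimal sketch of a caller (hypothetical closure body), mirroring
// the wrappers below:
//
//   err := fbo.doMDWriteWithRetry(ctx, lState,
//       func(lState *lockState) error {
//           // ... an MD write that may return a retriable error ...
//           return nil
//       })
//
// On a retriable error the closure simply runs again, up to
// maxRetriesOnRecoverableErrors attempts, with mdWriterLock reacquired
// each time around the loop.
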
func (fbo *folderBranchOps) doMDWriteWithRetryUnlessCanceled(
ctx context.Context, fn func(lState *lockState) error) error {
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
return fbo.doMDWriteWithRetry(ctx, lState, fn)
})
}
func (fbo *folderBranchOps) CreateDir(
ctx context.Context, dir Node, path string) (
n Node, ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "CreateDir %s %s", getNodeIDStr(dir), path)
defer func() {
fbo.deferLog.CDebugf(ctx, "CreateDir %s %s done: %v %+v",
getNodeIDStr(dir), path, getNodeIDStr(n), err)
}()
err = fbo.checkNode(dir)
if err != nil {
return nil, EntryInfo{}, err
}
var retNode Node
var retEntryInfo EntryInfo
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
node, de, err :=
fbo.createEntryLocked(ctx, lState, dir, path, Dir, NoExcl)
// Don't set node and ei directly, as that can cause a
// race when the Create is canceled.
retNode = node
retEntryInfo = de.EntryInfo
return err
})
if err != nil {
return nil, EntryInfo{}, err
}
return retNode, retEntryInfo, nil
}
func (fbo *folderBranchOps) CreateFile(
ctx context.Context, dir Node, path string, isExec bool, excl Excl) (
n Node, ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "CreateFile %s %s isExec=%v Excl=%s",
getNodeIDStr(dir), path, isExec, excl)
defer func() {
fbo.deferLog.CDebugf(ctx,
"CreateFile %s %s isExec=%v Excl=%s done: %v %+v",
getNodeIDStr(dir), path, isExec, excl,
getNodeIDStr(n), err)
}()
err = fbo.checkNode(dir)
if err != nil {
return nil, EntryInfo{}, err
}
var entryType EntryType
if isExec {
entryType = Exec
} else {
entryType = File
}
// If journaling is turned on, an exclusive create may end up on a
// conflict branch.
if excl == WithExcl && TLFJournalEnabled(fbo.config, fbo.id()) {
fbo.log.CDebugf(ctx, "Exclusive create status is being discarded.")
excl = NoExcl
}
if excl == WithExcl {
if err = fbo.cr.Wait(ctx); err != nil {
return nil, EntryInfo{}, err
}
}
var retNode Node
var retEntryInfo EntryInfo
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
// Don't set node and ei directly, as that can cause a
// race when the Create is canceled.
node, de, err :=
fbo.createEntryLocked(ctx, lState, dir, path, entryType, excl)
retNode = node
retEntryInfo = de.EntryInfo
return err
})
if err != nil {
return nil, EntryInfo{}, err
}
return retNode, retEntryInfo, nil
}
func (fbo *folderBranchOps) createLinkLocked(
ctx context.Context, lState *lockState, dir Node, fromName string,
toPath string) (DirEntry, error) {
fbo.mdWriterLock.AssertLocked(lState)
if err := checkDisallowedPrefixes(fromName); err != nil {
return DirEntry{}, err
}
if uint32(len(fromName)) > fbo.config.MaxNameBytes() {
return DirEntry{},
NameTooLongError{fromName, fbo.config.MaxNameBytes()}
}
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return DirEntry{}, err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return DirEntry{}, err
}
dblock, err := fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(), dirPath, blockWrite)
if err != nil {
return DirEntry{}, err
}
// TODO: validate inputs
// does name already exist?
if _, ok := dblock.Children[fromName]; ok {
return DirEntry{}, NameExistsError{fromName}
}
if err := fbo.checkNewDirSize(ctx, lState, md.ReadOnly(),
dirPath, fromName); err != nil {
return DirEntry{}, err
}
co, err := newCreateOp(fromName, dirPath.tailPointer(), Sym)
if err != nil {
return DirEntry{}, err
}
co.setFinalPath(dirPath)
md.AddOp(co)
// Create a direntry for the link, and then sync
now := fbo.nowUnixNano()
dblock.Children[fromName] = DirEntry{
EntryInfo: EntryInfo{
Type: Sym,
Size: uint64(len(toPath)),
SymPath: toPath,
Mtime: now,
Ctime: now,
},
}
_, err = fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, dblock, *dirPath.parentPath(),
dirPath.tailName(), Dir, true, true, zeroPtr, NoExcl)
if err != nil {
return DirEntry{}, err
}
return dblock.Children[fromName], nil
}
func (fbo *folderBranchOps) CreateLink(
ctx context.Context, dir Node, fromName string, toPath string) (
ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "CreateLink %s %s -> %s",
getNodeIDStr(dir), fromName, toPath)
defer func() {
fbo.deferLog.CDebugf(ctx, "CreateLink %s %s -> %s done: %+v",
getNodeIDStr(dir), fromName, toPath, err)
}()
err = fbo.checkNode(dir)
if err != nil {
return EntryInfo{}, err
}
var retEntryInfo EntryInfo
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
// Don't set ei directly, as that can cause a race when
// the Create is canceled.
de, err := fbo.createLinkLocked(ctx, lState, dir, fromName, toPath)
retEntryInfo = de.EntryInfo
return err
})
if err != nil {
return EntryInfo{}, err
}
return retEntryInfo, nil
}
// unrefEntry modifies md to unreference all relevant blocks for the
// given entry.
func (fbo *folderBranchOps) unrefEntry(ctx context.Context,
lState *lockState, md *RootMetadata, dir path, de DirEntry,
name string) error {
md.AddUnrefBlock(de.BlockInfo)
// construct a path for the child so we can unlink with it.
childPath := dir.ChildPath(name, de.BlockPointer)
// If this is an indirect block, we need to delete all of its
// children as well. NOTE: non-empty directories can't be
// removed, so no need to check for indirect directory blocks
// here.
if de.Type == File || de.Type == Exec {
blockInfos, err := fbo.blocks.GetIndirectFileBlockInfos(
ctx, lState, md.ReadOnly(), childPath)
if isRecoverableBlockErrorForRemoval(err) {
msg := fmt.Sprintf("Recoverable block error encountered for unrefEntry(%v); continuing", childPath)
fbo.log.CWarningf(ctx, "%s", msg)
fbo.log.CDebugf(ctx, "%s (err=%v)", msg, err)
} else if err != nil {
return err
}
for _, blockInfo := range blockInfos {
md.AddUnrefBlock(blockInfo)
}
}
return nil
}
func (fbo *folderBranchOps) removeEntryLocked(ctx context.Context,
lState *lockState, md *RootMetadata, dir path, name string) error {
fbo.mdWriterLock.AssertLocked(lState)
pblock, err := fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(), dir, blockWrite)
if err != nil {
return err
}
// make sure the entry exists
de, ok := pblock.Children[name]
if !ok {
return NoSuchNameError{name}
}
ro, err := newRmOp(name, dir.tailPointer())
if err != nil {
return err
}
ro.setFinalPath(dir)
md.AddOp(ro)
err = fbo.unrefEntry(ctx, lState, md, dir, de, name)
if err != nil {
return err
}
// the actual unlink
delete(pblock.Children, name)
// sync the parent directory
_, err = fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, pblock, *dir.parentPath(), dir.tailName(),
Dir, true, true, zeroPtr, NoExcl)
if err != nil {
return err
}
return nil
}
func (fbo *folderBranchOps) removeDirLocked(ctx context.Context,
lState *lockState, dir Node, dirName string) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return err
}
pblock, err := fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(), dirPath, blockRead)
if err != nil {
return err
}
de, ok := pblock.Children[dirName]
if !ok {
return NoSuchNameError{dirName}
}
// construct a path for the child so we can check for an empty dir
childPath := dirPath.ChildPath(dirName, de.BlockPointer)
childBlock, err := fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(), childPath, blockRead)
if isRecoverableBlockErrorForRemoval(err) {
msg := fmt.Sprintf("Recoverable block error encountered for removeDirLocked(%v); continuing", childPath)
fbo.log.CWarningf(ctx, "%s", msg)
fbo.log.CDebugf(ctx, "%s (err=%v)", msg, err)
} else if err != nil {
return err
} else if len(childBlock.Children) > 0 {
return DirNotEmptyError{dirName}
}
return fbo.removeEntryLocked(ctx, lState, md, dirPath, dirName)
}
func (fbo *folderBranchOps) RemoveDir(
ctx context.Context, dir Node, dirName string) (err error) {
fbo.log.CDebugf(ctx, "RemoveDir %s %s", getNodeIDStr(dir), dirName)
defer func() {
fbo.deferLog.CDebugf(ctx, "RemoveDir %s %s done: %+v",
getNodeIDStr(dir), dirName, err)
}()
err = fbo.checkNode(dir)
if err != nil {
return
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
return fbo.removeDirLocked(ctx, lState, dir, dirName)
})
}
func (fbo *folderBranchOps) RemoveEntry(ctx context.Context, dir Node,
name string) (err error) {
fbo.log.CDebugf(ctx, "RemoveEntry %s %s", getNodeIDStr(dir), name)
defer func() {
fbo.deferLog.CDebugf(ctx, "RemoveEntry %s %s done: %+v",
getNodeIDStr(dir), name, err)
}()
err = fbo.checkNode(dir)
if err != nil {
return err
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return err
}
return fbo.removeEntryLocked(ctx, lState, md, dirPath, name)
})
}
func (fbo *folderBranchOps) renameLocked(
ctx context.Context, lState *lockState, oldParent path,
oldName string, newParent path, newName string) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
oldPBlock, newPBlock, newDe, lbc, err := fbo.blocks.PrepRename(
ctx, lState, md, oldParent, oldName, newParent, newName)
if err != nil {
return err
}
// does name exist?
if de, ok := newPBlock.Children[newName]; ok {
// Usually higher-level programs check these, but just in case.
if de.Type == Dir && newDe.Type != Dir {
return NotDirError{newParent.ChildPathNoPtr(newName)}
} else if de.Type != Dir && newDe.Type == Dir {
return NotFileError{newParent.ChildPathNoPtr(newName)}
}
if de.Type == Dir {
// The directory must be empty.
oldTargetDir, err := fbo.blocks.GetDirBlockForReading(ctx, lState,
md.ReadOnly(), de.BlockPointer, newParent.Branch,
newParent.ChildPathNoPtr(newName))
if err != nil {
return err
}
if len(oldTargetDir.Children) != 0 {
fbo.log.CWarningf(ctx, "Renaming over a non-empty directory "+
" (%s/%s) not allowed.", newParent, newName)
return DirNotEmptyError{newName}
}
}
// Delete the old block pointed to by this direntry.
err := fbo.unrefEntry(ctx, lState, md, newParent, de, newName)
if err != nil {
return err
}
}
// only the ctime changes
newDe.Ctime = fbo.nowUnixNano()
newPBlock.Children[newName] = newDe
delete(oldPBlock.Children, oldName)
// find the common ancestor
var i int
found := false
// the root block will always be the same, so start at number 1
for i = 1; i < len(oldParent.path) && i < len(newParent.path); i++ {
if oldParent.path[i].ID != newParent.path[i].ID {
found = true
i--
break
}
}
if !found {
// if we couldn't find one, then the common ancestor is the
// last node in the shorter path
if len(oldParent.path) < len(newParent.path) {
i = len(oldParent.path) - 1
} else {
i = len(newParent.path) - 1
}
}
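// Worked example with hypothetical paths: renaming /a/b/x to /a/c/x
// compares [root, a, b] against [root, a, c]; they diverge at index 2,
// so i is decremented back to 1 and the common ancestor is a's
// BlockPointer.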
commonAncestor := oldParent.path[i].BlockPointer
oldIsCommon := oldParent.tailPointer() == commonAncestor
newIsCommon := newParent.tailPointer() == commonAncestor
newOldPath := path{FolderBranch: oldParent.FolderBranch}
var oldBps *blockPutState
if oldIsCommon {
if newIsCommon {
// if old and new are both the common ancestor, there is
// nothing to do (syncBlock will take care of everything)
} else {
// If the old one is common and the new one is
// not, then the last
// syncBlockAndCheckEmbedLocked call will need
// to access the old one.
lbc[oldParent.tailPointer()] = oldPBlock
}
} else {
if newIsCommon {
// If the new one is common, then the first
// syncBlockAndCheckEmbedLocked call will need to access
// it.
lbc[newParent.tailPointer()] = newPBlock
}
// The old one is not the common ancestor, so we need to sync it.
// TODO: optimize by pushing blocks from both paths in parallel
newOldPath, _, oldBps, err = fbo.syncBlockAndCheckEmbedLocked(
ctx, lState, md, oldPBlock, *oldParent.parentPath(), oldParent.tailName(),
Dir, true, true, commonAncestor, lbc)
if err != nil {
return err
}
}
newNewPath, _, newBps, err := fbo.syncBlockAndCheckEmbedLocked(
ctx, lState, md, newPBlock, *newParent.parentPath(), newParent.tailName(),
Dir, true, true, zeroPtr, lbc)
if err != nil {
return err
}
// newOldPath is really just a prefix now. A copy is necessary as an
// append could cause the new path to contain nodes from the old path.
newOldPath.path = append(make([]pathNode, i+1, i+1), newOldPath.path...)
copy(newOldPath.path[:i+1], newNewPath.path[:i+1])
// merge and finalize the blockPutStates
if oldBps != nil {
newBps.mergeOtherBps(oldBps)
}
defer func() {
if err != nil {
fbo.fbm.cleanUpBlockState(
md.ReadOnly(), newBps, blockDeleteOnMDFail)
}
}()
_, err = doBlockPuts(ctx, fbo.config.BlockServer(), fbo.config.BlockCache(),
fbo.config.Reporter(), fbo.log, md.TlfID(),
md.GetTlfHandle().GetCanonicalName(), *newBps)
if err != nil {
return err
}
return fbo.finalizeMDWriteLocked(ctx, lState, md, newBps, NoExcl, nil)
}
func (fbo *folderBranchOps) Rename(
ctx context.Context, oldParent Node, oldName string, newParent Node,
newName string) (err error) {
fbo.log.CDebugf(ctx, "Rename %s/%s -> %s/%s", getNodeIDStr(oldParent),
oldName, getNodeIDStr(newParent), newName)
defer func() {
fbo.deferLog.CDebugf(ctx, "Rename %s/%s -> %s/%s done: %+v",
getNodeIDStr(oldParent), oldName,
getNodeIDStr(newParent), newName, err)
}()
err = fbo.checkNode(newParent)
if err != nil {
return err
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
oldParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, oldParent)
if err != nil {
return err
}
newParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, newParent)
if err != nil {
return err
}
// only works for paths within the same topdir
if oldParentPath.FolderBranch != newParentPath.FolderBranch {
return RenameAcrossDirsError{}
}
return fbo.renameLocked(ctx, lState, oldParentPath, oldName,
newParentPath, newName)
})
}
func (fbo *folderBranchOps) Read(
ctx context.Context, file Node, dest []byte, off int64) (
n int64, err error) {
fbo.log.CDebugf(ctx, "Read %s %d %d", getNodeIDStr(file),
len(dest), off)
defer func() {
fbo.deferLog.CDebugf(ctx, "Read %s %d %d done: %+v",
getNodeIDStr(file), len(dest), off, err)
}()
err = fbo.checkNode(file)
if err != nil {
return 0, err
}
filePath, err := fbo.pathFromNodeForRead(file)
if err != nil {
return 0, err
}
{
// It seems git isn't handling EINTR from some of its read calls (likely
// fread), which causes it to get corrupted data (which leads to coredumps
// later) when a read system call on pack files gets interrupted. This
// enables delayed cancellation for Read if the file path contains `.git`.
//
// TODO: get a patch in git, wait for sufficiently long time for people to
// upgrade, and remove this.
// Allow turning this feature off via an env var, to make life easier
// when we try to fix git.
if _, isSet := os.LookupEnv("KBFS_DISABLE_GIT_SPECIAL_CASE"); !isSet {
for _, n := range filePath.path {
if n.Name == ".git" {
EnableDelayedCancellationWithGracePeriod(ctx, fbo.config.DelayedCancellationGracePeriod())
break
}
}
}
}
// Don't let the goroutine below write directly to the return
// variable, since if the context is canceled the goroutine might
// outlast this function call, and end up in a read/write race
// with the caller.
var bytesRead int64
err = runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
// verify we have permission to read
md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return err
}
bytesRead, err = fbo.blocks.Read(
ctx, lState, md.ReadOnly(), filePath, dest, off)
return err
})
if err != nil {
return 0, err
}
return bytesRead, nil
}
func (fbo *folderBranchOps) Write(
ctx context.Context, file Node, data []byte, off int64) (err error) {
fbo.log.CDebugf(ctx, "Write %s %d %d", getNodeIDStr(file),
len(data), off)
defer func() {
fbo.deferLog.CDebugf(ctx, "Write %s %d %d done: %+v",
getNodeIDStr(file), len(data), off, err)
}()
err = fbo.checkNode(file)
if err != nil {
return err
}
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
// Get the MD for reading. We won't modify it; we'll track the
// unref changes on the side, and put them into the MD during the
// sync.
md, err := fbo.getMDForReadLocked(ctx, lState, mdReadNeedIdentify)
if err != nil {
return err
}
err = fbo.blocks.Write(
ctx, lState, md.ReadOnly(), file, data, off)
if err != nil {
return err
}
fbo.status.addDirtyNode(file)
return nil
})
}
func (fbo *folderBranchOps) Truncate(
ctx context.Context, file Node, size uint64) (err error) {
fbo.log.CDebugf(ctx, "Truncate %s %d", getNodeIDStr(file), size)
defer func() {
fbo.deferLog.CDebugf(ctx, "Truncate %s %d done: %+v",
getNodeIDStr(file), size, err)
}()
err = fbo.checkNode(file)
if err != nil {
return err
}
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
// Get the MD for reading. We won't modify it; we'll track the
// unref changes on the side, and put them into the MD during the
// sync.
md, err := fbo.getMDForReadLocked(ctx, lState, mdReadNeedIdentify)
if err != nil {
return err
}
err = fbo.blocks.Truncate(
ctx, lState, md.ReadOnly(), file, size)
if err != nil {
return err
}
fbo.status.addDirtyNode(file)
return nil
})
}
func (fbo *folderBranchOps) setExLocked(
ctx context.Context, lState *lockState, file path,
ex bool) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return
}
dblock, de, err := fbo.blocks.GetDirtyParentAndEntry(
ctx, lState, md.ReadOnly(), file)
if err != nil {
return err
}
// If the file is a symlink, do nothing (to match ext4
// behavior).
if de.Type == Sym || de.Type == Dir {
fbo.log.CDebugf(ctx, "Ignoring setex on type %s", de.Type)
return nil
}
if ex && (de.Type == File) {
de.Type = Exec
} else if !ex && (de.Type == Exec) {
de.Type = File
} else {
// Treating this as a no-op, without updating the ctime, is a
// POSIX violation, but it's an important optimization to keep
// permissions-preserving rsyncs fast.
fbo.log.CDebugf(ctx, "Ignoring no-op setex")
return nil
}
de.Ctime = fbo.nowUnixNano()
parentPath := file.parentPath()
sao, err := newSetAttrOp(file.tailName(), parentPath.tailPointer(),
exAttr, file.tailPointer())
if err != nil {
return err
}
// If the MD doesn't match the MD expected by the path, that
// implies we are using a cached path, which implies the node has
// been unlinked. In that case, we can safely ignore this setex.
if md.data.Dir.BlockPointer.ID != file.path[0].BlockPointer.ID {
fbo.log.CDebugf(ctx, "Skipping setex for a removed file %v",
file.tailPointer())
fbo.blocks.UpdateCachedEntryAttributesOnRemovedFile(
ctx, lState, sao, de)
return nil
}
sao.setFinalPath(file)
md.AddOp(sao)
dblock.Children[file.tailName()] = de
_, err = fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, dblock, *parentPath.parentPath(), parentPath.tailName(),
Dir, false, false, zeroPtr, NoExcl)
return err
}
func (fbo *folderBranchOps) SetEx(
ctx context.Context, file Node, ex bool) (err error) {
fbo.log.CDebugf(ctx, "SetEx %s %t", getNodeIDStr(file), ex)
defer func() {
fbo.deferLog.CDebugf(ctx, "SetEx %s %t done: %+v",
getNodeIDStr(file), ex, err)
}()
err = fbo.checkNode(file)
if err != nil {
return
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
if err != nil {
return err
}
return fbo.setExLocked(ctx, lState, filePath, ex)
})
}
func (fbo *folderBranchOps) setMtimeLocked(
ctx context.Context, lState *lockState, file path,
mtime *time.Time) error {
fbo.mdWriterLock.AssertLocked(lState)
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
dblock, de, err := fbo.blocks.GetDirtyParentAndEntry(
ctx, lState, md.ReadOnly(), file)
if err != nil {
return err
}
de.Mtime = mtime.UnixNano()
// setting the mtime counts as changing the file MD, so must set ctime too
de.Ctime = fbo.nowUnixNano()
parentPath := file.parentPath()
sao, err := newSetAttrOp(file.tailName(), parentPath.tailPointer(),
mtimeAttr, file.tailPointer())
if err != nil {
return err
}
// If the MD doesn't match the MD expected by the path, that
// implies we are using a cached path, which implies the node has
// been unlinked. In that case, we can safely ignore this
// setmtime.
if md.data.Dir.BlockPointer.ID != file.path[0].BlockPointer.ID {
fbo.log.CDebugf(ctx, "Skipping setmtime for a removed file %v",
file.tailPointer())
fbo.blocks.UpdateCachedEntryAttributesOnRemovedFile(
ctx, lState, sao, de)
return nil
}
sao.setFinalPath(file)
md.AddOp(sao)
dblock.Children[file.tailName()] = de
_, err = fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, dblock, *parentPath.parentPath(), parentPath.tailName(),
Dir, false, false, zeroPtr, NoExcl)
return err
}
func (fbo *folderBranchOps) SetMtime(
ctx context.Context, file Node, mtime *time.Time) (err error) {
fbo.log.CDebugf(ctx, "SetMtime %s %v", getNodeIDStr(file), mtime)
defer func() {
fbo.deferLog.CDebugf(ctx, "SetMtime %s %v done: %+v",
getNodeIDStr(file), mtime, err)
}()
if mtime == nil {
// Can happen on some OSes (e.g. OSX) when trying to set the atime only
return nil
}
err = fbo.checkNode(file)
if err != nil {
return
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
if err != nil {
return err
}
return fbo.setMtimeLocked(ctx, lState, filePath, mtime)
})
}
func (fbo *folderBranchOps) syncLocked(ctx context.Context,
lState *lockState, file path) (stillDirty bool, err error) {
fbo.mdWriterLock.AssertLocked(lState)
// if the cache for this file isn't dirty, we're done
if !fbo.blocks.IsDirty(lState, file) {
return false, nil
}
// Verify we have permission to write. We do this after the dirty
// check because otherwise readers who sync clean files on close
// would get an error.
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return true, err
}
// If the MD doesn't match the MD expected by the path, that
// implies we are using a cached path, which implies the node has
// been unlinked. In that case, we can safely ignore this sync.
if md.data.Dir.BlockPointer.ID != file.path[0].BlockPointer.ID {
fbo.log.CDebugf(ctx, "Skipping sync for a removed file %v",
file.tailPointer())
// Removing the cached info here is a little sketchy,
// since there's no guarantee that this sync comes
// from closing the file, and we still want to serve
// stat calls accurately if the user still has an open
// handle to this file.
//
// Note in particular that if a file just had a dirty
// directory entry cached (due to an attribute change on a
// removed file, for example), this will clear that attribute
// change. If there's still an open file handle, the user
// won't be able to see the change anymore.
//
// TODO: Hook this in with the node cache GC logic to be
// perfectly accurate (but at the same time, we'd then have to
// fix up the intentional panic in the background flusher to
// be more tolerant of long-lived dirty, removed files).
return true, fbo.blocks.ClearCacheInfo(lState, file)
}
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return true, err
}
if file.isValidForNotification() {
// notify the daemon that a write is being performed
fbo.config.Reporter().Notify(ctx, writeNotification(file, false))
defer fbo.config.Reporter().Notify(ctx, writeNotification(file, true))
}
// Filled in by doBlockPuts below.
var blocksToRemove []BlockPointer
fblock, bps, lbc, syncState, err :=
fbo.blocks.StartSync(ctx, lState, md, session.UID, file)
defer func() {
fbo.blocks.CleanupSyncState(
ctx, lState, md.ReadOnly(), file, blocksToRemove, syncState, err)
}()
if err != nil {
return true, err
}
newPath, _, newBps, err :=
fbo.syncBlockAndCheckEmbedLocked(
ctx, lState, md, fblock, *file.parentPath(),
file.tailName(), File, true, true, zeroPtr, lbc)
if err != nil {
return true, err
}
bps.mergeOtherBps(newBps)
// Note: We explicitly don't call fbo.fbm.cleanUpBlockState here
// when there's an error, because it's possible some of the blocks
// will be reused in a future attempt at this same sync, and we
// don't want them cleaned up in that case. Instead, the
// FinishSync call below will take care of that.
blocksToRemove, err = doBlockPuts(ctx, fbo.config.BlockServer(),
fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, md.TlfID(),
md.GetTlfHandle().GetCanonicalName(), *bps)
if err != nil {
return true, err
}
// Call this under the same blockLock as when the pointers are
// updated, so there's never any point in time where a read or
// write might slip in after the pointers are updated, but before
// the deferred writes are re-applied.
afterUpdateFn := func() error {
stillDirty, err = fbo.blocks.FinishSyncLocked(
ctx, lState, file, newPath, md.ReadOnly(), syncState, fbo.fbm)
return err
}
err = fbo.finalizeMDWriteLocked(ctx, lState, md, bps, NoExcl, afterUpdateFn)
if err != nil {
return true, err
}
return stillDirty, err
}
func (fbo *folderBranchOps) Sync(ctx context.Context, file Node) (err error) {
fbo.log.CDebugf(ctx, "Sync %s", getNodeIDStr(file))
defer func() {
fbo.deferLog.CDebugf(ctx, "Sync %s done: %+v",
getNodeIDStr(file), err)
}()
err = fbo.checkNode(file)
if err != nil {
return
}
var stillDirty bool
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
if err != nil {
return err
}
stillDirty, err = fbo.syncLocked(ctx, lState, filePath)
return err
})
if err != nil {
return err
}
if !stillDirty {
fbo.status.rmDirtyNode(file)
}
return nil
}
func (fbo *folderBranchOps) FolderStatus(
ctx context.Context, folderBranch FolderBranch) (
fbs FolderBranchStatus, updateChan <-chan StatusUpdate, err error) {
fbo.log.CDebugf(ctx, "Status")
defer func() { fbo.deferLog.CDebugf(ctx, "Status done: %+v", err) }()
if folderBranch != fbo.folderBranch {
return FolderBranchStatus{}, nil,
WrongOpsError{fbo.folderBranch, folderBranch}
}
return fbo.status.getStatus(ctx, &fbo.blocks)
}
func (fbo *folderBranchOps) Status(
ctx context.Context) (
fbs KBFSStatus, updateChan <-chan StatusUpdate, err error) {
return KBFSStatus{}, nil, InvalidOpError{}
}
// RegisterForChanges registers a single Observer to receive
// notifications about this folder/branch.
func (fbo *folderBranchOps) RegisterForChanges(obs Observer) error {
// It's the caller's responsibility to make sure
// RegisterForChanges isn't called twice for the same Observer
fbo.observers.add(obs)
return nil
}
// UnregisterFromChanges stops an Observer from getting notifications
// about the folder/branch.
func (fbo *folderBranchOps) UnregisterFromChanges(obs Observer) error {
fbo.observers.remove(obs)
return nil
}
// notifyBatchLocked sends out a notification for the most recent op
// in md.
func (fbo *folderBranchOps) notifyBatchLocked(
ctx context.Context, lState *lockState, md ImmutableRootMetadata,
afterUpdateFn func() error) error {
fbo.headLock.AssertLocked(lState)
lastOp := md.data.Changes.Ops[len(md.data.Changes.Ops)-1]
err := fbo.notifyOneOpLocked(ctx, lState, lastOp, md, false, afterUpdateFn)
if err != nil {
return err
}
fbo.editHistory.UpdateHistory(ctx, []ImmutableRootMetadata{md})
return nil
}
// searchForNode tries to figure out the path to the given
// blockPointer, using only the block updates that happened as part of
// a given MD update operation.
func (fbo *folderBranchOps) searchForNode(ctx context.Context,
ptr BlockPointer, md ReadOnlyRootMetadata) (Node, error) {
// Record which pointers are new to this update, and thus worth
// searching.
newPtrs := make(map[BlockPointer]bool)
for _, op := range md.data.Changes.Ops {
for _, update := range op.allUpdates() {
newPtrs[update.Ref] = true
}
for _, ref := range op.Refs() {
newPtrs[ref] = true
}
}
nodeMap, _, err := fbo.blocks.SearchForNodes(ctx, fbo.nodeCache,
[]BlockPointer{ptr}, newPtrs, md, md.data.Dir.BlockPointer)
if err != nil {
return nil, err
}
n, ok := nodeMap[ptr]
if !ok {
return nil, NodeNotFoundError{ptr}
}
return n, nil
}
func (fbo *folderBranchOps) unlinkFromCache(op op, unlinkPath path) error {
// The entry could be under any one of the unref'd blocks, and
// it's safe to perform this when the pointer isn't real, so just
// try them all to avoid the overhead of looking up the right
// pointer in the old version of the block.
for _, ptr := range op.Unrefs() {
// It's ok to modify this path, since we break as soon as the
// node cache takes a reference to it.
unlinkPath.path[len(unlinkPath.path)-1].BlockPointer = ptr
found := fbo.nodeCache.Unlink(ptr.Ref(), unlinkPath)
if found {
break
}
}
return nil
}
func (fbo *folderBranchOps) getUnlinkPathBeforeUpdatingPointers(
ctx context.Context, op op) (unlinkPath path, toUnlink bool, err error) {
var node Node
var childName string
switch realOp := op.(type) {
case *rmOp:
node = fbo.nodeCache.Get(realOp.Dir.Unref.Ref())
childName = realOp.OldName
case *renameOp:
if realOp.NewDir.Unref != zeroPtr {
// moving to a new dir
node = fbo.nodeCache.Get(realOp.NewDir.Unref.Ref())
} else {
// moving to the same dir
node = fbo.nodeCache.Get(realOp.OldDir.Unref.Ref())
}
childName = realOp.NewName
}
if node == nil {
return path{}, false, nil
}
p, err := fbo.pathFromNodeForRead(node)
if err != nil {
return path{}, false, err
}
return p.ChildPathNoPtr(childName), true, nil
}
func (fbo *folderBranchOps) notifyOneOpLocked(ctx context.Context,
lState *lockState, op op, md ImmutableRootMetadata, shouldPrefetch bool,
afterUpdateFn func() error) error {
fbo.headLock.AssertLocked(lState)
// We need to get unlinkPath before calling UpdatePointers so that
// nodeCache.Unlink can properly update cachedPath.
unlinkPath, toUnlink, err := fbo.getUnlinkPathBeforeUpdatingPointers(ctx, op)
if err != nil {
return err
}
err = fbo.blocks.UpdatePointers(
md, lState, op, shouldPrefetch, afterUpdateFn)
if err != nil {
return err
}
var changes []NodeChange
switch realOp := op.(type) {
default:
fbo.log.CDebugf(ctx, "Unknown op: %s", op)
return nil
case *createOp:
node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref())
if node == nil {
return nil // Nothing to do.
}
fbo.log.CDebugf(ctx, "notifyOneOp: create %s in node %s",
realOp.NewName, getNodeIDStr(node))
changes = append(changes, NodeChange{
Node: node,
DirUpdated: []string{realOp.NewName},
})
case *rmOp:
node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref())
if node == nil {
return nil // Nothing to do.
}
fbo.log.CDebugf(ctx, "notifyOneOp: remove %s in node %s",
realOp.OldName, getNodeIDStr(node))
changes = append(changes, NodeChange{
Node: node,
DirUpdated: []string{realOp.OldName},
})
// If this node exists, then the child node might exist too,
// and we need to unlink it in the node cache.
if toUnlink {
err := fbo.unlinkFromCache(op, unlinkPath)
if err != nil {
return err
}
}
case *renameOp:
oldNode := fbo.nodeCache.Get(realOp.OldDir.Ref.Ref())
if oldNode != nil {
changes = append(changes, NodeChange{
Node: oldNode,
DirUpdated: []string{realOp.OldName},
})
}
var newNode Node
if realOp.NewDir.Ref != zeroPtr {
newNode = fbo.nodeCache.Get(realOp.NewDir.Ref.Ref())
if newNode != nil {
changes = append(changes, NodeChange{
Node: newNode,
DirUpdated: []string{realOp.NewName},
})
}
} else {
newNode = oldNode
if oldNode != nil {
// Add another name to the existing NodeChange.
changes[len(changes)-1].DirUpdated =
append(changes[len(changes)-1].DirUpdated, realOp.NewName)
}
}
if oldNode != nil {
fbo.log.CDebugf(ctx, "notifyOneOp: rename %v from %s/%s to %s/%s",
realOp.Renamed, realOp.OldName, getNodeIDStr(oldNode),
realOp.NewName, getNodeIDStr(newNode))
if newNode == nil {
if childNode :=
fbo.nodeCache.Get(realOp.Renamed.Ref()); childNode != nil {
// if the childNode exists, we still have to update
// its path to go through the new node. That means
// creating nodes for all the intervening paths.
// Unfortunately we don't have enough information to
// know what the newPath is; we have to guess it from
// the updates.
var err error
newNode, err =
fbo.searchForNode(ctx, realOp.NewDir.Ref, md.ReadOnly())
if newNode == nil {
fbo.log.CErrorf(ctx, "Couldn't find the new node: %v",
err)
}
}
}
if newNode != nil {
if toUnlink {
err := fbo.unlinkFromCache(op, unlinkPath)
if err != nil {
return err
}
}
err := fbo.nodeCache.Move(
realOp.Renamed.Ref(), newNode, realOp.NewName)
if err != nil {
return err
}
}
}
case *syncOp:
node := fbo.nodeCache.Get(realOp.File.Ref.Ref())
if node == nil {
return nil // Nothing to do.
}
fbo.log.CDebugf(ctx, "notifyOneOp: sync %d writes in node %s",
len(realOp.Writes), getNodeIDStr(node))
changes = append(changes, NodeChange{
Node: node,
FileUpdated: realOp.Writes,
})
case *setAttrOp:
node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref())
if node == nil {
return nil // Nothing to do.
}
fbo.log.CDebugf(ctx, "notifyOneOp: setAttr %s for file %s in node %s",
realOp.Attr, realOp.Name, getNodeIDStr(node))
p, err := fbo.pathFromNodeForRead(node)
if err != nil {
return err
}
childNode, err := fbo.blocks.UpdateCachedEntryAttributes(
ctx, lState, md.ReadOnly(), p, realOp)
if err != nil {
return err
}
if childNode == nil {
return nil // Nothing to do.
}
changes = append(changes, NodeChange{
Node: childNode,
})
case *GCOp:
// Unreferenced blocks in a GCOp mean that we shouldn't cache
// them anymore
fbo.log.CDebugf(ctx, "notifyOneOp: GCOp with latest rev %d and %d unref'd blocks", realOp.LatestRev, len(realOp.Unrefs()))
bcache := fbo.config.BlockCache()
idsToDelete := make([]kbfsblock.ID, 0, len(realOp.Unrefs()))
for _, ptr := range realOp.Unrefs() {
idsToDelete = append(idsToDelete, ptr.ID)
if err := bcache.DeleteTransient(ptr, fbo.id()); err != nil {
fbo.log.CDebugf(ctx,
"Couldn't delete transient entry for %v: %v", ptr, err)
}
}
diskCache := fbo.config.DiskBlockCache()
if diskCache != nil {
go diskCache.DeleteByTLF(ctx, md.TlfID(), idsToDelete)
}
case *resolutionOp:
// If there are any unrefs of blocks that have a node, this is an
// implied rmOp (see KBFS-1424).
reverseUpdates := make(map[BlockPointer]BlockPointer)
for _, unref := range op.Unrefs() {
// TODO: I will add logic here to unlink and invalidate any
// corresponding unref'd nodes.
node := fbo.nodeCache.Get(unref.Ref())
if node == nil {
// TODO: even if we don't have the node that was
// unreferenced, we might have its parent, and that
// parent might need an invalidation.
continue
}
// If there is a node, unlink and invalidate.
p, err := fbo.pathFromNodeForRead(node)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't get path: %v", err)
continue
}
if !p.hasValidParent() {
fbo.log.CErrorf(ctx, "Removed node %s has no parent", p)
continue
}
parentPath := p.parentPath()
parentNode := fbo.nodeCache.Get(parentPath.tailPointer().Ref())
if parentNode != nil {
changes = append(changes, NodeChange{
Node: parentNode,
DirUpdated: []string{p.tailName()},
})
}
fbo.log.CDebugf(ctx, "resolutionOp: remove %s, node %s",
p.tailPointer(), getNodeIDStr(node))
// Revert the path back to the original BlockPointers,
// before the updates were applied.
if len(reverseUpdates) == 0 {
for _, update := range op.allUpdates() {
reverseUpdates[update.Ref] = update.Unref
}
}
for i, pNode := range p.path {
if oldPtr, ok := reverseUpdates[pNode.BlockPointer]; ok {
p.path[i].BlockPointer = oldPtr
}
}
fbo.nodeCache.Unlink(p.tailPointer().Ref(), p)
}
if len(changes) == 0 {
return nil
}
}
fbo.observers.batchChanges(ctx, changes)
return nil
}
func (fbo *folderBranchOps) getCurrMDRevisionLocked(lState *lockState) MetadataRevision {
fbo.headLock.AssertAnyLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return fbo.head.Revision()
}
return MetadataRevisionUninitialized
}
func (fbo *folderBranchOps) getCurrMDRevision(
lState *lockState) MetadataRevision {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
return fbo.getCurrMDRevisionLocked(lState)
}
type applyMDUpdatesFunc func(context.Context, *lockState, []ImmutableRootMetadata) error
func (fbo *folderBranchOps) applyMDUpdatesLocked(ctx context.Context,
lState *lockState, rmds []ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
// If there's anything in the journal, don't apply these MDs.
// Wait for CR to happen.
if fbo.isMasterBranchLocked(lState) {
mergedRev, err := fbo.getJournalPredecessorRevision(ctx)
if err != nil {
return err
}
if mergedRev != MetadataRevisionUninitialized {
if len(rmds) > 0 {
// We should update our view of the merged master though,
// to avoid re-registering for the same updates again.
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
fbo.setLatestMergedRevisionLocked(
ctx, lState, rmds[len(rmds)-1].Revision(), false)
}()
}
fbo.log.CDebugf(ctx,
"Ignoring fetched revisions while MDs are in journal")
return nil
}
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// if we have staged changes, ignore all updates until conflict
// resolution kicks in. TODO: cache these for future use.
if !fbo.isMasterBranchLocked(lState) {
if len(rmds) > 0 {
latestMerged := rmds[len(rmds)-1]
// If we're running a journal, don't trust our own updates
// here because they might have come from our own journal
// before the conflict was detected. Assume we'll hear
// about the conflict via callbacks from the journal.
if TLFJournalEnabled(fbo.config, fbo.id()) {
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
if session.VerifyingKey == latestMerged.LastModifyingWriterVerifyingKey() {
return UnmergedError{}
}
}
// setHeadLocked takes care of merged case
fbo.setLatestMergedRevisionLocked(
ctx, lState, latestMerged.Revision(), false)
unmergedRev := MetadataRevisionUninitialized
if fbo.head != (ImmutableRootMetadata{}) {
unmergedRev = fbo.head.Revision()
}
fbo.cr.Resolve(unmergedRev, latestMerged.Revision())
}
return UnmergedError{}
}
// Don't allow updates while we're in the dirty state; the next
// sync will put us into an unmerged state anyway and we'll
// require conflict resolution.
if fbo.blocks.GetState(lState) != cleanState {
return errors.New("Ignoring MD updates while writes are dirty")
}
appliedRevs := make([]ImmutableRootMetadata, 0, len(rmds))
for _, rmd := range rmds {
// check that we're applying the expected MD revision
if rmd.Revision() <= fbo.getCurrMDRevisionLocked(lState) {
// Already caught up!
continue
}
if err := isReadableOrError(ctx, fbo.config.KBPKI(), rmd.ReadOnly()); err != nil {
return err
}
err := fbo.setHeadSuccessorLocked(ctx, lState, rmd, false)
if err != nil {
return err
}
// No new operations in these.
if rmd.IsWriterMetadataCopiedSet() {
continue
}
for _, op := range rmd.data.Changes.Ops {
err := fbo.notifyOneOpLocked(ctx, lState, op, rmd, true, nil)
if err != nil {
return err
}
}
appliedRevs = append(appliedRevs, rmd)
}
if len(appliedRevs) > 0 {
fbo.editHistory.UpdateHistory(ctx, appliedRevs)
}
return nil
}
func (fbo *folderBranchOps) undoMDUpdatesLocked(ctx context.Context,
lState *lockState, rmds []ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// Don't allow updates while we're in the dirty state; the next
// sync will put us into an unmerged state anyway and we'll
// require conflict resolution.
if fbo.blocks.GetState(lState) != cleanState {
return NotPermittedWhileDirtyError{}
}
// go backwards through the updates
for i := len(rmds) - 1; i >= 0; i-- {
rmd := rmds[i]
// on undo, it's ok to re-apply the current revision since you
// need to invert all of its ops.
//
// This duplicates a check in
// fbo.setHeadPredecessorLocked. TODO: Remove this
// duplication.
if rmd.Revision() != fbo.getCurrMDRevisionLocked(lState) &&
rmd.Revision() != fbo.getCurrMDRevisionLocked(lState)-1 {
return MDUpdateInvertError{rmd.Revision(),
fbo.getCurrMDRevisionLocked(lState)}
}
// TODO: Check that the revisions are equal only for
// the first iteration.
if rmd.Revision() < fbo.getCurrMDRevisionLocked(lState) {
err := fbo.setHeadPredecessorLocked(ctx, lState, rmd)
if err != nil {
return err
}
}
// iterate the ops in reverse and invert each one
ops := rmd.data.Changes.Ops
for j := len(ops) - 1; j >= 0; j-- {
io, err := invertOpForLocalNotifications(ops[j])
if err != nil {
fbo.log.CWarningf(ctx,
"got error %v when invert op %v; "+
"skipping. Open file handles "+
"may now be in an invalid "+
"state, which can be fixed by "+
"either closing them all or "+
"restarting KBFS.",
err, ops[j])
continue
}
err = fbo.notifyOneOpLocked(ctx, lState, io, rmd, false, nil)
if err != nil {
return err
}
}
}
// TODO: update the edit history?
return nil
}
func (fbo *folderBranchOps) applyMDUpdates(ctx context.Context,
lState *lockState, rmds []ImmutableRootMetadata) error {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.applyMDUpdatesLocked(ctx, lState, rmds)
}
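// applyMDUpdates above follows the locking convention used throughout
// this file: a thin exported wrapper takes mdWriterLock and delegates to
// a *Locked variant that asserts the lock is held. A minimal sketch of
// the pattern (hypothetical method names, not part of this file):
//
//	func (fbo *folderBranchOps) doThing(lState *lockState) error {
//		fbo.mdWriterLock.Lock(lState)
//		defer fbo.mdWriterLock.Unlock(lState)
//		return fbo.doThingLocked(lState)
//	}
//
//	func (fbo *folderBranchOps) doThingLocked(lState *lockState) error {
//		fbo.mdWriterLock.AssertLocked(lState)
//		// ... safe to mutate MD state here ...
//		return nil
//	}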
func (fbo *folderBranchOps) getLatestMergedRevision(lState *lockState) MetadataRevision {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
return fbo.latestMergedRevision
}
// caller should have held fbo.headLock
func (fbo *folderBranchOps) setLatestMergedRevisionLocked(ctx context.Context, lState *lockState, rev MetadataRevision, allowBackward bool) {
fbo.headLock.AssertLocked(lState)
if rev == MetadataRevisionUninitialized {
panic("Cannot set latest merged revision to an uninitialized value")
}
if fbo.latestMergedRevision < rev || allowBackward {
fbo.latestMergedRevision = rev
fbo.log.CDebugf(ctx, "Updated latestMergedRevision to %d.", rev)
} else {
fbo.log.CDebugf(ctx, "Local latestMergedRevision (%d) is higher than "+
"the new revision (%d); won't update.", fbo.latestMergedRevision, rev)
}
}
// Assumes all necessary locking is either already done by caller, or
// is done by applyFunc.
func (fbo *folderBranchOps) getAndApplyMDUpdates(ctx context.Context,
lState *lockState, applyFunc applyMDUpdatesFunc) error {
// first look up all MD revisions newer than my current head
start := fbo.getLatestMergedRevision(lState) + 1
rmds, err := getMergedMDUpdates(ctx, fbo.config, fbo.id(), start)
if err != nil {
return err
}
err = applyFunc(ctx, lState, rmds)
if err != nil {
return err
}
return nil
}
func (fbo *folderBranchOps) getAndApplyNewestUnmergedHead(ctx context.Context,
lState *lockState) error {
fbo.log.CDebugf(ctx, "Fetching the newest unmerged head")
bid := func() BranchID {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.bid
}()
// We can only ever be at most one revision behind, so fetch the
// latest unmerged revision and apply it as a successor.
md, err := fbo.config.MDOps().GetUnmergedForTLF(ctx, fbo.id(), bid)
if err != nil {
return err
}
if md == (ImmutableRootMetadata{}) {
// There is no unmerged revision, oops!
return errors.New("Couldn't find an unmerged head")
}
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
if fbo.bid != bid {
// The branches switched (apparently CR completed), so just
// try again.
fbo.log.CDebugf(ctx, "Branches switched while fetching unmerged head")
return nil
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if err := fbo.setHeadSuccessorLocked(ctx, lState, md, false); err != nil {
return err
}
if err := fbo.notifyBatchLocked(ctx, lState, md, nil); err != nil {
return err
}
if err := fbo.config.MDCache().Put(md); err != nil {
return err
}
return nil
}
// getUnmergedMDUpdates returns a slice of the unmerged MDs for this
// TLF's current unmerged branch, between the merge point for the
// branch and the current head. The returned MDs are the same
// instances that are stored in the MD cache, so they should be
// modified with care.
func (fbo *folderBranchOps) getUnmergedMDUpdates(
ctx context.Context, lState *lockState) (
MetadataRevision, []ImmutableRootMetadata, error) {
// acquire mdWriterLock to read the current branch ID.
bid := func() BranchID {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.bid
}()
return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(),
bid, fbo.getCurrMDRevision(lState))
}
func (fbo *folderBranchOps) getUnmergedMDUpdatesLocked(
ctx context.Context, lState *lockState) (
MetadataRevision, []ImmutableRootMetadata, error) {
fbo.mdWriterLock.AssertLocked(lState)
return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(),
fbo.bid, fbo.getCurrMDRevision(lState))
}
// Returns a list of block pointers that were created during the
// staged era.
func (fbo *folderBranchOps) undoUnmergedMDUpdatesLocked(
ctx context.Context, lState *lockState) ([]BlockPointer, error) {
fbo.mdWriterLock.AssertLocked(lState)
currHead, unmergedRmds, err := fbo.getUnmergedMDUpdatesLocked(ctx, lState)
if err != nil {
return nil, err
}
err = fbo.undoMDUpdatesLocked(ctx, lState, unmergedRmds)
if err != nil {
return nil, err
}
// We have arrived at the branch point. The new root is
// the previous revision from the current head. Find it
// and apply. TODO: somehow fake the current head into
// being currHead-1, so that future calls to
// applyMDUpdates will fetch this along with the rest of
// the updates.
fbo.setBranchIDLocked(lState, NullBranchID)
rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), NullBranchID,
currHead, Merged)
if err != nil {
return nil, err
}
err = func() error {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadPredecessorLocked(ctx, lState, rmd)
if err != nil {
return err
}
fbo.setLatestMergedRevisionLocked(ctx, lState, rmd.Revision(), true)
return nil
}()
if err != nil {
return nil, err
}
// Return all new refs
var unmergedPtrs []BlockPointer
for _, rmd := range unmergedRmds {
for _, op := range rmd.data.Changes.Ops {
for _, ptr := range op.Refs() {
if ptr != zeroPtr {
unmergedPtrs = append(unmergedPtrs, ptr)
}
}
for _, update := range op.allUpdates() {
if update.Ref != zeroPtr {
unmergedPtrs = append(unmergedPtrs, update.Ref)
}
}
}
}
return unmergedPtrs, nil
}
func (fbo *folderBranchOps) unstageLocked(ctx context.Context,
lState *lockState) error {
fbo.mdWriterLock.AssertLocked(lState)
// fetch all of my unstaged updates, and undo them one at a time
bid, wasMasterBranch := fbo.bid, fbo.isMasterBranchLocked(lState)
unmergedPtrs, err := fbo.undoUnmergedMDUpdatesLocked(ctx, lState)
if err != nil {
return err
}
// let the server know we no longer have need
if !wasMasterBranch {
err = fbo.config.MDOps().PruneBranch(ctx, fbo.id(), bid)
if err != nil {
return err
}
}
// now go forward in time, if possible
err = fbo.getAndApplyMDUpdates(ctx, lState,
fbo.applyMDUpdatesLocked)
if err != nil {
return err
}
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
// Finally, create a resolutionOp with the newly-unref'd pointers.
resOp := newResolutionOp()
for _, ptr := range unmergedPtrs {
resOp.AddUnrefBlock(ptr)
}
md.AddOp(resOp)
bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md)
if err != nil {
return err
}
return fbo.finalizeMDWriteLocked(ctx, lState, md, bps, NoExcl, nil)
}
// TODO: remove once we have automatic conflict resolution
func (fbo *folderBranchOps) UnstageForTesting(
ctx context.Context, folderBranch FolderBranch) (err error) {
fbo.log.CDebugf(ctx, "UnstageForTesting")
defer func() {
fbo.deferLog.CDebugf(ctx, "UnstageForTesting done: %+v", err)
}()
if folderBranch != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, folderBranch}
}
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
if fbo.isMasterBranch(lState) {
// no-op
return nil
}
if fbo.blocks.GetState(lState) != cleanState {
return NotPermittedWhileDirtyError{}
}
// Launch unstaging in a new goroutine with a fresh context,
// because upper layers might ignore our notifications if we
// use the provided context. But we still want to wait for the
// provided context to cancel.
c := make(chan error, 1)
freshCtx, cancel := fbo.newCtxWithFBOID()
defer cancel()
fbo.log.CDebugf(freshCtx, "Launching new context for UnstageForTesting")
go func() {
lState := makeFBOLockState()
c <- fbo.doMDWriteWithRetry(ctx, lState,
func(lState *lockState) error {
return fbo.unstageLocked(freshCtx, lState)
})
}()
select {
case err := <-c:
return err
case <-ctx.Done():
return ctx.Err()
}
})
}
// mdWriterLock must be taken by the caller.
func (fbo *folderBranchOps) rekeyLocked(ctx context.Context,
lState *lockState, promptPaper bool) (res RekeyResult, err error) {
fbo.log.CDebugf(ctx, "rekeyLocked")
defer func() {
fbo.deferLog.CDebugf(ctx, "rekeyLocked done: %+v %+v", res, err)
}()
fbo.mdWriterLock.AssertLocked(lState)
if !fbo.isMasterBranchLocked(lState) {
return RekeyResult{}, errors.New("can't rekey while staged")
}
// untrusted head is ok here.
head, _ := fbo.getHead(lState)
if head != (ImmutableRootMetadata{}) {
// If we already have a cached revision, make sure we're
// up-to-date with the latest revision before inspecting the
// metadata, since Rekey doesn't let us go into CR mode, and
// we don't actually get folder update notifications when the
// rekey bit is set, just a "folder needs rekey" update.
if err := fbo.getAndApplyMDUpdates(
ctx, lState, fbo.applyMDUpdatesLocked); err != nil {
if applyErr, ok := err.(MDRevisionMismatch); !ok ||
applyErr.rev != applyErr.curr {
return RekeyResult{}, err
}
}
}
md, lastWriterVerifyingKey, rekeyWasSet, err :=
fbo.getMDForRekeyWriteLocked(ctx, lState)
if err != nil {
return RekeyResult{}, err
}
currKeyGen := md.LatestKeyGeneration()
rekeyDone, tlfCryptKey, err := fbo.config.KeyManager().
Rekey(ctx, md, promptPaper)
stillNeedsRekey := false
switch err.(type) {
case nil:
// TODO: implement a "forced" option that rekeys even when the
// devices haven't changed?
if !rekeyDone {
fbo.log.CDebugf(ctx, "No rekey necessary")
return RekeyResult{
DidRekey: false,
NeedsPaperKey: false,
}, nil
}
// Clear the rekey bit if any.
md.clearRekeyBit()
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return RekeyResult{}, err
}
// Readers can't clear the last revision, because:
// 1) They don't have access to the writer metadata, so can't clear the
// block changes.
// 2) Readers need the MetadataFlagWriterMetadataCopied bit set for
// MDServer to authorize the write.
// Without this check, MDServer returns an Unauthorized error.
if md.GetTlfHandle().IsWriter(session.UID) {
md.clearLastRevision()
}
case RekeyIncompleteError:
if !rekeyDone && rekeyWasSet {
// The rekey bit was already set, and there's nothing else
// we can do, so don't put any new revisions.
fbo.log.CDebugf(ctx, "No further rekey possible by this user.")
return RekeyResult{
DidRekey: false,
NeedsPaperKey: false,
}, nil
}
// Rekey incomplete, fallthrough without early exit, to ensure
// we write the metadata with any potential changes
fbo.log.CDebugf(ctx,
"Rekeyed reader devices, but still need writer rekey")
case NeedOtherRekeyError, NeedSelfRekeyError:
stillNeedsRekey = true
default:
if err == context.DeadlineExceeded {
fbo.log.CDebugf(ctx, "Paper key prompt timed out")
// Reschedule the prompt in the timeout case.
stillNeedsRekey = true
} else {
return RekeyResult{}, err
}
}
if stillNeedsRekey {
fbo.log.CDebugf(ctx, "Device doesn't have access to rekey")
// If we didn't have read access, then we don't have any
// unlocked paper keys. Wait for some time, and then if we
// still aren't rekeyed, try again but this time prompt the
// user for any known paper keys. We do this even if the
// rekey bit is already set, since we may have restarted since
// the previous rekey attempt, before prompting for the paper
// key. Only schedule this as a one-time event, since direct
// folder accesses from the user will also cause a
// rekeyWithPrompt.
if rekeyWasSet {
// Devices not yet keyed shouldn't set the rekey bit again
fbo.log.CDebugf(ctx, "Rekey bit already set")
return RekeyResult{
DidRekey: rekeyDone,
NeedsPaperKey: true,
}, nil
}
// This device hasn't been keyed yet, fall through to set the rekey bit
}
// add an empty operation to satisfy assumptions elsewhere
md.AddOp(newRekeyOp())
// we still let readers push a new md block that we validate against reader
// permissions
err = fbo.finalizeMDRekeyWriteLocked(
ctx, lState, md, lastWriterVerifyingKey)
if err != nil {
return RekeyResult{
DidRekey: rekeyDone,
NeedsPaperKey: stillNeedsRekey,
}, err
}
// cache any new TLF crypt key
if tlfCryptKey != nil {
keyGen := md.LatestKeyGeneration()
err = fbo.config.KeyCache().PutTLFCryptKey(md.TlfID(), keyGen, *tlfCryptKey)
if err != nil {
return RekeyResult{
DidRekey: rekeyDone,
NeedsPaperKey: stillNeedsRekey,
}, err
}
}
// send rekey finish notification
handle := md.GetTlfHandle()
if currKeyGen >= FirstValidKeyGen {
fbo.config.Reporter().Notify(ctx,
rekeyNotification(ctx, fbo.config, handle, true))
}
return RekeyResult{
DidRekey: rekeyDone,
NeedsPaperKey: stillNeedsRekey,
}, nil
}
func (fbo *folderBranchOps) RequestRekey(_ context.Context, tlf tlf.ID) {
fb := FolderBranch{tlf, MasterBranch}
if fb != fbo.folderBranch {
// TODO: log instead of panic?
panic(WrongOpsError{fbo.folderBranch, fb})
}
fbo.rekeyFSM.Event(NewRekeyRequestEvent())
}
func (fbo *folderBranchOps) SyncFromServerForTesting(
ctx context.Context, folderBranch FolderBranch) (err error) {
fbo.log.CDebugf(ctx, "SyncFromServerForTesting")
defer func() {
fbo.deferLog.CDebugf(ctx,
"SyncFromServerForTesting done: %+v", err)
}()
if folderBranch != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, folderBranch}
}
lState := makeFBOLockState()
// A journal flush before CR, if needed.
if err := WaitForTLFJournal(ctx, fbo.config, fbo.id(),
fbo.log); err != nil {
return err
}
if err := fbo.mdFlushes.Wait(ctx); err != nil {
return err
}
if err := fbo.branchChanges.Wait(ctx); err != nil {
return err
}
// Loop until we're fully updated on the master branch.
for {
if !fbo.isMasterBranch(lState) {
if err := fbo.cr.Wait(ctx); err != nil {
return err
}
// If we are still staged after the wait, then we have a problem.
if !fbo.isMasterBranch(lState) {
return errors.Errorf("Conflict resolution didn't take us out " +
"of staging.")
}
}
dirtyRefs := fbo.blocks.GetDirtyRefs(lState)
if len(dirtyRefs) > 0 {
for _, ref := range dirtyRefs {
fbo.log.CDebugf(ctx, "DeCache entry left: %v", ref)
}
return errors.New("can't sync from server while dirty")
}
// A journal flush after CR, if needed.
if err := WaitForTLFJournal(ctx, fbo.config, fbo.id(),
fbo.log); err != nil {
return err
}
if err := fbo.mdFlushes.Wait(ctx); err != nil {
return err
}
if err := fbo.branchChanges.Wait(ctx); err != nil {
return err
}
if err := fbo.getAndApplyMDUpdates(
ctx, lState, fbo.applyMDUpdates); err != nil {
if applyErr, ok := err.(MDRevisionMismatch); ok {
if applyErr.rev == applyErr.curr {
fbo.log.CDebugf(ctx, "Already up-to-date with server")
return nil
}
}
if _, isUnmerged := err.(UnmergedError); isUnmerged {
continue
} else if err == errNoMergedRevWhileStaged {
continue
}
return err
}
break
}
// Wait for all the asynchronous block archiving and quota
// reclamation to hit the block server.
if err := fbo.fbm.waitForArchives(ctx); err != nil {
return err
}
if err := fbo.fbm.waitForDeletingBlocks(ctx); err != nil {
return err
}
if err := fbo.editHistory.Wait(ctx); err != nil {
return err
}
if err := fbo.fbm.waitForQuotaReclamations(ctx); err != nil {
return err
}
// A second journal flush if needed, to clear out any
// archive/remove calls caused by the above operations.
return WaitForTLFJournal(ctx, fbo.config, fbo.id(), fbo.log)
}
// CtxFBOTagKey is the type used for unique context tags within folderBranchOps
type CtxFBOTagKey int
const (
// CtxFBOIDKey is the type of the tag for unique operation IDs
// within folderBranchOps.
CtxFBOIDKey CtxFBOTagKey = iota
)
// CtxFBOOpID is the display name for the unique operation
// folderBranchOps ID tag.
const CtxFBOOpID = "FBOID"
func (fbo *folderBranchOps) ctxWithFBOID(ctx context.Context) context.Context {
return ctxWithRandomIDReplayable(ctx, CtxFBOIDKey, CtxFBOOpID, fbo.log)
}
func (fbo *folderBranchOps) newCtxWithFBOID() (context.Context, context.CancelFunc) {
// No need to call NewContextReplayable since ctxWithFBOID calls
// ctxWithRandomIDReplayable, which attaches replayably.
ctx := fbo.ctxWithFBOID(context.Background())
ctx, cancelFunc := context.WithCancel(ctx)
ctx, err := NewContextWithCancellationDelayer(ctx)
if err != nil {
panic(err)
}
return ctx, cancelFunc
}
// Run the passed function with a context that's canceled on shutdown.
func (fbo *folderBranchOps) runUnlessShutdown(fn func(ctx context.Context) error) error {
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
errChan := make(chan error, 1)
go func() {
errChan <- fn(ctx)
}()
select {
case err := <-errChan:
return err
case <-fbo.shutdownChan:
return ShutdownHappenedError{}
}
}
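// Usage sketch for runUnlessShutdown (doBackgroundWork is a hypothetical
// helper): the callback runs with a cancellable context, and the caller
// receives either the callback's error, or ShutdownHappenedError if the
// FBO shuts down first.
//
//	err := fbo.runUnlessShutdown(func(ctx context.Context) error {
//		return doBackgroundWork(ctx)
//	})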
func (fbo *folderBranchOps) doFastForwardLocked(ctx context.Context,
lState *lockState, currHead ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
fbo.log.CDebugf(ctx, "Fast-forwarding from rev %d to rev %d",
fbo.latestMergedRevision, currHead.Revision())
changes, err := fbo.blocks.FastForwardAllNodes(
ctx, lState, currHead.ReadOnly())
if err != nil {
return err
}
err = fbo.setHeadSuccessorLocked(ctx, lState, currHead, true /*rebase*/)
if err != nil {
return err
}
// Invalidate all the affected nodes.
if len(changes) > 0 {
fbo.observers.batchChanges(ctx, changes)
}
// Reset the edit history. TODO: notify any listeners that we've
// done this.
fbo.editHistory.Shutdown()
fbo.editHistory = NewTlfEditHistory(fbo.config, fbo, fbo.log)
return nil
}
func (fbo *folderBranchOps) maybeFastForward(ctx context.Context,
lState *lockState, lastUpdate time.Time, currUpdate time.Time) (
fastForwardDone bool, err error) {
// Has it been long enough to try fast-forwarding?
if currUpdate.Before(lastUpdate.Add(fastForwardTimeThresh)) ||
!fbo.isMasterBranch(lState) {
return false, nil
}
fbo.log.CDebugf(ctx, "Checking head for possible "+
"fast-forwarding (last update time=%s)", lastUpdate)
currHead, err := fbo.config.MDOps().GetForTLF(ctx, fbo.id())
if err != nil {
return false, err
}
fbo.log.CDebugf(ctx, "Current head is revision %d", currHead.Revision())
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
// If the journal has anything in it, don't fast-forward since we
// haven't finished flushing yet. If there was really a remote
// update on the server, we'll end up in CR eventually.
mergedRev, err := fbo.getJournalPredecessorRevision(ctx)
if err != nil {
return false, err
}
if mergedRev != MetadataRevisionUninitialized {
return false, nil
}
if !fbo.isMasterBranchLocked(lState) {
// Don't update if we're staged.
return false, nil
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if currHead.Revision() < fbo.latestMergedRevision+fastForwardRevThresh {
// Might as well fetch all the revisions.
return false, nil
}
err = fbo.doFastForwardLocked(ctx, lState, currHead)
if err != nil {
return false, err
}
return true, nil
}
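// Decision sketch for maybeFastForward (illustrative variable names): on
// the master branch with an empty journal, a fast-forward is attempted
// only when both the elapsed time since the last update and the revision
// gap clear their thresholds, roughly:
//
//	elapsed := currUpdate.Sub(lastUpdate)
//	gap := currHead.Revision() - fbo.latestMergedRevision
//	tryFF := elapsed >= fastForwardTimeThresh && gap >= fastForwardRevThresh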
func (fbo *folderBranchOps) locallyFinalizeTLF(ctx context.Context) {
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if fbo.head == (ImmutableRootMetadata{}) {
return
}
// It's safe to give this a finalized number of 1 and a fake user
// name. The whole point here is to move the old finalized TLF
// name away to a new name, where the user won't be able to access
// it anymore, and if there's a conflict with a previously-moved
// TLF that shouldn't matter.
now := fbo.config.Clock().Now()
finalizedInfo, err := tlf.NewHandleExtension(
tlf.HandleExtensionFinalized, 1, libkb.NormalizedUsername("<unknown>"),
now)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't make finalized info: %+v", err)
return
}
fakeSignedHead := &RootMetadataSigned{MD: fbo.head.bareMd}
finalRmd, err := fakeSignedHead.MakeFinalCopy(
fbo.config.Codec(), now, finalizedInfo)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't finalize MD: %+v", err)
return
}
// Construct the data needed to fake a new head.
mdID, err := fbo.config.Crypto().MakeMdID(finalRmd.MD)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't get finalized MD ID: %+v", err)
return
}
bareHandle, err := finalRmd.MD.MakeBareTlfHandle(fbo.head.Extra())
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't get finalized bare handle: %+v", err)
return
}
handle, err := MakeTlfHandle(ctx, bareHandle, fbo.config.KBPKI())
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't get finalized handle: %+v", err)
return
}
finalBrmd, ok := finalRmd.MD.(MutableBareRootMetadata)
if !ok {
fbo.log.CErrorf(ctx, "Couldn't get finalized mutable bare MD: %+v", err)
return
}
// We don't have a way to sign this with a valid key (and we might
// be logged out anyway), so just directly make the md immutable.
finalIrmd := ImmutableRootMetadata{
ReadOnlyRootMetadata: makeRootMetadata(
finalBrmd, fbo.head.Extra(), handle).ReadOnly(),
mdID: mdID,
}
// This will trigger the handle change notification to observers.
err = fbo.setHeadSuccessorLocked(ctx, lState, finalIrmd, false)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't set finalized MD: %+v", err)
return
}
}
func (fbo *folderBranchOps) registerAndWaitForUpdates() {
defer close(fbo.updateDoneChan)
childDone := make(chan struct{})
var lastUpdate time.Time
err := fbo.runUnlessShutdown(func(ctx context.Context) error {
defer close(childDone)
// If we fail to register for or process updates, try again
// with an exponential backoff, so we don't overwhelm the
// server or ourselves with too many attempts in a hopeless
// situation.
expBackoff := backoff.NewExponentialBackOff()
// Never give up hope until we shut down
expBackoff.MaxElapsedTime = 0
// Register and wait in a loop unless we hit an unrecoverable error
fbo.cancelUpdatesLock.Lock()
if fbo.cancelUpdates != nil {
// It should be impossible to get here without having
// already called the cancel function, but just in case
// call it here again.
fbo.cancelUpdates()
}
ctx, fbo.cancelUpdates = context.WithCancel(ctx)
fbo.cancelUpdatesLock.Unlock()
for {
err := backoff.RetryNotifyWithContext(ctx, func() error {
// Replace the FBOID one with a fresh id for every attempt
newCtx := fbo.ctxWithFBOID(ctx)
updateChan, err := fbo.registerForUpdates(newCtx)
if err != nil {
select {
case <-ctx.Done():
// Shortcut the retry, we're done.
return nil
default:
return err
}
}
currUpdate, err := fbo.waitForAndProcessUpdates(
newCtx, lastUpdate, updateChan)
switch errors.Cause(err).(type) {
case UnmergedError:
// skip the back-off timer and continue directly to next
// registerForUpdates
return nil
case NewMetadataVersionError:
fbo.log.CDebugf(ctx, "Abandoning updates since we can't "+
"read the newest metadata: %+v", err)
fbo.status.setPermErr(err)
// No need to lock here, since `cancelUpdates` is
// only set within this same goroutine.
fbo.cancelUpdates()
return ctx.Err()
case MDServerErrorCannotReadFinalizedTLF:
fbo.log.CDebugf(ctx, "Abandoning updates since we can't "+
"read the finalized metadata for this TLF: %+v", err)
fbo.status.setPermErr(err)
// Locally finalize the TLF so new accesses
// through to the old folder name will find the
// new folder.
fbo.locallyFinalizeTLF(newCtx)
// No need to lock here, since `cancelUpdates` is
// only set within this same goroutine.
fbo.cancelUpdates()
return ctx.Err()
}
select {
case <-ctx.Done():
// Shortcut the retry, we're done.
return nil
default:
if err == nil {
lastUpdate = currUpdate
}
return err
}
},
expBackoff,
func(err error, nextTime time.Duration) {
fbo.log.CDebugf(ctx,
"Retrying registerForUpdates in %s due to err: %v",
nextTime, err)
})
if err != nil {
return err
}
}
})
if err != nil && err != context.Canceled {
fbo.log.CWarningf(context.Background(),
"registerAndWaitForUpdates failed unexpectedly with an error: %v",
err)
}
<-childDone
}
func (fbo *folderBranchOps) registerForUpdates(ctx context.Context) (
updateChan <-chan error, err error) {
lState := makeFBOLockState()
currRev := fbo.getLatestMergedRevision(lState)
fbo.log.CDebugf(ctx, "Registering for updates (curr rev = %d)", currRev)
defer func() {
fbo.deferLog.CDebugf(ctx,
"Registering for updates (curr rev = %d) done: %+v",
currRev, err)
}()
// RegisterForUpdate will itself retry on connectivity issues
return fbo.config.MDServer().RegisterForUpdate(ctx, fbo.id(), currRev)
}
func (fbo *folderBranchOps) waitForAndProcessUpdates(
ctx context.Context, lastUpdate time.Time,
updateChan <-chan error) (currUpdate time.Time, err error) {
// successful registration; now, wait for an update or a shutdown
fbo.log.CDebugf(ctx, "Waiting for updates")
defer func() {
fbo.deferLog.CDebugf(ctx, "Waiting for updates done: %+v", err)
}()
lState := makeFBOLockState()
for {
select {
case err := <-updateChan:
fbo.log.CDebugf(ctx, "Got an update: %v", err)
if err != nil {
return time.Time{}, err
}
// Getting and applying the updates requires holding
// locks, so make sure it doesn't take too long.
ctx, cancel := context.WithTimeout(ctx, backgroundTaskTimeout)
defer cancel()
currUpdate := fbo.config.Clock().Now()
ffDone, err :=
fbo.maybeFastForward(ctx, lState, lastUpdate, currUpdate)
if err != nil {
return time.Time{}, err
}
if ffDone {
return currUpdate, nil
}
err = fbo.getAndApplyMDUpdates(ctx, lState, fbo.applyMDUpdates)
if err != nil {
fbo.log.CDebugf(ctx, "Got an error while applying "+
"updates: %v", err)
return time.Time{}, err
}
return currUpdate, nil
case unpause := <-fbo.updatePauseChan:
fbo.log.CInfof(ctx, "Updates paused")
// wait to be unpaused
select {
case <-unpause:
fbo.log.CInfof(ctx, "Updates unpaused")
case <-ctx.Done():
return time.Time{}, ctx.Err()
}
case <-ctx.Done():
return time.Time{}, ctx.Err()
}
}
}
func (fbo *folderBranchOps) backgroundFlusher(betweenFlushes time.Duration) {
ticker := time.NewTicker(betweenFlushes)
defer ticker.Stop()
lState := makeFBOLockState()
var prevDirtyRefMap map[BlockRef]bool
sameDirtyRefCount := 0
for {
doSelect := true
if fbo.blocks.GetState(lState) == dirtyState &&
fbo.config.DirtyBlockCache().ShouldForceSync(fbo.id()) &&
sameDirtyRefCount < 10 {
// We have dirty files, and the system has a full buffer,
// so don't bother waiting for a signal, just get right to
// the main attraction.
doSelect = false
}
if doSelect {
select {
case <-ticker.C:
case <-fbo.forceSyncChan:
case <-fbo.shutdownChan:
return
}
}
dirtyRefs := fbo.blocks.GetDirtyRefs(lState)
if len(dirtyRefs) == 0 {
sameDirtyRefCount = 0
continue
}
// Make sure we are making some progress
currDirtyRefMap := make(map[BlockRef]bool)
for _, ref := range dirtyRefs {
currDirtyRefMap[ref] = true
}
if reflect.DeepEqual(currDirtyRefMap, prevDirtyRefMap) {
sameDirtyRefCount++
} else {
sameDirtyRefCount = 0
}
prevDirtyRefMap = currDirtyRefMap
fbo.runUnlessShutdown(func(ctx context.Context) (err error) {
// Denote that these are coming from a background
// goroutine, not directly from any user.
ctx = NewContextReplayable(ctx,
func(ctx context.Context) context.Context {
return context.WithValue(ctx, CtxBackgroundSyncKey, "1")
})
if sameDirtyRefCount >= 10 {
// If the local journal is full, we might not be able to
// make progress until more data is flushed to the
// servers, so just warn here rather than panicking
// outright.
fbo.log.CWarningf(ctx, "Making no Sync progress on dirty refs "+
"after %d attempts: %v", sameDirtyRefCount, dirtyRefs)
}
// Just in case network access or a bug gets stuck for a
// long time, time out the sync eventually.
longCtx, longCancel :=
context.WithTimeout(ctx, backgroundTaskTimeout)
defer longCancel()
// Make sure this loop doesn't starve user requests for
// too long. But use the longer-timeout version in the
// actual Sync command, to avoid unnecessary errors.
shortCtx, shortCancel := context.WithTimeout(ctx, 1*time.Second)
defer shortCancel()
for _, ref := range dirtyRefs {
select {
case <-shortCtx.Done():
fbo.log.CDebugf(ctx,
"Stopping background sync early due to timeout")
return nil
default:
}
node := fbo.nodeCache.Get(ref)
if node == nil {
continue
}
err := fbo.Sync(longCtx, node)
if err != nil {
// Just log the warning and keep trying to
// sync the rest of the dirty files.
p := fbo.nodeCache.PathFromNode(node)
fbo.log.CWarningf(ctx, "Couldn't sync dirty file with "+
"ref=%v, nodeID=%s, and path=%v: %v",
ref, getNodeIDStr(node), p, err)
}
}
return nil
})
}
}
func (fbo *folderBranchOps) blockUnmergedWrites(lState *lockState) {
fbo.mdWriterLock.Lock(lState)
}
func (fbo *folderBranchOps) unblockUnmergedWrites(lState *lockState) {
fbo.mdWriterLock.Unlock(lState)
}
func (fbo *folderBranchOps) finalizeResolutionLocked(ctx context.Context,
lState *lockState, md *RootMetadata, bps *blockPutState,
newOps []op, blocksToDelete []kbfsblock.ID) error {
fbo.mdWriterLock.AssertLocked(lState)
// Put the blocks into the cache so that, even if we fail below,
// future attempts may reuse the blocks.
err := fbo.finalizeBlocks(bps)
if err != nil {
return err
}
// Last chance to get pre-empted.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
mdID, err := fbo.config.MDOps().ResolveBranch(ctx, fbo.id(), fbo.bid,
blocksToDelete, md)
doUnmergedPut := isRevisionConflict(err)
if doUnmergedPut {
fbo.log.CDebugf(ctx, "Got a conflict after resolution; aborting CR")
return err
}
if err != nil {
return err
}
// Queue a rekey if the bit was set.
if md.IsRekeySet() {
defer fbo.config.RekeyQueue().Enqueue(md.TlfID())
}
md.loadCachedBlockChanges(ctx, bps)
// Set the head to the new MD.
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
irmd := MakeImmutableRootMetadata(
md, session.VerifyingKey, mdID, fbo.config.Clock().Now())
err = fbo.setHeadConflictResolvedLocked(ctx, lState, irmd)
if err != nil {
fbo.log.CWarningf(ctx, "Couldn't set local MD head after a "+
"successful put: %v", err)
return err
}
fbo.setBranchIDLocked(lState, NullBranchID)
// Archive the old, unref'd blocks if journaling is off.
if !TLFJournalEnabled(fbo.config, fbo.id()) {
fbo.fbm.archiveUnrefBlocks(irmd.ReadOnly())
}
// notifyOneOp for every fixed-up merged op.
for _, op := range newOps {
err := fbo.notifyOneOpLocked(ctx, lState, op, irmd, false, nil)
if err != nil {
return err
}
}
fbo.editHistory.UpdateHistory(ctx, []ImmutableRootMetadata{irmd})
return nil
}
// finalizeResolution caches all the blocks, and writes the new MD to
// the merged branch, failing if there is a conflict. It also sends
// out the given newOps notifications locally. This is used for
// completing conflict resolution.
func (fbo *folderBranchOps) finalizeResolution(ctx context.Context,
lState *lockState, md *RootMetadata, bps *blockPutState,
newOps []op, blocksToDelete []kbfsblock.ID) error {
// Take the writer lock.
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.finalizeResolutionLocked(
ctx, lState, md, bps, newOps, blocksToDelete)
}
func (fbo *folderBranchOps) unstageAfterFailedResolution(ctx context.Context,
lState *lockState) error {
// Take the writer lock.
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
// Last chance to get pre-empted.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
fbo.log.CWarningf(ctx, "Unstaging branch %s after a resolution failure",
fbo.bid)
return fbo.unstageLocked(ctx, lState)
}
func (fbo *folderBranchOps) handleTLFBranchChange(ctx context.Context,
newBID BranchID) {
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
fbo.log.CDebugf(ctx, "Journal branch change: %s", newBID)
if !fbo.isMasterBranchLocked(lState) {
if fbo.bid == newBID {
fbo.log.CDebugf(ctx, "Already on branch %s", newBID)
return
}
panic(fmt.Sprintf("Cannot switch to branch %s while on branch %s",
newBID, fbo.bid))
}
md, err := fbo.config.MDOps().GetUnmergedForTLF(ctx, fbo.id(), newBID)
if err != nil {
fbo.log.CWarningf(ctx,
"No unmerged head on journal branch change (bid=%s)", newBID)
return
}
if md == (ImmutableRootMetadata{}) || md.MergedStatus() != Unmerged ||
md.BID() != newBID {
// This can happen if CR got kicked off in some other way and
// completed before we took the lock to process this
// notification.
fbo.log.CDebugf(ctx, "Ignoring stale branch change: md=%v, newBID=%d",
md, newBID)
return
}
// Everything we thought we knew about quota reclamation is now
// called into question.
fbo.fbm.clearLastQRData()
// Kick off conflict resolution and set the head to the correct branch.
fbo.setBranchIDLocked(lState, newBID)
fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadSuccessorLocked(ctx, lState, md, true /*rebased*/)
if err != nil {
fbo.log.CWarningf(ctx,
"Could not set head on journal branch change: %v", err)
return
}
}
func (fbo *folderBranchOps) onTLFBranchChange(newBID BranchID) {
fbo.branchChanges.Add(1)
go func() {
defer fbo.branchChanges.Done()
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
// This only happens on a `PruneBranch` call, in which case we
// would have already updated fbo's local view of the branch/head.
if newBID == NullBranchID {
fbo.log.CDebugf(ctx, "Ignoring branch change back to master")
return
}
fbo.handleTLFBranchChange(ctx, newBID)
}()
}
func (fbo *folderBranchOps) handleMDFlush(ctx context.Context, bid BranchID,
rev MetadataRevision) {
fbo.log.CDebugf(ctx, "Considering archiving references for flushed MD revision %d", rev)
lState := makeFBOLockState()
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
fbo.setLatestMergedRevisionLocked(ctx, lState, rev, false)
}()
// Get that revision.
rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), NullBranchID,
rev, Merged)
if err != nil {
fbo.log.CWarningf(ctx, "Couldn't get revision %d for archiving: %v",
rev, err)
return
}
if err := isArchivableMDOrError(rmd.ReadOnly()); err != nil {
fbo.log.CDebugf(
ctx, "Skipping archiving references for flushed MD revision %d: %s", rev, err)
return
}
fbo.fbm.archiveUnrefBlocks(rmd.ReadOnly())
}
func (fbo *folderBranchOps) onMDFlush(bid BranchID, rev MetadataRevision) {
fbo.mdFlushes.Add(1)
go func() {
defer fbo.mdFlushes.Done()
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
if bid != NullBranchID {
fbo.log.CDebugf(ctx, "Ignoring MD flush on branch %v for "+
"revision %d", bid, rev)
return
}
fbo.handleMDFlush(ctx, bid, rev)
}()
}
// GetUpdateHistory implements the KBFSOps interface for folderBranchOps
func (fbo *folderBranchOps) GetUpdateHistory(ctx context.Context,
folderBranch FolderBranch) (history TLFUpdateHistory, err error) {
fbo.log.CDebugf(ctx, "GetUpdateHistory")
defer func() {
fbo.deferLog.CDebugf(ctx, "GetUpdateHistory done: %+v", err)
}()
if folderBranch != fbo.folderBranch {
return TLFUpdateHistory{}, WrongOpsError{fbo.folderBranch, folderBranch}
}
rmds, err := getMergedMDUpdates(ctx, fbo.config, fbo.id(),
MetadataRevisionInitial)
if err != nil {
return TLFUpdateHistory{}, err
}
if len(rmds) > 0 {
rmd := rmds[len(rmds)-1]
history.ID = rmd.TlfID().String()
history.Name = rmd.GetTlfHandle().GetCanonicalPath()
}
history.Updates = make([]UpdateSummary, 0, len(rmds))
writerNames := make(map[keybase1.UID]string)
for _, rmd := range rmds {
writer, ok := writerNames[rmd.LastModifyingWriter()]
if !ok {
name, err := fbo.config.KBPKI().
GetNormalizedUsername(ctx, rmd.LastModifyingWriter())
if err != nil {
return TLFUpdateHistory{}, err
}
writer = string(name)
writerNames[rmd.LastModifyingWriter()] = writer
}
updateSummary := UpdateSummary{
Revision: rmd.Revision(),
Date: time.Unix(0, rmd.data.Dir.Mtime),
Writer: writer,
LiveBytes: rmd.DiskUsage(),
Ops: make([]OpSummary, 0, len(rmd.data.Changes.Ops)),
}
for _, op := range rmd.data.Changes.Ops {
opSummary := OpSummary{
Op: op.String(),
Refs: make([]string, 0, len(op.Refs())),
Unrefs: make([]string, 0, len(op.Unrefs())),
Updates: make(map[string]string),
}
for _, ptr := range op.Refs() {
opSummary.Refs = append(opSummary.Refs, ptr.String())
}
for _, ptr := range op.Unrefs() {
opSummary.Unrefs = append(opSummary.Unrefs, ptr.String())
}
for _, update := range op.allUpdates() {
opSummary.Updates[update.Unref.String()] = update.Ref.String()
}
updateSummary.Ops = append(updateSummary.Ops, opSummary)
}
history.Updates = append(history.Updates, updateSummary)
}
return history, nil
}
// GetEditHistory implements the KBFSOps interface for folderBranchOps
func (fbo *folderBranchOps) GetEditHistory(ctx context.Context,
folderBranch FolderBranch) (edits TlfWriterEdits, err error) {
fbo.log.CDebugf(ctx, "GetEditHistory")
defer func() {
fbo.deferLog.CDebugf(ctx, "GetEditHistory done: %+v", err)
}()
if folderBranch != fbo.folderBranch {
return nil, WrongOpsError{fbo.folderBranch, folderBranch}
}
lState := makeFBOLockState()
head, err := fbo.getMDForReadHelper(ctx, lState, mdReadNeedIdentify)
if err != nil {
return nil, err
}
return fbo.editHistory.GetComplete(ctx, head)
}
// PushStatusChange forces a new status be fetched by status listeners.
func (fbo *folderBranchOps) PushStatusChange() {
fbo.config.KBFSOps().PushStatusChange()
}
// ClearPrivateFolderMD implements the KBFSOps interface for
// folderBranchOps.
func (fbo *folderBranchOps) ClearPrivateFolderMD(ctx context.Context) {
if fbo.folderBranch.Tlf.IsPublic() {
return
}
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if fbo.head == (ImmutableRootMetadata{}) {
// Nothing to clear.
return
}
fbo.log.CDebugf(ctx, "Clearing folder MD")
// First cancel the background goroutine that's registered for
// updates, because the next time we set the head in this FBO
// we'll launch another one.
fbo.cancelUpdatesLock.Lock()
defer fbo.cancelUpdatesLock.Unlock()
if fbo.cancelUpdates != nil {
fbo.cancelUpdates()
select {
case <-fbo.updateDoneChan:
case <-ctx.Done():
fbo.log.CDebugf(
ctx, "Context canceled before updater was canceled")
return
}
fbo.config.MDServer().CancelRegistration(ctx, fbo.id())
}
fbo.head = ImmutableRootMetadata{}
fbo.headStatus = headUntrusted
fbo.latestMergedRevision = MetadataRevisionUninitialized
fbo.hasBeenCleared = true
}
// ForceFastForward implements the KBFSOps interface for
// folderBranchOps.
func (fbo *folderBranchOps) ForceFastForward(ctx context.Context) {
lState := makeFBOLockState()
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
if fbo.head != (ImmutableRootMetadata{}) {
// We're already up to date.
return
}
if !fbo.hasBeenCleared {
// No reason to fast-forward here if it hasn't ever been
// cleared.
return
}
fbo.forcedFastForwards.Add(1)
go func() {
defer fbo.forcedFastForwards.Done()
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
fbo.log.CDebugf(ctx, "Forcing a fast-forward")
currHead, err := fbo.config.MDOps().GetForTLF(ctx, fbo.id())
if err != nil {
fbo.log.CDebugf(ctx, "Fast-forward failed: %v", err)
return
}
if currHead == (ImmutableRootMetadata{}) {
fbo.log.CDebugf(ctx, "No MD yet")
return
}
fbo.log.CDebugf(ctx, "Current head is revision %d", currHead.Revision())
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if fbo.head != (ImmutableRootMetadata{}) {
// We're already up to date.
fbo.log.CDebugf(ctx, "Already up-to-date: %v", err)
return
}
err = fbo.doFastForwardLocked(ctx, lState, currHead)
if err != nil {
fbo.log.CDebugf(ctx, "Fast-forward failed: %v", err)
}
}()
}
// PushConnectionStatusChange pushes human-readable connection status changes.
func (fbo *folderBranchOps) PushConnectionStatusChange(service string, newStatus error) {
fbo.config.KBFSOps().PushConnectionStatusChange(service, newStatus)
}
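// Illustration of the reviewer's note below about `if`/`else` ordering (a
// generic sketch with hypothetical names, not code from this file):
// testing the positive case first usually reads more naturally.
//
//	// Preferred:
//	if node != nil {
//		process(node)
//	} else {
//		return errNotFound
//	}
//
//	// Rather than:
//	if node == nil {
//		return errNotFound
//	} else {
//		process(node)
//	}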
| 1 | 16,221 | I prefer, if possible, for `if`/`else` statements to have the positive case first. | keybase-kbfs | go |
@@ -33,9 +33,18 @@ from scapy.base_classes import BasePacketList
## Tools ##
###########
+def issubtype(x, t):
+ """issubtype(C, B) -> bool
+
+ Return whether C is a class and a subclass of class B.
+ When using a tuple as the second argument, issubtype(X, (A, B, ...))
+ is a shortcut for issubtype(X, A) or issubtype(X, B) or ... (etc.).
+ """
+ return isinstance(x, type) and issubclass(x, t)
+
def get_temp_file(keep=False, autoext=""):
"""Create a temporary file and return its name. When keep is False,
-the file is deleted when scapy exits.
+ the file is deleted when scapy exits.
"""
fname = tempfile.NamedTemporaryFile(prefix="scapy", suffix=autoext, | 1 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
General utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
import os, sys, socket, types
import collections
import random, time
import gzip, zlib
import re, struct, array
import subprocess
import tempfile
import warnings
import scapy.modules.six as six
from scapy.modules.six.moves import range
warnings.filterwarnings("ignore","tempnam",RuntimeWarning, __name__)
from scapy.config import conf
from scapy.consts import DARWIN, WINDOWS
from scapy.data import MTU
from scapy.compat import *
from scapy.error import log_runtime, log_loading, log_interactive, Scapy_Exception, warning
from scapy.base_classes import BasePacketList
###########
## Tools ##
###########
def get_temp_file(keep=False, autoext=""):
"""Create a temporary file and return its name. When keep is False,
the file is deleted when scapy exits.
"""
fname = tempfile.NamedTemporaryFile(prefix="scapy", suffix=autoext,
delete=False).name
if not keep:
conf.temp_files.append(fname)
return fname
def sane_color(x):
r=""
for i in x:
j = orb(i)
if (j < 32) or (j >= 127):
r=r+conf.color_theme.not_printable(".")
else:
r=r+chr(j)
return r
def sane(x):
r=""
for i in x:
j = orb(i)
if (j < 32) or (j >= 127):
r=r+"."
else:
r=r+chr(j)
return r
@conf.commands.register
def restart():
"""Restarts scapy"""
if not conf.interactive or not os.path.isfile(sys.argv[0]):
raise OSError("Scapy was not started from console")
if WINDOWS:
os._exit(subprocess.call([sys.executable] + sys.argv))
os.execv(sys.executable, [sys.executable] + sys.argv)
def lhex(x):
if type(x) in six.integer_types:
return hex(x)
elif isinstance(x, tuple):
return "(%s)" % ", ".join(map(lhex, x))
elif isinstance(x, list):
return "[%s]" % ", ".join(map(lhex, x))
else:
return x
@conf.commands.register
def hexdump(x, dump=False):
""" Build a tcpdump like hexadecimal view
:param x: a Packet
:param dump: define if the result must be printed or returned in a variable
:returns: a String only when dump=True
"""
s = ""
x = raw(x)
l = len(x)
i = 0
while i < l:
s += "%04x " % i
for j in range(16):
if i+j < l:
s += "%02X" % orb(x[i+j])
else:
s += " "
if j%16 == 7:
s += ""
s += " "
s += sane_color(x[i:i+16])
i += 16
s += "\n"
# remove trailing \n
if s.endswith("\n"):
s = s[:-1]
if dump:
return s
else:
print(s)
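# Usage sketch for hexdump(): it prints its view by default; pass dump=True
# to get the rendered string back instead (works on packets or plain bytes).
# >>> view = hexdump(b"scapy", dump=True)
# >>> view.startswith("0000")
# True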
@conf.commands.register
def linehexdump(x, onlyasc=0, onlyhex=0, dump=False):
""" Build an equivalent view of hexdump() on a single line
Note that setting both onlyasc and onlyhex to 1 results in an empty output
:param x: a Packet
:param onlyasc: 1 to display only the ascii view
:param onlyhex: 1 to display only the hexadecimal view
:param dump: print the view if False
:returns: a String only when dump=True
"""
s = ""
x = raw(x)
l = len(x)
if not onlyasc:
for i in range(l):
s += "%02X" % orb(x[i])
if not onlyhex: # separate asc & hex if both are displayed
s += " "
if not onlyhex:
s += sane_color(x)
if dump:
return s
else:
print(s)
@conf.commands.register
def chexdump(x, dump=False):
""" Build a per byte hexadecimal representation
Example:
>>> chexdump(IP())
0x45, 0x00, 0x00, 0x14, 0x00, 0x01, 0x00, 0x00, 0x40, 0x00, 0x7c, 0xe7, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01
:param x: a Packet
:param dump: print the view if False
:returns: a String only if dump=True
"""
x = raw(x)
s = ", ".join("%#04x" % orb(x) for x in x)
if dump:
return s
else:
print(s)
@conf.commands.register
def hexstr(x, onlyasc=0, onlyhex=0):
s = []
if not onlyasc:
s.append(" ".join("%02x" % orb(b) for b in x))
if not onlyhex:
s.append(sane(x))
return " ".join(s)
def repr_hex(s):
""" Convert provided bitstring to a simple string of hex digits """
return "".join("%02x" % orb(x) for x in s)
@conf.commands.register
def hexdiff(x,y):
"""Show differences between 2 binary strings"""
x=raw(x)[::-1]
y=raw(y)[::-1]
SUBST=1
INSERT=1
d = {(-1, -1): (0, (-1, -1))}
for j in range(len(y)):
d[-1,j] = d[-1,j-1][0]+INSERT, (-1,j-1)
for i in range(len(x)):
d[i,-1] = d[i-1,-1][0]+INSERT, (i-1,-1)
for j in range(len(y)):
for i in range(len(x)):
d[i,j] = min( ( d[i-1,j-1][0]+SUBST*(x[i] != y[j]), (i-1,j-1) ),
( d[i-1,j][0]+INSERT, (i-1,j) ),
( d[i,j-1][0]+INSERT, (i,j-1) ) )
backtrackx = []
backtracky = []
i=len(x)-1
j=len(y)-1
while not (i == j == -1):
i2,j2 = d[i,j][1]
backtrackx.append(x[i2+1:i+1])
backtracky.append(y[j2+1:j+1])
i,j = i2,j2
x = y = i = 0
colorize = { 0: lambda x:x,
-1: conf.color_theme.left,
1: conf.color_theme.right }
dox=1
doy=0
l = len(backtrackx)
while i < l:
separate=0
linex = backtrackx[i:i+16]
liney = backtracky[i:i+16]
xx = sum(len(k) for k in linex)
yy = sum(len(k) for k in liney)
if dox and not xx:
dox = 0
doy = 1
if dox and linex == liney:
doy=1
if dox:
xd = y
j = 0
while not linex[j]:
j += 1
xd -= 1
print(colorize[doy-dox]("%04x" % xd), end=' ')
x += xx
line=linex
else:
print(" ", end=' ')
if doy:
yd = y
j = 0
while not liney[j]:
j += 1
yd -= 1
print(colorize[doy-dox]("%04x" % yd), end=' ')
y += yy
line=liney
else:
print(" ", end=' ')
print(" ", end=' ')
cl = ""
for j in range(16):
if i+j < l:
if line[j]:
col = colorize[(linex[j]!=liney[j])*(doy-dox)]
print(col("%02X" % orb(line[j])), end=' ')
if linex[j]==liney[j]:
cl += sane_color(line[j])
else:
cl += col(sane(line[j]))
else:
print(" ", end=' ')
cl += " "
else:
print(" ", end=' ')
if j == 7:
print("", end=' ')
print(" ",cl)
if doy or not yy:
doy=0
dox=1
i += 16
else:
if yy:
dox=0
doy=1
else:
i += 16
if struct.pack("H",1) == b"\x00\x01": # big endian
def checksum(pkt):
if len(pkt) % 2 == 1:
pkt += b"\0"
s = sum(array.array("H", pkt))
s = (s >> 16) + (s & 0xffff)
s += s >> 16
s = ~s
return s & 0xffff
else:
def checksum(pkt):
if len(pkt) % 2 == 1:
pkt += b"\0"
s = sum(array.array("H", pkt))
s = (s >> 16) + (s & 0xffff)
s += s >> 16
s = ~s
return (((s>>8)&0xff)|s<<8) & 0xffff
def _fletcher16(charbuf):
# This is based on the GPLed C implementation in Zebra <http://www.zebra.org/>
c0 = c1 = 0
for char in charbuf:
c0 += orb(char)
c1 += c0
c0 %= 255
c1 %= 255
return (c0,c1)
@conf.commands.register
def fletcher16_checksum(binbuf):
""" Calculates Fletcher-16 checksum of the given buffer.
Note:
If the buffer contains the two checkbytes derived from the Fletcher-16 checksum
the result of this function has to be 0. Otherwise the buffer has been corrupted.
"""
(c0,c1)= _fletcher16(binbuf)
return (c1 << 8) | c0
@conf.commands.register
def fletcher16_checkbytes(binbuf, offset):
""" Calculates the Fletcher-16 checkbytes returned as 2 byte binary-string.
Including the bytes into the buffer (at the position marked by offset) the
global Fletcher-16 checksum of the buffer will be 0. Thus it is easy to verify
the integrity of the buffer on the receiver side.
For details on the algorithm, see RFC 2328 chapter 12.1.7 and RFC 905 Annex B.
"""
# This is based on the GPLed C implementation in Zebra <http://www.zebra.org/>
if len(binbuf) < offset:
raise Exception("Packet too short for checkbytes %d" % len(binbuf))
binbuf = binbuf[:offset] + b"\x00\x00" + binbuf[offset + 2:]
(c0,c1)= _fletcher16(binbuf)
x = ((len(binbuf) - offset - 1) * c0 - c1) % 255
if (x <= 0):
x += 255
y = 510 - c0 - x
if (y > 255):
y -= 255
return chb(x) + chb(y)
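# Property sketch for the Fletcher-16 helpers above (hypothetical buffer):
# splicing the checkbytes in at the chosen offset makes the checksum of the
# whole buffer come out to 0, as the fletcher16_checksum docstring states.
# >>> buf = b"\x01\x02\x00\x00\x03\x04"   # two placeholder bytes at offset 2
# >>> cb = fletcher16_checkbytes(buf, 2)
# >>> fletcher16_checksum(buf[:2] + cb + buf[4:])
# 0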
def mac2str(mac):
return b"".join(chb(int(x, 16)) for x in mac.split(':'))
def str2mac(s):
if isinstance(s, str):
return ("%02x:"*6)[:-1] % tuple(map(ord, s))
return ("%02x:"*6)[:-1] % tuple(s)
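# Round-trip sketch for the MAC helpers above:
# >>> mac2str("00:01:02:03:04:05")
# b'\x00\x01\x02\x03\x04\x05'
# >>> str2mac(b"\x00\x01\x02\x03\x04\x05")
# '00:01:02:03:04:05'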
def randstring(l):
"""
Returns a random string of length l (l >= 0)
"""
return b"".join(struct.pack('B', random.randint(0, 255)) for _ in range(l))
def zerofree_randstring(l):
"""
Returns a random string of length l (l >= 0) without zero in it.
"""
return b"".join(struct.pack('B', random.randint(1, 255)) for _ in range(l))
def strxor(s1, s2):
"""
Returns the binary XOR of the 2 provided strings s1 and s2. s1 and s2
must be of same length.
"""
return b"".join(map(lambda x,y:chb(orb(x)^orb(y)), s1, s2))
def strand(s1, s2):
"""
Returns the binary AND of the 2 provided strings s1 and s2. s1 and s2
must be of same length.
"""
return b"".join(map(lambda x,y:chb(orb(x)&orb(y)), s1, s2))
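# Quick sanity checks for strxor/strand: XOR is its own inverse, so applying
# the same key twice recovers the original string, and AND keeps only the
# bits set in both inputs.
# >>> strxor(strxor(b"ab", b"cd"), b"cd")
# b'ab'
# >>> strand(b"\xff\x0f", b"\x0f\xff")
# b'\x0f\x0f'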
# Workaround bug 643005 : https://sourceforge.net/tracker/?func=detail&atid=105470&aid=643005&group_id=5470
try:
socket.inet_aton("255.255.255.255")
except socket.error:
def inet_aton(x):
if x == "255.255.255.255":
return b"\xff"*4
else:
return socket.inet_aton(x)
else:
inet_aton = socket.inet_aton
inet_ntoa = socket.inet_ntoa
from scapy.pton_ntop import *
def atol(x):
try:
ip = inet_aton(x)
except socket.error:
ip = inet_aton(socket.gethostbyname(x))
return struct.unpack("!I", ip)[0]
def ltoa(x):
return inet_ntoa(struct.pack("!I", x&0xffffffff))
def itom(x):
return (0xffffffff00000000>>x)&0xffffffff
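# Worked examples (sketch) for the IPv4 helpers above: atol/ltoa convert
# between dotted-quad strings and 32-bit integers, and itom builds a
# netmask from a prefix length.
# >>> hex(atol("127.0.0.1"))
# '0x7f000001'
# >>> ltoa(0x7f000001)
# '127.0.0.1'
# >>> ltoa(itom(24))
# '255.255.255.0'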
class ContextManagerSubprocess(object):
"""
Context manager that eases checking for unknown commands.
Example:
>>> with ContextManagerSubprocess("my custom message", "unknown_command"):
...     subprocess.Popen(["unknown_command"])
"""
def __init__(self, name, prog):
self.name = name
self.prog = prog
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if isinstance(exc_value, (OSError, TypeError)):
msg = "%s: executing %r failed" % (self.name, self.prog) if self.prog else "Could not execute %s, is it installed ?" % self.name
if not conf.interactive:
raise OSError(msg)
else:
log_runtime.error(msg, exc_info=True)
return True # Suppress the exception
class ContextManagerCaptureOutput(object):
"""
Context manager that intercept the console's output.
Example:
>>> with ContextManagerCaptureOutput() as cmco:
... print("hey")
... assert cmco.get_output() == "hey"
"""
def __init__(self):
self.result_export_object = ""
try:
import mock
except ImportError:
raise ImportError("The mock module needs to be installed!")
def __enter__(self):
import mock
def write(s, decorator=self):
decorator.result_export_object += s
mock_stdout = mock.Mock()
mock_stdout.write = write
self.bck_stdout = sys.stdout
sys.stdout = mock_stdout
return self
def __exit__(self, *exc):
sys.stdout = self.bck_stdout
return False
def get_output(self, eval_bytes=False):
if self.result_export_object.startswith("b'") and eval_bytes:
return plain_str(eval(self.result_export_object))
return self.result_export_object
def do_graph(graph,prog=None,format=None,target=None,type=None,string=None,options=None):
"""do_graph(graph, prog=conf.prog.dot, format="svg",
target="| conf.prog.display", options=None, [string=1]):
string: if not None, simply return the graph string
graph: GraphViz graph description
format: output type (svg, ps, gif, jpg, etc.), passed to dot's "-T" option
target: filename or redirect. Defaults pipe to Imagemagick's display program
prog: which graphviz program to use
options: options to be passed to prog"""
if format is None:
if WINDOWS:
format = "png" # use common format to make sure a viewer is installed
else:
format = "svg"
if string:
return graph
if type is not None:
format=type
if prog is None:
prog = conf.prog.dot
start_viewer=False
if target is None:
if WINDOWS:
target = get_temp_file(autoext="."+format)
start_viewer = True
else:
with ContextManagerSubprocess("do_graph()", conf.prog.display):
target = subprocess.Popen([conf.prog.display],
stdin=subprocess.PIPE).stdin
if format is not None:
format = "-T%s" % format
if isinstance(target, str):
if target.startswith('|'):
target = subprocess.Popen(target[1:].lstrip(), shell=True,
stdin=subprocess.PIPE).stdin
elif target.startswith('>'):
target = open(target[1:].lstrip(), "wb")
else:
target = open(os.path.abspath(target), "wb")
proc = subprocess.Popen("\"%s\" %s %s" % (prog, options or "", format or ""),
shell=True, stdin=subprocess.PIPE, stdout=target)
proc.stdin.write(raw(graph))
proc.stdin.close()
try:
target.close()
except:
pass
if start_viewer:
# Workaround for file not found error: We wait until tempfile is written.
waiting_start = time.time()
while not os.path.exists(target.name):
time.sleep(0.1)
if time.time() - waiting_start > 3:
warning("Temporary file '%s' could not be written. Graphic will not be displayed.", tempfile)
break
else:
if conf.prog.display == conf.prog._default:
os.startfile(target.name)
else:
with ContextManagerSubprocess("do_graph()", conf.prog.display):
subprocess.Popen([conf.prog.display, target.name])
_TEX_TR = {
"{":"{\\tt\\char123}",
"}":"{\\tt\\char125}",
"\\":"{\\tt\\char92}",
"^":"\\^{}",
"$":"\\$",
"#":"\\#",
"~":"\\~",
"_":"\\_",
"&":"\\&",
"%":"\\%",
"|":"{\\tt\\char124}",
"~":"{\\tt\\char126}",
"<":"{\\tt\\char60}",
">":"{\\tt\\char62}",
}
def tex_escape(x):
s = ""
for c in x:
s += _TEX_TR.get(c,c)
return s
def colgen(*lstcol,**kargs):
"""Returns a generator that mixes provided quantities forever
trans: a function to convert the three arguments into a color. lambda x,y,z:(x,y,z) by default"""
if len(lstcol) < 2:
lstcol *= 2
trans = kargs.get("trans", lambda x,y,z: (x,y,z))
while True:
for i in range(len(lstcol)):
for j in range(len(lstcol)):
for k in range(len(lstcol)):
if i != j or j != k or k != i:
yield trans(lstcol[(i+j)%len(lstcol)],lstcol[(j+k)%len(lstcol)],lstcol[(k+i)%len(lstcol)])
def incremental_label(label="tag%05i", start=0):
while True:
yield label % start
start += 1
def binrepr(val):
return bin(val)[2:]
def long_converter(s):
return int(s.replace('\n', '').replace(' ', ''), 16)
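# Quick illustrative examples for the two helpers above:
#   >>> binrepr(10)
#   '1010'
#   >>> "%x" % long_converter("de ad\nbe ef")
#   'deadbeef'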
#########################
#### Enum management ####
#########################
class EnumElement:
_value=None
def __init__(self, key, value):
self._key = key
self._value = value
def __repr__(self):
return "<%s %s[%r]>" % (self.__dict__.get("_name", self.__class__.__name__), self._key, self._value)
def __getattr__(self, attr):
return getattr(self._value, attr)
def __str__(self):
return self._key
def __bytes__(self):
return raw(self.__str__())
def __hash__(self):
return self._value
def __int__(self):
return int(self._value)
def __eq__(self, other):
return self._value == int(other)
    def __ne__(self, other):
return not self.__eq__(other)
class Enum_metaclass(type):
element_class = EnumElement
def __new__(cls, name, bases, dct):
rdict={}
for k,v in six.iteritems(dct):
if isinstance(v, int):
v = cls.element_class(k,v)
dct[k] = v
rdict[v] = k
dct["__rdict__"] = rdict
return super(Enum_metaclass, cls).__new__(cls, name, bases, dct)
def __getitem__(self, attr):
return self.__rdict__[attr]
def __contains__(self, val):
return val in self.__rdict__
def get(self, attr, val=None):
return self.__rdict__.get(attr, val)
def __repr__(self):
return "<%s>" % self.__dict__.get("name", self.__name__)
###################
## Object saving ##
###################
def export_object(obj):
print(bytes_base64(gzip.zlib.compress(six.moves.cPickle.dumps(obj, 2), 9)))
def import_object(obj=None):
if obj is None:
obj = sys.stdin.read()
return six.moves.cPickle.loads(gzip.zlib.decompress(base64_bytes(obj.strip())))
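# A minimal usage sketch: export_object() prints a base64 blob on stdout that
# import_object() can later restore, e.g.
#   export_object({"a": 1})                   # prints the blob
#   import_object("<paste the blob here>")    # -> {"a": 1}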
def save_object(fname, obj):
"""Pickle a Python object"""
fd = gzip.open(fname, "wb")
six.moves.cPickle.dump(obj, fd)
fd.close()
def load_object(fname):
"""unpickle a Python object"""
return six.moves.cPickle.load(gzip.open(fname,"rb"))
@conf.commands.register
def corrupt_bytes(s, p=0.01, n=None):
"""Corrupt a given percentage or number of bytes from a string"""
s = array.array("B",raw(s))
l = len(s)
if n is None:
n = max(1,int(l*p))
for i in random.sample(range(l), n):
s[i] = (s[i]+random.randint(1,255))%256
return s.tostring()
@conf.commands.register
def corrupt_bits(s, p=0.01, n=None):
"""Flip a given percentage or number of bits from a string"""
s = array.array("B",raw(s))
l = len(s)*8
if n is None:
n = max(1,int(l*p))
for i in random.sample(range(l), n):
s[i // 8] ^= 1 << (i % 8)
return s.tostring()
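# A minimal usage sketch: corrupt a few bits (or bytes) of a payload for
# fuzzing, e.g.
#   corrupt_bits(b"\x00\x00\x00\x00", n=1)   # exactly one bit flipped
#   corrupt_bytes(b"AAAA", p=0.5)            # roughly half the bytes changed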
#############################
## pcap capture file stuff ##
#############################
@conf.commands.register
def wrpcap(filename, pkt, *args, **kargs):
"""Write a list of packets to a pcap file
filename: the name of the file to write packets to, or an open,
writable file-like object. The file descriptor will be
closed at the end of the call, so do not use an object you
do not want to close (e.g., running wrpcap(sys.stdout, [])
in interactive mode will crash Scapy).
gz: set to 1 to save a gzipped capture
linktype: force linktype value
endianness: "<" or ">", force endianness
sync: do not bufferize writes to the capture file
"""
with PcapWriter(filename, *args, **kargs) as fdesc:
fdesc.write(pkt)
@conf.commands.register
def rdpcap(filename, count=-1):
"""Read a pcap or pcapng file and return a packet list
count: read only <count> packets
"""
with PcapReader(filename) as fdesc:
return fdesc.read_all(count=count)
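# A minimal usage sketch (hypothetical file names):
#   pkts = rdpcap("capture.pcap")        # -> PacketList
#   wrpcap("copy.pcap.gz", pkts, gz=1)   # write a gzipped copy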
class PcapReader_metaclass(type):
"""Metaclass for (Raw)Pcap(Ng)Readers"""
def __new__(cls, name, bases, dct):
"""The `alternative` class attribute is declared in the PcapNg
variant, and set here to the Pcap variant.
"""
newcls = super(PcapReader_metaclass, cls).__new__(cls, name, bases, dct)
if 'alternative' in dct:
dct['alternative'].alternative = newcls
return newcls
def __call__(cls, filename):
"""Creates a cls instance, use the `alternative` if that
fails.
"""
i = cls.__new__(cls, cls.__name__, cls.__bases__, cls.__dict__)
filename, fdesc, magic = cls.open(filename)
try:
i.__init__(filename, fdesc, magic)
except Scapy_Exception:
if "alternative" in cls.__dict__:
cls = cls.__dict__["alternative"]
i = cls.__new__(cls, cls.__name__, cls.__bases__, cls.__dict__)
try:
i.__init__(filename, fdesc, magic)
except Scapy_Exception:
raise
try:
i.f.seek(-4, 1)
except:
pass
raise Scapy_Exception("Not a supported capture file")
return i
@staticmethod
def open(filename):
"""Open (if necessary) filename, and read the magic."""
if isinstance(filename, six.string_types):
try:
fdesc = gzip.open(filename,"rb")
magic = fdesc.read(4)
except IOError:
fdesc = open(filename, "rb")
magic = fdesc.read(4)
else:
fdesc = filename
filename = (fdesc.name
if hasattr(fdesc, "name") else
"No name")
magic = fdesc.read(4)
return filename, fdesc, magic
RawPcapReader_PacketMetadata = collections.namedtuple("RawPcapReader_PacketMetadata",
["sec", "usec", "wirelen", "caplen"])
class RawPcapReader(six.with_metaclass(PcapReader_metaclass)):
"""A stateful pcap reader. Each packet is returned as a string"""
def __init__(self, filename, fdesc, magic):
self.filename = filename
self.f = fdesc
if magic == b"\xa1\xb2\xc3\xd4": # big endian
self.endian = ">"
self.nano = False
elif magic == b"\xd4\xc3\xb2\xa1": # little endian
self.endian = "<"
self.nano = False
elif magic == b"\xa1\xb2\x3c\x4d": # big endian, nanosecond-precision
self.endian = ">"
self.nano = True
elif magic == b"\x4d\x3c\xb2\xa1": # little endian, nanosecond-precision
self.endian = "<"
self.nano = True
else:
raise Scapy_Exception(
"Not a pcap capture file (bad magic: %r)" % magic
)
hdr = self.f.read(20)
if len(hdr)<20:
raise Scapy_Exception("Invalid pcap file (too short)")
vermaj, vermin, tz, sig, snaplen, linktype = struct.unpack(
self.endian + "HHIIII", hdr
)
self.linktype = linktype
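        # For reference: the 24-byte global header parsed above is laid out as
        # magic(4) | vermaj(2) vermin(2) | thiszone(4) | sigfigs(4) |
        # snaplen(4) | linktype(4), in the endianness picked from the magic.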
def __iter__(self):
return self
def next(self):
"""implement the iterator protocol on a set of packets in a pcap file"""
pkt = self.read_packet()
        if pkt is None:
raise StopIteration
return pkt
__next__ = next
def read_packet(self, size=MTU):
"""return a single packet read from the file
returns None when no more packets are available
"""
hdr = self.f.read(16)
if len(hdr) < 16:
return None
sec,usec,caplen,wirelen = struct.unpack(self.endian+"IIII", hdr)
return (self.f.read(caplen)[:size],
RawPcapReader_PacketMetadata(sec=sec, usec=usec,
wirelen=wirelen, caplen=caplen))
def dispatch(self, callback):
"""call the specified callback routine for each packet read
This is just a convenience function for the main loop
that allows for easy launching of packet processing in a
thread.
"""
for p in self:
callback(p)
def read_all(self,count=-1):
"""return a list of all packets in the pcap file
"""
res=[]
while count != 0:
count -= 1
p = self.read_packet()
if p is None:
break
res.append(p)
return res
def recv(self, size=MTU):
""" Emulate a socket
"""
return self.read_packet(size=size)[0]
def fileno(self):
return self.f.fileno()
def close(self):
return self.f.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tracback):
self.close()
class PcapReader(RawPcapReader):
def __init__(self, filename, fdesc, magic):
RawPcapReader.__init__(self, filename, fdesc, magic)
try:
self.LLcls = conf.l2types[self.linktype]
except KeyError:
warning("PcapReader: unknown LL type [%i]/[%#x]. Using Raw packets" % (self.linktype,self.linktype))
self.LLcls = conf.raw_layer
def read_packet(self, size=MTU):
rp = super(PcapReader, self).read_packet(size=size)
if rp is None:
return None
s, pkt_info = rp
try:
p = self.LLcls(s)
except KeyboardInterrupt:
raise
except:
if conf.debug_dissector:
raise
p = conf.raw_layer(s)
p.time = pkt_info.sec + (0.000000001 if self.nano else 0.000001) * pkt_info.usec
p.wirelen = pkt_info.wirelen
return p
def read_all(self,count=-1):
res = RawPcapReader.read_all(self, count)
from scapy import plist
return plist.PacketList(res,name = os.path.basename(self.filename))
def recv(self, size=MTU):
return self.read_packet(size=size)
RawPcapNgReader_PacketMetadata = collections.namedtuple("RawPcapNgReader_PacketMetadata",
["linktype", "tsresol",
"tshigh", "tslow", "wirelen"])
class RawPcapNgReader(RawPcapReader):
"""A stateful pcapng reader. Each packet is returned as a
string.
"""
alternative = RawPcapReader
def __init__(self, filename, fdesc, magic):
self.filename = filename
self.f = fdesc
# A list of (linktype, snaplen, tsresol); will be populated by IDBs.
self.interfaces = []
self.blocktypes = {
1: self.read_block_idb,
2: self.read_block_pkt,
3: self.read_block_spb,
6: self.read_block_epb,
}
if magic != b"\x0a\x0d\x0d\x0a": # PcapNg:
raise Scapy_Exception(
"Not a pcapng capture file (bad magic: %r)" % magic
)
# see https://github.com/pcapng/pcapng
blocklen, magic = self.f.read(4), self.f.read(4)
if magic == b"\x1a\x2b\x3c\x4d":
self.endian = ">"
elif magic == b"\x4d\x3c\x2b\x1a":
self.endian = "<"
else:
raise Scapy_Exception("Not a pcapng capture file (bad magic)")
try:
self.f.seek(0)
except:
pass
def read_packet(self, size=MTU):
"""Read blocks until it reaches either EOF or a packet, and
returns None or (packet, (linktype, sec, usec, wirelen)),
where packet is a string.
"""
while True:
try:
blocktype, blocklen = struct.unpack(self.endian + "2I",
self.f.read(8))
except struct.error:
return None
block = self.f.read(blocklen - 12)
if blocklen % 4:
pad = self.f.read(4 - (blocklen % 4))
warning("PcapNg: bad blocklen %d (MUST be a multiple of 4. "
"Ignored padding %r" % (blocklen, pad))
try:
if (blocklen,) != struct.unpack(self.endian + 'I',
self.f.read(4)):
warning("PcapNg: Invalid pcapng block (bad blocklen)")
except struct.error:
return None
res = self.blocktypes.get(blocktype,
lambda block, size: None)(block, size)
if res is not None:
return res
def read_block_idb(self, block, _):
"""Interface Description Block"""
options = block[16:]
tsresol = 1000000
while len(options) >= 4:
code, length = struct.unpack(self.endian + "HH", options[:4])
# PCAP Next Generation (pcapng) Capture File Format
# 4.2. - Interface Description Block
# http://xml2rfc.tools.ietf.org/cgi-bin/xml2rfc.cgi?url=https://raw.githubusercontent.com/pcapng/pcapng/master/draft-tuexen-opsawg-pcapng.xml&modeAsFormat=html/ascii&type=ascii#rfc.section.4.2
if code == 9 and length == 1 and len(options) >= 5:
tsresol = orb(options[4])
tsresol = (2 if tsresol & 128 else 10) ** (tsresol & 127)
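                # e.g. the spec's default if_tsresol of 6 gives 10**6 ticks
                # per second (microseconds); 0x89 (MSB set) gives 2**9.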
if code == 0:
if length != 0:
warning("PcapNg: invalid option length %d for end-of-option" % length)
break
if length % 4:
length += (4 - (length % 4))
options = options[4 + length:]
self.interfaces.append(struct.unpack(self.endian + "HxxI", block[:8])
+ (tsresol,))
def read_block_epb(self, block, size):
"""Enhanced Packet Block"""
intid, tshigh, tslow, caplen, wirelen = struct.unpack(
self.endian + "5I",
block[:20],
)
return (block[20:20 + caplen][:size],
RawPcapNgReader_PacketMetadata(linktype=self.interfaces[intid][0],
tsresol=self.interfaces[intid][2],
tshigh=tshigh,
tslow=tslow,
wirelen=wirelen))
def read_block_spb(self, block, size):
"""Simple Packet Block"""
# "it MUST be assumed that all the Simple Packet Blocks have
# been captured on the interface previously specified in the
# first Interface Description Block."
intid = 0
wirelen, = struct.unpack(self.endian + "I", block[:4])
caplen = min(wirelen, self.interfaces[intid][1])
return (block[4:4 + caplen][:size],
RawPcapNgReader_PacketMetadata(linktype=self.interfaces[intid][0],
tsresol=self.interfaces[intid][2],
tshigh=None,
tslow=None,
wirelen=wirelen))
def read_block_pkt(self, block, size):
"""(Obsolete) Packet Block"""
intid, drops, tshigh, tslow, caplen, wirelen = struct.unpack(
self.endian + "HH4I",
block[:20],
)
return (block[20:20 + caplen][:size],
RawPcapNgReader_PacketMetadata(linktype=self.interfaces[intid][0],
tsresol=self.interfaces[intid][2],
tshigh=tshigh,
tslow=tslow,
wirelen=wirelen))
class PcapNgReader(RawPcapNgReader):
alternative = PcapReader
def __init__(self, filename, fdesc, magic):
RawPcapNgReader.__init__(self, filename, fdesc, magic)
def read_packet(self, size=MTU):
rp = super(PcapNgReader, self).read_packet(size=size)
if rp is None:
return None
s, (linktype, tsresol, tshigh, tslow, wirelen) = rp
try:
p = conf.l2types[linktype](s)
except KeyboardInterrupt:
raise
except:
if conf.debug_dissector:
raise
p = conf.raw_layer(s)
if tshigh is not None:
p.time = float((tshigh << 32) + tslow) / tsresol
p.wirelen = wirelen
return p
def read_all(self,count=-1):
res = RawPcapNgReader.read_all(self, count)
from scapy import plist
return plist.PacketList(res, name=os.path.basename(self.filename))
def recv(self, size=MTU):
        return self.read_packet(size=size)
class RawPcapWriter:
"""A stream PCAP writer with more control than wrpcap()"""
def __init__(self, filename, linktype=None, gz=False, endianness="",
append=False, sync=False, nano=False):
"""
filename: the name of the file to write packets to, or an open,
writable file-like object.
linktype: force linktype to a given value. If None, linktype is taken
from the first writer packet
gz: compress the capture on the fly
endianness: force an endianness (little:"<", big:">"). Default is native
append: append packets to the capture file instead of truncating it
sync: do not bufferize writes to the capture file
nano: use nanosecond-precision (requires libpcap >= 1.5.0)
"""
self.linktype = linktype
self.header_present = 0
self.append = append
self.gz = gz
self.endian = endianness
self.sync = sync
self.nano = nano
bufsz=4096
if sync:
bufsz = 0
if isinstance(filename, six.string_types):
self.filename = filename
self.f = [open,gzip.open][gz](filename,append and "ab" or "wb", gz and 9 or bufsz)
else:
self.f = filename
self.filename = (filename.name
if hasattr(filename, "name") else
"No name")
def fileno(self):
return self.f.fileno()
def _write_header(self, pkt):
self.header_present=1
if self.append:
# Even if prone to race conditions, this seems to be
# safest way to tell whether the header is already present
# because we have to handle compressed streams that
# are not as flexible as basic files
g = [open,gzip.open][self.gz](self.filename,"rb")
if g.read(16):
return
self.f.write(struct.pack(self.endian+"IHHIIII", 0xa1b23c4d if self.nano else 0xa1b2c3d4,
2, 4, 0, 0, MTU, self.linktype))
self.f.flush()
def write(self, pkt):
"""accepts either a single packet or a list of packets to be
written to the dumpfile
"""
if isinstance(pkt, bytes):
if not self.header_present:
self._write_header(pkt)
self._write_packet(pkt)
else:
pkt = pkt.__iter__()
if not self.header_present:
try:
p = next(pkt)
except StopIteration:
return
self._write_header(p)
self._write_packet(p)
for p in pkt:
self._write_packet(p)
def _write_packet(self, packet, sec=None, usec=None, caplen=None, wirelen=None):
"""writes a single packet to the pcap file
"""
if isinstance(packet, tuple):
for pkt in packet:
self._write_packet(pkt, sec=sec, usec=usec, caplen=caplen,
wirelen=wirelen)
return
if caplen is None:
caplen = len(packet)
if wirelen is None:
wirelen = caplen
if sec is None or usec is None:
t=time.time()
it = int(t)
if sec is None:
sec = it
if usec is None:
usec = int(round((t - it) * (1000000000 if self.nano else 1000000)))
self.f.write(struct.pack(self.endian+"IIII", sec, usec, caplen, wirelen))
self.f.write(packet)
if self.sync:
self.f.flush()
def flush(self):
return self.f.flush()
def close(self):
return self.f.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tracback):
self.flush()
self.close()
class PcapWriter(RawPcapWriter):
"""A stream PCAP writer with more control than wrpcap()"""
def _write_header(self, pkt):
if isinstance(pkt, tuple) and pkt:
pkt = pkt[0]
if self.linktype == None:
try:
self.linktype = conf.l2types[pkt.__class__]
except KeyError:
warning("PcapWriter: unknown LL type for %s. Using type 1 (Ethernet)", pkt.__class__.__name__)
self.linktype = 1
RawPcapWriter._write_header(self, pkt)
def _write_packet(self, packet):
if isinstance(packet, tuple):
for pkt in packet:
self._write_packet(pkt)
return
sec = int(packet.time)
usec = int(round((packet.time - sec) * (1000000000 if self.nano else 1000000)))
rawpkt = raw(packet)
caplen = len(rawpkt)
RawPcapWriter._write_packet(self, rawpkt, sec=sec, usec=usec, caplen=caplen,
wirelen=packet.wirelen or caplen)
re_extract_hexcap = re.compile("^((0x)?[0-9a-fA-F]{2,}[ :\t]{,3}|) *(([0-9a-fA-F]{2} {,2}){,16})")
@conf.commands.register
def import_hexcap():
p = ""
try:
while True:
l = input().strip()
try:
p += re_extract_hexcap.match(l).groups()[2]
except:
warning("Parsing error during hexcap")
continue
except EOFError:
pass
p = p.replace(" ","")
return p.decode("hex")
@conf.commands.register
def wireshark(pktlist):
"""Run wireshark on a list of packets"""
f = get_temp_file()
wrpcap(f, pktlist)
with ContextManagerSubprocess("wireshark()", conf.prog.wireshark):
subprocess.Popen([conf.prog.wireshark, "-r", f])
@conf.commands.register
def tcpdump(pktlist, dump=False, getfd=False, args=None,
prog=None, getproc=False, quiet=False):
"""Run tcpdump or tshark on a list of packets
pktlist: a Packet instance, a PacketList instance or a list of Packet
instances. Can also be a filename (as a string) or an open
file-like object that must be a file format readable by
tshark (Pcap, PcapNg, etc.)
dump: when set to True, returns a string instead of displaying it.
getfd: when set to True, returns a file-like object to read data
from tcpdump or tshark from.
getproc: when set to True, the subprocess.Popen object is returned
args: arguments (as a list) to pass to tshark (example for tshark:
args=["-T", "json"]). Defaults to ["-n"].
prog: program to use (defaults to tcpdump, will work with tshark)
quiet: when set to True, the process stderr is discarded
Examples:
>>> tcpdump([IP()/TCP(), IP()/UDP()])
reading from file -, link-type RAW (Raw IP)
16:46:00.474515 IP 127.0.0.1.20 > 127.0.0.1.80: Flags [S], seq 0, win 8192, length 0
16:46:00.475019 IP 127.0.0.1.53 > 127.0.0.1.53: [|domain]
>>> tcpdump([IP()/TCP(), IP()/UDP()], prog=conf.prog.tshark)
1 0.000000 127.0.0.1 -> 127.0.0.1 TCP 40 20->80 [SYN] Seq=0 Win=8192 Len=0
2 0.000459 127.0.0.1 -> 127.0.0.1 UDP 28 53->53 Len=0
To get a JSON representation of a tshark-parsed PacketList(), one can:
>>> import json, pprint
>>> json_data = json.load(tcpdump(IP(src="217.25.178.5", dst="45.33.32.156"),
... prog=conf.prog.tshark, args=["-T", "json"],
... getfd=True))
>>> pprint.pprint(json_data)
[{u'_index': u'packets-2016-12-23',
u'_score': None,
u'_source': {u'layers': {u'frame': {u'frame.cap_len': u'20',
u'frame.encap_type': u'7',
[...]
u'frame.time_relative': u'0.000000000'},
u'ip': {u'ip.addr': u'45.33.32.156',
u'ip.checksum': u'0x0000a20d',
[...]
u'ip.ttl': u'64',
u'ip.version': u'4'},
u'raw': u'Raw packet data'}},
u'_type': u'pcap_file'}]
>>> json_data[0]['_source']['layers']['ip']['ip.ttl']
u'64'
"""
getfd = getfd or getproc
if prog is None:
prog = [conf.prog.tcpdump]
elif isinstance(prog, six.string_types):
prog = [prog]
_prog_name = "windump()" if WINDOWS else "tcpdump()"
if pktlist is None:
with ContextManagerSubprocess(_prog_name, prog[0]):
proc = subprocess.Popen(
prog + (args if args is not None else []),
stdout=subprocess.PIPE if dump or getfd else None,
stderr=open(os.devnull) if quiet else None,
)
elif isinstance(pktlist, six.string_types):
with ContextManagerSubprocess(_prog_name, prog[0]):
proc = subprocess.Popen(
prog + ["-r", pktlist] + (args if args is not None else []),
stdout=subprocess.PIPE if dump or getfd else None,
stderr=open(os.devnull) if quiet else None,
)
elif DARWIN:
# Tcpdump cannot read from stdin, see
# <http://apple.stackexchange.com/questions/152682/>
tmpfile = tempfile.NamedTemporaryFile(delete=False)
try:
tmpfile.writelines(iter(lambda: pktlist.read(1048576), b""))
except AttributeError:
wrpcap(tmpfile, pktlist)
else:
tmpfile.close()
with ContextManagerSubprocess(_prog_name, prog[0]):
proc = subprocess.Popen(
prog + ["-r", tmpfile.name] + (args if args is not None else []),
stdout=subprocess.PIPE if dump or getfd else None,
stderr=open(os.devnull) if quiet else None,
)
conf.temp_files.append(tmpfile.name)
else:
with ContextManagerSubprocess(_prog_name, prog[0]):
proc = subprocess.Popen(
prog + ["-r", "-"] + (args if args is not None else []),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE if dump or getfd else None,
stderr=open(os.devnull) if quiet else None,
)
try:
proc.stdin.writelines(iter(lambda: pktlist.read(1048576), b""))
except AttributeError:
wrpcap(proc.stdin, pktlist)
else:
proc.stdin.close()
if dump:
return b"".join(iter(lambda: proc.stdout.read(1048576), b""))
if getproc:
return proc
if getfd:
return proc.stdout
proc.wait()
@conf.commands.register
def hexedit(x):
    x = raw(x)
    f = get_temp_file()
    open(f, "wb").write(x)
    with ContextManagerSubprocess("hexedit()", conf.prog.hexedit):
        subprocess.call([conf.prog.hexedit, f])
    x = open(f, "rb").read()
os.unlink(f)
return x
def get_terminal_width():
"""Get terminal width if in a window"""
if WINDOWS:
from ctypes import windll, create_string_buffer
# http://code.activestate.com/recipes/440694-determine-size-of-console-window-on-windows/
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
import struct
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom, maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
#sizey = bottom - top + 1
return sizex
else:
return None
else:
sizex = 0
try:
import struct, fcntl, termios
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(1, termios.TIOCGWINSZ, s)
sizex = struct.unpack('HHHH', x)[1]
except IOError:
pass
if not sizex:
try:
sizex = int(os.environ['COLUMNS'])
            except (KeyError, ValueError):
pass
if sizex:
return sizex
else:
return None
def pretty_list(rtlst, header, sortBy=0):
"""Pretty list to fit the terminal, and add header"""
_space = " "
# Windows has a fat terminal border
_spacelen = len(_space) * (len(header)-1) + (10 if WINDOWS else 0)
    _cropped = False
# Sort correctly
rtlst.sort(key=lambda x: x[sortBy])
# Append tag
rtlst = header + rtlst
# Detect column's width
colwidth = [max([len(y) for y in x]) for x in zip(*rtlst)]
# Make text fit in box (if required)
width = get_terminal_width()
if conf.auto_crop_tables and width:
width = width - _spacelen
while sum(colwidth) > width:
            _cropped = True
# Needs to be cropped
# Get the longest row
i = colwidth.index(max(colwidth))
# Get all elements of this row
row = [len(x[i]) for x in rtlst]
# Get biggest element of this row: biggest of the array
j = row.index(max(row))
# Re-build column tuple with the edited element
t = list(rtlst[j])
t[i] = t[i][:-2]+"_"
rtlst[j] = tuple(t)
# Update max size
row[j] = len(t[i])
colwidth[i] = max(row)
    if _cropped:
log_runtime.info("Table cropped to fit the terminal (conf.auto_crop_tables==True)")
# Generate padding scheme
fmt = _space.join(["%%-%ds" % x for x in colwidth])
# Compile
rt = "\n".join(((fmt % x).strip() for x in rtlst))
return rt
def __make_table(yfmtfunc, fmtfunc, endline, data, fxyz, sortx=None, sorty=None, seplinefunc=None):
vx = {}
vy = {}
vz = {}
vxf = {}
vyf = {}
l = 0
for e in data:
xx, yy, zz = [str(s) for s in fxyz(e)]
l = max(len(yy),l)
vx[xx] = max(vx.get(xx,0), len(xx), len(zz))
vy[yy] = None
vz[(xx,yy)] = zz
vxk = list(vx)
vyk = list(vy)
if sortx:
vxk.sort(key=sortx)
else:
try:
vxk.sort(key=int)
except:
try:
vxk.sort(key=atol)
except:
vxk.sort()
if sorty:
vyk.sort(key=sorty)
else:
try:
vyk.sort(key=int)
except:
try:
vyk.sort(key=atol)
except:
vyk.sort()
if seplinefunc:
sepline = seplinefunc(l, [vx[x] for x in vxk])
print(sepline)
fmt = yfmtfunc(l)
print(fmt % "", end=' ')
for x in vxk:
vxf[x] = fmtfunc(vx[x])
print(vxf[x] % x, end=' ')
print(endline)
if seplinefunc:
print(sepline)
for y in vyk:
print(fmt % y, end=' ')
for x in vxk:
print(vxf[x] % vz.get((x,y), "-"), end=' ')
print(endline)
if seplinefunc:
print(sepline)
def make_table(*args, **kargs):
__make_table(lambda l:"%%-%is" % l, lambda l:"%%-%is" % l, "", *args, **kargs)
def make_lined_table(*args, **kargs):
__make_table(lambda l:"%%-%is |" % l, lambda l:"%%-%is |" % l, "",
seplinefunc=lambda a,x:"+".join('-'*(y+2) for y in [a-1]+x+[-2]),
*args, **kargs)
def make_tex_table(*args, **kargs):
__make_table(lambda l: "%s", lambda l: "& %s", "\\\\", seplinefunc=lambda a,x:"\\hline", *args, **kargs)
####################
### WHOIS CLIENT ###
####################
def whois(ip_address):
"""Whois client for Python"""
whois_ip = str(ip_address)
try:
query = socket.gethostbyname(whois_ip)
    except socket.error:
query = whois_ip
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("whois.ripe.net", 43))
s.send(query.encode("utf8") + b"\r\n")
answer = b""
while True:
d = s.recv(4096)
answer += d
if not d:
break
s.close()
ignore_tag = b"remarks:"
# ignore all lines starting with the ignore_tag
lines = [ line for line in answer.split(b"\n") if not line or (line and not line.startswith(ignore_tag))]
# remove empty lines at the bottom
for i in range(1, len(lines)):
if not lines[-i].strip():
del lines[-i]
else:
break
return b"\n".join(lines[3:])
| 1 | 12,513 | Docstring would be nice | secdev-scapy | py |
@@ -23,6 +23,7 @@ import org.gradle.api.provider.ListProperty;
public class BaselineErrorProneExtension {
private static final ImmutableList<String> DEFAULT_PATCH_CHECKS = ImmutableList.of(
// Baseline checks
+ "CatchBlockLogException",
"ExecutorSubmitRunnableFutureIgnored",
"LambdaMethodReference",
"OptionalOrElseMethodInvocation", | 1 | /*
* (c) Copyright 2019 Palantir Technologies Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.palantir.baseline.extensions;
import com.google.common.collect.ImmutableList;
import org.gradle.api.Project;
import org.gradle.api.provider.ListProperty;
public class BaselineErrorProneExtension {
private static final ImmutableList<String> DEFAULT_PATCH_CHECKS = ImmutableList.of(
// Baseline checks
"ExecutorSubmitRunnableFutureIgnored",
"LambdaMethodReference",
"OptionalOrElseMethodInvocation",
"PreferBuiltInConcurrentKeySet",
"PreferCollectionTransform",
"PreferListsPartition",
"PreferSafeLoggableExceptions",
"PreferSafeLoggingPreconditions",
"ReadReturnValueIgnored",
"Slf4jLevelCheck",
"StrictUnusedVariable",
"StringBuilderConstantParameters",
"ThrowError",
// Built-in checks
"ArrayEquals",
"MissingOverride",
"UnnecessaryParentheses");
private final ListProperty<String> patchChecks;
public BaselineErrorProneExtension(Project project) {
patchChecks = project.getObjects().listProperty(String.class);
patchChecks.set(DEFAULT_PATCH_CHECKS);
}
public final ListProperty<String> getPatchChecks() {
return patchChecks;
}
}
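// A minimal consumer-side sketch (assumption: the extension is registered
// under the name "baselineErrorProne"); a Gradle build script could then
// extend the defaults with e.g.
//   baselineErrorProne { patchChecks.add('SomeExtraCheck') }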
| 1 | 7,814 | Low risk to fix this by default because existing baseline consumers pass this check. We don't attempt to fix checks that have been opted out of. | palantir-gradle-baseline | java |
@@ -77,7 +77,16 @@ void OpenMPTargetInternal::impl_finalize() {
Kokkos::kokkos_free<Kokkos::Experimental::OpenMPTargetSpace>(
space.m_uniquetoken_ptr);
}
-void OpenMPTargetInternal::impl_initialize() { m_is_initialized = true; }
+void OpenMPTargetInternal::impl_initialize() {
+ m_is_initialized = true;
+
+ // FIXME_OPENMPTARGET: Only fix the number of teams for NVIDIA architectures.
+#if defined(KOKKOS_ARCH_VOLTA70) || defined(KOKKOS_ARCH_PASCAL60)
+#if defined(KOKKOS_COMPILER_CLANG) && (KOKKOS_COMPILER_CLANG >= 1300)
+ omp_set_num_teams(512);
+#endif
+#endif
+}
int OpenMPTargetInternal::impl_is_initialized() {
return m_is_initialized ? 1 : 0;
} | 1 | /*
//@HEADER
// ************************************************************************
//
// Kokkos v. 3.0
// Copyright (2020) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
//
// ************************************************************************
//@HEADER
*/
#include <Kokkos_Macros.hpp>
#if defined(KOKKOS_ENABLE_OPENMPTARGET) && defined(_OPENMP)
// FIXME_OPENMPTARGET - macro for workaround implementation in UniqueToken
// constructor. undef'ed at the end
#define KOKKOS_IMPL_OPENMPTARGET_WORKAROUND
#include <Kokkos_OpenMPTarget.hpp>
#include <OpenMPTarget/Kokkos_OpenMPTarget_UniqueToken.hpp>
#include <OpenMPTarget/Kokkos_OpenMPTarget_Instance.hpp>
#include <sstream>
namespace Kokkos {
namespace Experimental {
namespace Impl {
void OpenMPTargetInternal::fence() {}
int OpenMPTargetInternal::concurrency() { return 128000; }
const char* OpenMPTargetInternal::name() { return "OpenMPTarget"; }
void OpenMPTargetInternal::print_configuration(std::ostream& /*stream*/,
const bool) {
// FIXME_OPENMPTARGET
printf("Using OpenMPTarget\n");
}
void OpenMPTargetInternal::impl_finalize() {
m_is_initialized = false;
Kokkos::Impl::OpenMPTargetExec space;
if (space.m_lock_array != nullptr) space.clear_lock_array();
if (space.m_uniquetoken_ptr != nullptr)
Kokkos::kokkos_free<Kokkos::Experimental::OpenMPTargetSpace>(
space.m_uniquetoken_ptr);
}
void OpenMPTargetInternal::impl_initialize() { m_is_initialized = true; }
int OpenMPTargetInternal::impl_is_initialized() {
return m_is_initialized ? 1 : 0;
}
OpenMPTargetInternal* OpenMPTargetInternal::impl_singleton() {
static OpenMPTargetInternal self;
return &self;
}
} // Namespace Impl
OpenMPTarget::OpenMPTarget()
: m_space_instance(Impl::OpenMPTargetInternal::impl_singleton()) {}
const char* OpenMPTarget::name() {
return Impl::OpenMPTargetInternal::impl_singleton()->name();
}
void OpenMPTarget::print_configuration(std::ostream& stream,
const bool detail) {
m_space_instance->print_configuration(stream, detail);
}
int OpenMPTarget::concurrency() {
return Impl::OpenMPTargetInternal::impl_singleton()->concurrency();
}
void OpenMPTarget::fence() {
Impl::OpenMPTargetInternal::impl_singleton()->fence();
}
void OpenMPTarget::impl_initialize() { m_space_instance->impl_initialize(); }
void OpenMPTarget::impl_finalize() { m_space_instance->impl_finalize(); }
int OpenMPTarget::impl_is_initialized() {
return Impl::OpenMPTargetInternal::impl_singleton()->impl_is_initialized();
}
} // Namespace Experimental
namespace Impl {
int g_openmptarget_space_factory_initialized =
Kokkos::Impl::initialize_space_factory<OpenMPTargetSpaceInitializer>(
"160_OpenMPTarget");
void OpenMPTargetSpaceInitializer::initialize(const InitArguments& args) {
// Prevent "unused variable" warning for 'args' input struct. If
// Serial::initialize() ever needs to take arguments from the input
// struct, you may remove this line of code.
(void)args;
if (std::is_same<Kokkos::Experimental::OpenMPTarget,
Kokkos::DefaultExecutionSpace>::value) {
Kokkos::Experimental::OpenMPTarget().impl_initialize();
// std::cout << "Kokkos::initialize() fyi: OpenMP enabled and initialized"
// << std::endl ;
} else {
// std::cout << "Kokkos::initialize() fyi: OpenMP enabled but not
// initialized" << std::endl ;
}
}
void OpenMPTargetSpaceInitializer::finalize(const bool all_spaces) {
if (std::is_same<Kokkos::Experimental::OpenMPTarget,
Kokkos::DefaultExecutionSpace>::value ||
all_spaces) {
if (Kokkos::Experimental::OpenMPTarget().impl_is_initialized())
Kokkos::Experimental::OpenMPTarget().impl_finalize();
}
}
void OpenMPTargetSpaceInitializer::fence() {
Kokkos::Experimental::OpenMPTarget::fence();
}
void OpenMPTargetSpaceInitializer::print_configuration(std::ostream& msg,
const bool detail) {
msg << "OpenMPTarget Execution Space:" << std::endl;
msg << " KOKKOS_ENABLE_OPENMPTARGET: ";
msg << "yes" << std::endl;
msg << "\nOpenMPTarget Runtime Configuration:" << std::endl;
Kokkos::Experimental::OpenMPTarget().print_configuration(msg, detail);
}
} // namespace Impl
} // Namespace Kokkos
namespace Kokkos {
namespace Experimental {
UniqueToken<Kokkos::Experimental::OpenMPTarget,
Kokkos::Experimental::UniqueTokenScope::Global>::
UniqueToken(Kokkos::Experimental::OpenMPTarget const&) {
#ifdef KOKKOS_IMPL_OPENMPTARGET_WORKAROUND
uint32_t* ptr = Kokkos::Impl::OpenMPTargetExec::m_uniquetoken_ptr;
int count = Kokkos::Experimental::OpenMPTarget().concurrency();
if (ptr == nullptr) {
int size = count * sizeof(uint32_t);
ptr = static_cast<uint32_t*>(
Kokkos::kokkos_malloc<Kokkos::Experimental::OpenMPTargetSpace>(
"Kokkos::OpenMPTarget::m_uniquetoken_ptr", size));
std::vector<uint32_t> h_buf(count, 0);
OMPT_SAFE_CALL(omp_target_memcpy(ptr, h_buf.data(), size, 0, 0,
omp_get_default_device(),
omp_get_initial_device()));
Kokkos::Impl::OpenMPTargetExec::m_uniquetoken_ptr = ptr;
}
#else
// FIXME_OPENMPTARGET - 2 versions of non-working implementations to fill `ptr`
// with 0's
// Version 1 - Creating a target region and filling the
// pointer Error - CUDA error: named symbol not found
#pragma omp target teams distribute parallel for is_device_ptr(ptr) \
map(to \
: size)
for (int i = 0; i < count; ++i) ptr[i] = 0;
// Version 2 : Allocating a view on the device and filling it with a scalar
// value of 0.
Kokkos::View<uint32_t*, Kokkos::Experimental::OpenMPTargetSpace> ptr_view(
ptr, count);
Kokkos::deep_copy(ptr_view, 0);
#endif
m_buffer = ptr;
m_count = count;
}
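// A minimal usage sketch (not a definitive implementation): a UniqueToken is
// typically used to index per-thread scratch storage inside a kernel, e.g.
//
//   Kokkos::Experimental::UniqueToken<Kokkos::Experimental::OpenMPTarget> t;
//   Kokkos::parallel_for(n, KOKKOS_LAMBDA(const int i) {
//     const int id = t.acquire();  // unique id in [0, t.size())
//     /* ... index a scratch buffer with id ... */
//     t.release(id);
//   });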
} // namespace Experimental
} // namespace Kokkos
#undef KOKKOS_IMPL_OPENMPTARGET_WORKAROUND
#endif // defined(KOKKOS_ENABLE_OPENMPTARGET) && defined(_OPENMP)
| 1 | 29,167 | What about `VOLTA72`, `TURING75`, `AMPERE80` and `AMPERE86`? We only want to set the number of teams for these two architectures or for all the architectures newer than Maxwell? | kokkos-kokkos | cpp |
@@ -1,7 +1,7 @@
# model settings
model = dict(
type='FasterRCNN',
- pretrained='modelzoo://resnet50',
+ pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50, | 1 | # model settings
model = dict(
type='FasterRCNN',
pretrained='modelzoo://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=9,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
)
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 1024),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=1,
workers_per_gpu=2,
train=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_train.json',
img_prefix=data_root + 'train/',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
img_prefix=data_root + 'val/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
img_prefix=data_root + 'val/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
# lr is set for a batch size of 8
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
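# (assumption: following the usual linear lr scaling, 8 GPUs x imgs_per_gpu=1
# gives a total batch size of 8 -> lr=0.01; batch size 16 would scale to 0.02)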
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[6])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 8 # actual epoch = 8 * 8 = 64
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes'
load_from = None
resume_from = None
workflow = [('train', 1)]
| 1 | 18,522 | Since we have specified `load_from`, `pretrained` can be left None. | open-mmlab-mmdetection | py |
@@ -71,6 +71,9 @@ func logParse(c *caddy.Controller) ([]*Rule, error) {
},
Format: DefaultLogFormat,
})
+ } else if len(args) > 3 {
+			// Maximum number of args in log directive is 3.
+ return nil, c.ArgErr()
} else {
// Path scope, output file, and maybe a format specified
| 1 | package log
import (
"strings"
"github.com/mholt/caddy"
"github.com/mholt/caddy/caddyhttp/httpserver"
)
// setup sets up the logging middleware.
func setup(c *caddy.Controller) error {
rules, err := logParse(c)
if err != nil {
return err
}
for _, rule := range rules {
for _, entry := range rule.Entries {
entry.Log.Attach(c)
}
}
httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
return Logger{Next: next, Rules: rules, ErrorFunc: httpserver.DefaultErrorFunc}
})
return nil
}
func logParse(c *caddy.Controller) ([]*Rule, error) {
var rules []*Rule
for c.Next() {
args := c.RemainingArgs()
		logRoller := httpserver.DefaultLogRoller()
for c.NextBlock() {
what := c.Val()
if !c.NextArg() {
return nil, c.ArgErr()
}
where := c.Val()
// only support roller related options inside a block
if !httpserver.IsLogRollerSubdirective(what) {
return nil, c.ArgErr()
}
if err := httpserver.ParseRoller(logRoller, what, where); err != nil {
return nil, err
}
}
if len(args) == 0 {
// Nothing specified; use defaults
rules = appendEntry(rules, "/", &Entry{
Log: &httpserver.Logger{
Output: DefaultLogFilename,
Roller: logRoller,
},
Format: DefaultLogFormat,
})
} else if len(args) == 1 {
// Only an output file specified
rules = appendEntry(rules, "/", &Entry{
Log: &httpserver.Logger{
Output: args[0],
Roller: logRoller,
},
Format: DefaultLogFormat,
})
} else {
// Path scope, output file, and maybe a format specified
format := DefaultLogFormat
if len(args) > 2 {
format = strings.Replace(args[2], "{common}", CommonLogFormat, -1)
format = strings.Replace(format, "{combined}", CombinedLogFormat, -1)
}
rules = appendEntry(rules, args[0], &Entry{
Log: &httpserver.Logger{
Output: args[1],
Roller: logRoller,
},
Format: format,
})
}
}
return rules, nil
}
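// A minimal sketch of the Caddyfile forms the argument counts above accept
// (paths and file names are hypothetical):
//
//	log                          // 0 args: default scope "/" and log file
//	log access.log               // 1 arg: output file only
//	log /api api.log {combined}  // 2-3 args: scope, file, optional format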
func appendEntry(rules []*Rule, pathScope string, entry *Entry) []*Rule {
for _, rule := range rules {
if rule.PathScope == pathScope {
rule.Entries = append(rule.Entries, entry)
return rules
}
}
rules = append(rules, &Rule{
PathScope: pathScope,
Entries: []*Entry{entry},
})
return rules
}
| 1 | 10,848 | I prefer to translate these `else if` into `switch` for more readability. | caddyserver-caddy | go |
@@ -3,12 +3,13 @@ class Api::V1::StatusesController < ApiController
def create
exercise.statuses.create!(user: resource_owner, state: params[:state])
+ exercise.update_trails_state_for(resource_owner)
render nothing: true
end
private
def exercise
- Exercise.find_by!(uuid: params[:exercise_uuid])
+ @exercise ||= Exercise.find_by!(uuid: params[:exercise_uuid])
end
end | 1 | class Api::V1::StatusesController < ApiController
doorkeeper_for :all
def create
exercise.statuses.create!(user: resource_owner, state: params[:state])
render nothing: true
end
private
def exercise
Exercise.find_by!(uuid: params[:exercise_uuid])
end
end
| 1 | 12,496 | This can also be a local variable in `create`. | thoughtbot-upcase | rb |
@@ -1273,6 +1273,19 @@ func updateRepository(e Engine, repo *Repository, visibilityChanged bool) (err e
}
}
+ // Create/Remove git-daemon-export-ok for git-daemon...
+ daemonExportFile := strings.Join([]string{repo.RepoPath(), `git-daemon-export-ok`}, "/")
+ if repo.IsPrivate {
+ // NOTE: Gogs doesn't actually care about this file so we don't do any error-checking :D
+ os.Remove(daemonExportFile)
+ } else {
+ // NOTE: Gogs only cares to check errors so we don't get other errors by closing a file that isn't open...
+ f, err := os.Create(daemonExportFile)
+ if err == nil {
+ f.Close()
+ }
+ }
+
forkRepos, err := getRepositoriesByForkID(e, repo.ID)
if err != nil {
return fmt.Errorf("getRepositoriesByForkID: %v", err) | 1 | // Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (
"bytes"
"errors"
"fmt"
"html/template"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"sort"
"strings"
"sync"
"time"
"unicode/utf8"
"github.com/Unknwon/cae/zip"
"github.com/Unknwon/com"
"github.com/go-xorm/xorm"
"github.com/mcuadros/go-version"
"gopkg.in/ini.v1"
git "github.com/gogits/git-module"
api "github.com/gogits/go-gogs-client"
"github.com/gogits/gogs/modules/bindata"
"github.com/gogits/gogs/modules/log"
"github.com/gogits/gogs/modules/markdown"
"github.com/gogits/gogs/modules/process"
"github.com/gogits/gogs/modules/setting"
)
const (
_TPL_UPDATE_HOOK = "#!/usr/bin/env %s\n%s update $1 $2 $3 --config='%s'\n"
)
var (
ErrRepoFileNotExist = errors.New("Repository file does not exist")
ErrRepoFileNotLoaded = errors.New("Repository file not loaded")
ErrMirrorNotExist = errors.New("Mirror does not exist")
ErrInvalidReference = errors.New("Invalid reference specified")
ErrNameEmpty = errors.New("Name is empty")
)
var (
Gitignores, Licenses, Readmes []string
// Maximum items per page in forks, watchers and stars of a repo
ItemsPerPage = 40
)
func LoadRepoConfig() {
// Load .gitignore and license files and readme templates.
types := []string{"gitignore", "license", "readme"}
typeFiles := make([][]string, 3)
for i, t := range types {
files, err := bindata.AssetDir("conf/" + t)
if err != nil {
log.Fatal(4, "Fail to get %s files: %v", t, err)
}
customPath := path.Join(setting.CustomPath, "conf", t)
if com.IsDir(customPath) {
customFiles, err := com.StatDir(customPath)
if err != nil {
log.Fatal(4, "Fail to get custom %s files: %v", t, err)
}
for _, f := range customFiles {
if !com.IsSliceContainsStr(files, f) {
files = append(files, f)
}
}
}
typeFiles[i] = files
}
Gitignores = typeFiles[0]
Licenses = typeFiles[1]
Readmes = typeFiles[2]
sort.Strings(Gitignores)
sort.Strings(Licenses)
sort.Strings(Readmes)
}
func NewRepoContext() {
zip.Verbose = false
// Check Git installation.
if _, err := exec.LookPath("git"); err != nil {
log.Fatal(4, "Fail to test 'git' command: %v (forgotten install?)", err)
}
// Check Git version.
gitVer, err := git.BinVersion()
if err != nil {
log.Fatal(4, "Fail to get Git version: %v", err)
}
log.Info("Git Version: %s", gitVer)
if version.Compare("1.7.1", gitVer, ">") {
log.Fatal(4, "Gogs requires Git version greater or equal to 1.7.1")
}
// Git requires setting user.name and user.email in order to commit changes.
for configKey, defaultValue := range map[string]string{"user.name": "Gogs", "user.email": "gogs@fake.local"} {
if stdout, stderr, err := process.Exec("NewRepoContext(get setting)", "git", "config", "--get", configKey); err != nil || strings.TrimSpace(stdout) == "" {
// ExitError indicates this config is not set
if _, ok := err.(*exec.ExitError); ok || strings.TrimSpace(stdout) == "" {
if _, stderr, gerr := process.Exec("NewRepoContext(set "+configKey+")", "git", "config", "--global", configKey, defaultValue); gerr != nil {
log.Fatal(4, "Fail to set git %s(%s): %s", configKey, gerr, stderr)
}
log.Info("Git config %s set to %s", configKey, defaultValue)
} else {
log.Fatal(4, "Fail to get git %s(%s): %s", configKey, err, stderr)
}
}
}
// Set git some configurations.
if _, stderr, err := process.Exec("NewRepoContext(git config --global core.quotepath false)",
"git", "config", "--global", "core.quotepath", "false"); err != nil {
log.Fatal(4, "Fail to execute 'git config --global core.quotepath false': %s", stderr)
}
RemoveAllWithNotice("Clean up repository temporary data", filepath.Join(setting.AppDataPath, "tmp"))
}
// Repository represents a git repository.
type Repository struct {
ID int64 `xorm:"pk autoincr"`
OwnerID int64 `xorm:"UNIQUE(s)"`
Owner *User `xorm:"-"`
LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"`
Name string `xorm:"INDEX NOT NULL"`
Description string
Website string
DefaultBranch string
NumWatches int
NumStars int
NumForks int
NumIssues int
NumClosedIssues int
NumOpenIssues int `xorm:"-"`
NumPulls int
NumClosedPulls int
NumOpenPulls int `xorm:"-"`
NumMilestones int `xorm:"NOT NULL DEFAULT 0"`
NumClosedMilestones int `xorm:"NOT NULL DEFAULT 0"`
NumOpenMilestones int `xorm:"-"`
NumTags int `xorm:"-"`
IsPrivate bool
IsBare bool
IsMirror bool
*Mirror `xorm:"-"`
// Advanced settings
EnableWiki bool `xorm:"NOT NULL DEFAULT true"`
EnableExternalWiki bool
ExternalWikiURL string
EnableIssues bool `xorm:"NOT NULL DEFAULT true"`
EnableExternalTracker bool
ExternalTrackerFormat string
ExternalTrackerStyle string
ExternalMetas map[string]string `xorm:"-"`
EnablePulls bool `xorm:"NOT NULL DEFAULT true"`
IsFork bool `xorm:"NOT NULL DEFAULT false"`
ForkID int64
BaseRepo *Repository `xorm:"-"`
Created time.Time `xorm:"-"`
CreatedUnix int64
Updated time.Time `xorm:"-"`
UpdatedUnix int64
}
func (repo *Repository) BeforeInsert() {
repo.CreatedUnix = time.Now().UTC().Unix()
repo.UpdatedUnix = repo.CreatedUnix
}
func (repo *Repository) BeforeUpdate() {
repo.UpdatedUnix = time.Now().UTC().Unix()
}
func (repo *Repository) AfterSet(colName string, _ xorm.Cell) {
switch colName {
case "default_branch":
// FIXME: use models migration to solve all at once.
if len(repo.DefaultBranch) == 0 {
repo.DefaultBranch = "master"
}
case "num_closed_issues":
repo.NumOpenIssues = repo.NumIssues - repo.NumClosedIssues
case "num_closed_pulls":
repo.NumOpenPulls = repo.NumPulls - repo.NumClosedPulls
case "num_closed_milestones":
repo.NumOpenMilestones = repo.NumMilestones - repo.NumClosedMilestones
case "external_tracker_style":
if len(repo.ExternalTrackerStyle) == 0 {
repo.ExternalTrackerStyle = markdown.ISSUE_NAME_STYLE_NUMERIC
}
case "created_unix":
repo.Created = time.Unix(repo.CreatedUnix, 0).Local()
case "updated_unix":
repo.Updated = time.Unix(repo.UpdatedUnix, 0)
}
}
func (repo *Repository) getOwner(e Engine) (err error) {
if repo.Owner != nil {
return nil
}
repo.Owner, err = getUserByID(e, repo.OwnerID)
return err
}
func (repo *Repository) GetOwner() error {
return repo.getOwner(x)
}
func (repo *Repository) mustOwner(e Engine) *User {
if err := repo.getOwner(e); err != nil {
return &User{
Name: "error",
FullName: err.Error(),
}
}
return repo.Owner
}
// MustOwner always returns a valid *User object to avoid
// conceptually impossible error handling.
// It creates a fake object that contains error deftail
// when error occurs.
func (repo *Repository) MustOwner() *User {
return repo.mustOwner(x)
}
// ComposeMetas composes a map of metas for rendering external issue tracker URL.
func (repo *Repository) ComposeMetas() map[string]string {
if !repo.EnableExternalTracker {
return nil
} else if repo.ExternalMetas == nil {
repo.ExternalMetas = map[string]string{
"format": repo.ExternalTrackerFormat,
"user": repo.MustOwner().Name,
"repo": repo.Name,
}
switch repo.ExternalTrackerStyle {
case markdown.ISSUE_NAME_STYLE_ALPHANUMERIC:
repo.ExternalMetas["style"] = markdown.ISSUE_NAME_STYLE_ALPHANUMERIC
default:
repo.ExternalMetas["style"] = markdown.ISSUE_NAME_STYLE_NUMERIC
}
}
return repo.ExternalMetas
}
// DeleteWiki removes the actual and local copy of repository wiki.
func (repo *Repository) DeleteWiki() {
wikiPaths := []string{repo.WikiPath(), repo.LocalWikiPath()}
for _, wikiPath := range wikiPaths {
RemoveAllWithNotice("Delete repository wiki", wikiPath)
}
}
// GetAssignees returns all users that have write access of repository.
func (repo *Repository) GetAssignees() (_ []*User, err error) {
if err = repo.GetOwner(); err != nil {
return nil, err
}
accesses := make([]*Access, 0, 10)
if err = x.Where("repo_id=? AND mode>=?", repo.ID, ACCESS_MODE_WRITE).Find(&accesses); err != nil {
return nil, err
}
users := make([]*User, 0, len(accesses)+1) // Just waste 1 unit does not matter.
if !repo.Owner.IsOrganization() {
users = append(users, repo.Owner)
}
var u *User
for i := range accesses {
u, err = GetUserByID(accesses[i].UserID)
if err != nil {
return nil, err
}
users = append(users, u)
}
return users, nil
}
// GetAssigneeByID returns the user that has write access of repository by given ID.
func (repo *Repository) GetAssigneeByID(userID int64) (*User, error) {
return GetAssigneeByID(repo, userID)
}
// GetMilestoneByID returns the milestone belongs to repository by given ID.
func (repo *Repository) GetMilestoneByID(milestoneID int64) (*Milestone, error) {
return GetRepoMilestoneByID(repo.ID, milestoneID)
}
// IssueStats returns number of open and closed repository issues by given filter mode.
func (repo *Repository) IssueStats(uid int64, filterMode int, isPull bool) (int64, int64) {
return GetRepoIssueStats(repo.ID, uid, filterMode, isPull)
}
func (repo *Repository) GetMirror() (err error) {
repo.Mirror, err = GetMirror(repo.ID)
return err
}
func (repo *Repository) GetBaseRepo() (err error) {
if !repo.IsFork {
return nil
}
repo.BaseRepo, err = GetRepositoryByID(repo.ForkID)
return err
}
func (repo *Repository) repoPath(e Engine) string {
return RepoPath(repo.mustOwner(e).Name, repo.Name)
}
func (repo *Repository) RepoPath() string {
return repo.repoPath(x)
}
func (repo *Repository) GitConfigPath() string {
return filepath.Join(repo.RepoPath(), "config")
}
func (repo *Repository) RepoLink() string {
return setting.AppSubUrl + "/" + repo.MustOwner().Name + "/" + repo.Name
}
func (repo *Repository) RepoRelLink() string {
return "/" + repo.MustOwner().Name + "/" + repo.Name
}
func (repo *Repository) ComposeCompareURL(oldCommitID, newCommitID string) string {
return fmt.Sprintf("%s/%s/compare/%s...%s", repo.MustOwner().Name, repo.Name, oldCommitID, newCommitID)
}
func (repo *Repository) FullRepoLink() string {
return setting.AppUrl + repo.MustOwner().Name + "/" + repo.Name
}
func (repo *Repository) HasAccess(u *User) bool {
has, _ := HasAccess(u, repo, ACCESS_MODE_READ)
return has
}
func (repo *Repository) IsOwnedBy(userID int64) bool {
return repo.OwnerID == userID
}
// CanBeForked returns true if repository meets the requirements of being forked.
func (repo *Repository) CanBeForked() bool {
return !repo.IsBare
}
// CanEnablePulls returns true if repository meets the requirements of accepting pulls.
func (repo *Repository) CanEnablePulls() bool {
return !repo.IsMirror
}
// AllowPulls returns true if repository meets the requirements of accepting pulls and has them enabled.
func (repo *Repository) AllowsPulls() bool {
return repo.CanEnablePulls() && repo.EnablePulls
}
func (repo *Repository) NextIssueIndex() int64 {
return int64(repo.NumIssues+repo.NumPulls) + 1
}
var (
DescPattern = regexp.MustCompile(`https?://\S+`)
)
// DescriptionHtml does special handles to description and return HTML string.
func (repo *Repository) DescriptionHtml() template.HTML {
sanitize := func(s string) string {
return fmt.Sprintf(`<a href="%[1]s" target="_blank">%[1]s</a>`, s)
}
return template.HTML(DescPattern.ReplaceAllStringFunc(markdown.Sanitizer.Sanitize(repo.Description), sanitize))
}
func (repo *Repository) LocalCopyPath() string {
return path.Join(setting.AppDataPath, "tmp/local", com.ToStr(repo.ID))
}
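// updateLocalCopy clones the repository into localPath when it does not exist
// yet, and pulls from all remotes otherwise.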
func updateLocalCopy(repoPath, localPath string) error {
if !com.IsExist(localPath) {
if err := git.Clone(repoPath, localPath, git.CloneRepoOptions{
Timeout: time.Duration(setting.Git.Timeout.Clone) * time.Second,
}); err != nil {
return fmt.Errorf("Clone: %v", err)
}
} else {
if err := git.Pull(localPath, git.PullRemoteOptions{
All: true,
Timeout: time.Duration(setting.Git.Timeout.Pull) * time.Second,
}); err != nil {
return fmt.Errorf("Pull: %v", err)
}
}
return nil
}
// UpdateLocalCopy makes sure the local copy of the repository is up-to-date.
func (repo *Repository) UpdateLocalCopy() error {
return updateLocalCopy(repo.RepoPath(), repo.LocalCopyPath())
}
// PatchPath returns the corresponding patch file path of the repository by the given issue index.
func (repo *Repository) PatchPath(index int64) (string, error) {
if err := repo.GetOwner(); err != nil {
return "", err
}
return filepath.Join(RepoPath(repo.Owner.Name, repo.Name), "pulls", com.ToStr(index)+".patch"), nil
}
// SavePatch saves patch data to the corresponding location by the given issue index.
func (repo *Repository) SavePatch(index int64, patch []byte) error {
patchPath, err := repo.PatchPath(index)
if err != nil {
return fmt.Errorf("PatchPath: %v", err)
}
os.MkdirAll(filepath.Dir(patchPath), os.ModePerm)
if err = ioutil.WriteFile(patchPath, patch, 0644); err != nil {
return fmt.Errorf("WriteFile: %v", err)
}
return nil
}
// ComposePayload composes and returns *api.PayloadRepo corresponding to the repository.
func (repo *Repository) ComposePayload() *api.PayloadRepo {
cl := repo.CloneLink()
return &api.PayloadRepo{
ID: repo.ID,
Name: repo.Name,
URL: repo.FullRepoLink(),
SSHURL: cl.SSH,
CloneURL: cl.HTTPS,
Description: repo.Description,
Website: repo.Website,
Watchers: repo.NumWatches,
Owner: &api.PayloadAuthor{
Name: repo.MustOwner().DisplayName(),
Email: repo.MustOwner().Email,
UserName: repo.MustOwner().Name,
},
Private: repo.IsPrivate,
DefaultBranch: repo.DefaultBranch,
}
}
func isRepositoryExist(e Engine, u *User, repoName string) (bool, error) {
has, err := e.Get(&Repository{
OwnerID: u.Id,
LowerName: strings.ToLower(repoName),
})
return has && com.IsDir(RepoPath(u.Name, repoName)), err
}
// IsRepositoryExist returns true if the repository with the given name under the user already exists.
func IsRepositoryExist(u *User, repoName string) (bool, error) {
return isRepositoryExist(x, u, repoName)
}
// CloneLink represents different types of clone URLs of repository.
type CloneLink struct {
SSH string
HTTPS string
Git string
}
func (repo *Repository) cloneLink(isWiki bool) *CloneLink {
repoName := repo.Name
if isWiki {
repoName += ".wiki"
}
repo.Owner = repo.MustOwner()
cl := new(CloneLink)
if setting.SSH.Port != 22 {
cl.SSH = fmt.Sprintf("ssh://%s@%s:%d/%s/%s.git", setting.RunUser, setting.SSH.Domain, setting.SSH.Port, repo.Owner.Name, repoName)
} else {
cl.SSH = fmt.Sprintf("%s@%s:%s/%s.git", setting.RunUser, setting.SSH.Domain, repo.Owner.Name, repoName)
}
cl.HTTPS = fmt.Sprintf("%s%s/%s.git", setting.AppUrl, repo.Owner.Name, repoName)
return cl
}
// CloneLink returns clone URLs of repository.
func (repo *Repository) CloneLink() (cl *CloneLink) {
return repo.cloneLink(false)
}
var (
reservedNames = []string{"debug", "raw", "install", "api", "avatar", "user", "org", "help", "stars", "issues", "pulls", "commits", "repo", "template", "admin", "new"}
reservedPatterns = []string{"*.git", "*.keys", "*.wiki"}
)
// IsUsableName checks if name is reserved or pattern of name is not allowed.
func IsUsableName(name string) error {
name = strings.TrimSpace(strings.ToLower(name))
if utf8.RuneCountInString(name) == 0 {
return ErrNameEmpty
}
for i := range reservedNames {
if name == reservedNames[i] {
return ErrNameReserved{name}
}
}
for _, pat := range reservedPatterns {
if pat[0] == '*' && strings.HasSuffix(name, pat[1:]) ||
(pat[len(pat)-1] == '*' && strings.HasPrefix(name, pat[:len(pat)-1])) {
return ErrNamePatternNotAllowed{pat}
}
}
return nil
}
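// For example (illustrative): IsUsableName("api") returns ErrNameReserved,
// IsUsableName("demo.git") returns ErrNamePatternNotAllowed for the "*.git"
// pattern, and IsUsableName("my-repo") returns nil.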
// Mirror represents mirror information of a repository.
type Mirror struct {
ID int64 `xorm:"pk autoincr"`
RepoID int64
Repo *Repository `xorm:"-"`
Interval int // Hour.
Updated time.Time `xorm:"-"`
UpdatedUnix int64
NextUpdate time.Time `xorm:"-"`
NextUpdateUnix int64
address string `xorm:"-"`
}
func (m *Mirror) BeforeInsert() {
m.NextUpdateUnix = m.NextUpdate.UTC().Unix()
}
func (m *Mirror) BeforeUpdate() {
m.UpdatedUnix = time.Now().UTC().Unix()
m.NextUpdateUnix = m.NextUpdate.UTC().Unix()
}
func (m *Mirror) AfterSet(colName string, _ xorm.Cell) {
var err error
switch colName {
case "repo_id":
m.Repo, err = GetRepositoryByID(m.RepoID)
if err != nil {
log.Error(3, "GetRepositoryByID[%d]: %v", m.ID, err)
}
case "updated_unix":
m.Updated = time.Unix(m.UpdatedUnix, 0).Local()
case "next_updated_unix":
m.NextUpdate = time.Unix(m.NextUpdateUnix, 0).Local()
}
}
func (m *Mirror) readAddress() {
if len(m.address) > 0 {
return
}
cfg, err := ini.Load(m.Repo.GitConfigPath())
if err != nil {
log.Error(4, "Load: %v", err)
return
}
m.address = cfg.Section("remote \"origin\"").Key("url").Value()
}
// HandleCloneUserCredentials replaces user credentials from HTTP/HTTPS URL
// with placeholder <credentials>.
// Other forms of clone addresses are returned unchanged.
func HandleCloneUserCredentials(url string, mosaics bool) string {
i := strings.Index(url, "@")
if i == -1 {
return url
}
start := strings.Index(url, "://")
if start == -1 {
return url
}
if mosaics {
return url[:start+3] + "<credentials>" + url[i:]
}
return url[:start+3] + url[i+1:]
}
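// For example (illustrative values):
//	HandleCloneUserCredentials("https://user:[email protected]/repo.git", true)
//	// -> "https://<credentials>@try.gogs.io/repo.git"
//	HandleCloneUserCredentials("https://user:[email protected]/repo.git", false)
//	// -> "https://try.gogs.io/repo.git"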
// Address returns mirror address from Git repository config without credentials.
func (m *Mirror) Address() string {
m.readAddress()
return HandleCloneUserCredentials(m.address, false)
}
// FullAddress returns mirror address from Git repository config.
func (m *Mirror) FullAddress() string {
m.readAddress()
return m.address
}
// SaveAddress writes new address to Git repository config.
func (m *Mirror) SaveAddress(addr string) error {
configPath := m.Repo.GitConfigPath()
cfg, err := ini.Load(configPath)
if err != nil {
return fmt.Errorf("Load: %v", err)
}
cfg.Section("remote \"origin\"").Key("url").SetValue(addr)
return cfg.SaveToIndent(configPath, "\t")
}
func getMirror(e Engine, repoId int64) (*Mirror, error) {
m := &Mirror{RepoID: repoId}
has, err := e.Get(m)
if err != nil {
return nil, err
} else if !has {
return nil, ErrMirrorNotExist
}
return m, nil
}
// GetMirror returns mirror object by given repository ID.
func GetMirror(repoId int64) (*Mirror, error) {
return getMirror(x, repoId)
}
func updateMirror(e Engine, m *Mirror) error {
_, err := e.Id(m.ID).Update(m)
return err
}
func UpdateMirror(m *Mirror) error {
return updateMirror(x, m)
}
func DeleteMirrorByRepoID(repoID int64) error {
_, err := x.Delete(&Mirror{RepoID: repoID})
return err
}
func createUpdateHook(repoPath string) error {
return git.SetUpdateHook(repoPath,
fmt.Sprintf(_TPL_UPDATE_HOOK, setting.ScriptType, "\""+setting.AppPath+"\"", setting.CustomConf))
}
type MigrateRepoOptions struct {
Name string
Description string
IsPrivate bool
IsMirror bool
RemoteAddr string
}
// MigrateRepository migrates an existing repository from another project hosting service.
func MigrateRepository(u *User, opts MigrateRepoOptions) (*Repository, error) {
repo, err := CreateRepository(u, CreateRepoOptions{
Name: opts.Name,
Description: opts.Description,
IsPrivate: opts.IsPrivate,
IsMirror: opts.IsMirror,
})
if err != nil {
return nil, err
}
	// Clone to a temporary path and do the init commit.
tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("%d", time.Now().Nanosecond()))
os.MkdirAll(tmpDir, os.ModePerm)
repoPath := RepoPath(u.Name, opts.Name)
if u.IsOrganization() {
t, err := u.GetOwnerTeam()
if err != nil {
return nil, err
}
repo.NumWatches = t.NumMembers
} else {
repo.NumWatches = 1
}
os.RemoveAll(repoPath)
if err = git.Clone(opts.RemoteAddr, repoPath, git.CloneRepoOptions{
Mirror: true,
Quiet: true,
Timeout: time.Duration(setting.Git.Timeout.Migrate) * time.Second,
}); err != nil {
return repo, fmt.Errorf("Clone: %v", err)
}
// Check if repository is empty.
_, stderr, err := com.ExecCmdDir(repoPath, "git", "log", "-1")
if err != nil {
if strings.Contains(stderr, "fatal: bad default revision 'HEAD'") {
repo.IsBare = true
} else {
return repo, fmt.Errorf("check bare: %v - %s", err, stderr)
}
}
if !repo.IsBare {
// Try to get HEAD branch and set it as default branch.
gitRepo, err := git.OpenRepository(repoPath)
if err != nil {
return repo, fmt.Errorf("OpenRepository: %v", err)
}
headBranch, err := gitRepo.GetHEADBranch()
if err != nil {
return repo, fmt.Errorf("GetHEADBranch: %v", err)
}
if headBranch != nil {
repo.DefaultBranch = headBranch.Name
}
}
if opts.IsMirror {
if _, err = x.InsertOne(&Mirror{
RepoID: repo.ID,
Interval: 24,
NextUpdate: time.Now().Add(24 * time.Hour),
}); err != nil {
return repo, fmt.Errorf("InsertOne: %v", err)
}
repo.IsMirror = true
return repo, UpdateRepository(repo, false)
}
return CleanUpMigrateInfo(repo, repoPath)
}
// CleanUpMigrateInfo finishes migrating a repository with things that don't need to be done for mirrors.
func CleanUpMigrateInfo(repo *Repository, repoPath string) (*Repository, error) {
if err := createUpdateHook(repoPath); err != nil {
return repo, fmt.Errorf("createUpdateHook: %v", err)
}
// Clean up mirror info which prevents "push --all".
// This also removes possible user credentials.
configPath := repo.GitConfigPath()
cfg, err := ini.Load(configPath)
if err != nil {
return repo, fmt.Errorf("open config file: %v", err)
}
cfg.DeleteSection("remote \"origin\"")
if err = cfg.SaveToIndent(configPath, "\t"); err != nil {
return repo, fmt.Errorf("save config file: %v", err)
}
return repo, UpdateRepository(repo, false)
}
// initRepoCommit temporarily changes the working directory to perform the initial commit.
func initRepoCommit(tmpPath string, sig *git.Signature) (err error) {
var stderr string
if _, stderr, err = process.ExecDir(-1,
tmpPath, fmt.Sprintf("initRepoCommit (git add): %s", tmpPath),
"git", "add", "--all"); err != nil {
return fmt.Errorf("git add: %s", stderr)
}
if _, stderr, err = process.ExecDir(-1,
tmpPath, fmt.Sprintf("initRepoCommit (git commit): %s", tmpPath),
"git", "commit", fmt.Sprintf("--author='%s <%s>'", sig.Name, sig.Email),
"-m", "initial commit"); err != nil {
return fmt.Errorf("git commit: %s", stderr)
}
if _, stderr, err = process.ExecDir(-1,
tmpPath, fmt.Sprintf("initRepoCommit (git push): %s", tmpPath),
"git", "push", "origin", "master"); err != nil {
return fmt.Errorf("git push: %s", stderr)
}
return nil
}
type CreateRepoOptions struct {
Name string
Description string
Gitignores string
License string
Readme string
IsPrivate bool
IsMirror bool
AutoInit bool
}
func getRepoInitFile(tp, name string) ([]byte, error) {
relPath := path.Join("conf", tp, name)
// Use custom file when available.
customPath := path.Join(setting.CustomPath, relPath)
if com.IsFile(customPath) {
return ioutil.ReadFile(customPath)
}
return bindata.Asset(relPath)
}
func prepareRepoCommit(repo *Repository, tmpDir, repoPath string, opts CreateRepoOptions) error {
	// Clone to a temporary path and do the init commit.
_, stderr, err := process.Exec(
fmt.Sprintf("initRepository(git clone): %s", repoPath), "git", "clone", repoPath, tmpDir)
if err != nil {
return fmt.Errorf("git clone: %v - %s", err, stderr)
}
// README
data, err := getRepoInitFile("readme", opts.Readme)
if err != nil {
return fmt.Errorf("getRepoInitFile[%s]: %v", opts.Readme, err)
}
cloneLink := repo.CloneLink()
match := map[string]string{
"Name": repo.Name,
"Description": repo.Description,
"CloneURL.SSH": cloneLink.SSH,
"CloneURL.HTTPS": cloneLink.HTTPS,
}
if err = ioutil.WriteFile(filepath.Join(tmpDir, "README.md"),
[]byte(com.Expand(string(data), match)), 0644); err != nil {
return fmt.Errorf("write README.md: %v", err)
}
// .gitignore
if len(opts.Gitignores) > 0 {
var buf bytes.Buffer
names := strings.Split(opts.Gitignores, ",")
for _, name := range names {
data, err = getRepoInitFile("gitignore", name)
if err != nil {
return fmt.Errorf("getRepoInitFile[%s]: %v", name, err)
}
buf.WriteString("# ---> " + name + "\n")
buf.Write(data)
buf.WriteString("\n")
}
if buf.Len() > 0 {
if err = ioutil.WriteFile(filepath.Join(tmpDir, ".gitignore"), buf.Bytes(), 0644); err != nil {
return fmt.Errorf("write .gitignore: %v", err)
}
}
}
// LICENSE
if len(opts.License) > 0 {
data, err = getRepoInitFile("license", opts.License)
if err != nil {
return fmt.Errorf("getRepoInitFile[%s]: %v", opts.License, err)
}
if err = ioutil.WriteFile(filepath.Join(tmpDir, "LICENSE"), data, 0644); err != nil {
return fmt.Errorf("write LICENSE: %v", err)
}
}
return nil
}
// initRepository initializes a README and .gitignore if needed.
func initRepository(e Engine, repoPath string, u *User, repo *Repository, opts CreateRepoOptions) (err error) {
// Somehow the directory could exist.
if com.IsExist(repoPath) {
return fmt.Errorf("initRepository: path already exists: %s", repoPath)
}
// Init bare new repository.
if err = git.InitRepository(repoPath, true); err != nil {
return fmt.Errorf("InitRepository: %v", err)
} else if err = createUpdateHook(repoPath); err != nil {
return fmt.Errorf("createUpdateHook: %v", err)
}
tmpDir := filepath.Join(os.TempDir(), "gogs-"+repo.Name+"-"+com.ToStr(time.Now().Nanosecond()))
// Initialize repository according to user's choice.
if opts.AutoInit {
os.MkdirAll(tmpDir, os.ModePerm)
defer os.RemoveAll(tmpDir)
if err = prepareRepoCommit(repo, tmpDir, repoPath, opts); err != nil {
return fmt.Errorf("prepareRepoCommit: %v", err)
}
// Apply changes and commit.
if err = initRepoCommit(tmpDir, u.NewGitSig()); err != nil {
return fmt.Errorf("initRepoCommit: %v", err)
}
}
	// Re-fetch the repository from the database before updating it (otherwise it
	// would overwrite changes that were made earlier via SQL).
if repo, err = getRepositoryByID(e, repo.ID); err != nil {
return fmt.Errorf("getRepositoryByID: %v", err)
}
if !opts.AutoInit {
repo.IsBare = true
}
repo.DefaultBranch = "master"
if err = updateRepository(e, repo, false); err != nil {
return fmt.Errorf("updateRepository: %v", err)
}
return nil
}
func createRepository(e *xorm.Session, u *User, repo *Repository) (err error) {
if err = IsUsableName(repo.Name); err != nil {
return err
}
has, err := isRepositoryExist(e, u, repo.Name)
if err != nil {
return fmt.Errorf("IsRepositoryExist: %v", err)
} else if has {
return ErrRepoAlreadyExist{u.Name, repo.Name}
}
if _, err = e.Insert(repo); err != nil {
return err
}
u.NumRepos++
// Remember visibility preference.
u.LastRepoVisibility = repo.IsPrivate
if err = updateUser(e, u); err != nil {
return fmt.Errorf("updateUser: %v", err)
}
// Give access to all members in owner team.
if u.IsOrganization() {
t, err := u.getOwnerTeam(e)
if err != nil {
return fmt.Errorf("getOwnerTeam: %v", err)
} else if err = t.addRepository(e, repo); err != nil {
return fmt.Errorf("addRepository: %v", err)
}
} else {
		// For organizations this is already done in the addRepository method.
if err = repo.recalculateAccesses(e); err != nil {
return fmt.Errorf("recalculateAccesses: %v", err)
}
}
if err = watchRepo(e, u.Id, repo.ID, true); err != nil {
return fmt.Errorf("watchRepo: %v", err)
} else if err = newRepoAction(e, u, repo); err != nil {
return fmt.Errorf("newRepoAction: %v", err)
}
return nil
}
// CreateRepository creates a repository for given user or organization.
func CreateRepository(u *User, opts CreateRepoOptions) (_ *Repository, err error) {
if !u.CanCreateRepo() {
return nil, ErrReachLimitOfRepo{u.MaxRepoCreation}
}
repo := &Repository{
OwnerID: u.Id,
Owner: u,
Name: opts.Name,
LowerName: strings.ToLower(opts.Name),
Description: opts.Description,
IsPrivate: opts.IsPrivate,
EnableWiki: true,
EnableIssues: true,
EnablePulls: true,
}
sess := x.NewSession()
defer sessionRelease(sess)
if err = sess.Begin(); err != nil {
return nil, err
}
if err = createRepository(sess, u, repo); err != nil {
return nil, err
}
	// No need to init a bare repository for mirrors.
if !opts.IsMirror {
repoPath := RepoPath(u.Name, repo.Name)
if err = initRepository(sess, repoPath, u, repo, opts); err != nil {
if err2 := os.RemoveAll(repoPath); err2 != nil {
log.Error(4, "initRepository: %v", err)
return nil, fmt.Errorf(
"delete repo directory %s/%s failed(2): %v", u.Name, repo.Name, err2)
}
return nil, fmt.Errorf("initRepository: %v", err)
}
_, stderr, err := process.ExecDir(-1,
repoPath, fmt.Sprintf("CreateRepository(git update-server-info): %s", repoPath),
"git", "update-server-info")
if err != nil {
return nil, errors.New("CreateRepository(git update-server-info): " + stderr)
}
}
return repo, sess.Commit()
}
func countRepositories(showPrivate bool) int64 {
sess := x.NewSession()
if !showPrivate {
sess.Where("is_private=?", false)
}
count, err := sess.Count(new(Repository))
if err != nil {
log.Error(4, "countRepositories: %v", err)
}
return count
}
// CountRepositories returns number of repositories.
func CountRepositories() int64 {
return countRepositories(true)
}
// CountPublicRepositories returns number of public repositories.
func CountPublicRepositories() int64 {
return countRepositories(false)
}
func Repositories(page, pageSize int) (_ []*Repository, err error) {
repos := make([]*Repository, 0, pageSize)
return repos, x.Limit(pageSize, (page-1)*pageSize).Asc("id").Find(&repos)
}
// RepositoriesWithUsers returns the repositories in the given page with their owners loaded.
func RepositoriesWithUsers(page, pageSize int) (_ []*Repository, err error) {
repos, err := Repositories(page, pageSize)
if err != nil {
return nil, fmt.Errorf("Repositories: %v", err)
}
for i := range repos {
if err = repos[i].GetOwner(); err != nil {
return nil, err
}
}
return repos, nil
}
// RepoPath returns repository path by given user and repository name.
func RepoPath(userName, repoName string) string {
return filepath.Join(UserPath(userName), strings.ToLower(repoName)+".git")
}
// TransferOwnership transfers all corresponding settings from the old owner to the new one.
func TransferOwnership(u *User, newOwnerName string, repo *Repository) error {
newOwner, err := GetUserByName(newOwnerName)
if err != nil {
return fmt.Errorf("get new owner '%s': %v", newOwnerName, err)
}
// Check if new owner has repository with same name.
has, err := IsRepositoryExist(newOwner, repo.Name)
if err != nil {
return fmt.Errorf("IsRepositoryExist: %v", err)
} else if has {
return ErrRepoAlreadyExist{newOwnerName, repo.Name}
}
sess := x.NewSession()
defer sessionRelease(sess)
if err = sess.Begin(); err != nil {
return fmt.Errorf("sess.Begin: %v", err)
}
owner := repo.Owner
	// Note: we have to set the value here to make sure the access recalculation
	// is based on the new owner.
repo.OwnerID = newOwner.Id
repo.Owner = newOwner
// Update repository.
if _, err := sess.Id(repo.ID).Update(repo); err != nil {
return fmt.Errorf("update owner: %v", err)
}
// Remove redundant collaborators.
collaborators, err := repo.getCollaborators(sess)
if err != nil {
return fmt.Errorf("getCollaborators: %v", err)
}
// Dummy object.
collaboration := &Collaboration{RepoID: repo.ID}
for _, c := range collaborators {
collaboration.UserID = c.Id
if c.Id == newOwner.Id || newOwner.IsOrgMember(c.Id) {
if _, err = sess.Delete(collaboration); err != nil {
return fmt.Errorf("remove collaborator '%d': %v", c.Id, err)
}
}
}
// Remove old team-repository relations.
if owner.IsOrganization() {
if err = owner.getTeams(sess); err != nil {
return fmt.Errorf("getTeams: %v", err)
}
for _, t := range owner.Teams {
if !t.hasRepository(sess, repo.ID) {
continue
}
t.NumRepos--
if _, err := sess.Id(t.ID).AllCols().Update(t); err != nil {
return fmt.Errorf("decrease team repository count '%d': %v", t.ID, err)
}
}
if err = owner.removeOrgRepo(sess, repo.ID); err != nil {
return fmt.Errorf("removeOrgRepo: %v", err)
}
}
if newOwner.IsOrganization() {
t, err := newOwner.getOwnerTeam(sess)
if err != nil {
return fmt.Errorf("getOwnerTeam: %v", err)
} else if err = t.addRepository(sess, repo); err != nil {
return fmt.Errorf("add to owner team: %v", err)
}
} else {
		// For organizations this is already done in the addRepository method.
if err = repo.recalculateAccesses(sess); err != nil {
return fmt.Errorf("recalculateAccesses: %v", err)
}
}
// Update repository count.
if _, err = sess.Exec("UPDATE `user` SET num_repos=num_repos+1 WHERE id=?", newOwner.Id); err != nil {
return fmt.Errorf("increase new owner repository count: %v", err)
} else if _, err = sess.Exec("UPDATE `user` SET num_repos=num_repos-1 WHERE id=?", owner.Id); err != nil {
return fmt.Errorf("decrease old owner repository count: %v", err)
}
if err = watchRepo(sess, newOwner.Id, repo.ID, true); err != nil {
return fmt.Errorf("watchRepo: %v", err)
} else if err = transferRepoAction(sess, u, owner, newOwner, repo); err != nil {
return fmt.Errorf("transferRepoAction: %v", err)
}
// Rename remote repository to new path and delete local copy.
if err = os.Rename(RepoPath(owner.Name, repo.Name), RepoPath(newOwner.Name, repo.Name)); err != nil {
return fmt.Errorf("rename repository directory: %v", err)
}
RemoveAllWithNotice("Delete repository local copy", repo.LocalCopyPath())
// Rename remote wiki repository to new path and delete local copy.
wikiPath := WikiPath(owner.Name, repo.Name)
if com.IsExist(wikiPath) {
RemoveAllWithNotice("Delete repository wiki local copy", repo.LocalWikiPath())
if err = os.Rename(wikiPath, WikiPath(newOwner.Name, repo.Name)); err != nil {
return fmt.Errorf("rename repository wiki: %v", err)
}
}
return sess.Commit()
}
// ChangeRepositoryName changes all corresponding settings from the old repository name to the new one.
func ChangeRepositoryName(u *User, oldRepoName, newRepoName string) (err error) {
oldRepoName = strings.ToLower(oldRepoName)
newRepoName = strings.ToLower(newRepoName)
if err = IsUsableName(newRepoName); err != nil {
return err
}
has, err := IsRepositoryExist(u, newRepoName)
if err != nil {
return fmt.Errorf("IsRepositoryExist: %v", err)
} else if has {
return ErrRepoAlreadyExist{u.Name, newRepoName}
}
repo, err := GetRepositoryByName(u.Id, oldRepoName)
if err != nil {
return fmt.Errorf("GetRepositoryByName: %v", err)
}
// Change repository directory name.
if err = os.Rename(repo.RepoPath(), RepoPath(u.Name, newRepoName)); err != nil {
return fmt.Errorf("rename repository directory: %v", err)
}
wikiPath := repo.WikiPath()
if com.IsExist(wikiPath) {
if err = os.Rename(wikiPath, WikiPath(u.Name, newRepoName)); err != nil {
return fmt.Errorf("rename repository wiki: %v", err)
}
RemoveAllWithNotice("Delete repository wiki local copy", repo.LocalWikiPath())
}
return nil
}
func getRepositoriesByForkID(e Engine, forkID int64) ([]*Repository, error) {
repos := make([]*Repository, 0, 10)
return repos, e.Where("fork_id=?", forkID).Find(&repos)
}
// GetRepositoriesByForkID returns all repositories with given fork ID.
func GetRepositoriesByForkID(forkID int64) ([]*Repository, error) {
return getRepositoriesByForkID(x, forkID)
}
func updateRepository(e Engine, repo *Repository, visibilityChanged bool) (err error) {
repo.LowerName = strings.ToLower(repo.Name)
if len(repo.Description) > 255 {
repo.Description = repo.Description[:255]
}
if len(repo.Website) > 255 {
repo.Website = repo.Website[:255]
}
if _, err = e.Id(repo.ID).AllCols().Update(repo); err != nil {
return fmt.Errorf("update: %v", err)
}
if visibilityChanged {
if err = repo.getOwner(e); err != nil {
return fmt.Errorf("getOwner: %v", err)
}
if repo.Owner.IsOrganization() {
			// Organization repositories need to recalculate the access table when visibility is changed.
if err = repo.recalculateTeamAccesses(e, 0); err != nil {
return fmt.Errorf("recalculateTeamAccesses: %v", err)
}
}
forkRepos, err := getRepositoriesByForkID(e, repo.ID)
if err != nil {
return fmt.Errorf("getRepositoriesByForkID: %v", err)
}
for i := range forkRepos {
forkRepos[i].IsPrivate = repo.IsPrivate
if err = updateRepository(e, forkRepos[i], true); err != nil {
return fmt.Errorf("updateRepository[%d]: %v", forkRepos[i].ID, err)
}
}
}
return nil
}
func UpdateRepository(repo *Repository, visibilityChanged bool) (err error) {
sess := x.NewSession()
defer sessionRelease(sess)
if err = sess.Begin(); err != nil {
return err
}
	if err = updateRepository(sess, repo, visibilityChanged); err != nil {
return fmt.Errorf("updateRepository: %v", err)
}
return sess.Commit()
}
// DeleteRepository deletes a repository for a user or organization.
func DeleteRepository(uid, repoID int64) error {
repo := &Repository{ID: repoID, OwnerID: uid}
has, err := x.Get(repo)
if err != nil {
return err
} else if !has {
return ErrRepoNotExist{repoID, uid, ""}
}
	// In case the owner is an organization.
org, err := GetUserByID(uid)
if err != nil {
return err
}
if org.IsOrganization() {
if err = org.GetTeams(); err != nil {
return err
}
}
sess := x.NewSession()
defer sessionRelease(sess)
if err = sess.Begin(); err != nil {
return err
}
if org.IsOrganization() {
for _, t := range org.Teams {
if !t.hasRepository(sess, repoID) {
continue
} else if err = t.removeRepository(sess, repo, false); err != nil {
return err
}
}
}
if err = deleteBeans(sess,
&Repository{ID: repoID},
&Access{RepoID: repo.ID},
&Action{RepoID: repo.ID},
&Watch{RepoID: repoID},
&Star{RepoID: repoID},
&Mirror{RepoID: repoID},
&IssueUser{RepoID: repoID},
&Milestone{RepoID: repoID},
&Release{RepoID: repoID},
&Collaboration{RepoID: repoID},
&PullRequest{BaseRepoID: repoID},
); err != nil {
return fmt.Errorf("deleteBeans: %v", err)
}
// Delete comments and attachments.
issues := make([]*Issue, 0, 25)
attachmentPaths := make([]string, 0, len(issues))
if err = sess.Where("repo_id=?", repoID).Find(&issues); err != nil {
return err
}
for i := range issues {
if _, err = sess.Delete(&Comment{IssueID: issues[i].ID}); err != nil {
return err
}
attachments := make([]*Attachment, 0, 5)
if err = sess.Where("issue_id=?", issues[i].ID).Find(&attachments); err != nil {
return err
}
for j := range attachments {
attachmentPaths = append(attachmentPaths, attachments[j].LocalPath())
}
if _, err = sess.Delete(&Attachment{IssueID: issues[i].ID}); err != nil {
return err
}
}
if _, err = sess.Delete(&Issue{RepoID: repoID}); err != nil {
return err
}
if repo.IsFork {
if _, err = sess.Exec("UPDATE `repository` SET num_forks=num_forks-1 WHERE id=?", repo.ForkID); err != nil {
return fmt.Errorf("decrease fork count: %v", err)
}
}
if _, err = sess.Exec("UPDATE `user` SET num_repos=num_repos-1 WHERE id=?", uid); err != nil {
return err
}
// Remove repository files.
repoPath := repo.repoPath(sess)
RemoveAllWithNotice("Delete repository files", repoPath)
repo.DeleteWiki()
// Remove attachment files.
for i := range attachmentPaths {
RemoveAllWithNotice("Delete attachment", attachmentPaths[i])
}
if err = sess.Commit(); err != nil {
return fmt.Errorf("Commit: %v", err)
}
if repo.NumForks > 0 {
if repo.IsPrivate {
forkRepos, err := GetRepositoriesByForkID(repo.ID)
if err != nil {
return fmt.Errorf("getRepositoriesByForkID: %v", err)
}
for i := range forkRepos {
if err = DeleteRepository(forkRepos[i].OwnerID, forkRepos[i].ID); err != nil {
log.Error(4, "DeleteRepository [%d]: %v", forkRepos[i].ID, err)
}
}
} else {
if _, err = x.Exec("UPDATE `repository` SET fork_id=0,is_fork=? WHERE fork_id=?", false, repo.ID); err != nil {
log.Error(4, "reset 'fork_id' and 'is_fork': %v", err)
}
}
}
return nil
}
// GetRepositoryByRef returns a Repository specified by a GFM reference.
// See https://help.github.com/articles/writing-on-github#references for more information on the syntax.
func GetRepositoryByRef(ref string) (*Repository, error) {
n := strings.IndexByte(ref, byte('/'))
if n < 2 {
return nil, ErrInvalidReference
}
userName, repoName := ref[:n], ref[n+1:]
user, err := GetUserByName(userName)
if err != nil {
return nil, err
}
return GetRepositoryByName(user.Id, repoName)
}
// GetRepositoryByName returns the repository by given name under user if exists.
func GetRepositoryByName(uid int64, repoName string) (*Repository, error) {
repo := &Repository{
OwnerID: uid,
LowerName: strings.ToLower(repoName),
}
has, err := x.Get(repo)
if err != nil {
return nil, err
} else if !has {
return nil, ErrRepoNotExist{0, uid, repoName}
}
return repo, err
}
func getRepositoryByID(e Engine, id int64) (*Repository, error) {
repo := new(Repository)
has, err := e.Id(id).Get(repo)
if err != nil {
return nil, err
} else if !has {
return nil, ErrRepoNotExist{id, 0, ""}
}
return repo, nil
}
// GetRepositoryByID returns the repository by given id if exists.
func GetRepositoryByID(id int64) (*Repository, error) {
return getRepositoryByID(x, id)
}
// GetRepositories returns a list of repositories of given user.
func GetRepositories(uid int64, private bool) ([]*Repository, error) {
repos := make([]*Repository, 0, 10)
sess := x.Desc("updated_unix")
if !private {
sess.Where("is_private=?", false)
}
return repos, sess.Find(&repos, &Repository{OwnerID: uid})
}
// GetRecentUpdatedRepositories returns the list of repositories that are recently updated.
func GetRecentUpdatedRepositories(page, pageSize int) (repos []*Repository, err error) {
	return repos, x.Limit(pageSize, (page-1)*pageSize).
		Where("is_private=?", false).Desc("updated_unix").Find(&repos)
}
func getRepositoryCount(e Engine, u *User) (int64, error) {
	return e.Count(&Repository{OwnerID: u.Id})
}
// GetRepositoryCount returns the total number of repositories of user.
func GetRepositoryCount(u *User) (int64, error) {
return getRepositoryCount(x, u)
}
type SearchRepoOptions struct {
Keyword string
OwnerID int64
OrderBy string
Private bool // Include private repositories in results
Page int
PageSize int // Can be smaller than or equal to setting.ExplorePagingNum
}
// SearchRepositoryByName searches repositories by keyword, matched against part
// of the repository name. It returns results in the given range and the total
// number of results.
func SearchRepositoryByName(opts *SearchRepoOptions) (repos []*Repository, _ int64, _ error) {
if len(opts.Keyword) == 0 {
return repos, 0, nil
}
opts.Keyword = strings.ToLower(opts.Keyword)
if opts.PageSize <= 0 || opts.PageSize > setting.ExplorePagingNum {
opts.PageSize = setting.ExplorePagingNum
}
if opts.Page <= 0 {
opts.Page = 1
}
repos = make([]*Repository, 0, opts.PageSize)
// Append conditions
sess := x.Where("LOWER(lower_name) LIKE ?", "%"+opts.Keyword+"%")
if opts.OwnerID > 0 {
sess.And("owner_id = ?", opts.OwnerID)
}
if !opts.Private {
sess.And("is_private=?", false)
}
	countSess := *sess
count, err := countSess.Count(new(Repository))
if err != nil {
return nil, 0, fmt.Errorf("Count: %v", err)
}
if len(opts.OrderBy) > 0 {
sess.OrderBy(opts.OrderBy)
}
return repos, count, sess.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize).Find(&repos)
}
// DeleteRepositoryArchives deletes all repositories' archives.
func DeleteRepositoryArchives() error {
return x.Where("id > 0").Iterate(new(Repository),
func(idx int, bean interface{}) error {
repo := bean.(*Repository)
return os.RemoveAll(filepath.Join(repo.RepoPath(), "archives"))
})
}
func gatherMissingRepoRecords() ([]*Repository, error) {
repos := make([]*Repository, 0, 10)
if err := x.Where("id > 0").Iterate(new(Repository),
func(idx int, bean interface{}) error {
repo := bean.(*Repository)
if !com.IsDir(repo.RepoPath()) {
repos = append(repos, repo)
}
return nil
}); err != nil {
if err2 := CreateRepositoryNotice(fmt.Sprintf("gatherMissingRepoRecords: %v", err)); err2 != nil {
return nil, fmt.Errorf("CreateRepositoryNotice: %v", err)
}
}
return repos, nil
}
// DeleteMissingRepositories deletes all repository records that lost Git files.
func DeleteMissingRepositories() error {
repos, err := gatherMissingRepoRecords()
if err != nil {
return fmt.Errorf("gatherMissingRepoRecords: %v", err)
}
if len(repos) == 0 {
return nil
}
for _, repo := range repos {
log.Trace("Deleting %d/%d...", repo.OwnerID, repo.ID)
if err := DeleteRepository(repo.OwnerID, repo.ID); err != nil {
if err2 := CreateRepositoryNotice(fmt.Sprintf("DeleteRepository [%d]: %v", repo.ID, err)); err2 != nil {
return fmt.Errorf("CreateRepositoryNotice: %v", err)
}
}
}
return nil
}
// ReinitMissingRepositories reinitializes all repository records that lost Git files.
func ReinitMissingRepositories() error {
repos, err := gatherMissingRepoRecords()
if err != nil {
return fmt.Errorf("gatherMissingRepoRecords: %v", err)
}
if len(repos) == 0 {
return nil
}
for _, repo := range repos {
log.Trace("Initializing %d/%d...", repo.OwnerID, repo.ID)
if err := git.InitRepository(repo.RepoPath(), true); err != nil {
if err2 := CreateRepositoryNotice(fmt.Sprintf("InitRepository [%d]: %v", repo.ID, err)); err2 != nil {
return fmt.Errorf("CreateRepositoryNotice: %v", err)
}
}
}
return nil
}
// RewriteRepositoryUpdateHook rewrites all repositories' update hook.
func RewriteRepositoryUpdateHook() error {
return x.Where("id > 0").Iterate(new(Repository),
func(idx int, bean interface{}) error {
repo := bean.(*Repository)
return createUpdateHook(repo.RepoPath())
})
}
// statusPool represents a pool of true/false statuses keyed by name.
type statusPool struct {
lock sync.RWMutex
pool map[string]bool
}
// Start sets value of given name to true in the pool.
func (p *statusPool) Start(name string) {
p.lock.Lock()
defer p.lock.Unlock()
p.pool[name] = true
}
// Stop sets value of given name to false in the pool.
func (p *statusPool) Stop(name string) {
p.lock.Lock()
defer p.lock.Unlock()
p.pool[name] = false
}
// IsRunning checks if value of given name is set to true in the pool.
func (p *statusPool) IsRunning(name string) bool {
p.lock.RLock()
defer p.lock.RUnlock()
return p.pool[name]
}
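// Typical usage, as in the tasks below:
//	if taskStatusPool.IsRunning(name) {
//		return
//	}
//	taskStatusPool.Start(name)
//	defer taskStatusPool.Stop(name)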
// Prevent duplicate running tasks.
var taskStatusPool = &statusPool{
pool: make(map[string]bool),
}
const (
_MIRROR_UPDATE = "mirror_update"
_GIT_FSCK = "git_fsck"
	_CHECK_REPOS = "check_repos"
)
// MirrorUpdate checks and updates mirror repositories.
func MirrorUpdate() {
if taskStatusPool.IsRunning(_MIRROR_UPDATE) {
return
}
taskStatusPool.Start(_MIRROR_UPDATE)
defer taskStatusPool.Stop(_MIRROR_UPDATE)
log.Trace("Doing: MirrorUpdate")
mirrors := make([]*Mirror, 0, 10)
if err := x.Iterate(new(Mirror), func(idx int, bean interface{}) error {
m := bean.(*Mirror)
if m.NextUpdate.After(time.Now()) {
return nil
}
if m.Repo == nil {
log.Error(4, "Disconnected mirror repository found: %d", m.ID)
return nil
}
repoPath := m.Repo.RepoPath()
if _, stderr, err := process.ExecDir(
time.Duration(setting.Git.Timeout.Mirror)*time.Second,
repoPath, fmt.Sprintf("MirrorUpdate: %s", repoPath),
"git", "remote", "update", "--prune"); err != nil {
desc := fmt.Sprintf("Fail to update mirror repository(%s): %s", repoPath, stderr)
log.Error(4, desc)
if err = CreateRepositoryNotice(desc); err != nil {
log.Error(4, "CreateRepositoryNotice: %v", err)
}
return nil
}
m.NextUpdate = time.Now().Add(time.Duration(m.Interval) * time.Hour)
mirrors = append(mirrors, m)
return nil
}); err != nil {
log.Error(4, "MirrorUpdate: %v", err)
}
for i := range mirrors {
if err := UpdateMirror(mirrors[i]); err != nil {
log.Error(4, "UpdateMirror[%d]: %v", mirrors[i].ID, err)
}
}
}
// GitFsck calls 'git fsck' to check repository health.
func GitFsck() {
if taskStatusPool.IsRunning(_GIT_FSCK) {
return
}
taskStatusPool.Start(_GIT_FSCK)
defer taskStatusPool.Stop(_GIT_FSCK)
log.Trace("Doing: GitFsck")
if err := x.Where("id>0").Iterate(new(Repository),
func(idx int, bean interface{}) error {
repo := bean.(*Repository)
repoPath := repo.RepoPath()
if err := git.Fsck(repoPath, setting.Cron.RepoHealthCheck.Timeout, setting.Cron.RepoHealthCheck.Args...); err != nil {
desc := fmt.Sprintf("Fail to health check repository (%s): %v", repoPath, err)
log.Warn(desc)
if err = CreateRepositoryNotice(desc); err != nil {
log.Error(4, "CreateRepositoryNotice: %v", err)
}
}
return nil
}); err != nil {
log.Error(4, "GitFsck: %v", err)
}
}
func GitGcRepos() error {
args := append([]string{"gc"}, setting.Git.GcArgs...)
return x.Where("id > 0").Iterate(new(Repository),
func(idx int, bean interface{}) error {
repo := bean.(*Repository)
if err := repo.GetOwner(); err != nil {
return err
}
_, stderr, err := process.ExecDir(-1, RepoPath(repo.Owner.Name, repo.Name), "Repository garbage collection", "git", args...)
if err != nil {
return fmt.Errorf("%v: %v", err, stderr)
}
return nil
})
}
type repoChecker struct {
querySQL, correctSQL string
desc string
}
func repoStatsCheck(checker *repoChecker) {
results, err := x.Query(checker.querySQL)
if err != nil {
log.Error(4, "Select %s: %v", checker.desc, err)
return
}
for _, result := range results {
id := com.StrTo(result["id"]).MustInt64()
log.Trace("Updating %s: %d", checker.desc, id)
_, err = x.Exec(checker.correctSQL, id, id)
if err != nil {
log.Error(4, "Update %s[%d]: %v", checker.desc, id, err)
}
}
}
func CheckRepoStats() {
	if taskStatusPool.IsRunning(_CHECK_REPOS) {
return
}
	taskStatusPool.Start(_CHECK_REPOS)
	defer taskStatusPool.Stop(_CHECK_REPOS)
log.Trace("Doing: CheckRepoStats")
checkers := []*repoChecker{
// Repository.NumWatches
{
"SELECT repo.id FROM `repository` repo WHERE repo.num_watches!=(SELECT COUNT(*) FROM `watch` WHERE repo_id=repo.id)",
"UPDATE `repository` SET num_watches=(SELECT COUNT(*) FROM `watch` WHERE repo_id=?) WHERE id=?",
"repository count 'num_watches'",
},
// Repository.NumStars
{
"SELECT repo.id FROM `repository` repo WHERE repo.num_stars!=(SELECT COUNT(*) FROM `star` WHERE repo_id=repo.id)",
"UPDATE `repository` SET num_stars=(SELECT COUNT(*) FROM `star` WHERE repo_id=?) WHERE id=?",
"repository count 'num_stars'",
},
// Label.NumIssues
{
"SELECT label.id FROM `label` WHERE label.num_issues!=(SELECT COUNT(*) FROM `issue_label` WHERE label_id=label.id)",
"UPDATE `label` SET num_issues=(SELECT COUNT(*) FROM `issue_label` WHERE label_id=?) WHERE id=?",
"label count 'num_issues'",
},
// User.NumRepos
{
"SELECT `user`.id FROM `user` WHERE `user`.num_repos!=(SELECT COUNT(*) FROM `repository` WHERE owner_id=`user`.id)",
"UPDATE `user` SET num_repos=(SELECT COUNT(*) FROM `repository` WHERE owner_id=?) WHERE id=?",
"user count 'num_repos'",
},
// Issue.NumComments
{
"SELECT `issue`.id FROM `issue` WHERE `issue`.num_comments!=(SELECT COUNT(*) FROM `comment` WHERE issue_id=`issue`.id AND type=0)",
"UPDATE `issue` SET num_comments=(SELECT COUNT(*) FROM `comment` WHERE issue_id=? AND type=0) WHERE id=?",
"issue count 'num_comments'",
},
}
for i := range checkers {
repoStatsCheck(checkers[i])
}
// FIXME: use checker when v0.9, stop supporting old fork repo format.
// ***** START: Repository.NumForks *****
results, err := x.Query("SELECT repo.id FROM `repository` repo WHERE repo.num_forks!=(SELECT COUNT(*) FROM `repository` WHERE fork_id=repo.id)")
if err != nil {
log.Error(4, "Select repository count 'num_forks': %v", err)
} else {
for _, result := range results {
id := com.StrTo(result["id"]).MustInt64()
log.Trace("Updating repository count 'num_forks': %d", id)
repo, err := GetRepositoryByID(id)
if err != nil {
log.Error(4, "GetRepositoryByID[%d]: %v", id, err)
continue
}
rawResult, err := x.Query("SELECT COUNT(*) FROM `repository` WHERE fork_id=?", repo.ID)
if err != nil {
log.Error(4, "Select count of forks[%d]: %v", repo.ID, err)
continue
}
repo.NumForks = int(parseCountResult(rawResult))
if err = UpdateRepository(repo, false); err != nil {
log.Error(4, "UpdateRepository[%d]: %v", id, err)
continue
}
}
}
// ***** END: Repository.NumForks *****
}
// __ __ __ .__
// / \ / \_____ _/ |_ ____ | |__
// \ \/\/ /\__ \\ __\/ ___\| | \
// \ / / __ \| | \ \___| Y \
// \__/\ / (____ /__| \___ >___| /
// \/ \/ \/ \/
// Watch is a connection request for receiving repository notifications.
type Watch struct {
ID int64 `xorm:"pk autoincr"`
UserID int64 `xorm:"UNIQUE(watch)"`
RepoID int64 `xorm:"UNIQUE(watch)"`
}
func isWatching(e Engine, uid, repoId int64) bool {
has, _ := e.Get(&Watch{0, uid, repoId})
return has
}
// IsWatching checks if user has watched given repository.
func IsWatching(uid, repoId int64) bool {
return isWatching(x, uid, repoId)
}
func watchRepo(e Engine, uid, repoId int64, watch bool) (err error) {
if watch {
if isWatching(e, uid, repoId) {
return nil
}
if _, err = e.Insert(&Watch{RepoID: repoId, UserID: uid}); err != nil {
return err
}
_, err = e.Exec("UPDATE `repository` SET num_watches = num_watches + 1 WHERE id = ?", repoId)
} else {
if !isWatching(e, uid, repoId) {
return nil
}
if _, err = e.Delete(&Watch{0, uid, repoId}); err != nil {
return err
}
_, err = e.Exec("UPDATE `repository` SET num_watches=num_watches-1 WHERE id=?", repoId)
}
return err
}
// WatchRepo watches or unwatches the given repository for the user.
func WatchRepo(uid, repoId int64, watch bool) (err error) {
return watchRepo(x, uid, repoId, watch)
}
func getWatchers(e Engine, repoID int64) ([]*Watch, error) {
watches := make([]*Watch, 0, 10)
return watches, e.Find(&watches, &Watch{RepoID: repoID})
}
// GetWatchers returns all watchers of given repository.
func GetWatchers(repoID int64) ([]*Watch, error) {
return getWatchers(x, repoID)
}
// GetWatchers returns a page of users watching the given repository.
func (repo *Repository) GetWatchers(page int) ([]*User, error) {
users := make([]*User, 0, ItemsPerPage)
sess := x.Limit(ItemsPerPage, (page-1)*ItemsPerPage).Where("watch.repo_id=?", repo.ID)
if setting.UsePostgreSQL {
sess = sess.Join("LEFT", "watch", `"user".id=watch.user_id`)
} else {
sess = sess.Join("LEFT", "watch", "user.id=watch.user_id")
}
return users, sess.Find(&users)
}
func notifyWatchers(e Engine, act *Action) error {
// Add feeds for user self and all watchers.
watches, err := getWatchers(e, act.RepoID)
if err != nil {
return fmt.Errorf("get watchers: %v", err)
}
// Add feed for actioner.
act.UserID = act.ActUserID
if _, err = e.InsertOne(act); err != nil {
return fmt.Errorf("insert new actioner: %v", err)
}
for i := range watches {
if act.ActUserID == watches[i].UserID {
continue
}
act.ID = 0
act.UserID = watches[i].UserID
if _, err = e.InsertOne(act); err != nil {
return fmt.Errorf("insert new action: %v", err)
}
}
return nil
}
// NotifyWatchers creates a batch of actions, one for every watcher.
func NotifyWatchers(act *Action) error {
return notifyWatchers(x, act)
}
// _________ __
// / _____// |______ _______
// \_____ \\ __\__ \\_ __ \
// / \| | / __ \| | \/
// /_______ /|__| (____ /__|
// \/ \/
type Star struct {
ID int64 `xorm:"pk autoincr"`
UID int64 `xorm:"UNIQUE(s)"`
RepoID int64 `xorm:"UNIQUE(s)"`
}
// StarRepo stars or unstars the given repository for the user.
func StarRepo(uid, repoId int64, star bool) (err error) {
if star {
if IsStaring(uid, repoId) {
return nil
}
if _, err = x.Insert(&Star{UID: uid, RepoID: repoId}); err != nil {
return err
} else if _, err = x.Exec("UPDATE `repository` SET num_stars = num_stars + 1 WHERE id = ?", repoId); err != nil {
return err
}
_, err = x.Exec("UPDATE `user` SET num_stars = num_stars + 1 WHERE id = ?", uid)
} else {
if !IsStaring(uid, repoId) {
return nil
}
if _, err = x.Delete(&Star{0, uid, repoId}); err != nil {
return err
} else if _, err = x.Exec("UPDATE `repository` SET num_stars = num_stars - 1 WHERE id = ?", repoId); err != nil {
return err
}
_, err = x.Exec("UPDATE `user` SET num_stars = num_stars - 1 WHERE id = ?", uid)
}
return err
}
// IsStaring checks if the user has starred the given repository.
func IsStaring(uid, repoId int64) bool {
has, _ := x.Get(&Star{0, uid, repoId})
return has
}
func (repo *Repository) GetStargazers(page int) ([]*User, error) {
users := make([]*User, 0, ItemsPerPage)
sess := x.Limit(ItemsPerPage, (page-1)*ItemsPerPage).Where("star.repo_id=?", repo.ID)
if setting.UsePostgreSQL {
sess = sess.Join("LEFT", "star", `"user".id=star.uid`)
} else {
sess = sess.Join("LEFT", "star", "user.id=star.uid")
}
return users, sess.Find(&users)
}
// ___________ __
// \_ _____/__________| | __
// | __)/ _ \_ __ \ |/ /
// | \( <_> ) | \/ <
// \___ / \____/|__| |__|_ \
// \/ \/
// HasForkedRepo checks if given user has already forked a repository with given ID.
func HasForkedRepo(ownerID, repoID int64) (*Repository, bool) {
repo := new(Repository)
has, _ := x.Where("owner_id=? AND fork_id=?", ownerID, repoID).Get(repo)
return repo, has
}
func ForkRepository(u *User, oldRepo *Repository, name, desc string) (_ *Repository, err error) {
repo := &Repository{
OwnerID: u.Id,
Owner: u,
Name: name,
LowerName: strings.ToLower(name),
Description: desc,
DefaultBranch: oldRepo.DefaultBranch,
IsPrivate: oldRepo.IsPrivate,
IsFork: true,
ForkID: oldRepo.ID,
}
sess := x.NewSession()
defer sessionRelease(sess)
if err = sess.Begin(); err != nil {
return nil, err
}
if err = createRepository(sess, u, repo); err != nil {
return nil, err
}
if _, err = sess.Exec("UPDATE `repository` SET num_forks=num_forks+1 WHERE id=?", oldRepo.ID); err != nil {
return nil, err
}
repoPath := RepoPath(u.Name, repo.Name)
_, stderr, err := process.ExecTimeout(10*time.Minute,
fmt.Sprintf("ForkRepository(git clone): %s/%s", u.Name, repo.Name),
"git", "clone", "--bare", oldRepo.RepoPath(), repoPath)
if err != nil {
return nil, fmt.Errorf("git clone: %v", stderr)
}
_, stderr, err = process.ExecDir(-1,
repoPath, fmt.Sprintf("ForkRepository(git update-server-info): %s", repoPath),
"git", "update-server-info")
if err != nil {
return nil, fmt.Errorf("git update-server-info: %v", err)
}
if err = createUpdateHook(repoPath); err != nil {
return nil, fmt.Errorf("createUpdateHook: %v", err)
}
return repo, sess.Commit()
}
func (repo *Repository) GetForks() ([]*Repository, error) {
forks := make([]*Repository, 0, repo.NumForks)
return forks, x.Find(&forks, &Repository{ForkID: repo.ID})
}
| 1 | 10,966 | Why not use `path.Join`? | gogs-gogs | go |
@@ -7,4 +7,8 @@ if ( typeof global.googlesitekit === 'undefined' ) {
global.googlesitekit = {};
}
-global.googlesitekit.api = API;
+if ( typeof global.googlesitekit.api === 'undefined' ) {
+ global.googlesitekit.api = API;
+}
+
+export * from 'assets/js/googlesitekit/api'; | 1 | /**
* External dependencies
*/
import * as API from 'assets/js/googlesitekit/api';
if ( typeof global.googlesitekit === 'undefined' ) {
global.googlesitekit = {};
}
global.googlesitekit.api = API;
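// Reviewer's suggestion below (illustrative sketch, not the actual change):
// a default export would expose the same object that is attached to the
// global, e.g. `export default API;`.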
| 1 | 26,502 | Why not `export default API` instead? This ensures we export the same thing that we export on the global. | google-site-kit-wp | js |
@@ -552,7 +552,7 @@ def imread(filename, pixel_type=None, fallback_only=False):
increase_dimension=False
kwargs={'FileName':filename}
if pixel_type:
- imageIO = itk.ImageIOFactory.CreateImageIO(io_filename, itk.ImageIOFactory.ReadMode)
+ imageIO = itk.ImageIOFactory.CreateImageIO(io_filename, itk.ImageIOFactory.FileModeType_ReadMode)
if not imageIO:
raise RuntimeError("No ImageIO is registered to handle the given file.")
imageIO.SetFileName(io_filename) | 1 | #==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
import re
# The following line defines an ascii string used for dynamically refreshing
# the import and progress callbacks on the same terminal line.
# See http://www.termsys.demon.co.uk/vtansi.htm
# \033 is the C-style octal code for an escape character
# [2000D moves the cursor back 2000 columns, this is a brute force way of
# getting to the start of the line.
# [K erases the end of the line
clrLine = "\033[2000D\033[K"
def auto_not_in_place(v=True):
"""Force it to not run in place
"""
import itkConfig
itkConfig.NotInPlace = v
def auto_progress(progress_type=1):
"""Set up auto progress report
progress_type:
    1 or True -> auto progress is displayed in a terminal
2 -> simple auto progress (without special characters)
0 or False -> disable auto progress
"""
import itkConfig
if progress_type is True or progress_type == 1:
itkConfig.ImportCallback = terminal_import_callback
itkConfig.ProgressCallback = terminal_progress_callback
elif progress_type == 2:
itkConfig.ImportCallback = simple_import_callback
itkConfig.ProgressCallback = simple_progress_callback
elif progress_type is False or progress_type == 0:
itkConfig.ImportCallback = None
itkConfig.ProgressCallback = None
else:
raise ValueError("Invalid auto progress type: " + repr(progress_type))
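# Example (illustrative):
#   import itk
#   itk.auto_progress(2)  # print simple progress messages for imports and filters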
def terminal_progress_callback(name, p):
"""Display the progress of an object and clean the display once complete
This function can be used with itkConfig.ProgressCallback
"""
import sys
print(clrLine + "%s: %f" % (name, p), file=sys.stderr, end="")
if p == 1:
print(clrLine, file=sys.stderr, end="")
def terminal_import_callback(name, p):
"""Display the loading of a module and clean the display once complete
This function can be used with itkConfig.ImportCallback
"""
import sys
print(clrLine + "Loading %s... " % name, file=sys.stderr, end="")
if p == 1:
print(clrLine, file=sys.stderr, end="")
def simple_import_callback(name, p):
"""Print a message when a module is loading
This function can be used with itkConfig.ImportCallback
"""
import sys
if p == 0:
print("Loading %s... " % name, file=sys.stderr, end="")
elif p == 1:
print("done", file=sys.stderr)
def simple_progress_callback(name, p):
"""Print a message when an object is running
This function can be used with itkConfig.ProgressCallback
"""
import sys
if p == 0:
print("Running %s... " % name, file=sys.stderr, end="")
elif p == 1:
print("done", file=sys.stderr)
def force_load():
"""force itk to load all the submodules"""
import itk
for k in dir(itk):
getattr(itk, k)
import sys
def echo(object, f=sys.stderr):
"""Print an object is f
If the object has a method Print(), this method is used.
repr(object) is used otherwise
"""
print(f, object)
del sys
def size(image_or_filter):
"""Return the size of an image, or of the output image of a filter
    This method takes care of updating the needed information.
"""
# we don't need the entire output, only its size
image_or_filter.UpdateOutputInformation()
img = output(image_or_filter)
return img.GetLargestPossibleRegion().GetSize()
def physical_size(image_or_filter):
"""Return the physical size of an image, or of the output image of a filter
    This method takes care of updating the needed information.
"""
# required because range is overloaded in this module
import sys
from builtins import range
spacing_ = spacing(image_or_filter)
size_ = size(image_or_filter)
result = []
for i in range(0, spacing_.Size()):
result.append(spacing_.GetElement(i) * size_.GetElement(i))
return result
def spacing(image_or_filter):
"""Return the spacing of an image, or of the output image of a filter
    This method takes care of updating the needed information.
    """
    # we don't need the entire output, only its information
image_or_filter.UpdateOutputInformation()
img = output(image_or_filter)
return img.GetSpacing()
def origin(image_or_filter):
"""Return the origin of an image, or of the output image of a filter
    This method takes care of updating the needed information.
    """
    # we don't need the entire output, only its information
image_or_filter.UpdateOutputInformation()
img = output(image_or_filter)
return img.GetOrigin()
def index(image_or_filter):
"""Return the index of an image, or of the output image of a filter
    This method takes care of updating the needed information.
    """
    # we don't need the entire output, only its information
image_or_filter.UpdateOutputInformation()
img = output(image_or_filter)
return img.GetLargestPossibleRegion().GetIndex()
def region(image_or_filter):
"""Return the region of an image, or of the output image of a filter
    This method takes care of updating the needed information.
    """
    # we don't need the entire output, only its information
image_or_filter.UpdateOutputInformation()
img = output(image_or_filter)
return img.GetLargestPossibleRegion()
HAVE_NUMPY = True
try:
import numpy
except ImportError:
HAVE_NUMPY = False
def _get_itk_pixelid(numpy_array_type):
"""Returns a ITK PixelID given a numpy array."""
if not HAVE_NUMPY:
raise ImportError('Numpy not available.')
import itk
# This is a Mapping from numpy array types to itk pixel types.
_np_itk = {numpy.uint8:itk.UC,
numpy.uint16:itk.US,
numpy.uint32:itk.UI,
numpy.uint64:itk.UL,
numpy.int8:itk.SC,
numpy.int16:itk.SS,
numpy.int32:itk.SI,
numpy.int64:itk.SL,
numpy.float32:itk.F,
numpy.float64:itk.D,
numpy.complex64:itk.complex[itk.F],
numpy.complex128:itk.complex[itk.D]
}
try:
return _np_itk[numpy_array_type.dtype.type]
except KeyError as e:
for key in _np_itk:
if numpy.issubdtype(numpy_array_type.dtype.type, key):
return _np_itk[key]
raise e
def _GetArrayFromImage(image_or_filter, function, keep_axes, update):
"""Get an Array with the content of the image buffer
"""
# Check for numpy
if not HAVE_NUMPY:
raise ImportError('Numpy not available.')
# Finds the image type
import itk
keys = [k for k in itk.PyBuffer.keys() if k[0] == output(image_or_filter).__class__]
    if len(keys) == 0:
raise RuntimeError("No suitable template parameter can be found.")
ImageType = keys[0]
# Create a numpy array of the type of the input image
templatedFunction = getattr(itk.PyBuffer[keys[0]], function)
return templatedFunction(output(image_or_filter), keep_axes, update)
def GetArrayFromImage(image_or_filter, keep_axes=False, update=True):
"""Get an array with the content of the image buffer
"""
return _GetArrayFromImage(image_or_filter, "GetArrayFromImage", keep_axes, update)
array_from_image = GetArrayFromImage
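# Example (illustrative; 'input.png' is a placeholder file name):
#   image = itk.imread('input.png')
#   arr = itk.array_from_image(image)  # indexed as arr[y, x] ([z, y, x] in 3D)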
def GetArrayViewFromImage(image_or_filter, keep_axes=False, update=True):
"""Get an array view with the content of the image buffer
"""
return _GetArrayFromImage(image_or_filter, "GetArrayViewFromImage", keep_axes, update)
array_view_from_image = GetArrayViewFromImage
def _GetImageFromArray(arr, function, is_vector):
"""Get an ITK image from a Python array.
"""
if not HAVE_NUMPY:
raise ImportError('Numpy not available.')
import itk
PixelType = _get_itk_pixelid(arr)
if is_vector:
Dimension = arr.ndim - 1
if arr.flags['C_CONTIGUOUS']:
VectorDimension = arr.shape[-1]
else:
VectorDimension = arr.shape[0]
if PixelType == itk.UC:
if VectorDimension == 3:
ImageType = itk.Image[ itk.RGBPixel[itk.UC], Dimension ]
elif VectorDimension == 4:
ImageType = itk.Image[ itk.RGBAPixel[itk.UC], Dimension ]
else:
ImageType = itk.Image[ itk.Vector[PixelType, VectorDimension] , Dimension]
else:
Dimension = arr.ndim
ImageType = itk.Image[PixelType, Dimension]
templatedFunction = getattr(itk.PyBuffer[ImageType], function)
return templatedFunction(arr, is_vector)
def GetImageFromArray(arr, is_vector=False):
"""Get an ITK image from a Python array.
"""
return _GetImageFromArray(arr, "GetImageFromArray", is_vector)
image_from_array = GetImageFromArray
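# Example (illustrative):
#   arr = numpy.zeros((32, 32), dtype=numpy.float32)
#   image = itk.image_from_array(arr)  # an itk.Image[itk.F, 2]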
def GetImageViewFromArray(arr, is_vector=False):
"""Get an ITK image view from a Python array.
"""
return _GetImageFromArray(arr, "GetImageViewFromArray", is_vector)
image_view_from_array = GetImageViewFromArray
def _GetArrayFromVnlObject(vnl_object, function):
"""Get an array with the content of vnl_object
"""
# Check for numpy
if not HAVE_NUMPY:
raise ImportError('Numpy not available.')
# Finds the vnl object type
import itk
PixelType = itk.template(vnl_object)[1][0]
keys = [k for k in itk.PyVnl.keys() if k[0] == PixelType]
    if len(keys) == 0:
raise RuntimeError("No suitable template parameter can be found.")
# Create a numpy array of the type of the vnl object
templatedFunction = getattr(itk.PyVnl[keys[0]], function)
return templatedFunction(vnl_object)
def GetArrayFromVnlVector(vnl_vector):
"""Get an array with the content of vnl_vector
"""
return _GetArrayFromVnlObject(vnl_vector, "GetArrayFromVnlVector")
array_from_vnl_vector = GetArrayFromVnlVector
def GetArrayViewFromVnlVector(vnl_vector):
"""Get an array view of vnl_vector
"""
return _GetArrayFromVnlObject(vnl_vector, "GetArrayViewFromVnlVector")
array_view_from_vnl_vector = GetArrayViewFromVnlVector
def GetArrayFromVnlMatrix(vnl_matrix):
"""Get an array with the content of vnl_matrix
"""
return _GetArrayFromVnlObject(vnl_matrix, "GetArrayFromVnlMatrix")
def GetArrayViewFromVnlMatrix(vnl_matrix):
"""Get an array view of vnl_matrix
"""
return _GetArrayFromVnlObject(vnl_matrix, "GetArrayViewFromVnlMatrix")
array_from_vnl_matrix = GetArrayFromVnlMatrix
def _GetVnlObjectFromArray(arr, function):
"""Get a vnl object from a Python array.
"""
if not HAVE_NUMPY:
raise ImportError('Numpy not available.')
import itk
PixelType = _get_itk_pixelid(arr)
templatedFunction = getattr(itk.PyVnl[PixelType], function)
return templatedFunction(arr)
def GetVnlVectorFromArray(arr):
"""Get a vnl vector from a Python array.
"""
return _GetVnlObjectFromArray(arr, "GetVnlVectorFromArray")
vnl_vector_from_array = GetVnlVectorFromArray
def GetVnlMatrixFromArray(arr):
"""Get a vnl matrix from a Python array.
"""
return _GetVnlObjectFromArray(arr, "GetVnlMatrixFromArray")
vnl_matrix_from_array = GetVnlMatrixFromArray
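# Illustrative round trip (a sketch; `m` stands for a 2-D NumPy array):
#
#   v = itk.vnl_matrix_from_array(m)    # NumPy -> vnl_matrix
#   m2 = itk.array_from_vnl_matrix(v)   # vnl_matrix -> NumPy, same values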
def GetArrayFromMatrix(itk_matrix):
return GetArrayFromVnlMatrix(itk_matrix.GetVnlMatrix().as_matrix())
array_from_matrix = GetArrayFromMatrix
def GetMatrixFromArray(arr):
import itk
vnl_matrix = GetVnlMatrixFromArray(arr)
dims = arr.shape
PixelType = _get_itk_pixelid(arr)
m = itk.Matrix[PixelType, dims[0], dims[1]](vnl_matrix)
return m
matrix_from_array = GetMatrixFromArray
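# Illustrative usage (a sketch; assumes a square array of a wrapped element type):
#
#   import numpy as np
#   m = itk.matrix_from_array(np.eye(3))  # itk.Matrix[..., 3, 3]
#   a = itk.array_from_matrix(m)          # back to a NumPy array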
# return an image
from itkTemplate import image, output
def template(cl):
"""Return the template of a class (or of the class of an object) and
its parameters
template() returns a tuple with 2 elements:
- the first one is the itkTemplate object
- the second is a tuple containing the template parameters
"""
from itkTemplate import itkTemplate
return itkTemplate.__class_to_template__[class_(cl)]
def ctype(s):
"""Return the c type corresponding to the string passed in parameter
The string can contain some extra spaces.
see also itkCType
"""
from itkTypes import itkCType
ret = itkCType.GetCType(" ".join(s.split()))
if ret is None:
raise KeyError("Unrecognized C type '%s'" % s)
return ret
def class_(obj):
"""Return a class from an object
Often in itk, the __class__ is not what the user is expecting.
class_() should do a better job
"""
import inspect
if inspect.isclass(obj):
# obj is already a class !
return obj
else:
return obj.__class__
def python_type(obj):
"""Returns the Python type name of an object
The Python name corresponding to the given instantiated object is returned.
This includes both the Python name and the parameters of the object. A user
can copy and paste the returned value to instantiate a new object of the
same type."""
import itkTemplate
from itkTypes import itkCType
def in_itk(name):
import itk
# Remove "itk::" and "std::" from template name.
# Only happens for ITK objects.
shortname = name.split('::')[-1]
shortname = shortname.split('itk')[-1]
namespace = itk
# A type cannot be part of ITK if its name was not modified above. This
# check avoids having an input of type `list` and return `itk.list` that
# also exists.
likely_itk = (shortname != name or name[:3] == 'vnl')
if likely_itk and hasattr(namespace, shortname):
return namespace.__name__ + '.' + shortname # Prepend name with 'itk.'
else:
return name
def recursive(obj, level):
try:
T, P = template(obj)
name = in_itk(T.__name__)
parameters = []
for t in P:
parameters.append(recursive(t, level+1))
return name + "[" + ",".join(parameters) + "]"
except KeyError:
if isinstance(obj, itkCType): # Handles CTypes differently
return 'itk.' + obj.short_name
elif hasattr(obj, "__name__"):
# This should be where most ITK types end up.
return in_itk(obj.__name__)
elif (not isinstance(obj, type)
and type(obj) != itkTemplate.itkTemplate and level != 0):
# obj should actually be considered a value, not a type,
# or it is already an itkTemplate type.
# A value can be an integer that is a template parameter.
# This does not happen at the first level of the recursion
# as it is not possible that this object would be a template
# parameter. Checking the level `0` allows e.g. to find the
# type of an object that is a `list` or an `int`.
return str(obj)
else:
return in_itk(type(obj).__name__)
return recursive(obj, 0)
# Keep a reference to the builtin range before it is shadowed by the function
# below; the label-map helpers later in this module still need the builtin.
_builtin_range = range
def range(image_or_filter):
"""Return the range of values in an image or in the output image of a filter
The minimum and maximum values are returned in a tuple: (min, max)
range() takes care of updating the pipeline
"""
import itk
img = output(image_or_filter)
img.UpdateOutputInformation()
img.Update()
# don't put that calculator in the automatic pipeline
tmp_auto_pipeline = auto_pipeline.current
auto_pipeline.current = None
comp = itk.MinimumMaximumImageCalculator[img].New(Image=img)
auto_pipeline.current = tmp_auto_pipeline
comp.Compute()
return (comp.GetMinimum(), comp.GetMaximum())
def imwrite(image_or_filter, filename, compression=False):
"""Write a image or the output image of a filter to a file.
The writer is instantiated with the image type of the image in
parameter (or, again, with the output image of the filter in parameter).
"""
import itk
img = output(image_or_filter)
img.UpdateOutputInformation()
# don't put that writer in the automatic pipeline
tmp_auto_pipeline = auto_pipeline.current
auto_pipeline.current = None
writer = itk.ImageFileWriter[type(img)].New(
Input=img,
FileName=filename,
UseCompression=compression)
auto_pipeline.current = tmp_auto_pipeline
writer.Update()
def imread(filename, pixel_type=None, fallback_only=False):
"""Read an image from a file or series of files and return an itk.Image.
The reader is instantiated with the image type of the image file if
`pixel_type` is not provided (default). The dimension of the image is
automatically found. If the given filename is a list or a tuple, the
reader will use an itk.ImageSeriesReader object to read the files.
If `fallback_only` is set to `True`, `imread()` will first try to
automatically deduce the image pixel_type, and only use the given
`pixel_type` if automatic deduction fails. Failures typically
happen if the pixel type is not supported (e.g. it is not currently
wrapped).
"""
import itk
if fallback_only:
if pixel_type is None:
raise Exception("pixel_type must be set when using the fallback_only option")
try:
return imread(filename)
except KeyError:
pass
if type(filename) in [list, tuple]:
TemplateReaderType=itk.ImageSeriesReader
io_filename=filename[0]
increase_dimension=True
kwargs={'FileNames':filename}
else:
TemplateReaderType=itk.ImageFileReader
io_filename=filename
increase_dimension=False
kwargs={'FileName':filename}
if pixel_type:
imageIO = itk.ImageIOFactory.CreateImageIO(io_filename, itk.ImageIOFactory.ReadMode)
if not imageIO:
raise RuntimeError("No ImageIO is registered to handle the given file.")
imageIO.SetFileName(io_filename)
imageIO.ReadImageInformation()
dimension = imageIO.GetNumberOfDimensions()
# Increase dimension if last dimension is not of size one.
if increase_dimension and imageIO.GetDimensions(dimension-1) != 1:
dimension += 1
ImageType = itk.Image[pixel_type, dimension]
reader = TemplateReaderType[ImageType].New(**kwargs)
else:
reader = TemplateReaderType.New(**kwargs)
reader.Update()
return reader.GetOutput()
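# Illustrative usage of imread (a sketch; file names are hypothetical):
#
#   image = itk.imread('input.nrrd')                      # pixel type deduced
#   image = itk.imread('input.nrrd', itk.ctype('float'))  # forced pixel type
#   image = itk.imread('input.nrrd', itk.ctype('float'),
#                      fallback_only=True)  # forced only if deduction fails
#   volume = itk.imread(['s0.png', 's1.png'])  # series read as one volume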
def meshwrite(mesh, filename, compression=False):
"""Write a mesh to a file.
The writer is instantiated according to the type of the input mesh.
"""
import itk
mesh.UpdateOutputInformation()
# don't put that writer in the automatic pipeline
tmp_auto_pipeline = auto_pipeline.current
auto_pipeline.current = None
writer = itk.MeshFileWriter[type(mesh)].New(
Input=mesh,
FileName=filename,
UseCompression=compression)
auto_pipeline.current = tmp_auto_pipeline
writer.Update()
def meshread(filename, pixel_type=None, fallback_only=False):
"""Read a mesh from a file and return an itk.Mesh.
The reader is instantiated with the mesh type of the mesh file if
`pixel_type` is not provided (default). The dimension of the mesh is
automatically found.
If `fallback_only` is set to `True`, `meshread()` will first try to
automatically deduce the mesh pixel_type, and only use the given
`pixel_type` if automatic deduction fails. Failures typically
happen if the pixel type is not supported (e.g. it is not currently
wrapped).
"""
import itk
if fallback_only:
if pixel_type is None:
raise Exception("pixel_type must be set when using the fallback_only option")
try:
return meshread(filename)
except KeyError:
pass
TemplateReaderType=itk.MeshFileReader
io_filename=filename
increase_dimension=False
kwargs={'FileName':filename}
if pixel_type:
meshIO = itk.MeshIOFactory.CreateMeshIO(io_filename, itk.MeshIOFactory.ReadMode)
if not meshIO:
raise RuntimeError("No MeshIO is registered to handle the given file.")
meshIO.SetFileName(io_filename)
meshIO.ReadMeshInformation()
dimension = meshIO.GetPointDimension()
# Increase dimension if last dimension is not of size one.
if increase_dimension and meshIO.GetDimensions(dimension-1) != 1:
dimension += 1
MeshType = itk.Mesh[pixel_type, dimension]
reader = TemplateReaderType[MeshType].New(**kwargs)
else:
reader = TemplateReaderType.New(**kwargs)
reader.Update()
return reader.GetOutput()
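# Illustrative usage (a sketch; the file name is hypothetical):
#
#   mesh = itk.meshread('surface.vtk')  # mesh type deduced from the file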
def search(s, case_sensitive=False): # , fuzzy=True):
"""Search for a class name in the itk module.
"""
s = s.replace(" ", "")
if not case_sensitive:
s = s.lower()
import itk
names = sorted(dir(itk))
# exact match first
if case_sensitive:
res = [n for n in names if s == n]
else:
res = [n for n in names if s == n.lower()]
# then exact match inside the name
if case_sensitive:
res += [n for n in names if s in n and s != n]
else:
res += [n for n in names if s in n.lower() and s != n.lower()]
# if fuzzy:
# try:
# everything now requires editdist
# import editdist
# if case_sensitive:
# res.sort(key=lambda x: editdist.distance(x, s))
# else:
# res.sort(key=lambda x: (editdist.distance(x.lower(), s), x))
# except:
# pass
return res
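# Illustrative usage (a sketch):
#
#   itk.search('median')  # e.g. ['MedianImageFilter', 'MedianProjectionImageFilter', ...]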
# Helpers for set_inputs snake case to CamelCase keyword argument conversion
_snake_underscore_re = re.compile('(_)([a-z0-9A-Z])')
def _underscore_upper(matchobj):
return matchobj.group(2).upper()
def _snake_to_camel(keyword):
camel = keyword[0].upper()
if _snake_underscore_re.search(keyword[1:]):
return camel + _snake_underscore_re.sub(_underscore_upper, keyword[1:])
return camel + keyword[1:]
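# For example (a sketch): _snake_to_camel('file_name') returns 'FileName', so a
# call like New(file_name='in.png') is routed to SetFileName() by set_inputs below.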
def set_inputs(new_itk_object, args=[], kargs={}):
"""Set the inputs of the given objects, according to the non named or the
named parameters in args and kargs
This function tries to assign all the non-named parameters to the inputs of
the new_itk_object
- the first non-named parameter to the first input, etc.
The named parameters are used by calling the method with the same name
prefixed by 'Set'.
set_inputs( obj, kargs={'Threshold': 10} ) calls obj.SetThreshold(10)
This is the function used in the enhanced New() method to manage the inputs.
It can be used to produce a similar behavior:
def SetInputs(self, *args, **kargs):
import itk
itk.set_inputs(self, *args, **kargs)
"""
# try to get the images from the filters in args
args = [output(arg) for arg in args]
# args without name are filter used to set input image
#
# count SetInput calls to call SetInput, SetInput2, SetInput3, ...
# useful with filters which take 2 inputs (or more) like SubtractImageFilter
# Ex: subtract image2.png from image1.png and save the result in result.png
# r1 = itk.ImageFileReader.US2.New(FileName='image1.png')
# r2 = itk.ImageFileReader.US2.New(FileName='image2.png')
# s = itk.SubtractImageFilter.US2US2US2.New(r1, r2)
# itk.ImageFileWriter.US2.New(s, FileName='result.png').Update()
try:
for setInputNb, arg in enumerate(args):
methodName = 'SetInput%i' % (setInputNb + 1)
if methodName in dir(new_itk_object):
# first try to use methods called SetInput1, SetInput2, ...
# those method should have more chances to work in case of
# multiple input types
getattr(new_itk_object, methodName)(arg)
else:
# no method called SetInput?
# try with the standard SetInput(nb, input)
new_itk_object.SetInput(setInputNb, arg)
except TypeError as e:
# the exception has (at least) two possible reasons:
# + the filter doesn't take the input number as first argument
# + arg is an object of the wrong type
#
# if it's not the first input, re-raise the exception
if setInputNb != 0:
raise e
# it's the first input, try to use the SetInput() method without input
# number
new_itk_object.SetInput(args[0])
# but raise an exception if there is more than 1 argument
if len(args) > 1:
raise TypeError('Object accepts only 1 input.')
except AttributeError:
# There is no SetInput() method, try SetImage
# but before, check the number of inputs
if len(args) > 1:
raise TypeError('Object accepts only 1 input.')
methodList = ['SetImage', 'SetInputImage']
methodName = None
for m in methodList:
if m in dir(new_itk_object):
methodName = m
if methodName:
getattr(new_itk_object, methodName)(args[0])
else:
raise AttributeError('No method found to set the input.')
# named args : name is the function name, value is argument(s)
for attribName, value in kargs.items():
# use Set as prefix. It allows using a shorter and more intuitive
# call (Ex: itk.ImageFileReader.UC2.New(FileName='image.png')) than
# with the full name
# (Ex: itk.ImageFileReader.UC2.New(SetFileName='image.png'))
if attribName not in ["auto_progress", "template_parameters"]:
if attribName.islower():
attribName = _snake_to_camel(attribName)
attrib = getattr(new_itk_object, 'Set' + attribName)
# Do not use try-except mechanism as this leads to
# segfaults. Instead limit the number of types that are
# tested. The list of tested types could maybe be replaced by
# a test that would check for iterables.
if type(value) in [list, tuple]:
try:
output_value = [output(x) for x in value]
attrib(*output_value)
except:
attrib(output(value))
else:
attrib(output(value))
class templated_class:
"""This class is used to mimic the behavior of the templated C++ classes.
It is used this way:
class CustomClass:
# class definition here
CustomClass = templated_class(CustomClass)
customObject = CustomClass[template, parameters].New()
The template parameters are passed to the custom class constructor as a
named parameter 'template_parameters' in a tuple.
The custom class may implement a static method
check_template_parameters(parameters) which should raise an exception if
the template parameters provided are not suitable to instantiate the custom
class.
"""
def __init__(self, cls):
"""cls is the custom class
"""
self.__cls__ = cls
self.__templates__ = {}
def New(self, *args, **kargs):
"""Use the parameters to infer the types of the template parameters.
"""
# extract the types from the arguments to instantiate the class
import itk
types = tuple(itk.class_(o) for o in args)
return self[types].New(*args, **kargs)
def __getitem__(self, template_parameters):
"""Return a pair class-template parameters ready to be instantiated.
The template parameters may be validated if the custom class provide
the static method check_template_parameters(parameters).
"""
if not isinstance(template_parameters, tuple):
template_parameters = (template_parameters,)
return (
templated_class.__templated_class_and_parameters__(
self,
template_parameters)
)
def check_template_parameters(self, template_parameters):
"""Check the template parameters passed in parameter.
"""
# this method is there mainly to make it possible to reuse it in the
# custom class constructor after having used templated_class().
# Without that, the following example doesn't work:
#
# class CustomClass:
# def __init__(self, *args, **kargs):
# template_parameters = kargs["template_parameters"]
# CustomClass.check_template_parameters(template_parameters)
# other init stuff
# def check_template_parameters(template_parameters):
# check, really
# pass
# CustomClass = templated_class(CustomClass)
#
self.__cls__.check_template_parameters(template_parameters)
def add_template(self, name, params):
if not isinstance(params, list) and not isinstance(params, tuple):
params = (params,)
params = tuple(params)
val = self[params]
self.__templates__[params] = val
setattr(self, name, val)
def add_image_templates(self, *args):
import itk
if not args:
return
combinations = [[t] for t in args[0]]
for types in args[1:]:
temp = []
for t in types:
for c in combinations:
temp.append(c + [t])
combinations = temp
for d in itk.DIMS:
for c in combinations:
parameters = []
name = ""
for t in c:
parameters.append(itk.Image[t, d])
name += "I" + t.short_name + str(d)
self.add_template(name, tuple(parameters))
class __templated_class_and_parameters__:
"""Inner class used to store the pair class-template parameters ready
to instantiate.
"""
def __init__(self, templated_class, template_parameters):
self.__templated_class__ = templated_class
self.__template_parameters__ = template_parameters
if "check_template_parameters" in dir(templated_class.__cls__):
templated_class.__cls__.check_template_parameters(
template_parameters)
def New(self, *args, **kargs):
"""A New() method to mimic the ITK default behavior, even if the
class doesn't provide any New() method.
"""
kargs["template_parameters"] = self.__template_parameters__
if "New" in dir(self.__templated_class__.__cls__):
obj = self.__templated_class__.__cls__.New(*args, **kargs)
else:
obj = self.__templated_class__.__cls__(*args, **kargs)
setattr(
obj,
"__template_parameters__",
self.__template_parameters__)
setattr(obj, "__templated_class__", self.__templated_class__)
return obj
def __call__(self, *args, **kargs):
return self.New(*args, **kargs)
def keys(self):
return self.__templates__.keys()
# everything after this comment is for dict interface
# and is a copy/paste from DictMixin
# only methods to edit dictionary are not there
def __iter__(self):
for k in self.keys():
yield k
def has_key(self, key):
try:
value = self[key]
except KeyError:
return False
return True
def __contains__(self, key):
return self.has_key(key)
# third level takes advantage of second level definitions
def iteritems(self):
for k in self:
yield (k, self[k])
def iterkeys(self):
return self.__iter__()
# fourth level uses definitions from lower levels
def itervalues(self):
for _, v in self.iteritems():
yield v
def values(self):
return [v for _, v in self.iteritems()]
def items(self):
return list(self.iteritems())
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __len__(self):
return len(self.keys())
class pipeline:
"""A convenient class to store the reference to the filters of a pipeline
With this class, a method can create a pipeline of several filters and
return it without losing the references to the filters in this pipeline.
The pipeline object acts almost like a filter (it has a GetOutput() method)
and thus can be simply integrated in another pipeline.
"""
def __init__(self, *args, **kargs):
self.clear()
self.input = None
set_inputs(self, args, kargs)
def connect(self, filter):
"""Connect a new filter to the pipeline
The output of the first filter will be used as the input of this
one and the filter passed as parameter will be added to the list
"""
if self.GetOutput() is not None:
set_inputs(filter, [self.GetOutput()])
self.append(filter)
def append(self, filter):
"""Add a new filter to the pipeline
The new filter will not be connected. The user must connect it.
"""
self.filters.append(filter)
def clear(self):
"""Clear the filter list
"""
self.filters = []
def GetOutput(self, index=0):
"""Return the output of the pipeline
If another output is needed, use
pipeline.filters[-1].GetAnotherOutput() instead of this method,
subclass pipeline to implement another GetOutput() method, or use
expose()
"""
if len(self.filters) == 0:
return self.GetInput()
else:
filter = self.filters[-1]
if hasattr(filter, "__getitem__"):
return filter[index]
try:
return filter.GetOutput(index)
except:
if index == 0:
return filter.GetOutput()
else:
raise ValueError("Index can only be 0 on that object")
def GetNumberOfOutputs(self):
"""Return the number of outputs
"""
if len(self.filters) == 0:
return 1
else:
return self.filters[-1].GetNumberOfOutputs()
def SetInput(self, input):
"""Set the input of the pipeline
"""
if len(self.filters) != 0:
set_inputs(self.filters[0], [input])
self.input = input
def GetInput(self):
"""Get the input of the pipeline
"""
return self.input
def Update(self):
"""Update the pipeline
"""
if len(self.filters) > 0:
return self.filters[-1].Update()
def UpdateLargestPossibleRegion(self):
"""Update the pipeline
"""
if len(self.filters) > 0:
return self.filters[-1].UpdateLargestPossibleRegion()
def UpdateOutputInformation(self):
if "UpdateOutputInformation" in dir(self.filters[-1]):
self.filters[-1].UpdateOutputInformation()
else:
self.Update()
def __len__(self):
return self.GetNumberOfOutputs()
def __getitem__(self, item):
return self.GetOutput(item)
def __call__(self, *args, **kargs):
set_inputs(self, args, kargs)
self.UpdateLargestPossibleRegion()
return self
def expose(self, name, new_name=None, position=-1):
"""Expose an attribute from a filter of the minipeline.
Once called, the pipeline instance has a new Set/Get set of methods to
access directly the corresponding method of one of the filter of the
pipeline.
Ex: p.expose( "Radius" )
p.SetRadius( 5 )
p.GetRadius( 5 )
By default, the attribute usable on the pipeline instance has the same
name than the one of the filter, but it can be changed by providing a
value to new_name.
The last filter of the pipeline is used by default, but another one may
be used by giving its position.
Ex: p.expose("Radius", "SmoothingNeighborhood", 2)
p.GetSmoothingNeighborhood()
"""
if new_name is None:
new_name = name
src = self.filters[position]
ok = False
set_name = "Set" + name
if set_name in dir(src):
setattr(self, "Set" + new_name, getattr(src, set_name))
ok = True
get_name = "Get" + name
if get_name in dir(src):
setattr(self, "Get" + new_name, getattr(src, get_name))
ok = True
if not ok:
raise RuntimeError(
"No attribute %s at position %s." %
(name, position))
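# Illustrative usage of pipeline (a sketch; `reader` and `smoother` stand for
# hypothetical, already-constructed ITK filters):
#
#   p = pipeline()
#   p.connect(reader)    # first stage of the pipeline
#   p.connect(smoother)  # wired to the output of reader
#   p.expose('Radius')   # p.SetRadius()/p.GetRadius() now forward to smoother
#   p.Update()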
class auto_pipeline(pipeline):
current = None
def __init__(self, *args, **kargs):
pipeline.__init__(self, *args, **kargs)
self.Start()
def Start(self):
auto_pipeline.current = self
def Stop(self):
auto_pipeline.current = None
def down_cast(obj):
"""Down cast an itkLightObject (or a object of a subclass) to its most
specialized type.
"""
import itk
import itkTemplate
className = obj.GetNameOfClass()
t = getattr(itk, className)
if isinstance(t, itkTemplate.itkTemplate):
for c in t.values():
try:
return c.cast(obj)
except:
# fail silently for now
pass
raise RuntimeError(
"Can't downcast to a specialization of %s" %
className)
else:
return t.cast(obj)
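# Illustrative usage (a sketch; `obj` stands for a generically-typed
# itk.LightObject handle, e.g. one returned through a base-class interface):
#
#   specialized = itk.down_cast(obj)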
def attribute_list(i, name):
"""Returns a list of the specified attributes for the objects in the image.
i: the input LabelImage
name: the attribute name
"""
import itk
i = itk.output(i)
relabel = itk.StatisticsRelabelLabelMapFilter[i].New(
i,
Attribute=name,
ReverseOrdering=True,
InPlace=False)
relabel.UpdateLargestPossibleRegion()
r = relabel.GetOutput()
l = []
for i in _builtin_range(1, r.GetNumberOfLabelObjects() + 1):
l.append(r.GetLabelObject(i).__getattribute__("Get" + name)())
return l
def attributes_list(i, names):
"""Returns a list of the specified attributes for the objects in the image.
i: the input LabelImage
names: the attribute names
"""
import itk
i = itk.output(i)
relabel = itk.StatisticsRelabelLabelMapFilter[i].New(
i,
Attribute=names[0],
ReverseOrdering=True,
InPlace=False)
relabel.UpdateLargestPossibleRegion()
r = relabel.GetOutput()
l = []
for i in _builtin_range(1, r.GetNumberOfLabelObjects() + 1):
attrs = []
for name in names:
attrs.append(r.GetLabelObject(i).__getattribute__("Get" + name)())
l.append(tuple(attrs))
return l
def attribute_dict(i, name):
"""Returns a dict with the attribute values in keys and a list of the
corresponding objects in value
i: the input LabelImage
name: the name of the attribute
"""
import itk
i = itk.output(i)
relabel = itk.StatisticsRelabelLabelMapFilter[i].New(
i,
Attribute=name,
ReverseOrdering=True,
InPlace=False)
relabel.UpdateLargestPossibleRegion()
r = relabel.GetOutput()
d = {}
for i in _builtin_range(1, r.GetNumberOfLabelObjects() + 1):
lo = r.GetLabelObject(i)
v = lo.__getattribute__("Get" + name)()
l = d.get(v, [])
l.append(lo)
d[v] = l
return d
def number_of_objects(i):
"""Returns the number of objets in the image.
i: the input LabelImage
"""
import itk
i.UpdateLargestPossibleRegion()
i = itk.output(i)
return i.GetNumberOfLabelObjects()
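# Illustrative usage of the label-map helpers above (a sketch; `label_map`
# stands for an existing LabelMap image):
#
#   sizes = itk.attribute_list(label_map, 'PhysicalSize')
#   by_size = itk.attribute_dict(label_map, 'PhysicalSize')
#   n = itk.number_of_objects(label_map)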
def ipython_kw_matches(text):
"""Match named ITK object's named parameters"""
import IPython
import itk
import re
import inspect
import itkTemplate
regexp = re.compile(r'''
'.*?' | # single quoted strings or
".*?" | # double quoted strings or
\w+ | # identifier
\S # other characters
''', re.VERBOSE | re.DOTALL)
ip = IPython.get_ipython()
if "." in text: # a parameter cannot be dotted
return []
# 1. Find the nearest identifier that comes before an unclosed
# parenthesis e.g. for "foo (1+bar(x), pa", the candidate is "foo".
if ip.Completer.readline:
textUntilCursor = ip.Completer.readline.get_line_buffer()[:ip.Completer.readline.get_endidx()]
else:
# IPython >= 5.0.0, which is based on the Python Prompt Toolkit
textUntilCursor = ip.Completer.text_until_cursor
tokens = regexp.findall(textUntilCursor)
tokens.reverse()
iterTokens = iter(tokens)
openPar = 0
for token in iterTokens:
if token == ')':
openPar -= 1
elif token == '(':
openPar += 1
if openPar > 0:
# found the last unclosed parenthesis
break
else:
return []
# 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
ids = []
isId = re.compile(r'\w+$').match
while True:
try:
ids.append(next(iterTokens))
if not isId(ids[-1]):
ids.pop()
break
if next(iterTokens) != '.':
break
except StopIteration:
break
# lookup the candidate callable matches either using global_matches
# or attr_matches for dotted names
if len(ids) == 1:
callableMatches = ip.Completer.global_matches(ids[0])
else:
callableMatches = ip.Completer.attr_matches('.'.join(ids[::-1]))
argMatches = []
for callableMatch in callableMatches:
# drop the .New at the end, so we can search in the class members
if callableMatch.endswith(".New"):
callableMatch = callableMatch[:-4]
elif not re.findall('([A-Z])', callableMatch): # True if snake case
# Split at the last '.' occurrence
splitted = callableMatch.split('.')
namespace = splitted[:-1]
function_name = splitted[-1]
# Find corresponding object name
object_name = _snake_to_camel(function_name)
# Check that this object actually exists
try:
objectCallableMatch = ".".join(namespace + [object_name])
eval(objectCallableMatch, ip.Completer.namespace)
# Reconstruct full object name
callableMatch = objectCallableMatch
except AttributeError:
# callableMatch is not a snake case function with a
# corresponding object.
pass
try:
object = eval(callableMatch, ip.Completer.namespace)
if isinstance(object, itkTemplate.itkTemplate):
# this is a template - let's grab the first entry to search for
# the methods
object = object.values()[0]
namedArgs = []
isin = isinstance(object, itk.LightObject)
if inspect.isclass(object):
issub = issubclass(object, itk.LightObject)
if isin or (inspect.isclass(object) and issub):
namedArgs = [n[3:] for n in dir(object) if n.startswith("Set")]
except Exception as e:
print(e)
continue
for namedArg in namedArgs:
if namedArg.startswith(text):
argMatches.append(u"%s=" % namedArg)
return argMatches
# install progress callback and custom completer if we are in ipython
# interpreter
try:
import itkConfig
import IPython
if IPython.get_ipython():
IPython.get_ipython().Completer.matchers.insert(0, ipython_kw_matches)
# some cleanup
del itkConfig, IPython
except (ImportError, AttributeError):
# fail silently
pass
| 1 | 12,146 | This is a good change I think. It greatly helps reduce name conflicts. | InsightSoftwareConsortium-ITK | cpp |
@@ -46,6 +46,12 @@ type jetStreamCluster struct {
consumerResults *subscription
}
+// Used to guide placement of streams in clustered JetStream.
+type Placement struct {
+ Cluster string `json:"cluster"`
+ Tags []string `json:"tags,omitempty"`
+}
+
// Define types of the entry.
type entryOp uint8
| 1 | // Copyright 2020-2021 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"math/rand"
"path"
"sort"
"strings"
"sync/atomic"
"time"
"github.com/klauspost/compress/s2"
"github.com/nats-io/nuid"
)
// jetStreamCluster holds information about the meta group and stream assignments.
type jetStreamCluster struct {
// The metacontroller raftNode.
meta RaftNode
// For stream and consumer assignments. All servers will have this be the same.
// ACC -> STREAM -> Stream Assignment -> Consumers
streams map[string]map[string]*streamAssignment
// Server.
s *Server
// Internal client.
c *client
// Processing assignment results.
streamResults *subscription
consumerResults *subscription
}
// Define types of the entry.
type entryOp uint8
const (
// Meta ops.
assignStreamOp entryOp = iota
assignConsumerOp
removeStreamOp
removeConsumerOp
// Stream ops.
streamMsgOp
purgeStreamOp
deleteMsgOp
// Consumer ops
updateDeliveredOp
updateAcksOp
// Compressed consumer assignments.
assignCompressedConsumerOp
)
// raftGroups are controlled by the metagroup controller.
// The raftGroups will house streams and consumers.
type raftGroup struct {
Name string `json:"name"`
Peers []string `json:"peers"`
Storage StorageType `json:"store"`
Preferred string `json:"preferred,omitempty"`
// Internal
node RaftNode
}
// streamAssignment is what the meta controller uses to assign streams to peers.
type streamAssignment struct {
Client *ClientInfo `json:"client,omitempty"`
Created time.Time `json:"created"`
Config *StreamConfig `json:"stream"`
Group *raftGroup `json:"group"`
Sync string `json:"sync"`
Subject string `json:"subject"`
Reply string `json:"reply"`
Restore *StreamState `json:"restore_state,omitempty"`
// Internal
consumers map[string]*consumerAssignment
responded bool
err error
}
// consumerAssignment is what the meta controller uses to assign consumers to streams.
type consumerAssignment struct {
Client *ClientInfo `json:"client,omitempty"`
Created time.Time `json:"created"`
Name string `json:"name"`
Stream string `json:"stream"`
Config *ConsumerConfig `json:"consumer"`
Group *raftGroup `json:"group"`
Subject string `json:"subject"`
Reply string `json:"reply"`
State *ConsumerState `json:"state,omitempty"`
// Internal
responded bool
err error
}
// streamPurge is what the stream leader will replicate when purging a stream.
type streamPurge struct {
Client *ClientInfo `json:"client,omitempty"`
Stream string `json:"stream"`
Subject string `json:"subject"`
Reply string `json:"reply"`
}
// streamMsgDelete is what the stream leader will replicate when deleting a message.
type streamMsgDelete struct {
Client *ClientInfo `json:"client,omitempty"`
Stream string `json:"stream"`
Seq uint64 `json:"seq"`
Subject string `json:"subject"`
Reply string `json:"reply"`
}
const (
defaultStoreDirName = "_js_"
defaultMetaGroupName = "_meta_"
defaultMetaFSBlkSize = 64 * 1024
)
// For validating clusters.
func validateJetStreamOptions(o *Options) error {
// If not clustered no checks.
if !o.JetStream || o.Cluster.Port == 0 {
return nil
}
if o.ServerName == _EMPTY_ {
return fmt.Errorf("jetstream cluster requires `server_name` to be set")
}
if o.Cluster.Name == _EMPTY_ {
return fmt.Errorf("jetstream cluster requires `cluster.name` to be set")
}
return nil
}
func (s *Server) getJetStreamCluster() (*jetStream, *jetStreamCluster) {
s.mu.Lock()
shutdown := s.shutdown
js := s.js
s.mu.Unlock()
if shutdown || js == nil {
return nil, nil
}
js.mu.RLock()
cc := js.cluster
js.mu.RUnlock()
if cc == nil {
return nil, nil
}
return js, cc
}
func (s *Server) JetStreamIsClustered() bool {
js := s.getJetStream()
if js == nil {
return false
}
js.mu.RLock()
isClustered := js.cluster != nil
js.mu.RUnlock()
return isClustered
}
func (s *Server) JetStreamIsLeader() bool {
js := s.getJetStream()
if js == nil {
return false
}
js.mu.RLock()
defer js.mu.RUnlock()
return js.cluster.isLeader()
}
func (s *Server) JetStreamIsCurrent() bool {
js := s.getJetStream()
if js == nil {
return false
}
js.mu.RLock()
defer js.mu.RUnlock()
return js.cluster.isCurrent()
}
func (s *Server) JetStreamSnapshotMeta() error {
js := s.getJetStream()
if js == nil {
return ErrJetStreamNotEnabled
}
js.mu.RLock()
defer js.mu.RUnlock()
cc := js.cluster
if !cc.isLeader() {
return errNotLeader
}
return cc.meta.Snapshot(js.metaSnapshot())
}
func (s *Server) JetStreamStepdownStream(account, stream string) error {
js, cc := s.getJetStreamCluster()
if js == nil {
return ErrJetStreamNotEnabled
}
if cc == nil {
return ErrJetStreamNotClustered
}
// Grab account
acc, err := s.LookupAccount(account)
if err != nil {
return err
}
// Grab stream
mset, err := acc.LookupStream(stream)
if err != nil {
return err
}
if node := mset.raftNode(); node != nil && node.Leader() {
node.StepDown()
}
return nil
}
func (s *Server) JetStreamSnapshotStream(account, stream string) error {
js, cc := s.getJetStreamCluster()
if js == nil {
return ErrJetStreamNotEnabled
}
if cc == nil {
return ErrJetStreamNotClustered
}
// Grab account
acc, err := s.LookupAccount(account)
if err != nil {
return err
}
// Grab stream
mset, err := acc.LookupStream(stream)
if err != nil {
return err
}
mset.mu.RLock()
if !mset.node.Leader() {
mset.mu.RUnlock()
return ErrJetStreamNotLeader
}
n := mset.node
mset.mu.RUnlock()
n.PausePropose()
err = n.Snapshot(mset.snapshot())
n.ResumePropose()
return err
}
func (s *Server) JetStreamClusterPeers() []string {
js := s.getJetStream()
if js == nil {
return nil
}
js.mu.RLock()
defer js.mu.RUnlock()
cc := js.cluster
if !cc.isLeader() {
return nil
}
peers := cc.meta.Peers()
var nodes []string
for _, p := range peers {
nodes = append(nodes, p.ID)
}
return nodes
}
// Read lock should be held.
func (cc *jetStreamCluster) isLeader() bool {
if cc == nil {
// Non-clustered mode
return true
}
return cc.meta.Leader()
}
// isCurrent will determine if this node is a leader or an up to date follower.
// Read lock should be held.
func (cc *jetStreamCluster) isCurrent() bool {
if cc == nil {
// Non-clustered mode
return true
}
return cc.meta.Current()
}
// isStreamCurrent will determine if this node is a participant for the stream and if its up to date.
// Read lock should be held.
func (cc *jetStreamCluster) isStreamCurrent(account, stream string) bool {
if cc == nil {
// Non-clustered mode
return true
}
as := cc.streams[account]
if as == nil {
return false
}
sa := as[stream]
if sa == nil {
return false
}
rg := sa.Group
if rg == nil || rg.node == nil {
return false
}
isCurrent := rg.node.Current()
if isCurrent {
// Check if we are processing a snapshot and are catching up.
acc, err := cc.s.LookupAccount(account)
if err != nil {
return false
}
mset, err := acc.LookupStream(stream)
if err != nil {
return false
}
if mset.isCatchingUp() {
return false
}
}
return isCurrent
}
func (a *Account) getJetStreamFromAccount() (*Server, *jetStream, *jsAccount) {
a.mu.RLock()
jsa := a.js
a.mu.RUnlock()
if jsa == nil {
return nil, nil, nil
}
jsa.mu.RLock()
js := jsa.js
jsa.mu.RUnlock()
if js == nil {
return nil, nil, nil
}
js.mu.RLock()
s := js.srv
js.mu.RUnlock()
return s, js, jsa
}
func (s *Server) JetStreamIsStreamLeader(account, stream string) bool {
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return false
}
js.mu.RLock()
defer js.mu.RUnlock()
return cc.isStreamLeader(account, stream)
}
func (a *Account) JetStreamIsStreamLeader(stream string) bool {
s, js, jsa := a.getJetStreamFromAccount()
if s == nil || js == nil || jsa == nil {
return false
}
js.mu.RLock()
defer js.mu.RUnlock()
return js.cluster.isStreamLeader(a.Name, stream)
}
func (s *Server) JetStreamIsStreamCurrent(account, stream string) bool {
js, cc := s.getJetStreamCluster()
if js == nil {
return false
}
js.mu.RLock()
defer js.mu.RUnlock()
return cc.isStreamCurrent(account, stream)
}
func (a *Account) JetStreamIsConsumerLeader(stream, consumer string) bool {
s, js, jsa := a.getJetStreamFromAccount()
if s == nil || js == nil || jsa == nil {
return false
}
js.mu.RLock()
defer js.mu.RUnlock()
return js.cluster.isConsumerLeader(a.Name, stream, consumer)
}
func (s *Server) JetStreamIsConsumerLeader(account, stream, consumer string) bool {
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return false
}
js.mu.RLock()
defer js.mu.RUnlock()
return cc.isConsumerLeader(account, stream, consumer)
}
func (s *Server) enableJetStreamClustering() error {
if !s.isRunning() {
return nil
}
js := s.getJetStream()
if js == nil {
return ErrJetStreamNotEnabled
}
// Already set.
if js.cluster != nil {
return nil
}
s.Noticef("Starting JetStream cluster")
// We need to determine if we have a stable cluster name and expected number of servers.
s.Debugf("JetStream cluster checking for stable cluster name and peers")
if s.isClusterNameDynamic() || s.configuredRoutes() == 0 {
return errors.New("JetStream cluster requires cluster name and explicit routes")
}
return js.setupMetaGroup()
}
func (js *jetStream) setupMetaGroup() error {
s := js.srv
s.Noticef("Creating JetStream metadata controller")
// Setup our WAL for the metagroup.
sysAcc := s.SystemAccount()
stateDir := path.Join(js.config.StoreDir, sysAcc.Name, defaultStoreDirName, defaultMetaGroupName)
fs, bootstrap, err := newFileStore(
FileStoreConfig{StoreDir: stateDir, BlockSize: defaultMetaFSBlkSize},
StreamConfig{Name: defaultMetaGroupName, Storage: FileStorage},
)
if err != nil {
s.Errorf("Error creating filestore: %v", err)
return err
}
cfg := &RaftConfig{Name: defaultMetaGroupName, Store: stateDir, Log: fs}
if bootstrap {
s.Noticef("JetStream cluster bootstrapping")
// FIXME(dlc) - Make this real.
peers := s.activePeers()
s.Debugf("JetStream cluster initial peers: %+v", peers)
s.bootstrapRaftNode(cfg, peers, false)
} else {
s.Noticef("JetStream cluster recovering state")
}
// Start up our meta node.
n, err := s.startRaftNode(cfg)
if err != nil {
s.Warnf("Could not start metadata controller: %v", err)
return err
}
c := s.createInternalJetStreamClient()
sacc := s.SystemAccount()
js.mu.Lock()
defer js.mu.Unlock()
js.cluster = &jetStreamCluster{
meta: n,
streams: make(map[string]map[string]*streamAssignment),
s: s,
c: c,
}
c.registerWithAccount(sacc)
js.srv.startGoRoutine(js.monitorCluster)
return nil
}
func (js *jetStream) getMetaGroup() RaftNode {
js.mu.RLock()
defer js.mu.RUnlock()
if js.cluster == nil {
return nil
}
return js.cluster.meta
}
func (js *jetStream) server() *Server {
js.mu.RLock()
s := js.srv
js.mu.RUnlock()
return s
}
// Will respond iff we are a member and we know we have no leader.
func (js *jetStream) isGroupLeaderless(rg *raftGroup) bool {
if rg == nil {
return false
}
js.mu.RLock()
defer js.mu.RUnlock()
cc := js.cluster
// If we are not a member we can not say..
if !rg.isMember(cc.meta.ID()) {
return false
}
// Single peer groups always have a leader if we are here.
if rg.node == nil {
return false
}
return rg.node.GroupLeader() == _EMPTY_
}
func (s *Server) JetStreamIsStreamAssigned(account, stream string) bool {
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return false
}
acc, _ := s.LookupAccount(account)
if acc == nil {
return false
}
return cc.isStreamAssigned(acc, stream)
}
// streamAssigned informs us if this server has this stream assigned.
func (jsa *jsAccount) streamAssigned(stream string) bool {
jsa.mu.RLock()
js, acc := jsa.js, jsa.account
jsa.mu.RUnlock()
if js == nil {
return false
}
js.mu.RLock()
assigned := js.cluster.isStreamAssigned(acc, stream)
js.mu.RUnlock()
return assigned
}
// Read lock should be held.
func (cc *jetStreamCluster) isStreamAssigned(a *Account, stream string) bool {
// Non-clustered mode always returns true.
if cc == nil {
return true
}
as := cc.streams[a.Name]
if as == nil {
return false
}
sa := as[stream]
if sa == nil {
return false
}
rg := sa.Group
if rg == nil {
return false
}
// Check if we are the leader of this raftGroup assigned to the stream.
ourID := cc.meta.ID()
for _, peer := range rg.Peers {
if peer == ourID {
return true
}
}
return false
}
// Read lock should be held.
func (cc *jetStreamCluster) isStreamLeader(account, stream string) bool {
// Non-clustered mode always returns true.
if cc == nil {
return true
}
var sa *streamAssignment
if as := cc.streams[account]; as != nil {
sa = as[stream]
}
if sa == nil {
return false
}
rg := sa.Group
if rg == nil {
return false
}
// Check if we are the leader of this raftGroup assigned to the stream.
ourID := cc.meta.ID()
for _, peer := range rg.Peers {
if peer == ourID {
if len(rg.Peers) == 1 || rg.node != nil && rg.node.Leader() {
return true
}
}
}
return false
}
// Read lock should be held.
func (cc *jetStreamCluster) isConsumerLeader(account, stream, consumer string) bool {
// Non-clustered mode always returns true.
if cc == nil {
return true
}
var sa *streamAssignment
if as := cc.streams[account]; as != nil {
sa = as[stream]
}
if sa == nil {
return false
}
// Check if we are the leader of this raftGroup assigned to this consumer.
ourID := cc.meta.ID()
for _, ca := range sa.consumers {
rg := ca.Group
for _, peer := range rg.Peers {
if peer == ourID {
if len(rg.Peers) == 1 || (rg.node != nil && rg.node.Leader()) {
return true
}
}
}
}
return false
}
func (js *jetStream) monitorCluster() {
const (
compactInterval = 5 * time.Minute
compactSizeLimit = 64 * 1024
)
s, cc, n := js.server(), js.cluster, js.getMetaGroup()
qch, lch, ach := n.QuitC(), n.LeadChangeC(), n.ApplyC()
defer s.grWG.Done()
s.Debugf("Starting metadata monitor")
defer s.Debugf("Exiting metadata monitor")
t := time.NewTicker(compactInterval)
defer t.Stop()
isLeader := cc.isLeader()
var lastSnap []byte
var snapout bool
// Only to be called from leader.
attemptSnapshot := func() {
if snapout {
return
}
n.PausePropose()
defer n.ResumePropose()
if snap := js.metaSnapshot(); !bytes.Equal(lastSnap, snap) {
if err := n.Snapshot(snap); err == nil {
lastSnap = snap
snapout = true
}
}
}
isRecovering := true
for {
select {
case <-s.quitCh:
return
case <-qch:
return
case ce := <-ach:
if ce == nil {
// Signals we have replayed all of our metadata.
isRecovering = false
s.Debugf("Recovered JetStream cluster metadata")
continue
}
// FIXME(dlc) - Deal with errors.
if hadSnapshot, err := js.applyMetaEntries(ce.Entries, isRecovering); err == nil {
n.Applied(ce.Index)
if hadSnapshot {
snapout = false
n.Compact(ce.Index)
}
}
if isLeader && !snapout {
_, b := n.Size()
if b > compactSizeLimit {
attemptSnapshot()
}
}
case isLeader = <-lch:
js.processLeaderChange(isLeader)
case <-t.C:
if isLeader && !snapout {
attemptSnapshot()
}
}
}
}
// Represents our stable meta state that we can write out.
type writeableStreamAssignment struct {
Client *ClientInfo `json:"client,omitempty"`
Created time.Time `json:"created"`
Config *StreamConfig `json:"stream"`
Group *raftGroup `json:"group"`
Sync string `json:"sync"`
Consumers []*consumerAssignment
}
func (js *jetStream) metaSnapshot() []byte {
var streams []writeableStreamAssignment
js.mu.RLock()
cc := js.cluster
for _, asa := range cc.streams {
for _, sa := range asa {
wsa := writeableStreamAssignment{
Client: sa.Client,
Created: sa.Created,
Config: sa.Config,
Group: sa.Group,
Sync: sa.Sync,
}
for _, ca := range sa.consumers {
wsa.Consumers = append(wsa.Consumers, ca)
}
streams = append(streams, wsa)
}
}
js.mu.RUnlock()
if len(streams) == 0 {
return nil
}
b, _ := json.Marshal(streams)
return s2.EncodeBetter(nil, b)
}
func (js *jetStream) applyMetaSnapshot(buf []byte, isRecovering bool) error {
jse, err := s2.Decode(nil, buf)
if err != nil {
return err
}
var wsas []writeableStreamAssignment
if err = json.Unmarshal(jse, &wsas); err != nil {
return err
}
// Build our new version here outside of js.
streams := make(map[string]map[string]*streamAssignment)
for _, wsa := range wsas {
as := streams[wsa.Client.Account]
if as == nil {
as = make(map[string]*streamAssignment)
streams[wsa.Client.Account] = as
}
sa := &streamAssignment{Client: wsa.Client, Created: wsa.Created, Config: wsa.Config, Group: wsa.Group, Sync: wsa.Sync}
if len(wsa.Consumers) > 0 {
sa.consumers = make(map[string]*consumerAssignment)
for _, ca := range wsa.Consumers {
sa.consumers[ca.Name] = ca
}
}
as[wsa.Config.Name] = sa
}
js.mu.Lock()
cc := js.cluster
var saAdd, saDel, saChk []*streamAssignment
// Walk through the old list to generate the delete list.
for account, asa := range cc.streams {
nasa := streams[account]
for sn, sa := range asa {
if nsa := nasa[sn]; nsa == nil {
saDel = append(saDel, sa)
} else {
saChk = append(saChk, nsa)
}
}
}
// Walk through the new list to generate the add list.
for account, nasa := range streams {
asa := cc.streams[account]
for sn, sa := range nasa {
if asa[sn] == nil {
saAdd = append(saAdd, sa)
}
}
}
// Now walk the ones to check and process consumers.
var caAdd, caDel []*consumerAssignment
for _, sa := range saChk {
if osa := js.streamAssignment(sa.Client.Account, sa.Config.Name); osa != nil {
for _, ca := range osa.consumers {
if sa.consumers[ca.Name] == nil {
caDel = append(caDel, ca)
} else {
caAdd = append(caAdd, ca)
}
}
}
}
js.mu.Unlock()
// Do removals first.
for _, sa := range saDel {
if isRecovering {
js.setStreamAssignmentResponded(sa)
}
js.processStreamRemoval(sa)
}
// Now do add for the streams. Also add in all consumers.
for _, sa := range saAdd {
if isRecovering {
js.setStreamAssignmentResponded(sa)
}
js.processStreamAssignment(sa)
// We can simply add the consumers.
for _, ca := range sa.consumers {
if isRecovering {
js.setConsumerAssignmentResponded(ca)
}
js.processConsumerAssignment(ca)
}
}
// Now do the deltas for existing stream's consumers.
for _, ca := range caDel {
if isRecovering {
js.setConsumerAssignmentResponded(ca)
}
js.processConsumerRemoval(ca)
}
for _, ca := range caAdd {
if isRecovering {
js.setConsumerAssignmentResponded(ca)
}
js.processConsumerAssignment(ca)
}
return nil
}
// Called on recovery to make sure we do not process like the original.
func (js *jetStream) setStreamAssignmentResponded(sa *streamAssignment) {
js.mu.Lock()
defer js.mu.Unlock()
sa.responded = true
sa.Restore = nil
}
// Called on recovery to make sure we do not process like the original.
func (js *jetStream) setConsumerAssignmentResponded(ca *consumerAssignment) {
js.mu.Lock()
defer js.mu.Unlock()
ca.responded = true
}
// Returns a copy of the stream assignment with the group swapped out so it can be encoded.
// Lock should be held.
func (sa *streamAssignment) copyGroup() *streamAssignment {
csa, cg := *sa, *sa.Group
csa.Group = &cg
csa.Group.Peers = append(sa.Group.Peers[:0:0], sa.Group.Peers...)
return &csa
}
func (js *jetStream) processRemovePeer(peer string) {
js.mu.Lock()
defer js.mu.Unlock()
cc := js.cluster
// Only leader should process and re-assign mappings.
if !cc.isLeader() {
return
}
// Grab our nodes.
// FIXME(dlc) - Make sure these are live.
// Need to search for this peer in our stream assignments for potential remapping.
for _, as := range cc.streams {
for _, sa := range as {
if sa.Group.isMember(peer) {
js.removePeerFromStream(sa, peer)
}
}
}
}
// Assumes all checks have already been done.
// Lock should be held.
func (js *jetStream) removePeerFromStream(sa *streamAssignment, peer string) {
s, cc := js.srv, js.cluster
csa := sa.copyGroup()
if !cc.remapStreamAssignment(csa, peer) {
s.Warnf("JetStream cluster could not remap stream '%s > %s'", sa.Client.Account, sa.Config.Name)
}
// Send our proposal for this csa. Also use same group definition for all the consumers as well.
cc.meta.Propose(encodeAddStreamAssignment(csa))
rg := csa.Group
for _, ca := range sa.consumers {
cca := *ca
cca.Group.Peers = rg.Peers
cc.meta.Propose(encodeAddConsumerAssignment(&cca))
}
}
func (js *jetStream) applyMetaEntries(entries []*Entry, isRecovering bool) (bool, error) {
var didSnap bool
for _, e := range entries {
if e.Type == EntrySnapshot {
js.applyMetaSnapshot(e.Data, isRecovering)
didSnap = true
} else if e.Type == EntryRemovePeer {
js.processRemovePeer(string(e.Data))
} else {
buf := e.Data
switch entryOp(buf[0]) {
case assignStreamOp:
sa, err := decodeStreamAssignment(buf[1:])
if err != nil {
js.srv.Errorf("JetStream cluster failed to decode stream assignment: %q", buf[1:])
return didSnap, err
}
if isRecovering {
js.setStreamAssignmentResponded(sa)
}
js.processStreamAssignment(sa)
case removeStreamOp:
sa, err := decodeStreamAssignment(buf[1:])
if err != nil {
js.srv.Errorf("JetStream cluster failed to decode stream assignment: %q", buf[1:])
return didSnap, err
}
if isRecovering {
js.setStreamAssignmentResponded(sa)
}
js.processStreamRemoval(sa)
case assignConsumerOp:
ca, err := decodeConsumerAssignment(buf[1:])
if err != nil {
js.srv.Errorf("JetStream cluster failed to decode consumer assigment: %q", buf[1:])
return didSnap, err
}
if isRecovering {
js.setConsumerAssignmentResponded(ca)
}
js.processConsumerAssignment(ca)
case assignCompressedConsumerOp:
ca, err := decodeConsumerAssignmentCompressed(buf[1:])
if err != nil {
js.srv.Errorf("JetStream cluster failed to decode compressed consumer assigment: %q", buf[1:])
return didSnap, err
}
if isRecovering {
js.setConsumerAssignmentResponded(ca)
}
js.processConsumerAssignment(ca)
case removeConsumerOp:
ca, err := decodeConsumerAssignment(buf[1:])
if err != nil {
js.srv.Errorf("JetStream cluster failed to decode consumer assigment: %q", buf[1:])
return didSnap, err
}
if isRecovering {
js.setConsumerAssignmentResponded(ca)
}
js.processConsumerRemoval(ca)
default:
panic("JetStream Cluster Unknown meta entry op type")
}
}
}
return didSnap, nil
}
func (rg *raftGroup) isMember(id string) bool {
if rg == nil {
return false
}
for _, peer := range rg.Peers {
if peer == id {
return true
}
}
return false
}
func (rg *raftGroup) setPreferred() {
if rg == nil || len(rg.Peers) == 0 {
return
}
if len(rg.Peers) == 1 {
rg.Preferred = rg.Peers[0]
} else {
// For now just randomly select a peer for the preferred.
pi := rand.Int31n(int32(len(rg.Peers)))
rg.Preferred = rg.Peers[pi]
}
}
// createRaftGroup is called to spin up this raft group if needed.
func (js *jetStream) createRaftGroup(rg *raftGroup) error {
js.mu.Lock()
defer js.mu.Unlock()
s, cc := js.srv, js.cluster
// If this is a single peer raft group or we are not a member return.
if len(rg.Peers) <= 1 || !rg.isMember(cc.meta.ID()) {
// Nothing to do here.
return nil
}
// We already have this assigned.
if node := s.lookupRaftNode(rg.Name); node != nil {
s.Debugf("JetStream cluster already has raft group %q assigned", rg.Name)
rg.node = node
return nil
}
s.Debugf("JetStream cluster creating raft group:%+v", rg)
sysAcc := s.SystemAccount()
if sysAcc == nil {
s.Debugf("JetStream cluster detected shutdown processing raft group: %+v", rg)
return errors.New("shutting down")
}
stateDir := path.Join(js.config.StoreDir, sysAcc.Name, defaultStoreDirName, rg.Name)
fs, bootstrap, err := newFileStore(
FileStoreConfig{StoreDir: stateDir, BlockSize: 32 * 1024 * 1024},
StreamConfig{Name: rg.Name, Storage: FileStorage},
)
if err != nil {
s.Errorf("Error creating filestore: %v", err)
return err
}
cfg := &RaftConfig{Name: rg.Name, Store: stateDir, Log: fs}
if bootstrap {
s.bootstrapRaftNode(cfg, rg.Peers, true)
}
n, err := s.startRaftNode(cfg)
if err != nil {
s.Debugf("Error creating raft group: %v", err)
return err
}
rg.node = n
// See if we are preferred and should start campaign immediately.
if n.ID() == rg.Preferred {
n.Campaign()
}
return nil
}
func (mset *Stream) raftGroup() *raftGroup {
if mset == nil {
return nil
}
mset.mu.RLock()
defer mset.mu.RUnlock()
if mset.sa == nil {
return nil
}
return mset.sa.Group
}
func (mset *Stream) raftNode() RaftNode {
if mset == nil {
return nil
}
mset.mu.RLock()
defer mset.mu.RUnlock()
return mset.node
}
// Monitor our stream node for this stream.
func (js *jetStream) monitorStream(mset *Stream, sa *streamAssignment) {
s, cc, n := js.server(), js.cluster, sa.Group.node
defer s.grWG.Done()
if n == nil {
s.Warnf("No RAFT group for '%s > %s", sa.Client.Account, sa.Config.Name)
return
}
qch, lch, ach := n.QuitC(), n.LeadChangeC(), n.ApplyC()
const (
compactInterval = 10 * time.Minute
compactSizeLimit = 64 * 1024 * 1024
compactMinWait = 5 * time.Second
)
s.Debugf("Starting stream monitor for '%s > %s'", sa.Client.Account, sa.Config.Name)
defer s.Debugf("Exiting stream monitor for '%s > %s'", sa.Client.Account, sa.Config.Name)
t := time.NewTicker(compactInterval)
defer t.Stop()
js.mu.RLock()
isLeader := cc.isStreamLeader(sa.Client.Account, sa.Config.Name)
isRestore := sa.Restore != nil
js.mu.RUnlock()
acc, err := s.LookupAccount(sa.Client.Account)
if err != nil {
s.Warnf("Could not retrieve account for stream '%s > %s", sa.Client.Account, sa.Config.Name)
return
}
var (
lastSnap []byte
snapout bool
lastFailed time.Time
)
// Only to be called from leader.
attemptSnapshot := func() {
if mset == nil || isRestore || snapout {
return
}
n.PausePropose()
defer n.ResumePropose()
if snap := mset.snapshot(); !bytes.Equal(lastSnap, snap) {
if !lastFailed.IsZero() && time.Since(lastFailed) <= compactMinWait {
s.Debugf("Stream compaction delayed")
return
}
if err := n.Snapshot(snap); err != nil {
lastFailed = time.Now()
} else {
lastSnap = snap
snapout = true
lastFailed = time.Time{}
}
}
}
// We will establish a restoreDoneCh no matter what. Will never be triggered unless
// we replace with the restore chan.
restoreDoneCh := make(<-chan error)
for {
select {
case err := <-restoreDoneCh:
// We have completed a restore from snapshot on this server. The stream assignment has
// already been assigned but the replicas will need to catch up out of band. Consumers
// will need to be assigned by forwarding the proposal and stamping the initial state.
s.Debugf("Stream restore for '%s > %s' completed", sa.Client.Account, sa.Config.Name)
if err != nil {
s.Debugf("Stream restore failed: %v", err)
}
isRestore = false
sa.Restore = nil
// If we were successful, look up our stream now.
if err == nil {
mset, err = acc.LookupStream(sa.Config.Name)
if mset != nil {
mset.setStreamAssignment(sa)
}
}
if err != nil {
if mset != nil {
mset.Delete()
}
js.mu.Lock()
sa.err = err
sa.responded = true
if n != nil {
n.Delete()
}
result := &streamAssignmentResult{
Account: sa.Client.Account,
Stream: sa.Config.Name,
Restore: &JSApiStreamRestoreResponse{ApiResponse: ApiResponse{Type: JSApiStreamRestoreResponseType}},
}
result.Restore.Error = jsError(sa.err)
js.mu.Unlock()
// Send response to the metadata leader. They will forward to the user as needed.
b, _ := json.Marshal(result) // Avoids auto-processing and doing fancy json with newlines.
s.sendInternalMsgLocked(streamAssignmentSubj, _EMPTY_, nil, b)
return
}
if !isLeader {
panic("Finished restore but not leader")
}
js.processStreamLeaderChange(mset, isLeader)
attemptSnapshot()
// Check to see if we have restored consumers here.
// These are not currently assigned so we will need to do so here.
if consumers := mset.Consumers(); len(consumers) > 0 {
for _, o := range mset.Consumers() {
rg := cc.createGroupForConsumer(sa)
// Pick a preferred leader.
rg.setPreferred()
name, cfg := o.Name(), o.Config()
// Place our initial state here as well for assignment distribution.
ca := &consumerAssignment{
Group: rg,
Stream: sa.Config.Name,
Name: name,
Config: &cfg,
Client: sa.Client,
Created: o.Created(),
State: o.readStoreState(),
}
// We make these compressed in case state is complex.
addEntry := encodeAddConsumerAssignmentCompressed(ca)
cc.meta.ForwardProposal(addEntry)
// Check to make sure we see the assignment.
go func() {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range ticker.C {
js.mu.RLock()
ca, meta := js.consumerAssignment(ca.Client.Account, sa.Config.Name, name), cc.meta
js.mu.RUnlock()
if ca == nil {
s.Warnf("Consumer assignment has not been assigned, retrying")
meta.ForwardProposal(addEntry)
} else {
return
}
}
}()
}
}
case <-s.quitCh:
return
case <-qch:
return
case ce := <-ach:
// No special processing needed for when we are caught up on restart.
if ce == nil {
continue
}
if mset == nil && isRestore {
isRestore = false
sa.Restore = nil
if mset, err = acc.addStream(sa.Config, nil, sa); err != nil {
s.Warnf("Could not add stream after restore '%s > %s': %v", sa.Client.Account, sa.Config.Name, err)
return
}
}
// Apply our entries.
if hadSnapshot, err := js.applyStreamEntries(mset, ce); err == nil {
n.Applied(ce.Index)
if hadSnapshot {
snapout = false
}
} else {
s.Warnf("Error applying entries to '%s > %s'", sa.Client.Account, sa.Config.Name)
}
if isLeader && !snapout {
if _, b := n.Size(); b > compactSizeLimit {
attemptSnapshot()
}
}
case isLeader = <-lch:
if isLeader && isRestore {
acc, _ := s.LookupAccount(sa.Client.Account)
restoreDoneCh = s.processStreamRestore(sa.Client, acc, sa.Config.Name, _EMPTY_, sa.Reply, _EMPTY_)
} else {
if !isLeader && n.GroupLeader() != noLeader {
js.setStreamAssignmentResponded(sa)
}
js.processStreamLeaderChange(mset, isLeader)
}
case <-t.C:
if isLeader {
attemptSnapshot()
}
}
}
}
func (js *jetStream) applyStreamEntries(mset *Stream, ce *CommittedEntry) (bool, error) {
var didSnap bool
for _, e := range ce.Entries {
if e.Type == EntrySnapshot {
mset.processSnapshot(e.Data)
didSnap = true
} else if e.Type == EntryRemovePeer {
js.mu.RLock()
ourID := js.cluster.meta.ID()
js.mu.RUnlock()
if peer := string(e.Data); peer == ourID {
mset.stop(true, false)
}
return false, nil
} else {
buf := e.Data
switch entryOp(buf[0]) {
case streamMsgOp:
subject, reply, hdr, msg, lseq, ts, err := decodeStreamMsg(buf[1:])
if err != nil {
panic(err.Error())
}
// Skip by hand here since the first msg is a special case.
// The reason is that sequence is unsigned, so for lseq of 0
// the stream's lseq would have to be -1.
if lseq == 0 && mset.lastSeq() != 0 {
continue
}
if err := mset.processJetStreamMsg(subject, reply, hdr, msg, lseq, ts); err != nil {
js.srv.Debugf("Got error processing JetStream msg: %v", err)
}
case deleteMsgOp:
md, err := decodeMsgDelete(buf[1:])
if err != nil {
panic(err.Error())
}
s, cc := js.server(), js.cluster
removed, err := mset.EraseMsg(md.Seq)
if err != nil {
s.Warnf("JetStream cluster failed to delete msg %d from stream %q for account %q: %v", md.Seq, md.Stream, md.Client.Account, err)
}
js.mu.RLock()
isLeader := cc.isStreamLeader(md.Client.Account, md.Stream)
js.mu.RUnlock()
if isLeader {
var resp = JSApiMsgDeleteResponse{ApiResponse: ApiResponse{Type: JSApiMsgDeleteResponseType}}
if err != nil {
resp.Error = jsError(err)
s.sendAPIErrResponse(md.Client, mset.account(), md.Subject, md.Reply, _EMPTY_, s.jsonResponse(resp))
} else if !removed {
resp.Error = &ApiError{Code: 400, Description: fmt.Sprintf("sequence [%d] not found", md.Seq)}
s.sendAPIErrResponse(md.Client, mset.account(), md.Subject, md.Reply, _EMPTY_, s.jsonResponse(resp))
} else {
resp.Success = true
s.sendAPIResponse(md.Client, mset.account(), md.Subject, md.Reply, _EMPTY_, s.jsonResponse(resp))
}
}
case purgeStreamOp:
sp, err := decodeStreamPurge(buf[1:])
if err != nil {
panic(err.Error())
}
s := js.server()
purged, err := mset.Purge()
if err != nil {
s.Warnf("JetStream cluster failed to purge stream %q for account %q: %v", sp.Stream, sp.Client.Account, err)
}
js.mu.RLock()
isLeader := js.cluster.isStreamLeader(sp.Client.Account, sp.Stream)
js.mu.RUnlock()
if isLeader {
var resp = JSApiStreamPurgeResponse{ApiResponse: ApiResponse{Type: JSApiStreamPurgeResponseType}}
if err != nil {
resp.Error = jsError(err)
s.sendAPIErrResponse(sp.Client, mset.account(), sp.Subject, sp.Reply, _EMPTY_, s.jsonResponse(resp))
} else {
resp.Purged = purged
resp.Success = true
s.sendAPIResponse(sp.Client, mset.account(), sp.Subject, sp.Reply, _EMPTY_, s.jsonResponse(resp))
}
}
default:
panic("JetStream Cluster Unknown group entry op type!")
}
}
}
return didSnap, nil
}
// Returns the PeerInfo for all replicas of a raft node. This is different than node.Peers()
// and is used for external facing advisories.
func (s *Server) replicas(node RaftNode) []*PeerInfo {
now := time.Now()
var replicas []*PeerInfo
for _, rp := range node.Peers() {
pi := &PeerInfo{Name: s.serverNameForNode(rp.ID), Current: rp.Current, Active: now.Sub(rp.Last)}
replicas = append(replicas, pi)
}
return replicas
}
// Will check our node peers and see if we should remove a peer.
func (js *jetStream) checkPeers(rg *raftGroup) {
js.mu.Lock()
defer js.mu.Unlock()
// FIXME(dlc) - Single replicas?
if rg == nil || rg.node == nil {
return
}
for _, peer := range rg.node.Peers() {
if !rg.isMember(peer.ID) {
rg.node.ProposeRemovePeer(peer.ID)
}
}
}
func (js *jetStream) processStreamLeaderChange(mset *Stream, isLeader bool) {
sa := mset.streamAssignment()
if sa == nil {
return
}
js.mu.Lock()
s, account, err := js.srv, sa.Client.Account, sa.err
client, subject, reply := sa.Client, sa.Subject, sa.Reply
hasResponded := sa.responded
sa.responded = true
js.mu.Unlock()
stream := mset.Name()
if isLeader {
s.Noticef("JetStream cluster new stream leader for '%s > %s'", sa.Client.Account, stream)
s.sendStreamLeaderElectAdvisory(mset)
// Check for peer removal and process here if needed.
js.checkPeers(sa.Group)
} else {
// We are stepping down.
// Make sure if we are doing so because we have lost quorum that we send the appropriate advisories.
if node := mset.raftNode(); node != nil && !node.Quorum() && time.Since(node.Created()) > time.Second {
s.sendStreamLostQuorumAdvisory(mset)
}
}
// Tell stream to switch leader status.
mset.setLeader(isLeader)
if !isLeader || hasResponded {
return
}
acc, _ := s.LookupAccount(account)
if acc == nil {
return
}
// Send our response.
var resp = JSApiStreamCreateResponse{ApiResponse: ApiResponse{Type: JSApiStreamCreateResponseType}}
if err != nil {
resp.Error = jsError(err)
s.sendAPIErrResponse(client, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp))
} else {
resp.StreamInfo = &StreamInfo{Created: mset.Created(), State: mset.State(), Config: mset.Config(), Cluster: js.clusterInfo(mset.raftGroup())}
s.sendAPIResponse(client, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp))
if node := mset.raftNode(); node != nil {
mset.sendCreateAdvisory()
}
}
}
// Fixed value ok for now.
const lostQuorumAdvInterval = 10 * time.Second
// Determines if we should send lost quorum advisory. We throttle these after the first one.
func (mset *Stream) shouldSendLostQuorum() bool {
mset.mu.Lock()
defer mset.mu.Unlock()
if time.Since(mset.lqsent) >= lostQuorumAdvInterval {
mset.lqsent = time.Now()
return true
}
return false
}
func (s *Server) sendStreamLostQuorumAdvisory(mset *Stream) {
if mset == nil {
return
}
node, stream, acc := mset.raftNode(), mset.Name(), mset.account()
if node == nil {
return
}
if !mset.shouldSendLostQuorum() {
return
}
s.Warnf("JetStream cluster stream '%s > %s' has NO quorum, stalled.", acc.GetName(), stream)
subj := JSAdvisoryStreamQuorumLostPre + "." + stream
adv := &JSStreamQuorumLostAdvisory{
TypedEvent: TypedEvent{
Type: JSStreamQuorumLostAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: stream,
Replicas: s.replicas(node),
}
// Send to the user's account if not the system account.
if acc != s.SystemAccount() {
s.publishAdvisory(acc, subj, adv)
}
// Now do system level one. Place account info in adv, and nil account means system.
adv.Account = acc.GetName()
s.publishAdvisory(nil, subj, adv)
}
func (s *Server) sendStreamLeaderElectAdvisory(mset *Stream) {
if mset == nil {
return
}
node, stream, acc := mset.raftNode(), mset.Name(), mset.account()
if node == nil {
return
}
subj := JSAdvisoryStreamLeaderElectedPre + "." + stream
adv := &JSStreamLeaderElectedAdvisory{
TypedEvent: TypedEvent{
Type: JSStreamLeaderElectedAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: stream,
Leader: s.serverNameForNode(node.GroupLeader()),
Replicas: s.replicas(node),
}
// Send to the user's account if not the system account.
if acc != s.SystemAccount() {
s.publishAdvisory(acc, subj, adv)
}
// Now do system level one. Place account info in adv, and nil account means system.
adv.Account = acc.GetName()
s.publishAdvisory(nil, subj, adv)
}
// Will lookup a stream assignment.
// Lock should be held.
func (js *jetStream) streamAssignment(account, stream string) (sa *streamAssignment) {
cc := js.cluster
if cc == nil {
return nil
}
if as := cc.streams[account]; as != nil {
sa = as[stream]
}
return sa
}
// processStreamAssignment is called when followers have replicated an assignment.
func (js *jetStream) processStreamAssignment(sa *streamAssignment) {
js.mu.RLock()
s, cc := js.srv, js.cluster
js.mu.RUnlock()
if s == nil || cc == nil {
// TODO(dlc) - debug at least
return
}
acc, err := s.LookupAccount(sa.Client.Account)
if err != nil {
// TODO(dlc) - log error
return
}
stream := sa.Config.Name
js.mu.Lock()
ourID := cc.meta.ID()
var isMember bool
if sa.Group != nil && cc.meta != nil {
isMember = sa.Group.isMember(ourID)
}
accStreams := cc.streams[acc.Name]
if accStreams == nil {
accStreams = make(map[string]*streamAssignment)
} else if osa := accStreams[stream]; osa != nil {
// Copy over private existing state from former SA.
sa.Group.node = osa.Group.node
sa.consumers = osa.consumers
sa.responded = osa.responded
sa.err = osa.err
}
// Update our state.
accStreams[stream] = sa
cc.streams[acc.Name] = accStreams
js.mu.Unlock()
// Check if this is for us..
if isMember {
js.processClusterCreateStream(acc, sa)
} else if mset, _ := acc.LookupStream(sa.Config.Name); mset != nil {
// We have one here even though we are not a member. This can happen on re-assignment.
s.Debugf("JetStream removing stream '%s > %s' from this server, re-assigned", sa.Client.Account, sa.Config.Name)
if node := mset.raftNode(); node != nil {
node.ProposeRemovePeer(ourID)
}
mset.stop(true, false)
}
}
// processClusterCreateStream is called when we have a stream assignment that
// has been committed and this server is a member of the peer group.
func (js *jetStream) processClusterCreateStream(acc *Account, sa *streamAssignment) {
if sa == nil {
return
}
js.mu.RLock()
s, rg := js.srv, sa.Group
alreadyRunning := rg.node != nil
js.mu.RUnlock()
// Process the raft group and make sure it's running if needed.
err := js.createRaftGroup(rg)
// If we are restoring, create the stream if we are R>1 and not the preferred who handles the
// receipt of the snapshot itself.
shouldCreate := true
if sa.Restore != nil {
if len(rg.Peers) == 1 || (rg.node != nil && rg.node.ID() == rg.Preferred) {
shouldCreate = false
}
}
// Our stream.
var mset *Stream
// Process here if not restoring or not the leader.
if shouldCreate && err == nil {
// Go ahead and create or update the stream.
mset, err = acc.LookupStream(sa.Config.Name)
if err == nil && mset != nil {
mset.setStreamAssignment(sa)
if err := mset.Update(sa.Config); err != nil {
s.Warnf("JetStream cluster error updating stream %q for account %q: %v", sa.Config.Name, acc.Name, err)
}
} else if err == ErrJetStreamStreamNotFound {
// Add in the stream here.
mset, err = acc.addStream(sa.Config, nil, sa)
}
if mset != nil {
mset.setCreated(sa.Created)
}
}
// This is an error condition.
if err != nil {
s.Debugf("Stream create failed for '%s > %s': %v", sa.Client.Account, sa.Config.Name, err)
js.mu.Lock()
sa.err = err
sa.responded = true
if rg.node != nil {
rg.node.Delete()
}
result := &streamAssignmentResult{
Account: sa.Client.Account,
Stream: sa.Config.Name,
Response: &JSApiStreamCreateResponse{ApiResponse: ApiResponse{Type: JSApiStreamCreateResponseType}},
}
result.Response.Error = jsError(err)
js.mu.Unlock()
// Send response to the metadata leader. They will forward to the user as needed.
b, _ := json.Marshal(result) // Avoids auto-processing and doing fancy json with newlines.
s.sendInternalMsgLocked(streamAssignmentSubj, _EMPTY_, nil, b)
return
}
// Start our monitoring routine.
if rg.node != nil {
if !alreadyRunning {
s.startGoRoutine(func() { js.monitorStream(mset, sa) })
}
} else {
// Single replica stream, process manually here.
// If we are restoring, process that first.
if sa.Restore != nil {
// We are restoring a stream here.
restoreDoneCh := s.processStreamRestore(sa.Client, acc, sa.Config.Name, _EMPTY_, sa.Reply, _EMPTY_)
s.startGoRoutine(func() {
defer s.grWG.Done()
select {
case err := <-restoreDoneCh:
if err == nil {
mset, err = acc.LookupStream(sa.Config.Name)
if mset != nil {
mset.setStreamAssignment(sa)
mset.setCreated(sa.Created)
}
}
if err != nil {
if mset != nil {
mset.Delete()
}
js.mu.Lock()
sa.err = err
sa.responded = true
result := &streamAssignmentResult{
Account: sa.Client.Account,
Stream: sa.Config.Name,
Restore: &JSApiStreamRestoreResponse{ApiResponse: ApiResponse{Type: JSApiStreamRestoreResponseType}},
}
result.Restore.Error = jsError(sa.err)
js.mu.Unlock()
// Send response to the metadata leader. They will forward to the user as needed.
b, _ := json.Marshal(result) // Avoids auto-processing and doing fancy json with newlines.
s.sendInternalMsgLocked(streamAssignmentSubj, _EMPTY_, nil, b)
return
}
js.processStreamLeaderChange(mset, true)
// Check to see if we have restored consumers here.
// These are not currently assigned so we will need to do so here.
if consumers := mset.Consumers(); len(consumers) > 0 {
js.mu.RLock()
cc := js.cluster
js.mu.RUnlock()
for _, o := range consumers {
rg := cc.createGroupForConsumer(sa)
name, cfg := o.Name(), o.Config()
// Place our initial state here as well for assignment distribution.
ca := &consumerAssignment{
Group: rg,
Stream: sa.Config.Name,
Name: name,
Config: &cfg,
Client: sa.Client,
Created: o.Created(),
}
addEntry := encodeAddConsumerAssignment(ca)
cc.meta.ForwardProposal(addEntry)
// Check to make sure we see the assignment.
go func() {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range ticker.C {
js.mu.RLock()
ca, meta := js.consumerAssignment(ca.Client.Account, sa.Config.Name, name), cc.meta
js.mu.RUnlock()
if ca == nil {
s.Warnf("Consumer assignment has not been assigned, retrying")
meta.ForwardProposal(addEntry)
} else {
return
}
}
}()
}
}
case <-s.quitCh:
return
}
})
} else {
js.processStreamLeaderChange(mset, true)
}
}
}
// processStreamRemoval is called when followers have replicated an assignment.
func (js *jetStream) processStreamRemoval(sa *streamAssignment) {
js.mu.Lock()
s, cc := js.srv, js.cluster
if s == nil || cc == nil {
// TODO(dlc) - debug at least
js.mu.Unlock()
return
}
stream := sa.Config.Name
isMember := sa.Group.isMember(cc.meta.ID())
wasLeader := cc.isStreamLeader(sa.Client.Account, stream)
// Check if we already have this assigned.
accStreams := cc.streams[sa.Client.Account]
needDelete := accStreams != nil && accStreams[stream] != nil
if needDelete {
delete(accStreams, stream)
if len(accStreams) == 0 {
delete(cc.streams, sa.Client.Account)
}
}
js.mu.Unlock()
if needDelete {
js.processClusterDeleteStream(sa, isMember, wasLeader)
}
}
func (js *jetStream) processClusterDeleteStream(sa *streamAssignment, isMember, wasLeader bool) {
if sa == nil {
return
}
js.mu.RLock()
s := js.srv
js.mu.RUnlock()
acc, err := s.LookupAccount(sa.Client.Account)
if err != nil {
s.Debugf("JetStream cluster failed to lookup account %q: %v", sa.Client.Account, err)
return
}
var resp = JSApiStreamDeleteResponse{ApiResponse: ApiResponse{Type: JSApiStreamDeleteResponseType}}
// Go ahead and delete the stream.
mset, err := acc.LookupStream(sa.Config.Name)
if err != nil {
resp.Error = jsNotFoundError(err)
} else if mset != nil {
if mset.Config().internal {
err = errors.New("not allowed to delete internal stream")
} else {
err = mset.stop(true, wasLeader)
}
}
if sa.Group.node != nil {
sa.Group.node.Delete()
}
if !isMember || (!wasLeader && sa.Group.node != nil && sa.Group.node.GroupLeader() != noLeader) {
return
}
if err != nil {
if resp.Error == nil {
resp.Error = jsError(err)
}
s.sendAPIErrResponse(sa.Client, acc, sa.Subject, sa.Reply, _EMPTY_, s.jsonResponse(resp))
} else {
resp.Success = true
s.sendAPIResponse(sa.Client, acc, sa.Subject, sa.Reply, _EMPTY_, s.jsonResponse(resp))
}
}
// processConsumerAssignment is called when followers have replicated an assignment for a consumer.
func (js *jetStream) processConsumerAssignment(ca *consumerAssignment) {
js.mu.Lock()
s, cc := js.srv, js.cluster
if s == nil || cc == nil {
// TODO(dlc) - debug at least
js.mu.Unlock()
return
}
acc, err := s.LookupAccount(ca.Client.Account)
if err != nil {
// TODO(dlc) - log error
js.mu.Unlock()
return
}
sa := js.streamAssignment(ca.Client.Account, ca.Stream)
if sa == nil {
s.Debugf("Consumer create failed, could not locate stream '%s > %s'", ca.Client.Account, ca.Stream)
ca.err = ErrJetStreamStreamNotFound
result := &consumerAssignmentResult{
Account: ca.Client.Account,
Stream: ca.Stream,
Consumer: ca.Name,
Response: &JSApiConsumerCreateResponse{ApiResponse: ApiResponse{Type: JSApiConsumerCreateResponseType}},
}
result.Response.Error = jsNotFoundError(ErrJetStreamStreamNotFound)
// Send response to the metadata leader. They will forward to the user as needed.
b, _ := json.Marshal(result) // Avoids auto-processing and doing fancy json with newlines.
s.sendInternalMsgLocked(consumerAssignmentSubj, _EMPTY_, nil, b)
js.mu.Unlock()
return
}
if sa.consumers == nil {
sa.consumers = make(map[string]*consumerAssignment)
} else if oca := sa.consumers[ca.Name]; oca != nil {
// Copy over private existing state from former CA.
ca.Group.node = oca.Group.node
ca.responded = oca.responded
ca.err = oca.err
}
// Place into our internal map under the stream assignment.
// Ok to replace an existing one, we check on process call below.
sa.consumers[ca.Name] = ca
// See if we are a member
ourID := cc.meta.ID()
isMember := ca.Group.isMember(ourID)
js.mu.Unlock()
// Check if this is for us..
if isMember {
js.processClusterCreateConsumer(ca)
} else {
// We are not a member, if we have this consumer on this
// server remove it.
if mset, _ := acc.LookupStream(ca.Stream); mset != nil {
if o := mset.LookupConsumer(ca.Name); o != nil {
s.Debugf("JetStream removing consumer '%s > %s > %s' from this server, re-assigned",
ca.Client.Account, ca.Stream, ca.Name)
if node := o.raftNode(); node != nil {
node.ProposeRemovePeer(ourID)
}
o.stop(true, false, false)
}
}
}
}
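// processConsumerRemoval is called when followers have replicated a consumer removal.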
func (js *jetStream) processConsumerRemoval(ca *consumerAssignment) {
js.mu.Lock()
s, cc := js.srv, js.cluster
if s == nil || cc == nil {
// TODO(dlc) - debug at least
js.mu.Unlock()
return
}
isMember := ca.Group.isMember(cc.meta.ID())
wasLeader := cc.isConsumerLeader(ca.Client.Account, ca.Stream, ca.Name)
// Delete from our state.
var needDelete bool
if accStreams := cc.streams[ca.Client.Account]; accStreams != nil {
if sa := accStreams[ca.Stream]; sa != nil && sa.consumers != nil && sa.consumers[ca.Name] != nil {
needDelete = true
delete(sa.consumers, ca.Name)
}
}
js.mu.Unlock()
if needDelete {
js.processClusterDeleteConsumer(ca, isMember, wasLeader)
}
}
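// consumerAssignmentResult is sent back to the metadata leader when a consumer
// assignment fails on a member, so the error can be forwarded to the user.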
type consumerAssignmentResult struct {
Account string `json:"account"`
Stream string `json:"stream"`
Consumer string `json:"consumer"`
Response *JSApiConsumerCreateResponse `json:"response,omitempty"`
}
// processClusterCreateConsumer is called when we are a member of the group and need to create the consumer.
func (js *jetStream) processClusterCreateConsumer(ca *consumerAssignment) {
if ca == nil {
return
}
js.mu.RLock()
s := js.srv
acc, err := s.LookupAccount(ca.Client.Account)
if err != nil {
s.Warnf("JetStream cluster failed to lookup account %q: %v", ca.Client.Account, err)
js.mu.RUnlock()
return
}
rg := ca.Group
alreadyRunning := rg.node != nil
js.mu.RUnlock()
// Go ahead and create or update the consumer.
mset, err := acc.LookupStream(ca.Stream)
if err != nil {
js.mu.Lock()
s.Debugf("Consumer create failed, could not locate stream '%s > %s'", ca.Client.Account, ca.Stream)
ca.err = ErrJetStreamStreamNotFound
result := &consumerAssignmentResult{
Account: ca.Client.Account,
Stream: ca.Stream,
Consumer: ca.Name,
Response: &JSApiConsumerCreateResponse{ApiResponse: ApiResponse{Type: JSApiConsumerCreateResponseType}},
}
result.Response.Error = jsNotFoundError(ErrJetStreamStreamNotFound)
// Send response to the metadata leader. They will forward to the user as needed.
b, _ := json.Marshal(result) // Avoids auto-processing and doing fancy json with newlines.
s.sendInternalMsgLocked(consumerAssignmentSubj, _EMPTY_, nil, b)
js.mu.Unlock()
return
}
// Process the raft group and make sure it's running if needed.
js.createRaftGroup(rg)
// Check if we already have this consumer running.
o := mset.LookupConsumer(ca.Name)
if o != nil {
if o.isDurable() && o.isPushMode() {
ocfg := o.Config()
if configsEqualSansDelivery(ocfg, *ca.Config) && (ocfg.allowNoInterest || o.hasNoLocalInterest()) {
o.updateDeliverSubject(ca.Config.DeliverSubject)
}
}
o.setConsumerAssignment(ca)
s.Debugf("JetStream cluster, consumer was already running")
}
// Add in the consumer if needed.
if o == nil {
o, err = mset.addConsumer(ca.Config, ca.Name, ca)
}
// If we have an initial state set apply that now.
if ca.State != nil && o != nil {
err = o.setStoreState(ca.State)
}
if err != nil {
js.srv.Debugf("Consumer create failed for '%s > %s > %s': %v\n", ca.Client.Account, ca.Stream, ca.Name, err)
ca.err = err
if rg.node != nil {
rg.node.Delete()
}
result := &consumerAssignmentResult{
Account: ca.Client.Account,
Stream: ca.Stream,
Consumer: ca.Name,
Response: &JSApiConsumerCreateResponse{ApiResponse: ApiResponse{Type: JSApiConsumerCreateResponseType}},
}
result.Response.Error = jsError(err)
// Send response to the metadata leader. They will forward to the user as needed.
b, _ := json.Marshal(result) // Avoids auto-processing and doing fancy json with newlines.
s.sendInternalMsgLocked(consumerAssignmentSubj, _EMPTY_, nil, b)
} else {
o.setCreated(ca.Created)
// Start our monitoring routine.
if rg.node != nil {
if !alreadyRunning {
s.startGoRoutine(func() { js.monitorConsumer(o, ca) })
}
} else {
// Single replica consumer, process manually here.
js.processConsumerLeaderChange(o, true)
}
}
}
func (js *jetStream) processClusterDeleteConsumer(ca *consumerAssignment, isMember, wasLeader bool) {
if ca == nil {
return
}
js.mu.RLock()
s := js.srv
js.mu.RUnlock()
acc, err := s.LookupAccount(ca.Client.Account)
if err != nil {
s.Warnf("JetStream cluster failed to lookup account %q: %v", ca.Client.Account, err)
return
}
var resp = JSApiConsumerDeleteResponse{ApiResponse: ApiResponse{Type: JSApiConsumerDeleteResponseType}}
// Go ahead and delete the consumer.
mset, err := acc.LookupStream(ca.Stream)
if err != nil {
resp.Error = jsNotFoundError(err)
} else if mset != nil {
if mset.Config().internal {
err = errors.New("not allowed to delete internal consumer")
} else if o := mset.LookupConsumer(ca.Name); o != nil {
err = o.stop(true, true, wasLeader)
} else {
resp.Error = jsNoConsumerErr
}
}
if ca.Group.node != nil {
ca.Group.node.Delete()
}
if !wasLeader || ca.Reply == _EMPTY_ {
return
}
if err != nil {
if resp.Error == nil {
resp.Error = jsError(err)
}
s.sendAPIErrResponse(ca.Client, acc, ca.Subject, ca.Reply, _EMPTY_, s.jsonResponse(resp))
} else {
resp.Success = true
s.sendAPIResponse(ca.Client, acc, ca.Subject, ca.Reply, _EMPTY_, s.jsonResponse(resp))
}
}
// Returns the consumer assignment, or nil if not present.
// Lock should be held.
func (js *jetStream) consumerAssignment(account, stream, consumer string) *consumerAssignment {
if sa := js.streamAssignment(account, stream); sa != nil {
return sa.consumers[consumer]
}
return nil
}
// consumerAssigned informs us if this server has this consumer assigned.
func (jsa *jsAccount) consumerAssigned(stream, consumer string) bool {
jsa.mu.RLock()
defer jsa.mu.RUnlock()
js, acc := jsa.js, jsa.account
if js == nil {
return false
}
return js.cluster.isConsumerAssigned(acc, stream, consumer)
}
// Read lock should be held.
func (cc *jetStreamCluster) isConsumerAssigned(a *Account, stream, consumer string) bool {
// Non-clustered mode always returns true.
if cc == nil {
return true
}
var sa *streamAssignment
accStreams := cc.streams[a.Name]
if accStreams != nil {
sa = accStreams[stream]
}
if sa == nil {
// TODO(dlc) - This should not happen.
return false
}
ca := sa.consumers[consumer]
if ca == nil {
return false
}
rg := ca.Group
// Check if we are a member of the raft group assigned to this consumer.
ourID := cc.meta.ID()
for _, peer := range rg.Peers {
if peer == ourID {
return true
}
}
return false
}
func (o *Consumer) raftGroup() *raftGroup {
if o == nil {
return nil
}
o.mu.RLock()
defer o.mu.RUnlock()
if o.ca == nil {
return nil
}
return o.ca.Group
}
func (o *Consumer) raftNode() RaftNode {
if o == nil {
return nil
}
o.mu.RLock()
defer o.mu.RUnlock()
return o.node
}
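// monitorConsumer is the monitoring routine for a clustered consumer. It applies
// committed entries, reacts to leadership changes and compacts the raft log on
// a timer or when it grows past the size limit.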
func (js *jetStream) monitorConsumer(o *Consumer, ca *consumerAssignment) {
s, n := js.server(), o.raftNode()
defer s.grWG.Done()
if n == nil {
s.Warnf("No RAFT group for consumer")
return
}
qch, lch, ach := n.QuitC(), n.LeadChangeC(), n.ApplyC()
const (
compactInterval = 1 * time.Minute
compactSizeLimit = 8 * 1024 * 1024
)
s.Debugf("Starting consumer monitor for '%s > %s > %s", o.acc.Name, ca.Stream, ca.Name)
defer s.Debugf("Exiting consumer monitor for '%s > %s > %s'", o.acc.Name, ca.Stream, ca.Name)
t := time.NewTicker(compactInterval)
defer t.Stop()
// Our last applied.
last := uint64(0)
for {
select {
case <-s.quitCh:
return
case <-qch:
return
case ce := <-ach:
// No special processing needed when we are caught up on restart.
if ce == nil {
continue
}
if _, err := js.applyConsumerEntries(o, ce); err == nil {
n.Applied(ce.Index)
last = ce.Index
if _, b := n.Size(); b > compactSizeLimit {
n.Compact(last)
}
}
case isLeader := <-lch:
if !isLeader && n.GroupLeader() != noLeader {
js.setConsumerAssignmentResponded(ca)
}
js.processConsumerLeaderChange(o, isLeader)
case <-t.C:
// TODO(dlc) - We should have this delayed a bit to not race the invariants.
if last != 0 {
n.Compact(last)
}
}
}
}
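// applyConsumerEntries applies committed raft entries to the consumer store:
// full state snapshots, peer removals, and delivered/ack updates.
// Returns true if a snapshot entry was processed.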
func (js *jetStream) applyConsumerEntries(o *Consumer, ce *CommittedEntry) (bool, error) {
var didSnap bool
for _, e := range ce.Entries {
if e.Type == EntrySnapshot {
// No-op needed?
state, err := decodeConsumerState(e.Data)
if err != nil {
panic(err.Error())
}
o.store.Update(state)
didSnap = true
} else if e.Type == EntryRemovePeer {
js.mu.RLock()
ourID := js.cluster.meta.ID()
js.mu.RUnlock()
if peer := string(e.Data); peer == ourID {
o.stop(true, false, false)
}
return false, nil
} else {
buf := e.Data
switch entryOp(buf[0]) {
case updateDeliveredOp:
dseq, sseq, dc, ts, err := decodeDeliveredUpdate(buf[1:])
if err != nil {
panic(err.Error())
}
if err := o.store.UpdateDelivered(dseq, sseq, dc, ts); err != nil {
panic(err.Error())
}
case updateAcksOp:
dseq, sseq, err := decodeAckUpdate(buf[1:])
if err != nil {
panic(err.Error())
}
o.store.UpdateAcks(dseq, sseq)
default:
panic(fmt.Sprintf("JetStream Cluster Unknown group entry op type! %v", entryOp(buf[0])))
}
}
}
return didSnap, nil
}
var errBadAckUpdate = errors.New("jetstream cluster bad replicated ack update")
var errBadDeliveredUpdate = errors.New("jetstream cluster bad replicated delivered update")
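// decodeAckUpdate decodes a replicated ack update: two uvarints, the delivered
// sequence followed by the stream sequence.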
func decodeAckUpdate(buf []byte) (dseq, sseq uint64, err error) {
var bi, n int
if dseq, n = binary.Uvarint(buf); n < 0 {
return 0, 0, errBadAckUpdate
}
bi += n
if sseq, n = binary.Uvarint(buf[bi:]); n < 0 {
return 0, 0, errBadAckUpdate
}
return dseq, sseq, nil
}
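// decodeDeliveredUpdate decodes a replicated delivered update: uvarints for the
// delivered sequence, stream sequence and delivery count, then a varint timestamp.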
func decodeDeliveredUpdate(buf []byte) (dseq, sseq, dc uint64, ts int64, err error) {
var bi, n int
if dseq, n = binary.Uvarint(buf); n < 0 {
return 0, 0, 0, 0, errBadDeliveredUpdate
}
bi += n
if sseq, n = binary.Uvarint(buf[bi:]); n < 0 {
return 0, 0, 0, 0, errBadDeliveredUpdate
}
bi += n
if dc, n = binary.Uvarint(buf[bi:]); n < 0 {
return 0, 0, 0, 0, errBadDeliveredUpdate
}
bi += n
if ts, n = binary.Varint(buf[bi:]); n < 0 {
return 0, 0, 0, 0, errBadDeliveredUpdate
}
return dseq, sseq, dc, ts, nil
}
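// processConsumerLeaderChange is called when this server gains or loses
// leadership of a consumer's raft group. It sends the relevant advisories and,
// if we are the new leader and have not yet responded, the create response.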
func (js *jetStream) processConsumerLeaderChange(o *Consumer, isLeader bool) {
ca := o.consumerAssignment()
if ca == nil {
return
}
js.mu.Lock()
s, account, err := js.srv, ca.Client.Account, ca.err
client, subject, reply := ca.Client, ca.Subject, ca.Reply
hasResponded := ca.responded
ca.responded = true
js.mu.Unlock()
stream := o.Stream()
consumer := o.Name()
acc, _ := s.LookupAccount(account)
if acc == nil {
return
}
if isLeader {
s.Noticef("JetStream cluster new consumer leader for '%s > %s > %s'", ca.Client.Account, stream, consumer)
s.sendConsumerLeaderElectAdvisory(o)
// Check for peer removal and process here if needed.
js.checkPeers(ca.Group)
} else {
// We are stepping down.
// Make sure if we are doing so because we have lost quorum that we send the appropriate advisories.
if node := o.raftNode(); node != nil && !node.Quorum() && time.Since(node.Created()) > time.Second {
s.sendConsumerLostQuorumAdvisory(o)
}
}
// Tell consumer to switch leader status.
o.setLeader(isLeader)
if !isLeader || hasResponded {
return
}
var resp = JSApiConsumerCreateResponse{ApiResponse: ApiResponse{Type: JSApiConsumerCreateResponseType}}
if err != nil {
resp.Error = jsError(err)
s.sendAPIErrResponse(client, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp))
} else {
resp.ConsumerInfo = o.Info()
s.sendAPIResponse(client, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp))
if node := o.raftNode(); node != nil {
o.sendCreateAdvisory()
}
}
}
// Determines if we should send lost quorum advisory. We throttle these after the first one.
func (o *Consumer) shouldSendLostQuorum() bool {
o.mu.Lock()
defer o.mu.Unlock()
if time.Since(o.lqsent) >= lostQuorumAdvInterval {
o.lqsent = time.Now()
return true
}
return false
}
func (s *Server) sendConsumerLostQuorumAdvisory(o *Consumer) {
if o == nil {
return
}
node, stream, consumer, acc := o.raftNode(), o.Stream(), o.Name(), o.account()
if node == nil {
return
}
if !o.shouldSendLostQuorum() {
return
}
s.Warnf("JetStream cluster consumer '%s > %s >%s' has NO quorum, stalled.", acc.GetName(), stream, consumer)
subj := JSAdvisoryConsumerQuorumLostPre + "." + stream + "." + consumer
adv := &JSConsumerQuorumLostAdvisory{
TypedEvent: TypedEvent{
Type: JSConsumerQuorumLostAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: stream,
Consumer: consumer,
Replicas: s.replicas(node),
}
// Send to the user's account if not the system account.
if acc != s.SystemAccount() {
s.publishAdvisory(acc, subj, adv)
}
// Now do system level one. Place account info in adv, and nil account means system.
adv.Account = acc.GetName()
s.publishAdvisory(nil, subj, adv)
}
func (s *Server) sendConsumerLeaderElectAdvisory(o *Consumer) {
if o == nil {
return
}
node, stream, consumer, acc := o.raftNode(), o.Stream(), o.Name(), o.account()
if node == nil {
return
}
subj := JSAdvisoryConsumerLeaderElectedPre + "." + stream + "." + consumer
adv := &JSConsumerLeaderElectedAdvisory{
TypedEvent: TypedEvent{
Type: JSConsumerLeaderElectedAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: stream,
Consumer: consumer,
Leader: s.serverNameForNode(node.GroupLeader()),
Replicas: s.replicas(node),
}
// Send to the user's account if not the system account.
if acc != s.SystemAccount() {
s.publishAdvisory(acc, subj, adv)
}
// Now do system level one. Place account info in adv, and nil account means system.
adv.Account = acc.GetName()
s.publishAdvisory(nil, subj, adv)
}
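// streamAssignmentResult is sent back to the metadata leader when a stream
// assignment (create or restore) fails on a member.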
type streamAssignmentResult struct {
Account string `json:"account"`
Stream string `json:"stream"`
Response *JSApiStreamCreateResponse `json:"create_response,omitempty"`
Restore *JSApiStreamRestoreResponse `json:"restore_response,omitempty"`
}
// Process error results of stream and consumer assignments.
// Success will be handled by stream leader.
func (js *jetStream) processStreamAssignmentResults(sub *subscription, c *client, subject, reply string, msg []byte) {
var result streamAssignmentResult
if err := json.Unmarshal(msg, &result); err != nil {
// TODO(dlc) - log
return
}
acc, _ := js.srv.LookupAccount(result.Account)
if acc == nil {
// TODO(dlc) - log
return
}
js.mu.Lock()
defer js.mu.Unlock()
s, cc := js.srv, js.cluster
// FIXME(dlc) - suppress duplicates?
if sa := js.streamAssignment(result.Account, result.Stream); sa != nil {
var resp string
if result.Response != nil {
resp = s.jsonResponse(result.Response)
} else if result.Restore != nil {
resp = s.jsonResponse(result.Restore)
}
js.srv.sendAPIErrResponse(sa.Client, acc, sa.Subject, sa.Reply, _EMPTY_, resp)
sa.responded = true
// TODO(dlc) - Could have mixed results, should track per peer.
// Set sa.err while we are deleting so we will not respond to list/names requests.
sa.err = ErrJetStreamNotAssigned
cc.meta.Propose(encodeDeleteStreamAssignment(sa))
}
}
func (js *jetStream) processConsumerAssignmentResults(sub *subscription, c *client, subject, reply string, msg []byte) {
var result consumerAssignmentResult
if err := json.Unmarshal(msg, &result); err != nil {
// TODO(dlc) - log
return
}
acc, _ := js.srv.LookupAccount(result.Account)
if acc == nil {
// TODO(dlc) - log
return
}
js.mu.Lock()
defer js.mu.Unlock()
s, cc := js.srv, js.cluster
if sa := js.streamAssignment(result.Account, result.Stream); sa != nil && sa.consumers != nil {
if ca := sa.consumers[result.Consumer]; ca != nil && !ca.responded {
js.srv.sendAPIErrResponse(ca.Client, acc, ca.Subject, ca.Reply, _EMPTY_, s.jsonResponse(result.Response))
ca.responded = true
// Check if this failed.
// TODO(dlc) - Could have mixed results, should track per peer.
if result.Response.Error != nil {
// So while we are deleting we will not respond to list/names requests.
ca.err = ErrJetStreamNotAssigned
cc.meta.Propose(encodeDeleteConsumerAssignment(ca))
}
}
}
}
const (
streamAssignmentSubj = "$SYS.JSC.STREAM.ASSIGNMENT.RESULT"
consumerAssignmentSubj = "$SYS.JSC.CONSUMER.ASSIGNMENT.RESULT"
)
// Lock should be held.
func (js *jetStream) startUpdatesSub() {
cc, s, c := js.cluster, js.srv, js.cluster.c
if cc.streamResults == nil {
cc.streamResults, _ = s.systemSubscribe(streamAssignmentSubj, _EMPTY_, false, c, js.processStreamAssignmentResults)
}
if cc.consumerResults == nil {
cc.consumerResults, _ = s.systemSubscribe(consumerAssignmentSubj, _EMPTY_, false, c, js.processConsumerAssignmentResults)
}
}
// Lock should be held.
func (js *jetStream) stopUpdatesSub() {
cc := js.cluster
if cc.streamResults != nil {
cc.s.sysUnsubscribe(cc.streamResults)
cc.streamResults = nil
}
if cc.consumerResults != nil {
cc.s.sysUnsubscribe(cc.consumerResults)
cc.consumerResults = nil
}
}
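// processLeaderChange is called when this server gains or loses metadata
// leadership. The leader owns the assignment result subscriptions.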
func (js *jetStream) processLeaderChange(isLeader bool) {
if isLeader {
js.srv.Noticef("JetStream cluster new metadata leader")
}
js.mu.Lock()
defer js.mu.Unlock()
if isLeader {
js.startUpdatesSub()
} else {
js.stopUpdatesSub()
// TODO(dlc) - stepdown.
}
}
// Lock should be held.
func (cc *jetStreamCluster) remapStreamAssignment(sa *streamAssignment, removePeer string) bool {
// Need to select a replacement peer
for _, p := range cc.meta.Peers() {
if !sa.Group.isMember(p.ID) {
for i, peer := range sa.Group.Peers {
if peer == removePeer {
sa.Group.Peers[i] = p.ID
break
}
}
// Don't influence preferred leader.
sa.Group.Preferred = _EMPTY_
return true
}
}
// No replacement peer available.
return false
}
// selectPeerGroup will select a group of peers to start a raft group.
// TODO(dlc) - For now randomly select. Can be way smarter.
func (cc *jetStreamCluster) selectPeerGroup(r int) []string {
var nodes []string
peers := cc.meta.Peers()
// Make sure they are active
s := cc.s
ourID := cc.meta.ID()
for _, p := range peers {
// FIXME(dlc) - cluster scoped.
if p.ID == ourID || s.getRouteByHash([]byte(p.ID)) != nil {
nodes = append(nodes, p.ID)
}
}
if len(nodes) < r {
return nil
}
// Don't depend on ordering, shuffle to randomize selection.
rand.Shuffle(len(nodes), func(i, j int) { nodes[i], nodes[j] = nodes[j], nodes[i] })
return nodes[:r]
}
func groupNameForStream(peers []string, storage StorageType) string {
return groupName("S", peers, storage)
}
func groupNameForConsumer(peers []string, storage StorageType) string {
return groupName("C", peers, storage)
}
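// groupName generates a raft group name of the form <prefix>-R<replicas><storage>-<id>,
// e.g. "S-R3F-<id>" for a replicated file-backed stream.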
func groupName(prefix string, peers []string, storage StorageType) string {
var gns string
if len(peers) == 1 {
gns = peers[0]
} else {
gns = string(getHash(nuid.Next()))
}
return fmt.Sprintf("%s-R%d%s-%s", prefix, len(peers), storage.String()[:1], gns)
}
// createGroupForStream will create a group for assignment for the stream.
// Lock should be held.
func (cc *jetStreamCluster) createGroupForStream(cfg *StreamConfig) *raftGroup {
replicas := cfg.Replicas
if replicas == 0 {
replicas = 1
}
// Need to create a group here.
// TODO(dlc) - Can be way smarter here.
peers := cc.selectPeerGroup(replicas)
if len(peers) == 0 {
return nil
}
return &raftGroup{Name: groupNameForStream(peers, cfg.Storage), Storage: cfg.Storage, Peers: peers}
}
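// jsClusteredStreamRequest handles a stream create request in clustered mode.
// It checks account limits, selects and places a raft group, and proposes the
// assignment to the metadata group.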
func (s *Server) jsClusteredStreamRequest(ci *ClientInfo, subject, reply string, rmsg []byte, cfg *StreamConfig) {
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return
}
var resp = JSApiStreamCreateResponse{ApiResponse: ApiResponse{Type: JSApiStreamCreateResponseType}}
acc, err := s.LookupAccount(ci.Account)
if err != nil {
resp.Error = jsError(err)
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
js.mu.RLock()
numStreams := len(cc.streams[ci.Account])
js.mu.RUnlock()
// Grab our jetstream account info.
acc.mu.RLock()
jsa := acc.js
acc.mu.RUnlock()
// Check for stream limits here before proposing.
jsa.mu.RLock()
exceeded := jsa.limits.MaxStreams > 0 && numStreams >= jsa.limits.MaxStreams
jsa.mu.RUnlock()
if exceeded {
resp.Error = jsError(fmt.Errorf("maximum number of streams reached"))
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
// Now process the request and proposal.
js.mu.Lock()
defer js.mu.Unlock()
if sa := js.streamAssignment(ci.Account, cfg.Name); sa != nil {
resp.Error = jsError(ErrJetStreamStreamAlreadyUsed)
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
// Raft group selection and placement.
rg := cc.createGroupForStream(cfg)
if rg == nil {
resp.Error = jsInsufficientErr
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
// Pick a preferred leader.
rg.setPreferred()
// Sync subject for post snapshot sync.
sa := &streamAssignment{Group: rg, Sync: syncSubjForStream(), Config: cfg, Subject: subject, Reply: reply, Client: ci, Created: time.Now()}
cc.meta.Propose(encodeAddStreamAssignment(sa))
}
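// jsClusteredStreamDeleteRequest proposes removal of a stream assignment, along
// with any of its remaining consumer assignments.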
func (s *Server) jsClusteredStreamDeleteRequest(ci *ClientInfo, stream, subject, reply string, rmsg []byte) {
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return
}
js.mu.Lock()
defer js.mu.Unlock()
osa := js.streamAssignment(ci.Account, stream)
if osa == nil {
acc, err := s.LookupAccount(ci.Account)
if err == nil {
var resp = JSApiStreamDeleteResponse{ApiResponse: ApiResponse{Type: JSApiStreamDeleteResponseType}}
resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound)
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
}
return
}
// Remove any remaining consumers as well.
for _, ca := range osa.consumers {
ca.Reply, ca.State = _EMPTY_, nil
cc.meta.Propose(encodeDeleteConsumerAssignment(ca))
}
sa := &streamAssignment{Group: osa.Group, Config: osa.Config, Subject: subject, Reply: reply, Client: ci}
cc.meta.Propose(encodeDeleteStreamAssignment(sa))
}
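// jsClusteredStreamPurgeRequest proposes a purge operation to the stream's raft group.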
func (s *Server) jsClusteredStreamPurgeRequest(ci *ClientInfo, stream, subject, reply string, rmsg []byte) {
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return
}
js.mu.Lock()
defer js.mu.Unlock()
sa := js.streamAssignment(ci.Account, stream)
if sa == nil || sa.Group == nil || sa.Group.node == nil {
resp := JSApiStreamPurgeResponse{ApiResponse: ApiResponse{Type: JSApiStreamPurgeResponseType}}
acc, err := s.LookupAccount(ci.Account)
if err != nil {
resp.Error = jsError(err)
} else {
resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound)
}
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
n := sa.Group.node
sp := &streamPurge{Stream: stream, Subject: subject, Reply: reply, Client: ci}
n.Propose(encodeStreamPurge(sp))
}
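// jsClusteredStreamRestoreRequest handles a stream restore request in clustered
// mode, recording the restore state on the assignment before proposing it.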
func (s *Server) jsClusteredStreamRestoreRequest(ci *ClientInfo, acc *Account, req *JSApiStreamRestoreRequest, stream, subject, reply string, rmsg []byte) {
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return
}
js.mu.Lock()
defer js.mu.Unlock()
cfg := &req.Config
resp := JSApiStreamRestoreResponse{ApiResponse: ApiResponse{Type: JSApiStreamRestoreResponseType}}
if sa := js.streamAssignment(ci.Account, cfg.Name); sa != nil {
resp.Error = jsError(ErrJetStreamStreamAlreadyUsed)
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
// Raft group selection and placement.
rg := cc.createGroupForStream(cfg)
if rg == nil {
resp.Error = jsInsufficientErr
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
// Pick a preferred leader.
rg.setPreferred()
sa := &streamAssignment{Group: rg, Sync: syncSubjForStream(), Config: cfg, Subject: subject, Reply: reply, Client: ci, Created: time.Now()}
// Now add in our restore state and pre-select a peer to handle the actual receipt of the snapshot.
sa.Restore = &req.State
cc.meta.Propose(encodeAddStreamAssignment(sa))
}
// This will do a scatter and gather operation for all streams for this account. This is only called from metadata leader.
// This will be running in a separate Go routine.
func (s *Server) jsClusteredStreamListRequest(acc *Account, ci *ClientInfo, offset int, subject, reply string, rmsg []byte) {
defer s.grWG.Done()
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return
}
js.mu.Lock()
var streams []*streamAssignment
for _, sa := range cc.streams[acc.Name] {
streams = append(streams, sa)
}
// Needs to be sorted.
if len(streams) > 1 {
sort.Slice(streams, func(i, j int) bool {
return strings.Compare(streams[i].Config.Name, streams[j].Config.Name) < 0
})
}
scnt := len(streams)
if offset > scnt {
offset = scnt
}
if offset > 0 {
streams = streams[offset:]
}
if len(streams) > JSApiListLimit {
streams = streams[:JSApiListLimit]
}
var resp = JSApiStreamListResponse{
ApiResponse: ApiResponse{Type: JSApiStreamListResponseType},
Streams: make([]*StreamInfo, 0, len(streams)),
}
if len(streams) == 0 {
js.mu.Unlock()
resp.Limit = JSApiListLimit
resp.Offset = offset
s.sendAPIResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(resp))
return
}
// Create an inbox for our responses and send out requests.
inbox := infoReplySubject()
rc := make(chan *StreamInfo, len(streams))
rsub, _ := s.systemSubscribe(inbox, _EMPTY_, false, cc.c, func(_ *subscription, _ *client, _, reply string, msg []byte) {
var si StreamInfo
if err := json.Unmarshal(msg, &si); err != nil {
s.Warnf("Error unmarshaling clustered stream info response:%v", err)
return
}
select {
case rc <- &si:
default:
s.Warnf("Failed placing stream info result on internal chan")
}
})
defer s.sysUnsubscribe(rsub)
// Send out our requests here.
for _, sa := range streams {
isubj := fmt.Sprintf(clusterStreamInfoT, sa.Client.Account, sa.Config.Name)
s.sendInternalMsgLocked(isubj, inbox, nil, nil)
}
// Don't hold lock.
js.mu.Unlock()
const timeout = 2 * time.Second
notActive := time.NewTimer(timeout)
defer notActive.Stop()
LOOP:
for {
select {
case <-s.quitCh:
return
case <-notActive.C:
s.Warnf("Did not receive all stream info results for %q", acc)
resp.Error = jsClusterIncompleteErr
break LOOP
case si := <-rc:
resp.Streams = append(resp.Streams, si)
// Check to see if we are done.
if len(resp.Streams) == len(streams) {
break LOOP
}
}
}
// Needs to be sorted as well.
if len(resp.Streams) > 1 {
sort.Slice(resp.Streams, func(i, j int) bool {
return strings.Compare(resp.Streams[i].Config.Name, resp.Streams[j].Config.Name) < 0
})
}
resp.Total = len(resp.Streams)
resp.Limit = JSApiListLimit
resp.Offset = offset
s.sendAPIResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(resp))
}
// This will do a scatter and gather operation for all consumers for this stream and account.
// This will be running in a separate Go routine.
func (s *Server) jsClusteredConsumerListRequest(acc *Account, ci *ClientInfo, offset int, stream, subject, reply string, rmsg []byte) {
defer s.grWG.Done()
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return
}
js.mu.Lock()
var consumers []*consumerAssignment
if sas := cc.streams[acc.Name]; sas != nil {
if sa := sas[stream]; sa != nil {
// Copy over since we need to sort etc.
for _, ca := range sa.consumers {
consumers = append(consumers, ca)
}
}
}
// Needs to be sorted.
if len(consumers) > 1 {
sort.Slice(consumers, func(i, j int) bool {
return strings.Compare(consumers[i].Name, consumers[j].Name) < 0
})
}
ocnt := len(consumers)
if offset > ocnt {
offset = ocnt
}
if offset > 0 {
consumers = consumers[offset:]
}
if len(consumers) > JSApiListLimit {
consumers = consumers[:JSApiListLimit]
}
// Send out our requests here.
var resp = JSApiConsumerListResponse{
ApiResponse: ApiResponse{Type: JSApiConsumerListResponseType},
Consumers: []*ConsumerInfo{},
}
if len(consumers) == 0 {
js.mu.Unlock()
resp.Limit = JSApiListLimit
resp.Offset = offset
s.sendAPIResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(resp))
return
}
// Create an inbox for our responses and send out requests.
inbox := infoReplySubject()
rc := make(chan *ConsumerInfo, len(consumers))
rsub, _ := s.systemSubscribe(inbox, _EMPTY_, false, cc.c, func(_ *subscription, _ *client, _, reply string, msg []byte) {
var ci ConsumerInfo
if err := json.Unmarshal(msg, &ci); err != nil {
s.Warnf("Error unmarshaling clustered consumer info response:%v", err)
return
}
select {
case rc <- &ci:
default:
s.Warnf("Failed placing consumer info result on internal chan")
}
})
defer s.sysUnsubscribe(rsub)
for _, ca := range consumers {
isubj := fmt.Sprintf(clusterConsumerInfoT, ca.Client.Account, stream, ca.Name)
s.sendInternalMsgLocked(isubj, inbox, nil, nil)
}
js.mu.Unlock()
const timeout = 2 * time.Second
notActive := time.NewTimer(timeout)
defer notActive.Stop()
LOOP:
for {
select {
case <-s.quitCh:
return
case <-notActive.C:
s.Warnf("Did not receive all stream info results for %q", acc)
break LOOP
case ci := <-rc:
resp.Consumers = append(resp.Consumers, ci)
// Check to see if we are done.
if len(resp.Consumers) == len(consumers) {
break LOOP
}
}
}
// Needs to be sorted as well.
if len(resp.Consumers) > 1 {
sort.Slice(resp.Consumers, func(i, j int) bool {
return strings.Compare(resp.Consumers[i].Name, resp.Consumers[j].Name) < 0
})
}
resp.Total = len(resp.Consumers)
resp.Limit = JSApiListLimit
resp.Offset = offset
s.sendAPIResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(resp))
}
func encodeStreamPurge(sp *streamPurge) []byte {
var bb bytes.Buffer
bb.WriteByte(byte(purgeStreamOp))
json.NewEncoder(&bb).Encode(sp)
return bb.Bytes()
}
func decodeStreamPurge(buf []byte) (*streamPurge, error) {
var sp streamPurge
err := json.Unmarshal(buf, &sp)
return &sp, err
}
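// jsClusteredConsumerDeleteRequest proposes removal of a consumer assignment.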
func (s *Server) jsClusteredConsumerDeleteRequest(ci *ClientInfo, stream, consumer, subject, reply string, rmsg []byte) {
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return
}
acc, err := s.LookupAccount(ci.Account)
if err != nil {
return
}
js.mu.Lock()
defer js.mu.Unlock()
var resp = JSApiConsumerDeleteResponse{ApiResponse: ApiResponse{Type: JSApiConsumerDeleteResponseType}}
sa := js.streamAssignment(ci.Account, stream)
if sa == nil {
resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound)
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
if sa.consumers == nil {
resp.Error = jsNoConsumerErr
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
oca := sa.consumers[consumer]
if oca == nil {
resp.Error = jsNoConsumerErr
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
ca := &consumerAssignment{Group: oca.Group, Stream: stream, Name: consumer, Config: oca.Config, Subject: subject, Reply: reply, Client: ci}
cc.meta.Propose(encodeDeleteConsumerAssignment(ca))
}
func encodeMsgDelete(md *streamMsgDelete) []byte {
var bb bytes.Buffer
bb.WriteByte(byte(deleteMsgOp))
json.NewEncoder(&bb).Encode(md)
return bb.Bytes()
}
func decodeMsgDelete(buf []byte) (*streamMsgDelete, error) {
var md streamMsgDelete
err := json.Unmarshal(buf, &md)
return &md, err
}
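// jsClusteredMsgDeleteRequest proposes a message delete to the stream's raft group.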
func (s *Server) jsClusteredMsgDeleteRequest(ci *ClientInfo, stream, subject, reply string, seq uint64, rmsg []byte) {
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return
}
js.mu.Lock()
defer js.mu.Unlock()
sa := js.streamAssignment(ci.Account, stream)
if sa == nil || sa.Group == nil || sa.Group.node == nil {
// TODO(dlc) - Should respond? Log?
return
}
n := sa.Group.node
md := &streamMsgDelete{Seq: seq, Stream: stream, Subject: subject, Reply: reply, Client: ci}
n.Propose(encodeMsgDelete(md))
}
func encodeAddStreamAssignment(sa *streamAssignment) []byte {
var bb bytes.Buffer
bb.WriteByte(byte(assignStreamOp))
json.NewEncoder(&bb).Encode(sa)
return bb.Bytes()
}
func encodeDeleteStreamAssignment(sa *streamAssignment) []byte {
var bb bytes.Buffer
bb.WriteByte(byte(removeStreamOp))
json.NewEncoder(&bb).Encode(sa)
return bb.Bytes()
}
func decodeStreamAssignment(buf []byte) (*streamAssignment, error) {
var sa streamAssignment
err := json.Unmarshal(buf, &sa)
return &sa, err
}
// createGroupForConsumer will create a new group with same peer set as the stream.
func (cc *jetStreamCluster) createGroupForConsumer(sa *streamAssignment) *raftGroup {
peers := sa.Group.Peers
if len(peers) == 0 {
return nil
}
return &raftGroup{Name: groupNameForConsumer(peers, sa.Config.Storage), Storage: sa.Config.Storage, Peers: peers}
}
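// jsClusteredConsumerRequest handles a consumer create request in clustered mode,
// reserving the (possibly generated ephemeral) name before proposing the assignment.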
func (s *Server) jsClusteredConsumerRequest(ci *ClientInfo, subject, reply string, rmsg []byte, stream string, cfg *ConsumerConfig) {
js, cc := s.getJetStreamCluster()
if js == nil || cc == nil {
return
}
js.mu.Lock()
defer js.mu.Unlock()
var resp = JSApiConsumerCreateResponse{ApiResponse: ApiResponse{Type: JSApiConsumerCreateResponseType}}
acc, err := s.LookupAccount(ci.Account)
if err != nil {
resp.Error = jsError(err)
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
// Lookup the stream assignment.
sa := js.streamAssignment(ci.Account, stream)
if sa == nil {
resp.Error = jsError(ErrJetStreamStreamNotFound)
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
rg := cc.createGroupForConsumer(sa)
if rg == nil {
resp.Error = jsInsufficientErr
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
// Pick a preferred leader.
rg.setPreferred()
// We need to set the ephemeral here before replicating.
var oname string
if !isDurableConsumer(cfg) {
for {
oname = createConsumerName()
if sa.consumers != nil {
if sa.consumers[oname] != nil {
continue
}
}
break
}
} else {
oname = cfg.Durable
if sa.consumers[oname] != nil {
resp.Error = jsError(ErrJetStreamConsumerAlreadyUsed)
s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
return
}
}
ca := &consumerAssignment{Group: rg, Stream: stream, Name: oname, Config: cfg, Subject: subject, Reply: reply, Client: ci, Created: time.Now()}
cc.meta.Propose(encodeAddConsumerAssignment(ca))
}
func encodeAddConsumerAssignment(ca *consumerAssignment) []byte {
var bb bytes.Buffer
bb.WriteByte(byte(assignConsumerOp))
json.NewEncoder(&bb).Encode(ca)
return bb.Bytes()
}
func encodeDeleteConsumerAssignment(ca *consumerAssignment) []byte {
var bb bytes.Buffer
bb.WriteByte(byte(removeConsumerOp))
json.NewEncoder(&bb).Encode(ca)
return bb.Bytes()
}
func decodeConsumerAssignment(buf []byte) (*consumerAssignment, error) {
var ca consumerAssignment
err := json.Unmarshal(buf, &ca)
return &ca, err
}
func encodeAddConsumerAssignmentCompressed(ca *consumerAssignment) []byte {
b, err := json.Marshal(ca)
if err != nil {
return nil
}
// TODO(dlc) - Streaming better approach here probably.
var bb bytes.Buffer
bb.WriteByte(byte(assignCompressedConsumerOp))
bb.Write(s2.Encode(nil, b))
return bb.Bytes()
}
func decodeConsumerAssignmentCompressed(buf []byte) (*consumerAssignment, error) {
var ca consumerAssignment
js, err := s2.Decode(nil, buf)
if err != nil {
return nil, err
}
err = json.Unmarshal(js, &ca)
return &ca, err
}
var errBadStreamMsg = errors.New("jetstream cluster bad replicated stream msg")
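// decodeStreamMsg decodes a replicated stream message. Layout (little endian):
// lseq (8), ts (8), subject len (2) + subject, reply len (2) + reply,
// hdr len (2) + hdr, msg len (4) + msg. The 26 byte minimum covers the fixed fields.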
func decodeStreamMsg(buf []byte) (subject, reply string, hdr, msg []byte, lseq uint64, ts int64, err error) {
var le = binary.LittleEndian
if len(buf) < 26 {
return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
}
lseq = le.Uint64(buf)
buf = buf[8:]
ts = int64(le.Uint64(buf))
buf = buf[8:]
sl := int(le.Uint16(buf))
buf = buf[2:]
if len(buf) < sl {
return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
}
subject = string(buf[:sl])
buf = buf[sl:]
if len(buf) < 2 {
return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
}
rl := int(le.Uint16(buf))
buf = buf[2:]
if len(buf) < rl {
return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
}
reply = string(buf[:rl])
buf = buf[rl:]
if len(buf) < 2 {
return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
}
hl := int(le.Uint16(buf))
buf = buf[2:]
if len(buf) < hl {
return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
}
hdr = buf[:hl]
buf = buf[hl:]
if len(buf) < 4 {
return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
}
ml := int(le.Uint32(buf))
buf = buf[4:]
if len(buf) < ml {
return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
}
msg = buf[:ml]
return subject, reply, hdr, msg, lseq, ts, nil
}
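// encodeStreamMsg encodes a stream message for replication, prefixing the
// streamMsgOp byte ahead of the layout described in decodeStreamMsg.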
func encodeStreamMsg(subject, reply string, hdr, msg []byte, lseq uint64, ts int64) []byte {
elen := 1 + 8 + 8 + len(subject) + len(reply) + len(hdr) + len(msg)
elen += (2 + 2 + 2 + 4) // Encoded lengths, 4bytes
// TODO(dlc) - check sizes of subject, reply and hdr, make sure uint16 ok.
buf := make([]byte, elen)
buf[0] = byte(streamMsgOp)
var le = binary.LittleEndian
wi := 1
le.PutUint64(buf[wi:], lseq)
wi += 8
le.PutUint64(buf[wi:], uint64(ts))
wi += 8
le.PutUint16(buf[wi:], uint16(len(subject)))
wi += 2
copy(buf[wi:], subject)
wi += len(subject)
le.PutUint16(buf[wi:], uint16(len(reply)))
wi += 2
copy(buf[wi:], reply)
wi += len(reply)
le.PutUint16(buf[wi:], uint16(len(hdr)))
wi += 2
if len(hdr) > 0 {
copy(buf[wi:], hdr)
wi += len(hdr)
}
le.PutUint32(buf[wi:], uint32(len(msg)))
wi += 4
if len(msg) > 0 {
copy(buf[wi:], msg)
wi += len(msg)
}
return buf[:wi]
}
// StreamSnapshot is used for snapshotting and out of band catch up in clustered mode.
type streamSnapshot struct {
Msgs uint64 `json:"messages"`
Bytes uint64 `json:"bytes"`
FirstSeq uint64 `json:"first_seq"`
LastSeq uint64 `json:"last_seq"`
Deleted []uint64 `json:"deleted,omitempty"`
}
// Grab a snapshot of a stream for clustered mode.
func (mset *Stream) snapshot() []byte {
mset.mu.RLock()
defer mset.mu.RUnlock()
state := mset.store.State()
snap := &streamSnapshot{
Msgs: state.Msgs,
Bytes: state.Bytes,
FirstSeq: state.FirstSeq,
LastSeq: state.LastSeq,
Deleted: state.Deleted,
}
b, _ := json.Marshal(snap)
return b
}
// processClusteredInboundMsg will propose the inbound message to the underlying raft group.
func (mset *Stream) processClusteredInboundMsg(subject, reply string, hdr, msg []byte) error {
// For possible error response.
var response []byte
mset.mu.RLock()
canRespond := !mset.config.NoAck && len(reply) > 0
s, jsa, st, rf, sendq := mset.srv, mset.jsa, mset.config.Storage, mset.config.Replicas, mset.sendq
maxMsgSize := int(mset.config.MaxMsgSize)
mset.mu.RUnlock()
// Check here pre-emptively if we have exceeded our account limits.
var exceeded bool
jsa.mu.RLock()
if st == MemoryStorage {
total := jsa.storeTotal + int64(memStoreMsgSize(subject, hdr, msg)*uint64(rf))
if jsa.limits.MaxMemory > 0 && total > jsa.limits.MaxMemory {
exceeded = true
}
} else {
total := jsa.storeTotal + int64(fileStoreMsgSize(subject, hdr, msg)*uint64(rf))
if jsa.limits.MaxStore > 0 && total > jsa.limits.MaxStore {
exceeded = true
}
}
jsa.mu.RUnlock()
// If we have exceeded our account limits go ahead and return.
if exceeded {
err := fmt.Errorf("JetStream resource limits exceeded for account: %q", jsa.acc().Name)
s.Warnf(err.Error())
if canRespond {
var resp = &JSPubAckResponse{PubAck: &PubAck{Stream: mset.Name()}}
resp.Error = &ApiError{Code: 400, Description: "resource limits exceeded for account"}
response, _ = json.Marshal(resp)
sendq <- &jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, response, nil, 0}
}
return err
}
// Check msgSize if we have a limit set there. The store would reject it later anyway, but better to be pre-emptive.
if maxMsgSize >= 0 && (len(hdr)+len(msg)) > maxMsgSize {
err := fmt.Errorf("JetStream message size exceeds limits for '%s > %s'", jsa.acc().Name, mset.config.Name)
s.Warnf(err.Error())
if canRespond {
var resp = &JSPubAckResponse{PubAck: &PubAck{Stream: mset.Name()}}
resp.Error = &ApiError{Code: 400, Description: "message size exceeds maximum allowed"}
response, _ = json.Marshal(resp)
sendq <- &jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, response, nil, 0}
}
return err
}
// Proceed with proposing this message.
mset.mu.Lock()
// We only use mset.clseq for clustering and in case we run ahead of actual commits.
// Check if we need to set initial value here
if mset.clseq < mset.lseq {
mset.clseq = mset.lseq
}
// Do proposal.
err := mset.node.Propose(encodeStreamMsg(subject, reply, hdr, msg, mset.clseq, time.Now().UnixNano()))
if err != nil {
if canRespond {
var resp = &JSPubAckResponse{PubAck: &PubAck{Stream: mset.config.Name}}
resp.Error = &ApiError{Code: 503, Description: err.Error()}
response, _ = json.Marshal(resp)
}
} else {
mset.clseq++
}
mset.mu.Unlock()
// If we errored out respond here.
if err != nil && canRespond {
sendq <- &jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, response, nil, 0}
}
return err
}
// For requesting messages post raft snapshot to catch up streams post server restart.
// Any deleted msgs etc will be handled inline on catchup.
type streamSyncRequest struct {
FirstSeq uint64 `json:"first_seq"`
LastSeq uint64 `json:"last_seq"`
}
// Given a stream state that represents a snapshot, calculate the sync request based on our current state.
func (mset *Stream) calculateSyncRequest(state *StreamState, snap *streamSnapshot) *streamSyncRequest {
// Quick check if we are already caught up.
if state.LastSeq >= snap.LastSeq {
return nil
}
return &streamSyncRequest{FirstSeq: state.LastSeq + 1, LastSeq: snap.LastSeq}
}
// processSnapshotDeletes will update our current store based on the snapshot
// but only processing deletes and new FirstSeq / purges.
func (mset *Stream) processSnapshotDeletes(snap *streamSnapshot) {
state := mset.store.State()
// Adjust if FirstSeq has moved.
if snap.FirstSeq > state.FirstSeq {
mset.store.Compact(snap.FirstSeq)
state = mset.store.State()
}
// Range the deleted and delete if applicable.
for _, dseq := range snap.Deleted {
if dseq <= state.LastSeq {
mset.store.RemoveMsg(dseq)
}
}
}
func (mset *Stream) setCatchingUp() {
mset.mu.Lock()
mset.catchup = true
mset.mu.Unlock()
}
func (mset *Stream) clearCatchingUp() {
mset.mu.Lock()
mset.catchup = false
mset.mu.Unlock()
}
func (mset *Stream) isCatchingUp() bool {
mset.mu.RLock()
defer mset.mu.RUnlock()
return mset.catchup
}
// Process a stream snapshot.
func (mset *Stream) processSnapshot(buf []byte) {
var snap streamSnapshot
if err := json.Unmarshal(buf, &snap); err != nil {
// Log error.
return
}
// Update any deletes, etc.
mset.processSnapshotDeletes(&snap)
mset.mu.Lock()
state := mset.store.State()
sreq := mset.calculateSyncRequest(&state, &snap)
s, subject, n := mset.srv, mset.sa.Sync, mset.node
mset.mu.Unlock()
	// Just return if up to date.
if sreq == nil {
return
}
// Pause the apply channel for our raft group while we catch up.
n.PauseApply()
defer n.ResumeApply()
// Set our catchup state.
mset.setCatchingUp()
defer mset.clearCatchingUp()
js := s.getJetStream()
RETRY:
// Grab sync request again on failures.
if sreq == nil {
mset.mu.Lock()
state := mset.store.State()
sreq = mset.calculateSyncRequest(&state, &snap)
mset.mu.Unlock()
if sreq == nil {
return
}
}
msgsC := make(chan []byte, 1024)
// Send our catchup request here.
reply := syncReplySubject()
sub, err := s.sysSubscribe(reply, func(_ *subscription, _ *client, _, reply string, msg []byte) {
// Make copies - https://github.com/go101/go101/wiki
	// TODO(dlc) - Needed since we are reusing a buffer from the inbound client/route.
if len(msg) > 0 {
msg = append(msg[:0:0], msg...)
}
msgsC <- msg
if reply != _EMPTY_ {
s.sendInternalMsgLocked(reply, _EMPTY_, nil, nil)
}
})
if err != nil {
return
}
defer s.sysUnsubscribe(sub)
b, _ := json.Marshal(sreq)
s.sendInternalMsgLocked(subject, reply, nil, b)
// Clear our sync request and capture last.
last := sreq.LastSeq
sreq = nil
const activityInterval = 5 * time.Second
notActive := time.NewTimer(activityInterval)
defer notActive.Stop()
// Run our own select loop here.
for qch, lch := n.QuitC(), n.LeadChangeC(); ; {
select {
case msg := <-msgsC:
notActive.Reset(activityInterval)
// Check eof signaling.
if len(msg) == 0 {
goto RETRY
}
if lseq, err := mset.processCatchupMsg(msg); err == nil {
if lseq >= last {
return
}
} else {
goto RETRY
}
case <-notActive.C:
s.Warnf("Catchup for stream '%s > %s' stalled", mset.account(), mset.Name())
goto RETRY
case <-s.quitCh:
return
case <-qch:
return
case isLeader := <-lch:
js.processStreamLeaderChange(mset, isLeader)
}
}
}
// processCatchupMsg will be called to process out of band catchup msgs from a sync request.
func (mset *Stream) processCatchupMsg(msg []byte) (uint64, error) {
if len(msg) == 0 || entryOp(msg[0]) != streamMsgOp {
// TODO(dlc) - This is error condition, log.
return 0, errors.New("bad catchup msg")
}
subj, _, hdr, msg, seq, ts, err := decodeStreamMsg(msg[1:])
if err != nil {
return 0, errors.New("bad catchup msg")
}
// Put into our store
// Messages to be skipped have no subject or timestamp.
	// TODO(dlc) - formalize with skipMsgOp
if subj == _EMPTY_ && ts == 0 {
lseq := mset.store.SkipMsg()
if lseq != seq {
return 0, errors.New("wrong sequence for skipped msg")
}
} else if err := mset.store.StoreRawMsg(subj, hdr, msg, seq, ts); err != nil {
return 0, err
}
// Update our lseq.
mset.setLastSeq(seq)
return seq, nil
}
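The empty-subject/zero-timestamp convention checked above can be illustrated in isolation. The following is a hypothetical, self-contained sketch of the skip-detection rule only; the type and helper names are invented for the example and are not part of the server.

package main

import "fmt"

// catchupEntry is a stand-in for a decoded catchup message.
type catchupEntry struct {
	Subject string
	Seq     uint64
	Ts      int64
}

// isSkip applies the convention used in processCatchupMsg: messages to
// be skipped carry no subject and no timestamp.
func isSkip(e catchupEntry) bool {
	return e.Subject == "" && e.Ts == 0
}

func main() {
	entries := []catchupEntry{
		{Subject: "orders.new", Seq: 1, Ts: 1600000000},
		{Subject: "", Seq: 2, Ts: 0}, // a deleted sequence, sent as a skip
	}
	for _, e := range entries {
		fmt.Printf("seq %d skip=%v\n", e.Seq, isSkip(e))
	}
}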
func (mset *Stream) handleClusterSyncRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
var sreq streamSyncRequest
if err := json.Unmarshal(msg, &sreq); err != nil {
// Log error.
return
}
mset.srv.startGoRoutine(func() { mset.runCatchup(reply, &sreq) })
}
// clusterInfo will report on the status of the raft group.
func (js *jetStream) clusterInfo(rg *raftGroup) *ClusterInfo {
if js == nil {
return nil
}
js.mu.RLock()
defer js.mu.RUnlock()
s := js.srv
if rg == nil || rg.node == nil {
return &ClusterInfo{
Name: s.ClusterName(),
Leader: s.Name(),
}
}
n := rg.node
ci := &ClusterInfo{
Name: s.ClusterName(),
Leader: s.serverNameForNode(n.GroupLeader()),
}
now := time.Now()
id, peers := n.ID(), n.Peers()
for _, rp := range peers {
if rp.ID != id && rg.isMember(rp.ID) {
lastSeen := now.Sub(rp.Last)
current := rp.Current
if current && lastSeen > lostQuorumInterval {
current = false
}
pi := &PeerInfo{Name: s.serverNameForNode(rp.ID), Current: current, Active: lastSeen}
ci.Replicas = append(ci.Replicas, pi)
}
}
return ci
}
func (mset *Stream) handleClusterStreamInfoRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
mset.mu.RLock()
if mset.client == nil {
mset.mu.RUnlock()
return
}
s, js, config := mset.srv, mset.srv.js, mset.config
mset.mu.RUnlock()
si := &StreamInfo{Created: mset.Created(), State: mset.State(), Config: config, Cluster: js.clusterInfo(mset.raftGroup())}
b, _ := json.Marshal(si)
s.sendInternalMsgLocked(reply, _EMPTY_, nil, b)
}
func (mset *Stream) runCatchup(sendSubject string, sreq *streamSyncRequest) {
s := mset.srv
defer s.grWG.Done()
const maxOut = int64(48 * 1024 * 1024) // 48MB for now.
out := int64(0)
// Flow control processing.
ackReplySize := func(subj string) int64 {
if li := strings.LastIndexByte(subj, btsep); li > 0 && li < len(subj) {
return parseAckReplyNum(subj[li+1:])
}
return 0
}
nextBatchC := make(chan struct{}, 1)
nextBatchC <- struct{}{}
// Setup ackReply for flow control.
ackReply := syncAckSubject()
ackSub, _ := s.sysSubscribe(ackReply, func(sub *subscription, c *client, subject, reply string, msg []byte) {
sz := ackReplySize(subject)
atomic.AddInt64(&out, -sz)
select {
case nextBatchC <- struct{}{}:
default:
}
})
defer s.sysUnsubscribe(ackSub)
ackReplyT := strings.ReplaceAll(ackReply, ".*", ".%d")
// EOF
defer s.sendInternalMsgLocked(sendSubject, _EMPTY_, nil, nil)
const activityInterval = 5 * time.Second
notActive := time.NewTimer(activityInterval)
defer notActive.Stop()
// Setup sequences to walk through.
seq, last := sreq.FirstSeq, sreq.LastSeq
sendNextBatch := func() {
for ; seq <= last && atomic.LoadInt64(&out) <= maxOut; seq++ {
subj, hdr, msg, ts, err := mset.store.LoadMsg(seq)
			// If the error is anything other than a missing or deleted msg, bail out.
if err != nil && err != ErrStoreMsgNotFound && err != errDeletedMsg {
// break, something changed.
seq = last + 1
return
}
// S2?
em := encodeStreamMsg(subj, _EMPTY_, hdr, msg, seq, ts)
// Place size in reply subject for flow control.
reply := fmt.Sprintf(ackReplyT, len(em))
atomic.AddInt64(&out, int64(len(em)))
s.sendInternalMsgLocked(sendSubject, reply, nil, em)
}
}
// Grab stream quit channel.
mset.mu.RLock()
qch := mset.qch
mset.mu.RUnlock()
if qch == nil {
return
}
// Run as long as we are still active and need catchup.
// FIXME(dlc) - Purge event? Stream delete?
for {
select {
case <-s.quitCh:
return
case <-qch:
return
case <-notActive.C:
s.Warnf("Catchup for stream '%s > %s' stalled", mset.account(), mset.Name())
return
case <-nextBatchC:
// Update our activity timer.
notActive.Reset(activityInterval)
sendNextBatch()
// Check if we are finished.
if seq >= last {
s.Debugf("Done resync for stream '%s > %s'", mset.account(), mset.Name())
return
}
}
}
}
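The flow-control scheme in runCatchup (embed the payload size as the last token of the reply subject, track outstanding bytes in an atomic counter, subtract on each ack) can be reduced to a few lines. This standalone sketch uses an assumed subject prefix and window size just to demonstrate the pattern:

package main

import (
	"fmt"
	"strconv"
	"strings"
	"sync/atomic"
)

const maxWindow = int64(48 * 1024 * 1024) // same 48MB window as runCatchup

func main() {
	var out int64
	// Sender side: count the bytes as outstanding and embed the size
	// in the reply subject, e.g. "$JSC.ACK.abc123.4096".
	send := func(payload []byte) string {
		atomic.AddInt64(&out, int64(len(payload)))
		return fmt.Sprintf("$JSC.ACK.abc123.%d", len(payload))
	}
	// Ack side: parse the size back out of the subject and release it.
	ack := func(subject string) {
		if li := strings.LastIndexByte(subject, '.'); li > 0 {
			if sz, err := strconv.ParseInt(subject[li+1:], 10, 64); err == nil {
				atomic.AddInt64(&out, -sz)
			}
		}
	}
	reply := send(make([]byte, 4096))
	fmt.Println(atomic.LoadInt64(&out)) // 4096 outstanding
	ack(reply)
	fmt.Println(atomic.LoadInt64(&out) <= maxWindow, atomic.LoadInt64(&out)) // true 0
}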
func syncSubjForStream() string {
return syncSubject("$JSC.SYNC")
}
func syncReplySubject() string {
return syncSubject("$JSC.R")
}
func infoReplySubject() string {
return syncSubject("$JSC.R")
}
func syncAckSubject() string {
return syncSubject("$JSC.ACK") + ".*"
}
func syncSubject(pre string) string {
var sb strings.Builder
sb.WriteString(pre)
sb.WriteByte(btsep)
var b [replySuffixLen]byte
rn := rand.Int63()
for i, l := 0, rn; i < len(b); i++ {
b[i] = digits[l%base]
l /= base
}
sb.Write(b[:])
return sb.String()
}
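The constants digits, base and replySuffixLen used by syncSubject are defined elsewhere in the server; a self-contained equivalent, with guessed values (base-62 digits, an 8-byte suffix), would look like:

package main

import (
	"fmt"
	"math/rand"
	"strings"
)

const suffixDigits = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

// randomSuffixSubject mirrors syncSubject: append a short random token,
// encoded digit by digit from a single random int64, to the prefix.
func randomSuffixSubject(pre string) string {
	var sb strings.Builder
	sb.WriteString(pre)
	sb.WriteByte('.')
	var b [8]byte
	for i, l := 0, rand.Int63(); i < len(b); i++ {
		b[i] = suffixDigits[l%int64(len(suffixDigits))]
		l /= int64(len(suffixDigits))
	}
	sb.Write(b[:])
	return sb.String()
}

func main() {
	fmt.Println(randomSuffixSubject("$JSC.R")) // e.g. $JSC.R.k3X9a0Qp
}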
const (
clusterStreamInfoT = "$JSC.SI.%s.%s"
clusterConsumerInfoT = "$JSC.CI.%s.%s.%s"
jsaUpdatesSubT = "$JSC.ARU.%s.*"
jsaUpdatesPubT = "$JSC.ARU.%s.%s"
)
| 1 | 12,486 | cluster not having omitempty, does this imply that cluster is required? seems empty is valid so just checking | nats-io-nats-server | go |
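The review question above (whether the cluster field should carry omitempty) comes down to standard encoding/json behavior; the sketch below, with the structs reduced to the one field under discussion, shows what each tag choice serializes to when the pointer is nil. This is an illustration of the question, not the server's actual struct definition.

package main

import (
	"encoding/json"
	"fmt"
)

type ClusterInfo struct {
	Name string `json:"name"`
}

type StreamInfo struct {
	// Without omitempty a nil Cluster is still emitted as "cluster":null.
	Cluster *ClusterInfo `json:"cluster"`
	// With `json:"cluster,omitempty"` the field would be dropped entirely
	// when nil, which matters if callers treat presence as "clustered".
}

func main() {
	b, _ := json.Marshal(StreamInfo{})
	fmt.Println(string(b)) // {"cluster":null}
}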
@@ -758,3 +758,19 @@ func (c *Container) ShouldCreateWithSSMSecret() bool {
}
return false
}
+
+// MergeEnvironmentVariables appends additional envVarName:envVarValue pairs to
+// the container's environment values structure
+func (c *Container) MergeEnvironmentVariables(secrets map[string]string) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // don't assume that the environment variable map has been initialized by others
+ if c.Environment == nil {
+ c.Environment = make(map[string]string)
+ }
+
+ for k, v := range secrets {
+ c.Environment[k] = v
+ }
+} | 1 | // Copyright 2014-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package container
import (
"fmt"
"strconv"
"sync"
"time"
apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status"
apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors"
"github.com/aws/amazon-ecs-agent/agent/credentials"
resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status"
"github.com/aws/aws-sdk-go/aws"
docker "github.com/fsouza/go-dockerclient"
)
const (
// defaultContainerSteadyStateStatus defines the container status at
// which the container is assumed to be in steady state. It is set
// to 'ContainerRunning' unless overridden
defaultContainerSteadyStateStatus = apicontainerstatus.ContainerRunning
// awslogsAuthExecutionRole is the string value passed in the task payload
// that specifies that the log driver should be authenticated using the
// execution role
awslogsAuthExecutionRole = "ExecutionRole"
// DockerHealthCheckType is the type of container health check provided by docker
DockerHealthCheckType = "docker"
// AuthTypeECR is to use image pull auth over ECR
AuthTypeECR = "ecr"
// AuthTypeASM is to use image pull auth over AWS Secrets Manager
AuthTypeASM = "asm"
// MetadataURIEnvironmentVariableName defines the name of the environment
// variable in containers' config, which can be used by the containers to access the
// v3 metadata endpoint
MetadataURIEnvironmentVariableName = "ECS_CONTAINER_METADATA_URI"
// MetadataURIFormat defines the URI format for v3 metadata endpoint
MetadataURIFormat = "http://169.254.170.2/v3/%s"
// SecretProviderSSM is to show secret provider being SSM
SecretProviderSSM = "ssm"
)
// DockerConfig represents additional metadata about a container to run. It's
// remodeled from the `ecsacs` api model file. Eventually it should not exist
// once this remodeling is refactored out.
type DockerConfig struct {
// Config is the configuration used to create container
Config *string `json:"config"`
// HostConfig is the configuration of container related to host resource
HostConfig *string `json:"hostConfig"`
// Version specifies the docker client API version to use
Version *string `json:"version"`
}
// HealthStatus contains the health check result returned by docker
type HealthStatus struct {
// Status is the container health status
Status apicontainerstatus.ContainerHealthStatus `json:"status,omitempty"`
// Since is the timestamp when container health status changed
Since *time.Time `json:"statusSince,omitempty"`
// ExitCode is the exitcode of health check if failed
ExitCode int `json:"exitCode,omitempty"`
// Output is the output of health check
Output string `json:"output,omitempty"`
}
// Container is the internal representation of a container in the ECS agent
type Container struct {
// Name is the name of the container specified in the task definition
Name string
// V3EndpointID is a container identifier used to construct v3 metadata endpoint; it's unique among
// all the containers managed by the agent
V3EndpointID string
// Image is the image name specified in the task definition
Image string
// ImageID is the local ID of the image used in the container
ImageID string
// Command is the command to run in the container which is specified in the task definition
Command []string
// CPU is the cpu limitation of the container which is specified in the task definition
CPU uint `json:"Cpu"`
// Memory is the memory limitation of the container which is specified in the task definition
Memory uint
// Links contains a list of containers to link, corresponding to docker option: --link
Links []string
// VolumesFrom contains a list of container's volume to use, corresponding to docker option: --volumes-from
VolumesFrom []VolumeFrom `json:"volumesFrom"`
// MountPoints contains a list of volume mount paths
MountPoints []MountPoint `json:"mountPoints"`
// Ports contains a list of ports binding configuration
Ports []PortBinding `json:"portMappings"`
// Secrets contains a list of secret
Secrets []Secret `json:"secrets"`
// Essential denotes whether the container is essential or not
Essential bool
// EntryPoint is entrypoint of the container, corresponding to docker option: --entrypoint
EntryPoint *[]string
// Environment is the environment variable set in the container
Environment map[string]string `json:"environment"`
// Overrides contains the configuration to override of a container
Overrides ContainerOverrides `json:"overrides"`
// DockerConfig is the configuration used to create the container
DockerConfig DockerConfig `json:"dockerConfig"`
// RegistryAuthentication is the auth data used to pull image
RegistryAuthentication *RegistryAuthenticationData `json:"registryAuthentication"`
// HealthCheckType is the mechnism to use for the container health check
// currently it only supports 'DOCKER'
HealthCheckType string `json:"healthCheckType,omitempty"`
// Health contains the health check information of container health check
Health HealthStatus `json:"-"`
// LogsAuthStrategy specifies how the logs driver for the container will be
// authenticated
LogsAuthStrategy string
// lock is used for fields that are accessed and updated concurrently
lock sync.RWMutex
// DesiredStatusUnsafe represents the state where the container should go. Generally,
// the desired status is informed by the ECS backend as a result of either
// API calls made to ECS or decisions made by the ECS service scheduler,
// though the agent may also set the DesiredStatusUnsafe if a different "essential"
// container in the task exits. The DesiredStatus is almost always either
// ContainerRunning or ContainerStopped.
// NOTE: Do not access DesiredStatusUnsafe directly. Instead, use `GetDesiredStatus`
// and `SetDesiredStatus`.
// TODO DesiredStatusUnsafe should probably be private with appropriately written
// setter/getter. When this is done, we need to ensure that the UnmarshalJSON
// is handled properly so that the state storage continues to work.
DesiredStatusUnsafe apicontainerstatus.ContainerStatus `json:"desiredStatus"`
// KnownStatusUnsafe represents the state where the container is.
// NOTE: Do not access `KnownStatusUnsafe` directly. Instead, use `GetKnownStatus`
// and `SetKnownStatus`.
// TODO KnownStatusUnsafe should probably be private with appropriately written
// setter/getter. When this is done, we need to ensure that the UnmarshalJSON
// is handled properly so that the state storage continues to work.
KnownStatusUnsafe apicontainerstatus.ContainerStatus `json:"KnownStatus"`
// TransitionDependenciesMap is a map of the dependent container status to other
// dependencies that must be satisfied in order for this container to transition.
TransitionDependenciesMap TransitionDependenciesMap `json:"TransitionDependencySet"`
// SteadyStateDependencies is a list of containers that must be in "steady state" before
// this one is created
// Note: Current logic requires that the containers specified here are run
// before this container can even be pulled.
//
// Deprecated: Use TransitionDependencySet instead. SteadyStateDependencies is retained for compatibility with old
// state files.
SteadyStateDependencies []string `json:"RunDependencies"`
// Type specifies the container type. Except the 'Normal' type, all other types
// are not directly specified by task definitions, but created by the agent. The
// JSON tag is retained as this field's previous name 'IsInternal' for maintaining
// backwards compatibility. Please see JSON parsing hooks for this type for more
// details
Type ContainerType `json:"IsInternal"`
// AppliedStatus is the status that has been "applied" (e.g., we've called Pull,
// Create, Start, or Stop) but we don't yet know that the application was successful.
// No need to save it in the state file, as agent will synchronize the container status
// on restart and for some operation eg: pull, it has to be recalled again.
AppliedStatus apicontainerstatus.ContainerStatus `json:"-"`
// ApplyingError is an error that occurred trying to transition the container
// to its desired state. It is propagated to the backend in the form
// 'Name: ErrorString' as the 'reason' field.
ApplyingError *apierrors.DefaultNamedError
// SentStatusUnsafe represents the last KnownStatusUnsafe that was sent to the ECS
// SubmitContainerStateChange API.
// TODO SentStatusUnsafe should probably be private with appropriately written
// setter/getter. When this is done, we need to ensure that the UnmarshalJSON is
// handled properly so that the state storage continues to work.
SentStatusUnsafe apicontainerstatus.ContainerStatus `json:"SentStatus"`
// MetadataFileUpdated is set to true when we have completed updating the
// metadata file
MetadataFileUpdated bool `json:"metadataFileUpdated"`
// KnownExitCodeUnsafe specifies the exit code for the container.
// It is exposed outside of the package so that it's marshalled/unmarshalled in
// the JSON body while saving the state.
// NOTE: Do not access KnownExitCodeUnsafe directly. Instead, use `GetKnownExitCode`
// and `SetKnownExitCode`.
KnownExitCodeUnsafe *int `json:"KnownExitCode"`
// KnownPortBindingsUnsafe is an array of port bindings for the container.
KnownPortBindingsUnsafe []PortBinding `json:"KnownPortBindings"`
// VolumesUnsafe is an array of volume mounts in the container.
VolumesUnsafe []docker.Mount `json:"-"`
// SteadyStateStatusUnsafe specifies the steady state status for the container
// If uninitialized, it's assumed to be set to 'ContainerRunning'. Even though
	// it's only supposed to be set when the container is being created, it's
	// exposed outside of the package so that it's marshalled/unmarshalled in
	// the JSON body while saving the state
SteadyStateStatusUnsafe *apicontainerstatus.ContainerStatus `json:"SteadyStateStatus,omitempty"`
createdAt time.Time
startedAt time.Time
finishedAt time.Time
labels map[string]string
}
// DockerContainer is a mapping between containers-as-docker-knows-them and
// containers-as-we-know-them.
// This is primarily used in DockerState, but lives here such that tasks and
// containers know how to convert themselves into Docker's desired config format
type DockerContainer struct {
DockerID string `json:"DockerId"`
DockerName string // needed for linking
Container *Container
}
// MountPoint describes the in-container location of a Volume and references
// that Volume by name.
type MountPoint struct {
SourceVolume string `json:"sourceVolume"`
ContainerPath string `json:"containerPath"`
ReadOnly bool `json:"readOnly"`
}
// VolumeFrom is a volume which references another container as its source.
type VolumeFrom struct {
SourceContainer string `json:"sourceContainer"`
ReadOnly bool `json:"readOnly"`
}
// Secret contains all essential attributes needed for ECS secrets vending as environment variables/tmpfs files
type Secret struct {
Name string `json:"name"`
ValueFrom string `json:"valueFrom"`
Region string `json:"region"`
ContainerPath string `json:"containerPath"`
Type string `json:"type"`
Provider string `json:"provider"`
}
// String returns a human readable string representation of DockerContainer
func (dc *DockerContainer) String() string {
if dc == nil {
return "nil"
}
return fmt.Sprintf("Id: %s, Name: %s, Container: %s", dc.DockerID, dc.DockerName, dc.Container.String())
}
// NewContainerWithSteadyState creates a new Container object with the specified
// steady state. Containers that need the non default steady state set will
// use this method instead of setting it directly
func NewContainerWithSteadyState(steadyState apicontainerstatus.ContainerStatus) *Container {
steadyStateStatus := steadyState
return &Container{
SteadyStateStatusUnsafe: &steadyStateStatus,
}
}
// KnownTerminal returns true if the container's known status is STOPPED
func (c *Container) KnownTerminal() bool {
return c.GetKnownStatus().Terminal()
}
// DesiredTerminal returns true if the container's desired status is STOPPED
func (c *Container) DesiredTerminal() bool {
return c.GetDesiredStatus().Terminal()
}
// GetKnownStatus returns the known status of the container
func (c *Container) GetKnownStatus() apicontainerstatus.ContainerStatus {
c.lock.RLock()
defer c.lock.RUnlock()
return c.KnownStatusUnsafe
}
// SetKnownStatus sets the known status of the container and update the container
// applied status
func (c *Container) SetKnownStatus(status apicontainerstatus.ContainerStatus) {
c.lock.Lock()
defer c.lock.Unlock()
c.KnownStatusUnsafe = status
c.updateAppliedStatusUnsafe(status)
}
// GetDesiredStatus gets the desired status of the container
func (c *Container) GetDesiredStatus() apicontainerstatus.ContainerStatus {
c.lock.RLock()
defer c.lock.RUnlock()
return c.DesiredStatusUnsafe
}
// SetDesiredStatus sets the desired status of the container
func (c *Container) SetDesiredStatus(status apicontainerstatus.ContainerStatus) {
c.lock.Lock()
defer c.lock.Unlock()
c.DesiredStatusUnsafe = status
}
// GetSentStatus safely returns the SentStatusUnsafe of the container
func (c *Container) GetSentStatus() apicontainerstatus.ContainerStatus {
c.lock.RLock()
defer c.lock.RUnlock()
return c.SentStatusUnsafe
}
// SetSentStatus safely sets the SentStatusUnsafe of the container
func (c *Container) SetSentStatus(status apicontainerstatus.ContainerStatus) {
c.lock.Lock()
defer c.lock.Unlock()
c.SentStatusUnsafe = status
}
// SetKnownExitCode sets exit code field in container struct
func (c *Container) SetKnownExitCode(i *int) {
c.lock.Lock()
defer c.lock.Unlock()
c.KnownExitCodeUnsafe = i
}
// GetKnownExitCode returns the container exit code
func (c *Container) GetKnownExitCode() *int {
c.lock.RLock()
defer c.lock.RUnlock()
return c.KnownExitCodeUnsafe
}
// SetRegistryAuthCredentials sets the credentials for pulling image from ECR
func (c *Container) SetRegistryAuthCredentials(credential credentials.IAMRoleCredentials) {
c.lock.Lock()
defer c.lock.Unlock()
c.RegistryAuthentication.ECRAuthData.SetPullCredentials(credential)
}
// ShouldPullWithExecutionRole returns whether this container has its own ECR credentials
func (c *Container) ShouldPullWithExecutionRole() bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.RegistryAuthentication != nil &&
c.RegistryAuthentication.Type == AuthTypeECR &&
c.RegistryAuthentication.ECRAuthData != nil &&
c.RegistryAuthentication.ECRAuthData.UseExecutionRole
}
// String returns a human readable string representation of this object
func (c *Container) String() string {
ret := fmt.Sprintf("%s(%s) (%s->%s)", c.Name, c.Image,
c.GetKnownStatus().String(), c.GetDesiredStatus().String())
if c.GetKnownExitCode() != nil {
ret += " - Exit: " + strconv.Itoa(*c.GetKnownExitCode())
}
return ret
}
// GetSteadyStateStatus returns the steady state status for the container. If
// Container.steadyState is not initialized, the default steady state status
// defined by `defaultContainerSteadyStateStatus` is returned. The 'pause'
// container's steady state differs from that of other containers, as the
// 'pause' container can reach its steady state once networking resources
// have been provisioned for it, which is done in the `ContainerResourcesProvisioned`
// state
func (c *Container) GetSteadyStateStatus() apicontainerstatus.ContainerStatus {
if c.SteadyStateStatusUnsafe == nil {
return defaultContainerSteadyStateStatus
}
return *c.SteadyStateStatusUnsafe
}
// IsKnownSteadyState returns true if the `KnownState` of the container equals
// the `steadyState` defined for the container
func (c *Container) IsKnownSteadyState() bool {
knownStatus := c.GetKnownStatus()
return knownStatus == c.GetSteadyStateStatus()
}
// GetNextKnownStateProgression returns the state that the container should
// progress to based on its `KnownState`. The progression is
// incremental until the container reaches its steady state. From then on,
// it transitions to `ContainerStopped`.
//
// For example:
// a. if the steady state of the container is defined as `ContainerRunning`,
// the progression is:
// Container: None -> Pulled -> Created -> Running* -> Stopped -> Zombie
//
// b. if the steady state of the container is defined as `ContainerResourcesProvisioned`,
// the progression is:
// Container: None -> Pulled -> Created -> Running -> Provisioned* -> Stopped -> Zombie
//
// c. if the steady state of the container is defined as `ContainerCreated`,
// the progression is:
// Container: None -> Pulled -> Created* -> Stopped -> Zombie
func (c *Container) GetNextKnownStateProgression() apicontainerstatus.ContainerStatus {
if c.IsKnownSteadyState() {
return apicontainerstatus.ContainerStopped
}
return c.GetKnownStatus() + 1
}
// IsInternal returns true if the container type is either `ContainerEmptyHostVolume`
// or `ContainerCNIPause`. It returns false otherwise
func (c *Container) IsInternal() bool {
if c.Type == ContainerNormal {
return false
}
return true
}
// IsRunning returns true if the container's known status is either RUNNING
// or RESOURCES_PROVISIONED. It returns false otherwise
func (c *Container) IsRunning() bool {
return c.GetKnownStatus().IsRunning()
}
// IsMetadataFileUpdated returns true once the metadata file has been updated,
// meaning the metadata file is ready and will no longer change
func (c *Container) IsMetadataFileUpdated() bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.MetadataFileUpdated
}
// SetMetadataFileUpdated sets the container's MetadataFileUpdated status to true
func (c *Container) SetMetadataFileUpdated() {
c.lock.Lock()
defer c.lock.Unlock()
c.MetadataFileUpdated = true
}
// IsEssential returns whether the container is an essential container or not
func (c *Container) IsEssential() bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.Essential
}
// AWSLogAuthExecutionRole returns true if the auth is by execution role
func (c *Container) AWSLogAuthExecutionRole() bool {
return c.LogsAuthStrategy == awslogsAuthExecutionRole
}
// SetCreatedAt sets the timestamp for container's creation time
func (c *Container) SetCreatedAt(createdAt time.Time) {
if createdAt.IsZero() {
return
}
c.lock.Lock()
defer c.lock.Unlock()
c.createdAt = createdAt
}
// SetStartedAt sets the timestamp for container's start time
func (c *Container) SetStartedAt(startedAt time.Time) {
if startedAt.IsZero() {
return
}
c.lock.Lock()
defer c.lock.Unlock()
c.startedAt = startedAt
}
// SetFinishedAt sets the timestamp for container's stopped time
func (c *Container) SetFinishedAt(finishedAt time.Time) {
if finishedAt.IsZero() {
return
}
c.lock.Lock()
defer c.lock.Unlock()
c.finishedAt = finishedAt
}
// GetCreatedAt sets the timestamp for container's creation time
func (c *Container) GetCreatedAt() time.Time {
c.lock.RLock()
defer c.lock.RUnlock()
return c.createdAt
}
// GetStartedAt sets the timestamp for container's start time
func (c *Container) GetStartedAt() time.Time {
c.lock.RLock()
defer c.lock.RUnlock()
return c.startedAt
}
// GetFinishedAt sets the timestamp for container's stopped time
func (c *Container) GetFinishedAt() time.Time {
c.lock.RLock()
defer c.lock.RUnlock()
return c.finishedAt
}
// SetLabels sets the labels for a container
func (c *Container) SetLabels(labels map[string]string) {
c.lock.Lock()
defer c.lock.Unlock()
c.labels = labels
}
// GetLabels gets the labels for a container
func (c *Container) GetLabels() map[string]string {
c.lock.RLock()
defer c.lock.RUnlock()
return c.labels
}
// SetKnownPortBindings sets the ports for a container
func (c *Container) SetKnownPortBindings(ports []PortBinding) {
c.lock.Lock()
defer c.lock.Unlock()
c.KnownPortBindingsUnsafe = ports
}
// GetKnownPortBindings gets the ports for a container
func (c *Container) GetKnownPortBindings() []PortBinding {
c.lock.RLock()
defer c.lock.RUnlock()
return c.KnownPortBindingsUnsafe
}
// SetVolumes sets the volumes mounted in a container
func (c *Container) SetVolumes(volumes []docker.Mount) {
c.lock.Lock()
defer c.lock.Unlock()
c.VolumesUnsafe = volumes
}
// GetVolumes returns the volumes mounted in a container
func (c *Container) GetVolumes() []docker.Mount {
c.lock.RLock()
defer c.lock.RUnlock()
return c.VolumesUnsafe
}
// HealthStatusShouldBeReported returns true if the health check is defined in
// the task definition
func (c *Container) HealthStatusShouldBeReported() bool {
return c.HealthCheckType == DockerHealthCheckType
}
// SetHealthStatus sets the container health status
func (c *Container) SetHealthStatus(health HealthStatus) {
c.lock.Lock()
defer c.lock.Unlock()
if c.Health.Status == health.Status {
return
}
c.Health.Status = health.Status
c.Health.Since = aws.Time(time.Now())
c.Health.Output = health.Output
// Set the health exit code if the health check failed
if c.Health.Status == apicontainerstatus.ContainerUnhealthy {
c.Health.ExitCode = health.ExitCode
}
}
// GetHealthStatus returns the container health information
func (c *Container) GetHealthStatus() HealthStatus {
c.lock.RLock()
defer c.lock.RUnlock()
// Copy the pointer to avoid race condition
copyHealth := c.Health
if c.Health.Since != nil {
copyHealth.Since = aws.Time(aws.TimeValue(c.Health.Since))
}
return copyHealth
}
// BuildContainerDependency adds a new dependency container and satisfied status
// to the dependent container
func (c *Container) BuildContainerDependency(contName string,
satisfiedStatus apicontainerstatus.ContainerStatus,
dependentStatus apicontainerstatus.ContainerStatus) {
contDep := ContainerDependency{
ContainerName: contName,
SatisfiedStatus: satisfiedStatus,
}
if _, ok := c.TransitionDependenciesMap[dependentStatus]; !ok {
c.TransitionDependenciesMap[dependentStatus] = TransitionDependencySet{}
}
deps := c.TransitionDependenciesMap[dependentStatus]
deps.ContainerDependencies = append(deps.ContainerDependencies, contDep)
c.TransitionDependenciesMap[dependentStatus] = deps
}
// BuildResourceDependency adds a new resource dependency by taking in the required status
// of the resource that satisfies the dependency and the dependent container status,
// whose transition is dependent on the resource.
// example: if container's PULLED transition is dependent on volume resource's
// CREATED status, then RequiredStatus=VolumeCreated and dependentStatus=ContainerPulled
func (c *Container) BuildResourceDependency(resourceName string,
requiredStatus resourcestatus.ResourceStatus,
dependentStatus apicontainerstatus.ContainerStatus) {
resourceDep := ResourceDependency{
Name: resourceName,
RequiredStatus: requiredStatus,
}
if _, ok := c.TransitionDependenciesMap[dependentStatus]; !ok {
c.TransitionDependenciesMap[dependentStatus] = TransitionDependencySet{}
}
deps := c.TransitionDependenciesMap[dependentStatus]
deps.ResourceDependencies = append(deps.ResourceDependencies, resourceDep)
c.TransitionDependenciesMap[dependentStatus] = deps
}
// updateAppliedStatusUnsafe updates the container transitioning status
func (c *Container) updateAppliedStatusUnsafe(knownStatus apicontainerstatus.ContainerStatus) {
if c.AppliedStatus == apicontainerstatus.ContainerStatusNone {
return
}
// Check if the container transition has already finished
if c.AppliedStatus <= knownStatus {
c.AppliedStatus = apicontainerstatus.ContainerStatusNone
}
}
// SetAppliedStatus sets the applied status of container and returns whether
// the container is already in a transition
func (c *Container) SetAppliedStatus(status apicontainerstatus.ContainerStatus) bool {
c.lock.Lock()
defer c.lock.Unlock()
if c.AppliedStatus != apicontainerstatus.ContainerStatusNone {
// return false to indicate the set operation failed
return false
}
c.AppliedStatus = status
return true
}
// GetAppliedStatus returns the transitioning status of container
func (c *Container) GetAppliedStatus() apicontainerstatus.ContainerStatus {
c.lock.RLock()
defer c.lock.RUnlock()
return c.AppliedStatus
}
// ShouldPullWithASMAuth returns true if this container needs to retrieve
// private registry authentication data from ASM
func (c *Container) ShouldPullWithASMAuth() bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.RegistryAuthentication != nil &&
c.RegistryAuthentication.Type == AuthTypeASM &&
c.RegistryAuthentication.ASMAuthData != nil
}
// SetASMDockerAuthConfig add the docker auth config data to the
// RegistryAuthentication struct held by the container, this is then passed down
// to the docker client to pull the image
func (c *Container) SetASMDockerAuthConfig(dac docker.AuthConfiguration) {
c.RegistryAuthentication.ASMAuthData.SetDockerAuthConfig(dac)
}
// SetV3EndpointID sets the v3 endpoint id of container
func (c *Container) SetV3EndpointID(v3EndpointID string) {
c.lock.Lock()
defer c.lock.Unlock()
c.V3EndpointID = v3EndpointID
}
// GetV3EndpointID returns the v3 endpoint id of container
func (c *Container) GetV3EndpointID() string {
c.lock.RLock()
defer c.lock.RUnlock()
return c.V3EndpointID
}
// InjectV3MetadataEndpoint injects the v3 metadata endpoint as an environment variable for a container
func (c *Container) InjectV3MetadataEndpoint() {
c.lock.Lock()
defer c.lock.Unlock()
// don't assume that the environment variable map has been initialized by others
if c.Environment == nil {
c.Environment = make(map[string]string)
}
c.Environment[MetadataURIEnvironmentVariableName] =
fmt.Sprintf(MetadataURIFormat, c.V3EndpointID)
}
// ShouldCreateWithSSMSecret returns true if this container needs to get secret
// value from SSM Parameter Store
func (c *Container) ShouldCreateWithSSMSecret() bool {
c.lock.RLock()
defer c.lock.RUnlock()
	// The Secrets field will be nil if there are no secrets for the container
if c.Secrets == nil {
return false
}
for _, secret := range c.Secrets {
if secret.Provider == SecretProviderSSM {
return true
}
}
return false
}
| 1 | 21,176 | @aws/aws-ecs-agent, @yumex93: how concerned should we be about user provided envvar names clobbering existing envvars? i'm not convinced we should be doing additional validation here. | aws-amazon-ecs-agent | go |
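The reviewer's clobbering concern is easy to reproduce outside the agent: plain map assignment means a secret whose name collides with an existing environment variable silently wins. A minimal standalone restatement of the merge semantics (not the agent's code) follows.

package main

import "fmt"

// merge restates MergeEnvironmentVariables without the container or lock:
// last writer wins, and the secret names are not validated.
func merge(env, secrets map[string]string) map[string]string {
	if env == nil {
		env = make(map[string]string)
	}
	for k, v := range secrets {
		env[k] = v // silently overwrites an existing variable of the same name
	}
	return env
}

func main() {
	env := map[string]string{"PATH": "/usr/bin"}
	env = merge(env, map[string]string{"PATH": "secret-value"})
	fmt.Println(env["PATH"]) // secret-value - the original PATH is clobbered
}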
@@ -0,0 +1,14 @@
+'use strict';
+
+function defineAspects(operation, aspects) {
+ aspects = new Set(aspects);
+ Object.defineProperty(operation, 'aspects', {
+ value: aspects,
+ writable: false
+ });
+ return aspects;
+}
+
+module.exports = {
+ defineAspects
+}; | 1 | 1 | 15,318 | nit: aspects are defined in `OperationBase`, should `defineAspects` live there as well? | mongodb-node-mongodb-native | js |
|
@@ -5,12 +5,15 @@ from collections import OrderedDict
from kinto.core.permission import PermissionBase
from kinto.core.storage.postgresql.client import create_from_config
+from kinto.core.storage.postgresql.migrator import Migrator
logger = logging.getLogger(__name__)
+HERE = os.path.abspath(os.path.dirname(__file__))
-class Permission(PermissionBase):
+
+class Permission(PermissionBase, Migrator):
"""Permission backend using PostgreSQL.
Enable in configuration:: | 1 | import logging
import os
from collections import OrderedDict
from kinto.core.permission import PermissionBase
from kinto.core.storage.postgresql.client import create_from_config
logger = logging.getLogger(__name__)
class Permission(PermissionBase):
"""Permission backend using PostgreSQL.
Enable in configuration::
kinto.permission_backend = kinto.core.permission.postgresql
Database location URI can be customized::
kinto.permission_url = postgres://user:pass@db.server.lan:5432/dbname
Alternatively, username and password could also rely on system user ident
or even specified in :file:`~/.pgpass` (*see PostgreSQL documentation*).
.. note::
Some tables and indices are created when ``kinto migrate`` is run.
        This requires some privileges on the database, or an error will
        be raised.
**Alternatively**, the schema can be initialized outside the
python application, using the SQL file located in
        :file:`kinto/core/permission/postgresql/schema.sql`. This makes it
        possible to distinguish schema manipulation privileges from schema usage.
A connection pool is enabled by default::
kinto.permission_pool_size = 10
kinto.permission_maxoverflow = 10
kinto.permission_max_backlog = -1
kinto.permission_pool_recycle = -1
kinto.permission_pool_timeout = 30
kinto.cache_poolclass =
kinto.core.storage.postgresql.pool.QueuePoolWithMaxBacklog
The ``max_backlog`` limits the number of threads that can be in the queue
waiting for a connection. Once this limit has been reached, any further
attempts to acquire a connection will be rejected immediately, instead of
locking up all threads by keeping them waiting in the queue.
See `dedicated section in SQLAlchemy documentation
<http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html>`_
for default values and behaviour.
.. note::
Using a `dedicated connection pool <http://pgpool.net>`_ is still
        recommended to allow load balancing, replication, or to limit the number
of connections used in a multi-process deployment.
:noindex:
""" # NOQA
def __init__(self, client, *args, **kwargs):
super().__init__(*args, **kwargs)
self.client = client
def initialize_schema(self, dry_run=False):
# Check if user_principals table exists.
query = """
SELECT 1
FROM information_schema.tables
WHERE table_name = 'user_principals';
"""
with self.client.connect(readonly=True) as conn:
result = conn.execute(query)
if result.rowcount > 0:
logger.info('PostgreSQL permission schema is up-to-date.')
return
# Create schema
here = os.path.abspath(os.path.dirname(__file__))
sql_file = os.path.join(here, 'schema.sql')
if dry_run:
logger.info("Create permission schema from '{}'".format(sql_file))
return
# Since called outside request, force commit.
with open(sql_file) as f:
schema = f.read()
with self.client.connect(force_commit=True) as conn:
conn.execute(schema)
logger.info('Created PostgreSQL permission tables')
def flush(self):
query = """
DELETE FROM user_principals;
DELETE FROM access_control_entries;
"""
# Since called outside request (e.g. tests), force commit.
with self.client.connect(force_commit=True) as conn:
conn.execute(query)
logger.debug('Flushed PostgreSQL permission tables')
def add_user_principal(self, user_id, principal):
query = """
INSERT INTO user_principals (user_id, principal)
SELECT :user_id, :principal
WHERE NOT EXISTS (
SELECT principal
FROM user_principals
WHERE user_id = :user_id
AND principal = :principal
);"""
with self.client.connect() as conn:
conn.execute(query, dict(user_id=user_id, principal=principal))
def remove_user_principal(self, user_id, principal):
query = """
DELETE FROM user_principals
WHERE user_id = :user_id
AND principal = :principal;"""
with self.client.connect() as conn:
conn.execute(query, dict(user_id=user_id, principal=principal))
def remove_principal(self, principal):
query = """
DELETE FROM user_principals
WHERE principal = :principal;"""
with self.client.connect() as conn:
conn.execute(query, dict(principal=principal))
def get_user_principals(self, user_id):
query = """
SELECT principal
FROM user_principals
WHERE user_id = :user_id
OR user_id = 'system.Authenticated';"""
with self.client.connect(readonly=True) as conn:
result = conn.execute(query, dict(user_id=user_id))
results = result.fetchall()
return set([r['principal'] for r in results])
def add_principal_to_ace(self, object_id, permission, principal):
query = """
INSERT INTO access_control_entries (object_id, permission, principal)
SELECT :object_id, :permission, :principal
WHERE NOT EXISTS (
SELECT principal
FROM access_control_entries
WHERE object_id = :object_id
AND permission = :permission
AND principal = :principal
);"""
with self.client.connect() as conn:
conn.execute(query, dict(object_id=object_id,
permission=permission,
principal=principal))
def remove_principal_from_ace(self, object_id, permission, principal):
query = """
DELETE FROM access_control_entries
WHERE object_id = :object_id
AND permission = :permission
AND principal = :principal;"""
with self.client.connect() as conn:
conn.execute(query, dict(object_id=object_id,
permission=permission,
principal=principal))
def get_object_permission_principals(self, object_id, permission):
query = """
SELECT principal
FROM access_control_entries
WHERE object_id = :object_id
AND permission = :permission;"""
with self.client.connect(readonly=True) as conn:
result = conn.execute(query, dict(object_id=object_id,
permission=permission))
results = result.fetchall()
return set([r['principal'] for r in results])
def get_authorized_principals(self, bound_permissions):
# XXX: this method is not used, except in test suites :(
if not bound_permissions:
return set()
placeholders = {}
perm_values = []
for i, (obj, perm) in enumerate(bound_permissions):
placeholders['obj_{}'.format(i)] = obj
placeholders['perm_{}'.format(i)] = perm
perm_values.append('(:obj_{0}, :perm_{0})'.format(i))
query = """
WITH required_perms AS (
VALUES {}
)
SELECT principal
FROM required_perms JOIN access_control_entries
ON (object_id = column1 AND permission = column2);
""".format(','.join(perm_values))
with self.client.connect(readonly=True) as conn:
result = conn.execute(query, placeholders)
results = result.fetchall()
return set([r['principal'] for r in results])
def get_accessible_objects(self, principals, bound_permissions=None, with_children=True):
placeholders = {}
if bound_permissions is None:
# Return all objects on which the specified principals have some
# permissions.
# (e.g. permissions endpoint which lists everything)
query = """
SELECT object_id, permission
FROM access_control_entries
WHERE principal IN :principals
"""
placeholders['principals'] = tuple(principals)
elif len(bound_permissions) == 0:
# If the list of object permissions to filter on is empty, then
# do not bother querying the backend. The result will be empty.
# (e.g. root object /buckets)
return {}
else:
principals_values = []
for i, principal in enumerate(principals):
placeholders['principal_{}'.format(i)] = principal
principals_values.append('(:principal_{})'.format(i))
perm_values = []
for i, (obj, perm) in enumerate(bound_permissions):
placeholders['obj_{}'.format(i)] = obj.replace('*', '%')
placeholders['perm_{}'.format(i)] = perm
perm_values.append('(:obj_{0}, :perm_{0})'.format(i))
if with_children:
object_id_condition = 'object_id LIKE pattern'
else:
object_id_condition = ('object_id LIKE pattern '
"AND object_id NOT LIKE pattern || '/%'")
query = """
WITH required_perms AS (
VALUES {perms}
),
user_principals AS (
VALUES {principals}
),
potential_objects AS (
SELECT object_id, permission, required_perms.column1 AS pattern
FROM access_control_entries
JOIN user_principals
ON (principal = user_principals.column1)
JOIN required_perms
ON (permission = required_perms.column2)
)
SELECT object_id, permission
FROM potential_objects
WHERE {object_id_condition};
""".format(perms=','.join(perm_values),
principals=','.join(principals_values),
object_id_condition=object_id_condition)
with self.client.connect(readonly=True) as conn:
result = conn.execute(query, placeholders)
results = result.fetchall()
perms_by_id = {}
for r in results:
perms_by_id.setdefault(r['object_id'], set()).add(r['permission'])
return perms_by_id
def check_permission(self, principals, bound_permissions):
if not bound_permissions:
return False
placeholders = {}
perms_values = []
for i, (obj, perm) in enumerate(bound_permissions):
placeholders['obj_{}'.format(i)] = obj
placeholders['perm_{}'.format(i)] = perm
perms_values.append('(:obj_{0}, :perm_{0})'.format(i))
principals_values = []
for i, principal in enumerate(principals):
placeholders['principal_{}'.format(i)] = principal
principals_values.append('(:principal_{})'.format(i))
query = """
WITH required_perms AS (
VALUES {perms}
),
allowed_principals AS (
SELECT principal
FROM required_perms JOIN access_control_entries
ON (object_id = column1 AND permission = column2)
),
required_principals AS (
VALUES {principals}
)
SELECT COUNT(*) AS matched
FROM required_principals JOIN allowed_principals
ON (required_principals.column1 = principal);
""".format(perms=','.join(perms_values),
principals=','.join(principals_values))
with self.client.connect(readonly=True) as conn:
result = conn.execute(query, placeholders)
total = result.fetchone()
return total['matched'] > 0
def get_objects_permissions(self, objects_ids, permissions=None):
object_ids_values = []
placeholders = {}
for i, obj_id in enumerate(objects_ids):
object_ids_values.append('({0}, :obj_id_{0})'.format(i))
placeholders['obj_id_{}'.format(i)] = obj_id
query = """
WITH required_object_ids AS (
VALUES {objects_ids}
)
SELECT object_id, permission, principal
FROM required_object_ids JOIN access_control_entries
ON (object_id = column2)
{permissions_condition}
ORDER BY column1 ASC;
"""
safeholders = {
'objects_ids': ','.join(object_ids_values),
'permissions_condition': ''
}
if permissions is not None:
safeholders['permissions_condition'] = """
WHERE permission IN :permissions"""
placeholders['permissions'] = tuple(permissions)
with self.client.connect(readonly=True) as conn:
result = conn.execute(query.format_map(safeholders), placeholders)
rows = result.fetchall()
groupby_id = OrderedDict()
for object_id in objects_ids:
groupby_id[object_id] = {}
for row in rows:
object_id, permission, principal = (row['object_id'],
row['permission'],
row['principal'])
groupby_id[object_id].setdefault(permission, set()).add(principal)
return list(groupby_id.values())
def replace_object_permissions(self, object_id, permissions):
if not permissions:
return
placeholders = {
'object_id': object_id
}
new_aces = []
specified_perms = []
for i, (perm, principals) in enumerate(permissions.items()):
placeholders['perm_{}'.format(i)] = perm
specified_perms.append('(:perm_{})'.format(i))
for principal in set(principals):
j = len(new_aces)
placeholders['principal_{}'.format(j)] = principal
new_aces.append('(:perm_{}, :principal_{})'.format(i, j))
if not new_aces:
query = """
WITH specified_perms AS (
VALUES {specified_perms}
)
DELETE FROM access_control_entries
USING specified_perms
WHERE object_id = :object_id AND permission = column1
""".format(specified_perms=','.join(specified_perms))
else:
query = """
WITH specified_perms AS (
VALUES {specified_perms}
),
delete_specified AS (
DELETE FROM access_control_entries
USING specified_perms
WHERE object_id = :object_id AND permission = column1
RETURNING object_id
),
affected_object AS (
SELECT object_id FROM delete_specified
UNION SELECT :object_id
),
new_aces AS (
VALUES {new_aces}
)
INSERT INTO access_control_entries(object_id, permission, principal)
SELECT DISTINCT d.object_id, n.column1, n.column2
FROM new_aces AS n, affected_object AS d;
""".format(specified_perms=','.join(specified_perms),
new_aces=','.join(new_aces))
with self.client.connect() as conn:
conn.execute(query, placeholders)
def delete_object_permissions(self, *object_id_list):
if len(object_id_list) == 0:
return
object_ids_values = []
placeholders = {}
for i, obj_id in enumerate(object_id_list):
object_ids_values.append('(:obj_id_{})'.format(i))
placeholders['obj_id_{}'.format(i)] = obj_id.replace('*', '%')
query = """
WITH object_ids AS (
VALUES {object_ids_values}
)
DELETE FROM access_control_entries
USING object_ids
WHERE object_id LIKE column1;"""
safeholders = {
'object_ids_values': ','.join(object_ids_values)
}
with self.client.connect() as conn:
conn.execute(query.format_map(safeholders), placeholders)
def load_from_config(config):
client = create_from_config(config, prefix='permission_')
return Permission(client=client)
| 1 | 11,351 | Why the `os.path.abspath`? `os.path.dirname` should always give a valid directory path. | Kinto-kinto | py |
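One detail worth calling out from the backend above is how it expands variable-length lists into numbered placeholders ('obj_0', 'principal_1', ...) instead of interpolating values into the SQL text. The same idea in Go, shown here only to build the query text and argument list (the table name and $n placeholder style are assumptions; no database is touched):

package main

import (
	"fmt"
	"strings"
)

// buildInQuery expands a principal list into numbered placeholders,
// keeping the values out of the SQL text itself.
func buildInQuery(principals []string) (string, []interface{}) {
	marks := make([]string, len(principals))
	args := make([]interface{}, len(principals))
	for i, p := range principals {
		marks[i] = fmt.Sprintf("$%d", i+1)
		args[i] = p
	}
	query := "SELECT object_id, permission FROM access_control_entries" +
		" WHERE principal IN (" + strings.Join(marks, ", ") + ")"
	return query, args
}

func main() {
	q, args := buildInQuery([]string{"userid:alice", "system.Authenticated"})
	fmt.Println(q)
	fmt.Println(args)
}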
@@ -143,4 +143,8 @@ public class ControllerConfiguration {
settingsConfiguration.settingsManager()
);
}
+
+ public String getWineEnginesPath() {
+ return this.wineEnginesPath;
+ }
} | 1 | /*
* Copyright (C) 2015-2017 PÂRIS Quentin
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package org.phoenicis.javafx.controller;
import org.phoenicis.javafx.views.common.ThemeConfiguration;
import org.phoenicis.library.LibraryConfiguration;
import org.phoenicis.settings.SettingsConfiguration;
import org.phoenicis.apps.AppsConfiguration;
import org.phoenicis.containers.ContainersConfiguration;
import org.phoenicis.engines.EnginesConfiguration;
import org.phoenicis.javafx.controller.apps.AppsController;
import org.phoenicis.javafx.controller.containers.ContainersController;
import org.phoenicis.javafx.controller.engines.EnginesController;
import org.phoenicis.javafx.controller.library.LibraryController;
import org.phoenicis.javafx.controller.library.console.ConsoleController;
import org.phoenicis.javafx.controller.settings.SettingsController;
import org.phoenicis.javafx.views.ViewsConfiguration;
import org.phoenicis.scripts.ScriptsConfiguration;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
@Configuration
public class ControllerConfiguration {
@Value("${application.name}")
private String applicationName;
@Value("${application.user.engines.wine}")
private String wineEnginesPath;
@Autowired
private ThemeConfiguration themeConfiguration;
@Autowired
private ViewsConfiguration viewsConfiguration;
@Autowired
private ScriptsConfiguration scriptsConfiguration;
@Autowired
private AppsConfiguration appsConfiguration;
@Autowired
private LibraryConfiguration libraryConfiguration;
@Autowired
private EnginesConfiguration enginesConfiguration;
@Autowired
private ContainersConfiguration containersConfiguration;
@Autowired
private SettingsConfiguration settingsConfiguration;
@Bean
public MainController mainController() {
return new MainController(
applicationName,
libraryController(),
appsController(),
enginesController(),
containersController(),
settingsController(),
themeConfiguration.themeManager(),
viewsConfiguration.phoenicisLogo());
}
@Bean
public ContainersController containersController() {
return new ContainersController(
viewsConfiguration.viewContainers(),
containersConfiguration.backgroundContainersManager(),
viewsConfiguration.winePrefixContainerPanelFactory(),
containersConfiguration.winePrefixContainerController(),
enginesConfiguration.wineVersionsFetcher()
);
}
@Bean
public EnginesController enginesController() {
return new EnginesController(
viewsConfiguration.viewEngines(),
enginesConfiguration.wineVersionsFetcher(),
wineEnginesPath,
scriptsConfiguration.scriptInterpreter(),
themeConfiguration.themeManager()
);
}
@Bean
public LibraryController libraryController() {
return new LibraryController(
viewsConfiguration.viewLibrary(),
consoleController(),
libraryConfiguration.libraryManager(),
libraryConfiguration.shortcutRunner(),
libraryConfiguration.shortcutManager(),
scriptsConfiguration.scriptInterpreter()
);
}
@Bean
public AppsController appsController() {
return new AppsController(
viewsConfiguration.viewApps(),
appsConfiguration.repositoryManager(),
scriptsConfiguration.scriptInterpreter(),
themeConfiguration.themeManager()
);
}
@Bean
public ConsoleController consoleController() {
return new ConsoleController(
viewsConfiguration.consoleTabFactory(),
scriptsConfiguration.scriptInterpreter()
);
}
@Bean
public SettingsController settingsController() {
return new SettingsController(
viewsConfiguration.viewSettings(),
settingsConfiguration.settingsManager()
);
}
}
| 1 | 9,544 | I think we should not consider special engine types on this level. I would prefer to build the specific engine path based on `application.user.engines` later on. | PhoenicisOrg-phoenicis | java |
@@ -78,6 +78,12 @@ bool IndexPolicyMaker::buildPolicy() {
if (exist == scanItems_.end()) {
break;
}
+        // Stop building the policy when the last operator is a range scan;
+        // the remaining fields will use expression filtering.
+ auto it = scanItems_.rbegin()->second;
+ if (it.endBound_.rel_ != RelationType::kEQRel) {
+ break;
+ }
}
// re-check operatorList_.
// if operatorList_ is not empty, that means there are still fields to filter | 1 | /* Copyright (c) 2020 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "storage/index/IndexPolicyMaker.h"
#include "utils/NebulaKeyUtils.h"
namespace nebula {
namespace storage {
cpp2::ErrorCode IndexPolicyMaker::preparePolicy(const std::string &filter) {
auto ret = decodeExpression(filter);
if (ret != cpp2::ErrorCode::SUCCEEDED) {
return ret;
}
/**
* Traverse the expression tree and
     * collect the relationships for execution policies.
*/
ret = traversalExpression(exp_.get());
return ret;
}
cpp2::ErrorCode IndexPolicyMaker::decodeExpression(const std::string &filter) {
cpp2::ErrorCode code = cpp2::ErrorCode::SUCCEEDED;
auto expRet = Expression::decode(filter);
if (!expRet.ok()) {
VLOG(1) << "Can't decode the filter " << filter;
return cpp2::ErrorCode::E_INVALID_FILTER;
}
exp_ = std::move(expRet).value();
if (expCtx_ == nullptr) {
expCtx_ = std::make_unique<ExpressionContext>();
}
exp_->setContext(this->expCtx_.get());
auto status = exp_->prepare();
if (!status.ok()) {
return cpp2::ErrorCode::E_INVALID_FILTER;
}
return code;
}
bool IndexPolicyMaker::buildPolicy() {
bool nextCol = true;
for (auto& col : index_->get_fields()) {
auto itr = operatorList_.begin();
while (nextCol && itr != operatorList_.end()) {
if (col.get_name() == std::get<0>(*itr)) {
/**
* TODO sky : drop the sub-exp from root expression tree.
*/
if (std::get<2>(*itr) == RelationalExpression::Operator::NE) {
                    // Building the policy is interrupted when a '!=' expression occurs.
                    // The '!=' filtering will instead be done on the result set.
requiredFilter_ = true;
nextCol = false;
break;
}
if (!writeScanItem(col.get_name(), *itr)) {
return false;
}
                // Delete the operator item once it has hit an index column.
operatorList_.erase(itr);
} else {
++itr;
}
}
/**
         * If an index field does not hit any filter condition,
         * there is no need to keep looping over the index fields. For example:
         * index (c1, c2, c3)
         * where c1 > 1 and c3 == 1
         * Field c2 is missing from the operatorList_,
         * so we just need to range scan on c1 and filter on c3.
*/
auto exist = scanItems_.find(col.get_name());
if (exist == scanItems_.end()) {
break;
}
}
// re-check operatorList_.
// if operatorList_ is not empty, that means there are still fields to filter
if (!requiredFilter_ && operatorList_.size() > 0) {
requiredFilter_ = true;
}
return true;
}
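The prefix rule that buildPolicy (together with the patch above) enforces - consume equality conditions along the index column order, allow at most one trailing range condition, and leave the rest to expression filtering - is language-independent. A compact restatement of just that rule in Go, with invented names, for illustration:

package main

import "fmt"

type cond struct {
	col string
	op  string // "EQ", "GT", "GE", "LT", "LE" or "NE"
}

// buildPolicy walks the index columns in order: equality conditions
// extend the scannable prefix, a single range condition may close it,
// and any condition left over must be applied as a post-scan filter.
func buildPolicy(indexCols []string, conds map[string]string) (scan []cond, filtered []string) {
	for _, col := range indexCols {
		op, ok := conds[col]
		if !ok || op == "NE" {
			break // a gap or a '!=' ends the scannable prefix
		}
		scan = append(scan, cond{col, op})
		if op != "EQ" {
			break // a range condition must be the last scanned column
		}
	}
	seen := map[string]bool{}
	for _, c := range scan {
		seen[c.col] = true
	}
	for col := range conds {
		if !seen[col] {
			filtered = append(filtered, col)
		}
	}
	return
}

func main() {
	scan, rest := buildPolicy([]string{"c1", "c2", "c3"},
		map[string]string{"c1": "EQ", "c2": "GT", "c3": "EQ"})
	fmt.Println(scan, rest) // [{c1 EQ} {c2 GT}] [c3]
}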
cpp2::ErrorCode IndexPolicyMaker::traversalExpression(const Expression *expr) {
cpp2::ErrorCode code = cpp2::ErrorCode::SUCCEEDED;
Getters getters;
/**
* TODO (sky) :
     * Handle errors for FuncExpr or ArithmeticExpr that contain
     * AliasPropExpr, for example:
* "tag1.col1 > tag1.col2" or "tag1.col2 > (tag1.col3 - 100)" , etc.
*/
getters.getAliasProp = [](const std::string&,
const std::string&) -> OptVariantType {
return OptVariantType(Status::Error("Alias expression cannot be evaluated"));
};
switch (expr->kind()) {
case nebula::Expression::kLogical : {
            // OR logical expressions are not allowed in the graph layer.
            // Make sure every logical expression here is 'AND'.
auto* lExpr = dynamic_cast<const LogicalExpression*>(expr);
auto* left = lExpr->left();
traversalExpression(left);
auto* right = lExpr->right();
traversalExpression(right);
break;
}
case nebula::Expression::kRelational : {
std::string prop;
VariantType v;
auto* rExpr = dynamic_cast<const RelationalExpression*>(expr);
auto* left = rExpr->left();
auto* right = rExpr->right();
RelationalExpression::Operator op;
if (left->kind() == nebula::Expression::kAliasProp) {
auto* aExpr = dynamic_cast<const AliasPropertyExpression*>(left);
prop = *aExpr->prop();
auto value = right->eval(getters);
if (!value.ok()) {
VLOG(1) << "Can't evaluate the expression " << right->toString();
return cpp2::ErrorCode::E_INVALID_FILTER;
}
v = value.value();
op = rExpr->op();
} else if (right->kind() == nebula::Expression::kAliasProp) {
auto* aExpr = dynamic_cast<const AliasPropertyExpression*>(right);
prop = *aExpr->prop();
auto value = left->eval(getters);
if (!value.ok()) {
VLOG(1) << "Can't evaluate the expression " << left->toString();
return cpp2::ErrorCode::E_INVALID_FILTER;
}
v = value.value();
op = reversalRelationalExprOP(rExpr->op());
}
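// NOTE: if neither operand is an AliasProp, 'prop', 'v' and 'op' are left
// unset before being emplaced below; presumably the filter is expected to
// always carry an alias property on one side.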
operatorList_.emplace_back(std::make_tuple(std::move(prop), std::move(v), op));
break;
}
default : {
return cpp2::ErrorCode::E_INVALID_FILTER;
}
}
return code;
}
bool IndexPolicyMaker::exprEval(Getters &getters) {
if (exp_ != nullptr) {
auto value = exp_->eval(getters);
return (value.ok() && Expression::asBool(value.value()));
}
return true;
}
RelationType IndexPolicyMaker::toRel(RelationalExpression::Operator op) {
switch (op) {
case RelationalExpression::Operator::LT :
return RelationType::kLTRel;
case RelationalExpression::Operator::LE :
return RelationType::kLERel;
case RelationalExpression::Operator::GT :
return RelationType::kGTRel;
case RelationalExpression::Operator::GE:
return RelationType::kGERel;
case RelationalExpression::Operator::EQ :
return RelationType::kEQRel;
default :
return RelationType::kNull;
}
}
RelationalExpression::Operator
IndexPolicyMaker::reversalRelationalExprOP(RelationalExpression::Operator op) {
switch (op) {
case RelationalExpression::Operator::LT: {
return RelationalExpression::Operator::GT;
}
case RelationalExpression::Operator::LE: {
return RelationalExpression::Operator::GE;
}
case RelationalExpression::Operator::GT: {
return RelationalExpression::Operator::LT;
}
case RelationalExpression::Operator::GE: {
return RelationalExpression::Operator::LE;
}
default : {
return op;
}
}
}
bool IndexPolicyMaker::writeScanItem(const std::string& prop, const OperatorItem& item) {
auto op = std::get<2>(item);
switch (op) {
// For example, col > 1 means the operator is GT; col >= 1 means the operator is GE.
// If the operator is GT or GE, the 1 should be a begin value.
case RelationalExpression::Operator::GE :
case RelationalExpression::Operator::GT : {
auto v = scanItems_.find(prop);
if (v == scanItems_.end()) {
// If the field does not exist in scanItems_, add a new one.
// The default value is an invalid VariantType.
scanItems_[prop] = ScanBound(Bound(toRel(op), std::get<1>(item)), Bound());
} else {
if (v->second.beginBound_.rel_ == RelationType::kNull) {
// If the value is an invalid VariantType, reset it.
v->second.beginBound_ = Bound(toRel(op), std::get<1>(item));
} else if (v->second.beginBound_.val_ < std::get<1>(item)) {
// This might be the case where c1 > 1 and c1 > 5, so the 5 should be saved.
v->second.beginBound_.val_ = std::get<1>(item);
} else if (v->second.beginBound_.rel_ == RelationType::kEQRel) {
// If this field already appears in scanItems_ with an EQ bound,
// the filter conditions conflict, for example:
// c1 == 1 and c1 > 2
VLOG(1) << "Repeated conditional expression for field : " << prop;
return false;
}
}
break;
}
// For example, col < 1 means the operator is LT; col <= 1 means the operator is LE.
// If the operator is LT or LE, the 1 should be an end value.
case RelationalExpression::Operator::LE :
case RelationalExpression::Operator::LT : {
auto v = scanItems_.find(prop);
if (v == scanItems_.end()) {
scanItems_[prop] = ScanBound(Bound(), Bound(toRel(op), std::get<1>(item)));
} else {
if (v->second.endBound_.rel_ == RelationType::kNull) {
v->second.endBound_ = Bound(toRel(op), std::get<1>(item));
} else if (v->second.endBound_.val_ > std::get<1>(item)) {
// This might be the case where c1 < 1 and c1 < 5, so the 1 should be saved.
v->second.endBound_.val_ = std::get<1>(item);
} else if (v->second.endBound_.rel_ == RelationType::kEQRel) {
// If this field already appears in scanItems_ with an EQ bound,
// the filter conditions conflict, for example:
// c1 == 1 and c1 < 2
VLOG(1) << "Repeated conditional expression for field : " << prop;
return false;
}
}
break;
}
case RelationalExpression::Operator::EQ: {
auto v = scanItems_.find(prop);
if (v == scanItems_.end()) {
scanItems_[prop] = ScanBound(Bound(toRel(op), std::get<1>(item)),
Bound(toRel(op), std::get<1>(item)));
} else {
// If this field already appears in scanItems_,
// the filter conditions conflict, for example:
// c1 == 1 and c1 == 2
VLOG(1) << "Repeated conditional expression for field : " << prop;
return false;
}
// Explicit break to avoid falling through into the NE case.
break;
}
case RelationalExpression::Operator::NE : {
break;
}
default : {
VLOG(1) << "Unknown operation of RelationalExpression. column : " << prop;
return false;
}
}
return true;
}
} // namespace storage
} // namespace nebula
| 1 | 30,062 | Is `rbegin` correct? We can't make sure that the last index column is the `rbegin` of `scanItems`. | vesoft-inc-nebula | cpp |
@@ -108,6 +108,8 @@ func main() {
glog.Fatalln("EVM Network ID is not set, call config.New() first")
}
+ config.SetChainID(cfg.Chain.ID)
+
cfg.Genesis = genesisCfg
cfgToLog := cfg
cfgToLog.Chain.ProducerPrivKey = "" | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
// Usage:
// make build
// ./bin/server -config-path=./config.yaml
//
package main
import (
"context"
"flag"
"fmt"
glog "log"
"os"
"os/signal"
"strings"
"syscall"
"github.com/iotexproject/go-pkgs/hash"
_ "go.uber.org/automaxprocs"
"go.uber.org/zap"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/blockchain/genesis"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/probe"
"github.com/iotexproject/iotex-core/server/itx"
)
/**
* _overwritePath is the path to the config file that overwrites default values
* _secretPath is the path to the config file that stores secret values
*/
var (
genesisPath string
_overwritePath string
_secretPath string
_subChainPath string
_plugins strs
)
type strs []string
func (ss *strs) String() string {
return strings.Join(*ss, ",")
}
func (ss *strs) Set(str string) error {
*ss = append(*ss, str)
return nil
}
func init() {
flag.StringVar(&genesisPath, "genesis-path", "", "Genesis path")
flag.StringVar(&_overwritePath, "config-path", "", "Config path")
flag.StringVar(&_secretPath, "secret-path", "", "Secret path")
flag.StringVar(&_subChainPath, "sub-config-path", "", "Sub chain Config path")
flag.Var(&_plugins, "plugin", "Plugin of the node")
flag.Usage = func() {
_, _ = fmt.Fprintf(os.Stderr,
"usage: server -config-path=[string]\n")
flag.PrintDefaults()
os.Exit(2)
}
flag.Parse()
}
func main() {
stop := make(chan os.Signal, 1)
signal.Notify(stop, os.Interrupt)
signal.Notify(stop, syscall.SIGTERM)
ctx, cancel := context.WithCancel(context.Background())
stopped := make(chan struct{})
livenessCtx, livenessCancel := context.WithCancel(context.Background())
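// livenessCtx outlives the main ctx so the probe server can still be
// stopped gracefully after the node itself has shut down.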
genesisCfg, err := genesis.New(genesisPath)
if err != nil {
glog.Fatalln("Failed to new genesis config.", zap.Error(err))
}
// set genesis timestamp
genesis.SetGenesisTimestamp(genesisCfg.Timestamp)
if genesis.Timestamp() == 0 {
glog.Fatalln("Genesis timestamp is not set, call genesis.New() first")
}
// load genesis block's hash
block.LoadGenesisHash(&genesisCfg)
if block.GenesisHash() == hash.ZeroHash256 {
glog.Fatalln("Genesis hash is not set, call block.LoadGenesisHash() first")
}
cfg, err := config.New([]string{_overwritePath, _secretPath}, _plugins)
if err != nil {
glog.Fatalln("Failed to new config.", zap.Error(err))
}
if err = initLogger(cfg); err != nil {
glog.Fatalln("Cannot config global logger, use default one: ", zap.Error(err))
}
// populate the EVM network ID
config.SetEVMNetworkID(cfg.Chain.EVMNetworkID)
if config.EVMNetworkID() == 0 {
glog.Fatalln("EVM Network ID is not set, call config.New() first")
}
cfg.Genesis = genesisCfg
cfgToLog := cfg
cfgToLog.Chain.ProducerPrivKey = ""
cfgToLog.Network.MasterKey = ""
log.S().Infof("Config in use: %+v", cfgToLog)
log.S().Infof("EVM Network ID: %d", config.EVMNetworkID())
log.S().Infof("Genesis timestamp: %d", genesisCfg.Timestamp)
log.S().Infof("Genesis hash: %x", block.GenesisHash())
// liveness start
probeSvr := probe.New(cfg.System.HTTPStatsPort)
if err := probeSvr.Start(ctx); err != nil {
log.L().Fatal("Failed to start probe server.", zap.Error(err))
}
go func() {
<-stop
// start stopping
cancel()
<-stopped
// liveness end
if err := probeSvr.Stop(livenessCtx); err != nil {
log.L().Error("Error when stopping probe server.", zap.Error(err))
}
livenessCancel()
}()
// create and start the node
svr, err := itx.NewServer(cfg)
if err != nil {
log.L().Fatal("Failed to create server.", zap.Error(err))
}
var cfgsub config.Config
if _subChainPath != "" {
cfgsub, err = config.NewSub([]string{_secretPath, _subChainPath})
if err != nil {
log.L().Fatal("Failed to new sub chain config.", zap.Error(err))
}
} else {
cfgsub = config.Config{}
}
if cfgsub.Chain.ID != 0 {
if err := svr.NewSubChainService(cfgsub); err != nil {
log.L().Fatal("Failed to new sub chain.", zap.Error(err))
}
}
itx.StartServer(ctx, svr, probeSvr, cfg)
close(stopped)
<-livenessCtx.Done()
}
func initLogger(cfg config.Config) error {
addr := cfg.ProducerAddress()
return log.InitLoggers(cfg.Log, cfg.SubLogs, zap.Fields(
zap.String("ioAddr", addr.String()),
))
}
| 1 | 23,638 | check it is != 0, just like `EVMNetworkID` above | iotexproject-iotex-core | go |
@@ -186,6 +186,8 @@ namespace OpenTelemetry
this.shutdownDrainTarget = this.circularBuffer.AddedCount;
this.shutdownTrigger.Set();
+ OpenTelemetrySdkEventSource.Log.DroppedExportProcessorItems(nameof(BatchExportProcessor<T>), typeof(T).Name, this.exporter.GetType().Name, this.droppedCount);
+
if (timeoutMilliseconds == Timeout.Infinite)
{
this.exporterThread.Join(); | 1 | // <copyright file="BatchExportProcessor.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Diagnostics;
using System.Threading;
using OpenTelemetry.Internal;
namespace OpenTelemetry
{
/// <summary>
/// Implements a processor that batches telemetry objects before calling the exporter.
/// </summary>
/// <typeparam name="T">The type of telemetry object to be exported.</typeparam>
public abstract class BatchExportProcessor<T> : BaseExportProcessor<T>
where T : class
{
internal const int DefaultMaxQueueSize = 2048;
internal const int DefaultScheduledDelayMilliseconds = 5000;
internal const int DefaultExporterTimeoutMilliseconds = 30000;
internal const int DefaultMaxExportBatchSize = 512;
private readonly CircularBuffer<T> circularBuffer;
private readonly int scheduledDelayMilliseconds;
private readonly int exporterTimeoutMilliseconds;
private readonly int maxExportBatchSize;
private readonly Thread exporterThread;
private readonly AutoResetEvent exportTrigger = new AutoResetEvent(false);
private readonly ManualResetEvent dataExportedNotification = new ManualResetEvent(false);
private readonly ManualResetEvent shutdownTrigger = new ManualResetEvent(false);
private long shutdownDrainTarget = long.MaxValue;
private long droppedCount;
/// <summary>
/// Initializes a new instance of the <see cref="BatchExportProcessor{T}"/> class.
/// </summary>
/// <param name="exporter">Exporter instance.</param>
/// <param name="maxQueueSize">The maximum queue size. After the size is reached, data are dropped. The default value is 2048.</param>
/// <param name="scheduledDelayMilliseconds">The delay interval in milliseconds between two consecutive exports. The default value is 5000.</param>
/// <param name="exporterTimeoutMilliseconds">How long the export can run before it is cancelled. The default value is 30000.</param>
/// <param name="maxExportBatchSize">The maximum batch size of every export. It must be less than or equal to maxQueueSize. The default value is 512.</param>
protected BatchExportProcessor(
BaseExporter<T> exporter,
int maxQueueSize = DefaultMaxQueueSize,
int scheduledDelayMilliseconds = DefaultScheduledDelayMilliseconds,
int exporterTimeoutMilliseconds = DefaultExporterTimeoutMilliseconds,
int maxExportBatchSize = DefaultMaxExportBatchSize)
: base(exporter)
{
if (maxQueueSize <= 0)
{
throw new ArgumentOutOfRangeException(nameof(maxQueueSize), maxQueueSize, "maxQueueSize should be greater than zero.");
}
if (maxExportBatchSize <= 0 || maxExportBatchSize > maxQueueSize)
{
throw new ArgumentOutOfRangeException(nameof(maxExportBatchSize), maxExportBatchSize, "maxExportBatchSize should be greater than zero and less than or equal to maxQueueSize.");
}
if (scheduledDelayMilliseconds <= 0)
{
throw new ArgumentOutOfRangeException(nameof(scheduledDelayMilliseconds), scheduledDelayMilliseconds, "scheduledDelayMilliseconds should be greater than zero.");
}
if (exporterTimeoutMilliseconds < 0)
{
throw new ArgumentOutOfRangeException(nameof(exporterTimeoutMilliseconds), exporterTimeoutMilliseconds, "exporterTimeoutMilliseconds should be non-negative.");
}
this.circularBuffer = new CircularBuffer<T>(maxQueueSize);
this.scheduledDelayMilliseconds = scheduledDelayMilliseconds;
this.exporterTimeoutMilliseconds = exporterTimeoutMilliseconds;
this.maxExportBatchSize = maxExportBatchSize;
this.exporterThread = new Thread(new ThreadStart(this.ExporterProc))
{
IsBackground = true,
Name = $"OpenTelemetry-{nameof(BatchExportProcessor<T>)}-{exporter.GetType().Name}",
};
this.exporterThread.Start();
}
/// <summary>
/// Gets the number of telemetry objects dropped by the processor.
/// </summary>
internal long DroppedCount => this.droppedCount;
/// <summary>
/// Gets the number of telemetry objects received by the processor.
/// </summary>
internal long ReceivedCount => this.circularBuffer.AddedCount + this.DroppedCount;
/// <summary>
/// Gets the number of telemetry objects processed by the underlying exporter.
/// </summary>
internal long ProcessedCount => this.circularBuffer.RemovedCount;
/// <inheritdoc/>
protected override void OnExport(T data)
{
if (this.circularBuffer.TryAdd(data, maxSpinCount: 50000))
{
if (this.circularBuffer.Count >= this.maxExportBatchSize)
{
this.exportTrigger.Set();
}
return; // enqueue succeeded
}
// either the queue is full or exceeded the spin limit, drop the item on the floor
Interlocked.Increment(ref this.droppedCount);
}
/// <inheritdoc/>
protected override bool OnForceFlush(int timeoutMilliseconds)
{
var tail = this.circularBuffer.RemovedCount;
var head = this.circularBuffer.AddedCount;
if (head == tail)
{
return true; // nothing to flush
}
this.exportTrigger.Set();
if (timeoutMilliseconds == 0)
{
return false;
}
var triggers = new WaitHandle[] { this.dataExportedNotification, this.shutdownTrigger };
var sw = Stopwatch.StartNew();
// There is a chance that the export thread finished processing all the data from the queue
// and signaled before we entered the wait here, so use polling to prevent being blocked indefinitely.
const int pollingMilliseconds = 1000;
while (true)
{
if (timeoutMilliseconds == Timeout.Infinite)
{
WaitHandle.WaitAny(triggers, pollingMilliseconds);
}
else
{
var timeout = timeoutMilliseconds - sw.ElapsedMilliseconds;
if (timeout <= 0)
{
return this.circularBuffer.RemovedCount >= head;
}
WaitHandle.WaitAny(triggers, Math.Min((int)timeout, pollingMilliseconds));
}
if (this.circularBuffer.RemovedCount >= head)
{
return true;
}
if (this.shutdownDrainTarget != long.MaxValue)
{
return false;
}
}
}
/// <inheritdoc/>
protected override bool OnShutdown(int timeoutMilliseconds)
{
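// Snapshot how many items have been enqueued so far; the exporter thread
// drains the queue up to this point and then exits its loop.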
this.shutdownDrainTarget = this.circularBuffer.AddedCount;
this.shutdownTrigger.Set();
if (timeoutMilliseconds == Timeout.Infinite)
{
this.exporterThread.Join();
return this.exporter.Shutdown();
}
if (timeoutMilliseconds == 0)
{
return this.exporter.Shutdown(0);
}
var sw = Stopwatch.StartNew();
this.exporterThread.Join(timeoutMilliseconds);
var timeout = timeoutMilliseconds - sw.ElapsedMilliseconds;
return this.exporter.Shutdown((int)Math.Max(timeout, 0));
}
private void ExporterProc()
{
var triggers = new WaitHandle[] { this.exportTrigger, this.shutdownTrigger };
while (true)
{
// only wait when the queue doesn't have enough items, otherwise keep busy and send data continuously
if (this.circularBuffer.Count < this.maxExportBatchSize)
{
WaitHandle.WaitAny(triggers, this.scheduledDelayMilliseconds);
}
if (this.circularBuffer.Count > 0)
{
using (var batch = new Batch<T>(this.circularBuffer, this.maxExportBatchSize))
{
this.exporter.Export(batch);
}
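// Pulse the notification so any OnForceFlush waiters wake up and
// re-check how far the queue has drained.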
this.dataExportedNotification.Set();
this.dataExportedNotification.Reset();
}
if (this.circularBuffer.RemovedCount >= this.shutdownDrainTarget)
{
break;
}
}
}
}
}
| 1 | 21,322 | if you just do "this.GetType().Name" and "this.exporter.GetType().Name", we get what we need. | open-telemetry-opentelemetry-dotnet | .cs |
@@ -26,16 +26,13 @@
*/
package com.salesforce.androidsdk.rest;
-import com.salesforce.androidsdk.R;
-import com.salesforce.androidsdk.app.SalesforceSDKManager;
/**
* This is where all the API version info lives. This allows us to change one
* line here and affect all our api calls.
*/
public class ApiVersionStrings {
- public static final String VERSION_NUMBER = SalesforceSDKManager.getInstance().
- getAppContext().getResources().getString(R.string.api_version);
+ public static final String VERSION_NUMBER = "v29.0";
public static final String API_PREFIX = "/services/data/";
public static final String BASE_PATH = API_PREFIX + VERSION_NUMBER;
public static final String BASE_CHATTER_PATH = BASE_PATH + "/chatter/"; | 1 | /*
* Copyright (c) 2013, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.rest;
import com.salesforce.androidsdk.R;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
/**
* This is where all the API version info lives. This allows us to change one
* line here and affect all our api calls.
*/
public class ApiVersionStrings {
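// NOTE: VERSION_NUMBER below is resolved through SalesforceSDKManager at
// class-load time; getInstance() throws a RuntimeException if init() has not
// been called yet (e.g., when unit tests load this class first).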
public static final String VERSION_NUMBER = SalesforceSDKManager.getInstance().
getAppContext().getResources().getString(R.string.api_version);
public static final String API_PREFIX = "/services/data/";
public static final String BASE_PATH = API_PREFIX + VERSION_NUMBER;
public static final String BASE_CHATTER_PATH = BASE_PATH + "/chatter/";
public static final String BASE_CONNECT_PATH = BASE_PATH + "/connect/";
public static final String BASE_SOBJECT_PATH = BASE_PATH + "/sobjects/";
}
| 1 | 13,797 | That's the code that would prevent any SalesforceSDKTest from running. At class loading time, SalesforceSDKManager.getInstance() would throw a RuntimeException because init() had never been called. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -34,4 +34,6 @@ public interface DefinitionConst {
String VERSION_RULE_LATEST = "latest";
String VERSION_RULE_ALL = "0.0.0+";
+
+ String DEFAULT_REVISION = "0";
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.servicecomb.serviceregistry.definition;
public interface DefinitionConst {
String CONFIG_QUALIFIED_INSTANCE_ENVIRONMENT_KEY = "instance_description.environment";
String CONFIG_ALLOW_CROSS_APP_KEY = "allowCrossApp";
String DEFAULT_APPLICATION_ID = "default";
String DEFAULT_MICROSERVICE_VERSION = "1.0.0";
String DEFAULT_STAGE = "prod";
String DEFAULT_INSTANCE_ENVIRONMENT = "production";
String VERSION_RULE_LATEST = "latest";
String VERSION_RULE_ALL = "0.0.0+";
}
| 1 | 8,340 | there is no "DEFAULT_REVISION" logic, no need to define this. | apache-servicecomb-java-chassis | java |
@@ -289,6 +289,9 @@ public class SalesforceSDKManager {
loginOptions = new LoginOptions(url, getPasscodeHash(), config.getOauthRedirectURI(),
config.getRemoteAccessConsumerKey(), config.getOauthScopes(), null, jwt);
}
+ } else {
+ loginOptions.setJwt(jwt);
+ loginOptions.setUrl(url);
}
return loginOptions;
} | 1 | /*
* Copyright (c) 2014, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.app;
import android.accounts.Account;
import android.accounts.AccountManager;
import android.accounts.AccountManagerCallback;
import android.accounts.AccountManagerFuture;
import android.annotation.TargetApi;
import android.app.Activity;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.content.pm.PackageInfo;
import android.content.pm.PackageManager.NameNotFoundException;
import android.content.res.Resources;
import android.os.AsyncTask;
import android.os.Build;
import android.os.SystemClock;
import android.provider.Settings;
import android.text.TextUtils;
import android.util.Log;
import android.webkit.CookieManager;
import android.webkit.CookieSyncManager;
import com.salesforce.androidsdk.accounts.UserAccount;
import com.salesforce.androidsdk.accounts.UserAccountManager;
import com.salesforce.androidsdk.auth.AuthenticatorService;
import com.salesforce.androidsdk.auth.HttpAccess;
import com.salesforce.androidsdk.auth.OAuth2;
import com.salesforce.androidsdk.config.AdminPermsManager;
import com.salesforce.androidsdk.config.AdminSettingsManager;
import com.salesforce.androidsdk.config.BootConfig;
import com.salesforce.androidsdk.config.LoginServerManager;
import com.salesforce.androidsdk.push.PushMessaging;
import com.salesforce.androidsdk.push.PushNotificationInterface;
import com.salesforce.androidsdk.rest.ClientManager;
import com.salesforce.androidsdk.rest.ClientManager.LoginOptions;
import com.salesforce.androidsdk.security.Encryptor;
import com.salesforce.androidsdk.security.PasscodeManager;
import com.salesforce.androidsdk.ui.AccountSwitcherActivity;
import com.salesforce.androidsdk.ui.LoginActivity;
import com.salesforce.androidsdk.ui.PasscodeActivity;
import com.salesforce.androidsdk.ui.SalesforceR;
import com.salesforce.androidsdk.util.EventsObservable;
import com.salesforce.androidsdk.util.EventsObservable.EventType;
import java.net.URI;
import java.util.List;
/**
* This class serves as an interface to the various
* functions of the Salesforce SDK. In order to use the SDK,
* your app must first instantiate the singleton SalesforceSDKManager
* object by calling the static init() method. After calling init(),
* use the static getInstance() method to access the
* singleton SalesforceSDKManager object.
*/
@SuppressWarnings("deprecation")
public class SalesforceSDKManager {
/**
* Current version of this SDK.
*/
public static final String SDK_VERSION = "4.3.0.unstable";
/**
* Default app name.
*/
private static final String DEFAULT_APP_DISPLAY_NAME = "Salesforce";
/**
* Instance of the SalesforceSDKManager to use for this process.
*/
protected static SalesforceSDKManager INSTANCE;
/**
* Timeout value for push un-registration.
*/
private static final int PUSH_UNREGISTER_TIMEOUT_MILLIS = 30000;
protected Context context;
protected KeyInterface keyImpl;
protected LoginOptions loginOptions;
protected Class<? extends Activity> mainActivityClass;
protected Class<? extends Activity> loginActivityClass = LoginActivity.class;
protected Class<? extends PasscodeActivity> passcodeActivityClass = PasscodeActivity.class;
protected Class<? extends AccountSwitcherActivity> switcherActivityClass = AccountSwitcherActivity.class;
private String encryptionKey;
private SalesforceR salesforceR = new SalesforceR();
private PasscodeManager passcodeManager;
private LoginServerManager loginServerManager;
private boolean isTestRun = false;
private boolean isLoggingOut = false;
private AdminSettingsManager adminSettingsManager;
private AdminPermsManager adminPermsManager;
private PushNotificationInterface pushNotificationInterface;
private String uid; // device id
private volatile boolean loggedOut = false;
/**
* PasscodeManager object lock.
*/
private Object passcodeManagerLock = new Object();
/**
* Returns a singleton instance of this class.
*
* @return Singleton instance of SalesforceSDKManager.
*/
public static SalesforceSDKManager getInstance() {
if (INSTANCE != null) {
return INSTANCE;
} else {
throw new RuntimeException("Applications need to call SalesforceSDKManager.init() first.");
}
}
/**
*
* @return true if SalesforceSDKManager has been initialized already
*/
public static boolean hasInstance() {
return INSTANCE != null;
}
/**
* Protected constructor.
* @param context Application context.
* @param keyImpl Implementation for KeyInterface.
* @param mainActivity Activity that should be launched after the login flow.
* @param loginActivity Login activity.
*/
protected SalesforceSDKManager(Context context, KeyInterface keyImpl,
Class<? extends Activity> mainActivity, Class<? extends Activity> loginActivity) {
this.uid = Settings.Secure.getString(context.getContentResolver(), Settings.Secure.ANDROID_ID);
this.context = context;
this.keyImpl = keyImpl;
this.mainActivityClass = mainActivity;
if (loginActivity != null) {
this.loginActivityClass = loginActivity;
}
}
/**
* Returns the class for the main activity.
*
* @return The class for the main activity.
*/
public Class<? extends Activity> getMainActivityClass() {
return mainActivityClass;
}
/**
* Returns the class for the account switcher activity.
*
* @return The class for the account switcher activity.
*/
public Class<? extends AccountSwitcherActivity> getAccountSwitcherActivityClass() {
return switcherActivityClass;
}
/**
* Sets the class for the account switcher activity.
*
* @param activity The class for the account switcher activity.
*/
public void setAccountSwitcherActivityClass(Class<? extends AccountSwitcherActivity> activity) {
if (activity != null) {
switcherActivityClass = activity;
}
}
public interface KeyInterface {
/**
* Defines a single function for retrieving the key
* associated with a given name.
*
* For the given name, this function must return the same key
* even when the application is restarted. The value this
* function returns must be Base64 encoded.
*
* {@link Encryptor#isBase64Encoded(String)} can be used to
* determine whether the generated key is Base64 encoded.
*
* {@link Encryptor#hash(String, String)} can be used to
* generate a Base64 encoded string.
*
* For example:
* <code>
* Encryptor.hash(name + "12s9adfgret=6235inkasd=012", name + "12kl0dsakj4-cuygsdf625wkjasdol8");
* </code>
*
* @param name The name associated with the key.
* @return The key used for encrypting salts and keys.
*/
public String getKey(String name);
}
/**
* For the given name, this function must return the same key
* even when the application is restarted. The value this
* function returns must be Base64 encoded.
*
* {@link Encryptor#isBase64Encoded(String)} can be used to
* determine whether the generated key is Base64 encoded.
*
* {@link Encryptor#hash(String, String)} can be used to
* generate a Base64 encoded string.
*
* For example:
* <code>
* Encryptor.hash(name + "12s9adfgret=6235inkasd=012", name + "12kl0dsakj4-cuygsdf625wkjasdol8");
* </code>
*
* @param name The name associated with the key.
* @return The key used for encrypting salts and keys.
*/
public String getKey(String name) {
String key = null;
if (keyImpl != null) {
key = keyImpl.getKey(name);
}
return key;
}
/**
* Before Mobile SDK 1.3, SalesforceSDK was packaged as a jar, and each project had to provide
* a subclass of SalesforceR.
*
* Since 1.3, SalesforceSDK is packaged as a library project, so the SalesforceR subclass is no longer needed.
* @return SalesforceR object which allows reference to resources living outside the SDK.
*/
public SalesforceR getSalesforceR() {
return salesforceR;
}
/**
* Returns the class of the activity used to perform the login process and create the account.
*
* @return the class of the activity used to perform the login process and create the account.
*/
public Class<? extends Activity> getLoginActivityClass() {
return loginActivityClass;
}
/**
* Returns login options associated with the app.
*
* @return LoginOptions instance.
*/
public LoginOptions getLoginOptions() {
return getLoginOptions(null, null);
}
public LoginOptions getLoginOptions(String jwt, String url) {
if (loginOptions == null) {
final BootConfig config = BootConfig.getBootConfig(context);
if (TextUtils.isEmpty(jwt)) {
loginOptions = new LoginOptions(url, getPasscodeHash(), config.getOauthRedirectURI(),
config.getRemoteAccessConsumerKey(), config.getOauthScopes(), null);
} else {
loginOptions = new LoginOptions(url, getPasscodeHash(), config.getOauthRedirectURI(),
config.getRemoteAccessConsumerKey(), config.getOauthScopes(), null, jwt);
}
}
return loginOptions;
}
/**
* For internal use only. Initializes required components.
* @param context Application context.
* @param keyImpl Implementation of KeyInterface.
* @param mainActivity Activity to be launched after the login flow.
* @param loginActivity Login activity.
*/
private static void init(Context context, KeyInterface keyImpl,
Class<? extends Activity> mainActivity, Class<? extends Activity> loginActivity) {
if (INSTANCE == null) {
INSTANCE = new SalesforceSDKManager(context, keyImpl, mainActivity, loginActivity);
}
initInternal(context);
}
/**
* For internal use by Salesforce Mobile SDK or by subclasses
* of SalesforceSDKManager. Initializes required components.
*
* @param context Application context.
*/
public static void initInternal(Context context) {
// Initializes the encryption module.
Encryptor.init(context);
// Initializes the HTTP client.
HttpAccess.init(context, INSTANCE.getUserAgent());
// Upgrades to the latest version.
SalesforceSDKUpgradeManager.getInstance().upgrade();
EventsObservable.get().notifyEvent(EventType.AppCreateComplete);
}
/**
* Initializes required components. Native apps must call one overload of
* this method before using the Salesforce Mobile SDK.
*
* @param context Application context.
* @param keyImpl Implementation of KeyInterface.
* @param mainActivity Activity that should be launched after the login flow.
*/
public static void initNative(Context context, KeyInterface keyImpl, Class<? extends Activity> mainActivity) {
SalesforceSDKManager.init(context, keyImpl, mainActivity, LoginActivity.class);
}
/**
* Initializes required components. Native apps must call one overload of
* this method before using the Salesforce Mobile SDK.
*
* @param context Application context.
* @param keyImpl Implementation of KeyInterface.
* @param mainActivity Activity that should be launched after the login flow.
* @param loginActivity Login activity.
*/
public static void initNative(Context context, KeyInterface keyImpl,
Class<? extends Activity> mainActivity, Class<? extends Activity> loginActivity) {
SalesforceSDKManager.init(context, keyImpl, mainActivity, loginActivity);
}
/**
* Sets a custom passcode activity class to be used instead of the default class.
* The custom class must subclass PasscodeActivity.
*
* @param activity Subclass of PasscodeActivity.
*/
public void setPasscodeActivity(Class<? extends PasscodeActivity> activity) {
if (activity != null) {
passcodeActivityClass = activity;
}
}
/**
* Returns the descriptor of the passcode activity class that's currently in use.
*
* @return Passcode activity class descriptor.
*/
public Class<? extends PasscodeActivity> getPasscodeActivity() {
return passcodeActivityClass;
}
/**
* Indicates whether the SDK should automatically log out when the
* access token is revoked. If you override this method to return
* false, your app is responsible for handling its own cleanup when the
* access token is revoked.
*
* @return True if the SDK should automatically logout.
*/
public boolean shouldLogoutWhenTokenRevoked() {
return true;
}
/**
* Returns the application context.
*
* @return Application context.
*/
public Context getAppContext() {
return context;
}
/**
* Returns the login server manager associated with SalesforceSDKManager.
*
* @return LoginServerManager instance.
*/
public synchronized LoginServerManager getLoginServerManager() {
if (loginServerManager == null) {
loginServerManager = new LoginServerManager(context);
}
return loginServerManager;
}
/**
* Sets a receiver that handles received push notifications.
*
* @param pnInterface Implementation of PushNotificationInterface.
*/
public synchronized void setPushNotificationReceiver(PushNotificationInterface pnInterface) {
pushNotificationInterface = pnInterface;
}
/**
* Returns the receiver that's configured to handle incoming push notifications.
*
* @return Configured implementation of PushNotificationInterface.
*/
public synchronized PushNotificationInterface getPushNotificationReceiver() {
return pushNotificationInterface;
}
/**
* Returns the passcode manager that's associated with SalesforceSDKManager.
*
* @return PasscodeManager instance.
*/
public PasscodeManager getPasscodeManager() {
synchronized (passcodeManagerLock) {
if (passcodeManager == null) {
passcodeManager = new PasscodeManager(context);
}
return passcodeManager;
}
}
/**
* Returns the user account manager that's associated with SalesforceSDKManager.
*
* @return UserAccountManager instance.
*/
public UserAccountManager getUserAccountManager() {
return UserAccountManager.getInstance();
}
/**
* Returns the administrator settings manager that's associated with SalesforceSDKManager.
*
* @return AdminSettingsManager instance.
*/
public synchronized AdminSettingsManager getAdminSettingsManager() {
if (adminSettingsManager == null) {
adminSettingsManager = new AdminSettingsManager();
}
return adminSettingsManager;
}
/**
* Returns the administrator permissions manager that's associated with SalesforceSDKManager.
*
* @return AdminPermsManager instance.
*/
public synchronized AdminPermsManager getAdminPermsManager() {
if (adminPermsManager == null) {
adminPermsManager = new AdminPermsManager();
}
return adminPermsManager;
}
/**
* Changes the passcode to a new value.
*
* @param oldPass Old passcode.
* @param newPass New passcode.
*/
public synchronized void changePasscode(String oldPass, String newPass) {
if (!isNewPasscode(oldPass, newPass)) {
return;
}
// Resets the cached encryption key, since the passcode has changed.
encryptionKey = null;
ClientManager.changePasscode(oldPass, newPass);
}
/**
* Indicates whether the new passcode is different from the old passcode.
*
* @param oldPass Old passcode.
* @param newPass New passcode.
* @return True if the new passcode is different from the old passcode.
*/
protected boolean isNewPasscode(String oldPass, String newPass) {
return !((oldPass == null && newPass == null)
|| (oldPass != null && newPass != null && oldPass.trim().equals(newPass.trim())));
}
/**
* Returns the encryption key being used.
*
* @param actualPass Passcode.
* @return Encryption key for passcode.
*/
public synchronized String getEncryptionKeyForPasscode(String actualPass) {
if (actualPass != null && !actualPass.trim().equals("")) {
return actualPass;
}
if (encryptionKey == null) {
encryptionKey = getPasscodeManager().hashForEncryption("");
}
return encryptionKey;
}
/**
* Returns the app display name used by the passcode dialog.
*
* @return App display string.
*/
public String getAppDisplayString() {
return DEFAULT_APP_DISPLAY_NAME;
}
/**
* Returns the passcode hash being used.
*
* @return The hashed passcode, or null if it's not required.
*/
public String getPasscodeHash() {
return getPasscodeManager().getPasscodeHash();
}
/**
* Returns the name of the application (as defined in AndroidManifest.xml).
*
* @return The name of the application.
*/
public String getApplicationName() {
return context.getPackageManager().getApplicationLabel(context.getApplicationInfo()).toString();
}
/**
* Checks if network connectivity exists.
*
* @return True if a network connection is available.
*/
public boolean hasNetwork() {
return HttpAccess.DEFAULT.hasNetwork();
}
/**
* Cleans up cached credentials and data.
*
* @param frontActivity Front activity.
* @param account Account.
*/
protected void cleanUp(Activity frontActivity, Account account) {
final List<UserAccount> users = getUserAccountManager().getAuthenticatedUsers();
// Finishes front activity if specified, and if this is the last account.
if (frontActivity != null && (users == null || users.size() <= 1)) {
frontActivity.finish();
}
/*
* Checks how many accounts are left that are authenticated. If only one
* account is left, this is the account that is being removed. In this
* case, we can safely reset passcode manager, admin prefs, and encryption keys.
* Otherwise, we don't reset passcode manager and admin prefs since
* there might be other accounts on that same org, and these policies
* are stored at the org level.
*/
if (users == null || users.size() <= 1) {
getAdminSettingsManager().resetAll();
getAdminPermsManager().resetAll();
adminSettingsManager = null;
adminPermsManager = null;
getPasscodeManager().reset(context);
passcodeManager = null;
encryptionKey = null;
UUIDManager.resetUuids();
}
}
/**
* Starts login flow if user account has been removed.
*/
protected void startLoginPage() {
// Clears cookies.
removeAllCookies();
// Restarts the application.
final Intent i = new Intent(context, getMainActivityClass());
i.setPackage(getAppContext().getPackageName());
i.setFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
context.startActivity(i);
}
/**
* Starts account switcher activity if an account has been removed.
*/
public void startSwitcherActivityIfRequired() {
// Clears cookies.
removeAllCookies();
/*
* If the number of accounts remaining is 0, shows the login page.
* If the number of accounts remaining is 1, switches to that user
* automatically. If there is more than 1 account logged in, shows
* the account switcher screen, so that the user can pick which
* account to switch to.
*/
final UserAccountManager userAccMgr = getUserAccountManager();
final List<UserAccount> accounts = userAccMgr.getAuthenticatedUsers();
if (accounts == null || accounts.size() == 0) {
startLoginPage();
} else if (accounts.size() == 1) {
userAccMgr.switchToUser(accounts.get(0));
} else {
final Intent i = new Intent(context, switcherActivityClass);
i.setFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
context.startActivity(i);
}
}
/**
* Unregisters from push notifications for both GCM (Android) and SFDC, and waits either for
* unregistration to complete or for the operation to time out. The timeout period is defined
* in PUSH_UNREGISTER_TIMEOUT_MILLIS.
*
* If timeout occurs while the user is logged in, this method attempts to unregister the push
* unregistration receiver, and then removes the user's account.
*
* @param clientMgr ClientManager instance.
* @param showLoginPage True - if the login page should be shown, False - otherwise.
* @param refreshToken Refresh token.
* @param clientId Client ID.
* @param loginServer Login server.
* @param account Account instance.
* @param frontActivity Front activity.
*/
private void unregisterPush(final ClientManager clientMgr, final boolean showLoginPage,
final String refreshToken, final String clientId,
final String loginServer, final Account account, final Activity frontActivity) {
final IntentFilter intentFilter = new IntentFilter(PushMessaging.UNREGISTERED_ATTEMPT_COMPLETE_EVENT);
final BroadcastReceiver pushUnregisterReceiver = new BroadcastReceiver() {
@Override
public void onReceive(Context context, Intent intent) {
if (intent.getAction().equals(PushMessaging.UNREGISTERED_ATTEMPT_COMPLETE_EVENT)) {
postPushUnregister(this, clientMgr, showLoginPage,
refreshToken, clientId, loginServer, account, frontActivity);
}
}
};
getAppContext().registerReceiver(pushUnregisterReceiver, intentFilter);
// Unregisters from notifications on logout.
final UserAccount userAcc = getUserAccountManager().buildUserAccount(account);
PushMessaging.unregister(context, userAcc);
/*
* Starts a background thread to wait up to the timeout period. If
* another thread has already performed logout, we exit immediately.
*/
(new Thread() {
public void run() {
long startTime = System.currentTimeMillis();
while ((System.currentTimeMillis() - startTime) < PUSH_UNREGISTER_TIMEOUT_MILLIS && !loggedOut) {
// Waits for half a second at a time.
SystemClock.sleep(500);
}
postPushUnregister(pushUnregisterReceiver, clientMgr, showLoginPage,
refreshToken, clientId, loginServer, account, frontActivity);
};
}).start();
}
/**
* This method is called either when unregistration for push notifications
* is complete and the user has logged out, or when a timeout occurs while waiting.
* If the user has not logged out, this method attempts to unregister the push
* notification unregistration receiver, and then removes the user's account.
*
* @param pushReceiver Broadcast receiver.
* @param clientMgr ClientManager instance.
* @param showLoginPage True - if the login page should be shown, False - otherwise.
* @param refreshToken Refresh token.
* @param clientId Client ID.
* @param loginServer Login server.
* @param account Account instance.
* @param frontActivity Front activity.
*/
private synchronized void postPushUnregister(BroadcastReceiver pushReceiver,
final ClientManager clientMgr, final boolean showLoginPage,
final String refreshToken, final String clientId,
final String loginServer, final Account account, Activity frontActivity) {
if (!loggedOut) {
try {
context.unregisterReceiver(pushReceiver);
} catch (Exception e) {
Log.e("SalesforceSDKManager:postPushUnregister", "Exception occurred while un-registering.", e);
}
removeAccount(clientMgr, showLoginPage, refreshToken, clientId, loginServer, account, frontActivity);
}
}
/**
* Destroys the stored authentication credentials (removes the account).
*
* @param frontActivity Front activity.
*/
public void logout(Activity frontActivity) {
logout(frontActivity, true);
}
/**
* Destroys the stored authentication credentials (removes the account).
*
* @param account Account.
* @param frontActivity Front activity.
*/
public void logout(Account account, Activity frontActivity) {
logout(account, frontActivity, true);
}
/**
* Destroys the stored authentication credentials (removes the account)
* and, if requested, restarts the app.
*
* @param frontActivity Front activity.
* @param showLoginPage If true, displays the login page after removing the account.
*/
public void logout(Activity frontActivity, final boolean showLoginPage) {
final ClientManager clientMgr = new ClientManager(context, getAccountType(),
null, shouldLogoutWhenTokenRevoked());
final Account account = clientMgr.getAccount();
logout(account, frontActivity, showLoginPage);
}
/**
* Destroys the stored authentication credentials (removes the account)
* and, if requested, restarts the app.
*
* @param account Account.
* @param frontActivity Front activity.
* @param showLoginPage If true, displays the login page after removing the account.
*/
public void logout(Account account, Activity frontActivity, final boolean showLoginPage) {
final ClientManager clientMgr = new ClientManager(context, getAccountType(),
null, shouldLogoutWhenTokenRevoked());
isLoggingOut = true;
final AccountManager mgr = AccountManager.get(context);
String refreshToken = null;
String clientId = null;
String loginServer = null;
if (account != null) {
String passcodeHash = getPasscodeHash();
refreshToken = SalesforceSDKManager.decryptWithPasscode(mgr.getPassword(account),
passcodeHash);
clientId = SalesforceSDKManager.decryptWithPasscode(mgr.getUserData(account,
AuthenticatorService.KEY_CLIENT_ID), passcodeHash);
loginServer = SalesforceSDKManager.decryptWithPasscode(mgr.getUserData(account,
AuthenticatorService.KEY_INSTANCE_URL), passcodeHash);
}
/*
* Makes a call to un-register from push notifications, only
* if the refresh token is available.
*/
final UserAccount userAcc = getUserAccountManager().buildUserAccount(account);
if (PushMessaging.isRegistered(context, userAcc) && refreshToken != null) {
loggedOut = false;
unregisterPush(clientMgr, showLoginPage, refreshToken, clientId,
loginServer, account, frontActivity);
} else {
removeAccount(clientMgr, showLoginPage, refreshToken, clientId,
loginServer, account, frontActivity);
}
}
/**
* Removes the account upon logout.
*
* @param clientMgr ClientManager instance.
* @param showLoginPage If true, displays the login page after removing the account.
* @param refreshToken Refresh token.
* @param clientId Client ID.
* @param loginServer Login server.
* @param account Account instance.
* @param frontActivity Front activity.
*/
private void removeAccount(ClientManager clientMgr, final boolean showLoginPage,
String refreshToken, String clientId, String loginServer,
Account account, Activity frontActivity) {
loggedOut = true;
cleanUp(frontActivity, account);
/*
* Removes the existing account, if any. 'account == null' does not
* guarantee that there are no accounts to remove. In the 'Forgot Passcode'
* flow there could be accounts to remove, but we don't have them, since
* we don't have the passcode hash to decrypt them. Hence, we query
* AccountManager directly here and remove the accounts for the case
* where 'account == null'. If AccountManager doesn't have accounts
* either, then there's nothing to do.
*/
if (account == null) {
final AccountManager accMgr = AccountManager.get(context);
if (accMgr != null) {
final Account[] accounts = accMgr.getAccountsByType(getAccountType());
if (accounts.length > 0) {
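/*
* NOTE: this loop passes the full 'accounts' array to removeAccounts()
* accounts.length - 1 times; a single call would presumably suffice
* for all but the last account.
*/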
for (int i = 0; i < accounts.length - 1; i++) {
clientMgr.removeAccounts(accounts);
}
clientMgr.removeAccountAsync(accounts[accounts.length - 1],
new AccountManagerCallback<Boolean>() {
@Override
public void run(AccountManagerFuture<Boolean> arg0) {
notifyLogoutComplete(showLoginPage);
}
});
} else {
notifyLogoutComplete(showLoginPage);
}
} else {
notifyLogoutComplete(showLoginPage);
}
} else {
clientMgr.removeAccountAsync(account, new AccountManagerCallback<Boolean>() {
@Override
public void run(AccountManagerFuture<Boolean> arg0) {
notifyLogoutComplete(showLoginPage);
}
});
}
isLoggingOut = false;
// Revokes the existing refresh token.
if (shouldLogoutWhenTokenRevoked() && account != null && refreshToken != null) {
new RevokeTokenTask(refreshToken, clientId, loginServer).execute();
}
}
private void notifyLogoutComplete(boolean showLoginPage) {
EventsObservable.get().notifyEvent(EventType.LogoutComplete);
if (showLoginPage) {
startSwitcherActivityIfRequired();
}
}
/**
* Returns a user agent string based on the Mobile SDK version. The user agent takes the following form:
* SalesforceMobileSDK/{salesforceSDK version} android/{android OS version} appName/appVersion {Native|Hybrid} uid_{device id}
*
* @return The user agent string to use for all requests.
*/
public final String getUserAgent() {
return getUserAgent("");
}
public String getUserAgent(String qualifier) {
String appName = "";
String appVersion = "";
try {
PackageInfo packageInfo = context.getPackageManager().getPackageInfo(context.getPackageName(), 0);
appName = context.getString(packageInfo.applicationInfo.labelRes);
appVersion = packageInfo.versionName;
} catch (NameNotFoundException e) {
Log.w("SalesforceSDKManager:getUserAgent", e);
} catch (Resources.NotFoundException nfe) {
// A test harness such as Gradle does NOT have an application name.
Log.w("SalesforceSDKManager:getUserAgent", nfe);
}
String appTypeWithQualifier = getAppType() + qualifier;
return String.format("SalesforceMobileSDK/%s android mobile/%s (%s) %s/%s %s uid_%s",
SDK_VERSION, Build.VERSION.RELEASE, Build.MODEL, appName, appVersion, appTypeWithQualifier, uid);
}
/**
* @return app type as String
*/
public String getAppType() {
return "Native";
}
/**
* Indicates whether the application is a hybrid application.
*
* @return True if this is a hybrid application.
*/
public boolean isHybrid() {
return false;
}
/**
* Returns the authentication account type (which should match authenticator.xml).
*
* @return Account type string.
*/
public String getAccountType() {
return context.getString(getSalesforceR().stringAccountType());
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append(this.getClass()).append(": {\n")
.append(" accountType: ").append(getAccountType()).append("\n")
.append(" userAgent: ").append(getUserAgent()).append("\n")
.append(" mainActivityClass: ").append(getMainActivityClass()).append("\n")
.append(" isFileSystemEncrypted: ").append(Encryptor.isFileSystemEncrypted()).append("\n");
if (passcodeManager != null) {
// passcodeManager may be null at startup if the app is running in debug mode.
sb.append(" hasStoredPasscode: ").append(passcodeManager.hasStoredPasscode(context)).append("\n");
}
sb.append("}\n");
return sb.toString();
}
/**
* Encrypts the given data using the given passcode as the encryption key.
*
* @param data Data to be encrypted.
* @param passcode Encryption key.
* @return Encrypted data.
*/
public static String encryptWithPasscode(String data, String passcode) {
return Encryptor.encrypt(data, SalesforceSDKManager.INSTANCE.getEncryptionKeyForPasscode(passcode));
}
/**
* Decrypts the given data using the given passcode as the decryption key.
*
* @param data Data to be decrypted.
* @param passcode Decryption key.
* @return Decrypted data.
*/
public static String decryptWithPasscode(String data, String passcode) {
return Encryptor.decrypt(data, SalesforceSDKManager.INSTANCE.getEncryptionKeyForPasscode(passcode));
}
/**
* Asynchronous task for revoking the refresh token on logout.
*
* @author bhariharan
*/
private class RevokeTokenTask extends AsyncTask<Void, Void, Void> {
private String refreshToken;
private String clientId;
private String loginServer;
public RevokeTokenTask(String refreshToken, String clientId, String loginServer) {
this.refreshToken = refreshToken;
this.clientId = clientId;
this.loginServer = loginServer;
}
@Override
protected Void doInBackground(Void... nothings) {
try {
OAuth2.revokeRefreshToken(HttpAccess.DEFAULT, new URI(loginServer), refreshToken);
} catch (Exception e) {
Log.w("SalesforceSDKManager:revokeToken", e);
}
return null;
}
}
/**
* Retrieves a property value that indicates whether the current run is a test run.
*
* @return True if the current run is a test run.
*/
public boolean getIsTestRun() {
return INSTANCE.isTestRun;
}
/**
* Sets a property that indicates whether the current run is a test run.
*
* @param isTestRun True if the current run is a test run.
*/
public void setIsTestRun(boolean isTestRun) {
INSTANCE.isTestRun = isTestRun;
}
/**
* Retrieves a property value that indicates whether logout is in progress.
*
* @return True if logout is in progress.
*/
public boolean isLoggingOut() {
return isLoggingOut;
}
/**
* @return ClientManager
*/
public ClientManager getClientManager() {
return new ClientManager(getAppContext(), getAccountType(), getLoginOptions(), true);
}
/**
* @return ClientManager
*/
public ClientManager getClientManager(String jwt, String url) {
return new ClientManager(getAppContext(), getAccountType(), getLoginOptions(jwt, url), true);
}
@TargetApi(Build.VERSION_CODES.LOLLIPOP)
public void removeAllCookies() {
/*
* TODO: Remove this conditional once 'minApi >= 21'.
*/
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
CookieManager.getInstance().removeAllCookies(null);
} else {
CookieSyncManager.createInstance(context);
CookieManager.getInstance().removeAllCookie();
}
}
@TargetApi(Build.VERSION_CODES.LOLLIPOP)
public void removeSessionCookies() {
/*
* TODO: Remove this conditional once 'minApi >= 21'.
*/
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
CookieManager.getInstance().removeSessionCookies(null);
} else {
CookieSyncManager.createInstance(context);
CookieManager.getInstance().removeSessionCookie();
}
}
@TargetApi(Build.VERSION_CODES.LOLLIPOP)
public void syncCookies() {
/*
* TODO: Remove this conditional once 'minApi >= 21'.
*/
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
CookieManager.getInstance().flush();
} else {
CookieSyncManager.createInstance(context);
CookieSyncManager.getInstance().sync();
}
}
}
| 1 | 15,491 | so this is to fix a scenario where the app is first launched normally, then background and foreground through the link, we are not updating loginOption | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -322,6 +322,14 @@ class Application extends BaseApplication {
});
}
+ checkLayout(layout) {
+ if (layout === 'viewer') {
+ this.focusElement_('noteTextViewer');
+ } else {
+ this.focusElement_('noteBody');
+ }
+ }
+
async updateMenu(screen) {
if (this.lastMenuScreen_ === screen) return;
| 1 | require('app-module-path').addPath(__dirname);
const { BaseApplication } = require('lib/BaseApplication');
const { FoldersScreenUtils } = require('lib/folders-screen-utils.js');
const Setting = require('lib/models/Setting.js');
const { shim } = require('lib/shim.js');
const MasterKey = require('lib/models/MasterKey');
const Note = require('lib/models/Note');
const { MarkupToHtml } = require('lib/joplin-renderer');
const { _, setLocale } = require('lib/locale.js');
const { Logger } = require('lib/logger.js');
const fs = require('fs-extra');
const Tag = require('lib/models/Tag.js');
const { reg } = require('lib/registry.js');
const { defaultState } = require('lib/reducer.js');
const packageInfo = require('./packageInfo.js');
const AlarmService = require('lib/services/AlarmService.js');
const AlarmServiceDriverNode = require('lib/services/AlarmServiceDriverNode');
const DecryptionWorker = require('lib/services/DecryptionWorker');
const InteropService = require('lib/services/InteropService');
const InteropServiceHelper = require('./InteropServiceHelper.js');
const ResourceService = require('lib/services/ResourceService');
const ClipperServer = require('lib/ClipperServer');
const ExternalEditWatcher = require('lib/services/ExternalEditWatcher');
const { bridge } = require('electron').remote.require('./bridge');
const { shell, webFrame, clipboard } = require('electron');
const Menu = bridge().Menu;
const PluginManager = require('lib/services/PluginManager');
const RevisionService = require('lib/services/RevisionService');
const MigrationService = require('lib/services/MigrationService');
const TemplateUtils = require('lib/TemplateUtils');
const CssUtils = require('lib/CssUtils');
const pluginClasses = [
require('./plugins/GotoAnything.min'),
];
const appDefaultState = Object.assign({}, defaultState, {
route: {
type: 'NAV_GO',
routeName: 'Main',
props: {},
},
navHistory: [],
fileToImport: null,
windowCommand: null,
noteVisiblePanes: ['editor', 'viewer'],
sidebarVisibility: true,
noteListVisibility: true,
windowContentSize: bridge().windowContentSize(),
watchedNoteFiles: [],
lastEditorScrollPercents: {},
devToolsVisible: false,
});
class Application extends BaseApplication {
constructor() {
super();
this.lastMenuScreen_ = null;
}
hasGui() {
return true;
}
checkForUpdateLoggerPath() {
return `${Setting.value('profileDir')}/log-autoupdater.txt`;
}
reducer(state = appDefaultState, action) {
let newState = state;
try {
switch (action.type) {
case 'NAV_BACK':
case 'NAV_GO':
{
const goingBack = action.type === 'NAV_BACK';
if (goingBack && !state.navHistory.length) break;
const currentRoute = state.route;
newState = Object.assign({}, state);
const newNavHistory = state.navHistory.slice();
if (goingBack) {
let newAction = null;
while (newNavHistory.length) {
newAction = newNavHistory.pop();
if (newAction.routeName !== state.route.routeName) break;
}
if (!newAction) break;
action = newAction;
}
if (!goingBack) newNavHistory.push(currentRoute);
newState.navHistory = newNavHistory;
newState.route = action;
}
break;
case 'WINDOW_CONTENT_SIZE_SET':
newState = Object.assign({}, state);
newState.windowContentSize = action.size;
break;
case 'WINDOW_COMMAND':
{
newState = Object.assign({}, state);
const command = Object.assign({}, action);
delete command.type;
newState.windowCommand = command.name ? command : null;
}
break;
case 'NOTE_VISIBLE_PANES_TOGGLE':
{
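// Cycles the visible panes through the sequence selected by the
// "layoutButtonSequence" setting (e.g. editor -> viewer -> both),
// wrapping back to the first option after the last one.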
const getNextLayout = (currentLayout) => {
currentLayout = panes.length === 2 ? 'both' : currentLayout[0];
let paneOptions;
if (state.settings.layoutButtonSequence === Setting.LAYOUT_EDITOR_VIEWER) {
paneOptions = ['editor', 'viewer'];
} else if (state.settings.layoutButtonSequence === Setting.LAYOUT_EDITOR_SPLIT) {
paneOptions = ['editor', 'both'];
} else if (state.settings.layoutButtonSequence === Setting.LAYOUT_VIEWER_SPLIT) {
paneOptions = ['viewer', 'both'];
} else if (state.settings.layoutButtonSequence === Setting.LAYOUT_SPLIT_WYSIWYG) {
paneOptions = ['both', 'wysiwyg'];
} else {
paneOptions = ['editor', 'viewer', 'both'];
}
const currentLayoutIndex = paneOptions.indexOf(currentLayout);
const nextLayoutIndex = currentLayoutIndex === paneOptions.length - 1 ? 0 : currentLayoutIndex + 1;
const nextLayout = paneOptions[nextLayoutIndex];
return nextLayout === 'both' ? ['editor', 'viewer'] : [nextLayout];
};
newState = Object.assign({}, state);
const panes = state.noteVisiblePanes.slice();
newState.noteVisiblePanes = getNextLayout(panes);
}
break;
case 'NOTE_VISIBLE_PANES_SET':
newState = Object.assign({}, state);
newState.noteVisiblePanes = action.panes;
break;
case 'SIDEBAR_VISIBILITY_TOGGLE':
newState = Object.assign({}, state);
newState.sidebarVisibility = !state.sidebarVisibility;
break;
case 'SIDEBAR_VISIBILITY_SET':
newState = Object.assign({}, state);
newState.sidebarVisibility = action.visibility;
break;
case 'NOTELIST_VISIBILITY_TOGGLE':
newState = Object.assign({}, state);
newState.noteListVisibility = !state.noteListVisibility;
break;
case 'NOTELIST_VISIBILITY_SET':
newState = Object.assign({}, state);
newState.noteListVisibility = action.visibility;
break;
case 'NOTE_FILE_WATCHER_ADD':
if (newState.watchedNoteFiles.indexOf(action.id) < 0) {
newState = Object.assign({}, state);
const watchedNoteFiles = newState.watchedNoteFiles.slice();
watchedNoteFiles.push(action.id);
newState.watchedNoteFiles = watchedNoteFiles;
}
break;
case 'NOTE_FILE_WATCHER_REMOVE':
{
newState = Object.assign({}, state);
const idx = newState.watchedNoteFiles.indexOf(action.id);
if (idx >= 0) {
const watchedNoteFiles = newState.watchedNoteFiles.slice();
watchedNoteFiles.splice(idx, 1);
newState.watchedNoteFiles = watchedNoteFiles;
}
}
break;
case 'NOTE_FILE_WATCHER_CLEAR':
if (state.watchedNoteFiles.length) {
newState = Object.assign({}, state);
newState.watchedNoteFiles = [];
}
break;
case 'EDITOR_SCROLL_PERCENT_SET':
{
newState = Object.assign({}, state);
const newPercents = Object.assign({}, newState.lastEditorScrollPercents);
newPercents[action.noteId] = action.percent;
newState.lastEditorScrollPercents = newPercents;
}
break;
case 'NOTE_DEVTOOLS_TOGGLE':
newState = Object.assign({}, state);
newState.devToolsVisible = !newState.devToolsVisible;
break;
case 'NOTE_DEVTOOLS_SET':
newState = Object.assign({}, state);
newState.devToolsVisible = action.value;
break;
}
} catch (error) {
error.message = `In reducer: ${error.message} Action: ${JSON.stringify(action)}`;
throw error;
}
return super.reducer(newState, action);
}
toggleDevTools(visible) {
if (visible) {
bridge().openDevTools();
} else {
bridge().closeDevTools();
}
}
async generalMiddleware(store, next, action) {
if (action.type == 'SETTING_UPDATE_ONE' && action.key == 'locale' || action.type == 'SETTING_UPDATE_ALL') {
setLocale(Setting.value('locale'));
// The bridge runs within the main process, with its own instance of locale.js
// so it needs to be set too here.
bridge().setLocale(Setting.value('locale'));
await this.refreshMenu();
}
if (action.type == 'SETTING_UPDATE_ONE' && action.key == 'showTrayIcon' || action.type == 'SETTING_UPDATE_ALL') {
this.updateTray();
}
if (action.type == 'SETTING_UPDATE_ONE' && action.key == 'style.editor.fontFamily' || action.type == 'SETTING_UPDATE_ALL') {
this.updateEditorFont();
}
if (action.type == 'SETTING_UPDATE_ONE' && action.key == 'windowContentZoomFactor' || action.type == 'SETTING_UPDATE_ALL') {
webFrame.setZoomFactor(Setting.value('windowContentZoomFactor') / 100);
}
if (['EVENT_NOTE_ALARM_FIELD_CHANGE', 'NOTE_DELETE'].indexOf(action.type) >= 0) {
await AlarmService.updateNoteNotification(action.id, action.type === 'NOTE_DELETE');
}
const result = await super.generalMiddleware(store, next, action);
const newState = store.getState();
if (action.type === 'NAV_GO' || action.type === 'NAV_BACK') {
app().updateMenu(newState.route.routeName);
}
if (['NOTE_VISIBLE_PANES_TOGGLE', 'NOTE_VISIBLE_PANES_SET'].indexOf(action.type) >= 0) {
Setting.setValue('noteVisiblePanes', newState.noteVisiblePanes);
const layout = newState.noteVisiblePanes[0];
this.updateMenuItemStates(layout);
}
if (['SIDEBAR_VISIBILITY_TOGGLE', 'SIDEBAR_VISIBILITY_SET'].indexOf(action.type) >= 0) {
Setting.setValue('sidebarVisibility', newState.sidebarVisibility);
}
if (['NOTELIST_VISIBILITY_TOGGLE', 'NOTELIST_VISIBILITY_SET'].indexOf(action.type) >= 0) {
Setting.setValue('noteListVisibility', newState.noteListVisibility);
}
if (action.type.indexOf('NOTE_SELECT') === 0 || action.type.indexOf('FOLDER_SELECT') === 0) {
const layout = newState.noteVisiblePanes[0];
this.updateMenuItemStates(layout, newState);
}
if (['NOTE_DEVTOOLS_TOGGLE', 'NOTE_DEVTOOLS_SET'].indexOf(action.type) >= 0) {
this.toggleDevTools(newState.devToolsVisible);
this.updateMenuItemStates(newState);
}
return result;
}
async refreshMenu() {
const screen = this.lastMenuScreen_;
this.lastMenuScreen_ = null;
await this.updateMenu(screen);
}
focusElement_(target) {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'focusElement',
target: target,
});
}
async updateMenu(screen) {
if (this.lastMenuScreen_ === screen) return;
const sortNoteFolderItems = (type) => {
const sortItems = [];
const sortOptions = Setting.enumOptions(`${type}.sortOrder.field`);
for (const field in sortOptions) {
if (!sortOptions.hasOwnProperty(field)) continue;
sortItems.push({
label: sortOptions[field],
screens: ['Main'],
type: 'checkbox',
checked: Setting.value(`${type}.sortOrder.field`) === field,
click: () => {
Setting.setValue(`${type}.sortOrder.field`, field);
this.refreshMenu();
},
});
}
sortItems.push({ type: 'separator' });
sortItems.push({
label: Setting.settingMetadata(`${type}.sortOrder.reverse`).label(),
type: 'checkbox',
checked: Setting.value(`${type}.sortOrder.reverse`),
screens: ['Main'],
click: () => {
Setting.setValue(`${type}.sortOrder.reverse`, !Setting.value(`${type}.sortOrder.reverse`));
},
});
return sortItems;
};
const sortNoteItems = sortNoteFolderItems('notes');
const sortFolderItems = sortNoteFolderItems('folders');
const focusItems = [];
focusItems.push({
label: _('Sidebar'),
click: () => { this.focusElement_('sideBar'); },
accelerator: 'CommandOrControl+Shift+S',
});
focusItems.push({
label: _('Note list'),
click: () => { this.focusElement_('noteList'); },
accelerator: 'CommandOrControl+Shift+L',
});
focusItems.push({
label: _('Note title'),
click: () => { this.focusElement_('noteTitle'); },
accelerator: 'CommandOrControl+Shift+N',
});
focusItems.push({
label: _('Note body'),
click: () => { this.focusElement_('noteBody'); },
accelerator: 'CommandOrControl+Shift+B',
});
let toolsItems = [];
const importItems = [];
const exportItems = [];
const toolsItemsFirst = [];
const templateItems = [];
const ioService = new InteropService();
const ioModules = ioService.modules();
for (let i = 0; i < ioModules.length; i++) {
const module = ioModules[i];
if (module.type === 'exporter') {
if (module.canDoMultiExport !== false) {
exportItems.push({
label: module.fullLabel(),
screens: ['Main'],
click: async () => {
await InteropServiceHelper.export(this.dispatch.bind(this), module);
},
});
}
} else {
for (let j = 0; j < module.sources.length; j++) {
const moduleSource = module.sources[j];
importItems.push({
label: module.fullLabel(moduleSource),
screens: ['Main'],
click: async () => {
let path = null;
const selectedFolderId = this.store().getState().selectedFolderId;
if (moduleSource === 'file') {
path = bridge().showOpenDialog({
filters: [{ name: module.description, extensions: module.fileExtensions }],
});
} else {
path = bridge().showOpenDialog({
properties: ['openDirectory', 'createDirectory'],
});
}
if (!path || (Array.isArray(path) && !path.length)) return;
if (Array.isArray(path)) path = path[0];
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'showModalMessage',
message: _('Importing from "%s" as "%s" format. Please wait...', path, module.format),
});
const importOptions = {
path,
format: module.format,
modulePath: module.path,
onError: console.warn,
destinationFolderId:
!module.isNoteArchive && moduleSource === 'file'
? selectedFolderId
: null,
};
const service = new InteropService();
try {
const result = await service.import(importOptions);
console.info('Import result: ', result);
} catch (error) {
bridge().showErrorMessageBox(error.message);
}
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'hideModalMessage',
});
},
});
}
}
}
exportItems.push({
label: `PDF - ${_('PDF File')}`,
screens: ['Main'],
click: async () => {
const selectedNoteIds = this.store().getState().selectedNoteIds;
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'exportPdf',
noteIds: selectedNoteIds,
});
},
});
// We need a dummy entry, otherwise the ternary operator to show a
// menu item only on a specific OS does not work.
const noItem = {
type: 'separator',
visible: false,
};
const syncStatusItem = {
label: _('Synchronisation Status'),
click: () => {
this.dispatch({
type: 'NAV_GO',
routeName: 'Status',
});
},
};
const newNoteItem = {
label: _('New note'),
accelerator: 'CommandOrControl+N',
screens: ['Main'],
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'newNote',
});
},
};
const newTodoItem = {
label: _('New to-do'),
accelerator: 'CommandOrControl+T',
screens: ['Main'],
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'newTodo',
});
},
};
const newNotebookItem = {
label: _('New notebook'),
screens: ['Main'],
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'newNotebook',
});
},
};
const newSubNotebookItem = {
label: _('New sub-notebook'),
screens: ['Main'],
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'newSubNotebook',
activeFolderId: Setting.value('activeFolderId'),
});
},
};
const printItem = {
label: _('Print'),
accelerator: 'CommandOrControl+P',
screens: ['Main'],
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'print',
});
},
};
toolsItemsFirst.push(syncStatusItem, {
type: 'separator',
screens: ['Main'],
});
const templateDirExists = await shim.fsDriver().exists(Setting.value('templateDir'));
templateItems.push({
label: _('Create note from template'),
visible: templateDirExists,
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'selectTemplate',
noteType: 'note',
});
},
}, {
label: _('Create to-do from template'),
visible: templateDirExists,
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'selectTemplate',
noteType: 'todo',
});
},
}, {
label: _('Insert template'),
visible: templateDirExists,
accelerator: 'CommandOrControl+Alt+I',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'selectTemplate',
});
},
}, {
label: _('Open template directory'),
click: () => {
const templateDir = Setting.value('templateDir');
if (!templateDirExists) shim.fsDriver().mkdir(templateDir);
shell.openItem(templateDir);
},
}, {
label: _('Refresh templates'),
click: async () => {
const templates = await TemplateUtils.loadTemplates(Setting.value('templateDir'));
this.store().dispatch({
type: 'TEMPLATE_UPDATE_ALL',
templates: templates,
});
},
});
// we need this workaround, because on macOS the menu is different
const toolsItemsWindowsLinux = toolsItemsFirst.concat([{
label: _('Options'),
visible: !shim.isMac(),
accelerator: 'CommandOrControl+,',
click: () => {
this.dispatch({
type: 'NAV_GO',
routeName: 'Config',
});
},
}]);
// the following menu items will be available for all OSes under Tools
const toolsItemsAll = [{
label: _('Resources'),
click: () => {
this.dispatch({
type: 'NAV_GO',
routeName: 'Resources',
});
},
}];
if (!shim.isMac()) {
toolsItems = toolsItems.concat(toolsItemsWindowsLinux);
}
toolsItems = toolsItems.concat(toolsItemsAll);
function _checkForUpdates(ctx) {
bridge().checkForUpdates(false, bridge().window(), ctx.checkForUpdateLoggerPath(), { includePreReleases: Setting.value('autoUpdate.includePreReleases') });
}
function _showAbout() {
const p = packageInfo;
let gitInfo = '';
if ('git' in p) {
gitInfo = _('Revision: %s (%s)', p.git.hash, p.git.branch);
}
const copyrightText = 'Copyright © 2016-YYYY Laurent Cozic';
const message = [
p.description,
'',
copyrightText.replace('YYYY', new Date().getFullYear()),
_('%s %s (%s, %s)', p.name, p.version, Setting.value('env'), process.platform),
'',
_('Client ID: %s', Setting.value('clientId')),
_('Sync Version: %s', Setting.value('syncVersion')),
_('Profile Version: %s', reg.db().version()),
];
if (gitInfo) {
message.push(`\n${gitInfo}`);
console.info(gitInfo);
}
const text = message.join('\n');
const copyToClipboard = bridge().showMessageBox(text, {
icon: `${bridge().electronApp().buildDir()}/icons/128x128.png`,
buttons: [_('Copy'), _('OK')],
cancelId: 1,
defaultId: 1,
});
if (copyToClipboard === 0) {
clipboard.writeText(message.splice(3).join('\n'));
}
}
const rootMenuFile = {
// Using a dummy entry for macOS here, because the first menu
// becomes 'Joplin' and we need a menu called 'File' later.
label: shim.isMac() ? '&JoplinMainMenu' : _('&File'),
// `&` before one of the char in the label name mean, that
// <Alt + F> will open this menu. It's needed becase electron
// opens the first menu on Alt press if no hotkey assigned.
// Issue: https://github.com/laurent22/joplin/issues/934
submenu: [{
label: _('About Joplin'),
visible: shim.isMac() ? true : false,
click: () => _showAbout(),
}, {
type: 'separator',
visible: shim.isMac() ? true : false,
}, {
label: _('Preferences...'),
visible: shim.isMac() ? true : false,
accelerator: 'CommandOrControl+,',
click: () => {
this.dispatch({
type: 'NAV_GO',
routeName: 'Config',
});
},
}, {
label: _('Check for updates...'),
visible: shim.isMac() ? true : false,
click: () => _checkForUpdates(this),
}, {
type: 'separator',
visible: shim.isMac() ? true : false,
},
shim.isMac() ? noItem : newNoteItem,
shim.isMac() ? noItem : newTodoItem,
shim.isMac() ? noItem : newNotebookItem,
shim.isMac() ? noItem : newSubNotebookItem, {
type: 'separator',
visible: shim.isMac() ? false : true,
}, {
label: _('Templates'),
visible: shim.isMac() ? false : true,
submenu: templateItems,
}, {
type: 'separator',
visible: shim.isMac() ? false : true,
}, {
label: _('Import'),
visible: shim.isMac() ? false : true,
submenu: importItems,
}, {
label: _('Export'),
visible: shim.isMac() ? false : true,
submenu: exportItems,
}, {
type: 'separator',
}, {
label: _('Synchronise'),
accelerator: 'CommandOrControl+S',
screens: ['Main'],
click: async () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'synchronize',
});
},
}, shim.isMac() ? syncStatusItem : noItem, {
type: 'separator',
}, shim.isMac() ? noItem : printItem, {
type: 'separator',
platforms: ['darwin'],
}, {
label: _('Hide %s', 'Joplin'),
platforms: ['darwin'],
accelerator: 'CommandOrControl+H',
click: () => { bridge().electronApp().hide(); },
}, {
type: 'separator',
}, {
label: _('Quit'),
accelerator: 'CommandOrControl+Q',
click: () => { bridge().electronApp().quit(); },
}],
};
const rootMenuFileMacOs = {
label: _('&File'),
visible: shim.isMac() ? true : false,
submenu: [
newNoteItem,
newTodoItem,
newNotebookItem,
newSubNotebookItem, {
label: _('Close Window'),
platforms: ['darwin'],
accelerator: 'Command+W',
selector: 'performClose:',
}, {
type: 'separator',
}, {
label: _('Templates'),
submenu: templateItems,
}, {
type: 'separator',
}, {
label: _('Import'),
submenu: importItems,
}, {
label: _('Export'),
submenu: exportItems,
}, {
type: 'separator',
},
printItem,
],
};
const layoutButtonSequenceOptions = Object.entries(Setting.enumOptions('layoutButtonSequence')).map(([layoutKey, layout]) => ({
label: layout,
screens: ['Main'],
type: 'checkbox',
checked: Setting.value('layoutButtonSequence') == layoutKey,
click: () => {
Setting.setValue('layoutButtonSequence', layoutKey);
this.refreshMenu();
},
}));
const rootMenus = {
edit: {
id: 'edit',
label: _('&Edit'),
submenu: [{
id: 'edit:copy',
label: _('Copy'),
role: 'copy',
accelerator: 'CommandOrControl+C',
}, {
id: 'edit:cut',
label: _('Cut'),
role: 'cut',
accelerator: 'CommandOrControl+X',
}, {
id: 'edit:paste',
label: _('Paste'),
role: 'paste',
accelerator: 'CommandOrControl+V',
}, {
id: 'edit:selectAll',
label: _('Select all'),
role: 'selectall',
accelerator: 'CommandOrControl+A',
}, {
type: 'separator',
screens: ['Main'],
}, {
id: 'edit:bold',
label: _('Bold'),
screens: ['Main'],
accelerator: 'CommandOrControl+B',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'textBold',
});
},
}, {
id: 'edit:italic',
label: _('Italic'),
screens: ['Main'],
accelerator: 'CommandOrControl+I',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'textItalic',
});
},
}, {
id: 'edit:link',
label: _('Link'),
screens: ['Main'],
accelerator: 'CommandOrControl+K',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'textLink',
});
},
}, {
id: 'edit:code',
label: _('Code'),
screens: ['Main'],
accelerator: 'CommandOrControl+`',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'textCode',
});
},
}, {
type: 'separator',
screens: ['Main'],
}, {
id: 'edit:insertDateTime',
label: _('Insert Date Time'),
screens: ['Main'],
accelerator: 'CommandOrControl+Shift+T',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'insertDateTime',
});
},
}, {
type: 'separator',
screens: ['Main'],
}, {
id: 'edit:commandStartExternalEditing',
label: _('Edit in external editor'),
screens: ['Main'],
accelerator: 'CommandOrControl+E',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'commandStartExternalEditing',
});
},
}, {
id: 'edit:setTags',
label: _('Tags'),
screens: ['Main'],
accelerator: 'CommandOrControl+Alt+T',
click: () => {
const selectedNoteIds = this.store().getState().selectedNoteIds;
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'setTags',
noteIds: selectedNoteIds,
});
},
}, {
type: 'separator',
screens: ['Main'],
}, {
id: 'edit:focusSearch',
label: _('Search in all the notes'),
screens: ['Main'],
accelerator: shim.isMac() ? 'Shift+Command+F' : 'F6',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'focusSearch',
});
},
}, {
id: 'edit:showLocalSearch',
label: _('Search in current note'),
screens: ['Main'],
accelerator: 'CommandOrControl+F',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'showLocalSearch',
});
},
}],
},
view: {
label: _('&View'),
submenu: [{
label: _('Toggle sidebar'),
screens: ['Main'],
accelerator: shim.isMac() ? 'Option+Command+S' : 'F10',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'toggleSidebar',
});
},
}, {
type: 'separator',
screens: ['Main'],
}, {
label: _('Layout button sequence'),
screens: ['Main'],
submenu: layoutButtonSequenceOptions,
}, {
label: _('Toggle note list'),
screens: ['Main'],
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'toggleNoteList',
});
},
}, {
label: _('Toggle editor layout'),
screens: ['Main'],
accelerator: 'CommandOrControl+L',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'toggleVisiblePanes',
});
},
}, {
type: 'separator',
screens: ['Main'],
}, {
label: Setting.settingMetadata('notes.sortOrder.field').label(),
screens: ['Main'],
submenu: sortNoteItems,
}, {
label: Setting.settingMetadata('folders.sortOrder.field').label(),
screens: ['Main'],
submenu: sortFolderItems,
}, {
label: Setting.settingMetadata('showNoteCounts').label(),
type: 'checkbox',
checked: Setting.value('showNoteCounts'),
screens: ['Main'],
click: () => {
Setting.setValue('showNoteCounts', !Setting.value('showNoteCounts'));
},
}, {
label: Setting.settingMetadata('uncompletedTodosOnTop').label(),
type: 'checkbox',
checked: Setting.value('uncompletedTodosOnTop'),
screens: ['Main'],
click: () => {
Setting.setValue('uncompletedTodosOnTop', !Setting.value('uncompletedTodosOnTop'));
},
}, {
label: Setting.settingMetadata('showCompletedTodos').label(),
type: 'checkbox',
checked: Setting.value('showCompletedTodos'),
screens: ['Main'],
click: () => {
Setting.setValue('showCompletedTodos', !Setting.value('showCompletedTodos'));
},
}, {
type: 'separator',
screens: ['Main'],
}, {
label: _('Focus'),
screens: ['Main'],
submenu: focusItems,
}, {
type: 'separator',
screens: ['Main'],
}, {
label: _('Actual Size'),
click: () => {
Setting.setValue('windowContentZoomFactor', 100);
},
accelerator: 'CommandOrControl+0',
}, {
label: _('Zoom In'),
click: () => {
Setting.incValue('windowContentZoomFactor', 10);
},
accelerator: 'CommandOrControl+=',
}, {
label: _('Zoom Out'),
click: () => {
Setting.incValue('windowContentZoomFactor', -10);
},
accelerator: 'CommandOrControl+-',
}],
},
tools: {
label: _('&Tools'),
submenu: toolsItems,
},
help: {
label: _('&Help'),
submenu: [{
label: _('Website and documentation'),
accelerator: 'F1',
click() { bridge().openExternal('https://joplinapp.org'); },
}, {
label: _('Joplin Forum'),
click() { bridge().openExternal('https://discourse.joplinapp.org'); },
}, {
label: _('Make a donation'),
click() { bridge().openExternal('https://joplinapp.org/donate/'); },
}, {
label: _('Check for updates...'),
visible: shim.isMac() ? false : true,
click: () => _checkForUpdates(this),
}, {
type: 'separator',
screens: ['Main'],
}, {
id: 'help:toggleDevTools',
type: 'checkbox',
label: _('Toggle development tools'),
visible: true,
click: () => {
this.dispatch({
type: 'NOTE_DEVTOOLS_TOGGLE',
});
},
}, {
type: 'separator',
visible: shim.isMac() ? false : true,
screens: ['Main'],
}, {
label: _('About Joplin'),
visible: shim.isMac() ? false : true,
click: () => _showAbout(),
}],
},
};
if (shim.isMac()) {
rootMenus.macOsApp = rootMenuFile;
rootMenus.file = rootMenuFileMacOs;
} else {
rootMenus.file = rootMenuFile;
}
// It seems the "visible" property of separators is ignored by Electron, making
// it display separators that we want hidden. So this function iterates through
// them and removes them completely.
const cleanUpSeparators = items => {
const output = [];
for (const item of items) {
if ('visible' in item && item.type === 'separator' && !item.visible) continue;
output.push(item);
}
return output;
};
for (const key in rootMenus) {
if (!rootMenus.hasOwnProperty(key)) continue;
if (!rootMenus[key].submenu) continue;
rootMenus[key].submenu = cleanUpSeparators(rootMenus[key].submenu);
}
const pluginMenuItems = PluginManager.instance().menuItems();
for (const item of pluginMenuItems) {
const itemParent = rootMenus[item.parent] ? rootMenus[item.parent] : 'tools';
itemParent.submenu.push(item);
}
const template = [
rootMenus.file,
rootMenus.edit,
rootMenus.view,
rootMenus.tools,
rootMenus.help,
];
if (shim.isMac()) template.splice(0, 0, rootMenus.macOsApp);
function isEmptyMenu(template) {
for (let i = 0; i < template.length; i++) {
const t = template[i];
if (t.type !== 'separator') return false;
}
return true;
}
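// Filters the menu template by the current screen and platform, drops
// submenus that end up empty, then removes separators that would otherwise
// start a section or immediately follow another separator.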
function removeUnwantedItems(template, screen) {
const platform = shim.platformName();
let output = [];
for (let i = 0; i < template.length; i++) {
const t = Object.assign({}, template[i]);
if (t.screens && t.screens.indexOf(screen) < 0) continue;
if (t.platforms && t.platforms.indexOf(platform) < 0) continue;
if (t.submenu) t.submenu = removeUnwantedItems(t.submenu, screen);
if (('submenu' in t) && isEmptyMenu(t.submenu)) continue;
output.push(t);
}
// Remove empty separators from now-empty sections
const temp = [];
let previous = null;
for (let i = 0; i < output.length; i++) {
const t = Object.assign({}, output[i]);
if (t.type === 'separator') {
if (!previous) continue;
if (previous.type === 'separator') continue;
}
temp.push(t);
previous = t;
}
output = temp;
return output;
}
const screenTemplate = removeUnwantedItems(template, screen);
const menu = Menu.buildFromTemplate(screenTemplate);
Menu.setApplicationMenu(menu);
this.lastMenuScreen_ = screen;
}
async updateMenuItemStates(layout, state = null) {
if (!this.lastMenuScreen_) return;
if (!this.store() && !state) return;
if (!state) state = this.store().getState();
const selectedNoteIds = state.selectedNoteIds;
const note = selectedNoteIds.length === 1 ? await Note.load(selectedNoteIds[0]) : null;
for (const itemId of ['copy', 'paste', 'cut', 'selectAll', 'bold', 'italic', 'link', 'code', 'insertDateTime', 'commandStartExternalEditing', 'showLocalSearch']) {
const menuItem = Menu.getApplicationMenu().getMenuItemById(`edit:${itemId}`);
if (!menuItem) continue;
const isHtmlNote = !!note && note.markup_language === MarkupToHtml.MARKUP_LANGUAGE_HTML;
menuItem.enabled = !isHtmlNote && layout !== 'viewer' && !!note;
}
const menuItem = Menu.getApplicationMenu().getMenuItemById('help:toggleDevTools');
menuItem.checked = state.devToolsVisible;
}
updateTray() {
const app = bridge().electronApp();
if (app.trayShown() === Setting.value('showTrayIcon')) return;
if (!Setting.value('showTrayIcon')) {
app.destroyTray();
} else {
const contextMenu = Menu.buildFromTemplate([
{ label: _('Open %s', app.electronApp().name), click: () => { app.window().show(); } },
{ type: 'separator' },
{ label: _('Exit'), click: () => { app.quit(); } },
]);
app.createTray(contextMenu);
}
}
updateEditorFont() {
const fontFamilies = [];
if (Setting.value('style.editor.fontFamily')) fontFamilies.push(`"${Setting.value('style.editor.fontFamily')}"`);
fontFamilies.push('monospace');
// The '*' and '!important' parts are necessary to make sure Russian text is displayed properly
// https://github.com/laurent22/joplin/issues/155
const css = `.ace_editor * { font-family: ${fontFamilies.join(', ')} !important; }`;
const styleTag = document.createElement('style');
styleTag.type = 'text/css';
styleTag.appendChild(document.createTextNode(css));
document.head.appendChild(styleTag);
}
async loadCustomCss(filePath) {
let cssString = '';
if (await fs.pathExists(filePath)) {
try {
cssString = await fs.readFile(filePath, 'utf-8');
} catch (error) {
let msg = error.message ? error.message : '';
msg = `Could not load custom css from ${filePath}\n${msg}`;
error.message = msg;
throw error;
}
}
return cssString;
}
// async createManyNotes() {
// return;
// const folderIds = [];
// const randomFolderId = (folderIds) => {
// if (!folderIds.length) return '';
// const idx = Math.floor(Math.random() * folderIds.length);
// if (idx > folderIds.length - 1) throw new Error('Invalid index ' + idx + ' / ' + folderIds.length);
// return folderIds[idx];
// }
// let rootFolderCount = 0;
// let folderCount = 100;
// for (let i = 0; i < folderCount; i++) {
// let parentId = '';
// if (Math.random() >= 0.9 || rootFolderCount >= folderCount / 10) {
// parentId = randomFolderId(folderIds);
// } else {
// rootFolderCount++;
// }
// const folder = await Folder.save({ title: 'folder' + i, parent_id: parentId });
// folderIds.push(folder.id);
// }
// for (let i = 0; i < 10000; i++) {
// const parentId = randomFolderId(folderIds);
// Note.save({ title: 'note' + i, parent_id: parentId });
// }
// }
async start(argv) {
const electronIsDev = require('electron-is-dev');
// If running inside a package, the command line, instead of being "node.exe <path> <flags>", is "joplin.exe <flags>", so
// we insert an extra argument so that the flags can be processed in a consistent way everywhere.
if (!electronIsDev) argv.splice(1, 0, '.');
argv = await super.start(argv);
// Loads app-wide styles. (Markdown preview-specific styles loaded in app.js)
const dir = Setting.value('profileDir');
const filename = Setting.custom_css_files.JOPLIN_APP;
await CssUtils.injectCustomStyles(`${dir}/${filename}`);
AlarmService.setDriver(new AlarmServiceDriverNode({ appName: packageInfo.build.appId }));
AlarmService.setLogger(reg.logger());
reg.setShowErrorMessageBoxHandler((message) => { bridge().showErrorMessageBox(message); });
if (Setting.value('flagOpenDevTools')) {
bridge().openDevTools();
}
PluginManager.instance().dispatch_ = this.dispatch.bind(this);
PluginManager.instance().setLogger(reg.logger());
PluginManager.instance().register(pluginClasses);
this.updateMenu('Main');
this.initRedux();
// Since the settings need to be loaded before the store is created, it will never
// receive the SETTING_UPDATE_ALL event, which means state.settings will not be
// initialised. So we manually call dispatchUpdateAll() to force an update.
Setting.dispatchUpdateAll();
await FoldersScreenUtils.refreshFolders();
const tags = await Tag.allWithNotes();
this.dispatch({
type: 'TAG_UPDATE_ALL',
items: tags,
});
const masterKeys = await MasterKey.all();
this.dispatch({
type: 'MASTERKEY_UPDATE_ALL',
items: masterKeys,
});
this.store().dispatch({
type: 'FOLDER_SELECT',
id: Setting.value('activeFolderId'),
});
this.store().dispatch({
type: 'FOLDER_SET_COLLAPSED_ALL',
ids: Setting.value('collapsedFolderIds'),
});
// Loads custom Markdown preview styles
const cssString = await CssUtils.loadCustomCss(`${Setting.value('profileDir')}/userstyle.css`);
this.store().dispatch({
type: 'LOAD_CUSTOM_CSS',
css: cssString,
});
const templates = await TemplateUtils.loadTemplates(Setting.value('templateDir'));
this.store().dispatch({
type: 'TEMPLATE_UPDATE_ALL',
templates: templates,
});
this.store().dispatch({
type: 'NOTE_DEVTOOLS_SET',
value: Setting.value('flagOpenDevTools'),
});
// Note: Auto-update currently doesn't work in Linux: it downloads the update
// but then doesn't install it on exit.
if (shim.isWindows() || shim.isMac()) {
const runAutoUpdateCheck = () => {
if (Setting.value('autoUpdateEnabled')) {
bridge().checkForUpdates(true, bridge().window(), this.checkForUpdateLoggerPath(), { includePreReleases: Setting.value('autoUpdate.includePreReleases') });
}
};
// Initial check on startup
setTimeout(() => { runAutoUpdateCheck(); }, 5000);
// Then every x hours
setInterval(() => { runAutoUpdateCheck(); }, 12 * 60 * 60 * 1000);
}
this.updateTray();
setTimeout(() => {
AlarmService.garbageCollect();
}, 1000 * 60 * 60);
if (Setting.value('startMinimized') && Setting.value('showTrayIcon')) {
// Keep it hidden
} else {
bridge().window().show();
}
ResourceService.runInBackground();
if (Setting.value('env') === 'dev') {
AlarmService.updateAllNotifications();
} else {
reg.scheduleSync().then(() => {
// Wait for the first sync before updating the notifications, since synchronisation
// might change the notifications.
AlarmService.updateAllNotifications();
DecryptionWorker.instance().scheduleStart();
});
}
const clipperLogger = new Logger();
clipperLogger.addTarget('file', { path: `${Setting.value('profileDir')}/log-clipper.txt` });
clipperLogger.addTarget('console');
ClipperServer.instance().setLogger(clipperLogger);
ClipperServer.instance().setDispatch(this.store().dispatch);
if (Setting.value('clipperServer.autoStart')) {
ClipperServer.instance().start();
}
ExternalEditWatcher.instance().setLogger(reg.logger());
ExternalEditWatcher.instance().dispatch = this.store().dispatch;
RevisionService.instance().runInBackground();
this.updateMenuItemStates();
// Make it available to the console window - useful to call revisionService.collectRevisions()
window.revisionService = RevisionService.instance();
window.migrationService = MigrationService.instance();
window.decryptionWorker = DecryptionWorker.instance();
}
}
let application_ = null;
function app() {
if (!application_) application_ = new Application();
return application_;
}
module.exports = { app };
| 1 | 13,250 | It doesn't seem like the right way to implement this, because you add a new element that doesn't really exist (noteTextViewer). Instead you should modify the command handler `if (command.name === 'focusElement' && command.target === 'noteBody') {` in NoteText.jsx. Then focus either the editor or the viewer depending on what's currently visible. | laurent22-joplin | js |
@@ -21,7 +21,12 @@ package org.apache.iceberg.avro;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
+
+import java.util.HashSet;
+import java.util.LinkedList;
import java.util.List;
+import java.util.Set;
+
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.avro;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import java.util.List;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.Types;
class SchemaToType extends AvroSchemaVisitor<Type> {
private final Schema root;
SchemaToType(Schema root) {
this.root = root;
if (root.getType() == Schema.Type.RECORD) {
this.nextId = root.getFields().size();
}
}
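// IDs for fields, list elements and map keys/values are read from the Avro
// schema's Iceberg properties when present; otherwise a fresh ID is handed
// out sequentially by allocateId() below.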
private int nextId = 1;
private int getElementId(Schema schema) {
if (schema.getObjectProp(AvroSchemaUtil.ELEMENT_ID_PROP) != null) {
return AvroSchemaUtil.getElementId(schema);
} else {
return allocateId();
}
}
private int getKeyId(Schema schema) {
if (schema.getObjectProp(AvroSchemaUtil.KEY_ID_PROP) != null) {
return AvroSchemaUtil.getKeyId(schema);
} else {
return allocateId();
}
}
private int getValueId(Schema schema) {
if (schema.getObjectProp(AvroSchemaUtil.VALUE_ID_PROP) != null) {
return AvroSchemaUtil.getValueId(schema);
} else {
return allocateId();
}
}
private int getId(Schema.Field field) {
if (field.getObjectProp(AvroSchemaUtil.FIELD_ID_PROP) != null) {
return AvroSchemaUtil.getFieldId(field);
} else {
return allocateId();
}
}
private int allocateId() {
int current = nextId;
nextId += 1;
return current;
}
@Override
public Type record(Schema record, List<String> names, List<Type> fieldTypes) {
List<Schema.Field> fields = record.getFields();
List<Types.NestedField> newFields = Lists.newArrayListWithExpectedSize(fields.size());
if (root == record) {
this.nextId = 0;
}
for (int i = 0; i < fields.size(); i += 1) {
Schema.Field field = fields.get(i);
Type fieldType = fieldTypes.get(i);
int fieldId = getId(field);
if (AvroSchemaUtil.isOptionSchema(field.schema())) {
newFields.add(Types.NestedField.optional(fieldId, field.name(), fieldType));
} else {
newFields.add(Types.NestedField.required(fieldId, field.name(), fieldType));
}
}
return Types.StructType.of(newFields);
}
@Override
public Type union(Schema union, List<Type> options) {
Preconditions.checkArgument(AvroSchemaUtil.isOptionSchema(union),
"Unsupported type: non-option union: %s", union);
// records, arrays, and maps will check nullability later
if (options.get(0) == null) {
return options.get(1);
} else {
return options.get(0);
}
}
@Override
public Type array(Schema array, Type elementType) {
if (array.getLogicalType() instanceof LogicalMap) {
// map stored as an array
Schema keyValueSchema = array.getElementType();
Preconditions.checkArgument(AvroSchemaUtil.isKeyValueSchema(keyValueSchema),
"Invalid key-value pair schema: %s", keyValueSchema);
Types.StructType keyValueType = elementType.asStructType();
Types.NestedField keyField = keyValueType.field("key");
Types.NestedField valueField = keyValueType.field("value");
if (keyValueType.field("value").isOptional()) {
return Types.MapType.ofOptional(
keyField.fieldId(), valueField.fieldId(), keyField.type(), valueField.type());
} else {
return Types.MapType.ofRequired(
keyField.fieldId(), valueField.fieldId(), keyField.type(), valueField.type());
}
} else {
// normal array
Schema elementSchema = array.getElementType();
int id = getElementId(array);
if (AvroSchemaUtil.isOptionSchema(elementSchema)) {
return Types.ListType.ofOptional(id, elementType);
} else {
return Types.ListType.ofRequired(id, elementType);
}
}
}
@Override
public Type map(Schema map, Type valueType) {
Schema valueSchema = map.getValueType();
int keyId = getKeyId(map);
int valueId = getValueId(map);
if (AvroSchemaUtil.isOptionSchema(valueSchema)) {
return Types.MapType.ofOptional(keyId, valueId, Types.StringType.get(), valueType);
} else {
return Types.MapType.ofRequired(keyId, valueId, Types.StringType.get(), valueType);
}
}
@Override
public Type primitive(Schema primitive) {
// first check supported logical types
LogicalType logical = primitive.getLogicalType();
if (logical != null) {
String name = logical.getName();
if (logical instanceof LogicalTypes.Decimal) {
return Types.DecimalType.of(
((LogicalTypes.Decimal) logical).getPrecision(),
((LogicalTypes.Decimal) logical).getScale());
} else if (logical instanceof LogicalTypes.Date) {
return Types.DateType.get();
} else if (
logical instanceof LogicalTypes.TimeMillis ||
logical instanceof LogicalTypes.TimeMicros) {
return Types.TimeType.get();
} else if (
logical instanceof LogicalTypes.TimestampMillis ||
logical instanceof LogicalTypes.TimestampMicros) {
Object adjustToUTC = primitive.getObjectProp(AvroSchemaUtil.ADJUST_TO_UTC_PROP);
Preconditions.checkArgument(adjustToUTC instanceof Boolean,
"Invalid value for adjust-to-utc: %s", adjustToUTC);
if ((Boolean) adjustToUTC) {
return Types.TimestampType.withZone();
} else {
return Types.TimestampType.withoutZone();
}
} else if (LogicalTypes.uuid().getName().equals(name)) {
return Types.UUIDType.get();
}
}
switch (primitive.getType()) {
case BOOLEAN:
return Types.BooleanType.get();
case INT:
return Types.IntegerType.get();
case LONG:
return Types.LongType.get();
case FLOAT:
return Types.FloatType.get();
case DOUBLE:
return Types.DoubleType.get();
case STRING:
case ENUM:
return Types.StringType.get();
case FIXED:
return Types.FixedType.ofLength(primitive.getFixedSize());
case BYTES:
return Types.BinaryType.get();
case NULL:
return null;
}
throw new UnsupportedOperationException(
"Unsupported primitive type: " + primitive);
}
}
| 1 | 20,047 | We don't add blank lines in imports. | apache-iceberg | java |
@@ -1,7 +1,7 @@
# -*- coding: UTF-8 -*-
#addonHandler.py
#A part of NonVisual Desktop Access (NVDA)
-#Copyright (C) 2012-2018 Rui Batista, NV Access Limited, Noelia Ruiz Martínez, Joseph Lee, Babbage B.V.
+#Copyright (C) 2012-2019 Rui Batista, NV Access Limited, Noelia Ruiz Martínez, Joseph Lee, Babbage B.V.
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
| 1 | # -*- coding: UTF-8 -*-
#addonHandler.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2012-2018 Rui Batista, NV Access Limited, Noelia Ruiz Martínez, Joseph Lee, Babbage B.V.
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import sys
import os.path
import gettext
import tempfile
import inspect
import itertools
import collections
import pkgutil
import shutil
from six.moves import cStringIO as StringIO, cPickle
from six import string_types
import zipfile
from configobj import ConfigObj
from configobj.validate import Validator
import config
import globalVars
import languageHandler
from logHandler import log
import winKernel
import addonAPIVersion
from . import addonVersionCheck
from .addonVersionCheck import isAddonCompatible
MANIFEST_FILENAME = "manifest.ini"
stateFilename="addonsState.pickle"
BUNDLE_EXTENSION = "nvda-addon"
BUNDLE_MIMETYPE = "application/x-nvda-addon"
NVDA_ADDON_PROG_ID = "NVDA.Addon.1"
ADDON_PENDINGINSTALL_SUFFIX=".pendingInstall"
DELETEDIR_SUFFIX=".delete"
state={}
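# Persisted add-on state; see the defaults in loadState() for the keys used:
# pendingRemovesSet, pendingInstallsSet, disabledAddons,
# pendingEnableSet and pendingDisableSet.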
# addons that are blocked from running because they are incompatible
_blockedAddons=set()
def loadState():
global state
statePath=os.path.join(globalVars.appArgs.configPath,stateFilename)
try:
state = cPickle.load(file(statePath, "r"))
if "disabledAddons" not in state:
state["disabledAddons"] = set()
if "pendingDisableSet" not in state:
state["pendingDisableSet"] = set()
if "pendingEnableSet" not in state:
state["pendingEnableSet"] = set()
except:
# Defaults.
state = {
"pendingRemovesSet":set(),
"pendingInstallsSet":set(),
"disabledAddons":set(),
"pendingEnableSet":set(),
"pendingDisableSet":set(),
}
def saveState():
statePath=os.path.join(globalVars.appArgs.configPath,stateFilename)
try:
cPickle.dump(state, file(statePath, "wb"))
except:
log.debugWarning("Error saving state", exc_info=True)
def getRunningAddons():
""" Returns currently loaded addons.
"""
return getAvailableAddons(filterFunc=lambda addon: addon.isRunning)
def getIncompatibleAddons(
currentAPIVersion=addonAPIVersion.CURRENT,
backCompatToAPIVersion=addonAPIVersion.BACK_COMPAT_TO):
""" Returns a generator of the add-ons that are not compatible.
"""
return getAvailableAddons(
filterFunc=lambda addon: (
not addonVersionCheck.isAddonCompatible(
addon,
currentAPIVersion=currentAPIVersion,
backwardsCompatToVersion=backCompatToAPIVersion
)
))
def completePendingAddonRemoves():
"""Removes any addons that could not be removed on the last run of NVDA"""
user_addons = os.path.abspath(os.path.join(globalVars.appArgs.configPath, "addons"))
pendingRemovesSet=state['pendingRemovesSet']
for addonName in list(pendingRemovesSet):
addonPath=os.path.join(user_addons,addonName)
if os.path.isdir(addonPath):
addon=Addon(addonPath)
try:
addon.completeRemove()
except RuntimeError:
log.exception("Failed to remove %s add-on"%addonName)
continue
pendingRemovesSet.discard(addonName)
def completePendingAddonInstalls():
user_addons = os.path.abspath(os.path.join(globalVars.appArgs.configPath, "addons"))
pendingInstallsSet=state['pendingInstallsSet']
for addonName in pendingInstallsSet:
newPath=os.path.join(user_addons,addonName)
oldPath=newPath+ADDON_PENDINGINSTALL_SUFFIX
try:
os.rename(oldPath,newPath)
except:
log.error("Failed to complete addon installation for %s"%addonName,exc_info=True)
pendingInstallsSet.clear()
def removeFailedDeletions():
user_addons = os.path.abspath(os.path.join(globalVars.appArgs.configPath, "addons"))
for p in os.listdir(user_addons):
if p.endswith(DELETEDIR_SUFFIX):
path=os.path.join(user_addons,p)
shutil.rmtree(path,ignore_errors=True)
if os.path.exists(path):
log.error("Failed to delete path %s, try removing manually"%path)
_disabledAddons = set()
def disableAddonsIfAny():
"""
Disables add-ons if told to do so by the user from add-ons manager.
This is usually executed before refreshing the list of available add-ons.
"""
global _disabledAddons
# Fold pending changes in: add add-ons pending disable to the disabled set, and remove those pending enable from it.
state["disabledAddons"] |= state["pendingDisableSet"]
state["disabledAddons"] -= state["pendingEnableSet"]
_disabledAddons = state["disabledAddons"]
state["pendingDisableSet"].clear()
state["pendingEnableSet"].clear()
def initialize():
""" Initializes the add-ons subsystem. """
if config.isAppX:
log.info("Add-ons not supported when running as a Windows Store application")
return
loadState()
removeFailedDeletions()
completePendingAddonRemoves()
completePendingAddonInstalls()
# #3090: Are there add-ons that are supposed to not run for this session?
disableAddonsIfAny()
getAvailableAddons(refresh=True)
saveState()
def terminate():
""" Terminates the add-ons subsystem. """
pass
def _getDefaultAddonPaths():
""" Returns paths where addons can be found.
For now, only <userConfig>\addons is supported.
@rtype: list(string)
"""
addon_paths = []
user_addons = os.path.abspath(os.path.join(globalVars.appArgs.configPath, "addons"))
if os.path.isdir(user_addons):
addon_paths.append(user_addons)
return addon_paths
def _getAvailableAddonsFromPath(path):
""" Gets available add-ons from path.
An addon is only considered available if the manifest file is loaded with no errors.
@param path: path from where to find addon directories.
@type path: string
@rtype generator of Addon instances
"""
log.debug("Listing add-ons from %s", path)
for p in os.listdir(path):
if p.endswith(DELETEDIR_SUFFIX): continue
addon_path = os.path.join(path, p)
if os.path.isdir(addon_path) and addon_path not in ('.', '..'):
if not len(os.listdir(addon_path)):
log.error("Error loading Addon from path: %s", addon_path)
else:
log.debug("Loading add-on from %s", addon_path)
try:
a = Addon(addon_path)
name = a.manifest['name']
log.debug(
"Found add-on {name} - {a.version}."
" Requires API: {a.minimumNVDAVersion}."
" Last-tested API: {a.lastTestedNVDAVersion}".format(
name=name,
a=a
))
if a.isDisabled:
log.debug("Disabling add-on %s", name)
if not isAddonCompatible(a):
log.debugWarning("Add-on %s is considered incompatible", name)
_blockedAddons.add(a.name)
yield a
except:
log.error("Error loading Addon from path: %s", addon_path, exc_info=True)
_availableAddons = collections.OrderedDict()
def getAvailableAddons(refresh=False, filterFunc=None):
""" Gets all available addons on the system.
@param refresh: Whether or not to query the file system for available add-ons.
@type refresh: bool
@param filterFunc: A function that allows filtering of add-ons.
It takes an L{Addon} as its only argument
and returns a C{bool} indicating whether the add-on matches the provided filter.
@type filterFunc: callable
@rtype generator of Addon instances.
"""
if filterFunc and not callable(filterFunc):
raise TypeError("The provided filterFunc is not callable")
if refresh:
_availableAddons.clear()
generators = [_getAvailableAddonsFromPath(path) for path in _getDefaultAddonPaths()]
for addon in itertools.chain(*generators):
_availableAddons[addon.path] = addon
return (addon for addon in _availableAddons.itervalues() if not filterFunc or filterFunc(addon))
def installAddonBundle(bundle):
"""Extracts an Addon bundle in to a unique subdirectory of the user addons directory, marking the addon as needing install completion on NVDA restart."""
addonPath = os.path.join(globalVars.appArgs.configPath, "addons",bundle.manifest['name']+ADDON_PENDINGINSTALL_SUFFIX)
bundle.extract(addonPath)
addon=Addon(addonPath)
# #2715: The add-on must be added to _availableAddons here so that
# translations can be used in installTasks module.
_availableAddons[addon.path]=addon
try:
addon.runInstallTask("onInstall")
except:
log.error("task 'onInstall' on addon '%s' failed"%addon.name,exc_info=True)
del _availableAddons[addon.path]
addon.completeRemove(runUninstallTask=False)
raise AddonError("Installation failed")
state['pendingInstallsSet'].add(bundle.manifest['name'])
saveState()
return addon
class AddonError(Exception):
""" Represents an exception coming from the addon subsystem. """
class AddonBase(object):
"""The base class for functionality that is available both for add-on bundles and add-ons on the file system.
Subclasses should at least implement L{manifest}.
"""
@property
def name(self):
return self.manifest['name']
@property
def version(self):
return self.manifest['version']
@property
def minimumNVDAVersion(self):
return self.manifest.get('minimumNVDAVersion')
@property
def lastTestedNVDAVersion(self):
return self.manifest.get('lastTestedNVDAVersion')
class Addon(AddonBase):
""" Represents an Add-on available on the file system."""
def __init__(self, path):
""" Constructs an L[Addon} from.
@param path: the base directory for the addon data.
@type path: string
"""
self.path = os.path.abspath(path)
self._extendedPackages = set()
manifest_path = os.path.join(path, MANIFEST_FILENAME)
with open(manifest_path) as f:
translatedInput = None
for translatedPath in _translatedManifestPaths():
p = os.path.join(self.path, translatedPath)
if os.path.exists(p):
log.debug("Using manifest translation from %s", p)
translatedInput = open(p, 'r')
break
self.manifest = AddonManifest(f, translatedInput)
@property
def isPendingInstall(self):
"""True if this addon has not yet been fully installed."""
return self.path.endswith(ADDON_PENDINGINSTALL_SUFFIX)
@property
def isPendingRemove(self):
"""True if this addon is marked for removal."""
return not self.isPendingInstall and self.name in state['pendingRemovesSet']
def requestRemove(self):
"""Markes this addon for removal on NVDA restart."""
if self.isPendingInstall:
self.completeRemove()
state['pendingInstallsSet'].discard(self.name)
#Force availableAddons to be updated
getAvailableAddons(refresh=True)
else:
state['pendingRemovesSet'].add(self.name)
# There's no point keeping a record of this add-on pending being disabled now.
# However, if the addon is in _disabledAddons, then it needs to stay there so that
# the status in addonsManager continues to say "disabled"
state['pendingDisableSet'].discard(self.name)
saveState()
def completeRemove(self,runUninstallTask=True):
if runUninstallTask:
try:
# #2715: The add-on must be added to _availableAddons here so that
# translations can be used in installTasks module.
_availableAddons[self.path] = self
self.runInstallTask("onUninstall")
except:
log.error("task 'onUninstall' on addon '%s' failed"%self.name,exc_info=True)
finally:
del _availableAddons[self.path]
tempPath=tempfile.mktemp(suffix=DELETEDIR_SUFFIX,dir=os.path.dirname(self.path))
try:
os.rename(self.path,tempPath)
except (WindowsError,IOError):
raise RuntimeError("Cannot rename add-on path for deletion")
shutil.rmtree(tempPath,ignore_errors=True)
if os.path.exists(tempPath):
log.error("Error removing addon directory %s, deferring until next NVDA restart"%self.path)
# clean up the addons state. If an addon with the same name is installed, it should not be automatically
# disabled / blocked.
log.debug("removing addon {} from _disabledAddons/_blockedAddons".format(self.name))
_disabledAddons.discard(self.name)
_blockedAddons.discard(self.name)
saveState()
def addToPackagePath(self, package):
""" Adds this L{Addon} extensions to the specific package path if those exist.
This allows the addon to "run" / be available because the package is able to search its path,
looking for particular modules. This is used by the following:
- `globalPlugins`
- `appModules`
- `synthDrivers`
- `brailleDisplayDrivers`
@param package: the python module representing the package.
@type package: python module.
"""
# #3090: Ensure that we don't add disabled / blocked add-ons to package path.
# By returning here the addon does not "run"/ become active / registered.
if self.isDisabled or self.isBlocked:
return
extension_path = os.path.join(self.path, package.__name__)
if not os.path.isdir(extension_path):
# This addon does not have extension points for this package
return
# Python 2.x doesn't properly handle unicode import paths, so convert them before adding.
converted_path = self._getPathForInclusionInPackage(package)
package.__path__.insert(0, converted_path)
self._extendedPackages.add(package)
log.debug("Addon %s added to %s package path", self.manifest['name'], package.__name__)
def enable(self, shouldEnable):
"""Sets this add-on to be disabled or enabled when NVDA restarts."""
if shouldEnable:
if not isAddonCompatible(self):
import addonAPIVersion
raise AddonError(
"Add-on is not compatible:"
" minimum NVDA version {}, last tested version {},"
" NVDA current {}, NVDA backwards compatible to {}".format(
self.manifest['minimumNVDAVersion'],
self.manifest['lastTestedNVDAVersion'],
addonAPIVersion.CURRENT,
addonAPIVersion.BACK_COMPAT_TO
)
)
if self.name in state["pendingDisableSet"]:
# Undoing a pending disable.
state["pendingDisableSet"].discard(self.name)
else:
state["pendingEnableSet"].add(self.name)
else:
if self.name in state["pendingEnableSet"]:
# Undoing a pending enable.
state["pendingEnableSet"].discard(self.name)
# No need to disable an addon that is already disabled.
# This also prevents the status in the add-ons dialog from saying "disabled, pending disable"
elif self.name not in state["disabledAddons"]:
state["pendingDisableSet"].add(self.name)
# Record enable/disable flags as a way of preparing for disaster such as sudden NVDA crash.
saveState()
@property
def isRunning(self):
return not (self.isPendingInstall or self.isDisabled or self.isBlocked)
@property
def isDisabled(self):
return self.name in _disabledAddons
@property
def isBlocked(self):
return self.name in _blockedAddons
@property
def isPendingEnable(self):
return self.name in state["pendingEnableSet"]
@property
def isPendingDisable(self):
return self.name in state["pendingDisableSet"]
def _getPathForInclusionInPackage(self, package):
extension_path = os.path.join(self.path, package.__name__)
return extension_path.encode("mbcs")
def loadModule(self, name):
""" loads a python module from the addon directory
@param name: the module name
@type name: string
@returns the python module with C{name}
@rtype python module
"""
log.debug("Importing module %s from plugin %s", name, self.name)
importer = pkgutil.ImpImporter(self.path)
loader = importer.find_module(name)
if not loader:
return None
# Create a qualified full name to avoid modules with the same name on sys.modules.
fullname = "addons.%s.%s" % (self.name, name)
try:
return loader.load_module(fullname)
except ImportError:
# in this case return None; any other error is raised to be handled elsewhere
return None
def getTranslationsInstance(self, domain='nvda'):
""" Gets the gettext translation instance for this addon.
<addon-path>\locale will be used to find .mo files, if it exists.
If a translation file is not found the default fallback null translation is returned.
@param domain: the translation domain to retrieve. The 'nvda' default should be used in most cases.
@returns: the gettext translation instance.
"""
localedir = os.path.join(self.path, "locale")
return gettext.translation(domain, localedir=localedir, languages=[languageHandler.getLanguage()], fallback=True)
def runInstallTask(self,taskName,*args,**kwargs):
"""
Executes the function having the given taskName with the given args and kwargs in the addon's installTasks module if it exists.
"""
if not hasattr(self,'_installTasksModule'):
self._installTasksModule=self.loadModule('installTasks')
if self._installTasksModule:
func=getattr(self._installTasksModule,taskName,None)
if func:
func(*args,**kwargs)
def getDocFilePath(self, fileName=None):
"""Get the path to a documentation file for this add-on.
The file should be located in C{doc\lang\file} inside the add-on,
where C{lang} is the language code and C{file} is the requested file name.
Failing that, the language without country is tried.
English is tried as a last resort.
An add-on can specify a default documentation file name
via the docFileName parameter in its manifest.
@param fileName: The requested file name or C{None} for the add-on's default.
@type fileName: basestring
@return: The path to the requested file or C{None} if it wasn't found.
@rtype: basestring
"""
if not fileName:
fileName = self.manifest["docFileName"]
if not fileName:
return None
docRoot = os.path.join(self.path, "doc")
lang = languageHandler.getLanguage()
langs = [lang]
if "_" in lang:
lang = lang.split("_", 1)[0]
langs.append(lang)
if lang != "en":
langs.append("en")
for lang in langs:
docFile = os.path.join(docRoot, lang, fileName)
if os.path.isfile(docFile):
return docFile
return None
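# Illustrative lookup order (derived from the docstring above; names are
# hypothetical): with language "pt_BR" and fileName "readme.html", the files
# tried are doc\pt_BR\readme.html, doc\pt\readme.html and doc\en\readme.html,
# and the first one that exists is returned.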
def getCodeAddon(obj=None, frameDist=1):
""" Returns the L{Addon} where C{obj} is defined. If obj is None the caller code frame is assumed to allow simple retrieval of "current calling addon".
@param obj: python object or None for default behaviour.
@param frameDist: howmany frames is the caller code. Only change this for functions in this module.
@return: L{Addon} instance or None if no code does not belong to a add-on package.
@rtype: C{Addon}
"""
global _availableAddons
if obj is None:
obj = sys._getframe(frameDist)
fileName = inspect.getfile(obj)
dir= unicode(os.path.abspath(os.path.dirname(fileName)), "mbcs")
# if fileName is not a subdir of one of the addon paths
# It does not belong to an addon.
for p in _getDefaultAddonPaths():
if dir.startswith(p):
break
else:
raise AddonError("Code does not belong to an addon package.")
curdir = dir
while curdir not in _getDefaultAddonPaths():
if curdir in _availableAddons.keys():
return _availableAddons[curdir]
curdir = os.path.abspath(os.path.join(curdir, ".."))
# Not found!
raise AddonError("Code does not belong to an addon")
def initTranslation():
addon = getCodeAddon(frameDist=2)
translations = addon.getTranslationsInstance()
# Point _ to the translation object in the globals namespace of the caller frame
# FIXME: shall we retrieve the caller module object explicitly?
try:
callerFrame = inspect.currentframe().f_back
callerFrame.f_globals['_'] = translations.ugettext
# Install our pgettext function.
callerFrame.f_globals['pgettext'] = languageHandler.makePgettext(translations)
finally:
del callerFrame # Avoid reference problems with frames (per python docs)
def _translatedManifestPaths(lang=None, forBundle=False):
if lang is None:
lang = languageHandler.getLanguage() # can't rely on default keyword arguments here.
langs=[lang]
if '_' in lang:
langs.append(lang.split('_')[0])
if lang!='en' and not lang.startswith('en_'):
langs.append('en')
sep = "/" if forBundle else os.path.sep
return [sep.join(("locale", lang, MANIFEST_FILENAME)) for lang in langs]
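# Illustrative result (assuming MANIFEST_FILENAME is "manifest.ini", which is
# defined elsewhere in this module): _translatedManifestPaths("fr_FR", forBundle=True)
# returns ["locale/fr_FR/manifest.ini", "locale/fr/manifest.ini", "locale/en/manifest.ini"];
# with forBundle False the separator is os.path.sep instead of "/".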
class AddonBundle(AddonBase):
""" Represents the contents of an NVDA addon suitable for distribution.
The bundle is compressed using the zip file format. Manifest information
is available without the need for extraction."""
def __init__(self, bundlePath):
""" Constructs an L{AddonBundle} from a filename.
@param bundlePath: The path for the bundle file.
"""
self._path = bundlePath if isinstance(bundlePath, unicode) else unicode(bundlePath, "mbcs")
# Read manifest:
translatedInput=None
with zipfile.ZipFile(self._path, 'r') as z:
for translationPath in _translatedManifestPaths(forBundle=True):
try:
translatedInput = z.open(translationPath, 'r')
break
except KeyError:
pass
self._manifest = AddonManifest(z.open(MANIFEST_FILENAME), translatedInput=translatedInput)
if self.manifest.errors is not None:
_report_manifest_errors(self.manifest)
raise AddonError("Manifest file has errors.")
def extract(self, addonPath):
""" Extracts the bundle content to the specified path.
The addon will be extracted to L{addonPath}
@param addonPath: Path where to extract contents.
@type addonPath: string
"""
with zipfile.ZipFile(self._path, 'r') as z:
for info in z.infolist():
if isinstance(info.filename, str):
# #2505: Handle non-Unicode file names.
# Most archivers seem to use the local OEM code page, even though the spec says only cp437.
# HACK: Overriding info.filename is a bit ugly, but it avoids a lot of code duplication.
info.filename = info.filename.decode("cp%d" % winKernel.kernel32.GetOEMCP())
z.extract(info, addonPath)
@property
def manifest(self):
""" Gets the manifest for the represented Addon.
@rtype: AddonManifest
"""
return self._manifest
def __repr__(self):
return "<AddonBundle at %s>" % self._path
def createAddonBundleFromPath(path, destDir=None):
""" Creates a bundle from a directory that contains a a addon manifest file."""
basedir = os.path.abspath(path)
# If the caller did not provide a destination directory name,
# put the bundle at the same level as the add-on's top directory,
# that is, basedir/..
if destDir is None:
destDir = os.path.dirname(basedir)
manifest_path = os.path.join(basedir, MANIFEST_FILENAME)
if not os.path.isfile(manifest_path):
raise AddonError("Can't find %s manifest file." % manifest_path)
with open(manifest_path) as f:
manifest = AddonManifest(f)
if manifest.errors is not None:
_report_manifest_errors(manifest)
raise AddonError("Manifest file has errors.")
bundleFilename = "%s-%s.%s" % (manifest['name'], manifest['version'], BUNDLE_EXTENSION)
bundleDestination = os.path.join(destDir, bundleFilename)
with zipfile.ZipFile(bundleDestination, 'w') as z:
# FIXME: the include/exclude feature may or may not be useful. Also python files can be pre-compiled.
for dir, dirnames, filenames in os.walk(basedir):
relativePath = os.path.relpath(dir, basedir)
for filename in filenames:
pathInBundle = os.path.join(relativePath, filename)
absPath = os.path.join(dir, filename)
z.write(absPath, pathInBundle)
return AddonBundle(bundleDestination)
def _report_manifest_errors(manifest):
log.warning("Error loading manifest:\n%s", manifest.errors)
class AddonManifest(ConfigObj):
""" Add-on manifest file. It contains metadata about an NVDA add-on package. """
configspec = ConfigObj(StringIO(
"""
# NVDA Add-on Manifest configuration specification
# Add-on unique name
name = string()
# short summary (label) of the add-on to show to users.
summary = string()
# Long description with further information and instructions
description = string(default=None)
# Name of the author or entity that created the add-on
author = string()
# Version of the add-on. Should preferably be in some standard format such as x.y.z
version = string()
# The minimum required NVDA version for this add-on to work correctly.
# Should be less than or equal to lastTestedNVDAVersion
minimumNVDAVersion = apiVersion(default="0.0.0")
# Must be greater than or equal to minimumNVDAVersion
lastTestedNVDAVersion = apiVersion(default="0.0.0")
# URL for more information about the add-on. New versions and such.
url= string(default=None)
# Name of default documentation file for the add-on.
docFileName = string(default=None)
# NOTE: apiVersion:
# Eg: 2019.1.0 or 0.0.0
# Must have 3 integers separated by dots.
# The first integer must be a year (4 digits)
# "0.0.0" is also valid.
# The final integer can be left out, and in that case will default to 0. E.g. 2019.1
"""))
def __init__(self, input, translatedInput=None):
""" Constructs an L{AddonManifest} instance from manifest string data
@param input: data to read the manifest information from
@type input: a file-like object.
@param translatedInput: translated manifest input
@type translatedInput: file-like object
"""
super(AddonManifest, self).__init__(input, configspec=self.configspec, encoding='utf-8', default_encoding='utf-8')
self._errors = None
val = Validator({"apiVersion":validate_apiVersionString})
result = self.validate(val, copy=True, preserve_errors=True)
if result != True:
self._errors = result
elif True != self._validateApiVersionRange():
self._errors = "Constraint not met: minimumNVDAVersion ({}) <= lastTestedNVDAVersion ({})".format(
self.get("minimumNVDAVersion"),
self.get("lastTestedNVDAVersion")
)
self._translatedConfig = None
if translatedInput is not None:
self._translatedConfig = ConfigObj(translatedInput, encoding='utf-8', default_encoding='utf-8')
for k in ('summary','description'):
val=self._translatedConfig.get(k)
if val:
self[k]=val
@property
def errors(self):
return self._errors
def _validateApiVersionRange(self):
lastTested = self.get("lastTestedNVDAVersion")
minRequiredVersion = self.get("minimumNVDAVersion")
return minRequiredVersion <= lastTested
def validate_apiVersionString(value):
from configobj.validate import ValidateError
if not isinstance(value, string_types):
raise ValidateError('Expected an apiVersion in the form of a string. EG "2019.1.0"')
try:
tuple = addonAPIVersion.getAPIVersionTupleFromString(value)
return tuple
except ValueError as e:
raise ValidateError('"{}" is not a valid API Version string: {}'.format(value, e))
| 1 | 24,946 | Please add your name to the copyright list. | nvaccess-nvda | py |
@@ -243,6 +243,11 @@ func (r *replacer) getSubstitution(key string) string {
case "{path_escaped}":
u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL)
return url.QueryEscape(u.Path)
+ case "{request_id}":
+ reqid, _ := r.request.Context().Value(RequestIDCtxKey).(string)
+ if reqid != "" {
+ return reqid
+ }
case "{rewrite_path}":
return r.request.URL.Path
case "{rewrite_path_escaped}": | 1 | package httpserver
import (
"bytes"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/mholt/caddy"
)
// requestReplacer is a strings.Replacer which is used to
// encode literal \r and \n characters and keep everything
// on one line
var requestReplacer = strings.NewReplacer(
"\r", "\\r",
"\n", "\\n",
)
var now = time.Now
// Replacer is a type which can replace placeholder
// substrings in a string with actual values from a
// http.Request and ResponseRecorder. Always use
// NewReplacer to get one of these. Any placeholders
// made with Set() should overwrite existing values if
// the key is already used.
type Replacer interface {
Replace(string) string
Set(key, value string)
}
// replacer implements Replacer. customReplacements
// is used to store custom replacements created with
// Set() until the time of replacement, at which point
// they will be used to overwrite other replacements
// if there is a name conflict.
type replacer struct {
customReplacements map[string]string
emptyValue string
responseRecorder *ResponseRecorder
request *http.Request
requestBody *limitWriter
}
// limitWriter buffers at most `remain` bytes of whatever is written to it and
// silently discards the rest; NewReplacer uses it to keep a bounded copy of
// the request body for the {request_body} placeholder.
type limitWriter struct {
	w      bytes.Buffer
	remain int
}
func newLimitWriter(max int) *limitWriter {
return &limitWriter{
w: bytes.Buffer{},
remain: max,
}
}
func (lw *limitWriter) Write(p []byte) (int, error) {
// skip if we are full
if lw.remain <= 0 {
return len(p), nil
}
if n := len(p); n > lw.remain {
p = p[:lw.remain]
}
n, err := lw.w.Write(p)
lw.remain -= n
return n, err
}
func (lw *limitWriter) String() string {
return lw.w.String()
}
// NewReplacer makes a new replacer based on r and rr which
// are used for request and response placeholders, respectively.
// Request placeholders are created immediately, whereas
// response placeholders are not created until Replace()
// is invoked. rr may be nil if it is not available.
// emptyValue should be the string that is used in place
// of empty string (can still be empty string).
func NewReplacer(r *http.Request, rr *ResponseRecorder, emptyValue string) Replacer {
rb := newLimitWriter(MaxLogBodySize)
if r.Body != nil {
r.Body = struct {
io.Reader
io.Closer
}{io.TeeReader(r.Body, rb), io.Closer(r.Body)}
}
return &replacer{
request: r,
requestBody: rb,
responseRecorder: rr,
customReplacements: make(map[string]string),
emptyValue: emptyValue,
}
}
func canLogRequest(r *http.Request) bool {
if r.Method == "POST" || r.Method == "PUT" {
for _, cType := range r.Header[headerContentType] {
// the cType could have charset and other info
if strings.Contains(cType, contentTypeJSON) || strings.Contains(cType, contentTypeXML) {
return true
}
}
}
return false
}
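// Illustrative example (not part of the original file): a POST request with
// Content-Type "application/json; charset=utf-8" is considered loggable,
// because the substring check above tolerates extra parameters like charset.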
// Replace performs a replacement of values on s and returns
// the string with the replaced values.
func (r *replacer) Replace(s string) string {
// Do not attempt replacements if no placeholder is found.
if !strings.ContainsAny(s, "{}") {
return s
}
result := ""
for {
idxStart := strings.Index(s, "{")
if idxStart == -1 {
// no placeholder anymore
break
}
idxEnd := strings.Index(s[idxStart:], "}")
if idxEnd == -1 {
// unpaired placeholder
break
}
idxEnd += idxStart
// get a replacement
placeholder := s[idxStart : idxEnd+1]
replacement := r.getSubstitution(placeholder)
// append prefix + replacement
result += s[:idxStart] + replacement
// strip out scanned parts
s = s[idxEnd+1:]
}
// append unscanned parts
return result + s
}
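// Usage sketch (illustrative, not part of the original file): for a GET
// request to host "example.com", Replace("{method} {host}") yields
// "GET example.com", while an unpaired brace such as "{method" is left as-is
// because the scan loop above breaks on a missing closing brace.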
func roundDuration(d time.Duration) time.Duration {
if d >= time.Millisecond {
return round(d, time.Millisecond)
} else if d >= time.Microsecond {
return round(d, time.Microsecond)
}
return d
}
// round rounds d to the nearest r
func round(d, r time.Duration) time.Duration {
if r <= 0 {
return d
}
neg := d < 0
if neg {
d = -d
}
if m := d % r; m+m < r {
d = d - m
} else {
d = d + r - m
}
if neg {
return -d
}
return d
}
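// Worked examples for round (derived from the arithmetic above, not part of
// the original file):
//   round(1499*time.Microsecond, time.Millisecond) == 1 * time.Millisecond
//   round(1500*time.Microsecond, time.Millisecond) == 2 * time.Millisecond (halves round up)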
// getSubstitution retrieves value from corresponding key
func (r *replacer) getSubstitution(key string) string {
// search custom replacements first
if value, ok := r.customReplacements[key]; ok {
return value
}
// search request headers then
if key[1] == '>' {
want := key[2 : len(key)-1]
for key, values := range r.request.Header {
// Header placeholders (case-insensitive)
if strings.EqualFold(key, want) {
return strings.Join(values, ",")
}
}
}
// next check for cookies
if key[1] == '~' {
name := key[2 : len(key)-1]
if cookie, err := r.request.Cookie(name); err == nil {
return cookie.Value
}
}
// next check for query argument
if key[1] == '?' {
query := r.request.URL.Query()
name := key[2 : len(key)-1]
return query.Get(name)
}
// search default replacements in the end
switch key {
case "{method}":
return r.request.Method
case "{scheme}":
if r.request.TLS != nil {
return "https"
}
return "http"
case "{hostname}":
name, err := os.Hostname()
if err != nil {
return r.emptyValue
}
return name
case "{host}":
return r.request.Host
case "{hostonly}":
host, _, err := net.SplitHostPort(r.request.Host)
if err != nil {
return r.request.Host
}
return host
case "{path}":
u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL)
return u.Path
case "{path_escaped}":
u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL)
return url.QueryEscape(u.Path)
case "{rewrite_path}":
return r.request.URL.Path
case "{rewrite_path_escaped}":
return url.QueryEscape(r.request.URL.Path)
case "{query}":
u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL)
return u.RawQuery
case "{query_escaped}":
u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL)
return url.QueryEscape(u.RawQuery)
case "{fragment}":
u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL)
return u.Fragment
case "{proto}":
return r.request.Proto
case "{remote}":
host, _, err := net.SplitHostPort(r.request.RemoteAddr)
if err != nil {
return r.request.RemoteAddr
}
return host
case "{port}":
_, port, err := net.SplitHostPort(r.request.RemoteAddr)
if err != nil {
return r.emptyValue
}
return port
case "{uri}":
u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL)
return u.RequestURI()
case "{uri_escaped}":
u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL)
return url.QueryEscape(u.RequestURI())
case "{rewrite_uri}":
return r.request.URL.RequestURI()
case "{rewrite_uri_escaped}":
return url.QueryEscape(r.request.URL.RequestURI())
case "{when}":
return now().Format(timeFormat)
case "{when_iso}":
return now().UTC().Format(timeFormatISOUTC)
case "{file}":
_, file := path.Split(r.request.URL.Path)
return file
case "{dir}":
dir, _ := path.Split(r.request.URL.Path)
return dir
case "{request}":
dump, err := httputil.DumpRequest(r.request, false)
if err != nil {
return r.emptyValue
}
return requestReplacer.Replace(string(dump))
case "{request_body}":
if !canLogRequest(r.request) {
return r.emptyValue
}
_, err := ioutil.ReadAll(r.request.Body)
if err != nil {
if err == ErrMaxBytesExceeded {
return r.emptyValue
}
}
return requestReplacer.Replace(r.requestBody.String())
case "{mitm}":
if val, ok := r.request.Context().Value(caddy.CtxKey("mitm")).(bool); ok {
if val {
return "likely"
}
return "unlikely"
}
return "unknown"
case "{status}":
if r.responseRecorder == nil {
return r.emptyValue
}
return strconv.Itoa(r.responseRecorder.status)
case "{size}":
if r.responseRecorder == nil {
return r.emptyValue
}
return strconv.Itoa(r.responseRecorder.size)
case "{latency}":
if r.responseRecorder == nil {
return r.emptyValue
}
return roundDuration(time.Since(r.responseRecorder.start)).String()
case "{latency_ms}":
if r.responseRecorder == nil {
return r.emptyValue
}
elapsedDuration := time.Since(r.responseRecorder.start)
return strconv.FormatInt(convertToMilliseconds(elapsedDuration), 10)
}
return r.emptyValue
}
//convertToMilliseconds returns the number of milliseconds in the given duration
func convertToMilliseconds(d time.Duration) int64 {
return d.Nanoseconds() / 1e6
}
// Set sets key to value in the r.customReplacements map.
func (r *replacer) Set(key, value string) {
r.customReplacements["{"+key+"}"] = value
}
const (
timeFormat = "02/Jan/2006:15:04:05 -0700"
timeFormatISOUTC = "2006-01-02T15:04:05Z" // ISO 8601 with timezone to be assumed as UTC
headerContentType = "Content-Type"
contentTypeJSON = "application/json"
contentTypeXML = "application/xml"
	// MaxLogBodySize limits the size of a logged request's body
MaxLogBodySize = 100 * 1024
)
| 1 | 11,001 | You can elide the `if` check for empty string, because if it's not a value that is set, the string will be empty anyway. | caddyserver-caddy | go |
@@ -1,8 +1,8 @@
/*global countlyVue, CV, countlyCommon, Promise, moment*/
(function(countlyPushNotification) {
- var messagesSentLabel = CV.i18n('push-notification.time-chart-serie-messages-sent');
- var actionsPerformedLabel = CV.i18n('push-notification.time-chart-serie-actions-performed');
+ var messagesSentLabel = CV.i18n('push-notification.messages-sent-serie-name');
+ var actionsPerformedLabel = CV.i18n('push-notification.actions-performed-serie-name');
var StatusFinderHelper = {
STATUS_SHIFT_OPERATOR_ENUM: { | 1 | /*global countlyVue, CV, countlyCommon, Promise, moment*/
(function(countlyPushNotification) {
var messagesSentLabel = CV.i18n('push-notification.time-chart-serie-messages-sent');
var actionsPerformedLabel = CV.i18n('push-notification.time-chart-serie-actions-performed');
var StatusFinderHelper = {
STATUS_SHIFT_OPERATOR_ENUM: {
NotCreated: 0, // 0
Created: 1 << 0, // 1
Scheduled: 1 << 1, // 2
Sending: 1 << 2, // 4
Done: 1 << 3, // 8
Error: 1 << 4, // 16
Success: 1 << 5, // 32
Aborted: 1 << 10, // 1024
Deleted: 1 << 11, // 2048
},
isSending: function(status) {
return (status & this.STATUS_SHIFT_OPERATOR_ENUM.Sending) > 0;
},
isInitial: function(status) {
return status === this.STATUS_SHIFT_OPERATOR_ENUM.NotCreated;
},
isCreated: function(status) {
return (status & this.STATUS_SHIFT_OPERATOR_ENUM.Created) > 0;
},
isScheduled: function(status) {
return (status & this.STATUS_SHIFT_OPERATOR_ENUM.Scheduled) > 0;
},
isAborted: function(status) {
return (status & this.STATUS_SHIFT_OPERATOR_ENUM.Aborted) > 0;
},
isDone: function(status) {
return (status & this.STATUS_SHIFT_OPERATOR_ENUM.Done) > 0;
},
};
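// Illustrative example (derived from the shift values above, not part of the
// original file): a status of 3 (Created | Scheduled) makes isCreated(3) and
// isScheduled(3) return true, while isDone(3) returns false.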
countlyPushNotification.helper = {
getInitialSeriesStateByType: function(type) {
if (type === countlyPushNotification.service.TypeEnum.ONE_TIME) {
return {
monthly: [{data: [], label: actionsPerformedLabel}, {data: [], label: messagesSentLabel}],
weekly: [{data: [], label: actionsPerformedLabel}, {data: [], label: messagesSentLabel}]
};
}
return {
daily: [{data: [], label: actionsPerformedLabel}, {data: [], label: messagesSentLabel}]
};
},
getInitialPeriodsStateByType: function(type) {
if (type === countlyPushNotification.service.TypeEnum.ONE_TIME) {
return {
periods: {monthly: [], weekly: []},
};
}
return {
periods: {daily: []},
};
}
};
countlyPushNotification.service = {
TypeEnum: {
ONE_TIME: "oneTime",
AUTOMATIC: "automatic",
TRANSACTIONAL: "transactional"
},
PeriodEnum: {
WEEKLY: "weekly",
MONTHLY: "monthly",
DAILY: "daily"
},
PlatformEnum: {
ANDROID: "android",
ALL: "all",
IOS: "ios"
},
StatusEnum: {
ALL: "all",
SENT: "sent",
SENDING: "sending",
DRAFT: "draft",
NOT_APPROVED: "notApproved",
ABORTED: "aborted",
FAILED: "failed",
STOPPED: "stopped",
SCHEDULED: "scheduled"
},
mapType: function(type) {
switch (type) {
case this.TypeEnum.AUTOMATIC: {
return {auto: true, tx: false};
}
case this.TypeEnum.TRANSACTIONAL: {
return {auto: false, tx: true};
}
default: {
return { auto: false, tx: false};
}
}
},
mapStatus: function(status, error) {
if (StatusFinderHelper.isSending(status)) {
if (error) {
return {value: 'sending-errors', label: CV.i18n('push-notification.status-sending-errors')};
}
else {
return {value: this.StatusEnum.SENDING, label: CV.i18n('push-notification.status-sending')};
}
}
else if (StatusFinderHelper.isAborted(status)) {
return {value: this.StatusEnum.ABORTED, label: CV.i18n('push-notification.status-aborted')};
}
else if (StatusFinderHelper.isDone(status)) {
if (error) {
return {value: 'sent-errors', label: CV.i18n('push-notification.status-sent-errors')};
}
else {
return {value: this.StatusEnum.SENT, label: CV.i18n('push-notification.status-sent')};
}
}
else if (StatusFinderHelper.isScheduled(status)) {
return {value: this.StatusEnum.SCHEDULED, label: CV.i18n('push-notification.status-scheduled')};
}
else if (StatusFinderHelper.isCreated(status)) {
return {value: 'created', label: CV.i18n('push-notification.status-created')};
}
else if (StatusFinderHelper.isInitial(status)) {
return {value: 'initial', label: CV.i18n('push-notification.status-initial')};
}
else {
return {value: status, label: status};
}
},
mapSeriesDtoToModel: function(dto, type) {
if (type === this.TypeEnum.ONE_TIME) {
return {
monthly: [{data: dto.actions.monthly.data || [], label: actionsPerformedLabel}, {data: dto.sent.monthly.data || [], label: messagesSentLabel}],
weekly: [{data: dto.actions.weekly.data || [], label: actionsPerformedLabel}, {data: dto.sent.weekly.data || [], label: messagesSentLabel}],
};
}
else if (type === this.TypeEnum.AUTOMATIC) {
return {
daily: [{data: dto.actions_automated.daily.data || [], label: actionsPerformedLabel}, {data: dto.sent_automated.daily.data || [], label: messagesSentLabel}]
};
}
else {
return {
daily: [{data: dto.actions_tx.daily.data || [], label: actionsPerformedLabel}, {data: dto.sent_tx.daily.data || [], label: messagesSentLabel}]
};
}
},
mapPeriods: function(dto, type) {
if (type === this.TypeEnum.ONE_TIME) {
return {weekly: dto.actions.weekly.keys, monthly: dto.actions.monthly.keys};
}
else if (type === this.TypeEnum.AUTOMATIC) {
// key must be "daily" to stay consistent with the daily series and the
// initial periods state for non one-time notifications
return {daily: dto.actions_automated.daily.keys};
}
else {
return {daily: dto.actions_tx.daily.keys};
}
},
mapPlatforms: function(dto) {
return dto.map(function(platformItem) {
if (platformItem === 'i') {
return CV.i18n("push-notification.platform-ios");
}
if (platformItem === 'a') {
return CV.i18n("push-notification.platform-android");
}
});
},
mapRowsDtoToModel: function(dto) {
var self = this;
var rowsModel = [];
dto.aaData.forEach(function(pushNotificationDtoItem, index) {
rowsModel[index] = {
_id: pushNotificationDtoItem._id,
name: pushNotificationDtoItem.messagePerLocale["default|t"] || "-",
status: self.mapStatus(pushNotificationDtoItem.result.status, pushNotificationDtoItem.result.error),
createdDateTime: {
date: moment(pushNotificationDtoItem.created).format("MMMM Do YYYY"),
time: moment(pushNotificationDtoItem.created).format("h:mm:ss a")
},
sentDateTime: {
date: moment(pushNotificationDtoItem.date).format("MMMM Do YYYY"),
time: moment(pushNotificationDtoItem.date).format("h:mm:ss a")
},
sent: pushNotificationDtoItem.result.sent,
actioned: pushNotificationDtoItem.result.actioned,
createdBy: pushNotificationDtoItem.creator,
platform: self.mapPlatforms(pushNotificationDtoItem.platforms),
content: pushNotificationDtoItem.messagePerLocale.default,
};
});
return rowsModel;
},
fetchAll: function(type) {
var self = this;
return new Promise(function(resolve, reject) {
Promise.all([self.fetchByType(type), self.fetchDashboard(type)])
.then(function(responses) {
var rowsModel = self.mapRowsDtoToModel(responses[0]);
var seriesModel = self.mapSeriesDtoToModel(responses[1], type);
var periods = self.mapPeriods(responses[1], type);
var pushNotificationModel = {
rows: rowsModel,
series: seriesModel,
periods: periods,
totalAppUsers: responses[1].users,
enabledUsers: responses[1].enabled
};
resolve(pushNotificationModel);
}).catch(function(error) {
reject(error);
});
});
},
fetchByType: function(type) {
var data = {
app_id: countlyCommon.ACTIVE_APP_ID,
};
Object.assign(data, this.mapType(type));
return new Promise(function(resolve, reject) {
CV.$.ajax({
type: "POST",
url: countlyCommon.API_PARTS.data.r + "/pushes/all",
data: data,
dataType: "json"
}).then(function(response) {
resolve(response);
}).catch(function(error) {
reject(error);
});
});
},
fetchById: function(id) {
return new Promise(function(resolve, reject) {
CV.$.ajax({
type: "GET",
url: countlyCommon.API_PARTS.data.i + "/pushes/message",
data: {
args: JSON.stringify({_id: id})
},
dataType: "json"
}).then(function(response) {
resolve(response);
}).catch(function(error) {
reject(error);
});
});
},
fetchDashboard: function() {
var data = {
app_id: countlyCommon.ACTIVE_APP_ID,
};
return new Promise(function(resolve, reject) {
CV.$.ajax({
type: "GET",
url: window.countlyCommon.API_URL + '/i/pushes/dashboard',
data: data,
dataType: "json"
}).then(function(response) {
resolve(response);
}).catch(function(error) {
reject(error);
});
});
},
deleteById: function(id) {
var data = {
app_id: countlyCommon.ACTIVE_APP_ID,
_id: id
};
return new Promise(function(resolve, reject) {
CV.$.ajax({
method: 'GET',
url: window.countlyCommon.API_URL + '/i/pushes/delete',
data: data,
dataType: "json"
}).then(function(response) {
resolve(response);
}).catch(function(error) {
reject(error);
});
});
}
};
countlyPushNotification.getVuexModule = function() {
var getInitialState = function() {
return {
selectedPushNotificationType: countlyPushNotification.service.TypeEnum.ONE_TIME,
pushNotifications: {
series: {
monthly: [{data: [], label: actionsPerformedLabel}, {data: [], label: messagesSentLabel}],
weekly: [{data: [], label: actionsPerformedLabel}, {data: [], label: messagesSentLabel}]
},
rows: [],
periods: {monthly: [], weekly: []},
totalAppUsers: null,
enabledUsers: null
},
statusFilter: countlyPushNotification.service.StatusEnum.ALL,
platformFilter: countlyPushNotification.service.PlatformEnum.ALL,
isLoading: false,
hasError: false,
error: null,
totalPushMessagesSent: null,
totalUserActionsPerformed: null,
};
};
var pushNotificationActions = {
fetchAll: function(context) {
context.dispatch('onFetchInit');
countlyPushNotification.service.fetchAll(context.state.selectedPushNotificationType)
.then(function(response) {
context.commit('setPushNotifications', response);
context.dispatch('onFetchSuccess');
}).catch(function(error) {
context.dispatch('onFetchError', error);
});
},
onDeletePushNotification: function(context, id) {
context.dispatch('onFetchInit');
countlyPushNotification.service.deleteById(id)
.then(function() {
context.dispatch('fetchAll');
}).catch(function() {
//TODO:dispatch notification toast with error message
});
},
// eslint-disable-next-line no-unused-vars
onDuplicatePushNotification: function(context, id) {
//TODO: open create push notification drawer
},
// eslint-disable-next-line no-unused-vars
onResendPushNotification: function(context, id) {
//TODO: resend push notification
},
onSetPushNotificationType: function(context, value) {
context.commit('setPushNotificationType', value);
context.commit('resetPushNotifications');
},
onSetPlatformFilter: function(context, value) {
context.commit('setPlatformFilter', value);
},
onSetStatusFilter: function(context, value) {
context.commit('setStatusFilter', value);
},
onFetchInit: function(context) {
context.commit('setFetchInit');
},
onFetchError: function(context, error) {
context.commit('setFetchError', error);
},
onFetchSuccess: function(context) {
context.commit('setFetchSuccess');
},
};
var pushNotificationMutations = {
setPushNotificationType: function(state, value) {
state.selectedPushNotificationType = value;
},
resetPushNotifications: function(state) {
state.pushNotifications = {
series: countlyPushNotification.helper.getInitialSeriesStateByType(state.selectedPushNotificationType),
rows: [],
periods: countlyPushNotification.helper.getInitialPeriodsStateByType(state.selectedPushNotificationType),
};
},
setPushNotifications: function(state, value) {
state.pushNotifications = value;
},
setStatusFilter: function(state, value) {
state.statusFilter = value;
},
setPlatformFilter: function(state, value) {
state.platformFilter = value;
},
setFetchInit: function(state) {
state.isLoading = true;
state.hasError = false;
state.error = null;
},
setFetchError: function(state, error) {
state.isLoading = false;
state.hasError = true;
state.error = error;
},
setFetchSuccess: function(state) {
state.isLoading = false;
state.hasError = false;
state.error = null;
}
};
return countlyVue.vuex.Module("countlyPushNotification", {
state: getInitialState,
actions: pushNotificationActions,
mutations: pushNotificationMutations
});
};
}(window.countlyPushNotification = window.countlyPushNotification || {})); | 1 | 14,058 | Different i18n keys. By mistake or on purpose? | Countly-countly-server | js |
@@ -59,3 +59,18 @@ type SecretKeySelector struct {
const (
TLSCAKey = "ca.crt"
)
+
+// Describes how the ACME challenge self check behaves when it fails.
+type ACMESelfCheckFailurePolicy string
+
+const (
+ // The default failure policy. This policy will cause the request to be
+ // sent to the certificate provider ONLY if the ACME challenge self check
+ // succeeds. The request is retried continuously and the `Certificate` will
+ // remain `Pending` indefinitely.
+ ACMESelfCheckFailurePolicyRetryForever ACMESelfCheckFailurePolicy = "RetryForever"
+
+ // This policy will cause the ACME challenge self check response to be
+ // ignored and the request will always be sent to the certificate provider.
+ ACMESelfCheckFailurePolicyIgnore ACMESelfCheckFailurePolicy = "Ignore"
+) | 1 | /*
Copyright 2019 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package meta
// ConditionStatus represents a condition's status.
type ConditionStatus string
// These are valid condition statuses. "ConditionTrue" means a resource is in
// the condition; "ConditionFalse" means a resource is not in the condition;
// "ConditionUnknown" means kubernetes can't decide if a resource is in the
// condition or not. In the future, we could add other intermediate
// conditions, e.g. ConditionDegraded.
const (
// ConditionTrue represents the fact that a given condition is true
ConditionTrue ConditionStatus = "True"
// ConditionFalse represents the fact that a given condition is false
ConditionFalse ConditionStatus = "False"
// ConditionUnknown represents the fact that a given condition is unknown
ConditionUnknown ConditionStatus = "Unknown"
)
type LocalObjectReference struct {
// Name of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
// TODO: Add other useful fields. apiVersion, kind, uid?
Name string
}
// ObjectReference is a reference to an object with a given name, kind and group.
type ObjectReference struct {
Name string
Kind string
Group string
}
type SecretKeySelector struct {
// The name of the secret in the pod's namespace to select from.
LocalObjectReference
// The key of the secret to select from. Must be a valid secret key.
Key string
}
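// Illustrative usage (hypothetical names, not part of the original file):
// selecting the "tls.key" entry of a Secret named "my-secret" would be
// expressed as:
//
//	SecretKeySelector{
//		LocalObjectReference: LocalObjectReference{Name: "my-secret"},
//		Key:                  "tls.key",
//	}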
const (
TLSCAKey = "ca.crt"
)
| 1 | 21,169 | Given the description for this is `// Describes how the ACME challenge self check behaves when it fails.`, perhaps `RetryForever` is better terminology here as when the check fails, we don't just wait, we retry periodically. | jetstack-cert-manager | go |
@@ -187,14 +187,14 @@ _ostree_gpg_verifier_check_signature (OstreeGpgVerifier *self,
gpg_error = gpgme_data_new_from_fd (&kdata, fd);
if (gpg_error != GPG_ERR_NO_ERROR)
{
- ot_gpgme_error_to_gio_error (gpg_error, error);
+ ot_gpgme_throw (gpg_error, error, "Loading data from fd %i", fd);
goto out;
}
gpg_error = gpgme_op_import (result->context, kdata);
if (gpg_error != GPG_ERR_NO_ERROR)
{
- ot_gpgme_error_to_gio_error (gpg_error, error);
+ ot_gpgme_throw (gpg_error, error, "import");
goto out;
}
} | 1 | /* -*- mode: C; c-file-style: "gnu"; indent-tabs-mode: nil; -*-
*
* Copyright (C) 2011 Colin Walters <walters@verbum.org>
* Copyright (C) 2013 Sjoerd Simons <sjoerd.simons@collabora.co.uk>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Author: Sjoerd Simons <sjoerd.simons@collabora.co.uk>
*/
#include "config.h"
#include "libglnx.h"
#include "ostree-gpg-verifier.h"
#include "ot-gpg-utils.h"
#include "ostree-gpg-verify-result-private.h"
#include "otutil.h"
#include <stdlib.h>
#include <glib/gstdio.h>
typedef struct {
GObjectClass parent_class;
} OstreeGpgVerifierClass;
struct OstreeGpgVerifier {
GObject parent;
GList *keyrings;
GPtrArray *keyring_data;
GPtrArray *key_ascii_files;
};
G_DEFINE_TYPE (OstreeGpgVerifier, _ostree_gpg_verifier, G_TYPE_OBJECT)
static void
ostree_gpg_verifier_finalize (GObject *object)
{
OstreeGpgVerifier *self = OSTREE_GPG_VERIFIER (object);
g_list_free_full (self->keyrings, g_object_unref);
if (self->key_ascii_files)
g_ptr_array_unref (self->key_ascii_files);
g_clear_pointer (&self->keyring_data, (GDestroyNotify)g_ptr_array_unref);
G_OBJECT_CLASS (_ostree_gpg_verifier_parent_class)->finalize (object);
}
static void
_ostree_gpg_verifier_class_init (OstreeGpgVerifierClass *klass)
{
GObjectClass *object_class = G_OBJECT_CLASS (klass);
object_class->finalize = ostree_gpg_verifier_finalize;
/* Initialize GPGME */
gpgme_check_version (NULL);
}
static void
_ostree_gpg_verifier_init (OstreeGpgVerifier *self)
{
self->keyring_data = g_ptr_array_new_with_free_func ((GDestroyNotify)g_bytes_unref);
}
static void
verify_result_finalized_cb (gpointer data,
GObject *finalized_verify_result)
{
g_autofree gchar *tmp_dir = data; /* assume ownership */
/* XXX OstreeGpgVerifyResult could do this cleanup in its own
* finalize() method, but I didn't want this keyring hack
* bleeding into multiple classes. */
(void) glnx_shutil_rm_rf_at (AT_FDCWD, tmp_dir, NULL, NULL);
}
OstreeGpgVerifyResult *
_ostree_gpg_verifier_check_signature (OstreeGpgVerifier *self,
GBytes *signed_data,
GBytes *signatures,
GCancellable *cancellable,
GError **error)
{
GLNX_AUTO_PREFIX_ERROR("GPG", error);
gpgme_error_t gpg_error = 0;
g_auto(gpgme_data_t) data_buffer = NULL;
g_auto(gpgme_data_t) signature_buffer = NULL;
g_autofree char *tmp_dir = NULL;
g_autoptr(GOutputStream) target_stream = NULL;
OstreeGpgVerifyResult *result = NULL;
gboolean success = FALSE;
GList *link;
int armor;
/* GPGME has no API for using multiple keyrings (aka, gpg --keyring),
* so we concatenate all the keyring files into one pubring.gpg in a
* temporary directory, then tell GPGME to use that directory as the
* home directory. */
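/* Illustrative sketch (not part of the original file): conceptually the code
 * below behaves like
 *   cat keyring1.gpg keyring2.gpg ... > $tmp_dir/pubring.gpg
 * followed by pointing GPGME's home directory at $tmp_dir. */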
if (g_cancellable_set_error_if_cancelled (cancellable, error))
goto out;
result = g_initable_new (OSTREE_TYPE_GPG_VERIFY_RESULT,
cancellable, error, NULL);
if (result == NULL)
goto out;
if (!ot_gpgme_ctx_tmp_home_dir (result->context,
&tmp_dir, &target_stream,
cancellable, error))
goto out;
for (link = self->keyrings; link != NULL; link = link->next)
{
g_autoptr(GFileInputStream) source_stream = NULL;
GFile *keyring_file = link->data;
gssize bytes_written;
GError *local_error = NULL;
source_stream = g_file_read (keyring_file, cancellable, &local_error);
/* Disregard non-existent keyrings. */
if (g_error_matches (local_error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND))
{
g_clear_error (&local_error);
continue;
}
else if (local_error != NULL)
{
g_propagate_error (error, local_error);
goto out;
}
bytes_written = g_output_stream_splice (target_stream,
G_INPUT_STREAM (source_stream),
G_OUTPUT_STREAM_SPLICE_CLOSE_SOURCE,
cancellable, error);
if (bytes_written < 0)
goto out;
}
for (guint i = 0; i < self->keyring_data->len; i++)
{
GBytes *keyringd = self->keyring_data->pdata[i];
gsize len;
gsize bytes_written;
const guint8 *buf = g_bytes_get_data (keyringd, &len);
if (!g_output_stream_write_all (target_stream, buf, len, &bytes_written,
cancellable, error))
goto out;
}
if (!g_output_stream_close (target_stream, cancellable, error))
goto out;
/* Save the previous armor value - we need it on for importing ASCII keys */
armor = gpgme_get_armor (result->context);
gpgme_set_armor (result->context, 1);
/* Now, use the API to import ASCII-armored keys */
if (self->key_ascii_files)
{
for (guint i = 0; i < self->key_ascii_files->len; i++)
{
const char *path = self->key_ascii_files->pdata[i];
glnx_fd_close int fd = -1;
g_auto(gpgme_data_t) kdata = NULL;
if (!glnx_openat_rdonly (AT_FDCWD, path, TRUE, &fd, error))
goto out;
gpg_error = gpgme_data_new_from_fd (&kdata, fd);
if (gpg_error != GPG_ERR_NO_ERROR)
{
ot_gpgme_error_to_gio_error (gpg_error, error);
goto out;
}
gpg_error = gpgme_op_import (result->context, kdata);
if (gpg_error != GPG_ERR_NO_ERROR)
{
ot_gpgme_error_to_gio_error (gpg_error, error);
goto out;
}
}
}
gpgme_set_armor (result->context, armor);
/* Both the signed data and signature GBytes instances will outlive the
* gpgme_data_t structs, so we can safely reuse the GBytes memory buffer
* directly and avoid a copy. */
gpg_error = gpgme_data_new_from_mem (&data_buffer,
g_bytes_get_data (signed_data, NULL),
g_bytes_get_size (signed_data),
0 /* do not copy */);
if (gpg_error != GPG_ERR_NO_ERROR)
{
ot_gpgme_error_to_gio_error (gpg_error, error);
g_prefix_error (error, "Unable to read signed data: ");
goto out;
}
gpg_error = gpgme_data_new_from_mem (&signature_buffer,
g_bytes_get_data (signatures, NULL),
g_bytes_get_size (signatures),
0 /* do not copy */);
if (gpg_error != GPG_ERR_NO_ERROR)
{
ot_gpgme_error_to_gio_error (gpg_error, error);
g_prefix_error (error, "Unable to read signature: ");
goto out;
}
gpg_error = gpgme_op_verify (result->context, signature_buffer, data_buffer, NULL);
if (gpg_error != GPG_ERR_NO_ERROR)
{
ot_gpgme_error_to_gio_error (gpg_error, error);
g_prefix_error (error, "Unable to complete signature verification: ");
goto out;
}
/* Result data is owned by the context. */
result->details = gpgme_op_verify_result (result->context);
gpgme_result_ref (result->details);
success = TRUE;
out:
if (success)
{
/* Keep the temporary directory around for the life of the result
* object so its GPGME context remains valid. It may yet have to
* extract user details from signing keys and will need to access
* the fabricated pubring.gpg keyring. */
g_object_weak_ref (G_OBJECT (result),
verify_result_finalized_cb,
g_strdup (tmp_dir));
}
else
{
/* Destroy the result object on error. */
g_clear_object (&result);
/* Try to clean up the temporary directory. */
if (tmp_dir != NULL)
(void) glnx_shutil_rm_rf_at (AT_FDCWD, tmp_dir, NULL, NULL);
}
return result;
}
/* Given @path which should contain a GPG keyring file, add it
* to the list of trusted keys.
*/
void
_ostree_gpg_verifier_add_keyring_file (OstreeGpgVerifier *self,
GFile *path)
{
g_return_if_fail (G_IS_FILE (path));
self->keyrings = g_list_append (self->keyrings, g_object_ref (path));
}
/* Given @keyring which should be the contents of a GPG keyring file, add it to
* the list of trusted keys.
*/
void
_ostree_gpg_verifier_add_keyring_data (OstreeGpgVerifier *self,
GBytes *keyring)
{
g_ptr_array_add (self->keyring_data, g_bytes_ref (keyring));
}
void
_ostree_gpg_verifier_add_key_ascii_file (OstreeGpgVerifier *self,
const char *path)
{
if (!self->key_ascii_files)
self->key_ascii_files = g_ptr_array_new_with_free_func (g_free);
g_ptr_array_add (self->key_ascii_files, g_strdup (path));
}
gboolean
_ostree_gpg_verifier_add_keyring_dir (OstreeGpgVerifier *self,
GFile *path,
GCancellable *cancellable,
GError **error)
{
return _ostree_gpg_verifier_add_keyring_dir_at (self, AT_FDCWD,
gs_file_get_path_cached (path),
cancellable, error);
}
gboolean
_ostree_gpg_verifier_add_keyring_dir_at (OstreeGpgVerifier *self,
int dfd,
const char *path,
GCancellable *cancellable,
GError **error)
{
g_auto(GLnxDirFdIterator) dfd_iter = { 0, };
if (!glnx_dirfd_iterator_init_at (dfd, path, FALSE,
&dfd_iter, error))
return FALSE;
while (TRUE)
{
struct dirent *dent;
if (!glnx_dirfd_iterator_next_dent_ensure_dtype (&dfd_iter, &dent, cancellable, error))
return FALSE;
if (dent == NULL)
break;
if (dent->d_type != DT_REG)
continue;
const char *name = dent->d_name;
/* Files with a .gpg suffix are typically keyrings except
* for trustdb.gpg, which is the GPG trust database. */
if (!g_str_has_suffix (name, ".gpg"))
continue;
if (g_str_equal (name, "trustdb.gpg"))
continue;
if (g_str_equal (name, "secring.gpg"))
continue;
glnx_fd_close int fd = -1;
if (!glnx_openat_rdonly (dfd_iter.fd, dent->d_name, TRUE, &fd, error))
return FALSE;
g_autoptr(GBytes) data = glnx_fd_readall_bytes (fd, cancellable, error);
if (!data)
return FALSE;
g_ptr_array_add (self->keyring_data, g_steal_pointer (&data));
}
return TRUE;
}
gboolean
_ostree_gpg_verifier_add_global_keyring_dir (OstreeGpgVerifier *self,
GCancellable *cancellable,
GError **error)
{
const char *global_keyring_path = g_getenv ("OSTREE_GPG_HOME");
g_autoptr(GFile) global_keyring_dir = NULL;
gboolean ret = FALSE;
g_return_val_if_fail (OSTREE_IS_GPG_VERIFIER (self), FALSE);
if (global_keyring_path == NULL)
global_keyring_path = DATADIR "/ostree/trusted.gpg.d/";
if (g_file_test (global_keyring_path, G_FILE_TEST_IS_DIR))
{
global_keyring_dir = g_file_new_for_path (global_keyring_path);
if (!_ostree_gpg_verifier_add_keyring_dir (self, global_keyring_dir,
cancellable, error))
{
g_prefix_error (error, "Reading keyring directory '%s'",
gs_file_get_path_cached (global_keyring_dir));
goto out;
}
}
ret = TRUE;
out:
return ret;
}
OstreeGpgVerifier*
_ostree_gpg_verifier_new (void)
{
return g_object_new (OSTREE_TYPE_GPG_VERIFIER, NULL);
}
| 1 | 12,094 | Nit: let's add a more descriptive prefix in line with the other ones? E.g. `Unable to import key` ? | ostreedev-ostree | c |
@@ -13,6 +13,7 @@ describe Bolt::SSH do
let(:user) { ENV['BOLT_SSH_USER'] || "vagrant" }
let(:password) { ENV['BOLT_SSH_PASSWORD'] || "vagrant" }
let(:port) { ENV['BOLT_SSH_PORT'] || 2224 }
+ let(:key) { ENV['BOLT_SSH_KEY'] || Dir[".vagrant/**/private_key"] }
let(:command) { "pwd" }
let(:ssh) { Bolt::SSH.new(hostname, port, user, password) }
let(:insecure) { { config: Bolt::Config.new(insecure: true) } } | 1 | require 'spec_helper'
require 'bolt_spec/errors'
require 'bolt_spec/files'
require 'bolt/node'
require 'bolt/node/ssh'
require 'bolt/config'
describe Bolt::SSH do
include BoltSpec::Errors
include BoltSpec::Files
let(:hostname) { ENV['BOLT_SSH_HOST'] || "localhost" }
let(:user) { ENV['BOLT_SSH_USER'] || "vagrant" }
let(:password) { ENV['BOLT_SSH_PASSWORD'] || "vagrant" }
let(:port) { ENV['BOLT_SSH_PORT'] || 2224 }
let(:command) { "pwd" }
let(:ssh) { Bolt::SSH.new(hostname, port, user, password) }
let(:insecure) { { config: Bolt::Config.new(insecure: true) } }
let(:echo_script) { <<BASH }
for var in "$@"
do
echo $var
done
BASH
def result_value(stdout = nil, stderr = nil, exit_code = 0)
{ 'stdout' => stdout || '',
'stderr' => stderr || '',
'exit_code' => exit_code }
end
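# Illustrative example (not part of the original spec):
# result_value("out\n") == { 'stdout' => "out\n", 'stderr' => '', 'exit_code' => 0 }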
context "when connecting", ssh: true do
it "performs secure host key verification by default" do
allow(Net::SSH)
.to receive(:start)
.with(anything,
anything,
hash_including(
verify_host_key: instance_of(Net::SSH::Verifiers::Secure)
))
ssh.connect
end
it "downgrades to lenient if insecure is true" do
ssh = Bolt::SSH.new(hostname, port, user, password, **insecure)
allow(Net::SSH)
.to receive(:start)
.with(anything,
anything,
hash_including(
verify_host_key: instance_of(Net::SSH::Verifiers::Lenient)
))
ssh.connect
end
it "rejects the connection if host key verification fails" do
expect_node_error(Bolt::Node::ConnectError,
'HOST_KEY_ERROR',
/Host key verification failed/) do
ssh.connect
end
end
it "raises ConnectError if authentication fails" do
ssh = Bolt::SSH.new(hostname, port, user, password, **insecure)
allow(Net::SSH)
.to receive(:start)
.and_raise(Net::SSH::AuthenticationFailed,
"Authentication failed for foo@bar.com")
expect_node_error(Bolt::Node::ConnectError,
'AUTH_ERROR',
/Authentication failed for foo@bar.com/) do
ssh.connect
end
end
it "returns Node::ConnectError if the node name can't be resolved" do
ssh = Bolt::SSH.new('totally-not-there', port)
expect_node_error(Bolt::Node::ConnectError,
'CONNECT_ERROR',
/Failed to connect to/) do
ssh.connect
end
end
it "returns Node::ConnectError if the connection is refused" do
ssh = Bolt::SSH.new(hostname, 65535, user, password)
expect_node_error(Bolt::Node::ConnectError,
'CONNECT_ERROR',
/Failed to connect to/) do
ssh.connect
end
end
it "returns Node::ConnectError if the connection times out" do
allow(Net::SSH)
.to receive(:start)
.and_raise(Net::SSH::ConnectionTimeout)
expect_node_error(Bolt::Node::ConnectError,
'CONNECT_ERROR',
/Failed to connect to/) do
ssh.connect
end
end
end
context "when running_commands" do
let(:ssh) { Bolt::SSH.new(hostname, port, user, password, **insecure) }
before(:each) { ssh.connect }
after(:each) { ssh.disconnect }
it "executes a command on a host", ssh: true do
expect(ssh._run_command(command).value).to eq(result_value("/home/#{user}\n"))
end
it "captures stderr from a host", ssh: true do
expect(ssh._run_command("ssh -V").value['stderr']).to match(/OpenSSH/)
end
it "can upload a file to a host", ssh: true do
contents = "kljhdfg"
with_tempfile_containing('upload-test', contents) do |file|
ssh.upload(file.path, "/home/#{user}/upload-test")
expect(
ssh._run_command("cat /home/#{user}/upload-test").stdout
).to eq(contents)
ssh.execute("rm /home/#{user}/upload-test")
end
end
it "can run a script remotely", ssh: true do
contents = "#!/bin/sh\necho hellote"
with_tempfile_containing('script test', contents) do |file|
expect(
ssh._run_script(file.path, []).stdout
).to eq("hellote\n")
end
end
it "can run a script remotely with quoted arguments", ssh: true do
with_tempfile_containing('script-test-ssh-quotes', echo_script) do |file|
expect(
ssh._run_script(
file.path,
['nospaces',
'with spaces',
"\"double double\"",
"'double single'",
'\'single single\'',
'"single double"',
"double \"double\" double",
"double 'single' double",
'single "double" single',
'single \'single\' single']
).stdout
).to eq(<<QUOTED)
nospaces
with spaces
"double double"
'double single'
'single single'
"single double"
double "double" double
double 'single' double
single "double" single
single 'single' single
QUOTED
end
end
it "escapes unsafe shellwords in arguments", ssh: true do
with_tempfile_containing('script-test-ssh-escape', echo_script) do |file|
expect(
ssh._run_script(
file.path,
['echo $HOME; cat /etc/passwd']
).stdout
).to eq(<<SHELLWORDS)
echo $HOME; cat /etc/passwd
SHELLWORDS
end
end
it "can run a task", ssh: true do
contents = "#!/bin/sh\necho -n ${PT_message_one} ${PT_message_two}"
arguments = { message_one: 'Hello from task', message_two: 'Goodbye' }
with_tempfile_containing('tasks test', contents) do |file|
expect(ssh._run_task(file.path, 'environment', arguments).message)
.to eq('Hello from task Goodbye')
end
end
it "can run a task passing input on stdin", ssh: true do
contents = "#!/bin/sh\ngrep 'message_one'"
arguments = { message_one: 'Hello from task', message_two: 'Goodbye' }
with_tempfile_containing('tasks test stdin', contents) do |file|
expect(ssh._run_task(file.path, 'stdin', arguments).value)
.to eq("message_one" => "Hello from task", "message_two" => "Goodbye")
end
end
it "can run a task passing input on stdin and environment", ssh: true do
contents = <<SHELL
#!/bin/sh
echo -n ${PT_message_one} ${PT_message_two}
grep 'message_one'
SHELL
arguments = { message_one: 'Hello from task', message_two: 'Goodbye' }
with_tempfile_containing('tasks-test-both', contents) do |file|
expect(ssh._run_task(file.path, 'both', arguments).message).to eq(<<SHELL)
Hello from task Goodbye{\"message_one\":\
\"Hello from task\",\"message_two\":\"Goodbye\"}
SHELL
end
end
context "when it can't upload a file" do
before(:each) do
expect(ssh).to receive(:write_remote_file).and_raise(
Bolt::Node::FileError.new("no write", "WRITE_ERROR")
)
end
it 'returns an error result for _upload', ssh: true do
contents = "kljhdfg"
with_tempfile_containing('upload-test', contents) do |file|
expect(ssh.upload(file.path, "/home/#{user}/upload-test").error['msg']).to eq('no write')
end
end
it 'returns an error result for _run_command', ssh: true do
contents = "#!/bin/sh\necho hellote"
with_tempfile_containing('script test', contents) do |file|
expect(
ssh._run_script(file.path, []).error['msg']
).to eq("no write")
end
end
it 'returns an error result for _run_task', ssh: true do
contents = "#!/bin/sh\necho -n ${PT_message_one} ${PT_message_two}"
arguments = { message_one: 'Hello from task', message_two: 'Goodbye' }
with_tempfile_containing('tasks test', contents) do |file|
expect(ssh._run_task(file.path, 'environment', arguments).error['msg']).to eq("no write")
end
end
end
context "when it can't create a tempfile" do
before(:each) do
expect(ssh).to receive(:make_tempdir).and_raise(
Bolt::Node::FileError.new("no tmpdir", "TEMDIR_ERROR")
)
end
it 'errors when it tries to run a script', ssh: true do
contents = "#!/bin/sh\necho hellote"
with_tempfile_containing('script test', contents) do |file|
expect(
ssh._run_script(file.path, []).error['msg']
).to eq("no tmpdir")
end
end
it "can run a task", ssh: true do
contents = "#!/bin/sh\necho -n ${PT_message_one} ${PT_message_two}"
arguments = { message_one: 'Hello from task', message_two: 'Goodbye' }
with_tempfile_containing('tasks test', contents) do |file|
expect(ssh._run_task(file.path, 'environment', arguments).error['msg']).to eq("no tmpdir")
end
end
end
end
context "with sudo" do
let(:config) {
Bolt::Config.new(insecure: true, sudo: true,
sudo_password: password, run_as: 'root')
}
let(:ssh) { Bolt::SSH.new(hostname, port, user, password, config: config) }
before(:each) { ssh.connect }
after(:each) { ssh.disconnect }
it "can execute a command", ssh: true do
expect(ssh._run_command('whoami').stdout).to eq("root\n")
end
it "can run a task passing input on stdin", ssh: true do
contents = "#!/bin/sh\ngrep 'message_one'"
arguments = { message_one: 'Hello from task', message_two: 'Goodbye' }
with_tempfile_containing('tasks test stdin', contents) do |file|
expect(ssh._run_task(file.path, 'stdin', arguments).value)
.to eq("message_one" => "Hello from task", "message_two" => "Goodbye")
end
end
context "requesting a pty" do
let(:config) {
Bolt::Config.new(insecure: true, sudo: true, sudo_password: password,
run_as: 'root', tty: true)
}
it "can execute a command when a tty is requested", ssh: true do
expect(ssh._run_command('whoami').stdout).to eq("\r\nroot\r\n")
end
end
end
end
| 1 | 7,054 | This looks like it will pass an array as `:key` in the default case. That doesn't reflect how this will actually work in practice. | puppetlabs-bolt | rb |
@@ -0,0 +1,11 @@
+#if NETFRAMEWORK
+namespace Datadog.Trace.IntegrationTests
+{
+ public static class Program
+ {
+ public static void Main()
+ {
+ }
+ }
+}
+#endif | 1 | 1 | 19,359 | Do we need this? | DataDog-dd-trace-dotnet | .cs |
|
@@ -282,6 +282,11 @@ func (b *ofFlowBuilder) MatchConjID(value uint32) FlowBuilder {
return b
}
+func (b *ofFlowBuilder) MatchPriority(priority uint16) FlowBuilder {
+ b.Match.Priority = priority
+ return b
+}
+
// MatchProtocol adds match condition for matching protocol type.
func (b *ofFlowBuilder) MatchProtocol(protocol Protocol) FlowBuilder {
switch protocol { | 1 | package openflow
import (
"fmt"
"net"
"strings"
"github.com/contiv/libOpenflow/openflow13"
"github.com/contiv/ofnet/ofctrl"
)
type ofFlowBuilder struct {
ofFlow
}
func (b *ofFlowBuilder) MatchTunMetadata(index int, data uint32) FlowBuilder {
rng := openflow13.NewNXRange(0, 31)
tm := &ofctrl.NXTunMetadata{
ID: index,
Data: data,
Range: rng,
}
b.ofFlow.Match.TunMetadatas = append(b.ofFlow.Match.TunMetadatas, tm)
return b
}
func (b *ofFlowBuilder) SetHardTimeout(timeout uint16) FlowBuilder {
	b.ofFlow.HardTimeout = timeout
return b
}
func (b *ofFlowBuilder) SetIdleTimeout(timeout uint16) FlowBuilder {
b.ofFlow.IdleTimeout = timeout
return b
}
func (b *ofFlowBuilder) Done() Flow {
if b.ctStates != nil {
b.Flow.Match.CtStates = b.ctStates
b.ctStates = nil
}
if b.ctStateString != "" {
b.matchers = append(b.matchers, b.ctStateString)
b.ctStateString = ""
}
return &b.ofFlow
}
// MatchReg adds match condition for matching data in the target register.
func (b *ofFlowBuilder) MatchReg(regID int, data uint32) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("reg%d=0x%x", regID, data))
reg := &ofctrl.NXRegister{
ID: regID,
Data: data,
}
b.Match.NxRegs = append(b.Match.NxRegs, reg)
return b
}
// MatchRegRange adds match condition for matching data in the target register at specified range.
func (b *ofFlowBuilder) MatchRegRange(regID int, data uint32, rng Range) FlowBuilder {
if rng[0] > 0 {
data <<= rng[0]
}
reg := &ofctrl.NXRegister{
ID: regID,
Data: data,
Range: rng.ToNXRange(),
}
b.Match.NxRegs = append(b.Match.NxRegs, reg)
return b
}
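// Illustrative example (assumption, not part of the original file):
// MatchRegRange(0, 0x1, Range{16, 31}) left-shifts the value to 0x10000 so it
// aligns with bits 16..31 of NXM register 0 before the match is installed.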
func (b *ofFlowBuilder) addCTStateString(value string) {
if b.ctStateString == "" {
b.ctStateString = fmt.Sprintf("ct_state=%s", value)
} else {
b.ctStateString += value
}
}
func (b *ofFlowBuilder) MatchCTStateNew(set bool) FlowBuilder {
if b.ctStates == nil {
b.ctStates = openflow13.NewCTStates()
}
if set {
b.ctStates.SetNew()
b.addCTStateString("+new")
} else {
b.ctStates.UnsetNew()
b.addCTStateString("-trk")
}
return b
}
func (b *ofFlowBuilder) MatchCTStateRel(set bool) FlowBuilder {
if b.ctStates == nil {
b.ctStates = openflow13.NewCTStates()
}
if set {
b.ctStates.SetRel()
b.addCTStateString("+rel")
} else {
b.ctStates.UnsetRel()
b.addCTStateString("-rel")
}
return b
}
func (b *ofFlowBuilder) MatchCTStateRpl(set bool) FlowBuilder {
if b.ctStates == nil {
b.ctStates = openflow13.NewCTStates()
}
if set {
b.ctStates.SetRpl()
b.addCTStateString("+rpl")
} else {
b.ctStates.UnsetRpl()
b.addCTStateString("-rpl")
}
return b
}
func (b *ofFlowBuilder) MatchCTStateEst(set bool) FlowBuilder {
if b.ctStates == nil {
b.ctStates = openflow13.NewCTStates()
}
if set {
b.ctStates.SetEst()
b.addCTStateString("+est")
} else {
b.ctStates.UnsetEst()
b.addCTStateString("-est")
}
return b
}
func (b *ofFlowBuilder) MatchCTStateTrk(set bool) FlowBuilder {
if b.ctStates == nil {
b.ctStates = openflow13.NewCTStates()
}
if set {
b.ctStates.SetTrk()
b.addCTStateString("+trk")
} else {
b.ctStates.UnsetTrk()
b.addCTStateString("-trk")
}
return b
}
func (b *ofFlowBuilder) MatchCTStateInv(set bool) FlowBuilder {
if b.ctStates == nil {
b.ctStates = openflow13.NewCTStates()
}
if set {
b.ctStates.SetInv()
b.addCTStateString("+inv")
} else {
b.ctStates.UnsetInv()
b.addCTStateString("-inv")
}
return b
}
// MatchCTMark adds match condition for matching ct_mark.
func (b *ofFlowBuilder) MatchCTMark(value uint32) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("ct_mark=%d", value))
b.ofFlow.Match.CtMark = value
return b
}
// MatchCTMarkMask sets the mask of ct_mark. The mask is used only if ct_mark is set.
func (b *ofFlowBuilder) MatchCTMarkMask(mask uint32) FlowBuilder {
if b.Flow.Match.CtMark > 0 {
b.ofFlow.Match.CtMarkMask = &mask
for i, data := range b.matchers {
if strings.HasPrefix(data, "ct_mark=") {
b.matchers[i] = fmt.Sprintf("%s/0x%x", data, mask)
break
}
}
}
return b
}
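// For example (hypothetical values): MatchCTMark(2) followed by
// MatchCTMarkMask(0xf) rewrites the recorded matcher to "ct_mark=2/0xf".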
// MatchInPort adds match condition for matching in_port.
func (b *ofFlowBuilder) MatchInPort(inPort uint32) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("in_port=%d", inPort))
b.Match.InputPort = inPort
return b
}
// MatchDstIP adds match condition for matching destination IP address.
func (b *ofFlowBuilder) MatchDstIP(ip net.IP) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("nw_dst=%s", ip.String()))
b.Match.IpDa = &ip
return b
}
// MatchDstIPNet adds match condition for matching destination IP CIDR.
func (b *ofFlowBuilder) MatchDstIPNet(ipnet net.IPNet) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("nw_dst=%s", ipnet.String()))
b.Match.IpDa = &ipnet.IP
b.Match.IpDaMask = maskToIPv4(ipnet.Mask)
return b
}
func maskToIPv4(mask net.IPMask) *net.IP {
ip := net.IPv4(mask[0], mask[1], mask[2], mask[3])
return &ip
}
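// For example, a /24 mask (net.CIDRMask(24, 32)) converts to 255.255.255.0.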
// MatchSrcIP adds match condition for matching source IP address.
func (b *ofFlowBuilder) MatchSrcIP(ip net.IP) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("nw_src=%s", ip.String()))
b.Match.IpSa = &ip
return b
}
// MatchSrcIPNet adds match condition for matching source IP CIDR.
func (b *ofFlowBuilder) MatchSrcIPNet(ipnet net.IPNet) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("nw_src=%s", ipnet.String()))
b.Match.IpSa = &ipnet.IP
b.Match.IpSaMask = maskToIPv4(ipnet.Mask)
return b
}
// MatchDstMAC adds match condition for matching destination MAC address.
func (b *ofFlowBuilder) MatchDstMAC(mac net.HardwareAddr) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("dl_dst=%s", mac.String()))
b.Match.MacDa = &mac
return b
}
// MatchSrcMAC adds match condition for matching source MAC address.
func (b *ofFlowBuilder) MatchSrcMAC(mac net.HardwareAddr) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("dl_src=%s", mac.String()))
b.Match.MacSa = &mac
return b
}
// MatchARPSha adds match condition for matching ARP source host address.
func (b *ofFlowBuilder) MatchARPSha(mac net.HardwareAddr) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("arp_sha=%s", mac.String()))
b.Match.ArpSha = &mac
return b
}
// MatchARPTha adds match condition for matching ARP target host address.
func (b *ofFlowBuilder) MatchARPTha(mac net.HardwareAddr) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("arp_tha=%s", mac.String()))
b.Match.ArpTha = &mac
return b
}
// MatchARPSpa adds match condition for matching ARP source protocol address.
func (b *ofFlowBuilder) MatchARPSpa(ip net.IP) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("arp_spa=%s", ip.String()))
b.Match.ArpSpa = &ip
return b
}
// MatchARPTpa adds match condition for matching ARP target protocol address.
func (b *ofFlowBuilder) MatchARPTpa(ip net.IP) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("arp_tpa=%s", ip.String()))
b.Match.ArpTpa = &ip
return b
}
// MatchARPOp adds match condition for matching ARP operator.
func (b *ofFlowBuilder) MatchARPOp(op uint16) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("arp_op=%d", op))
b.Match.ArpOper = op
return b
}
// MatchConjID adds match condition for matching conj_id.
func (b *ofFlowBuilder) MatchConjID(value uint32) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("conj_id=%d", value))
b.Match.ConjunctionID = &value
return b
}
// MatchProtocol adds match condition for matching protocol type.
func (b *ofFlowBuilder) MatchProtocol(protocol Protocol) FlowBuilder {
switch protocol {
case ProtocolIP:
b.Match.Ethertype = 0x0800
case ProtocolARP:
b.Match.Ethertype = 0x0806
case ProtocolTCP:
b.Match.Ethertype = 0x0800
b.Match.IpProto = 6
case ProtocolUDP:
b.Match.Ethertype = 0x0800
b.Match.IpProto = 17
case ProtocolSCTP:
b.Match.Ethertype = 0x0800
b.Match.IpProto = 132
case ProtocolICMP:
b.Match.Ethertype = 0x0800
b.Match.IpProto = 1
}
b.protocol = protocol
return b
}
// MatchTCPDstPort adds match condition for matching TCP destination port.
func (b *ofFlowBuilder) MatchTCPDstPort(port uint16) FlowBuilder {
b.MatchProtocol(ProtocolTCP)
b.Match.TcpDstPort = port
// According to ovs-ofctl(8) man page, "tp_dst" is deprecated and "tcp_dst",
// "udp_dst", "sctp_dst" should be used for the destination port of TCP, UDP,
// SCTP respectively. However, OVS command line tools like ovs-ofctl and
// ovs-appctl still print flows with "tp_dst", so we also use "tp_dst" in flow
// matching string, as flow matching string can be used to look up matched
// flows from these tools' outputs.
b.matchers = append(b.matchers, fmt.Sprintf("tp_dst=%d", port))
return b
}
// MatchUDPDstPort adds match condition for matching UDP destination port.
func (b *ofFlowBuilder) MatchUDPDstPort(port uint16) FlowBuilder {
b.MatchProtocol(ProtocolUDP)
b.Match.UdpDstPort = port
b.matchers = append(b.matchers, fmt.Sprintf("tp_dst=%d", port))
return b
}
// MatchSCTPDstPort adds match condition for matching SCTP destination port.
func (b *ofFlowBuilder) MatchSCTPDstPort(port uint16) FlowBuilder {
b.MatchProtocol(ProtocolSCTP)
b.Match.SctpDstPort = port
b.matchers = append(b.matchers, fmt.Sprintf("tp_dst=%d", port))
return b
}
// MatchCTSrcIP matches the source IPv4 address of the connection tracker original direction tuple. This match requires
// a match to valid connection tracking state as a prerequisite, and valid connection tracking state matches include
// "+new", "+est", "+rel" and "+trk-inv".
func (b *ofFlowBuilder) MatchCTSrcIP(ip net.IP) FlowBuilder {
b.Match.CtIpSa = &ip
b.matchers = append(b.matchers, fmt.Sprintf("ct_nw_src=%s", ip.String()))
return b
}
// MatchCTSrcIPNet is the same as MatchCTSrcIP but supports IP masking.
func (b *ofFlowBuilder) MatchCTSrcIPNet(ipNet net.IPNet) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("nw_dst=%s", ipNet.String()))
b.Match.CtIpSa = &ipNet.IP
b.Match.CtIpSaMask = maskToIPv4(ipNet.Mask)
return b
}
// MatchCTDstIP matches the destination IPv4 address of the connection tracker original direction tuple. This match
// requires a match to valid connection tracking state as a prerequisite, and valid connection tracking state matches
// include "+new", "+est", "+rel" and "+trk-inv".
func (b *ofFlowBuilder) MatchCTDstIP(ip net.IP) FlowBuilder {
b.Match.CtIpDa = &ip
b.matchers = append(b.matchers, fmt.Sprintf("ct_nw_dst=%s", ip.String()))
return b
}
// MatchCTDstIPNet is the same as MatchCTDstIP but supports IP masking.
func (b *ofFlowBuilder) MatchCTDstIPNet(ipNet net.IPNet) FlowBuilder {
b.Match.CtIpDa = &ipNet.IP
b.Match.CtIpDaMask = maskToIPv4(ipNet.Mask)
b.matchers = append(b.matchers, fmt.Sprintf("ct_nw_dst=%s", ipNet.String()))
return b
}
// MatchCTSrcPort matches the transport source port of the connection tracker original direction tuple. This match requires
// a match to valid connection tracking state as a prerequisite, and valid connection tracking state matches include
// "+new", "+est", "+rel" and "+trk-inv".
func (b *ofFlowBuilder) MatchCTSrcPort(port uint16) FlowBuilder {
b.Match.CtTpSrcPort = port
b.matchers = append(b.matchers, fmt.Sprintf("ct_tp_src=%d", port))
return b
}
// MatchCTDstPort matches the transport destination port of the connection tracker original direction tuple. This match
// requires a match to valid connection tracking state as a prerequisite, and valid connection tracking state matches
// include "+new", "+est", "+rel" and "+trk-inv".
func (b *ofFlowBuilder) MatchCTDstPort(port uint16) FlowBuilder {
b.Match.CtTpDstPort = port
b.matchers = append(b.matchers, fmt.Sprintf("ct_tp_dst=%d", port))
return b
}
// MatchCTProtocol matches the IP protocol type of the connection tracker original direction tuple. This match requires
// a match to valid connection tracking state as a prerequisite, and valid connection tracking state matches include
// "+new", "+est", "+rel" and "+trk-inv".
func (b *ofFlowBuilder) MatchCTProtocol(proto Protocol) FlowBuilder {
switch proto {
case ProtocolTCP:
b.Match.CtIpProto = 6
case ProtocolUDP:
b.Match.CtIpProto = 17
case ProtocolSCTP:
b.Match.CtIpProto = 132
case ProtocolICMP:
b.Match.CtIpProto = 1
}
b.matchers = append(b.matchers, fmt.Sprintf("ct_nw_proto=%d", b.Match.CtIpProto))
return b
}
// Cookie sets cookie ID for the flow entry.
func (b *ofFlowBuilder) Cookie(cookieID uint64) FlowBuilder {
b.Flow.CookieID = cookieID
return b
}
// CookieMask sets cookie mask for the flow entry.
func (b *ofFlowBuilder) CookieMask(cookieMask uint64) FlowBuilder {
b.Flow.CookieMask = cookieMask
return b
}
func (b *ofFlowBuilder) Action() Action {
return &ofFlowAction{b}
}
| 1 | 18,904 | I am a bit confused about this function. If it is used to set priority, we actually use function "Table.BuildFlow(priority uint16)" to set the value. If it is used to parse the priority value, I think adding a property(if you don't want to consume Match.Priority directly) to cache it and consume that property should be simpler than using string regex. | antrea-io-antrea | go |
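A minimal sketch of the reviewer's suggestion, assuming the builder types shown above (the priorityCachingFlow type and Priority accessor below are hypothetical): record the priority when the flow is built via Table.BuildFlow(priority uint16), then consume the cached field instead of recovering the value from the flow's string form with a regex.

    package openflow

    // priorityCachingFlow remembers the priority it was built with so that
    // callers can read it back directly rather than parsing flow dump strings.
    type priorityCachingFlow struct {
        priority uint16
    }

    // Priority returns the value cached at build time.
    func (f *priorityCachingFlow) Priority() uint16 {
        return f.priority
    }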
@@ -68,7 +68,7 @@ func main() {
}
// addUnresolvedPackage adds an unresolved node to the graph representing the
-// packged described in the PackgetVer structure. Returns an error if the node
+// packaged described in the PackgetVer structure. Returns an error if the node
// could not be created.
func addUnresolvedPackage(g *pkggraph.PkgGraph, pkgVer *pkgjson.PackageVer) (newRunNode *pkggraph.PkgNode, err error) {
logger.Log.Debugf("Adding unresolved %s", pkgVer) | 1 | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
package main
import (
"fmt"
"os"
"strings"
"gonum.org/v1/gonum/graph"
"gonum.org/v1/gonum/graph/topo"
"gopkg.in/alecthomas/kingpin.v2"
"microsoft.com/pkggen/internal/exe"
"microsoft.com/pkggen/internal/logger"
"microsoft.com/pkggen/internal/pkggraph"
"microsoft.com/pkggen/internal/pkgjson"
)
var (
app = kingpin.New("grapher", "Dependency graph generation tool")
input = exe.InputFlag(app, "Input json listing all local SRPMs")
output = exe.OutputFlag(app, "Output file to export the graph to")
logFile = exe.LogFileFlag(app)
logLevel = exe.LogLevelFlag(app)
strictGoals = app.Flag("strict-goals", "Don't allow missing goal packages").Bool()
strictUnresolved = app.Flag("strict-unresolved", "Don't allow missing unresolved packages").Bool()
depGraph = pkggraph.NewPkgGraph()
)
func main() {
app.Version(exe.ToolkitVersion)
kingpin.MustParse(app.Parse(os.Args[1:]))
var err error
logger.InitBestEffort(*logFile, *logLevel)
localPackages := pkgjson.PackageRepo{}
err = localPackages.ParsePackageJSON(*input)
if err != nil {
logger.Log.Panic(err)
}
err = populateGraph(depGraph, &localPackages)
if err != nil {
logger.Log.Panic(err)
}
err = validateGraph(depGraph)
if err != nil {
logger.Log.Panic(err)
}
// Add a default "ALL" goal to build everything local
_, err = depGraph.AddGoalNode("ALL", nil, *strictGoals)
if err != nil {
logger.Log.Panic(err)
}
err = pkggraph.WriteDOTGraphFile(depGraph, *output)
if err != nil {
logger.Log.Panic(err)
}
logger.Log.Info("Finished generating graph.")
}
// addUnresolvedPackage adds an unresolved node to the graph representing the
// packged described in the PackgetVer structure. Returns an error if the node
// could not be created.
func addUnresolvedPackage(g *pkggraph.PkgGraph, pkgVer *pkgjson.PackageVer) (newRunNode *pkggraph.PkgNode, err error) {
logger.Log.Debugf("Adding unresolved %s", pkgVer)
if *strictUnresolved {
err = fmt.Errorf("strict-unresolved does not allow unresolved packages, attempting to add %s", pkgVer)
return
}
nodes, err := g.FindBestPkgNode(pkgVer)
if err != nil {
return
}
if nodes != nil {
err = fmt.Errorf(`attempted to mark a local package "%+v" as unresolved`, pkgVer)
return
}
// Create a new node
newRunNode, err = g.AddPkgNode(pkgVer, pkggraph.StateUnresolved, pkggraph.TypeRemote, "<NO_SRPM_PATH>", "<NO_SPEC_PATH>", "<NO_SOURCE_PATH>", "<NO_ARCHITECTURE>", "<NO_REPO>")
if err != nil {
return
}
logger.Log.Infof("Adding unresolved node %s\n", newRunNode.FriendlyName())
return
}
// addNodesForPackage creates a "Run" and "Build" node for the package described
// in the PackageVer structure. Returns pointers to the build and run Nodes
// created, or an error if one of the nodes could not be created.
func addNodesForPackage(g *pkggraph.PkgGraph, pkgVer *pkgjson.PackageVer, pkg *pkgjson.Package) (newRunNode *pkggraph.PkgNode, newBuildNode *pkggraph.PkgNode, err error) {
nodes, err := g.FindExactPkgNodeFromPkg(pkgVer)
if err != nil {
return
}
if nodes != nil {
logger.Log.Warnf(`Duplicate package name for package %+v read from SRPM "%s" (Previous: %+v)`, pkgVer, pkg.SrpmPath, nodes.RunNode)
err = nil
if nodes.RunNode != nil {
newRunNode = nodes.RunNode
}
if nodes.BuildNode != nil {
newBuildNode = nodes.BuildNode
}
}
if newRunNode == nil {
// Add "Run" node
		newRunNode, err = g.AddPkgNode(pkgVer, pkggraph.StateMeta, pkggraph.TypeRun, pkg.SrpmPath, pkg.SpecPath, pkg.SourceDir, pkg.Architecture, "<LOCAL>")
		if err != nil {
			return
		}
		logger.Log.Debugf("Adding run node %s with id %d\n", newRunNode.FriendlyName(), newRunNode.ID())
}
if newBuildNode == nil {
// Add "Build" node
		newBuildNode, err = g.AddPkgNode(pkgVer, pkggraph.StateBuild, pkggraph.TypeBuild, pkg.SrpmPath, pkg.SpecPath, pkg.SourceDir, pkg.Architecture, "<LOCAL>")
		if err != nil {
			return
		}
		logger.Log.Debugf("Adding build node %s with id %d\n", newBuildNode.FriendlyName(), newBuildNode.ID())
}
// A "run" node has an implicit dependency on its coresponding "build" node, encode that here.
// SetEdge panics on error, and does not support looping edges.
newEdge := g.NewEdge(newRunNode, newBuildNode)
defer func() {
if r := recover(); r != nil {
logger.Log.Panicf("Adding edge failed for %+v", pkgVer)
}
}()
g.SetEdge(newEdge)
return
}
// addSingleDependency will add an edge between packageNode and the "Run" node for the
// dependency described in the PackageVer structure. Returns an error if the
// addition failed.
func addSingleDependency(g *pkggraph.PkgGraph, packageNode *pkggraph.PkgNode, dependency *pkgjson.PackageVer) error {
var dependentNode *pkggraph.PkgNode
logger.Log.Tracef("Adding a dependency from %+v to %+v", packageNode.VersionedPkg, dependency)
nodes, err := g.FindBestPkgNode(dependency)
if err != nil {
logger.Log.Errorf("Unable to check lookup list for %+v (%s)", dependency, err)
return err
}
if nodes == nil {
dependentNode, err = addUnresolvedPackage(g, dependency)
if err != nil {
logger.Log.Errorf(`Could not add a package "%s"`, dependency.Name)
return err
}
} else {
// All dependencies are assumed to be "Run" dependencies
dependentNode = nodes.RunNode
}
// SetEdge panics on error, and does not support looping edges.
newEdge := g.NewEdge(packageNode, dependentNode)
defer func() {
if r := recover(); r != nil {
logger.Log.Errorf("Failed to add edge failed between %+v and %+v", packageNode, dependency)
}
}()
if newEdge.To() == newEdge.From() {
logger.Log.Warnf("Package %+v requires itself!", packageNode)
} else {
g.SetEdge(newEdge)
}
return err
}
// addLocalPackage adds the package provided by the Package structure, and
// updates the SRPM path name
func addLocalPackage(g *pkggraph.PkgGraph, pkg *pkgjson.Package) error {
_, _, err := addNodesForPackage(g, pkg.Provides, pkg)
return err
}
// addDependencies adds edges for both build and runtime requirements for the
// package described in the Package structure. Returns an error if the edges
// could not be created.
func addPkgDependencies(g *pkggraph.PkgGraph, pkg *pkgjson.Package) (dependenciesAdded int, err error) {
provide := pkg.Provides
runDependencies := pkg.Requires
buildDependencies := pkg.BuildRequires
// Find the current node in the lookup list.
logger.Log.Debugf("Adding dependencies for package %s", pkg.SrpmPath)
nodes, err := g.FindExactPkgNodeFromPkg(provide)
if err != nil {
return
}
if nodes == nil {
return dependenciesAdded, fmt.Errorf("can't add dependencies to a missing package %+v", pkg)
}
runNode := nodes.RunNode
buildNode := nodes.BuildNode
// For each run time and build time dependency, add the edges
logger.Log.Tracef("Adding run dependencies")
for _, dependency := range runDependencies {
err = addSingleDependency(g, runNode, dependency)
if err != nil {
logger.Log.Errorf("Unable to add run-time dependencies for %+v", pkg)
return
}
dependenciesAdded++
}
logger.Log.Tracef("Adding build dependencies")
for _, dependency := range buildDependencies {
err = addSingleDependency(g, buildNode, dependency)
if err != nil {
logger.Log.Errorf("Unable to add build-time dependencies for %+v", pkg)
return
}
dependenciesAdded++
}
return
}
// populateGraph adds all the data contained in the PackageRepo structure into
// the graph.
func populateGraph(g *pkggraph.PkgGraph, repo *pkgjson.PackageRepo) (err error) {
packages := repo.Repo
// Scan and add each package we know about
logger.Log.Infof("Adding all packages from %s", *input)
// NOTE: range iterates by value, not reference. Manually access slice
for idx := range packages {
pkg := packages[idx]
err = addLocalPackage(g, pkg)
if err != nil {
logger.Log.Errorf("Failed to add local package %+v", pkg)
return err
}
}
logger.Log.Infof("\tAdded %d packages", len(packages))
// Rescan and add all the dependencies
logger.Log.Infof("Adding all dependencies from %s", *input)
dependenciesAdded := 0
for idx := range packages {
pkg := packages[idx]
num, err := addPkgDependencies(g, pkg)
if err != nil {
logger.Log.Errorf("Failed to add dependency %+v", pkg)
return err
}
dependenciesAdded += num
}
logger.Log.Infof("\tAdded %d dependencies", dependenciesAdded)
return err
}
// fixCycle attempts to fix a cycle. Cycles may be acceptable if all nodes are from the same spec file.
// If a cycle can be fixed an additional meta node will be added to represent the interdependencies of the cycle.
func fixCycle(g *pkggraph.PkgGraph, cycle []*pkggraph.PkgNode) (err error) {
specFile := cycle[0].SrpmPath
// Omit the first element of the cycle, since it is repeated as the last element
trimmedCycle := cycle[1:]
logger.Log.Debugf("Found cycle starting at %s", cycle[0].FriendlyName())
// For each node, remove any edges which point to other nodes in the cycle, and move any remaining dependencies to a new
// meta node, then have everything in the cycle depend on the new meta node.
groupedDependencies := make(map[int64]bool)
for _, currentNode := range trimmedCycle {
logger.Log.Tracef("\tCycle node: %s", currentNode.FriendlyName())
if currentNode.Type == pkggraph.TypeBuild {
return fmt.Errorf("cycle contains build dependencies, unresolvable")
}
// Remove all links to other members of the cycle
for _, nodeInCycle := range trimmedCycle {
g.RemoveEdge(currentNode.ID(), nodeInCycle.ID())
}
// Record any other dependencies the nodes have (ie, where can we get to from here), then remove them
fromNodes := graph.NodesOf(g.From(currentNode.ID()))
for _, from := range fromNodes {
groupedDependencies[from.ID()] = true
g.RemoveEdge(currentNode.ID(), from.ID())
}
}
// Convert the IDs back into actual nodes
dependencyNodes := make([]*pkggraph.PkgNode, 0, len(groupedDependencies))
for id := range groupedDependencies {
dependencyNodes = append(dependencyNodes, g.Node(id).(*pkggraph.PkgNode).This)
}
metaNode := g.AddMetaNode(trimmedCycle, dependencyNodes)
// Enable cycle detection between meta nodes within the same spec file
metaNode.SrpmPath = specFile
return
}
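// Toy illustration (hypothetical packages): for a cycle A -> B -> A where A
// also depends on X, the intra-cycle edges and A -> X are removed, a meta node
// M depending on X is added, and both A and B are left depending on M.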
// validateGraph makes sure the graph is a directed acyclic graph (DAG)
func validateGraph(g *pkggraph.PkgGraph) (err error) {
cycles := topo.DirectedCyclesIn(g)
// Try to fix the cycles if we can before reporting them
// Keep track of which cycles we've failed to fix
unfixableCycleCount := 0
for len(cycles) > 0 {
for _, cycle := range cycles {
// Convert our list to pkggraph.PkgNodes
pkgCycle := make([]*pkggraph.PkgNode, 0, len(cycle))
for _, node := range cycle {
pkgCycle = append(pkgCycle, node.(*pkggraph.PkgNode).This)
}
err = fixCycle(g, pkgCycle)
if err != nil {
var cycleStringBuilder strings.Builder
logger.Log.Errorf("Error found while resolving package dependency cycles: %v", err)
fmt.Fprintf(&cycleStringBuilder, "{%s}", pkgCycle[0].FriendlyName())
for _, node := range pkgCycle[1:] {
fmt.Fprintf(&cycleStringBuilder, " --> {%s}", node.FriendlyName())
}
logger.Log.Errorf("Unfixable circular dependency of length %d:\t%s", unfixableCycleCount, cycleStringBuilder.String())
unfixableCycleCount++
}
}
if unfixableCycleCount > 0 {
err = fmt.Errorf("cycles detected in dependency graph")
return err
}
// Recalculate the list of cycles
cycles = topo.DirectedCyclesIn(g)
}
return
}
| 1 | 15,157 | In this case we probably meant "package". | microsoft-CBL-Mariner | go |
@@ -0,0 +1,19 @@
+const fs = require('fs');
+const { promisify } = require('util');
+const { dirname: getDirName } = require('path');
+const makeDir = require('make-dir');
+const writeFile = promisify(fs.writeFile);
+
+/**
+ * Create file with given contents at specified location
+ * @method createFile
+ * @param {String} path file path, inclusive of file name
+ * @param {String} content contents of the file
+ * @returns {Promise}
+ */
+const createFile = (path, content) =>
+ makeDir(getDirName(path)).then(() => writeFile(path, content));
+
+module.exports = {
+ createFile
+}; | 1 | 1 | 13,742 | Just export `createFile` directly. | dequelabs-axe-core | js |
|
@@ -175,6 +175,7 @@ func DefaultConfiguration() *Configuration {
config.Docker.ResultsTimeout = cli.Duration(20 * time.Second)
config.Docker.RemoveTimeout = cli.Duration(20 * time.Second)
config.Go.GoTool = "go"
+ config.Go.BuildIdTool = "//tools/go_buildid_replacer"
config.Go.CgoCCTool = "gcc"
config.Go.GoPath = "$TMP_DIR:$TMP_DIR/src:$TMP_DIR/$PKG_DIR:$TMP_DIR/third_party/go:$TMP_DIR/third_party/"
config.Python.PipTool = "pip3" | 1 | // Utilities for reading the Please config files.
package core
import (
"crypto/sha1"
"encoding/gob"
"fmt"
"os"
"path"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/jessevdk/go-flags"
"gopkg.in/gcfg.v1"
"cli"
)
// OsArch is the os/arch pair, like linux_amd64 etc.
const OsArch = runtime.GOOS + "_" + runtime.GOARCH
// ConfigFileName is the file name for the typical repo config - this is normally checked in
const ConfigFileName string = ".plzconfig"
// ArchConfigFileName is the architecture-specific config file which overrides the repo one.
// Also normally checked in if needed.
const ArchConfigFileName string = ".plzconfig_" + OsArch
// LocalConfigFileName is the file name for the local repo config - this is not normally checked
// in and used to override settings on the local machine.
const LocalConfigFileName string = ".plzconfig.local"
// MachineConfigFileName is the file name for the machine-level config - can use this to override
// things for a particular machine (eg. build machine with different caching behaviour).
const MachineConfigFileName = "/etc/plzconfig"
// UserConfigFileName is the file name for user-specific config (for all their repos).
const UserConfigFileName = "~/.please/plzconfig"
// The available container implementations that we support.
const (
ContainerImplementationNone = "none"
ContainerImplementationDocker = "docker"
)
func readConfigFile(config *Configuration, filename string) error {
log.Debug("Reading config from %s...", filename)
if err := gcfg.ReadFileInto(config, filename); err != nil && os.IsNotExist(err) {
return nil // It's not an error to not have the file at all.
} else if gcfg.FatalOnly(err) != nil {
return err
} else if err != nil {
log.Warning("Error in config file: %s", err)
}
return nil
}
// ReadConfigFiles reads all the config locations, in order, and merges them into a config object.
// Values are filled in by defaults initially and then overridden by each file in turn.
func ReadConfigFiles(filenames []string, profile string) (*Configuration, error) {
config := DefaultConfiguration()
for _, filename := range filenames {
if err := readConfigFile(config, filename); err != nil {
return config, err
}
if profile != "" {
if err := readConfigFile(config, filename+"."+profile); err != nil {
return config, err
}
}
}
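	// For example (hypothetical profile name): with profile "ci", a read of
	// ".plzconfig" is followed by an attempt to read ".plzconfig.ci".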
// Set default values for slices. These add rather than overwriting so we can't set
// them upfront as we would with other config values.
if usingBazelWorkspace {
setDefault(&config.Parse.BuildFileName, []string{"BUILD.bazel", "BUILD"})
} else {
setDefault(&config.Parse.BuildFileName, []string{"BUILD"})
}
setDefault(&config.Build.Path, []string{"/usr/local/bin", "/usr/bin", "/bin"})
setDefault(&config.Build.PassEnv, []string{})
setDefault(&config.Cover.FileExtension, []string{".go", ".py", ".java", ".js", ".cc", ".h", ".c"})
setDefault(&config.Cover.ExcludeExtension, []string{".pb.go", "_pb2.py", ".pb.cc", ".pb.h", "_test.py", "_test.go", "_pb.go", "_bindata.go", "_test_main.cc"})
setDefault(&config.Proto.Language, []string{"cc", "py", "java", "go", "js"})
// Default values for these guys depend on config.Please.Location.
defaultPath(&config.Go.TestTool, config.Please.Location, "please_go_test")
defaultPath(&config.Go.FilterTool, config.Please.Location, "please_go_filter")
defaultPath(&config.Python.PexTool, config.Please.Location, "please_pex")
defaultPath(&config.Java.JavacWorker, config.Please.Location, "javac_worker")
defaultPath(&config.Java.JarCatTool, config.Please.Location, "jarcat")
defaultPath(&config.Java.PleaseMavenTool, config.Please.Location, "please_maven")
defaultPath(&config.Java.JUnitRunner, config.Please.Location, "junit_runner.jar")
// Default values for these guys depend on config.Java.JavaHome if that's been set.
if config.Java.JavaHome != "" {
defaultPathIfExists(&config.Java.JlinkTool, config.Java.JavaHome, "bin/jlink")
}
if (config.Cache.RPCPrivateKey == "") != (config.Cache.RPCPublicKey == "") {
return config, fmt.Errorf("Must pass both rpcprivatekey and rpcpublickey properties for cache")
}
// We can only verify options by reflection (we need struct tags) so run them quickly through this.
return config, config.ApplyOverrides(map[string]string{
"test.defaultcontainer": config.Test.DefaultContainer,
"python.testrunner": config.Python.TestRunner,
})
}
// setDefault sets a slice of strings in the config if the set one is empty.
func setDefault(conf *[]string, def []string) {
if len(*conf) == 0 {
*conf = def
}
}
// defaultPath sets a variable to a location in a directory if it's not already set.
func defaultPath(conf *string, dir, file string) {
if *conf == "" {
*conf = path.Join(dir, file)
}
}
// defaultPathIfExists sets a variable to a location in a directory if it's not already set and if the location exists.
func defaultPathIfExists(conf *string, dir, file string) {
if *conf == "" {
location := path.Join(dir, file)
// check that the location is valid
if _, err := os.Stat(location); err == nil {
*conf = location
}
}
}
// DefaultConfiguration returns the default configuration object with no overrides.
func DefaultConfiguration() *Configuration {
config := Configuration{buildEnvStored: &storedBuildEnv{}}
config.Please.Location = "~/.please"
config.Please.SelfUpdate = true
config.Please.Autoclean = true
config.Please.DownloadLocation = "https://get.please.build"
config.Please.NumOldVersions = 10
config.Build.Arch = cli.NewArch(runtime.GOOS, runtime.GOARCH)
config.Build.Lang = "en_GB.UTF-8" // Not the language of the UI, the language passed to rules.
config.Build.Nonce = "1402" // Arbitrary nonce to invalidate config when needed.
config.Build.Timeout = cli.Duration(10 * time.Minute)
config.Build.Config = "opt" // Optimised builds by default
config.Build.FallbackConfig = "opt" // Optimised builds as a fallback on any target that doesn't have a matching one set
config.Build.PleaseSandboxTool = "please_sandbox"
config.BuildConfig = map[string]string{}
config.BuildEnv = map[string]string{}
config.Aliases = map[string]string{}
config.Cache.HTTPTimeout = cli.Duration(5 * time.Second)
config.Cache.RPCTimeout = cli.Duration(5 * time.Second)
config.Cache.Dir = ".plz-cache"
config.Cache.DirCacheHighWaterMark = 10 * cli.GiByte
config.Cache.DirCacheLowWaterMark = 8 * cli.GiByte
config.Cache.DirClean = true
config.Cache.Workers = runtime.NumCPU() + 2 // Mirrors the number of workers in please.go.
config.Cache.RPCMaxMsgSize.UnmarshalFlag("200MiB")
config.Metrics.PushFrequency = cli.Duration(400 * time.Millisecond)
config.Metrics.PushTimeout = cli.Duration(500 * time.Millisecond)
config.Metrics.PerUser = true
config.Test.Timeout = cli.Duration(10 * time.Minute)
config.Test.DefaultContainer = ContainerImplementationDocker
config.Docker.DefaultImage = "ubuntu:trusty"
config.Docker.AllowLocalFallback = false
config.Docker.Timeout = cli.Duration(20 * time.Minute)
config.Docker.ResultsTimeout = cli.Duration(20 * time.Second)
config.Docker.RemoveTimeout = cli.Duration(20 * time.Second)
config.Go.GoTool = "go"
config.Go.CgoCCTool = "gcc"
config.Go.GoPath = "$TMP_DIR:$TMP_DIR/src:$TMP_DIR/$PKG_DIR:$TMP_DIR/third_party/go:$TMP_DIR/third_party/"
config.Python.PipTool = "pip3"
config.Python.DefaultInterpreter = "python3"
config.Python.TestRunner = "unittest"
config.Python.UsePyPI = true
// Annoyingly pip on OSX doesn't seem to work with this flag (you get the dreaded
// "must supply either home or prefix/exec-prefix" error). Goodness knows why *adding* this
// flag - which otherwise seems exactly what we want - provokes that error, but the logic
// of pip is rather a mystery to me.
if runtime.GOOS != "darwin" {
config.Python.PipFlags = "--isolated"
}
config.Java.DefaultTestPackage = ""
config.Java.SourceLevel = "8"
config.Java.TargetLevel = "8"
config.Java.ReleaseLevel = ""
config.Java.DefaultMavenRepo = []cli.URL{"https://repo1.maven.org/maven2"}
config.Java.JavacFlags = "-Werror -Xlint:-options" // bootstrap class path warnings are pervasive without this.
config.Java.JlinkTool = "jlink"
config.Java.JavaHome = ""
config.Cpp.CCTool = "gcc"
config.Cpp.CppTool = "g++"
config.Cpp.LdTool = "ld"
config.Cpp.ArTool = "ar"
config.Cpp.AsmTool = "nasm"
config.Cpp.DefaultOptCflags = "--std=c99 -O3 -pipe -DNDEBUG -Wall -Werror"
config.Cpp.DefaultDbgCflags = "--std=c99 -g3 -pipe -DDEBUG -Wall -Werror"
config.Cpp.DefaultOptCppflags = "--std=c++11 -O3 -pipe -DNDEBUG -Wall -Werror"
config.Cpp.DefaultDbgCppflags = "--std=c++11 -g3 -pipe -DDEBUG -Wall -Werror"
config.Cpp.Coverage = true
config.Proto.ProtocTool = "protoc"
// We're using the most common names for these; typically gRPC installs the builtin plugins
// as grpc_python_plugin etc.
config.Proto.ProtocGoPlugin = "protoc-gen-go"
config.Proto.GrpcPythonPlugin = "grpc_python_plugin"
config.Proto.GrpcJavaPlugin = "protoc-gen-grpc-java"
config.Proto.GrpcCCPlugin = "grpc_cpp_plugin"
config.Proto.PythonDep = "//third_party/python:protobuf"
config.Proto.JavaDep = "//third_party/java:protobuf"
config.Proto.GoDep = "//third_party/go:protobuf"
config.Proto.JsDep = ""
config.Proto.PythonGrpcDep = "//third_party/python:grpc"
config.Proto.JavaGrpcDep = "//third_party/java:grpc-all"
config.Proto.GoGrpcDep = "//third_party/go:grpc"
config.Bazel.Compatibility = usingBazelWorkspace
return &config
}
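// For illustration (hypothetical file contents): since the config is read
// with gcfg, a .plzconfig entry such as
//
//	[go]
//	gotool = go1.11
//
// overrides the config.Go.GoTool default set above when readConfigFile runs.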
// A Configuration contains all the settings that can be configured about Please.
// This is parsed from .plzconfig etc; we also auto-generate help messages from its tags.
type Configuration struct {
Please struct {
Version cli.Version `help:"Defines the version of plz that this repo is supposed to use currently. If it's not present or the version matches the currently running version no special action is taken; otherwise if SelfUpdate is set Please will attempt to download an appropriate version, otherwise it will issue a warning and continue.\n\nNote that if this is not set, you can run plz update to update to the latest version available on the server." var:"PLZ_VERSION"`
Location string `help:"Defines the directory Please is installed into.\nDefaults to ~/.please but you might want it to be somewhere else if you're installing via another method (e.g. the debs and install script still use /opt/please)."`
SelfUpdate bool `help:"Sets whether plz will attempt to update itself when the version set in the config file is different."`
DownloadLocation cli.URL `help:"Defines the location to download Please from when self-updating. Defaults to the Please web server, but you can point it to some location of your own if you prefer to keep traffic within your network or use home-grown versions."`
NumOldVersions int `help:"Number of old versions to keep from autoupdates."`
Autoclean bool `help:"Automatically clean stale versions without prompting"`
NumThreads int `help:"Number of parallel build operations to run.\nIs overridden by the equivalent command-line flag, if that's passed." example:"6"`
Motd []string `help:"Message of the day; is displayed once at the top during builds. If multiple are given, one is randomly chosen."`
DefaultRepo string `help:"Location of the default repository; this is used if plz is invoked when not inside a repo, it changes to that directory then does its thing."`
} `help:"The [please] section in the config contains non-language-specific settings defining how Please should operate."`
Parse struct {
ExperimentalDir []string `help:"Directory containing experimental code. This is subject to some extra restrictions:\n - Code in the experimental dir can override normal visibility constraints\n - Code outside the experimental dir can never depend on code inside it\n - Tests are excluded from general detection." example:"experimental"`
BuildFileName []string `help:"Sets the names that Please uses instead of BUILD for its build files.\nFor clarity the documentation refers to them simply as BUILD files but you could reconfigure them here to be something else.\nOne case this can be particularly useful is in cases where you have a subdirectory named build on a case-insensitive file system like HFS+."`
BlacklistDirs []string `help:"Directories to blacklist when recursively searching for BUILD files (e.g. when using plz build ... or similar).\nThis is generally useful when you have large directories within your repo that don't need to be searched, especially things like node_modules that have come from external package managers."`
PreloadBuildDefs []string `help:"Files to preload by the parser before loading any BUILD files.\nSince this is done before the first package is parsed they must be files in the repository, they cannot be subinclude() paths." example:"build_defs/go_bindata.build_defs"`
} `help:"The [parse] section in the config contains settings specific to parsing files."`
Display struct {
UpdateTitle bool `help:"Updates the title bar of the shell window Please is running in as the build progresses. This isn't on by default because not everyone's shell is configured to reset it again after and we don't want to alter it forever."`
SystemStats bool `help:"Whether or not to show basic system resource usage in the interactive display. Has no effect without that configured."`
} `help:"Please has an animated display mode which shows the currently building targets.\nBy default it will autodetect whether it is using an interactive TTY session and choose whether to use it or not, although you can force it on or off via flags.\n\nThe display is heavily inspired by Buck's SuperConsole."`
Events struct {
Port int `help:"Port to start the streaming build event server on."`
} `help:"The [events] section in the config contains settings relating to the internal build event system & streaming them externally."`
Build struct {
Arch cli.Arch `help:"Architecture to compile for. Defaults to the host architecture."`
Timeout cli.Duration `help:"Default timeout for Dockerised tests, in seconds. Default is twenty minutes."`
Path []string `help:"The PATH variable that will be passed to the build processes.\nDefaults to /usr/local/bin:/usr/bin:/bin but of course can be modified if you need to get binaries from other locations." example:"/usr/local/bin:/usr/bin:/bin"`
Config string `help:"The build config to use when one is not chosen on the command line. Defaults to opt." example:"opt | dbg"`
FallbackConfig string `help:"The build config to use when one is chosen and a required target does not have one by the same name. Also defaults to opt." example:"opt | dbg"`
Lang string `help:"Sets the language passed to build rules when building. This can be important for some tools (although hopefully not many) - we've mostly observed it with Sass."`
Sandbox bool `help:"True to sandbox individual build actions, which isolates them using namespaces. Only works on Linux and requires please_sandbox to be installed separately." var:"BUILD_SANDBOX"`
PleaseSandboxTool string `help:"The location of the please_sandbox tool to use."`
Nonce string `help:"This is an arbitrary string that is added to the hash of every build target. It provides a way to force a rebuild of everything when it's changed.\nWe will bump the default of this whenever we think it's required - although it's been a pretty long time now and we hope that'll continue."`
PassEnv []string `help:"A list of environment variables to pass from the current environment to build rules. For example\n\nPassEnv = HTTP_PROXY\n\nwould copy your HTTP_PROXY environment variable to the build env for any rules."`
}
BuildConfig map[string]string `help:"A section of arbitrary key-value properties that are made available in the BUILD language. These are often useful for writing custom rules that need some configurable property.\n\n[buildconfig]\nandroid-tools-version = 23.0.2\n\nFor example, the above can be accessed as CONFIG.ANDROID_TOOLS_VERSION."`
BuildEnv map[string]string `help:"A set of extra environment variables to define for build rules. For example:\n\n[buildenv]\nsecret-passphrase = 12345\n\nThis would become SECRET_PASSPHRASE for any rules. These can be useful for passing secrets into custom rules; any variables containing SECRET or PASSWORD won't be logged.\n\nIt's also useful if you'd like internal tools to honour some external variable."`
Cache struct {
Workers int `help:"Number of workers for uploading artifacts to remote caches, which is done asynchronously."`
Dir string `help:"Sets the directory to use for the dir cache.\nThe default is .plz-cache, if set to the empty string the dir cache will be disabled."`
DirCacheHighWaterMark cli.ByteSize `help:"Starts cleaning the directory cache when it is over this number of bytes.\nCan also be given with human-readable suffixes like 10G, 200MB etc."`
DirCacheLowWaterMark cli.ByteSize `help:"When cleaning the directory cache, it's reduced to at most this size."`
DirClean bool `help:"Controls whether entries in the dir cache are cleaned or not. If disabled the cache will only grow."`
DirCompress bool `help:"Compresses stored artifacts in the dir cache. They are slower to store & retrieve but more compact."`
HTTPURL cli.URL `help:"Base URL of the HTTP cache.\nNot set to anything by default which means the cache will be disabled."`
HTTPWriteable bool `help:"If True this plz instance will write content back to the HTTP cache.\nBy default it runs in read-only mode."`
HTTPTimeout cli.Duration `help:"Timeout for operations contacting the HTTP cache, in seconds."`
RPCURL cli.URL `help:"Base URL of the RPC cache.\nNot set to anything by default which means the cache will be disabled."`
RPCWriteable bool `help:"If True this plz instance will write content back to the RPC cache.\nBy default it runs in read-only mode."`
RPCTimeout cli.Duration `help:"Timeout for operations contacting the RPC cache, in seconds."`
RPCPublicKey string `help:"File containing a PEM-encoded private key which is used to authenticate to the RPC cache." example:"my_key.pem"`
RPCPrivateKey string `help:"File containing a PEM-encoded certificate which is used to authenticate to the RPC cache." example:"my_cert.pem"`
RPCCACert string `help:"File containing a PEM-encoded certificate which is used to validate the RPC cache's certificate." example:"ca.pem"`
RPCSecure bool `help:"Forces SSL on for the RPC cache. It will be activated if any of rpcpublickey, rpcprivatekey or rpccacert are set, but this can be used if none of those are needed and SSL is still in use."`
RPCMaxMsgSize cli.ByteSize `help:"Maximum size of a single message that we'll send to the RPC server.\nThis should agree with the server's limit, if it's higher the artifacts will be rejected.\nThe value is given as a byte size so can be suffixed with M, GB, KiB, etc."`
} `help:"Please has several built-in caches that can be configured in its config file.\n\nThe simplest one is the directory cache which by default is written into the .plz-cache directory. This allows for fast retrieval of code that has been built before (for example, when swapping Git branches).\n\nThere is also a remote RPC cache which allows using a centralised server to store artifacts. A typical pattern here is to have your CI system write artifacts into it and give developers read-only access so they can reuse its work.\n\nFinally there's a HTTP cache which is very similar, but a little obsolete now since the RPC cache outperforms it and has some extra features. Otherwise the two have similar semantics and share quite a bit of implementation.\n\nPlease has server implementations for both the RPC and HTTP caches."`
Metrics struct {
PushGatewayURL cli.URL `help:"The URL of the pushgateway to send metrics to."`
PushFrequency cli.Duration `help:"The frequency, in milliseconds, to push statistics at." example:"400ms"`
PushTimeout cli.Duration `help:"Timeout on pushes to the metrics repository." example:"500ms"`
PerTest bool `help:"Emit per-test duration metrics. Off by default because they generate increased load on Prometheus."`
PerUser bool `help:"Emit per-user metrics. On by default for compatibility, but will generate more load on Prometheus."`
} `help:"A section of options relating to reporting metrics. Currently only pushing metrics to a Prometheus pushgateway is supported, which is enabled by the pushgatewayurl setting."`
CustomMetricLabels map[string]string `help:"Allows defining custom labels to be applied to metrics. The key is the name of the label, and the value is a command to be run, the output of which becomes the label's value. For example, to attach the current Git branch to all metrics:\n\n[custommetriclabels]\nbranch = git rev-parse --abbrev-ref HEAD\n\nBe careful when defining new labels, it is quite possible to overwhelm the metric collector by creating metric sets with too high cardinality."`
Test struct {
Timeout cli.Duration `help:"Default timeout applied to all tests. Can be overridden on a per-rule basis."`
DefaultContainer string `help:"Sets the default type of containerisation to use for tests that are given container = True.\nCurrently the only available option is 'docker', we expect to add support for more engines in future." options:"none,docker"`
Sandbox bool `help:"True to sandbox individual tests, which isolates them using namespaces. Somewhat experimental, only works on Linux and requires please_sandbox to be installed separately." var:"TEST_SANDBOX"`
}
Cover struct {
FileExtension []string `help:"Extensions of files to consider for coverage.\nDefaults to a reasonably obvious set for the builtin rules including .go, .py, .java, etc."`
ExcludeExtension []string `help:"Extensions of files to exclude from coverage.\nTypically this is for generated code; the default is to exclude protobuf extensions like .pb.go, _pb2.py, etc."`
}
Docker struct {
DefaultImage string `help:"The default image used for any test that doesn't specify another."`
AllowLocalFallback bool `help:"If True, will attempt to run the test locally if containerised running fails."`
Timeout cli.Duration `help:"Default timeout for containerised tests. Can be overridden on a per-rule basis."`
ResultsTimeout cli.Duration `help:"Timeout to wait when trying to retrieve results from inside the container. Default is 20 seconds."`
RemoveTimeout cli.Duration `help:"Timeout to wait when trying to remove a container after running a test. Defaults to 20 seconds."`
} `help:"Please supports running individual tests within Docker containers for isolation. This is useful for tests that mutate some global state (such as an embedded database, or open a server on a particular port). To do so, simply mark a test rule with container = True."`
Gc struct {
Keep []BuildLabel `help:"Marks targets that gc should always keep. Can include meta-targets such as //test/... and //docs:all."`
KeepLabel []string `help:"Defines a target label to be kept; for example, if you set this to go, no Go targets would ever be considered for deletion." example:"go"`
} `help:"Please supports a form of 'garbage collection', by which it means identifying targets that are not used for anything. By default binary targets and all their transitive dependencies are always considered non-garbage, as are any tests directly on those. The config options here allow tweaking this behaviour to retain more things.\n\nNote that it's a very good idea that your BUILD files are in the standard format when running this."`
Go struct {
GoTool string `help:"The binary to use to invoke Go & its subtools with." var:"GO_TOOL"`
GoRoot string `help:"If set, will set the GOROOT environment variable appropriately during build actions."`
TestTool string `help:"Sets the location of the please_go_test tool that is used to template the test main for go_test rules." var:"GO_TEST_TOOL"`
GoPath string `help:"If set, will set the GOPATH environment variable appropriately during build actions." var:"GOPATH"`
ImportPath string `help:"Sets the default Go import path at the root of this repository.\nFor example, in the Please repo, we might set it to github.com/thought-machine/please to allow imports from that package within the repo." var:"GO_IMPORT_PATH"`
CgoCCTool string `help:"Sets the location of CC while building cgo_library and cgo_test rules. Defaults to gcc" var:"CGO_CC_TOOL"`
FilterTool string `help:"Sets the location of the please_go_filter tool that is used to filter source files against build constraints." var:"GO_FILTER_TOOL"`
DefaultStatic bool `help:"Sets Go binaries to default to static linking. Note that enabling this may have negative consequences for some code, including Go's DNS lookup code in the net module." var:"GO_DEFAULT_STATIC"`
} `help:"Please has built-in support for compiling Go, and of course is written in Go itself.\nSee the config subfields or the Go rules themselves for more information.\n\nNote that Please is a bit more flexible than Go about directory layout - for example, it is possible to have multiple packages in a directory, but it's not a good idea to push this too far since Go's directory layout is inextricably linked with its import paths."`
Python struct {
PipTool string `help:"The tool that is invoked during pip_library rules." var:"PIP_TOOL"`
PipFlags string `help:"Additional flags to pass to pip invocations in pip_library rules." var:"PIP_FLAGS"`
PexTool string `help:"The tool that's invoked to build pexes. Defaults to please_pex in the install directory." var:"PEX_TOOL"`
DefaultInterpreter string `help:"The interpreter used for python_binary and python_test rules when none is specified on the rule itself. Defaults to python but you could of course set it to, say, pypy." var:"DEFAULT_PYTHON_INTERPRETER"`
TestRunner string `help:"The test runner used to discover & run Python tests; one of unittest, pytest or behave." var:"PYTHON_TEST_RUNNER" options:"unittest,pytest,behave"`
ModuleDir string `help:"Defines a directory containing modules from which they can be imported at the top level.\nBy default this is empty but by convention we define our pip_library rules in third_party/python and set this appropriately. Hence any of those third-party libraries that try something like import six will have it work as they expect, even though it's actually in a different location within the .pex." var:"PYTHON_MODULE_DIR"`
DefaultPipRepo cli.URL `help:"Defines a location for a pip repo to download wheels from.\nBy default pip_library uses PyPI (although see below on that) but you may well want to use this define another location to upload your own wheels to.\nIs overridden by the repo argument to pip_library." var:"PYTHON_DEFAULT_PIP_REPO"`
WheelRepo cli.URL `help:"Defines a location for a remote repo that python_wheel rules will download from. See python_wheel for more information." var:"PYTHON_WHEEL_REPO"`
UsePyPI bool `help:"Whether or not to use PyPI for pip_library rules or not. Defaults to true, if you disable this you will presumably want to set DefaultPipRepo to use one of your own.\nIs overridden by the use_pypi argument to pip_library." var:"USE_PYPI"`
WheelNameScheme string `help:"Defines a custom templatized wheel naming scheme. Templatized variables should be surrounded in curly braces, and the available options are: url_base, package_name, and version. The default search pattern is '{url_base}/{package_name}-{version}-${{OS}}-${{ARCH}}.whl' along with a few common variants." var:"PYTHON_WHEEL_NAME_SCHEME"`
} `help:"Please has built-in support for compiling Python.\nPlease's Python artifacts are pex files, which are essentially self-executable zip files containing all needed dependencies, bar the interpreter itself. This fits our aim of at least semi-static binaries for each language.\nSee https://github.com/pantsbuild/pex for more information.\nNote that due to differences between the environment inside a pex and outside some third-party code may not run unmodified (for example, it cannot simply open() files). It's possible to work around a lot of this, but if it all becomes too much it's possible to mark pexes as not zip-safe which typically resolves most of it at a modest speed penalty."`
Java struct {
JavacTool string `help:"Defines the tool used for the Java compiler. Defaults to javac." var:"JAVAC_TOOL"`
JlinkTool string `help:"Defines the tool used for the Java linker. Defaults to jlink." var:"JLINK_TOOL"`
JavaHome string `help:"Defines the path of the Java Home folder." var:"JAVA_HOME"`
JavacWorker string `help:"Defines the tool used for the Java persistent compiler. This is significantly (approx 4x) faster for large Java trees than invoking javac separately each time. Default to javac_worker in the install directory, but can be switched off to fall back to javactool and separate invocation." var:"JAVAC_WORKER"`
JarCatTool string `help:"Defines the tool used to concatenate .jar files which we use to build the output of java_binary, java_test and various other rules. Defaults to jarcat in the Please install directory." var:"JARCAT_TOOL"`
PleaseMavenTool string `help:"Defines the tool used to fetch information from Maven in maven_jars rules.\nDefaults to please_maven in the Please install directory." var:"PLEASE_MAVEN_TOOL"`
JUnitRunner string `help:"Defines the .jar containing the JUnit runner. This is built into all java_test rules since it's necessary to make JUnit do anything useful.\nDefaults to junit_runner.jar in the Please install directory." var:"JUNIT_RUNNER"`
DefaultTestPackage string `help:"The Java classpath to search for functions annotated with @Test. If not specified the compiled sources will be searched for files named *Test.java." var:"DEFAULT_TEST_PACKAGE"`
ReleaseLevel string `help:"The default Java release level when compiling.\nSourceLevel and TargetLevel are ignored if this is set. Bear in mind that this flag is only supported in Java version 9+." var:"JAVA_RELEASE_LEVEL"`
SourceLevel string `help:"The default Java source level when compiling. Defaults to 8." var:"JAVA_SOURCE_LEVEL"`
TargetLevel string `help:"The default Java bytecode level to target. Defaults to 8." var:"JAVA_TARGET_LEVEL"`
JavacFlags string `help:"Additional flags to pass to javac when compiling libraries." example:"-Xmx1200M" var:"JAVAC_FLAGS"`
JavacTestFlags string `help:"Additional flags to pass to javac when compiling tests." example:"-Xmx1200M" var:"JAVAC_TEST_FLAGS"`
DefaultMavenRepo []cli.URL `help:"Default location to load artifacts from in maven_jar rules. Can be overridden on a per-rule basis." var:"DEFAULT_MAVEN_REPO"`
} `help:"Please has built-in support for compiling Java.\nIt builds uber-jars for binary and test rules which contain all dependencies and can be easily deployed, and with the help of some of Please's additional tools they are deterministic as well.\n\nWe've only tested support for Java 7 and 8, although it's likely newer versions will work with little or no change."`
Cpp struct {
CCTool string `help:"The tool invoked to compile C code. Defaults to gcc but you might want to set it to clang, for example." var:"CC_TOOL"`
CppTool string `help:"The tool invoked to compile C++ code. Defaults to g++ but you might want to set it to clang++, for example." var:"CPP_TOOL"`
LdTool string `help:"The tool invoked to link object files. Defaults to ld but you could also set it to gold, for example." var:"LD_TOOL"`
ArTool string `help:"The tool invoked to archive static libraries. Defaults to ar." var:"AR_TOOL"`
AsmTool string `help:"The tool invoked as an assembler. Currently only used on OSX for cc_embed_binary rules and so defaults to nasm." var:"ASM_TOOL"`
LinkWithLdTool bool `help:"If true, instructs Please to use the tool set earlier in ldtool to link binaries instead of cctool.\nThis is an esoteric setting that most people don't want; a vanilla ld will not perform all steps necessary here (you'll get lots of missing symbol messages from having no libc etc). Generally best to leave this disabled unless you have very specific requirements." var:"LINK_WITH_LD_TOOL"`
DefaultOptCflags string `help:"Compiler flags passed to all C rules during opt builds; these are typically pretty basic things like what language standard you want to target, warning flags, etc.\nDefaults to --std=c99 -O3 -DNDEBUG -Wall -Wextra -Werror" var:"DEFAULT_OPT_CFLAGS"`
DefaultDbgCflags string `help:"Compiler rules passed to all C rules during dbg builds.\nDefaults to --std=c99 -g3 -DDEBUG -Wall -Wextra -Werror." var:"DEFAULT_DBG_CFLAGS"`
DefaultOptCppflags string `help:"Compiler flags passed to all C++ rules during opt builds; these are typically pretty basic things like what language standard you want to target, warning flags, etc.\nDefaults to --std=c++11 -O3 -DNDEBUG -Wall -Wextra -Werror" var:"DEFAULT_OPT_CPPFLAGS"`
DefaultDbgCppflags string `help:"Compiler rules passed to all C++ rules during dbg builds.\nDefaults to --std=c++11 -g3 -DDEBUG -Wall -Wextra -Werror." var:"DEFAULT_DBG_CPPFLAGS"`
DefaultLdflags string `help:"Linker flags passed to all C++ rules.\nBy default this is empty." var:"DEFAULT_LDFLAGS"`
DefaultNamespace string `help:"Namespace passed to all cc_embed_binary rules when not overridden by the namespace argument to that rule.\nNot set by default, if you want to use those rules you'll need to set it or pass it explicitly to each one." var:"DEFAULT_NAMESPACE"`
PkgConfigPath string `help:"Custom PKG_CONFIG_PATH for pkg-config.\nBy default this is empty." var:"PKG_CONFIG_PATH"`
Coverage bool `help:"If true (the default), coverage will be available for C and C++ build rules.\nThis is still a little experimental but should work for GCC. Right now it does not work for Clang (it likely will in Clang 4.0 which will likely support --fprofile-dir) and so this can be useful to disable it.\nIt's also useful in some cases for CI systems etc if you'd prefer to avoid the overhead, since the tests have to be compiled with extra instrumentation and without optimisation." var:"CPP_COVERAGE"`
} `help:"Please has built-in support for compiling C and C++ code. We don't support every possible nuance of compilation for these languages, but aim to provide something fairly straightforward.\nTypically there is little problem compiling & linking against system libraries although Please has no insight into those libraries and when they change, so cannot rebuild targets appropriately.\n\nThe C and C++ rules are very similar and simply take a different set of tools and flags to facilitate side-by-side usage."`
Proto struct {
ProtocTool string `help:"The binary invoked to compile .proto files. Defaults to protoc." var:"PROTOC_TOOL"`
ProtocGoPlugin string `help:"The binary passed to protoc as a plugin to generate Go code. Defaults to protoc-gen-go.\nWe've found this easier to manage with a go_get rule instead though, so you can also pass a build label here. See the Please repo for an example." var:"PROTOC_GO_PLUGIN"`
GrpcPythonPlugin string `help:"The plugin invoked to compile Python code for grpc_library.\nDefaults to protoc-gen-grpc-python." var:"GRPC_PYTHON_PLUGIN"`
GrpcJavaPlugin string `help:"The plugin invoked to compile Java code for grpc_library.\nDefaults to protoc-gen-grpc-java." var:"GRPC_JAVA_PLUGIN"`
GrpcCCPlugin string `help:"The plugin invoked to compile C++ code for grpc_library.\nDefaults to grpc_cpp_plugin." var:"GRPC_CC_PLUGIN"`
Language []string `help:"Sets the default set of languages that proto rules are built for.\nChosen from the set of {cc, java, go, py}.\nDefaults to all of them!" var:"PROTO_LANGUAGES"`
PythonDep string `help:"An in-repo dependency that's applied to any Python proto libraries." var:"PROTO_PYTHON_DEP"`
JavaDep string `help:"An in-repo dependency that's applied to any Java proto libraries." var:"PROTO_JAVA_DEP"`
GoDep string `help:"An in-repo dependency that's applied to any Go proto libraries." var:"PROTO_GO_DEP"`
JsDep string `help:"An in-repo dependency that's applied to any Javascript proto libraries." var:"PROTO_JS_DEP"`
PythonGrpcDep string `help:"An in-repo dependency that's applied to any Python gRPC libraries." var:"GRPC_PYTHON_DEP"`
JavaGrpcDep string `help:"An in-repo dependency that's applied to any Java gRPC libraries." var:"GRPC_JAVA_DEP"`
GoGrpcDep string `help:"An in-repo dependency that's applied to any Go gRPC libraries." var:"GRPC_GO_DEP"`
} `help:"Please has built-in support for compiling protocol buffers, which are a form of codegen to define common data types which can be serialised and communicated between different languages.\nSee https://developers.google.com/protocol-buffers/ for more information.\n\nThere is also support for gRPC, which is an implementation of protobuf's RPC framework. See http://www.grpc.io/ for more information.\n\nNote that you must have the protocol buffers compiler (and gRPC plugins, if needed) installed on your machine to make use of these rules."`
Licences struct {
Accept []string `help:"Licences that are accepted in this repository.\nWhen this is empty licences are ignored. As soon as it's set any licence detected or assigned must be accepted explicitly here.\nThere's no fuzzy matching, so some package managers (especially PyPI and Maven, but shockingly not npm which rather nicely uses SPDX) will generate a lot of slightly different spellings of the same thing, which will all have to be accepted here. We'd rather that than trying to 'cleverly' match them which might result in matching the wrong thing."`
Reject []string `help:"Licences that are explicitly rejected in this repository.\nAn astute observer will notice that this is not very different to just not adding it to the accept section, but it does have the advantage of explicitly documenting things that the team aren't allowed to use."`
} `help:"Please has some limited support for declaring acceptable licences and detecting them from some libraries. You should not rely on this for complete licence compliance, but it can be a useful check to try to ensure that unacceptable licences do not slip in."`
Aliases map[string]string `help:"It is possible to define aliases for new commands in your .plzconfig file. These are essentially string-string replacements of the command line, for example 'deploy = run //tools:deployer --' makes 'plz deploy' run a particular tool."`
Bazel struct {
Compatibility bool `help:"Activates limited Bazel compatibility mode. When this is active several rule arguments are available under different names (e.g. compiler_flags -> copts etc), the WORKSPACE file is interpreted, Makefile-style replacements like $< and $@ are made in genrule commands, etc.\nNote that Skylark is not generally supported and many aspects of compatibility are fairly superficial; it's unlikely this will work for complex setups of either tool." var:"BAZEL_COMPATIBILITY"`
} `help:"Bazel is an open-sourced version of Google's internal build tool. Please draws a lot of inspiration from the original tool although the two have now diverged in various ways.\nNonetheless, if you've used Bazel, you will likely find Please familiar."`
// buildEnvStored is a cached form of BuildEnv.
buildEnvStored *storedBuildEnv
}
type storedBuildEnv struct {
Env []string
Once sync.Once
}
// Hash returns a hash of the parts of this configuration that affect building targets in general.
// Most parts are considered not to (e.g. cache settings) or affect specific targets (e.g. changing
// tool paths which get accounted for on the targets that use them).
func (config *Configuration) Hash() []byte {
h := sha1.New()
// These fields are the ones that need to be in the general hash; other things will be
// picked up by relevant rules (particularly tool paths etc).
// Note that container settings are handled separately.
for _, f := range config.Parse.BuildFileName {
h.Write([]byte(f))
}
h.Write([]byte(config.Build.Lang))
h.Write([]byte(config.Build.Nonce))
for _, l := range config.Licences.Reject {
h.Write([]byte(l))
}
for _, env := range config.GetBuildEnv() {
h.Write([]byte(env))
}
return h.Sum(nil)
}
// ContainerisationHash returns the hash of the containerisation part of the config.
func (config *Configuration) ContainerisationHash() []byte {
h := sha1.New()
encoder := gob.NewEncoder(h)
if err := encoder.Encode(config.Docker); err != nil {
panic(err)
}
return h.Sum(nil)
}
// GetBuildEnv returns the build environment configured for this config object.
func (config *Configuration) GetBuildEnv() []string {
config.buildEnvStored.Once.Do(func() {
env := []string{
// Need to know these for certain rules.
"ARCH=" + config.Build.Arch.Arch,
"OS=" + config.Build.Arch.OS,
// These are slightly modified forms that are more convenient for some things.
"XARCH=" + config.Build.Arch.XArch(),
"XOS=" + config.Build.Arch.XOS(),
// It's easier to just make these available for Go-based rules.
"GOARCH=" + config.Build.Arch.GoArch(),
"GOOS=" + config.Build.Arch.OS,
}
// from the BuildEnv config keyword
for k, v := range config.BuildEnv {
pair := strings.Replace(strings.ToUpper(k), "-", "_", -1) + "=" + v
env = append(env, pair)
}
// from the user's environment based on the PassEnv config keyword
for _, k := range config.Build.PassEnv {
if v, isSet := os.LookupEnv(k); isSet {
env = append(env, k+"="+v)
}
}
sort.Strings(env)
config.buildEnvStored.Env = env
})
return config.buildEnvStored.Env
}
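// A small worked example of the BuildEnv normalisation above (key and value
// hypothetical): a .plzconfig entry like
//
//   [buildenv]
//   my-key = some-value
//
// is upper-cased with '-' replaced by '_', so rules see MY_KEY=some-value
// alongside ARCH, OS, GOARCH etc., with the whole set sorted and then cached
// via sync.Once for later calls.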
// ApplyOverrides applies a set of overrides to the config.
// The keys of the given map are dot notation for the config setting.
func (config *Configuration) ApplyOverrides(overrides map[string]string) error {
match := func(s1 string) func(string) bool {
return func(s2 string) bool {
return strings.ToLower(s2) == s1
}
}
elem := reflect.ValueOf(config).Elem()
for k, v := range overrides {
split := strings.Split(strings.ToLower(k), ".")
if len(split) != 2 {
return fmt.Errorf("Bad option format: %s", k)
}
field := elem.FieldByNameFunc(match(split[0]))
if !field.IsValid() {
return fmt.Errorf("Unknown config field: %s", split[0])
} else if field.Kind() == reflect.Map {
field.SetMapIndex(reflect.ValueOf(split[1]), reflect.ValueOf(v))
continue
} else if field.Kind() != reflect.Struct {
return fmt.Errorf("Unsettable config field: %s", split[0])
}
subfield, ok := field.Type().FieldByNameFunc(match(split[1]))
if !ok {
return fmt.Errorf("Unknown config field: %s", split[1])
}
field = field.FieldByNameFunc(match(split[1]))
switch field.Kind() {
case reflect.String:
// verify this is a legit setting for this field
if options := subfield.Tag.Get("options"); options != "" {
if !cli.ContainsString(v, strings.Split(options, ",")) {
return fmt.Errorf("Invalid value %s for field %s; options are %s", v, k, options)
}
}
if field.Type().Name() == "URL" {
field.Set(reflect.ValueOf(cli.URL(v)))
} else {
field.Set(reflect.ValueOf(v))
}
case reflect.Bool:
v = strings.ToLower(v)
// Mimics the set of truthy things gcfg accepts in our config file.
field.SetBool(v == "true" || v == "yes" || v == "on" || v == "1")
case reflect.Int:
i, err := strconv.Atoi(v)
if err != nil {
return fmt.Errorf("Invalid value for an integer field: %s", v)
}
field.Set(reflect.ValueOf(i))
case reflect.Int64:
var d cli.Duration
if err := d.UnmarshalText([]byte(v)); err != nil {
return fmt.Errorf("Invalid value for a duration field: %s", v)
}
field.Set(reflect.ValueOf(d))
case reflect.Slice:
// Comma-separated values are accepted.
if field.Type().Elem().Kind() == reflect.Struct {
// Assume it must be a slice of BuildLabel.
l := []BuildLabel{}
for _, s := range strings.Split(v, ",") {
l = append(l, ParseBuildLabel(s, ""))
}
field.Set(reflect.ValueOf(l))
} else if field.Type().Elem().Name() == "URL" {
urls := []cli.URL{}
for _, s := range strings.Split(v, ",") {
urls = append(urls, cli.URL(s))
}
field.Set(reflect.ValueOf(urls))
} else {
field.Set(reflect.ValueOf(strings.Split(v, ",")))
}
default:
return fmt.Errorf("Can't override config field %s (is %s)", k, field.Kind())
}
}
return nil
}
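// A minimal usage sketch for ApplyOverrides (values hypothetical; the keys
// are real section.option pairs from the struct above):
//
//   err := config.ApplyOverrides(map[string]string{
//       "build.lang":      "en_GB",            // plain string field
//       "licences.accept": "MIT,BSD-3-Clause", // []string field, split on commas
//   })
//
// Keys that aren't a two-part "section.option" pair, or that name a field of
// an unsupported kind, come back as an error rather than being silently dropped.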
// Completions returns a list of possible completions for the given option prefix.
func (config *Configuration) Completions(prefix string) []flags.Completion {
ret := []flags.Completion{}
t := reflect.TypeOf(config).Elem()
for i := 0; i < t.NumField(); i++ {
if field := t.Field(i); field.Type.Kind() == reflect.Struct {
for j := 0; j < field.Type.NumField(); j++ {
subfield := field.Type.Field(j)
if name := strings.ToLower(field.Name + "." + subfield.Name); strings.HasPrefix(name, prefix) {
help := subfield.Tag.Get("help")
if options := subfield.Tag.Get("options"); options != "" {
for _, option := range strings.Split(options, ",") {
ret = append(ret, flags.Completion{Item: name + ":" + option, Description: help})
}
} else {
ret = append(ret, flags.Completion{Item: name + ":", Description: help})
}
}
}
}
}
return ret
}
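// For illustration: given the struct above, Completions("licences.") would
// yield items like "licences.accept:" and "licences.reject:" with their help
// text as the description; fields tagged with options get one completion per
// allowed value.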
| 1 | 8,290 | This probably needs to get shipped with plz; you shouldn't have to redefine it within the user's repo. It should use `defaultPath` like the other things up the top of the file. | thought-machine-please | go |
@@ -4,7 +4,7 @@ var run = function() {
var WalletKey = bitcore.WalletKey;
var Builder = bitcore.TransactionBuilder;
var opts = {
- network: networks.testnet
+ network: networks['btc'].testnet
};
console.log('## Network: ' + opts.network.name); | 1 | var run = function() {
bitcore = typeof(bitcore) === 'undefined' ? require('../bitcore') : bitcore;
var networks = require('../networks');
var WalletKey = bitcore.WalletKey;
var Builder = bitcore.TransactionBuilder;
var opts = {
network: networks.testnet
};
console.log('## Network: ' + opts.network.name);
var input = {};
input.addr = "n2hoFVbPrYQf7RJwiRy1tkbuPPqyhAEfbp";
input.priv = "cS62Ej4SobZnpFQYN1PEEBr2KWf5sgRYYnELtumcG6WVCfxno39V";
// Complete with the corresponding UTXO you want to use
var utxos = [{
address: input.addr,
txid: "39c71ebda371f75f4b854a720eaf9898b237facf3c2b101b58cd4383a44a6adc",
vout: 1,
ts: 1396288753,
scriptPubKey: "76a914e867aad8bd361f57c50adc37a0c018692b5b0c9a88ac",
amount: 0.4296,
confirmations: 2
}];
var privs = [
"cP6JBHuQf7yqeqtdKRd22ibF3VehDv7G6BdzxSNABgrv3jFJUGoN",
"cQfRwF7XLSM5xGUpF8PZvob2MZyULvZPA2j5cat2RKDJrja7FtCZ",
"cUkYub4jtFVYymHh38yMMW36nJB4pXG5Pzd5QjResq79kAndkJcg",
"cMyBgowsyrJRufoKWob73rMQB1PBqDdwFt8z4TJ6APN2HkmX1Ttm",
"cN9yZCom6hAZpHtCp8ovE1zFa7RqDf3Cr4W6AwH2tp59Jjh9JcXu",
];
var pubkeys = [];
privs.forEach(function(p) {
var wk = new WalletKey(opts);
wk.fromObj({
priv: p
});
pubkeys.push(bitcore.buffertools.toHex(wk.privKey.public));
});
var outs = [{
nreq: 3,
pubkeys: pubkeys,
amount: 0.05
}];
var tx = new Builder(opts)
.setUnspent(utxos)
.setOutputs(outs)
.sign([input.priv])
.build();
var txHex = tx.serialize().toString('hex');
console.log('1) SEND TO MULTISIG TX: ', txHex);
console.log('[this example originally generated TXID: e4bc22d8c519d3cf848d710619f8480be56176a4a6548dfbe865ab3886b578b5 on testnet]\n\n\thttp://test.bitcore.io/tx/e4bc22d8c519d3cf848d710619f8480be56176a4a6548dfbe865ab3886b578b5\n\n');
//save scriptPubKey
var scriptPubKey = tx.outs[0].s.toString('hex');
/*
*
* REDEEM TX
*/
var utxos2 = [{
address: input.addr,
txid: "e4bc22d8c519d3cf848d710619f8480be56176a4a6548dfbe865ab3886b578b5",
vout: 0,
ts: 1396288753,
scriptPubKey: scriptPubKey,
amount: 0.05,
confirmations: 2
}];
outs = [{
address: input.addr,
amount: 0.04
}];
var b = new Builder(opts)
.setUnspent(utxos2)
.setOutputs(outs)
.sign(privs);
tx = b.build();
var txHex = tx.serialize().toString('hex');
console.log('2) REDEEM SCRIPT: ', txHex);
console.log('=> Is signed status:', b.isFullySigned(), tx.countInputMissingSignatures(0));
console.log('[this example originally generated TXID: 1eb388977b2de99562eb0fbcc661a100eaffed99c53bfcfebe5a087002039b83 on testnet]\n\n\thttp://test.bitcore.io/tx/1eb388977b2de99562eb0fbcc661a100eaffed99c53bfcfebe5a087002039b83');
};
// This is just for browser & mocha compatibility
if (typeof module !== 'undefined') {
module.exports.run = run;
if (require.main === module) {
run();
}
} else {
run();
}
////
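// A sketch of the networks shape implied by the diff at the top of this entry
// (`networks['btc'].testnet`), written so the old single-chain accessors keep
// working -- illustrative only, not the actual networks module:
//
//   var networks = {
//     btc: {
//       livenet: { name: 'livenet' /* , magic, pubkeyhash, ... */ },
//       testnet: { name: 'testnet' /* , magic, pubkeyhash, ... */ }
//     }
//   };
//   // Backwards-compatible aliases so `networks.testnet` still resolves:
//   networks.livenet = networks.btc.livenet;
//   networks.testnet = networks.btc.testnet;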
| 1 | 12,941 | interface for bitcoin should not change if possible. i.e: networks.testnet should return networks['btc'].testnet | bitpay-bitcore | js |
@@ -12,6 +12,7 @@ import (
"fmt"
"math/big"
"syscall"
+ "io/ioutil"
"github.com/golang/protobuf/proto"
"github.com/spf13/cobra" | 1 | // Copyright (c) 2019 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package action
import (
"context"
"encoding/hex"
"fmt"
"math/big"
"syscall"
"github.com/golang/protobuf/proto"
"github.com/spf13/cobra"
"go.uber.org/zap"
"golang.org/x/crypto/ssh/terminal"
"google.golang.org/grpc/status"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/cli/ioctl/cmd/account"
"github.com/iotexproject/iotex-core/cli/ioctl/cmd/config"
"github.com/iotexproject/iotex-core/cli/ioctl/util"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
"github.com/iotexproject/iotex-core/protogen/iotexapi"
)
// Flags
var (
gasLimit uint64
gasPrice string
nonce uint64
signer string
bytecode []byte
)
// ActionCmd represents the action command
var ActionCmd = &cobra.Command{
Use: "action",
Short: "Manage actions of IoTeX blockchain",
}
func init() {
ActionCmd.AddCommand(actionHashCmd)
ActionCmd.AddCommand(actionTransferCmd)
ActionCmd.AddCommand(actionDeployCmd)
ActionCmd.AddCommand(actionInvokeCmd)
ActionCmd.AddCommand(actionClaimCmd)
ActionCmd.AddCommand(actionDepositCmd)
ActionCmd.PersistentFlags().StringVar(&config.ReadConfig.Endpoint, "endpoint",
config.ReadConfig.Endpoint, "set endpoint for once")
ActionCmd.PersistentFlags().BoolVar(&config.Insecure, "insecure", config.Insecure,
"insecure connection for once")
setActionFlags(actionTransferCmd, actionDeployCmd, actionInvokeCmd, actionClaimCmd,
actionDepositCmd)
}
func setActionFlags(cmds ...*cobra.Command) {
for _, cmd := range cmds {
cmd.Flags().Uint64VarP(&gasLimit, "gas-limit", "l", 0, "set gas limit")
cmd.Flags().StringVarP(&gasPrice, "gas-price", "p", "1",
"set gas price (unit: 10^(-6)Iotx)")
cmd.Flags().StringVarP(&signer, "signer", "s", "", "choose a signing account")
cmd.Flags().Uint64VarP(&nonce, "nonce", "n", 0, "set nonce")
cmd.MarkFlagRequired("signer")
if cmd == actionDeployCmd || cmd == actionInvokeCmd {
cmd.Flags().BytesHexVarP(&bytecode, "bytecode", "b", nil, "set the byte code")
cmd.MarkFlagRequired("gas-limit")
cmd.MarkFlagRequired("bytecode")
}
}
}
// GetGasPrice gets the suggest gas price
func GetGasPrice() (*big.Int, error) {
conn, err := util.ConnectToEndpoint(config.ReadConfig.SecureConnect && !config.Insecure)
if err != nil {
return nil, err
}
defer conn.Close()
cli := iotexapi.NewAPIServiceClient(conn)
ctx := context.Background()
request := &iotexapi.SuggestGasPriceRequest{}
response, err := cli.SuggestGasPrice(ctx, request)
if err != nil {
return nil, err
}
return new(big.Int).SetUint64(response.GasPrice), nil
}
func sendAction(elp action.Envelope) (string, error) {
fmt.Printf("Enter password #%s:\n", signer)
bytePassword, err := terminal.ReadPassword(int(syscall.Stdin))
if err != nil {
log.L().Error("failed to get password", zap.Error(err))
return "", err
}
prvKey, err := account.KsAccountToPrivateKey(signer, string(bytePassword))
if err != nil {
return "", err
}
defer prvKey.Zero()
sealed, err := action.Sign(elp, prvKey)
prvKey.Zero()
if err != nil {
log.L().Error("failed to sign action", zap.Error(err))
return "", err
}
selp := sealed.Proto()
actionInfo, err := printActionProto(selp)
if err != nil {
return "", err
}
var confirm string
fmt.Println("\n" + actionInfo + "\n" +
"Please confirm your action.\n" +
"Type 'YES' to continue, quit for anything else.")
fmt.Scanf("%s", &confirm)
if confirm != "YES" && confirm != "yes" {
return "Quit", nil
}
fmt.Println()
request := &iotexapi.SendActionRequest{Action: selp}
conn, err := util.ConnectToEndpoint(config.ReadConfig.SecureConnect && !config.Insecure)
if err != nil {
return "", err
}
defer conn.Close()
cli := iotexapi.NewAPIServiceClient(conn)
ctx := context.Background()
_, err = cli.SendAction(ctx, request)
if err != nil {
sta, ok := status.FromError(err)
if ok {
return "", fmt.Errorf(sta.Message())
}
return "", err
}
shash := hash.Hash256b(byteutil.Must(proto.Marshal(selp)))
return "Action has been sent to blockchain.\n" +
"Wait for several seconds and query this action by hash:\n" +
hex.EncodeToString(shash[:]), nil
}
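// For reference, a gofmt-clean version of the import block touched by the
// diff at the top of this entry keeps "io/ioutil" sorted in with the other
// standard-library imports rather than appended after "syscall":
//
//   import (
//       "context"
//       "encoding/hex"
//       "fmt"
//       "io/ioutil"
//       "math/big"
//       "syscall"
//
//       "github.com/golang/protobuf/proto"
//       // ... third-party imports in their own group ...
//   )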
| 1 | 17,564 | File is not `gofmt`-ed with `-s` (from `gofmt`) | iotexproject-iotex-core | go |
@@ -929,3 +929,17 @@ void wlr_surface_send_leave(struct wlr_surface *surface,
}
}
}
+
+static inline int64_t timespec_to_msec(const struct timespec *a) {
+ return (int64_t)a->tv_sec * 1000 + a->tv_nsec / 1000000;
+}
+
+void wlr_surface_send_frame_done(struct wlr_surface *surface,
+ const struct timespec *when) {
+ struct wlr_frame_callback *cb, *cnext;
+ wl_list_for_each_safe(cb, cnext, &surface->current->frame_callback_list,
+ link) {
+ wl_callback_send_done(cb->resource, timespec_to_msec(when));
+ wl_resource_destroy(cb->resource);
+ }
+} | 1 | #include <assert.h>
#include <stdlib.h>
#include <wayland-server.h>
#include <wlr/util/log.h>
#include <wlr/render/interface.h>
#include <wlr/types/wlr_surface.h>
#include <wlr/render/egl.h>
#include <wlr/render/matrix.h>
static void wlr_surface_state_reset_buffer(struct wlr_surface_state *state) {
if (state->buffer) {
wl_list_remove(&state->buffer_destroy_listener.link);
state->buffer = NULL;
}
}
static void buffer_destroy(struct wl_listener *listener, void *data) {
struct wlr_surface_state *state =
wl_container_of(listener, state, buffer_destroy_listener);
wl_list_remove(&state->buffer_destroy_listener.link);
state->buffer = NULL;
}
static void wlr_surface_state_release_buffer(struct wlr_surface_state *state) {
if (state->buffer) {
wl_resource_post_event(state->buffer, WL_BUFFER_RELEASE);
wl_list_remove(&state->buffer_destroy_listener.link);
state->buffer = NULL;
}
}
static void wlr_surface_state_set_buffer(struct wlr_surface_state *state,
struct wl_resource *buffer) {
state->buffer = buffer;
if (buffer) {
wl_resource_add_destroy_listener(buffer,
&state->buffer_destroy_listener);
state->buffer_destroy_listener.notify = buffer_destroy;
}
}
static void surface_destroy(struct wl_client *client,
struct wl_resource *resource) {
wl_resource_destroy(resource);
}
static void surface_attach(struct wl_client *client,
struct wl_resource *resource,
struct wl_resource *buffer, int32_t sx, int32_t sy) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
surface->pending->invalid |= WLR_SURFACE_INVALID_BUFFER;
surface->pending->sx = sx;
surface->pending->sy = sy;
wlr_surface_state_reset_buffer(surface->pending);
wlr_surface_state_set_buffer(surface->pending, buffer);
}
static void surface_damage(struct wl_client *client,
struct wl_resource *resource,
int32_t x, int32_t y, int32_t width, int32_t height) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
if (width < 0 || height < 0) {
return;
}
surface->pending->invalid |= WLR_SURFACE_INVALID_SURFACE_DAMAGE;
pixman_region32_union_rect(&surface->pending->surface_damage,
&surface->pending->surface_damage,
x, y, width, height);
}
static void destroy_frame_callback(struct wl_resource *resource) {
struct wlr_frame_callback *cb = wl_resource_get_user_data(resource);
wl_list_remove(&cb->link);
free(cb);
}
static void surface_frame(struct wl_client *client,
struct wl_resource *resource, uint32_t callback) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
struct wlr_frame_callback *cb =
calloc(1, sizeof(struct wlr_frame_callback));
if (cb == NULL) {
wl_resource_post_no_memory(resource);
return;
}
cb->resource = wl_resource_create(client, &wl_callback_interface, 1,
callback);
if (cb->resource == NULL) {
free(cb);
wl_resource_post_no_memory(resource);
return;
}
wl_resource_set_implementation(cb->resource,
NULL, cb, destroy_frame_callback);
wl_list_insert(surface->pending->frame_callback_list.prev, &cb->link);
surface->pending->invalid |= WLR_SURFACE_INVALID_FRAME_CALLBACK_LIST;
}
static void surface_set_opaque_region(struct wl_client *client,
struct wl_resource *resource,
struct wl_resource *region_resource) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
if ((surface->pending->invalid & WLR_SURFACE_INVALID_OPAQUE_REGION)) {
pixman_region32_clear(&surface->pending->opaque);
}
surface->pending->invalid |= WLR_SURFACE_INVALID_OPAQUE_REGION;
if (region_resource) {
pixman_region32_t *region = wl_resource_get_user_data(region_resource);
pixman_region32_copy(&surface->pending->opaque, region);
} else {
pixman_region32_clear(&surface->pending->opaque);
}
}
static void surface_set_input_region(struct wl_client *client,
struct wl_resource *resource,
struct wl_resource *region_resource) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
surface->pending->invalid |= WLR_SURFACE_INVALID_INPUT_REGION;
if (region_resource) {
pixman_region32_t *region = wl_resource_get_user_data(region_resource);
pixman_region32_copy(&surface->pending->input, region);
} else {
pixman_region32_init_rect(&surface->pending->input,
INT32_MIN, INT32_MIN, UINT32_MAX, UINT32_MAX);
}
}
static void wlr_surface_update_size(struct wlr_surface *surface, struct wlr_surface_state *state) {
if (!state->buffer) {
state->height = 0;
state->width = 0;
return;
}
int scale = state->scale;
enum wl_output_transform transform = state->transform;
wlr_texture_get_buffer_size(surface->texture, state->buffer,
&state->buffer_width, &state->buffer_height);
int _width = state->buffer_width / scale;
int _height = state->buffer_height / scale;
if (transform == WL_OUTPUT_TRANSFORM_90 ||
transform == WL_OUTPUT_TRANSFORM_270 ||
transform == WL_OUTPUT_TRANSFORM_FLIPPED_90 ||
transform == WL_OUTPUT_TRANSFORM_FLIPPED_270) {
int tmp = _width;
_width = _height;
_height = tmp;
}
struct wlr_frame_callback *cb, *tmp;
wl_list_for_each_safe(cb, tmp, &state->frame_callback_list, link) {
wl_resource_destroy(cb->resource);
}
wl_list_init(&state->frame_callback_list);
state->width = _width;
state->height = _height;
}
static void wlr_surface_to_buffer_region(int scale,
enum wl_output_transform transform, pixman_region32_t *surface_region,
pixman_region32_t *buffer_region,
int width, int height) {
pixman_box32_t *src_rects, *dest_rects;
int nrects, i;
src_rects = pixman_region32_rectangles(surface_region, &nrects);
dest_rects = malloc(nrects * sizeof(*dest_rects));
if (!dest_rects) {
return;
}
for (i = 0; i < nrects; i++) {
switch (transform) {
default:
case WL_OUTPUT_TRANSFORM_NORMAL:
dest_rects[i].x1 = src_rects[i].x1;
dest_rects[i].y1 = src_rects[i].y1;
dest_rects[i].x2 = src_rects[i].x2;
dest_rects[i].y2 = src_rects[i].y2;
break;
case WL_OUTPUT_TRANSFORM_90:
dest_rects[i].x1 = height - src_rects[i].y2;
dest_rects[i].y1 = src_rects[i].x1;
dest_rects[i].x2 = height - src_rects[i].y1;
dest_rects[i].y2 = src_rects[i].x2;
break;
case WL_OUTPUT_TRANSFORM_180:
dest_rects[i].x1 = width - src_rects[i].x2;
dest_rects[i].y1 = height - src_rects[i].y2;
dest_rects[i].x2 = width - src_rects[i].x1;
dest_rects[i].y2 = height - src_rects[i].y1;
break;
case WL_OUTPUT_TRANSFORM_270:
dest_rects[i].x1 = src_rects[i].y1;
dest_rects[i].y1 = width - src_rects[i].x2;
dest_rects[i].x2 = src_rects[i].y2;
dest_rects[i].y2 = width - src_rects[i].x1;
break;
case WL_OUTPUT_TRANSFORM_FLIPPED:
dest_rects[i].x1 = width - src_rects[i].x2;
dest_rects[i].y1 = src_rects[i].y1;
dest_rects[i].x2 = width - src_rects[i].x1;
dest_rects[i].y2 = src_rects[i].y2;
break;
case WL_OUTPUT_TRANSFORM_FLIPPED_90:
dest_rects[i].x1 = height - src_rects[i].y2;
dest_rects[i].y1 = width - src_rects[i].x2;
dest_rects[i].x2 = height - src_rects[i].y1;
dest_rects[i].y2 = width - src_rects[i].x1;
break;
case WL_OUTPUT_TRANSFORM_FLIPPED_180:
dest_rects[i].x1 = src_rects[i].x1;
dest_rects[i].y1 = height - src_rects[i].y2;
dest_rects[i].x2 = src_rects[i].x2;
dest_rects[i].y2 = height - src_rects[i].y1;
break;
case WL_OUTPUT_TRANSFORM_FLIPPED_270:
dest_rects[i].x1 = src_rects[i].y1;
dest_rects[i].y1 = src_rects[i].x1;
dest_rects[i].x2 = src_rects[i].y2;
dest_rects[i].y2 = src_rects[i].x2;
break;
}
}
if (scale != 1) {
for (i = 0; i < nrects; i++) {
dest_rects[i].x1 *= scale;
dest_rects[i].x2 *= scale;
dest_rects[i].y1 *= scale;
dest_rects[i].y2 *= scale;
}
}
pixman_region32_fini(buffer_region);
pixman_region32_init_rects(buffer_region, dest_rects, nrects);
free(dest_rects);
}
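/* A worked example of the mapping above (numbers hypothetical): with
 * height = 100 and WL_OUTPUT_TRANSFORM_90, a surface rect
 * (x1,y1,x2,y2) = (10,20,30,40) maps to (100-40, 10, 100-20, 30) =
 * (60,10,80,30); a buffer scale of 2 then doubles every coordinate to
 * (120,20,160,60). */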
/**
* Append pending state to current state and clear pending state.
*/
static void wlr_surface_move_state(struct wlr_surface *surface, struct wlr_surface_state *next,
struct wlr_surface_state *state) {
bool update_damage = false;
bool update_size = false;
if ((next->invalid & WLR_SURFACE_INVALID_SCALE)) {
state->scale = next->scale;
update_size = true;
}
if ((next->invalid & WLR_SURFACE_INVALID_TRANSFORM)) {
state->transform = next->transform;
update_size = true;
}
if ((next->invalid & WLR_SURFACE_INVALID_BUFFER)) {
wlr_surface_state_release_buffer(state);
wlr_surface_state_set_buffer(state, next->buffer);
wlr_surface_state_reset_buffer(next);
state->sx = next->sx;
state->sy = next->sy;
update_size = true;
}
if (update_size) {
wlr_surface_update_size(surface, state);
}
if ((next->invalid & WLR_SURFACE_INVALID_SURFACE_DAMAGE)) {
pixman_region32_union(&state->surface_damage,
&state->surface_damage,
&next->surface_damage);
pixman_region32_intersect_rect(&state->surface_damage,
&state->surface_damage, 0, 0, state->width,
state->height);
pixman_region32_clear(&next->surface_damage);
update_damage = true;
}
if ((next->invalid & WLR_SURFACE_INVALID_BUFFER_DAMAGE)) {
pixman_region32_union(&state->buffer_damage,
&state->buffer_damage,
&next->buffer_damage);
pixman_region32_clear(&next->buffer_damage);
update_damage = true;
}
if (update_damage) {
pixman_region32_t buffer_damage;
pixman_region32_init(&buffer_damage);
wlr_surface_to_buffer_region(state->scale, state->transform,
&state->surface_damage, &buffer_damage, state->width,
state->height);
pixman_region32_union(&state->buffer_damage,
&state->buffer_damage, &buffer_damage);
pixman_region32_fini(&buffer_damage);
pixman_region32_intersect_rect(&state->buffer_damage,
&state->buffer_damage, 0, 0,
state->buffer_width, state->buffer_height);
}
if ((next->invalid & WLR_SURFACE_INVALID_OPAQUE_REGION)) {
// TODO: process buffer
pixman_region32_clear(&next->opaque);
}
if ((next->invalid & WLR_SURFACE_INVALID_INPUT_REGION)) {
// TODO: process buffer
pixman_region32_copy(&state->input, &next->input);
}
if ((next->invalid & WLR_SURFACE_INVALID_SUBSURFACE_POSITION)) {
state->subsurface_position.x = next->subsurface_position.x;
state->subsurface_position.y = next->subsurface_position.y;
next->subsurface_position.x = 0;
next->subsurface_position.y = 0;
}
if ((next->invalid & WLR_SURFACE_INVALID_FRAME_CALLBACK_LIST)) {
wl_list_insert_list(&state->frame_callback_list, &next->frame_callback_list);
wl_list_init(&next->frame_callback_list);
}
state->invalid |= next->invalid;
next->invalid = 0;
}
static void wlr_surface_damage_subsurfaces(struct wlr_subsurface *subsurface) {
// XXX: This is probably the wrong way to do it, because this damage should
// come from the client, but weston doesn't do it correctly either and it
// seems to work ok. See the comment on weston_surface_damage for more info
// about a better approach.
struct wlr_surface *surface = subsurface->surface;
pixman_region32_union_rect(&surface->current->surface_damage,
&surface->current->surface_damage,
0, 0, surface->current->width,
surface->current->height);
subsurface->reordered = false;
struct wlr_subsurface *child;
wl_list_for_each(child, &subsurface->surface->subsurface_list, parent_link) {
wlr_surface_damage_subsurfaces(child);
}
}
static void wlr_surface_flush_damage(struct wlr_surface *surface,
bool reupload_buffer) {
if (!surface->current->buffer) {
return;
}
struct wl_shm_buffer *buffer = wl_shm_buffer_get(surface->current->buffer);
if (!buffer) {
if (wlr_renderer_buffer_is_drm(surface->renderer,
surface->current->buffer)) {
wlr_texture_upload_drm(surface->texture, surface->current->buffer);
goto release;
} else {
wlr_log(L_INFO, "Unknown buffer handle attached");
return;
}
}
uint32_t format = wl_shm_buffer_get_format(buffer);
if (reupload_buffer) {
wlr_texture_upload_shm(surface->texture, format, buffer);
} else {
pixman_region32_t damage = surface->current->buffer_damage;
if (!pixman_region32_not_empty(&damage)) {
goto release;
}
int n;
pixman_box32_t *rects = pixman_region32_rectangles(&damage, &n);
for (int i = 0; i < n; ++i) {
pixman_box32_t rect = rects[i];
if (!wlr_texture_update_shm(surface->texture, format,
rect.x1, rect.y1,
rect.x2 - rect.x1,
rect.y2 - rect.y1,
buffer)) {
break;
}
}
}
release:
pixman_region32_clear(&surface->current->surface_damage);
pixman_region32_clear(&surface->current->buffer_damage);
wlr_surface_state_release_buffer(surface->current);
}
static void wlr_surface_commit_pending(struct wlr_surface *surface) {
int32_t oldw = surface->current->buffer_width;
int32_t oldh = surface->current->buffer_height;
bool null_buffer_commit =
(surface->pending->invalid & WLR_SURFACE_INVALID_BUFFER &&
surface->pending->buffer == NULL);
wlr_surface_move_state(surface, surface->pending, surface->current);
if (null_buffer_commit) {
surface->texture->valid = false;
}
bool reupload_buffer = oldw != surface->current->buffer_width ||
oldh != surface->current->buffer_height;
wlr_surface_flush_damage(surface, reupload_buffer);
// commit subsurface order
struct wlr_subsurface *subsurface;
wl_list_for_each_reverse(subsurface, &surface->subsurface_pending_list,
parent_pending_link) {
wl_list_remove(&subsurface->parent_link);
wl_list_insert(&surface->subsurface_list, &subsurface->parent_link);
if (subsurface->reordered) {
// TODO: damage all the subsurfaces
wlr_surface_damage_subsurfaces(subsurface);
}
}
// TODO: add the invalid bitfield to this callback
wl_signal_emit(&surface->events.commit, surface);
}
static bool wlr_subsurface_is_synchronized(struct wlr_subsurface *subsurface) {
while (subsurface) {
if (subsurface->synchronized) {
return true;
}
if (!subsurface->parent) {
return false;
}
subsurface = subsurface->parent->subsurface;
}
return false;
}
/**
* Recursive function to commit the effectively synchronized children.
*/
static void wlr_subsurface_parent_commit(struct wlr_subsurface *subsurface,
bool synchronized) {
struct wlr_surface *surface = subsurface->surface;
if (synchronized || subsurface->synchronized) {
if (subsurface->has_cache) {
wlr_surface_move_state(surface, subsurface->cached, surface->pending);
wlr_surface_commit_pending(surface);
subsurface->has_cache = false;
subsurface->cached->invalid = 0;
}
struct wlr_subsurface *tmp;
wl_list_for_each(tmp, &surface->subsurface_list, parent_link) {
wlr_subsurface_parent_commit(tmp, true);
}
}
}
static void wlr_subsurface_commit(struct wlr_subsurface *subsurface) {
struct wlr_surface *surface = subsurface->surface;
if (wlr_subsurface_is_synchronized(subsurface)) {
wlr_surface_move_state(surface, surface->pending, subsurface->cached);
subsurface->has_cache = true;
} else {
if (subsurface->has_cache) {
wlr_surface_move_state(surface, subsurface->cached, surface->pending);
wlr_surface_commit_pending(surface);
subsurface->has_cache = false;
} else {
wlr_surface_commit_pending(surface);
}
struct wlr_subsurface *tmp;
wl_list_for_each(tmp, &surface->subsurface_list, parent_link) {
wlr_subsurface_parent_commit(tmp, false);
}
}
}
static void surface_commit(struct wl_client *client,
struct wl_resource *resource) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
struct wlr_subsurface *subsurface = surface->subsurface;
if (subsurface) {
wlr_subsurface_commit(subsurface);
return;
}
wlr_surface_commit_pending(surface);
struct wlr_subsurface *tmp;
wl_list_for_each(tmp, &surface->subsurface_list, parent_link) {
wlr_subsurface_parent_commit(tmp, false);
}
}
static void surface_set_buffer_transform(struct wl_client *client,
struct wl_resource *resource, int transform) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
surface->pending->invalid |= WLR_SURFACE_INVALID_TRANSFORM;
surface->pending->transform = transform;
}
static void surface_set_buffer_scale(struct wl_client *client,
struct wl_resource *resource,
int32_t scale) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
surface->pending->invalid |= WLR_SURFACE_INVALID_SCALE;
surface->pending->scale = scale;
}
static void surface_damage_buffer(struct wl_client *client,
struct wl_resource *resource,
int32_t x, int32_t y, int32_t width,
int32_t height) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
if (width < 0 || height < 0) {
return;
}
surface->pending->invalid |= WLR_SURFACE_INVALID_BUFFER_DAMAGE;
pixman_region32_union_rect(&surface->pending->buffer_damage,
&surface->pending->buffer_damage,
x, y, width, height);
}
const struct wl_surface_interface surface_interface = {
.destroy = surface_destroy,
.attach = surface_attach,
.damage = surface_damage,
.frame = surface_frame,
.set_opaque_region = surface_set_opaque_region,
.set_input_region = surface_set_input_region,
.commit = surface_commit,
.set_buffer_transform = surface_set_buffer_transform,
.set_buffer_scale = surface_set_buffer_scale,
.damage_buffer = surface_damage_buffer
};
static struct wlr_surface_state *wlr_surface_state_create() {
struct wlr_surface_state *state =
calloc(1, sizeof(struct wlr_surface_state));
if (state == NULL) {
return NULL;
}
state->scale = 1;
state->transform = WL_OUTPUT_TRANSFORM_NORMAL;
wl_list_init(&state->frame_callback_list);
pixman_region32_init(&state->surface_damage);
pixman_region32_init(&state->buffer_damage);
pixman_region32_init(&state->opaque);
pixman_region32_init_rect(&state->input,
INT32_MIN, INT32_MIN, UINT32_MAX, UINT32_MAX);
return state;
}
static void wlr_surface_state_destroy(struct wlr_surface_state *state) {
wlr_surface_state_reset_buffer(state);
struct wlr_frame_callback *cb, *tmp;
wl_list_for_each_safe(cb, tmp, &state->frame_callback_list, link) {
wl_resource_destroy(cb->resource);
}
pixman_region32_fini(&state->surface_damage);
pixman_region32_fini(&state->buffer_damage);
pixman_region32_fini(&state->opaque);
pixman_region32_fini(&state->input);
free(state);
}
void wlr_subsurface_destroy(struct wlr_subsurface *subsurface) {
wlr_surface_state_destroy(subsurface->cached);
if (subsurface->parent) {
wl_list_remove(&subsurface->parent_link);
wl_list_remove(&subsurface->parent_pending_link);
wl_list_remove(&subsurface->parent_destroy_listener.link);
}
wl_resource_set_user_data(subsurface->resource, NULL);
if (subsurface->surface) {
subsurface->surface->subsurface = NULL;
}
free(subsurface);
}
static void destroy_surface(struct wl_resource *resource) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
wl_signal_emit(&surface->events.destroy, surface);
if (surface->subsurface) {
wlr_subsurface_destroy(surface->subsurface);
}
wlr_texture_destroy(surface->texture);
wlr_surface_state_destroy(surface->pending);
wlr_surface_state_destroy(surface->current);
free(surface);
}
struct wlr_surface *wlr_surface_create(struct wl_resource *res,
struct wlr_renderer *renderer) {
struct wlr_surface *surface = calloc(1, sizeof(struct wlr_surface));
if (!surface) {
wl_resource_post_no_memory(res);
return NULL;
}
wlr_log(L_DEBUG, "New wlr_surface %p (res %p)", surface, res);
surface->renderer = renderer;
surface->texture = wlr_render_texture_create(renderer);
surface->resource = res;
surface->current = wlr_surface_state_create();
surface->pending = wlr_surface_state_create();
wl_signal_init(&surface->events.commit);
wl_signal_init(&surface->events.destroy);
wl_list_init(&surface->subsurface_list);
wl_list_init(&surface->subsurface_pending_list);
wl_resource_set_implementation(res, &surface_interface,
surface, destroy_surface);
return surface;
}
void wlr_surface_get_matrix(struct wlr_surface *surface,
float (*matrix)[16],
const float (*projection)[16],
const float (*transform)[16]) {
int width = surface->texture->width;
int height = surface->texture->height;
float scale[16];
wlr_matrix_identity(matrix);
if (transform) {
wlr_matrix_mul(matrix, transform, matrix);
}
wlr_matrix_scale(&scale, width, height, 1);
wlr_matrix_mul(matrix, &scale, matrix);
wlr_matrix_mul(projection, matrix, matrix);
}
bool wlr_surface_has_buffer(struct wlr_surface *surface) {
return surface->texture && surface->texture->valid;
}
int wlr_surface_set_role(struct wlr_surface *surface, const char *role,
struct wl_resource *error_resource, uint32_t error_code) {
assert(role);
if (surface->role == NULL ||
surface->role == role ||
strcmp(surface->role, role) == 0) {
surface->role = role;
return 0;
}
wl_resource_post_error(error_resource, error_code,
"Cannot assign role %s to wl_surface@%d, already has role %s\n",
role,
wl_resource_get_id(surface->resource),
surface->role);
return -1;
}
static void subsurface_resource_destroy(struct wl_resource *resource) {
struct wlr_subsurface *subsurface = wl_resource_get_user_data(resource);
if (subsurface) {
wlr_subsurface_destroy(subsurface);
}
}
static void subsurface_destroy(struct wl_client *client,
struct wl_resource *resource) {
wl_resource_destroy(resource);
}
static void subsurface_set_position(struct wl_client *client,
struct wl_resource *resource, int32_t x, int32_t y) {
struct wlr_subsurface *subsurface = wl_resource_get_user_data(resource);
struct wlr_surface *surface = subsurface->surface;
surface->pending->invalid |= WLR_SURFACE_INVALID_SUBSURFACE_POSITION;
surface->pending->subsurface_position.x = x;
surface->pending->subsurface_position.y = y;
}
static struct wlr_subsurface *subsurface_find_sibling(
struct wlr_subsurface *subsurface, struct wlr_surface *surface) {
struct wlr_surface *parent = subsurface->parent;
struct wlr_subsurface *sibling;
wl_list_for_each(sibling, &parent->subsurface_list, parent_link) {
if (sibling->surface == surface && sibling != subsurface)
return sibling;
}
return NULL;
}
static void subsurface_place_above(struct wl_client *client,
struct wl_resource *resource, struct wl_resource *sibling_resource) {
struct wlr_subsurface *subsurface = wl_resource_get_user_data(resource);
if (!subsurface) {
return;
}
struct wlr_surface *sibling_surface =
wl_resource_get_user_data(sibling_resource);
struct wlr_subsurface *sibling =
subsurface_find_sibling(subsurface, sibling_surface);
if (!sibling) {
wl_resource_post_error(subsurface->resource,
WL_SUBSURFACE_ERROR_BAD_SURFACE,
"%s: wl_surface@%d is not a parent or sibling",
"place_above", wl_resource_get_id(sibling_surface->resource));
return;
}
wl_list_remove(&subsurface->parent_pending_link);
wl_list_insert(sibling->parent_pending_link.prev,
&subsurface->parent_pending_link);
subsurface->reordered = true;
}
static void subsurface_place_below(struct wl_client *client,
struct wl_resource *resource, struct wl_resource *sibling_resource) {
struct wlr_subsurface *subsurface = wl_resource_get_user_data(resource);
struct wlr_surface *sibling_surface =
wl_resource_get_user_data(sibling_resource);
struct wlr_subsurface *sibling =
subsurface_find_sibling(subsurface, sibling_surface);
if (!sibling) {
wl_resource_post_error(subsurface->resource,
WL_SUBSURFACE_ERROR_BAD_SURFACE,
"%s: wl_surface@%d is not a parent or sibling",
"place_below", wl_resource_get_id(sibling_surface->resource));
return;
}
wl_list_remove(&subsurface->parent_pending_link);
wl_list_insert(&sibling->parent_pending_link,
&subsurface->parent_pending_link);
subsurface->reordered = true;
}
static void subsurface_set_sync(struct wl_client *client,
struct wl_resource *resource) {
struct wlr_subsurface *subsurface = wl_resource_get_user_data(resource);
if (subsurface) {
subsurface->synchronized = true;
}
}
static void subsurface_set_desync(struct wl_client *client,
struct wl_resource *resource) {
struct wlr_subsurface *subsurface = wl_resource_get_user_data(resource);
if (subsurface && subsurface->synchronized) {
subsurface->synchronized = false;
if (!wlr_subsurface_is_synchronized(subsurface)) {
// TODO: do a synchronized commit to flush the cache
wlr_subsurface_parent_commit(subsurface, true);
}
}
}
static const struct wl_subsurface_interface subsurface_implementation = {
.destroy = subsurface_destroy,
.set_position = subsurface_set_position,
.place_above = subsurface_place_above,
.place_below = subsurface_place_below,
.set_sync = subsurface_set_sync,
.set_desync = subsurface_set_desync,
};
static void subsurface_handle_parent_destroy(struct wl_listener *listener,
void *data) {
struct wlr_subsurface *subsurface =
wl_container_of(listener, subsurface, parent_destroy_listener);
wl_list_remove(&subsurface->parent_link);
wl_list_remove(&subsurface->parent_pending_link);
wl_list_remove(&subsurface->parent_destroy_listener.link);
subsurface->parent = NULL;
}
void wlr_surface_make_subsurface(struct wlr_surface *surface,
struct wlr_surface *parent, uint32_t id) {
struct wl_client *client = wl_resource_get_client(surface->resource);
assert(surface->subsurface == NULL);
struct wlr_subsurface *subsurface =
calloc(1, sizeof(struct wlr_subsurface));
if (!subsurface) {
wl_client_post_no_memory(client);
return;
}
subsurface->cached = wlr_surface_state_create();
if (subsurface->cached == NULL) {
free(subsurface);
wl_client_post_no_memory(client);
return;
}
subsurface->synchronized = true;
subsurface->surface = surface;
// link parent
subsurface->parent = parent;
wl_signal_add(&parent->events.destroy,
&subsurface->parent_destroy_listener);
subsurface->parent_destroy_listener.notify =
subsurface_handle_parent_destroy;
wl_list_insert(&parent->subsurface_list, &subsurface->parent_link);
wl_list_insert(&parent->subsurface_pending_list,
&subsurface->parent_pending_link);
subsurface->resource =
wl_resource_create(client, &wl_subsurface_interface, 1, id);
if (subsurface->resource == NULL) {
wlr_surface_state_destroy(subsurface->cached);
free(subsurface);
wl_client_post_no_memory(client);
return;
}
wl_resource_set_implementation(subsurface->resource,
&subsurface_implementation, subsurface,
subsurface_resource_destroy);
surface->subsurface = subsurface;
}
struct wlr_surface *wlr_surface_get_main_surface(struct wlr_surface *surface) {
struct wlr_subsurface *sub;
while (surface && (sub = surface->subsurface)) {
surface = sub->parent;
}
return surface;
}
struct wlr_subsurface *wlr_surface_subsurface_at(struct wlr_surface *surface,
double sx, double sy, double *sub_x, double *sub_y) {
struct wlr_subsurface *subsurface;
wl_list_for_each(subsurface, &surface->subsurface_list, parent_link) {
double _sub_x = subsurface->surface->current->subsurface_position.x;
double _sub_y = subsurface->surface->current->subsurface_position.y;
struct wlr_subsurface *sub =
wlr_surface_subsurface_at(subsurface->surface, _sub_x + sx,
_sub_y + sy, sub_x, sub_y);
if (sub) {
// TODO: This won't work for nested subsurfaces. Convert sub_x and
// sub_y to the parent coordinate system
return sub;
}
int sub_width = subsurface->surface->current->buffer_width;
int sub_height = subsurface->surface->current->buffer_height;
if ((sx > _sub_x && sx < _sub_x + sub_width) &&
(sy > _sub_y && sy < _sub_y + sub_height)) {
if (pixman_region32_contains_point(
&subsurface->surface->current->input,
sx - _sub_x, sy - _sub_y, NULL)) {
*sub_x = _sub_x;
*sub_y = _sub_y;
return subsurface;
}
}
}
return NULL;
}
void wlr_surface_send_enter(struct wlr_surface *surface,
struct wlr_output *output) {
struct wl_client *client = wl_resource_get_client(surface->resource);
struct wl_resource *resource;
wl_resource_for_each(resource, &output->wl_resources) {
if (client == wl_resource_get_client(resource)) {
wl_surface_send_enter(surface->resource, resource);
break;
}
}
}
void wlr_surface_send_leave(struct wlr_surface *surface,
struct wlr_output *output) {
struct wl_client *client = wl_resource_get_client(surface->resource);
struct wl_resource *resource;
wl_resource_for_each(resource, &output->wl_resources) {
if (client == wl_resource_get_client(resource)) {
wl_surface_send_leave(surface->resource, resource);
break;
}
}
}
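/* The diff at the top of this entry adds a file-local timespec_to_msec()
 * helper below wlr_surface_send_leave(); the same conversion recurs in other
 * files, so one option is a single shared definition (header name
 * hypothetical), e.g. in a util/time.h:
 *
 *   static inline int64_t timespec_to_msec(const struct timespec *a) {
 *       return (int64_t)a->tv_sec * 1000 + a->tv_nsec / 1000000;
 *   }
 */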
| 1 | 9,179 | We have this functions in a couple of places. It should probably live in util or something. | swaywm-wlroots | c |
@@ -53,6 +53,7 @@ module Mongoid
# The associated object will be replaced by the below update if non-nil, so only
# run the callbacks and state-changing code by passing persist: false in that case.
_target.destroy(persist: !replacement) if persistable?
+ _target.new_record = true
end
unbind_one
return nil unless replacement | 1 | # frozen_string_literal: true
module Mongoid
module Association
module Embedded
class EmbedsOne
class Proxy < Association::One
# The valid options when defining this association.
#
# @return [ Array<Symbol> ] The allowed options when defining this association.
VALID_OPTIONS = [
:autobuild,
:as,
:cascade_callbacks,
:cyclic,
:store_as
].freeze
# Instantiate a new embeds_one association.
#
# @example Create the new proxy.
# One.new(person, name, association)
#
# @param [ Document ] base The document this association hangs off of.
# @param [ Document ] target The child document in the association.
# @param [ Association ] association The association metadata.
def initialize(base, target, association)
init(base, target, association) do
characterize_one(_target)
bind_one
characterize_one(_target)
_base._reset_memoized_descendants!
_target.save if persistable?
end
end
# Substitutes the supplied target document for the existing document
# in the association.
#
# @example Substitute the new document.
# person.name.substitute(new_name)
#
# @param [ Document ] replacement A document to replace the target.
#
# @return [ Document, nil ] The association or nil.
def substitute(replacement)
if replacement != self
if _assigning?
_base.add_atomic_unset(_target) unless replacement
else
# The associated object will be replaced by the below update if non-nil, so only
# run the callbacks and state-changing code by passing persist: false in that case.
_target.destroy(persist: !replacement) if persistable?
end
unbind_one
return nil unless replacement
replacement = Factory.build(klass, replacement) if replacement.is_a?(::Hash)
self._target = replacement
bind_one
characterize_one(_target)
_target.save if persistable?
end
self
end
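# A minimal usage sketch (model and field names hypothetical): writes through
# an embeds_one setter funnel into #substitute above.
#
#   person.name = Name.new(first_name: "A") # replaces and saves when persistable?
#   person.name = nil                       # destroys the existing child doc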
private
# Instantiate the binding associated with this association.
#
# @example Get the binding.
# relation.binding([ address ])
#
# @return [ Binding ] The association's binding.
def binding
Binding.new(_base, _target, _association)
end
# Are we able to persist this association?
#
# @example Can we persist the association?
# relation.persistable?
#
# @return [ true, false ] If the association is persistable.
def persistable?
_base.persisted? && !_binding? && !_building? && !_assigning?
end
class << self
# Returns true if the association is an embedded one. In this case
# always true.
#
# @example Is this association embedded?
# Association::Embedded::EmbedsOne.embedded?
#
# @return [ true ] true.
def embedded?
true
end
# Get the path calculator for the supplied document.
#
# @example Get the path calculator.
# Proxy.path(document)
#
# @param [ Document ] document The document to calculate on.
#
# @return [ Mongoid::Atomic::Paths::Embedded::One ]
# The embedded one atomic path calculator.
def path(document)
Mongoid::Atomic::Paths::Embedded::One.new(document)
end
end
end
end
end
end
end
| 1 | 13,506 | maybe this should be inside the destroy? | mongodb-mongoid | rb |
@@ -106,10 +106,17 @@ class RPN(BaseDetector):
list[np.ndarray]: proposals
"""
x = self.extract_feat(img)
+ # get original input shape to support onnx dynamic input shape
+ import torch
+ if torch.onnx.is_in_onnx_export():
+ img_shape = torch._shape_as_tensor(img)[2:]
+ img_metas[0]['img_shape_for_onnx'] = img_shape
proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
if rescale:
for proposals, meta in zip(proposal_list, img_metas):
proposals[:, :4] /= proposals.new_tensor(meta['scale_factor'])
+ if torch.onnx.is_in_onnx_export():
+ return proposal_list
return [proposal.cpu().numpy() for proposal in proposal_list]
| 1 | import mmcv
from mmcv.image import tensor2imgs
from mmdet.core import bbox_mapping
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
@DETECTORS.register_module()
class RPN(BaseDetector):
"""Implementation of Region Proposal Network."""
def __init__(self,
backbone,
neck,
rpn_head,
train_cfg,
test_cfg,
pretrained=None):
super(RPN, self).__init__()
self.backbone = build_backbone(backbone)
self.neck = build_neck(neck) if neck is not None else None
rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
rpn_head.update(train_cfg=rpn_train_cfg)
rpn_head.update(test_cfg=test_cfg.rpn)
self.rpn_head = build_head(rpn_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
def init_weights(self, pretrained=None):
"""Initialize the weights in detector.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
super(RPN, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
self.neck.init_weights()
self.rpn_head.init_weights()
def extract_feat(self, img):
"""Extract features.
Args:
img (torch.Tensor): Image tensor with shape (n, c, h ,w).
Returns:
list[torch.Tensor]: Multi-level features that may have
different resolutions.
"""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_dummy(self, img):
"""Dummy forward function."""
x = self.extract_feat(img)
rpn_outs = self.rpn_head(x)
return rpn_outs
def forward_train(self,
img,
img_metas,
gt_bboxes=None,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
if (isinstance(self.train_cfg.rpn, dict)
and self.train_cfg.rpn.get('debug', False)):
self.rpn_head.debug_imgs = tensor2imgs(img)
x = self.extract_feat(img)
losses = self.rpn_head.forward_train(x, img_metas, gt_bboxes, None,
gt_bboxes_ignore)
return losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test time augmentation.
Args:
img (Tensor): Input images of shape (N, C, H, W).
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[np.ndarray]: proposals
"""
x = self.extract_feat(img)
proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
if rescale:
for proposals, meta in zip(proposal_list, img_metas):
proposals[:, :4] /= proposals.new_tensor(meta['scale_factor'])
return [proposal.cpu().numpy() for proposal in proposal_list]
def aug_test(self, imgs, img_metas, rescale=False):
"""Test function with test time augmentation.
Args:
imgs (list[torch.Tensor]): List of multiple images
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[np.ndarray]: proposals
"""
proposal_list = self.rpn_head.aug_test_rpn(
self.extract_feats(imgs), img_metas)
if not rescale:
for proposals, img_meta in zip(proposal_list, img_metas[0]):
img_shape = img_meta['img_shape']
scale_factor = img_meta['scale_factor']
flip = img_meta['flip']
flip_direction = img_meta['flip_direction']
proposals[:, :4] = bbox_mapping(proposals[:, :4], img_shape,
scale_factor, flip,
flip_direction)
return [proposal.cpu().numpy() for proposal in proposal_list]
def show_result(self, data, result, dataset=None, top_k=20):
"""Show RPN proposals on the image.
Although we assume batch size is 1, this method supports arbitrary
batch size.
"""
img_tensor = data['img'][0]
img_metas = data['img_metas'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)
for img, img_meta in zip(imgs, img_metas):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
mmcv.imshow_bboxes(img_show, result, top_k=top_k)
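# A minimal sketch of the ONNX-export branch from the diff at the top of this
# entry, with `torch` imported once at module level (next to `import mmcv`)
# so the import statement is not re-executed inside every simple_test call:
#
#   import torch  # module level
#
#   def simple_test(self, img, img_metas, rescale=False):
#       x = self.extract_feat(img)
#       if torch.onnx.is_in_onnx_export():
#           img_metas[0]['img_shape_for_onnx'] = torch._shape_as_tensor(img)[2:]
#       ...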
| 1 | 23,032 | Do not import torch in the test function because it will import torch every test iteration and will slow down the testing speed. | open-mmlab-mmdetection | py |
@@ -69,11 +69,16 @@ func (s *Service) InstanceByTags(machine *actuators.MachineScope) (*v1alpha1.Ins
}
// InstanceIfExists returns the existing instance or nothing if it doesn't exist.
-func (s *Service) InstanceIfExists(id string) (*v1alpha1.Instance, error) {
- klog.V(2).Infof("Looking for instance %q", id)
+func (s *Service) InstanceIfExists(id *string) (*v1alpha1.Instance, error) {
+ if id == nil {
+ klog.Error("Instance does not have an instance id")
+ return nil, nil
+ }
+
+ klog.V(2).Infof("Looking for instance %q", *id)
input := &ec2.DescribeInstancesInput{
- InstanceIds: []*string{aws.String(id)},
+ InstanceIds: []*string{id},
Filters: []*ec2.Filter{
filter.EC2.VPC(s.scope.VPC().ID),
filter.EC2.InstanceStates(ec2.InstanceStateNamePending, ec2.InstanceStateNameRunning), | 1 | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ec2
import (
"encoding/base64"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/pkg/errors"
"k8s.io/klog"
"sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1alpha1"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/actuators"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/converters"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/filter"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/services/awserrors"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/services/certificates"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/services/kubeadm"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/services/userdata"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/tags"
"sigs.k8s.io/cluster-api-provider-aws/pkg/record"
)
// InstanceByTags returns the existing instance or nothing if it doesn't exist.
func (s *Service) InstanceByTags(machine *actuators.MachineScope) (*v1alpha1.Instance, error) {
klog.V(2).Infof("Looking for existing instance for machine %q in cluster %q", machine.Name(), s.scope.Name())
input := &ec2.DescribeInstancesInput{
Filters: []*ec2.Filter{
filter.EC2.VPC(s.scope.VPC().ID),
filter.EC2.ClusterOwned(s.scope.Name()),
filter.EC2.Name(machine.Name()),
filter.EC2.InstanceStates(ec2.InstanceStateNamePending, ec2.InstanceStateNameRunning),
},
}
out, err := s.scope.EC2.DescribeInstances(input)
switch {
case awserrors.IsNotFound(err):
return nil, nil
case err != nil:
return nil, errors.Wrap(err, "failed to describe instances by tags")
}
// TODO: currently just returns the first matched instance, need to
// better rationalize how to find the right instance to return if multiple
// match
for _, res := range out.Reservations {
for _, inst := range res.Instances {
return converters.SDKToInstance(inst), nil
}
}
return nil, nil
}
// InstanceIfExists returns the existing instance or nothing if it doesn't exist.
func (s *Service) InstanceIfExists(id string) (*v1alpha1.Instance, error) {
klog.V(2).Infof("Looking for instance %q", id)
input := &ec2.DescribeInstancesInput{
InstanceIds: []*string{aws.String(id)},
Filters: []*ec2.Filter{
filter.EC2.VPC(s.scope.VPC().ID),
filter.EC2.InstanceStates(ec2.InstanceStateNamePending, ec2.InstanceStateNameRunning),
},
}
out, err := s.scope.EC2.DescribeInstances(input)
switch {
case awserrors.IsNotFound(err):
return nil, nil
case err != nil:
return nil, errors.Wrapf(err, "failed to describe instance: %q", id)
}
if len(out.Reservations) > 0 && len(out.Reservations[0].Instances) > 0 {
return converters.SDKToInstance(out.Reservations[0].Instances[0]), nil
}
return nil, nil
}
// createInstance runs an ec2 instance.
func (s *Service) createInstance(machine *actuators.MachineScope, bootstrapToken, kubeConfig string) (*v1alpha1.Instance, error) {
klog.V(2).Infof("Creating a new instance for machine %q", machine.Name())
input := &v1alpha1.Instance{
Type: machine.MachineConfig.InstanceType,
IAMProfile: machine.MachineConfig.IAMInstanceProfile,
}
input.Tags = tags.Build(tags.BuildParams{
ClusterName: s.scope.Name(),
Lifecycle: tags.ResourceLifecycleOwned,
Name: aws.String(machine.Name()),
Role: aws.String(machine.Role()),
})
var err error
// Pick image from the machine configuration, or use a default one.
if machine.MachineConfig.AMI.ID != nil {
input.ImageID = *machine.MachineConfig.AMI.ID
} else {
input.ImageID, err = s.defaultAMILookup("ubuntu", "18.04", machine.Machine.Spec.Versions.Kubelet)
if err != nil {
return nil, err
}
}
// Pick subnet from the machine configuration, or default to the first private available.
if machine.MachineConfig.Subnet != nil && machine.MachineConfig.Subnet.ID != nil {
input.SubnetID = *machine.MachineConfig.Subnet.ID
} else {
sns := s.scope.Subnets().FilterPrivate()
if len(sns) == 0 {
return nil, awserrors.NewFailedDependency(
errors.Errorf("failed to run machine %q, no subnets available", machine.Name()),
)
}
input.SubnetID = sns[0].ID
}
if !s.scope.ClusterConfig.CAKeyPair.HasCertAndKey() {
return nil, awserrors.NewFailedDependency(
errors.New("failed to run controlplane, missing CACertificate"),
)
}
if s.scope.Network().APIServerELB.DNSName == "" {
return nil, awserrors.NewFailedDependency(
errors.New("failed to run controlplane, APIServer ELB not available"),
)
}
caCertHash, err := certificates.GenerateCertificateHash(s.scope.ClusterConfig.CAKeyPair.Cert)
if err != nil {
return input, err
}
// apply values based on the role of the machine
switch machine.Role() {
case "controlplane":
if s.scope.SecurityGroups()[v1alpha1.SecurityGroupControlPlane] == nil {
return nil, awserrors.NewFailedDependency(
errors.New("failed to run controlplane, security group not available"),
)
}
var userData string
if bootstrapToken != "" {
klog.V(2).Infof("Allowing machine %q to join control plane for cluster %q", machine.Name(), s.scope.Name())
kubeadm.SetJoinNodeConfigurationOverrides(caCertHash, bootstrapToken, machine, &machine.MachineConfig.KubeadmConfiguration.Join)
kubeadm.SetControlPlaneJoinConfigurationOverrides(&machine.MachineConfig.KubeadmConfiguration.Join)
joinConfigurationYAML, err := kubeadm.ConfigurationToYAML(&machine.MachineConfig.KubeadmConfiguration.Join)
if err != nil {
return nil, err
}
userData, err = userdata.JoinControlPlane(&userdata.ContolPlaneJoinInput{
CACert: string(s.scope.ClusterConfig.CAKeyPair.Cert),
CAKey: string(s.scope.ClusterConfig.CAKeyPair.Key),
EtcdCACert: string(s.scope.ClusterConfig.EtcdCAKeyPair.Cert),
EtcdCAKey: string(s.scope.ClusterConfig.EtcdCAKeyPair.Key),
FrontProxyCACert: string(s.scope.ClusterConfig.FrontProxyCAKeyPair.Cert),
FrontProxyCAKey: string(s.scope.ClusterConfig.FrontProxyCAKeyPair.Key),
SaCert: string(s.scope.ClusterConfig.SAKeyPair.Cert),
SaKey: string(s.scope.ClusterConfig.SAKeyPair.Key),
JoinConfiguration: joinConfigurationYAML,
})
if err != nil {
return input, err
}
} else {
klog.V(2).Infof("Machine %q is the first controlplane machine for cluster %q", machine.Name(), s.scope.Name())
if !s.scope.ClusterConfig.CAKeyPair.HasCertAndKey() {
return nil, awserrors.NewFailedDependency(
errors.New("failed to run controlplane, missing CAPrivateKey"),
)
}
kubeadm.SetClusterConfigurationOverrides(machine, &s.scope.ClusterConfig.ClusterConfiguration)
clusterConfigYAML, err := kubeadm.ConfigurationToYAML(&s.scope.ClusterConfig.ClusterConfiguration)
if err != nil {
return nil, err
}
kubeadm.SetInitConfigurationOverrides(&machine.MachineConfig.KubeadmConfiguration.Init)
initConfigYAML, err := kubeadm.ConfigurationToYAML(&machine.MachineConfig.KubeadmConfiguration.Init)
if err != nil {
return nil, err
}
userData, err = userdata.NewControlPlane(&userdata.ControlPlaneInput{
CACert: string(s.scope.ClusterConfig.CAKeyPair.Cert),
CAKey: string(s.scope.ClusterConfig.CAKeyPair.Key),
EtcdCACert: string(s.scope.ClusterConfig.EtcdCAKeyPair.Cert),
EtcdCAKey: string(s.scope.ClusterConfig.EtcdCAKeyPair.Key),
FrontProxyCACert: string(s.scope.ClusterConfig.FrontProxyCAKeyPair.Cert),
FrontProxyCAKey: string(s.scope.ClusterConfig.FrontProxyCAKeyPair.Key),
SaCert: string(s.scope.ClusterConfig.SAKeyPair.Cert),
SaKey: string(s.scope.ClusterConfig.SAKeyPair.Key),
ClusterConfiguration: clusterConfigYAML,
InitConfiguration: initConfigYAML,
})
if err != nil {
return input, err
}
}
input.UserData = aws.String(userData)
input.SecurityGroupIDs = append(input.SecurityGroupIDs, s.scope.SecurityGroups()[v1alpha1.SecurityGroupControlPlane].ID)
case "node":
input.SecurityGroupIDs = append(input.SecurityGroupIDs, s.scope.SecurityGroups()[v1alpha1.SecurityGroupNode].ID)
kubeadm.SetJoinNodeConfigurationOverrides(caCertHash, bootstrapToken, machine, &machine.MachineConfig.KubeadmConfiguration.Join)
joinConfigurationYAML, err := kubeadm.ConfigurationToYAML(&machine.MachineConfig.KubeadmConfiguration.Join)
if err != nil {
return nil, err
}
userData, err := userdata.NewNode(&userdata.NodeInput{
JoinConfiguration: joinConfigurationYAML,
})
if err != nil {
return input, err
}
input.UserData = aws.String(userData)
default:
return nil, errors.Errorf("Unknown node role %q", machine.Role())
}
// Pick SSH key, if any.
if machine.MachineConfig.KeyName != "" {
input.KeyName = aws.String(machine.MachineConfig.KeyName)
} else {
input.KeyName = aws.String(defaultSSHKeyName)
}
out, err := s.runInstance(machine.Role(), input)
if err != nil {
return nil, err
}
record.Eventf(machine.Machine, "CreatedInstance", "Created new %s instance with id %q", machine.Role(), out.ID)
return out, nil
}
// TerminateInstance terminates an EC2 instance.
// Returns nil on success, error in all other cases.
func (s *Service) TerminateInstance(instanceID string) error {
klog.V(2).Infof("Attempting to terminate instance with id %q", instanceID)
input := &ec2.TerminateInstancesInput{
InstanceIds: aws.StringSlice([]string{instanceID}),
}
if _, err := s.scope.EC2.TerminateInstances(input); err != nil {
return errors.Wrapf(err, "failed to terminate instance with id %q", instanceID)
}
klog.V(2).Infof("Terminated instance with id %q", instanceID)
record.Eventf(s.scope.Cluster, "DeletedInstance", "Terminated instance %q", instanceID)
return nil
}
// TerminateInstanceAndWait terminates and waits
// for an EC2 instance to terminate.
func (s *Service) TerminateInstanceAndWait(instanceID string) error {
if err := s.TerminateInstance(instanceID); err != nil {
return err
}
klog.V(2).Infof("Waiting for EC2 instance with id %q to terminate", instanceID)
input := &ec2.DescribeInstancesInput{
InstanceIds: aws.StringSlice([]string{instanceID}),
}
if err := s.scope.EC2.WaitUntilInstanceTerminated(input); err != nil {
return errors.Wrapf(err, "failed to wait for instance %q termination", instanceID)
}
return nil
}
// MachineExists will return whether or not a machine exists.
func (s *Service) MachineExists(machine *actuators.MachineScope) (bool, error) {
var err error
var instance *v1alpha1.Instance
if machine.MachineStatus.InstanceID != nil {
instance, err = s.InstanceIfExists(*machine.MachineStatus.InstanceID)
} else {
instance, err = s.InstanceByTags(machine)
}
if err != nil {
if awserrors.IsNotFound(err) {
return false, nil
}
return false, errors.Wrapf(err, "failed to lookup machine %q", machine.Name())
}
return instance != nil, nil
}
// CreateOrGetMachine will either return an existing instance or create and return an instance.
func (s *Service) CreateOrGetMachine(machine *actuators.MachineScope, bootstrapToken, kubeConfig string) (*v1alpha1.Instance, error) {
klog.V(2).Infof("Attempting to create or get machine %q", machine.Name())
// instance id exists, try to get it
if machine.MachineStatus.InstanceID != nil {
klog.V(2).Infof("Looking up machine %q by id %q", machine.Name(), *machine.MachineStatus.InstanceID)
instance, err := s.InstanceIfExists(*machine.MachineStatus.InstanceID)
if err != nil && !awserrors.IsNotFound(err) {
return nil, errors.Wrapf(err, "failed to look up machine %q by id %q", machine.Name(), *machine.MachineStatus.InstanceID)
} else if err == nil && instance != nil {
return instance, nil
}
}
klog.V(2).Infof("Looking up machine %q by tags", machine.Name())
instance, err := s.InstanceByTags(machine)
if err != nil && !awserrors.IsNotFound(err) {
return nil, errors.Wrapf(err, "failed to query machine %q instance by tags", machine.Name())
} else if err == nil && instance != nil {
return instance, nil
}
return s.createInstance(machine, bootstrapToken, kubeConfig)
}
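// runInstance translates the instance spec into an ec2.RunInstancesInput,
// launches a single instance, and waits for it to reach the running state.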
func (s *Service) runInstance(role string, i *v1alpha1.Instance) (*v1alpha1.Instance, error) {
input := &ec2.RunInstancesInput{
InstanceType: aws.String(i.Type),
SubnetId: aws.String(i.SubnetID),
ImageId: aws.String(i.ImageID),
KeyName: i.KeyName,
EbsOptimized: i.EBSOptimized,
MaxCount: aws.Int64(1),
MinCount: aws.Int64(1),
UserData: i.UserData,
}
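	// The EC2 API requires user data to be base64-encoded, so re-encode the
	// raw payload before sending it.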
if i.UserData != nil {
input.UserData = aws.String(base64.StdEncoding.EncodeToString([]byte(*i.UserData)))
}
if len(i.SecurityGroupIDs) > 0 {
input.SecurityGroupIds = aws.StringSlice(i.SecurityGroupIDs)
}
if i.IAMProfile != "" {
input.IamInstanceProfile = &ec2.IamInstanceProfileSpecification{
Name: aws.String(i.IAMProfile),
}
}
if len(i.Tags) > 0 {
spec := &ec2.TagSpecification{ResourceType: aws.String(ec2.ResourceTypeInstance)}
for key, value := range i.Tags {
spec.Tags = append(spec.Tags, &ec2.Tag{
Key: aws.String(key),
Value: aws.String(value),
})
}
input.TagSpecifications = append(input.TagSpecifications, spec)
}
out, err := s.scope.EC2.RunInstances(input)
if err != nil {
return nil, errors.Wrapf(err, "failed to run instance: %v", i)
}
if len(out.Instances) == 0 {
return nil, errors.Errorf("no instance returned for reservation %v", out.GoString())
}
	// Wait for the instance to be running and surface any waiter error.
	if err := s.scope.EC2.WaitUntilInstanceRunning(&ec2.DescribeInstancesInput{InstanceIds: []*string{out.Instances[0].InstanceId}}); err != nil {
		return nil, errors.Wrap(err, "failed to wait for instance to be running")
	}
return converters.SDKToInstance(out.Instances[0]), nil
}
// UpdateInstanceSecurityGroups modifies the security groups of the given
// EC2 instance.
func (s *Service) UpdateInstanceSecurityGroups(instanceID string, ids []string) error {
klog.V(2).Infof("Attempting to update security groups on instance %q", instanceID)
input := &ec2.ModifyInstanceAttributeInput{
InstanceId: aws.String(instanceID),
Groups: aws.StringSlice(ids),
}
if _, err := s.scope.EC2.ModifyInstanceAttribute(input); err != nil {
return errors.Wrapf(err, "failed to modify instance %q security groups", instanceID)
}
return nil
}
// UpdateResourceTags updates the tags for an instance.
// This will be called if there is anything to create (update) or delete.
// We may not always have to perform each action, so we check what we're
// receiving to avoid calling AWS if we don't need to.
func (s *Service) UpdateResourceTags(resourceID *string, create map[string]string, remove map[string]string) error {
klog.V(2).Infof("Attempting to update tags on resource %q", *resourceID)
// If we have anything to create or update
if len(create) > 0 {
klog.V(2).Infof("Attempting to create tags on resource %q", *resourceID)
// Convert our create map into an array of *ec2.Tag
createTagsInput := converters.MapToTags(create)
// Create the CreateTags input.
input := &ec2.CreateTagsInput{
Resources: []*string{resourceID},
Tags: createTagsInput,
}
// Create/Update tags in AWS.
if _, err := s.scope.EC2.CreateTags(input); err != nil {
return errors.Wrapf(err, "failed to create tags for resource %q: %+v", *resourceID, create)
}
}
// If we have anything to remove
if len(remove) > 0 {
klog.V(2).Infof("Attempting to delete tags on resource %q", *resourceID)
// Convert our remove map into an array of *ec2.Tag
removeTagsInput := converters.MapToTags(remove)
// Create the DeleteTags input
input := &ec2.DeleteTagsInput{
Resources: []*string{resourceID},
Tags: removeTagsInput,
}
// Delete tags in AWS.
if _, err := s.scope.EC2.DeleteTags(input); err != nil {
return errors.Wrapf(err, "failed to delete tags for resource %q: %v", *resourceID, remove)
}
}
return nil
}
| 1 | 8,584 | I think we want to return an error here, not nil. | kubernetes-sigs-cluster-api-provider-aws | go |
@@ -38,6 +38,7 @@ describe( 'core/user userInfo', () => {
picture: 'https://path/to/image',
},
verified: true,
+ userInputState: 'complete',
};
	let registry;
/**
* `core/user` data store: userInfo tests.
*
* Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Internal dependencies
 */
import {
createTestRegistry,
subscribeUntil,
unsubscribeFromAll,
} from '../../../../../tests/js/utils';
import { initialState } from './index';
import { STORE_NAME } from './constants';
describe( 'core/user userInfo', () => {
const userDataGlobal = '_googlesitekitUserData';
const userData = {
connectURL: 'http://example.com/wp-admin/admin.php?page=googlesitekit-splash&googlesitekit_connect=1&nonce=a1b2c3d4e5',
user: {
id: 1,
email: 'admin@example.com',
name: 'admin',
picture: 'https://path/to/image',
},
verified: true,
};
let registry;
beforeEach( () => {
registry = createTestRegistry();
} );
afterEach( () => {
delete global[ userDataGlobal ];
unsubscribeFromAll( registry );
} );
describe( 'actions', () => {
describe( 'receiveUserInfo', () => {
it( 'requires the userInfo param', () => {
expect( () => {
registry.dispatch( STORE_NAME ).receiveUserInfo();
} ).toThrow( 'userInfo is required.' );
} );
it( 'receives and sets userInfo', async () => {
const { user } = userData;
await registry.dispatch( STORE_NAME ).receiveUserInfo( user );
expect( registry.select( STORE_NAME ).getUser() ).toMatchObject( user );
} );
} );
describe( 'receiveUserIsVerified', () => {
			it( 'requires the userIsVerified param', () => {
expect( () => {
registry.dispatch( STORE_NAME ).receiveUserIsVerified();
} ).toThrow( 'userIsVerified is required.' );
} );
it( 'receives and sets userIsVerified', async () => {
const { verified } = userData;
await registry.dispatch( STORE_NAME ).receiveUserIsVerified( verified );
expect( registry.select( STORE_NAME ).isVerified() ).toEqual( verified );
} );
} );
} );
describe( 'selectors', () => {
describe( 'getConnectURL', () => {
it( 'uses a resolver to load data from a global variable', async () => {
// Set up the global
global[ userDataGlobal ] = userData;
registry.select( STORE_NAME ).getConnectURL();
await subscribeUntil( registry,
() => registry.select( STORE_NAME ).hasFinishedResolution( 'getConnectURL' )
);
const connectURL = registry.select( STORE_NAME ).getConnectURL();
expect( connectURL ).toBe( userData.connectURL );
// Data must not be wiped after retrieving, as it could be used by other dependants.
expect( global[ userDataGlobal ] ).not.toEqual( undefined );
} );
it( 'will return initial state (undefined) when no data is available', async () => {
expect( global[ userDataGlobal ] ).toEqual( undefined );
const connectURL = registry.select( STORE_NAME ).getConnectURL();
expect( connectURL ).toEqual( initialState.connectURL );
expect( console ).toHaveErrored();
} );
it( 'accepts an optional list of additional scopes to add as a query parameter', () => {
registry.dispatch( STORE_NAME ).receiveConnectURL( userData.connectURL );
const additionalScopes = [ 'http://example.com/test/scope/a', 'http://example.com/test/scope/b' ];
const connectURL = registry.select( STORE_NAME ).getConnectURL( { additionalScopes } );
expect( connectURL ).toMatchQueryParameters( {
'additional_scopes[0]': 'http://example.com/test/scope/a',
'additional_scopes[1]': 'http://example.com/test/scope/b',
} );
} );
it( 'accepts an optional redirectURL to add as a query parameter', () => {
registry.dispatch( STORE_NAME ).receiveConnectURL( userData.connectURL );
const redirectURL = 'http://example.com/test/redirect/';
const connectURL = registry.select( STORE_NAME ).getConnectURL( { redirectURL } );
expect( connectURL ).toMatchQueryParameters( {
redirect: redirectURL,
} );
} );
it( 'does not add query parameters when no options are passed', () => {
registry.dispatch( STORE_NAME ).receiveConnectURL( userData.connectURL );
const connectURL = registry.select( STORE_NAME ).getConnectURL();
expect( connectURL ).not.toContain( '&additional_scopes' );
expect( connectURL ).not.toContain( '&redirect' );
} );
} );
describe( 'getUser', () => {
it( 'uses a resolver to load user data from a global variable', async () => {
// Set up the global
global[ userDataGlobal ] = userData;
expect( global[ userDataGlobal ] ).not.toEqual( undefined );
registry.select( STORE_NAME ).getUser();
await subscribeUntil( registry,
() => (
registry.select( STORE_NAME ).getUser() !== initialState
),
);
const userInfo = registry.select( STORE_NAME ).getUser();
expect( userInfo ).toMatchObject( userData.user );
// Data must not be wiped after retrieving, as it could be used by other dependants.
expect( global[ userDataGlobal ] ).not.toEqual( undefined );
} );
it( 'will return initial state (undefined) when no data is available', async () => {
expect( global[ userDataGlobal ] ).toEqual( undefined );
const userInfo = registry.select( STORE_NAME ).getUser();
const { user } = initialState;
expect( userInfo ).toEqual( user );
expect( console ).toHaveErrored();
} );
} );
describe( 'isVerified', () => {
it( 'uses a resolver to load verification status from a global variable', async () => {
// Set up the global
global[ userDataGlobal ] = userData;
expect( global[ userDataGlobal ] ).not.toEqual( undefined );
registry.select( STORE_NAME ).isVerified();
await subscribeUntil( registry,
() => (
registry.select( STORE_NAME ).isVerified() !== initialState
),
);
const isVerified = registry.select( STORE_NAME ).isVerified();
expect( isVerified ).toEqual( userData.verified );
// Data must not be wiped after retrieving, as it could be used by other dependants.
expect( global[ userDataGlobal ] ).not.toEqual( undefined );
} );
it( 'will return initial state (undefined) when no data is available', async () => {
expect( global[ userDataGlobal ] ).toEqual( undefined );
const isVerified = registry.select( STORE_NAME ).isVerified();
const { verified } = initialState;
expect( isVerified ).toEqual( verified );
expect( console ).toHaveErrored();
} );
} );
describe.each( [
[ 'getID' ],
[ 'getName' ],
[ 'getEmail' ],
[ 'getPicture' ],
] )( `%s()`, ( selector ) => {
it( 'uses a resolver to load user info then returns the info when this specific selector is used', async () => {
// Set up the global
global[ userDataGlobal ] = userData;
registry.select( STORE_NAME )[ selector ]();
await subscribeUntil( registry,
() => (
registry.select( STORE_NAME )[ selector ]() !== undefined
),
);
const userInfo = registry.select( STORE_NAME ).getUser();
expect( userInfo ).toEqual( userData.user );
} );
it( 'will return initial state (undefined) when no data is available', async () => {
expect( global[ userDataGlobal ] ).toEqual( undefined );
const result = registry.select( STORE_NAME )[ selector ]();
expect( result ).toEqual( undefined );
expect( console ).toHaveErrored();
} );
} );
} );
} );
| 1 | 33,076 | Nitpicking, but I think it would be more accurate to use an actually supported value, i.e. `completed`. | google-site-kit-wp | js |
@@ -0,0 +1,17 @@
+class OhAdmin::JobsController < ApplicationController
+ before_action :admin_session_required
+ before_action :find_project
+ layout 'admin'
+ helper JobApiHelper
+
+ def index
+ @response = JSON.parse(ApiJob.new(@project.id, params[:page]).get)
+ end
+
+ private
+
+ def find_project
+ @project = Project.find_by_vanity_url(params[:project_id])
+ raise ParamRecordNotFound if @project.nil?
+ end
+end
|
@@ -427,12 +427,14 @@ func TestCreateBlockchain(t *testing.T) {
require.NoError(registry.Register(account.ProtocolID, acc))
rp := rolldpos.NewProtocol(cfg.Genesis.NumCandidateDelegates, cfg.Genesis.NumDelegates, cfg.Genesis.NumSubEpochs)
require.NoError(registry.Register(rolldpos.ProtocolID, rp))
- bc := NewBlockchain(cfg, nil, InMemStateFactoryOption(), InMemDaoOption(), RegistryOption(®istry))
+ sf, err := factory.NewFactory(cfg, factory.InMemTrieOption())
+ require.NoError(err)
+ bc := NewBlockchain(cfg, nil, sf, InMemDaoOption(), RegistryOption(®istry))
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc))
exec := execution.NewProtocol(bc, hu)
require.NoError(registry.Register(execution.ProtocolID, exec))
bc.Validator().AddActionValidators(acc, exec)
- bc.GetFactory().AddActionHandlers(acc, exec)
+ sf.AddActionHandlers(acc, exec)
require.NoError(bc.Start(ctx))
require.NotNil(bc)
height := bc.TipHeight() | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package blockchain
import (
"context"
"encoding/hex"
"fmt"
"io/ioutil"
"math/big"
"os"
"sync"
"testing"
"time"
"github.com/iotexproject/iotex-address/address"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/execution"
"github.com/iotexproject/iotex-core/action/protocol/poll"
"github.com/iotexproject/iotex-core/action/protocol/rewarding"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/blockchain/blockdao"
"github.com/iotexproject/iotex-core/blockchain/genesis"
"github.com/iotexproject/iotex-core/blockindex"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/pkg/unit"
"github.com/iotexproject/iotex-core/state/factory"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/testutil"
)
var (
deployHash hash.Hash256 // in block 2
setHash hash.Hash256 // in block 3
shrHash hash.Hash256 // in block 4
shlHash hash.Hash256 // in block 5
sarHash hash.Hash256 // in block 6
extHash hash.Hash256 // in block 7
crt2Hash hash.Hash256 // in block 8
setTopic, _ = hex.DecodeString("fe00000000000000000000000000000000000000000000000000000000001f40") // in block 3
getTopic, _ = hex.DecodeString("0000000000000000000000000000000000000000000000000000000000000001") // in block 4
shrTopic, _ = hex.DecodeString("00fe00000000000000000000000000000000000000000000000000000000001f") // in block 4
shlTopic, _ = hex.DecodeString("fe00000000000000000000000000000000000000000000000000000000001f00") // in block 5
sarTopic, _ = hex.DecodeString("fffe00000000000000000000000000000000000000000000000000000000001f") // in block 6
extTopic, _ = hex.DecodeString("4a98ce81a2fd5177f0f42b49cb25b01b720f9ce8019f3937f63b789766c938e2") // in block 7
crt2Topic, _ = hex.DecodeString("0000000000000000000000001895e6033cd1081f18e0bd23a4501d9376028523") // in block 8
)
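// addTestingConstantinopleBlocks commits a chain of blocks that exercise
// Constantinople opcodes (SHR, SHL, SAR, EXTCODEHASH, CREATE2) via a test contract.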
func addTestingConstantinopleBlocks(bc Blockchain, dao blockdao.BlockDAO) error {
// Add block 1
addr0 := identityset.Address(27).String()
priKey0 := identityset.PrivateKey(27)
data, err := hex.DecodeString("608060405234801561001057600080fd5b506104d5806100206000396000f3fe608060405234801561001057600080fd5b50600436106100885760003560e01c806381ea44081161005b57806381ea440814610101578063a91b336214610159578063c2bc2efc14610177578063f5eacece146101cf57610088565b80635bec9e671461008d57806360fe47b1146100975780636bc8ecaa146100c5578063744f5f83146100e3575b600080fd5b6100956101ed565b005b6100c3600480360360208110156100ad57600080fd5b8101908080359060200190929190505050610239565b005b6100cd610270565b6040518082815260200191505060405180910390f35b6100eb6102b3565b6040518082815260200191505060405180910390f35b6101436004803603602081101561011757600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506102f6565b6040518082815260200191505060405180910390f35b61016161036a565b6040518082815260200191505060405180910390f35b6101b96004803603602081101561018d57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506103ad565b6040518082815260200191505060405180910390f35b6101d761045f565b6040518082815260200191505060405180910390f35b5b60011561020b5760008081548092919060010191905055506101ee565b7f8bfaa460932ccf8751604dd60efa3eafa220ec358fccb32ef703f91c509bc3ea60405160405180910390a1565b80600081905550807fdf7a95aebff315db1b7716215d602ab537373cdb769232aae6055c06e798425b60405160405180910390a250565b6000805460081d905080600081905550807fdf7a95aebff315db1b7716215d602ab537373cdb769232aae6055c06e798425b60405160405180910390a280905090565b6000805460081c905080600081905550807fdf7a95aebff315db1b7716215d602ab537373cdb769232aae6055c06e798425b60405160405180910390a280905090565b60008073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16141561033157600080fd5b813f9050807fdf7a95aebff315db1b7716215d602ab537373cdb769232aae6055c06e798425b60405160405180910390a2809050919050565b6000805460081b905080600081905550807fdf7a95aebff315db1b7716215d602ab537373cdb769232aae6055c06e798425b60405160405180910390a280905090565b60008073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614156103e857600080fd5b7fbde7a70c2261170a87678200113c8e12f82f63d0a1d1cfa45681cbac328e87e382600054604051808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390a16000549050919050565b60008080602060406000f59150817fdf7a95aebff315db1b7716215d602ab537373cdb769232aae6055c06e798425b60405160405180910390a2819150509056fea265627a7a72305820209a8ef04c4d621759f34878b27b238650e8605c8a71d6efc619a769a64aa9cc64736f6c634300050a0032")
if err != nil {
return err
}
ex1, err := testutil.SignedExecution(action.EmptyAddress, priKey0, 1, big.NewInt(0), 500000, big.NewInt(testutil.TestGasPriceInt64), data)
if err != nil {
return err
}
deployHash = ex1.Hash()
accMap := make(map[string][]action.SealedEnvelope)
accMap[addr0] = []action.SealedEnvelope{ex1}
blockTime := time.Unix(1546329600, 0)
blk, err := bc.MintNewBlock(
accMap,
blockTime,
)
if err != nil {
return err
}
if err := bc.ValidateBlock(blk); err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
// get deployed contract address
var contract string
if dao != nil {
r, err := dao.GetReceiptByActionHash(deployHash, 1)
if err != nil {
return err
}
contract = r.ContractAddress
}
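	// addOneBlock mints, validates, and commits one block carrying a single
	// execution action against the deployed contract, returning the action hash.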
addOneBlock := func(nonce uint64, data []byte) (hash.Hash256, error) {
ex1, err := testutil.SignedExecution(contract, priKey0, nonce, big.NewInt(0), testutil.TestGasLimit*5, big.NewInt(testutil.TestGasPriceInt64), data)
if err != nil {
return hash.ZeroHash256, err
}
accMap := make(map[string][]action.SealedEnvelope)
accMap[addr0] = []action.SealedEnvelope{ex1}
blockTime = blockTime.Add(time.Second)
blk, err = bc.MintNewBlock(
accMap,
blockTime,
)
if err != nil {
return hash.ZeroHash256, err
}
if err := bc.ValidateBlock(blk); err != nil {
return hash.ZeroHash256, err
}
if err := bc.CommitBlock(blk); err != nil {
return hash.ZeroHash256, err
}
return ex1.Hash(), nil
}
// Add block 2
// call set() to set storedData = 0xfe...1f40
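	// Call data is the 4-byte method selector (the first 4 bytes of the
	// keccak-256 hash of the function signature) followed by the arguments.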
funcSig := hash.Hash256b([]byte("set(uint256)"))
data = append(funcSig[:4], setTopic...)
setHash, err = addOneBlock(2, data)
if err != nil {
return err
}
// Add block 3
// call shright() to test SHR opcode, storedData => 0x00fe...1f
funcSig = hash.Hash256b([]byte("shright()"))
shrHash, err = addOneBlock(3, funcSig[:4])
if err != nil {
return err
}
// Add block 4
// call shleft() to test SHL opcode, storedData => 0xfe...1f00
funcSig = hash.Hash256b([]byte("shleft()"))
shlHash, err = addOneBlock(4, funcSig[:4])
if err != nil {
return err
}
// Add block 5
// call saright() to test SAR opcode, storedData => 0xfffe...1f
funcSig = hash.Hash256b([]byte("saright()"))
sarHash, err = addOneBlock(5, funcSig[:4])
if err != nil {
return err
}
// Add block 6
// call getCodeHash() to test EXTCODEHASH opcode
funcSig = hash.Hash256b([]byte("getCodeHash(address)"))
addr, _ := address.FromString(contract)
ethaddr := hash.BytesToHash256(addr.Bytes())
data = append(funcSig[:4], ethaddr[:]...)
extHash, err = addOneBlock(6, data)
if err != nil {
return err
}
// Add block 7
// call create2() to test CREATE2 opcode
funcSig = hash.Hash256b([]byte("create2()"))
crt2Hash, err = addOneBlock(7, funcSig[:4])
if err != nil {
return err
}
return nil
}
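// addTestingTsfBlocks commits five blocks of transfers among the test
// identities, plus a contract deployment and two contract calls.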
func addTestingTsfBlocks(bc Blockchain, dao blockdao.BlockDAO) error {
// Add block 1
addr0 := identityset.Address(27).String()
tsf0, err := testutil.SignedTransfer(addr0, identityset.PrivateKey(0), 1, big.NewInt(90000000), nil, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
accMap := make(map[string][]action.SealedEnvelope)
accMap[identityset.Address(0).String()] = []action.SealedEnvelope{tsf0}
blk, err := bc.MintNewBlock(
accMap,
testutil.TimestampNow(),
)
if err != nil {
return err
}
if err := bc.ValidateBlock(blk); err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
priKey0 := identityset.PrivateKey(27)
addr1 := identityset.Address(28).String()
priKey1 := identityset.PrivateKey(28)
addr2 := identityset.Address(29).String()
priKey2 := identityset.PrivateKey(29)
addr3 := identityset.Address(30).String()
priKey3 := identityset.PrivateKey(30)
addr4 := identityset.Address(31).String()
priKey4 := identityset.PrivateKey(31)
addr5 := identityset.Address(32).String()
priKey5 := identityset.PrivateKey(32)
addr6 := identityset.Address(33).String()
// Add block 2
// test --> A, B, C, D, E, F
tsf1, err := testutil.SignedTransfer(addr1, priKey0, 1, big.NewInt(20), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf2, err := testutil.SignedTransfer(addr2, priKey0, 2, big.NewInt(30), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf3, err := testutil.SignedTransfer(addr3, priKey0, 3, big.NewInt(50), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf4, err := testutil.SignedTransfer(addr4, priKey0, 4, big.NewInt(70), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf5, err := testutil.SignedTransfer(addr5, priKey0, 5, big.NewInt(110), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf6, err := testutil.SignedTransfer(addr6, priKey0, 6, big.NewInt(50<<20), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
// deploy simple smart contract
data, _ := hex.DecodeString("608060405234801561001057600080fd5b50610233806100206000396000f300608060405260043610610057576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680635bec9e671461005c57806360fe47b114610073578063c2bc2efc146100a0575b600080fd5b34801561006857600080fd5b506100716100f7565b005b34801561007f57600080fd5b5061009e60048036038101908080359060200190929190505050610143565b005b3480156100ac57600080fd5b506100e1600480360381019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919050505061017a565b6040518082815260200191505060405180910390f35b5b6001156101155760008081548092919060010191905055506100f8565b7f8bfaa460932ccf8751604dd60efa3eafa220ec358fccb32ef703f91c509bc3ea60405160405180910390a1565b80600081905550807fdf7a95aebff315db1b7716215d602ab537373cdb769232aae6055c06e798425b60405160405180910390a250565b60008073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16141515156101b757600080fd5b6000548273ffffffffffffffffffffffffffffffffffffffff167fbde7a70c2261170a87678200113c8e12f82f63d0a1d1cfa45681cbac328e87e360405160405180910390a360005490509190505600a165627a7a723058203198d0390613dab2dff2fa053c1865e802618d628429b01ab05b8458afc347eb0029")
ex1, err := testutil.SignedExecution(action.EmptyAddress, priKey2, 1, big.NewInt(0), 200000, big.NewInt(testutil.TestGasPriceInt64), data)
if err != nil {
return err
}
deployHash = ex1.Hash()
accMap = make(map[string][]action.SealedEnvelope)
accMap[addr0] = []action.SealedEnvelope{tsf1, tsf2, tsf3, tsf4, tsf5, tsf6}
accMap[addr2] = []action.SealedEnvelope{ex1}
blk, err = bc.MintNewBlock(
accMap,
testutil.TimestampNow(),
)
if err != nil {
return err
}
if err := bc.ValidateBlock(blk); err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
// get deployed contract address
var contract string
cfg := bc.(*blockchain).config
_, gateway := cfg.Plugins[config.GatewayPlugin]
if gateway && !cfg.Chain.EnableAsyncIndexWrite {
r, err := dao.GetReceiptByActionHash(deployHash, 2)
if err != nil {
return err
}
contract = r.ContractAddress
}
// Add block 3
// Charlie --> A, B, D, E, test
tsf1, err = testutil.SignedTransfer(addr1, priKey3, 1, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf2, err = testutil.SignedTransfer(addr2, priKey3, 2, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf3, err = testutil.SignedTransfer(addr4, priKey3, 3, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf4, err = testutil.SignedTransfer(addr5, priKey3, 4, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf5, err = testutil.SignedTransfer(addr0, priKey3, 5, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
// call set() to set storedData = 0x1f40
data, _ = hex.DecodeString("60fe47b1")
data = append(data, setTopic...)
ex1, err = testutil.SignedExecution(contract, priKey2, 2, big.NewInt(0), testutil.TestGasLimit*5, big.NewInt(testutil.TestGasPriceInt64), data)
if err != nil {
return err
}
accMap = make(map[string][]action.SealedEnvelope)
accMap[addr3] = []action.SealedEnvelope{tsf1, tsf2, tsf3, tsf4, tsf5}
accMap[addr2] = []action.SealedEnvelope{ex1}
blk, err = bc.MintNewBlock(
accMap,
testutil.TimestampNow(),
)
if err != nil {
return err
}
if err := bc.ValidateBlock(blk); err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
// Add block 4
// Delta --> B, E, F, test
tsf1, err = testutil.SignedTransfer(addr2, priKey4, 1, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf2, err = testutil.SignedTransfer(addr5, priKey4, 2, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf3, err = testutil.SignedTransfer(addr6, priKey4, 3, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf4, err = testutil.SignedTransfer(addr0, priKey4, 4, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
data, _ = hex.DecodeString("c2bc2efc")
data = append(data, getTopic...)
ex1, err = testutil.SignedExecution(contract, priKey2, 3, big.NewInt(0), testutil.TestGasLimit*5, big.NewInt(testutil.TestGasPriceInt64), data)
if err != nil {
return err
}
accMap = make(map[string][]action.SealedEnvelope)
accMap[addr4] = []action.SealedEnvelope{tsf1, tsf2, tsf3, tsf4}
accMap[addr2] = []action.SealedEnvelope{ex1}
blk, err = bc.MintNewBlock(
accMap,
testutil.TimestampNow(),
)
if err != nil {
return err
}
if err := bc.ValidateBlock(blk); err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
// Add block 5
// Delta --> A, B, C, D, F, test
tsf1, err = testutil.SignedTransfer(addr1, priKey5, 1, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf2, err = testutil.SignedTransfer(addr2, priKey5, 2, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf3, err = testutil.SignedTransfer(addr3, priKey5, 3, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf4, err = testutil.SignedTransfer(addr4, priKey5, 4, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf5, err = testutil.SignedTransfer(addr6, priKey5, 5, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf6, err = testutil.SignedTransfer(addr0, priKey5, 6, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf7, err := testutil.SignedTransfer(addr3, priKey3, 6, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf8, err := testutil.SignedTransfer(addr1, priKey1, 1, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
accMap = make(map[string][]action.SealedEnvelope)
accMap[addr5] = []action.SealedEnvelope{tsf1, tsf2, tsf3, tsf4, tsf5, tsf6}
accMap[addr3] = []action.SealedEnvelope{tsf7}
accMap[addr1] = []action.SealedEnvelope{tsf8}
blk, err = bc.MintNewBlock(
accMap,
testutil.TimestampNow(),
)
if err != nil {
return err
}
if err := bc.ValidateBlock(blk); err != nil {
return err
}
return bc.CommitBlock(blk)
}
func TestCreateBlockchain(t *testing.T) {
require := require.New(t)
ctx := context.Background()
cfg := config.Default
// disable account-based testing
cfg.Chain.TrieDBPath = ""
cfg.Genesis.EnableGravityChainVoting = false
// create chain
registry := protocol.Registry{}
hu := config.NewHeightUpgrade(cfg)
acc := account.NewProtocol(hu)
require.NoError(registry.Register(account.ProtocolID, acc))
rp := rolldpos.NewProtocol(cfg.Genesis.NumCandidateDelegates, cfg.Genesis.NumDelegates, cfg.Genesis.NumSubEpochs)
require.NoError(registry.Register(rolldpos.ProtocolID, rp))
bc := NewBlockchain(cfg, nil, InMemStateFactoryOption(), InMemDaoOption(), RegistryOption(®istry))
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc))
exec := execution.NewProtocol(bc, hu)
require.NoError(registry.Register(execution.ProtocolID, exec))
bc.Validator().AddActionValidators(acc, exec)
bc.GetFactory().AddActionHandlers(acc, exec)
require.NoError(bc.Start(ctx))
require.NotNil(bc)
height := bc.TipHeight()
require.Equal(0, int(height))
fmt.Printf("Create blockchain pass, height = %d\n", height)
defer func() {
err := bc.Stop(ctx)
require.NoError(err)
}()
	// add 5 sample blocks
require.NoError(addTestingTsfBlocks(bc, nil))
height = bc.TipHeight()
require.Equal(5, int(height))
}
func TestBlockchain_MintNewBlock(t *testing.T) {
ctx := context.Background()
cfg := config.Default
cfg.Genesis.BlockGasLimit = uint64(100000)
cfg.Genesis.EnableGravityChainVoting = false
registry := protocol.Registry{}
hu := config.NewHeightUpgrade(cfg)
acc := account.NewProtocol(hu)
require.NoError(t, registry.Register(account.ProtocolID, acc))
rp := rolldpos.NewProtocol(cfg.Genesis.NumCandidateDelegates, cfg.Genesis.NumDelegates, cfg.Genesis.NumSubEpochs)
require.NoError(t, registry.Register(rolldpos.ProtocolID, rp))
bc := NewBlockchain(cfg, nil, InMemStateFactoryOption(), InMemDaoOption(), RegistryOption(®istry))
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc))
exec := execution.NewProtocol(bc, hu)
require.NoError(t, registry.Register(execution.ProtocolID, exec))
bc.Validator().AddActionValidators(acc, exec)
bc.GetFactory().AddActionHandlers(acc, exec)
require.NoError(t, bc.Start(ctx))
defer func() {
require.NoError(t, bc.Stop(ctx))
}()
tsf, err := action.NewTransfer(
1,
big.NewInt(100000000),
identityset.Address(27).String(),
[]byte{}, uint64(100000),
big.NewInt(10),
)
require.NoError(t, err)
data, _ := hex.DecodeString("608060405234801561001057600080fd5b5060df8061001f6000396000f3006080604052600436106049576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806360fe47b114604e5780636d4ce63c146078575b600080fd5b348015605957600080fd5b5060766004803603810190808035906020019092919050505060a0565b005b348015608357600080fd5b50608a60aa565b6040518082815260200191505060405180910390f35b8060008190555050565b600080549050905600a165627a7a7230582002faabbefbbda99b20217cf33cb8ab8100caf1542bf1f48117d72e2c59139aea0029")
execution, err := action.NewExecution(action.EmptyAddress, 2, big.NewInt(0), uint64(100000), big.NewInt(0), data)
require.NoError(t, err)
bd := &action.EnvelopeBuilder{}
elp1 := bd.SetAction(tsf).
SetNonce(1).
SetGasLimit(100000).
SetGasPrice(big.NewInt(10)).Build()
selp1, err := action.Sign(elp1, identityset.PrivateKey(0))
require.NoError(t, err)
// This execution should not be included in block because block is out of gas
elp2 := bd.SetAction(execution).
SetNonce(2).
SetGasLimit(100000).
SetGasPrice(big.NewInt(10)).Build()
selp2, err := action.Sign(elp2, identityset.PrivateKey(0))
require.NoError(t, err)
actionMap := make(map[string][]action.SealedEnvelope)
actionMap[identityset.Address(0).String()] = []action.SealedEnvelope{selp1, selp2}
blk, err := bc.MintNewBlock(
actionMap,
testutil.TimestampNow(),
)
require.NoError(t, err)
require.Equal(t, 2, len(blk.Actions))
require.Equal(t, 1, len(blk.Receipts))
var gasConsumed uint64
for _, receipt := range blk.Receipts {
gasConsumed += receipt.GasConsumed
}
require.True(t, gasConsumed <= cfg.Genesis.BlockGasLimit)
}
func TestBlockchain_MintNewBlock_PopAccount(t *testing.T) {
ctx := context.Background()
cfg := config.Default
cfg.Genesis.EnableGravityChainVoting = false
registry := protocol.Registry{}
hu := config.NewHeightUpgrade(cfg)
acc := account.NewProtocol(hu)
require.NoError(t, registry.Register(account.ProtocolID, acc))
bc := NewBlockchain(cfg, nil, InMemStateFactoryOption(), InMemDaoOption(), RegistryOption(®istry))
rp := rolldpos.NewProtocol(cfg.Genesis.NumCandidateDelegates, cfg.Genesis.NumDelegates, cfg.Genesis.NumSubEpochs)
require.NoError(t, registry.Register(rolldpos.ProtocolID, rp))
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc))
exec := execution.NewProtocol(bc, hu)
require.NoError(t, registry.Register(execution.ProtocolID, exec))
bc.Validator().AddActionValidators(acc, exec)
bc.GetFactory().AddActionHandlers(acc, exec)
require.NoError(t, bc.Start(ctx))
defer func() {
require.NoError(t, bc.Stop(ctx))
}()
addr0 := identityset.Address(27).String()
priKey0 := identityset.PrivateKey(27)
addr1 := identityset.Address(28).String()
addr3 := identityset.Address(30).String()
priKey3 := identityset.PrivateKey(30)
require.NoError(t, addTestingTsfBlocks(bc, nil))
// test third block
bytes := []byte{}
for i := 0; i < 1000; i++ {
bytes = append(bytes, 1)
}
actionMap := make(map[string][]action.SealedEnvelope)
actions := make([]action.SealedEnvelope, 0)
for i := uint64(0); i < 300; i++ {
tsf, err := testutil.SignedTransfer(addr1, priKey0, i+7, big.NewInt(2), bytes,
1000000, big.NewInt(testutil.TestGasPriceInt64))
require.NoError(t, err)
actions = append(actions, tsf)
}
actionMap[addr0] = actions
transfer1, err := testutil.SignedTransfer(addr1, priKey3, 7, big.NewInt(2),
[]byte{}, 100000, big.NewInt(testutil.TestGasPriceInt64))
require.NoError(t, err)
actionMap[addr3] = []action.SealedEnvelope{transfer1}
blk, err := bc.MintNewBlock(
actionMap,
testutil.TimestampNow(),
)
require.NoError(t, err)
require.NotNil(t, blk)
require.Equal(t, 183, len(blk.Actions))
whetherInclude := false
for _, action := range blk.Actions {
if transfer1.Hash() == action.Hash() {
whetherInclude = true
break
}
}
require.True(t, whetherInclude)
}
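// MockSubscriber counts the transfer actions observed in committed blocks so
// tests can verify that block subscribers are notified on commit.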
type MockSubscriber struct {
counter int
mu sync.RWMutex
}
func (ms *MockSubscriber) HandleBlock(blk *block.Block) error {
ms.mu.Lock()
tsfs, _ := action.ClassifyActions(blk.Actions)
ms.counter += len(tsfs)
ms.mu.Unlock()
return nil
}
func (ms *MockSubscriber) Counter() int {
ms.mu.RLock()
defer ms.mu.RUnlock()
return ms.counter
}
func TestConstantinople(t *testing.T) {
testValidateBlockchain := func(cfg config.Config, t *testing.T) {
require := require.New(t)
ctx := context.Background()
// Create a blockchain from scratch
sf, err := factory.NewFactory(cfg, factory.DefaultTrieOption())
require.NoError(err)
hc := config.NewHeightUpgrade(cfg)
acc := account.NewProtocol(hc)
sf.AddActionHandlers(acc)
registry := protocol.Registry{}
require.NoError(registry.Register(account.ProtocolID, acc))
rp := rolldpos.NewProtocol(cfg.Genesis.NumCandidateDelegates, cfg.Genesis.NumDelegates, cfg.Genesis.NumSubEpochs)
require.NoError(registry.Register(rolldpos.ProtocolID, rp))
// create indexer
cfg.DB.DbPath = cfg.Chain.IndexDBPath
indexer, err := blockindex.NewIndexer(db.NewBoltDB(cfg.DB), cfg.Genesis.Hash())
require.NoError(err)
// create BlockDAO
cfg.DB.DbPath = cfg.Chain.ChainDBPath
dao := blockdao.NewBlockDAO(db.NewBoltDB(cfg.DB), indexer, cfg.Chain.CompressBlock, cfg.DB)
require.NotNil(dao)
bc := NewBlockchain(
cfg,
dao,
PrecreatedStateFactoryOption(sf),
RegistryOption(®istry),
)
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc))
exec := execution.NewProtocol(bc, hc)
require.NoError(registry.Register(execution.ProtocolID, exec))
bc.Validator().AddActionValidators(acc, exec)
sf.AddActionHandlers(exec)
require.NoError(bc.Start(ctx))
require.NoError(addCreatorToFactory(sf))
defer func() {
require.NoError(bc.Stop(ctx))
}()
require.NoError(addTestingConstantinopleBlocks(bc, dao))
hashTopic := []struct {
h hash.Hash256
blkHash string
topic []byte
}{
{
deployHash,
"d1ff0e7fe2a54600a171d3bcc9e222c656d584b3a0e7b33373e634de3f8cd010",
nil,
},
{
setHash,
"24667a8d9ca9f4d8c1bc651b9be205cc8422aca36dba8895aa39c50a8937be09",
setTopic,
},
{
shrHash,
"fd8ef98e94689d4a69fc828693dc931c48767b53dec717329bbac043c21fa78c",
shrTopic,
},
{
shlHash,
"77d0861e5e7164691c71fe5031087dda5ea20039bd096feaae9d8166bdf6a6a9",
shlTopic,
},
{
sarHash,
"7946fa90bd7c25f84bf83f727cc4589abc690d488ec8fa4f4af2ec9d19c71e74",
sarTopic,
},
{
extHash,
"0d35e9623375411f39c701ddf78f743abf3615f732977c01966a2fe359ae46f9",
extTopic,
},
{
crt2Hash,
"63f147cfecd0a58a9d6211886b53533cfe3ae57a539a2fecab05b27beab04e69",
crt2Topic,
},
}
// test getReceipt
for i := range hashTopic {
actHash := hashTopic[i].h
ai, err := indexer.GetActionIndex(actHash[:])
require.NoError(err)
r, err := dao.GetReceiptByActionHash(actHash, ai.BlockHeight())
require.NoError(err)
require.NotNil(r)
require.Equal(uint64(1), r.Status)
require.Equal(actHash, r.ActionHash)
require.Equal(uint64(i)+1, r.BlockHeight)
a, err := dao.GetActionByActionHash(actHash, ai.BlockHeight())
require.NoError(err)
require.NotNil(a)
require.Equal(actHash, a.Hash())
actIndex, err := indexer.GetActionIndex(actHash[:])
require.NoError(err)
blkHash, err := bc.GetHashByHeight(actIndex.BlockHeight())
require.NoError(err)
require.Equal(hashTopic[i].blkHash, hex.EncodeToString(blkHash[:]))
if hashTopic[i].topic != nil {
funcSig := hash.Hash256b([]byte("Set(uint256)"))
blk, err := bc.GetBlockByHeight(1 + uint64(i))
require.NoError(err)
f := blk.Header.LogsBloomfilter()
require.NotNil(f)
require.True(f.Exist(funcSig[:]))
require.True(f.Exist(hashTopic[i].topic))
}
}
// test getActions
addr27 := hash.BytesToHash160(identityset.Address(27).Bytes())
total, err := indexer.GetActionCountByAddress(addr27)
require.NoError(err)
require.EqualValues(7, total)
		actions, err := indexer.GetActionsByAddress(addr27, 0, total)
		require.NoError(err)
		require.EqualValues(total, len(actions))
for i := range actions {
require.Equal(hashTopic[i].h[:], actions[i])
}
}
cfg := config.Default
testTrieFile, _ := ioutil.TempFile(os.TempDir(), "trie")
testTriePath := testTrieFile.Name()
testDBFile, _ := ioutil.TempFile(os.TempDir(), "db")
testDBPath := testDBFile.Name()
testIndexFile, _ := ioutil.TempFile(os.TempDir(), "index")
testIndexPath := testIndexFile.Name()
defer func() {
testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
testutil.CleanupPath(t, testIndexPath)
// clear the gateway
delete(cfg.Plugins, config.GatewayPlugin)
}()
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
cfg.Chain.IndexDBPath = testIndexPath
cfg.Chain.ProducerPrivKey = "a000000000000000000000000000000000000000000000000000000000000000"
cfg.Genesis.EnableGravityChainVoting = false
cfg.Plugins[config.GatewayPlugin] = true
cfg.Chain.EnableAsyncIndexWrite = false
cfg.Genesis.AleutianBlockHeight = 2
t.Run("test Constantinople contract", func(t *testing.T) {
testValidateBlockchain(cfg, t)
})
}
func TestLoadBlockchainfromDB(t *testing.T) {
testValidateBlockchain := func(cfg config.Config, t *testing.T) {
require := require.New(t)
ctx := context.Background()
// Create a blockchain from scratch
sf, err := factory.NewFactory(cfg, factory.DefaultTrieOption())
require.NoError(err)
hu := config.NewHeightUpgrade(cfg)
acc := account.NewProtocol(hu)
sf.AddActionHandlers(acc)
registry := protocol.Registry{}
require.NoError(registry.Register(account.ProtocolID, acc))
rp := rolldpos.NewProtocol(cfg.Genesis.NumCandidateDelegates, cfg.Genesis.NumDelegates, cfg.Genesis.NumSubEpochs)
require.NoError(registry.Register(rolldpos.ProtocolID, rp))
var indexer blockindex.Indexer
if _, gateway := cfg.Plugins[config.GatewayPlugin]; gateway && !cfg.Chain.EnableAsyncIndexWrite {
// create indexer
cfg.DB.DbPath = cfg.Chain.IndexDBPath
indexer, err = blockindex.NewIndexer(db.NewBoltDB(cfg.DB), cfg.Genesis.Hash())
require.NoError(err)
}
// create BlockDAO
cfg.DB.DbPath = cfg.Chain.ChainDBPath
dao := blockdao.NewBlockDAO(db.NewBoltDB(cfg.DB), indexer, cfg.Chain.CompressBlock, cfg.DB)
require.NotNil(dao)
bc := NewBlockchain(
cfg,
dao,
PrecreatedStateFactoryOption(sf),
RegistryOption(®istry),
)
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc))
exec := execution.NewProtocol(bc, hu)
require.NoError(registry.Register(execution.ProtocolID, exec))
bc.Validator().AddActionValidators(acc, exec)
sf.AddActionHandlers(exec)
require.NoError(bc.Start(ctx))
require.NoError(addCreatorToFactory(sf))
ms := &MockSubscriber{counter: 0}
require.NoError(bc.AddSubscriber(ms))
require.Equal(0, ms.Counter())
height := bc.TipHeight()
fmt.Printf("Open blockchain pass, height = %d\n", height)
		require.NoError(addTestingTsfBlocks(bc, dao))
require.NoError(bc.Stop(ctx))
require.Equal(24, ms.Counter())
// Load a blockchain from DB
accountProtocol := account.NewProtocol(hu)
registry = protocol.Registry{}
require.NoError(registry.Register(account.ProtocolID, accountProtocol))
bc = NewBlockchain(
cfg,
dao,
PrecreatedStateFactoryOption(sf),
RegistryOption(®istry),
)
rolldposProtocol := rolldpos.NewProtocol(
genesis.Default.NumCandidateDelegates,
genesis.Default.NumDelegates,
genesis.Default.NumSubEpochs,
)
require.NoError(registry.Register(rolldpos.ProtocolID, rolldposProtocol))
rewardingProtocol := rewarding.NewProtocol(bc, rolldposProtocol)
require.NoError(registry.Register(rewarding.ProtocolID, rewardingProtocol))
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc))
bc.Validator().AddActionValidators(accountProtocol)
require.NoError(bc.Start(ctx))
defer func() {
require.NoError(bc.Stop(ctx))
}()
// verify block header hash
for i := uint64(1); i <= 5; i++ {
hash, err := bc.GetHashByHeight(i)
require.NoError(err)
height, err = bc.GetHeightByHash(hash)
require.NoError(err)
require.Equal(i, height)
header, err := bc.BlockHeaderByHash(hash)
require.NoError(err)
require.Equal(hash, header.HashBlock())
header, err = bc.BlockHeaderByHeight(height)
require.NoError(err)
require.Equal(height, header.Height())
// bloomfilter only exists after aleutian height
require.Equal(height >= cfg.Genesis.AleutianBlockHeight, header.LogsBloomfilter() != nil)
}
empblk, err := bc.GetBlockByHash(hash.ZeroHash256)
require.Nil(empblk)
		require.Error(err)
header, err := bc.BlockHeaderByHeight(60000)
require.Nil(header)
require.Error(err)
// add wrong blocks
h := bc.TipHeight()
blkhash := bc.TipHash()
header, err = bc.BlockHeaderByHeight(h)
require.NoError(err)
require.Equal(blkhash, header.HashBlock())
fmt.Printf("Current tip = %d hash = %x\n", h, blkhash)
// add block with wrong height
selp, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 1, big.NewInt(50), nil, genesis.Default.ActionGasLimit, big.NewInt(0))
require.NoError(err)
nblk, err := block.NewTestingBuilder().
SetHeight(h + 2).
SetPrevBlockHash(blkhash).
SetTimeStamp(testutil.TimestampNow()).
AddActions(selp).SignAndBuild(identityset.PrivateKey(29))
require.NoError(err)
err = bc.ValidateBlock(&nblk)
require.Error(err)
fmt.Printf("Cannot validate block %d: %v\n", header.Height(), err)
// add block with zero prev hash
selp2, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 1, big.NewInt(50), nil, genesis.Default.ActionGasLimit, big.NewInt(0))
require.NoError(err)
nblk, err = block.NewTestingBuilder().
SetHeight(h + 1).
SetPrevBlockHash(hash.ZeroHash256).
SetTimeStamp(testutil.TimestampNow()).
AddActions(selp2).SignAndBuild(identityset.PrivateKey(29))
require.NoError(err)
err = bc.ValidateBlock(&nblk)
require.Error(err)
fmt.Printf("Cannot validate block %d: %v\n", header.Height(), err)
// add existing block again will have no effect
blk, err := bc.GetBlockByHeight(3)
require.NotNil(blk)
require.NoError(err)
require.NoError(bc.(*blockchain).commitBlock(blk))
fmt.Printf("Cannot add block 3 again: %v\n", err)
// invalid address returns error
act, err := bc.StateByAddr("")
require.Equal("invalid bech32 string length 0", errors.Cause(err).Error())
require.Nil(act)
// valid but unused address should return empty account
act, err = bc.StateByAddr("io1066kus4vlyvk0ljql39fzwqw0k22h7j8wmef3n")
require.NoError(err)
require.Equal(uint64(0), act.Nonce)
require.Equal(big.NewInt(0), act.Balance)
_, gateway := cfg.Plugins[config.GatewayPlugin]
if gateway && !cfg.Chain.EnableAsyncIndexWrite {
// verify deployed contract
ai, err := indexer.GetActionIndex(deployHash[:])
require.NoError(err)
r, err := dao.GetReceiptByActionHash(deployHash, ai.BlockHeight())
require.NoError(err)
require.NotNil(r)
require.Equal(uint64(1), r.Status)
require.Equal(uint64(2), r.BlockHeight)
// 2 topics in block 3 calling set()
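		// Illustrative note: the first log topic of a non-anonymous Solidity
		// event is the hash of its signature string, so the test recomputes it
		// from "Set(uint256)" below and checks bloom filter membership
		// alongside the recorded data topics.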
funcSig := hash.Hash256b([]byte("Set(uint256)"))
blk, err := bc.GetBlockByHeight(3)
require.NoError(err)
f := blk.Header.LogsBloomfilter()
require.NotNil(f)
require.True(f.Exist(funcSig[:]))
require.True(f.Exist(setTopic))
// 3 topics in block 4 calling get()
funcSig = hash.Hash256b([]byte("Get(address,uint256)"))
blk, err = bc.GetBlockByHeight(4)
require.NoError(err)
f = blk.Header.LogsBloomfilter()
require.NotNil(f)
require.True(f.Exist(funcSig[:]))
require.True(f.Exist(setTopic))
require.True(f.Exist(getTopic))
// verify genesis block index
bi, err := indexer.GetBlockIndex(0)
require.NoError(err)
require.Equal(cfg.Genesis.Hash(), hash.BytesToHash256(bi.Hash()))
require.EqualValues(0, bi.NumAction())
require.Equal(big.NewInt(0), bi.TsfAmount())
for h := uint64(1); h <= 5; h++ {
// verify getting number of actions
blk, err = bc.GetBlockByHeight(h)
require.NoError(err)
blkIndex, err := indexer.GetBlockIndex(h)
require.NoError(err)
require.EqualValues(blkIndex.NumAction(), len(blk.Actions))
// verify getting transfer amount
tsfs, _ := action.ClassifyActions(blk.Actions)
tsfa := big.NewInt(0)
for _, tsf := range tsfs {
tsfa.Add(tsfa, tsf.Amount())
}
require.Equal(blkIndex.TsfAmount(), tsfa)
}
}
}
testTrieFile, _ := ioutil.TempFile(os.TempDir(), "trie")
testTriePath := testTrieFile.Name()
testDBFile, _ := ioutil.TempFile(os.TempDir(), "db")
testDBPath := testDBFile.Name()
testIndexFile, _ := ioutil.TempFile(os.TempDir(), "index")
testIndexPath := testIndexFile.Name()
defer func() {
testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
testutil.CleanupPath(t, testIndexPath)
}()
cfg := config.Default
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
cfg.Chain.IndexDBPath = testIndexPath
cfg.Genesis.EnableGravityChainVoting = false
t.Run("load blockchain from DB w/o explorer", func(t *testing.T) {
testValidateBlockchain(cfg, t)
})
testTrieFile, _ = ioutil.TempFile(os.TempDir(), "trie")
testTriePath2 := testTrieFile.Name()
testDBFile, _ = ioutil.TempFile(os.TempDir(), "db")
testDBPath2 := testDBFile.Name()
testIndexFile2, _ := ioutil.TempFile(os.TempDir(), "index")
testIndexPath2 := testIndexFile2.Name()
defer func() {
testutil.CleanupPath(t, testTriePath2)
testutil.CleanupPath(t, testDBPath2)
testutil.CleanupPath(t, testIndexPath2)
// clear the gateway
delete(cfg.Plugins, config.GatewayPlugin)
}()
cfg.Plugins[config.GatewayPlugin] = true
cfg.Chain.TrieDBPath = testTriePath2
cfg.Chain.ChainDBPath = testDBPath2
cfg.Chain.IndexDBPath = testIndexPath2
cfg.Chain.EnableAsyncIndexWrite = false
cfg.Genesis.AleutianBlockHeight = 3
t.Run("load blockchain from DB", func(t *testing.T) {
testValidateBlockchain(cfg, t)
})
}
func TestBlockchain_Validator(t *testing.T) {
cfg := config.Default
// disable account-based testing
cfg.Chain.TrieDBPath = ""
ctx := context.Background()
bc := NewBlockchain(cfg, nil, InMemDaoOption(), InMemStateFactoryOption())
require.NoError(t, bc.Start(ctx))
defer func() {
err := bc.Stop(ctx)
require.Nil(t, err)
}()
require.NotNil(t, bc)
val := bc.Validator()
require.NotNil(t, bc)
bc.SetValidator(val)
require.NotNil(t, bc.Validator())
}
func TestBlockchainInitialCandidate(t *testing.T) {
require := require.New(t)
testTrieFile, _ := ioutil.TempFile(os.TempDir(), "trie")
testTriePath := testTrieFile.Name()
testDBFile, _ := ioutil.TempFile(os.TempDir(), "db")
testDBPath := testDBFile.Name()
testIndexFile, _ := ioutil.TempFile(os.TempDir(), "index")
testIndexPath := testIndexFile.Name()
cfg := config.Default
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
cfg.Chain.IndexDBPath = testIndexPath
cfg.Consensus.Scheme = config.RollDPoSScheme
sf, err := factory.NewFactory(cfg, factory.DefaultTrieOption())
require.NoError(err)
accountProtocol := account.NewProtocol(config.NewHeightUpgrade(cfg))
sf.AddActionHandlers(accountProtocol)
registry := protocol.Registry{}
require.NoError(registry.Register(account.ProtocolID, accountProtocol))
bc := NewBlockchain(
cfg,
nil,
PrecreatedStateFactoryOption(sf),
BoltDBDaoOption(),
RegistryOption(®istry),
)
rolldposProtocol := rolldpos.NewProtocol(
genesis.Default.NumCandidateDelegates,
genesis.Default.NumDelegates,
genesis.Default.NumSubEpochs,
)
require.NoError(registry.Register(rolldpos.ProtocolID, rolldposProtocol))
rewardingProtocol := rewarding.NewProtocol(bc, rolldposProtocol)
require.NoError(registry.Register(rewarding.ProtocolID, rewardingProtocol))
require.NoError(registry.Register(poll.ProtocolID, poll.NewLifeLongDelegatesProtocol(cfg.Genesis.Delegates)))
require.NoError(bc.Start(context.Background()))
defer func() {
require.NoError(bc.Stop(context.Background()))
}()
candidate, err := sf.CandidatesByHeight(1)
require.NoError(err)
require.Equal(24, len(candidate))
}
func TestBlockchain_StateByAddr(t *testing.T) {
require := require.New(t)
cfg := config.Default
// disable account-based testing
// create chain
bc := NewBlockchain(cfg, nil, InMemDaoOption(), InMemStateFactoryOption())
require.NoError(bc.Start(context.Background()))
require.NotNil(bc)
_, err := bc.CreateState(identityset.Address(0).String(), big.NewInt(100))
require.NoError(err)
s, err := bc.StateByAddr(identityset.Address(0).String())
require.NoError(err)
require.Equal(uint64(0), s.Nonce)
require.Equal(big.NewInt(100), s.Balance)
require.Equal(hash.ZeroHash256, s.Root)
require.Equal([]byte(nil), s.CodeHash)
}
func TestBlocks(t *testing.T) {
	// This test is used for benchmarking block commit and verification
t.Skip()
require := require.New(t)
cfg := config.Default
testTrieFile, _ := ioutil.TempFile(os.TempDir(), "trie")
testTriePath := testTrieFile.Name()
testDBFile, _ := ioutil.TempFile(os.TempDir(), "db")
testDBPath := testDBFile.Name()
testIndexFile, _ := ioutil.TempFile(os.TempDir(), "index")
testIndexPath := testIndexFile.Name()
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
cfg.Chain.IndexDBPath = testIndexPath
sf, _ := factory.NewFactory(cfg, factory.InMemTrieOption())
// Create a blockchain from scratch
bc := NewBlockchain(cfg, nil, PrecreatedStateFactoryOption(sf), BoltDBDaoOption())
require.NoError(bc.Start(context.Background()))
defer func() {
require.NoError(bc.Stop(context.Background()))
}()
require.NoError(addCreatorToFactory(sf))
a := identityset.Address(28).String()
priKeyA := identityset.PrivateKey(28)
c := identityset.Address(29).String()
ws, err := sf.NewWorkingSet()
require.NoError(err)
_, err = accountutil.LoadOrCreateAccount(ws, a, big.NewInt(100000))
require.NoError(err)
_, err = accountutil.LoadOrCreateAccount(ws, c, big.NewInt(100000))
require.NoError(err)
gasLimit := testutil.TestGasLimit
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
Producer: identityset.Address(27),
GasLimit: gasLimit,
})
_, err = ws.RunActions(ctx, 0, nil)
require.NoError(err)
require.NoError(sf.Commit(ws))
for i := 0; i < 10; i++ {
actionMap := make(map[string][]action.SealedEnvelope)
actionMap[a] = []action.SealedEnvelope{}
for i := 0; i < 1000; i++ {
tsf, err := testutil.SignedTransfer(c, priKeyA, 1, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
require.NoError(err)
actionMap[a] = append(actionMap[a], tsf)
}
blk, _ := bc.MintNewBlock(
actionMap,
testutil.TimestampNow(),
)
require.Nil(bc.ValidateBlock(blk))
require.Nil(bc.CommitBlock(blk))
}
}
func TestActions(t *testing.T) {
	// This test is used for benchmarking block verification
t.Skip()
require := require.New(t)
cfg := config.Default
testTrieFile, _ := ioutil.TempFile(os.TempDir(), "trie")
testTriePath := testTrieFile.Name()
testDBFile, _ := ioutil.TempFile(os.TempDir(), "db")
testDBPath := testDBFile.Name()
testIndexFile, _ := ioutil.TempFile(os.TempDir(), "index")
testIndexPath := testIndexFile.Name()
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
cfg.Chain.IndexDBPath = testIndexPath
sf, _ := factory.NewFactory(cfg, factory.InMemTrieOption())
// Create a blockchain from scratch
bc := NewBlockchain(cfg, nil, PrecreatedStateFactoryOption(sf), BoltDBDaoOption())
require.NoError(bc.Start(context.Background()))
defer func() {
require.NoError(bc.Stop(context.Background()))
}()
require.NoError(addCreatorToFactory(sf))
a := identityset.Address(28).String()
priKeyA := identityset.PrivateKey(28)
c := identityset.Address(29).String()
ws, err := sf.NewWorkingSet()
require.NoError(err)
_, err = accountutil.LoadOrCreateAccount(ws, a, big.NewInt(100000))
require.NoError(err)
_, err = accountutil.LoadOrCreateAccount(ws, c, big.NewInt(100000))
require.NoError(err)
gasLimit := testutil.TestGasLimit
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
Producer: identityset.Address(27),
GasLimit: gasLimit,
})
_, err = ws.RunActions(ctx, 0, nil)
require.NoError(err)
require.NoError(sf.Commit(ws))
val := &validator{sf: sf, validatorAddr: ""}
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc))
bc.Validator().AddActionValidators(account.NewProtocol(config.NewHeightUpgrade(cfg)))
actionMap := make(map[string][]action.SealedEnvelope)
for i := 0; i < 5000; i++ {
tsf, err := testutil.SignedTransfer(c, priKeyA, 1, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
require.NoError(err)
actionMap[a] = append(actionMap[a], tsf)
tsf2, err := testutil.SignedTransfer(a, priKeyA, 1, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
require.NoError(err)
actionMap[a] = append(actionMap[a], tsf2)
}
blk, _ := bc.MintNewBlock(
actionMap,
testutil.TimestampNow(),
)
require.Nil(val.Validate(blk, 0, blk.PrevHash()))
}
func addCreatorToFactory(sf factory.Factory) error {
ws, err := sf.NewWorkingSet()
if err != nil {
return err
}
if _, err = accountutil.LoadOrCreateAccount(
ws,
identityset.Address(27).String(),
unit.ConvertIotxToRau(10000000000),
); err != nil {
return err
}
gasLimit := testutil.TestGasLimit
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
Producer: identityset.Address(27),
GasLimit: gasLimit,
})
if _, err = ws.RunActions(ctx, 0, nil); err != nil {
return err
}
return sf.Commit(ws)
}
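
// Reviewer note (illustrative sketch, not part of the original file): the
// `wsl` lint message cited in this record fires when an assignment is cuddled
// directly beneath an unrelated statement, e.g.
//
//	require.NoError(sf.Commit(ws))
//	h := bc.TipHeight() // wsl: assignments should only be cuddled with other assignments
//
// and is silenced by separating the assignment with a blank line or grouping
// it with other assignments.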
| 1 | 19,433 | assignments should only be cuddled with other assignments (from `wsl`) | iotexproject-iotex-core | go |
@@ -54,3 +54,11 @@ func (c *Cache) Delete(key interface{}) error {
c.values.Delete(key)
return nil
}
+
+func (c *Cache) GetAll() ([]interface{}, error) {
+ return nil, cache.ErrUnimplemented
+}
+
+func (c *Cache) PutHash(k interface{}, v interface{}) error {
+ return cache.ErrUnimplemented
+} | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package memorycache
import (
"sync"
"github.com/pipe-cd/pipe/pkg/cache"
"github.com/pipe-cd/pipe/pkg/cache/cachemetrics"
)
type Cache struct {
values sync.Map
}
func NewCache() *Cache {
return &Cache{}
}
func (c *Cache) Get(key interface{}) (interface{}, error) {
item, ok := c.values.Load(key)
if !ok {
cachemetrics.IncGetOperationCounter(
cachemetrics.LabelSourceInmemory,
cachemetrics.LabelStatusMiss,
)
return nil, cache.ErrNotFound
}
cachemetrics.IncGetOperationCounter(
cachemetrics.LabelSourceInmemory,
cachemetrics.LabelStatusHit,
)
return item, nil
}
func (c *Cache) Put(key interface{}, value interface{}) error {
c.values.Store(key, value)
return nil
}
func (c *Cache) Delete(key interface{}) error {
c.values.Delete(key)
return nil
}
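
// Reviewer-suggested follow-up (hypothetical sketch, not part of the original
// file): since the stubbed PutHash ignores its parameters, Go convention is
// the blank identifier rather than a named-but-unused parameter:
//
//	func (c *Cache) PutHash(_ interface{}, _ interface{}) error {
//		return cache.ErrUnimplemented
//	}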
| 1 | 17,836 | `k` is unused in PutHash | pipe-cd-pipe | go |
@@ -39,7 +39,7 @@ interface StatementsSource extends FileSource
public function isStatic(): bool;
- public function getSource(): ?StatementsSource;
+ public function getSource(): StatementsSource;
public function getCodebase() : Codebase;
| 1 | <?php
namespace Psalm;
interface StatementsSource extends FileSource
{
public function getNamespace(): ?string;
/**
* @return array<string, string>
*/
public function getAliasedClassesFlipped(): array;
/**
* @return array<string, string>
*/
public function getAliasedClassesFlippedReplaceable(): array;
public function getFQCLN(): ?string;
public function getClassName(): ?string;
public function getParentFQCLN(): ?string;
/**
* @return array<string, array<string, array{Type\Union}>>|null
*/
public function getTemplateTypeMap(): ?array;
/**
* @return void
*/
public function setRootFilePath(string $file_path, string $file_name);
public function hasParentFilePath(string $file_path): bool;
public function hasAlreadyRequiredFilePath(string $file_path): bool;
public function getRequireNesting(): int;
public function isStatic(): bool;
public function getSource(): ?StatementsSource;
public function getCodebase() : Codebase;
/**
* Get a list of suppressed issues
*
* @return array<string>
*/
public function getSuppressedIssues(): array;
/**
* @param array<int, string> $new_issues
*
* @return void
*/
public function addSuppressedIssues(array $new_issues);
/**
* @param array<int, string> $new_issues
*
* @return void
*/
public function removeSuppressedIssues(array $new_issues);
public function getNodeTypeProvider() : NodeTypeProvider;
}
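
// Illustrative note (not part of the original file): narrowing getSource()
// from ?StatementsSource to StatementsSource lets callers chain safely, e.g.
// $statements_source->getSource()->getSuppressedIssues() no longer needs a
// preceding null guard.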
| 1 | 9,164 | None of the child return null here, plus it was creating an incoherence between interfaces. | vimeo-psalm | php |
@@ -1076,7 +1076,7 @@ pony_ctx_t* ponyint_sched_init(uint32_t threads, bool noyield, bool nopin,
// If minimum thread count is > thread count, cap it at thread count
if(min_threads > threads)
- min_threads = threads;
+ min_threads = threads; // this becomes the equivalent of --ponynoscale
// convert to cycles for use with ponyint_cpu_tick()
// 1 second = 2000000000 cycles (approx.) | 1 | #define PONY_WANT_ATOMIC_DEFS
#include "scheduler.h"
#include "cpu.h"
#include "mpmcq.h"
#include "../actor/actor.h"
#include "../gc/cycle.h"
#include "../asio/asio.h"
#include "../mem/pagemap.h"
#include "../mem/pool.h"
#include "ponyassert.h"
#include <dtrace.h>
#include <string.h>
#include "mutemap.h"
#define PONY_SCHED_BLOCK_THRESHOLD 1000000
static DECLARE_THREAD_FN(run_thread);
typedef enum
{
SCHED_BLOCK = 20,
SCHED_UNBLOCK = 21,
SCHED_CNF = 30,
SCHED_ACK,
SCHED_TERMINATE = 40,
SCHED_SUSPEND = 41,
SCHED_UNMUTE_ACTOR = 50,
SCHED_NOISY_ASIO = 51,
SCHED_UNNOISY_ASIO = 52
} sched_msg_t;
// Scheduler global data.
static uint64_t last_cd_tsc;
static uint32_t scheduler_count;
static uint32_t min_scheduler_count;
static uint64_t scheduler_suspend_threshold;
static PONY_ATOMIC(uint32_t) active_scheduler_count;
static PONY_ATOMIC(uint32_t) active_scheduler_count_check;
static scheduler_t* scheduler;
static PONY_ATOMIC(bool) detect_quiescence;
static bool use_yield;
static mpmcq_t inject;
static __pony_thread_local scheduler_t* this_scheduler;
#if defined(USE_SCHEDULER_SCALING_PTHREADS)
static pthread_mutex_t sched_mut;
static pthread_once_t sched_mut_once = PTHREAD_ONCE_INIT;
void sched_mut_init()
{
pthread_mutex_init(&sched_mut, NULL);
}
#else
static PONY_ATOMIC(bool) scheduler_count_changing;
#endif
#ifdef USE_MEMTRACK
// holds only size of pthread_cond variables and scheduler_t array
static size_t mem_allocated;
static size_t mem_used;
/** Get the static memory used by the scheduler subsystem.
*/
size_t ponyint_sched_static_mem_size()
{
return mem_used;
}
/** Get the static memory allocated by the scheduler subsystem.
*/
size_t ponyint_sched_static_alloc_size()
{
return mem_allocated;
}
size_t ponyint_sched_total_mem_size(pony_ctx_t* ctx)
{
return
// memory used for each actor struct
// + memory used for actormaps for gc acquire/release messages
ctx->mem_used_actors
// memory used for mutemap
+ ctx->mem_used;
}
size_t ponyint_sched_total_alloc_size(pony_ctx_t* ctx)
{
return
// memory allocated for each actor struct
// + memory allocated for actormaps for gc acquire/release messages
ctx->mem_allocated_actors
// memory allocated for mutemap
+ ctx->mem_allocated;
}
#endif
/**
* Gets the current active scheduler count
*/
static uint32_t get_active_scheduler_count()
{
return atomic_load_explicit(&active_scheduler_count, memory_order_relaxed);
}
/**
* Gets the current active scheduler count check
*/
static uint32_t get_active_scheduler_count_check()
{
return atomic_load_explicit(&active_scheduler_count_check, memory_order_relaxed);
}
/**
* Gets the next actor from the scheduler queue.
*/
static pony_actor_t* pop(scheduler_t* sched)
{
return (pony_actor_t*)ponyint_mpmcq_pop(&sched->q);
}
/**
* Puts an actor on the scheduler queue.
*/
static void push(scheduler_t* sched, pony_actor_t* actor)
{
ponyint_mpmcq_push_single(&sched->q, actor);
}
/**
* Handles the global queue and then pops from the local queue
*/
static pony_actor_t* pop_global(scheduler_t* sched)
{
pony_actor_t* actor = (pony_actor_t*)ponyint_mpmcq_pop(&inject);
if(actor != NULL)
return actor;
if (sched == NULL)
return NULL;
else
return pop(sched);
}
/**
* Sends a message to a thread.
*/
static void send_msg(uint32_t from, uint32_t to, sched_msg_t msg, intptr_t arg)
{
pony_msgi_t* m = (pony_msgi_t*)pony_alloc_msg(
POOL_INDEX(sizeof(pony_msgi_t)), msg);
#ifdef USE_MEMTRACK_MESSAGES
this_scheduler->ctx.num_messages--;
this_scheduler->ctx.mem_used_messages += sizeof(pony_msgi_t);
this_scheduler->ctx.mem_used_messages -= POOL_ALLOC_SIZE(pony_msgi_t);
#endif
m->i = arg;
ponyint_thread_messageq_push(&scheduler[to].mq, &m->msg, &m->msg
#ifdef USE_DYNAMIC_TRACE
, from, to
#endif
);
(void)from;
}
static void send_msg_all_active(uint32_t from, sched_msg_t msg, intptr_t arg)
{
uint32_t current_active_scheduler_count = get_active_scheduler_count();
for(uint32_t i = 0; i < current_active_scheduler_count; i++)
send_msg(from, i, msg, arg);
}
static void send_msg_all(uint32_t from, sched_msg_t msg, intptr_t arg)
{
for(uint32_t i = 0; i < scheduler_count; i++)
send_msg(from, i, msg, arg);
}
static void signal_suspended_threads(uint32_t sched_count, int32_t curr_sched_id)
{
for(uint32_t i = 0; i < sched_count; i++)
{
if((int32_t)i != curr_sched_id)
ponyint_thread_wake(scheduler[i].tid, scheduler[i].sleep_object);
}
}
static void wake_suspended_threads(int32_t current_scheduler_id)
{
uint32_t current_active_scheduler_count = get_active_scheduler_count();
// wake up any sleeping threads
while ((current_active_scheduler_count = get_active_scheduler_count()) < scheduler_count)
{
#if defined(USE_SCHEDULER_SCALING_PTHREADS)
// acquire mutex if using pthreads
if(!pthread_mutex_lock(&sched_mut))
#else
// get the bool that controls modifying the active scheduler count variable
// if using signals
if(!atomic_load_explicit(&scheduler_count_changing, memory_order_relaxed)
&& !atomic_exchange_explicit(&scheduler_count_changing, true,
memory_order_acquire))
#endif
{
// in case the count changed between the while check and now
current_active_scheduler_count = get_active_scheduler_count();
if(current_active_scheduler_count < scheduler_count)
{
// set active_scheduler_count to wake all schedulers
current_active_scheduler_count = scheduler_count;
atomic_store_explicit(&active_scheduler_count, current_active_scheduler_count,
memory_order_relaxed);
}
#if !defined(USE_SCHEDULER_SCALING_PTHREADS)
// unlock the bool that controls modifying the active scheduler count
// variable if using signals.
atomic_store_explicit(&scheduler_count_changing, false,
memory_order_release);
#endif
// send signals to all scheduler threads that should be awake
// this is somewhat wasteful if a scheduler thread is already awake
// NOTE: this intentionally allows for the case where some scheduler
// threads might miss the signal and not wake up. That is handled in
// the following while loop
signal_suspended_threads(current_active_scheduler_count, current_scheduler_id);
#if defined(USE_SCHEDULER_SCALING_PTHREADS)
// unlock mutex if using pthreads
pthread_mutex_unlock(&sched_mut);
#endif
}
// wait for sleeping threads to wake and update check variable
while (get_active_scheduler_count() != get_active_scheduler_count_check())
{
// send signals to all scheduler threads that should be awake
// this is somewhat wasteful if a scheduler thread is already awake
// but is necessary in case the signal to wake a thread was missed
// NOTE: this intentionally allows for the case where some scheduler
      // threads might miss the signal and not wake up. That is handled
      // by a combination of the check variable and this while loop
signal_suspended_threads(current_active_scheduler_count, current_scheduler_id);
}
}
}
// start cnf/ack cycle for quiescence if block count >= active_scheduler_count
static void maybe_start_cnf_ack_cycle(scheduler_t* sched)
{
if(atomic_load_explicit(&detect_quiescence, memory_order_relaxed) &&
(sched->block_count >= get_active_scheduler_count()))
{
// reset ack token count to 0 because dynamic scheduler scaling means
// that a new thread can wake up changing active_scheduler_count and
// then block causing block_count >= active_scheduler_count for a
// second time and if we don't reset, we can think we've received
// enough acks when we really haven't
sched->ack_token++;
sched->ack_count = 0;
// If we think all threads are blocked, send CNF(token) to everyone.
send_msg_all_active(sched->index, SCHED_CNF, sched->ack_token);
}
}
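
// Illustrative summary of the CNF/ACK cycle started above: scheduler 0
// broadcasts SCHED_CNF(token) to all active schedulers; each one echoes
// SCHED_ACK(token) back to scheduler 0 (see read_msg); once ack_count reaches
// the active scheduler count (checked in quiescent), the runtime may stop the
// ASIO backend and terminate.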
// handle SCHED_BLOCK message
static void handle_sched_block(scheduler_t* sched)
{
sched->block_count++;
maybe_start_cnf_ack_cycle(sched);
}
// handle SCHED_UNBLOCK message
static void handle_sched_unblock(scheduler_t* sched)
{
// Cancel all acks and increment the ack token, so that any pending
// acks in the queue will be dropped when they are received.
sched->block_count--;
sched->ack_token++;
sched->ack_count = 0;
}
static bool read_msg(scheduler_t* sched)
{
pony_msgi_t* m;
bool run_queue_changed = false;
while((m = (pony_msgi_t*)ponyint_thread_messageq_pop(&sched->mq
#ifdef USE_DYNAMIC_TRACE
, sched->index
#endif
)) != NULL)
{
#ifdef USE_MEMTRACK_MESSAGES
sched->ctx.num_messages--;
sched->ctx.mem_used_messages -= sizeof(pony_msgi_t);
sched->ctx.mem_allocated_messages -= POOL_ALLOC_SIZE(pony_msgi_t);
#endif
switch(m->msg.id)
{
case SCHED_SUSPEND:
{
maybe_start_cnf_ack_cycle(sched);
break;
}
case SCHED_BLOCK:
{
handle_sched_block(sched);
break;
}
case SCHED_UNBLOCK:
{
handle_sched_unblock(sched);
break;
}
case SCHED_CNF:
{
// Echo the token back as ACK(token).
send_msg(sched->index, 0, SCHED_ACK, m->i);
break;
}
case SCHED_ACK:
{
// If it's the current token, increment the ack count.
if(m->i == sched->ack_token)
sched->ack_count++;
break;
}
case SCHED_TERMINATE:
{
sched->terminate = true;
break;
}
case SCHED_UNMUTE_ACTOR:
{
if (ponyint_sched_unmute_senders(&sched->ctx, (pony_actor_t*)m->i))
run_queue_changed = true;
break;
}
case SCHED_NOISY_ASIO:
{
// mark asio as being noisy
sched->asio_noisy = true;
break;
}
case SCHED_UNNOISY_ASIO:
{
// mark asio as not being noisy
sched->asio_noisy = false;
break;
}
default: {}
}
}
return run_queue_changed;
}
/**
* If we can terminate, return true. If all schedulers are waiting, one of
* them will stop the ASIO back end and tell the cycle detector to try to
* terminate.
*/
static bool quiescent(scheduler_t* sched, uint64_t tsc, uint64_t tsc2)
{
if(sched->terminate)
return true;
uint32_t current_active_scheduler_count = get_active_scheduler_count();
if(sched->ack_count >= current_active_scheduler_count)
{
// mark last cycle detector tsc as something huge to ensure
// cycle detector will not get triggered
// this is required to ensure scheduler queues are empty
// upon termination
uint64_t saved_last_cd_tsc = last_cd_tsc;
last_cd_tsc = -1;
if(sched->asio_stoppable && ponyint_asio_stop())
{
// successfully stopped ASIO thread
// tell all scheduler threads to terminate
send_msg_all(sched->index, SCHED_TERMINATE, 0);
wake_suspended_threads(sched->index);
sched->ack_token++;
sched->ack_count = 0;
} else if(ponyint_asio_stoppable()) {
sched->asio_stoppable = true;
sched->ack_token++;
sched->ack_count = 0;
// Run another CNF/ACK cycle.
send_msg_all_active(sched->index, SCHED_CNF, sched->ack_token);
// restore last cycle detector tsc to re-enable cycle detector
// triggering
last_cd_tsc = saved_last_cd_tsc;
} else {
// ASIO is not stoppable
sched->asio_stoppable = false;
// restore last cycle detector tsc to re-enable cycle detector
// triggering
last_cd_tsc = saved_last_cd_tsc;
}
}
ponyint_cpu_core_pause(tsc, tsc2, use_yield);
return false;
}
static scheduler_t* choose_victim(scheduler_t* sched)
{
scheduler_t* victim = sched->last_victim;
while(true)
{
// Schedulers are laid out sequentially in memory
// Back up one.
victim--;
uint32_t current_active_scheduler_count = get_active_scheduler_count();
if(victim < scheduler)
// victim is before the first scheduler location
// wrap around to the end.
victim = &scheduler[current_active_scheduler_count - 1];
if((victim == sched->last_victim) || (current_active_scheduler_count == 1))
{
// If we have tried all possible victims, return no victim. Set our last
// victim to ourself to indicate we've started over.
sched->last_victim = sched;
break;
}
// Don't try to steal from ourself.
if(victim == sched)
continue;
// Record that this is our victim and return it.
sched->last_victim = victim;
return victim;
}
return NULL;
}
/**
* Suspend this thread for some time, including no sleep at all if
* pop_global() can give us an actor immediately.
*
 * WARNING: suspend_scheduler must be called in a critical section
* protected by sched_mut/scheduler_count_changing,
* and we return with that mechanism:
* * Pthreads: locked, because pthread_thread_suspend() does
* not permit a choice to avoid reacquiring the mutex.
* * Non-Pthreads: unlocked, because after the suspend,
 *                 we only need to reacquire in the special case of
* sched->index==0.
*/
static pony_actor_t* suspend_scheduler(scheduler_t* sched,
uint32_t current_active_scheduler_count)
{
pony_actor_t* actor = NULL;
// decrement active_scheduler_count so other schedulers know we're
// sleeping
uint32_t sched_count = get_active_scheduler_count();
// make sure the scheduler count didn't change
// if it did, then another thread resumed and it may not be
// appropriate for us to suspend any longer, so don't suspend
if(sched_count != current_active_scheduler_count) {
#if !defined(USE_SCHEDULER_SCALING_PTHREADS)
atomic_store_explicit(&scheduler_count_changing, false,
memory_order_release);
#endif
return actor;
}
atomic_store_explicit(&active_scheduler_count, sched_count - 1,
memory_order_relaxed);
// decrement active_scheduler_count_check
uint32_t sched_count_check = get_active_scheduler_count_check();
atomic_store_explicit(&active_scheduler_count_check, sched_count_check - 1,
memory_order_relaxed);
// ensure main active scheduler count and check variable match
pony_assert(sched_count == sched_count_check);
#if !defined(USE_SCHEDULER_SCALING_PTHREADS)
// unlock the bool that controls modifying the active scheduler count
// variable if using signals
atomic_store_explicit(&scheduler_count_changing, false,
memory_order_release);
#endif
// let sched 0 know we're suspending only after decrementing
// active_scheduler_count to avoid a race condition between
// when we update active_scheduler_count and scheduler 0 processes
// the SCHED_SUSPEND message we send it. If we don't do this,
// and scheduler 0 processes the SCHED_SUSPEND message before we
// decrement active_scheduler_count, it could think that
// active_scheduler_count > block_count and not start the CNF/ACK
  // process for termination and potentially hang the runtime instead
// of allowing it to reach quiescence.
if(sched->index != 0)
send_msg(sched->index, 0, SCHED_SUSPEND, 0);
// dtrace suspend notification
DTRACE1(THREAD_SUSPEND, (uintptr_t)sched);
while(get_active_scheduler_count() <= (uint32_t)sched->index)
{
// if we're scheduler 0 with noisy actors check to make
// sure inject queue is empty to avoid race condition
// between thread 0 sleeping and the ASIO thread getting a
// new event
if(sched->index == 0)
{
actor = pop_global(NULL);
if(actor != NULL)
break;
if(read_msg(sched))
{
// An actor was unmuted and added to our run queue. Pop it and return.
// Effectively, we are "stealing" from ourselves. We need to verify that
// popping succeeded (actor != NULL) as some other scheduler might have
// stolen the newly scheduled actor from us already. Schedulers, what a
// bunch of thieving bastards!
actor = pop_global(sched);
if(actor != NULL)
break;
}
// if ASIO is no longer noisy due to reading a message from the ASIO
// thread
if(!sched->asio_noisy)
break;
}
// sleep waiting for signal to wake up again
#if defined(USE_SCHEDULER_SCALING_PTHREADS)
ponyint_thread_suspend(sched->sleep_object, &sched_mut);
#else
ponyint_thread_suspend(sched->sleep_object);
#endif
}
// dtrace resume notification
DTRACE1(THREAD_RESUME, (uintptr_t)sched);
#if !defined(USE_SCHEDULER_SCALING_PTHREADS)
// When using signals, need to acquire sched count changing variable
while (true)
{
// get the bool that controls modifying the active scheduler
// count variable if using signals
if(!atomic_load_explicit(&scheduler_count_changing, memory_order_relaxed)
&& !atomic_exchange_explicit(&scheduler_count_changing, true,
memory_order_acquire))
{
#endif
// get active_scheduler_count
sched_count = get_active_scheduler_count();
// make sure active_scheduler_count == 1 if it is 0
// and we're scheduler 0 with noisy actors
// and we just pulled an actor off the inject queue
// or the ASIO is no longer noisy
// and we broke out of the earlier loop before suspending
// to return the actor
if(sched_count == 0)
{
// set active_scheduler_count to 1
sched_count = 1;
atomic_store_explicit(&active_scheduler_count,
sched_count, memory_order_relaxed);
}
// increment active_scheduler_count_check
sched_count_check = get_active_scheduler_count_check();
atomic_store_explicit(&active_scheduler_count_check,
sched_count_check + 1, memory_order_relaxed);
// ensure main active scheduler count and check variable match
// pony_assert(sched_count == sched_count_check);
#if !defined(USE_SCHEDULER_SCALING_PTHREADS)
// unlock the bool that controls modifying the active scheduler count
// variable if using signals
atomic_store_explicit(&scheduler_count_changing, false,
memory_order_release);
// break while loop
break;
}
}
#endif
return actor;
}
static pony_actor_t* perhaps_suspend_scheduler(
scheduler_t* sched, uint32_t current_active_scheduler_count,
bool* block_sent, uint32_t* steal_attempts, bool sched_is_blocked)
{
// if we're the highest active scheduler thread
// and there are more active schedulers than the minimum requested
// and we're not terminating
  // and active scheduler count matches the check variable indicating all
// threads that should be awake are awake
if ((current_active_scheduler_count > min_scheduler_count)
&& (sched == &scheduler[current_active_scheduler_count - 1])
&& (!sched->terminate)
&& (current_active_scheduler_count == get_active_scheduler_count_check())
#if defined(USE_SCHEDULER_SCALING_PTHREADS)
// try to acquire mutex if using pthreads
&& !pthread_mutex_trylock(&sched_mut)
#else
// try and get the bool that controls modifying the active scheduler count
// variable if using signals
&& (!atomic_load_explicit(&scheduler_count_changing, memory_order_relaxed)
&& !atomic_exchange_explicit(&scheduler_count_changing, true,
memory_order_acquire))
#endif
)
{
pony_actor_t* actor = NULL;
// can only sleep if we're scheduler > 0 or if we're scheduler 0 and
// there is at least one noisy actor registered
if((sched->index > 0) || ((sched->index == 0) && sched->asio_noisy))
{
if (!sched_is_blocked)
{
// unblock before suspending to ensure cnf/ack cycle works as expected
if(sched->index == 0)
handle_sched_unblock(sched);
else
send_msg(sched->index, 0, SCHED_UNBLOCK, 0);
*block_sent = false;
}
actor = suspend_scheduler(sched, current_active_scheduler_count);
// reset steal_attempts so we try to steal from all other schedulers
// prior to suspending again
*steal_attempts = 0;
}
else
{
pony_assert(sched->index == 0);
pony_assert(!sched->asio_noisy);
#if !defined(USE_SCHEDULER_SCALING_PTHREADS)
// suspend_scheduler() would have unlocked for us,
// but we didn't call it, so unlock now.
atomic_store_explicit(&scheduler_count_changing, false,
memory_order_release);
#endif
if (sched_is_blocked)
{
// send block message if there are no noisy actors registered
// with the ASIO thread and this is scheduler 0
handle_sched_block(sched);
*block_sent = true;
}
}
#if defined(USE_SCHEDULER_SCALING_PTHREADS)
// unlock mutex if using pthreads
pthread_mutex_unlock(&sched_mut);
#endif
if(actor != NULL)
return actor;
}
return NULL;
}
/**
* Use mpmcqs to allow stealing directly from a victim, without waiting for a
* response.
*/
static pony_actor_t* steal(scheduler_t* sched)
{
bool block_sent = false;
uint32_t steal_attempts = 0;
uint64_t tsc = ponyint_cpu_tick();
pony_actor_t* actor;
scheduler_t* victim = NULL;
while(true)
{
victim = choose_victim(sched);
actor = pop_global(victim);
if(actor != NULL)
break;
uint64_t tsc2 = ponyint_cpu_tick();
if(read_msg(sched))
{
// An actor was unmuted and added to our run queue. Pop it and return.
// Effectively, we are "stealing" from ourselves. We need to verify that
// popping succeeded (actor != NULL) as some other scheduler might have
// stolen the newly scheduled actor from us already. Schedulers, what a
// bunch of thieving bastards!
actor = pop_global(sched);
if(actor != NULL)
break;
}
if(quiescent(sched, tsc, tsc2))
{
DTRACE2(WORK_STEAL_FAILURE, (uintptr_t)sched, (uintptr_t)victim);
return NULL;
}
// Determine if we are blocked.
//
// Note, "blocked" means we have no more work to do and we believe that we
// should check to see if we can terminate the program.
//
// To be blocked, we have to:
//
// 1. Not have any noisy actors registered with the ASIO thread/subsystem.
// If we have any noisy actors then, while we might not have any work
// to do, we aren't blocked. Blocked means we can't make forward
// progress and the program might be ready to terminate. Noisy actors
    //    means that no, the program isn't ready to terminate because one of
    //    the noisy actors could receive a message from an external source (timer,
// network, etc).
// 2. Not have any muted actors. If we are holding any muted actors then,
// while we might not have any work to do, we aren't blocked. Blocked
// means we can't make forward progress and the program might be ready
// to terminate. Muted actors means that no, the program isn't ready
// to terminate.
// 3. We have attempted to steal from every other scheduler and failed to
// get any work. In the process of stealing from every other scheduler,
// we will have also tried getting work off the ASIO inject queue
// multiple times
// 4. We've been trying to steal for at least PONY_SCHED_BLOCK_THRESHOLD
// cycles (currently 1000000).
    //    In many work stealing scenarios, we immediately steal an actor.
// Sending a block/unblock pair in that scenario is very wasteful.
// Same applies to other "quick" steal scenarios.
// 1 million cycles is roughly 1 millisecond, depending on clock speed.
// By waiting 1 millisecond before sending a block message, we are going to
// delay quiescence by a small amount of time but also optimize work
// stealing for generating far fewer block/unblock messages.
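    // Rough pseudocode (illustrative summary of the conditions above):
    //
    //   if (!block_sent && steal_attempts >= active_schedulers
    //       && clocks_elapsed > PONY_SCHED_BLOCK_THRESHOLD
    //       && mute_map_is_empty && !asio_noisy)
    //     send SCHED_BLOCK to scheduler 0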
uint32_t current_active_scheduler_count = get_active_scheduler_count();
uint64_t clocks_elapsed = tsc2 - tsc;
if (!block_sent)
{
// make sure thread scaling order is still valid. we should never be
// active if the active_scheduler_count isn't larger than our index.
pony_assert(current_active_scheduler_count > (uint32_t)sched->index);
if (steal_attempts < current_active_scheduler_count)
{
steal_attempts++;
}
else if ((clocks_elapsed > PONY_SCHED_BLOCK_THRESHOLD) &&
(ponyint_mutemap_size(&sched->mute_mapping) == 0))
{
// only try and suspend if enough time has passed
if(clocks_elapsed > scheduler_suspend_threshold)
{
// in case active scheduler count changed
current_active_scheduler_count = get_active_scheduler_count();
actor = perhaps_suspend_scheduler(sched, current_active_scheduler_count,
&block_sent, &steal_attempts, true);
if (actor != NULL)
break;
}
if(!sched->asio_noisy)
{
// Only send block messages if there are no noisy actors registered
// with the ASIO thread
if(sched->index == 0)
handle_sched_block(sched);
else
send_msg(sched->index, 0, SCHED_BLOCK, 0);
block_sent = true;
}
}
}
else
{
// block sent and no work to do. We should try and suspend if we can now
      // if we do suspend, we'll send an unblock message first to ensure cnf/ack
// cycle works as expected
// make sure thread scaling order is still valid. we should never be
// active if the active_scheduler_count isn't larger than our index.
pony_assert(current_active_scheduler_count > (uint32_t)sched->index);
// only try and suspend if enough time has passed
if(clocks_elapsed > scheduler_suspend_threshold)
{
actor = perhaps_suspend_scheduler(sched, current_active_scheduler_count,
&block_sent, &steal_attempts, false);
if (actor != NULL)
break;
}
}
// if we're scheduler 0 and cycle detection is enabled
if(!ponyint_actor_getnoblock() && (sched->index == 0))
{
// trigger cycle detector by sending it a message if it is time
uint64_t current_tsc = ponyint_cpu_tick();
if(ponyint_cycle_check_blocked(&sched->ctx, last_cd_tsc, current_tsc))
{
last_cd_tsc = current_tsc;
// cycle detector should now be on the queue
actor = pop_global(sched);
if(actor != NULL)
break;
}
}
}
if(block_sent)
{
// Only send unblock message if a corresponding block message was sent
if(sched->index == 0)
handle_sched_unblock(sched);
else
send_msg(sched->index, 0, SCHED_UNBLOCK, 0);
}
DTRACE3(WORK_STEAL_SUCCESSFUL, (uintptr_t)sched, (uintptr_t)victim, (uintptr_t)actor);
return actor;
}
/**
* Run a scheduler thread until termination.
*/
static void run(scheduler_t* sched)
{
if(sched->index == 0)
last_cd_tsc = 0;
pony_actor_t* actor = pop_global(sched);
if (DTRACE_ENABLED(ACTOR_SCHEDULED) && actor != NULL) {
DTRACE2(ACTOR_SCHEDULED, (uintptr_t)sched, (uintptr_t)actor);
}
while(true)
{
// if we're scheduler 0
if(sched->index == 0)
{
// if cycle detection is enabled
if(!ponyint_actor_getnoblock())
{
// trigger cycle detector by sending it a message if it is time
uint64_t current_tsc = ponyint_cpu_tick();
if(ponyint_cycle_check_blocked(&sched->ctx, last_cd_tsc, current_tsc))
{
last_cd_tsc = current_tsc;
// cycle detector should now be on the queue
if(actor == NULL)
actor = pop_global(sched);
}
}
uint32_t current_active_scheduler_count = get_active_scheduler_count();
uint32_t current_active_scheduler_count_check = get_active_scheduler_count_check();
// if not all threads that should be awake are awake due to a missed signal
if(current_active_scheduler_count != current_active_scheduler_count_check)
{
// send signals to all scheduler threads that should be awake
// this is somewhat wasteful if a scheduler thread is already awake
// but is necessary in case the signal to wake a thread was missed
signal_suspended_threads(current_active_scheduler_count, sched->index);
}
}
// In response to reading a message, we might have unmuted an actor and
// added it back to our queue. if we don't have an actor to run, we want
// to pop from our queue to check for a recently unmuted actor
if(read_msg(sched) && actor == NULL)
{
actor = pop_global(sched);
}
if(actor == NULL)
{
// We had an empty queue and no rescheduled actor.
actor = steal(sched);
if(actor == NULL)
{
// Termination.
pony_assert(pop(sched) == NULL);
return;
}
DTRACE2(ACTOR_SCHEDULED, (uintptr_t)sched, (uintptr_t)actor);
}
// We have at least one muted actor...
// Try and wake up a sleeping scheduler thread to help with the load.
// This is to err on the side of caution and wake up more threads in case
    // of muted actors rather than potentially not waking up enough threads.
// If there isn't enough work, they'll go back to sleep.
// NOTE: This could result in a pathological case where only one thread
// has a muted actor but there is only one overloaded actor. In this case
// the extra scheduler threads would keep being woken up and then go back
// to sleep over and over again.
if(ponyint_mutemap_size(&sched->mute_mapping) > 0)
ponyint_sched_maybe_wakeup(sched->index);
// Run the current actor and get the next actor.
bool reschedule = ponyint_actor_run(&sched->ctx, actor, false);
pony_actor_t* next = pop_global(sched);
if(reschedule)
{
if(next != NULL)
{
// If we have a next actor, we go on the back of the queue. Otherwise,
// we continue to run this actor.
push(sched, actor);
DTRACE2(ACTOR_DESCHEDULED, (uintptr_t)sched, (uintptr_t)actor);
actor = next;
DTRACE2(ACTOR_SCHEDULED, (uintptr_t)sched, (uintptr_t)actor);
// We have at least two actors worth of work; the one we just finished
// running a batch for that needs to be rescheduled and the next one we
// just `pop_global`d. This indicates that there is enough work in
// theory to have another scheduler thread be woken up to do work in
// parallel.
// Try and wake up a sleeping scheduler thread to help with the load.
// If there isn't enough work, they'll go back to sleep.
ponyint_sched_maybe_wakeup(sched->index);
}
} else {
// We aren't rescheduling, so run the next actor. This may be NULL if our
// queue was empty.
DTRACE2(ACTOR_DESCHEDULED, (uintptr_t)sched, (uintptr_t)actor);
actor = next;
if (DTRACE_ENABLED(ACTOR_SCHEDULED) && actor != NULL) {
DTRACE2(ACTOR_SCHEDULED, (uintptr_t)sched, (uintptr_t)actor);
}
}
}
}
static DECLARE_THREAD_FN(run_thread)
{
scheduler_t* sched = (scheduler_t*) arg;
this_scheduler = sched;
ponyint_cpu_affinity(sched->cpu);
#if !defined(PLATFORM_IS_WINDOWS) && !defined(USE_SCHEDULER_SCALING_PTHREADS)
// Make sure we block signals related to scheduler sleeping/waking
// so they queue up to avoid race conditions
sigset_t set;
sigemptyset(&set);
sigaddset(&set, PONY_SCHED_SLEEP_WAKE_SIGNAL);
pthread_sigmask(SIG_BLOCK, &set, NULL);
#endif
run(sched);
ponyint_pool_thread_cleanup();
return 0;
}
static void ponyint_sched_shutdown()
{
uint32_t start;
start = 0;
for(uint32_t i = start; i < scheduler_count; i++)
ponyint_thread_join(scheduler[i].tid);
DTRACE0(RT_END);
ponyint_cycle_terminate(&scheduler[0].ctx);
for(uint32_t i = 0; i < scheduler_count; i++)
{
while(ponyint_thread_messageq_pop(&scheduler[i].mq
#ifdef USE_DYNAMIC_TRACE
, i
#endif
) != NULL) { ; }
ponyint_messageq_destroy(&scheduler[i].mq);
ponyint_mpmcq_destroy(&scheduler[i].q);
#if defined(PLATFORM_IS_WINDOWS)
// close wait event objects
CloseHandle(scheduler[i].sleep_object);
#elif defined(USE_SCHEDULER_SCALING_PTHREADS)
// destroy pthread condition object
pthread_cond_destroy(scheduler[i].sleep_object);
#ifdef USE_MEMTRACK
mem_used -= sizeof(pthread_cond_t);
mem_allocated -= POOL_ALLOC_SIZE(pthread_cond_t);
#endif
POOL_FREE(pthread_cond_t, scheduler[i].sleep_object);
// set sleep condition object to NULL
scheduler[i].sleep_object = NULL;
#endif
}
ponyint_pool_free_size(scheduler_count * sizeof(scheduler_t), scheduler);
#ifdef USE_MEMTRACK
mem_used -= (scheduler_count * sizeof(scheduler_t));
mem_allocated -= (ponyint_pool_used_size(scheduler_count
* sizeof(scheduler_t)));
#endif
scheduler = NULL;
scheduler_count = 0;
atomic_store_explicit(&active_scheduler_count, 0, memory_order_relaxed);
ponyint_mpmcq_destroy(&inject);
}
pony_ctx_t* ponyint_sched_init(uint32_t threads, bool noyield, bool nopin,
bool pinasio, uint32_t min_threads, uint32_t thread_suspend_threshold)
{
pony_register_thread();
use_yield = !noyield;
  // if thread suspend threshold is less than 1, then ensure it is 1
if(thread_suspend_threshold < 1)
thread_suspend_threshold = 1;
// If no thread count is specified, use the available physical core count.
if(threads == 0)
threads = ponyint_cpu_count();
// If minimum thread count is > thread count, cap it at thread count
if(min_threads > threads)
min_threads = threads;
// convert to cycles for use with ponyint_cpu_tick()
// 1 second = 2000000000 cycles (approx.)
// based on same scale as ponyint_cpu_core_pause() uses
scheduler_suspend_threshold = thread_suspend_threshold * 1000000;
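  // e.g. (illustrative): a thread_suspend_threshold of 100 becomes
  // 100 * 1000000 = 100,000,000 cycles, roughly 50ms at the approximate
  // 2000000000 cycles/second scale noted above.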
scheduler_count = threads;
min_scheduler_count = min_threads;
atomic_store_explicit(&active_scheduler_count, scheduler_count,
memory_order_relaxed);
atomic_store_explicit(&active_scheduler_count_check, scheduler_count,
memory_order_relaxed);
scheduler = (scheduler_t*)ponyint_pool_alloc_size(
scheduler_count * sizeof(scheduler_t));
#ifdef USE_MEMTRACK
mem_used += (scheduler_count * sizeof(scheduler_t));
mem_allocated += (ponyint_pool_used_size(scheduler_count
* sizeof(scheduler_t)));
#endif
memset(scheduler, 0, scheduler_count * sizeof(scheduler_t));
uint32_t asio_cpu = ponyint_cpu_assign(scheduler_count, scheduler, nopin,
pinasio);
#if !defined(PLATFORM_IS_WINDOWS) && defined(USE_SCHEDULER_SCALING_PTHREADS)
pthread_once(&sched_mut_once, sched_mut_init);
#endif
for(uint32_t i = 0; i < scheduler_count; i++)
{
#if defined(PLATFORM_IS_WINDOWS)
// create wait event objects
scheduler[i].sleep_object = CreateEvent(NULL, FALSE, FALSE, NULL);
#elif defined(USE_SCHEDULER_SCALING_PTHREADS)
// create pthread condition object
#ifdef USE_MEMTRACK
mem_used += sizeof(pthread_cond_t);
mem_allocated += POOL_ALLOC_SIZE(pthread_cond_t);
#endif
scheduler[i].sleep_object = POOL_ALLOC(pthread_cond_t);
int ret = pthread_cond_init(scheduler[i].sleep_object, NULL);
if(ret != 0)
{
// if it failed, set `sleep_object` to `NULL` for error
#ifdef USE_MEMTRACK
mem_used -= sizeof(pthread_cond_t);
mem_allocated -= POOL_ALLOC_SIZE(pthread_cond_t);
#endif
POOL_FREE(pthread_cond_t, scheduler[i].sleep_object);
scheduler[i].sleep_object = NULL;
}
#else
scheduler[i].sleep_object = PONY_SCHED_SLEEP_WAKE_SIGNAL;
#endif
scheduler[i].ctx.scheduler = &scheduler[i];
scheduler[i].last_victim = &scheduler[i];
scheduler[i].index = i;
scheduler[i].asio_noisy = false;
ponyint_messageq_init(&scheduler[i].mq);
ponyint_mpmcq_init(&scheduler[i].q);
}
ponyint_mpmcq_init(&inject);
ponyint_asio_init(asio_cpu);
return pony_ctx();
}
bool ponyint_sched_start(bool library)
{
pony_register_thread();
if(!ponyint_asio_start())
return false;
atomic_store_explicit(&detect_quiescence, !library, memory_order_relaxed);
DTRACE0(RT_START);
uint32_t start = 0;
for(uint32_t i = start; i < scheduler_count; i++)
{
#if defined(PLATFORM_IS_WINDOWS) || defined(USE_SCHEDULER_SCALING_PTHREADS)
// there was an error creating a wait event or a pthread condition object
if(scheduler[i].sleep_object == NULL)
return false;
#endif
if(!ponyint_thread_create(&scheduler[i].tid, run_thread, scheduler[i].cpu,
&scheduler[i]))
return false;
}
if(!library)
{
ponyint_sched_shutdown();
}
return true;
}
void ponyint_sched_stop()
{
atomic_store_explicit(&detect_quiescence, true, memory_order_relaxed);
ponyint_sched_shutdown();
}
void ponyint_sched_add(pony_ctx_t* ctx, pony_actor_t* actor)
{
if(ctx->scheduler != NULL)
{
// Add to the current scheduler thread.
push(ctx->scheduler, actor);
} else {
// Put on the shared mpmcq.
ponyint_mpmcq_push(&inject, actor);
}
}
uint32_t ponyint_sched_cores()
{
return scheduler_count;
}
uint32_t ponyint_active_sched_count()
{
return get_active_scheduler_count();
}
PONY_API void pony_register_thread()
{
if(this_scheduler != NULL)
return;
// Create a scheduler_t, even though we will only use the pony_ctx_t.
this_scheduler = POOL_ALLOC(scheduler_t);
memset(this_scheduler, 0, sizeof(scheduler_t));
this_scheduler->tid = ponyint_thread_self();
this_scheduler->index = -1;
}
PONY_API void pony_unregister_thread()
{
if(this_scheduler == NULL)
return;
POOL_FREE(scheduler_t, this_scheduler);
this_scheduler = NULL;
ponyint_pool_thread_cleanup();
}
PONY_API pony_ctx_t* pony_ctx()
{
pony_assert(this_scheduler != NULL);
return &this_scheduler->ctx;
}
// Tell all scheduler threads that asio is noisy
void ponyint_sched_noisy_asio(int32_t from)
{
send_msg_all(from, SCHED_NOISY_ASIO, 0);
}
// Tell all scheduler threads that asio is not noisy
void ponyint_sched_unnoisy_asio(int32_t from)
{
send_msg_all(from, SCHED_UNNOISY_ASIO, 0);
}
// Maybe wake up a scheduler thread if possible
void ponyint_sched_maybe_wakeup_if_all_asleep(int32_t current_scheduler_id)
{
uint32_t current_active_scheduler_count = get_active_scheduler_count();
// wake up threads if the current active count is 0
// keep trying until successful to avoid deadlock
while((current_active_scheduler_count = get_active_scheduler_count()) == 0)
{
ponyint_sched_maybe_wakeup(current_scheduler_id);
current_active_scheduler_count = get_active_scheduler_count();
if(current_active_scheduler_count >= 1)
{
// wait for sleeping threads to wake and update check variable
// to ensure that we have at least one fully awake scheduler thread
while (get_active_scheduler_count() != get_active_scheduler_count_check())
{
// send signals to all scheduler threads that should be awake
// this is somewhat wasteful if a scheduler thread is already awake
// but is necessary in case the signal to wake a thread was missed
// NOTE: this intentionally allows for the case where some scheduler
        // threads might miss the signal and not wake up. That is handled
        // by a combination of the check variable and this while loop
signal_suspended_threads(current_active_scheduler_count, current_scheduler_id);
}
}
}
}
// Maybe wake up a scheduler thread if possible
void ponyint_sched_maybe_wakeup(int32_t current_scheduler_id)
{
uint32_t current_active_scheduler_count = get_active_scheduler_count();
// if we have some schedulers that are sleeping, wake one up
if((current_active_scheduler_count < scheduler_count) &&
#if defined(USE_SCHEDULER_SCALING_PTHREADS)
// try to acquire mutex if using pthreads
!pthread_mutex_trylock(&sched_mut)
#else
// try and get the bool that controls modifying the active scheduler count
// variable if using signals
(!atomic_load_explicit(&scheduler_count_changing, memory_order_relaxed)
&& !atomic_exchange_explicit(&scheduler_count_changing, true,
memory_order_acquire))
#endif
)
{
// in case the count changed between the while check and now
current_active_scheduler_count = get_active_scheduler_count();
if(current_active_scheduler_count < scheduler_count)
{
// increment active_scheduler_count to wake a new scheduler up
current_active_scheduler_count++;
atomic_store_explicit(&active_scheduler_count, current_active_scheduler_count,
memory_order_relaxed);
}
#if !defined(USE_SCHEDULER_SCALING_PTHREADS)
// unlock the bool that controls modifying the active scheduler count
// variable if using signals.
atomic_store_explicit(&scheduler_count_changing, false,
memory_order_release);
#endif
// send signals to all scheduler threads that should be awake
// this is somewhat wasteful if a scheduler thread is already awake
// NOTE: this intentionally allows for the case where some scheduler
// threads might miss the signal and not wake up. That is handled as
// part of the beginning of the `run` loop and the while loop in
// ponyint_sched_maybe_wakeup_if_all_asleep
signal_suspended_threads(current_active_scheduler_count, current_scheduler_id);
#if defined(USE_SCHEDULER_SCALING_PTHREADS)
// unlock mutex if using pthreads
pthread_mutex_unlock(&sched_mut);
#endif
}
}
// Manage a scheduler's mute map
//
// When an actor attempts to send to an overloaded actor, it will be added
// to the mute map for this scheduler. The mute map is in the form of:
//
// overloaded receiving actor => [sending actors]
//
// - A given actor will only exist as a sending actor in the map for
// a given scheduler.
// - Receiving actors can exist as a mute map key in the mute map of more
// than one scheduler
//
// Because muted sending actors only exist in a single scheduler's mute map
// and because they aren't scheduled when they are muted, any manipulation
// that we do on their state (for example incrementing or decrementing their
// mute count) is thread safe as only a single scheduler thread will be
// accessing the information.
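//
// For example (illustrative): if actors A and B on this scheduler both send
// to an overloaded actor R, this scheduler's mute map holds R => {A, B} and
// each sender's muted count is incremented; when R is unmuted, those counts
// are decremented and any sender whose count drops to 0 is rescheduled.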
void ponyint_sched_mute(pony_ctx_t* ctx, pony_actor_t* sender, pony_actor_t* recv)
{
pony_assert(sender != recv);
scheduler_t* sched = ctx->scheduler;
size_t index = HASHMAP_UNKNOWN;
muteref_t key;
key.key = recv;
muteref_t* mref = ponyint_mutemap_get(&sched->mute_mapping, &key, &index);
if(mref == NULL)
{
mref = ponyint_muteref_alloc(recv);
#ifdef USE_MEMTRACK
ctx->mem_used += sizeof(muteref_t);
ctx->mem_allocated += POOL_ALLOC_SIZE(muteref_t);
int64_t old_mmap_mem_size = ponyint_mutemap_mem_size(&sched->mute_mapping);
int64_t old_mmap_alloc_size =
ponyint_mutemap_alloc_size(&sched->mute_mapping);
#endif
ponyint_mutemap_putindex(&sched->mute_mapping, mref, index);
#ifdef USE_MEMTRACK
int64_t new_mmap_mem_size = ponyint_mutemap_mem_size(&sched->mute_mapping);
int64_t new_mmap_alloc_size =
ponyint_mutemap_alloc_size(&sched->mute_mapping);
ctx->mem_used += (new_mmap_mem_size - old_mmap_mem_size);
ctx->mem_allocated += (new_mmap_alloc_size - old_mmap_alloc_size);
#endif
}
size_t index2 = HASHMAP_UNKNOWN;
pony_actor_t* r = ponyint_muteset_get(&mref->value, sender, &index2);
if(r == NULL)
{
// This is safe because an actor can only ever be in a single scheduler's
// mutemap
#ifdef USE_MEMTRACK
int64_t old_mset_mem_size = ponyint_muteset_mem_size(&mref->value);
int64_t old_mset_alloc_size = ponyint_muteset_alloc_size(&mref->value);
#endif
ponyint_muteset_putindex(&mref->value, sender, index2);
atomic_fetch_add_explicit(&sender->muted, 1, memory_order_relaxed);
#ifdef USE_MEMTRACK
int64_t new_mset_mem_size = ponyint_muteset_mem_size(&mref->value);
int64_t new_mset_alloc_size = ponyint_muteset_alloc_size(&mref->value);
ctx->mem_used += (new_mset_mem_size - old_mset_mem_size);
ctx->mem_allocated += (new_mset_alloc_size - old_mset_alloc_size);
pony_assert(ctx->mem_used >= 0);
pony_assert(ctx->mem_allocated >= 0);
#endif
}
#ifdef USE_MEMTRACK
pony_assert(ctx->mem_used ==
(int64_t)ponyint_mutemap_total_mem_size(&sched->mute_mapping));
pony_assert(ctx->mem_allocated ==
(int64_t)ponyint_mutemap_total_alloc_size(&sched->mute_mapping));
#endif
}
void ponyint_sched_start_global_unmute(uint32_t from, pony_actor_t* actor)
{
send_msg_all_active(from, SCHED_UNMUTE_ACTOR, (intptr_t)actor);
}
DECLARE_STACK(ponyint_actorstack, actorstack_t, pony_actor_t);
DEFINE_STACK(ponyint_actorstack, actorstack_t, pony_actor_t);
bool ponyint_sched_unmute_senders(pony_ctx_t* ctx, pony_actor_t* actor)
{
size_t actors_rescheduled = 0;
scheduler_t* sched = ctx->scheduler;
size_t index = HASHMAP_UNKNOWN;
muteref_t key;
key.key = actor;
muteref_t* mref = ponyint_mutemap_get(&sched->mute_mapping, &key, &index);
if(mref != NULL)
{
size_t i = HASHMAP_UNKNOWN;
pony_actor_t* muted = NULL;
actorstack_t* needs_unmuting = NULL;
#ifdef USE_MEMTRACK
ctx->mem_used -= sizeof(muteref_t);
ctx->mem_allocated -= POOL_ALLOC_SIZE(muteref_t);
ctx->mem_used -= ponyint_muteset_mem_size(&mref->value);
ctx->mem_allocated -= ponyint_muteset_alloc_size(&mref->value);
pony_assert(ctx->mem_used >= 0);
pony_assert(ctx->mem_allocated >= 0);
#endif
// Find and collect any actors that need to be unmuted
while((muted = ponyint_muteset_next(&mref->value, &i)) != NULL)
{
// This is safe because an actor can only ever be in a single scheduler's
// mutemap
size_t muted_count = atomic_fetch_sub_explicit(&muted->muted, 1, memory_order_relaxed);
pony_assert(muted_count > 0);
      // If muted_count used to be 1 before we decremented it, then the actor
      // is no longer muted
if(muted_count == 1)
{
needs_unmuting = ponyint_actorstack_push(needs_unmuting, muted);
}
}
ponyint_mutemap_removeindex(&sched->mute_mapping, index);
ponyint_muteref_free(mref);
// Unmute any actors that need to be unmuted
pony_actor_t* to_unmute;
while(needs_unmuting != NULL)
{
needs_unmuting = ponyint_actorstack_pop(needs_unmuting, &to_unmute);
if(!has_flag(to_unmute, FLAG_UNSCHEDULED))
{
ponyint_unmute_actor(to_unmute);
// TODO: we don't want to reschedule if our queue is empty.
// That's wasteful.
ponyint_sched_add(ctx, to_unmute);
DTRACE2(ACTOR_SCHEDULED, (uintptr_t)sched, (uintptr_t)to_unmute);
actors_rescheduled++;
}
ponyint_sched_start_global_unmute(ctx->scheduler->index, to_unmute);
}
}
#ifdef USE_MEMTRACK
pony_assert(ctx->mem_used ==
(int64_t)ponyint_mutemap_total_mem_size(&sched->mute_mapping));
pony_assert(ctx->mem_allocated ==
(int64_t)ponyint_mutemap_total_alloc_size(&sched->mute_mapping));
#endif
return actors_rescheduled > 0;
}
// Return the scheduler's index
PONY_API int32_t pony_sched_index(pony_ctx_t* ctx)
{
return ctx->scheduler->index;
}
| 1 | 13,880 | Should we remove this entirely now then? | ponylang-ponyc | c |
@@ -0,0 +1,17 @@
+package monitor
+
+import (
+ "github.com/weaveworks/weave/net/address"
+)
+
+// Monitor is an interface for tracking changes in ring allocations.
+type Monitor interface {
+ // HandleUpdate is called whenever an address ring gets updated.
+ //
+ // prevRanges corresponds to ranges which were owned by a peer before
+ // a change in the ring, while currRanges corresponds to the ranges
+ // currently owned by the peer.
+ HandleUpdate(prevRanges, currRanges []address.Range) error
+ // String returns a user-friendly name of the monitor.
+ String() string
+} | 1 | 1 | 12,733 | That is way too generic a name. | weaveworks-weave | go |
|
@@ -49,4 +49,11 @@ describe('duplicate-id', function () {
assert.isTrue(checks['duplicate-id'].evaluate.call(checkContext, node));
});
+ it('should allow overwritten ids', function () {
+ fixture.innerHTML = '<form data-testelm="1" id="target"><input name="id"></form>';
+ var node = fixture.querySelector('[data-testelm="1"]');
+
+ assert.isTrue(checks['duplicate-id'].evaluate.call(checkContext, node));
+ });
+
}); | 1 | describe('duplicate-id', function () {
'use strict';
var fixture = document.getElementById('fixture');
var checkContext = {
_relatedNodes: [],
_data: null,
data: function (d) {
this._data = d;
},
relatedNodes: function (rn) {
this._relatedNodes = rn;
}
};
afterEach(function () {
fixture.innerHTML = '';
checkContext._relatedNodes = [];
checkContext._data = null;
});
it('should return true if there is only one element with an ID', function () {
fixture.innerHTML = '<div id="target"></div>';
var node = fixture.querySelector('#target');
assert.isTrue(checks['duplicate-id'].evaluate.call(checkContext, node));
assert.equal(checkContext._data, node.id);
assert.deepEqual(checkContext._relatedNodes, []);
});
it('should return false if there are multiple elements with an ID', function () {
fixture.innerHTML = '<div id="target"></div><div id="target"></div>';
var node = fixture.querySelector('#target');
assert.isFalse(checks['duplicate-id'].evaluate.call(checkContext, node));
assert.equal(checkContext._data, node.id);
assert.deepEqual(checkContext._relatedNodes, [node.nextSibling]);
});
  it('should remove duplicates', function () {
assert.deepEqual(checks['duplicate-id'].after([{data: 'a'}, {data: 'b'}, {data: 'b'}]), [{data: 'a'}, {data: 'b'}]);
});
it('should ignore empty ids', function () {
fixture.innerHTML = '<div data-testelm="1" id=""></div><div data-testelm="2" id=""></div>';
var node = fixture.querySelector('[data-testelm="1"]');
assert.isTrue(checks['duplicate-id'].evaluate.call(checkContext, node));
});
});
| 1 | 11,201 | This might be overkill for test code...but as stewards of accessibility it would be appropriate to have a label in the fixture. | dequelabs-axe-core | js |
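A small sketch of the fixture change the reviewer hints at; the <label> markup and the name-field id are hypothetical, added only to show an accessible variant of the fixture used in the new test:

// Hypothetical fixture with a label associated to the input, so the test
// markup itself models good accessibility practice. Only the <label>/for
// pairing is new; the duplicate-id assertion is unchanged.
fixture.innerHTML =
  '<form data-testelm="1" id="target">' +
  '<label for="name-field">Name</label>' +
  '<input id="name-field" name="id">' +
  '</form>';
var node = fixture.querySelector('[data-testelm="1"]');
assert.isTrue(checks['duplicate-id'].evaluate.call(checkContext, node));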
@@ -232,7 +232,16 @@ TEST_F(VkBestPracticesLayerTest, CmdClearAttachmentTest) {
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_commandBuffer->begin();
- m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
+
+ auto* secondary_full_clear = new VkCommandBufferObj(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);
+ auto* secondary_small_clear = new VkCommandBufferObj(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);
+ VkCommandBufferBeginInfo begin_info = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
+ begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT |
+ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
+ VkCommandBufferInheritanceInfo inherit_info = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO };
+ begin_info.pInheritanceInfo = &inherit_info;
+ inherit_info.subpass = 0;
+ inherit_info.renderPass = m_renderPassBeginInfo.renderPass;
// Main thing we care about for this test is that the VkImage obj we're
// clearing matches Color Attachment of FB | 1 | /*
* Copyright (c) 2015-2021 The Khronos Group Inc.
* Copyright (c) 2015-2021 Valve Corporation
* Copyright (c) 2015-2021 LunarG, Inc.
* Copyright (c) 2015-2021 Google, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Author: Camden Stocker <camden@lunarg.com>
*/
#include "cast_utils.h"
#include "layer_validation_tests.h"
#include "best_practices_error_enums.h"
void VkBestPracticesLayerTest::InitBestPracticesFramework() {
// Enable all vendor-specific checks
VkLayerSettingValueDataEXT bp_setting_string_value{};
bp_setting_string_value.arrayString.pCharArray = "VALIDATION_CHECK_ENABLE_VENDOR_SPECIFIC_ALL";
bp_setting_string_value.arrayString.count = sizeof(bp_setting_string_value.arrayString.pCharArray);
VkLayerSettingValueEXT bp_vendor_all_setting_val = {"enables", VK_LAYER_SETTING_VALUE_TYPE_STRING_ARRAY_EXT,
bp_setting_string_value};
VkLayerSettingsEXT bp_settings{static_cast<VkStructureType>(VK_STRUCTURE_TYPE_INSTANCE_LAYER_SETTINGS_EXT), nullptr, 1,
&bp_vendor_all_setting_val};
features_.pNext = &bp_settings;
InitFramework(m_errorMonitor, &features_);
}
TEST_F(VkBestPracticesLayerTest, ValidateReturnCodes) {
uint32_t version = SetTargetApiVersion(VK_API_VERSION_1_2);
if (version < VK_API_VERSION_1_1) {
printf("%s At least Vulkan version 1.2 is required, skipping test.\n", kSkipPrefix);
return;
}
if (!AddSurfaceInstanceExtension()) {
printf("%s surface extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitBestPracticesFramework());
if (!AddSwapchainDeviceExtension()) {
printf("%s swapchain extensions not supported, skipping CmdCopySwapchainImage test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
if (!InitSwapchain()) {
printf("%s Cannot create surface or swapchain, skipping CmdCopySwapchainImage test\n", kSkipPrefix);
return;
}
// Attempt to force an invalid return code for an unsupported format
VkImageFormatProperties2 image_format_prop = {};
image_format_prop.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
VkPhysicalDeviceImageFormatInfo2 image_format_info = {};
image_format_info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
image_format_info.format = VK_FORMAT_R32G32B32_SFLOAT;
image_format_info.tiling = VK_IMAGE_TILING_LINEAR;
image_format_info.type = VK_IMAGE_TYPE_3D;
image_format_info.usage = VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
VkResult result = vk::GetPhysicalDeviceImageFormatProperties2(m_device->phy().handle(), &image_format_info, &image_format_prop);
    // Only run this test if this super-weird format is not supported
if (VK_SUCCESS != result) {
m_errorMonitor->SetDesiredFailureMsg(kWarningBit, "UNASSIGNED-BestPractices-Error-Result");
vk::GetPhysicalDeviceImageFormatProperties2(m_device->phy().handle(), &image_format_info, &image_format_prop);
m_errorMonitor->VerifyFound();
}
if (IsPlatform(kMockICD) || DeviceSimulation()) {
printf("%s Test not supported by MockICD, skipping test case.\n", kSkipPrefix);
return;
}
// Force a non-success success code by only asking for a subset of query results
uint32_t format_count;
std::vector<VkSurfaceFormatKHR> formats;
result = vk::GetPhysicalDeviceSurfaceFormatsKHR(gpu(), m_surface, &format_count, NULL);
if (result != VK_SUCCESS || format_count <= 1) {
printf("%s test requires 2 or more extensions available, skipping test.\n", kSkipPrefix);
return;
}
format_count -= 1;
formats.resize(format_count);
m_errorMonitor->SetDesiredFailureMsg(kInformationBit, "UNASSIGNED-BestPractices-NonSuccess-Result");
result = vk::GetPhysicalDeviceSurfaceFormatsKHR(gpu(), m_surface, &format_count, formats.data());
m_errorMonitor->VerifyFound();
}
TEST_F(VkBestPracticesLayerTest, UseDeprecatedInstanceExtensions) {
TEST_DESCRIPTION("Create an instance with a deprecated extension.");
uint32_t version = SetTargetApiVersion(VK_API_VERSION_1_1);
if (version < VK_API_VERSION_1_1) {
printf("%s At least Vulkan version 1.1 is required, skipping test.\n", kSkipPrefix);
return;
}
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find %s extension, skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitBestPracticesFramework());
// Create a 1.1 vulkan instance and request an extension promoted to core in 1.1
m_errorMonitor->SetDesiredFailureMsg(kWarningBit, "UNASSIGNED-BestPractices-vkCreateInstance-deprecated-extension");
m_errorMonitor->SetDesiredFailureMsg(kWarningBit, "UNASSIGNED-BestPractices-vkCreateInstance-specialuse-extension");
VkInstance dummy;
auto features = features_;
auto ici = GetInstanceCreateInfo();
features.pNext = ici.pNext;
ici.pNext = &features;
vk::CreateInstance(&ici, nullptr, &dummy);
m_errorMonitor->VerifyFound();
// Create a 1.0 vulkan instance and request an extension promoted to core in 1.1
m_errorMonitor->ExpectSuccess(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT);
m_errorMonitor->SetUnexpectedError("UNASSIGNED-khronos-Validation-debug-build-warning-message");
VkApplicationInfo* new_info = new VkApplicationInfo;
new_info->apiVersion = VK_API_VERSION_1_0;
new_info->pApplicationName = ici.pApplicationInfo->pApplicationName;
new_info->applicationVersion = ici.pApplicationInfo->applicationVersion;
new_info->pEngineName = ici.pApplicationInfo->pEngineName;
new_info->engineVersion = ici.pApplicationInfo->engineVersion;
ici.pApplicationInfo = new_info;
vk::CreateInstance(&ici, nullptr, &dummy);
vk::DestroyInstance(dummy, nullptr);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkBestPracticesLayerTest, UseDeprecatedDeviceExtensions) {
TEST_DESCRIPTION("Create a device with a deprecated extension.");
uint32_t version = SetTargetApiVersion(VK_API_VERSION_1_2);
if (version < VK_API_VERSION_1_2) {
printf("%s At least Vulkan version 1.2 is required, skipping test.\n", kSkipPrefix);
return;
}
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find %s extension, skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitBestPracticesFramework());
if (DeviceValidationVersion() < VK_API_VERSION_1_2) {
printf("%s At least Vulkan version 1.2 is required for device, skipping test\n", kSkipPrefix);
return;
}
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME);
} else {
printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME);
return;
}
VkDevice local_device;
VkDeviceCreateInfo dev_info = {};
VkDeviceQueueCreateInfo queue_info = {};
queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_info.pNext = NULL;
queue_info.queueFamilyIndex = 0;
queue_info.queueCount = 1;
queue_info.pQueuePriorities = nullptr;
dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
dev_info.pNext = nullptr;
dev_info.queueCreateInfoCount = 1;
dev_info.pQueueCreateInfos = &queue_info;
dev_info.enabledLayerCount = 0;
dev_info.ppEnabledLayerNames = NULL;
dev_info.enabledExtensionCount = m_device_extension_names.size();
dev_info.ppEnabledExtensionNames = m_device_extension_names.data();
m_errorMonitor->SetDesiredFailureMsg(kWarningBit, "UNASSIGNED-BestPractices-vkCreateDevice-deprecated-extension");
vk::CreateDevice(this->gpu(), &dev_info, NULL, &local_device);
m_errorMonitor->VerifyFound();
}
TEST_F(VkBestPracticesLayerTest, SpecialUseExtensions) {
TEST_DESCRIPTION("Create a device with a 'specialuse' extension.");
ASSERT_NO_FATAL_FAILURE(InitBestPracticesFramework());
if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_DEPTH_CLIP_ENABLE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_EXT_DEPTH_CLIP_ENABLE_EXTENSION_NAME);
} else {
printf("%s %s Extension not supported, skipping test\n", kSkipPrefix, VK_EXT_DEPTH_CLIP_ENABLE_EXTENSION_NAME);
return;
}
VkDevice local_device;
VkDeviceCreateInfo dev_info = {};
VkDeviceQueueCreateInfo queue_info = {};
queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_info.pNext = NULL;
queue_info.queueFamilyIndex = 0;
queue_info.queueCount = 1;
queue_info.pQueuePriorities = nullptr;
dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
dev_info.pNext = nullptr;
dev_info.queueCreateInfoCount = 1;
dev_info.pQueueCreateInfos = &queue_info;
dev_info.enabledLayerCount = 0;
dev_info.ppEnabledLayerNames = NULL;
dev_info.enabledExtensionCount = m_device_extension_names.size();
dev_info.ppEnabledExtensionNames = m_device_extension_names.data();
m_errorMonitor->SetDesiredFailureMsg(kWarningBit, "UNASSIGNED-BestPractices-vkCreateDevice-specialuse-extension");
vk::CreateDevice(this->gpu(), &dev_info, NULL, &local_device);
m_errorMonitor->VerifyFound();
}
TEST_F(VkBestPracticesLayerTest, CmdClearAttachmentTest) {
TEST_DESCRIPTION("Test for validating usage of vkCmdClearAttachments");
InitBestPracticesFramework();
InitState();
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
// Main thing we care about for this test is that the VkImage obj we're
// clearing matches Color Attachment of FB
// Also pass down other dummy params to keep driver and paramchecker happy
VkClearAttachment color_attachment;
color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
color_attachment.clearValue.color.float32[0] = 1.0;
color_attachment.clearValue.color.float32[1] = 1.0;
color_attachment.clearValue.color.float32[2] = 1.0;
color_attachment.clearValue.color.float32[3] = 1.0;
color_attachment.colorAttachment = 0;
VkClearRect clear_rect = {{{0, 0}, {(uint32_t)m_width, (uint32_t)m_height}}, 0, 1};
// Call for full-sized FB Color attachment prior to issuing a Draw
m_errorMonitor->SetDesiredFailureMsg(kPerformanceWarningBit, "UNASSIGNED-BestPractices-DrawState-ClearCmdBeforeDraw");
vk::CmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect);
m_errorMonitor->VerifyFound();
}
TEST_F(VkBestPracticesLayerTest, VtxBufferBadIndex) {
InitBestPracticesFramework();
InitState();
m_errorMonitor->SetDesiredFailureMsg(kPerformanceWarningBit, "UNASSIGNED-BestPractices-DrawState-VtxIndexOutOfBounds");
// This test may also trigger other warnings
m_errorMonitor->SetAllowedFailureMsg("UNASSIGNED-BestPractices-vkAllocateMemory-small-allocation");
m_errorMonitor->SetAllowedFailureMsg("UNASSIGNED-BestPractices-vkBindMemory-small-dedicated-allocation");
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = {};
pipe_ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
pipe_ms_state_ci.pNext = NULL;
pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
pipe_ms_state_ci.sampleShadingEnable = 0;
pipe_ms_state_ci.minSampleShading = 1.0;
pipe_ms_state_ci.pSampleMask = NULL;
CreatePipelineHelper pipe(*this);
pipe.InitInfo();
pipe.pipe_ms_state_ci_ = pipe_ms_state_ci;
pipe.InitState();
pipe.CreateGraphicsPipeline();
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);
// Don't care about actual data, just need to get to draw to flag error
const float vbo_data[3] = {1.f, 0.f, 1.f};
VkConstantBufferObj vbo(m_device, sizeof(vbo_data), (const void*)&vbo_data, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
m_commandBuffer->BindVertexBuffer(&vbo, (VkDeviceSize)0, 1); // VBO idx 1, but no VBO in PSO
m_commandBuffer->Draw(1, 0, 0, 0);
m_errorMonitor->VerifyFound();
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
}
// This is a positive test. No failures are expected.
TEST_F(VkBestPracticesLayerTest, TestDestroyFreeNullHandles) {
VkResult err;
TEST_DESCRIPTION("Call all applicable destroy and free routines with NULL handles, expecting no validation errors");
InitBestPracticesFramework();
InitState();
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_errorMonitor->ExpectSuccess();
vk::DestroyBuffer(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyBufferView(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyCommandPool(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyDescriptorPool(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyDescriptorSetLayout(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyDevice(VK_NULL_HANDLE, NULL);
vk::DestroyEvent(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyFence(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyFramebuffer(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyImage(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyImageView(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyInstance(VK_NULL_HANDLE, NULL);
vk::DestroyPipeline(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyPipelineCache(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyPipelineLayout(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyQueryPool(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyRenderPass(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroySampler(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroySemaphore(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyShaderModule(m_device->device(), VK_NULL_HANDLE, NULL);
VkCommandPool command_pool;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vk::CreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool);
VkCommandBuffer command_buffers[3] = {};
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = command_pool;
command_buffer_allocate_info.commandBufferCount = 1;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vk::AllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &command_buffers[1]);
vk::FreeCommandBuffers(m_device->device(), command_pool, 3, command_buffers);
vk::DestroyCommandPool(m_device->device(), command_pool, NULL);
VkDescriptorPoolSize ds_type_count = {};
ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
ds_type_count.descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = NULL;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool ds_pool;
err = vk::CreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 2;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
dsl_binding.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});
VkDescriptorSet descriptor_sets[3] = {};
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorSetCount = 1;
alloc_info.descriptorPool = ds_pool;
alloc_info.pSetLayouts = &ds_layout.handle();
err = vk::AllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_sets[1]);
ASSERT_VK_SUCCESS(err);
vk::FreeDescriptorSets(m_device->device(), ds_pool, 3, descriptor_sets);
vk::DestroyDescriptorPool(m_device->device(), ds_pool, NULL);
vk::FreeMemory(m_device->device(), VK_NULL_HANDLE, NULL);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkBestPracticesLayerTest, CommandBufferReset) {
TEST_DESCRIPTION("Test for validating usage of vkCreateCommandPool with COMMAND_BUFFER_RESET_BIT");
InitBestPracticesFramework();
InitState();
m_errorMonitor->SetDesiredFailureMsg(kPerformanceWarningBit,
"UNASSIGNED-BestPractices-vkCreateCommandPool-command-buffer-reset");
VkCommandPool command_pool;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vk::CreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool);
m_errorMonitor->VerifyFound();
}
TEST_F(VkBestPracticesLayerTest, SimultaneousUse) {
TEST_DESCRIPTION("Test for validating usage of vkBeginCommandBuffer with SIMULTANEOUS_USE");
InitBestPracticesFramework();
InitState();
m_errorMonitor->SetDesiredFailureMsg(kPerformanceWarningBit, "UNASSIGNED-BestPractices-vkBeginCommandBuffer-simultaneous-use");
m_errorMonitor->SetAllowedFailureMsg("UNASSIGNED-BestPractices-vkBeginCommandBuffer-one-time-submit");
VkCommandBufferBeginInfo cmd_begin_info{};
cmd_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
cmd_begin_info.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
vk::BeginCommandBuffer(m_commandBuffer->handle(), &cmd_begin_info);
m_errorMonitor->VerifyFound();
}
TEST_F(VkBestPracticesLayerTest, SmallAllocation) {
TEST_DESCRIPTION("Test for small memory allocations");
InitBestPracticesFramework();
InitState();
m_errorMonitor->SetDesiredFailureMsg(kPerformanceWarningBit, "UNASSIGNED-BestPractices-vkAllocateMemory-small-allocation");
// Find appropriate memory type for given reqs
VkMemoryPropertyFlags mem_props = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
VkPhysicalDeviceMemoryProperties dev_mem_props = m_device->phy().memory_properties();
uint32_t mem_type_index = 0;
for (mem_type_index = 0; mem_type_index < dev_mem_props.memoryTypeCount; ++mem_type_index) {
if (mem_props == (mem_props & dev_mem_props.memoryTypes[mem_type_index].propertyFlags)) break;
}
EXPECT_LT(mem_type_index, dev_mem_props.memoryTypeCount) << "Could not find a suitable memory type.";
const uint32_t kSmallAllocationSize = 1024;
VkMemoryAllocateInfo alloc_info{};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.allocationSize = kSmallAllocationSize;
alloc_info.memoryTypeIndex = mem_type_index;
VkDeviceMemory memory;
vk::AllocateMemory(m_device->device(), &alloc_info, nullptr, &memory);
m_errorMonitor->VerifyFound();
}
TEST_F(VkBestPracticesLayerTest, SmallDedicatedAllocation) {
TEST_DESCRIPTION("Test for small dedicated memory allocations");
InitBestPracticesFramework();
InitState();
m_errorMonitor->SetDesiredFailureMsg(kPerformanceWarningBit,
"UNASSIGNED-BestPractices-vkBindMemory-small-dedicated-allocation");
m_errorMonitor->SetAllowedFailureMsg("UNASSIGNED-BestPractices-vkAllocateMemory-small-allocation");
VkImageCreateInfo image_info{};
image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_info.extent = {64, 64, 1};
image_info.format = VK_FORMAT_R8G8B8A8_UNORM;
image_info.imageType = VK_IMAGE_TYPE_2D;
image_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
image_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_info.arrayLayers = 1;
image_info.mipLevels = 1;
// Create a small image with a dedicated allocation
VkImageObj image(m_device);
image.init_no_mem(*m_device, image_info);
vk_testing::DeviceMemory mem;
mem.init(*m_device, vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, image.memory_requirements(),
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT));
vk::BindImageMemory(device(), image.handle(), mem.handle(), 0);
m_errorMonitor->VerifyFound();
}
TEST_F(VkBestPracticesLayerTest, MSImageRequiresMemory) {
TEST_DESCRIPTION("Test for MS image that requires memory");
InitBestPracticesFramework();
InitState();
m_errorMonitor->SetDesiredFailureMsg(kPerformanceWarningBit,
"UNASSIGNED-BestPractices-vkCreateRenderPass-image-requires-memory");
VkAttachmentDescription attachment{};
attachment.samples = VK_SAMPLE_COUNT_4_BIT;
attachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
VkRenderPassCreateInfo rp_info{};
rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
rp_info.attachmentCount = 1;
rp_info.pAttachments = &attachment;
VkRenderPass rp;
vk::CreateRenderPass(m_device->device(), &rp_info, nullptr, &rp);
m_errorMonitor->VerifyFound();
}
TEST_F(VkBestPracticesLayerTest, AttachmentShouldNotBeTransient) {
TEST_DESCRIPTION("Test for non-lazy multisampled images");
InitBestPracticesFramework();
InitState();
if (IsPlatform(kPixel2XL) || IsPlatform(kPixel3) || IsPlatform(kPixel3aXL) || IsPlatform(kShieldTV) || IsPlatform(kShieldTVb) ||
IsPlatform(kNexusPlayer)) {
printf("%s This test seems super-picky on Android platforms\n", kSkipPrefix);
return;
}
m_errorMonitor->SetDesiredFailureMsg(kPerformanceWarningBit,
"UNASSIGNED-BestPractices-vkCreateFramebuffer-attachment-should-not-be-transient");
m_errorMonitor->SetAllowedFailureMsg("UNASSIGNED-BestPractices-vkAllocateMemory-small-allocation");
m_errorMonitor->SetAllowedFailureMsg("UNASSIGNED-BestPractices-vkBindMemory-small-dedicated-allocation");
m_errorMonitor->SetAllowedFailureMsg("UNASSIGNED-BestPractices-vkBindImageMemory-non-lazy-transient-image");
VkAttachmentDescription attachment{};
attachment.samples = VK_SAMPLE_COUNT_1_BIT;
attachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
VkRenderPassCreateInfo rp_info{};
rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
rp_info.attachmentCount = 1;
rp_info.pAttachments = &attachment;
VkRenderPass rp = VK_NULL_HANDLE;
vk::CreateRenderPass(m_device->device(), &rp_info, nullptr, &rp);
VkImageCreateInfo image_info{};
image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_info.extent = {1920, 1080, 1};
image_info.format = VK_FORMAT_R8G8B8A8_UNORM;
image_info.imageType = VK_IMAGE_TYPE_2D;
image_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
image_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_info.arrayLayers = 1;
image_info.mipLevels = 1;
VkImageObj image(m_device);
image.init(&image_info);
VkImageViewCreateInfo iv_info{};
iv_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
iv_info.format = VK_FORMAT_R8G8B8A8_UNORM;
iv_info.image = image.handle();
iv_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
iv_info.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
iv_info.components = {VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A};
VkImageView image_view = VK_NULL_HANDLE;
vk::CreateImageView(m_device->device(), &iv_info, nullptr, &image_view);
VkFramebufferCreateInfo fb_info{};
fb_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
fb_info.renderPass = rp;
fb_info.layers = 1;
fb_info.width = 1920;
fb_info.height = 1080;
fb_info.attachmentCount = 1;
fb_info.pAttachments = &image_view;
VkFramebuffer fb = VK_NULL_HANDLE;
vk::CreateFramebuffer(m_device->device(), &fb_info, nullptr, &fb);
m_errorMonitor->VerifyFound();
}
TEST_F(VkBestPracticesLayerTest, TooManyInstancedVertexBuffers) {
TEST_DESCRIPTION("Test for too many instanced vertex buffers");
InitBestPracticesFramework();
InitState();
m_errorMonitor->SetDesiredFailureMsg(kPerformanceWarningBit,
"UNASSIGNED-BestPractices-vkCreateGraphicsPipelines-too-many-instanced-vertex-buffers");
// This test may also trigger the small allocation warnings
m_errorMonitor->SetAllowedFailureMsg("UNASSIGNED-BestPractices-vkAllocateMemory-small-allocation");
m_errorMonitor->SetAllowedFailureMsg("UNASSIGNED-BestPractices-vkBindMemory-small-dedicated-allocation");
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
std::vector<VkVertexInputBindingDescription> bindings(2, VkVertexInputBindingDescription{});
std::vector<VkVertexInputAttributeDescription> attributes(2, VkVertexInputAttributeDescription{});
bindings[0].binding = 0;
bindings[0].stride = 4;
bindings[0].inputRate = VK_VERTEX_INPUT_RATE_INSTANCE;
attributes[0].binding = 0;
bindings[1].binding = 1;
bindings[1].stride = 8;
bindings[1].inputRate = VK_VERTEX_INPUT_RATE_INSTANCE;
attributes[1].binding = 1;
VkPipelineVertexInputStateCreateInfo vi_state_ci{};
vi_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
vi_state_ci.vertexBindingDescriptionCount = static_cast<uint32_t>(bindings.size());
vi_state_ci.pVertexBindingDescriptions = bindings.data();
vi_state_ci.vertexAttributeDescriptionCount = static_cast<uint32_t>(attributes.size());
vi_state_ci.pVertexAttributeDescriptions = attributes.data();
CreatePipelineHelper pipe(*this);
pipe.InitInfo();
pipe.vi_ci_ = vi_state_ci;
pipe.InitState();
pipe.CreateGraphicsPipeline();
m_errorMonitor->VerifyFound();
}
TEST_F(VkBestPracticesLayerTest, ClearAttachmentsAfterLoad) {
TEST_DESCRIPTION("Test for clearing attachments after load");
InitBestPracticesFramework();
InitState();
m_clear_via_load_op = false; // Force LOAD_OP_LOAD
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_errorMonitor->SetDesiredFailureMsg(kPerformanceWarningBit, "UNASSIGNED-BestPractices-vkCmdClearAttachments-clear-after-load");
// On tiled renderers, this can also trigger a warning about LOAD_OP_LOAD causing a readback
m_errorMonitor->SetAllowedFailureMsg("UNASSIGNED-BestPractices-vkCmdBeginRenderPass-attachment-needs-readback");
m_errorMonitor->SetAllowedFailureMsg("UNASSIGNED-BestPractices-DrawState-ClearCmdBeforeDraw");
m_errorMonitor->SetAllowedFailureMsg("UNASSIGNED-BestPractices-RenderPass-redundant-store");
m_errorMonitor->SetAllowedFailureMsg("UNASSIGNED-BestPractices-RenderPass-redundant-clear");
m_errorMonitor->SetAllowedFailureMsg("UNASSIGNED-BestPractices-RenderPass-inefficient-clear");
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
VkClearAttachment color_attachment;
color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
color_attachment.clearValue.color.float32[0] = 1.0;
color_attachment.clearValue.color.float32[1] = 1.0;
color_attachment.clearValue.color.float32[2] = 1.0;
color_attachment.clearValue.color.float32[3] = 1.0;
color_attachment.colorAttachment = 0;
VkClearRect clear_rect = {{{0, 0}, {(uint32_t)m_width, (uint32_t)m_height}}, 0, 1};
vk::CmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect);
m_errorMonitor->VerifyFound();
}
TEST_F(VkBestPracticesLayerTest, TripleBufferingTest) {
TEST_DESCRIPTION("Test for usage of triple buffering");
AddSurfaceInstanceExtension();
InitBestPracticesFramework();
AddSwapchainDeviceExtension();
InitState();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
"UNASSIGNED-BestPractices-vkCreateSwapchainKHR-suboptimal-swapchain-image-count");
if (!InitSurface()) {
printf("%s Cannot create surface, skipping test\n", kSkipPrefix);
return;
}
InitSwapchainInfo();
VkBool32 supported;
vk::GetPhysicalDeviceSurfaceSupportKHR(gpu(), m_device->graphics_queue_node_index_, m_surface, &supported);
if (!supported) {
printf("%s Graphics queue does not support present, skipping test\n", kSkipPrefix);
return;
}
VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
VkSurfaceTransformFlagBitsKHR preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
VkSwapchainCreateInfoKHR swapchain_create_info = {};
swapchain_create_info.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
swapchain_create_info.pNext = 0;
swapchain_create_info.surface = m_surface;
swapchain_create_info.minImageCount = 2;
swapchain_create_info.imageFormat = m_surface_formats[0].format;
swapchain_create_info.imageColorSpace = m_surface_formats[0].colorSpace;
swapchain_create_info.imageExtent = {m_surface_capabilities.minImageExtent.width, m_surface_capabilities.minImageExtent.height};
swapchain_create_info.imageArrayLayers = 1;
swapchain_create_info.imageUsage = imageUsage;
swapchain_create_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
swapchain_create_info.preTransform = preTransform;
swapchain_create_info.compositeAlpha = m_surface_composite_alpha;
swapchain_create_info.presentMode = m_surface_present_modes[0];
swapchain_create_info.clipped = VK_FALSE;
swapchain_create_info.oldSwapchain = 0;
VkResult err = vk::CreateSwapchainKHR(device(), &swapchain_create_info, nullptr, &m_swapchain);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
"UNASSIGNED-BestPractices-vkCreateSwapchainKHR-suboptimal-swapchain-image-count");
swapchain_create_info.minImageCount = 3;
err = vk::CreateSwapchainKHR(device(), &swapchain_create_info, nullptr, &m_swapchain);
m_errorMonitor->VerifyNotFound();
    ASSERT_VK_SUCCESS(err);
DestroySwapchain();
}
TEST_F(VkBestPracticesLayerTest, SwapchainCreationTest) {
TEST_DESCRIPTION("Test for correct swapchain creation");
AddSurfaceInstanceExtension();
InitBestPracticesFramework();
AddSwapchainDeviceExtension();
InitState();
if (!InitSurface()) {
printf("%s Cannot create surface, skipping test\n", kSkipPrefix);
return;
}
// GetPhysicalDeviceSurfaceCapabilitiesKHR() not called before trying to create a swapchain
m_errorMonitor->SetDesiredFailureMsg(kWarningBit, "UNASSIGNED-BestPractices-vkCreateSwapchainKHR-surface-not-retrieved");
// GetPhysicalDeviceSurfaceFormatsKHR() not called before trying to create a swapchain
m_errorMonitor->SetDesiredFailureMsg(kWarningBit, "UNASSIGNED-BestPractices-vkCreateSwapchainKHR-surface-not-retrieved");
// GetPhysicalDeviceSurfacePresentModesKHR() not called before trying to create a swapchain
m_errorMonitor->SetDesiredFailureMsg(kWarningBit, "UNASSIGNED-BestPractices-vkCreateSwapchainKHR-surface-not-retrieved");
#ifdef VK_USE_PLATFORM_ANDROID_KHR
m_surface_composite_alpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR;
#else
m_surface_composite_alpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
#endif
VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
VkSurfaceTransformFlagBitsKHR preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
VkSwapchainCreateInfoKHR swapchain_create_info = {};
swapchain_create_info.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
swapchain_create_info.pNext = 0;
swapchain_create_info.surface = m_surface;
swapchain_create_info.minImageCount = 3;
swapchain_create_info.imageArrayLayers = 1;
swapchain_create_info.imageUsage = imageUsage;
swapchain_create_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
swapchain_create_info.preTransform = preTransform;
swapchain_create_info.compositeAlpha = m_surface_composite_alpha;
swapchain_create_info.presentMode = VK_PRESENT_MODE_MAILBOX_KHR;
swapchain_create_info.clipped = VK_FALSE;
swapchain_create_info.oldSwapchain = 0;
// Set unexpected error because warning is thrown any time the present mode is not VK_PRESENT_MODE_FIFO_KHR
m_errorMonitor->SetUnexpectedError("UNASSIGNED-BestPractices-vkCreateSwapchainKHR-swapchain-presentmode-not-fifo");
VkResult err = vk::CreateSwapchainKHR(device(), &swapchain_create_info, nullptr, &m_swapchain);
ASSERT_TRUE(err == VK_ERROR_VALIDATION_FAILED_EXT);
m_errorMonitor->VerifyFound();
// Test for successful swapchain creation when GetPhysicalDeviceSurfaceCapabilitiesKHR() and
// GetPhysicalDeviceSurfaceFormatsKHR() are queried as expected and GetPhysicalDeviceSurfacePresentModesKHR() is not called but
// the present mode is VK_PRESENT_MODE_FIFO_KHR
vk::GetPhysicalDeviceSurfaceCapabilitiesKHR(gpu(), m_surface, &m_surface_capabilities);
uint32_t format_count;
vk::GetPhysicalDeviceSurfaceFormatsKHR(gpu(), m_surface, &format_count, nullptr);
if (format_count != 0) {
m_surface_formats.resize(format_count);
vk::GetPhysicalDeviceSurfaceFormatsKHR(gpu(), m_surface, &format_count, m_surface_formats.data());
}
swapchain_create_info.imageFormat = m_surface_formats[0].format;
swapchain_create_info.imageColorSpace = m_surface_formats[0].colorSpace;
swapchain_create_info.imageExtent = {m_surface_capabilities.minImageExtent.width, m_surface_capabilities.minImageExtent.height};
swapchain_create_info.presentMode = VK_PRESENT_MODE_FIFO_KHR;
m_errorMonitor->ExpectSuccess(kWarningBit);
err = vk::CreateSwapchainKHR(device(), &swapchain_create_info, nullptr, &m_swapchain);
m_errorMonitor->VerifyNotFound();
DestroySwapchain();
}
TEST_F(VkBestPracticesLayerTest, ExpectedQueryDetails) {
TEST_DESCRIPTION("Check that GetPhysicalDeviceQueueFamilyProperties is working as expected");
// Vulkan 1.1 required to test vkGetPhysicalDeviceQueueFamilyProperties2
app_info_.apiVersion = VK_API_VERSION_1_1;
// VK_KHR_get_physical_device_properties2 required to test vkGetPhysicalDeviceQueueFamilyProperties2KHR
instance_extensions_.emplace_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
ASSERT_NO_FATAL_FAILURE(InitBestPracticesFramework());
const vk_testing::PhysicalDevice phys_device_obj(gpu_);
std::vector<VkQueueFamilyProperties> queue_family_props;
m_errorMonitor->ExpectSuccess(kErrorBit | kWarningBit);
// Ensure we can find a graphics queue family.
uint32_t queue_count = 0;
vk::GetPhysicalDeviceQueueFamilyProperties(phys_device_obj.handle(), &queue_count, nullptr);
queue_family_props.resize(queue_count);
vk::GetPhysicalDeviceQueueFamilyProperties(phys_device_obj.handle(), &queue_count, queue_family_props.data());
// Now for GetPhysicalDeviceQueueFamilyProperties2
std::vector<VkQueueFamilyProperties2> queue_family_props2;
vk::GetPhysicalDeviceQueueFamilyProperties2(phys_device_obj.handle(), &queue_count, nullptr);
queue_family_props2.resize(queue_count);
vk::GetPhysicalDeviceQueueFamilyProperties2(phys_device_obj.handle(), &queue_count, queue_family_props2.data());
// And for GetPhysicalDeviceQueueFamilyProperties2KHR
PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR vkGetPhysicalDeviceQueueFamilyProperties2KHR =
reinterpret_cast<PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR>(
vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceQueueFamilyProperties2KHR"));
if (vkGetPhysicalDeviceQueueFamilyProperties2KHR) {
vkGetPhysicalDeviceQueueFamilyProperties2KHR(phys_device_obj.handle(), &queue_count, nullptr);
queue_family_props2.resize(queue_count);
vkGetPhysicalDeviceQueueFamilyProperties2KHR(phys_device_obj.handle(), &queue_count, queue_family_props2.data());
}
vk_testing::Device device(phys_device_obj.handle());
device.init();
}
TEST_F(VkBestPracticesLayerTest, MissingQueryDetails) {
TEST_DESCRIPTION("Check that GetPhysicalDeviceQueueFamilyProperties generates appropriate query warning");
ASSERT_NO_FATAL_FAILURE(InitBestPracticesFramework());
const vk_testing::PhysicalDevice phys_device_obj(gpu_);
std::vector<VkQueueFamilyProperties> queue_family_props(1);
uint32_t queue_count = static_cast<uint32_t>(queue_family_props.size());
m_errorMonitor->SetDesiredFailureMsg(kWarningBit, "UNASSIGNED-CoreValidation-DevLimit-MissingQueryCount");
vk::GetPhysicalDeviceQueueFamilyProperties(phys_device_obj.handle(), &queue_count, queue_family_props.data());
m_errorMonitor->VerifyFound();
// Now get information correctly
m_errorMonitor->ExpectSuccess(kErrorBit | kWarningBit);
vk_testing::QueueCreateInfoArray queue_info(phys_device_obj.queue_properties());
    // Only request creation with queue families that have at least one queue
std::vector<VkDeviceQueueCreateInfo> create_queue_infos;
auto qci = queue_info.data();
for (uint32_t j = 0; j < queue_info.size(); ++j) {
if (qci[j].queueCount) {
create_queue_infos.push_back(qci[j]);
}
}
m_errorMonitor->VerifyNotFound();
VkPhysicalDeviceFeatures all_features;
VkDeviceCreateInfo device_ci = {};
device_ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_ci.pNext = nullptr;
device_ci.queueCreateInfoCount = create_queue_infos.size();
device_ci.pQueueCreateInfos = create_queue_infos.data();
device_ci.enabledLayerCount = 0;
device_ci.ppEnabledLayerNames = NULL;
device_ci.enabledExtensionCount = 0;
device_ci.ppEnabledExtensionNames = nullptr;
device_ci.pEnabledFeatures = &all_features;
// vkGetPhysicalDeviceFeatures has not been called, so this should produce a warning
m_errorMonitor->SetDesiredFailureMsg(kWarningBit,
"UNASSIGNED-BestPractices-vkCreateDevice-physical-device-features-not-retrieved");
VkDevice device;
vk::CreateDevice(phys_device_obj.handle(), &device_ci, nullptr, &device);
m_errorMonitor->VerifyFound();
}
TEST_F(VkBestPracticesLayerTest, GetSwapchainImagesInvalidCount) {
TEST_DESCRIPTION("Pass an 'incorrect' count to the second GetSwapchainImagesKHR call");
if (!AddSurfaceInstanceExtension()) {
printf("%s surface extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitBestPracticesFramework());
if (!AddSwapchainDeviceExtension()) {
printf("%s swapchain extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
if (!InitSwapchain()) {
printf("%s Cannot create surface or swapchain, skipping test\n", kSkipPrefix);
return;
}
uint32_t swapchain_images_count = 0;
vk::GetSwapchainImagesKHR(device(), m_swapchain, &swapchain_images_count, nullptr);
m_errorMonitor->SetDesiredFailureMsg(kWarningBit, kVUID_BestPractices_Swapchain_InvalidCount);
++swapchain_images_count; // Set the image count to something greater (i.e., "invalid") than what was returned
std::vector<VkImage> swapchain_images(swapchain_images_count, VK_NULL_HANDLE);
vk::GetSwapchainImagesKHR(device(), m_swapchain, &swapchain_images_count, swapchain_images.data());
m_errorMonitor->VerifyFound();
}
TEST_F(VkBestPracticesLayerTest, DepthBiasNoAttachment) {
TEST_DESCRIPTION("Enable depthBias without a depth attachment");
InitBestPracticesFramework();
InitState();
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
CreatePipelineHelper pipe(*this);
pipe.InitInfo();
pipe.rs_state_ci_.depthBiasEnable = VK_TRUE;
pipe.rs_state_ci_.depthBiasConstantFactor = 1.0f;
pipe.InitState();
pipe.CreateGraphicsPipeline();
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);
m_errorMonitor->SetDesiredFailureMsg(kWarningBit, kVUID_BestPractices_DepthBiasNoAttachment);
vk::CmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
| 1 | 17,268 | Are these allocations necessary, or can these be instantiated normally (i.e., `VkCommandBufferObj secondary_full_clear(...)`). If the allocations _are_ necessar, I'd vote for using something like `std::unique_ptr` and then remove the associated `delete`s. | KhronosGroup-Vulkan-ValidationLayers | cpp |
@@ -43,6 +43,8 @@ public class InternalForTests {
public static void clear(ElasticsearchHttpStorage es) throws IOException {
es.clear();
+ // clear the servicespan cache in order to prevent tests from breaking
+ ((ElasticsearchHttpSpanConsumer)es.asyncSpanConsumer()).resetIndexToServiceSpansCache();
}
public static void flushOnWrites(ElasticsearchHttpStorage.Builder builder) { | 1 | /**
* Copyright 2015-2017 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package zipkin.storage.elasticsearch.http;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Set;
import zipkin.Codec;
import zipkin.DependencyLink;
import zipkin.internal.CallbackCaptor;
import zipkin.internal.Pair;
import zipkin.storage.AsyncSpanConsumer;
import static zipkin.storage.elasticsearch.http.ElasticsearchHttpSpanStore.DEPENDENCY_LINK;
/** Package accessor for integration tests */
public class InternalForTests {
public static void writeDependencyLinks(ElasticsearchHttpStorage es, List<DependencyLink> links,
long midnightUTC) {
String index = es.indexNameFormatter().indexNameForTimestamp(midnightUTC);
HttpBulkIndexer indexer = new HttpBulkIndexer("index-links", es);
for (DependencyLink link : links) {
byte[] document = Codec.JSON.writeDependencyLink(link);
indexer.add(index, DEPENDENCY_LINK, document,
link.parent + "|" + link.child); // Unique constraint
}
CallbackCaptor<Void> callback = new CallbackCaptor<>();
indexer.execute(callback);
callback.get();
}
public static void clear(ElasticsearchHttpStorage es) throws IOException {
es.clear();
}
public static void flushOnWrites(ElasticsearchHttpStorage.Builder builder) {
builder.flushOnWrites(true);
}
/** The old consumer didn't write to the "servicespan" type on ingest. */
public static AsyncSpanConsumer oldConsumer(ElasticsearchHttpStorage es) {
es.ensureIndexTemplate();
return new ElasticsearchHttpSpanConsumer(es) {
@Override void indexNames(HttpBulkIndexer ignored, Map<String, Set<Pair<String>>> ignored2) {
}
};
}
}
| 1 | 12,358 | es.clear should call this | openzipkin-zipkin | java |
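A hedged sketch of the reviewer's point: if ElasticsearchHttpStorage.clear() resets the servicespan cache itself, InternalForTests.clear(es) shrinks back to a plain es.clear() call and the cast disappears from the test helper. The surrounding body of clear() is assumed here, not copied from the real class:

  // Inside ElasticsearchHttpStorage (sketch; existing cleanup elided).
  void clear() throws IOException {
    // ... delete indexes as before ...
    // Reset the servicespan cache here so every caller of clear() gets it.
    ((ElasticsearchHttpSpanConsumer) asyncSpanConsumer())
        .resetIndexToServiceSpansCache();
  }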
@@ -54,6 +54,10 @@ class TPTest(unittest.TestCase):
def tearDown(self):
shutil.rmtree(self._tmpDir)
+
+ def testInitDefaultTP(self):
+ self.assertTrue(isinstance(TP(), TP))
+
def testCheckpointLearned(self):
# Create a model and give it some inputs to learn. | 1 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Tests for the Python implementation of the temporal pooler."""
import csv
import cPickle as pickle
import itertools
import os
import random
import shutil
import tempfile
import unittest2 as unittest
import numpy
from pkg_resources import resource_filename
from nupic.research import fdrutilities
from nupic.research.TP import TP
COL_SET = set(range(500))
VERBOSITY = 0
class TPTest(unittest.TestCase):
"""Unit tests for the TP class."""
def setUp(self):
self._tmpDir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self._tmpDir)
def testCheckpointLearned(self):
# Create a model and give it some inputs to learn.
tp1 = TP(numberOfCols=100, cellsPerColumn=12, verbosity=VERBOSITY)
sequences = [self.generateSequence() for _ in xrange(5)]
train = list(itertools.chain.from_iterable(sequences[:3]))
for bottomUpInput in train:
if bottomUpInput is None:
tp1.reset()
else:
tp1.compute(bottomUpInput, True, True)
    # Serialize and deserialize the TP.
checkpointPath = os.path.join(self._tmpDir, 'a')
tp1.saveToFile(checkpointPath)
tp2 = pickle.loads(pickle.dumps(tp1))
tp2.loadFromFile(checkpointPath)
# Check that the TPs are the same.
self.assertTPsEqual(tp1, tp2)
# Feed some data into the models.
test = list(itertools.chain.from_iterable(sequences[3:]))
for bottomUpInput in test:
if bottomUpInput is None:
tp1.reset()
tp2.reset()
else:
result1 = tp1.compute(bottomUpInput, True, True)
result2 = tp2.compute(bottomUpInput, True, True)
self.assertTPsEqual(tp1, tp2)
self.assertTrue(numpy.array_equal(result1, result2))
def testCheckpointMiddleOfSequence(self):
# Create a model and give it some inputs to learn.
tp1 = TP(numberOfCols=100, cellsPerColumn=12, verbosity=VERBOSITY)
sequences = [self.generateSequence() for _ in xrange(5)]
train = list(itertools.chain.from_iterable(sequences[:3] +
[sequences[3][:5]]))
for bottomUpInput in train:
if bottomUpInput is None:
tp1.reset()
else:
tp1.compute(bottomUpInput, True, True)
    # Serialize and deserialize the TP.
checkpointPath = os.path.join(self._tmpDir, 'a')
tp1.saveToFile(checkpointPath)
tp2 = pickle.loads(pickle.dumps(tp1))
tp2.loadFromFile(checkpointPath)
# Check that the TPs are the same.
self.assertTPsEqual(tp1, tp2)
# Feed some data into the models.
test = list(itertools.chain.from_iterable([sequences[3][5:]] +
sequences[3:]))
for bottomUpInput in test:
if bottomUpInput is None:
tp1.reset()
tp2.reset()
else:
result1 = tp1.compute(bottomUpInput, True, True)
result2 = tp2.compute(bottomUpInput, True, True)
self.assertTPsEqual(tp1, tp2)
self.assertTrue(numpy.array_equal(result1, result2))
def testCheckpointMiddleOfSequence2(self):
"""More complex test of checkpointing in the middle of a sequence."""
tp1 = TP(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14, False, 5, 2,
False, 1960, 0, False, '', 3, 10, 5, 0, 32, 128, 32, 'normal')
tp2 = TP(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14, False, 5, 2,
False, 1960, 0, False, '', 3, 10, 5, 0, 32, 128, 32, 'normal')
with open(resource_filename(__name__, 'data/tp_input.csv'), 'r') as fin:
reader = csv.reader(fin)
records = []
for bottomUpInStr in fin:
bottomUpIn = numpy.array(eval('[' + bottomUpInStr.strip() + ']'),
dtype='int32')
records.append(bottomUpIn)
i = 1
for r in records[:250]:
print i
i += 1
output1 = tp1.compute(r, True, True)
output2 = tp2.compute(r, True, True)
self.assertTrue(numpy.array_equal(output1, output2))
print 'Serializing and deserializing models.'
savePath1 = os.path.join(self._tmpDir, 'tp1.bin')
tp1.saveToFile(savePath1)
tp3 = pickle.loads(pickle.dumps(tp1))
tp3.loadFromFile(savePath1)
savePath2 = os.path.join(self._tmpDir, 'tp2.bin')
tp2.saveToFile(savePath2)
tp4 = pickle.loads(pickle.dumps(tp2))
tp4.loadFromFile(savePath2)
self.assertTPsEqual(tp1, tp3)
self.assertTPsEqual(tp2, tp4)
for r in records[250:]:
print i
i += 1
out1 = tp1.compute(r, True, True)
out2 = tp2.compute(r, True, True)
out3 = tp3.compute(r, True, True)
out4 = tp4.compute(r, True, True)
self.assertTrue(numpy.array_equal(out1, out2))
self.assertTrue(numpy.array_equal(out1, out3))
self.assertTrue(numpy.array_equal(out1, out4))
self.assertTPsEqual(tp1, tp2)
self.assertTPsEqual(tp1, tp3)
self.assertTPsEqual(tp2, tp4)
def assertTPsEqual(self, tp1, tp2):
"""Asserts that two TP instances are the same.
This is temporarily disabled since it does not work with the C++
implementation of the TP.
"""
self.assertEqual(tp1, tp2, tp1.diff(tp2))
self.assertTrue(fdrutilities.tpDiff2(tp1, tp2, 1, False))
@staticmethod
def generateSequence(n=10, numCols=100, minOnes=21, maxOnes=25):
"""Generates a sequence of n patterns."""
return [None] + [TPTest.generatePattern(numCols, minOnes, maxOnes)
for _ in xrange(n)]
@staticmethod
def generatePattern(numCols=100, minOnes=21, maxOnes=25):
"""Generate a single test pattern with given parameters.
Parameters:
numCols: Number of columns in each pattern.
minOnes: The minimum number of 1's in each pattern.
maxOnes: The maximum number of 1's in each pattern.
"""
assert minOnes < maxOnes
assert maxOnes < numCols
nOnes = random.randint(minOnes, maxOnes)
ind = random.sample(xrange(numCols), nOnes)
x = numpy.zeros(numCols, dtype='float32')
x[ind] = 1
return x
if __name__ == '__main__':
unittest.main()
| 1 | 15,921 | please remove. this is true by the definition of the Python language | numenta-nupic | py |
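If a default-construction smoke test is still wanted once the vacuous isinstance assertion goes, one option is to push a pattern through a default TP; tp.numberOfCols as an attribute name is an assumption, inferred from the constructor arguments used elsewhere in this file:

  def testInitDefaultTP(self):
    # isinstance(TP(), TP) holds by definition, so assert behaviour instead:
    # a default-constructed TP should accept one compute() cycle.
    # (tp.numberOfCols is an assumed attribute name.)
    tp = TP()
    pattern = TPTest.generatePattern(numCols=tp.numberOfCols)
    tp.compute(pattern, True, True)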
@@ -1239,6 +1239,8 @@ KEY_DATA = collections.OrderedDict([
('stop', ['<Ctrl-s>']),
('print', ['<Ctrl-Alt-p>']),
('open qute:settings', ['Ss']),
+ ('select-follow', ['<Return>']),
+ ('select-follow -t', ['<Ctrl-Return>']),
])),
('insert', collections.OrderedDict([ | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Configuration data for config.py.
Module attributes:
FIRST_COMMENT: The initial comment header to place in the config.
SECTION_DESC: A dictionary with descriptions for sections.
DATA: A global read-only copy of the default config, an OrderedDict of
sections.
"""
import sys
import re
import collections
from qutebrowser.config import configtypes as typ
from qutebrowser.config import sections as sect
from qutebrowser.config.value import SettingValue
from qutebrowser.utils.qtutils import MAXVALS
FIRST_COMMENT = r"""
# vim: ft=dosini
# Configfile for qutebrowser.
#
# This configfile is parsed by python's configparser in extended
# interpolation mode. The format is very INI-like, so there are
# categories like [general] with "key = value"-pairs.
#
# Note that you shouldn't add your own comments, as this file is
# regenerated every time the config is saved.
#
# Interpolation looks like ${value} or ${section:value} and will be
# replaced by the respective value.
#
# Some settings will expand environment variables. Note that, since
# interpolation is run first, you will need to escape the $ char as
# described below.
#
# This is the default config, so if you want to remove anything from
# here (as opposed to change/add), for example a key binding, set it to
# an empty value.
#
# You will need to escape the following values:
# - # at the start of the line (at the first position of the key) (\#)
# - $ in a value ($$)
"""
SECTION_DESC = {
'general': "General/miscellaneous options.",
'ui': "General options related to the user interface.",
'input': "Options related to input modes.",
'network': "Settings related to the network.",
'completion': "Options related to completion and command history.",
'tabs': "Configuration of the tab bar.",
'storage': "Settings related to cache and storage.",
'content': "Loaded plugins/scripts and allowed actions.",
'hints': "Hinting settings.",
'searchengines': (
"Definitions of search engines which can be used via the address "
"bar.\n"
"The searchengine named `DEFAULT` is used when "
"`general -> auto-search` is true and something else than a URL was "
"entered to be opened. Other search engines can be used by prepending "
"the search engine name to the search term, e.g. "
"`:open google qutebrowser`. The string `{}` will be replaced by the "
"search term, use `{{` and `}}` for literal `{`/`}` signs."),
'aliases': (
"Aliases for commands.\n"
"By default, no aliases are defined. Example which adds a new command "
"`:qtb` to open qutebrowsers website:\n\n"
"`qtb = open http://www.qutebrowser.org/`"),
'colors': (
"Colors used in the UI.\n"
"A value can be in one of the following format:\n\n"
" * `#RGB`/`#RRGGBB`/`#RRRGGGBBB`/`#RRRRGGGGBBBB`\n"
" * A SVG color name as specified in http://www.w3.org/TR/SVG/"
"types.html#ColorKeywords[the W3C specification].\n"
" * transparent (no color)\n"
" * `rgb(r, g, b)` / `rgba(r, g, b, a)` (values 0-255 or "
"percentages)\n"
" * `hsv(h, s, v)` / `hsva(h, s, v, a)` (values 0-255, hue 0-359)\n"
" * A gradient as explained in http://qt-project.org/doc/qt-4.8/"
"stylesheet-reference.html#list-of-property-types[the Qt "
"documentation] under ``Gradient''.\n\n"
"The `hints.*` values are a special case as they're real CSS "
"colors, not Qt-CSS colors. There, for a gradient, you need to use "
"`-webkit-gradient`, see https://www.webkit.org/blog/175/introducing-"
"css-gradients/[the WebKit documentation]."),
'fonts': (
"Fonts used for the UI, with optional style/weight/size.\n\n"
" * Style: `normal`/`italic`/`oblique`\n"
" * Weight: `normal`, `bold`, `100`..`900`\n"
" * Size: _number_ `px`/`pt`"),
}
DEFAULT_FONT_SIZE = '10pt' if sys.platform == 'darwin' else '8pt'
def data(readonly=False):
"""Get the default config data.
Return:
A {name: section} OrderedDict.
"""
return collections.OrderedDict([
('general', sect.KeyValue(
('ignore-case',
SettingValue(typ.IgnoreCase(), 'smart'),
"Whether to find text on a page case-insensitively."),
('wrap-search',
SettingValue(typ.Bool(), 'true'),
"Whether to wrap finding text to the top when arriving at the "
"end."),
('startpage',
SettingValue(typ.List(), 'https://www.duckduckgo.com'),
"The default page(s) to open at the start, separated by commas."),
('default-page',
SettingValue(typ.FuzzyUrl(), '${startpage}'),
"The page to open if :open -t/-b/-w is used without URL. Use "
"`about:blank` for a blank page."),
('auto-search',
SettingValue(typ.AutoSearch(), 'naive'),
"Whether to start a search when something other than a URL is "
"entered."),
('auto-save-config',
SettingValue(typ.Bool(), 'true'),
"Whether to save the config automatically on quit."),
('auto-save-interval',
SettingValue(typ.Int(minval=0), '15000'),
"How often (in milliseconds) to auto-save config/cookies/etc."),
('editor',
SettingValue(typ.ShellCommand(placeholder=True), 'gvim -f "{}"'),
"The editor (and arguments) to use for the `open-editor` "
"command.\n\n"
"Use `{}` for the filename. The value gets split like in a "
"shell, so you can use `\"` or `'` to quote arguments."),
('editor-encoding',
SettingValue(typ.Encoding(), 'utf-8'),
"Encoding to use for editor."),
('private-browsing',
SettingValue(typ.Bool(), 'false'),
"Do not record visited pages in the history or store web page "
"icons."),
('developer-extras',
SettingValue(typ.Bool(), 'false'),
"Enable extra tools for Web developers.\n\n"
"This needs to be enabled for `:inspector` to work and also adds "
"an _Inspect_ entry to the context menu."),
('print-element-backgrounds',
SettingValue(typ.Bool(), 'true'),
"Whether the background color and images are also drawn when the "
"page is printed."),
('xss-auditing',
SettingValue(typ.Bool(), 'false'),
"Whether load requests should be monitored for cross-site "
"scripting attempts.\n\n"
"Suspicious scripts will be blocked and reported in the "
"inspector's JavaScript console. Enabling this feature might "
"have an impact on performance."),
('site-specific-quirks',
SettingValue(typ.Bool(), 'true'),
"Enable workarounds for broken sites."),
('default-encoding',
SettingValue(typ.String(none_ok=True), ''),
"Default encoding to use for websites.\n\n"
"The encoding must be a string describing an encoding such as "
"_utf-8_, _iso-8859-1_, etc. If left empty a default value will "
"be used."),
('new-instance-open-target',
SettingValue(typ.NewInstanceOpenTarget(), 'window'),
"How to open links in an existing instance if a new one is "
"launched."),
('log-javascript-console',
SettingValue(typ.Bool(), 'false'),
"Whether to log javascript console messages."),
('save-session',
SettingValue(typ.Bool(), 'false'),
"Whether to always save the open pages."),
('session-default-name',
SettingValue(typ.SessionName(none_ok=True), ''),
"The name of the session to save by default, or empty for the "
"last loaded session."),
readonly=readonly
)),
('ui', sect.KeyValue(
('zoom-levels',
SettingValue(typ.PercList(minval=0),
'25%,33%,50%,67%,75%,90%,100%,110%,125%,150%,175%,'
'200%,250%,300%,400%,500%'),
"The available zoom levels, separated by commas."),
('default-zoom',
SettingValue(typ.Perc(), '100%'),
"The default zoom level."),
('downloads-position',
SettingValue(typ.VerticalPosition(), 'north'),
"Where to show the downloaded files."),
('message-timeout',
SettingValue(typ.Int(), '2000'),
"Time (in ms) to show messages in the statusbar for."),
('message-unfocused',
SettingValue(typ.Bool(), 'false'),
"Whether to show messages in unfocused windows."),
('confirm-quit',
SettingValue(typ.ConfirmQuit(), 'never'),
"Whether to confirm quitting the application."),
('display-statusbar-messages',
SettingValue(typ.Bool(), 'false'),
"Whether to display javascript statusbar messages."),
('zoom-text-only',
SettingValue(typ.Bool(), 'false'),
"Whether the zoom factor on a frame applies only to the text or "
"to all content."),
('frame-flattening',
SettingValue(typ.Bool(), 'false'),
"Whether to expand each subframe to its contents.\n\n"
"This will flatten all the frames to become one scrollable "
"page."),
('user-stylesheet',
SettingValue(typ.UserStyleSheet(),
'::-webkit-scrollbar { width: 0px; height: 0px; }'),
"User stylesheet to use (absolute filename or CSS string). Will "
"expand environment variables."),
('css-media-type',
SettingValue(typ.String(none_ok=True), ''),
"Set the CSS media type."),
('smooth-scrolling',
SettingValue(typ.Bool(), 'false'),
"Whether to enable smooth scrolling for webpages."),
('remove-finished-downloads',
SettingValue(typ.Bool(), 'false'),
"Whether to remove finished downloads automatically."),
('hide-statusbar',
SettingValue(typ.Bool(), 'false'),
"Whether to hide the statusbar unless a message is shown."),
('window-title-format',
SettingValue(typ.FormatString(fields=['perc', 'perc_raw', 'title',
'title_sep', 'id']),
'{perc}{title}{title_sep}qutebrowser'),
"The format to use for the window title. The following "
"placeholders are defined:\n\n"
"* `{perc}`: The percentage as a string like `[10%]`.\n"
"* `{perc_raw}`: The raw percentage, e.g. `10`\n"
"* `{title}`: The title of the current web page\n"
"* `{title_sep}`: The string ` - ` if a title is set, empty "
"otherwise.\n"
"* `{id}`: The internal window ID of this window."),
('hide-mouse-cursor',
SettingValue(typ.Bool(), 'false'),
"Whether to hide the mouse cursor."),
readonly=readonly
)),
('network', sect.KeyValue(
('do-not-track',
SettingValue(typ.Bool(), 'true'),
"Value to send in the `DNT` header."),
('accept-language',
SettingValue(typ.String(none_ok=True), 'en-US,en'),
"Value to send in the `accept-language` header."),
('user-agent',
SettingValue(typ.UserAgent(none_ok=True), ''),
"User agent to send. Empty to send the default."),
('proxy',
SettingValue(typ.Proxy(), 'system'),
"The proxy to use.\n\n"
"In addition to the listed values, you can use a `socks://...` "
"or `http://...` URL."),
('proxy-dns-requests',
SettingValue(typ.Bool(), 'true'),
"Whether to send DNS requests over the configured proxy."),
('ssl-strict',
SettingValue(typ.BoolAsk(), 'ask'),
"Whether to validate SSL handshakes."),
('dns-prefetch',
SettingValue(typ.Bool(), 'true'),
"Whether to try to pre-fetch DNS entries to speed up browsing."),
readonly=readonly
)),
('completion', sect.KeyValue(
('download-path-suggestion',
SettingValue(typ.DownloadPathSuggestion(), 'path'),
"What to display in the download filename input."),
('timestamp-format',
SettingValue(typ.String(none_ok=True), '%Y-%m-%d'),
"How to format timestamps (e.g. for history)."),
('show',
SettingValue(typ.Bool(), 'true'),
"Whether to show the autocompletion window."),
('height',
SettingValue(typ.PercOrInt(minperc=0, maxperc=100, minint=1),
'50%'),
"The height of the completion, in px or as percentage of the "
"window."),
('cmd-history-max-items',
SettingValue(typ.Int(minval=-1), '100'),
"How many commands to save in the command history.\n\n"
"0: no history / -1: unlimited"),
('web-history-max-items',
SettingValue(typ.Int(minval=-1), '1000'),
"How many URLs to show in the web history.\n\n"
"0: no history / -1: unlimited"),
('quick-complete',
SettingValue(typ.Bool(), 'true'),
"Whether to move on to the next part when there's only one "
"possible completion left."),
('shrink',
SettingValue(typ.Bool(), 'false'),
"Whether to shrink the completion to be smaller than the "
"configured size if there are no scrollbars."),
readonly=readonly
)),
('input', sect.KeyValue(
('timeout',
SettingValue(typ.Int(minval=0, maxval=MAXVALS['int']), '500'),
"Timeout for ambiguous key bindings."),
('partial-timeout',
SettingValue(typ.Int(minval=0, maxval=MAXVALS['int']), '1000'),
"Timeout for partially typed key bindings."),
('insert-mode-on-plugins',
SettingValue(typ.Bool(), 'false'),
"Whether to switch to insert mode when clicking flash and other "
"plugins."),
('auto-leave-insert-mode',
SettingValue(typ.Bool(), 'true'),
"Whether to leave insert mode if a non-editable element is "
"clicked."),
('auto-insert-mode',
SettingValue(typ.Bool(), 'false'),
"Whether to automatically enter insert mode if an editable "
"element is focused after page load."),
('forward-unbound-keys',
SettingValue(typ.ForwardUnboundKeys(), 'auto'),
"Whether to forward unbound keys to the webview in normal mode."),
('spatial-navigation',
SettingValue(typ.Bool(), 'false'),
"Enables or disables the Spatial Navigation feature.\n\n"
"Spatial navigation consists of the ability to navigate between "
"focusable elements in a Web page, such as hyperlinks and form "
"controls, by using the Left, Right, Up and Down arrow keys. For "
"example, if a user presses the Right key, heuristics determine "
"whether there is an element they might be trying to reach towards "
"the right and which element they probably want."),
('links-included-in-focus-chain',
SettingValue(typ.Bool(), 'true'),
"Whether hyperlinks should be included in the keyboard focus "
"chain."),
('rocker-gestures',
SettingValue(typ.Bool(), 'false'),
"Whether to enable Opera-like mouse rocker gestures. This "
"disables the context menu."),
('mouse-zoom-divider',
SettingValue(typ.Int(minval=1), '512'),
"How much to divide the mouse wheel movements to translate them "
"into zoom increments."),
readonly=readonly
)),
('tabs', sect.KeyValue(
('background-tabs',
SettingValue(typ.Bool(), 'false'),
"Whether to open new tabs (middle-click/ctrl+click) in the "
"background."),
('select-on-remove',
SettingValue(typ.SelectOnRemove(), 'right'),
"Which tab to select when the focused tab is removed."),
('new-tab-position',
SettingValue(typ.NewTabPosition(), 'right'),
"How new tabs are positioned."),
('new-tab-position-explicit',
SettingValue(typ.NewTabPosition(), 'last'),
"How new tabs opened explicitly are positioned."),
('last-close',
SettingValue(typ.LastClose(), 'ignore'),
"Behaviour when the last tab is closed."),
('hide-auto',
SettingValue(typ.Bool(), 'false'),
"Hide the tab bar if only one tab is open."),
('hide-always',
SettingValue(typ.Bool(), 'false'),
"Always hide the tab bar."),
('wrap',
SettingValue(typ.Bool(), 'true'),
"Whether to wrap when changing tabs."),
('movable',
SettingValue(typ.Bool(), 'true'),
"Whether tabs should be movable."),
('close-mouse-button',
SettingValue(typ.CloseButton(), 'middle'),
"On which mouse button to close tabs."),
('position',
SettingValue(typ.Position(), 'north'),
"The position of the tab bar."),
('show-favicons',
SettingValue(typ.Bool(), 'true'),
"Whether to show favicons in the tab bar."),
('width',
SettingValue(typ.PercOrInt(minperc=0, maxperc=100, minint=1),
'20%'),
"The width of the tab bar if it's vertical, in px or as "
"percentage of the window."),
('indicator-width',
SettingValue(typ.Int(minval=0), '3'),
"Width of the progress indicator (0 to disable)."),
('indicator-space',
SettingValue(typ.Int(minval=0), '3'),
"Spacing between tab edge and indicator."),
('tabs-are-windows',
SettingValue(typ.Bool(), 'false'),
"Whether to open windows instead of tabs."),
('title-format',
SettingValue(typ.FormatString(
fields=['perc', 'perc_raw', 'title', 'title_sep', 'index',
'id']), '{index}: {title}'),
"The format to use for the tab title. The following placeholders "
"are defined:\n\n"
"* `{perc}`: The percentage as a string like `[10%]`.\n"
"* `{perc_raw}`: The raw percentage, e.g. `10`\n"
"* `{title}`: The title of the current web page\n"
"* `{title_sep}`: The string ` - ` if a title is set, empty "
"otherwise.\n"
"* `{index}`: The index of this tab.\n"
"* `{id}`: The internal tab ID of this tab."),
('mousewheel-tab-switching',
SettingValue(typ.Bool(), 'true'),
"Switch between tabs using the mouse wheel."),
readonly=readonly
)),
('storage', sect.KeyValue(
('download-directory',
SettingValue(typ.Directory(none_ok=True), ''),
"The directory to save downloads to. An empty value selects a "
"sensible OS-specific default. Will expand environment "
"variables."),
('maximum-pages-in-cache',
SettingValue(
typ.Int(none_ok=True, minval=0, maxval=MAXVALS['int']), ''),
"The maximum number of pages to hold in the global memory page "
"cache.\n\n"
"The Page Cache allows for a nicer user experience when "
"navigating forth or back to pages in the forward/back history, "
"by pausing and resuming up to _n_ pages.\n\n"
"For more information about the feature, please refer to: "
"http://webkit.org/blog/427/webkit-page-cache-i-the-basics/"),
('object-cache-capacities',
SettingValue(
typ.WebKitBytesList(length=3, maxsize=MAXVALS['int']), ''),
"The capacities for the global memory cache for dead objects "
"such as stylesheets or scripts. Syntax: cacheMinDeadCapacity, "
"cacheMaxDead, totalCapacity.\n\n"
"The _cacheMinDeadCapacity_ specifies the minimum number of "
"bytes that dead objects should consume when the cache is under "
"pressure.\n\n"
"_cacheMaxDead_ is the maximum number of bytes that dead objects "
"should consume when the cache is *not* under pressure.\n\n"
"_totalCapacity_ specifies the maximum number of bytes "
"that the cache should consume *overall*."),
('offline-storage-default-quota',
SettingValue(typ.WebKitBytes(maxsize=MAXVALS['int64']), ''),
"Default quota for new offline storage databases."),
('offline-web-application-cache-quota',
SettingValue(typ.WebKitBytes(maxsize=MAXVALS['int64']), ''),
"Quota for the offline web application cache."),
('offline-storage-database',
SettingValue(typ.Bool(), 'true'),
"Whether support for the HTML 5 offline storage feature is "
"enabled."),
('offline-web-application-storage',
SettingValue(typ.Bool(), 'true'),
"Whether support for the HTML 5 web application cache feature is "
"enabled.\n\n"
"An application cache acts like an HTTP cache in some sense. For "
"documents that use the application cache via JavaScript, the "
"loader engine will first ask the application cache for the "
"contents, before hitting the network.\n\n"
"The feature is described in detail at: "
"http://dev.w3.org/html5/spec/Overview.html#appcache"),
('local-storage',
SettingValue(typ.Bool(), 'true'),
"Whether support for the HTML 5 local storage feature is "
"enabled."),
('cache-size',
SettingValue(typ.Int(minval=0, maxval=MAXVALS['int64']),
'52428800'),
"Size of the HTTP network cache."),
readonly=readonly
)),
('content', sect.KeyValue(
('allow-images',
SettingValue(typ.Bool(), 'true'),
"Whether images are automatically loaded in web pages."),
('allow-javascript',
SettingValue(typ.Bool(), 'true'),
"Enables or disables the running of JavaScript programs."),
('allow-plugins',
SettingValue(typ.Bool(), 'false'),
"Enables or disables plugins in Web pages.\n\n"
'Qt plugins with a mimetype such as "application/x-qt-plugin" '
"are not affected by this setting."),
('webgl',
SettingValue(typ.Bool(), 'true'),
"Enables or disables WebGL."),
('css-regions',
SettingValue(typ.Bool(), 'true'),
"Enable or disable support for CSS regions."),
('hyperlink-auditing',
SettingValue(typ.Bool(), 'false'),
"Enable or disable hyperlink auditing (<a ping>)."),
('geolocation',
SettingValue(typ.BoolAsk(), 'ask'),
"Allow websites to request geolocations."),
('notifications',
SettingValue(typ.BoolAsk(), 'ask'),
"Allow websites to show notifications."),
#('allow-java',
# SettingValue(typ.Bool(), 'true'),
# "Enables or disables Java applets. Currently Java applets are "
# "not supported"),
('javascript-can-open-windows',
SettingValue(typ.Bool(), 'false'),
"Whether JavaScript programs can open new windows."),
('javascript-can-close-windows',
SettingValue(typ.Bool(), 'false'),
"Whether JavaScript programs can close windows."),
('javascript-can-access-clipboard',
SettingValue(typ.Bool(), 'false'),
"Whether JavaScript programs can read or write to the "
"clipboard."),
('ignore-javascript-prompt',
SettingValue(typ.Bool(), 'false'),
"Whether all javascript prompts should be ignored."),
('ignore-javascript-alert',
SettingValue(typ.Bool(), 'false'),
"Whether all javascript alerts should be ignored."),
('local-content-can-access-remote-urls',
SettingValue(typ.Bool(), 'false'),
"Whether locally loaded documents are allowed to access remote "
"urls."),
('local-content-can-access-file-urls',
SettingValue(typ.Bool(), 'true'),
"Whether locally loaded documents are allowed to access other "
"local urls."),
('cookies-accept',
SettingValue(typ.AcceptCookies(), 'default'),
"Whether to accept cookies."),
('cookies-store',
SettingValue(typ.Bool(), 'true'),
"Whether to store cookies."),
('host-block-lists',
SettingValue(
typ.UrlList(none_ok=True),
'http://www.malwaredomainlist.com/hostslist/hosts.txt,'
'http://someonewhocares.org/hosts/hosts,'
'http://winhelp2002.mvps.org/hosts.zip,'
'http://malwaredomains.lehigh.edu/files/justdomains.zip,'
'http://pgl.yoyo.org/adservers/serverlist.php?'
'hostformat=hosts&mimetype=plaintext'),
"List of URLs of lists which contain hosts to block.\n\n"
"The file can be in one of the following formats:\n\n"
"- An '/etc/hosts'-like file\n"
"- One host per line\n"
"- A zip-file of any of the above, with either only one file, or "
"a file named 'hosts' (with any extension)."),
('host-blocking-enabled',
SettingValue(typ.Bool(), 'true'),
"Whether host blocking is enabled."),
readonly=readonly
)),
('hints', sect.KeyValue(
('border',
SettingValue(typ.String(), '1px solid #E3BE23'),
"CSS border value for hints."),
('opacity',
SettingValue(typ.Float(minval=0.0, maxval=1.0), '0.7'),
"Opacity for hints."),
('mode',
SettingValue(typ.HintMode(), 'letter'),
"Mode to use for hints."),
('chars',
SettingValue(typ.String(minlen=2), 'asdfghjkl'),
"Chars used for hint strings."),
('min-chars',
SettingValue(typ.Int(minval=1), '1'),
"Minimum number of chars used for hint strings."),
('scatter',
SettingValue(typ.Bool(), 'true'),
"Whether to scatter hint key chains (like Vimium) or not (like "
"dwb)."),
('uppercase',
SettingValue(typ.Bool(), 'false'),
"Make chars in hint strings uppercase."),
('auto-follow',
SettingValue(typ.Bool(), 'true'),
"Whether to auto-follow a hint if there's only one left."),
('next-regexes',
SettingValue(typ.RegexList(flags=re.IGNORECASE),
r'\bnext\b,\bmore\b,\bnewer\b,\b[>→≫]\b,\b(>>|»)\b'),
"A comma-separated list of regexes to use for 'next' links."),
('prev-regexes',
SettingValue(typ.RegexList(flags=re.IGNORECASE),
r'\bprev(ious)?\b,\bback\b,\bolder\b,\b[<←≪]\b,'
r'\b(<<|«)\b'),
"A comma-separated list of regexes to use for 'prev' links."),
readonly=readonly
)),
('searchengines', sect.ValueList(
typ.SearchEngineName(), typ.SearchEngineUrl(),
('DEFAULT', 'https://duckduckgo.com/?q={}'),
readonly=readonly
)),
('aliases', sect.ValueList(
typ.String(forbidden=' '), typ.Command(),
readonly=readonly
)),
('colors', sect.KeyValue(
('completion.fg',
SettingValue(typ.QtColor(), 'white'),
"Text color of the completion widget."),
('completion.bg',
SettingValue(typ.QssColor(), '#333333'),
"Background color of the completion widget."),
('completion.alternate-bg',
SettingValue(typ.QssColor(), '#444444'),
"Alternating background color of the completion widget."),
('completion.category.fg',
SettingValue(typ.QtColor(), 'white'),
"Foreground color of completion widget category headers."),
('completion.category.bg',
SettingValue(typ.QssColor(), 'qlineargradient(x1:0, y1:0, x2:0, '
'y2:1, stop:0 #888888, stop:1 #505050)'),
"Background color of the completion widget category headers."),
('completion.category.border.top',
SettingValue(typ.QssColor(), 'black'),
"Top border color of the completion widget category headers."),
('completion.category.border.bottom',
SettingValue(typ.QssColor(), '${completion.category.border.top}'),
"Bottom border color of the completion widget category headers."),
('completion.item.selected.fg',
SettingValue(typ.QtColor(), 'black'),
"Foreground color of the selected completion item."),
('completion.item.selected.bg',
SettingValue(typ.QssColor(), '#e8c000'),
"Background color of the selected completion item."),
('completion.item.selected.border.top',
SettingValue(typ.QssColor(), '#bbbb00'),
"Top border color of the selected completion item."),
('completion.item.selected.border.bottom',
SettingValue(
typ.QssColor(), '${completion.item.selected.border.top}'),
"Bottom border color of the selected completion item."),
('completion.match.fg',
SettingValue(typ.QssColor(), '#ff4444'),
"Foreground color of the matched text in the completion."),
('statusbar.bg',
SettingValue(typ.QssColor(), 'black'),
"Background color of the statusbar."),
('statusbar.fg',
SettingValue(typ.QssColor(), 'white'),
"Foreground color of the statusbar."),
('statusbar.bg.error',
SettingValue(typ.QssColor(), 'red'),
"Background color of the statusbar if there was an error."),
('statusbar.bg.warning',
SettingValue(typ.QssColor(), 'darkorange'),
"Background color of the statusbar if there is a warning."),
('statusbar.bg.prompt',
SettingValue(typ.QssColor(), 'darkblue'),
"Background color of the statusbar if there is a prompt."),
('statusbar.bg.insert',
SettingValue(typ.QssColor(), 'darkgreen'),
"Background color of the statusbar in insert mode."),
('statusbar.bg.caret',
SettingValue(typ.QssColor(), 'purple'),
"Background color of the statusbar in caret mode."),
('statusbar.bg.caret-selection',
SettingValue(typ.QssColor(), '#a12dff'),
"Background color of the statusbar in caret mode with a "
"selection."),
('statusbar.progress.bg',
SettingValue(typ.QssColor(), 'white'),
"Background color of the progress bar."),
('statusbar.url.fg',
SettingValue(typ.QssColor(), '${statusbar.fg}'),
"Default foreground color of the URL in the statusbar."),
('statusbar.url.fg.success',
SettingValue(typ.QssColor(), 'lime'),
"Foreground color of the URL in the statusbar on successful "
"load."),
('statusbar.url.fg.error',
SettingValue(typ.QssColor(), 'orange'),
"Foreground color of the URL in the statusbar on error."),
('statusbar.url.fg.warn',
SettingValue(typ.QssColor(), 'yellow'),
"Foreground color of the URL in the statusbar when there's a "
"warning."),
('statusbar.url.fg.hover',
SettingValue(typ.QssColor(), 'aqua'),
"Foreground color of the URL in the statusbar for hovered "
"links."),
('tabs.fg.odd',
SettingValue(typ.QtColor(), 'white'),
"Foreground color of unselected odd tabs."),
('tabs.fg.even',
SettingValue(typ.QtColor(), 'white'),
"Foreground color of unselected even tabs."),
('tabs.fg.selected',
SettingValue(typ.QtColor(), 'white'),
"Foreground color of selected tabs."),
('tabs.bg.odd',
SettingValue(typ.QtColor(), 'grey'),
"Background color of unselected odd tabs."),
('tabs.bg.even',
SettingValue(typ.QtColor(), 'darkgrey'),
"Background color of unselected even tabs."),
('tabs.bg.selected',
SettingValue(typ.QtColor(), 'black'),
"Background color of selected tabs."),
('tabs.bg.bar',
SettingValue(typ.QtColor(), '#555555'),
"Background color of the tab bar."),
('tabs.indicator.start',
SettingValue(typ.QtColor(), '#0000aa'),
"Color gradient start for the tab indicator."),
('tabs.indicator.stop',
SettingValue(typ.QtColor(), '#00aa00'),
"Color gradient end for the tab indicator."),
('tabs.indicator.error',
SettingValue(typ.QtColor(), '#ff0000'),
"Color for the tab indicator on errors."),
('tabs.indicator.system',
SettingValue(typ.ColorSystem(), 'rgb'),
"Color gradient interpolation system for the tab indicator."),
('hints.fg',
SettingValue(typ.CssColor(), 'black'),
"Font color for hints."),
('hints.fg.match',
SettingValue(typ.CssColor(), 'green'),
"Font color for the matched part of hints."),
('hints.bg',
SettingValue(
typ.CssColor(), '-webkit-gradient(linear, left top, '
'left bottom, color-stop(0%,#FFF785), '
'color-stop(100%,#FFC542))'),
"Background color for hints."),
('downloads.fg',
SettingValue(typ.QtColor(), '#ffffff'),
"Foreground color for downloads."),
('downloads.bg.bar',
SettingValue(typ.QssColor(), 'black'),
"Background color for the download bar."),
('downloads.bg.start',
SettingValue(typ.QtColor(), '#0000aa'),
"Color gradient start for downloads."),
('downloads.bg.stop',
SettingValue(typ.QtColor(), '#00aa00'),
"Color gradient end for downloads."),
('downloads.bg.system',
SettingValue(typ.ColorSystem(), 'rgb'),
"Color gradient interpolation system for downloads."),
('downloads.bg.error',
SettingValue(typ.QtColor(), 'red'),
"Background color for downloads with errors."),
readonly=readonly
)),
('fonts', sect.KeyValue(
('_monospace',
SettingValue(typ.Font(), 'Terminus, Monospace, '
'"DejaVu Sans Mono", Monaco, '
'"Bitstream Vera Sans Mono", "Andale Mono", '
'"Liberation Mono", "Courier New", Courier, '
'monospace, Fixed, Consolas, Terminal'),
"Default monospace fonts."),
('completion',
SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used in the completion widget."),
('tabbar',
SettingValue(typ.QtFont(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used in the tab bar."),
('statusbar',
SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used in the statusbar."),
('downloads',
SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used for the downloadbar."),
('hints',
SettingValue(typ.Font(), 'bold 12px Monospace'),
"Font used for the hints."),
('debug-console',
SettingValue(typ.QtFont(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used for the debugging console."),
('web-family-standard',
SettingValue(typ.FontFamily(none_ok=True), ''),
"Font family for standard fonts."),
('web-family-fixed',
SettingValue(typ.FontFamily(none_ok=True), ''),
"Font family for fixed fonts."),
('web-family-serif',
SettingValue(typ.FontFamily(none_ok=True), ''),
"Font family for serif fonts."),
('web-family-sans-serif',
SettingValue(typ.FontFamily(none_ok=True), ''),
"Font family for sans-serif fonts."),
('web-family-cursive',
SettingValue(typ.FontFamily(none_ok=True), ''),
"Font family for cursive fonts."),
('web-family-fantasy',
SettingValue(typ.FontFamily(none_ok=True), ''),
"Font family for fantasy fonts."),
('web-size-minimum',
SettingValue(
typ.Int(none_ok=True, minval=1, maxval=MAXVALS['int']), ''),
"The hard minimum font size."),
('web-size-minimum-logical',
SettingValue(
typ.Int(none_ok=True, minval=1, maxval=MAXVALS['int']), ''),
"The minimum logical font size that is applied when zooming "
"out."),
('web-size-default',
SettingValue(
typ.Int(none_ok=True, minval=1, maxval=MAXVALS['int']), ''),
"The default font size for regular text."),
('web-size-default-fixed',
SettingValue(
typ.Int(none_ok=True, minval=1, maxval=MAXVALS['int']), ''),
"The default font size for fixed-pitch text."),
readonly=readonly
)),
])
DATA = data(readonly=True)
KEY_FIRST_COMMENT = """
# vim: ft=conf
#
# In this config file, qutebrowser's key bindings are configured.
# The format looks like this:
#
# [keymode]
#
# command
# keychain
# keychain2
# ...
#
# All blank lines and lines starting with '#' are ignored.
# Inline-comments are not permitted.
#
# keymode is a comma-separated list of modes in which the key binding should be
# active. If keymode starts with !, the key binding is active in all modes
# except the listed modes.
#
# For special keys (which can't be part of a keychain), enclose them in `<`...`>`.
# For modifiers, you can use either `-` or `+` as delimiters, and these names:
#
# * Control: `Control`, `Ctrl`
# * Meta: `Meta`, `Windows`, `Mod4`
# * Alt: `Alt`, `Mod1`
# * Shift: `Shift`
#
# For simple keys (no `<>`-signs), a capital letter means the key is pressed
# with Shift. For special keys (with `<>`-signs), you need to explicitly add
# `Shift-` to match a key pressed with shift. You can bind multiple commands
# by separating them with `;;`.
"""
KEY_SECTION_DESC = {
'all': "Keybindings active in all modes.",
'normal': "Keybindings for normal mode.",
'insert': (
"Keybindings for insert mode.\n"
"Since normal keypresses are passed through, only special keys are "
"supported in this mode.\n"
"Useful hidden commands to map in this section:\n\n"
" * `open-editor`: Open a texteditor with the focused field."),
'hint': (
"Keybindings for hint mode.\n"
"Since normal keypresses are passed through, only special keys are "
"supported in this mode.\n"
"Useful hidden commands to map in this section:\n\n"
" * `follow-hint`: Follow the currently selected hint."),
'passthrough': (
"Keybindings for passthrough mode.\n"
"Since normal keypresses are passed through, only special keys are "
"supported in this mode."),
'command': (
"Keybindings for command mode.\n"
"Since normal keypresses are passed through, only special keys are "
"supported in this mode.\n"
"Useful hidden commands to map in this section:\n\n"
" * `command-history-prev`: Switch to previous command in history.\n"
" * `command-history-next`: Switch to next command in history.\n"
" * `completion-item-prev`: Select previous item in completion.\n"
" * `completion-item-next`: Select next item in completion.\n"
" * `command-accept`: Execute the command currently in the "
"commandline."),
'prompt': (
"Keybindings for prompts in the status line.\n"
"You can bind normal keys in this mode, but they will be only active "
"when a yes/no-prompt is asked. For other prompt modes, you can only "
"bind special keys.\n"
"Useful hidden commands to map in this section:\n\n"
" * `prompt-accept`: Confirm the entered value.\n"
" * `prompt-yes`: Answer yes to a yes/no question.\n"
" * `prompt-no`: Answer no to a yes/no question."),
'caret': (
""),
}
KEY_DATA = collections.OrderedDict([
('!normal', collections.OrderedDict([
('leave-mode', ['<Escape>', '<Ctrl-[>']),
])),
('normal', collections.OrderedDict([
('search', ['<Escape>']),
('set-cmd-text -s :open', ['o']),
('set-cmd-text :open {url}', ['go']),
('set-cmd-text -s :open -t', ['O']),
('set-cmd-text :open -t {url}', ['gO']),
('set-cmd-text -s :open -b', ['xo']),
('set-cmd-text :open -b {url}', ['xO']),
('set-cmd-text -s :open -w', ['wo']),
('set-cmd-text :open -w {url}', ['wO']),
('open -t', ['ga', '<Ctrl-T>']),
('tab-close', ['d', '<Ctrl-W>']),
('tab-close -o', ['D']),
('tab-only', ['co']),
('tab-focus', ['T']),
('tab-move', ['gm']),
('tab-move -', ['gl']),
('tab-move +', ['gr']),
('tab-next', ['J', 'gt']),
('tab-prev', ['K', 'gT']),
('tab-clone', ['gC']),
('reload', ['r']),
('reload -f', ['R']),
('back', ['H', '<Backspace>']),
('back -t', ['th']),
('back -w', ['wh']),
('forward', ['L']),
('forward -t', ['tl']),
('forward -w', ['wl']),
('fullscreen', ['<F11>']),
('hint', ['f']),
('hint all tab', ['F']),
('hint all window', ['wf']),
('hint all tab-bg', [';b']),
('hint all tab-fg', [';f']),
('hint all hover', [';h']),
('hint images', [';i']),
('hint images tab', [';I']),
('hint images tab-bg', ['.i']),
('hint links fill ":open {hint-url}"', [';o']),
('hint links fill ":open -t {hint-url}"', [';O']),
('hint links fill ":open -b {hint-url}"', ['.o']),
('hint links yank', [';y']),
('hint links yank-primary', [';Y']),
('hint --rapid links tab-bg', [';r']),
('hint --rapid links window', [';R']),
('hint links download', [';d']),
('scroll left', ['h']),
('scroll down', ['j']),
('scroll up', ['k']),
('scroll right', ['l']),
('undo', ['u', '<Ctrl-Shift-T>']),
('scroll-perc 0', ['gg']),
('scroll-perc', ['G']),
('search-next', ['n']),
('search-prev', ['N']),
('enter-mode insert', ['i']),
('enter-mode caret', ['v']),
('yank', ['yy']),
('yank -s', ['yY']),
('yank -t', ['yt']),
('yank -ts', ['yT']),
('paste', ['pp']),
('paste -s', ['pP']),
('paste -t', ['Pp']),
('paste -ts', ['PP']),
('paste -w', ['wp']),
('paste -ws', ['wP']),
('quickmark-save', ['m']),
('set-cmd-text -s :quickmark-load', ['b']),
('set-cmd-text -s :quickmark-load -t', ['B']),
('set-cmd-text -s :quickmark-load -w', ['wb']),
('save', ['sf']),
('set-cmd-text -s :set', ['ss']),
('set-cmd-text -s :set -t', ['sl']),
('set-cmd-text -s :set keybind', ['sk']),
('zoom-out', ['-']),
('zoom-in', ['+']),
('zoom', ['=']),
('navigate prev', ['[[']),
('navigate next', [']]']),
('navigate prev -t', ['{{']),
('navigate next -t', ['}}']),
('navigate up', ['gu']),
('navigate up -t', ['gU']),
('navigate increment', ['<Ctrl-A>']),
('navigate decrement', ['<Ctrl-X>']),
('inspector', ['wi']),
('download', ['gd']),
('download-cancel', ['ad']),
('download-remove --all', ['cd']),
('view-source', ['gf']),
('tab-focus last', ['<Ctrl-Tab>']),
('enter-mode passthrough', ['<Ctrl-V>']),
('quit', ['<Ctrl-Q>']),
('scroll-page 0 1', ['<Ctrl-F>']),
('scroll-page 0 -1', ['<Ctrl-B>']),
('scroll-page 0 0.5', ['<Ctrl-D>']),
('scroll-page 0 -0.5', ['<Ctrl-U>']),
('tab-focus 1', ['<Alt-1>']),
('tab-focus 2', ['<Alt-2>']),
('tab-focus 3', ['<Alt-3>']),
('tab-focus 4', ['<Alt-4>']),
('tab-focus 5', ['<Alt-5>']),
('tab-focus 6', ['<Alt-6>']),
('tab-focus 7', ['<Alt-7>']),
('tab-focus 8', ['<Alt-8>']),
('tab-focus 9', ['<Alt-9>']),
('home', ['<Ctrl-h>']),
('stop', ['<Ctrl-s>']),
('print', ['<Ctrl-Alt-p>']),
('open qute:settings', ['Ss']),
])),
('insert', collections.OrderedDict([
('open-editor', ['<Ctrl-E>']),
])),
('hint', collections.OrderedDict([
('follow-hint', ['<Return>', '<Ctrl-M>', '<Ctrl-J>']),
('hint --rapid links tab-bg', ['<Ctrl-R>']),
('hint links', ['<Ctrl-F>']),
('hint all tab-bg', ['<Ctrl-B>']),
])),
('passthrough', {}),
('command', collections.OrderedDict([
('command-history-prev', ['<Ctrl-P>']),
('command-history-next', ['<Ctrl-N>']),
('completion-item-prev', ['<Shift-Tab>', '<Up>']),
('completion-item-next', ['<Tab>', '<Down>']),
('command-accept', ['<Return>', '<Ctrl-J>', '<Shift-Return>',
'<Ctrl-M>']),
])),
('prompt', collections.OrderedDict([
('prompt-accept', ['<Return>', '<Ctrl-J>', '<Shift-Return>',
'<Ctrl-M>']),
('prompt-yes', ['y']),
('prompt-no', ['n']),
])),
('command,prompt', collections.OrderedDict([
('rl-backward-char', ['<Ctrl-B>']),
('rl-forward-char', ['<Ctrl-F>']),
('rl-backward-word', ['<Alt-B>']),
('rl-forward-word', ['<Alt-F>']),
('rl-beginning-of-line', ['<Ctrl-A>']),
('rl-end-of-line', ['<Ctrl-E>']),
('rl-unix-line-discard', ['<Ctrl-U>']),
('rl-kill-line', ['<Ctrl-K>']),
('rl-kill-word', ['<Alt-D>']),
('rl-unix-word-rubout', ['<Ctrl-W>']),
('rl-yank', ['<Ctrl-Y>']),
('rl-delete-char', ['<Ctrl-?>']),
('rl-backward-delete-char', ['<Ctrl-H>']),
])),
('caret', collections.OrderedDict([
('toggle-selection', ['v', '<Space>']),
('drop-selection', ['<Ctrl-Space>']),
('enter-mode normal', ['c']),
('move-to-next-line', ['j']),
('move-to-prev-line', ['k']),
('move-to-next-char', ['l']),
('move-to-prev-char', ['h']),
('move-to-end-of-word', ['e']),
('move-to-next-word', ['w']),
('move-to-prev-word', ['b']),
('move-to-start-of-next-block', [']']),
('move-to-start-of-prev-block', ['[']),
('move-to-end-of-next-block', ['}']),
('move-to-end-of-prev-block', ['{']),
('move-to-start-of-line', ['0']),
('move-to-end-of-line', ['$']),
('move-to-start-of-document', ['gg']),
('move-to-end-of-document', ['G']),
('yank-selected -p', ['Y']),
('yank-selected', ['y', '<Return>', '<Ctrl-J>']),
('scroll left', ['H']),
('scroll down', ['J']),
('scroll up', ['K']),
('scroll right', ['L']),
])),
])
# A list of (regex, replacement) tuples of changed key commands.
CHANGED_KEY_COMMANDS = [
(re.compile(r'^open -([twb]) about:blank$'), r'open -\1'),
(re.compile(r'^download-page$'), r'download'),
(re.compile(r'^cancel-download$'), r'download-cancel'),
(re.compile(r'^search ""$'), r'search'),
(re.compile(r"^search ''$"), r'search'),
(re.compile(r"""^set-cmd-text ['"](.*) ['"]$"""), r'set-cmd-text -s \1'),
(re.compile(r"""^set-cmd-text ['"](.*)['"]$"""), r'set-cmd-text \1'),
(re.compile(r"^hint links rapid$"), r'hint --rapid links tab-bg'),
(re.compile(r"^hint links rapid-win$"), r'hint --rapid links window'),
(re.compile(r'^scroll -50 0$'), r'scroll left'),
(re.compile(r'^scroll 0 50$'), r'scroll down'),
(re.compile(r'^scroll 0 -50$'), r'scroll up'),
(re.compile(r'^scroll 50 0$'), r'scroll right'),
(re.compile(r'^scroll ([-\d]+ [-\d]+)$'), r'scroll-px \1'),
]
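# Illustrative sketch of how the migration table above could be applied to a
# previously bound command (the real upgrade logic lives elsewhere in
# qutebrowser; this helper only demonstrates the (regex, replacement) idea).
def _migrate_key_command(command):
    """Rewrite `command` using the first matching CHANGED_KEY_COMMANDS entry."""
    for pattern, repl in CHANGED_KEY_COMMANDS:
        if pattern.match(command):
            return pattern.sub(repl, command)
    return command
# e.g. _migrate_key_command('scroll 0 50') -> 'scroll down'
# and _migrate_key_command('scroll 0 42') -> 'scroll-px 0 42'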
| 1 | 13,037 | Hmm, I wonder if binding `<Return>` is a good idea... this means it wouldn't be passed to webpages anymore, which makes me wonder how many people rely on that... I guess trying it is the only way to find out :wink: | qutebrowser-qutebrowser | py |
@@ -170,10 +170,18 @@ func udpPkt(src, dst string) packet {
func icmpPkt(src, dst string) packet {
return packetWithPorts(1, src+":0", dst+":0")
}
+func icmpPkt_with_type_code(src, dst string, icmpType, icmpCode int) packet {
+ return packet{
+ protocol: 1,
+ srcAddr: src,
+ srcPort: 0,
+ dstAddr: dst,
+ dstPort: (icmpCode << 8) | (icmpType),
+ }
+}
var polProgramTests = []polProgramTest{
// Tests of actions and flow control.
-
{
PolicyName: "no tiers",
DroppedPackets: []packet{ | 1 | // Copyright (c) 2020 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ut_test
import (
"encoding/binary"
"fmt"
"net"
"strconv"
"strings"
"testing"
. "github.com/onsi/gomega"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/felix/bpf"
"github.com/projectcalico/felix/bpf/asm"
"github.com/projectcalico/felix/bpf/ipsets"
"github.com/projectcalico/felix/bpf/polprog"
"github.com/projectcalico/felix/bpf/state"
"github.com/projectcalico/felix/idalloc"
"github.com/projectcalico/felix/proto"
)
func TestLoadAllowAllProgram(t *testing.T) {
RegisterTestingT(t)
b := asm.NewBlock()
b.MovImm32(asm.R0, -1)
b.Exit()
insns, err := b.Assemble()
Expect(err).NotTo(HaveOccurred())
fd, err := bpf.LoadBPFProgramFromInsns(insns, "Apache-2.0")
Expect(err).NotTo(HaveOccurred())
Expect(fd).NotTo(BeZero())
defer func() {
Expect(fd.Close()).NotTo(HaveOccurred())
}()
rc, err := bpf.RunBPFProgram(fd, make([]byte, 500), 1)
Expect(err).NotTo(HaveOccurred())
Expect(rc.RC).To(BeNumerically("==", -1))
}
func TestLoadProgramWithMapAcccess(t *testing.T) {
RegisterTestingT(t)
ipsMap := ipsets.Map(&bpf.MapContext{})
Expect(ipsMap.EnsureExists()).NotTo(HaveOccurred())
Expect(ipsMap.MapFD()).NotTo(BeZero())
b := asm.NewBlock()
b.MovImm64(asm.R1, 0)
b.StoreStack64(asm.R1, -8)
b.StoreStack64(asm.R1, -16)
b.StoreStack64(asm.R1, -24)
b.StoreStack64(asm.R1, -32)
b.Mov64(asm.R2, asm.R10)
b.AddImm64(asm.R2, -32)
b.LoadMapFD(asm.R1, uint32(ipsMap.MapFD()))
b.Call(asm.HelperMapLookupElem)
b.MovImm32(asm.R0, -1)
b.Exit()
insns, err := b.Assemble()
Expect(err).NotTo(HaveOccurred())
fd, err := bpf.LoadBPFProgramFromInsns(insns, "Apache-2.0")
Expect(err).NotTo(HaveOccurred())
Expect(fd).NotTo(BeZero())
defer func() {
Expect(fd.Close()).NotTo(HaveOccurred())
}()
rc, err := bpf.RunBPFProgram(fd, make([]byte, 500), 1)
Expect(err).NotTo(HaveOccurred())
Expect(rc.RC).To(BeNumerically("==", -1))
}
func TestLoadKitchenSinkPolicy(t *testing.T) {
RegisterTestingT(t)
alloc := idalloc.New()
allocID := func(id string) string {
alloc.GetOrAlloc(id)
return id
}
cleanIPSetMap()
pg := polprog.NewBuilder(alloc, ipsMap.MapFD(), stateMap.MapFD(), jumpMap.MapFD())
insns, err := pg.Instructions([][][]*proto.Rule{{{{
Action: "Allow",
IpVersion: 4,
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Number{Number: 6}},
SrcNet: []string{"10.0.0.0/8"},
SrcPorts: []*proto.PortRange{{First: 80, Last: 81}, {First: 8080, Last: 8081}},
SrcNamedPortIpSetIds: []string{allocID("n:abcdef1234567890")},
DstNet: []string{"11.0.0.0/8"},
DstPorts: []*proto.PortRange{{First: 3000, Last: 3001}},
DstNamedPortIpSetIds: []string{allocID("n:foo1234567890")},
Icmp: nil,
SrcIpSetIds: []string{allocID("s:sbcdef1234567890")},
DstIpSetIds: []string{allocID("s:dbcdef1234567890")},
NotProtocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "UDP"}},
NotSrcNet: []string{"12.0.0.0/8"},
NotSrcPorts: []*proto.PortRange{{First: 5000, Last: 5000}},
NotDstNet: []string{"13.0.0.0/8"},
NotDstPorts: []*proto.PortRange{{First: 4000, Last: 4000}},
NotIcmp: nil,
NotSrcIpSetIds: []string{allocID("s:abcdef1234567890")},
NotDstIpSetIds: []string{allocID("s:abcdef123456789l")},
NotSrcNamedPortIpSetIds: []string{allocID("n:0bcdef1234567890")},
NotDstNamedPortIpSetIds: []string{allocID("n:0bcdef1234567890")},
}}}})
Expect(err).NotTo(HaveOccurred())
fd, err := bpf.LoadBPFProgramFromInsns(insns, "Apache-2.0")
Expect(err).NotTo(HaveOccurred())
Expect(fd).NotTo(BeZero())
Expect(fd.Close()).NotTo(HaveOccurred())
}
const (
RCDrop = 2
RCEpilogueReached = 123
)
func packetWithPorts(proto int, src, dst string) packet {
parts := strings.Split(src, ":")
srcAddr := parts[0]
srcPort, err := strconv.Atoi(parts[1])
if err != nil {
panic(err)
}
parts = strings.Split(dst, ":")
dstAddr := parts[0]
dstPort, err := strconv.Atoi(parts[1])
if err != nil {
panic(err)
}
return packet{
protocol: proto,
srcAddr: srcAddr,
srcPort: srcPort,
dstAddr: dstAddr,
dstPort: dstPort,
}
}
func tcpPkt(src, dst string) packet {
return packetWithPorts(6, src, dst)
}
func udpPkt(src, dst string) packet {
return packetWithPorts(17, src, dst)
}
func icmpPkt(src, dst string) packet {
return packetWithPorts(1, src+":0", dst+":0")
}
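// Example (illustrative only): the helpers above build test packets from
// "addr:port" strings, e.g.
//
//	p := tcpPkt("10.0.0.1:31245", "10.0.0.2:80")
//	// p.protocol == 6, p.srcPort == 31245, p.dstPort == 80
//
// ICMP has no ports, so icmpPkt pins both ports to 0.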
var polProgramTests = []polProgramTest{
// Tests of actions and flow control.
{
PolicyName: "no tiers",
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
{
PolicyName: "unreachable tier",
Policy: [][][]*proto.Rule{
{},
{{{
Action: "Allow",
}}},
},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
{
PolicyName: "pass to nowhere",
Policy: [][][]*proto.Rule{
{{{
Action: "Pass",
}}},
},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
{
PolicyName: "pass to allow",
Policy: [][][]*proto.Rule{
{
{
{Action: "Pass"},
{Action: "Deny"},
},
},
{
{
{Action: "Allow"},
},
},
},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
{
PolicyName: "pass to deny",
Policy: [][][]*proto.Rule{
{
{
{Action: "Pass"},
{Action: "Allow"},
},
},
{
{
{Action: "Deny"},
},
},
},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
{
PolicyName: "explicit allow",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
{
PolicyName: "explicit deny",
Policy: [][][]*proto.Rule{{{{
Action: "Deny",
}}}},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
// Protocol match tests.
{
PolicyName: "allow tcp",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
DroppedPackets: []packet{
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
{
PolicyName: "allow !tcp",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
NotProtocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
}}}},
AllowedPackets: []packet{
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
},
{
PolicyName: "allow udp",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "udp"}},
}}}},
AllowedPackets: []packet{
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80")},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
// CIDR tests.
{
PolicyName: "allow 10.0.0.1/32",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
SrcNet: []string{"10.0.0.1/32"},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245")},
},
{
PolicyName: "allow from 10.0.0.0/8",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
SrcNet: []string{"10.0.0.0/8"},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245")},
DroppedPackets: []packet{
icmpPkt("11.0.0.1", "10.0.0.2")},
},
{
PolicyName: "allow from CIDRs",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
SrcNet: []string{"102.0.0.0/8", "10.0.0.1/32", "11.0.0.1/32"},
}}}},
AllowedPackets: []packet{
icmpPkt("11.0.0.1", "10.0.0.2"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
},
{
PolicyName: "allow from !CIDRs",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
NotSrcNet: []string{"102.0.0.0/8", "10.0.0.1/32", "11.0.0.1/32"},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
DroppedPackets: []packet{
icmpPkt("11.0.0.1", "10.0.0.2"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80")},
},
{
PolicyName: "allow to CIDRs",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
DstNet: []string{"102.0.0.0/8", "10.0.0.1/32", "11.0.0.1/32"},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
DroppedPackets: []packet{
udpPkt("10.0.0.2:12345", "123.0.0.1:1024")},
},
{
PolicyName: "allow to !CIDRs",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
NotDstNet: []string{"102.0.0.0/8", "10.0.0.1/32", "11.0.0.1/32"},
}}}},
AllowedPackets: []packet{
udpPkt("10.0.0.2:12345", "123.0.0.1:1024")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
},
{
PolicyName: "allow from !10.0.0.0/8",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
NotSrcNet: []string{"10.0.0.0/8"},
}}}},
AllowedPackets: []packet{
icmpPkt("11.0.0.1", "10.0.0.2")},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245")},
},
{
PolicyName: "allow to 10.0.0.1/32",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
DstNet: []string{"10.0.0.1/32"},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245")},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2"),
udpPkt("10.0.0.2:12345", "123.0.0.1:1024")},
},
{
PolicyName: "allow to 10.0.0.0/8",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
DstNet: []string{"10.0.0.0/8"},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
icmpPkt("11.0.0.1", "10.0.0.2")},
DroppedPackets: []packet{
udpPkt("10.0.0.2:12345", "123.0.0.1:1024")},
},
{
PolicyName: "allow to !10.0.0.0/8",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
NotDstNet: []string{"10.0.0.0/8"},
}}}},
AllowedPackets: []packet{
udpPkt("10.0.0.2:12345", "123.0.0.1:1024")},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245")},
},
// Port tests.
{
PolicyName: "allow from tcp:80",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
SrcPorts: []*proto.PortRange{{
First: 80,
Last: 80,
}},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
{
PolicyName: "allow from tcp:80-81",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
SrcPorts: []*proto.PortRange{{
First: 80,
Last: 81,
}},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
tcpPkt("10.0.0.2:81", "10.0.0.1:31245")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:79", "10.0.0.1:31245"),
tcpPkt("10.0.0.2:82", "10.0.0.1:31245"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245")},
},
{
PolicyName: "allow from tcp:0-80",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
SrcPorts: []*proto.PortRange{{
First: 0,
Last: 80,
}},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.2:0", "10.0.0.1:31245"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:81", "10.0.0.1:31245")},
},
{
PolicyName: "allow to tcp:80-65535",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
DstPorts: []*proto.PortRange{{
First: 80,
Last: 65535,
}},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:65535")},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:79")},
},
{
PolicyName: "allow to tcp:ranges",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
DstPorts: []*proto.PortRange{
{First: 80, Last: 81},
{First: 90, Last: 90},
},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:81"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:90")},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:79"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:82"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:89"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:91"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80")},
},
{
PolicyName: "allow to tcp:!ranges",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
NotDstPorts: []*proto.PortRange{
{First: 80, Last: 81},
{First: 90, Last: 90},
},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:79"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:82"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:89"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:91")},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:81"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:90"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80")},
},
{
PolicyName: "allow from tcp:!80",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
NotSrcPorts: []*proto.PortRange{{
First: 80,
Last: 80,
}},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
{
PolicyName: "allow to tcp:80",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
DstPorts: []*proto.PortRange{{
First: 80,
Last: 80,
}},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
{
// BPF immediate values are signed; check that we don't get tripped up by a sign extension.
PolicyName: "allow to tcp:65535",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
DstPorts: []*proto.PortRange{{
First: 65535,
Last: 65535,
}},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:65535")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
},
{
PolicyName: "allow to tcp:!80",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
NotDstPorts: []*proto.PortRange{{
First: 80,
Last: 80,
}},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
DroppedPackets: []packet{
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
},
// IP set tests.
{
PolicyName: "allow from empty IP set",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
SrcIpSetIds: []string{"setA"},
}}}},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
IPSets: map[string][]string{
"setA": {},
},
},
{
PolicyName: "allow from !empty IP set",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
Protocol: &proto.Protocol{NumberOrName: &proto.Protocol_Name{Name: "tcp"}},
NotSrcIpSetIds: []string{"setA"},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:80")},
DroppedPackets: []packet{
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
IPSets: map[string][]string{
"setA": {},
},
},
{
PolicyName: "allow from IP set",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
SrcIpSetIds: []string{"setA"},
}}}},
AllowedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:12345", "123.0.0.1:1024"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
DroppedPackets: []packet{
tcpPkt("11.0.0.1:12345", "10.0.0.2:8080")},
IPSets: map[string][]string{
"setA": {"10.0.0.0/8"},
},
},
{
PolicyName: "allow to IP set",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
DstIpSetIds: []string{"setA"},
}}}},
AllowedPackets: []packet{
udpPkt("10.0.0.2:12345", "123.0.0.1:1024")},
DroppedPackets: []packet{
tcpPkt("11.0.0.1:12345", "10.0.0.2:8080"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80")},
IPSets: map[string][]string{
"setA": {"11.0.0.0/8", "123.0.0.1/32"},
},
},
{
PolicyName: "allow from !IP set",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
NotSrcIpSetIds: []string{"setA"},
}}}},
AllowedPackets: []packet{
tcpPkt("11.0.0.1:12345", "10.0.0.2:8080")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.2:12345", "123.0.0.1:1024"),
udpPkt("10.0.0.2:80", "10.0.0.1:31245"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80"),
icmpPkt("10.0.0.1", "10.0.0.2")},
IPSets: map[string][]string{
"setA": {"10.0.0.0/8"},
},
},
{
PolicyName: "allow to !IP set",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
NotDstIpSetIds: []string{"setA"},
}}}},
AllowedPackets: []packet{
tcpPkt("11.0.0.1:12345", "10.0.0.2:8080"),
udpPkt("10.0.0.1:31245", "10.0.0.2:80")},
DroppedPackets: []packet{
udpPkt("10.0.0.2:12345", "123.0.0.1:1024")},
IPSets: map[string][]string{
"setA": {"11.0.0.0/8", "123.0.0.1/32"},
},
},
{
PolicyName: "allow to named port",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
DstNamedPortIpSetIds: []string{"setA"},
}}}},
AllowedPackets: []packet{
udpPkt("10.0.0.2:12345", "123.0.0.1:1024"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:80")},
DroppedPackets: []packet{
tcpPkt("11.0.0.1:12345", "10.0.0.2:8080"), // Wrong port
udpPkt("10.0.0.1:31245", "10.0.0.2:80"), // Wrong proto
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"), // Src/dest confusion
tcpPkt("10.0.0.2:31245", "10.0.0.1:80"), // Wrong dest
},
IPSets: map[string][]string{
"setA": {"10.0.0.2/32,tcp:80", "123.0.0.1/32,udp:1024"},
},
},
{
PolicyName: "allow to named ports",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
DstNamedPortIpSetIds: []string{"setA", "setB"},
}}}},
AllowedPackets: []packet{
udpPkt("10.0.0.2:12345", "123.0.0.1:1024"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:80")},
DroppedPackets: []packet{
tcpPkt("11.0.0.1:12345", "10.0.0.2:8080"), // Wrong port
udpPkt("10.0.0.1:31245", "10.0.0.2:80"), // Wrong proto
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"), // Src/dest confusion
tcpPkt("10.0.0.2:31245", "10.0.0.1:80"), // Wrong dest
},
IPSets: map[string][]string{
"setA": {"10.0.0.2/32,tcp:80"},
"setB": {"123.0.0.1/32,udp:1024"},
},
},
{
PolicyName: "allow to mixed ports",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
// Should match either port or named port
DstPorts: []*proto.PortRange{
{First: 81, Last: 82},
{First: 90, Last: 90},
},
DstNamedPortIpSetIds: []string{"setA", "setB"},
}}}},
AllowedPackets: []packet{
udpPkt("10.0.0.2:12345", "123.0.0.1:1024"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:90"),
tcpPkt("10.0.0.1:31245", "10.0.0.2:82")},
DroppedPackets: []packet{
tcpPkt("11.0.0.1:12345", "10.0.0.2:8080"), // Wrong port
udpPkt("10.0.0.1:31245", "10.0.0.2:80"), // Wrong proto
tcpPkt("10.0.0.2:80", "10.0.0.1:31245"), // Src/dest confusion
tcpPkt("10.0.0.2:31245", "10.0.0.1:80"), // Wrong dest
},
IPSets: map[string][]string{
"setA": {"10.0.0.2/32,tcp:80"},
"setB": {"123.0.0.1/32,udp:1024"},
},
},
{
PolicyName: "allow from named port",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
SrcNamedPortIpSetIds: []string{"setA"},
}}}},
AllowedPackets: []packet{
udpPkt("123.0.0.1:1024", "10.0.0.2:12345"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:8080", "11.0.0.1:12345"), // Wrong port
udpPkt("10.0.0.2:80", "10.0.0.1:31245"), // Wrong proto
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"), // Src/dest confusion
tcpPkt("10.0.0.1:80", "10.0.0.2:31245"), // Wrong src
},
IPSets: map[string][]string{
"setA": {"10.0.0.2/32,tcp:80", "123.0.0.1/32,udp:1024"},
},
},
{
PolicyName: "allow from named ports",
Policy: [][][]*proto.Rule{{{{
Action: "Allow",
SrcNamedPortIpSetIds: []string{"setA", "setB"},
}}}},
AllowedPackets: []packet{
udpPkt("123.0.0.1:1024", "10.0.0.2:12345"),
tcpPkt("10.0.0.2:80", "10.0.0.1:31245")},
DroppedPackets: []packet{
tcpPkt("10.0.0.2:8080", "11.0.0.1:12345"), // Wrong port
udpPkt("10.0.0.2:80", "10.0.0.1:31245"), // Wrong proto
tcpPkt("10.0.0.1:31245", "10.0.0.2:80"), // Src/dest confusion
tcpPkt("10.0.0.1:80", "10.0.0.2:31245"), // Wrong src
},
IPSets: map[string][]string{
"setA": {"10.0.0.2/32,tcp:80"},
"setB": {"123.0.0.1/32,udp:1024"},
},
},
// TODO ICMP
}
func TestPolicyPrograms(t *testing.T) {
for i, p := range polProgramTests {
t.Run(fmt.Sprintf("%d:Policy=%s", i, p.PolicyName), p.Run)
}
}
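// polProgramTest describes one table-driven case: the policy rules to
// compile, the packets that must be allowed or dropped, and the IP set
// contents to install before the run.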
type polProgramTest struct {
PolicyName string
Policy [][][]*proto.Rule
AllowedPackets []packet
DroppedPackets []packet
IPSets map[string][]string
}
type packet struct {
protocol int
srcAddr string
srcPort int
dstAddr string
dstPort int
}
func (p packet) String() string {
protoName := fmt.Sprint(p.protocol)
switch p.protocol {
case 6:
protoName = "tcp"
case 17:
protoName = "udp"
case 1:
protoName = "icmp"
}
return fmt.Sprintf("%s-%s:%d->%s:%d", protoName, p.srcAddr, p.srcPort, p.dstAddr, p.dstPort)
}
func (p packet) ToState() state.State {
return state.State{
IPProto: uint8(p.protocol),
SrcAddr: ipUintFromString(p.srcAddr),
PostNATDstAddr: ipUintFromString(p.dstAddr),
SrcPort: uint16(p.srcPort),
PostNATDstPort: uint16(p.dstPort),
}
}
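// ipUintFromString parses an IPv4 address string into the little-endian
// uint32 form used in the state map (see TestIPUintFromString below).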
func ipUintFromString(addrStr string) uint32 {
if addrStr == "" {
return 0
}
addr := net.ParseIP(addrStr)
return binary.LittleEndian.Uint32(addr.To4())
}
func TestIPUintFromString(t *testing.T) {
RegisterTestingT(t)
Expect(ipUintFromString("10.0.0.1")).To(Equal(uint32(0x0100000a)))
}
func (p *polProgramTest) Run(t *testing.T) {
RegisterTestingT(t)
	// The prog builder refuses to allocate IDs as a precaution, so give it an allocator that forces allocations.
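	// (forceAllocator is the test-only wrapper defined elsewhere in this package.)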
realAlloc := idalloc.New()
forceAlloc := &forceAllocator{alloc: realAlloc}
	// Make sure the maps are available.
cleanIPSetMap()
	// FIXME: we should clean up the maps at the end of each test, but recreating the maps seems to be racy
p.setUpIPSets(realAlloc, ipsMap)
// Build the program.
pg := polprog.NewBuilder(forceAlloc, ipsMap.MapFD(), testStateMap.MapFD(), jumpMap.MapFD())
insns, err := pg.Instructions(p.Policy)
Expect(err).NotTo(HaveOccurred(), "failed to assemble program")
// Load the program into the kernel. We don't pin it so it'll be removed when the
// test process exits (or by the defer).
polProgFD, err := bpf.LoadBPFProgramFromInsns(insns, "Apache-2.0")
Expect(err).NotTo(HaveOccurred(), "failed to load program into the kernel")
Expect(polProgFD).NotTo(BeZero())
defer func() {
err := polProgFD.Close()
Expect(err).NotTo(HaveOccurred())
}()
// Give the policy program somewhere to jump to.
epiFD := p.installEpilogueProgram(jumpMap)
defer func() {
err := epiFD.Close()
Expect(err).NotTo(HaveOccurred())
}()
log.Debug("Setting up state map")
for _, pkt := range p.AllowedPackets {
pkt := pkt
t.Run(fmt.Sprintf("should allow %v", pkt), func(t *testing.T) {
RegisterTestingT(t)
p.runProgram(pkt.ToState(), testStateMap, polProgFD, RCEpilogueReached, polprog.PolRCAllow)
})
}
for _, pkt := range p.DroppedPackets {
pkt := pkt
t.Run(fmt.Sprintf("should drop %v", pkt), func(t *testing.T) {
RegisterTestingT(t)
p.runProgram(pkt.ToState(), testStateMap, polProgFD, RCDrop, polprog.PolRCNoMatch)
})
}
}
// installEpilogueProgram installs a trivial BPF program into the jump table that returns RCEpilogueReached.
func (p *polProgramTest) installEpilogueProgram(jumpMap bpf.Map) bpf.ProgFD {
b := asm.NewBlock()
// Load the RC into the return register.
b.MovImm64(asm.R0, RCEpilogueReached)
// Exit!
b.Exit()
epiInsns, err := b.Assemble()
Expect(err).NotTo(HaveOccurred())
epiFD, err := bpf.LoadBPFProgramFromInsns(epiInsns, "Apache-2.0")
Expect(err).NotTo(HaveOccurred(), "failed to load program into the kernel")
Expect(epiFD).NotTo(BeZero())
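	// Store the epilogue program's FD at jump map index 1 (little-endian key).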
jumpValue := make([]byte, 4)
binary.LittleEndian.PutUint32(jumpValue, uint32(epiFD))
err = jumpMap.Update([]byte{1, 0, 0, 0}, jumpValue)
Expect(err).NotTo(HaveOccurred())
return epiFD
}
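// runProgram writes stateIn into the state map, executes the policy program,
// and asserts both the BPF return code and the policy RC recorded in state.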
func (p *polProgramTest) runProgram(stateIn state.State, stateMap bpf.Map, progFD bpf.ProgFD, expProgRC int, expPolRC int) {
// The policy program takes its input from the state map (rather than looking at the
// packet). Set up the state map.
stateMapKey := []byte{0, 0, 0, 0} // State map has a single key
stateBytesIn := stateIn.AsBytes()
log.WithField("stateBytes", stateBytesIn).Debug("State bytes in")
log.Debugf("State in %#v", stateIn)
err := stateMap.Update(stateMapKey, stateBytesIn)
Expect(err).NotTo(HaveOccurred(), "failed to update state map")
log.Debug("Running BPF program")
result, err := bpf.RunBPFProgram(progFD, make([]byte, 1000), 1)
Expect(err).NotTo(HaveOccurred())
log.Debug("Checking result...")
stateBytesOut, err := stateMap.Get(stateMapKey)
Expect(err).NotTo(HaveOccurred())
log.WithField("stateBytes", stateBytesOut).Debug("State bytes out")
stateOut := state.StateFromBytes(stateBytesOut)
log.Debugf("State out %#v", stateOut)
Expect(stateOut.PolicyRC).To(BeNumerically("==", expPolRC), "policy RC was incorrect")
Expect(result.RC).To(BeNumerically("==", expProgRC), "program RC was incorrect")
// Check no other fields got clobbered.
expectedStateOut := stateIn
expectedStateOut.PolicyRC = int32(expPolRC)
Expect(stateOut).To(Equal(expectedStateOut), "policy program modified unexpected parts of the state")
}
func (p *polProgramTest) setUpIPSets(alloc *idalloc.IDAllocator, ipsMap bpf.Map) {
for name, members := range p.IPSets {
id := alloc.GetOrAlloc(name)
for _, m := range members {
entry := ipsets.ProtoIPSetMemberToBPFEntry(id, m)
err := ipsMap.Update(entry[:], ipsets.DummyValue)
Expect(err).NotTo(HaveOccurred())
}
}
}
func cleanIPSetMap() {
// Clean out any existing IP sets. (The other maps have a fixed number of keys that
// we set as needed.)
var keys [][]byte
err := ipsMap.Iter(func(k, v []byte) {
keys = append(keys, k)
})
Expect(err).NotTo(HaveOccurred(), "failed to clean out map before test")
for _, k := range keys {
err = ipsMap.Delete(k)
Expect(err).NotTo(HaveOccurred(), "failed to clean out map before test")
}
}
| 1 | 17,658 | Golang naming convention is to use camel case: `icmpPktWithTypeCode`. Often the linter will complain. | projectcalico-felix | c |
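A minimal, hypothetical sketch of the camelCase helper the reviewer asks for; the signature and packet fields are assumed for illustration, not taken from the code above:

package main

import "fmt"

type packet struct {
	protocol         int
	srcAddr, dstAddr string
}

// icmpPktWithTypeCode uses the camelCase naming Go linters expect for
// multiword identifiers (protocol 1 = ICMP; type/code handling elided).
func icmpPktWithTypeCode(src, dst string, icmpType, icmpCode int) packet {
	_, _ = icmpType, icmpCode // elided in this sketch
	return packet{protocol: 1, srcAddr: src, dstAddr: dst}
}

func main() {
	fmt.Println(icmpPktWithTypeCode("10.0.0.1", "10.0.0.2", 8, 0))
}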
@@ -300,6 +300,12 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http
protected FrameResponseHeaders FrameResponseHeaders { get; } = new FrameResponseHeaders();
+ public TimeSpan RequestBodyTimeout { get; set; }
+
+ public double RequestBodyMinimumDataRate { get; set; }
+
+ public TimeSpan RequestBodyMinimumDataRateGracePeriod { get; set; }
+
public void InitializeStreams(MessageBody messageBody)
{
if (_frameStreams == null) | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Net;
using System.Runtime.CompilerServices;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Infrastructure;
using Microsoft.AspNetCore.Server.Kestrel.Internal.System;
using Microsoft.AspNetCore.Server.Kestrel.Internal.System.IO.Pipelines;
using Microsoft.AspNetCore.Server.Kestrel.Internal.System.Text.Encodings.Web.Utf8;
using Microsoft.AspNetCore.Server.Kestrel.Transport.Abstractions;
using Microsoft.Extensions.Internal;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Primitives;
using Microsoft.AspNetCore.Http.Features;
// ReSharper disable AccessToModifiedClosure
namespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http
{
public abstract partial class Frame : IFrameControl
{
private const byte ByteAsterisk = (byte)'*';
private const byte ByteForwardSlash = (byte)'/';
private const byte BytePercentage = (byte)'%';
private static readonly ArraySegment<byte> _endChunkedResponseBytes = CreateAsciiByteArraySegment("0\r\n\r\n");
private static readonly ArraySegment<byte> _continueBytes = CreateAsciiByteArraySegment("HTTP/1.1 100 Continue\r\n\r\n");
private static readonly Action<WritableBuffer, FrameAdapter> _writeHeaders = WriteResponseHeaders;
private static readonly byte[] _bytesConnectionClose = Encoding.ASCII.GetBytes("\r\nConnection: close");
private static readonly byte[] _bytesConnectionKeepAlive = Encoding.ASCII.GetBytes("\r\nConnection: keep-alive");
private static readonly byte[] _bytesTransferEncodingChunked = Encoding.ASCII.GetBytes("\r\nTransfer-Encoding: chunked");
private static readonly byte[] _bytesHttpVersion11 = Encoding.ASCII.GetBytes("HTTP/1.1 ");
private static readonly byte[] _bytesEndHeaders = Encoding.ASCII.GetBytes("\r\n\r\n");
private static readonly byte[] _bytesServer = Encoding.ASCII.GetBytes("\r\nServer: " + Constants.ServerName);
private const string EmptyPath = "/";
private const string Asterisk = "*";
private readonly object _onStartingSync = new Object();
private readonly object _onCompletedSync = new Object();
private Streams _frameStreams;
protected Stack<KeyValuePair<Func<object, Task>, object>> _onStarting;
protected Stack<KeyValuePair<Func<object, Task>, object>> _onCompleted;
protected volatile bool _requestProcessingStopping; // volatile, see: https://msdn.microsoft.com/en-us/library/x13ttww7.aspx
protected int _requestAborted;
private CancellationTokenSource _abortedCts;
private CancellationToken? _manuallySetRequestAbortToken;
protected RequestProcessingStatus _requestProcessingStatus;
protected bool _keepAlive;
protected bool _upgradeAvailable;
private volatile bool _wasUpgraded;
private bool _canHaveBody;
private bool _autoChunk;
protected Exception _applicationException;
private BadHttpRequestException _requestRejectedException;
protected HttpVersion _httpVersion;
private string _requestId;
private int _remainingRequestHeadersBytesAllowed;
private int _requestHeadersParsed;
private uint _requestCount;
protected readonly long _keepAliveTicks;
private readonly long _requestHeadersTimeoutTicks;
protected long _responseBytesWritten;
private readonly FrameContext _frameContext;
private readonly IHttpParser<FrameAdapter> _parser;
private HttpRequestTarget _requestTargetForm = HttpRequestTarget.Unknown;
private Uri _absoluteRequestTarget;
public Frame(FrameContext frameContext)
{
_frameContext = frameContext;
ServerOptions = ServiceContext.ServerOptions;
_parser = ServiceContext.HttpParserFactory(new FrameAdapter(this));
FrameControl = this;
_keepAliveTicks = ServerOptions.Limits.KeepAliveTimeout.Ticks;
_requestHeadersTimeoutTicks = ServerOptions.Limits.RequestHeadersTimeout.Ticks;
Output = new OutputProducer(frameContext.Output, frameContext.ConnectionId, frameContext.ServiceContext.Log);
RequestBodyPipe = CreateRequestBodyPipe();
}
public IPipe RequestBodyPipe { get; }
public ServiceContext ServiceContext => _frameContext.ServiceContext;
public IConnectionInformation ConnectionInformation => _frameContext.ConnectionInformation;
public IFeatureCollection ConnectionFeatures { get; set; }
public IPipeReader Input => _frameContext.Input;
public OutputProducer Output { get; }
public ITimeoutControl TimeoutControl => _frameContext.TimeoutControl;
protected IKestrelTrace Log => ServiceContext.Log;
private DateHeaderValueManager DateHeaderValueManager => ServiceContext.DateHeaderValueManager;
// Hold direct reference to ServerOptions since this is used very often in the request processing path
private KestrelServerOptions ServerOptions { get; }
private IPEndPoint LocalEndPoint => ConnectionInformation.LocalEndPoint;
private IPEndPoint RemoteEndPoint => ConnectionInformation.RemoteEndPoint;
protected string ConnectionId => _frameContext.ConnectionId;
public string ConnectionIdFeature { get; set; }
public bool HasStartedConsumingRequestBody { get; set; }
public long? MaxRequestBodySize { get; set; }
/// <summary>
/// The request id. <seealso cref="HttpContext.TraceIdentifier"/>
/// </summary>
public string TraceIdentifier
{
set => _requestId = value;
get
{
// don't generate an ID until it is requested
if (_requestId == null)
{
_requestId = StringUtilities.ConcatAsHexSuffix(ConnectionId, ':', _requestCount);
}
return _requestId;
}
}
public bool WasUpgraded => _wasUpgraded;
public IPAddress RemoteIpAddress { get; set; }
public int RemotePort { get; set; }
public IPAddress LocalIpAddress { get; set; }
public int LocalPort { get; set; }
public string Scheme { get; set; }
public string Method { get; set; }
public string PathBase { get; set; }
public string Path { get; set; }
public string QueryString { get; set; }
public string RawTarget { get; set; }
public string HttpVersion
{
get
{
if (_httpVersion == Http.HttpVersion.Http11)
{
return "HTTP/1.1";
}
if (_httpVersion == Http.HttpVersion.Http10)
{
return "HTTP/1.0";
}
return string.Empty;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
set
{
// GetKnownVersion returns versions which ReferenceEquals interned string
// As most common path, check for this only in fast-path and inline
if (ReferenceEquals(value, "HTTP/1.1"))
{
_httpVersion = Http.HttpVersion.Http11;
}
else if (ReferenceEquals(value, "HTTP/1.0"))
{
_httpVersion = Http.HttpVersion.Http10;
}
else
{
HttpVersionSetSlow(value);
}
}
}
[MethodImpl(MethodImplOptions.NoInlining)]
private void HttpVersionSetSlow(string value)
{
if (value == "HTTP/1.1")
{
_httpVersion = Http.HttpVersion.Http11;
}
else if (value == "HTTP/1.0")
{
_httpVersion = Http.HttpVersion.Http10;
}
else
{
_httpVersion = Http.HttpVersion.Unknown;
}
}
public IHeaderDictionary RequestHeaders { get; set; }
public Stream RequestBody { get; set; }
private int _statusCode;
public int StatusCode
{
get => _statusCode;
set
{
if (HasResponseStarted)
{
ThrowResponseAlreadyStartedException(nameof(StatusCode));
}
_statusCode = value;
}
}
private string _reasonPhrase;
public string ReasonPhrase
{
get => _reasonPhrase;
set
{
if (HasResponseStarted)
{
ThrowResponseAlreadyStartedException(nameof(ReasonPhrase));
}
_reasonPhrase = value;
}
}
public IHeaderDictionary ResponseHeaders { get; set; }
public Stream ResponseBody { get; set; }
public CancellationToken RequestAborted
{
get
{
// If a request abort token was previously explicitly set, return it.
if (_manuallySetRequestAbortToken.HasValue)
{
return _manuallySetRequestAbortToken.Value;
}
// Otherwise, get the abort CTS. If we have one, which would mean that someone previously
// asked for the RequestAborted token, simply return its token. If we don't,
// check to see whether we've already aborted, in which case just return an
// already canceled token. Finally, force a source into existence if we still
// don't have one, and return its token.
var cts = _abortedCts;
return
cts != null ? cts.Token :
(Volatile.Read(ref _requestAborted) == 1) ? new CancellationToken(true) :
RequestAbortedSource.Token;
}
set
{
// Set an abort token, overriding one we create internally. This setter and associated
// field exist purely to support IHttpRequestLifetimeFeature.set_RequestAborted.
_manuallySetRequestAbortToken = value;
}
}
private CancellationTokenSource RequestAbortedSource
{
get
{
// Get the abort token, lazily-initializing it if necessary.
// Make sure it's canceled if an abort request already came in.
// EnsureInitialized can return null since _abortedCts is reset to null
// after it's already been initialized to a non-null value.
// If EnsureInitialized does return null, this property was accessed between
// requests so it's safe to return an ephemeral CancellationTokenSource.
var cts = LazyInitializer.EnsureInitialized(ref _abortedCts, () => new CancellationTokenSource())
?? new CancellationTokenSource();
if (Volatile.Read(ref _requestAborted) == 1)
{
cts.Cancel();
}
return cts;
}
}
public bool HasResponseStarted => _requestProcessingStatus == RequestProcessingStatus.ResponseStarted;
protected FrameRequestHeaders FrameRequestHeaders { get; } = new FrameRequestHeaders();
protected FrameResponseHeaders FrameResponseHeaders { get; } = new FrameResponseHeaders();
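        // Wires the request/response streams up to the parsed message body.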
public void InitializeStreams(MessageBody messageBody)
{
if (_frameStreams == null)
{
_frameStreams = new Streams(this);
}
(RequestBody, ResponseBody) = _frameStreams.Start(messageBody);
}
public void PauseStreams() => _frameStreams.Pause();
public void StopStreams() => _frameStreams.Stop();
public void Reset()
{
_onStarting = null;
_onCompleted = null;
_requestProcessingStatus = RequestProcessingStatus.RequestPending;
_keepAlive = false;
_autoChunk = false;
_applicationException = null;
ResetFeatureCollection();
HasStartedConsumingRequestBody = false;
MaxRequestBodySize = ServerOptions.Limits.MaxRequestBodySize;
TraceIdentifier = null;
Scheme = null;
Method = null;
PathBase = null;
Path = null;
RawTarget = null;
_requestTargetForm = HttpRequestTarget.Unknown;
_absoluteRequestTarget = null;
QueryString = null;
_httpVersion = Http.HttpVersion.Unknown;
StatusCode = StatusCodes.Status200OK;
ReasonPhrase = null;
RemoteIpAddress = RemoteEndPoint?.Address;
RemotePort = RemoteEndPoint?.Port ?? 0;
LocalIpAddress = LocalEndPoint?.Address;
LocalPort = LocalEndPoint?.Port ?? 0;
ConnectionIdFeature = ConnectionId;
FrameRequestHeaders.Reset();
FrameResponseHeaders.Reset();
RequestHeaders = FrameRequestHeaders;
ResponseHeaders = FrameResponseHeaders;
if (ConnectionFeatures != null)
{
foreach (var feature in ConnectionFeatures)
{
// Set the scheme to https if there's an ITlsConnectionFeature
if (feature.Key == typeof(ITlsConnectionFeature))
{
Scheme = "https";
}
FastFeatureSet(feature.Key, feature.Value);
}
}
_manuallySetRequestAbortToken = null;
_abortedCts = null;
// Allow two bytes for \r\n after headers
_remainingRequestHeadersBytesAllowed = ServerOptions.Limits.MaxRequestHeadersTotalSize + 2;
_requestHeadersParsed = 0;
_responseBytesWritten = 0;
_requestCount++;
}
/// <summary>
/// Stops the request processing loop between requests.
/// Called on all active connections when the server wants to initiate a shutdown
/// and after a keep-alive timeout.
/// </summary>
public void Stop()
{
_requestProcessingStopping = true;
Input.CancelPendingRead();
}
private void CancelRequestAbortedToken()
{
try
{
RequestAbortedSource.Cancel();
_abortedCts = null;
}
catch (Exception ex)
{
Log.ApplicationError(ConnectionId, TraceIdentifier, ex);
}
}
/// <summary>
        /// Immediately kill the connection and poison the request and response streams.
/// </summary>
public void Abort(Exception error)
{
if (Interlocked.Exchange(ref _requestAborted, 1) == 0)
{
_requestProcessingStopping = true;
_frameStreams?.Abort(error);
Output.Abort();
// Potentially calling user code. CancelRequestAbortedToken logs any exceptions.
ServiceContext.ThreadPool.UnsafeRun(state => ((Frame)state).CancelRequestAbortedToken(), this);
}
}
/// <summary>
/// Primary loop which consumes socket input, parses it for protocol framing, and invokes the
/// application delegate for as long as the socket is intended to remain open.
/// The resulting Task from this loop is preserved in a field which is used when the server needs
/// to drain and close all currently active connections.
/// </summary>
public abstract Task ProcessRequestsAsync();
public void OnStarting(Func<object, Task> callback, object state)
{
lock (_onStartingSync)
{
if (HasResponseStarted)
{
ThrowResponseAlreadyStartedException(nameof(OnStarting));
}
if (_onStarting == null)
{
_onStarting = new Stack<KeyValuePair<Func<object, Task>, object>>();
}
_onStarting.Push(new KeyValuePair<Func<object, Task>, object>(callback, state));
}
}
public void OnCompleted(Func<object, Task> callback, object state)
{
lock (_onCompletedSync)
{
if (_onCompleted == null)
{
_onCompleted = new Stack<KeyValuePair<Func<object, Task>, object>>();
}
_onCompleted.Push(new KeyValuePair<Func<object, Task>, object>(callback, state));
}
}
protected async Task FireOnStarting()
{
Stack<KeyValuePair<Func<object, Task>, object>> onStarting = null;
lock (_onStartingSync)
{
onStarting = _onStarting;
_onStarting = null;
}
if (onStarting != null)
{
try
{
foreach (var entry in onStarting)
{
await entry.Key.Invoke(entry.Value);
}
}
catch (Exception ex)
{
ReportApplicationError(ex);
}
}
}
protected async Task FireOnCompleted()
{
Stack<KeyValuePair<Func<object, Task>, object>> onCompleted = null;
lock (_onCompletedSync)
{
onCompleted = _onCompleted;
_onCompleted = null;
}
if (onCompleted != null)
{
foreach (var entry in onCompleted)
{
try
{
await entry.Key.Invoke(entry.Value);
}
catch (Exception ex)
{
ReportApplicationError(ex);
}
}
}
}
public async Task FlushAsync(CancellationToken cancellationToken = default(CancellationToken))
{
await InitializeResponse(0);
await Output.FlushAsync(cancellationToken);
}
public Task WriteAsync(ArraySegment<byte> data, CancellationToken cancellationToken = default(CancellationToken))
{
if (!HasResponseStarted)
{
return WriteAsyncAwaited(data, cancellationToken);
}
VerifyAndUpdateWrite(data.Count);
if (_canHaveBody)
{
if (_autoChunk)
{
if (data.Count == 0)
{
return TaskCache.CompletedTask;
}
return WriteChunkedAsync(data, cancellationToken);
}
else
{
CheckLastWrite();
return Output.WriteAsync(data, cancellationToken: cancellationToken);
}
}
else
{
HandleNonBodyResponseWrite();
return TaskCache.CompletedTask;
}
}
public async Task WriteAsyncAwaited(ArraySegment<byte> data, CancellationToken cancellationToken)
{
await InitializeResponseAwaited(data.Count);
// WriteAsyncAwaited is only called for the first write to the body.
// Ensure headers are flushed if Write(Chunked)Async isn't called.
if (_canHaveBody)
{
if (_autoChunk)
{
if (data.Count == 0)
{
await FlushAsync(cancellationToken);
return;
}
await WriteChunkedAsync(data, cancellationToken);
}
else
{
CheckLastWrite();
await Output.WriteAsync(data, cancellationToken: cancellationToken);
}
}
else
{
HandleNonBodyResponseWrite();
await FlushAsync(cancellationToken);
}
}
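        // Throws if this write would push the response past the declared
        // Content-Length, and disables keep-alive since the response can no
        // longer be trusted.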
private void VerifyAndUpdateWrite(int count)
{
var responseHeaders = FrameResponseHeaders;
if (responseHeaders != null &&
!responseHeaders.HasTransferEncoding &&
responseHeaders.ContentLength.HasValue &&
_responseBytesWritten + count > responseHeaders.ContentLength.Value)
{
_keepAlive = false;
throw new InvalidOperationException(
CoreStrings.FormatTooManyBytesWritten(_responseBytesWritten + count, responseHeaders.ContentLength.Value));
}
_responseBytesWritten += count;
}
private void CheckLastWrite()
{
var responseHeaders = FrameResponseHeaders;
// Prevent firing request aborted token if this is the last write, to avoid
// aborting the request if the app is still running when the client receives
// the final bytes of the response and gracefully closes the connection.
//
// Called after VerifyAndUpdateWrite(), so _responseBytesWritten has already been updated.
if (responseHeaders != null &&
!responseHeaders.HasTransferEncoding &&
responseHeaders.ContentLength.HasValue &&
_responseBytesWritten == responseHeaders.ContentLength.Value)
{
_abortedCts = null;
}
}
protected void VerifyResponseContentLength()
{
var responseHeaders = FrameResponseHeaders;
if (!HttpMethods.IsHead(Method) &&
!responseHeaders.HasTransferEncoding &&
responseHeaders.ContentLength.HasValue &&
_responseBytesWritten < responseHeaders.ContentLength.Value)
{
// We need to close the connection if any bytes were written since the client
// cannot be certain of how many bytes it will receive.
if (_responseBytesWritten > 0)
{
_keepAlive = false;
}
ReportApplicationError(new InvalidOperationException(
CoreStrings.FormatTooFewBytesWritten(_responseBytesWritten, responseHeaders.ContentLength.Value)));
}
}
private Task WriteChunkedAsync(ArraySegment<byte> data, CancellationToken cancellationToken)
{
return Output.WriteAsync(data, chunk: true, cancellationToken: cancellationToken);
}
private Task WriteChunkedResponseSuffix()
{
return Output.WriteAsync(_endChunkedResponseBytes);
}
private static ArraySegment<byte> CreateAsciiByteArraySegment(string text)
{
var bytes = Encoding.ASCII.GetBytes(text);
return new ArraySegment<byte>(bytes);
}
public void ProduceContinue()
{
if (HasResponseStarted)
{
return;
}
if (_httpVersion == Http.HttpVersion.Http11 &&
RequestHeaders.TryGetValue("Expect", out var expect) &&
(expect.FirstOrDefault() ?? "").Equals("100-continue", StringComparison.OrdinalIgnoreCase))
{
Output.WriteAsync(_continueBytes).GetAwaiter().GetResult();
}
}
public Task InitializeResponse(int firstWriteByteCount)
{
if (HasResponseStarted)
{
return TaskCache.CompletedTask;
}
if (_onStarting != null)
{
return InitializeResponseAwaited(firstWriteByteCount);
}
if (_applicationException != null)
{
ThrowResponseAbortedException();
}
VerifyAndUpdateWrite(firstWriteByteCount);
ProduceStart(appCompleted: false);
return TaskCache.CompletedTask;
}
private async Task InitializeResponseAwaited(int firstWriteByteCount)
{
await FireOnStarting();
if (_applicationException != null)
{
ThrowResponseAbortedException();
}
VerifyAndUpdateWrite(firstWriteByteCount);
ProduceStart(appCompleted: false);
}
private void ProduceStart(bool appCompleted)
{
if (HasResponseStarted)
{
return;
}
_requestProcessingStatus = RequestProcessingStatus.ResponseStarted;
CreateResponseHeader(appCompleted);
}
protected Task TryProduceInvalidRequestResponse()
{
if (_requestRejectedException != null)
{
return ProduceEnd();
}
return TaskCache.CompletedTask;
}
protected Task ProduceEnd()
{
if (_requestRejectedException != null || _applicationException != null)
{
if (HasResponseStarted)
{
// We can no longer change the response, so we simply close the connection.
_requestProcessingStopping = true;
return TaskCache.CompletedTask;
}
// If the request was rejected, the error state has already been set by SetBadRequestState and
// that should take precedence.
if (_requestRejectedException != null)
{
SetErrorResponseException(_requestRejectedException);
}
else
{
// 500 Internal Server Error
SetErrorResponseHeaders(statusCode: StatusCodes.Status500InternalServerError);
}
}
if (!HasResponseStarted)
{
return ProduceEndAwaited();
}
return WriteSuffix();
}
private async Task ProduceEndAwaited()
{
ProduceStart(appCompleted: true);
// Force flush
await Output.FlushAsync();
await WriteSuffix();
}
private Task WriteSuffix()
{
// _autoChunk should be checked after we are sure ProduceStart() has been called
// since ProduceStart() may set _autoChunk to true.
if (_autoChunk)
{
return WriteAutoChunkSuffixAwaited();
}
if (_keepAlive)
{
Log.ConnectionKeepAlive(ConnectionId);
}
if (HttpMethods.IsHead(Method) && _responseBytesWritten > 0)
{
Log.ConnectionHeadResponseBodyWrite(ConnectionId, _responseBytesWritten);
}
return TaskCache.CompletedTask;
}
private async Task WriteAutoChunkSuffixAwaited()
{
// For the same reason we call CheckLastWrite() in Content-Length responses.
_abortedCts = null;
await WriteChunkedResponseSuffix();
if (_keepAlive)
{
Log.ConnectionKeepAlive(ConnectionId);
}
}
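        // Finalizes the response headers (Connection, Transfer-Encoding,
        // Content-Length, Server, Date) and writes the status line and headers.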
private void CreateResponseHeader(bool appCompleted)
{
var responseHeaders = FrameResponseHeaders;
var hasConnection = responseHeaders.HasConnection;
var connectionOptions = FrameHeaders.ParseConnection(responseHeaders.HeaderConnection);
var hasTransferEncoding = responseHeaders.HasTransferEncoding;
var transferCoding = FrameHeaders.GetFinalTransferCoding(responseHeaders.HeaderTransferEncoding);
if (_keepAlive && hasConnection)
{
_keepAlive = (connectionOptions & ConnectionOptions.KeepAlive) == ConnectionOptions.KeepAlive;
}
// https://tools.ietf.org/html/rfc7230#section-3.3.1
// If any transfer coding other than
// chunked is applied to a response payload body, the sender MUST either
// apply chunked as the final transfer coding or terminate the message
// by closing the connection.
if (hasTransferEncoding && transferCoding != TransferCoding.Chunked)
{
_keepAlive = false;
}
// Set whether response can have body
_canHaveBody = StatusCanHaveBody(StatusCode) && Method != "HEAD";
// Don't set the Content-Length or Transfer-Encoding headers
// automatically for HEAD requests or 204, 205, 304 responses.
if (_canHaveBody)
{
if (!hasTransferEncoding && !responseHeaders.ContentLength.HasValue)
{
if (appCompleted && StatusCode != StatusCodes.Status101SwitchingProtocols)
{
// Since the app has completed and we are only now generating
// the headers we can safely set the Content-Length to 0.
responseHeaders.ContentLength = 0;
}
else
{
// Note for future reference: never change this to set _autoChunk to true on HTTP/1.0
// connections, even if we were to infer the client supports it because an HTTP/1.0 request
// was received that used chunked encoding. Sending a chunked response to an HTTP/1.0
// client would break compliance with RFC 7230 (section 3.3.1):
//
// A server MUST NOT send a response containing Transfer-Encoding unless the corresponding
// request indicates HTTP/1.1 (or later).
if (_httpVersion == Http.HttpVersion.Http11 && StatusCode != StatusCodes.Status101SwitchingProtocols)
{
_autoChunk = true;
responseHeaders.SetRawTransferEncoding("chunked", _bytesTransferEncodingChunked);
}
else
{
_keepAlive = false;
}
}
}
}
else if (hasTransferEncoding)
{
RejectNonBodyTransferEncodingResponse(appCompleted);
}
responseHeaders.SetReadOnly();
if (!hasConnection)
{
if (!_keepAlive)
{
responseHeaders.SetRawConnection("close", _bytesConnectionClose);
}
else if (_httpVersion == Http.HttpVersion.Http10)
{
responseHeaders.SetRawConnection("keep-alive", _bytesConnectionKeepAlive);
}
}
if (ServerOptions.AddServerHeader && !responseHeaders.HasServer)
{
responseHeaders.SetRawServer(Constants.ServerName, _bytesServer);
}
if (!responseHeaders.HasDate)
{
var dateHeaderValues = DateHeaderValueManager.GetDateHeaderValues();
responseHeaders.SetRawDate(dateHeaderValues.String, dateHeaderValues.Bytes);
}
Output.Write(_writeHeaders, new FrameAdapter(this));
}
private static void WriteResponseHeaders(WritableBuffer writableBuffer, FrameAdapter frameAdapter)
{
var frame = frameAdapter.Frame;
var writer = new WritableBufferWriter(writableBuffer);
var responseHeaders = frame.FrameResponseHeaders;
writer.Write(_bytesHttpVersion11);
var statusBytes = ReasonPhrases.ToStatusBytes(frame.StatusCode, frame.ReasonPhrase);
writer.Write(statusBytes);
responseHeaders.CopyTo(ref writer);
writer.Write(_bytesEndHeaders);
}
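        // Advances the request-parsing state machine as far as the buffered
        // input allows: request line first, then headers.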
public void ParseRequest(ReadableBuffer buffer, out ReadCursor consumed, out ReadCursor examined)
{
consumed = buffer.Start;
examined = buffer.End;
switch (_requestProcessingStatus)
{
case RequestProcessingStatus.RequestPending:
if (buffer.IsEmpty)
{
break;
}
TimeoutControl.ResetTimeout(_requestHeadersTimeoutTicks, TimeoutAction.SendTimeoutResponse);
_requestProcessingStatus = RequestProcessingStatus.ParsingRequestLine;
goto case RequestProcessingStatus.ParsingRequestLine;
case RequestProcessingStatus.ParsingRequestLine:
if (TakeStartLine(buffer, out consumed, out examined))
{
buffer = buffer.Slice(consumed, buffer.End);
_requestProcessingStatus = RequestProcessingStatus.ParsingHeaders;
goto case RequestProcessingStatus.ParsingHeaders;
}
else
{
break;
}
case RequestProcessingStatus.ParsingHeaders:
if (TakeMessageHeaders(buffer, out consumed, out examined))
{
_requestProcessingStatus = RequestProcessingStatus.AppStarted;
}
break;
}
}
public bool TakeStartLine(ReadableBuffer buffer, out ReadCursor consumed, out ReadCursor examined)
{
var overLength = false;
if (buffer.Length >= ServerOptions.Limits.MaxRequestLineSize)
{
buffer = buffer.Slice(buffer.Start, ServerOptions.Limits.MaxRequestLineSize);
overLength = true;
}
var result = _parser.ParseRequestLine(new FrameAdapter(this), buffer, out consumed, out examined);
if (!result && overLength)
{
ThrowRequestRejected(RequestRejectionReason.RequestLineTooLong);
}
return result;
}
public bool TakeMessageHeaders(ReadableBuffer buffer, out ReadCursor consumed, out ReadCursor examined)
{
// Make sure the buffer is limited
bool overLength = false;
if (buffer.Length >= _remainingRequestHeadersBytesAllowed)
{
buffer = buffer.Slice(buffer.Start, _remainingRequestHeadersBytesAllowed);
            // If we sliced, it means the current buffer is bigger than what
            // we're allowed to look at.
overLength = true;
}
var result = _parser.ParseHeaders(new FrameAdapter(this), buffer, out consumed, out examined, out var consumedBytes);
_remainingRequestHeadersBytesAllowed -= consumedBytes;
if (!result && overLength)
{
ThrowRequestRejected(RequestRejectionReason.HeadersExceedMaxTotalSize);
}
if (result)
{
TimeoutControl.CancelTimeout();
}
return result;
}
public bool StatusCanHaveBody(int statusCode)
{
// List of status codes taken from Microsoft.Net.Http.Server.Response
return statusCode != StatusCodes.Status204NoContent &&
statusCode != StatusCodes.Status205ResetContent &&
statusCode != StatusCodes.Status304NotModified;
}
private void ThrowResponseAlreadyStartedException(string value)
{
throw new InvalidOperationException(CoreStrings.FormatParameterReadOnlyAfterResponseStarted(value));
}
private void RejectNonBodyTransferEncodingResponse(bool appCompleted)
{
var ex = new InvalidOperationException(CoreStrings.FormatHeaderNotAllowedOnResponse("Transfer-Encoding", StatusCode));
if (!appCompleted)
{
                // Back out of header creation and surface the exception in user code
_requestProcessingStatus = RequestProcessingStatus.AppStarted;
throw ex;
}
else
{
ReportApplicationError(ex);
// 500 Internal Server Error
SetErrorResponseHeaders(statusCode: StatusCodes.Status500InternalServerError);
}
}
private void SetErrorResponseException(BadHttpRequestException ex)
{
SetErrorResponseHeaders(ex.StatusCode);
if (!StringValues.IsNullOrEmpty(ex.AllowedHeader))
{
FrameResponseHeaders.HeaderAllow = ex.AllowedHeader;
}
}
private void SetErrorResponseHeaders(int statusCode)
{
Debug.Assert(!HasResponseStarted, $"{nameof(SetErrorResponseHeaders)} called after response had already started.");
StatusCode = statusCode;
ReasonPhrase = null;
var responseHeaders = FrameResponseHeaders;
responseHeaders.Reset();
var dateHeaderValues = DateHeaderValueManager.GetDateHeaderValues();
responseHeaders.SetRawDate(dateHeaderValues.String, dateHeaderValues.Bytes);
responseHeaders.ContentLength = 0;
if (ServerOptions.AddServerHeader)
{
responseHeaders.SetRawServer(Constants.ServerName, _bytesServer);
}
}
public void HandleNonBodyResponseWrite()
{
// Writes to HEAD response are ignored and logged at the end of the request
if (Method != "HEAD")
{
// Throw Exception for 204, 205, 304 responses.
throw new InvalidOperationException(CoreStrings.FormatWritingToResponseBodyNotSupported(StatusCode));
}
}
private void ThrowResponseAbortedException()
{
throw new ObjectDisposedException(CoreStrings.UnhandledApplicationException, _applicationException);
}
public void ThrowRequestRejected(RequestRejectionReason reason)
=> throw BadHttpRequestException.GetException(reason);
public void ThrowRequestRejected(RequestRejectionReason reason, string detail)
=> throw BadHttpRequestException.GetException(reason, detail);
private void ThrowRequestTargetRejected(Span<byte> target)
=> throw GetInvalidRequestTargetException(target);
private BadHttpRequestException GetInvalidRequestTargetException(Span<byte> target)
=> BadHttpRequestException.GetException(
RequestRejectionReason.InvalidRequestTarget,
Log.IsEnabled(LogLevel.Information)
? target.GetAsciiStringEscaped(Constants.MaxExceptionDetailSize)
: string.Empty);
public void SetBadRequestState(RequestRejectionReason reason)
{
SetBadRequestState(BadHttpRequestException.GetException(reason));
}
public void SetBadRequestState(BadHttpRequestException ex)
{
Log.ConnectionBadRequest(ConnectionId, ex);
if (!HasResponseStarted)
{
SetErrorResponseException(ex);
}
_keepAlive = false;
_requestProcessingStopping = true;
_requestRejectedException = ex;
}
protected void ReportApplicationError(Exception ex)
{
if (_applicationException == null)
{
_applicationException = ex;
}
else if (_applicationException is AggregateException)
{
_applicationException = new AggregateException(_applicationException, ex).Flatten();
}
else
{
_applicationException = new AggregateException(_applicationException, ex);
}
Log.ApplicationError(ConnectionId, TraceIdentifier, ex);
}
public void OnStartLine(HttpMethod method, HttpVersion version, Span<byte> target, Span<byte> path, Span<byte> query, Span<byte> customMethod, bool pathEncoded)
{
Debug.Assert(target.Length != 0, "Request target must be non-zero length");
var ch = target[0];
if (ch == ByteForwardSlash)
{
// origin-form.
// The most common form of request-target.
// https://tools.ietf.org/html/rfc7230#section-5.3.1
OnOriginFormTarget(method, version, target, path, query, customMethod, pathEncoded);
}
else if (ch == ByteAsterisk && target.Length == 1)
{
OnAsteriskFormTarget(method);
}
else if (target.GetKnownHttpScheme(out var scheme))
{
OnAbsoluteFormTarget(target, query);
}
else
{
// Assume anything else is considered authority form.
// FYI: this should be an edge case. This should only happen when
// a client mistakenly thinks this server is a proxy server.
OnAuthorityFormTarget(method, target);
}
Method = method != HttpMethod.Custom
? HttpUtilities.MethodToString(method) ?? string.Empty
: customMethod.GetAsciiStringNonNullCharacters();
HttpVersion = HttpUtilities.VersionToString(version);
Debug.Assert(RawTarget != null, "RawTarget was not set");
Debug.Assert(Method != null, "Method was not set");
Debug.Assert(Path != null, "Path was not set");
Debug.Assert(QueryString != null, "QueryString was not set");
Debug.Assert(HttpVersion != null, "HttpVersion was not set");
}
private void OnOriginFormTarget(HttpMethod method, HttpVersion version, Span<byte> target, Span<byte> path, Span<byte> query, Span<byte> customMethod, bool pathEncoded)
{
Debug.Assert(target[0] == ByteForwardSlash, "Should only be called when path starts with /");
_requestTargetForm = HttpRequestTarget.OriginForm;
// URIs are always encoded/escaped to ASCII https://tools.ietf.org/html/rfc3986#page-11
// Multibyte Internationalized Resource Identifiers (IRIs) are first converted to utf8;
// then encoded/escaped to ASCII https://www.ietf.org/rfc/rfc3987.txt "Mapping of IRIs to URIs"
string requestUrlPath = null;
string rawTarget = null;
try
{
// Read raw target before mutating memory.
rawTarget = target.GetAsciiStringNonNullCharacters();
if (pathEncoded)
{
// URI was encoded, unescape and then parse as UTF-8
var pathLength = UrlEncoder.Decode(path, path);
// Removing dot segments must be done after unescaping. From RFC 3986:
//
// URI producing applications should percent-encode data octets that
// correspond to characters in the reserved set unless these characters
// are specifically allowed by the URI scheme to represent data in that
// component. If a reserved character is found in a URI component and
// no delimiting role is known for that character, then it must be
// interpreted as representing the data octet corresponding to that
// character's encoding in US-ASCII.
//
// https://tools.ietf.org/html/rfc3986#section-2.2
pathLength = PathNormalizer.RemoveDotSegments(path.Slice(0, pathLength));
requestUrlPath = GetUtf8String(path.Slice(0, pathLength));
}
else
{
var pathLength = PathNormalizer.RemoveDotSegments(path);
if (path.Length == pathLength && query.Length == 0)
{
// If no decoding was required, no dot segments were removed and
// there is no query, the request path is the same as the raw target
requestUrlPath = rawTarget;
}
else
{
requestUrlPath = path.Slice(0, pathLength).GetAsciiStringNonNullCharacters();
}
}
}
catch (InvalidOperationException)
{
ThrowRequestTargetRejected(target);
}
QueryString = query.GetAsciiStringNonNullCharacters();
RawTarget = rawTarget;
Path = requestUrlPath;
}
private void OnAuthorityFormTarget(HttpMethod method, Span<byte> target)
{
_requestTargetForm = HttpRequestTarget.AuthorityForm;
// This is not complete validation. It is just a quick scan for invalid characters
// but doesn't check that the target fully matches the URI spec.
for (var i = 0; i < target.Length; i++)
{
var ch = target[i];
if (!UriUtilities.IsValidAuthorityCharacter(ch))
{
ThrowRequestTargetRejected(target);
}
}
// The authority-form of request-target is only used for CONNECT
// requests (https://tools.ietf.org/html/rfc7231#section-4.3.6).
if (method != HttpMethod.Connect)
{
ThrowRequestRejected(RequestRejectionReason.ConnectMethodRequired);
}
// When making a CONNECT request to establish a tunnel through one or
// more proxies, a client MUST send only the target URI's authority
// component (excluding any userinfo and its "@" delimiter) as the
// request-target.For example,
//
// CONNECT www.example.com:80 HTTP/1.1
//
// Allowed characters in the 'host + port' section of authority.
// See https://tools.ietf.org/html/rfc3986#section-3.2
RawTarget = target.GetAsciiStringNonNullCharacters();
Path = string.Empty;
QueryString = string.Empty;
}
private void OnAsteriskFormTarget(HttpMethod method)
{
_requestTargetForm = HttpRequestTarget.AsteriskForm;
// The asterisk-form of request-target is only used for a server-wide
// OPTIONS request (https://tools.ietf.org/html/rfc7231#section-4.3.7).
if (method != HttpMethod.Options)
{
ThrowRequestRejected(RequestRejectionReason.OptionsMethodRequired);
}
RawTarget = Asterisk;
Path = string.Empty;
QueryString = string.Empty;
}
private void OnAbsoluteFormTarget(Span<byte> target, Span<byte> query)
{
_requestTargetForm = HttpRequestTarget.AbsoluteForm;
// absolute-form
// https://tools.ietf.org/html/rfc7230#section-5.3.2
// This code should be the edge-case.
// From the spec:
// a server MUST accept the absolute-form in requests, even though
// HTTP/1.1 clients will only send them in requests to proxies.
RawTarget = target.GetAsciiStringNonNullCharacters();
// Validation of absolute URIs is slow, but clients
            // should not be sending this form anyway, so this perf
            // optimization is not a high priority
if (!Uri.TryCreate(RawTarget, UriKind.Absolute, out var uri))
{
ThrowRequestTargetRejected(target);
}
_absoluteRequestTarget = uri;
Path = uri.LocalPath;
// don't use uri.Query because we need the unescaped version
QueryString = query.GetAsciiStringNonNullCharacters();
}
private unsafe static string GetUtf8String(Span<byte> path)
{
// .NET 451 doesn't have pointer overloads for Encoding.GetString so we
// copy to an array
fixed (byte* pointer = &path.DangerousGetPinnableReference())
{
return Encoding.UTF8.GetString(pointer, path.Length);
}
}
public void OnHeader(Span<byte> name, Span<byte> value)
{
_requestHeadersParsed++;
if (_requestHeadersParsed > ServerOptions.Limits.MaxRequestHeaderCount)
{
ThrowRequestRejected(RequestRejectionReason.TooManyHeaders);
}
var valueString = value.GetAsciiStringNonNullCharacters();
FrameRequestHeaders.Append(name, valueString);
}
protected void EnsureHostHeaderExists()
{
if (_httpVersion == Http.HttpVersion.Http10)
{
return;
}
// https://tools.ietf.org/html/rfc7230#section-5.4
// A server MUST respond with a 400 (Bad Request) status code to any
// HTTP/1.1 request message that lacks a Host header field and to any
// request message that contains more than one Host header field or a
// Host header field with an invalid field-value.
var host = FrameRequestHeaders.HeaderHost;
if (host.Count <= 0)
{
ThrowRequestRejected(RequestRejectionReason.MissingHostHeader);
}
else if (host.Count > 1)
{
ThrowRequestRejected(RequestRejectionReason.MultipleHostHeaders);
}
else if (_requestTargetForm == HttpRequestTarget.AuthorityForm)
{
if (!host.Equals(RawTarget))
{
ThrowRequestRejected(RequestRejectionReason.InvalidHostHeader, host.ToString());
}
}
else if (_requestTargetForm == HttpRequestTarget.AbsoluteForm)
{
// If the target URI includes an authority component, then a
// client MUST send a field - value for Host that is identical to that
// authority component, excluding any userinfo subcomponent and its "@"
// delimiter.
            // System.Uri does not tell us if the port was in the original string or not.
// When IsDefaultPort = true, we will allow Host: with or without the default port
var authorityAndPort = _absoluteRequestTarget.Authority + ":" + _absoluteRequestTarget.Port;
if ((host != _absoluteRequestTarget.Authority || !_absoluteRequestTarget.IsDefaultPort)
&& host != authorityAndPort)
{
ThrowRequestRejected(RequestRejectionReason.InvalidHostHeader, host.ToString());
}
}
}
private IPipe CreateRequestBodyPipe()
=> ConnectionInformation.PipeFactory.Create(new PipeOptions
{
ReaderScheduler = ServiceContext.ThreadPool,
WriterScheduler = InlineScheduler.Default,
MaximumSizeHigh = 1,
MaximumSizeLow = 1
});
private enum HttpRequestTarget
{
Unknown = -1,
// origin-form is the most common
OriginForm,
AbsoluteForm,
AuthorityForm,
AsteriskForm
}
}
}
| 1 | 13,352 | nit: Probably should be renamed to `InitializeBody` | aspnet-KestrelHttpServer | .cs |
@@ -11,13 +11,15 @@ export default Component.extend({
open: false,
- navMenuIcon: computed('config.blogUrl', function () {
- let url = `${this.get('config.blogUrl')}/favicon.png`;
+ navMenuIcon: computed('config.blogUrl', 'settings.icon', function () {
+ let blogIcon = this.get('settings.icon') ? this.get('settings.icon') : 'favicon.ico';
+ let url = `${this.get('config.blogUrl')}/${blogIcon}`;
return htmlSafe(`background-image: url(${url})`);
}),
config: injectService(),
+ settings: injectService(),
session: injectService(),
ghostPaths: injectService(),
feature: injectService(), | 1 | import Component from 'ember-component';
import {htmlSafe} from 'ember-string';
import injectService from 'ember-service/inject';
import computed from 'ember-computed';
import calculatePosition from 'ember-basic-dropdown/utils/calculate-position';
export default Component.extend({
tagName: 'nav',
classNames: ['gh-nav'],
classNameBindings: ['open'],
open: false,
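    // Background-image style for the nav menu icon; the patch above swaps the
    // hard-coded favicon.png for the blog's uploaded icon when one is set.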
navMenuIcon: computed('config.blogUrl', function () {
let url = `${this.get('config.blogUrl')}/favicon.png`;
return htmlSafe(`background-image: url(${url})`);
}),
config: injectService(),
session: injectService(),
ghostPaths: injectService(),
feature: injectService(),
routing: injectService('-routing'),
mouseEnter() {
this.sendAction('onMouseEnter');
},
// equivalent to "left: auto; right: -20px"
userDropdownPosition(trigger, dropdown) {
let {horizontalPosition, verticalPosition, style} = calculatePosition(...arguments);
let {width: dropdownWidth} = dropdown.firstElementChild.getBoundingClientRect();
style.right += (dropdownWidth - 20);
style['z-index'] = 1100;
return {horizontalPosition, verticalPosition, style};
},
actions: {
toggleAutoNav() {
this.sendAction('toggleMaximise');
},
showMarkdownHelp() {
this.sendAction('showMarkdownHelp');
},
closeMobileMenu() {
this.sendAction('closeMobileMenu');
},
openAutoNav() {
this.sendAction('openAutoNav');
}
}
});
| 1 | 8,045 | is `settings.icon` always null/undefined when there's no icon, or does it get set to a blank string? It might be worth wrapping it in an `isBlank()` anyway | TryGhost-Admin | js |
@@ -90,8 +90,9 @@ var Module = fx.Options(
fx.Provide(MetadataManagerProvider),
fx.Provide(NamespaceCacheProvider),
fx.Provide(serialization.NewSerializer),
- fx.Provide(ArchiverProviderProvider),
fx.Provide(ArchivalMetadataProvider),
+ fx.Provide(ArchiverProviderProvider),
+ fx.Invoke(RegisterBootstrapContainer),
fx.Provide(PersistenceBeanProvider),
fx.Provide(MembershipFactoryProvider),
fx.Provide(MembershipMonitorProvider), | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package resource
import (
"net"
"os"
"time"
"github.com/uber-go/tally"
"github.com/uber/tchannel-go"
"go.temporal.io/api/workflowservice/v1"
sdkclient "go.temporal.io/sdk/client"
"go.uber.org/fx"
"go.temporal.io/server/client"
"go.temporal.io/server/client/frontend"
"go.temporal.io/server/client/history"
"go.temporal.io/server/client/matching"
"go.temporal.io/server/common"
"go.temporal.io/server/common/archiver"
"go.temporal.io/server/common/archiver/provider"
"go.temporal.io/server/common/clock"
"go.temporal.io/server/common/cluster"
"go.temporal.io/server/common/config"
"go.temporal.io/server/common/dynamicconfig"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/membership"
"go.temporal.io/server/common/metrics"
"go.temporal.io/server/common/namespace"
"go.temporal.io/server/common/persistence"
persistenceClient "go.temporal.io/server/common/persistence/client"
"go.temporal.io/server/common/persistence/serialization"
"go.temporal.io/server/common/quotas"
"go.temporal.io/server/common/resolver"
"go.temporal.io/server/common/searchattribute"
)
type (
SnTaggedLogger log.Logger
ThrottledLogger log.Logger
ThrottledLoggerRpsFn quotas.RateFn
ServiceName string
HostName string
InstanceID string
)
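// Module wires up the shared resource dependencies. Note the distinction the
// patch above relies on: fx.Provide registers a lazy constructor, while
// fx.Invoke (used there for RegisterBootstrapContainer) runs eagerly at
// startup, forcing its dependencies to be built.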
var Module = fx.Options(
fx.Provide(SnTaggedLoggerProvider),
fx.Provide(ThrottledLoggerProvider),
fx.Provide(PersistenceConfigProvider),
fx.Provide(MetricsScopeProvider),
fx.Provide(HostNameProvider),
fx.Provide(ServiceNameProvider),
fx.Provide(ClusterMetadataProvider),
fx.Provide(ClusterMetadataConfigProvider),
fx.Provide(TimeSourceProvider),
fx.Provide(ClusterMetadataManagerProvider),
fx.Provide(PersistenceServiceResolverProvider),
fx.Provide(AbstractDatastoreFactoryProvider),
fx.Provide(ClusterNameProvider),
fx.Provide(MetricsClientProvider),
persistenceClient.FactoryModule,
fx.Provide(SearchAttributeProviderProvider),
fx.Provide(SearchAttributeManagerProvider),
fx.Provide(SearchAttributeMapperProvider),
fx.Provide(MetadataManagerProvider),
fx.Provide(NamespaceCacheProvider),
fx.Provide(serialization.NewSerializer),
fx.Provide(ArchiverProviderProvider),
fx.Provide(ArchivalMetadataProvider),
fx.Provide(PersistenceBeanProvider),
fx.Provide(MembershipFactoryProvider),
fx.Provide(MembershipMonitorProvider),
fx.Provide(ClientFactoryProvider),
fx.Provide(ClientBeanProvider),
fx.Provide(SdkClientProvider),
fx.Provide(FrontedClientProvider),
fx.Provide(PersistenceFaultInjectionFactoryProvider),
fx.Provide(GrpcListenerProvider),
fx.Provide(InstanceIDProvider),
fx.Provide(RingpopChannelProvider),
fx.Provide(RuntimeMetricsReporterProvider),
fx.Provide(NewFromDI),
)
func SnTaggedLoggerProvider(logger log.Logger, sn ServiceName) SnTaggedLogger {
return log.With(logger, tag.Service(string(sn)))
}
func ThrottledLoggerProvider(
logger SnTaggedLogger,
fn ThrottledLoggerRpsFn,
) ThrottledLogger {
return log.NewThrottledLogger(
logger,
quotas.RateFn(fn),
)
}
func GrpcListenerProvider(factory common.RPCFactory) net.Listener {
return factory.GetGRPCListener()
}
func MetricsClientProvider(params *BootstrapParams) metrics.Client {
return params.MetricsClient
}
func PersistenceConfigProvider(params *BootstrapParams) *config.Persistence {
return ¶ms.PersistenceConfig
}
func MetricsScopeProvider(params *BootstrapParams) tally.Scope {
return params.MetricsScope
}
func ServiceNameProvider(params *BootstrapParams) ServiceName {
return ServiceName(params.Name)
}
func HostNameProvider() (HostName, error) {
hn, err := os.Hostname()
return HostName(hn), err
}
func ClusterMetadataConfigProvider(params *BootstrapParams) *config.ClusterMetadata {
return params.ClusterMetadataConfig
}
func ClusterMetadataProvider(config *config.ClusterMetadata) cluster.Metadata {
return cluster.NewMetadata(
config.EnableGlobalNamespace,
config.FailoverVersionIncrement,
config.MasterClusterName,
config.CurrentClusterName,
config.ClusterInformation,
)
}
func ClusterNameProvider(config *config.ClusterMetadata) persistenceClient.ClusterName {
return persistenceClient.ClusterName(config.CurrentClusterName)
}
func PersistenceServiceResolverProvider(params *BootstrapParams) resolver.ServiceResolver {
return params.PersistenceServiceResolver
}
func AbstractDatastoreFactoryProvider(params *BootstrapParams) persistenceClient.AbstractDataStoreFactory {
return params.AbstractDatastoreFactory
}
func TimeSourceProvider() clock.TimeSource {
return clock.NewRealTimeSource()
}
func ClusterMetadataManagerProvider(factory persistenceClient.Factory) (persistence.ClusterMetadataManager, error) {
return factory.NewClusterMetadataManager()
}
func SearchAttributeProviderProvider(
timeSource clock.TimeSource,
cmMgr persistence.ClusterMetadataManager,
) searchattribute.Provider {
return searchattribute.NewManager(timeSource, cmMgr)
}
func SearchAttributeManagerProvider(
timeSource clock.TimeSource,
cmMgr persistence.ClusterMetadataManager,
) searchattribute.Manager {
return searchattribute.NewManager(timeSource, cmMgr)
}
func SearchAttributeMapperProvider(params *BootstrapParams) searchattribute.Mapper {
return params.SearchAttributesMapper
}
func MetadataManagerProvider(factory persistenceClient.Factory) (persistence.MetadataManager, error) {
return factory.NewMetadataManager()
}
func NamespaceCacheProvider(
logger SnTaggedLogger,
metricsClient metrics.Client,
clusterMetadata cluster.Metadata,
metadataManager persistence.MetadataManager,
) namespace.Registry {
return namespace.NewRegistry(
metadataManager,
clusterMetadata.IsGlobalNamespaceEnabled(),
metricsClient,
logger,
)
}
func ArchivalMetadataProvider(params *BootstrapParams) archiver.ArchivalMetadata {
return params.ArchivalMetadata
}
func ArchiverProviderProvider(params *BootstrapParams) provider.ArchiverProvider {
return params.ArchiverProvider
}
func ClientFactoryProvider(params *BootstrapParams) client.FactoryProvider {
factoryProvider := params.ClientFactoryProvider
if factoryProvider == nil {
factoryProvider = client.NewFactoryProvider()
}
return factoryProvider
}
func ClientBeanProvider(
factoryProvider client.FactoryProvider,
rpcFactory common.RPCFactory,
membershipMonitor membership.Monitor,
metricsClient metrics.Client,
dynamicCollection *dynamicconfig.Collection,
persistenceConfig *config.Persistence,
logger SnTaggedLogger,
clusterMetadata cluster.Metadata,
) (client.Bean, error) {
return client.NewClientBean(
factoryProvider.NewFactory(
rpcFactory,
membershipMonitor,
metricsClient,
dynamicCollection,
persistenceConfig.NumHistoryShards,
logger,
),
clusterMetadata,
)
}
func MembershipFactoryProvider(
params *BootstrapParams,
persistenceBean persistenceClient.Bean,
logger SnTaggedLogger,
) (MembershipMonitorFactory, error) {
return params.MembershipFactoryInitializer(persistenceBean, logger)
}
func PersistenceBeanProvider(factory persistenceClient.Factory) (persistenceClient.Bean, error) {
return persistenceClient.NewBeanFromFactory(factory)
}
// TODO: It seems this factory mostly handles singleton logic. We should be able to handle it via IoC.
func MembershipMonitorProvider(membershipFactory MembershipMonitorFactory) (membership.Monitor, error) {
return membershipFactory.GetMembershipMonitor()
}
func SdkClientProvider(params *BootstrapParams) sdkclient.Client {
return params.SdkClient
}
func FrontedClientProvider(clientBean client.Bean) workflowservice.WorkflowServiceClient {
frontendRawClient := clientBean.GetFrontendClient()
return frontend.NewRetryableClient(
frontendRawClient,
common.CreateFrontendServiceRetryPolicy(),
common.IsWhitelistServiceTransientError,
)
}
func PersistenceFaultInjectionFactoryProvider(factory persistenceClient.Factory) *persistenceClient.FaultInjectionDataStoreFactory {
return factory.FaultInjection()
}
func RingpopChannelProvider(rpcFactory common.RPCFactory) *tchannel.Channel {
return rpcFactory.GetRingpopChannel()
}
func InstanceIDProvider(params *BootstrapParams) InstanceID {
return InstanceID(params.InstanceID)
}
func RuntimeMetricsReporterProvider(
metricsScope tally.Scope,
logger SnTaggedLogger,
instanceID InstanceID,
) *metrics.RuntimeMetricsReporter {
return metrics.NewRuntimeMetricsReporter(
metricsScope,
time.Minute,
logger,
string(instanceID),
)
}
func NewFromDI(
persistenceConf *config.Persistence,
svcName ServiceName,
metricsScope tally.Scope,
hostName HostName,
clusterMetadata cluster.Metadata,
saProvider searchattribute.Provider,
saManager searchattribute.Manager,
saMapper searchattribute.Mapper,
namespaceRegistry namespace.Registry,
timeSource clock.TimeSource,
payloadSerializer serialization.Serializer,
metricsClient metrics.Client,
archivalMetadata archiver.ArchivalMetadata,
archiverProvider provider.ArchiverProvider,
membershipMonitor membership.Monitor,
sdkClient sdkclient.Client,
frontendClient workflowservice.WorkflowServiceClient,
clientBean client.Bean,
persistenceBean persistenceClient.Bean,
persistenceFaultInjection *persistenceClient.FaultInjectionDataStoreFactory,
logger SnTaggedLogger,
throttledLogger ThrottledLogger,
grpcListener net.Listener,
ringpopChannel *tchannel.Channel,
runtimeMetricsReporter *metrics.RuntimeMetricsReporter,
rpcFactory common.RPCFactory,
) (Resource, error) {
frontendServiceResolver, err := membershipMonitor.GetResolver(common.FrontendServiceName)
if err != nil {
return nil, err
}
matchingServiceResolver, err := membershipMonitor.GetResolver(common.MatchingServiceName)
if err != nil {
return nil, err
}
historyServiceResolver, err := membershipMonitor.GetResolver(common.HistoryServiceName)
if err != nil {
return nil, err
}
workerServiceResolver, err := membershipMonitor.GetResolver(common.WorkerServiceName)
if err != nil {
return nil, err
}
matchingRawClient, err := clientBean.GetMatchingClient(namespaceRegistry.GetNamespaceName)
if err != nil {
return nil, err
}
matchingClient := matching.NewRetryableClient(
matchingRawClient,
common.CreateMatchingServiceRetryPolicy(),
common.IsWhitelistServiceTransientError,
)
historyRawClient := clientBean.GetHistoryClient()
historyClient := history.NewRetryableClient(
historyRawClient,
common.CreateHistoryServiceRetryPolicy(),
common.IsWhitelistServiceTransientError,
)
return &Impl{
status: common.DaemonStatusInitialized,
numShards: persistenceConf.NumHistoryShards,
serviceName: string(svcName),
hostName: string(hostName),
metricsScope: metricsScope,
clusterMetadata: clusterMetadata,
saProvider: saProvider,
saManager: saManager,
saMapper: saMapper,
namespaceRegistry: namespaceRegistry,
timeSource: timeSource,
payloadSerializer: payloadSerializer,
metricsClient: metricsClient,
archivalMetadata: archivalMetadata,
archiverProvider: archiverProvider,
// membership infos
membershipMonitor: membershipMonitor,
frontendServiceResolver: frontendServiceResolver,
matchingServiceResolver: matchingServiceResolver,
historyServiceResolver: historyServiceResolver,
workerServiceResolver: workerServiceResolver,
sdkClient: sdkClient,
frontendClient: frontendClient,
matchingRawClient: matchingRawClient,
matchingClient: matchingClient,
historyRawClient: historyRawClient,
historyClient: historyClient,
clientBean: clientBean,
persistenceBean: persistenceBean,
persistenceFaultInjection: persistenceFaultInjection,
logger: logger,
throttledLogger: throttledLogger,
grpcListener: grpcListener,
ringpopChannel: ringpopChannel,
runtimeMetricsReporter: runtimeMetricsReporter,
rpcFactory: rpcFactory,
}, nil
}
| 1 | 12,832 | put invoke to the end of the list, and other module at beginning of the list (line 86) | temporalio-temporal | go |
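The review comment attached to this record concerns the ordering of entries in a dependency-injection option list (Temporal wires providers like the ones above with go.uber.org/fx). A minimal, hypothetical sketch of the layout the reviewer asks for — imported modules at the beginning of the list, local constructors in the middle, fx.Invoke entries at the end; every name below is a stand-in, not the actual Temporal module:

package main

import (
	"fmt"

	"go.uber.org/fx"
)

type timeSource struct{}

func newTimeSource() *timeSource { return &timeSource{} }

func report(ts *timeSource) { fmt.Println("wired:", ts != nil) }

// otherModule stands in for an fx module imported from another package.
var otherModule = fx.Options(
	fx.Provide(newTimeSource),
)

func main() {
	fx.New(
		otherModule,                         // other modules at the beginning of the list
		fx.Provide(func() int { return 0 }), // this package's constructors in the middle
		fx.Invoke(report),                   // fx.Invoke entries at the end of the list
	)
}

fx executes fx.Invoke targets eagerly during fx.New, so keeping them last makes the reading order of the list match the execution order of the wiring.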
@@ -166,7 +166,7 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
if db.capacity == 0 {
db.capacity = defaultCapacity
}
- db.logger.Infof("db capacity: %v", db.capacity)
+ db.logger.Infof("database capacity: %d chunks, %d bytes, %.2f megabytes.", db.capacity, db.capacity*swarm.ChunkSize, float64(db.capacity*swarm.ChunkSize)*9.5367431640625e-7)
if maxParallelUpdateGC > 0 {
db.updateGCSem = make(chan struct{}, maxParallelUpdateGC)
} | 1 | // Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package localstore
import (
"encoding/binary"
"errors"
"os"
"runtime/pprof"
"sync"
"time"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags"
"github.com/prometheus/client_golang/prometheus"
"github.com/syndtr/goleveldb/leveldb"
)
var _ storage.Storer = &DB{}
var (
	// ErrInvalidMode is returned when an unknown Mode
// is provided to the function.
ErrInvalidMode = errors.New("invalid mode")
)
var (
// Default value for Capacity DB option.
defaultCapacity uint64 = 5000000
// Limit the number of goroutines created by Getters
// that call updateGC function. Value 0 sets no limit.
maxParallelUpdateGC = 1000
)
// DB is the local store implementation and holds
// database-related objects.
type DB struct {
shed *shed.DB
tags *tags.Tags
// schema name of loaded data
schemaName shed.StringField
// retrieval indexes
retrievalDataIndex shed.Index
retrievalAccessIndex shed.Index
// push syncing index
pushIndex shed.Index
// push syncing subscriptions triggers
pushTriggers []chan struct{}
pushTriggersMu sync.RWMutex
// pull syncing index
pullIndex shed.Index
// pull syncing subscriptions triggers per bin
pullTriggers map[uint8][]chan struct{}
pullTriggersMu sync.RWMutex
// binIDs stores the latest chunk serial ID for every
// proximity order bin
binIDs shed.Uint64Vector
// garbage collection index
gcIndex shed.Index
// garbage collection exclude index for pinned contents
gcExcludeIndex shed.Index
// pin files Index
pinIndex shed.Index
	// field that stores the number of items in the gc index
gcSize shed.Uint64Field
// garbage collection is triggered when gcSize exceeds
// the capacity value
capacity uint64
// triggers garbage collection event loop
collectGarbageTrigger chan struct{}
// a buffered channel acting as a semaphore
// to limit the maximal number of goroutines
// created by Getters to call updateGC function
updateGCSem chan struct{}
// a wait group to ensure all updateGC goroutines
// are done before closing the database
updateGCWG sync.WaitGroup
// baseKey is the overlay address
baseKey []byte
batchMu sync.Mutex
// this channel is closed when close function is called
// to terminate other goroutines
close chan struct{}
// protect Close method from exiting before
// garbage collection and gc size write workers
// are done
collectGarbageWorkerDone chan struct{}
// wait for all subscriptions to finish before closing
	// the underlying store to prevent possible panics from
// iterators
subscritionsWG sync.WaitGroup
metrics metrics
logger logging.Logger
}
// Options struct holds optional parameters for configuring DB.
type Options struct {
// Capacity is a limit that triggers garbage collection when
// number of items in gcIndex equals or exceeds it.
Capacity uint64
// MetricsPrefix defines a prefix for metrics names.
MetricsPrefix string
Tags *tags.Tags
}
// New returns a new DB. All fields and indexes are initialized
// and possible conflicts with the schema of an existing database are checked.
// One goroutine for writing batches is created.
func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB, err error) {
if o == nil {
// default options
o = &Options{
Capacity: defaultCapacity,
}
}
db = &DB{
capacity: o.Capacity,
baseKey: baseKey,
tags: o.Tags,
// channel collectGarbageTrigger
// needs to be buffered with the size of 1
// to signal another event if it
// is triggered during already running function
collectGarbageTrigger: make(chan struct{}, 1),
close: make(chan struct{}),
collectGarbageWorkerDone: make(chan struct{}),
metrics: newMetrics(),
logger: logger,
}
if db.capacity == 0 {
db.capacity = defaultCapacity
}
db.logger.Infof("db capacity: %v", db.capacity)
if maxParallelUpdateGC > 0 {
db.updateGCSem = make(chan struct{}, maxParallelUpdateGC)
}
db.shed, err = shed.NewDB(path)
if err != nil {
return nil, err
}
// Identify current storage schema by arbitrary name.
db.schemaName, err = db.shed.NewStringField("schema-name")
if err != nil {
return nil, err
}
schemaName, err := db.schemaName.Get()
if err != nil && !errors.Is(err, leveldb.ErrNotFound) {
return nil, err
}
if schemaName == "" {
// initial new localstore run
err := db.schemaName.Put(DbSchemaCurrent)
if err != nil {
return nil, err
}
} else {
// execute possible migrations
err = db.migrate(schemaName)
if err != nil {
return nil, err
}
}
// Persist gc size.
db.gcSize, err = db.shed.NewUint64Field("gc-size")
if err != nil {
return nil, err
}
// Index storing actual chunk address, data and bin id.
db.retrievalDataIndex, err = db.shed.NewIndex("Address->StoreTimestamp|BinID|Data", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
return fields.Address, nil
},
DecodeKey: func(key []byte) (e shed.Item, err error) {
e.Address = key
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
b := make([]byte, 16)
binary.BigEndian.PutUint64(b[:8], fields.BinID)
binary.BigEndian.PutUint64(b[8:16], uint64(fields.StoreTimestamp))
value = append(b, fields.Data...)
return value, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[8:16]))
e.BinID = binary.BigEndian.Uint64(value[:8])
e.Data = value[16:]
return e, nil
},
})
if err != nil {
return nil, err
}
// Index storing access timestamp for a particular address.
// It is needed in order to update gc index keys for iteration order.
db.retrievalAccessIndex, err = db.shed.NewIndex("Address->AccessTimestamp", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
return fields.Address, nil
},
DecodeKey: func(key []byte) (e shed.Item, err error) {
e.Address = key
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(fields.AccessTimestamp))
return b, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
e.AccessTimestamp = int64(binary.BigEndian.Uint64(value))
return e, nil
},
})
if err != nil {
return nil, err
}
// pull index allows history and live syncing per po bin
db.pullIndex, err = db.shed.NewIndex("PO|BinID->Hash|Tag", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
key = make([]byte, 41)
key[0] = db.po(swarm.NewAddress(fields.Address))
binary.BigEndian.PutUint64(key[1:9], fields.BinID)
return key, nil
},
DecodeKey: func(key []byte) (e shed.Item, err error) {
e.BinID = binary.BigEndian.Uint64(key[1:9])
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
value = make([]byte, 36) // 32 bytes address, 4 bytes tag
copy(value, fields.Address)
if fields.Tag != 0 {
binary.BigEndian.PutUint32(value[32:], fields.Tag)
}
return value, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
e.Address = value[:32]
if len(value) > 32 {
e.Tag = binary.BigEndian.Uint32(value[32:])
}
return e, nil
},
})
if err != nil {
return nil, err
}
// create a vector for bin IDs
db.binIDs, err = db.shed.NewUint64Vector("bin-ids")
if err != nil {
return nil, err
}
// create a pull syncing triggers used by SubscribePull function
db.pullTriggers = make(map[uint8][]chan struct{})
// push index contains as yet unsynced chunks
db.pushIndex, err = db.shed.NewIndex("StoreTimestamp|Hash->Tags", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
key = make([]byte, 40)
binary.BigEndian.PutUint64(key[:8], uint64(fields.StoreTimestamp))
copy(key[8:], fields.Address)
return key, nil
},
DecodeKey: func(key []byte) (e shed.Item, err error) {
e.Address = key[8:]
e.StoreTimestamp = int64(binary.BigEndian.Uint64(key[:8]))
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
tag := make([]byte, 4)
binary.BigEndian.PutUint32(tag, fields.Tag)
return tag, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
if len(value) == 4 { // only values with tag should be decoded
e.Tag = binary.BigEndian.Uint32(value)
}
return e, nil
},
})
if err != nil {
return nil, err
}
// create a push syncing triggers used by SubscribePush function
db.pushTriggers = make([]chan struct{}, 0)
	// gc index for removable chunks, ordered by ascending last access time
db.gcIndex, err = db.shed.NewIndex("AccessTimestamp|BinID|Hash->nil", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
b := make([]byte, 16, 16+len(fields.Address))
binary.BigEndian.PutUint64(b[:8], uint64(fields.AccessTimestamp))
binary.BigEndian.PutUint64(b[8:16], fields.BinID)
key = append(b, fields.Address...)
return key, nil
},
DecodeKey: func(key []byte) (e shed.Item, err error) {
e.AccessTimestamp = int64(binary.BigEndian.Uint64(key[:8]))
e.BinID = binary.BigEndian.Uint64(key[8:16])
e.Address = key[16:]
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
return nil, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
return e, nil
},
})
if err != nil {
return nil, err
}
	// Create an index structure for storing pinned chunks and their pin counts
db.pinIndex, err = db.shed.NewIndex("Hash->PinCounter", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
return fields.Address, nil
},
DecodeKey: func(key []byte) (e shed.Item, err error) {
e.Address = key
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b[:8], fields.PinCounter)
return b, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
e.PinCounter = binary.BigEndian.Uint64(value[:8])
return e, nil
},
})
if err != nil {
return nil, err
}
	// Create an index structure for excluding pinned chunks from gcIndex
db.gcExcludeIndex, err = db.shed.NewIndex("Hash->nil", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
return fields.Address, nil
},
DecodeKey: func(key []byte) (e shed.Item, err error) {
e.Address = key
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
return nil, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
return e, nil
},
})
if err != nil {
return nil, err
}
// start garbage collection worker
go db.collectGarbageWorker()
return db, nil
}
// Close closes the underlying database.
func (db *DB) Close() (err error) {
close(db.close)
// wait for all handlers to finish
done := make(chan struct{})
go func() {
db.updateGCWG.Wait()
db.subscritionsWG.Wait()
// wait for gc worker to
// return before closing the shed
<-db.collectGarbageWorkerDone
close(done)
}()
select {
case <-done:
case <-time.After(5 * time.Second):
db.logger.Errorf("localstore closed with still active goroutines")
// Print a full goroutine dump to debug blocking.
// TODO: use a logger to write a goroutine profile
prof := pprof.Lookup("goroutine")
err = prof.WriteTo(os.Stdout, 2)
if err != nil {
return err
}
}
return db.shed.Close()
}
// po computes the proximity order between the address
// and database base key.
func (db *DB) po(addr swarm.Address) (bin uint8) {
return swarm.Proximity(db.baseKey, addr.Bytes())
}
// DebugIndices returns the index sizes for all indexes in localstore
// the returned map keys are the index name, values are the number of elements in the index
func (db *DB) DebugIndices() (indexInfo map[string]int, err error) {
indexInfo = make(map[string]int)
for k, v := range map[string]shed.Index{
"retrievalDataIndex": db.retrievalDataIndex,
"retrievalAccessIndex": db.retrievalAccessIndex,
"pushIndex": db.pushIndex,
"pullIndex": db.pullIndex,
"gcIndex": db.gcIndex,
"gcExcludeIndex": db.gcExcludeIndex,
"pinIndex": db.pinIndex,
} {
indexSize, err := v.Count()
if err != nil {
return indexInfo, err
}
indexInfo[k] = indexSize
}
val, err := db.gcSize.Get()
if err != nil {
return indexInfo, err
}
indexInfo["gcSize"] = int(val)
return indexInfo, err
}
// chunkToItem creates new Item with data provided by the Chunk.
func chunkToItem(ch swarm.Chunk) shed.Item {
return shed.Item{
Address: ch.Address().Bytes(),
Data: ch.Data(),
Tag: ch.TagID(),
}
}
// addressToItem creates new Item with a provided address.
func addressToItem(addr swarm.Address) shed.Item {
return shed.Item{
Address: addr.Bytes(),
}
}
// addressesToItems constructs a slice of Items with only
// addresses set on them.
func addressesToItems(addrs ...swarm.Address) []shed.Item {
items := make([]shed.Item, len(addrs))
for i, addr := range addrs {
items[i] = shed.Item{
Address: addr.Bytes(),
}
}
return items
}
// now is a helper function that returns a current unix timestamp
// in UTC timezone.
// It is set in the init function for usage in production, and
// optionally overridden in tests for data validation.
var now func() int64
func init() {
// set the now function
now = func() (t int64) {
return time.Now().UTC().UnixNano()
}
}
// totalTimeMetric adds the time elapsed between the provided start time
// and the moment of the call to the given counter metric.
func totalTimeMetric(metric prometheus.Counter, start time.Time) {
totalTime := time.Since(start)
metric.Add(float64(totalTime))
}
| 1 | 10,558 | I think that bytes is too precise. Can we just calculate the approximate value in appropriate units? Something like MB, GB depending on the value, `db capacity: 5000000 chunks (approximately 20GB)` . | ethersphere-bee | go |
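The review comment attached to this record asks for the capacity log line to report an approximate size in appropriate units (MB, GB) instead of an exact byte count. A small, self-contained sketch of one way to do that; formatBytes and the inlined chunk size are assumptions for illustration, not part of the bee codebase:

package main

import "fmt"

// formatBytes renders a byte count in the nearest binary unit, along the
// lines the reviewer suggests for the capacity log line.
func formatBytes(n uint64) string {
	const unit = 1024
	if n < unit {
		return fmt.Sprintf("%d B", n)
	}
	div, exp := uint64(unit), 0
	for m := n / unit; m >= unit; m /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.1f %cB", float64(n)/float64(div), "KMGTPE"[exp])
}

func main() {
	const chunkSize = 4096 // swarm.ChunkSize at the time of this change
	var capacity uint64 = 5000000
	fmt.Printf("db capacity: %d chunks (approximately %s)\n",
		capacity, formatBytes(capacity*chunkSize))
}

With the defaults above this prints "db capacity: 5000000 chunks (approximately 19.1 GB)", close to the ~20GB figure the reviewer quotes.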
@@ -0,0 +1,4 @@
+let label = node.getAttribute('alt');
+let text = node.textContent;
+
+return (!!label && axe.commons.text.sanitize(label).trim() !== '' && axe.commons.text.sanitize(text).trim() !== ''); | 1 | 1 | 11,880 | This check shouldn't look at the content, you're already doing this with `none-empty-text`. I also think this check should be renamed to `none-empty-alt` or something like it. There is nothing specific to applets in this check to warrant putting `applet` in the check ID. | dequelabs-axe-core | js |
|
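The reviewer's point on this record is that the check should only inspect the alt attribute — text content is already covered by a separate non-empty-text check — and that the check should be renamed accordingly. A hypothetical evaluate body for such a non-empty-alt check (axe check bodies run with node in scope); the check name and its metafile wiring are assumptions:

// Hypothetical non-empty-alt evaluate body: inspect only the alt
// attribute and leave text content to the non-empty-text check.
let label = node.getAttribute('alt');
return !!label && axe.commons.text.sanitize(label).trim() !== '';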
@@ -31,9 +31,11 @@ class TaxCreationForm extends BaseForm
protected $taxEngine = null;
- public function __construct(Request $request, $type = "form", $data = array(), $options = array(), TaxEngine $taxEngine = null)
+ public function __construct(Request $request, $type = "form", $data = array(), $options = array())
{
- $this->taxEngine = $taxEngine;
+ $this->taxEngine = $options["tax_engine"];
+
+ unset($options["tax_engine"]);
parent::__construct($request, $type, $data, $options);
} | 1 | <?php
/*************************************************************************************/
/* This file is part of the Thelia package. */
/* */
/* Copyright (c) OpenStudio */
/* email : dev@thelia.net */
/* web : http://www.thelia.net */
/* */
/* For the full copyright and license information, please view the LICENSE.txt */
/* file that was distributed with this source code. */
/*************************************************************************************/
namespace Thelia\Form;
use Symfony\Component\Validator\Constraints;
use Symfony\Component\Validator\Constraints\NotBlank;
use Thelia\Core\Form\Type\TheliaType;
use Thelia\Core\Translation\Translator;
use Thelia\TaxEngine\TaxEngine;
use Thelia\Model\Tax;
use Thelia\Core\HttpFoundation\Request;
/**
* Class TaxCreationForm
* @package Thelia\Form
* @author Etienne Roudeix <eroudeix@openstudio.fr>
*/
class TaxCreationForm extends BaseForm
{
use StandardDescriptionFieldsTrait;
protected $taxEngine = null;
public function __construct(Request $request, $type = "form", $data = array(), $options = array(), TaxEngine $taxEngine = null)
{
$this->taxEngine = $taxEngine;
parent::__construct($request, $type, $data, $options);
}
protected function buildForm($change_mode = false)
{
if ($this->taxEngine == null) {
throw new \LogicException(Translator::getInstance()->trans("The TaxEngine should be passed to this form before using it."));
}
$types = $this->taxEngine->getTaxTypeList();
$typeList = array();
$requirementList = array();
foreach ($types as $classname) {
$instance = new $classname();
$typeList[Tax::escapeTypeName($classname)] = $instance->getTitle();
$requirementList[$classname] = $instance->getRequirementsDefinition();
}
$this->formBuilder
->add("locale", "text", array(
"constraints" => array(new NotBlank())
))
->add("type", "choice", array(
"choices" => $typeList,
"required" => true,
"constraints" => array(
new Constraints\NotBlank(),
),
"label" => Translator::getInstance()->trans("Type"),
"label_attr" => array("for" => "type_field"),
))
;
foreach ($requirementList as $name => $requirements) {
foreach ($requirements as $requirement) {
$this->formBuilder
// Replace the '\' in the class name by hyphens
// See TaxController::getRequirements if some changes are made about this.
->add(Tax::escapeTypeName($name) . ':' . $requirement->getName(), new TheliaType(), array(
//"instance" => $requirement->getType(),
"constraints" => array(
new Constraints\Callback(
array(
"methods" => array(
array($requirement->getType(), "verifyForm"),
),
)
),
),
"attr" => array(
"tag" => "requirements",
"tax_type" => Tax::escapeTypeName($name),
),
"label" => Translator::getInstance()->trans($requirement->getName()),
"type" => $requirement->getType()->getFormType(),
"options" => $requirement->getType()->getFormOptions(),
))
;
}
}
$this->addStandardDescFields(array('postscriptum', 'chapo', 'locale'));
}
public function getName()
{
return "thelia_tax_creation";
}
}
| 1 | 10,722 | Why did you change the signature of this method ? You can break BC doing that | thelia-thelia | php |
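The reviewer's concern here is backward compatibility: removing the fifth constructor parameter breaks any caller that passes the tax engine positionally. One hypothetical BC-friendly variant keeps the old parameter and falls back to the new options entry — a sketch of a replacement constructor for the class above, not the actual Thelia change:

public function __construct(Request $request, $type = "form", $data = array(), $options = array(), TaxEngine $taxEngine = null)
{
    // Prefer the legacy argument, fall back to the new options entry.
    if (null === $taxEngine && isset($options["tax_engine"])) {
        $taxEngine = $options["tax_engine"];
    }
    $this->taxEngine = $taxEngine;

    unset($options["tax_engine"]);

    parent::__construct($request, $type, $data, $options);
}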
@@ -49,6 +49,12 @@ func Run(ctx context.Context, cfg *config.Node) error {
}
template := configToml
+
+ if cfg.ConfigTemplate != "" {
+ fileBytes, _ := ioutil.ReadFile(cfg.ConfigTemplate)
+ template = string(fileBytes[:])
+ }
+
if !cfg.NoFlannel {
template += configCNIToml
} | 1 | package containerd
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/containerd/containerd"
"github.com/containerd/containerd/namespaces"
"github.com/natefinch/lumberjack"
util2 "github.com/rancher/k3s/pkg/agent/util"
"github.com/rancher/k3s/pkg/daemons/config"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/util"
)
const (
maxMsgSize = 1024 * 1024 * 16
configToml = `
[plugins.opt]
path = "%OPT%"
[plugins.cri]
stream_server_address = "%NODE%"
stream_server_port = "10010"
`
configCNIToml = `
[plugins.cri.cni]
bin_dir = "%CNIBIN%"
conf_dir = "%CNICFG%"
`
)
func Run(ctx context.Context, cfg *config.Node) error {
args := []string{
"containerd",
"-c", cfg.Containerd.Config,
"-a", cfg.Containerd.Address,
"--state", cfg.Containerd.State,
"--root", cfg.Containerd.Root,
}
template := configToml
if !cfg.NoFlannel {
template += configCNIToml
}
template = strings.Replace(template, "%OPT%", cfg.Containerd.Opt, -1)
template = strings.Replace(template, "%CNIBIN%", cfg.AgentConfig.CNIBinDir, -1)
template = strings.Replace(template, "%CNICFG%", cfg.AgentConfig.CNIConfDir, -1)
template = strings.Replace(template, "%NODE%", cfg.AgentConfig.NodeName, -1)
if err := util2.WriteFile(cfg.Containerd.Config, template); err != nil {
return err
}
if os.Getenv("CONTAINERD_LOG_LEVEL") != "" {
args = append(args, "-l", os.Getenv("CONTAINERD_LOG_LEVEL"))
}
stdOut := io.Writer(os.Stdout)
stdErr := io.Writer(os.Stderr)
if cfg.Containerd.Log != "" {
logrus.Infof("Logging containerd to %s", cfg.Containerd.Log)
stdOut = &lumberjack.Logger{
Filename: cfg.Containerd.Log,
MaxSize: 50,
MaxBackups: 3,
MaxAge: 28,
Compress: true,
}
stdErr = stdOut
}
go func() {
logrus.Infof("Running containerd %s", config.ArgString(args[1:]))
cmd := exec.Command(args[0], args[1:]...)
cmd.Stdout = stdOut
cmd.Stderr = stdErr
cmd.SysProcAttr = &syscall.SysProcAttr{
Pdeathsig: syscall.SIGKILL,
}
if err := cmd.Run(); err != nil {
fmt.Fprintf(os.Stderr, "containerd: %s\n", err)
}
os.Exit(1)
}()
for {
		addr, dialer, err := util.GetAddressAndDialer("unix://" + cfg.Containerd.Address)
if err != nil {
time.Sleep(1 * time.Second)
continue
}
		conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(3*time.Second), grpc.WithDialer(dialer), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize)))
if err != nil {
time.Sleep(1 * time.Second)
continue
}
c := runtimeapi.NewRuntimeServiceClient(conn)
_, err = c.Version(ctx, &runtimeapi.VersionRequest{
Version: "0.1.0",
})
if err == nil {
conn.Close()
break
}
conn.Close()
logrus.Infof("Waiting for containerd startup: %v", err)
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(time.Second):
}
}
return preloadImages(cfg)
}
func preloadImages(cfg *config.Node) error {
fileInfo, err := os.Stat(cfg.Images)
if os.IsNotExist(err) {
return nil
} else if err != nil {
logrus.Errorf("Unable to find images in %s: %v", cfg.Images, err)
return nil
}
if !fileInfo.IsDir() {
return nil
}
fileInfos, err := ioutil.ReadDir(cfg.Images)
if err != nil {
logrus.Errorf("Unable to read images in %s: %v", cfg.Images, err)
return nil
}
client, err := containerd.New(cfg.Containerd.Address)
if err != nil {
return err
}
defer client.Close()
ctxContainerD := namespaces.WithNamespace(context.Background(), "k8s.io")
for _, fileInfo := range fileInfos {
if fileInfo.IsDir() {
continue
}
filePath := filepath.Join(cfg.Images, fileInfo.Name())
file, err := os.Open(filePath)
if err != nil {
logrus.Errorf("Unable to read %s: %v", filePath, err)
continue
}
logrus.Debugf("Import %s", filePath)
_, err = client.Import(ctxContainerD, file)
if err != nil {
logrus.Errorf("Unable to import %s: %v", filePath, err)
}
}
return nil
}
| 1 | 7,287 | can you catch the error here please and return err if it can't read the template | k3s-io-k3s | go |
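The reviewer asks for the ignored ioutil.ReadFile error in the patch to be caught and returned. A small, self-contained sketch of the requested shape, factored into a helper so the error path is explicit; loadTemplate is a hypothetical name, not a function in k3s:

package containerd

import "io/ioutil"

// loadTemplate returns the custom config template when a path is set,
// propagating the read error as the reviewer requests; otherwise it
// falls back to the built-in template.
func loadTemplate(path, fallback string) (string, error) {
	if path == "" {
		return fallback, nil
	}
	fileBytes, err := ioutil.ReadFile(path)
	if err != nil {
		return "", err
	}
	return string(fileBytes), nil
}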
@@ -58,6 +58,7 @@ public class MainController {
appsController.setOnAppLoaded(() -> {
enginesController.loadEngines();
containersController.loadContainers();
+ libraryController.updateLibrary();
});
appsController.loadApps(); | 1 | /*
* Copyright (C) 2015-2017 PÂRIS Quentin
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package org.phoenicis.javafx.controller;
import javafx.application.Platform;
import javafx.scene.control.Alert;
import javafx.scene.control.ButtonType;
import org.phoenicis.javafx.controller.apps.AppsController;
import org.phoenicis.javafx.controller.containers.ContainersController;
import org.phoenicis.javafx.controller.engines.EnginesController;
import org.phoenicis.javafx.controller.library.LibraryController;
import org.phoenicis.javafx.controller.settings.SettingsController;
import org.phoenicis.javafx.views.common.ThemeManager;
import org.phoenicis.javafx.views.mainwindow.MainWindow;
import org.phoenicis.settings.SettingsManager;
import java.util.Optional;
import static org.phoenicis.configuration.localisation.Localisation.tr;
public class MainController {
private final MainWindow mainWindow;
private final SettingsManager settingsManager;
private String applicationName;
public MainController(String applicationName, LibraryController libraryController, AppsController appsController,
EnginesController enginesController, ContainersController containersController,
SettingsController settingsController, ThemeManager themeManager, SettingsManager settingsManager) {
super();
this.applicationName = applicationName;
this.mainWindow = new MainWindow(applicationName, libraryController.getView(), appsController.getView(),
enginesController.getView(), containersController.getView(), settingsController.getView(), themeManager,
settingsManager);
this.settingsManager = settingsManager;
libraryController.setOnTabOpened(mainWindow::showLibrary);
appsController.setOnAppLoaded(() -> {
enginesController.loadEngines();
containersController.loadContainers();
});
appsController.loadApps();
}
public void show() {
mainWindow.show();
}
public void setOnClose(Runnable onClose) {
this.mainWindow.setOnCloseRequest(event -> {
Alert alert = new Alert(Alert.AlertType.CONFIRMATION);
alert.initOwner(this.mainWindow);
alert.setTitle(applicationName);
alert.setHeaderText(tr("Are you sure you want to close all {0} windows?", applicationName));
Optional<ButtonType> result = alert.showAndWait();
if (result.isPresent() && result.get() == ButtonType.OK) {
this.settingsManager.setWindowHeight(this.mainWindow.getHeight());
this.settingsManager.setWindowWidth(this.mainWindow.getWidth());
this.settingsManager.setWindowMaximized(this.mainWindow.isMaximized());
this.settingsManager.save();
Platform.exit();
onClose.run();
} else {
event.consume();
}
});
}
}
| 1 | 10,635 | Is this method called at another location too? I'm just asking because you didn't remove another call to `updateLibrary` | PhoenicisOrg-phoenicis | java |
@@ -14,7 +14,7 @@ class PublicPagePolicy < ApplicationPolicy
end
def template_export?
- @object.is_default || @object.org.funder?
+ @object.present? && ( @object.is_default || @object.org.funder? ) && @object.published
end
def plan_export? | 1 | class PublicPagePolicy < ApplicationPolicy
def initialize(object, object2 = nil)
@object = object
@object2 = object2
end
def plan_index?
true
end
def template_index?
true
end
def template_export?
@object.is_default || @object.org.funder?
end
def plan_export?
@object.publicly_visible?
end
def plan_organisationally_exportable?
plan = @object
user = @object2
if plan.is_a?(Plan) && user.is_a?(User)
return plan.publicly_visible? || (plan.organisationally_visible? && plan.owner.present? && plan.owner.org_id == user.org_id)
end
    false
end
end
| 1 | 18,300 | Not necessary but you should be able to add a `?` on `is_default?` and `published?` | DMPRoadmap-roadmap | rb |
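The reviewer notes that ActiveRecord generates predicate methods for boolean columns, so the attribute reads can take the ? form. A sketch of the policy method with that change, assuming is_default and published are boolean columns on the template model:

def template_export?
  @object.present? && (@object.is_default? || @object.org.funder?) && @object.published?
end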