| query (string, 8–6.75k chars) | document (string, 9–1.89M chars) | negatives (list of 19 strings) | metadata (dict) |
|---|---|---|---|
	Math Abs returns the absolute value of operand. 
 | 
	func Abs(operand int) int {
	if operand < 0 {
		return operand * -1
	}
	return operand
} 
 | 
	[
  "func Abs(a int) int {\n\treturn neogointernal.Opcode1(\"ABS\", a).(int)\n}",
  "func Abs(x float64) float64 {\n\tif x < 0 {\n\t\tx = -x\n\t}\n\treturn x\n}",
  "func Abs(x float64) float64 {\n\tif x < 0 {\n\t\treturn -x\n\t}\n\tif x == 0 {\n\t\treturn 0 // return correctly abs(-0)\n\t}\n\treturn x\n}",
  "func Abs(number int) int {\n\tif number > 0 {\n\t\treturn number\n\t}\n\treturn -number\n}",
  "func (e *ErrDecimal) Abs(d, x *Decimal) *Decimal {\n\treturn e.op2(d, x, e.Ctx.Abs)\n}",
  "func Abs(v int) int {\n\tif v > 0 {\n\t\treturn v\n\t}\n\treturn -v\n}",
  "func Abs(a *big.Float) *big.Float {\n\treturn ZeroBigFloat().Abs(a)\n}",
  "func (d Decimal) Abs() Decimal {\n\tif !d.IsNegative() {\n\t\treturn d\n\t}\n\td.ensureInitialized()\n\td2Value := new(big.Int).Abs(d.value)\n\treturn Decimal{\n\t\tvalue: d2Value,\n\t\texp:   d.exp,\n\t}\n}",
  "func Abs(scope *Scope, x tf.Output) (y tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"Abs\",\n\t\tInput: []tf.Input{\n\t\t\tx,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}",
  "func Abs(value gcv.Value) gcv.Value {\n\tif value.Type() == gcv.Complex {\n\t\treturn gcv.MakeValue(cmplx.Abs(value.Complex()))\n\t}\n\treturn gcv.MakeValue(math.Abs(value.Real()))\n}",
  "func (d Decimal) Abs() Decimal {\n\td.ensureInitialized()\n\td2Value := new(big.Int).Abs(d.value)\n\treturn Decimal{\n\t\tvalue: d2Value,\n\t\texp:   d.exp,\n\t}\n}",
  "func (d Decimal) Abs() Decimal {\n\treturn Decimal{\n\t\tdec: d.dec.Abs(),\n\t}\n}",
  "func (z *Float) Abs(x *Float) *Float {}",
  "func (d TGDecimal) Abs() TGDecimal {\n\td.ensureInitialized()\n\td2Value := new(big.Int).Abs(d.value)\n\treturn TGDecimal{\n\t\tvalue: d2Value,\n\t\texp:   d.exp,\n\t}\n}",
  "func abs(x int64) int64 {\n\tif x < 0 {\n\t\treturn -x\n\t}\n\treturn x\n}",
  "func (f MyFloat) Abs() float64 {\n\tif f < 0 {\n\t\treturn float64(-f)\n\t}\n\treturn float64(f)\n}",
  "func Abs(t1 TermT) TermT {\n\treturn TermT(C.yices_abs(C.term_t(t1)))\n}",
  "func Abs(z, x *big.Int) *big.Int {\n\treturn z.Abs(x)\n}",
  "func (m *Money) Abs() *Money {\n\tif m.M < 0 {\n\t\tm.Neg()\n\t}\n\treturn m\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
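The metadata block repeated on every record declares a single triplet objective over the three content columns. Below is a minimal Go sketch of what that implies for a consumer, assuming a hypothetical Row type that mirrors the columns (none of these names come from the dataset itself): each record fans out into one (anchor, positive, negative) triple per negative.

```go
package main

import "fmt"

// Row mirrors one record of this dataset; the type and field names are
// illustrative, not part of the data itself.
type Row struct {
	Query     string   // natural-language description (anchor)
	Document  string   // the matching Go function (positive)
	Negatives []string // 19 near-miss functions (negatives)
}

// Triplet is one (anchor, positive, negative) training example, as implied
// by the "triplet" objective in each record's metadata.
type Triplet struct{ Anchor, Positive, Negative string }

// expand fans a record out into one triplet per negative.
func expand(r Row) []Triplet {
	ts := make([]Triplet, 0, len(r.Negatives))
	for _, n := range r.Negatives {
		ts = append(ts, Triplet{r.Query, r.Document, n})
	}
	return ts
}

func main() {
	r := Row{
		Query:     "Math Abs returns the absolute value of operand.",
		Document:  "func Abs(operand int) int { /* ... */ }",
		Negatives: []string{"func Abs(x float64) float64 { /* ... */ }"},
	}
	for _, t := range expand(r) {
		fmt.Printf("anchor=%q\npositive=%q\nnegative=%q\n", t.Anchor, t.Positive, t.Negative)
	}
}
```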
					
	Add a to operand. 
 | 
	func Add(a, operand int) int { return operand + a } 
 | 
	[
  "func Add() {\n\tMatch('+')\n\tTerm()\n\tEmitLn(\"ADD (SP)+,D0\")\n}",
  "func Add(a, b Expr) Expr {\n\treturn &addOp{&simpleOperator{a, b, scanner.ADD}}\n}",
  "func Add(a, b Expr) Expr {\n\treturn &arithmeticOperator{&simpleOperator{a, b, scanner.ADD}}\n}",
  "func ADDPD(mx, x operand.Op) { ctx.ADDPD(mx, x) }",
  "func (gdt *Basis) OperatorAdd(b Basis) Basis {\n\targ0 := gdt.getBase()\n\targ1 := b.getBase()\n\n\tret := C.go_godot_basis_operator_add(GDNative.api, arg0, arg1)\n\n\treturn Basis{base: &ret}\n\n}",
  "func (proxy CalculatorProxy) Add(a int, b int) int {\n\trequestor := distribution.NewRequestor()\n\tinvocation := distribution.NewInvocation(\n\t\tproxy.client.ObjectID,\n\t\tproxy.client.Hostname,\n\t\tproxy.client.Port,\n\t\t\"add\",\n\t\t[]int{a, b},\n\t)\n\n\trequestor.Invoke(invocation)\n\n\treturn 2 + 2\n}",
  "func (s *Script) AddOperand(operand []byte) *Script {\n\tdataLen := len(operand)\n\n\tif dataLen < int(OPPUSHDATA1) {\n\t\t*s = append(*s, byte(dataLen))\n\t} else if dataLen <= 0xff {\n\t\t*s = append(*s, byte(OPPUSHDATA1), byte(dataLen))\n\t} else if dataLen <= 0xffff {\n\t\tbuf := make([]byte, 2)\n\t\tbinary.LittleEndian.PutUint16(buf, uint16(dataLen))\n\t\t*s = append(*s, byte(OPPUSHDATA2))\n\t\t*s = append(*s, buf...)\n\t} else {\n\t\tbuf := make([]byte, 4)\n\t\tbinary.LittleEndian.PutUint32(buf, uint32(dataLen))\n\t\t*s = append(*s, byte(OPPUSHDATA4))\n\t\t*s = append(*s, buf...)\n\t}\n\n\t// Append the actual operand\n\t*s = append(*s, operand...)\n\treturn s\n}",
  "func (v V) Add(a V) V {\n\treturn V{v.X + a.X, v.Y + a.Y}\n}",
  "func ADDPS(mx, x operand.Op) { ctx.ADDPS(mx, x) }",
  "func Add( a *context.Value, b *context.Value ) (*context.Value,error) {\n  if a != nil && b != nil {\n    switch a.OperationType( b ) {\n      case context.VAR_BOOL:\n        return context.IntValue( a.Int() + b.Int() ), nil\n      case context.VAR_INT:\n        return context.IntValue( a.Int() + b.Int() ), nil\n      case context.VAR_FLOAT:\n        return context.FloatValue( a.Float() + b.Float() ), nil\n      case context.VAR_STRING:\n        return context.StringValue( a.String() + b.String() ), nil\n      case context.VAR_COMPLEX:\n        return context.ComplexValue( a.Complex() + b.Complex() ), nil\n    }\n  }\n\n  return nil, errors.New( \"Unsupported type for add\" )\n}",
  "func (m *Manager) PushOperand(operand string) {\n\toperandData := m.getOperandData(operand)\n\toperandName := operandData.Name\n\t// If the operand is an attribute from an instance we set tag in it's ID\n\tif operandData.FromSelf {\n\t\toperandName = fmt.Sprintf(\"self_%d_%s\", operandData.SelfDir, operandData.Name)\n\t}\n\telement := NewElement(operandData.Dir, operandName, operandData.TypeOf, operandData.Class)\n\tm.operands.Push(element)\n}",
  "func (o *Operator) Add(f manager.Runnable) error {\n\treturn o.mgr.Add(f)\n}",
  "func (gdt *Vector3) OperatorAdd(b Vector3) Vector3 {\n\targ0 := gdt.getBase()\n\targ1 := b.getBase()\n\n\tret := C.go_godot_vector3_operator_add(GDNative.api, arg0, arg1)\n\n\treturn Vector3{base: &ret}\n\n}",
  "func Add(t1 TermT, t2 TermT) TermT {\n\treturn TermT(C.yices_add(C.term_t(t1), C.term_t(t2)))\n}",
  "func (m *Integer) Add(n int64) { m.value.Add(n) }",
  "func (g *Gini) Add(m z.Lit) {\n\tg.xo.Add(m)\n}",
  "func (z *Float64) Plus(y *Float64, a float64) *Float64 {\n\tz.l = y.l + a\n\tz.r = y.r\n\treturn z\n}",
  "func ADDQ(imr, mr operand.Op) { ctx.ADDQ(imr, mr) }",
  "func ADDW(imr, amr operand.Op) { ctx.ADDW(imr, amr) }"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Subtract a from operand. 
 | 
	func Subtract(a, operand int) int { return operand - a } 
 | 
	[
  "func Subtract() {\n\tMatch('-')\n\tTerm()\n\tEmitLn(\"SUB (SP)+,D0\")\n\tEmitLn(\"NEG D0\")\n}",
  "func (gdt *Basis) OperatorSubtract(b Basis) Basis {\n\targ0 := gdt.getBase()\n\targ1 := b.getBase()\n\n\tret := C.go_godot_basis_operator_subtract(GDNative.api, arg0, arg1)\n\n\treturn Basis{base: &ret}\n\n}",
  "func (e Exp) Sub(operand interface{}) Exp {\n\treturn naryBuiltin(subtractKind, nil, e, operand)\n}",
  "func (t Tuple) Subtract(u Tuple) Tuple {\n\treturn TupleSubtract(t, u)\n}",
  "func (gdt *Vector3) OperatorSubtract(b Vector3) Vector3 {\n\targ0 := gdt.getBase()\n\targ1 := b.getBase()\n\n\tret := C.go_godot_vector3_operator_subtract(GDNative.api, arg0, arg1)\n\n\treturn Vector3{base: &ret}\n\n}",
  "func (s *server) Subtract(ctx context.Context, in *pb.SubtractRequest) (*pb.SubtractReply, error) {\n\treturn &pb.SubtractReply{N1: in.N1 - in.N2}, nil\n}",
  "func Sub( a *context.Value, b *context.Value ) (*context.Value,error) {\n  if a != nil && b != nil {\n    switch a.OperationType( b ) {\n      case context.VAR_BOOL:\n        return context.IntValue( a.Int() - b.Int() ), nil\n      case context.VAR_INT:\n        return context.IntValue( a.Int() - b.Int() ), nil\n      case context.VAR_FLOAT:\n        return context.FloatValue( a.Float() - b.Float() ), nil\n      case context.VAR_COMPLEX:\n        return context.ComplexValue( a.Complex() - b.Complex() ), nil\n      default:\n    }\n  }\n\n  return nil, errors.New( \"Unsupported type for sub\" )\n}",
  "func calcSubtract(rOpd, lOpd formulaArg, opdStack *Stack) error {\n\tlOpdVal := lOpd.ToNumber()\n\tif lOpdVal.Type != ArgNumber {\n\t\treturn errors.New(lOpdVal.Value())\n\t}\n\trOpdVal := rOpd.ToNumber()\n\tif rOpdVal.Type != ArgNumber {\n\t\treturn errors.New(rOpdVal.Value())\n\t}\n\topdStack.Push(newNumberFormulaArg(lOpdVal.Number - rOpdVal.Number))\n\treturn nil\n}",
  "func Subtract(a, b float64) float64 {\n\treturn a - b\n}",
  "func (cal *Calculate) sub(value float64) (result float64) {\n\tif len(cal.Arg) == 2 {\n\t\treturn (cal.Arg[0] - cal.Arg[1])\n\t} else if len(cal.Arg) == 1 {\n\t\treturn (value - cal.Arg[0])\n\t}\n\n\tlog.Fatalln(\"Please check the data format of the calculation unit\")\n\treturn\n}",
  "func (v *Point) Subtract(p, q *Point) *Point {\n\tvar qNeg Point\n\tqNeg.Negate(q)\n\treturn v.Add(p, &qNeg)\n}",
  "func Subtract(left, right int) int {\n\treturn left - right\n}",
  "func (date Nakamura) Subtract(value int, format string) Nakamura {\n\treturn Add(date, -value, format)\n}",
  "func Subtract(args ...int) int {\n\tif len(args) < 2 {\n\t\treturn 0\n\t}\n\n\tres := args[0]\n\tfor i := 1; i < len(args); i++ {\n\t\tres -= args[i]\n\t}\n\n\treturn res\n}",
  "func (z *Int) Sub(x, y *Int) *Int {}",
  "func SUBPD(mx, x operand.Op) { ctx.SUBPD(mx, x) }",
  "func (v *Vector2) Subtract(b Vector2) {\r\n\tv.x -= b.x\r\n\tv.y -= b.y\r\n}",
  "func (b *boltIssueStore) Subtract(delta *Annotation) error {\n\treturn b.update(nil, delta, subtractOP)\n}",
  "func sub(x, y int) (answer int, err error) {\n\tanswer = x - y\n\treturn\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Divide operand by a. 
 | 
	func Divide(a, operand int) int { return operand / a } 
 | 
	[
  "func (e Exp) Div(operand interface{}) Exp {\n\treturn naryBuiltin(divideKind, nil, e, operand)\n}",
  "func (c *calculon) Divide(ctx context.Context, arg calculator.Operand) (calculator.Result, error) {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\tif arg.Value == 0 {\n\t\treturn calculator.Result{}, calculator.InvalidArgumentError(map[string]string{\"value\": \"cannot be 0\"})\n\t}\n\tc.current /= arg.Value\n\treturn calculator.Result{c.current}, nil\n}",
  "func Div(a, b Expr) Expr {\n\treturn &arithmeticOperator{&simpleOperator{a, b, scanner.DIV}}\n}",
  "func DIVB(mr operand.Op) { ctx.DIVB(mr) }",
  "func Divide() {\n\tMatch('/')\n\tFactor()\n\tEmitLn(\"MOVE (SP)+,D1\")\n\tEmitLn(\"DIVS D1,D0\")\n}",
  "func ValueDiv(a, b reflect.Value) (reflect.Value, error) {\n\taBkind := GetBaseKind(a)\n\tbBkind := GetBaseKind(b)\n\n\tswitch aBkind {\n\tcase reflect.Int64:\n\t\tswitch bBkind {\n\t\tcase reflect.Int64:\n\t\t\treturn reflect.ValueOf(a.Int() / b.Int()), nil\n\t\tcase reflect.Uint64:\n\t\t\treturn reflect.ValueOf(a.Int() / int64(b.Uint())), nil\n\t\tcase reflect.Float64:\n\t\t\treturn reflect.ValueOf(float64(a.Int()) / b.Float()), nil\n\t\tdefault:\n\t\t\treturn reflect.ValueOf(nil), errors.Errorf(\"Can not do division math operator between %s and %s\", a.Kind().String(), b.Kind().String())\n\t\t}\n\tcase reflect.Uint64:\n\t\tswitch bBkind {\n\t\tcase reflect.Int64:\n\t\t\treturn reflect.ValueOf(int64(a.Uint()) / b.Int()), nil\n\t\tcase reflect.Uint64:\n\t\t\treturn reflect.ValueOf(a.Uint() / b.Uint()), nil\n\t\tcase reflect.Float64:\n\t\t\treturn reflect.ValueOf(float64(a.Uint()) / b.Float()), nil\n\t\tdefault:\n\t\t\treturn reflect.ValueOf(nil), errors.Errorf(\"Can not do division math operator between %s and %s\", a.Kind().String(), b.Kind().String())\n\t\t}\n\tcase reflect.Float64:\n\t\tswitch bBkind {\n\t\tcase reflect.Int64:\n\t\t\treturn reflect.ValueOf(a.Float() / float64(b.Int())), nil\n\t\tcase reflect.Uint64:\n\t\t\treturn reflect.ValueOf(a.Float() / float64(b.Uint())), nil\n\t\tcase reflect.Float64:\n\t\t\treturn reflect.ValueOf(a.Float() / b.Float()), nil\n\t\tdefault:\n\t\t\treturn reflect.ValueOf(nil), errors.Errorf(\"Can not do division math operator between %s and %s\", a.Kind().String(), b.Kind().String())\n\t\t}\n\tdefault:\n\t\treturn reflect.ValueOf(nil), errors.Errorf(\"Can not do division math operator between %s and %s\", a.Kind().String(), b.Kind().String())\n\t}\n}",
  "func Divide() {\n\tMatch('/')\n\tFactor()\n\tEmitLn(\"MOVE (SP)+,D1\")\n\tEmitLn(\"EXS.L D0\")\n\tEmitLn(\"DIVS D1,D0\")\n}",
  "func Div(a, b Expr) Expr {\n\treturn &divOp{&simpleOperator{a, b, scanner.DIV}}\n}",
  "func Div( a *context.Value, b *context.Value ) (*context.Value,error) {\n  if a != nil && b != nil {\n    if b.IsZero() {\n      return nil, errors.New( \"Division by zero\")\n    }\n\n    switch a.OperationType( b ) {\n      case context.VAR_BOOL:\n        return context.IntValue( a.Int() / b.Int() ), nil\n      case context.VAR_INT:\n        return context.FloatValue( a.Float() / b.Float() ), nil\n      case context.VAR_FLOAT:\n        return context.FloatValue( a.Float() / b.Float() ), nil\n      case context.VAR_COMPLEX:\n        return context.ComplexValue( a.Complex() / b.Complex() ), nil\n    }\n  }\n\n  return nil, errors.New( \"Unsupported type for div\" )\n}",
  "func DIVPD(mx, x operand.Op) { ctx.DIVPD(mx, x) }",
  "func DIVL(mr operand.Op) { ctx.DIVL(mr) }",
  "func Div(a float64, b float64) (float64, error) {\n\tif b == 0 {\n\t\treturn 0.0, errors.New(\"Can't devide by zero\")\n\t}\n\n\treturn a / b, nil\n}",
  "func DIVQ(mr operand.Op) { ctx.DIVQ(mr) }",
  "func (v Fraction) Div(a Fraction) Fraction {\n\treturn NewFraction(\n\t\tv.negativeFactor()*a.negativeFactor()*int(v.numerator*a.denominator),\n\t\tint(v.denominator*a.numerator),\n\t)\n}",
  "func Command_Div(script *rex.Script, params []*rex.Value) {\n\tif len(params) != 2 {\n\t\trex.ErrorParamCount(\"float:div\", \"2\")\n\t}\n\n\tscript.RetVal = rex.NewValueFloat64(params[0].Float64() / params[1].Float64())\n\treturn\n}",
  "func (z *Float64) Divide(y *Float64, a float64) *Float64 {\n\tz.l = y.l / a\n\tz.r = y.r / a\n\treturn z\n}",
  "func (a Vector) Div(b float64) Vector {\n    return Vector{a.X / b, a.Y / b}\n}",
  "func (b ValExprBuilder) Div(expr interface{}) ValExprBuilder {\n\treturn b.makeBinaryExpr('/', expr)\n}",
  "func gfDivide(a, b gfElement) gfElement {\n\tif a == gfZero {\n\t\treturn gfZero\n\t} else if b == gfZero {\n\t\tlog.Panicln(\"Divide by zero\")\n\t}\n\n\treturn gfMultiply(a, gfInverse(b))\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
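A note on the Divide document above: it is integer division, which truncates toward zero and panics when a is 0, unlike negatives such as Div and gfDivide that guard the zero case explicitly. A small illustrative sketch:

```go
package main

import "fmt"

// Divide is copied from the document above; a and operand are ints, so this
// is Go integer division.
func Divide(a, operand int) int { return operand / a }

func main() {
	fmt.Println(Divide(2, 7))  // 3: truncates toward zero
	fmt.Println(Divide(2, -7)) // -3: truncation, not flooring
	// Divide(0, 7) would panic: "runtime error: integer divide by zero".
	// Negatives such as Div and gfDivide guard that case explicitly.
}
```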
					
	Modulo returns operand modulo a. 
 | 
	func Modulo(a, operand int) int { return operand % a } 
 | 
	[
  "func Mod(a, b Expr) Expr {\n\treturn &arithmeticOperator{&simpleOperator{a, b, scanner.MOD}}\n}",
  "func (this *BigInteger) Mod(a *BigInteger) *BigInteger {\n\tvar r *BigInteger = NewBigInteger()\n\tthis.Abs().DivRemTo(a, nil, r)\n\tif this.S < 0 && r.CompareTo(ZERO) > 0 {\n\t\ta.SubTo(r, r)\n\t}\n\treturn r\n}",
  "func (z *Int) Mod(x, y *Int) *Int {}",
  "func mod(v, modulus int) int {\n\treturn (v%modulus + modulus) % modulus\n}",
  "func PosMod(a, b int64) int64 {\n\tm := a % b\n\tif m < 0 {\n\t\treturn m + b\n\t}\n\treturn m\n}",
  "func (i IntValue64) Mod(data OperableData) Data {\n\treturn IntValue64(int64(i) % data.Int64())\n}",
  "func DivMod(a int, b int, div *int, mod *int) {\n\t*div = a / b\n\t*mod = a % b\n}",
  "func (z *Int) DivMod(x, y, m *Int) (*Int, *Int) {}",
  "func pyMod(d, m int) int {\r\n\tvar res int = d % m\r\n\tif (res < 0 && m > 0) || (res > 0 && m < 0) {\r\n\t\treturn res + m\r\n\t}\r\n\treturn res\r\n}",
  "func (i IntValue32) Mod(data OperableData) Data {\n\treturn IntValue32(int32(i) % data.Int32())\n}",
  "func Modulo(sha string, num int) int {\n\thasher := fnv.New32a()\n\thasher.Write([]byte(sha))\n\tpartition := int(hasher.Sum32()) % num\n\tif partition < 0 {\n\t\tpartition = -partition\n\t}\n\treturn partition\n}",
  "func PowMod(a, b, m int) int {\n\ta = a % m\n\tp := 1 % m\n\tfor b > 0 {\n\t\tif b&1 != 0 {\n\t\t\tp = (p * a) % m\n\t\t}\n\t\tb >>= 1\n\t\ta = (a * a) % m\n\t}\n\treturn p\n}",
  "func Mod(dividend, divisor int64) (int64, error) {\n\tif divisor == 0 {\n\t\treturn 0, newErrZeroDivision(fmt.Sprintf(\"%d + %d\", dividend, divisor))\n\t}\n\n\treturn dividend % divisor, nil\n}",
  "func (b ValExprBuilder) Mod(expr interface{}) ValExprBuilder {\n\treturn b.makeBinaryExpr('%', expr)\n}",
  "func (r Rand) Modulo(n int) int {\n\t// modulo len(groups) with big.Ints (Mod method works on pointers)\n\t//var b big.Int\n\tb := big.NewInt(0)\n\tb.SetBytes(r)\n\tb.Mod(b, big.NewInt(int64(n)))\n\treturn int(b.Int64())\n}",
  "func (r Rand) Modulo(n int) int {\n\t// modulo len(groups) with big.Ints (Mod method works on pointers)\n\t//var b big.Int\n\tb := big.NewInt(0)\n\tb.SetBytes(r.Bytes())\n\tb.Mod(b, big.NewInt(int64(n)))\n        return int(b.Int64())\n}",
  "func (l *BigInt) Mod(r Number) Number {\n\tif ri, ok := r.(*BigInt); ok {\n\t\tlb := (*big.Int)(l)\n\t\trb := (*big.Int)(ri)\n\t\tif rb.IsInt64() && rb.Int64() == 0 {\n\t\t\tpanic(errors.New(ErrDivideByZero))\n\t\t}\n\t\tres := new(big.Int).Rem(lb, rb)\n\t\treturn maybeInteger(res)\n\t}\n\tlp, rp := purify(l, r)\n\treturn lp.Mod(rp)\n}",
  "func ModMul(a, b, mod int) int {\n\ta, b = a%mod, b%mod\n\tif b == 0 {\n\t\treturn 0\n\t}\n\tif a*b/b == a {\n\t\treturn a * b % mod\n\t}\n\tpanic(\"overflow\")\n}",
  "func (i FloatValue64) Mod(data OperableData) Data {\n\treturn FloatValue64(int(i) % data.Int())\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
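Similarly, Go's % operator truncates toward zero, so Modulo takes the sign of the dividend; that is precisely the behavior the mod, PosMod, and pyMod negatives are written to normalize. A small sketch:

```go
package main

import "fmt"

// Modulo is copied from the document above.
func Modulo(a, operand int) int { return operand % a }

func main() {
	fmt.Println(Modulo(3, -7)) // -1: the result takes the dividend's sign
	// Euclidean-style normalization, as in the PosMod and pyMod negatives:
	fmt.Println(((-7%3)+3)%3) // 2: non-negative for a positive modulus
}
```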
					
	Multiply operand and a. 
 | 
	func Multiply(a, operand int) int { return operand * a } 
 | 
	[
  "func (vm VM) mul(termAddress1, termAddress2, resultAddress int, mode1, mode2, mode3 ParamMode) {\n\tvm.ModeWrite(resultAddress, vm.ModeRead(termAddress1, mode1)*vm.ModeRead(termAddress2, mode2), mode3)\n}",
  "func Mul(a, b Expr) Expr {\n\treturn &arithmeticOperator{&simpleOperator{a, b, scanner.MUL}}\n}",
  "func MULPD(mx, x operand.Op) { ctx.MULPD(mx, x) }",
  "func MULB(mr operand.Op) { ctx.MULB(mr) }",
  "func MULPS(mx, x operand.Op) { ctx.MULPS(mx, x) }",
  "func Multiply(a cty.Value, b cty.Value) (cty.Value, error) {\n\treturn MultiplyFunc.Call([]cty.Value{a, b})\n}",
  "func MULQ(mr operand.Op) { ctx.MULQ(mr) }",
  "func Mul(lhs, rhs Arithmetic) (MultivariableExpression, error) {\n\tl := lhs.MultivariableExpression().Simplify()\n\tr := rhs.MultivariableExpression().Simplify()\n\n\texpressionsMap := map[Symbol]float64{}\n\tfor _, e := range l.expressions {\n\t\texpressionsMap[e.symbol] = e.power\n\t}\n\tfor _, e := range r.expressions {\n\t\tpower, ok := expressionsMap[e.symbol]\n\t\tif ok {\n\t\t\texpressionsMap[e.symbol] = power + e.power\n\t\t} else {\n\t\t\texpressionsMap[e.symbol] = e.power\n\t\t}\n\t}\n\n\texpressions := []Expression{}\n\tfor symbol, power := range expressionsMap {\n\t\texpressions = append(expressions, symbol.Power(power))\n\t}\n\n\tproduct := MultivariableExpression{\n\t\texpressions: expressions,\n\t\tcoefficient: l.coefficient * r.coefficient,\n\t}\n\n\treturn product, nil\n}",
  "func IMULQ(ops ...operand.Op) { ctx.IMULQ(ops...) }",
  "func (i *MyInt) MultiplyBy(operand MyInt) {\n\t*i *= operand\n}",
  "func Add(a, operand int) int { return operand + a }",
  "func (c *calculon) Multiply(ctx context.Context, arg calculator.Operand) (calculator.Result, error) {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\tc.current *= arg.Value\n\treturn calculator.Result{c.current}, nil\n}",
  "func Mul(a, b Expr) Expr {\n\treturn &mulOp{&simpleOperator{a, b, scanner.MUL}}\n}",
  "func Mult( a *context.Value, b *context.Value ) (*context.Value,error) {\n  if a != nil && b != nil {\n    switch a.OperationType( b ) {\n      case context.VAR_BOOL:\n        return context.IntValue( a.Int() * b.Int() ), nil\n      case context.VAR_INT:\n        return context.IntValue( a.Int() * b.Int() ), nil\n      case context.VAR_FLOAT:\n        return context.FloatValue( a.Float() * b.Float() ), nil\n      case context.VAR_COMPLEX:\n        return context.ComplexValue( a.Complex() * b.Complex() ), nil\n    }\n  }\n  return nil, errors.New( \"Unsupported type for mult\" )\n}",
  "func (self *Point) MultiplyAdd(a *Point, b *Point, s int) *Point{\n    return &Point{self.Object.Call(\"multiplyAdd\", a, b, s)}\n}",
  "func MULXL(mr, r, r1 operand.Op) { ctx.MULXL(mr, r, r1) }",
  "func ANDPS(mx, x operand.Op) { ctx.ANDPS(mx, x) }",
  "func mul(x byte, y byte) byte {\n\tif x == 0 || y == 0 {\n\t\treturn 0\n\t}\n\treturn expOp[logOp[x]+logOp[y]]\n}",
  "func PAND(mx, x operand.Op) { ctx.PAND(mx, x) }"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Min returns the minimum of a and operand. 
 | 
	func Min(a, operand int) int {
	if a < operand {
		return a
	}
	return operand
} 
 | 
	[
  "func Min(a, b int) int {\n\treturn neogointernal.Opcode2(\"MIN\", a, b).(int)\n}",
  "func (self *State)Min(a,b any)any{\n  self.IncOperations(self.coeff[\"min\"]+self.off[\"min\"])\n  return wrap2(a,b,math.Min)\n}",
  "func Min(a, b float64) float64 {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}",
  "func min(a int, b int) (res int) {\n\tif a < b {\n\t\tres = a\n\t} else {\n\t\tres = b\n\t}\n\n\treturn\n}",
  "func Min(a int, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}",
  "func Min(arg, arg2 float64) float64 {\n\treturn math.Min(arg, arg2)\n}",
  "func min(a, b float64) float64 {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}",
  "func (g *Graph) Min(x1 Node, x2 Node) Node {\n\treturn g.NewOperator(fn.NewMin(x1, x2), x1, x2)\n}",
  "func min(a, b int32) int32 {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}",
  "func Min(numbers ...cty.Value) (cty.Value, error) {\n\treturn MinFunc.Call(numbers)\n}",
  "func (b ValExprBuilder) Min() ValExprBuilder {\n\treturn b.makeFunc(\"MIN\", false)\n}",
  "func Min[T Ordered](a, b T) T {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}",
  "func ElemMin(a, b Res) Res {\n\taMask := a.Output().Copy()\n\taMask.Sub(b.Output())\n\tanyvec.LessThan(aMask, aMask.Creator().MakeNumeric(0))\n\tbMask := aMask.Copy()\n\tanyvec.Complement(bMask)\n\treturn Add(Mul(a, NewConst(aMask)), Mul(b, NewConst(bMask)))\n}",
  "func mini(x int, y int) int {\n    // Return the minimum of two integers.\n    if x <= y {\n        return x\n    } else {\n        return y\n    }\n}",
  "func Min(v1, v2 Value) (Value, error) {\n\treturn minmax(v1, v2, true)\n}",
  "func Min(valueA gcv.Value, valueB gcv.Value) (gcv.Value, error) {\n\tif valueA.Type() == gcv.Complex || valueB.Type() == gcv.Complex {\n\t\treturn nil, errors.New(\"Min is not supported for Complex numbers\")\n\t}\n\treturn gcv.MakeValue(math.Min(valueA.Real(), valueB.Real())), nil\n}",
  "func min(x, y int64) int64 {\n\tif x > y {\n\t\treturn y\n\t}\n\treturn x\n}",
  "func Min[\n\tValueT typecons.Ordered,\n](refValue ValueT) OrderedConstraint[ValueT] {\n\treturn Func(\n\t\tfmt.Sprintf(\"min %v\", refValue),\n\t\tGreaterThanOrEqualTo(refValue).IsValid)\n}",
  "func Min(first Decimal, rest ...Decimal) Decimal {\n\tans := first\n\tfor _, item := range rest {\n\t\tif item.Cmp(ans) < 0 {\n\t\t\tans = item\n\t\t}\n\t}\n\treturn ans\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Max returns the maximum of a and operand. 
 | 
	func Max(a, operand int) int {
	if a > operand {
		return a
	}
	return operand
} 
 | 
	[
  "func Max(a, b int) int {\n\treturn neogointernal.Opcode2(\"MAX\", a, b).(int)\n}",
  "func (self *State)Max(a,b any)any{\n  self.IncOperations(self.coeff[\"max\"]+self.off[\"max\"])\n  return wrap2(a,b,math.Max)\n}",
  "func Max(a, b int64) int64 {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}",
  "func Max(a, b float64) float64 {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}",
  "func Max(a interface{}, b interface{}) interface{} {\n\tif a == nil {\n\t\treturn b\n\t}\n\tif b == nil {\n\t\treturn a\n\t}\n\tif Cmp(a, b) > 0 {\n\t\treturn a\n\t}\n\treturn b\n}",
  "func Max(a, b int) int {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}",
  "func max(a int, b int) int {\n        if a > b { return a } else { return b }\n}",
  "func max(a, b float64) float64 {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}",
  "func Max(x, y int64) int64 {\n\tif x > y {\n\t\treturn x\n\t}\n\treturn y\n}",
  "func Max(arg, arg2 float64) float64 {\n\treturn math.Max(arg, arg2)\n}",
  "func Max(v1, v2 Value) (Value, error) {\n\treturn minmax(v1, v2, false)\n}",
  "func Max[T constraints.Ordered](x T, y T) T {\n\tif x > y {\n\t\treturn x\n\t}\n\treturn y\n}",
  "func Max(valueA gcv.Value, valueB gcv.Value) (gcv.Value, error) {\n\tif valueA.Type() == gcv.Complex || valueB.Type() == gcv.Complex {\n\t\treturn nil, errors.New(\"Max is not supported for Complex numbers\")\n\t}\n\treturn gcv.MakeValue(math.Max(valueA.Real(), valueB.Real())), nil\n}",
  "func Max[T Ordered](a, b T) T {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}",
  "func (b ValExprBuilder) Max() ValExprBuilder {\n\treturn b.makeFunc(\"MAX\", false)\n}",
  "func Max(max interface{}) ThresholdRule {\n\treturn ThresholdRule{\n\t\tthreshold: max,\n\t\toperator:  lessEqualThan,\n\t\terr:       ErrMaxLessEqualThanRequired,\n\t}\n}",
  "func max(a, b ImpactAmount) ImpactAmount {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}",
  "func (g *Graph) Max(x1 Node, x2 Node) Node {\n\treturn g.NewOperator(fn.NewMax(x1, x2), x1, x2)\n}",
  "func Max[\n\tValueT typecons.Ordered,\n](refValue ValueT) OrderedConstraint[ValueT] {\n\treturn Func(\n\t\tfmt.Sprintf(\"max %v\", refValue),\n\t\tLessThanOrEqualTo(refValue).IsValid)\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	InSequence does work in sequence, waiting for the previous work item to complete before starting the next. It should write results on the channel in the order of the input work array. 
 | 
	func InSequence(ch chan Result, work []int) []Result {
	results := make([]Result, len(work))
	go buildInSeq(ch, work, results)
	return results
} 
 | 
	[
  "func Test3() {\r\n// input\r\nnums := []int{1,2,3,4}\r\n// each stage will be asynchronus because use buffer channel with unlimited\r\n//stage 1\r\ndataChannel1 := sliceToChannel(nums)\r\n// stage 2\r\nfinalChannel := sq(dataChannel1)\r\n// stage 3\r\nfor n := range finalChannel {\r\n\tfmt.Println(n)\r\n}\r\n}",
  "func InParallel(ch chan Result, work []int) []Result {\n\tresults := make([]Result, len(work))\n\tvar mutex = &sync.Mutex{}\n\tfor i, num := range work {\n\t\tgo func(w int, res *Result, ch chan Result) {\n\t\t\t*res = executeWork(w)\n\t\t\tmutex.Lock()\n\t\t\tch <- *res\n\t\t\tmutex.Unlock()\n\t\t}(num, &results[i], ch)\n\t}\n\treturn results\n}",
  "func (s *intSequence) Items() <-chan string { return s.data }",
  "func (w *Worker) Work() {\n\tfor {\n\t\tselect {\n\t\tcase <-w.done:\n\t\t\treturn\n\t\tdefault:\n\t\t\tevent := <-w.channel\n\t\t\tw.writeToBuffer(event)\n\t\t}\n\t}\n}",
  "func TestSendOrderedOutputs(t *testing.T) {\n\tinitTestLogs()\n\n\tfnames := []string{}\n\tfor i := 1; i <= 10; i++ {\n\t\tfnames = append(fnames, fmt.Sprintf(\"/tmp/f%d.txt\", i))\n\t}\n\n\twf := NewWorkflow(\"test_wf\", 16)\n\tig := NewIPGen(wf, \"ipgen\", fnames...)\n\n\tfc := NewProc(wf, \"fc\", \"echo {i:in} > {o:out}\")\n\tfc.SetPathExtend(\"in\", \"out\", \"\")\n\tfc.In(\"in\").Connect(ig.Out)\n\n\tsl := NewProc(wf, \"sl\", \"cat {i:in} > {o:out}\")\n\tsl.SetPathExtend(\"in\", \"out\", \".copy.txt\")\n\tsl.In(\"in\").Connect(fc.Out(\"out\"))\n\n\tassert.NotNil(t, sl.Out)\n\n\tvar expFname string\n\ti := 1\n\n\ttempPort := NewInPort(\"temp\")\n\tConnectFrom(tempPort, sl.Out(\"out\"))\n\n\t// Should not start go-routines before connection stuff is done\n\tgo ig.Run()\n\tgo fc.Run()\n\tgo sl.Run()\n\n\tfor ft := range tempPort.Chan {\n\t\tDebug.Printf(\"TestSendOrderedOutputs: Looping over item %d ...\\n\", i)\n\t\texpFname = fmt.Sprintf(\"/tmp/f%d.txt.copy.txt\", i)\n\t\tassert.EqualValues(t, expFname, ft.Path())\n\t\tDebug.Printf(\"TestSendOrderedOutputs: Looping over item %d Done.\\n\", i)\n\t\ti++\n\t}\n\n\tDebug.Println(\"TestSendOrderedOutputs: Done with loop ...\")\n\n\texpFnames := []string{}\n\tfor i := 1; i <= 10; i++ {\n\t\texpFnames = append(expFnames, fmt.Sprintf(\"/tmp/f%d.txt.copy.txt\", i))\n\t}\n\tcleanFiles(fnames...)\n\tcleanFiles(expFnames...)\n}",
  "func anyThingDoneSlice(inp <-chan anyThing) (done <-chan []anyThing) {\n\tsig := make(chan []anyThing)\n\tgo func(done chan<- []anyThing, inp <-chan anyThing) {\n\t\tdefer close(done)\n\t\tslice := []anyThing{}\n\t\tfor i := range inp {\n\t\t\tslice = append(slice, i)\n\t\t}\n\t\tdone <- slice\n\t}(sig, inp)\n\treturn sig\n}",
  "func (b *bufferedChan) Run() {\n\tdefer close(b.OutChannel)\n\tfor value := range b.inChannel {\n\t\tselect {\n\t\tcase <-b.ctx.Done():\n\t\t\tfmt.Println(\"Run: Time to return\")\n\t\t\treturn\n\t\tcase b.OutChannel <- value:\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n\n}",
  "func ThingDoneSlice(inp <-chan Thing) (done <-chan []Thing) {\n\tsig := make(chan []Thing)\n\tgo doneThingSlice(sig, inp)\n\treturn sig\n}",
  "func TestStreamSequential(t *testing.T) {\n\tconst evCount = 100000\n\n\ts := NewStream(1, 2)\n\tdefer s.Shutdown()\n\tevents := testEvents(evCount)\n\n\t// send a bunch of \"events\", and make sure we receive them all.\n\tfmt.Println(\"sending test events to queue\")\n\tfor i, e := range events {\n\t\tassert.Equal(t, int32(i), e.GetCallbackID())\n\t\ts.Push(e)\n\t\tassert.Equal(t, i+1, s.Len())\n\t}\n\n\tassert.Equal(t, evCount, s.Len())\n\n\tfmt.Println(\"reading events from queue\")\n\tfor _, e := range events {\n\t\tqe, ok := <-s.Recv()\n\t\tassert.True(t, ok)\n\t\tassert.Equal(t, e, qe)\n\t}\n\n\tassert.Zero(t, s.Len())\n}",
  "func (ow *ordered[T, U]) Wait() []U {\n\tow.wg.Wait()\n\treturn ow.results\n}",
  "func out(ch chan int) {\n\tfor i := 1; i < 100; i++ {\n\t\tch <- i\n\t\ttime.Sleep(time.Second * 1)\n\t}\n\n}",
  "func (p *AsyncProducer) Input() chan<- *sarama.ProducerMessage { return p.input }",
  "func (j *Job) Wait() { <-j.isDone }",
  "func (b *batch) wait() (q *queue) {\n\tb.workingq.Lock()\n\tfor {\n\t\tv, ok := b.workingq.Dequeue0()\n\t\tif !ok {\n\t\t\tb.wcond.Wait()\n\t\t\tcontinue\n\t\t}\n\n\t\tb.workingq.Unlock()\n\t\tq = v.(*queue)\n\t\treturn\n\t}\n}",
  "func (a *Axe) inWorker(done func()) {\n\tdefer done()\n\tparser := NewParser(nginxItemOrder)\n\tfor input := range a.inChan {\n\t\ta.incrNumLines()\n\t\tll, err := parser.ParseLine(input)\n\t\tif err != nil {\n\t\t\ta.errChan <- fmt.Errorf(\"%d:%v\", a.numLines, err)\n\t\t} else {\n\t\t\ta.outChan <- ll\n\t\t}\n\t}\n\tclose(a.outChan)\n}",
  "func (o *ObservableImpl) Sample(iterable Iterable, opts ...Option) Observable {\n\toption := parseOptions(opts...)\n\tnext := option.buildChannel()\n\tctx := option.buildContext(o.parent)\n\titCh := make(chan Item)\n\tobsCh := make(chan Item)\n\n\tgo func() {\n\t\tdefer close(obsCh)\n\t\tobserve := o.Observe(opts...)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase i, ok := <-observe:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ti.SendContext(ctx, obsCh)\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tdefer close(itCh)\n\t\tobserve := iterable.Observe(opts...)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase i, ok := <-observe:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ti.SendContext(ctx, itCh)\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tdefer close(next)\n\t\tvar lastEmittedItem Item\n\t\tisItemWaitingToBeEmitted := false\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase _, ok := <-itCh:\n\t\t\t\tif ok {\n\t\t\t\t\tif isItemWaitingToBeEmitted {\n\t\t\t\t\t\tnext <- lastEmittedItem\n\t\t\t\t\t\tisItemWaitingToBeEmitted = false\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase item, ok := <-obsCh:\n\t\t\t\tif ok {\n\t\t\t\t\tlastEmittedItem = item\n\t\t\t\t\tisItemWaitingToBeEmitted = true\n\t\t\t\t} else {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn &ObservableImpl{\n\t\titerable: newChannelIterable(next),\n\t}\n}",
  "func (m *Mare) InChannel(items chan interface{}) *Mare {\n\tm.prepareInput()\n\tgo func() {\n\t\tdefer m.mapInWorkers.Done()\n\t\tfor item := range items {\n\t\t\tm.mapInChan <- item\n\t\t}\n\t}()\n\t// Return m\n\treturn m\n}",
  "func (incubator *Incubator) Iterate() {\n\tcallback := make(chan error)\n\tincubator.iterateChan <- callback\n\t<-callback\n}",
  "func (b ArrayBucket) Worker(inChan <-chan string, outChan chan<- string, wg *sync.WaitGroup) {\n\twg.Add(1)\n\tgo func() {\n\t\tfor cand := range inChan {\n\t\t\t//Test against the bucket if\n\t\t\t// it the cand can be made from available tokens\n\t\t\tif b.testMatch(cand) {\n\t\t\t\toutChan <- cand\n\t\t\t}\n\t\t}\n\t\twg.Done()\n\t}()\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
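The InSequence document above delegates to buildInSeq, which the record never shows. The following is only a plausible sketch, inferred from the docstring and from the executeWork and Result names used by the InParallel record below; it is not the dataset's actual helper.

```go
package main

// Result and executeWork appear in the records but are not defined there;
// these stubs are placeholders so the sketch compiles.
type Result struct{ Value int }

func executeWork(w int) Result { return Result{Value: w * w} }

// buildInSeq is a guess at the elided helper: run each work item only after
// the previous one finishes, filling results by input index and sending on
// the channel in input order.
func buildInSeq(ch chan Result, work []int, results []Result) {
	for i, w := range work {
		results[i] = executeWork(w) // blocks until this item completes
		ch <- results[i]            // sends therefore arrive in input order
	}
}

func main() {
	work := []int{1, 2, 3}
	ch := make(chan Result, len(work))
	results := make([]Result, len(work))
	go buildInSeq(ch, work, results)
	for range work {
		<-ch // receives arrive in input order: 1, 4, 9
	}
	_ = results // fully populated once all receives complete
}
```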
					
	InParallel does work in parallel and should write results on the channel as work items complete. 
 | 
	func InParallel(ch chan Result, work []int) []Result {
	results := make([]Result, len(work))
	var mutex = &sync.Mutex{}
	for i, num := range work {
		go func(w int, res *Result, ch chan Result) {
			*res = executeWork(w)
			mutex.Lock()
			ch <- *res
			mutex.Unlock()
		}(num, &results[i], ch)
	}
	return results
} 
 | 
	[
  "func (h *connHandler) parallel(m *reqData, args []interface{}) {\n\th.Lock()\n\theap.Push(&h.pQueue, m)\n\th.Unlock()\n\n\tm.resp = h.srv.pool.Cmd(m.cmd, args...)\n\n\th.Lock()\n\tfor h.pQueue.Len() > 0 {\n\t\titem := heap.Pop(&h.pQueue).(*reqData)\n\t\tif item.resp == nil {\n\t\t\theap.Push(&h.pQueue, item)\n\t\t\tbreak\n\t\t}\n\t\tif item.answerCh != nil {\n\t\t\titem.answerCh <- item.resp\n\t\t}\n\t}\n\th.Unlock()\n}",
  "func writingWorker(\n    workerIndex int,\n    outputFolder string,\n    in <-chan result,\n    group *sync.WaitGroup,\n    exportFunc io.Exporter) {\n\n    defer group.Done()\n\n    for result := range in {\n        outputFile := path.Join(outputFolder, utils.SimpleRandomString(20))\n        log.Printf(\"[worker:%d] saving file %s\", workerIndex, outputFile)\n        if result.err != nil {\n            log.Printf(result.err.Error())\n        } else {\n            query := result.collection.Query\n            log.Printf(\n                \"[worker:%d] exporting query results for '%s' into file '%s\",\n                workerIndex, query, outputFile)\n            if err := exportFunc(result.collection, outputFile); err != nil {\n                log.Printf(err.Error())\n            }\n        }\n    }\n\n    log.Printf(\"[worker:%d] terminated\", workerIndex)\n}",
  "func WorkParallel(db *DbGorp, tasks []func(worker *DbWorker), returnResults bool, maxNumWorkers int, timeouts int) (results []interface{}, err error) {\n\tif maxNumWorkers == 0 {\n\t\tmaxNumWorkers = len(tasks)\n\t}\n\n\t// Create a container with no status callback\n\tcontainer := NewDbWorker(db,\n\t\tMakeCallback(nil,\n\t\t\tfunc(value interface{}, worker *DbWorker) {\n\t\t\t\ttask := value.(func(worker *DbWorker))\n\t\t\t\ttask(worker)\n\t\t\t}), maxNumWorkers)\n\terr = container.Start()\n\tif err != nil {\n\t\treturn\n\t}\n\tfor _, task := range tasks {\n\t\tcontainer.InputChannel <- task\n\t}\n\tif returnResults {\n\t\tfor range tasks {\n\t\t\tresult := <-container.OutputChannel\n\t\t\tresults = append(results, result)\n\t\t}\n\t}\n\n\tcontainer.Close(timeouts)\n\treturn\n}",
  "func (l *BenchmarkRunner) work(b Benchmark, wg *sync.WaitGroup, c *duplexChannel, workerNum int) {\n\tproc := b.GetProcessor()\n\tproc.Init(workerNum, l.doLoad)\n\tfor b := range c.toWorker {\n\t\tmetricCnt, rowCnt := proc.ProcessBatch(b, l.doLoad)\n\t\tatomic.AddUint64(&l.metricCnt, metricCnt)\n\t\tatomic.AddUint64(&l.rowCnt, rowCnt)\n\t\tc.sendToScanner()\n\t}\n\tswitch c := proc.(type) {\n\tcase ProcessorCloser:\n\t\tc.Close(l.doLoad)\n\t}\n\twg.Done()\n}",
  "func ExecuteInParallel(q *Queue, fn func(interface{})) {\n\tif q == nil {\n\t\treturn\n\t}\n\n\tq.lock.Lock()\n\ttodo, done := int64(len(q.items)), int64(-1)\n\tif todo == 0 {\n\t\treturn\n\t}\n\n\tnumCPU := 1\n\tif runtime.NumCPU() > 1 {\n\t\tnumCPU = runtime.NumCPU() - 1\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(numCPU)\n\titems := q.items\n\n\tfor i := 0; i < numCPU; i++ {\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tindex := atomic.AddInt64(&done, 1)\n\t\t\t\tif index >= todo {\n\t\t\t\t\twg.Done()\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tfn(items[index])\n\t\t\t\titems[index] = 0\n\t\t\t}\n\t\t}()\n\t}\n\n\twg.Wait()\n\tq.lock.Unlock()\n\tq.Dispose()\n}",
  "func worker(id int, jobs <-chan int, results chan<- int) {\n    for j := range jobs {\n        fmt.Println(\"worker\", id, \"processing job\", j)\n        time.Sleep(time.Second)\n        results <- j * 2 // we just multiply any value we're given to 2\n    }\n}",
  "func (conn *Conn) outWorker() {\n\tfor msg := range conn.out {\n\t\terr := conn.SendMessage(msg)\n\t\tconn.callsLck.RLock()\n\t\tif err != nil {\n\t\t\tif c := conn.calls[msg.serial]; c != nil {\n\t\t\t\tc.Err = err\n\t\t\t\tc.Done <- c\n\t\t\t}\n\t\t\tconn.serialLck.Lock()\n\t\t\tdelete(conn.serialUsed, msg.serial)\n\t\t\tconn.serialLck.Unlock()\n\t\t} else if msg.Type != TypeMethodCall {\n\t\t\tconn.serialLck.Lock()\n\t\t\tdelete(conn.serialUsed, msg.serial)\n\t\t\tconn.serialLck.Unlock()\n\t\t}\n\t\tconn.callsLck.RUnlock()\n\t}\n}",
  "func Test3() {\r\n// input\r\nnums := []int{1,2,3,4}\r\n// each stage will be asynchronus because use buffer channel with unlimited\r\n//stage 1\r\ndataChannel1 := sliceToChannel(nums)\r\n// stage 2\r\nfinalChannel := sq(dataChannel1)\r\n// stage 3\r\nfor n := range finalChannel {\r\n\tfmt.Println(n)\r\n}\r\n}",
  "func queryWorker(\n    workerIndex int,\n    in <-chan string,\n    out chan<- result,\n    group *sync.WaitGroup,\n    client *api.BingClient) {\n\n    defer group.Done()\n\n    for queryString := range in {\n        if queryString == \"\" { continue }\n        log.Printf(\"[worker:%d] sending search string: %s\", workerIndex, queryString)\n\n        currOffset := 0\n        running := true\n        var err error\n        for running {\n            params := api.CreateQuery(queryString, currOffset)\n            paramsString := params.AsQueryParameters()\n            log.Printf(\"[worker:%d] running query with params: %s\", workerIndex, paramsString)\n            images := client.RequestImages(params)\n            if images.Values == nil {\n                err = fmt.Errorf(\"[worker:%d] failed to pull query: %s/%s\",\n                    workerIndex, client.Endpoint, paramsString)\n                running = false\n            } else {\n                running = images.NextOffset != currOffset\n                currOffset = images.NextOffset\n            }\n            out <- result{images, err}\n        }\n    }\n\n    log.Printf(\"[worker:%d] terminated\", workerIndex)\n}",
  "func wait_and_process_results(results <-chan stat, done <-chan doneStatus,\n  num_workers int) []stat {\n\n  output := make([]stat, 0)\n\n  for w := 0; w < num_workers; {\n    select {  // Blocking\n    case result := <-results:\n      output = append(output, result)\n    case <-done:\n      num_workers--\n    }\n  }\n\nDONE:\n\n  // process any remaining results\n  for {\n    select {\n    case result := <-results:\n      output = append(output, result)\n    default:\n      break DONE\n    }\n  }\n\n  return output\n}",
  "func worker(id int, jobs <-chan int, results chan<- int) {\n\tfor j := range jobs {\n\t\tfmt.Println(\"worker\", id, \"processing job\", j)\n\t\t//simulate an expensive work\n\t\ttime.Sleep(time.Second)\n\t\tresults <- j * 10\n\t}\n}",
  "func processQueryResults(cmdType string, qResultChans []chan Query, dir string, threadcount int) {\n\tvar wg sync.WaitGroup\n\tfor i, qResultChan := range qResultChans {\n\t\twg.Add(1)\n\n\t\tgo func(i int, qResultChan chan Query) {\n\t\t\tdefer wg.Done()\n\t\t\tfilename := strconv.Itoa(i)\n\t\t\twriteQueryResults(cmdType, qResultChan, filename, dir, threadcount)\n\t\t}(i, qResultChan)\n\t}\n\twg.Wait()\n}",
  "func workerpool() {\n\tworkers := 3\n\tworkchan := make(chan int)\n\tfor i := 0; i < workers; i++ {\n\t\tgo func() {\n\t\t\tfor i := range workchan {\n\t\t\t\ttime.Sleep(3 * time.Second)\n\t\t\t\tfmt.Println(\"Workerpool worked on \", i)\n\t\t\t}\n\t\t}()\n\t}\n\tamountOfWork := 10\n\tfor i := 0; i < amountOfWork; i++ {\n\t\tworkchan <- i\n\t}\n\tfmt.Println(\"Finished workerpool work\")\n\t//Give some time for goroutines to finish. To avoid using WaitGroup and loosing focus.\n\ttime.Sleep(5 * time.Second)\n}",
  "func useData() {\n\tdefer close(done)\n\tvar wg sync.WaitGroup\n\tconcurrencyRate := 10 // in the wild you'd use a config variable for this\n\tfor i := 0; i < concurrencyRate; i++ {\n\t\tfmt.Println(\"Worker \", i)\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tfor p := range providerChan {\n\t\t\t\tfunc() {\n\t\t\t\t\tfmt.Printf(\"Read from chan: %q, %q\\n\", p.name, p.url)\n\t\t\t\t\tresp, err := http.Head(p.url)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfmt.Printf(\"Error making head request for %q: %v\\n\", p.url, err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tdefer resp.Body.Close()\n\t\t\t\t\tfmt.Printf(\"Processing Data: %q\\t%s\\n\", p.name, resp.Status)\n\t\t\t\t}()\n\t\t\t}\n\t\t}()\n\t}\n\twg.Wait()\n}",
  "func testParallelismWithBeverages(bm *BeverageMachine, beverageNames []string) {\n\n\tfmt.Printf(\"\\nstarting test: testParallelismWithBeverages\\n\\n\")\n\n\twg := sync.WaitGroup{}\n\tfor i, beverageName := range beverageNames {\n\t\twg.Add(1)\n\t\tgo func(i int, beverageName string) {\n\t\t\tdefer wg.Done()\n\n\t\t\tfmt.Printf(\"thread %d-> start\\n\", i+1)\n\n\t\t\t//1. get an idle dispenser\n\t\t\tdispenser, err := bm.GetIdleDispenser()\n\t\t\tfor err != nil {\n\t\t\t\tfmt.Printf(\"thread %d-> %s, retrying in 2 seconds...\\n\", i+1, err.Error())\n\t\t\t\ttime.Sleep(2 * time.Second)\n\t\t\t\tdispenser, err = bm.GetIdleDispenser()\n\t\t\t}\n\t\t\tfmt.Printf(\"thread %d-> acquired dispenser %d\\n\", i+1, dispenser.GetId())\n\n\t\t\tfmt.Printf(\"thread %d-> starting to prepare %s on dispenser %d...\\n\", i+1, beverageName, dispenser.GetId())\n\n\t\t\t//2. request the beverage from the dispenser\n\t\t\tbeverage, err := bm.RequestBeverage(dispenser, beverageName)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"thread %d-> dispenser %d says: %s\\n\", i+1, dispenser.GetId(), err.Error())\n\t\t\t\tfmt.Printf(\"thread %d-> end\\n\", i+1)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfmt.Printf(\"thread %d-> successfully served %s on dispenser %d\\n\", i+1, beverage.GetName(), dispenser.GetId())\n\t\t\tfmt.Printf(\"thread %d-> end\\n\", i+1)\n\t\t}(i, beverageName)\n\t}\n\twg.Wait()\n\n\tfmt.Println(\"\\ncompleted test: testParallelismWithBeverages\\n\")\n}",
  "func (b ArrayBucket) Worker(inChan <-chan string, outChan chan<- string, wg *sync.WaitGroup) {\n\twg.Add(1)\n\tgo func() {\n\t\tfor cand := range inChan {\n\t\t\t//Test against the bucket if\n\t\t\t// it the cand can be made from available tokens\n\t\t\tif b.testMatch(cand) {\n\t\t\t\toutChan <- cand\n\t\t\t}\n\t\t}\n\t\twg.Done()\n\t}()\n}",
  "func (t *tester) Parallel() {\n\t// NYI\n}",
  "func pooling() {\n\tch := make(chan string)\n\trobots := runtime.NumCPU()\n\tworks := 1000\n\n\tfor i := 0; i < robots; i++ {\n\t\tgo func(id int) {\n\t\t\tfor c := range ch {\n\t\t\t\tfmt.Printf(\" robot %v received task %v\\n\", i, c)\n\t\t\t\ttime.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond)\n\t\t\t\tfmt.Printf(\" robot %v finished task %v\\n\", i, c)\n\t\t\t}\n\t\t}(i)\n\t}\n\n\tfor i := 0; i < works; i++ {\n\t\tch <- fmt.Sprint(i)\n\t}\n\n\ttime.Sleep(time.Second)\n\tclose(ch)\n\n}",
  "func Test_Multi_Write_Iteration_Concurrency(t *testing.T) {\n\tc := Config{\n\t\tBucketLimits: []float64{1},\n\t}\n\thv, err := New(c)\n\tif err != nil {\n\t\tt.Fatalf(\"expected nil, got %v\", err)\n\t}\n\n\tvar done = make(chan bool)\n\tvar errors = make(chan error)\n\tvar writeWaitGroup sync.WaitGroup\n\tfor i := 0; i < 1000; i++ {\n\t\twriteWaitGroup.Add(1)\n\n\t\tgo func(i int) {\n\t\t\tdefer writeWaitGroup.Done()\n\n\t\t\terr := hv.Add(fmt.Sprintf(\"%v\", i), float64(i))\n\t\t\tif err != nil {\n\t\t\t\terrors <- err\n\t\t\t}\n\t\t}(i)\n\t}\n\n\tvar iterateWaitGroup sync.WaitGroup\n\tfor i := 0; i < 1000; i++ {\n\t\titerateWaitGroup.Add(1)\n\n\t\tgo func() {\n\t\t\tdefer iterateWaitGroup.Done()\n\n\t\t\tfor range hv.Histograms() {\n\t\t\t}\n\t\t}()\n\t}\n\n\tgo func() {\n\t\twriteWaitGroup.Wait()\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-done:\n\t\tbreak\n\tcase err := <-errors:\n\t\tt.Fatalf(\"goroutine error : %v\", err)\n\t}\n\n\titerateWaitGroup.Wait()\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
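One design note on the InParallel document: the mutex guards only the channel send, which is already safe for concurrent use, so results still arrive in completion order, consistent with the docstring. A caller must also drain the channel before trusting the returned slice, since the goroutines are still filling it. Below is a hypothetical driver, reusing the record's own InParallel plus the Result stub assumed in the sketch above:

```go
// collect drains exactly len(work) sends from ch. Each goroutine's write to
// results happens before its send, so once every send has been received the
// slice returned by InParallel is fully populated, indexed by input position.
func collect(ch chan Result, work []int) []Result {
	results := InParallel(ch, work)
	for range work {
		<-ch // completion order, not input order
	}
	return results
}
```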
					
	SpittingWebToNest returns the Nest that this Spitter is creating a Web to spit at, thus connecting them. Nil if not spitting. Value can be returned as a nil pointer. 
 | 
	func (spitterImpl *SpitterImpl) SpittingWebToNest() spiders.Nest {
	return spitterImpl.spittingWebToNestImpl
} 
 | 
	[
  "func Nest(element Element) Element {\n\treturn &nest{child: element}\n}",
  "func (o GroupOutput) WebUrl() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Group) pulumi.StringOutput { return v.WebUrl }).(pulumi.StringOutput)\n}",
  "func (m *Chat) GetWebUrl()(*string) {\n    return m.webUrl\n}",
  "func (client AppsClient) StartWebSiteNetworkTrace(resourceGroupName string, name string, durationInSeconds *int32, maxFrameLength *int32, sasURL string) (result String, err error) {\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: resourceGroupName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"resourceGroupName\", Name: validation.MaxLength, Rule: 90, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.MinLength, Rule: 1, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.Pattern, Rule: `^[-\\w\\._\\(\\)]+[^\\.]$`, Chain: nil}}}}); err != nil {\n\t\treturn result, validation.NewErrorWithValidationError(err, \"web.AppsClient\", \"StartWebSiteNetworkTrace\")\n\t}\n\n\treq, err := client.StartWebSiteNetworkTracePreparer(resourceGroupName, name, durationInSeconds, maxFrameLength, sasURL)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"web.AppsClient\", \"StartWebSiteNetworkTrace\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.StartWebSiteNetworkTraceSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"web.AppsClient\", \"StartWebSiteNetworkTrace\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.StartWebSiteNetworkTraceResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"web.AppsClient\", \"StartWebSiteNetworkTrace\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}",
  "func NetherSprouts() Grass {\n\treturn Grass{4}\n}",
  "func (web *Web) Webs() *Webs {\n\treturn NewWebs(\n\t\tweb.client,\n\t\tfmt.Sprintf(\"%s/Webs\", web.endpoint),\n\t\tweb.config,\n\t)\n}",
  "func (k *KeyRing) buildTrustWeb() {\n\tvar queue []*Key\n\tvisited := make(map[string]bool)\n\n\t// Populate initial trusted peers.\n\t// The queue only contains peers whose signatures can be trusted.\n\tfor _, key := range k.keys {\n\t\tif key.trust >= TrustThreshold {\n\t\t\tqueue = append(queue, key)\n\t\t\tvisited[key.identity] = true\n\t\t}\n\n\t\tkey.effectiveTrust = key.trust\n\t\tkey.signedBy = nil\n\t}\n\n\t// While there are some vertexes to be processed\n\tvar current *Key\n\tfor len(queue) > 0 {\n\t\tcurrent, queue = queue[0], queue[1:]\n\n\t\t// For each signatures\n\t\tfor signee, signature := range current.Signatures {\n\n\t\t\t// The signature is valid, add its value (if exists)\n\t\t\tsigneeKey := k.keys[signee]\n\t\t\tif signeeKey == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// EffectiveTrust calculation takes into account previously\n\t\t\t// accumulated trust wrt signer's trust.\n\t\t\tsigneeKey.effectiveTrust = signeeKey.effectiveTrust.Add(\n\t\t\t\tsignature.Trust.Min(current.effectiveTrust),\n\t\t\t)\n\t\t\tsigneeKey.signedBy = append(signeeKey.signedBy, current)\n\n\t\t\t// Is it the first time we can trust the signee?\n\t\t\tif signeeKey.effectiveTrust >= TrustThreshold {\n\t\t\t\tif !visited[signee] {\n\t\t\t\t\tqueue = append(queue, signeeKey)\n\t\t\t\t\tvisited[signee] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tk.stale = false\n}",
  "func (client AppsClient) StartWebSiteNetworkTraceResponder(resp *http.Response) (result String, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}",
  "func (n *Node) join() (*Node, error) {\n\tif !n.IsAllowed() {\n\t\terr := errors.New(fmt.Sprintln(n.Nodestr, \"is not allowd\"))\n\t\treturn nil, err\n\t}\n\tres, err := n.Talk(\"/join/\"+n.Myself.toxstring(), true, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Println(n.Nodestr, \"response of join:\", res)\n\tswitch len(res) {\n\tcase 0:\n\t\treturn nil, errors.New(\"illegal response\")\n\tcase 1:\n\t\tif res[0] != \"WELCOME\" {\n\t\t\treturn nil, errors.New(\"not welcomed\")\n\t\t}\n\t\treturn nil, nil\n\t}\n\tnn, err := newNode(res[1])\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, err\n\t}\n\tif res[0] != \"WELCOME\" {\n\t\terr = errors.New(\"not welcomed\")\n\t}\n\treturn nn, err\n}",
  "func StartTweeting(twitter *twitter.Client, storageBackend storage.Pool, tweetTime time.Duration) {\n\n\t// Setup tweet scheduling\n\tts := &TweetSearch{\n\t\tChannel:   make(chan *Tweet),\n\t\tTrending:  trendingwrap.NewClient(),\n\t\tStorage:   storageBackend,\n\t\tURLLength: twitter.Configuration.ShortUrlLengthHttps,\n\t}\n\tSetupRegularTweetSearchProcess(ts, tweetTime)\n\tlog.Println(\"Setup complete. Lets wait for the first trending project...\")\n\n\t// Waiting for tweets ...\n\tfor tweet := range ts.Channel {\n\t\t// Sometimes it happens that we won`t get a project.\n\t\t// In this situation we try to avoid empty tweets like ...\n\t\t//\t* https://twitter.com/TrendingGithub/status/628714326564696064\n\t\t//\t* https://twitter.com/TrendingGithub/status/628530032361795584\n\t\t//\t* https://twitter.com/TrendingGithub/status/628348405790711808\n\t\t// we will return here\n\t\t// We do this check here and not in tweets.go, because otherwise\n\t\t// a new tweet won`t be scheduled\n\t\tif len(tweet.ProjectName) <= 0 {\n\t\t\tlog.Println(\"No project found. No tweet sent.\")\n\t\t\tcontinue\n\t\t}\n\n\t\t// In debug mode the twitter variable is not available, so we won`t tweet the tweet.\n\t\t// We will just output them.\n\t\t// This is a good development feature ;)\n\t\tif twitter.API == nil {\n\t\t\tlog.Printf(\"Tweet: %s (length: %d)\", tweet.Tweet, len(tweet.Tweet))\n\n\t\t} else {\n\t\t\tpostedTweet, err := twitter.Tweet(tweet.Tweet)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Tweet publishing: ❌  (%s)\\n\", err)\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Tweet publishing: ✅  (https://twitter.com/TrendingGithub/status/%s)\\n\", postedTweet.IdStr)\n\t\t\t}\n\t\t}\n\t\tts.MarkTweetAsAlreadyTweeted(tweet.ProjectName)\n\t}\n}",
  "func (sc *RSSScraper) Networks() []*types.Network {\n\treturn []*types.Network{sc.Network}\n}",
  "func (ut *sportPayload) Publicize() *SportPayload {\n\tvar pub SportPayload\n\tif ut.Active != nil {\n\t\tpub.Active = *ut.Active\n\t}\n\tif ut.EventTerm != nil {\n\t\tpub.EventTerm = *ut.EventTerm\n\t}\n\tif ut.GameTerm != nil {\n\t\tpub.GameTerm = *ut.GameTerm\n\t}\n\tif ut.ID != nil {\n\t\tpub.ID = ut.ID\n\t}\n\tif ut.MaxPreSplitPrice != nil {\n\t\tpub.MaxPreSplitPrice = *ut.MaxPreSplitPrice\n\t}\n\tif ut.Name != nil {\n\t\tpub.Name = *ut.Name\n\t}\n\treturn &pub\n}",
  "func NewWeb(client *gosip.SPClient, endpoint string, config *RequestConfig) *Web {\n\treturn &Web{\n\t\tclient:    client,\n\t\tendpoint:  endpoint,\n\t\tconfig:    config,\n\t\tmodifiers: NewODataMods(),\n\t}\n}",
  "func StartTweeting(twitter *Twitter, storageBackend storage.Pool) {\n\n\t// Setup tweet scheduling\n\tts := &TweetSearch{\n\t\tChannel:   make(chan *Tweet),\n\t\tTrending:  NewTrendingClient(),\n\t\tStorage:   storageBackend,\n\t\tURLLength: twitter.Configuration.ShortUrlLengthHttps,\n\t}\n\tSetupRegularTweetSearchProcess(ts)\n\n\t// Waiting for tweets ...\n\tfor tweet := range ts.Channel {\n\t\t// Sometimes it happens that we won`t get a project.\n\t\t// In this situation we try to avoid empty tweets like ...\n\t\t//\t* https://twitter.com/TrendingGithub/status/628714326564696064\n\t\t//\t* https://twitter.com/TrendingGithub/status/628530032361795584\n\t\t//\t* https://twitter.com/TrendingGithub/status/628348405790711808\n\t\t// we will return here\n\t\t// We do this check here and not in tweets.go, because otherwise\n\t\t// a new tweet won`t be scheduled\n\t\tif len(tweet.ProjectName) <= 0 {\n\t\t\tlog.Print(\"No project found. No tweet sent.\")\n\t\t\tcontinue\n\t\t}\n\n\t\t// In debug mode the twitter variable is not available, so we won`t tweet the tweet.\n\t\t// We will just output them.\n\t\t// This is a good development feature ;)\n\t\tif twitter.API == nil {\n\t\t\tlog.Printf(\"Tweet: %s (length: %d)\", tweet.Tweet, len(tweet.Tweet))\n\n\t\t} else {\n\t\t\tpostedTweet, err := twitter.Tweet(tweet.Tweet)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Tweet %s posted\", postedTweet.IdStr)\n\t\t\t}\n\t\t}\n\t\tts.MarkTweetAsAlreadyTweeted(tweet.ProjectName)\n\t}\n}",
  "func New(url, token string, mock bool, l *logrus.Logger) Nest {\n\n\tinitLog(l)\n\n\tlogDebug(funcName(), \"New nest structure\", url)\n\n\t// Read mock file\n\tif mock {\n\t\tlogWarn(funcName(), \"Mock activated !!!\")\n\t\tmockFileByte = readFile(mockFile)\n\t}\n\n\trest = http.New(log)\n\n\treturn &nest{url: url, token: token, mock: mock}\n\n}",
  "func (m *SharingLink) GetWebUrl()(*string) {\n    val, err := m.GetBackingStore().Get(\"webUrl\")\n    if err != nil {\n        panic(err)\n    }\n    if val != nil {\n        return val.(*string)\n    }\n    return nil\n}",
  "func (m *EducationAssignment) GetWebUrl()(*string) {\n    return m.webUrl\n}",
  "func WebAddress() string {\n\tif IsSukebei() {\n\t\treturn Get().WebAddress.Sukebei\n\t} else {\n\t\treturn Get().WebAddress.Nyaa\n\t}\n}",
  "func NnTsp(cities []common.City) common.Tour {\n\tstart := cities[0]\n\tunvisited := cities[1:]\n\tt := common.NewTour([]common.City{start})\n\tfor len(unvisited) > 0 {\n\t\tc := nearestNeighbor(t.Cities()[len(t.Cities())-1], unvisited)\n\t\tt.Append(c)\n\t\tdeleteFrom(c, &unvisited)\n\t}\n\n\treturn *t\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	InitImplDefaults initializes safe defaults for all fields in Spitter. 
 | 
	func (spitterImpl *SpitterImpl) InitImplDefaults() {
	spitterImpl.SpiderlingImpl.InitImplDefaults()
	spitterImpl.spittingWebToNestImpl = nil
} 
 | 
	[
  "func (forecastImpl *ForecastImpl) InitImplDefaults() {\n\tforecastImpl.GameObjectImpl.InitImplDefaults()\n\n\tforecastImpl.controllingPlayerImpl = nil\n\tforecastImpl.directionImpl = \"\"\n\tforecastImpl.intensityImpl = 0\n}",
  "func (playerImpl *PlayerImpl) InitImplDefaults() {\n\tplayerImpl.GameObjectImpl.InitImplDefaults()\n\n\tplayerImpl.bribesRemainingImpl = 0\n\tplayerImpl.buildingsImpl = []anarchy.Building{}\n\tplayerImpl.clientTypeImpl = \"\"\n\tplayerImpl.fireDepartmentsImpl = []anarchy.FireDepartment{}\n\tplayerImpl.headquartersImpl = nil\n\tplayerImpl.lostImpl = true\n\tplayerImpl.nameImpl = \"\"\n\tplayerImpl.opponentImpl = nil\n\tplayerImpl.policeDepartmentsImpl = []anarchy.PoliceDepartment{}\n\tplayerImpl.reasonLostImpl = \"\"\n\tplayerImpl.reasonWonImpl = \"\"\n\tplayerImpl.timeRemainingImpl = 0\n\tplayerImpl.warehousesImpl = []anarchy.Warehouse{}\n\tplayerImpl.weatherStationsImpl = []anarchy.WeatherStation{}\n\tplayerImpl.wonImpl = true\n}",
  "func (playerImpl *PlayerImpl) InitImplDefaults() {\n\tplayerImpl.GameObjectImpl.InitImplDefaults()\n\n\tplayerImpl.clientTypeImpl = \"\"\n\tplayerImpl.goldImpl = 0\n\tplayerImpl.infamyImpl = 0\n\tplayerImpl.lostImpl = true\n\tplayerImpl.nameImpl = \"\"\n\tplayerImpl.opponentImpl = nil\n\tplayerImpl.portImpl = nil\n\tplayerImpl.reasonLostImpl = \"\"\n\tplayerImpl.reasonWonImpl = \"\"\n\tplayerImpl.timeRemainingImpl = 0\n\tplayerImpl.unitsImpl = []pirates.Unit{}\n\tplayerImpl.wonImpl = true\n}",
  "func (jobImpl *JobImpl) InitImplDefaults() {\n\tjobImpl.GameObjectImpl.InitImplDefaults()\n\n\tjobImpl.carryLimitImpl = 0\n\tjobImpl.damageImpl = 0\n\tjobImpl.energyImpl = 0\n\tjobImpl.movesImpl = 0\n\tjobImpl.rangeImpl = 0\n\tjobImpl.shieldImpl = 0\n\tjobImpl.titleImpl = \"\"\n\tjobImpl.unitCostImpl = 0\n}",
  "func (unitImpl *UnitImpl) InitImplDefaults() {\n\tunitImpl.GameObjectImpl.InitImplDefaults()\n\n\tunitImpl.actedImpl = true\n\tunitImpl.dashXImpl = 0\n\tunitImpl.dashYImpl = 0\n\tunitImpl.energyImpl = 0\n\tunitImpl.genariumImpl = 0\n\tunitImpl.isBusyImpl = true\n\tunitImpl.jobImpl = nil\n\tunitImpl.legendariumImpl = 0\n\tunitImpl.movesImpl = 0\n\tunitImpl.mythiciteImpl = 0\n\tunitImpl.ownerImpl = nil\n\tunitImpl.protectorImpl = nil\n\tunitImpl.rariumImpl = 0\n\tunitImpl.shieldImpl = 0\n\tunitImpl.xImpl = 0\n\tunitImpl.yImpl = 0\n}",
  "func (fireDepartmentImpl *FireDepartmentImpl) InitImplDefaults() {\n\tfireDepartmentImpl.BuildingImpl.InitImplDefaults()\n\n\tfireDepartmentImpl.fireExtinguishedImpl = 0\n}",
  "func (ctx *Context) InitDefaults() {\n\tctx.Insecure = defaultInsecure\n\tctx.User = defaultUser\n}",
  "func init() {\n\tuserFields := schema.User{}.Fields()\n\t_ = userFields\n\t// userDescUsername is the schema descriptor for username field.\n\tuserDescUsername := userFields[0].Descriptor()\n\t// user.DefaultUsername holds the default value on creation for the username field.\n\tuser.DefaultUsername = userDescUsername.Default.(string)\n\t// userDescName is the schema descriptor for name field.\n\tuserDescName := userFields[1].Descriptor()\n\t// user.DefaultName holds the default value on creation for the name field.\n\tuser.DefaultName = userDescName.Default.(string)\n\t// userDescSurname is the schema descriptor for surname field.\n\tuserDescSurname := userFields[2].Descriptor()\n\t// user.DefaultSurname holds the default value on creation for the surname field.\n\tuser.DefaultSurname = userDescSurname.Default.(string)\n}",
  "func (o *GetComponentByIDParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
  "func (o *GetSubjectLevelConfigParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
  "func (o *UpdateLookmlModelParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
  "func populateDefaults(numParams int, info *methodInfo, rv reflect.Value) {\n\t// When there are no more parameters left in the supplied parameters,\n\t// any remaining struct fields must be optional.  Thus, populate them\n\t// with their associated default value as needed.\n\tfor i := numParams; i < info.maxParams; i++ {\n\t\trvf := rv.Field(i)\n\t\tif defaultVal, ok := info.defaults[i]; ok {\n\t\t\trvf.Set(defaultVal)\n\t\t}\n\t}\n}",
  "func (c *AuthConfig) init() {\n\tif c.Provisioners == nil {\n\t\tc.Provisioners = provisioner.List{}\n\t}\n\tif c.Template == nil {\n\t\tc.Template = &ASN1DN{}\n\t}\n\tif c.Backdate == nil {\n\t\tc.Backdate = &provisioner.Duration{\n\t\t\tDuration: DefaultBackdate,\n\t\t}\n\t}\n}",
  "func InitDefault(o interface{}) {\n\tt := reflect.TypeOf(o).Elem()\n\tv := reflect.ValueOf(o).Elem()\n\n\tfieldCount := t.NumField()\n\n\tfor i := 0; i < fieldCount; i++ {\n\t\tfield := t.Field(i)\n\n\t\tif v.Field(i).Kind() == reflect.Struct {\n\t\t\tInitDefault(v.Field(i).Addr().Interface())\n\t\t\tcontinue\n\t\t}\n\n\t\tif defaultValue, ok := field.Tag.Lookup(\"default\"); ok {\n\n\t\t\tswitch defaultValue {\n\t\t\tcase \"UNSETFLOAT\":\n\t\t\t\tv.Field(i).SetFloat(UNSETFLOAT)\n\t\t\tcase \"UNSETINT\":\n\t\t\t\tv.Field(i).SetInt(UNSETINT)\n\t\t\tcase \"-1\":\n\t\t\t\tv.Field(i).SetInt(-1)\n\t\t\tcase \"true\":\n\t\t\t\tv.Field(i).SetBool(true)\n\t\t\tdefault:\n\t\t\t\tlog.Panic(\"Unknown defaultValue\", zap.Reflect(\"default\", v))\n\t\t\t}\n\t\t}\n\n\t}\n}",
  "func (o *AuthenticateParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
  "func (c *engine) setDefaults() (err error) {\n\tm, err := ConfigToMap(c.cast.Spec.Defaults)\n\tif err != nil {\n\t\treturn\n\t}\n\tc.values[string(v1alpha1.ConfigTLP)] = m\n\treturn\n}",
  "func (o *GetHostStorageParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
  "func (o *GetCurrentGenerationParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
  "func (s *Notif) SetDefaults() {\n\t// noop\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	DeltaMerge merges the delta for a given attribute in Spitter. 
 | 
	func (spitterImpl *SpitterImpl) DeltaMerge(
	deltaMerge base.DeltaMerge,
	attribute string,
	delta interface{},
) (bool, error) {
	merged, err := spitterImpl.SpiderlingImpl.DeltaMerge(
		deltaMerge,
		attribute,
		delta,
	)
	if merged || err != nil {
		return merged, err
	}
	spidersDeltaMerge, ok := deltaMerge.(DeltaMerge)
	if !ok {
		return false, errors.New(
			"deltaMerge is not the expected type of: " +
				"'spiders.impl.DeltaMerge'",
		)
	}
	switch attribute {
	case "spittingWebToNest":
		spitterImpl.spittingWebToNestImpl = spidersDeltaMerge.Nest(delta)
		return true, nil
	}
	return false, nil // no errors in delta merging
} 
 | 
	[
  "func (forecastImpl *ForecastImpl) DeltaMerge(\n\tdeltaMerge base.DeltaMerge,\n\tattribute string,\n\tdelta interface{},\n) (bool, error) {\n\tmerged, err := forecastImpl.GameObjectImpl.DeltaMerge(\n\t\tdeltaMerge,\n\t\tattribute,\n\t\tdelta,\n\t)\n\tif merged || err != nil {\n\t\treturn merged, err\n\t}\n\n\tanarchyDeltaMerge, ok := deltaMerge.(DeltaMerge)\n\tif !ok {\n\t\treturn false, errors.New(\n\t\t\t\"deltaMerge is not the expected type of: \" +\n\t\t\t\t\"'anarchy.impl.DeltaMerge'\",\n\t\t)\n\t}\n\n\tswitch attribute {\n\tcase \"controllingPlayer\":\n\t\tforecastImpl.controllingPlayerImpl = anarchyDeltaMerge.Player(delta)\n\t\treturn true, nil\n\tcase \"direction\":\n\t\tforecastImpl.directionImpl = anarchyDeltaMerge.String(delta)\n\t\treturn true, nil\n\tcase \"intensity\":\n\t\tforecastImpl.intensityImpl = anarchyDeltaMerge.Int(delta)\n\t\treturn true, nil\n\t}\n\n\treturn false, nil // no errors in delta merging\n}",
  "func (playerImpl *PlayerImpl) DeltaMerge(\n\tdeltaMerge base.DeltaMerge,\n\tattribute string,\n\tdelta interface{},\n) (bool, error) {\n\tmerged, err := playerImpl.GameObjectImpl.DeltaMerge(\n\t\tdeltaMerge,\n\t\tattribute,\n\t\tdelta,\n\t)\n\tif merged || err != nil {\n\t\treturn merged, err\n\t}\n\n\tpiratesDeltaMerge, ok := deltaMerge.(DeltaMerge)\n\tif !ok {\n\t\treturn false, errors.New(\n\t\t\t\"deltaMerge is not the expected type of: \" +\n\t\t\t\t\"'pirates.impl.DeltaMerge'\",\n\t\t)\n\t}\n\n\tswitch attribute {\n\tcase \"clientType\":\n\t\tplayerImpl.clientTypeImpl = piratesDeltaMerge.String(delta)\n\t\treturn true, nil\n\tcase \"gold\":\n\t\tplayerImpl.goldImpl = piratesDeltaMerge.Int(delta)\n\t\treturn true, nil\n\tcase \"infamy\":\n\t\tplayerImpl.infamyImpl = piratesDeltaMerge.Int(delta)\n\t\treturn true, nil\n\tcase \"lost\":\n\t\tplayerImpl.lostImpl = piratesDeltaMerge.Boolean(delta)\n\t\treturn true, nil\n\tcase \"name\":\n\t\tplayerImpl.nameImpl = piratesDeltaMerge.String(delta)\n\t\treturn true, nil\n\tcase \"opponent\":\n\t\tplayerImpl.opponentImpl = piratesDeltaMerge.Player(delta)\n\t\treturn true, nil\n\tcase \"port\":\n\t\tplayerImpl.portImpl = piratesDeltaMerge.Port(delta)\n\t\treturn true, nil\n\tcase \"reasonLost\":\n\t\tplayerImpl.reasonLostImpl = piratesDeltaMerge.String(delta)\n\t\treturn true, nil\n\tcase \"reasonWon\":\n\t\tplayerImpl.reasonWonImpl = piratesDeltaMerge.String(delta)\n\t\treturn true, nil\n\tcase \"timeRemaining\":\n\t\tplayerImpl.timeRemainingImpl = piratesDeltaMerge.Float(delta)\n\t\treturn true, nil\n\tcase \"units\":\n\t\tplayerImpl.unitsImpl = piratesDeltaMerge.ArrayOfUnit(&playerImpl.unitsImpl, delta)\n\t\treturn true, nil\n\tcase \"won\":\n\t\tplayerImpl.wonImpl = piratesDeltaMerge.Boolean(delta)\n\t\treturn true, nil\n\t}\n\n\treturn false, nil // no errors in delta merging\n}",
  "func (playerImpl *PlayerImpl) DeltaMerge(\n\tdeltaMerge base.DeltaMerge,\n\tattribute string,\n\tdelta interface{},\n) (bool, error) {\n\tmerged, err := playerImpl.GameObjectImpl.DeltaMerge(\n\t\tdeltaMerge,\n\t\tattribute,\n\t\tdelta,\n\t)\n\tif merged || err != nil {\n\t\treturn merged, err\n\t}\n\n\tanarchyDeltaMerge, ok := deltaMerge.(DeltaMerge)\n\tif !ok {\n\t\treturn false, errors.New(\n\t\t\t\"deltaMerge is not the expected type of: \" +\n\t\t\t\t\"'anarchy.impl.DeltaMerge'\",\n\t\t)\n\t}\n\n\tswitch attribute {\n\tcase \"bribesRemaining\":\n\t\tplayerImpl.bribesRemainingImpl = anarchyDeltaMerge.Int(delta)\n\t\treturn true, nil\n\tcase \"buildings\":\n\t\tplayerImpl.buildingsImpl = anarchyDeltaMerge.ArrayOfBuilding(&playerImpl.buildingsImpl, delta)\n\t\treturn true, nil\n\tcase \"clientType\":\n\t\tplayerImpl.clientTypeImpl = anarchyDeltaMerge.String(delta)\n\t\treturn true, nil\n\tcase \"fireDepartments\":\n\t\tplayerImpl.fireDepartmentsImpl = anarchyDeltaMerge.ArrayOfFireDepartment(&playerImpl.fireDepartmentsImpl, delta)\n\t\treturn true, nil\n\tcase \"headquarters\":\n\t\tplayerImpl.headquartersImpl = anarchyDeltaMerge.Warehouse(delta)\n\t\treturn true, nil\n\tcase \"lost\":\n\t\tplayerImpl.lostImpl = anarchyDeltaMerge.Boolean(delta)\n\t\treturn true, nil\n\tcase \"name\":\n\t\tplayerImpl.nameImpl = anarchyDeltaMerge.String(delta)\n\t\treturn true, nil\n\tcase \"opponent\":\n\t\tplayerImpl.opponentImpl = anarchyDeltaMerge.Player(delta)\n\t\treturn true, nil\n\tcase \"policeDepartments\":\n\t\tplayerImpl.policeDepartmentsImpl = anarchyDeltaMerge.ArrayOfPoliceDepartment(&playerImpl.policeDepartmentsImpl, delta)\n\t\treturn true, nil\n\tcase \"reasonLost\":\n\t\tplayerImpl.reasonLostImpl = anarchyDeltaMerge.String(delta)\n\t\treturn true, nil\n\tcase \"reasonWon\":\n\t\tplayerImpl.reasonWonImpl = anarchyDeltaMerge.String(delta)\n\t\treturn true, nil\n\tcase \"timeRemaining\":\n\t\tplayerImpl.timeRemainingImpl = anarchyDeltaMerge.Float(delta)\n\t\treturn true, nil\n\tcase \"warehouses\":\n\t\tplayerImpl.warehousesImpl = anarchyDeltaMerge.ArrayOfWarehouse(&playerImpl.warehousesImpl, delta)\n\t\treturn true, nil\n\tcase \"weatherStations\":\n\t\tplayerImpl.weatherStationsImpl = anarchyDeltaMerge.ArrayOfWeatherStation(&playerImpl.weatherStationsImpl, delta)\n\t\treturn true, nil\n\tcase \"won\":\n\t\tplayerImpl.wonImpl = anarchyDeltaMerge.Boolean(delta)\n\t\treturn true, nil\n\t}\n\n\treturn false, nil // no errors in delta merging\n}",
  "func (unitImpl *UnitImpl) DeltaMerge(\n\tdeltaMerge base.DeltaMerge,\n\tattribute string,\n\tdelta interface{},\n) (bool, error) {\n\tmerged, err := unitImpl.GameObjectImpl.DeltaMerge(\n\t\tdeltaMerge,\n\t\tattribute,\n\t\tdelta,\n\t)\n\tif merged || err != nil {\n\t\treturn merged, err\n\t}\n\n\tstardashDeltaMerge, ok := deltaMerge.(DeltaMerge)\n\tif !ok {\n\t\treturn false, errors.New(\n\t\t\t\"deltaMerge is not the expected type of: \" +\n\t\t\t\t\"'stardash.impl.DeltaMerge'\",\n\t\t)\n\t}\n\n\tswitch attribute {\n\tcase \"acted\":\n\t\tunitImpl.actedImpl = stardashDeltaMerge.Boolean(delta)\n\t\treturn true, nil\n\tcase \"dashX\":\n\t\tunitImpl.dashXImpl = stardashDeltaMerge.Float(delta)\n\t\treturn true, nil\n\tcase \"dashY\":\n\t\tunitImpl.dashYImpl = stardashDeltaMerge.Float(delta)\n\t\treturn true, nil\n\tcase \"energy\":\n\t\tunitImpl.energyImpl = stardashDeltaMerge.Int(delta)\n\t\treturn true, nil\n\tcase \"genarium\":\n\t\tunitImpl.genariumImpl = stardashDeltaMerge.Int(delta)\n\t\treturn true, nil\n\tcase \"isBusy\":\n\t\tunitImpl.isBusyImpl = stardashDeltaMerge.Boolean(delta)\n\t\treturn true, nil\n\tcase \"job\":\n\t\tunitImpl.jobImpl = stardashDeltaMerge.Job(delta)\n\t\treturn true, nil\n\tcase \"legendarium\":\n\t\tunitImpl.legendariumImpl = stardashDeltaMerge.Int(delta)\n\t\treturn true, nil\n\tcase \"moves\":\n\t\tunitImpl.movesImpl = stardashDeltaMerge.Float(delta)\n\t\treturn true, nil\n\tcase \"mythicite\":\n\t\tunitImpl.mythiciteImpl = stardashDeltaMerge.Int(delta)\n\t\treturn true, nil\n\tcase \"owner\":\n\t\tunitImpl.ownerImpl = stardashDeltaMerge.Player(delta)\n\t\treturn true, nil\n\tcase \"protector\":\n\t\tunitImpl.protectorImpl = stardashDeltaMerge.Unit(delta)\n\t\treturn true, nil\n\tcase \"rarium\":\n\t\tunitImpl.rariumImpl = stardashDeltaMerge.Int(delta)\n\t\treturn true, nil\n\tcase \"shield\":\n\t\tunitImpl.shieldImpl = stardashDeltaMerge.Int(delta)\n\t\treturn true, nil\n\tcase \"x\":\n\t\tunitImpl.xImpl = stardashDeltaMerge.Float(delta)\n\t\treturn true, nil\n\tcase \"y\":\n\t\tunitImpl.yImpl = stardashDeltaMerge.Float(delta)\n\t\treturn true, nil\n\t}\n\n\treturn false, nil // no errors in delta merging\n}",
  "func (jobImpl *JobImpl) DeltaMerge(\n\tdeltaMerge base.DeltaMerge,\n\tattribute string,\n\tdelta interface{},\n) (bool, error) {\n\tmerged, err := jobImpl.GameObjectImpl.DeltaMerge(\n\t\tdeltaMerge,\n\t\tattribute,\n\t\tdelta,\n\t)\n\tif merged || err != nil {\n\t\treturn merged, err\n\t}\n\n\tstardashDeltaMerge, ok := deltaMerge.(DeltaMerge)\n\tif !ok {\n\t\treturn false, errors.New(\n\t\t\t\"deltaMerge is not the expected type of: \" +\n\t\t\t\t\"'stardash.impl.DeltaMerge'\",\n\t\t)\n\t}\n\n\tswitch attribute {\n\tcase \"carryLimit\":\n\t\tjobImpl.carryLimitImpl = stardashDeltaMerge.Int(delta)\n\t\treturn true, nil\n\tcase \"damage\":\n\t\tjobImpl.damageImpl = stardashDeltaMerge.Int(delta)\n\t\treturn true, nil\n\tcase \"energy\":\n\t\tjobImpl.energyImpl = stardashDeltaMerge.Int(delta)\n\t\treturn true, nil\n\tcase \"moves\":\n\t\tjobImpl.movesImpl = stardashDeltaMerge.Int(delta)\n\t\treturn true, nil\n\tcase \"range\":\n\t\tjobImpl.rangeImpl = stardashDeltaMerge.Int(delta)\n\t\treturn true, nil\n\tcase \"shield\":\n\t\tjobImpl.shieldImpl = stardashDeltaMerge.Int(delta)\n\t\treturn true, nil\n\tcase \"title\":\n\t\tjobImpl.titleImpl = stardashDeltaMerge.String(delta)\n\t\treturn true, nil\n\tcase \"unitCost\":\n\t\tjobImpl.unitCostImpl = stardashDeltaMerge.Int(delta)\n\t\treturn true, nil\n\t}\n\n\treturn false, nil // no errors in delta merging\n}",
  "func (fireDepartmentImpl *FireDepartmentImpl) DeltaMerge(\n\tdeltaMerge base.DeltaMerge,\n\tattribute string,\n\tdelta interface{},\n) (bool, error) {\n\tmerged, err := fireDepartmentImpl.BuildingImpl.DeltaMerge(\n\t\tdeltaMerge,\n\t\tattribute,\n\t\tdelta,\n\t)\n\tif merged || err != nil {\n\t\treturn merged, err\n\t}\n\n\tanarchyDeltaMerge, ok := deltaMerge.(DeltaMerge)\n\tif !ok {\n\t\treturn false, errors.New(\n\t\t\t\"deltaMerge is not the expected type of: \" +\n\t\t\t\t\"'anarchy.impl.DeltaMerge'\",\n\t\t)\n\t}\n\n\tswitch attribute {\n\tcase \"fireExtinguished\":\n\t\tfireDepartmentImpl.fireExtinguishedImpl = anarchyDeltaMerge.Int(delta)\n\t\treturn true, nil\n\t}\n\n\treturn false, nil // no errors in delta merging\n}",
  "func (a *AttributeDefinition) Merge(other *AttributeDefinition) *AttributeDefinition {\n\tif other == nil {\n\t\treturn a\n\t}\n\tif a == nil {\n\t\treturn other\n\t}\n\tleft := a.Type.(Object)\n\tright := other.Type.(Object)\n\tif left == nil || right == nil {\n\t\tpanic(\"cannot merge non object attributes\") // bug\n\t}\n\tfor n, v := range right {\n\t\tleft[n] = v\n\t}\n\treturn a\n}",
  "func MergeAttr(a Attributes) MergeOption {\n\treturn func(m *mergeReq) {\n\t\tm.Attributes = &a\n\t}\n}",
  "func (inj *Injector) Merge(i *Injector, override bool) error {\n\tfor name, v := range i.data {\n\t\tif _, ok := inj.data[name]; ok && !override {\n\t\t\treturn ErrInjectorSetTwicePointer(v)\n\t\t}\n\t\tinj.data[name] = v\n\t}\n\treturn nil\n}",
  "func (mb *MutableBag) Merge(bags ...*MutableBag) error {\n\t// first step is to make sure there are no redundant definitions of the same attribute\n\tkeys := make(map[string]bool)\n\tfor _, bag := range bags {\n\t\tif bag == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor k := range bag.values {\n\t\t\tif keys[k] {\n\t\t\t\treturn fmt.Errorf(\"conflicting value for attribute %s\", k)\n\t\t\t}\n\t\t\tkeys[k] = true\n\t\t}\n\t}\n\n\t// now that we know there are no conflicting definitions, do the actual merging...\n\tfor _, bag := range bags {\n\t\tif bag == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor k, v := range bag.values {\n\t\t\tmb.values[k] = copyValue(v)\n\t\t}\n\t}\n\n\treturn nil\n}",
  "func (i *instance) merge(seq int, dep map[paxi.ID]int) {\n\tif seq > i.seq {\n\t\ti.seq = seq\n\t\ti.changed = true\n\t}\n\tfor id, d := range dep {\n\t\tif d > i.dep[id] {\n\t\t\ti.dep[id] = d\n\t\t\ti.changed = true\n\t\t}\n\t}\n}",
  "func (o Event) Merge(d EventDiff) Event {\n\tn, _ := o.merge(&d)\n\treturn n\n}",
  "func(t *TargImp) Merge(other Target) (Target, os.Error) {\n\tx := other.(*TargImp)\n\tif x.Name() != t.Name() {\n\t\treturn nil, os.NewError(\"cannot merge targets with different names\")\n\t}\n\t\n\tfor y := 0; y < x.dependlen; y++ {\n\t\tif !t.isDependent(x.dependencies[y]) {\n\t\t\tt.dependencies[t.dependlen] = x.dependencies[y]\n\t\t\tt.dependlen++\n\t\t}\n\t}\n\t\n\treturn t, nil\n}",
  "func (entry *Entry) Merge(merge *Entry) {\n\tfor name, value := range merge.fields {\n\t\tentry.SetField(name, value)\n\t}\n}",
  "func (s *Store) logMerge(\n\tctx context.Context, txn *kv.Txn, updatedLHSDesc, rhsDesc roachpb.RangeDescriptor,\n) error {\n\tif !s.cfg.LogRangeEvents {\n\t\treturn nil\n\t}\n\treturn s.insertRangeLogEvent(ctx, txn, kvserverpb.RangeLogEvent{\n\t\tTimestamp:    selectEventTimestamp(s, txn.ReadTimestamp()),\n\t\tRangeID:      updatedLHSDesc.RangeID,\n\t\tEventType:    kvserverpb.RangeLogEventType_merge,\n\t\tStoreID:      s.StoreID(),\n\t\tOtherRangeID: rhsDesc.RangeID,\n\t\tInfo: &kvserverpb.RangeLogEvent_Info{\n\t\t\tUpdatedDesc: &updatedLHSDesc,\n\t\t\tRemovedDesc: &rhsDesc,\n\t\t},\n\t})\n}",
  "func (o *Object) Merge(other *Object) *Object {\n\tres := o\n\tfor _, nat := range *other {\n\t\tres.Set(nat.Name, DupAtt(nat.Attribute))\n\t}\n\treturn res\n}",
  "func DeltaMerge(tx *sql.Tx, targetTable, tempTable, conditional string) error {\n\tif tx == nil || targetTable == \"\" || tempTable == \"\" || conditional == \"\" {\n\t\treturn nil\n\t}\n\tdeleteQuery := fmt.Sprintf(`\n\t\t\tDELETE FROM %v\n\t\t\tUSING %v\n\t\t\tWHERE %v\n\t`, targetTable, tempTable, conditional)\n\n\tif _, err := tx.Exec(deleteQuery); err != nil {\n\t\treturn err\n\t}\n\n\tinsertQuery := fmt.Sprintf(\"INSERT INTO %v SELECT DISTINCT * FROM %v\", targetTable, tempTable)\n\tif _, err := tx.Exec(insertQuery); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
  "func (m groupSalaryStatsMap) merge(key string, src *groupSalaryStats) {\n\tif s, ok := m[key]; ok {\n\t\ts.salaries.merge(&src.salaries)\n\t\treturn\n\t}\n\tm[key] = src\n}",
  "func (r *AggregateMetadata) Merge(other AggregateMetadata) {\n\tfor k, v := range other {\n\t\t(*r)[k] += v\n\t}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	MakePaymentHandler returns a Handler for making a payment 
 | 
	func MakePaymentHandler(formatter *render.Render) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		log.Printf("Make payment handler function")
		fmt.Println("Make payment handler router function")
		var payment model.Payment
		_ = json.NewDecoder(req.Body).Decode(&payment)
		transactionID, _ := service.MakePayment(&payment)
		formatter.JSON(w, http.StatusOK, struct {
			Success       bool
			Message       string
			TransactionID int
		}{
			true,
			"Fee Payment Transaction Recorded Successfully",
			transactionID,
		})
	}
} 
 | 
	[
  "func MakePaymentHandlers(r *mux.Router, n negroni.Negroni, service payment.UseCase) {\n\tpaymentsRouter := r.PathPrefix(\"/v1/payments\").Subrouter()\n\tpaymentsRouter.Handle(\"\", n.With(\n\t\tnegroni.Wrap(paymentFindAll(service)),\n\t)).Methods(\"GET\", \"OPTIONS\")\n\n\tpaymentsRouter.Handle(\"/{paymentID}\", n.With(\n\t\tnegroni.Wrap(paymentFind(service)),\n\t)).Methods(\"GET\", \"OPTIONS\")\n\n\tpaymentsRouter.Handle(\"\", n.With(\n\t\tnegroni.Wrap(paymentCreate(service)),\n\t)).Methods(\"POST\", \"OPTIONS\")\n\n\tpaymentsRouter.Handle(\"/{paymentID}\", n.With(\n\t\tnegroni.Wrap(paymentUpdate(service)),\n\t)).Methods(\"PUT\", \"OPTIONS\")\n\n\tpaymentsRouter.Handle(\"/{paymentID}\", n.With(\n\t\tnegroni.Wrap(paymentDelete(service)),\n\t)).Methods(\"DELETE\", \"OPTIONS\")\n}",
  "func NewPaymentHandler(svc go_payd.PaymentService) *paymentHandler {\r\n\treturn &paymentHandler{\r\n\t\tsvc: svc,\r\n\t}\r\n}",
  "func (h CreatePaymentRequestHandler) Handle(params paymentrequestop.CreatePaymentRequestParams) middleware.Responder {\n\t// TODO: authorization to create payment request\n\n\treturn h.AuditableAppContextFromRequestWithErrors(params.HTTPRequest,\n\t\tfunc(appCtx appcontext.AppContext) (middleware.Responder, error) {\n\n\t\t\tpayload := params.Body\n\t\t\tif payload == nil {\n\t\t\t\terr := apperror.NewBadDataError(\"Invalid payment request: params Body is nil\")\n\t\t\t\terrPayload := payloads.ClientError(handlers.SQLErrMessage, err.Error(), h.GetTraceIDFromRequest(params.HTTPRequest))\n\t\t\t\tappCtx.Logger().Error(err.Error(), zap.Any(\"payload\", errPayload))\n\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestBadRequest().WithPayload(errPayload), err\n\t\t\t}\n\n\t\t\tappCtx.Logger().Info(\"primeapi.CreatePaymentRequestHandler info\", zap.String(\"pointOfContact\", params.Body.PointOfContact))\n\n\t\t\tmoveTaskOrderIDString := payload.MoveTaskOrderID.String()\n\t\t\tmtoID, err := uuid.FromString(moveTaskOrderIDString)\n\t\t\tif err != nil {\n\t\t\t\tappCtx.Logger().Error(\"Invalid payment request: params MoveTaskOrderID cannot be converted to a UUID\",\n\t\t\t\t\tzap.String(\"MoveTaskOrderID\", moveTaskOrderIDString), zap.Error(err))\n\t\t\t\t// create a custom verrs for returning a 422\n\t\t\t\tverrs :=\n\t\t\t\t\t&validate.Errors{Errors: map[string][]string{\n\t\t\t\t\t\t\"move_id\": {\"id cannot be converted to UUID\"},\n\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\terrPayload := payloads.ValidationError(err.Error(), h.GetTraceIDFromRequest(params.HTTPRequest), verrs)\n\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestUnprocessableEntity().WithPayload(errPayload), err\n\t\t\t}\n\n\t\t\tisFinal := false\n\t\t\tif payload.IsFinal != nil {\n\t\t\t\tisFinal = *payload.IsFinal\n\t\t\t}\n\n\t\t\tpaymentRequest := models.PaymentRequest{\n\t\t\t\tIsFinal:         isFinal,\n\t\t\t\tMoveTaskOrderID: mtoID,\n\t\t\t}\n\n\t\t\t// Build up the paymentRequest.PaymentServiceItems using the incoming payload to offload Swagger data coming\n\t\t\t// in from the API. 
These paymentRequest.PaymentServiceItems will be used as a temp holder to process the incoming API data\n\t\t\tvar verrs *validate.Errors\n\t\t\tpaymentRequest.PaymentServiceItems, verrs, err = h.buildPaymentServiceItems(appCtx, payload)\n\n\t\t\tif err != nil || verrs.HasAny() {\n\n\t\t\t\tappCtx.Logger().Error(\"could not build service items\", zap.Error(err))\n\t\t\t\t// TODO: do not bail out before creating the payment request, we need the failed record\n\t\t\t\t//       we should create the failed record and store it as failed with a rejection\n\t\t\t\terrPayload := payloads.ValidationError(err.Error(), h.GetTraceIDFromRequest(params.HTTPRequest), verrs)\n\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestUnprocessableEntity().WithPayload(errPayload), err\n\t\t\t}\n\n\t\t\tcreatedPaymentRequest, err := h.PaymentRequestCreator.CreatePaymentRequestCheck(appCtx, &paymentRequest)\n\t\t\tif err != nil {\n\t\t\t\tappCtx.Logger().Error(\"Error creating payment request\", zap.Error(err))\n\t\t\t\tswitch e := err.(type) {\n\t\t\t\tcase apperror.InvalidCreateInputError:\n\t\t\t\t\tverrs := e.ValidationErrors\n\t\t\t\t\tdetail := err.Error()\n\t\t\t\t\tpayload := payloads.ValidationError(detail, h.GetTraceIDFromRequest(params.HTTPRequest), verrs)\n\n\t\t\t\t\tappCtx.Logger().Error(\"Payment Request\",\n\t\t\t\t\t\tzap.Any(\"payload\", payload))\n\t\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestUnprocessableEntity().WithPayload(payload), err\n\n\t\t\t\tcase apperror.NotFoundError:\n\t\t\t\t\tpayload := payloads.ClientError(handlers.NotFoundMessage, err.Error(), h.GetTraceIDFromRequest(params.HTTPRequest))\n\n\t\t\t\t\tappCtx.Logger().Error(\"Payment Request\",\n\t\t\t\t\t\tzap.Any(\"payload\", payload))\n\t\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestNotFound().WithPayload(payload), err\n\t\t\t\tcase apperror.ConflictError:\n\t\t\t\t\tpayload := payloads.ClientError(handlers.ConflictErrMessage, err.Error(), h.GetTraceIDFromRequest(params.HTTPRequest))\n\n\t\t\t\t\tappCtx.Logger().Error(\"Payment Request\",\n\t\t\t\t\t\tzap.Any(\"payload\", payload))\n\t\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestConflict().WithPayload(payload), err\n\t\t\t\tcase apperror.InvalidInputError:\n\t\t\t\t\tpayload := payloads.ValidationError(err.Error(), h.GetTraceIDFromRequest(params.HTTPRequest), &validate.Errors{})\n\n\t\t\t\t\tappCtx.Logger().Error(\"Payment Request\",\n\t\t\t\t\t\tzap.Any(\"payload\", payload))\n\t\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestUnprocessableEntity().WithPayload(payload), err\n\t\t\t\tcase apperror.QueryError:\n\t\t\t\t\tif e.Unwrap() != nil {\n\t\t\t\t\t\t// If you can unwrap, log the internal error (usually a pq error) for better debugging\n\t\t\t\t\t\tappCtx.Logger().Error(\"primeapi.CreatePaymentRequestHandler query error\", zap.Error(e.Unwrap()))\n\t\t\t\t\t}\n\t\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestInternalServerError().WithPayload(\n\t\t\t\t\t\tpayloads.InternalServerError(nil, h.GetTraceIDFromRequest(params.HTTPRequest))), err\n\n\t\t\t\tcase *apperror.BadDataError:\n\t\t\t\t\tpayload := payloads.ClientError(handlers.BadRequestErrMessage, err.Error(), h.GetTraceIDFromRequest(params.HTTPRequest))\n\n\t\t\t\t\tappCtx.Logger().Error(\"Payment Request\",\n\t\t\t\t\t\tzap.Any(\"payload\", payload))\n\t\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestBadRequest().WithPayload(payload), err\n\t\t\t\tdefault:\n\t\t\t\t\tappCtx.Logger().Error(\"Payment Request\",\n\t\t\t\t\t\tzap.Any(\"payload\", 
payload))\n\t\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestInternalServerError().WithPayload(\n\t\t\t\t\t\tpayloads.InternalServerError(nil, h.GetTraceIDFromRequest(params.HTTPRequest))), err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturnPayload := payloads.PaymentRequest(createdPaymentRequest)\n\t\t\tappCtx.Logger().Info(\"Successful payment request creation for mto ID\", zap.String(\"moveID\", moveTaskOrderIDString))\n\t\t\treturn paymentrequestop.NewCreatePaymentRequestCreated().WithPayload(returnPayload), nil\n\t\t})\n}",
  "func paymentCreate(service payment.UseCase) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tdefer r.Body.Close()\n\t\tvar p *entity.Payment\n\t\terr := json.NewDecoder(r.Body).Decode(&p)\n\t\tif err != nil {\n\t\t\trespondWithError(w, http.StatusBadRequest, \"Invalid request payload\")\n\t\t\treturn\n\t\t}\n\t\tp.ID, err = service.Store(p)\n\t\tif err != nil {\n\t\t\trespondWithError(w, http.StatusInternalServerError, err.Error())\n\t\t\treturn\n\t\t}\n\t\trespondWithJSON(w, http.StatusCreated, p)\n\t})\n}",
  "func (a PaymentsClient) MakePayment() {\n\t// TODO: Make a remote call to the payment server and make a payment\n}",
  "func (s *Server) handleDashboardPayment() http.HandlerFunc {\n\tvar o sync.Once\n\tvar tpl *template.Template\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tctx, logger := GetLogger(s.getCtx(r))\n\t\to.Do(func() {\n\t\t\ttpl = s.loadWebTemplateDashboard(ctx, \"payment.html\")\n\t\t})\n\t\tctx, provider, data, errs, ok := s.createTemplateDataDashboard(w, r.WithContext(ctx), tpl, true)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tdata[TplParamActiveNav] = provider.GetURLBookings()\n\n\t\t//load the booking\n\t\tidStr := r.FormValue(URLParams.BookID)\n\t\tctx, book, ok := s.loadTemplateBook(w, r.WithContext(ctx), tpl, data, errs, idStr, true, false)\n\t\tif !ok {\n\t\t\ts.SetCookieErr(w, Err)\n\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLBookings(), http.StatusSeeOther)\n\t\t\treturn\n\t\t}\n\t\tdata[TplParamFormAction] = book.GetURLPayment()\n\n\t\t//check if a payment is supported, otherwise view the order\n\t\tif !book.SupportsPayment() {\n\t\t\thttp.Redirect(w, r.WithContext(ctx), book.GetURLView(), http.StatusSeeOther)\n\t\t\treturn\n\t\t}\n\n\t\t//check if already paid, in which case just view the payment\n\t\tif book.IsPaid() {\n\t\t\thttp.Redirect(w, r.WithContext(ctx), book.GetURLPaymentView(), http.StatusSeeOther)\n\t\t\treturn\n\t\t}\n\n\t\t//load the service\n\t\tnow := data[TplParamCurrentTime].(time.Time)\n\t\tctx, _, ok = s.loadTemplateService(w, r.WithContext(ctx), tpl, data, provider, book.Service.ID, now)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\t//check the method\n\t\tif r.Method == http.MethodGet {\n\t\t\tdata[TplParamDesc] = \"\"\n\t\t\tdata[TplParamEmail] = book.Client.Email\n\t\t\tdata[TplParamName] = book.Client.Name\n\t\t\tdata[TplParamPhone] = book.Client.Phone\n\t\t\tdata[TplParamPrice] = book.ComputeServicePrice()\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\n\t\t//read the form\n\t\tdesc := r.FormValue(URLParams.Desc)\n\t\temail := r.FormValue(URLParams.Email)\n\t\tname := r.FormValue(URLParams.Name)\n\t\tphone := r.FormValue(URLParams.Phone)\n\t\tpriceStr := r.FormValue(URLParams.Price)\n\n\t\t//prepare the data\n\t\tdata[TplParamDesc] = desc\n\t\tdata[TplParamEmail] = email\n\t\tdata[TplParamName] = name\n\t\tdata[TplParamPhone] = phone\n\t\tdata[TplParamPrice] = priceStr\n\n\t\t//validate the form\n\t\tform := &PaymentForm{\n\t\t\tEmailForm: EmailForm{\n\t\t\t\tEmail: strings.TrimSpace(email),\n\t\t\t},\n\t\t\tNameForm: NameForm{\n\t\t\t\tName: name,\n\t\t\t},\n\t\t\tPhone:           FormatPhone(phone),\n\t\t\tPrice:           priceStr,\n\t\t\tDescription:     desc,\n\t\t\tClientInitiated: false,\n\t\t\tDirectCapture:   false,\n\t\t}\n\t\tok = s.validateForm(w, r.WithContext(ctx), tpl, data, errs, form, true)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\t//save the payment\n\t\tctx, payment, err := s.savePaymentBooking(ctx, provider, book, form, now)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"save payment\", \"error\", err)\n\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\n\t\t//queue the email\n\t\tpaymentUI := s.createPaymentUI(payment)\n\t\tctx, err = s.queueEmailInvoice(ctx, provider.Name, paymentUI)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"queue email invoice\", \"error\", err)\n\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\n\t\t//success\n\t\ts.SetCookieMsg(w, MsgPaymentSuccess)\n\t\thttp.Redirect(w, r.WithContext(ctx), 
book.GetURLView(), http.StatusSeeOther)\n\t}\n}",
  "func (s *Server) handleDashboardPaymentView() http.HandlerFunc {\n\tvar o sync.Once\n\tvar tpl *template.Template\n\n\t//steps on the page\n\tsteps := struct {\n\t\tStepDel      string\n\t\tStepMarkPaid string\n\t}{\n\t\tStepDel:      \"stepDel\",\n\t\tStepMarkPaid: \"stepMarkPaid\",\n\t}\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tctx, logger := GetLogger(s.getCtx(r))\n\t\to.Do(func() {\n\t\t\ttpl = s.loadWebTemplateDashboard(ctx, \"payment-view.html\")\n\t\t})\n\t\tctx, provider, data, errs, ok := s.createTemplateDataDashboard(w, r.WithContext(ctx), tpl, true)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tdata[TplParamActiveNav] = provider.GetURLPayments()\n\t\tdata[TplParamSteps] = steps\n\n\t\t//load the booking\n\t\tnow := data[TplParamCurrentTime].(time.Time)\n\t\tvar paymentUI *paymentUI\n\t\tbookIDStr := r.FormValue(URLParams.BookID)\n\t\tif bookIDStr != \"\" {\n\t\t\tctx, book, ok := s.loadTemplateBook(w, r.WithContext(ctx), tpl, data, errs, bookIDStr, false, false)\n\t\t\tif !ok {\n\t\t\t\ts.SetCookieErr(w, Err)\n\t\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLBookings(), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdata[TplParamFormAction] = book.GetURLPaymentView()\n\n\t\t\t//load the service\n\t\t\tctx, _, ok = s.loadTemplateService(w, r.WithContext(ctx), tpl, data, provider, book.Service.ID, now)\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t//probe for a payment\n\t\t\tctx, payment, err := LoadPaymentByProviderIDAndSecondaryIDAndType(ctx, s.getDB(), provider.ID, book.ID, PaymentTypeBooking)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"load payment\", \"error\", err, \"id\", book.ID)\n\t\t\t\ts.SetCookieErr(w, Err)\n\t\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLBookings(), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif payment == nil {\n\t\t\t\ts.SetCookieErr(w, Err)\n\t\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLBookings(), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpaymentUI = s.createPaymentUI(payment)\n\t\t} else {\n\t\t\t//load the payment directly\n\t\t\tidStr := r.FormValue(URLParams.PaymentID)\n\t\t\tif idStr == \"\" {\n\t\t\t\ts.SetCookieErr(w, Err)\n\t\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLPayments(), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tid := uuid.FromStringOrNil(idStr)\n\t\t\tif id == uuid.Nil {\n\t\t\t\tlogger.Errorw(\"invalid uuid\", \"id\", idStr)\n\t\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx, payment, err := LoadPaymentByID(ctx, s.getDB(), &id)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"load payment\", \"error\", err, \"id\", id)\n\t\t\t\ts.SetCookieErr(w, Err)\n\t\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLPayments(), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpaymentUI = s.createPaymentUI(payment)\n\t\t\tdata[TplParamFormAction] = paymentUI.GetURLView()\n\n\t\t\t//probe for a booking\n\t\t\tctx, book, ok := s.loadTemplateBook(w, r.WithContext(ctx), tpl, data, errs, payment.SecondaryID.String(), false, false)\n\t\t\tif ok {\n\t\t\t\tctx, _, _ = s.loadTemplateService(w, r.WithContext(ctx), tpl, data, provider, book.Service.ID, now)\n\t\t\t} else if paymentUI.ServiceID != \"\" {\n\t\t\t\tsvcID := uuid.FromStringOrNil(paymentUI.ServiceID)\n\t\t\t\tif svcID == uuid.Nil {\n\t\t\t\t\tlogger.Errorw(\"invalid uuid\", \"id\", paymentUI.ServiceID)\n\t\t\t\t\tdata[TplParamErr] = 
GetErrText(Err)\n\t\t\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx, _, _ = s.loadTemplateService(w, r.WithContext(ctx), tpl, data, provider, &svcID, now)\n\t\t\t}\n\t\t}\n\t\tdata[TplParamPayment] = paymentUI\n\n\t\t//set-up the confirmation\n\t\tdata[TplParamConfirmMsg] = GetMsgText(MsgPaymentMarkPaid)\n\t\tdata[TplParamConfirmSubmitName] = URLParams.Step\n\t\tdata[TplParamConfirmSubmitValue] = steps.StepMarkPaid\n\n\t\t//check the method\n\t\tif r.Method == http.MethodGet {\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\n\t\t//process the step\n\t\tstep := r.FormValue(URLParams.Step)\n\t\tswitch step {\n\t\tcase steps.StepDel:\n\t\t\tctx, err := DeletePayment(ctx, s.getDB(), paymentUI.ID)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"delete payment\", \"error\", err, \"id\", paymentUI.ID)\n\t\t\t\ts.SetCookieErr(w, Err)\n\t\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLPayments(), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase steps.StepMarkPaid:\n\t\t\tctx, err := UpdatePaymentDirectCapture(ctx, s.getDB(), paymentUI.ID, &now)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"update payment captured\", \"error\", err, \"id\", paymentUI.ID)\n\t\t\t\ts.SetCookieErr(w, Err)\n\t\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLPayments(), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\tdefault:\n\t\t\tlogger.Errorw(\"invalid step\", \"id\", paymentUI.ID, \"step\", step)\n\t\t\ts.SetCookieErr(w, Err)\n\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLPayments(), http.StatusSeeOther)\n\t\t\treturn\n\t\t}\n\t\ts.SetCookieMsg(w, MsgUpdateSuccess)\n\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLPayments(), http.StatusSeeOther)\n\t}\n}",
  "func (mw loggingMiddleware) PostPayment(ctx context.Context, p Payment) (err error) {\n\tdefer func(begin time.Time) {\n\t\tmw.logger.Log(\"method\", \"PostPayment\", \"id\", p.Account, \"took\", time.Since(begin), \"err\", err)\n\t}(time.Now())\n\treturn mw.next.PostPayment(ctx, p)\n}",
  "func MakePaymentEndpoints(service PaymentService, logger kitlog.Logger) http.Handler {\n\trouter := chi.NewRouter()\n\trouter.Method(http.MethodPost, \"/\", kithttp.NewServer(\n\t\ttransferMoney(service), decodeTransferMoneyRequest, encodeTransferMoneyResponse,\n\t\t[]kithttp.ServerOption{\n\t\t\tkithttp.ServerErrorLogger(logger),\n\t\t\tkithttp.ServerErrorEncoder(encodePaymentError),\n\t\t}...))\n\n\trouter.Method(http.MethodGet, \"/\", kithttp.NewServer(\n\t\tlistPayments(service), decodeListPaymentsRequest, encodeListPaymentsResponse,\n\t\t[]kithttp.ServerOption{\n\t\t\tkithttp.ServerErrorLogger(logger),\n\t\t\tkithttp.ServerErrorEncoder(encodePaymentError),\n\t\t}...))\n\n\treturn router\n}",
  "func paymentDelete(service payment.UseCase) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tvars := mux.Vars(r)\n\t\tpaymentID, ok := vars[\"paymentID\"]\n\t\tif !ok {\n\t\t\trespondWithError(w, http.StatusNotFound, \"Missing route parameter 'paymentID'\")\n\t\t\treturn\n\t\t}\n\t\tif entity.IsValidID(paymentID) {\n\t\t\terr := service.Delete(entity.StringToID(paymentID))\n\t\t\tif err != nil {\n\t\t\t\trespondWithError(w, http.StatusNotFound, \"Payment ID does not exist\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\trespondWithJSON(w, http.StatusNoContent, nil)\n\t\t} else {\n\t\t\trespondWithError(w, http.StatusBadRequest, \"Invalid Payment ID\")\n\t\t\treturn\n\t\t}\n\t})\n}",
  "func NewHandler(s Service, v *validator.Validate, c *cache.Cache) FundHandler {\n\treturn FundHandler{service: s, validate: v, cache: c}\n}",
  "func (s *Server) handleDashboardPayments() http.HandlerFunc {\n\tvar o sync.Once\n\tvar tpl *template.Template\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tctx, logger := GetLogger(s.getCtx(r))\n\t\to.Do(func() {\n\t\t\ttpl = s.loadWebTemplateDashboard(ctx, \"payments.html\")\n\t\t})\n\t\tctx, provider, data, _, ok := s.createTemplateDataDashboard(w, r.WithContext(ctx), tpl, true)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\t//setup the breadcrumbs\n\t\tbreadcrumbs := []breadcrumb{\n\t\t\t{\"Invoices\", \"\"},\n\t\t}\n\t\tdata[TplParamBreadcrumbs] = breadcrumbs\n\t\tdata[TplParamActiveNav] = provider.GetURLPayments()\n\t\tdata[TplParamFormAction] = provider.GetURLPayments()\n\n\t\t//read the form\n\t\tfilterStr := r.FormValue(URLParams.Filter)\n\n\t\t//prepare the data\n\t\tdata[TplParamFilter] = filterStr\n\n\t\t//validate the filter\n\t\tvar err error\n\t\tfilter := PaymentFilterAll\n\t\tif filterStr != \"\" {\n\t\t\tfilter, err = ParsePaymentFilter(filterStr)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"parse filter\", \"error\", err, \"filter\", filterStr)\n\t\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\t}\n\t\t}\n\n\t\t//load the payments\n\t\tctx, payments, err := ListPaymentsByProviderIDAndFilter(ctx, s.getDB(), provider.ID, filter)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"load payments\", \"error\", err, \"id\", provider.ID)\n\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\t\tdata[TplParamPayments] = s.createPaymentUIs(payments)\n\n\t\t//load the count\n\t\tctx, countUnPaid, err := CountPaymentsByProviderIDAndFilter(ctx, s.getDB(), provider.ID, PaymentFilterUnPaid)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"count payments unpaid\", \"error\", err, \"id\", provider.ID)\n\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\t\tdata[TplParamCountUnPaid] = countUnPaid\n\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t}\n}",
  "func GetPayment(repo repository.Repository) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tpaymentID := mux.Vars(r)[\"paymentID\"]\n\t\tval, err := repo.Get(paymentID)\n\t\tif err != nil {\n\t\t\tif err == repository.ErrNotFound {\n\t\t\t\tSendErrorResponse(w, r, http.StatusNotFound, errors.Errorf(\"paymentID:%s not found\", paymentID))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tSendErrorResponse(w, r, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\t\tSendResponse(w, r, http.StatusOK, val)\n\t\treturn\n\t}\n}",
  "func (i *Interceptor) handlePayment(iCtx *interceptContext) error {\n\tswitch {\n\t// Resume/track a pending payment if it was interrupted for some reason.\n\tcase iCtx.token != nil && iCtx.token.isPending():\n\t\tlog.Infof(\"Payment of LSAT token is required, resuming/\" +\n\t\t\t\"tracking previous payment from pending LSAT token\")\n\t\terr := i.trackPayment(iCtx.mainCtx, iCtx.token)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t// We don't have a token yet, try to get a new one.\n\tcase iCtx.token == nil:\n\t\t// We don't have a token yet, get a new one.\n\t\tlog.Infof(\"Payment of LSAT token is required, paying invoice\")\n\t\tvar err error\n\t\tiCtx.token, err = i.payLsatToken(iCtx.mainCtx, iCtx.metadata)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t// We have a token and it's valid, nothing more to do here.\n\tdefault:\n\t\tlog.Debugf(\"Found valid LSAT token to add to request\")\n\t}\n\n\tif err := i.addLsatCredentials(iCtx); err != nil {\n\t\tlog.Errorf(\"Adding macaroon to request failed: %v\", err)\n\t\treturn fmt.Errorf(\"adding macaroon failed: %v\", err)\n\t}\n\treturn nil\n}",
  "func CreatePayment(repo repository.Repository) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\tvar t *model.Payment\n\t\tif err := json.NewDecoder(r.Body).Decode(&t); err != nil {\n\t\t\tSendErrorResponse(w, r, http.StatusBadRequest, err)\n\t\t\treturn\n\t\t}\n\n\t\terr := validate(t)\n\t\tif err != nil {\n\t\t\tSendErrorResponse(w, r, http.StatusBadRequest, err)\n\t\t\treturn\n\t\t}\n\n\t\tdup, err := repo.Get(t.ID)\n\t\tif err == nil {\n\t\t\tif cmp.Equal(*t, *dup) {\n\t\t\t\tw.WriteHeader(http.StatusCreated)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tSendErrorResponse(w, r, http.StatusConflict, errors.New(\"already exists\"))\n\t\t\treturn\n\t\t}\n\n\t\terr = repo.Create(t)\n\t\tif err != nil {\n\t\t\tSendErrorResponse(w, r, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusCreated)\n\t})\n}",
  "func CreatePayment(c *soso.Context) {\n\tif c.Token == nil {\n\t\tc.ErrorResponse(403, soso.LevelError, errors.New(\"User not authorized\"))\n\t\treturn\n\t}\n\treq := c.RequestMap\n\n\tpayID, _ := req[\"id\"].(float64)\n\tleadID, _ := req[\"lead_id\"].(float64)\n\n\tif leadID < 0 || payID <= 0 {\n\t\tc.ErrorResponse(http.StatusInternalServerError, soso.LevelError, errors.New(\"Incorrect parameter\"))\n\t\treturn\n\t}\n\n\tif leadID != 0 {\n\t\t_, role, err := getConversationID(c.Token.UID, uint64(leadID))\n\t\tif err != nil {\n\t\t\tc.ErrorResponse(http.StatusInternalServerError, soso.LevelError, err)\n\t\t\treturn\n\t\t}\n\n\t\torderData, paymentData, err := retrieveOrder(uint64(payID))\n\t\tif err != nil {\n\t\t\tc.ErrorResponse(http.StatusInternalServerError, soso.LevelError, err)\n\t\t\treturn\n\t\t}\n\n\t\tif !canBuy(paymentData.Direction, role) {\n\t\t\tc.ErrorResponse(http.StatusInternalServerError, soso.LevelError, fmt.Errorf(\"This side of order can not pay it\"))\n\t\t\treturn\n\t\t}\n\t\tif orderData.LeadId != uint64(leadID) {\n\t\t\tc.ErrorResponse(http.StatusInternalServerError, soso.LevelError, fmt.Errorf(\"Parameters mangled\"))\n\t\t\treturn\n\t\t}\n\n\t}\n\n\t// now -- create the order\n\tctx, cancel := rpc.DefaultContext()\n\tdefer cancel()\n\tresp, err := paymentServiceClient.BuyOrder(ctx, &payment.BuyOrderRequest{\n\t\tPayId: uint64(payID),\n\t\tUser: &payment.UserInfo{\n\t\t\tIp:     c.RemoteIP,\n\t\t\tUserId: c.Token.UID,\n\t\t\t// phone not needed here\n\t\t},\n\t})\n\n\tif err != nil { // RPC errors\n\t\tc.ErrorResponse(http.StatusInternalServerError, soso.LevelError, err)\n\t\treturn\n\t}\n\tif resp.Error > 0 { // service errors\n\t\tc.Response.ResponseMap = map[string]interface{}{\n\t\t\t\"ErrorCode\":    resp.Error,\n\t\t\t\"ErrorMessage\": resp.ErrorMessage,\n\t\t}\n\t\tc.ErrorResponse(http.StatusInternalServerError, soso.LevelError, errors.New(resp.ErrorMessage))\n\t\treturn\n\t}\n\n\tc.SuccessResponse(map[string]interface{}{\n\t\t\"redirect_url\": resp.RedirectUrl,\n\t})\n\n}",
  "func (env *Env) CreatePayment(w http.ResponseWriter, r *http.Request) {\n\tlog.Printf(\"POST payment\")\n\n\ttx := &model.TX{}\n\terr := render.DecodeJSON(r.Body, &tx)\n\n\tif err != nil {\n\t\tlog.Printf(\"Error deserialising payment: %v\\n\", err)\n\t\trender.Status(r, http.StatusBadRequest)\n\t\trender.JSON(w, r, err)\n\t\treturn\n\t}\n\n\ttx, err = env.db.CreateTX(*tx)\n\tif err != nil {\n\t\tlog.Printf(\"Error creating payment: %v\\n\", err)\n\t\trender.Status(r, http.StatusMethodNotAllowed)\n\t\trender.JSON(w, r, err)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusCreated)\n}",
  "func (s *Server) handleDashboardPaymentSettings() http.HandlerFunc {\n\tvar o sync.Once\n\tvar tpl *template.Template\n\n\t//steps on the page\n\tsteps := struct {\n\t\tStepDel string\n\t\tStepUpd string\n\t}{\n\t\tStepDel: \"stepDel\",\n\t\tStepUpd: \"stepUpd\",\n\t}\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tctx, logger := GetLogger(s.getCtx(r))\n\t\to.Do(func() {\n\t\t\ttpl = s.loadWebTemplateDashboard(ctx, \"payment-settings.html\")\n\t\t})\n\t\tctx, provider, data, errs, ok := s.createTemplateDataDashboard(w, r.WithContext(ctx), tpl, true)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\t//setup the breadcrumbs\n\t\tbreadcrumbs := []breadcrumb{\n\t\t\t{\"Payment Settings\", \"\"},\n\t\t}\n\t\tdata[TplParamBreadcrumbs] = breadcrumbs\n\t\tdata[TplParamActiveNav] = provider.GetURLPaymentSettings()\n\t\tdata[TplParamFormAction] = provider.GetURLPaymentSettings()\n\t\tdata[TplParamSteps] = steps\n\t\tdata[TplParamTypes] = PaymentTypes\n\n\t\t//handle the input\n\t\temail := r.FormValue(URLParams.Email)\n\t\tid := r.FormValue(URLParams.ID)\n\t\tstep := r.FormValue(URLParams.Step)\n\t\tpaymentType := r.FormValue(URLParams.Type)\n\n\t\t//prepare the data\n\t\tdata[TplParamEmail] = email\n\t\tdata[TplParamID] = id\n\t\tdata[TplParamType] = paymentType\n\n\t\t//prepare the confirmation modal\n\t\tswitch paymentType {\n\t\tcase PaymentTypes.TypePayPal:\n\t\t\tif provider.PayPalEmail != nil {\n\t\t\t\tdata[TplParamConfirmMsg] = GetMsgText(MsgPayPalRemove)\n\t\t\t\tdata[TplParamConfirmSubmitValue] = steps.StepDel\n\t\t\t} else {\n\t\t\t\tdata[TplParamConfirmMsg] = GetMsgText(MsgPayPalActivate)\n\t\t\t\tdata[TplParamConfirmSubmitValue] = steps.StepUpd\n\t\t\t}\n\t\t\tdata[TplParamConfirmSubmitName] = URLParams.Step\n\t\tcase PaymentTypes.TypeStripe:\n\t\t\tif provider.StripeToken != nil {\n\t\t\t\tdata[TplParamConfirmMsg] = GetMsgText(MsgStripeRemove)\n\t\t\t\tdata[TplParamConfirmSubmitValue] = steps.StepDel\n\t\t\t} else {\n\t\t\t\tdata[TplParamConfirmMsg] = GetMsgText(MsgStripeActivate)\n\t\t\t\tdata[TplParamConfirmSubmitValue] = steps.StepUpd\n\t\t\t}\n\t\t\tdata[TplParamConfirmSubmitName] = URLParams.Step\n\t\tcase PaymentTypes.TypeZelle:\n\t\t\tif provider.ZelleID != nil {\n\t\t\t\tdata[TplParamConfirmMsg] = GetMsgText(MsgZelleRemove)\n\t\t\t\tdata[TplParamConfirmSubmitValue] = steps.StepDel\n\t\t\t} else {\n\t\t\t\tdata[TplParamConfirmMsg] = GetMsgText(MsgZelleActivate)\n\t\t\t\tdata[TplParamConfirmSubmitValue] = steps.StepUpd\n\t\t\t}\n\t\t\tdata[TplParamConfirmSubmitName] = URLParams.Step\n\t\t}\n\n\t\t//check the method\n\t\tif r.Method == http.MethodGet {\n\t\t\t//default the data\n\t\t\tif provider.PayPalEmail != nil {\n\t\t\t\tdata[TplParamEmail] = *provider.PayPalEmail\n\t\t\t}\n\t\t\tif provider.ZelleID != nil {\n\t\t\t\tdata[TplParamID] = *provider.ZelleID\n\t\t\t}\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\n\t\t//execute the correct operation\n\t\tswitch step {\n\t\tcase steps.StepDel:\n\t\t\tswitch paymentType {\n\t\t\tcase PaymentTypes.TypePayPal:\n\t\t\t\tprovider.PayPalEmail = nil\n\t\t\tcase PaymentTypes.TypeStripe:\n\t\t\t\t//revoke access\n\t\t\t\terr := RevokeOAuthTokenStripe(ctx, provider.StripeToken)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.Errorw(\"revoke stripe\", \"error\", err)\n\t\t\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tprovider.StripeToken = nil\n\t\t\tcase PaymentTypes.TypeZelle:\n\t\t\t\tprovider.ZelleID 
= nil\n\t\t\t}\n\n\t\t\t//save the provider\n\t\t\tctx, err := SaveProvider(ctx, s.getDB(), provider.Provider)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"save provider\", \"error\", err, \"provider\", provider)\n\t\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase steps.StepUpd:\n\t\t\tswitch paymentType {\n\t\t\tcase PaymentTypes.TypePayPal:\n\t\t\t\t//validate the data\n\t\t\t\tform := EmailForm{\n\t\t\t\t\tEmail: strings.TrimSpace(email),\n\t\t\t\t}\n\t\t\t\tok = s.validateForm(w, r.WithContext(ctx), tpl, data, errs, form, true)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t//populate from the form\n\t\t\t\tprovider.PayPalEmail = &form.Email\n\n\t\t\t\t//save the provider\n\t\t\t\tctx, err := SaveProvider(ctx, s.getDB(), provider.Provider)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.Errorw(\"save provider\", \"error\", err, \"provider\", provider)\n\t\t\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase PaymentTypes.TypeStripe:\n\t\t\t\ts.invokeHdlrGet(s.handleStripeLogin(), w, r.WithContext(ctx))\n\t\t\t\treturn\n\t\t\tcase PaymentTypes.TypeZelle:\n\t\t\t\t//validate the data\n\t\t\t\tform := ZelleIDForm{\n\t\t\t\t\tZelleID: id,\n\t\t\t\t}\n\t\t\t\tok = s.validateForm(w, r.WithContext(ctx), tpl, data, errs, form, true)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t//populate from the form\n\t\t\t\tprovider.ZelleID = &form.ZelleID\n\n\t\t\t\t//save the provider\n\t\t\t\tctx, err := SaveProvider(ctx, s.getDB(), provider.Provider)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.Errorw(\"save provider\", \"error\", err, \"provider\", provider)\n\t\t\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\tlogger.Errorw(\"invalid step\", \"step\", step)\n\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\n\t\t//success\n\t\ts.SetCookieMsg(w, MsgUpdateSuccess)\n\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLPaymentSettings(), http.StatusSeeOther)\n\t}\n}",
  "func cryptoPaymentCallback(w http.ResponseWriter, r *http.Request) {\n\n\t// Read the content from body\n\tvar bodyBytes []byte\n\tbodyBytes, _ = ioutil.ReadAll(r.Body)\n\t// replace the content\n\tr.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))\n\n\terr := r.ParseForm()\n\tif err != nil {\n\t\tlog.Error().Msg(\"couldn't parse form: \" + err.Error())\n\t\treturn\n\t}\n\n\t// get Hmac\n\tsuppliedHmac := r.Header.Get(\"Hmac\")\n\tif suppliedHmac == \"\"{\n\t\tlog.Error().Msg(\"no HMAC signature set\")\n\t\treturn\n\t}\n\n\t// compute our own hmac\n\tmac := hmac.New(sha512.New, []byte(config.CoinPaymentsIPN))\n\tmac.Write([]byte(bodyBytes))\n\tcomputedHmac := hex.EncodeToString(mac.Sum(nil))\n\n\t// verify supplied hmac matches computed\n\tif suppliedHmac != computedHmac {\n\t\tlog.Info().Msg(\"hmacs don't match!\")\n\t\treturn\n\t}\n\n\t// decode form to callback struct\n\ttransactionCallback := new(TransactionCallback)\n\tdecoder := schema.NewDecoder()\n\tdecoder.IgnoreUnknownKeys(true)\n\terr = decoder.Decode(transactionCallback, r.Form)\n\tif err != nil {\n\t\tlog.Error().Msg(\"couldn't decode callback: \" + err.Error())\n\t\treturn\n\t}\n\n\t// verify correct merchant id\n\tsuppliedMerchantId := transactionCallback.Merchant\n\tif suppliedMerchantId != config.CoinPaymentsMerchantId{\n\t\tlog.Error().Msg(\"merchant id doesn't match\")\n\t\treturn\n\t}\n\n\tpaymentStatus := transactionCallback.Status\n\tlog.Info().Msg(\"callback successfully processed with status: \" + transactionCallback.StatusText)\n\tlog.Info().Msg(\"Coinpayments update for transaction id: \" + transactionCallback.Id)\n\n\tlog.Info().Msg( \"transaction email: \" + transactionCallback.Email)\n\n\n\tpaymentComplete := false\n\tswitch paymentStatus {\n\tcase 0:\n\t\tlog.Info().Msg(\"waiting for payment\")\n\t\tbreak\n\tcase 1:\n\t\tlog.Info().Msg(\"coins received!\")\n\t\t//TODO: we will want to be more stringent in the future, wait for confirmation\n\t\t//paymentComplete = true\n\t\tbreak\n\tcase 2:\n\t\tlog.Info().Msg(\"coins queued for payout!\")\n\t\t//paymentComplete = true\n\t\tbreak\n\tcase -1:\n\t\tlog.Info().Msg(\"payment cancelled or timed out\")\n\t\tbreak\n\tcase -2:\n\t\tlog.Info().Msg(\"Paypal refund or reversal\")\n\t\tbreak\n\tcase 3:\n\t\tlog.Info().Msg(\"Paypal pending!\")\n\t\tbreak\n\tcase 100:\n\t\tlog.Info().Msg(\"Payment Complete!\")\n\t\tpaymentComplete = true\n\t\tbreak\n\t}\n\n\n\tif paymentComplete{\n\t\tsendEmail(\"Payment Complete!\", \"We'll be in touch as we summon your \" +\n\t\t\t\"Entity from the Ether. See: https://\" + config.ServerLocation + \"/paid?email=\" +\n\t\t\ttransactionCallback.Email +\n\t\t\t\" for your next steps. \", transactionCallback.Email)\n\n\t\tsendEmail(\"Payment Complete!\", transactionCallback.Email + \" completed their payment\",\n\t\t\tconfig.AdminEmail)\n\n\n\t}\n\n\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Kinds is a list of known pkger kinds. 
 | 
	func Kinds() []Kind {
	var out []Kind
	for k := range kinds {
		out = append(out, k)
	}
	return out
} 
 | 
	[
  "func SupportedKinds() []string {\n\treturn kinds\n}",
  "func (*bzlLibraryLang) Kinds() map[string]rule.KindInfo {\n\treturn kinds\n}",
  "func PrintKinds(c *cli.Context, fnF FilterFunc, fnH HeaderFunc) {\n\tformat := c.String(\"output\")\n\n\tobjs := fnF(c)\n\n\tswitch format {\n\tcase \"\", \"w\", \"wide\":\n\t\tPrintTabular(fnH, objs, utils.Wide(c))\n\tcase \"yaml\":\n\t\tutils.PrintStructInYAML(objs)\n\tcase \"json\":\n\t\tutils.PrintStructInJSON(objs)\n\tcase \"v\", \"vv\", \"vvv\":\n\t\tfor _, s := range objs {\n\t\t\ts.PrintV(format)\n\t\t}\n\tdefault:\n\t\tfmt.Printf(\"Output format: [%s] is not supported.\\n\", format)\n\t}\n\n\tfmt.Printf(\"* %d found\\n\", len(objs))\n}",
  "func (es ExternalServices) Kinds() (kinds []string) {\n\tset := make(map[string]bool, len(es))\n\tfor _, e := range es {\n\t\tif !set[e.Kind] {\n\t\t\tkinds = append(kinds, e.Kind)\n\t\t\tset[e.Kind] = true\n\t\t}\n\t}\n\treturn kinds\n}",
  "func GetKindsFor(restMapper meta.RESTMapper, resource string) []schema.GroupVersionKind {\n\tgvr := schema.GroupVersionResource{Group: \"\", Version: \"\", Resource: resource}\n\tgvks, err := restMapper.KindsFor(gvr)\n\tif err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n\treturn gvks\n}",
  "func getApplicableKindsForPolicy(p *v1alpha1.Policy) []string {\n\tkindsMap := map[string]interface{}{}\n\tkinds := []string{}\n\t// iterate over the rules an identify all kinds\n\tfor _, rule := range p.Spec.Rules {\n\t\tfor _, k := range rule.ResourceDescription.Kinds {\n\t\t\tkindsMap[k] = nil\n\t\t}\n\t}\n\n\t// get the kinds\n\tfor k := range kindsMap {\n\t\tkinds = append(kinds, k)\n\t}\n\treturn kinds\n}",
  "func getApplicableKindsForPolicy(p *v1alpha1.Policy) []string {\n\tkindsMap := map[string]interface{}{}\n\tkinds := []string{}\n\t// iterate over the rules an identify all kinds\n\t// Matching\n\tfor _, rule := range p.Spec.Rules {\n\t\tfor _, k := range rule.MatchResources.Kinds {\n\t\t\tkindsMap[k] = nil\n\t\t}\n\t\t// remove excluded ones\n\t\tfor _, k := range rule.ExcludeResources.Kinds {\n\t\t\tif _, ok := kindsMap[k]; ok {\n\t\t\t\t// delete kind\n\t\t\t\tdelete(kindsMap, k)\n\t\t\t}\n\t\t}\n\t}\n\n\t// get the kinds\n\tfor k := range kindsMap {\n\t\tkinds = append(kinds, k)\n\t}\n\treturn kinds\n}",
  "func (c *Chart) UnknownKinds(known []string) []string {\n\tlookup := make(map[string]bool, len(known))\n\tfor _, k := range known {\n\t\tlookup[k] = true\n\t}\n\n\tu := []string{}\n\tfor n := range c.Kind {\n\t\tif _, ok := lookup[n]; !ok {\n\t\t\tu = append(u, n)\n\t\t}\n\t}\n\n\treturn u\n}",
  "func getApplicableKindsForPolicy(p *kyverno.ClusterPolicy) []string {\n\tkinds := []string{}\n\t// iterate over the rules an identify all kinds\n\t// Matching\n\tfor _, rule := range p.Spec.Rules {\n\t\tfor _, k := range rule.MatchResources.Kinds {\n\t\t\tkinds = append(kinds, k)\n\t\t}\n\t}\n\treturn kinds\n}",
  "func (l Lang) Kinds() map[string]rule.KindInfo {\n\treturn map[string]rule.KindInfo{\n\t\t\"corp_protos\": {\n\t\t\tMatchAny:   false,\n\t\t\tMatchAttrs: []string{\"srcs\"},\n\t\t},\n\t}\n}",
  "func (rs Repos) Kinds() (kinds []string) {\n\tset := map[string]bool{}\n\tfor _, r := range rs {\n\t\tkind := strings.ToUpper(r.ExternalRepo.ServiceType)\n\t\tif !set[kind] {\n\t\t\tkinds = append(kinds, kind)\n\t\t\tset[kind] = true\n\t\t}\n\t}\n\treturn kinds\n}",
  "func (tr TemplateResources) ListKinds() []TemplateResource {\n\tvar resources []TemplateResource\n\tfor _, r := range tr.Resources {\n\t\tif r.IsListKind() {\n\t\t\tresources = append(resources, r)\n\t\t}\n\t}\n\treturn resources\n}",
  "func (_m *Resolver) ListUsageKinds(ctx context.Context, first *int, offset *int) ([]*gqlschema.UsageKind, error) {\n\tvar r0 []*gqlschema.UsageKind\n\tvar r1 error\n\tr1 = _m.err\n\n\treturn r0, r1\n}",
  "func KindStrings() []string {\n\tstrs := make([]string, len(_KindNames))\n\tcopy(strs, _KindNames)\n\treturn strs\n}",
  "func GetKinds(fnG GetKindsByFileFunc, kind string) []Kind {\n\tvar kinds []Kind\n\n\tdir := filepath.Join(DataDir, kind)\n\n\tfiles, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tkindsChan := make(chan Kind, len(files))\n\tvar wg sync.WaitGroup\n\n\tfor _, f := range files {\n\t\twg.Add(1)\n\t\tgo func(path string) {\n\t\t\tdefer wg.Done()\n\t\t\tkindsChan <- fnG(path)\n\t\t}(filepath.Join(dir, f.Name()))\n\t}\n\n\twg.Wait()\n\tclose(kindsChan)\n\n\tfor v := range kindsChan {\n\t\tkinds = append(kinds, v)\n\t}\n\n\tsort.Slice(kinds, func(i, j int) bool { return kinds[i].GetName() < kinds[j].GetName() })\n\n\treturn kinds\n}",
  "func KindValues() []Kind {\n\treturn _KindValues\n}",
  "func Kinds(obj interface{}) (map[string]string, error) {\n\tobjValue, err := getReflectValue(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tkindMap := map[string]string{}\n\tobjType := objValue.Type()\n\tfor i := 0; i < objValue.NumField(); i++ {\n\t\tfieldType := objType.Field(i)\n\t\tfieldValue := objValue.Field(i)\n\n\t\tif fieldValue.CanInterface() {\n\t\t\tkindMap[fieldType.Name] = fieldValue.Kind().String()\n\t\t}\n\t}\n\n\treturn kindMap, nil\n}",
  "func CriticalKinds() []string {\n\tck := make([]string, 0, 6)\n\tck = append(ck, constant.RulesKind)\n\tck = append(ck, constant.AttributeManifestKind)\n\tck = append(ck, constant.AdapterKind)\n\tck = append(ck, constant.TemplateKind)\n\tck = append(ck, constant.InstanceKind)\n\tck = append(ck, constant.HandlerKind)\n\treturn ck\n}",
  "func KindsToStrings(kinds KindSlice) []string {\n\tstrs := make([]string, len(kinds))\n\tfor i, k := range kinds {\n\t\tstrs[i] = k.String()\n\t}\n\treturn strs\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	OK validates that the kind is valid. 
 | 
	func (k Kind) OK() error {
	if k == KindUnknown {
		return errors.New("invalid kind")
	}
	if !kinds[k] {
		return errors.New("unsupported kind provided")
	}
	return nil
} 
 | 
	[
  "func (o *Resource) GetKindOk() (*string, bool) {\n\tif o == nil || o.Kind == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Kind, true\n}",
  "func (o *ObjectReference) GetKindOk() (*string, bool) {\n\tif o == nil || o.Kind == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Kind, true\n}",
  "func (m *Resource) IsOK() bool {\n\tswitch {\n\tcase len(m.name) == 0:\n\t\treturn false\n\tcase len(m.description) == 0:\n\t\treturn false\n\tcase m.schema == nil:\n\t\treturn false\n\tcase m.model == nil:\n\t\treturn false\n\tcase m.store == nil:\n\t\treturn false\n\tcase len(m.methods) == 0:\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}",
  "func MatchKind(err error, k Kind) bool {\n\treturn k.Match(err)\n}",
  "func (c *Collection) invalidKind(kind string) bool {\n\tkinds := []string{\"ComponentStatus\"}\n\tfor _, item := range kinds {\n\t\tif kind == item {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
  "func OK() ValidationError {\n\treturn ValidationError{}\n}",
  "func (r *Resource) Validate() error {\n\tif len(r.Group) == 0 {\n\t\treturn fmt.Errorf(\"group cannot be empty\")\n\t}\n\tif len(r.Version) == 0 {\n\t\treturn fmt.Errorf(\"version cannot be empty\")\n\t}\n\tif len(r.Kind) == 0 {\n\t\treturn fmt.Errorf(\"kind cannot be empty\")\n\t}\n\n\tif len(r.Resource) == 0 {\n\t\tr.Resource = flect.Pluralize(strings.ToLower(r.Kind))\n\t}\n\n\tgroupMatch := regexp.MustCompile(\"^[a-z]+$\")\n\tif !groupMatch.MatchString(r.Group) {\n\t\treturn fmt.Errorf(\"group must match ^[a-z]+$ (was %s)\", r.Group)\n\t}\n\n\tversionMatch := regexp.MustCompile(\"^v\\\\d+(alpha\\\\d+|beta\\\\d+)?$\")\n\tif !versionMatch.MatchString(r.Version) {\n\t\treturn fmt.Errorf(\n\t\t\t\"version must match ^v\\\\d+(alpha\\\\d+|beta\\\\d+)?$ (was %s)\", r.Version)\n\t}\n\tif r.Kind != flect.Pascalize(r.Kind) {\n\t\treturn fmt.Errorf(\"kind must be camelcase (expected %s was %s)\", flect.Pascalize(r.Kind), r.Kind)\n\t}\n\n\treturn nil\n}",
  "func (d *Document) OK() error {\n\tif d.URL == \"\" {\n\t\treturn er.InvalidField(\"url\")\n\t}\n\tif d.DocType == \"\" {\n\t\treturn er.InvalidField(\"doc_type\")\n\t}\n\tif d.OwnerType == \"\" {\n\t\treturn er.InvalidField(\"owner_type\")\n\t}\n\treturn nil\n}",
  "func (o *ReconciliationTarget) GetKindOk() (*string, bool) {\n\tif o == nil || o.Kind == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Kind, true\n}",
  "func (o *TeamConfiguration) GetKindOk() (*string, bool) {\n\tif o == nil  {\n\t\treturn nil, false\n\t}\n\treturn &o.Kind, true\n}",
  "func OK(err error) bool {\n\tif err != nil {\n\t\tlog.Printf(\"unexpected error: %s\", err)\n\t\treturn false\n\t}\n\n\treturn true\n}",
  "func FormatOk(fmt string) bool {\n\tfmts := []string{\n\t\tFmt_ntriples,\n\t\tFmt_turtle,\n\t\tFmt_rdfxmlXmp,\n\t\tFmt_rdfxmlAbbrev,\n\t\tFmt_rdfxml,\n\t\tFmt_rss,\n\t\tFmt_atom,\n\t\tFmt_dot,\n\t\tFmt_jsonTriples,\n\t\tFmt_json,\n\t\tFmt_html,\n\t\tFmt_nquads,\n\t}\n\tfor _, f := range fmts {\n\t\tif fmt == f {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
  "func (ctl *Ctl) SpecIsValid() (bool, error) {\n\treturn true, nil\n}",
  "func Ok(t *testing.T, value interface{}, arg ...interface{}) {\n\tvar res bool\n\ti++\n\n\tswitch value.(type) {\n\tdefault:\n\t\ts := fmt.Sprintf(\"%v\", value)\n\t\tres = s != \"\" && s != \"[]\" && s != \"<nil>\"\n\tcase error:\n\t\tres = value != nil\n\tcase string:\n\t\tres = len(value.(string)) > 0\n\tcase []byte:\n\t\tres = len(value.([]byte)) > 0\n\tcase []rune:\n\t\tres = len(value.([]rune)) > 0\n\tcase int:\n\t\tres = value.(int) != 0\n\tcase bool:\n\t\tres = value.(bool)\n\tcase float32:\n\t\tres = value.(float32) != 0\n\tcase float64:\n\t\tres = value.(float64) != 0\n\t}\n\n\tmsg := getMsg(arg)\n\n\tif !res {\n\t\tt.Errorf(\"Not ok %d: %s\\n%v==%s\\n\", i, msg, value, reflect.TypeOf(value))\n\t}\n\tt.Logf(\"Ok %d: %s\\n\", i, msg)\n}",
  "func (resp *Response) OK() bool {\n\treturn resp.StatusCode < 400\n}",
  "func (req *Request) OK(body string) {\n\treq.Reply(http.StatusOK, body)\n}",
  "func (r *Request) OK() error {\n\tif len(r.URL) == 0 {\n\t\treturn ErrRequired{Msg: \"url must be specified\"}\n\t}\n\treturn nil\n}",
  "func (c *SeaterController) OK(data interface{}) {\n\tc.Code(200)\n\tc.jsonResp(data)\n}",
  "func (o *GetResourceUsageOKBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	ResourceType converts a kind to a known resource type (if applicable). 
 | 
	func (k Kind) ResourceType() influxdb.ResourceType {
	switch k {
	case KindBucket:
		return influxdb.BucketsResourceType
	case KindCheck, KindCheckDeadman, KindCheckThreshold:
		return influxdb.ChecksResourceType
	case KindDashboard:
		return influxdb.DashboardsResourceType
	case KindLabel:
		return influxdb.LabelsResourceType
	case KindNotificationEndpoint,
		KindNotificationEndpointHTTP,
		KindNotificationEndpointPagerDuty,
		KindNotificationEndpointSlack:
		return influxdb.NotificationEndpointResourceType
	case KindNotificationRule:
		return influxdb.NotificationRuleResourceType
	case KindTask:
		return influxdb.TasksResourceType
	case KindTelegraf:
		return influxdb.TelegrafsResourceType
	case KindVariable:
		return influxdb.VariablesResourceType
	default:
		return ""
	}
} 
 | 
	[
  "func kindToResource(kind string, mixedCase bool) (plural, singular string) {\n\tif len(kind) == 0 {\n\t\treturn\n\t}\n\tif mixedCase {\n\t\t// Legacy support for mixed case names\n\t\tsingular = strings.ToLower(kind[:1]) + kind[1:]\n\t} else {\n\t\tsingular = strings.ToLower(kind)\n\t}\n\tswitch string(singular[len(singular)-1]) {\n\tcase \"s\":\n\t\tplural = singular\n\tcase \"y\":\n\t\tplural = strings.TrimSuffix(singular, \"y\") + \"ies\"\n\tdefault:\n\t\tplural = singular + \"s\"\n\t}\n\treturn\n}",
  "func MapResourceKindToListResourcesType(kind string) string {\n\tswitch kind {\n\tcase types.KindApp:\n\t\treturn types.KindAppServer\n\tcase types.KindDatabase:\n\t\treturn types.KindDatabaseServer\n\tcase types.KindKubernetesCluster:\n\t\treturn types.KindKubeServer\n\tdefault:\n\t\treturn kind\n\t}\n}",
  "func resourceType(labels map[string]string) ResourceType {\n\trt := defaultResourcetype()\n\tif ContextProtocolTCP == labels[istioProtocol] {\n\t\trt.protocol = protocolTCP\n\t}\n\treturn rt\n}",
  "func getResourceKind(data []byte) (string, error) {\n\ttypeMeta := unversioned.TypeMeta{}\n\tif err := json.Unmarshal(data, &typeMeta); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn typeMeta.Kind, nil\n}",
  "func (r *CredentialReconciler) resourceForKind(kwg kindWithGroup) (metav1.APIResource, error) {\n\tresource, exists := r.resourceCache[kwg]\n\tif !exists {\n\t\t// TODO: this is definitely not the best way of dealing with a missing resource in the local\n\t\t// cache. While this is fine for a prototype, it can crash the controller pod or the node\n\t\t// (depending on the pod resources) if the kind with the specified group doesn't exist on\n\t\t// the API server (or if the API server goes off for a little walk), or crash the node.\n\t\tif err := r.updateResourceCache(); err != nil {\n\t\t\treturn metav1.APIResource{}, err\n\t\t}\n\t\treturn r.resourceForKind(kwg)\n\t}\n\treturn resource, nil\n}",
  "func (o *Tag) GetResourceTypeOk() (*string, bool) {\n\tif o == nil || o.ResourceType == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ResourceType, true\n}",
  "func (o DatasourceOutput) ResourceType() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v Datasource) *string { return v.ResourceType }).(pulumi.StringPtrOutput)\n}",
  "func (o *ResourceReviewRequest) SetResourceType(v string) {\n\to.ResourceType = &v\n}",
  "func ResourceTypeFromString(typeString string) (ResourceType, error) {\n\tswitch strings.ToUpper(typeString) {\n\tcase \"ANY\":\n\t\treturn ResourceAny, nil\n\tcase \"TOPIC\":\n\t\treturn ResourceTopic, nil\n\tcase \"GROUP\":\n\t\treturn ResourceGroup, nil\n\tcase \"BROKER\":\n\t\treturn ResourceBroker, nil\n\tdefault:\n\t\treturn ResourceUnknown, NewError(ErrInvalidArg, \"Unknown resource type\", false)\n\t}\n}",
  "func ParseResourceKind(name string) ResourceKind {\n\tk, ok := resourceNameToValue[name]\n\tif ok {\n\t\treturn k\n\t}\n\treturn UnKnownKind\n}",
  "func (o *ResourceReviewRequest) GetResourceTypeOk() (*string, bool) {\n\tif o == nil || o.ResourceType == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ResourceType, true\n}",
  "func (o *ListReplicationTasksParams) SetResourceType(resourceType *string) {\n\to.ResourceType = resourceType\n}",
  "func (f *Filter) AddResourceType(t string) {\n\tconst (\n\t\tresourceType = \"resource.type\"\n\t)\n\tif strings.Contains(f.String(), resourceType) {\n\t\tglog.Fatalf(\"Stackdriver filters may only contain one '%s'\", resourceType)\n\t}\n\tf.add(fmt.Sprintf(\"%s=\\\"%s\\\"\", resourceType, t))\n}",
  "func ResourceTypeString(s string) (ResourceType, error) {\n\tif val, ok := _ResourceTypeNameToValueMap[s]; ok {\n\t\treturn val, nil\n\t}\n\ts = strings.ToLower(s)\n\tif val, ok := _ResourceTypeNameToValueMap[s]; ok {\n\t\treturn val, nil\n\t}\n\treturn 0, fmt.Errorf(\"%s does not belong to ResourceType values\", s)\n}",
  "func (o *Tag) SetResourceType(v string) {\n\to.ResourceType = &v\n}",
  "func (o *LabelProperties) GetResourceTypeOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\n\treturn o.ResourceType, true\n}",
  "func (s *ComponentStatus) FindResourcesOfKind(kind string) []*StatusType {\n\tresources := []*StatusType{}\n\tsuffix := \",Kind=\" + kind\n\tfor _, status := range s.Resources {\n\t\tif strings.HasSuffix(status.Resource, suffix) {\n\t\t\tresources = append(resources, status)\n\t\t}\n\t}\n\treturn resources\n}",
  "func (o *Tag) GetResourceType() string {\n\tif o == nil || o.ResourceType == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.ResourceType\n}",
  "func (o *ListNodesParams) SetResourceType(resourceType *string) {\n\to.ResourceType = resourceType\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Encode will safely encode the id. 
 | 
	func (s SafeID) Encode() ([]byte, error) {
	id := platform.ID(s)
	b, _ := id.Encode() // the error is intentionally dropped so encoding never fails
	return b, nil
} 
 | 
	[
  "func (id ID) Encode() string {\n\tbody := base32i.CheckEncode(id.Bytes())\n\treturn string(append([]byte{'b'}, body...))\n}",
  "func (id Id) String() string {\n\treturn base64.RawURLEncoding.EncodeToString([]byte(id))\n}",
  "func (id ID) String() string {\n\treturn base64.URLEncoding.EncodeToString(id[:])\n}",
  "func (id ID) Encode(dst []byte) []byte {\n\tencode(dst, id[:])\n\treturn dst\n}",
  "func encodeObjectId(buf *bytes.Buffer, path, name string, val ObjectId) error {\n\tif len(val) != 12 {\n\t\treturn fmt.Errorf(\"%v, ObjectId must be 12 bytes.\", path)\n\t}\n\n\t// type\n\tif err := buf.WriteByte(_OBJECT_ID); err != nil {\n\t\treturn err\n\t}\n\n\t// name\n\tif err := writeCstring(buf, name); err != nil {\n\t\treturn err\n\t}\n\n\t// value\n\tif _, err := buf.Write(val); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
  "func encodeID(val string) string {\n\tif reValidAvatarID.MatchString(val) {\n\t\treturn strings.TrimSuffix(val, imgSfx) // already encoded, strip .image\n\t}\n\treturn store.EncodeID(val)\n}",
  "func EncodeIdentifier(w io.Writer, id *Identifier) error {\n\tif err := binary.Write(w, byteOrder, id.Version); err != nil {\n\t\treturn err\n\t}\n\n\tswitch id.Version {\n\t// A version 0 identifier consists of its linked payment hash, followed\n\t// by the token ID.\n\tcase 0:\n\t\tif _, err := w.Write(id.PaymentHash[:]); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err := w.Write(id.TokenID[:])\n\t\treturn err\n\n\tdefault:\n\t\treturn fmt.Errorf(\"%w: %v\", ErrUnknownVersion, id.Version)\n\t}\n}",
  "func (_Contract *ContractCaller) EncodeTokenId(opts *bind.CallOpts, _x *big.Int, _y *big.Int) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Contract.contract.Call(opts, out, \"encodeTokenId\", _x, _y)\n\treturn *ret0, err\n}",
  "func (t Identifier) Encode() (serialized []byte, err error) {\n\treturn t[:], nil\n}",
  "func (m Mixer) EncodeID(password string, id uint64) string {\n\treturn m.EncodeIDPadding(password, id, DefaultPaddingLength)\n}",
  "func (id ID) String() string {\n\treturn hex.EncodeToString(id[:])\n}",
  "func EncodeUnrestrictedSQLIdent(buf *bytes.Buffer, s string, flags EncodeFlags) {\n\tif flags.HasFlags(EncBareIdentifiers) || isBareIdentifier(s) {\n\t\tbuf.WriteString(s)\n\t\treturn\n\t}\n\tencodeEscapedSQLIdent(buf, s)\n}",
  "func IDB58Encode(id ID) string {\n\treturn core.IDB58Encode(id)\n}",
  "func stringifyId(raw interface{}) string {\n\tif buildObjId, ok := raw.(bson.ObjectId); ok {\n\t\treturn buildObjId.Hex()\n\t}\n\tif asStr, ok := raw.(fmt.Stringer); ok {\n\t\treturn asStr.String()\n\t}\n\treturn fmt.Sprintf(\"%v\", raw)\n}",
  "func EncodeUnrestrictedSQLIdent(buf *bytes.Buffer, s string, flags EncodeFlags) {\n\tif flags.HasFlags(EncBareIdentifiers) || IsBareIdentifier(s) {\n\t\tbuf.WriteString(s)\n\t\treturn\n\t}\n\tEncodeEscapedSQLIdent(buf, s)\n}",
  "func (id RecordID) String() string {\n\treturn hex.EncodeToString(id)\n}",
  "func Encode(userID, profileID string) string {\n\tif profileID == \"\" {\n\t\treturn userID\n\t}\n\treturn fmt.Sprintf(\"%s_%s\", userID, profileID)\n}",
  "func (p *pid) Encode() string {\n\tidStrs := make([]string, len(p.Ids))\n\tfor i, v := range p.Ids {\n\t\tidStrs[i] = fmt.Sprintf(\"%d.%d\", v.Pos, v.AgentId)\n\t}\n\treturn strings.Join(idStrs, \":\") + \"~\" + common.Itoa(p.Seq)\n}",
  "func (op *insert) Encode() string {\n\treturn fmt.Sprintf(\"i,%s,%s\", op.Pid.Encode(), op.Value)\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	HasConflicts reports whether any changes within the package conflict after the dry run is complete. 
 | 
	func (d Diff) HasConflicts() bool {
	for _, b := range d.Buckets {
		if b.hasConflict() {
			return true
		}
	}
	for _, l := range d.Labels {
		if l.hasConflict() {
			return true
		}
	}
	for _, v := range d.Variables {
		if v.hasConflict() {
			return true
		}
	}
	return false
} 
 | 
	[
  "func (t *Table) HasConflicts(ctx context.Context) (bool, error) {\n\tif t.Format() == types.Format_DOLT {\n\t\tart, err := t.GetArtifacts(ctx)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\treturn art.HasConflicts(ctx)\n\t}\n\treturn t.table.HasConflicts(ctx)\n}",
  "func (rnode *RuleNode) HasConflict() bool {\n\treturn false // TODO\n}",
  "func (env *Environment) checkIntegrity(ps ...*gdf.Package) (conflicts map[string]map[string][3]string) {\n\tconflicts = map[string]map[string][3]string{}\n\tconflicts[\"#dep-registry-orphan#\"] = map[string][3]string{}\n\tconflicts[\"#dep-registry-inconsistency#\"] = map[string][3]string{}\n\n\tdefer func() {\n\t\tif len(conflicts[\"#dep-registry-orphan#\"]) == 0 {\n\t\t\tdelete(conflicts, \"#dep-registry-orphan#\")\n\t\t}\n\t\tif len(conflicts[\"#dep-registry-inconsistency#\"]) == 0 {\n\t\t\tdelete(conflicts, \"#dep-registry-inconsistency#\")\n\t\t}\n\t}()\n\tpkgs := map[string]bool{}\n\n\tfor _, p := range ps {\n\t\tpkgs[p.Path] = true\n\t\td, er := env.Diff(p, false)\n\t\tif er != nil {\n\t\t\tconflicts[p.Path] = map[string][3]string{\n\t\t\t\t\"#dep-registry-inconsistency#\": [3]string{\"missing\", er.Error(), \"\"},\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif d != nil && len(d.Exports) > 0 {\n\t\t\tconflicts[p.Path] = map[string][3]string{\n\t\t\t\t\"#dep-registry-inconsistency#\": [3]string{\"exports\", strings.Join(d.Exports, \"\\n\"), \"\"},\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif d != nil && len(d.Imports) > 0 {\n\t\t\tconflicts[p.Path] = map[string][3]string{\n\t\t\t\t\"#dep-registry-inconsistency#\": [3]string{\"imports\", strings.Join(d.Imports, \"\\n\"), \"\"},\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\terrs := env.db.hasConflict(p, map[string]bool{})\n\t\tif len(errs) > 0 {\n\t\t\tconflicts[p.Path] = errs\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tdbpkgs, err := env.db.GetAllPackages()\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\tfor _, dbp := range dbpkgs {\n\t\tif !pkgs[dbp.Package] {\n\t\t\tconflicts[\"#dep-registry-orphan#\"][dbp.Package] = [3]string{\"orphan\", dbp.Package, \"\"}\n\t\t\tcontinue\n\t\t}\n\t}\n\n\treturn\n}",
  "func (d *Dependencies) satisfied() bool {\n\treturn len(d.All) == 0\n}",
  "func (f *File) HasUncommitedChanges() bool {\n\treturn len(f.cache) > 0\n}",
  "func (c *Client) HasDependency(ctx context.Context, change *gerritpb.ChangeInfo) (bool, error) {\n\trelatedChanges, err := c.getRelatedChanges(ctx, change)\n\tif err != nil {\n\t\treturn false, errors.Annotate(err, \"failed checking dependency\").Err()\n\t}\n\n\tfor _, relatedChange := range relatedChanges {\n\t\tif relatedChange.Status == gerritpb.ChangeStatus_MERGED {\n\t\t\t// relatedChange here is the newest merged. If relatedChange != change,\n\t\t\t// then there is a merged dependency\n\t\t\treturn relatedChange.Project != change.Project ||\n\t\t\t\trelatedChange.Number != change.Number, nil\n\t\t}\n\t}\n\n\t// none of the related changes are merged, so no merged dependencies\n\treturn false, nil\n}",
  "func (s *BzrRepo) IsDirty() bool {\n\tout, err := s.RunFromDir(\"bzr\", \"diff\")\n\treturn err != nil || len(out) != 0\n}",
  "func (ab *AtomicBlock) conflicts(s ids.Set) (bool, error) {\n\tif ab.Status() == choices.Accepted {\n\t\treturn false, nil\n\t}\n\tif ab.inputs.Overlaps(s) {\n\t\treturn true, nil\n\t}\n\tparent, err := ab.parentBlock()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn parent.conflicts(s)\n}",
  "func (o *oracle) hasConflict(txn *Txn) bool {\n\tif len(txn.reads) == 0 {\n\t\treturn false\n\t}\n\tfor _, committedTxn := range o.committedTxns {\n\t\t// If the committedTxn.ts is less than txn.readTs that implies that the\n\t\t// committedTxn finished before the current transaction started.\n\t\t// We don't need to check for conflict in that case.\n\t\t// This change assumes linearizability. Lack of linearizability could\n\t\t// cause the read ts of a new txn to be lower than the commit ts of\n\t\t// a txn before it (@mrjn).\n\t\tif committedTxn.ts <= txn.readTs {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, ro := range txn.reads {\n\t\t\tif _, has := committedTxn.conflictKeys[ro]; has {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}",
  "func IsConflicting(err error) bool {\n\treturn err == ErrConflict\n}",
  "func (o *TransactionResult) HasProblems() bool {\n\tif o != nil && o.Problems != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
  "func hasDependency(bld *build.File, r *build.Rule, dep string) bool {\n\tpkg := filepath.Dir(bld.Path)\n\toldDeps := r.Attr(\"deps\")\n\tif edit.ListFind(oldDeps, dep, pkg) != nil {\n\t\treturn true\n\t}\n\truntimeDeps := r.Attr(\"runtime_deps\")\n\treturn edit.ListFind(runtimeDeps, dep, pkg) != nil\n}",
  "func (pd PackageDifference) Any() bool {\n\tif len(pd.Additions) > 0 || len(pd.Removals) > 0 || len(pd.Changes) > 0 {\n\t\treturn true\n\t}\n\treturn false\n}",
  "func (e *Eval) dirty(f *Flow) bool {\n\tif f.Op == Extern {\n\t\treturn true\n\t}\n\tfor _, dep := range f.Deps {\n\t\tif e.dirty(dep) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
  "func hasScripts(def types.Definition) bool {\n\treturn def.BuildData.Post != \"\" || def.BuildData.Pre != \"\" || def.BuildData.Setup != \"\" || def.BuildData.Test != \"\"\n}",
  "func ConflictingRequires(verbose bool) (bool, error) {\r\n\t// obtain the set of requires by all modules in our build (via 'go mod graph').\r\n\t// this takes into account replace directives.\r\n\trequires, err := modgraph.Requirements()\r\n\tif err != nil {\r\n\t\treturn false, err\r\n\t}\r\n\r\n\t// track our paths and versions in { path: {version, version, ...}, ... } map.\r\n\tpaths := make(map[string][]string)\r\n\tfor _, require := range requires {\r\n\t\tf := strings.Split(require, \"@\")\r\n\t\tif len(f) != 2 {\r\n\t\t\treturn false, fmt.Errorf(\"unexpected requirement: %s\", require)\r\n\t\t}\r\n\t\tpath, version := f[0], f[1]\r\n\t\tif !semver.IsValid(version) {\r\n\t\t\treturn false, fmt.Errorf(\"invalid semver version: %s\", require)\r\n\t\t}\r\n\r\n\t\t// Probably not needed, but might as well use the canonical semver version. That strips \"+incompatible\",\r\n\t\t// which we need to preserve. Thus, we check here for \"+incompatible\" and add it back if needed.\r\n\t\tif semver.Build(version) == \"+incompatible\" {\r\n\t\t\tpaths[path] = append(paths[path], semver.Canonical(version)+\"+incompatible\")\r\n\t\t} else {\r\n\t\t\tpaths[path] = append(paths[path], semver.Canonical(version))\r\n\t\t}\r\n\t}\r\n\r\n\t// for each path, loop over its versions (in semantic order) and build up a list\r\n\t// of potential conflicts.\r\n\tflagged := false\r\n\tfor path, versions := range paths {\r\n\t\tsort.Slice(versions, func(i, j int) bool { return -1 == semver.Compare(versions[i], versions[j]) })\r\n\r\n\t\tif verbose {\r\n\t\t\tfmt.Printf(\"gomodvet: conflictingrequires: module %q has require versions: %v\\n\", path, versions)\r\n\t\t}\r\n\r\n\t\tpriorVersion := \"\"\r\n\t\tvar potentialIncompats []string\r\n\t\tfor _, version := range versions {\r\n\t\t\tif version == priorVersion {\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\t\t\tif isBeforeV1(version) {\r\n\t\t\t\t// all pre-v1 versions are potentially incompatible\r\n\t\t\t\tpotentialIncompats = append(potentialIncompats, version)\r\n\t\t\t} else if isV1(version) && !isV1(priorVersion) {\r\n\t\t\t\t// the first v1 version seen is potentially incompatible with any v0, v2+incompatible, v3+incompatible, etc.\r\n\t\t\t\tpotentialIncompats = append(potentialIncompats, version)\r\n\t\t\t} else if isV2OrHigherIncompat(version) && semver.Major(version) != semver.Major(priorVersion) {\r\n\t\t\t\t// the first major version v2+incompatible, v3+incompatible, etc is potentially incompatible.\r\n\t\t\t\t// (If two v2+incompatible versions are seen, in theory they should be compatible with each other).\r\n\t\t\t\tpotentialIncompats = append(potentialIncompats, version)\r\n\t\t\t}\r\n\t\t\tpriorVersion = version\r\n\t\t}\r\n\t\tif len(potentialIncompats) > 1 {\r\n\t\t\t// mutiple potential incompatible versions, which means they can be incompatible with each other.\r\n\t\t\tfmt.Printf(\"gomodvet-004: module %q was required with potentially incompatible versions: %s\\n\",\r\n\t\t\t\tpath, strings.Join(potentialIncompats, \", \"))\r\n\t\t\tflagged = true\r\n\t\t}\r\n\t}\r\n\treturn flagged, nil\r\n}",
  "func (j *Job) HasDependencies() bool {\n\tj.depMut.RLock()\n\tdefer j.depMut.RUnlock()\n\treturn len(j.dependents) > 0\n}",
  "func (r *ApplyOrgResult) HasChanges() bool {\n\treturn *r != ApplyOrgResult{}\n}",
  "func HasRelevantChanges(relevantPaths []string) (bool, error) {\n\tchanges, err := GetChangedFiles()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tlogFields := logrus.Fields{\n\t\t\"changesFound\":  changes,\n\t\t\"RelevantPaths\": relevantPaths,\n\t}\n\n\tfor _, appPath := range relevantPaths {\n\t\tif strings.Contains(changes, appPath) {\n\t\t\toutput.Logger().\n\t\t\t\tWithFields(logFields).\n\t\t\t\tWithField(\"relevantChangesFound\", appPath).\n\t\t\t\tInfo(\"found at least one relevant change in the current commit\")\n\t\t\treturn true, nil\n\t\t}\n\t}\n\toutput.Logger().WithFields(logFields).\n\t\tWarn(\"no relevant changes found in this commit\")\n\treturn false, nil\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	UnmarshalJSON decodes the check values. 
 | 
	func (d *DiffCheckValues) UnmarshalJSON(b []byte) (err error) {
	d.Check, err = icheck.UnmarshalJSON(b)
	if errors2.EInternal == errors2.ErrorCode(err) {
		return nil
	}
	return err
} 
 | 
	[
  "func (c *CheckNameAvailabilityResponseBody) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"absCode\":\n\t\t\terr = unpopulate(val, \"AbsCode\", &c.AbsCode)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"message\":\n\t\t\terr = unpopulate(val, \"Message\", &c.Message)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"valid\":\n\t\t\terr = unpopulate(val, \"Valid\", &c.Valid)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (scw *SafetyCheckWrapper) UnmarshalJSON(body []byte) error {\n\tvar m map[string]*json.RawMessage\n\terr := json.Unmarshal(body, &m)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor k, v := range m {\n\t\tswitch k {\n\t\tcase \"SafetyCheck\":\n\t\t\tif v != nil {\n\t\t\t\tsafetyCheck, err := unmarshalBasicSafetyCheck(*v)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tscw.SafetyCheck = safetyCheck\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
  "func (c *CheckAvailabilityResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &c.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"isAvailiable\":\n\t\t\terr = unpopulate(val, \"IsAvailiable\", &c.IsAvailiable)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"location\":\n\t\t\terr = unpopulate(val, \"Location\", &c.Location)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &c.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"sku\":\n\t\t\terr = unpopulate(val, \"SKU\", &c.SKU)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"tags\":\n\t\t\terr = unpopulate(val, \"Tags\", &c.Tags)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &c.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func UnmarshalAndCheckValue(data []byte, v interface{}) error {\n\treturn json.Unmarshal(data, v)\n}",
  "func (e *EntityNameAvailabilityCheckOutput) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", e, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"message\":\n\t\t\terr = unpopulate(val, \"Message\", &e.Message)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"nameAvailable\":\n\t\t\terr = unpopulate(val, \"NameAvailable\", &e.NameAvailable)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"reason\":\n\t\t\terr = unpopulate(val, \"Reason\", &e.Reason)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", e, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (c *CheckNameAvailabilityRequestBody) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &c.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &c.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (c *CheckNameAvailabilityInput) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &c.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &c.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (c *CheckNameAvailabilityResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"message\":\n\t\t\terr = unpopulate(val, \"Message\", &c.Message)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"nameAvailable\":\n\t\t\terr = unpopulate(val, \"NameAvailable\", &c.NameAvailable)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"reason\":\n\t\t\terr = unpopulate(val, \"Reason\", &c.Reason)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (c *CreateJobValidations) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"individualRequestDetails\":\n\t\t\tc.IndividualRequestDetails, err = unmarshalValidationInputRequestClassificationArray(val)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"validationCategory\":\n\t\t\terr = unpopulate(val, \"ValidationCategory\", &c.ValidationCategory)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (v *VerificationIPFlowResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", v, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"access\":\n\t\t\terr = unpopulate(val, \"Access\", &v.Access)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"ruleName\":\n\t\t\terr = unpopulate(val, \"RuleName\", &v.RuleName)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", v, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (f *FirewallPolicyIntrusionDetection) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", f, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"configuration\":\n\t\t\terr = unpopulate(val, \"Configuration\", &f.Configuration)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"mode\":\n\t\t\terr = unpopulate(val, \"Mode\", &f.Mode)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", f, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (c *ConfigDiagnosticsValidatorResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"issues\":\n\t\t\terr = unpopulate(val, \"Issues\", &c.Issues)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"replicaSetSubnetDisplayName\":\n\t\t\terr = unpopulate(val, \"ReplicaSetSubnetDisplayName\", &c.ReplicaSetSubnetDisplayName)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"status\":\n\t\t\terr = unpopulate(val, \"Status\", &c.Status)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"validatorId\":\n\t\t\terr = unpopulate(val, \"ValidatorID\", &c.ValidatorID)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (c *CheckAvailabilityParameters) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &c.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"isAvailiable\":\n\t\t\terr = unpopulate(val, \"IsAvailiable\", &c.IsAvailiable)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"location\":\n\t\t\terr = unpopulate(val, \"Location\", &c.Location)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &c.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"sku\":\n\t\t\terr = unpopulate(val, \"SKU\", &c.SKU)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"tags\":\n\t\t\terr = unpopulate(val, \"Tags\", &c.Tags)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &c.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (c *CheckNameAvailabilityResponse) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"message\":\n\t\t\terr = unpopulate(val, \"Message\", &c.Message)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"nameAvailable\":\n\t\t\terr = unpopulate(val, \"NameAvailable\", &c.NameAvailable)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"reason\":\n\t\t\terr = unpopulate(val, \"Reason\", &c.Reason)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (c *CertificateVerificationDescription) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"certificate\":\n\t\t\terr = unpopulate(val, \"Certificate\", &c.Certificate)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (c *CheckSKUAvailabilityParameter) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"kind\":\n\t\t\terr = unpopulate(val, \"Kind\", &c.Kind)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"skus\":\n\t\t\terr = unpopulate(val, \"SKUs\", &c.SKUs)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &c.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (r *ReportComplianceStatus) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", r, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"m365\":\n\t\t\terr = unpopulate(val, \"M365\", &r.M365)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", r, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (c *CheckNameAvailabilityParameters) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &c.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &c.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (v *Validator) UnmarshalJSON(data []byte) error {\n\tbv := &bechValidator{}\n\tif err := codec.Cdc.UnmarshalJSON(data, bv); err != nil {\n\t\treturn err\n\t}\n\tconsPubKey, err := sdk.GetConsPubKeyBech32(bv.ConsPubKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*v = Validator{\n\t\tAddress:                 bv.Address,\n\t\tConsPubKey:              consPubKey,\n\t\tJailed:                  bv.Jailed,\n\t\tStakedTokens:            bv.StakedTokens,\n\t\tStatus:                  bv.Status,\n\t\tUnstakingCompletionTime: bv.UnstakingCompletionTime,\n\t}\n\treturn nil\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	UnmarshalJSON decodes the notification endpoint. This is unfortunately necessary. 
 | 
	func (d *DiffNotificationEndpointValues) UnmarshalJSON(b []byte) (err error) {
	d.NotificationEndpoint, err = endpoint.UnmarshalJSON(b)
	if errors2.EInvalid == errors2.ErrorCode(err) {
		return nil
	}
	return
} 
 | 
	[
  "func (n *Notification) UnmarshalJSON(b []byte) error {\n\tnotification := map[string]interface{}{}\n\n\terr := json.Unmarshal(b, ¬ification)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif notification[\"_id\"] != nil && bson.IsObjectIdHex(notification[\"_id\"].(string)) {\n\t\tn.ID = bson.ObjectIdHex(notification[\"_id\"].(string))\n\t}\n\tif notification[\"id\"] != nil && bson.IsObjectIdHex(notification[\"id\"].(string)) {\n\t\tn.ID = bson.ObjectIdHex(notification[\"id\"].(string))\n\t}\n\tif notification[\"recipient\"] == nil {\n\t\t// return errors.New(\"Order Hash is not set\")\n\t} else {\n\t\tn.Recipient = common.HexToAddress(notification[\"recipient\"].(string))\n\t}\n\n\tif notification[\"message\"] != nil {\n\t\tn.Message = notification[\"message\"].(Message)\n\t}\n\n\tif notification[\"type\"] != nil {\n\t\tn.Type = notification[\"type\"].(string)\n\t}\n\n\tif notification[\"status\"] != nil {\n\t\tn.Status = notification[\"status\"].(string)\n\t}\n\n\tif notification[\"createdAt\"] != nil {\n\t\tnm, _ := time.Parse(time.RFC3339Nano, notification[\"createdAt\"].(string))\n\t\tn.CreatedAt = nm\n\t}\n\n\tif notification[\"updatedAt\"] != nil {\n\t\tnm, _ := time.Parse(time.RFC3339Nano, notification[\"updatedAt\"].(string))\n\t\tn.UpdatedAt = nm\n\t}\n\n\treturn nil\n}",
  "func (n *WebpushNotification) UnmarshalJSON(b []byte) error {\n\ttype webpushNotificationInternal WebpushNotification\n\tvar temp = (*webpushNotificationInternal)(n)\n\tif err := json.Unmarshal(b, temp); err != nil {\n\t\treturn err\n\t}\n\tallFields := make(map[string]interface{})\n\tif err := json.Unmarshal(b, &allFields); err != nil {\n\t\treturn err\n\t}\n\tfor k := range n.standardFields() {\n\t\tdelete(allFields, k)\n\t}\n\tif len(allFields) > 0 {\n\t\tn.CustomData = allFields\n\t}\n\treturn nil\n}",
  "func (l *LiveEventEndpoint) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"protocol\":\n\t\t\terr = unpopulate(val, \"Protocol\", &l.Protocol)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"url\":\n\t\t\terr = unpopulate(val, \"URL\", &l.URL)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (p *PrivateEndpoint) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", p, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &p.ID)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", p, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (n *NotificationHubResource) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", n, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &n.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"location\":\n\t\t\terr = unpopulate(val, \"Location\", &n.Location)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &n.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &n.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"sku\":\n\t\t\terr = unpopulate(val, \"SKU\", &n.SKU)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"tags\":\n\t\t\terr = unpopulate(val, \"Tags\", &n.Tags)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &n.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", n, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (m *MessagingEndpointProperties) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", m, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"lockDurationAsIso8601\":\n\t\t\terr = unpopulate(val, \"LockDurationAsIso8601\", &m.LockDurationAsIso8601)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"maxDeliveryCount\":\n\t\t\terr = unpopulate(val, \"MaxDeliveryCount\", &m.MaxDeliveryCount)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"ttlAsIso8601\":\n\t\t\terr = unpopulate(val, \"TTLAsIso8601\", &m.TTLAsIso8601)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", m, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (n *NotificationChannelProperties) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn err\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"createdDate\":\n\t\t\terr = unpopulateTimeRFC3339(val, &n.CreatedDate)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"description\":\n\t\t\terr = unpopulate(val, &n.Description)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"emailRecipient\":\n\t\t\terr = unpopulate(val, &n.EmailRecipient)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"events\":\n\t\t\terr = unpopulate(val, &n.Events)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"notificationLocale\":\n\t\t\terr = unpopulate(val, &n.NotificationLocale)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"provisioningState\":\n\t\t\terr = unpopulate(val, &n.ProvisioningState)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"uniqueIdentifier\":\n\t\t\terr = unpopulate(val, &n.UniqueIdentifier)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"webHookUrl\":\n\t\t\terr = unpopulate(val, &n.WebHookURL)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
  "func UnmarshalNotificationResponse(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(NotificationResponse)\n\terr = core.UnmarshalPrimitive(m, \"notification_id\", &obj.NotificationID)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}",
  "func (a *AndroidNotification) UnmarshalJSON(b []byte) error {\n\ttype androidInternal AndroidNotification\n\ttemp := struct {\n\t\tEventTimestamp string   `json:\"event_time,omitempty\"`\n\t\tPriority       string   `json:\"notification_priority,omitempty\"`\n\t\tVisibility     string   `json:\"visibility,omitempty\"`\n\t\tVibrateTimings []string `json:\"vibrate_timings,omitempty\"`\n\t\t*androidInternal\n\t}{\n\t\tandroidInternal: (*androidInternal)(a),\n\t}\n\tif err := json.Unmarshal(b, &temp); err != nil {\n\t\treturn err\n\t}\n\n\tif temp.Priority != \"\" {\n\t\tpriorities := map[string]AndroidNotificationPriority{\n\t\t\t\"PRIORITY_MIN\":     PriorityMin,\n\t\t\t\"PRIORITY_LOW\":     PriorityLow,\n\t\t\t\"PRIORITY_DEFAULT\": PriorityDefault,\n\t\t\t\"PRIORITY_HIGH\":    PriorityHigh,\n\t\t\t\"PRIORITY_MAX\":     PriorityMax,\n\t\t}\n\t\tif prio, ok := priorities[temp.Priority]; ok {\n\t\t\ta.Priority = prio\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"unknown priority value: %q\", temp.Priority)\n\t\t}\n\t}\n\n\tif temp.Visibility != \"\" {\n\t\tvisibilities := map[string]AndroidNotificationVisibility{\n\t\t\t\"PRIVATE\": VisibilityPrivate,\n\t\t\t\"PUBLIC\":  VisibilityPublic,\n\t\t\t\"SECRET\":  VisibilitySecret,\n\t\t}\n\t\tif vis, ok := visibilities[temp.Visibility]; ok {\n\t\t\ta.Visibility = vis\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"unknown visibility value: %q\", temp.Visibility)\n\t\t}\n\t}\n\n\tif temp.EventTimestamp != \"\" {\n\t\tts, err := time.Parse(rfc3339Zulu, temp.EventTimestamp)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ta.EventTimestamp = &ts\n\t}\n\n\tvar vibTimings []int64\n\tfor _, t := range temp.VibrateTimings {\n\t\tvibTime, err := stringToDuration(t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tmillis := int64(vibTime / time.Millisecond)\n\t\tvibTimings = append(vibTimings, millis)\n\t}\n\ta.VibrateTimingMillis = vibTimings\n\treturn nil\n}",
  "func (w *WebHookEventSubscriptionDestination) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", w, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"endpointType\":\n\t\t\terr = unpopulate(val, \"EndpointType\", &w.EndpointType)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &w.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", w, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (p *PrivateEndpointProperty) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", p, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &p.ID)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", p, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (s *SimplePrivateEndpointConnection) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &s.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &s.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &s.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"systemData\":\n\t\t\terr = unpopulate(val, \"SystemData\", &s.SystemData)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &s.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (p *PrivateEndpointConnection) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", p, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &p.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &p.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &p.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &p.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", p, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (a *NotificationFeedActivity) UnmarshalJSON(b []byte) (err error) {\n\n\trawPayload := make(map[string]*json.RawMessage)\n\tmetadata := make(map[string]string)\n\n\terr = json.Unmarshal(b, &rawPayload)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor key, value := range rawPayload {\n\t\tlowerKey := strings.ToLower(key)\n\n\t\tif value == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif lowerKey == \"id\" {\n\t\t\tvar strValue string\n\t\t\tjson.Unmarshal(*value, &strValue)\n\t\t\ta.ID = strValue\n\t\t} else if lowerKey == \"actor\" {\n\t\t\tvar strValue string\n\t\t\tjson.Unmarshal(*value, &strValue)\n\t\t\ta.Actor = FeedID(strValue)\n\t\t} else if lowerKey == \"verb\" {\n\t\t\tvar strValue string\n\t\t\tjson.Unmarshal(*value, &strValue)\n\t\t\ta.Verb = strValue\n\t\t} else if lowerKey == \"foreign_id\" {\n\t\t\tvar strValue string\n\t\t\tjson.Unmarshal(*value, &strValue)\n\t\t\ta.ForeignID = strValue\n\t\t} else if lowerKey == \"object\" {\n\t\t\tvar strValue string\n\t\t\tjson.Unmarshal(*value, &strValue)\n\t\t\ta.Object = FeedID(strValue)\n\t\t} else if lowerKey == \"origin\" {\n\t\t\tvar strValue string\n\t\t\tjson.Unmarshal(*value, &strValue)\n\t\t\ta.Origin = FeedID(strValue)\n\t\t} else if lowerKey == \"target\" {\n\t\t\tvar strValue string\n\t\t\tjson.Unmarshal(*value, &strValue)\n\t\t\ta.Target = FeedID(strValue)\n\t\t} else if lowerKey == \"time\" {\n\t\t\tvar strValue string\n\t\t\terr := json.Unmarshal(*value, &strValue)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttimeStamp, err := time.Parse(\"2006-01-02T15:04:05.999999\", strValue)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ta.TimeStamp = &timeStamp\n\t\t} else if lowerKey == \"data\" {\n\t\t\ta.Data = value\n\t\t} else if lowerKey == \"to\" {\n\n\t\t\tvar to1D []string\n\t\t\tvar to2D [][]string\n\n\t\t\terr := json.Unmarshal(*value, &to1D)\n\t\t\tif err != nil {\n\t\t\t\terr = nil\n\t\t\t\terr = json.Unmarshal(*value, &to2D)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tfor _, to := range to2D {\n\t\t\t\t\tif len(to) == 2 {\n\t\t\t\t\t\tfeedStr := to[0] + \" \" + to[1]\n\t\t\t\t\t\tto1D = append(to1D, feedStr)\n\t\t\t\t\t} else if len(to) == 1 {\n\t\t\t\t\t\tto1D = append(to1D, to[0])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, to := range to1D {\n\n\t\t\t\tfeed := GeneralFeed{}\n\n\t\t\t\tmatch, err := regexp.MatchString(`^\\w+:\\w+ .*?$`, to)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif match {\n\t\t\t\t\tfirstSplit := strings.Split(to, \":\")\n\t\t\t\t\tsecondSplit := strings.Split(firstSplit[1], \" \")\n\n\t\t\t\t\tfeed.FeedSlug = firstSplit[0]\n\t\t\t\t\tfeed.UserID = secondSplit[0]\n\t\t\t\t\tfeed.token = secondSplit[1]\n\t\t\t\t\ta.To = append(a.To, &feed)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tmatch = false\n\t\t\t\terr = nil\n\n\t\t\t\tmatch, err = regexp.MatchString(`^\\w+:\\w+$`, to)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif match {\n\t\t\t\t\tfirstSplit := strings.Split(to, \":\")\n\n\t\t\t\t\tfeed.FeedSlug = firstSplit[0]\n\t\t\t\t\tfeed.UserID = firstSplit[1]\n\t\t\t\t\ta.To = append(a.To, &feed)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tvar strValue string\n\t\t\tjson.Unmarshal(*value, &strValue)\n\t\t\tmetadata[key] = strValue\n\t\t}\n\t}\n\n\ta.MetaData = metadata\n\treturn nil\n\n}",
  "func (p *PrivateEndpointConnection) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", p, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &p.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &p.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &p.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"systemData\":\n\t\t\terr = unpopulate(val, \"SystemData\", &p.SystemData)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &p.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", p, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (e *EdgeProfileSubscriptionPatch) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", e, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &e.ID)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", e, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (d *DefenderSettingsPropertiesMdeIntegration) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"status\":\n\t\t\terr = unpopulate(val, \"Status\", &d.Status)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (e *EndpointAccessResource) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", e, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"relay\":\n\t\t\terr = unpopulate(val, \"Relay\", &e.Relay)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", e, err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func (e *EndpointProperties) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", e, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"provisioningState\":\n\t\t\terr = unpopulate(val, \"ProvisioningState\", &e.ProvisioningState)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"resourceId\":\n\t\t\terr = unpopulate(val, \"ResourceID\", &e.ResourceID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &e.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", e, err)\n\t\t}\n\t}\n\treturn nil\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	MarshalJSON marshals a summary chart. 
 | 
	func (s *SummaryChart) MarshalJSON() ([]byte, error) {
	b, err := influxdb.MarshalViewPropertiesJSON(s.Properties)
	if err != nil {
		return nil, err
	}
	type alias SummaryChart
	out := struct {
		Props json.RawMessage `json:"properties"`
		alias
	}{
		Props: b,
		alias: alias(*s),
	}
	return json.Marshal(out)
} 
 | 
	[
  "func (c CveSummary) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"critical\", c.Critical)\n\tpopulate(objectMap, \"high\", c.High)\n\tpopulate(objectMap, \"low\", c.Low)\n\tpopulate(objectMap, \"medium\", c.Medium)\n\tpopulate(objectMap, \"undefined\", c.Undefined)\n\tpopulate(objectMap, \"unknown\", c.Unknown)\n\treturn json.Marshal(objectMap)\n}",
  "func (s Summary) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]interface{})\n\tpopulate(objectMap, \"@odata.context\", s.ODataContext)\n\tpopulate(objectMap, \"@odata.id\", s.ODataID)\n\tpopulate(objectMap, \"policyAssignments\", s.PolicyAssignments)\n\tpopulate(objectMap, \"results\", s.Results)\n\treturn json.Marshal(objectMap)\n}",
  "func (a *Action) AggregateSummaryJSON() ([]byte, error) {\n\ta.Lock()\n\tdefer a.Unlock()\n\n\tif a.agg == nil {\n\t\ta.agg = newActionAggregators(a)\n\t}\n\n\treturn a.agg.action.agg.resultJSON(), nil\n}",
  "func (p PolicyDefinitionSummary) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]interface{})\n\tpopulate(objectMap, \"effect\", p.Effect)\n\tpopulate(objectMap, \"policyDefinitionGroupNames\", p.PolicyDefinitionGroupNames)\n\tpopulate(objectMap, \"policyDefinitionId\", p.PolicyDefinitionID)\n\tpopulate(objectMap, \"policyDefinitionReferenceId\", p.PolicyDefinitionReferenceID)\n\tpopulate(objectMap, \"results\", p.Results)\n\treturn json.Marshal(objectMap)\n}",
  "func (sps ServersProjectSummary) MarshalJSON() ([]byte, error) {\n\tsps.InstanceType = InstanceTypeBasicProjectSummaryInstanceTypeServers\n\tobjectMap := make(map[string]interface{})\n\tif sps.DiscoveredCount != nil {\n\t\tobjectMap[\"discoveredCount\"] = sps.DiscoveredCount\n\t}\n\tif sps.AssessedCount != nil {\n\t\tobjectMap[\"assessedCount\"] = sps.AssessedCount\n\t}\n\tif sps.ReplicatingCount != nil {\n\t\tobjectMap[\"replicatingCount\"] = sps.ReplicatingCount\n\t}\n\tif sps.TestMigratedCount != nil {\n\t\tobjectMap[\"testMigratedCount\"] = sps.TestMigratedCount\n\t}\n\tif sps.MigratedCount != nil {\n\t\tobjectMap[\"migratedCount\"] = sps.MigratedCount\n\t}\n\tif sps.RefreshSummaryState != \"\" {\n\t\tobjectMap[\"refreshSummaryState\"] = sps.RefreshSummaryState\n\t}\n\tif sps.LastSummaryRefreshedTime != nil {\n\t\tobjectMap[\"lastSummaryRefreshedTime\"] = sps.LastSummaryRefreshedTime\n\t}\n\tif sps.ExtendedSummary != nil {\n\t\tobjectMap[\"extendedSummary\"] = sps.ExtendedSummary\n\t}\n\tif sps.InstanceType != \"\" {\n\t\tobjectMap[\"instanceType\"] = sps.InstanceType\n\t}\n\treturn json.Marshal(objectMap)\n}",
  "func (s SummaryResults) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]interface{})\n\tpopulate(objectMap, \"nonCompliantPolicies\", s.NonCompliantPolicies)\n\tpopulate(objectMap, \"nonCompliantResources\", s.NonCompliantResources)\n\tpopulate(objectMap, \"policyDetails\", s.PolicyDetails)\n\tpopulate(objectMap, \"policyGroupDetails\", s.PolicyGroupDetails)\n\tpopulate(objectMap, \"queryResultsUri\", s.QueryResultsURI)\n\tpopulate(objectMap, \"resourceDetails\", s.ResourceDetails)\n\treturn json.Marshal(objectMap)\n}",
  "func (g *GroupCounts) MarshalJSON() ([]byte, error) {\n\tgroups := g.Groups()\n\tvar counts interface{} = groups\n\n\tif len(groups) == 0 {\n\t\treturn []byte(\"[]\"), nil\n\t}\n\n\tswitch g.aggregateType {\n\tcase sumAggregate:\n\t\tcounts = *(*[]groupCountSum)(unsafe.Pointer(&groups))\n\tcase distinctAggregate:\n\t\tcounts = *(*[]groupCountAggregate)(unsafe.Pointer(&groups))\n\tcase decimalSumAggregate:\n\t\tcounts = *(*[]groupCountDecimalSum)(unsafe.Pointer(&groups))\n\t}\n\treturn json.Marshal(counts)\n}",
  "func (ss SolutionSummary) MarshalJSON() ([]byte, error) {\n\tss.InstanceType = InstanceTypeBasicSolutionSummaryInstanceTypeSolutionSummary\n\tobjectMap := make(map[string]interface{})\n\tif ss.InstanceType != \"\" {\n\t\tobjectMap[\"instanceType\"] = ss.InstanceType\n\t}\n\treturn json.Marshal(objectMap)\n}",
  "func (ps ProjectSummary) MarshalJSON() ([]byte, error) {\n\tps.InstanceType = InstanceTypeBasicProjectSummaryInstanceTypeProjectSummary\n\tobjectMap := make(map[string]interface{})\n\tif ps.RefreshSummaryState != \"\" {\n\t\tobjectMap[\"refreshSummaryState\"] = ps.RefreshSummaryState\n\t}\n\tif ps.LastSummaryRefreshedTime != nil {\n\t\tobjectMap[\"lastSummaryRefreshedTime\"] = ps.LastSummaryRefreshedTime\n\t}\n\tif ps.ExtendedSummary != nil {\n\t\tobjectMap[\"extendedSummary\"] = ps.ExtendedSummary\n\t}\n\tif ps.InstanceType != \"\" {\n\t\tobjectMap[\"instanceType\"] = ps.InstanceType\n\t}\n\treturn json.Marshal(objectMap)\n}",
  "func (a AlertsSummaryGroup) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]interface{})\n\tpopulate(objectMap, \"groupedby\", a.Groupedby)\n\tpopulate(objectMap, \"smartGroupsCount\", a.SmartGroupsCount)\n\tpopulate(objectMap, \"total\", a.Total)\n\tpopulate(objectMap, \"values\", a.Values)\n\treturn json.Marshal(objectMap)\n}",
  "func (s *Series) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(&jsonMarshallableSeries{\n\t\tTarget:     s.Alias(),\n\t\tDataPoints: s.Points(),\n\t})\n}",
  "func (sss ServersSolutionSummary) MarshalJSON() ([]byte, error) {\n\tsss.InstanceType = InstanceTypeBasicSolutionSummaryInstanceTypeServers\n\tobjectMap := make(map[string]interface{})\n\tif sss.DiscoveredCount != nil {\n\t\tobjectMap[\"discoveredCount\"] = sss.DiscoveredCount\n\t}\n\tif sss.AssessedCount != nil {\n\t\tobjectMap[\"assessedCount\"] = sss.AssessedCount\n\t}\n\tif sss.ReplicatingCount != nil {\n\t\tobjectMap[\"replicatingCount\"] = sss.ReplicatingCount\n\t}\n\tif sss.TestMigratedCount != nil {\n\t\tobjectMap[\"testMigratedCount\"] = sss.TestMigratedCount\n\t}\n\tif sss.MigratedCount != nil {\n\t\tobjectMap[\"migratedCount\"] = sss.MigratedCount\n\t}\n\tif sss.InstanceType != \"\" {\n\t\tobjectMap[\"instanceType\"] = sss.InstanceType\n\t}\n\treturn json.Marshal(objectMap)\n}",
  "func (f FirmwareSummary) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"analysisTimeSeconds\", f.AnalysisTimeSeconds)\n\tpopulate(objectMap, \"binaryCount\", f.BinaryCount)\n\tpopulate(objectMap, \"componentCount\", f.ComponentCount)\n\tpopulate(objectMap, \"extractedFileCount\", f.ExtractedFileCount)\n\tpopulate(objectMap, \"extractedSize\", f.ExtractedSize)\n\tpopulate(objectMap, \"fileSize\", f.FileSize)\n\tpopulate(objectMap, \"rootFileSystems\", f.RootFileSystems)\n\treturn json.Marshal(objectMap)\n}",
  "func (ara AggregatedResultsAnalysis) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]interface{})\n\tif ara.Duration != nil {\n\t\tobjectMap[\"duration\"] = ara.Duration\n\t}\n\tif ara.NotReportedResultsByOutcome != nil {\n\t\tobjectMap[\"notReportedResultsByOutcome\"] = ara.NotReportedResultsByOutcome\n\t}\n\tif ara.PreviousContext != nil {\n\t\tobjectMap[\"previousContext\"] = ara.PreviousContext\n\t}\n\tif ara.ResultsByOutcome != nil {\n\t\tobjectMap[\"resultsByOutcome\"] = ara.ResultsByOutcome\n\t}\n\tif ara.ResultsDifference != nil {\n\t\tobjectMap[\"resultsDifference\"] = ara.ResultsDifference\n\t}\n\tif ara.RunSummaryByOutcome != nil {\n\t\tobjectMap[\"runSummaryByOutcome\"] = ara.RunSummaryByOutcome\n\t}\n\tif ara.RunSummaryByState != nil {\n\t\tobjectMap[\"runSummaryByState\"] = ara.RunSummaryByState\n\t}\n\tif ara.TotalTests != nil {\n\t\tobjectMap[\"totalTests\"] = ara.TotalTests\n\t}\n\treturn json.Marshal(objectMap)\n}",
  "func (b BinaryHardeningSummary) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"canary\", b.Canary)\n\tpopulate(objectMap, \"nx\", b.Nx)\n\tpopulate(objectMap, \"pie\", b.Pie)\n\tpopulate(objectMap, \"relro\", b.Relro)\n\tpopulate(objectMap, \"stripped\", b.Stripped)\n\tpopulate(objectMap, \"totalFiles\", b.TotalFiles)\n\treturn json.Marshal(objectMap)\n}",
  "func (r ReservationSummary) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"cancelledCount\", r.CancelledCount)\n\tpopulate(objectMap, \"expiredCount\", r.ExpiredCount)\n\tpopulate(objectMap, \"expiringCount\", r.ExpiringCount)\n\tpopulate(objectMap, \"failedCount\", r.FailedCount)\n\tpopulate(objectMap, \"pendingCount\", r.PendingCount)\n\tpopulate(objectMap, \"succeededCount\", r.SucceededCount)\n\treturn json.Marshal(objectMap)\n}",
  "func (dps DatabaseProjectSummary) MarshalJSON() ([]byte, error) {\n\tdps.InstanceType = InstanceTypeBasicProjectSummaryInstanceTypeDatabases\n\tobjectMap := make(map[string]interface{})\n\tif dps.RefreshSummaryState != \"\" {\n\t\tobjectMap[\"refreshSummaryState\"] = dps.RefreshSummaryState\n\t}\n\tif dps.LastSummaryRefreshedTime != nil {\n\t\tobjectMap[\"lastSummaryRefreshedTime\"] = dps.LastSummaryRefreshedTime\n\t}\n\tif dps.ExtendedSummary != nil {\n\t\tobjectMap[\"extendedSummary\"] = dps.ExtendedSummary\n\t}\n\tif dps.InstanceType != \"\" {\n\t\tobjectMap[\"instanceType\"] = dps.InstanceType\n\t}\n\treturn json.Marshal(objectMap)\n}",
  "func (m *Monitoring) Summary(w http.ResponseWriter, r *http.Request) {\n\tb, err := stats.Summary()\n\tif err != nil {\n\t\tError(w, http.StatusNotFound, err, \"failed to get metrics\")\n\t\treturn\n\t}\n\tJSON(w, http.StatusOK, b)\n}",
  "func (dss DatabasesSolutionSummary) MarshalJSON() ([]byte, error) {\n\tdss.InstanceType = InstanceTypeBasicSolutionSummaryInstanceTypeDatabases\n\tobjectMap := make(map[string]interface{})\n\tif dss.DatabasesAssessedCount != nil {\n\t\tobjectMap[\"databasesAssessedCount\"] = dss.DatabasesAssessedCount\n\t}\n\tif dss.DatabaseInstancesAssessedCount != nil {\n\t\tobjectMap[\"databaseInstancesAssessedCount\"] = dss.DatabaseInstancesAssessedCount\n\t}\n\tif dss.MigrationReadyCount != nil {\n\t\tobjectMap[\"migrationReadyCount\"] = dss.MigrationReadyCount\n\t}\n\tif dss.InstanceType != \"\" {\n\t\tobjectMap[\"instanceType\"] = dss.InstanceType\n\t}\n\treturn json.Marshal(objectMap)\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
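The MarshalJSON above leans on two encoding/json behaviors worth seeing in isolation: a local `type alias SummaryChart` copies the fields but not the method set, so marshaling the embedded value cannot recurse back into MarshalJSON, and the outer Props field, sitting at a shallower depth with the same json tag, overrides the embedded properties field. A minimal, self-contained sketch of just that pattern follows; the Chart type and the sample values are hypothetical stand-ins, not taken from influxdb.

package main

import (
	"encoding/json"
	"fmt"
)

// Chart is a hypothetical stand-in for SummaryChart: Properties holds
// pre-rendered JSON that must be emitted verbatim under "properties".
type Chart struct {
	Name       string          `json:"name"`
	Properties json.RawMessage `json:"properties"`
}

func (c *Chart) MarshalJSON() ([]byte, error) {
	type alias Chart // same fields as Chart, but no MarshalJSON method, so no recursion
	out := struct {
		Props json.RawMessage `json:"properties"` // shallower field wins over alias.Properties
		alias
	}{
		Props: c.Properties,
		alias: alias(*c),
	}
	return json.Marshal(out)
}

func main() {
	c := &Chart{Name: "cpu", Properties: json.RawMessage(`{"type":"xy"}`)}
	b, _ := json.Marshal(c) // error ignored for brevity in this sketch
	fmt.Println(string(b))  // {"properties":{"type":"xy"},"name":"cpu"}
}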
					
	MaxFiles returns the number of files of the given size that can be written, based on free blocks and block size 
 | 
	func (s *StatFS) MaxFiles(size int) int {
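	// Free blocks divided by blocks-per-file. Note the inner division
	// truncates: when size < Bsize it yields zero and the outer division panics.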
	return int(s.stat.Bfree / (uint64(size) / uint64(s.stat.Bsize)))
} 
 | 
	[
  "func DefineFiles(tsize uint64, hilimit uint64, flS *FileCollection) error {\n\tvar nfiles, remain uint64\n\ttfs, err := flS.totalFileSize() \n\tif err != nil {\n\t\tlog.Printf(\"DefineFiles(): Error computing total file size: %s\", err.Error())\n\t\treturn err\n\t}\n\tif tsize > tfs && tsize > hilimit { //Trying to add files and the total size exceeds the limit\n\t\treturn fmt.Errorf(\"Size requested is over the limit: requested %d bytes, limit: %d bytes.\", tsize, hilimit)\n\t}\n\tfor index, fsize := range flS.fileSizes {\n\t\tnfiles = tsize / fsize\n\t\tremain = tsize % fsize\n\t\tif nfiles > limitFiles { //Use all files of this size, keep adding more files of higher capacities\n\t\t\ttsize -= limitFiles * fsize\n\t\t\tflS.fileAmmount[index] = limitFiles\n\t\t} else if nfiles == 0 {\n\t\t\tflS.fileAmmount[index] = 0\n\t\t} else {\n\t\t\ttsize -= nfiles * fsize\n\t\t\tflS.fileAmmount[index] = nfiles\n\t\t}\n\t}\n\tif tsize > flS.fileSizes[len(flS.fileSizes)-1] { //The remaining size to allocate is bigger than the biggest file sezie, Add more parts of the maximum size\n\t\tnfiles = tsize / flS.fileSizes[len(flS.fileSizes)-1]\n\t\tremain = tsize % flS.fileSizes[len(flS.fileSizes)-1]\n\t\tflS.fileAmmount[len(flS.fileAmmount)-1] += nfiles\n\t}\n\tif remain > 0 { //The remain must be smaller than the bigger file size.\n\t\tfor index, fsize := range flS.fileSizes {\n\t\t\tif remain <= 3*fsize {\n\t\t\t\tsignRemain := int(remain)\n\t\t\t\tfor signRemain > 0 {\n\t\t\t\t\tflS.fileAmmount[index]++\n\t\t\t\t\tsignRemain -= int(fsize)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
  "func MaxOpenFiles() int { return maxOpenFiles() }",
  "func (s *StatFS) BlockFiles(size int) int {\n\treturn size / int(s.stat.Bsize)\n}",
  "func (g *Group) MaxFiles() int64 {\n\treturn g.Max\n}",
  "func (opts *FIFOCompactionOptions) GetMaxTableFilesSize() uint64 {\n\treturn uint64(C.rocksdb_fifo_compaction_options_get_max_table_files_size(opts.c))\n}",
  "func calcMaxBytesPerBatch(fileSizeBytes int64) int64 {\n\tconst mb = 1e6\n\n\treturn 100 * mb\n}",
  "func _908walLimitSize(tls *crt.TLS, _pWal uintptr /* *TWal */, _nMax Ti64) {\n\tesc := crt.MustMalloc(8)\n\tvar (\n\t\t_sz = esc // *Ti64\n\t\t_rx int32\n\t)\n\tdefer crt.Free(esc)\n\t_526sqlite3BeginBenignMalloc(tls)\n\t_rx = _355sqlite3OsFileSize(tls, *(*uintptr)(unsafe.Pointer(_pWal + 8)), _sz)\n\tif _rx != int32(0) || (*(*Ti64)(unsafe.Pointer(_sz))) <= _nMax {\n\t\tgoto _1\n\t}\n\n\t_rx = _356sqlite3OsTruncate(tls, *(*uintptr)(unsafe.Pointer(_pWal + 8)), _nMax)\n_1:\n\t_527sqlite3EndBenignMalloc(tls)\n\tif _rx == 0 {\n\t\tgoto _2\n\t}\n\n\tXsqlite3_log(tls, _rx, ts+21471 /* \"cannot limit WAL size: %s\" */, *(*uintptr)(unsafe.Pointer(_pWal + 108)))\n_2:\n}",
  "func WriteFileWithMaxPerms(path string, data []byte, perms os.FileMode) error {\n\tf, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, perms)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer logger.ErrorIfCalling(f.Close)\n\terr = EnsureFileMaxPerms(f, perms)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = f.Write(data)\n\treturn err\n}",
  "func FileSize(min, max uint64) MatcherFunc { return SizeRange(min, max) }",
  "func (f *FileTarget) SetMaxBufferByte(maxBufferByte int) {\n    f.maxBufferByte = maxBufferByte\n}",
  "func maxItemsPerCachedPart() uint64 {\n\tmem := memory.Remaining()\n\t// Production data shows that each item occupies ~4 bytes in the compressed part.\n\t// It is expected no more than defaultPartsToMerge/2 parts exist\n\t// in the OS page cache before they are merged into bigger part.\n\t// Halft of the remaining RAM must be left for lib/storage parts,\n\t// so the maxItems is calculated using the below code:\n\tmaxItems := uint64(mem) / (4 * defaultPartsToMerge)\n\tif maxItems < 1e6 {\n\t\tmaxItems = 1e6\n\t}\n\treturn maxItems\n}",
  "func (bdl *backpressureDiskLimiter) updateBytesSemaphoreMaxLocked() {\n\tnewMax := int64(bdl.getMaxJournalBytes(bdl.journalBytes, bdl.freeBytes))\n\tdelta := newMax - bdl.byteSemaphoreMax\n\t// These operations are adjusting the *maximum* value of\n\t// bdl.byteSemaphore.\n\tif delta > 0 {\n\t\tbdl.byteSemaphore.Release(delta)\n\t} else if delta < 0 {\n\t\tbdl.byteSemaphore.ForceAcquire(-delta)\n\t}\n\tbdl.byteSemaphoreMax = newMax\n}",
  "func (c *DirentCache) setMaxSize(max uint64) {\n\tc.mu.Lock()\n\tc.maxSize = max\n\tc.maybeShrink()\n\tc.mu.Unlock()\n}",
  "func (opts *FIFOCompactionOptions) SetMaxTableFilesSize(value uint64) {\n\tC.rocksdb_fifo_compaction_options_set_max_table_files_size(opts.c, C.uint64_t(value))\n}",
  "func (acc *Account) limitPerFileBandwidth(n int) {\n\tacc.values.mu.Lock()\n\ttokenBucket := acc.tokenBucket[TokenBucketSlotAccounting]\n\tacc.values.mu.Unlock()\n\n\tif tokenBucket != nil {\n\t\terr := tokenBucket.WaitN(context.Background(), n)\n\t\tif err != nil {\n\t\t\tfs.Errorf(nil, \"Token bucket error: %v\", err)\n\t\t}\n\t}\n}",
  "func setOpenFileLimit() {\n\tvar lim syscall.Rlimit\n\terr := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lim)\n\tif err != nil {\n\t\ttlog.Warn.Printf(\"Getting RLIMIT_NOFILE failed: %v\", err)\n\t\treturn\n\t}\n\tif lim.Cur >= 4096 {\n\t\treturn\n\t}\n\tlim.Cur = 4096\n\terr = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &lim)\n\tif err != nil {\n\t\ttlog.Warn.Printf(\"Setting RLIMIT_NOFILE to %+v failed: %v\", lim, err)\n\t\t//         %+v output: \"{Cur:4097 Max:4096}\" ^\n\t}\n}",
  "func TestMaxTransfer(t *testing.T) {\n\tctx := context.Background()\n\tctx, ci := fs.AddConfig(ctx)\n\tci.MaxTransfer = 3 * 1024\n\tci.Transfers = 1\n\tci.Checkers = 1\n\tci.CutoffMode = fs.CutoffModeHard\n\n\ttest := func(t *testing.T, cutoff fs.CutoffMode) {\n\t\tr := fstest.NewRun(t)\n\t\tci.CutoffMode = cutoff\n\n\t\tif r.Fremote.Name() != \"local\" {\n\t\t\tt.Skip(\"This test only runs on local\")\n\t\t}\n\n\t\t// Create file on source\n\t\tfile1 := r.WriteFile(\"file1\", string(make([]byte, 5*1024)), t1)\n\t\tfile2 := r.WriteFile(\"file2\", string(make([]byte, 2*1024)), t1)\n\t\tfile3 := r.WriteFile(\"file3\", string(make([]byte, 3*1024)), t1)\n\t\tr.CheckLocalItems(t, file1, file2, file3)\n\t\tr.CheckRemoteItems(t)\n\n\t\taccounting.GlobalStats().ResetCounters()\n\n\t\terr := Sync(ctx, r.Fremote, r.Flocal, false)\n\t\texpectedErr := fserrors.FsError(accounting.ErrorMaxTransferLimitReachedFatal)\n\t\tif cutoff != fs.CutoffModeHard {\n\t\t\texpectedErr = accounting.ErrorMaxTransferLimitReachedGraceful\n\t\t}\n\t\tfserrors.Count(expectedErr)\n\t\tassert.Equal(t, expectedErr, err)\n\t}\n\n\tt.Run(\"Hard\", func(t *testing.T) { test(t, fs.CutoffModeHard) })\n\tt.Run(\"Soft\", func(t *testing.T) { test(t, fs.CutoffModeSoft) })\n\tt.Run(\"Cautious\", func(t *testing.T) { test(t, fs.CutoffModeCautious) })\n}",
  "func (bdl *backpressureDiskLimiter) getMaxJournalBytes(\n\tjournalBytes, freeBytes int64) float64 {\n\t// Calculate k(J+F), converting to float64 first to avoid\n\t// overflow, although losing some precision in the process.\n\tjournalBytesFloat := float64(journalBytes)\n\tfreeBytesFloat := float64(freeBytes)\n\tbyteLimit :=\n\t\tbdl.byteLimitFrac * (journalBytesFloat + freeBytesFloat)\n\treturn math.Min(byteLimit, float64(bdl.byteLimit))\n}",
  "func (o *consumer) setMaxPendingBytes(limit int) {\n\to.pblimit = limit\n\to.maxpb = limit / 16\n\tif o.maxpb == 0 {\n\t\to.maxpb = 1\n\t}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
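To make the arithmetic above concrete, and to show one way around the size < Bsize hazard noted in the comment, here is a small self-contained sketch; the stat struct is a hypothetical stand-in for the syscall-backed fields, and the numbers are sample values.

package main

import "fmt"

// stat is a hypothetical stand-in for the syscall.Statfs_t fields MaxFiles uses.
type stat struct{ Bfree, Bsize uint64 }

// maxFiles mirrors StatFS.MaxFiles but guards the blocks-per-file divisor.
func maxFiles(s stat, size int) int {
	blocksPerFile := uint64(size) / s.Bsize
	if blocksPerFile == 0 {
		blocksPerFile = 1 // a file smaller than one block still occupies a block
	}
	return int(s.Bfree / blocksPerFile)
}

func main() {
	s := stat{Bfree: 1000, Bsize: 4096}
	fmt.Println(maxFiles(s, 8192)) // 8192/4096 = 2 blocks per file -> 500 files
	fmt.Println(maxFiles(s, 100))  // would divide by zero unguarded; here -> 1000 files
}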
					
	BlockFiles calculates the number of blocks needed for a file based on filesize and blocksize 
 | 
	func (s *StatFS) BlockFiles(size int) int {
	bs := int(s.stat.Bsize)
	return (size + bs - 1) / bs // round up: a trailing partial block still occupies a whole block
} 
 | 
	[
  "func (m *MFile) NumBlocks() uint64 {\n\treturn m.size / m.blkSize\n}",
  "func blocks(n int) int {\n\tb := align(n+2, blocksize)\n\treturn b / blocksize\n}",
  "func (d *Digest) BlockSize() int { return 1 }",
  "func blocks(dim int, bsize ...int) int {\n\tsize := blocksize\n\tif len(bsize) > 0 {\n\t\tsize = bsize[0]\n\t}\n\treturn (dim + size - 1) / size\n}",
  "func calculateBlocks(size uint64, isDecrypt bool) (blocks uint64) {\n\tblocks = size / BlockSize\n\tif !isDecrypt {\n\t\tblocks++\n\t}\n\treturn\n}",
  "func ProcessFileBlocks(inFile *os.File, outFile *os.File, bufferSize int, startAt int64, len int64, processor BlockProcessor) {\n\tvar pos int64\n\tvar err error\n\tif startAt > 0 {\n\n\t\tpos, err = inFile.Seek(startAt, 0)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tbuffer := make([]byte, bufferSize)\n\n\treader := bufio.NewReader(inFile)\n\twriter := bufio.NewWriter(outFile)\n\n\tdefer writer.Flush()\n\n\tfor {\n\t\tn, err := reader.Read(buffer)\n\t\tif n <= 0 && err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"file read failed: \", err)\n\t\t\tbreak\n\t\t}\n\t\tif len < int64(n) {\n\t\t\tprocessor(writer, buffer, int(len), pos)\n\t\t\tbreak\n\t\t}\n\t\tprocessor(writer, buffer, n, pos)\n\t\tpos += int64(n)\n\t\tlen -= int64(n)\n\t}\n}",
  "func (h *Hash) BlockSize() int { return len(h.buf) }",
  "func ProcessAllFileBlocks(inFile *os.File, outFile *os.File, bufferSize int, startAt int64, processor BlockProcessor) {\n\tvar pos int64\n\tvar err error\n\tif startAt > 0 {\n\n\t\tpos, err = inFile.Seek(startAt, 0)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tbuffer := make([]byte, bufferSize)\n\n\treader := bufio.NewReader(inFile)\n\twriter := bufio.NewWriter(outFile)\n\n\tdefer writer.Flush()\n\n\tfor {\n\t\tn, err := reader.Read(buffer)\n\t\tif n <= 0 && err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"file read failed: \", err)\n\t\t\tbreak\n\t\t}\n\t\tprocessor(writer, buffer, n, pos)\n\t\tpos += int64(n)\n\t}\n}",
  "func calculateBufferSize(blocks uint64) uint64 {\n\tif nbb := NBufferBlocks; blocks < nbb {\n\t\treturn blocks\n\t} else {\n\t\treturn nbb\n\t}\n}",
  "func BlockSizeECB(f EncryptFunc) int {\n\tconst (\n\t\tbufLen       = 1024\n\t\tminBlockSize = 4\n\t\tmaxBlockSize = bufLen / 4\n\t)\n\n\tenc := f(A(bufLen))\n\n\tfor bs := minBlockSize; bs <= maxBlockSize; bs++ {\n\t\tnumNeeded := bufLen/bs - 2 // first or last may be misaligned\n\t\tvar prevBlock []byte       // last block that was seen\n\t\tblockCount := 0            // consecutive occurrences of prevBlock\n\t\tfor start := 0; start+bs < len(enc); start += bs {\n\t\t\tbl := enc[start : start+bs]\n\t\t\tif prevBlock == nil || !bytes.Equal(bl, prevBlock) {\n\t\t\t\tprevBlock = bl\n\t\t\t\tblockCount = 0\n\t\t\t} else {\n\t\t\t\tblockCount++\n\t\t\t\tif blockCount >= numNeeded {\n\t\t\t\t\treturn bs\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tpanic(\"couldn't find block size\")\n}",
  "func (*digest) BlockSize() int {\n\treturn int(BlockSize)\n}",
  "func GetNumBlocks(geom Geometry, blockSize Point) int {\n\tstartPt := geom.StartPoint()\n\tsize := geom.Size()\n\tnumBlocks := 1\n\tfor dim := uint8(0); dim < geom.Size().NumDims(); dim++ {\n\t\tblockLength := blockSize.Value(dim)\n\t\tstartMod := startPt.Value(dim) % blockLength\n\t\tlength := size.Value(dim) + startMod\n\t\tblocks := length / blockLength\n\t\tif length%blockLength != 0 {\n\t\t\tblocks++\n\t\t}\n\t\tnumBlocks *= int(blocks)\n\t}\n\treturn numBlocks\n}",
  "func (lf *ListFile) NumBytes() int64 {\n\t// NOTE: here we don't use IsClosed() because\n\t// it uses the mutex; Size() is used in noMutexIterateLines\n\t// which is called after another mutex is locked,\n\t// making IsClosed() wait forever for the mutex unlock.\n\tif lf.isClosed {\n\t\treturn 0\n\t}\n\n\terr := lf.file.Sync()\n\tif err != nil {\n\t\t// TODO: not panic??\n\t\tpanic(err)\n\t}\n\n\tinfo, err := lf.file.Stat()\n\tif err != nil {\n\t\t// TODO: not panic??\n\t\tpanic(err)\n\t}\n\n\treturn info.Size()\n}",
  "func calculateChunks(state *State, segmentnum uint) []*chunk {\n\tcount := int64(segmentnum)\n\n\tpieceLength := int64(state.BitfieldPieceLength)\n\n\t// calculate the chunks of a resumable file.\n\tif state.Bitfield.Count() != 0 {\n\t\tvar chunks []*chunk\n\t\tvar idx uint32\n\t\tfor {\n\t\t\tstart, ok := state.Bitfield.FirstClear(idx)\n\t\t\tif !ok {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tend, ok := state.Bitfield.FirstSet(start)\n\t\t\tif !ok {\n\t\t\t\tchunks = append(chunks, &chunk{\n\t\t\t\t\toffset: int64(start) * pieceLength,\n\t\t\t\t\tlength: state.FileLength - int64(start)*pieceLength,\n\t\t\t\t})\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tchunks = append(chunks, &chunk{\n\t\t\t\toffset: int64(start) * pieceLength,\n\t\t\t\tlength: int64(end-start) * pieceLength,\n\t\t\t})\n\n\t\t\tidx = end\n\t\t}\n\t\treturn chunks\n\t}\n\n\t// calculate the chunks of a fresh new file.\n\n\tfilesize := state.FileLength\n\t// don't even consider smaller files\n\tif filesize <= pieceLength || count <= 1 {\n\t\treturn []*chunk{{offset: 0, length: filesize}}\n\t}\n\n\t// how many blocks fit perfectly on a filesize\n\tblockCount := filesize / pieceLength\n\t// how many bytes are left out\n\texcessBytes := filesize % pieceLength\n\n\t// If there are no blocks available for the given blocksize, we're gonna\n\t// reduce the count to the max available block count.\n\tif blockCount < count {\n\t\tcount = blockCount\n\t}\n\n\tblocksPerUnit := blockCount / count\n\texcessBlocks := blockCount % count\n\n\tvar chunks []*chunk\n\tfor i := int64(0); i < count; i++ {\n\t\tchunks = append(chunks, &chunk{\n\t\t\toffset: i * blocksPerUnit * pieceLength,\n\t\t\tlength: blocksPerUnit * pieceLength,\n\t\t})\n\t}\n\n\tif excessBlocks > 0 {\n\t\toffset := count * blocksPerUnit * pieceLength\n\t\tlength := excessBlocks * pieceLength\n\t\tchunks = append(chunks, &chunk{\n\t\t\toffset: offset,\n\t\t\tlength: length,\n\t\t})\n\t}\n\n\t// append excess bytes to the last chunk\n\tif excessBytes > 0 {\n\t\tc := chunks[len(chunks)-1]\n\t\tc.length += excessBytes\n\t}\n\n\treturn chunks\n}",
  "func calculatePartitionSize(f *os.File, volumeKey *volumeInfo) (uint64, error) {\n\ts, err := unix.IoctlGetInt(int(f.Fd()), unix.BLKGETSIZE64)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tsize := uint64(s) / volumeKey.storageSectorSize\n\tif size < volumeKey.storageOffset {\n\t\treturn 0, fmt.Errorf(\"Block file size %v is smaller than LUKS segment offset %v\", s, volumeKey.storageOffset)\n\t}\n\treturn size - volumeKey.storageOffset, nil\n}",
  "func getBlkSize(p string) {\n\tfs := syscall.Statfs_t{}\n\terr := syscall.Statfs(p, &fs)\n\tif err != nil {\n\t\tlog.Fatal(\"get block size error with:\", err)\n\t}\n\tBlockSize = uint64(fs.Bsize)\n}",
  "func (bdi *Info) CountBlocks() int {\n\treturn int(C.spdk_bdev_get_num_blocks(bdi.ptr()))\n}",
  "func (tfb *TempFileBlock) Size() (int64, error) {\n\tf, err := os.Open(tfb.TempFile)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"Error opening file block at \\\"\"+tfb.TempFile+\"\\\" for reading\")\n\t}\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"Error getting file info for file block at \\\"\"+tfb.TempFile+\"\\\"\")\n\t}\n\treturn fi.Size(), nil\n}",
  "func (file S3File) GetNumChunks() int64 {\n\tinput := &s3.ListObjectsInput{\n\t\tBucket:    aws.String(file.crypto.bucket),\n\t\tPrefix:    aws.String(file.encryptedPath),\n\t\tDelimiter: aws.String(\"\"),\n\t}\n\n\tresult, err := file.crypto.svc.ListObjects(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase s3.ErrCodeNoSuchBucket:\n\t\t\t\tfmt.Println(s3.ErrCodeNoSuchBucket, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\n\t\treturn -1\n\t}\n\n\tfileSize := result.Contents[0].Size\n\n\t//log.Printf(\"File size: %d\\n\", fileInfo.Size())\n\tpayloadSize := *fileSize - HeaderSize\n\n\tnumChunks := payloadSize / EncryptedChunkSize\n\n\tif numChunks*EncryptedChunkSize < payloadSize {\n\t\tnumChunks++\n\t}\n\n\treturn numChunks\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
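A quick check of the rounding above; blockFiles is a plain-function mirror of the method so the sketch runs standalone, and the 4096 block size is just a sample value.

package main

import "fmt"

// blockFiles mirrors StatFS.BlockFiles with the block size passed in directly.
func blockFiles(size, bsize int) int {
	return (size + bsize - 1) / bsize // ceiling division
}

func main() {
	fmt.Println(blockFiles(4096, 4096)) // 1: exactly one block
	fmt.Println(blockFiles(4097, 4096)) // 2: one extra byte spills into a second block
	fmt.Println(blockFiles(1, 4096))    // 1: even a tiny file occupies a whole block
}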
					
	DSL returns the attached DSL. 
 | 
	func (e *Element) DSL() func() { return e.DSLFunc } 
 | 
	[
  "func (t *TraitDefinition) DSL() func() {\n\treturn t.DSLFunc\n}",
  "func RunDSL(t *testing.T, dsl func()) *RootExpr {\n\tt.Helper()\n\tsetupDSLRun()\n\n\t// run DSL (first pass)\n\tif !eval.Execute(dsl, nil) {\n\t\tt.Fatal(eval.Context.Error())\n\t}\n\n\t// run DSL (second pass)\n\tif err := eval.RunDSL(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// return generated root\n\treturn Root\n}",
  "func RunHTTPDSL(t *testing.T, dsl func()) *RootExpr {\n\tsetupDSLRun()\n\n\t// run DSL (first pass)\n\tif !eval.Execute(dsl, nil) {\n\t\tt.Fatal(eval.Context.Error())\n\t}\n\n\t// run DSL (second pass)\n\tif err := eval.RunDSL(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// return generated root\n\treturn Root\n}",
  "func jokeLang() {\r\ndsl(\r\n\tPROC,`Example()`,\r\n\tVAR,`x int`,\r\n\t    `y int`,\r\n\t    `s string`,\r\n\tSTART,\r\n\t\t`x`,`<--`,50,\r\n\t\t`y`,`<--`,100,\r\n\t\t`total`,`<--`,`add(x, y)`,\r\n\t\t`stdout(total)`,\r\n\t\t`s`,`<--`,`s became a string`,\r\n\tFINISH,\r\n\r\n\tPROC,`AnotherExample()`,\r\n\tVAR, `a int`,\r\n\t     `b int`,\r\n\tSTART,\r\n\t\t`a`,`<--`,10,\r\n\t\t`b`,`<--`,20,\r\n\t\t`total`,`<--`,`add(a, b)`,\r\n\t\t`stdout(total)`,\r\n\tFINISH,\r\n\r\n\tRUN,\r\n\r\n\t  `Example()`,\r\n\r\n\t  `# this is a comment`,\r\n\t  `# Or an inside joke`,\r\n\t  `# My voice does not have a Lispppft.`,\r\n\r\n\t  `# Let's run the example procedure twice more`,\r\n\r\n      `Example()`,\r\n\t  `Example()`,\r\n\r\n\t  `# Comment: it should therefore print the example 3 times total`,\r\n\r\n\t  `# now let's call another example procedure`,\r\n\t  `AnotherExample()`,\r\n\r\n\t   // but what about if we want to call some GoLang Code?\r\n\t   // this calls someGoLangFunc() inside the DSL run section we are in\r\n\t   callGo(someGoLangFunc),\r\n\t   // there, we just mixed DSL code with Go Code calls\r\n\r\n\t  `# Now we're back to the DSL code again.. call the example one more time...`,\r\n\t  `Example()`,\r\n\r\n\t DONE,\r\n\r\n\r\ndslFinished)\r\n}",
  "func (s *DbRecorder) Builder() *squirrel.StatementBuilderType {\n\treturn s.builder\n}",
  "func (do *Domain) DDL() ddl.DDL {\n\treturn do.ddl\n}",
  "func DefaultBuilder(path string) Builder {\n\treturn func(topic string, partition int32) (Storage, error) {\n\t\tfp := filepath.Join(path, fmt.Sprintf(\"%s.%d\", topic, partition))\n\t\tdb, err := leveldb.OpenFile(fp, nil)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error opening leveldb: %v\", err)\n\t\t}\n\t\treturn New(db)\n\t}\n}",
  "func (s *service) Builder() *sql.Builder {\n\treturn s.builder\n}",
  "func NewDataChangeDSL(txn keyval.ProtoTxn) *DataChangeDSL {\n\treturn &DataChangeDSL{txn: txn}\n}",
  "func (v *View) Definition() Node {\n\treturn v.definition\n}",
  "func (obj *application) Definition() DefinitionSection {\n\treturn obj.def\n}",
  "func BuildsFrom(dsl func()) {\n\tif m, ok := relationalModelDefinition(false); ok {\n\t\t/*\t\tmts, ok := bf.(*design.UserTypeDefinition)\n\t\t\t\tif ok {\n\t\t\t\t\tm.BuiltFrom[mts.TypeName] = mts\n\t\t\t\t} else if mts, ok := bf.(*design.MediaTypeDefinition); ok {\n\t\t\t\t\tm.BuiltFrom[mts.TypeName] = mts.UserTypeDefinition\n\t\t\t\t}\n\t\t\t\tm.PopulateFromModeledType()\n\t\t*/\n\t\tbf := gorma.NewBuildSource()\n\t\tbf.DefinitionDSL = dsl\n\t\tbf.Parent = m\n\t\tm.BuildSources = append(m.BuildSources, bf)\n\t}\n\n}",
  "func QueuedFactBuilder() interface{} {\n\treturn &QueuedEvent{}\n}",
  "func RunGRPCDSL(t *testing.T, dsl func()) *RootExpr {\n\tsetupDSLRun()\n\n\t// run DSL (first pass)\n\tif !eval.Execute(dsl, nil) {\n\t\tt.Fatal(eval.Context.Error())\n\t}\n\n\t// run DSL (second pass)\n\tif err := eval.RunDSL(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// return generated root\n\treturn Root\n}",
  "func GetDialect() SQLDialect { return def.GetDialect() }",
  "func (dfa *DFAccess) DocumentsDSLQuery(dslQueryString string, nResults int) *SearchResults {\n\treturn dfa.dslQuery(\"documents/dslquery\", dslQueryString, nResults)\n}",
  "func (conn *PgxConnection) Builder() sqlf.Builder {\n\treturn conn._builder\n}",
  "func (dsl *DataChangeDSL) Delete() linux.DeleteDSL {\n\treturn &DeleteDSL{dsl, dsl.vppDataChange.Delete()}\n}",
  "func NewDataChangeDSL(txn keyval.ProtoTxn) *DataChangeDSL {\n\tvppDbAdapter := vpp_dbadapter.NewDataChangeDSL(txn)\n\treturn &DataChangeDSL{txn: txn, vppDataChange: vppDbAdapter}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Finalize finalizes the relationships. 
 | 
	func (e *Element) Finalize() {
	for _, rel := range e.Relationships {
		rel.Finalize()
	}
} 
 | 
	[
  "func (g *Graph) Finalize() error {\n\tfor _, node := range g.Nodes {\n\t\tif node.Kind == \"Cluster\" || node.Kind == \"Namespace\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif _, ok := g.Relationships[node.UID]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(node.GetNamespace()) == 0 {\n\t\t\tcluster, err := g.CoreV1().Cluster()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tg.Relationship(cluster, node.Kind, node)\n\t\t\tcontinue\n\t\t}\n\n\t\tmetadata := metav1.ObjectMeta{Name: node.GetNamespace()}\n\t\tnamespace, err := g.CoreV1().Namespace(&v1.Namespace{ObjectMeta: metadata})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tg.Relationship(namespace, node.Kind, node)\n\t}\n\n\treturn nil\n}",
  "func (r *Relationship) Finalize() {\n\t// prefix tags\n\tif r.InteractionStyle == InteractionAsynchronous {\n\t\tr.Tags = mergeTags(\"Asynchronous\", []string{r.Tags})\n\t}\n\tr.Tags = mergeTags(\"Relationship\", []string{r.Tags})\n}",
  "func (sim *SISimulation) Finalize() {\n\t// Record genotype tree\n\tvar wg sync.WaitGroup\n\tc := make(chan GenotypeNode)\n\td := make(chan Genotype)\n\n\twg.Add(2)\n\tgo func() {\n\t\tfor _, node := range sim.GenotypeNodeMap() {\n\t\t\tc <- node\n\t\t}\n\t\tclose(c)\n\t\twg.Done()\n\t}()\n\tgo func() {\n\t\tfor _, genotype := range sim.GenotypeSet().Map() {\n\t\t\td <- genotype\n\t\t}\n\t\tclose(d)\n\t\twg.Done()\n\t}()\n\tvar wg2 sync.WaitGroup\n\twg2.Add(2)\n\tgo func() {\n\t\tsim.WriteGenotypeNodes(c)\n\t\twg2.Done()\n\t}()\n\tgo func() {\n\t\tsim.WriteGenotypes(d)\n\t\twg2.Done()\n\t}()\n\twg2.Wait()\n\n\t// Clear memory by deleting pathogens in host\n\tfor _, host := range sim.HostMap() {\n\t\thost.RemoveAllPathogens()\n\t}\n}",
  "func (cd *ConnectionDetails) Finalize() error {\n\tcd.Dialect = normalizeSynonyms(cd.Dialect)\n\n\tif cd.Options == nil { // for safety\n\t\tcd.Options = make(map[string]string)\n\t}\n\n\tif cd.URL != \"\" {\n\t\tif err := cd.withURL(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif fin, ok := finalizer[cd.Dialect]; ok {\n\t\tfin(cd)\n\t}\n\n\tif DialectSupported(cd.Dialect) {\n\t\tif cd.Database != \"\" || cd.URL != \"\" {\n\t\t\treturn nil\n\t\t}\n\t\treturn errors.New(\"no database or URL specified\")\n\t}\n\treturn errors.Errorf(\"unsupported dialect '%v'\", cd.Dialect)\n}",
  "func (r *relation) Close() {\n\tfor _, v := range r.mp {\n\t\tv.Close()\n\t}\n}",
  "func (s *schemaNode) finalize() (err error) {\n\tif s.snType != sntNotSchema {\n\t\tif s.id != \"\" {\n\t\t\tif s.idURL.IsAbs() {\n\t\t\t\ts.baseURI = s.id\n\t\t\t\ts.baseURIObj = s.idURL\n\t\t\t} else {\n\t\t\t\tnode := s.parent\n\t\t\t\tfor node != nil {\n\t\t\t\t\tif node.baseURI != \"\" {\n\t\t\t\t\t\ts.baseURIObj = node.baseURIObj.ResolveReference(s.idURL)\n\t\t\t\t\t\ts.baseURI = s.baseURIObj.String()\n\t\t\t\t\t\tbreak\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnode = node.parent\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif s.baseURI == \"\" {\n\t\t\t\t\tpanic(\"baseURI must not be empty\")\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tnode := s.parent\n\t\t\tfor node != nil {\n\t\t\t\tif node.baseURI != \"\" {\n\t\t\t\t\ts.baseURI = node.baseURI\n\t\t\t\t\ts.baseURIObj = node.baseURIObj\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tnode = node.parent\n\t\t\t}\n\t\t}\n\n\t\ts.setCanonicalURI()\n\t\terr = s.schema.schemaJar.Add(s)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// If here, s must have baseURI\n\tfor _, v := range s.kvMap {\n\t\tif err = v.finalize(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor _, v := range s.nodeArr {\n\t\tif err = v.finalize(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
  "func (m *modelLoader) finalize(itemMap map[reflect.Type][]interface{}) error {\n\t//fill all relationships we can on our items\n\tfor _, f := range m.relationships {\n\t\titems, ok := itemMap[baseType(f.Struct.Type)]\n\t\tif !ok {\n\t\t\t//this relationship isn't in our item map\n\t\t\tcontinue\n\t\t}\n\n\t\tlookup := make(map[string][]reflect.Value)\n\n\t\t//construct a map with possibilities of this relationship\n\t\tfor _, n := range items {\n\t\t\titemVal := reflect.ValueOf(n).Elem()\n\n\t\t\t//build a key for the attributes of this relationship\n\t\t\tvar sb strings.Builder\n\t\t\tfor i, name := range f.Relationship.ForeignFieldNames {\n\t\t\t\tval := itemVal.FieldByName(name).Interface()\n\n\t\t\t\tif valuer, ok := val.(driver.Valuer); ok {\n\t\t\t\t\tvar err error\n\t\t\t\t\tval, err = valuer.Value()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tsb.WriteString(fmt.Sprintf(\"[%d:%v]\", i, val))\n\t\t\t}\n\n\t\t\tkey := sb.String()\n\t\t\tlookup[key] = append(lookup[key], itemVal.Addr())\n\t\t}\n\n\t\t//go through all models were tracking and fill in this relationship\n\t\tfor _, item := range m.items {\n\t\t\titemVal := reflect.ValueOf(item).Elem()\n\t\t\trelVal := itemVal.FieldByName(f.Name)\n\n\t\t\t//build a key for the attributes of this relationship\n\t\t\tvar sb strings.Builder\n\t\t\tfor i, name := range f.Relationship.AssociationForeignFieldNames {\n\t\t\t\tval := itemVal.FieldByName(name)\n\t\t\t\tif val.Kind() == reflect.Ptr && !val.IsNil() {\n\t\t\t\t\tval = val.Elem()\n\t\t\t\t}\n\n\t\t\t\tkeyValue := val.Interface()\n\t\t\t\tif valuer, ok := keyValue.(driver.Valuer); ok {\n\t\t\t\t\tkeyValue, _ = valuer.Value()\n\t\t\t\t}\n\t\t\t\tsb.WriteString(fmt.Sprintf(\"[%d:%v]\", i, keyValue))\n\t\t\t}\n\n\t\t\tkey := sb.String()\n\t\t\t//find items corresponding to this item for this relationship\n\t\t\tfor _, newVal := range lookup[key] {\n\t\t\t\t//we have items to fill this relationship, fill it based on the struct\n\t\t\t\tif relVal.Kind() == reflect.Slice {\n\t\t\t\t\t//add the result to our slice\n\t\t\t\t\tif relVal.Type().Elem().Kind() != reflect.Ptr {\n\t\t\t\t\t\t//we have a slice of structs so add the struct we're pointing to\n\t\t\t\t\t\tnewVal = newVal.Elem()\n\t\t\t\t\t}\n\n\t\t\t\t\trelVal.Set(reflect.Append(relVal, newVal))\n\t\t\t\t} else {\n\t\t\t\t\t//we don't have a slice so set the item to the first one we have and move on\n\t\t\t\t\tif relVal.Type().Kind() != reflect.Ptr {\n\t\t\t\t\t\tnewVal = newVal.Elem()\n\t\t\t\t\t}\n\n\t\t\t\t\trelVal.Set(newVal)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
  "func (f *FirmataAdaptor) Finalize() (errs []error) {\n\tif err := f.Disconnect(); err != nil {\n\t\treturn []error{err}\n\t}\n\treturn\n}",
  "func (c *Connections) Finalize() (errs []error) {\n\tfor _, connection := range *c {\n\t\tif cerrs := connection.Finalize(); cerrs != nil {\n\t\t\tfor i, err := range cerrs {\n\t\t\t\tcerrs[i] = fmt.Errorf(\"Connection %q: %v\", connection.Name(), err)\n\t\t\t}\n\t\t\terrs = append(errs, cerrs...)\n\t\t}\n\t}\n\treturn errs\n}",
  "func (p *Project) Finalize() error {\n\terrs := errors.AggregatedError{}\n\tp.Targets = make(TargetNameMap)\n\tfor name, t := range p.MasterFile.Targets {\n\t\tt.Initialize(name, p)\n\t\terrs.Add(p.Targets.Add(t))\n\t}\n\terrs.AddMany(\n\t\tp.Targets.BuildDeps(),\n\t\tp.Targets.CheckCyclicDeps(),\n\t)\n\n\treturn errs.Aggregate()\n}",
  "func Finalize(confs ...Configuration) {\n\tfinalized.Do(func() {\n\t\tfor _, conf := range confs {\n\t\t\tif dynamicConf, ok := conf.(DynamicConfiguration); ok {\n\t\t\t\tdynamicConf.Finalize()\n\t\t\t}\n\t\t}\n\t})\n}",
  "func (kor *KubernetesOAMRouter) Finalize(canary *flaggerv1.Canary) error {\n\treturn fmt.Errorf(\"OAM router doesn't do finalize\")\n}",
  "func (c *Container) Finalize() {\n\tc.PrefixTags(\"Element\", \"Container\")\n\tc.Element.Finalize()\n}",
  "func (t *Tags) Finalize() {\n\tif t.noFinalize {\n\t\treturn\n\t}\n\n\tvalues := t.values\n\tt.values = nil\n\n\tfor i := range values {\n\t\tvalues[i].Finalize()\n\t}\n\n\tif t.pool == nil {\n\t\treturn\n\t}\n\n\tt.pool.PutTags(Tags{values: values})\n}",
  "func (s *Statement) Finalize() (e error) {\n\treturn SQLiteError(C.sqlite3_finalize(s.cptr))\n}",
  "func (_Finalizable *FinalizableTransactor) Finalize(opts *bind.TransactOpts, fin bool) (*types.Transaction, error) {\n\treturn _Finalizable.contract.Transact(opts, \"finalize\", fin)\n}",
  "func (c *closeTrackingConn) finalize() {\n\tmon.Event(\"quic_connection_leaked\")\n\t_ = c.ConnectorConn.Close()\n}",
  "func (h *HostExpr) Finalize() {\n\tif h.Variables == nil {\n\t\th.Variables = &AttributeExpr{Type: &Object{}}\n\t}\n}",
  "func (e *Adaptor) Finalize() (err error) {\n\tfor _, pin := range e.digitalPins {\n\t\tif pin != nil {\n\t\t\tif errs := pin.Unexport(); errs != nil {\n\t\t\t\terr = multierror.Append(err, errs)\n\t\t\t}\n\t\t}\n\t}\n\tfor _, pin := range e.pwmPins {\n\t\tif pin != nil {\n\t\t\tif errs := pin.enable(\"0\"); errs != nil {\n\t\t\t\terr = multierror.Append(err, errs)\n\t\t\t}\n\t\t\tif errs := pin.unexport(); errs != nil {\n\t\t\t\terr = multierror.Append(err, errs)\n\t\t\t}\n\t\t}\n\t}\n\tfor _, bus := range e.i2cBuses {\n\t\tif bus != nil {\n\t\t\tif errs := bus.Close(); errs != nil {\n\t\t\t\terr = multierror.Append(err, errs)\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	GetElement returns the underlying element. 
 | 
	func (e *Element) GetElement() *Element { return e } 
 | 
	[
  "func (e emptyBackEnd) GetElement(i *Element) interface{} {\n\treturn \"not nil!\"\n}",
  "func (recv *MarkupParseContext) GetElement() string {\n\tretC := C.g_markup_parse_context_get_element((*C.GMarkupParseContext)(recv.native))\n\tretGo := C.GoString(retC)\n\n\treturn retGo\n}",
  "func (r *Result) GetElement() (*Element, error) {\n\tres, ok := r.Data.(*Element)\n\tif !ok {\n\t\treturn nil, newError(err0603ResultNotElementError)\n\t}\n\treturn res, nil\n}",
  "func (a *Array) Get(idx int) Element {\n\treturn a.elements.Get(idx).elem\n}",
  "func (m FloatMatrix) GetElement(i int64, j int64) float64 {\n\treturn m.A[i*m.N+j]\n}",
  "func (m *OrderedMap[K, V]) GetElement(key K) *Element[K, V] {\n\telement, ok := m.kv[key]\n\tif ok {\n\t\treturn element\n\t}\n\n\treturn nil\n}",
  "func (p GetCommand) Element() storage.Element {\n\treturn storage.NewElement(p.key, storage.NilBytes)\n}",
  "func (l *ListItem) GetElem(index int) Element {\n\t//if index will exceed bounds of array\n\tif index >= len(l.contents) {\n\t\t//counter starts at bottom, looks for \"Text\" to stop, or stops at end\n\t\tfor index = 0; index < len(l.contents) && l.contents[index].GetType() != \"span\"; index++ {\n\t\t}\n\t}\n\t//returns requested Element, \"Text\" Element, or last Element.. in that preference depending on existence\n\treturn l.contents[index]\n}",
  "func (ft *FieldType) GetElem(idx int) string {\n\treturn ft.elems[idx]\n}",
  "func (l *List) GetElement(i int) *Element {\r\n\tvar e *Element\r\n\tif i < l.N/2 {\r\n\t\te = l.dummy.Next\r\n\t\tfor j := 0; j < i; j++ {\r\n\t\t\te = e.Next\r\n\t\t}\r\n\t} else {\r\n\t\te = l.dummy\r\n\t\tfor j := l.N; j > i; j-- {\r\n\t\t\te = e.Prev\r\n\t\t}\r\n\t}\r\n\treturn e\r\n}",
  "func (uni *UniformMatrix3f) GetElement(col, row int, v float32) float32 {\n\n\treturn uni.v[col*3+row]\n}",
  "func (lruCache *LRUCache) GetElem(k string) (*list.Element, error) {\n\tif elem, exists := lruCache.m[k]; !exists {\n\t\treturn nil, errors.New(\"element does not exist in cache\")\n\t} else {\n\t\t// Element is accessed again therefore move to the front (make most recently used)\n\t\tlruCache.l.MoveToFront(elem)\n\t\treturn elem, nil\n\t}\n\n}",
  "func (dl *DirtyExtentList) Get() *list.Element {\n\tdl.RLock()\n\tdefer dl.RUnlock()\n\treturn dl.list.Front()\n}",
  "func (p *idElementPool) get() *idElement {\n\treturn p.pool.Get().(*idElement)\n}",
  "func (c *CacheManager) GetElement(filename string, element string) (string, error) {\n\treturn filepath.Join(c.cachedDir, filename, element), nil\n}",
  "func (e *Entity) GetElementByID(string) Object { return nil }",
  "func getElementBytes(stub shim.ChaincodeStubInterface, elementKey string) ([]byte, error) {\n\telementBytes, err := stub.GetState(elementKey)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if elementBytes == nil {\n\t\treturn nil, fmt.Errorf(\"no element with key %s\", elementKey)\n\t}\n\treturn elementBytes, nil\n}",
  "func (arr *ArrayADT) GetElement(index int) int {\n\treturn arr.data[index]\n}",
  "func (e *Common) Element() string {\n\treturn e.name\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	MergeTags adds the given tags. It skips tags already present in e.Tags. 
 | 
	func (e *Element) MergeTags(tags ...string) {
	e.Tags = mergeTags(e.Tags, tags)
} 
 | 
	[
  "func (e *Event) AddToTags(tags ...string) {\n\tfor _, t := range tags {\n\t\tif !ContainsAllTags(*e, t) {\n\t\t\te.EventTags = append(e.EventTags, t)\n\t\t}\n\t}\n}",
  "func MergeTags(generalTags []*Tag, infraTags []*Tag) []*Tag {\n\tvar dupMap = make(map[string]bool)\n\tfor _, tag := range generalTags {\n\t\tdupMap[tag.Key] = true\n\t}\n\tfor _, tag := range infraTags {\n\t\tif _, exists := dupMap[tag.Key]; !exists {\n\t\t\tgeneralTags = append(generalTags, tag)\n\t\t}\n\t}\n\treturn generalTags\n}",
  "func mergeTags(localTags []*ecs.Tag, ec2Tags []*ecs.Tag) []*ecs.Tag {\n\ttagsMap := make(map[string]string)\n\n\tfor _, ec2Tag := range ec2Tags {\n\t\ttagsMap[aws.StringValue(ec2Tag.Key)] = aws.StringValue(ec2Tag.Value)\n\t}\n\n\tfor _, localTag := range localTags {\n\t\ttagsMap[aws.StringValue(localTag.Key)] = aws.StringValue(localTag.Value)\n\t}\n\n\treturn utils.MapToTags(tagsMap)\n}",
  "func MergeTags(t map[string]string) MergeOption {\n\treturn func(m *mergeReq) {\n\t\tm.Tags = t\n\t}\n}",
  "func mergeTags(generalTags []*tag, infraTags []*tag) []*tag {\n\tvar dupMap = make(map[string]bool)\n\tfor _, tag := range generalTags {\n\t\tdupMap[tag.key] = true\n\t}\n\tfor _, tag := range infraTags {\n\t\tif _, exists := dupMap[tag.key]; !exists {\n\t\t\tgeneralTags = append(generalTags, tag)\n\t\t}\n\t}\n\treturn generalTags\n}",
  "func mergeTags(t1, t2 []Tag) []Tag {\n\tn := len(t1) + len(t2)\n\tif n == 0 {\n\t\treturn nil\n\t}\n\n\tout := make([]Tag, 0, n)\n\tout = append(out, t1...)\n\tout = append(out, t2...)\n\n\treturn SortTags(out)\n}",
  "func (rc *nopS) AddTags(tags ...string) {\n}",
  "func mergeTags(existing string, tags []string) string {\n\tif existing == \"\" {\n\t\treturn strings.Join(tags, \",\")\n\t}\n\told := strings.Split(existing, \",\")\n\tvar merged []string\n\tfor _, o := range old {\n\t\tfound := false\n\t\tfor _, tag := range tags {\n\t\t\tif tag == o {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tmerged = append(merged, o)\n\t\t}\n\t}\n\tfor _, tag := range tags {\n\t\tfound := false\n\t\tfor _, o := range merged {\n\t\t\tif tag == o {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tmerged = append(merged, tag)\n\t\t}\n\t}\n\treturn strings.Join(merged, \",\")\n}",
  "func (g *GameObject) AddTags(tags []string) {\n\tfor i := range tags {\n\t\tg.tags[tags[i]] = true\n\t}\n}",
  "func MergeTags(tagMaps ...map[string]string) map[string]string {\n\tmerged := make(map[string]string)\n\tfor _, tagMap := range tagMaps {\n\t\tfor k, v := range tagMap {\n\t\t\tmerged[k] = v\n\t\t}\n\t}\n\treturn merged\n}",
  "func (f *BaseFormatter) AddTags(tags ...string) {\n\tf.tags = append(f.tags, tags...)\n\tf.tagReplacer = strings.NewReplacer(f.tags...)\n}",
  "func (p *plugin) concatTags(tags1 *structtag.Tags, tags2 *structtag.Tags) (*structtag.Tags, error) {\n\tif tags1.Len() == 0 {\n\t\treturn tags2, nil\n\t}\n\tif tags2.Len() == 0 {\n\t\treturn tags1, nil\n\t}\n\n\tfor _, t2 := range tags2.Tags() {\n\t\tvar found bool\n\t\tfor _, t1 := range tags1.Tags() {\n\t\t\tif t1.Key == t2.Key {\n\t\t\t\tif len(t1.Name) == 0 {\n\t\t\t\t\tt1.Name = t2.Name\n\t\t\t\t}\n\t\t\t\tif t1.Options == nil || len(t1.Options) == 0 {\n\t\t\t\t\tt1.Options = t2.Options\n\t\t\t\t}\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tvar err error\n\t\t\ts := tags1.String() + \" \" + t2.String()\n\t\t\ttags1, err = structtag.Parse(s)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to parse tags '%s': %s\", s, err.Error())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn tags1, nil\n}",
  "func (au *ArticleUpdate) AddTags(t ...*Tag) *ArticleUpdate {\n\tids := make([]int, len(t))\n\tfor i := range t {\n\t\tids[i] = t[i].ID\n\t}\n\treturn au.AddTagIDs(ids...)\n}",
  "func MergeTagSlices(Original []TagInformation, ToAdd []TagInformation) []TagInformation {\n\t//Rules:\n\t//We do not care about meta-tags\n\t//Tags in ToAdd win\n\t//Exlusionary tags win after tags in ToAdd\n\n\t//First, remove duplicates from original that exist in ToAdd\n\tfor Index := 0; Index < len(ToAdd); Index++ {\n\t\tif ToAdd[Index].IsMeta {\n\t\t\tcontinue //Skip Metatags\n\t\t}\n\t\t//Standard tag confirmed, scan for duplicates\n\t\tfor ScanIndex := 0; ScanIndex < len(Original); ScanIndex++ {\n\t\t\tif Original[ScanIndex].IsMeta {\n\t\t\t\tcontinue //Skip comparing metas\n\t\t\t}\n\t\t\tif Original[ScanIndex].ID == ToAdd[Index].ID {\n\t\t\t\t//Remove and resize\n\t\t\t\tOriginal = append(Original[:ScanIndex], Original[ScanIndex+1:]...)\n\t\t\t\t//we just need to continue current scan from the same ScanIndex\n\t\t\t\tScanIndex--\n\t\t\t}\n\t\t}\n\t}\n\n\t//Now we can fall back to RemoveDuplicateTags to cleanup any other issues\n\treturn RemoveDuplicateTags(append(Original, ToAdd...))\n}",
  "func JoinTags(tags ...Tag) TagSet {\n\tvar result TagSet\n\tresult = append(result, tags...)\n\treturn result\n}",
  "func JoinTags(a []string, b []string) []string {\n\treturn append(a, b...)\n}",
  "func (app *Configurable) AddTags(parameters map[string]string) interfaces.AppFunction {\n\ttags, failed := app.processTagsParameter(parameters)\n\tif failed {\n\t\treturn nil\n\t}\n\n\ttransform := transforms.NewTags(tags)\n\treturn transform.AddTags\n}",
  "func (op Operation) AddTags(tags ...string) Operation {\n\top.Tags = append(op.Tags, tags...)\n\treturn op\n}",
  "func addTags(s selection, args []string) {\n\tif len(args) < 1 {\n\t\tlog.Fatal(`Usage: A addtags <tags> [options]\n<tags>:\tcomma-separated tags to add, e.g. json,xml\n[options]:\toptions to add, e.g. 'json=omitempty'`)\n\t}\n\targuments := []string{\n\t\t\"-file\", s.filename(), \"-modified\", \"-format\", \"json\", \"-line\", s.lineSel(), \"-add-tags\", args[0],\n\t}\n\tif len(args) > 1 {\n\t\targuments = append(arguments, \"-add-options\", args[1])\n\t}\n\tbuf := runWithStdin(s.archive(), \"gomodifytags\", arguments...)\n\tvar out gomodifytagsOutput\n\tif err := json.Unmarshal([]byte(buf), &out); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif err := s.win.Addr(\"%d,%d\", out.Start, out.End); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif _, err := s.win.Write(\"data\", []byte(strings.Join(out.Lines, \"\\n\")+\"\\n\")); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tshowAddr(s.win, s.start)\n\tif len(out.Errs) != 0 {\n\t\tfmt.Fprintln(os.Stderr, strings.Join(out.Errs, \"\\n\"))\n\t}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
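Run end to end, MergeTags behaves as an order-preserving union: nothing is duplicated, though a tag passed in again migrates to the position of the new tags. The sketch below condenses the mergeTags helper from the last entry of this section (same algorithm, shortened with a small contains closure) so the behavior can be executed standalone.

package main

import (
	"fmt"
	"strings"
)

type Element struct{ Tags string }

func (e *Element) MergeTags(tags ...string) { e.Tags = mergeTags(e.Tags, tags) }

// mergeTags condenses the full version shown later in this section:
// keep existing tags not being re-added, then append the new tags deduped.
func mergeTags(existing string, tags []string) string {
	if existing == "" {
		return strings.Join(tags, ",")
	}
	contains := func(list []string, s string) bool {
		for _, v := range list {
			if v == s {
				return true
			}
		}
		return false
	}
	var merged []string
	for _, o := range strings.Split(existing, ",") {
		if !contains(tags, o) {
			merged = append(merged, o)
		}
	}
	for _, t := range tags {
		if !contains(merged, t) {
			merged = append(merged, t)
		}
	}
	return strings.Join(merged, ",")
}

func main() {
	e := &Element{Tags: "Person,Element"}
	e.MergeTags("Person", "Database")
	fmt.Println(e.Tags) // Element,Person,Database: "Person" appears once, repositioned with the new tags
}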
					
	PrefixTags adds the given tags to the beginning of the comma separated list. 
 | 
	func (e *Element) PrefixTags(tags ...string) {
	prefix := strings.Join(tags, ",")
	if e.Tags == "" {
		e.Tags = prefix
		return
	}
	e.Tags = mergeTags(prefix, strings.Split(e.Tags, ","))
} 
 | 
	[
  "func TagsHasPrefix(v string) predicate.Project {\n\treturn predicate.Project(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldTags), v))\n\t})\n}",
  "func (e *Extractor) NamesFromTagWithPrefix(tag string, prefix string) (out []string, err error) {\n\n\tif err := e.isValidStruct(); err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := reflect.ValueOf(e.StructAddr).Elem()\n\tfields := e.fields(s)\n\n\tfor _, field := range fields {\n\t\tval, ok := field.tags.Lookup(tag)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tkey, omit := e.parseOmitempty(val, field.value)\n\t\tif omit {\n\t\t\tcontinue\n\t\t}\n\t\tout = append(out, strings.TrimSpace(prefix+key))\n\t}\n\n\treturn\n}",
  "func prefixSlice(pre string, slice []string) (ret []string) {\n\tfor _, i := range slice {\n\t\tret = append(ret, pre+i)\n\t}\n\n\treturn\n}",
  "func prependAll(prefix string, in []string) (out []string) {\n\tfor _, s := range in {\n\t\tout = append(out, prefix+s)\n\t}\n\treturn\n}",
  "func SetPrefix(prefix string) string {\n\tdefer logger.SetPrefix(prefix)\n\told := tags[0]\n\ttags[0] = prefix\n\treturn old\n}",
  "func TagHasPrefix(t kappnavv1.Tag, prefix string) bool {\n\treturn strings.HasPrefix(string(t), prefix)\n}",
  "func (rc *nopS) AddTags(tags ...string) {\n}",
  "func prefixJoin(prefix string, array []string, separator string) (result string) {\n\tif len(array) == 0 {\n\t\treturn\n\t}\n\tfor index, val := range array {\n\t\tif index == 0 {\n\t\t\tresult = val\n\t\t} else {\n\t\t\tresult = join(result, concat(prefix, val), separator)\n\t\t}\n\t}\n\treturn\n}",
  "func IncomingDTagTransferRequestsPrefix(recipient string) []byte {\n\treturn append(DTagTransferRequestPrefix, []byte(recipient)...)\n}",
  "func prefixedNames(fullName, placeholder string) string {\n\tvar prefixed string\n\tparts := strings.Split(fullName, \",\")\n\tfor i, name := range parts {\n\t\tname = strings.Trim(name, \" \")\n\t\tprefixed += prefixFor(name) + name\n\t\tif placeholder != \"\" {\n\t\t\tprefixed += \" \" + placeholder\n\t\t}\n\t\tif i < len(parts)-1 {\n\t\t\tprefixed += \", \"\n\t\t}\n\t}\n\treturn prefixed\n}",
  "func getListPrefix(opt *Options, s *goquery.Selection) string {\n\tif isWrapperListItem(s) {\n\t\treturn \"\"\n\t}\n\n\tparent := s.Parent()\n\tif parent.Is(\"ul\") {\n\t\treturn opt.BulletListMarker + \" \"\n\t} else if parent.Is(\"ol\") {\n\t\tcurrentIndex := s.Index() + 1\n\n\t\tlastIndex := parent.Children().Last().Index() + 1\n\t\tmaxLength := len(strconv.Itoa(lastIndex))\n\n\t\t// pad the numbers so that all prefix numbers in the list take up the same space\n\t\t// `%02d.` -> \"01. \"\n\t\tformat := `%0` + strconv.Itoa(maxLength) + `d. `\n\t\treturn fmt.Sprintf(format, currentIndex)\n\t}\n\t// If the HTML is malformed and the list element isn't in a ul or ol, return no prefix\n\treturn \"\"\n}",
  "func (s *IPSet) AddPrefix(p IPPrefix) { s.AddRange(p.Range()) }",
  "func NormalizeTags(tags []string) []string {\n\tfor idx, tag := range tags {\n\t\ttags[idx] = strings.Replace(strings.ToLower(strings.TrimPrefix(tag, \"+\")), \" \", \"-\", -1)\n\t}\n\treturn tags\n}",
  "func JoinTags(a []string, b []string) []string {\n\treturn append(a, b...)\n}",
  "func generateTags(tags string, labels string, itemLabels map[string]string) []string {\n\ttagList := make([]string, 0)\n\tif tags != \"\" {\n\t\ttt := strings.Split(tags, \",\")\n\t\tfor _, t := range tt {\n\t\t\ttagList = append(tagList, strings.TrimSpace(t))\n\t\t}\n\t}\n\n\tif labels == \"*\" {\n\t\t// make all the labels tags\n\t\tfor ln, lv := range itemLabels {\n\t\t\ttagList = append(tagList, ln+\":\"+lv)\n\t\t}\n\t} else if labels != \"\" {\n\t\tll := strings.Split(labels, \",\")\n\t\tfor ln, lv := range itemLabels {\n\t\t\tfor _, l := range ll {\n\t\t\t\tif strings.TrimSpace(l) == ln {\n\t\t\t\t\ttagList = append(tagList, ln+\":\"+lv)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn tagList\n}",
  "func (b *InsertBuilder) Prefix(sql string, args ...interface{}) *InsertBuilder {\n\tb.prefixes = append(b.prefixes, Expr(sql, args...))\n\treturn b\n}",
  "func VertexListPrefix(graph string) []byte {\n\treturn bytes.Join([][]byte{vertexPrefix, []byte(graph), {}}, []byte{0})\n}",
  "func (o GoogleCloudRetailV2alphaSearchRequestFacetSpecFacetKeyOutput) Prefixes() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GoogleCloudRetailV2alphaSearchRequestFacetSpecFacetKey) []string { return v.Prefixes }).(pulumi.StringArrayOutput)\n}",
  "func (c *HelpCountryCode) SetPrefixes(value []string) {\n\tc.Flags.Set(0)\n\tc.Prefixes = value\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	mergeTags merges the comma-separated tags in existing with the ones in tags and returns a comma-separated string with the results. 
 | 
	func mergeTags(existing string, tags []string) string {
	if existing == "" {
		return strings.Join(tags, ",")
	}
	old := strings.Split(existing, ",")
	var merged []string
	for _, o := range old {
		found := false
		for _, tag := range tags {
			if tag == o {
				found = true
				break
			}
		}
		if !found {
			merged = append(merged, o)
		}
	}
	for _, tag := range tags {
		found := false
		for _, o := range merged {
			if tag == o {
				found = true
				break
			}
		}
		if !found {
			merged = append(merged, tag)
		}
	}
	return strings.Join(merged, ",")
} 
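
A quick usage sketch (illustrative values; assumes mergeTags above is in scope and fmt is imported):

func demoMergeTags() {
	// "beta" already exists, so it is deduplicated; "gamma" is appended.
	merged := mergeTags("alpha,beta", []string{"beta", "gamma"})
	fmt.Println(merged) // alpha,beta,gamma
}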
 | 
	[
  "func JoinTags(a []string, b []string) []string {\n\treturn append(a, b...)\n}",
  "func mergeTags(localTags []*ecs.Tag, ec2Tags []*ecs.Tag) []*ecs.Tag {\n\ttagsMap := make(map[string]string)\n\n\tfor _, ec2Tag := range ec2Tags {\n\t\ttagsMap[aws.StringValue(ec2Tag.Key)] = aws.StringValue(ec2Tag.Value)\n\t}\n\n\tfor _, localTag := range localTags {\n\t\ttagsMap[aws.StringValue(localTag.Key)] = aws.StringValue(localTag.Value)\n\t}\n\n\treturn utils.MapToTags(tagsMap)\n}",
  "func mergeTags(t1, t2 []Tag) []Tag {\n\tn := len(t1) + len(t2)\n\tif n == 0 {\n\t\treturn nil\n\t}\n\n\tout := make([]Tag, 0, n)\n\tout = append(out, t1...)\n\tout = append(out, t2...)\n\n\treturn SortTags(out)\n}",
  "func (p *plugin) concatTags(tags1 *structtag.Tags, tags2 *structtag.Tags) (*structtag.Tags, error) {\n\tif tags1.Len() == 0 {\n\t\treturn tags2, nil\n\t}\n\tif tags2.Len() == 0 {\n\t\treturn tags1, nil\n\t}\n\n\tfor _, t2 := range tags2.Tags() {\n\t\tvar found bool\n\t\tfor _, t1 := range tags1.Tags() {\n\t\t\tif t1.Key == t2.Key {\n\t\t\t\tif len(t1.Name) == 0 {\n\t\t\t\t\tt1.Name = t2.Name\n\t\t\t\t}\n\t\t\t\tif t1.Options == nil || len(t1.Options) == 0 {\n\t\t\t\t\tt1.Options = t2.Options\n\t\t\t\t}\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tvar err error\n\t\t\ts := tags1.String() + \" \" + t2.String()\n\t\t\ttags1, err = structtag.Parse(s)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to parse tags '%s': %s\", s, err.Error())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn tags1, nil\n}",
  "func MergeTags(generalTags []*Tag, infraTags []*Tag) []*Tag {\n\tvar dupMap = make(map[string]bool)\n\tfor _, tag := range generalTags {\n\t\tdupMap[tag.Key] = true\n\t}\n\tfor _, tag := range infraTags {\n\t\tif _, exists := dupMap[tag.Key]; !exists {\n\t\t\tgeneralTags = append(generalTags, tag)\n\t\t}\n\t}\n\treturn generalTags\n}",
  "func mergeTags(generalTags []*tag, infraTags []*tag) []*tag {\n\tvar dupMap = make(map[string]bool)\n\tfor _, tag := range generalTags {\n\t\tdupMap[tag.key] = true\n\t}\n\tfor _, tag := range infraTags {\n\t\tif _, exists := dupMap[tag.key]; !exists {\n\t\t\tgeneralTags = append(generalTags, tag)\n\t\t}\n\t}\n\treturn generalTags\n}",
  "func JoinTags(tags ...Tag) TagSet {\n\tvar result TagSet\n\tresult = append(result, tags...)\n\treturn result\n}",
  "func (e *Element) MergeTags(tags ...string) {\n\te.Tags = mergeTags(e.Tags, tags)\n}",
  "func TagsDiff(sqsTags map[string]string, newTags map[string]string) (removed, added map[string]string) {\n\tremoved = map[string]string{}\n\tfor k, v := range sqsTags {\n\t\tif _, ok := newTags[k]; !ok {\n\t\t\tremoved[k] = v\n\t\t}\n\t}\n\n\tadded = map[string]string{}\n\tfor k, newV := range newTags {\n\t\tif oldV, ok := sqsTags[k]; !ok || oldV != newV {\n\t\t\tadded[k] = newV\n\t\t}\n\t}\n\treturn\n}",
  "func getTagChange(oldTags, newTags []string) (tagsToAdd, tagsToDelete []string) {\n\tolgTagsMap := sliceToMap(oldTags)\n\tnewTagsMap := sliceToMap(newTags)\n\n\tfor tag := range newTagsMap {\n\t\tif _, ok := olgTagsMap[tag]; !ok {\n\t\t\ttagsToAdd = append(tagsToAdd, tag)\n\t\t}\n\t}\n\n\tfor tag := range olgTagsMap {\n\t\tif _, ok := newTagsMap[tag]; !ok {\n\t\t\ttagsToDelete = append(tagsToDelete, tag)\n\t\t}\n\t}\n\n\treturn\n}",
  "func MergeTags(tagMaps ...map[string]string) map[string]string {\n\tmerged := make(map[string]string)\n\tfor _, tagMap := range tagMaps {\n\t\tfor k, v := range tagMap {\n\t\t\tmerged[k] = v\n\t\t}\n\t}\n\treturn merged\n}",
  "func DiffTags(spec []v1beta1.Tag, current []ecrtypes.Tag) (addTags []ecrtypes.Tag, remove []string) {\n\taddMap := make(map[string]string, len(spec))\n\tfor _, t := range spec {\n\t\taddMap[t.Key] = t.Value\n\t}\n\tremoveMap := map[string]struct{}{}\n\tfor _, t := range current {\n\t\tif addMap[aws.ToString(t.Key)] == aws.ToString(t.Value) {\n\t\t\tdelete(addMap, aws.ToString(t.Key))\n\t\t\tcontinue\n\t\t}\n\t\tremoveMap[aws.ToString(t.Key)] = struct{}{}\n\t}\n\tfor k, v := range addMap {\n\t\taddTags = append(addTags, ecrtypes.Tag{Key: aws.String(k), Value: aws.String(v)})\n\t}\n\tfor k := range removeMap {\n\t\tremove = append(remove, k)\n\t}\n\treturn\n}",
  "func DiffTags(spec []Tag, current []ecr.Tag) (addTags []ecr.Tag, remove []string) {\n\tlocal := TagsToMap(spec)\n\tremote := ECRTagsToMap(current)\n\tadd := make(map[string]string, len(local))\n\tremove = []string{}\n\tfor k, v := range local {\n\t\tadd[k] = v\n\t}\n\tfor k, v := range remote {\n\t\tswitch val, ok := local[k]; {\n\t\tcase ok && val != v:\n\t\t\tremove = append(remove, k)\n\t\tcase !ok:\n\t\t\tremove = append(remove, k)\n\t\t\tdelete(add, k)\n\t\tdefault:\n\t\t\tdelete(add, k)\n\t\t}\n\t}\n\taddTags = []ecr.Tag{}\n\tfor key, value := range add {\n\t\tvalue := value\n\t\tkey := key\n\t\taddTags = append(addTags, ecr.Tag{Key: &key, Value: &value})\n\t}\n\treturn\n}",
  "func DiffTags(spec []v1alpha1.Tag, current []ecr.Tag) (addTags []ecr.Tag, remove []string) {\n\taddMap := make(map[string]string, len(spec))\n\tfor _, t := range spec {\n\t\taddMap[t.Key] = t.Value\n\t}\n\tremoveMap := map[string]struct{}{}\n\tfor _, t := range current {\n\t\tif addMap[aws.StringValue(t.Key)] == aws.StringValue(t.Value) {\n\t\t\tdelete(addMap, aws.StringValue(t.Key))\n\t\t\tcontinue\n\t\t}\n\t\tremoveMap[aws.StringValue(t.Key)] = struct{}{}\n\t}\n\tfor k, v := range addMap {\n\t\taddTags = append(addTags, ecr.Tag{Key: aws.String(k), Value: aws.String(v)})\n\t}\n\tfor k := range removeMap {\n\t\tremove = append(remove, k)\n\t}\n\treturn\n}",
  "func ConvertTagsToString(tags []string) string {\n\tfinalString := make([]string, 0)\n\ttagChar := \"#\"\n\tfor _, value := range tags {\n\t\ttrimmed := strings.TrimSpace(value)\n\t\tfinalString = append(finalString, fmt.Sprintf(\"%s%s\", tagChar, trimmed))\n\t}\n\treturn strings.Join(finalString, \" \")\n}",
  "func MergeTags(t map[string]string) MergeOption {\n\treturn func(m *mergeReq) {\n\t\tm.Tags = t\n\t}\n}",
  "func ConcatTagValues(tagValues []string) string {\n\tif len(tagValues) == 0 {\n\t\treturn \"\"\n\t}\n\treturn strings.Join(tagValues, \",\")\n}",
  "func unmarshalTag(Tags []*Tag) string {\n\ttags := make([]string, len(Tags))\n\tfor i := range Tags {\n\t\ttags[i] = Tags[i].Name\n\t\tif len(Tags[i].Versions) >= 1 {\n\t\t\ttags[i] += \":\" + strings.Join(Tags[i].Versions, \":\")\n\t\t}\n\t}\n\treturn strings.Join(tags, \",\")\n}",
  "func MergeTagSlices(Original []TagInformation, ToAdd []TagInformation) []TagInformation {\n\t//Rules:\n\t//We do not care about meta-tags\n\t//Tags in ToAdd win\n\t//Exlusionary tags win after tags in ToAdd\n\n\t//First, remove duplicates from original that exist in ToAdd\n\tfor Index := 0; Index < len(ToAdd); Index++ {\n\t\tif ToAdd[Index].IsMeta {\n\t\t\tcontinue //Skip Metatags\n\t\t}\n\t\t//Standard tag confirmed, scan for duplicates\n\t\tfor ScanIndex := 0; ScanIndex < len(Original); ScanIndex++ {\n\t\t\tif Original[ScanIndex].IsMeta {\n\t\t\t\tcontinue //Skip comparing metas\n\t\t\t}\n\t\t\tif Original[ScanIndex].ID == ToAdd[Index].ID {\n\t\t\t\t//Remove and resize\n\t\t\t\tOriginal = append(Original[:ScanIndex], Original[ScanIndex+1:]...)\n\t\t\t\t//we just need to continue current scan from the same ScanIndex\n\t\t\t\tScanIndex--\n\t\t\t}\n\t\t}\n\t}\n\n\t//Now we can fall back to RemoveDuplicateTags to cleanup any other issues\n\treturn RemoveDuplicateTags(append(Original, ToAdd...))\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	NewLimitedWriter will automatically Close the writer on certain conditions defined by options. Since some options involve async events, writes are mutex-protected, meaning only one goroutine can call Write/Close at a time. 
 | 
	func NewLimitedWriter(w io.WriteCloser, options ...LimitedWriterOption) io.WriteCloser {
	for _, o := range options {
		w = o(w)
	}
	return NewSyncedWriteCloser(w)
} 
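
A minimal usage sketch, assuming NewLimitedWriter and WithMaxBytes live in one package and io, os, and fmt are imported; nopWriteCloser is a hypothetical helper added only for this example:

// nopWriteCloser adapts an io.Writer into an io.WriteCloser with a no-op Close.
type nopWriteCloser struct{ io.Writer }

func (nopWriteCloser) Close() error { return nil }

func demoLimitedWriter() {
	w := NewLimitedWriter(nopWriteCloser{os.Stdout}, WithMaxBytes(5))
	if _, err := w.Write([]byte("hello world")); err != nil {
		fmt.Println(err) // 11 bytes > 5: expect ErrTooLargeWrite; the writer is closed
	}
}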
 | 
	[
  "func NewLimitedWriter(w io.Writer, limit int) io.Writer {\n\treturn &limitedWriter{w, limit}\n}",
  "func ConcurrentWriteLimit(n int) LoggerOption { return concurrentWriteLimit(n) }",
  "func LimitWriter(w io.Writer, n int64) io.Writer {\n\treturn &limitedWriter{W: w, N: n}\n}",
  "func NewWriter(w io.Writer, fns ...LimitConfigFn) *Writer {\n\ts := &Writer{WriteCloser: WrapWriteCloser(w)}\n\tNewRateLimiterSetter(s, fns...)\n\treturn s\n}",
  "func Limit(w io.Writer, limit int64) io.Writer {\n\treturn &limitedWriter{w: w, n: limit}\n}",
  "func RateLimitedWriter(w io.Writer, config *Configuration) io.Writer {\n\tif !config.isRateLimitedWr() {\n\t\treturn w\n\t}\n\treturn ratelimit.Writer(w,\n\t\tratelimit.NewBucketWithRate(\n\t\t\tconfig.rateLimitWr(),\n\t\t\tconfig.maxBurstCapWr()))\n}",
  "func NewLimitWriter(w io.Writer, limiters ...Limiter) *LimitWriter {\n\treturn &LimitWriter{\n\t\tw:        w,\n\t\tlimiters: limiters,\n\t}\n}",
  "func NewWriter(w io.Writer, limiter Limiter) io.Writer {\n\treturn &writer{w, limiter}\n}",
  "func NewWriter(w io.Writer, maxDelay time.Duration) *Writer {\n\treturn &Writer{\n\t\tw: bufio.NewWriter(w),\n\t\td: maxDelay,\n\t}\n}",
  "func New(w io.Writer, n int64) io.Writer {\n\treturn &limitWriter{\n\t\tw: w,\n\t\tn: n,\n\t}\n}",
  "func WithMaxBytes(maxBytes int) LimitedWriterOption {\n\tbytesWritten := 0\n\treturn func(w io.WriteCloser) io.WriteCloser {\n\t\tpreCheck := NewPreWriteCallbacks(w, func(p []byte) error {\n\t\t\tif bytesWritten+len(p) > maxBytes {\n\t\t\t\tif err := w.Close(); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"failed to close WriteCloser writing maxBytes; Close error was: %w\", err)\n\t\t\t\t}\n\t\t\t\treturn ErrTooLargeWrite\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\treturn NewPostWriteCallbacks(preCheck, func(p []byte, n int, err error) {\n\t\t\tbytesWritten += n\n\t\t})\n\t}\n}",
  "func WriterBufferSizeOption(indicator int) Option {\n\treturn func(o *options) {\n\t\to.writerBufferSize = indicator\n\t}\n}",
  "func newWriterPool(maxWriters int) *writerPool {\n\treturn &writerPool{\n\t\tflatePool: make(chan io.WriteCloser, maxWriters),\n\t\tgzipPool:  make(chan io.WriteCloser, maxWriters),\n\t}\n}",
  "func SizeLimit(size int) WriterDecorator {\n\treturn func(writer io.Writer) io.Writer {\n\t\treturn WriterFunc(func(p []byte) (int, error) {\n\t\t\tif len(p) > size {\n\t\t\t\treturn 0, ErrSizeLimitExceeded\n\t\t\t}\n\t\t\treturn writer.Write(p)\n\t\t})\n\t}\n}",
  "func NewWithWriter(threads, length int, w PanicWriter) *Queue {\n\tvar q Queue\n\tq.j = make(jobs, length)\n\tq.w = w\n\tq.spawnThreads(threads)\n\treturn &q\n}",
  "func (th *Writer) ApplyLimit(l *Limiter) {\n\tth.limiter = l.Limiter\n}",
  "func NewRateLimiter(w io.Writer, bps int) io.WriteCloser {\n\tunit := time.Second\n\tg := min(1000, gcd(bps, int(unit)))\n\tunit /= time.Duration(g)\n\tbps /= g\n\trv := &rateLimiter{\n\t\tinput:     make(chan writeRequest),\n\t\tticker:    time.NewTicker(unit),\n\t\tlimit:     bps,\n\t\tremaining: bps,\n\t\toutput:    w,\n\t\tquit:      make(chan bool),\n\t}\n\tgo rv.run()\n\treturn rv\n}",
  "func RateLimit(limit int) WriterDecorator {\n\tlimiter := rate.NewLimiter(rate.Limit(limit), limit)\n\treturn func(writer io.Writer) io.Writer {\n\t\treturn WriterFunc(func(p []byte) (int, error) {\n\t\t\tif !limiter.Allow() {\n\t\t\t\treturn 0, ErrRateLimitExceeded\n\t\t\t}\n\t\t\treturn writer.Write(p)\n\t\t})\n\t}\n}",
  "func WriterOpt(w io.Writer) Option {\n\treturn func(o *options) {\n\t\to.writer = w\n\t}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	WithMaxBytes will block writes which would make the total stream larger than maxBytes. 
 | 
	func WithMaxBytes(maxBytes int) LimitedWriterOption {
	bytesWritten := 0
	return func(w io.WriteCloser) io.WriteCloser {
		preCheck := NewPreWriteCallbacks(w, func(p []byte) error {
			if bytesWritten+len(p) > maxBytes {
				if err := w.Close(); err != nil {
					return fmt.Errorf("failed to close WriteCloser writing maxBytes; Close error was: %w", err)
				}
				return ErrTooLargeWrite
			}
			return nil
		})
		return NewPostWriteCallbacks(preCheck, func(p []byte, n int, err error) {
			bytesWritten += n
		})
	}
} 
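
To illustrate the byte accounting (a sketch under the same assumptions as the previous example):

func demoMaxBytesAccounting() {
	w := NewLimitedWriter(nopWriteCloser{io.Discard}, WithMaxBytes(10))
	w.Write([]byte("12345")) // ok: running total 5 <= 10
	w.Write([]byte("67890")) // ok: running total 10 <= 10
	if _, err := w.Write([]byte("!")); err != nil {
		fmt.Println(err) // 10+1 > 10: Close is called, then ErrTooLargeWrite is returned
	}
}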
 | 
	[
  "func (o *consumer) setMaxPendingBytes(limit int) {\n\to.pblimit = limit\n\to.maxpb = limit / 16\n\tif o.maxpb == 0 {\n\t\to.maxpb = 1\n\t}\n}",
  "func WithMaxInflightBytes(n int) WriterOption {\n\treturn func(ms *ManagedStream) {\n\t\tms.streamSettings.MaxInflightBytes = n\n\t}\n}",
  "func WithMaxBatchSizeBytes(n int64) WriteHandlerOption {\n\treturn func(w *WriteHandler) {\n\t\tw.maxBatchSizeBytes = n\n\t}\n}",
  "func (s *Server) SetMaxHeaderBytes(b int) {\n\ts.config.MaxHeaderBytes = b\n}",
  "func WriteVarBytes(w io.Writer, buf []byte, limit uint64) error {\n    if uint64(len(buf)) >= limit { return ErrLimitExceeded }\n    if err := WriteVarUint(w, uint64(len(buf))); err != nil { return err }\n    return WriteFull(w, buf)\n}",
  "func MaxDataBytes(maxBytes, evidenceBytes int64, valsCount int) int64 {\n\tmaxDataBytes := maxBytes -\n\t\tMaxOverheadForBlock -\n\t\tMaxHeaderBytes -\n\t\tMaxCommitBytes(valsCount) -\n\t\tevidenceBytes\n\n\tif maxDataBytes < 0 {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"Negative MaxDataBytes. Block.MaxBytes=%d is too small to accommodate header&lastCommit&evidence=%d\",\n\t\t\tmaxBytes,\n\t\t\t-(maxDataBytes - maxBytes),\n\t\t))\n\t}\n\n\treturn maxDataBytes\n}",
  "func (s *settings) SetMaxWriteSize(size uint) {\n\ts.wMaxSize = size\n}",
  "func MaxBytesHandler(h Handler, n int64) Handler {\n\treturn HandlerFunc(func(w ResponseWriter, r *Request) {\n\t\tr2 := *r\n\t\tr2.Body = MaxBytesReader(w, r.Body, n)\n\t\th.ServeHTTP(w, &r2)\n\t})\n}",
  "func WithMaxBytesPerPayload(MaxBytesPerPayload int) Option {\n\treturn func(o *Options) error {\n\t\to.maxBytesPerPayload = MaxBytesPerPayload\n\t\treturn nil\n\t}\n}",
  "func MaxDataBytes(maxBytes int64, keyType crypto.KeyType, evidenceBytes int64, valsCount int) int64 {\n\tmaxDataBytes := maxBytes -\n\t\tMaxOverheadForBlock -\n\t\tMaxHeaderBytes -\n\t\tMaxCoreChainLockSize -\n\t\tMaxCommitOverheadBytes -\n\t\tevidenceBytes\n\n\tif maxDataBytes < 0 {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"Negative MaxDataBytes. Block.MaxBytes=%d is too small to accommodate header&lastCommit&evidence=%d\",\n\t\t\tmaxBytes,\n\t\t\t-(maxDataBytes - maxBytes),\n\t\t))\n\t}\n\n\treturn maxDataBytes\n}",
  "func BufferedByteLimit(n int) LoggerOption { return bufferedByteLimit(n) }",
  "func (f *FileTarget) SetMaxBufferByte(maxBufferByte int) {\n    f.maxBufferByte = maxBufferByte\n}",
  "func MaxBytesReader(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser {\n\treturn &maxBytesReader{respWriter: w, readCloser: r, bytesRemaining: n}\n}",
  "func MaxHeaderBytes(v int) Option {\n\treturn optionSetter(func(opt *Options) {\n\t\topt.MaxHeaderBytes = v\n\t})\n}",
  "func (mm *BytesMonitor) MaximumBytes() int64 {\n\tmm.mu.Lock()\n\tdefer mm.mu.Unlock()\n\treturn mm.mu.maxAllocated\n}",
  "func (e SszNetworkEncoder) EncodeWithMaxLength(w io.Writer, msg interface{}) (int, error) {\n\tif msg == nil {\n\t\treturn 0, nil\n\t}\n\tb, err := e.doEncode(msg)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif uint64(len(b)) > MaxChunkSize {\n\t\treturn 0, fmt.Errorf(\n\t\t\t\"size of encoded message is %d which is larger than the provided max limit of %d\",\n\t\t\tlen(b),\n\t\t\tMaxChunkSize,\n\t\t)\n\t}\n\t// write varint first\n\t_, err = w.Write(proto.EncodeVarint(uint64(len(b))))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif e.UseSnappyCompression {\n\t\treturn writeSnappyBuffer(w, b)\n\t}\n\treturn w.Write(b)\n}",
  "func (c *BaseConn) SetMaxWriteDelay(delay time.Duration) {\n\tc.stream.SetMaxWriteDelay(delay)\n}",
  "func MaxDataBytesNoEvidence(maxBytes int64, valsCount int) int64 {\n\tmaxDataBytes := maxBytes -\n\t\tMaxOverheadForBlock -\n\t\tMaxHeaderBytes -\n\t\tMaxCommitBytes(valsCount)\n\n\tif maxDataBytes < 0 {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"Negative MaxDataBytesUnknownEvidence. Block.MaxBytes=%d is too small to accommodate header&lastCommit&evidence=%d\",\n\t\t\tmaxBytes,\n\t\t\t-(maxDataBytes - maxBytes),\n\t\t))\n\t}\n\n\treturn maxDataBytes\n}",
  "func MaxRequestMaxBytes(max int) ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.MaxRequestMaxBytes = max\n\t\treturn nil\n\t}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Path Type implements the starlark.Value.Type() method. 
 | 
	func (p Path) Type() string {
	return "Path"
} 
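
For Path to satisfy go.starlark.net's starlark.Value interface it also needs String, Freeze, Truth, and Hash. The package's actual implementations are not shown, so this is one plausible completion:

func (p Path) String() string       { return string(p) }
func (p Path) Freeze()              {} // immutable; nothing to do
func (p Path) Truth() starlark.Bool { return starlark.Bool(p != "") }

func (p Path) Hash() (uint32, error) {
	h := fnv.New32a() // hash choice is an assumption for this sketch
	h.Write([]byte(p))
	return h.Sum32(), nil
}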
 | 
	[
  "func (p Path) Type(reference, value interface{}) Path { return p.with(Type, reference, value) }",
  "func (p *Path) Type() path.Type {\n\treturn PathType\n}",
  "func Type(value r.Value) r.Type {\n\tif !value.IsValid() || value == None {\n\t\treturn nil\n\t}\n\treturn value.Type()\n}",
  "func (a ValueNode) GetType() string {\n\treturn \"ValueNode\"\n}",
  "func (o HTTPIngressPathOutput) PathType() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v HTTPIngressPath) *string { return v.PathType }).(pulumi.StringPtrOutput)\n}",
  "func (r Ref) Type() string {\n\treturn r.parts()[0]\n}",
  "func (n *node) Type() (ty, error) {\n\tif n.Val == nil {\n\t\treturn unknown, errors.New(\"nil node\")\n\t}\n\tswitch len(n.Val) {\n\tcase 16: // Branch Node\n\t\treturn branch, nil\n\tcase 3: // Extension Node or Leaf Node\n\t\tif n.Val[0] == nil {\n\t\t\treturn unknown, errors.New(\"unknown node type\")\n\t\t}\n\t\treturn ty(n.Val[0][0]), nil\n\tdefault:\n\t\treturn unknown, errors.New(\"wrong node value, expect [16][]byte or [3][]byte, get [\" + string(len(n.Val)) + \"][]byte\")\n\t}\n}",
  "func (o IopingSpecVolumeVolumeSourceHostPathOutput) Type() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceHostPath) *string { return v.Type }).(pulumi.StringPtrOutput)\n}",
  "func (this *PosInfIf) Type() value.Type { return value.JSON }",
  "func (s *Shape) Type() string {\n\tswitch s.ShapeType {\n\tcase \"structure\":\n\t\treturn \"*\" + exportable(s.Name)\n\tcase \"integer\":\n\t\tif s.Name == \"ContentLength\" || s.Name == \"Size\" {\n\t\t\treturn \"aws.LongValue\"\n\t\t}\n\t\treturn \"aws.IntegerValue\"\n\tcase \"long\":\n\t\treturn \"aws.LongValue\"\n\tcase \"float\":\n\t\treturn \"aws.FloatValue\"\n\tcase \"double\":\n\t\treturn \"aws.DoubleValue\"\n\tcase \"string\":\n\t\treturn \"aws.StringValue\"\n\tcase \"map\":\n\t\tif service.Metadata.Protocol == \"query\" {\n\t\t\treturn exportable(s.Name)\n\t\t}\n\t\treturn \"map[\" + s.Key().ElementType() + \"]\" + s.Value().ElementType()\n\tcase \"list\":\n\t\treturn \"[]\" + s.Member().ElementType()\n\tcase \"boolean\":\n\t\treturn \"aws.BooleanValue\"\n\tcase \"blob\":\n\t\treturn \"[]byte\"\n\tcase \"timestamp\":\n\t\t// JSON protocol APIs use Unix timestamps\n\t\tif service.Metadata.Protocol == \"json\" {\n\t\t\treturn \"*aws.UnixTimestamp\"\n\t\t}\n\t\treturn \"time.Time\"\n\t}\n\n\tpanic(fmt.Errorf(\"type %q (%q) not found\", s.Name, s.ShapeType))\n}",
  "func (v Value) Type() Type {\n\tif !v.v.IsValid() {\n\t\treturn TypeUndefined\n\t}\n\n\tif v.v.CanInterface() {\n\t\ti := v.v.Interface()\n\t\tswitch i.(type) {\n\t\tcase Function:\n\t\t\treturn TypeFunction\n\t\tcase Object:\n\t\t\tif _, ok := i.(stringObject); ok {\n\t\t\t\treturn TypeString\n\t\t\t}\n\t\t\treturn TypeObject\n\t\t}\n\t}\n\n\tswitch v.v.Kind() {\n\tcase reflect.Ptr:\n\t\treturn TypeNull\n\tcase reflect.Bool:\n\t\treturn TypeBoolean\n\tcase reflect.Float64:\n\t\treturn TypeNumber\n\tdefault:\n\t\treturn TypeUndefined\n\t}\n\n}",
  "func (this *NaNIf) Type() value.Type { return value.JSON }",
  "func (element *Element) Type(value string) *Element {\n\treturn element.Attr(\"type\", value)\n}",
  "func (o JavaScriptFunctionBindingOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v JavaScriptFunctionBinding) string { return v.Type }).(pulumi.StringOutput)\n}",
  "func (s Schemas) Type() Type {\n\tif s.TF != nil {\n\t\tswitch s.TF.Type {\n\t\tcase schema.TypeBool:\n\t\t\treturn TypeBool\n\t\tcase schema.TypeInt, schema.TypeFloat:\n\t\t\treturn TypeNumber\n\t\tcase schema.TypeString:\n\t\t\treturn TypeString\n\t\tcase schema.TypeList, schema.TypeSet:\n\t\t\treturn s.ElemSchemas().Type().ListOf()\n\t\tcase schema.TypeMap:\n\t\t\treturn TypeMap\n\t\tdefault:\n\t\t\treturn TypeUnknown\n\t\t}\n\t}\n\n\treturn TypeUnknown\n}",
  "func (_this *Response) Type() ResponseType {\n\tvar ret ResponseType\n\tvalue := _this.Value_JS.Get(\"type\")\n\tret = ResponseTypeFromJS(value)\n\treturn ret\n}",
  "func (o ApiOperationRequestQueryParameterOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApiOperationRequestQueryParameter) string { return v.Type }).(pulumi.StringOutput)\n}",
  "func (r Resource) Type() string {\n\treturn r.typ\n}",
  "func (rw ReadWritePathMap) TypeForPath(path string) (devicechange.ValueType, error) {\n\tfor k, elem := range rw {\n\t\tif k == path {\n\t\t\treturn elem.ValueType, nil\n\t\t}\n\t}\n\treturn devicechange.ValueType_EMPTY, fmt.Errorf(\"path %s not found in RW paths of model\", path)\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Hash32 implements the Arg.Hash32() method. 
 | 
	func (p Path) Hash32(h hash.Hash32) { h.Write([]byte(p)) } 
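
A standalone sketch feeding Path into a concrete hash.Hash32 (fnv is an illustrative choice; the caller's actual hash is not shown):

package main

import (
	"fmt"
	"hash"
	"hash/fnv"
)

type Path string

func (p Path) Hash32(h hash.Hash32) { h.Write([]byte(p)) }

func main() {
	h := fnv.New32a()
	Path("src/main.go").Hash32(h)
	fmt.Printf("%08x\n", h.Sum32())
}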
 | 
	[
  "func (t *Target) Hash32(h hash.Hash32) {\n\th.Write([]byte(t.Name))\n\th.Write([]byte(t.Builder))\n\tfor _, arg := range t.Args {\n\t\targ.Hash32(h)\n\t}\n\tfor _, env := range t.Env {\n\t\th.Write([]byte(env))\n\t}\n}",
  "func CalcHash32(data []byte) Hash32 {\n\treturn hash.Sum(data)\n}",
  "func (s String) Hash32(h hash.Hash32) { h.Write([]byte(s)) }",
  "func (t *Transaction) Hash32() Hash32 {\n\treturn t.ID().Hash32()\n}",
  "func HexToHash32(s string) Hash32 { return BytesToHash(util.FromHex(s)) }",
  "func (id TransactionID) Hash32() Hash32 {\n\treturn Hash32(id)\n}",
  "func fnv32(hash hash.Hash32, text string) (uint32, error) {\n\t_, err := hash.Write([]byte(text))\n\treturn hash.Sum32(), err\n}",
  "func crc32Hash(data []byte) uint32 {\n\treturn crc32.Checksum(data, crc32Table)\n}",
  "func (s *Hash32) Hash(data []int) uint32 {\n\thash := Hash32(offset32)\n\tfor _, c := range data {\n\t\thash *= prime32\n\t\thash ^= Hash32(c)\n\t}\n\treturn uint32(hash)\n}",
  "func NewHash32() *Hash32 {\n\tvar s Hash32 = offset32\n\treturn &s\n}",
  "func Hash(strings ...string) uint32 {\n\tdigester := fnv.New32()\n\tfor _, s := range strings {\n\t\t_, _ = io.WriteString(digester, s)\n\t}\n\treturn digester.Sum32()\n}",
  "func FNVHash32(value uint32) uint32 {\n\thash := FNVOffsetBasis32\n\tfor i := 0; i < 4; i++ {\n\t\toctet := value & 0x00FF\n\t\tvalue >>= 8\n\n\t\thash ^= octet\n\t\thash *= FNVPrime32\n\t}\n\treturn hash\n}",
  "func hash(s string) int {\n\th := fnv.New32a()\n\tif _, err := h.Write([]byte(s)); err != nil {\n\t\tpanic(err) // should never happen\n\t}\n\n\treturn int(h.Sum32() & 0x7FFFFFFF) // mask MSB of uint32 as this will be sign bit\n}",
  "func hash(key string) uint32 {\n\tprefix := strings.Split(key, \":\")[0]\n\thasher := fnv.New32()\n\thasher.Write([]byte(prefix))\n\treturn hasher.Sum32()\n}",
  "func (h Hash20) ToHash32() (h32 Hash32) {\n\tcopy(h32[:], h[:])\n\treturn\n}",
  "func (d *digest) Sum32() uint32 {\n\treturn finalise(d.hash)\n}",
  "func (t *hashReader) Sum32() uint32 {\n\treturn t.h.Sum32()\n}",
  "func (h *Hash) Sum32() (uint32, bool) {\n\th32, ok := h.Hash.(hash.Hash32)\n\tif !ok {\n\t\treturn 0, false\n\t}\n\n\treturn h32.Sum32(), true\n}",
  "func New32() hash.Hash32 {\n\treturn NewS32(0x0)\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	starlarkPath parses Starlark kw/args and returns a corresponding `Path` wrapped in a `starlark.Value` interface. 
 | 
	func starlarkPath(
	args starlark.Tuple,
	kwargs []starlark.Tuple,
) (starlark.Value, error) {
	if len(args) != 1 {
		return nil, errors.Errorf(
			"Expected exactly 1 positional argument; found %d",
			len(args),
		)
	}
	if len(kwargs) != 0 {
		return nil, errors.Errorf(
			"Expected exactly 0 positional arguments; found %d",
			len(kwargs),
		)
	}
	if s, ok := args[0].(starlark.String); ok {
		return Path(s), nil
	}
	return nil, errors.Errorf(
		"TypeError: Expected a string argument; found %s",
		args[0].Type(),
	)
} 
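
To expose this as a path() builtin, it can be wrapped in go.starlark.net's expected signature (a sketch; the surrounding interpreter setup is assumed):

func pathBuiltin() *starlark.Builtin {
	return starlark.NewBuiltin("path", func(
		_ *starlark.Thread,
		_ *starlark.Builtin,
		args starlark.Tuple,
		kwargs []starlark.Tuple,
	) (starlark.Value, error) {
		return starlarkPath(args, kwargs)
	})
}

// Usage: pass starlark.StringDict{"path": pathBuiltin()} as the predeclared
// environment to starlark.ExecFile.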
 | 
	[
  "func extractPath(c *gin.Context, tag string) (string, []string, error) {\n\tname, required, defaultVal, err := parseTagKey(tag)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tp := c.Param(name)\n\n\t// XXX: deprecated, use of \"default\" tag is preferred\n\tif p == \"\" && defaultVal != \"\" {\n\t\treturn name, []string{defaultVal}, nil\n\t}\n\t// XXX: deprecated, use of \"validate\" tag is preferred\n\tif p == \"\" && required {\n\t\treturn \"\", nil, fmt.Errorf(\"missing path parameter: %s\", name)\n\t}\n\n\treturn name, []string{p}, nil\n}",
  "func (r *BasicRequest) PathArgs() (map[string]string, error) {\n\treturn r.Path, nil\n}",
  "func path(stmt semantic.Node) snippets.Pathway {\n\tswitch expr := stmt.(type) {\n\tcase *semantic.Global:\n\t\treturn snippets.Variable(snippets.SymbolCategory_Global, expr.Name())\n\tcase *semantic.Parameter:\n\t\treturn snippets.Variable(snippets.SymbolCategory_Parameter, expr.Name())\n\tcase *semantic.Local:\n\t\treturn snippets.Variable(snippets.SymbolCategory_Local, expr.Name())\n\tcase *semantic.Member:\n\t\treturn snippets.Field(path(expr.Object), expr.Field.Name())\n\tcase *semantic.PointerRange:\n\t\treturn snippets.Range(path(expr.Pointer))\n\tcase *semantic.SliceRange:\n\t\t// Don't change the path, since range of slice is still a slice.\n\t\treturn path(expr.Slice)\n\tcase *semantic.ArrayIndex:\n\t\treturn snippets.Elem(path(expr.Array))\n\tcase *semantic.SliceIndex:\n\t\treturn snippets.Elem(path(expr.Slice))\n\tcase *semantic.MapIndex:\n\t\treturn snippets.Elem(path(expr.Map))\n\tcase *semantic.Observed:\n\t\treturn path(expr.Parameter)\n\tcase *semantic.Cast:\n\t\treturn path(expr.Object)\n\tdefault:\n\t\tpanic(fmt.Errorf(\"Unexpect path expression %T:%v\", stmt, stmt))\n\t}\n}",
  "func (j *Json) GetPath(args ...string) *Json {\n\td := j\n\tfor _, v := range args {\n\t\tmapData := d.GetData()\n\t\tif vv, ok := mapData[v]; ok {\n\t\t\td.Data = vv\n\t\t} else {\n\t\t\td.Data = nil\n\t\t\treturn d\n\t\t}\n\t}\n\treturn d\n}",
  "func ParsePath(raw storj.Path) (path Path) {\n\t// A path may contain a bucket and an unencrypted path.\n\tparts := strings.SplitN(raw, \"/\", 2)\n\tpath.bucket = parts[0]\n\tif len(parts) > 1 {\n\t\tpath.unencPath = paths.NewUnencrypted(parts[1])\n\t}\n\tpath.raw = []byte(raw)\n\treturn path\n}",
  "func Parse(rawpath string) (Path, error) {\n\tif err := validateRawPath(rawpath); err != nil {\n\t\treturn Path{}, err\n\t}\n\trootName := root(rawpath)\n\t// package name cannot contain \"-\" so gracefully remove them\n\t// if they present.\n\tpackageName := stripNonAlphaNumeric(rootName)\n\tif err := validatePackageName(packageName); err != nil {\n\t\treturn Path{}, err\n\t}\n\tp := Path{\n\t\tRawPath: rawpath,\n\t\tRoot:    rootName,\n\t\tPackage: packageName,\n\t}\n\n\treturn p, nil\n}",
  "func (hc *HealthCheckArgsOrString) Path() *string {\n\tif hc.IsBasic() {\n\t\treturn aws.String(hc.Basic)\n\t}\n\treturn hc.Advanced.Path\n}",
  "func (jm JSONMeta) Path() string {\n\tsb := make([]string, 0)\n\tvar c JSONMetaNode = jm\n\tfor c != nil {\n\t\tsb = append([]string{c.Key()}, sb...)\n\t\t// Prepend a \".\" for non-index segments.\n\t\tif _, ok := c.Parent().(JSONMetaContainerNode); ok {\n\t\t\tsb = append([]string{\".\"}, sb...)\n\t\t}\n\t\tc = c.Parent()\n\t}\n\n\treturn strings.TrimLeft(strings.Join(sb, \"\"), \".\")\n}",
  "func Path(name, usage string) *string {\n\treturn CmdVar.Path(name, usage)\n}",
  "func (p *Parser) Path(progPath string) *Parser {\n\tpaths := strings.Split(progPath, string(os.PathSeparator))\n\treturn p.Prog(paths[len(paths)-1])\n}",
  "func parseTwirpPath(path string) (string, string, string) {\n\tparts := strings.Split(path, \"/\")\n\tif len(parts) < 2 {\n\t\treturn \"\", \"\", \"\"\n\t}\n\tmethod := parts[len(parts)-1]\n\tpkgService := parts[len(parts)-2]\n\tprefix := strings.Join(parts[0:len(parts)-2], \"/\")\n\treturn prefix, pkgService, method\n}",
  "func (s *String) Path(path string) *Value {\n\topChain := s.chain.enter(\"Path(%q)\", path)\n\tdefer opChain.leave()\n\n\treturn jsonPath(opChain, s.value, path)\n}",
  "func Path(path string) PathRetriever {\n\treturn func() (string, error) { return path, nil }\n}",
  "func parsePath(path string) (root string) {\n\troot = strings.Trim(path, \"/\")\n\treturn\n}",
  "func PathForKey(raw string) paths.Unencrypted {\n\treturn paths.NewUnencrypted(strings.TrimSuffix(raw, \"/\"))\n}",
  "func parseStarPath(ss []string) ([]string, error) {\n\tfor _, p := range ss {\n\t\tif strings.Contains(p, \"*\") {\n\t\t\tpaths, err := filepath.Glob(p)\n\t\t\treturn paths, err\n\t\t}\n\t}\n\treturn ss, nil\n}",
  "func (s *Nap) Path(path string) *Nap {\n\tbaseURL, baseErr := url.Parse(s.rawURL)\n\tpathURL, pathErr := url.Parse(path)\n\tif baseErr == nil && pathErr == nil {\n\t\ts.rawURL = baseURL.ResolveReference(pathURL).String()\n\t\tif strings.HasSuffix(path, \"/\") && !strings.HasSuffix(s.rawURL, \"/\") {\n\t\t\ts.rawURL += \"/\"\n\t\t}\n\t\treturn s\n\t}\n\treturn s\n}",
  "func Paths(e *yang.Entry, p Path, ps *[]*Path, termcolor bool) {\n\tkeyColor := color.New(color.Bold)\n\ttypeColor := color.New(color.Faint)\n\tcolor.NoColor = false\n\tif !termcolor {\n\t\tcolor.NoColor = true\n\t}\n\tswitch e.Node.(type) {\n\tcase *yang.Module: // a module has no parent\n\t\tp.Module = e.Name\n\tcase *yang.Container:\n\t\tp.XPath += fmt.Sprintf(\"/%s\", e.Name)\n\t\tp.RestConfPath += fmt.Sprintf(\"/%s\", e.Name)\n\t\tif e.Config != yang.TSUnset {\n\t\t\tp.Config = e.Config\n\t\t}\n\tcase *yang.List:\n\t\tif e.Config != yang.TSUnset {\n\t\t\tp.Config = e.Config\n\t\t}\n\t\tvar xKElem, rKElem string // xpath and restconf key elements\n\t\tif e.Key != \"\" {          // for key-less lists skip the keyElem creation\n\t\t\tkeys := strings.Split(e.Key, \" \")\n\t\t\tfor _, k := range keys {\n\t\t\t\txKElem += keyColor.Sprintf(\"[%s=*]\", k)\n\t\t\t}\n\t\t\trKElem = keyColor.Sprintf(\"%s\", strings.Join(keys, \",\")) // catenating restconf keys delimited by comma\n\t\t}\n\t\tp.XPath += fmt.Sprintf(\"/%s%s\", e.Name, xKElem)\n\t\tp.RestConfPath += fmt.Sprintf(\"/%s=%s\", e.Name, rKElem)\n\tcase *yang.LeafList:\n\t\tif e.Config != yang.TSUnset {\n\t\t\tp.Config = e.Config\n\t\t}\n\tcase *yang.Leaf:\n\t\tif e.Config != yang.TSUnset {\n\t\t\tp.Config = e.Config\n\t\t}\n\t\tp.XPath += fmt.Sprintf(\"/%s\", e.Name)\n\t\tp.RestConfPath += fmt.Sprintf(\"/%s\", e.Name)\n\t\tp.Type = e.Node.(*yang.Leaf).Type\n\t\tp.SType = typeColor.Sprint(e.Node.(*yang.Leaf).Type.Name)\n\n\t\t// if the immediate type is identityref\n\t\tif e.Node.(*yang.Leaf).Type.IdentityBase != nil {\n\t\t\tp.SType += typeColor.Sprintf(\"->%v\", e.Node.(*yang.Leaf).Type.IdentityBase.Name)\n\t\t}\n\n\t\t//handling leafref\n\t\tif e.Type.Kind == yang.Yleafref {\n\t\t\tp.SType += typeColor.Sprintf(\"->%v\", e.Type.Path)\n\t\t}\n\n\t\t//handling enumeration types\n\t\tif e.Type.Kind == yang.Yenum {\n\t\t\tp.SType += typeColor.Sprintf(\"%+q\", e.Type.Enum.Names())\n\t\t}\n\n\t\t//handling union types\n\t\tif e.Type.Kind == yang.Yunion {\n\t\t\tvar u []string // list of union types\n\t\t\tfor _, ut := range e.Node.(*yang.Leaf).Type.Type {\n\t\t\t\tswitch {\n\t\t\t\tcase ut.IdentityBase != nil:\n\t\t\t\t\tu = append(u, fmt.Sprintf(\"identityref->%v\", ut.IdentityBase.Name))\n\t\t\t\tcase ut.YangType.Kind == yang.Yenum:\n\t\t\t\t\tu = append(u, fmt.Sprintf(\"enumeration%+q\", ut.YangType.Enum.Names()))\n\t\t\t\tdefault:\n\t\t\t\t\tu = append(u, ut.Name)\n\t\t\t\t}\n\t\t\t}\n\t\t\tp.SType += typeColor.Sprintf(\"{%v}\", strings.Join(u, \" \"))\n\t\t}\n\t\t*ps = append(*ps, &p)\n\t}\n\n\t// ne is a nested entries list\n\tne := make([]string, 0, len(e.Dir))\n\n\tfor k := range e.Dir {\n\t\tne = append(ne, k)\n\t}\n\tsort.Strings(ne)\n\tfor _, k := range ne {\n\t\tPaths(e.Dir[k], p, ps, termcolor)\n\t}\n}",
  "func (f prefixed) QualifyPath(p string) string {\n\treturn path.Join(f.prefix, p)\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	GlobGroup Type implements the starlark.Value.Type() method. 
 | 
	func (gg GlobGroup) Type() string {
	return "GlobGroup"
} 
 | 
	[
  "func (o *AutoscalerResourceLimitsGPULimit) Type() string {\n\tif o != nil && o.bitmap_&2 != 0 {\n\t\treturn o.type_\n\t}\n\treturn \"\"\n}",
  "func Type(value r.Value) r.Type {\n\tif !value.IsValid() || value == None {\n\t\treturn nil\n\t}\n\treturn value.Type()\n}",
  "func (p *Package) Type(name string) (t *Type) {\n\tt, _ = p.Members[name].(*Type)\n\treturn\n}",
  "func (group *ContainerGroup_Spec_ARM) GetType() string {\n\treturn \"Microsoft.ContainerInstance/containerGroups\"\n}",
  "func (d *Driver) Type() (t string) {\n\treturn \"go\"\n}",
  "func (registry *Registry) GetType() string {\n\treturn \"Microsoft.ContainerRegistry/registries\"\n}",
  "func GetType() string {\n\treturn kind\n}",
  "func (t TsTimestampAggregation) Type() TsAggregationType {\n\treturn t.kind\n}",
  "func (o ReportGroupOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ReportGroup) pulumi.StringOutput { return v.Type }).(pulumi.StringOutput)\n}",
  "func Type(typ string) gomock.Matcher {\n\treturn &typeMatcher{typ: typ}\n}",
  "func (element *Element) Type(value string) *Element {\n\treturn element.Attr(\"type\", value)\n}",
  "func (f BitFlag) Type() ValueType { return FLAG }",
  "func (m *RecurrencePattern) GetType()(*RecurrencePatternType) {\n    return m.type_escaped\n}",
  "func (a ValueNode) GetType() string {\n\treturn \"ValueNode\"\n}",
  "func (o ReplicationRecoveryPlanRecoveryGroupOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ReplicationRecoveryPlanRecoveryGroup) string { return v.Type }).(pulumi.StringOutput)\n}",
  "func (o *AutoscalerResourceLimitsGPULimit) GetType() (value string, ok bool) {\n\tok = o != nil && o.bitmap_&2 != 0\n\tif ok {\n\t\tvalue = o.type_\n\t}\n\treturn\n}",
  "func (m *PlayGroupMutation) Type() string {\n\treturn m.typ\n}",
  "func (group *ResourceGroup) GetType() string {\n\treturn \"Microsoft.Resources/resourceGroups\"\n}",
  "func (tp TemplateProperty) Type() string {\n\tif len(tp.ConstValue) > 0 {\n\t\treturn fmt.Sprintf(\"%s\", tp.ConstValue)\n\t}\n\treturn tp.Package\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	String Hash32 implements the Arg.Hash32() method. 
 | 
	func (s String) Hash32(h hash.Hash32) { h.Write([]byte(s)) } 
 | 
	[
  "func HexToHash32(s string) Hash32 { return BytesToHash(util.FromHex(s)) }",
  "func (p Path) Hash32(h hash.Hash32) { h.Write([]byte(p)) }",
  "func (t *Target) Hash32(h hash.Hash32) {\n\th.Write([]byte(t.Name))\n\th.Write([]byte(t.Builder))\n\tfor _, arg := range t.Args {\n\t\targ.Hash32(h)\n\t}\n\tfor _, env := range t.Env {\n\t\th.Write([]byte(env))\n\t}\n}",
  "func fnv32(hash hash.Hash32, text string) (uint32, error) {\n\t_, err := hash.Write([]byte(text))\n\treturn hash.Sum32(), err\n}",
  "func Hash(strings ...string) uint32 {\n\tdigester := fnv.New32()\n\tfor _, s := range strings {\n\t\t_, _ = io.WriteString(digester, s)\n\t}\n\treturn digester.Sum32()\n}",
  "func hash(s string) int {\n\th := fnv.New32a()\n\tif _, err := h.Write([]byte(s)); err != nil {\n\t\tpanic(err) // should never happen\n\t}\n\n\treturn int(h.Sum32() & 0x7FFFFFFF) // mask MSB of uint32 as this will be sign bit\n}",
  "func (id TransactionID) Hash32() Hash32 {\n\treturn Hash32(id)\n}",
  "func StrHash(s string) uint32 {\n\th := fnv.New32a()\n\th.Write([]byte(s))\n\treturn h.Sum32()\n}",
  "func HashStr(value string) uint32 {\n\th := fnv.New32a()\n\th.Write([]byte(value))\n\treturn h.Sum32()\n}",
  "func (t *Transaction) Hash32() Hash32 {\n\treturn t.ID().Hash32()\n}",
  "func CalcHash32(data []byte) Hash32 {\n\treturn hash.Sum(data)\n}",
  "func hash(key string) uint32 {\n\tprefix := strings.Split(key, \":\")[0]\n\thasher := fnv.New32()\n\thasher.Write([]byte(prefix))\n\treturn hasher.Sum32()\n}",
  "func hash(s string) string {\n\thash := fnv.New32a()\n\thash.Write([]byte(s))\n\tintHash := hash.Sum32()\n\tresult := fmt.Sprintf(\"%08x\", intHash)\n\treturn result\n}",
  "func fnvHash(str string) uint32 {\n    h := fnv.New32()\n    h.Write([]byte(str))\n    return h.Sum32()\n}",
  "func crc32Hash(data []byte) uint32 {\n\treturn crc32.Checksum(data, crc32Table)\n}",
  "func strhash(p *string, h uintptr) uintptr",
  "func StrHash(v uintptr) uint32 {\n\tc_v := (C.gconstpointer)(v)\n\n\tretC := C.g_str_hash(c_v)\n\tretGo := (uint32)(retC)\n\n\treturn retGo\n}",
  "func stringToHash(s string) uint32 {\n\th := fnv.New32a()\n\th.Write([]byte(s))\n\treturn h.Sum32()\n}",
  "func Sha3256(args ...[]byte) []byte {\n\thasher := sha3.New256()\n\tfor _, bytes := range args {\n\t\thasher.Write(bytes)\n\t}\n\treturn hasher.Sum(nil)\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	starlarkSub parses Starlark kw/args and returns a corresponding `Sub` wrapped in a `starlark.Value` interface. This is used in the `sub()` starlark predefined/builtin function. 
 | 
	func starlarkSub(
	args starlark.Tuple,
	kwargs []starlark.Tuple,
) (starlark.Value, error) {
	// Expect exactly one positional argument, which represents the format
	// string.
	if len(args) != 1 {
		return nil, errors.Errorf(
			"Expected 1 positional argument 'format'; found %d",
			len(args),
		)
	}
	// Validate that the positional argument is a string.
	format, ok := args[0].(starlark.String)
	if !ok {
		return nil, errors.Errorf(
			"TypeError: Expected argument 'format' has type str; found %s",
			args[0].Type(),
		)
	}
	// Treat the keyword arguments as substitutions, including parsing their
	// values into `Arg`s.
	substitutions := make([]Substitution, len(kwargs))
	for i, kwarg := range kwargs {
		value, err := starlarkValueToArg(kwarg[1])
		if err != nil {
			return nil, err
		}
		substitutions[i] = Substitution{
			Key:   string(kwarg[0].(starlark.String)),
			Value: value,
		}
	}
	// TODO: Error if there are substitution placeholders in the format string
	// (e.g., `${Foo}`) for which there are no corresponding substitutions.
	// This is particularly important since the placeholder syntax is valid
	// bash, for example, if the placeholder is `${PATH}`, it would resolve at
	// runtime to the PATH env var, which would be a different down-the-road
	// error if it errored at all.
	// Build and return the resulting `*Sub` structure.
	return &Sub{Format: string(format), Substitutions: substitutions}, nil
} 
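
A direct invocation sketch (assumes the Sub and Substitution types from this package and the go.starlark.net import):

func demoSub() (starlark.Value, error) {
	args := starlark.Tuple{starlark.String("echo ${msg}")}
	kwargs := []starlark.Tuple{
		{starlark.String("msg"), starlark.String("hi")},
	}
	// Returns &Sub{Format: "echo ${msg}",
	//   Substitutions: []Substitution{{Key: "msg", Value: String("hi")}}}
	return starlarkSub(args, kwargs)
}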
 | 
	[
  "func (n Name) Sub(v string) Name {\n\treturn n.WithType(v)\n}",
  "func Sub(a, b Expr) Expr {\n\treturn &subOp{&simpleOperator{a, b, scanner.SUB}}\n}",
  "func TestSub(t *testing.T) {\n\tfmt.Println(Sub(2,1))\n}",
  "func (o *IntrospectedOAuth2Token) SetSub(v string) {\n\to.Sub = &v\n}",
  "func Sub(attrs []htmlgo.Attribute, children ...HTML) HTML {\n\treturn &htmlgo.Tree{Tag: \"sub\", Attributes: attrs, Children: children}\n}",
  "func (e Exp) Sub(operand interface{}) Exp {\n\treturn naryBuiltin(subtractKind, nil, e, operand)\n}",
  "func sub(s string, subs ...string) string {\n\tbuf := &bytes.Buffer{}\n\tif len(subs)%2 == 1 {\n\t\tpanic(\"some variable does not have a corresponding value\")\n\t}\n\n\t// copy 'subs' into a map\n\tsubsMap := make(map[string]string)\n\tfor i := 0; i < len(subs); i += 2 {\n\t\tsubsMap[subs[i]] = subs[i+1]\n\t}\n\n\t// do the substitution\n\ttemplate.Must(template.New(\"\").Parse(s)).Execute(buf, subsMap)\n\treturn buf.String()\n}",
  "func NewSub(first, second Expression) Function {\n\trv := &Sub{\n\t\t*NewBinaryFunctionBase(\"sub\", first, second),\n\t}\n\n\trv.expr = rv\n\treturn rv\n}",
  "func NewSub(x, y value.Value) *InstSub {\n\tinst := &InstSub{X: x, Y: y}\n\t// Compute type.\n\tinst.Type()\n\treturn inst\n}",
  "func Sub(a, b Expr) Expr {\n\treturn &arithmeticOperator{&simpleOperator{a, b, scanner.SUB}}\n}",
  "func SUB(left interface{}, right interface{}) Expr {\n\treturn binaryExprFor(\"-\", left, right)\n}",
  "func Sub(el ...tuple.TupleElement) Subspace {\n\treturn subspace{tuple.Tuple(el).Pack()}\n}",
  "func Sub(t1 TermT, t2 TermT) TermT {\n\treturn TermT(C.yices_sub(C.term_t(t1), C.term_t(t2)))\n}",
  "func Sub(v string) predicate.Account {\n\treturn predicate.Account(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldSub), v))\n\t})\n}",
  "func Sub(r rune) (rune, error) {\n\ts, ok := subscripts[r]\n\tif !ok {\n\t\treturn r, fmt.Errorf(\"no corresponding subscript: %c\", r)\n\t}\n\treturn s, nil\n}",
  "func (b *Subber) Sub(ctx *Ctx, bs Bindings, s string) (string, error) {\n\tvar (\n\t\t// s0 is just for an error message (if required).\n\t\ts0 = s\n\n\t\t// acc remembers all previous values to detect loops.\n\t\tacc = make([]string, 0, b.Limit)\n\t)\n\n\tfor i := 0; i < b.Limit; i++ {\n\t\tctx.trf(\"Subber.Sub at %s\", s)\n\t\tvar err error\n\t\ts, err = b.pipeSub(ctx, bs, s)\n\t\tif err != nil {\n\t\t\tctx.trf(\"Subber.Sub error at %s\", s)\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tfor j, f := range b.Procs {\n\t\t\tif s, err = f(ctx, s); err != nil {\n\t\t\t\tctx.trf(\"Subber.Sub proc error at %s\", s)\n\t\t\t\treturn \"\", fmt.Errorf(\"subst proc %d: %w\", j, err)\n\t\t\t}\n\t\t}\n\n\t\t// Have we encountered this string before?\n\t\tfor _, s1 := range acc {\n\t\t\tif s == s1 {\n\t\t\t\tctx.trf(\"Subber.Sub output %s\", s)\n\t\t\t\treturn s, nil\n\t\t\t}\n\t\t}\n\t\t// Nope.  Remember it.\n\t\tacc = append(acc, s)\n\t}\n\n\tctx.trf(\"Subber.Sub limited at %s\", s)\n\treturn \"\", fmt.Errorf(\"recursive subst limit (%d) exceeded on at '%s' starting from '%s'\", b.Limit, s, s0)\n}",
  "func NewSub(x, y value.Value) *InstSub {\n\treturn &InstSub{\n\t\tX:        x,\n\t\tY:        y,\n\t\tMetadata: make(map[string]*metadata.Metadata),\n\t}\n}",
  "func (c Class) Sub(path ...string) (res Class) {\n\tres = make([]string, len(c)+len(path))\n\tcopy(res, c)\n\tcopy(res[len(c):], path)\n\treturn\n}",
  "func valSub(context interface{}, key, variable string, commands map[string]interface{}, vars map[string]string, results map[string]interface{}) error {\n\n\t// Before: {\"field\": \"#number:variable_name\"}  After: {\"field\": 1234}\n\t// key: \"field\"  variable:\"#cmd:variable_name\"\n\n\t// Remove the # characters from the left.\n\tvalue := variable[1:]\n\n\t// Find the first instance of the separator.\n\tidx := strings.IndexByte(value, ':')\n\tif idx == -1 {\n\t\terr := fmt.Errorf(\"Invalid variable format %q, missing :\", variable)\n\t\tlog.Error(context, \"varSub\", err, \"Parsing variable\")\n\t\treturn err\n\t}\n\n\t// Split the key and variable apart.\n\tcmd := value[0:idx]\n\tvari := value[idx+1:]\n\n\tswitch key {\n\tcase \"$in\":\n\t\tif len(cmd) != 6 || cmd[0:4] != \"data\" {\n\t\t\terr := fmt.Errorf(\"Invalid $in command %q, missing \\\"data\\\" keyword or malformed\", cmd)\n\t\t\tlog.Error(context, \"varSub\", err, \"$in command processing\")\n\t\t\treturn err\n\t\t}\n\n\t\tv, err := dataLookup(context, cmd[5:6], vari, results)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcommands[key] = v\n\t\treturn nil\n\n\tdefault:\n\t\tv, err := varLookup(context, cmd, vari, vars, results)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcommands[key] = v\n\t\treturn nil\n\t}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Hash32 implements the Arg.Hash32() method. 
 | 
	func (t *Target) Hash32(h hash.Hash32) {
	h.Write([]byte(t.Name))
	h.Write([]byte(t.Builder))
	for _, arg := range t.Args {
		arg.Hash32(h)
	}
	for _, env := range t.Env {
		h.Write([]byte(env))
	}
} 
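
A sketch of turning this into a stable digest (fnv is an illustrative choice; the package's actual hash constructor is not shown):

func targetDigest(t *Target) uint32 {
	h := fnv.New32a()
	t.Hash32(h)
	return h.Sum32()
}

// Note: Hash32 writes fields without separators, so the Name/Builder pairs
// ("ab", "c") and ("a", "bc") produce identical digests.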
 | 
	[
  "func (p Path) Hash32(h hash.Hash32) { h.Write([]byte(p)) }",
  "func CalcHash32(data []byte) Hash32 {\n\treturn hash.Sum(data)\n}",
  "func (s String) Hash32(h hash.Hash32) { h.Write([]byte(s)) }",
  "func (t *Transaction) Hash32() Hash32 {\n\treturn t.ID().Hash32()\n}",
  "func HexToHash32(s string) Hash32 { return BytesToHash(util.FromHex(s)) }",
  "func (id TransactionID) Hash32() Hash32 {\n\treturn Hash32(id)\n}",
  "func fnv32(hash hash.Hash32, text string) (uint32, error) {\n\t_, err := hash.Write([]byte(text))\n\treturn hash.Sum32(), err\n}",
  "func crc32Hash(data []byte) uint32 {\n\treturn crc32.Checksum(data, crc32Table)\n}",
  "func (s *Hash32) Hash(data []int) uint32 {\n\thash := Hash32(offset32)\n\tfor _, c := range data {\n\t\thash *= prime32\n\t\thash ^= Hash32(c)\n\t}\n\treturn uint32(hash)\n}",
  "func NewHash32() *Hash32 {\n\tvar s Hash32 = offset32\n\treturn &s\n}",
  "func Hash(strings ...string) uint32 {\n\tdigester := fnv.New32()\n\tfor _, s := range strings {\n\t\t_, _ = io.WriteString(digester, s)\n\t}\n\treturn digester.Sum32()\n}",
  "func FNVHash32(value uint32) uint32 {\n\thash := FNVOffsetBasis32\n\tfor i := 0; i < 4; i++ {\n\t\toctet := value & 0x00FF\n\t\tvalue >>= 8\n\n\t\thash ^= octet\n\t\thash *= FNVPrime32\n\t}\n\treturn hash\n}",
  "func hash(s string) int {\n\th := fnv.New32a()\n\tif _, err := h.Write([]byte(s)); err != nil {\n\t\tpanic(err) // should never happen\n\t}\n\n\treturn int(h.Sum32() & 0x7FFFFFFF) // mask MSB of uint32 as this will be sign bit\n}",
  "func hash(key string) uint32 {\n\tprefix := strings.Split(key, \":\")[0]\n\thasher := fnv.New32()\n\thasher.Write([]byte(prefix))\n\treturn hasher.Sum32()\n}",
  "func (h Hash20) ToHash32() (h32 Hash32) {\n\tcopy(h32[:], h[:])\n\treturn\n}",
  "func (d *digest) Sum32() uint32 {\n\treturn finalise(d.hash)\n}",
  "func (t *hashReader) Sum32() uint32 {\n\treturn t.h.Sum32()\n}",
  "func (h *Hash) Sum32() (uint32, bool) {\n\th32, ok := h.Hash.(hash.Hash32)\n\tif !ok {\n\t\treturn 0, false\n\t}\n\n\treturn h32.Sum32(), true\n}",
  "func New32() hash.Hash32 {\n\treturn NewS32(0x0)\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	starlarkTarget parses Starlark kw/args and returns a corresponding `Target` wrapped in a `starlark.Value` interface. This is used in the `target()` starlark predefined/builtin function. 
 | 
	func starlarkTarget(
	args starlark.Tuple,
	kwargs []starlark.Tuple,
) (starlark.Value, error) {
	// For the sake of simpler parsing, we'll simply require that all args are
	// passed as kwargs (no positional args).
	if len(args) != 0 {
		return nil, errors.Errorf(
			"Expected 0 positional args; found %d",
			len(args),
		)
	}
	// Make sure we have exactly the right number of keyword arguments.
	if len(kwargs) != 4 {
		found := make([]string, len(kwargs))
		for i, kwarg := range kwargs {
			found[i] = string(kwarg[0].(starlark.String))
		}
		return nil, errors.Errorf(
			"Expected kwargs {name, builder, args, env}; found {%s}",
			strings.Join(found, ", "),
		)
	}
	// Iterate through the keyword arguments and grab the values for each
	// kwarg, putting them into the right `starlark.Value` variable. We'll
	// convert these to Go values for the `*Target` struct later.
	var nameKwarg, builderKwarg, argsKwarg, envKwarg starlark.Value
	for _, kwarg := range kwargs {
		switch key := kwarg[0].(starlark.String); key {
		case "name":
			if nameKwarg != nil {
				return nil, errors.Errorf("Duplicate argument 'name' found")
			}
			nameKwarg = kwarg[1]
		case "builder":
			if builderKwarg != nil {
				return nil, errors.Errorf("Duplicate argument 'builder' found")
			}
			builderKwarg = kwarg[1]
		case "args":
			if argsKwarg != nil {
				return nil, errors.Errorf("Duplicate argument 'args' found")
			}
			argsKwarg = kwarg[1]
		case "env":
			if envKwarg != nil {
				return nil, errors.Errorf("Duplicate argument 'env' found")
			}
			envKwarg = kwarg[1]
		default:
			return nil, errors.Errorf("Unexpected argument '%s' found", key)
		}
	}
	// Ok, now we've made sure we have values for the required keyword args and
	// that no additional arguments were passed. Next, we'll convert these
	// `starlark.Value`-typed variables into Go values for the output `*Target`
	// struct.
	// Validate that the `name` kwarg was a string.
	name, ok := nameKwarg.(starlark.String)
	if !ok {
		return nil, errors.Errorf(
			"TypeError: argument 'name': expected str, got %s",
			nameKwarg.Type(),
		)
	}
	// Validate that the `builder` kwarg was a string.
	builder, ok := builderKwarg.(starlark.String)
	if !ok {
		return nil, errors.Errorf(
			"TypeError: argument 'builder': expected str, got %s",
			builderKwarg.Type(),
		)
	}
	// Validate that the `args` kwarg was a list of `Arg`s, and convert it
	// into a `[]Arg` for the `Target.Args` field.
	argsSL, ok := argsKwarg.(*starlark.List)
	if !ok {
		return nil, errors.Errorf(
			"TypeError: argument 'args': expected list, got %s",
			argsKwarg.Type(),
		)
	}
	args_ := make([]Arg, argsSL.Len())
	for i := range args_ {
		arg, err := starlarkValueToArg(argsSL.Index(i))
		if err != nil {
			return nil, errors.Wrapf(err, "Argument 'args[%d]'", i)
		}
		args_[i] = arg
	}
	// Validate that the `env` kwarg was a list of strings, and convert it into
	// a `[]string` for the `Target.Env` field.
	envSL, ok := envKwarg.(*starlark.List)
	if !ok {
		return nil, errors.Errorf(
			"TypeError: argument 'env': expected list, got %s",
			envKwarg.Type(),
		)
	}
	env := make([]string, envSL.Len())
	for i := range env {
		str, ok := envSL.Index(i).(starlark.String)
		if !ok {
			return nil, errors.Errorf(
				"TypeError: argument 'env[%d]': expected string; found %s",
				i,
				envSL.Index(i).Type(),
			)
		}
		env[i] = string(str)
	}
	// By now, all of the fields have been validated, so build and return the
	// final `*Target`.
	return &Target{
		Name:    string(name),
		Builder: string(builder),
		Args:    args_,
		Env:     env,
	}, nil
} 
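
A direct invocation sketch showing the expected kwargs shape (assumes this package's String implementation of Arg and the go.starlark.net import):

func demoTarget() (starlark.Value, error) {
	kwargs := []starlark.Tuple{
		{starlark.String("name"), starlark.String("hello")},
		{starlark.String("builder"), starlark.String("/bin/sh")},
		{starlark.String("args"), starlark.NewList([]starlark.Value{
			starlark.String("-c"), starlark.String("echo hi"),
		})},
		{starlark.String("env"), starlark.NewList(nil)},
	}
	return starlarkTarget(nil, kwargs) // no positional args allowed
}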
 | 
	[
  "func NewTarget(val interface{}) (*SentryTarget, error) {\n\tt := new(SentryTarget)\n\terr := mapstructure.Decode(val, t)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"error decoding target\")\n\t}\n\n\ttargetMap, err := getRawTargetMap(val)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"incorrect target type\")\n\t}\n\n\tif v, ok := targetMap[\"follow_redirects\"]; ok {\n\t\tt.FollowRedirects = v.(bool)\n\t} else {\n\t\tt.FollowRedirects = true\n\t}\n\tif len(t.CheckInterval) > 0 {\n\t\tt.interval, err = time.ParseDuration(t.CheckInterval)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"error parsing interval target=%s\", t.Name)\n\t\t}\n\t}\n\tt.nextCheckTime = time.Now().UTC()\n\tt.nextCheckTime = t.nextCheckTime.Add(time.Duration(-1 * t.nextCheckTime.Nanosecond()))\n\tt.CurrentState = true\n\n\tt.AlertEmailList, err = decodeVerifiedEmailList(t.AlertEmail)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"error parsing alert_email target=%s\", t.Name)\n\t}\n\n\tif len(t.FromEmail) > 0 {\n\t\tverifiedEmail, err := FormatEmail(t.FromEmail)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"email format not valid: %s\", t.FromEmail)\n\t\t}\n\t\tt.FromEmail = verifiedEmail\n\t}\n\treturn t, nil\n}",
  "func (graph *BuildGraph) Target(label BuildLabel) *BuildTarget {\n\tt, _ := graph.targets.Get(label)\n\treturn t\n}",
  "func NewTarget(ctx *pulumi.Context,\n\tname string, args *TargetArgs, opts ...pulumi.ResourceOption) (*Target, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Target == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Target'\")\n\t}\n\tif args.UpstreamId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'UpstreamId'\")\n\t}\n\tif args.Weight == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Weight'\")\n\t}\n\tvar resource Target\n\terr := ctx.RegisterResource(\"kong:index/target:Target\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
  "func GetTarget(args []string, defaultTarget string) string {\n\tif len(args) >= 1 {\n\t\treturn args[0]\n\t} else {\n\t\treturn defaultTarget\n\t}\n}",
  "func NewTarget() *Target {\n\tt := &Target{\n\t\tType: TARGET_ASST,\n\t}\n\treturn t\n}",
  "func NewTarget() Target {\n\treturn Target{Alias: \"$tag_host $tag_name\", DsType: \"influxdb\"}\n}",
  "func NewTarget(typ string, g *graph.Graph, n *graph.Node, capture *types.Capture, uuids flow.UUIDs, bpf *flow.BPF, fta *flow.TableAllocator) (Target, error) {\n\tswitch typ {\n\tcase \"netflowv5\":\n\t\treturn NewNetFlowV5Target(g, n, capture, uuids)\n\tcase \"erspanv1\":\n\t\treturn NewERSpanTarget(g, n, capture)\n\tcase \"\", \"local\":\n\t\treturn NewLocalTarget(g, n, capture, uuids, fta)\n\t}\n\n\treturn nil, ErrTargetTypeUnknown\n}",
  "func TargetFromString(text string) (*Target, error) {\n\tt := NewTarget()\n\terr := yaml.Unmarshal([]byte(text), t)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tt.fixDefaults()\n\n\treturn t, nil\n}",
  "func (graph *BuildGraph) Target(label BuildLabel) *BuildTarget {\n\tgraph.mutex.RLock()\n\tdefer graph.mutex.RUnlock()\n\treturn graph.targets[label]\n}",
  "func parseTarget(target string) (Target, error) {\n\tu, err := url.Parse(target)\n\tif err != nil {\n\t\treturn Target{}, err\n\t}\n\t// For targets of the form \"[scheme]://[authority]/endpoint, the endpoint\n\t// value returned from url.Parse() contains a leading \"/\". Although this is\n\t// in accordance with RFC 3986, we do not want to break existing resolver\n\t// implementations which expect the endpoint without the leading \"/\". So, we\n\t// end up stripping the leading \"/\" here. But this will result in an\n\t// incorrect parsing for something like \"unix:///path/to/socket\". Since we\n\t// own the \"unix\" resolver, we can workaround in the unix resolver by using\n\t// the `URL` field instead of the `Endpoint` field.\n\tendpoint := u.Path\n\tif endpoint == \"\" {\n\t\tendpoint = u.Opaque\n\t}\n\tendpoint = strings.TrimPrefix(endpoint, \"/\")\n\treturn Target{\n\t\tScheme:    u.Scheme,\n\t\tAuthority: u.Host,\n\t\tEndpoint:  endpoint,\n\t\tURL:       *u,\n\t}, nil\n}",
  "func GetTarget(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *TargetState, opts ...pulumi.ResourceOption) (*Target, error) {\n\tvar resource Target\n\terr := ctx.ReadResource(\"kong:index/target:Target\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
  "func (t *Target) GetTarget(spec *TestSpec) (*TargetDetails, error) {\n\n\tswitch t.Kind {\n\tcase nodePort, service:\n\t\thost, port, err := spec.Kub.GetServiceHostPort(helpers.DefaultNamespace, t.GetServiceName(spec))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &TargetDetails{\n\t\t\tPort: port,\n\t\t\tIP:   []byte(host),\n\t\t}, nil\n\tcase direct:\n\t\tfilter := `{.status.podIP}{\"=\"}{.spec.containers[0].ports[0].containerPort}`\n\t\tres, err := spec.Kub.Get(helpers.DefaultNamespace, fmt.Sprintf(\"pod %s\", spec.DestPod)).Filter(filter)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"cannot get pod '%s' info: %s\", spec.DestPod, err)\n\t\t}\n\t\tvals := strings.Split(res.String(), \"=\")\n\t\tport, err := strconv.Atoi(vals[1])\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"cannot get pod '%s' port: %s\", spec.DestPod, err)\n\t\t}\n\t\treturn &TargetDetails{\n\t\t\tPort: port,\n\t\t\tIP:   []byte(vals[0]),\n\t\t}, nil\n\t}\n\treturn nil, fmt.Errorf(\"%s not Implemented yet\", t.Kind)\n}",
  "func (client *Client) Target(kind string, name string) Target {\n\tclient.mutex.RLock()\n\tdefer client.mutex.RUnlock()\n\n\tfor _, target := range client.targets {\n\t\tif target.Kind() == kind && strings.EqualFold(name, target.Name()) {\n\t\t\treturn target\n\t\t}\n\t}\n\n\treturn nil\n}",
  "func (tc *Configs) Target(name string) (*Target, bool) {\n\tfilePrefix, target := splitTarget(name)\n\tfor _, tf := range tc.Files {\n\t\tif filePrefix != \"\" && tf.Basename() != filePrefix {\n\t\t\tcontinue\n\t\t}\n\t\ttarget, ok := tf.Targets[target]\n\t\tif ok {\n\t\t\treturn target, ok\n\t\t}\n\t}\n\treturn nil, false\n}",
  "func ParseTarget(target string) resolver.Target {\n\tif strings.HasPrefix(target, ipv4Scheme+\":\") {\n\t\treturn resolver.Target{\n\t\t\tScheme:   ipv4Scheme,\n\t\t\tEndpoint: target[len(ipv4Scheme)+1:],\n\t\t}\n\t}\n\tif strings.HasPrefix(target, ipv6Scheme+\":\") {\n\t\treturn resolver.Target{\n\t\t\tScheme:   ipv6Scheme,\n\t\t\tEndpoint: target[len(ipv6Scheme)+1:],\n\t\t}\n\t}\n\treturn grpcutil.ParseTarget(target, false)\n}",
  "func NewTarget(url string) (t *Target) {\n    t = &Target{Url:url, method:defaultMethod, header:http.Header{}}\n    return t\n}",
  "func (f *SAMDGClientForwarder) Target() string {\n\treturn f.Config().TargetHost + \":\" + f.Config().TargetPort\n}",
  "func Target(urns []string) Option {\n\treturn optionFunc(func(opts *Options) {\n\t\topts.Target = urns\n\t})\n}",
  "func (launcher *Launcher) GetTarget() string {\n\tlauncher.Mutex.RLock()\n\targ := launcher.target\n\tlauncher.Mutex.RUnlock()\n\treturn arg\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Arg starlarkValueToArg takes a starlark value and parses it into an `Arg`. 
 | 
	func starlarkValueToArg(v starlark.Value) (Arg, error) {
	switch x := v.(type) {
	case Arg:
		return x, nil
	case starlark.String:
		return String(x), nil
	default:
		return nil, errors.Errorf(
			"Cannot convert %s into a target argument",
			v.Type(),
		)
	}
} 
 | 
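A hedged usage sketch for the conversion above — not from the source: it assumes this package's `Arg` interface and `String` type (both visible in the function itself) plus go.starlark.net/starlark on the import path, and exercises both the accepted and the rejected branch.

func exampleConvert() error {
	// A starlark string converts cleanly into a target argument.
	arg, err := starlarkValueToArg(starlark.String("hello"))
	if err != nil {
		return err
	}
	_ = arg // arg is String("hello"), which satisfies Arg

	// Any other starlark type (here an int) falls into the default branch.
	if _, err := starlarkValueToArg(starlark.MakeInt(42)); err == nil {
		return errors.New("expected a conversion error for int")
	}
	return nil
}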
	[
  "func ToArg(name, value string) string {\n\treturn name + \"=\" + value\n}",
  "func FromArg(arg string) (key, value string) {\n\tparts := strings.Split(arg, \"=\")\n\tif len(parts) == 1 {\n\t\treturn parts[0], \"\"\n\t}\n\treturn parts[0], parts[1]\n}",
  "func decodeArg(b *hcl.Block) (*Arg, errors.Error) {\n\targ := new(Arg)\n\targ.name = b.Labels[0]\n\tbc, d := b.Body.Content(schemaArg)\n\tif err := errors.EvalDiagnostics(d); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := arg.populateArgAttributes(bc.Attributes); err != nil {\n\t\treturn nil, err\n\t}\n\treturn arg, nil\n}",
  "func castArg(prefix string, f field.Field, argIndex int) string {\n\tswitch f.DatatypeName {\n\tcase field.TypeString:\n\t\treturn fmt.Sprintf(\"%s%s := args[%d]\", prefix, f.Name.UpperCamel, argIndex)\n\tcase field.TypeUint, field.TypeInt, field.TypeBool:\n\t\treturn fmt.Sprintf(`%s%s, err := cast.To%sE(args[%d])\n            if err != nil {\n                return err\n            }`,\n\t\t\tprefix, f.Name.UpperCamel, strings.Title(f.Datatype), argIndex)\n\tcase field.TypeCustom:\n\t\treturn fmt.Sprintf(`%[1]v%[2]v := new(types.%[3]v)\n\t\t\terr = json.Unmarshal([]byte(args[%[4]v]), %[1]v%[2]v)\n    \t\tif err != nil {\n                return err\n            }`, prefix, f.Name.UpperCamel, f.Datatype, argIndex)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown type %s\", f.DatatypeName))\n\t}\n}",
  "func parseFnArg(form interface{}) (interface{}, error) {\n\treturn parseAnyOf(form, parseVariable, parseConstant, parseSrcVar)\n}",
  "func marshalArg(arg any) any {\n\tif buf, err := json.Marshal(arg); err == nil {\n\t\targ = string(buf)\n\t}\n\treturn arg\n}",
  "func ParseArg(arg string) (Config, error) {\n\tc := Config{}\n\tparams, err := strvals.Parse(arg)\n\n\tif err != nil {\n\t\treturn c, err\n\t}\n\n\tc, err = ParseMap(params)\n\treturn c, err\n}",
  "func (c *Cmd) StringArg(name string, value string, desc string) *string {\n\treturn c.mkArg(arg{name: name, desc: desc}, value).(*string)\n}",
  "func (c *Cmd) IntArg(name string, value int, desc string) *int {\n\treturn c.mkArg(arg{name: name, desc: desc}, value).(*int)\n}",
  "func (cmd *flagable) flagFromArg(arg string) (bool, []*Flag) {\n\tvar flags []*Flag\n\n\t// Do nothing if flags terminated\n\tif cmd.flagsTerminated {\n\t\treturn false, flags\n\t}\n\tif arg[len(arg)-1] == '=' {\n\t\tcmd.errf(\"invalid flag format\")\n\t}\n\targ = strings.Split(arg, \"=\")[0]\n\n\t// Determine if we need to terminate flags\n\tisFlag := arg[0] == '-'\n\tareAliases := isFlag && arg[1] != '-'\n\tisTerminator := !areAliases && len(arg) == 2\n\n\tif !isFlag || isTerminator {\n\t\tcmd.flagsTerminated = true\n\t\treturn false, flags\n\t}\n\n\t// Determine if name or alias\n\tif areAliases {\n\t\taliases := arg[1:]\n\t\tfor _, c := range aliases {\n\t\t\tflag, ok := cmd.aliases[c]\n\t\t\tif !ok {\n\t\t\t\tcmd.errf(\"invalid alias: %v\", string(c))\n\t\t\t}\n\t\t\tflags = append(flags, flag)\n\t\t}\n\t} else {\n\t\tname := arg[2:]\n\t\tflag, ok := cmd.flags[name]\n\t\tif !ok {\n\t\t\tcmd.errf(\"invalid flag\")\n\t\t}\n\t\tflags = append(flags, flag)\n\t}\n\treturn areAliases, flags\n}",
  "func parseArgument(p *parser) (*ast.Argument, error) {\n\tvar label string\n\tvar labelStartPos, labelEndPos ast.Position\n\n\texpr, err := parseExpression(p, lowestBindingPower)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.skipSpaceAndComments()\n\n\t// If a colon follows the expression, the expression was our label.\n\tif p.current.Is(lexer.TokenColon) {\n\t\tlabelEndPos = p.current.EndPos\n\n\t\tidentifier, ok := expr.(*ast.IdentifierExpression)\n\t\tif !ok {\n\t\t\treturn nil, p.syntaxError(\n\t\t\t\t\"expected identifier for label, got %s\",\n\t\t\t\texpr,\n\t\t\t)\n\t\t}\n\t\tlabel = identifier.Identifier.Identifier\n\t\tlabelStartPos = expr.StartPosition()\n\n\t\t// Skip the identifier\n\t\tp.nextSemanticToken()\n\n\t\texpr, err = parseExpression(p, lowestBindingPower)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif len(label) > 0 {\n\t\treturn ast.NewArgument(\n\t\t\tp.memoryGauge,\n\t\t\tlabel,\n\t\t\t&labelStartPos,\n\t\t\t&labelEndPos,\n\t\t\texpr,\n\t\t), nil\n\t}\n\treturn ast.NewUnlabeledArgument(p.memoryGauge, expr), nil\n}",
  "func EscapeArg(param interface{}) interface{} {\n\tif param == nil {\n\t\tparam = \"\"\n\t}\n\tif str, ok := param.(string); ok {\n\t\tswitch GetTerminal() {\n\t\tcase TermBash:\n\t\t\treturn \"'\" + strings.Replace(str, \"'\", \"'\\\\''\", -1) + \"'\"\n\t\tcase TermCmd, TermPowershell:\n\t\t\treturn \"'\" + strings.Replace(str, \"'\", \"''\", -1) + \"'\"\n\t\t}\n\t}\n\treturn param\n}",
  "func OptArg(args map[string]interface{}, argName string, result interface{}, defaultValue interface{}) error {\n\tif _, ok := args[argName]; ok {\n\t\treturn Arg(args, argName, result)\n\t}\n\treturn mapstructure.Decode(defaultValue, result)\n}",
  "func NewArg(key, descr string) Arg {\n\treturn arg{key: key, descr: descr}\n}",
  "func decodeArg(aop instArg, x uint32) Arg {\n\tswitch aop {\n\tdefault:\n\t\treturn nil\n\n\tcase arg_APSR:\n\t\treturn APSR\n\tcase arg_FPSCR:\n\t\treturn FPSCR\n\n\tcase arg_R_0:\n\t\treturn Reg(x & (1<<4 - 1))\n\tcase arg_R_8:\n\t\treturn Reg((x >> 8) & (1<<4 - 1))\n\tcase arg_R_12:\n\t\treturn Reg((x >> 12) & (1<<4 - 1))\n\tcase arg_R_16:\n\t\treturn Reg((x >> 16) & (1<<4 - 1))\n\n\tcase arg_R_12_nzcv:\n\t\tr := Reg((x >> 12) & (1<<4 - 1))\n\t\tif r == R15 {\n\t\t\treturn APSR_nzcv\n\t\t}\n\t\treturn r\n\n\tcase arg_R_16_WB:\n\t\tmode := AddrLDM\n\t\tif (x>>21)&1 != 0 {\n\t\t\tmode = AddrLDM_WB\n\t\t}\n\t\treturn Mem{Base: Reg((x >> 16) & (1<<4 - 1)), Mode: mode}\n\n\tcase arg_R_rotate:\n\t\tRm := Reg(x & (1<<4 - 1))\n\t\ttyp, count := decodeShift(x)\n\t\t// ROR #0 here means ROR #0, but decodeShift rewrites to RRX #1.\n\t\tif typ == RotateRightExt {\n\t\t\treturn Reg(Rm)\n\t\t}\n\t\treturn RegShift{Rm, typ, uint8(count)}\n\n\tcase arg_R_shift_R:\n\t\tRm := Reg(x & (1<<4 - 1))\n\t\tRs := Reg((x >> 8) & (1<<4 - 1))\n\t\ttyp := Shift((x >> 5) & (1<<2 - 1))\n\t\treturn RegShiftReg{Rm, typ, Rs}\n\n\tcase arg_R_shift_imm:\n\t\tRm := Reg(x & (1<<4 - 1))\n\t\ttyp, count := decodeShift(x)\n\t\tif typ == ShiftLeft && count == 0 {\n\t\t\treturn Reg(Rm)\n\t\t}\n\t\treturn RegShift{Rm, typ, uint8(count)}\n\n\tcase arg_R1_0:\n\t\treturn Reg((x & (1<<4 - 1)))\n\tcase arg_R1_12:\n\t\treturn Reg(((x >> 12) & (1<<4 - 1)))\n\tcase arg_R2_0:\n\t\treturn Reg((x & (1<<4 - 1)) | 1)\n\tcase arg_R2_12:\n\t\treturn Reg(((x >> 12) & (1<<4 - 1)) | 1)\n\n\tcase arg_SP:\n\t\treturn SP\n\n\tcase arg_Sd_Dd:\n\t\tv := (x >> 12) & (1<<4 - 1)\n\t\tvx := (x >> 22) & 1\n\t\tsz := (x >> 8) & 1\n\t\tif sz != 0 {\n\t\t\treturn D0 + Reg(vx<<4+v)\n\t\t} else {\n\t\t\treturn S0 + Reg(v<<1+vx)\n\t\t}\n\n\tcase arg_Dd_Sd:\n\t\treturn decodeArg(arg_Sd_Dd, x^(1<<8))\n\n\tcase arg_Sd:\n\t\tv := (x >> 12) & (1<<4 - 1)\n\t\tvx := (x >> 22) & 1\n\t\treturn S0 + Reg(v<<1+vx)\n\n\tcase arg_Sm_Dm:\n\t\tv := (x >> 0) & (1<<4 - 1)\n\t\tvx := (x >> 5) & 1\n\t\tsz := (x >> 8) & 1\n\t\tif sz != 0 {\n\t\t\treturn D0 + Reg(vx<<4+v)\n\t\t} else {\n\t\t\treturn S0 + Reg(v<<1+vx)\n\t\t}\n\n\tcase arg_Sm:\n\t\tv := (x >> 0) & (1<<4 - 1)\n\t\tvx := (x >> 5) & 1\n\t\treturn S0 + Reg(v<<1+vx)\n\n\tcase arg_Dn_half:\n\t\tv := (x >> 16) & (1<<4 - 1)\n\t\tvx := (x >> 7) & 1\n\t\treturn RegX{D0 + Reg(vx<<4+v), int((x >> 21) & 1)}\n\n\tcase arg_Sn_Dn:\n\t\tv := (x >> 16) & (1<<4 - 1)\n\t\tvx := (x >> 7) & 1\n\t\tsz := (x >> 8) & 1\n\t\tif sz != 0 {\n\t\t\treturn D0 + Reg(vx<<4+v)\n\t\t} else {\n\t\t\treturn S0 + Reg(v<<1+vx)\n\t\t}\n\n\tcase arg_Sn:\n\t\tv := (x >> 16) & (1<<4 - 1)\n\t\tvx := (x >> 7) & 1\n\t\treturn S0 + Reg(v<<1+vx)\n\n\tcase arg_const:\n\t\tv := x & (1<<8 - 1)\n\t\trot := (x >> 8) & (1<<4 - 1) * 2\n\t\tif rot > 0 && v&3 == 0 {\n\t\t\t// could rotate less\n\t\t\treturn ImmAlt{uint8(v), uint8(rot)}\n\t\t}\n\t\tif rot >= 24 && ((v<<(32-rot))&0xFF)>>(32-rot) == v {\n\t\t\t// could wrap around to rot==0.\n\t\t\treturn ImmAlt{uint8(v), uint8(rot)}\n\t\t}\n\t\treturn Imm(v>>rot | v<<(32-rot))\n\n\tcase arg_endian:\n\t\treturn Endian((x >> 9) & 1)\n\n\tcase arg_fbits:\n\t\treturn Imm((16 << ((x >> 7) & 1)) - ((x&(1<<4-1))<<1 | (x>>5)&1))\n\n\tcase arg_fp_0:\n\t\treturn Imm(0)\n\n\tcase arg_imm24:\n\t\treturn Imm(x & (1<<24 - 1))\n\n\tcase arg_imm5:\n\t\treturn Imm((x >> 7) & (1<<5 - 1))\n\n\tcase arg_imm5_32:\n\t\tx = (x >> 7) & (1<<5 - 1)\n\t\tif x == 0 {\n\t\t\tx = 32\n\t\t}\n\t\treturn Imm(x)\n\n\tcase arg_imm5_nz:\n\t\tx = (x >> 7) & 
(1<<5 - 1)\n\t\tif x == 0 {\n\t\t\treturn nil\n\t\t}\n\t\treturn Imm(x)\n\n\tcase arg_imm_4at16_12at0:\n\t\treturn Imm((x>>16)&(1<<4-1)<<12 | x&(1<<12-1))\n\n\tcase arg_imm_12at8_4at0:\n\t\treturn Imm((x>>8)&(1<<12-1)<<4 | x&(1<<4-1))\n\n\tcase arg_imm_vfp:\n\t\tx = (x>>16)&(1<<4-1)<<4 | x&(1<<4-1)\n\t\treturn Imm(x)\n\n\tcase arg_label24:\n\t\timm := (x & (1<<24 - 1)) << 2\n\t\treturn PCRel(int32(imm<<6) >> 6)\n\n\tcase arg_label24H:\n\t\th := (x >> 24) & 1\n\t\timm := (x&(1<<24-1))<<2 | h<<1\n\t\treturn PCRel(int32(imm<<6) >> 6)\n\n\tcase arg_label_m_12:\n\t\td := int32(x & (1<<12 - 1))\n\t\treturn Mem{Base: PC, Mode: AddrOffset, Offset: int16(-d)}\n\n\tcase arg_label_p_12:\n\t\td := int32(x & (1<<12 - 1))\n\t\treturn Mem{Base: PC, Mode: AddrOffset, Offset: int16(d)}\n\n\tcase arg_label_pm_12:\n\t\td := int32(x & (1<<12 - 1))\n\t\tu := (x >> 23) & 1\n\t\tif u == 0 {\n\t\t\td = -d\n\t\t}\n\t\treturn Mem{Base: PC, Mode: AddrOffset, Offset: int16(d)}\n\n\tcase arg_label_pm_4_4:\n\t\td := int32((x>>8)&(1<<4-1)<<4 | x&(1<<4-1))\n\t\tu := (x >> 23) & 1\n\t\tif u == 0 {\n\t\t\td = -d\n\t\t}\n\t\treturn PCRel(d)\n\n\tcase arg_lsb_width:\n\t\tlsb := (x >> 7) & (1<<5 - 1)\n\t\tmsb := (x >> 16) & (1<<5 - 1)\n\t\tif msb < lsb || msb >= 32 {\n\t\t\treturn nil\n\t\t}\n\t\treturn Imm(msb + 1 - lsb)\n\n\tcase arg_mem_R:\n\t\tRn := Reg((x >> 16) & (1<<4 - 1))\n\t\treturn Mem{Base: Rn, Mode: AddrOffset}\n\n\tcase arg_mem_R_pm_R_postindex:\n\t\t// Treat [<Rn>],+/-<Rm> like [<Rn>,+/-<Rm>{,<shift>}]{!}\n\t\t// by forcing shift bits to <<0 and P=0, W=0 (postindex=true).\n\t\treturn decodeArg(arg_mem_R_pm_R_shift_imm_W, x&^((1<<7-1)<<5|1<<24|1<<21))\n\n\tcase arg_mem_R_pm_R_W:\n\t\t// Treat [<Rn>,+/-<Rm>]{!} like [<Rn>,+/-<Rm>{,<shift>}]{!}\n\t\t// by forcing shift bits to <<0.\n\t\treturn decodeArg(arg_mem_R_pm_R_shift_imm_W, x&^((1<<7-1)<<5))\n\n\tcase arg_mem_R_pm_R_shift_imm_offset:\n\t\t// Treat [<Rn>],+/-<Rm>{,<shift>} like [<Rn>,+/-<Rm>{,<shift>}]{!}\n\t\t// by forcing P=1, W=0 (index=false, wback=false).\n\t\treturn decodeArg(arg_mem_R_pm_R_shift_imm_W, x&^(1<<21)|1<<24)\n\n\tcase arg_mem_R_pm_R_shift_imm_postindex:\n\t\t// Treat [<Rn>],+/-<Rm>{,<shift>} like [<Rn>,+/-<Rm>{,<shift>}]{!}\n\t\t// by forcing P=0, W=0 (postindex=true).\n\t\treturn decodeArg(arg_mem_R_pm_R_shift_imm_W, x&^(1<<24|1<<21))\n\n\tcase arg_mem_R_pm_R_shift_imm_W:\n\t\tRn := Reg((x >> 16) & (1<<4 - 1))\n\t\tRm := Reg(x & (1<<4 - 1))\n\t\ttyp, count := decodeShift(x)\n\t\tu := (x >> 23) & 1\n\t\tw := (x >> 21) & 1\n\t\tp := (x >> 24) & 1\n\t\tif p == 0 && w == 1 {\n\t\t\treturn nil\n\t\t}\n\t\tsign := int8(+1)\n\t\tif u == 0 {\n\t\t\tsign = -1\n\t\t}\n\t\tmode := AddrMode(uint8(p<<1) | uint8(w^1))\n\t\treturn Mem{Base: Rn, Mode: mode, Sign: sign, Index: Rm, Shift: typ, Count: count}\n\n\tcase arg_mem_R_pm_imm12_offset:\n\t\t// Treat [<Rn>,#+/-<imm12>] like [<Rn>{,#+/-<imm12>}]{!}\n\t\t// by forcing P=1, W=0 (index=false, wback=false).\n\t\treturn decodeArg(arg_mem_R_pm_imm12_W, x&^(1<<21)|1<<24)\n\n\tcase arg_mem_R_pm_imm12_postindex:\n\t\t// Treat [<Rn>],#+/-<imm12> like [<Rn>{,#+/-<imm12>}]{!}\n\t\t// by forcing P=0, W=0 (postindex=true).\n\t\treturn decodeArg(arg_mem_R_pm_imm12_W, x&^(1<<24|1<<21))\n\n\tcase arg_mem_R_pm_imm12_W:\n\t\tRn := Reg((x >> 16) & (1<<4 - 1))\n\t\tu := (x >> 23) & 1\n\t\tw := (x >> 21) & 1\n\t\tp := (x >> 24) & 1\n\t\tif p == 0 && w == 1 {\n\t\t\treturn nil\n\t\t}\n\t\tsign := int8(+1)\n\t\tif u == 0 {\n\t\t\tsign = -1\n\t\t}\n\t\timm := int16(x & (1<<12 - 1))\n\t\tmode := AddrMode(uint8(p<<1) | 
uint8(w^1))\n\t\treturn Mem{Base: Rn, Mode: mode, Offset: int16(sign) * imm}\n\n\tcase arg_mem_R_pm_imm8_postindex:\n\t\t// Treat [<Rn>],#+/-<imm8> like [<Rn>{,#+/-<imm8>}]{!}\n\t\t// by forcing P=0, W=0 (postindex=true).\n\t\treturn decodeArg(arg_mem_R_pm_imm8_W, x&^(1<<24|1<<21))\n\n\tcase arg_mem_R_pm_imm8_W:\n\t\tRn := Reg((x >> 16) & (1<<4 - 1))\n\t\tu := (x >> 23) & 1\n\t\tw := (x >> 21) & 1\n\t\tp := (x >> 24) & 1\n\t\tif p == 0 && w == 1 {\n\t\t\treturn nil\n\t\t}\n\t\tsign := int8(+1)\n\t\tif u == 0 {\n\t\t\tsign = -1\n\t\t}\n\t\timm := int16((x>>8)&(1<<4-1)<<4 | x&(1<<4-1))\n\t\tmode := AddrMode(uint8(p<<1) | uint8(w^1))\n\t\treturn Mem{Base: Rn, Mode: mode, Offset: int16(sign) * imm}\n\n\tcase arg_mem_R_pm_imm8at0_offset:\n\t\tRn := Reg((x >> 16) & (1<<4 - 1))\n\t\tu := (x >> 23) & 1\n\t\tsign := int8(+1)\n\t\tif u == 0 {\n\t\t\tsign = -1\n\t\t}\n\t\timm := int16(x&(1<<8-1)) << 2\n\t\treturn Mem{Base: Rn, Mode: AddrOffset, Offset: int16(sign) * imm}\n\n\tcase arg_option:\n\t\treturn Imm(x & (1<<4 - 1))\n\n\tcase arg_registers:\n\t\treturn RegList(x & (1<<16 - 1))\n\n\tcase arg_registers2:\n\t\tx &= 1<<16 - 1\n\t\tn := 0\n\t\tfor i := 0; i < 16; i++ {\n\t\t\tif x>>uint(i)&1 != 0 {\n\t\t\t\tn++\n\t\t\t}\n\t\t}\n\t\tif n < 2 {\n\t\t\treturn nil\n\t\t}\n\t\treturn RegList(x)\n\n\tcase arg_registers1:\n\t\tRt := (x >> 12) & (1<<4 - 1)\n\t\treturn RegList(1 << Rt)\n\n\tcase arg_satimm4:\n\t\treturn Imm((x >> 16) & (1<<4 - 1))\n\n\tcase arg_satimm5:\n\t\treturn Imm((x >> 16) & (1<<5 - 1))\n\n\tcase arg_satimm4m1:\n\t\treturn Imm((x>>16)&(1<<4-1) + 1)\n\n\tcase arg_satimm5m1:\n\t\treturn Imm((x>>16)&(1<<5-1) + 1)\n\n\tcase arg_widthm1:\n\t\treturn Imm((x>>16)&(1<<5-1) + 1)\n\n\t}\n}",
  "func NewArgument(meta ScriptMetaData, node *node32, value Value) Argument {\n\treturn &argument{astNode: astNode{meta: meta, node: node}, value: value}\n}",
  "func tokenToFormulaArg(token efp.Token) formulaArg {\n\tswitch token.TSubType {\n\tcase efp.TokenSubTypeLogical:\n\t\treturn newBoolFormulaArg(strings.EqualFold(token.TValue, \"TRUE\"))\n\tcase efp.TokenSubTypeNumber:\n\t\tnum, _ := strconv.ParseFloat(token.TValue, 64)\n\t\treturn newNumberFormulaArg(num)\n\tdefault:\n\t\treturn newStringFormulaArg(token.TValue)\n\t}\n}",
  "func getIntArg(arg flags.SplitArgument, args []string) (int, bool, error) {\n\tvar rawVal string\n\tconsumeValue := false\n\trawVal, hasVal := arg.Value()\n\tif !hasVal {\n\t\tif len(args) == 0 {\n\t\t\treturn 0, false, fmt.Errorf(\"no value specified\")\n\t\t}\n\t\trawVal = args[0]\n\t\tconsumeValue = true\n\t}\n\tval, err := strconv.Atoi(rawVal)\n\tif err != nil {\n\t\treturn val, consumeValue, fmt.Errorf(\"expected an integer value but got '%v'\", rawVal)\n\t}\n\treturn val, consumeValue, nil\n}",
  "func prepareArg(value reflect.Value, argType reflect.Type) (reflect.Value, error) {\n\tif !value.IsValid() {\n\t\tif !canBeNil(argType) {\n\t\t\treturn reflect.Value{}, fmt.Errorf(\"value is nil; should be of type %s\", argType)\n\t\t}\n\t\tvalue = reflect.Zero(argType)\n\t}\n\tif value.Type().AssignableTo(argType) {\n\t\treturn value, nil\n\t}\n\tif intLike(value.Kind()) && intLike(argType.Kind()) && value.Type().ConvertibleTo(argType) {\n\t\tvalue = value.Convert(argType)\n\t\treturn value, nil\n\t}\n\treturn reflect.Value{}, fmt.Errorf(\"value has type %s; should be %s\", value.Type(), argType)\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	makeLoader makes a load function for a given workspace. 
 | 
	func makeLoader(root string, packages map[string]string) loadFunc {
	return makeLoaderHelper(
		root,
		packages,
		map[string]*cacheEntry{},
		starlark.StringDict{
			"target": builtinWrapper("target", starlarkTarget),
			"sub":    builtinWrapper("sub", starlarkSub),
			"path":   builtinWrapper("path", starlarkPath),
			"glob":   builtinWrapper("glob", starlarkGlob),
		},
	)
} 
 | 
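A minimal wiring sketch with hypothetical paths: it assumes `loadFunc` has the same shape as starlark's `Thread.Load` (which `execModule` below relies on) and that `packages` maps package names to workspace directories. Modules executed through this loader see the four builtins registered above (`target`, `sub`, `path`, `glob`) and share one cache.

func exampleLoader() (starlark.StringDict, error) {
	// Hypothetical workspace root and package mapping.
	load := makeLoader("/repo/root", map[string]string{
		"std": "/repo/root/std",
	})
	return execModule("//pkg:BUILD.star", load) // hypothetical module path
}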
	[
  "func makeLoad(workingDir string) func(thread *starlark.Thread, module string) (starlark.StringDict, error) {\n\tf := repl.MakeLoad()\n\n\treturn func(thread *starlark.Thread, module string) (starlark.StringDict, error) {\n\t\t// To ensure config generation is hermetic we require that all loads specify a module\n\t\t// with an explicit relative path.\n\t\tif !isExplicitRelativePath(module) {\n\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\"cannot load '%s', path to module must be relative (ie begin with ./ or ../)\",\n\t\t\t\tmodule,\n\t\t\t)\n\t\t}\n\n\t\tpath, err := filepath.Abs(filepath.Join(workingDir, module))\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to get absolute path to %s\", module)\n\t\t}\n\n\t\treturn f(thread, path)\n\t}\n}",
  "func MakeLoad() func(thread *starlark.Thread, module string) (starlark.StringDict, error) {\n\ttype entry struct {\n\t\tglobals starlark.StringDict\n\t\terr     error\n\t}\n\n\tvar cache = make(map[string]*entry)\n\n\treturn func(thread *starlark.Thread, module string) (starlark.StringDict, error) {\n\t\te, ok := cache[module]\n\t\tif e == nil {\n\t\t\tif ok {\n\t\t\t\t// request for package whose loading is in progress\n\t\t\t\treturn nil, fmt.Errorf(\"cycle in load graph\")\n\t\t\t}\n\n\t\t\t// Add a placeholder to indicate \"load in progress\".\n\t\t\tcache[module] = nil\n\n\t\t\t// Load it.\n\t\t\tthread := &starlark.Thread{Name: \"exec \" + module, Load: thread.Load}\n\t\t\tglobals, err := starlark.ExecFile(thread, module, nil, nil)\n\t\t\te = &entry{globals, err}\n\n\t\t\t// Update the cache.\n\t\t\tcache[module] = e\n\t\t}\n\t\treturn e.globals, e.err\n\t}\n}",
  "func NewLoader(options *protocols.ExecuterOptions) (WorkflowLoader, error) {\n\ttagFilter := filter.New(&filter.Config{\n\t\tTags:        options.Options.Tags,\n\t\tExcludeTags: options.Options.ExcludeTags,\n\t\tAuthors:     options.Options.Author,\n\t\tSeverities:  options.Options.Severity,\n\t\tIncludeTags: options.Options.IncludeTags,\n\t})\n\tpathFilter := filter.NewPathFilter(&filter.PathFilterConfig{\n\t\tIncludedTemplates: options.Options.IncludeTemplates,\n\t\tExcludedTemplates: options.Options.ExcludedTemplates,\n\t}, options.Catalog)\n\treturn &workflowLoader{pathFilter: pathFilter, tagFilter: tagFilter, options: options}, nil\n}",
  "func NewLoader(st *State, api *API) *Loader {\n\treturn &Loader{\n\t\tS: st,\n\t\tA: api,\n\t}\n}",
  "func Loader(state *lua.LState) int {\n\tmod := state.SetFuncs(state.NewTable(), map[string]lua.LGFunction{\n\t\t\"compile\": func(L *lua.LState) int {\n\t\t\tcode := L.CheckString(1)\n\n\t\t\tluaCode, err := Compile(L, code)\n\t\t\tif err != nil {\n\t\t\t\tstate.Push(lua.LNil)\n\t\t\t\tstate.Push(lua.LString(err.Error()))\n\n\t\t\t\treturn 2\n\t\t\t}\n\n\t\t\tL.Push(lua.LString(luaCode))\n\t\t\treturn 1\n\t\t},\n\t})\n\n\t// returns the module\n\tstate.Push(mod)\n\treturn 1\n}",
  "func NewLoader(ctx context.Context) (Loader, error) {\n\tvar l allTheLoaders\n\tvar err error\n\n\tl.sm, err = NewSecretManagerLoader(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &l, nil\n}",
  "func NewLoader(opts LoaderOptions) *Loader {\n\tvar (\n\t\tglobals  = opts.ComponentGlobals\n\t\tservices = opts.Services\n\t\thost     = opts.Host\n\t\treg      = opts.ComponentRegistry\n\t)\n\n\tif reg == nil {\n\t\treg = DefaultComponentRegistry{}\n\t}\n\n\tl := &Loader{\n\t\tlog:          log.With(globals.Logger, \"controller_id\", globals.ControllerID),\n\t\ttracer:       tracing.WrapTracerForLoader(globals.TraceProvider, globals.ControllerID),\n\t\tglobals:      globals,\n\t\tservices:     services,\n\t\thost:         host,\n\t\tcomponentReg: reg,\n\n\t\tgraph:         &dag.Graph{},\n\t\toriginalGraph: &dag.Graph{},\n\t\tcache:         newValueCache(),\n\t\tcm:            newControllerMetrics(globals.ControllerID),\n\t}\n\tl.cc = newControllerCollector(l, globals.ControllerID)\n\n\tif globals.Registerer != nil {\n\t\tglobals.Registerer.MustRegister(l.cc)\n\t\tglobals.Registerer.MustRegister(l.cm)\n\t}\n\n\treturn l\n}",
  "func NewLoader(r *proxy.Register) *Loader {\n\treturn &Loader{Register: r}\n}",
  "func Loader(config map[string]interface{}) (go2chef.Source, error) {\n\ts := &Source{\n\t\tlogger:     go2chef.GetGlobalLogger(),\n\t\tSourceName: \"\",\n\t}\n\tif err := mapstructure.Decode(config, s); err != nil {\n\t\treturn nil, err\n\t}\n\tif s.SourceName == \"\" {\n\t\ts.SourceName = TypeName\n\t}\n\t// default to using the secret id as the filename if one wasn't provided\n\tif s.FileName == \"\" {\n\t\ts.FileName = s.SecretId\n\t}\n\treturn s, nil\n}",
  "func Loader(thread *skylark.Thread, module string) (dict skylark.StringDict, err error) {\n\tswitch module {\n\tcase time.ModuleName:\n\t\treturn time.LoadModule()\n\tcase http.ModuleName:\n\t\treturn http.LoadModule()\n\tcase xlsx.ModuleName:\n\t\treturn xlsx.LoadModule()\n\tcase html.ModuleName:\n\t\treturn html.LoadModule()\n\t}\n\n\treturn nil, fmt.Errorf(\"invalid module\")\n}",
  "func Loader(l *lua.LState) int {\n\tfn, err := l.LoadString(LarkLib)\n\tif err != nil {\n\t\tl.RaiseError(\"%s\", err)\n\t}\n\tl.Push(fn)\n\tl.Call(0, 1)\n\treturn 1\n}",
  "func (l *loader) loadFunc() build.LoadFunc {\n\n\treturn func(pos token.Pos, path string) *build.Instance {\n\t\tcfg := l.cfg\n\n\t\timpPath := importPath(path)\n\t\tif isLocalImport(path) {\n\t\t\treturn cfg.newErrInstance(pos, impPath,\n\t\t\t\terrors.Newf(pos, \"relative import paths not allowed (%q)\", path))\n\t\t}\n\n\t\t// is it a builtin?\n\t\tif strings.IndexByte(strings.Split(path, \"/\")[0], '.') == -1 {\n\t\t\tif l.cfg.StdRoot != \"\" {\n\t\t\t\tp := cfg.newInstance(pos, impPath)\n\t\t\t\t_ = l.importPkg(pos, p)\n\t\t\t\treturn p\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\n\t\tp := cfg.newInstance(pos, impPath)\n\t\t_ = l.importPkg(pos, p)\n\t\treturn p\n\t}\n}",
  "func (m *Runtime) GetLoader(_ modules.Job) lua.LGFunction {\n\treturn func() lua.LGFunction {\n\t\treturn func(luaState *lua.LState) int {\n\t\t\tvar exports = map[string]lua.LGFunction{\n\t\t\t\t\"logLevel\":     m.returnString(m.flg.LogLevel),\n\t\t\t\t\"isDebug\":      m.returnBool(m.flg.Debug),\n\t\t\t\t\"isOnce\":       m.returnBool(m.flg.Once),\n\t\t\t\t\"withScript\":   m.returnString(m.flg.Script),\n\t\t\t\t\"configSource\": m.returnString(m.flg.ConfigFilePath),\n\t\t\t\t\"safeMode\":     m.returnBool(m.flg.SafeMode),\n\t\t\t}\n\n\t\t\tmod := luaState.SetFuncs(luaState.NewTable(), exports)\n\n\t\t\tluaState.Push(mod)\n\t\t\treturn 1\n\t\t}\n\t}()\n}",
  "func SpecLoader(specFileOrDirs []string, createBlkTxTables bool) (*Projection, error) {\n\tvar projection *Projection\n\tvar err error\n\n\tif len(specFileOrDirs) == 0 {\n\t\treturn nil, fmt.Errorf(\"please provide a spec file or directory\")\n\t}\n\n\tprojection, err = NewProjectionFromFolder(specFileOrDirs...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error parsing spec: %v\", err)\n\t}\n\n\tif createBlkTxTables {\n\t\t// add block & tx to tables definition\n\t\tblkTxTables := getBlockTxTablesDefinition()\n\n\t\tfor k, v := range blkTxTables {\n\t\t\tprojection.Tables[k] = v\n\t\t}\n\n\t}\n\n\treturn projection, nil\n}",
  "func loader(filename string) (content []byte, err error) {\n\tcontent, err = ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn content, nil\n}",
  "func LuaLoader(L *lua.LState) int {\n\tmod := L.SetFuncs(L.NewTable(), exports)\n\tmt := L.NewTypeMetatable(\"factory\")\n\tL.SetGlobal(\"factory\", mt)\n\tL.Push(mod)\n\n\treturn 1\n}",
  "func NewLoader(dimension *Dimension, x, z int32) *Loader {\n\treturn &Loader{dimension, x, z, func(chunk *chunks.Chunk) {}, func(chunk *chunks.Chunk) {}, func(){}, sync.RWMutex{}, make(map[int]*chunks.Chunk), make(map[int]bool), make(map[int]bool)}\n}",
  "func NewLoader(basepath string, downloader Downloader, logger logger) Loader {\n\treturn Loader{\n\t\tapi:  downloader,\n\t\troot: basepath,\n\t\tlog:  logger,\n\t}\n}",
  "func NewLoader(config *config.Config) Loader {\n\treturn &diskLoader{config}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	execModule executes a module using a given load function and returns the global variables. 
 | 
	func execModule(module string, load loadFunc) (starlark.StringDict, error) {
	return load(&starlark.Thread{Name: module, Load: load}, module)
} 
 | 
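One detail worth spelling out: the fresh thread is created with `Load: load`, so any `load()` statements the module itself makes re-enter the same `loadFunc` (and therefore its cache). A hedged caller sketch:

func runModule(module string, load loadFunc) error {
	globals, err := execModule(module, load)
	if err != nil {
		return err
	}
	// Keys() returns the module's global names in sorted order.
	for _, name := range globals.Keys() {
		fmt.Println(name, "=", globals[name])
	}
	return nil
}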
	[
  "func loader(thread *starlark.Thread, module string) (starlark.StringDict, error) {\n\tpipePos := strings.LastIndex(module, \"|\")\n\tmustLoad := pipePos < 0\n\tvar defaultSymbol string\n\tif !mustLoad {\n\t\tdefaultSymbol = module[pipePos+1:]\n\t\tmodule = module[:pipePos]\n\t}\n\tmodulePath, err := moduleName2AbsPath(module, thread.Local(callerDirKey).(string))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\te, ok := moduleCache[modulePath]\n\tif e == nil {\n\t\tif ok {\n\t\t\treturn nil, fmt.Errorf(\"cycle in load graph\")\n\t\t}\n\n\t\t// Add a placeholder to indicate \"load in progress\".\n\t\tmoduleCache[modulePath] = nil\n\n\t\t// Decide if we should load.\n\t\tif !mustLoad {\n\t\t\tif _, err := os.Stat(modulePath); err == nil {\n\t\t\t\tmustLoad = true\n\t\t\t}\n\t\t}\n\n\t\t// Load or return default\n\t\tif mustLoad {\n\t\t\tchildThread := &starlark.Thread{Name: \"exec \" + module, Load: thread.Load}\n\t\t\t// Cheating for the sake of testing:\n\t\t\t// propagate starlarktest's Reporter key, otherwise testing\n\t\t\t// the load function may cause panic in starlarktest code.\n\t\t\tconst testReporterKey = \"Reporter\"\n\t\t\tif v := thread.Local(testReporterKey); v != nil {\n\t\t\t\tchildThread.SetLocal(testReporterKey, v)\n\t\t\t}\n\n\t\t\tchildThread.SetLocal(callerDirKey, filepath.Dir(modulePath))\n\t\t\tglobals, err := starlark.ExecFile(childThread, modulePath, nil, builtins)\n\t\t\te = &modentry{globals, err}\n\t\t} else {\n\t\t\te = &modentry{starlark.StringDict{defaultSymbol: starlark.None}, nil}\n\t\t}\n\n\t\t// Update the cache.\n\t\tmoduleCache[modulePath] = e\n\t}\n\treturn e.globals, e.err\n}",
  "func (self *Build) load(ctx core.Context, moduleLabel core.Label) (starlark.StringDict, error) {\n\t// Only .bzl files can ever be loaded.\n\tif filepath.Ext(string(moduleLabel.Target)) != \".bzl\" {\n\t\treturn nil, fmt.Errorf(\"%v: load not allowed: %v is not a .bzl file\", ctx.Label(),\n\t\t\tmoduleLabel)\n\t}\n\tmoduleLabelString := moduleLabel.String()\n\n\te, ok := self.loadCache[moduleLabelString]\n\tif ok {\n\t\tif e == nil {\n\t\t\treturn nil, fmt.Errorf(\"%v: load of %v failed: cycle in load graph\",\n\t\t\t\tctx.Label(), moduleLabel)\n\t\t}\n\t\treturn e.globals, e.err\n\t}\n\n\tsourceData, err := self.sourceFileReader(moduleLabel)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%v: load of %v failed: read failed: %v\", ctx.Label(),\n\t\t\tmoduleLabel, err)\n\t}\n\n\tself.loadCache[moduleLabelString] = nil\n\n\tthread := createThread(self, moduleLabel, core.FileTypeBzl)\n\tglobals, err := starlark.ExecFile(thread, moduleLabelString, sourceData,\n\t\tbuiltins.InitialGlobals(core.FileTypeBzl))\n\tself.loadCache[moduleLabelString] = &loadCacheEntry{globals, err}\n\treturn globals, err\n}",
  "func makeLoad(workingDir string) func(thread *starlark.Thread, module string) (starlark.StringDict, error) {\n\tf := repl.MakeLoad()\n\n\treturn func(thread *starlark.Thread, module string) (starlark.StringDict, error) {\n\t\t// To ensure config generation is hermetic we require that all loads specify a module\n\t\t// with an explicit relative path.\n\t\tif !isExplicitRelativePath(module) {\n\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\"cannot load '%s', path to module must be relative (ie begin with ./ or ../)\",\n\t\t\t\tmodule,\n\t\t\t)\n\t\t}\n\n\t\tpath, err := filepath.Abs(filepath.Join(workingDir, module))\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to get absolute path to %s\", module)\n\t\t}\n\n\t\treturn f(thread, path)\n\t}\n}",
  "func MakeLoad() func(thread *starlark.Thread, module string) (starlark.StringDict, error) {\n\ttype entry struct {\n\t\tglobals starlark.StringDict\n\t\terr     error\n\t}\n\n\tvar cache = make(map[string]*entry)\n\n\treturn func(thread *starlark.Thread, module string) (starlark.StringDict, error) {\n\t\te, ok := cache[module]\n\t\tif e == nil {\n\t\t\tif ok {\n\t\t\t\t// request for package whose loading is in progress\n\t\t\t\treturn nil, fmt.Errorf(\"cycle in load graph\")\n\t\t\t}\n\n\t\t\t// Add a placeholder to indicate \"load in progress\".\n\t\t\tcache[module] = nil\n\n\t\t\t// Load it.\n\t\t\tthread := &starlark.Thread{Name: \"exec \" + module, Load: thread.Load}\n\t\t\tglobals, err := starlark.ExecFile(thread, module, nil, nil)\n\t\t\te = &entry{globals, err}\n\n\t\t\t// Update the cache.\n\t\t\tcache[module] = e\n\t\t}\n\t\treturn e.globals, e.err\n\t}\n}",
  "func (sc Scripting) FunctionLoad(ctx context.Context, lib, script string) error {\n\t// FUNCTION LOAD engine-name library-name [REPLACE] [DESCRIPTION library-description] function-code\n\treturn nil\n}",
  "func Execute(functions []*Function, main Node, globals map[string]Node, w io.Writer, r io.Reader) (map[string]int, map[string]int) {\n\tfindFunction := func(name string) *Function {\n\t\tfor _, function := range functions {\n\t\t\tif function.Name == name {\n\t\t\t\treturn function\n\t\t\t}\n\t\t}\n\n\t\tlog.Fatal(\"function not found\")\n\t\treturn nil\n\t}\n\n\tevaluated := map[string]int{}\n\tcalled := map[string]int{}\n\n\tglobalValues := map[string]interface{}{}\n\n\tvar evaluate func(Node, map[string]interface{}) interface{}\n\tevaluate = func(node Node, values map[string]interface{}) interface{} {\n\t\t{\n\t\t\top := reflect.TypeOf(node).String()\n\t\t\top = op[strings.LastIndex(op, \".\")+1:]\n\t\t\tevaluated[op]++\n\t\t}\n\n\t\tgetValue := func(name string) interface{} {\n\t\t\tif v, ok := values[name]; ok {\n\t\t\t\treturn v\n\t\t\t}\n\t\t\tif v, ok := globalValues[name]; ok {\n\t\t\t\treturn v\n\t\t\t}\n\t\t\tpanic(fmt.Sprintf(\"variable not found: %s\", name))\n\t\t}\n\n\t\tswitch n := node.(type) {\n\t\tcase *Variable:\n\t\t\treturn getValue(n.Name)\n\t\tcase *Unit:\n\t\t\treturn nil\n\t\tcase *Int:\n\t\t\treturn node.(*Int).Value\n\t\tcase *Bool:\n\t\t\treturn node.(*Bool).Value\n\t\tcase *Float:\n\t\t\treturn node.(*Float).Value\n\t\tcase *Add:\n\t\t\treturn getValue(n.Left).(int32) + getValue(n.Right).(int32)\n\t\tcase *AddImmediate:\n\t\t\treturn getValue(n.Left).(int32) + n.Right\n\t\tcase *Sub:\n\t\t\treturn getValue(n.Left).(int32) - getValue(n.Right).(int32)\n\t\tcase *SubFromZero:\n\t\t\treturn -getValue(n.Inner).(int32)\n\t\tcase *FloatAdd:\n\t\t\treturn getValue(n.Left).(float32) + getValue(n.Right).(float32)\n\t\tcase *FloatSub:\n\t\t\treturn getValue(n.Left).(float32) - getValue(n.Right).(float32)\n\t\tcase *FloatSubFromZero:\n\t\t\treturn -getValue(n.Inner).(float32)\n\t\tcase *FloatDiv:\n\t\t\treturn getValue(n.Left).(float32) / getValue(n.Right).(float32)\n\t\tcase *FloatMul:\n\t\t\treturn getValue(n.Left).(float32) * getValue(n.Right).(float32)\n\t\tcase *Not:\n\t\t\treturn !getValue(n.Inner).(bool)\n\t\tcase *Equal:\n\t\t\tif getValue(n.Left) == getValue(n.Right) {\n\t\t\t\treturn true\n\t\t\t} else {\n\t\t\t\treturn false\n\t\t\t}\n\t\tcase *EqualZero:\n\t\t\treturn getValue(n.Inner) == int32(0) || getValue(n.Inner) == float32(0)\n\t\tcase *LessThan:\n\t\t\treturn getValue(n.Left).(int32) < getValue(n.Right).(int32)\n\t\tcase *LessThanFloat:\n\t\t\treturn getValue(n.Left).(float32) < getValue(n.Right).(float32)\n\t\tcase *LessThanZero:\n\t\t\treturn getValue(n.Inner).(int32) < 0\n\t\tcase *LessThanZeroFloat:\n\t\t\treturn getValue(n.Inner).(float32) < 0\n\t\tcase *GreaterThanZero:\n\t\t\treturn getValue(n.Inner).(int32) > 0\n\t\tcase *GreaterThanZeroFloat:\n\t\t\treturn getValue(n.Inner).(float32) > 0\n\t\tcase *IfEqual:\n\t\t\tif getValue(n.Left) == getValue(n.Right) {\n\t\t\t\treturn evaluate(n.True, values)\n\t\t\t} else {\n\t\t\t\treturn evaluate(n.False, values)\n\t\t\t}\n\t\tcase *IfEqualZero:\n\t\t\tif value, ok := getValue(n.Inner).(int32); ok {\n\t\t\t\tif value == 0 {\n\t\t\t\t\treturn evaluate(n.True, values)\n\t\t\t\t} else {\n\t\t\t\t\treturn evaluate(n.False, values)\n\t\t\t\t}\n\t\t\t} else if value, ok := getValue(n.Inner).(float32); ok {\n\t\t\t\tif value == 0 {\n\t\t\t\t\treturn evaluate(n.True, values)\n\t\t\t\t} else {\n\t\t\t\t\treturn evaluate(n.False, values)\n\t\t\t\t}\n\t\t\t}\n\t\tcase *IfEqualTrue:\n\t\t\tif getValue(n.Inner).(bool) {\n\t\t\t\treturn evaluate(n.True, values)\n\t\t\t} else {\n\t\t\t\treturn evaluate(n.False, 
values)\n\t\t\t}\n\t\tcase *IfLessThan:\n\t\t\tif getValue(n.Left).(int32) < getValue(n.Right).(int32) {\n\t\t\t\treturn evaluate(n.True, values)\n\t\t\t}\n\t\t\treturn evaluate(n.False, values)\n\t\tcase *IfLessThanFloat:\n\t\t\tif getValue(n.Left).(float32) < getValue(n.Right).(float32) {\n\t\t\t\treturn evaluate(n.True, values)\n\t\t\t}\n\t\t\treturn evaluate(n.False, values)\n\t\tcase *IfLessThanZero:\n\t\t\tif getValue(n.Inner).(int32) < 0 {\n\t\t\t\treturn evaluate(n.True, values)\n\t\t\t} else {\n\t\t\t\treturn evaluate(n.False, values)\n\t\t\t}\n\t\tcase *IfLessThanZeroFloat:\n\t\t\tif getValue(n.Inner).(float32) < 0 {\n\t\t\t\treturn evaluate(n.True, values)\n\t\t\t} else {\n\t\t\t\treturn evaluate(n.False, values)\n\t\t\t}\n\t\tcase *Assignment:\n\t\t\tvalues[n.Name] = evaluate(n.Value, values)\n\t\t\tret := evaluate(n.Next, values)\n\t\t\tdelete(values, n.Name)\n\t\t\treturn ret\n\t\tcase *Application:\n\t\t\tf := findFunction(n.Function)\n\t\t\tcalled[f.Name]++\n\t\t\tupdated := map[string]interface{}{}\n\t\t\tfor i, arg := range f.Args {\n\t\t\t\tupdated[arg] = getValue(n.Args[i])\n\t\t\t}\n\t\t\treturn evaluate(f.Body, updated)\n\t\tcase *Tuple:\n\t\t\ttuple := []interface{}{}\n\t\t\tfor _, element := range n.Elements {\n\t\t\t\ttuple = append(tuple, getValue(element))\n\t\t\t}\n\t\t\treturn tuple\n\t\tcase *ArrayCreate:\n\t\t\tlength := getValue(n.Length).(int32)\n\t\t\tvalue := getValue(n.Value)\n\t\t\tarray := []interface{}{}\n\t\t\tfor i := 0; i < int(length); i++ {\n\t\t\t\tarray = append(array, value)\n\t\t\t}\n\t\t\treturn array\n\t\tcase *ArrayCreateImmediate:\n\t\t\tvalue := getValue(n.Value)\n\t\t\tarray := []interface{}{}\n\t\t\tfor i := 0; i < int(n.Length); i++ {\n\t\t\t\tarray = append(array, value)\n\t\t\t}\n\t\t\treturn array\n\t\tcase *ArrayGet:\n\t\t\tarray := getValue(n.Array).([]interface{})\n\t\t\tindex := getValue(n.Index).(int32)\n\t\t\treturn array[index]\n\t\tcase *ArrayGetImmediate:\n\t\t\tarray := getValue(n.Array).([]interface{})\n\t\t\treturn array[n.Index]\n\t\tcase *ArrayPut:\n\t\t\tarray := getValue(n.Array).([]interface{})\n\t\t\tindex := getValue(n.Index).(int32)\n\t\t\tvalue := getValue(n.Value)\n\t\t\tarray[index] = value\n\t\t\treturn nil\n\t\tcase *ArrayPutImmediate:\n\t\t\tarray := getValue(n.Array).([]interface{})\n\t\t\tvalue := getValue(n.Value)\n\t\t\tarray[n.Index] = value\n\t\t\treturn nil\n\t\tcase *ReadInt:\n\t\t\tvar value int32\n\t\t\tfmt.Fscan(r, &value)\n\t\t\treturn value\n\t\tcase *ReadFloat:\n\t\t\tvar value float32\n\t\t\tfmt.Fscan(r, &value)\n\t\t\treturn value\n\t\tcase *WriteByte:\n\t\t\tw.Write([]byte{byte(getValue(n.Arg).(int32) % 256)})\n\t\t\treturn nil\n\t\tcase *IntToFloat:\n\t\t\treturn float32(getValue(n.Arg).(int32))\n\t\tcase *FloatToInt:\n\t\t\treturn int32(math.Round(float64(getValue(n.Arg).(float32))))\n\t\tcase *Sqrt:\n\t\t\treturn float32(math.Sqrt(float64(getValue(n.Arg).(float32))))\n\t\tcase *TupleGet:\n\t\t\ttuple := getValue(n.Tuple).([]interface{})\n\t\t\treturn tuple[n.Index]\n\t\tdefault:\n\t\t\tlog.Fatal(\"invalid ir node\")\n\t\t}\n\n\t\treturn nil\n\t}\n\n\t{\n\t\tdefined := stringset.New()\n\t\tfor len(defined) < len(globals) {\n\t\t\tfor name, node := range globals {\n\t\t\t\tif !defined.Has(name) && len(node.FreeVariables(defined)) == 0 {\n\t\t\t\t\tglobalValues[name] = evaluate(node, globalValues)\n\t\t\t\t\tdefined.Add(name)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tevaluate(main, map[string]interface{}{})\n\n\treturn evaluated, called\n}",
  "func Loader(thread *skylark.Thread, module string) (dict skylark.StringDict, err error) {\n\tswitch module {\n\tcase time.ModuleName:\n\t\treturn time.LoadModule()\n\tcase http.ModuleName:\n\t\treturn http.LoadModule()\n\tcase xlsx.ModuleName:\n\t\treturn xlsx.LoadModule()\n\tcase html.ModuleName:\n\t\treturn html.LoadModule()\n\t}\n\n\treturn nil, fmt.Errorf(\"invalid module\")\n}",
  "func (rt *Runtime) LoadModule(name string) (string, error) {\n\trsp, err := rt.LCClient.LoadModule(context.Background(), &pb.LoadModuleRequest{Module: name})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn rsp.Address, nil\n}",
  "func LoadModule(module string) error {\n\tif shared.PathExists(fmt.Sprintf(\"/sys/module/%s\", module)) {\n\t\treturn nil\n\t}\n\n\t_, err := shared.RunCommand(\"modprobe\", \"-b\", module)\n\treturn err\n}",
  "func execEval(_ int, p *gop.Context) {\n\targs := p.GetArgs(4)\n\tret, ret1 := types.Eval(args[0].(*token.FileSet), args[1].(*types.Package), token.Pos(args[2].(int)), args[3].(string))\n\tp.Ret(4, ret, ret1)\n}",
  "func LoadModule() (starlark.StringDict, error) {\n\tonce.Do(func() {\n\t\tbsoupModule = starlark.StringDict{\n\t\t\t\"bsoup\": &starlarkstruct.Module{\n\t\t\t\tName: \"bsoup\",\n\t\t\t\tMembers: starlark.StringDict{\n\t\t\t\t\t\"parseHtml\": starlark.NewBuiltin(\"parseHtml\", ParseHTML),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t})\n\treturn bsoupModule, nil\n}",
  "func loadPlugin(module *python.PyObject) (*python.PyObject, *python.PyObject) {\n\tmapf := module.GetAttrString(\"map\")\n\tdefer mapf.DecRef()\n\treducef := module.GetAttrString(\"reduce\")\n\tdefer reducef.DecRef()\n\treturn mapf, reducef\n\n}",
  "func (r *Runtime) Load(wasmBytes []byte) (*Module, error) {\n\tresult := C.m3Err_none\n\tbytes := C.CBytes(wasmBytes)\n\tlength := len(wasmBytes)\n\tvar module C.IM3Module\n\tresult = C.m3_ParseModule(\n\t\tr.cfg.Environment.Ptr(),\n\t\t&module,\n\t\t(*C.uchar)(bytes),\n\t\tC.uint(length),\n\t)\n\tif result != nil {\n\t\treturn nil, errParseModule\n\t}\n\tif module.memoryImported {\n\t\tmodule.memoryImported = false\n\t}\n\tresult = C.m3_LoadModule(\n\t\tr.Ptr(),\n\t\tmodule,\n\t)\n\tif result != nil {\n\t\treturn nil, errLoadModule\n\t}\n\tresult = C.m3_LinkSpecTest(r.Ptr().modules)\n\tif result != nil {\n\t\treturn nil, errors.New(\"LinkSpecTest failed\")\n\t}\n\t// if r.cfg.EnableWASI {\n\t// \tC.m3_LinkWASI(r.Ptr().modules)\n\t// }\n\tm := NewModule((ModuleT)(module))\n\treturn m, nil\n}",
  "func (c *Client) LoadModule(name string, argument string) (index uint32, err error) {\n\tvar idx uint32\n\tr, err := c.request(commandLoadModule,\n\t\tstringTag, []byte(name), byte(0), stringTag, []byte(argument), byte(0))\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\terr = bread(r, uint32Tag, &idx)\n\treturn idx, err\n}",
  "func (l *Loader) Load(module string) (syms *Struct, err error) {\n\tdefer func() {\n\t\terr = errors.Annotate(err, \"in %s\", module).Err()\n\t}()\n\n\tl.init()\n\tif !l.loading.Add(module) {\n\t\treturn nil, errors.New(\"recursive dependency\")\n\t}\n\tdefer l.loading.Del(module)\n\n\t// Already processed it?\n\tif syms, ok := l.symbols[module]; ok {\n\t\treturn syms, nil\n\t}\n\n\t// Load and parse the source code into a distilled AST.\n\tsrc, err := l.Source(module)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tl.sources[module] = src\n\tmod, err := ast.ParseModule(module, src, func(s string) (string, error) {\n\t\treturn l.Normalize(module, s)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Recursively resolve all references in 'mod' to their concrete definitions\n\t// (perhaps in other modules). This returns a struct with a list of all\n\t// symbols defined in the module.\n\tvar top *Struct\n\tif top, err = l.resolveRefs(&mod.Namespace, nil); err != nil {\n\t\treturn nil, err\n\t}\n\tl.symbols[module] = top\n\treturn top, nil\n}",
  "func(r *Runtime) Load(wasmBytes []byte) (*Module, error) {\n\tresult := C.m3Err_none\n\tbytes := C.CBytes(wasmBytes)\n\tlength := len(wasmBytes)\n\tvar module C.IM3Module\n\tresult = C.m3_ParseModule(\n\t\tr.cfg.Environment.Ptr(),\n\t\t&module,\n\t\t(*C.uchar)(bytes),\n\t\tC.uint(length),\n\t)\n\tif result != nil {\n\t\treturn nil, errParseModule\n\t}\n\tresult = C.m3_LoadModule(\n\t\tr.Ptr(),\n\t\tmodule,\n\t)\n\tif result != nil {\n\t\treturn nil, errLoadModule\n\t}\n\tresult = C.m3_LinkSpecTest(r.Ptr().modules)\n\tif result != nil {\n\t\treturn nil, errors.New(\"LinkSpecTest failed\")\n\t}\n\tif r.cfg.EnableWASI {\n\t\tC.m3_LinkWASI(r.Ptr().modules)\n\t}\n\tm := NewModule((ModuleT)(module))\n\treturn m, nil\n}",
  "func Loader(state *lua.LState) int {\n\tmod := state.SetFuncs(state.NewTable(), map[string]lua.LGFunction{\n\t\t\"compile\": func(L *lua.LState) int {\n\t\t\tcode := L.CheckString(1)\n\n\t\t\tluaCode, err := Compile(L, code)\n\t\t\tif err != nil {\n\t\t\t\tstate.Push(lua.LNil)\n\t\t\t\tstate.Push(lua.LString(err.Error()))\n\n\t\t\t\treturn 2\n\t\t\t}\n\n\t\t\tL.Push(lua.LString(luaCode))\n\t\t\treturn 1\n\t\t},\n\t})\n\n\t// returns the module\n\tstate.Push(mod)\n\treturn 1\n}",
  "func (m luaModule) load(L *lua.LState) {\n\tL.Push(L.NewFunction(m.loader))\n\tL.Push(lua.LString(m.name))\n\tL.Call(1, 0)\n\n\tif m.name == lua.BaseLibName {\n\t\tL.SetGlobal(\"print\", L.NewFunction(printToLog))\n\t\tL.SetGlobal(\"sleep\", L.NewFunction(sleep))\n\t}\n\n\tif len(m.disabledSymbols) > 0 {\n\t\tst := m.table(L)\n\t\tfor _, name := range m.disabledSymbols {\n\t\t\tst.RawSetString(name, lua.LNil)\n\t\t}\n\t}\n}",
  "func Load(name string) (Module, error) {\n\tvar mod C.CUmodule\n\tif err := result(C.cuModuleLoad(&mod, C.CString(name))); err != nil {\n\t\treturn 0, err\n\t}\n\treturn makeModule(mod), nil\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	NewHelm creates a new helm. 
 | 
	func NewHelm(namespace, repoFile, repoCache string) (*Helm, error) {
	configFlags := genericclioptions.NewConfigFlags(true)
	configFlags.Namespace = commonutil.String(namespace)
	kubeClient := kube.New(configFlags)
	cfg := &action.Configuration{
		KubeClient: kubeClient,
		Log: func(s string, i ...interface{}) {
			logrus.Debugf(s, i...)
		},
		RESTClientGetter: configFlags,
	}
	helmDriver := ""
	settings := cli.New()
	settings.Debug = true
	// set the unexported namespace field on cli.EnvSettings via unsafe:
	// it is the struct's first field, so its address equals the struct's.
	namespacePtr := (*string)(unsafe.Pointer(settings))
	*namespacePtr = namespace
	settings.RepositoryConfig = repoFile
	settings.RepositoryCache = repoCache
	// initializes the action configuration
	if err := cfg.Init(settings.RESTClientGetter(), settings.Namespace(), helmDriver, func(format string, v ...interface{}) {
		logrus.Debugf(format, v...)
	}); err != nil {
		return nil, errors.Wrap(err, "init config")
	}
	return &Helm{
		cfg:       cfg,
		settings:  settings,
		namespace: namespace,
		repoFile:  repoFile,
		repoCache: repoCache,
	}, nil
} 
 | 
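A hedged construction sketch; the namespace and on-disk paths below are placeholders, not values from the source. The unsafe write in the constructor exists, presumably, because cli.EnvSettings keeps its namespace field unexported (it is normally populated from flags or env), so callers of NewHelm never set it themselves.

func exampleHelm() (*Helm, error) {
	return NewHelm(
		"default",                         // namespace
		"/var/lib/helm/repositories.yaml", // hypothetical repo file
		"/var/lib/helm/cache",             // hypothetical repo cache dir
	)
}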
	[
  "func newHelms(cc *AppsV1Client, namespace string) *helm {\n\tif len(namespace) == 0 {\n\t\tnamespace = defaultNamespace\n\t}\n\n\tclient := cc.HelmClient()\n\treturn &helm{\n\t\thelmClient: client.GetClient(),\n\t\tns:         namespace,\n\t}\n}",
  "func (c *FakeHelms) Create(helm *v1alpha1.Helm) (result *v1alpha1.Helm, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewCreateAction(helmsResource, c.ns, helm), &v1alpha1.Helm{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.Helm), err\n}",
  "func (f *factory) CreateHelm(verbose bool,\n\thelmBinary string,\n\tnoTiller bool,\n\thelmTemplate bool) helm.Helmer {\n\n\tif helmBinary == \"\" {\n\t\thelmBinary = \"helm\"\n\t}\n\tfeatureFlag := \"none\"\n\tif helmTemplate {\n\t\tfeatureFlag = \"template-mode\"\n\t} else if noTiller {\n\t\tfeatureFlag = \"no-tiller-server\"\n\t}\n\tif verbose {\n\t\tfmt.Sprintf(\"Using helmBinary %s with feature flag: %s\", util.ColorInfo(helmBinary), util.ColorInfo(featureFlag))\n\t}\n\thelmCLI := helm.NewHelmCLI(helmBinary, helm.V2, \"\", verbose)\n\tvar h helm.Helmer = helmCLI\n\tif helmTemplate {\n\t\tkubeClient, ns, _ := f.CreateKubeClient()\n\t\th = helm.NewHelmTemplate(helmCLI, \"\", kubeClient, ns)\n\t} else {\n\t\th = helmCLI\n\t}\n\tif noTiller && !helmTemplate {\n\t\th.SetHost(helm.GetTillerAddress())\n\t\thelm.StartLocalTillerIfNotRunning()\n\t}\n\treturn h\n}",
  "func New(config Config) (*HelmInstaller, error) {\n\t// Dependencies.\n\tif config.Configurers == nil {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"configurers must not be empty\")\n\t}\n\tif config.FileSystem == nil {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"file system must not be empty\")\n\t}\n\tif config.Logger == nil {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"logger must not be empty\")\n\t}\n\n\t// Settings.\n\tif config.HelmBinaryPath == \"\" {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"helm binary path must not be empty\")\n\t}\n\tif config.Organisation == \"\" {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"organisation must not be empty\")\n\t}\n\tif config.Password == \"\" {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"password must not be empty\")\n\t}\n\tif config.Registry == \"\" {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"registry must not be empty\")\n\t}\n\tif config.Username == \"\" {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"username must not be empty\")\n\t}\n\n\tif _, err := os.Stat(config.HelmBinaryPath); os.IsNotExist(err) {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"helm binary does not exist\")\n\t}\n\n\tinstaller := &HelmInstaller{\n\t\t// Dependencies.\n\t\tconfigurers: config.Configurers,\n\t\tfileSystem:  config.FileSystem,\n\t\tlogger:      config.Logger,\n\n\t\t// Settings.\n\t\thelmBinaryPath: config.HelmBinaryPath,\n\t\torganisation:   config.Organisation,\n\t\tpassword:       config.Password,\n\t\tregistry:       config.Registry,\n\t\tusername:       config.Username,\n\t}\n\n\tif err := installer.login(); err != nil {\n\t\treturn nil, microerror.Mask(err)\n\t}\n\n\treturn installer, nil\n}",
  "func (f *HelmClientFactory) NewHelmClient(namespace string, c client.Client, log logr.Logger) (*HelmClient, error) {\n\tf.Lock()\n\tdefer f.Unlock()\n\n\tf.cleanup()\n\n\tcfg, ok := f.configurations[namespace]\n\tif !ok {\n\t\tvar err error\n\t\tcfg, err = f.getActionConfig(namespace)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tf.configurations[namespace] = cfg\n\t}\n\tf.configurationsLastUsedTimes[namespace] = time.Now()\n\treturn &HelmClient{cfg: cfg, namespace: namespace, c: c, log: log.WithValues(\"helm-client\", namespace), statusFunc: getHelmStatus}, nil\n}",
  "func CreateHelmPlugin(version string) jenkinsv1.Plugin {\n\tbinaries := CreateBinaries(func(p Platform) string {\n\t\treturn fmt.Sprintf(\"https://get.helm.sh/helm-v%s-%s-%s.%s\", version, strings.ToLower(p.Goos), strings.ToLower(p.Goarch), p.Extension())\n\t})\n\n\tplugin := jenkinsv1.Plugin{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: HelmPluginName,\n\t\t},\n\t\tSpec: jenkinsv1.PluginSpec{\n\t\t\tSubCommand:  \"helm\",\n\t\t\tBinaries:    binaries,\n\t\t\tDescription: \"helm 3 binary\",\n\t\t\tName:        HelmPluginName,\n\t\t\tVersion:     version,\n\t\t},\n\t}\n\treturn plugin\n}",
  "func (in *Helm) DeepCopy() *Helm {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Helm)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
  "func NewHelmConfigurator() *helmConfigurator {\n\treturn &helmConfigurator{}\n}",
  "func NewHelmAddon(provider clustermanager.ClusterProvider, communicator clustermanager.NodeCommunicator) ClusterAddon {\n\tmasterNode, _ := provider.GetMasterNode()\n\treturn HelmAddon{masterNode: masterNode, communicator: communicator}\n}",
  "func helmUpgrade() error {\n\tprojectName := projectConfig.GetString(\"project_name\")\n\tenvironment := projectConfig.GetString(\"environment\")\n\n\tcolor.Cyan(\"Installing project chart via helm...\")\n\n\tvar waitFlag string\n\tif projectConfig.GetBool(\"wait\") {\n\t\t// Intended for CI, wait for updated infrastructure to apply fully so subsequent commands (drush) run against new infra\n\t\twaitFlag = \"--wait\"\n\t\tcolor.Cyan(\"Using wait, command will take a moment...\")\n\t}\n\n\tvar helmValues HelmValues\n\n\thelmValues.appendValue(\"general.project_name\", projectName, true);\n\thelmValues.appendValue(\"general.environment\", environment, true);\n\n\t// These come from environment vars\n\t// TODO - Blackfire credential management? Currently deploying to both environments - MEA\n\thelmValues.appendProjectValue(\"applications.blackfire.server_id\", \"BLACKFIRE_SERVER_ID\", false)\n\thelmValues.appendProjectValue(\"applications.blackfire.server_token\", \"BLACKFIRE_SERVER_TOKEN\", false)\n\thelmValues.appendProjectValue(\"applications.blackfire.client_id\", \"BLACKFIRE_CLIENT_ID\", false)\n\thelmValues.appendProjectValue(\"applications.blackfire.client_token\", \"BLACKFIRE_CLIENT_TOKEN\", false)\n\n\t// Derived values\n\tif environment == \"local\" {\n\t\t// Using https://github.com/mstrzele/minikube-nfs\n\t\t// Creates a persistent volume with contents of /Users mounted from an nfs share from host\n\t\t// So rather than using /Users directly, grab the path within the project\n\t\t// Check the helm chart mounts\n\t\tprojectPath, _ := util.ExecCmdChain(\"printf ${PWD#/Users/}\")\n\t\thelmValues.appendValue(\"applications.drupal.local.project_root\", projectPath, true)\n\t\tlocalIp, _ := util.ExecCmdChain(\"ifconfig | grep \\\"inet \\\" | grep -v 127.0.0.1 | awk '{print $2}' | sed -n 1p\")\n\t\thelmValues.appendValue(\"applications.xdebug.host_ip\", localIp, true)\n\t} else {\n\t\t// Obtain the git commit from env vars if present in CircleCI\n\t\t// TODO - Make this a required argument rather than inferred environment variable\n\t\tif circleSha := projectConfig.GetString(\"CIRCLE_SHA1\"); len(circleSha) > 0 {\n\t\t\thelmValues.appendValue(\"general.git_commit\", circleSha, false)\n\t\t} else {\n\t\t\tif approve := util.GetApproval(\"No CIRCLE_SHA1 environment variable set (should be git sha1 hash of commit), use 'latest' tag for application container?\"); !approve {\n\t\t\t\tcolor.Red(\"User exited.  Please run 'export CIRCLE_SHA1=' adding your targetted git commit hash, or run again and agree to use 'latest.'\")\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\t}\n\n\tchartConfigPath := fmt.Sprintf(\"charts.%s\", environment)\n\thelmChartPath := projectConfig.GetString(chartConfigPath)\n\n\tcommand := fmt.Sprintf(\"helm upgrade --install --values %s %s %s-%s %s --set %s\",\n\t\tenvConfig.ConfigFileUsed(),\n\t\twaitFlag,\n\t\tprojectName,\n\t\tenvironment,\n\t\thelmChartPath,\n\t\tstrings.Join(helmValues.values, \",\"))\n\tout, err := util.ExecCmdChainCombinedOut(command)\n\tif (err != nil) {\n\t\tcolor.Red(out)\n\t\tif (projectConfig.GetBool(\"rollback-on-failure\")) {\n\t\t\tcolor.Yellow(\"Your helm upgrade resulted in a failure, attempting to rollback...\")\n\t\t\trollbackCmd.Run(nil, nil)\n\t\t\tcolor.Yellow(\"Successfully rolled back attempted update, exiting with error.  You will want to correct this.\")\n\t\t\tos.Exit(1)\n\t\t} else {\n\t\t\tos.Exit(1)\n\t\t}\n\t} else if debugMode := viper.GetString(\"debug\"); len(debugMode) > 0 {\n\t\tcolor.Green(out)\n\t}\n\treturn err\n}",
  "func New(clientSet *kubernetes.Clientset) (renderer.Renderer, error) {\n\tif clientSet == nil {\n\t\tcfg, err := config.GetConfig()\n\t\tif err != nil {\n\t\t\treturn nil, errors.NewRendererError(\"helm\", \"unable to set up client config\", err)\n\t\t}\n\n\t\tclientSet, err = kubernetes.NewForConfig(cfg)\n\t\tif err != nil {\n\t\t\treturn nil, errors.NewRendererError(\"helm\", \"failed to create kubernetes client\", err)\n\t\t}\n\t}\n\tsv, err := clientSet.ServerVersion()\n\n\tif err != nil {\n\t\treturn nil, errors.NewRendererError(\"helm\", \"failed to get kubernetes server version\", err)\n\t}\n\treturn &helmRenderer{\n\t\tclientSet:    clientSet,\n\t\trenderer:     engine.New(),\n\t\tcapabilities: &chartutil.Capabilities{KubeVersion: sv},\n\t}, nil\n}",
  "func HelmTemplate() *jsonnet.NativeFunction {\n\treturn &jsonnet.NativeFunction{\n\t\tName: \"helmTemplate\",\n\t\t// Lines up with `helm template [NAME] [CHART] [flags]` except 'conf' is a bit more elaborate\n\t\tParams: ast.Identifiers{\"name\", \"chart\", \"conf\"},\n\t\tFunc: func(data []interface{}) (interface{}, error) {\n\t\t\tname, chart := data[0].(string), data[1].(string)\n\n\t\t\tc, err := json.Marshal(data[2])\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\tvar conf HelmConf\n\t\t\tif err := json.Unmarshal(c, &conf); err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\t// the basic arguments to make this work\n\t\t\targs := []string{\n\t\t\t\t\"template\",\n\t\t\t\tname,\n\t\t\t\tchart,\n\t\t\t}\n\n\t\t\tconfArgs, tempFiles, err := confToArgs(conf)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", nil\n\t\t\t}\n\t\t\tfor _, file := range tempFiles {\n\t\t\t\tdefer os.Remove(file)\n\t\t\t}\n\t\t\tif confArgs != nil {\n\t\t\t\targs = append(args, confArgs...)\n\t\t\t}\n\n\t\t\thelmBinary := \"helm\"\n\t\t\tif hc := os.Getenv(\"TANKA_HELM_PATH\"); hc != \"\" {\n\t\t\t\thelmBinary = hc\n\t\t\t}\n\n\t\t\t// convert the values map into a yaml file\n\t\t\tcmd := exec.Command(helmBinary, args...)\n\t\t\tbuf := bytes.Buffer{}\n\t\t\tcmd.Stdout = &buf\n\t\t\tcmd.Stderr = os.Stderr\n\t\t\tif err := cmd.Run(); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"running 'helm %s': %w\", strings.Join(args, \" \"), err)\n\t\t\t}\n\n\t\t\treturn parseYamlToMap(buf.Bytes())\n\t\t},\n\t}\n}",
  "func NewHelmClient() data.Client {\n\tconf, _ := config.GetConfig()\n\treturn &helmClient{\n\t\tportForward: conf.TillerPortForward,\n\t}\n}",
  "func NewMockHelmClient(ctrl *gomock.Controller) *MockHelmClient {\n\tmock := &MockHelmClient{ctrl: ctrl}\n\tmock.recorder = &MockHelmClientMockRecorder{mock}\n\treturn mock\n}",
  "func (a RepositoryManagementAPI) CreateHelmHosted(r HelmHostedRepository) error {\n\tpath := fmt.Sprintf(\"beta/repositories/helm/hosted\")\n\n\tb := new(bytes.Buffer)\n\tjson.NewEncoder(b).Encode(r)\n\n\t_, err := a.client.sendRequest(http.MethodPost, path, b, nil)\n\treturn err\n}",
  "func Helm(\n\tclient *api.Client,\n\tprojectID uint,\n) (uint, error) {\n\t// if project ID is 0, ask the user to set the project ID or create a project\n\tif projectID == 0 {\n\t\treturn 0, fmt.Errorf(\"no project set, please run porter project set [id]\")\n\t}\n\n\t// query for helm repo name\n\thelmName, err := utils.PromptPlaintext(fmt.Sprintf(`Give this Helm repository a name: `))\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\trepoURL, err := utils.PromptPlaintext(fmt.Sprintf(`Provide the Helm repository URL: `))\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tuserResp, err := utils.PromptPlaintext(\n\t\tfmt.Sprintf(`Does this endpoint require a username/password to authenticate? %s `,\n\t\t\tcolor.New(color.FgCyan).Sprintf(\"[y/n]\"),\n\t\t),\n\t)\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tusername := \"\"\n\tpassword := \"\"\n\n\tif userResp := strings.ToLower(userResp); userResp == \"y\" || userResp == \"yes\" {\n\t\tusername, err = utils.PromptPlaintext(fmt.Sprintf(`Username: `))\n\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\n\t\tpassword, err = utils.PromptPasswordWithConfirmation()\n\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\t// create the basic auth integration\n\tintegration, err := client.CreateBasicAuthIntegration(\n\t\tcontext.Background(),\n\t\tprojectID,\n\t\t&api.CreateBasicAuthIntegrationRequest{\n\t\t\tUsername: username,\n\t\t\tPassword: password,\n\t\t},\n\t)\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tcolor.New(color.FgGreen).Printf(\"created basic auth integration with id %d\\n\", integration.ID)\n\n\t// create the helm repo\n\thr, err := client.CreateHelmRepo(\n\t\tcontext.Background(),\n\t\tprojectID,\n\t\t&api.CreateHelmRepoRequest{\n\t\t\tName:               helmName,\n\t\t\tRepoURL:            repoURL,\n\t\t\tBasicIntegrationID: integration.ID,\n\t\t},\n\t)\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tcolor.New(color.FgGreen).Printf(\"created helm repo with id %d and name %s\\n\", hr.ID, hr.Name)\n\n\treturn hr.ID, nil\n}",
  "func InitHelmOnCluster(c *gin.Context) {\n\tlog := logger.WithFields(logrus.Fields{\"tag\": constants.TagHelmInstall})\n\tlog.Info(\"Start helm install\")\n\n\tcommonCluster, ok := GetCommonClusterFromRequest(c)\n\tif ok != true {\n\t\treturn\n\t}\n\n\tkubeConfig, err := commonCluster.GetK8sConfig()\n\tif err != nil {\n\t\tlog.Errorf(\"Error during getting kubeconfig: %s\", err.Error())\n\t\tc.JSON(http.StatusBadRequest, htype.ErrorResponse{\n\t\t\tCode:    http.StatusBadRequest,\n\t\t\tMessage: \"Error getting kubeconfig\",\n\t\t\tError:   err.Error(),\n\t\t})\n\t\treturn\n\t}\n\t// bind request body to struct\n\tvar helmInstall htype.Install\n\tif err := c.BindJSON(&helmInstall); err != nil {\n\t\t// bind failed\n\t\tlog.Errorf(\"Required field is empty: %s\", err.Error())\n\t\tc.JSON(http.StatusBadRequest, htype.ErrorResponse{\n\t\t\tCode:    http.StatusBadRequest,\n\t\t\tMessage: \"Error parsing request\",\n\t\t\tError:   err.Error(),\n\t\t})\n\t\treturn\n\t}\n\terr = helm.Install(&helmInstall, kubeConfig, commonCluster.GetName())\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to install chart: %s\", err.Error())\n\t\tc.JSON(http.StatusBadRequest, htype.ErrorResponse{\n\t\t\tCode:    http.StatusBadRequest,\n\t\t\tMessage: \"Error installing helm\",\n\t\t\tError:   err.Error(),\n\t\t})\n\t\treturn\n\t}\n\tmessage := \"helm initialising\"\n\tc.JSON(http.StatusCreated, htype.InstallResponse{\n\t\tStatus:  http.StatusCreated,\n\t\tMessage: message,\n\t})\n\tlog.Info(message)\n\treturn\n}",
  "func (r *ReconcileHelmRelease) newHelmReleaseManager(\n\ts *appv1alpha1.HelmRelease) (helmrelease.Manager, error) {\n\thelmReleaseSecret, err := utils.GetSecret(r.GetClient(),\n\t\ts.Namespace,\n\t\t&corev1.ObjectReference{Name: s.Spec.ReleaseName})\n\tif err == nil {\n\t\tif !utils.IsOwned(s.ObjectMeta, helmReleaseSecret.ObjectMeta) {\n\t\t\treturn nil,\n\t\t\t\tfmt.Errorf(\"duplicate release name: found existing release with name %q for another helmRelease %v\",\n\t\t\t\t\ts.Spec.ReleaseName, helmReleaseSecret.GetOwnerReferences())\n\t\t}\n\t} else if errors.IsNotFound(err) {\n\t\thelmReleaseSecret, err = createSecret(r, s)\n\t\tif err != nil {\n\t\t\tklog.Error(err)\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\treturn nil, err\n\t}\n\n\tconfigMap, err := utils.GetConfigMap(r.GetClient(), s.Namespace, s.Spec.ConfigMapRef)\n\tif err != nil {\n\t\tklog.Error(err)\n\t\treturn nil, err\n\t}\n\n\tsecret, err := utils.GetSecret(r.GetClient(), s.Namespace, s.Spec.SecretRef)\n\tif err != nil {\n\t\tklog.Error(err, \" - Failed to retrieve secret \", s.Spec.SecretRef.Name)\n\t\treturn nil, err\n\t}\n\n\to := &unstructured.Unstructured{}\n\to.SetGroupVersionKind(schema.GroupVersionKind{\n\t\tVersion: \"v1\",\n\t\tKind:    \"Secret\",\n\t})\n\to.SetNamespace(helmReleaseSecret.GetNamespace())\n\n\to.SetName(helmReleaseSecret.GetName())\n\tklog.V(2).Info(\"ReleaseName :\", o.GetName())\n\to.SetUID(helmReleaseSecret.GetUID())\n\tklog.V(5).Info(\"uuid:\", o.GetUID())\n\n\tchartsDir := os.Getenv(appv1alpha1.ChartsDir)\n\tif chartsDir == \"\" {\n\t\tchartsDir, err = ioutil.TempDir(\"/tmp\", \"charts\")\n\t\tif err != nil {\n\t\t\tklog.Error(err, \" - Can not create tempdir\")\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tchartDir, err := utils.DownloadChart(configMap, secret, chartsDir, s)\n\tklog.V(3).Info(\"ChartDir: \", chartDir)\n\n\tif s.DeletionTimestamp == nil {\n\t\tif err != nil {\n\t\t\tklog.Error(err, \" - Failed to download the chart\")\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif s.Spec.Values != \"\" {\n\t\t\tvar spec interface{}\n\n\t\t\terr = yaml.Unmarshal([]byte(s.Spec.Values), &spec)\n\t\t\tif err != nil {\n\t\t\t\tklog.Error(err, \" - Failed to Unmarshal the values \", s.Spec.Values)\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\to.Object[\"spec\"] = spec\n\t\t}\n\t} else if err != nil {\n\t\t//If error when download for deletion then create a fake chart.yaml.\n\t\t//The helmrelease manager needs only the releaseName\n\t\tklog.Info(\"Unable to download ChartDir: \", chartDir, \" creating a fake chart.yaml\")\n\t\tchartDir, err = utils.CreateFakeChart(chartsDir, s)\n\t\tif err != nil {\n\t\t\tklog.Error(err, \" - Failed to create fake chart for uninstall\")\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tf := helmrelease.NewManagerFactory(r.Manager, chartDir)\n\n\thelmManager, err := f.NewManager(o)\n\n\treturn helmManager, err\n}",
  "func (repo *HelmRepoRepository) CreateHelmRepo(hr *models.HelmRepo) (*models.HelmRepo, error) {\n\terr := repo.EncryptHelmRepoData(hr, repo.key)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tproject := &models.Project{}\n\n\tif err := repo.db.Where(\"id = ?\", hr.ProjectID).First(&project).Error; err != nil {\n\t\treturn nil, err\n\t}\n\n\tassoc := repo.db.Model(&project).Association(\"HelmRepos\")\n\n\tif assoc.Error != nil {\n\t\treturn nil, assoc.Error\n\t}\n\n\tif err := assoc.Append(hr); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create a token cache by default\n\tassoc = repo.db.Model(hr).Association(\"TokenCache\")\n\n\tif assoc.Error != nil {\n\t\treturn nil, assoc.Error\n\t}\n\n\tif err := assoc.Append(&hr.TokenCache); err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = repo.DecryptHelmRepoData(hr, repo.key)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn hr, nil\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Load loads the chart from the repository. 
 | 
	func (h *Helm) Load(chart, version string) (string, error) {
	return h.locateChart(chart, version)
} 
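
A minimal usage sketch (hedged: the caller, chart reference, and version are hypothetical, the snippet assumes it lives in the same package as Helm, and fmt/log imports are assumed):

func loadExample(h *Helm) {
	// An empty version string asks for the latest available chart.
	path, err := h.Load("stable/grafana", "")
	if err != nil {
		log.Fatalf("locating chart: %v", err)
	}
	fmt.Println("chart resolved to", path)
}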
 | 
	[
  "func (w *Worker) loadChart(u string) (*chart.Chart, error) {\n\t// Rate limit requests to Github to avoid them being rejected\n\tif strings.HasPrefix(u, \"https://github.com\") {\n\t\t_ = w.rl.Wait(w.ctx)\n\t}\n\n\tresp, err := w.hg.Get(u)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == http.StatusOK {\n\t\tchart, err := loader.LoadArchive(resp.Body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn chart, nil\n\t}\n\treturn nil, fmt.Errorf(\"unexpected status code received: %d\", resp.StatusCode)\n}",
  "func Load(chart string) (*Chart, error) {\n\tif fi, err := os.Stat(chart); err != nil {\n\t\treturn nil, err\n\t} else if !fi.IsDir() {\n\t\treturn nil, fmt.Errorf(\"Chart %s is not a directory.\", chart)\n\t}\n\n\tcf, err := LoadChartfile(filepath.Join(chart, \"Chart.yaml\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &Chart{\n\t\tChartfile: cf,\n\t\tKind:      map[string][]*manifest.Manifest{},\n\t}\n\n\tms, err := manifest.ParseDir(chart)\n\tif err != nil {\n\t\treturn c, err\n\t}\n\n\tc.attachManifests(ms)\n\n\treturn c, nil\n}",
  "func Load(name string, gcs *storage.Client) (*Repo, error) {\n\tentry, err := retrieveRepositoryEntry(name)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"entry\")\n\t}\n\tif entry == nil {\n\t\treturn nil, fmt.Errorf(\"repository \\\"%s\\\" not found. Make sure you add it to helm\", name)\n\t}\n\n\tindexFileURL, err := resolveReference(entry.URL, \"index.yaml\")\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"resolve reference\")\n\t}\n\n\treturn &Repo{\n\t\tentry:        entry,\n\t\tindexFileURL: indexFileURL,\n\t\tgcs:          gcs,\n\t}, nil\n}",
  "func LoadChartArchive(ctx context.Context, u *url.URL, o *LoadChartArchiveOptions) (*chart.Chart, error) {\n\tvar r io.Reader\n\n\tswitch u.Scheme {\n\tcase \"http\", \"https\":\n\t\t// Get chart content\n\t\treq, _ := http.NewRequest(\"GET\", u.String(), nil)\n\t\treq = req.WithContext(ctx)\n\t\treq.Header.Set(\"Accept-Encoding\", \"identity\")\n\t\tif o.Username != \"\" || o.Password != \"\" {\n\t\t\treq.SetBasicAuth(o.Username, o.Password)\n\t\t}\n\t\thc := o.Hc\n\t\tif hc == nil {\n\t\t\thc = util.SetupHTTPClient(false, util.HTTPClientDefaultTimeout)\n\t\t}\n\t\tresp, err := hc.Do(req)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\tcase http.StatusNotFound:\n\t\t\treturn nil, hub.ErrNotFound\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unexpected status code received: %d\", resp.StatusCode)\n\t\t}\n\t\tr = resp.Body\n\tcase \"oci\":\n\t\top := o.Op\n\t\tif op == nil {\n\t\t\top = oci.NewPuller(nil)\n\t\t}\n\t\tref := strings.TrimPrefix(u.String(), hub.RepositoryOCIPrefix)\n\t\t_, data, err := op.PullLayer(ctx, ref, ChartContentLayerMediaType, o.Username, o.Password)\n\t\tif err != nil {\n\t\t\tif errors.Is(err, oci.ErrLayerNotFound) {\n\t\t\t\t_, data, err = op.PullLayer(ctx, ref, legacyChartContentLayerMediaType, o.Username, o.Password)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tr = bytes.NewReader(data)\n\tdefault:\n\t\treturn nil, repo.ErrSchemeNotSupported\n\t}\n\n\t// Load chart from reader previously set up\n\tchrt, err := loader.LoadArchive(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn chrt, nil\n}",
  "func (c *ChartOpts) LoadChart() (*chart.Chart, error) {\n\tif c.Chart != nil {\n\t\treturn c.Chart, nil\n\t}\n\tif c.LocalPath != \"\" {\n\t\t_, err := os.Stat(c.LocalPath)\n\t\tif err == nil {\n\t\t\treturn loader.Load(c.LocalPath)\n\t\t}\n\t}\n\tif c.ChartArchive != nil {\n\t\treturn loader.LoadArchive(c.ChartArchive)\n\t}\n\tif c.ChartURL != \"\" {\n\t\treturn utils.DownloadChartWithTLS(c.ChartURL, c.AuthInfo.Username, c.AuthInfo.Password, c.AuthInfo.RootCAPath,\n\t\t\tc.AuthInfo.CertPath, c.AuthInfo.PrivateKeyPath, c.InsecureSkipTLSVerify)\n\t}\n\tif c.RepoOptions != nil {\n\t\turl, err := FindChartInAuthAndTLSRepoURL(c.RepoOptions.RepoURL, c.RepoOptions.Username, c.RepoOptions.Password,\n\t\t\tc.ChartName, c.ChartVersion, c.RepoOptions.CertFile, c.RepoOptions.KeyFile, c.RepoOptions.CAFile, c.RepoOptions.InsecureSkipTLSVerify,\n\t\t\tgetter.All(&cli.EnvSettings{}))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn utils.DownloadChartWithTLS(url, c.RepoOptions.Username, c.RepoOptions.Password,\n\t\t\tc.RepoOptions.CAFile, c.RepoOptions.CertFile, c.RepoOptions.KeyFile, false)\n\t}\n\n\treturn nil, errors.New(\"load chart error ,chart load method not config\")\n}",
  "func Load(ctx context.Context, root string, verify bool) (*graph.Graph, error) {\n\tbase, err := Nodes(ctx, root, verify)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"loading failed\")\n\t}\n\n\tresolved, err := ResolveDependencies(ctx, base)\n\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not resolve dependencies\")\n\t}\n\n\tresourced, err := SetResources(ctx, resolved)\n\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not resolve resources\")\n\t}\n\treturn resourced, nil\n}",
  "func (dependency *Dependency) Load(filename string) error {\n\treturn util.LoadYAML(filename, dependency)\n}",
  "func Load(path string) *Graph {\n\tvar data Graph\n\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n\tdefer f.Close()\n\n\tfz, _ := gzip.NewReader(f)\n\tdefer fz.Close()\n\n\tdecoder := gob.NewDecoder(fz)\n\n\terr = decoder.Decode(&data)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\treturn &data\n}",
  "func (s *DataStore) Load() error {\n\tfile, err := os.Open(s.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\terr = json.NewDecoder(file).Decode(&s.model)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
  "func (r *projectRepository) Load(ctx context.Context, id valueobject.Identifier) (*project.Aggregate, error) {\n\tstreamID := \"Project-\" + id.String()\n\n\t// TODO: figure out the correct way to replay all the events, currently hardcoded to replay the last 1000 events\n\trecordedEvents, err := r.c.ReadStreamEvents(ctx, direction.Forwards, streamID, streamrevision.StreamRevisionStart, 1000, false)\n\tif err != nil {\n\t\tlog.Printf(\"Unexpected failure %+v\", err)\n\t\treturn &project.Aggregate{}, project.ErrProjectNotFound\n\t}\n\n\tvar events []event.Event\n\n\tfor _, record := range recordedEvents {\n\t\tswitch eventType := record.EventType; eventType {\n\t\tcase \"ProjectCreated\":\n\t\t\tvar e event.ProjectCreated\n\t\t\terr := json.Unmarshal(record.Data, &e)\n\t\t\tif err != nil {\n\t\t\t\treturn &project.Aggregate{}, fmt.Errorf(\"problem deserializing '%s' event from json\", record.EventType)\n\t\t\t}\n\t\t\tevents = append(events, &e)\n\t\tcase \"ProjectChanged\":\n\t\t\tvar e event.ProjectChanged\n\t\t\terr := json.Unmarshal(record.Data, &e)\n\t\t\tif err != nil {\n\t\t\t\treturn &project.Aggregate{}, fmt.Errorf(\"problem deserializing '%s' event from json\", record.EventType)\n\t\t\t}\n\t\t\tevents = append(events, &e)\n\t\tcase \"ProjectDeleted\":\n\t\t\tvar e event.ProjectDeleted\n\t\t\terr := json.Unmarshal(record.Data, &e)\n\t\t\tif err != nil {\n\t\t\t\treturn &project.Aggregate{}, fmt.Errorf(\"problem deserializing '%s' event from json\", record.EventType)\n\t\t\t}\n\t\t\tevents = append(events, &e)\n\t\tdefault:\n\t\t\tlog.Printf(\"unexpected event type: %T\", eventType)\n\t\t}\n\n\t}\n\n\treturn project.NewAggregateFromEvents(events), nil\n}",
  "func Load(fileName string) ([]Dog, error) {\n\tdata, _ := ioutil.ReadFile(fileName)\n\treturn DogsFromJSON(data)\n}",
  "func (b *Bolt) Load(id string, data interface{}) error {\n\terr := b.client.View(func(tx *bolt.Tx) error {\n\t\tbkt := tx.Bucket([]byte(b.bucket))\n\t\tv := bkt.Get([]byte(id))\n\t\tif v == nil {\n\t\t\treturn storage.ErrNotFound\n\t\t}\n\n\t\terr := json.Unmarshal(v, data)\n\t\treturn err\n\t})\n\n\treturn err\n}",
  "func (s3Driver *S3ArtifactDriver) Load(inputArtifact *wfv1.Artifact, path string) error {\n\tminioClient, err := s3Driver.newMinioClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Download the file to a local file path\n\tlog.Infof(\"Loading from s3 (endpoint: %s, bucket: %s, key: %s) to %s\",\n\t\tinputArtifact.S3.Endpoint, inputArtifact.S3.Bucket, inputArtifact.S3.Key, path)\n\terr = minioClient.FGetObject(inputArtifact.S3.Bucket, inputArtifact.S3.Key, path)\n\tif err != nil {\n\t\treturn errors.InternalWrapError(err)\n\t}\n\treturn nil\n}",
  "func (c *Catalog) Load() error {\n\tif _, err := c.GetLenses(); err != nil {\n\t\treturn err\n\t}\n\tif _, err := c.GetCameras(); err != nil {\n\t\treturn err\n\t}\n\tif _, err := c.GetStats(); err != nil {\n\t\treturn err\n\t}\n\tif _, err := c.GetPhotos(); err != nil {\n\t\treturn err\n\t}\n\tif _, err := c.GetCollections(); err != nil {\n\t\treturn err\n\t}\n\tif _, err := c.GetCollectionTree(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
  "func (fb *FlowBuilder) Load(rawData []byte) *FlowBuilder {\n\tfb.flow = flow.New()\n\tfb.flow.UseRegistry(fb.registry)\n\n\tdoc := &FlowDocument{[]Node{}, []Link{}, []Trigger{}}\n\tlog.Println(\"Loading document from:\", string(rawData))\n\terr := json.Unmarshal(rawData, doc)\n\tif err != nil {\n\t\tfb.Err = err\n\t\treturn fb\n\t}\n\n\tfb.Doc = doc\n\n\treturn fb\n}",
  "func (r *Registry) Load() error {\n\tpkgs, err := allPackages()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Empty our cache and update it with the results from the database\n\tr.Packages = make(map[string][]*guppy.Package)\n\tfor _, pkg := range pkgs {\n\t\tr.Packages[pkg.Name] = append(r.Packages[pkg.Name], pkg)\n\t}\n\n\treturn nil\n}",
  "func (p *WarPlugin) Load(bot *mmmorty.Bot, service mmmorty.Discord, data []byte) error {\n\tif data != nil {\n\t\tif err := json.Unmarshal(data, p); err != nil {\n\t\t\tlog.Println(\"Error loading data\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
  "func (n *nameHistory) Load() error {\n\tfp, err := os.OpenFile(n.filepath, os.O_RDONLY, os.ModePerm)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not open %q file: %v\", n.filepath, err)\n\t}\n\tdefer fp.Close()\n\n\tif err := yaml.NewDecoder(fp).Decode(&n.entries); err != nil {\n\t\treturn fmt.Errorf(\"could not decode file: %v\", err)\n\t}\n\n\tn.isChanged = false\n\n\treturn nil\n}",
  "func (r *LocalRegistry) Load() {\n\tvar (\n\t\tregBytes []byte\n\t\terr      error\n\t)\n\t// check if localRepo file exist\n\t_, err = os.Stat(r.file())\n\tif err != nil {\n\t\t// then assume localRepo.json is not there: try and create it\n\t\tr.save()\n\t} else {\n\t\tregBytes, err = ioutil.ReadFile(r.file())\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\terr = json.Unmarshal(regBytes, r)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	LocateChart looks for a chart directory in known places, and returns either the full path or an error. 
 | 
	func (c *ChartPathOptions) LocateChart(name, dest string, settings *cli.EnvSettings) (string, error) {
	name = strings.TrimSpace(name)
	version := strings.TrimSpace(c.Version)
	// A path that exists on disk wins: return its absolute form, verifying if requested.
	if _, err := os.Stat(name); err == nil {
		abs, err := filepath.Abs(name)
		if err != nil {
			return abs, err
		}
		if c.Verify {
			if _, err := downloader.VerifyChart(abs, c.Keyring); err != nil {
				return "", err
			}
		}
		return abs, nil
	}
	if filepath.IsAbs(name) || strings.HasPrefix(name, ".") {
		return name, errors.Errorf("path %q not found", name)
	}
	// Not a local path: set up a downloader, carrying over auth/TLS options.
	dl := downloader.ChartDownloader{
		Out:     os.Stdout,
		Keyring: c.Keyring,
		Getters: getter.All(settings),
		Options: []getter.Option{
			getter.WithBasicAuth(c.Username, c.Password),
			getter.WithTLSClientConfig(c.CertFile, c.KeyFile, c.CaFile),
			getter.WithInsecureSkipVerifyTLS(c.InsecureSkipTLSverify),
		},
		RepositoryConfig: settings.RepositoryConfig,
		RepositoryCache:  settings.RepositoryCache,
	}
	if c.Verify {
		dl.Verify = downloader.VerifyAlways
	}
	if c.RepoURL != "" {
		chartURL, err := repo.FindChartInAuthAndTLSRepoURL(c.RepoURL, c.Username, c.Password, name, version,
			c.CertFile, c.KeyFile, c.CaFile, c.InsecureSkipTLSverify, getter.All(settings))
		if err != nil {
			return "", err
		}
		name = chartURL
	}
	if err := os.MkdirAll(dest, 0755); err != nil {
		return "", err
	}
	filename, _, err := dl.DownloadTo(name, version, dest)
	if err == nil {
		lname, err := filepath.Abs(filename)
		if err != nil {
			return filename, err
		}
		return lname, nil
	} else if settings.Debug {
		return filename, err
	}
	atVersion := ""
	if version != "" {
		atVersion = fmt.Sprintf(" at version %q", version)
	}
	return filename, errors.Errorf("failed to download %q%s (hint: running `helm repo update` may help)", name, atVersion)
} 
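
A hedged sketch of driving the function above; settings comes from Helm's cli.New(), the bitnami/nginx alias and version are placeholders, ChartPathOptions is the local variant with the extra dest parameter, and fmt/log/os imports are assumed:

func locateExample() {
	settings := cli.New()
	opts := &ChartPathOptions{Version: "13.2.0"}
	// Download (or resolve) the chart into a temporary directory.
	cp, err := opts.LocateChart("bitnami/nginx", os.TempDir(), settings)
	if err != nil {
		log.Fatalf("locate chart: %v", err)
	}
	fmt.Println("chart archive at", cp)
}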
 | 
	[
  "func (c *ChartPathOptions) LocateChart(name string, settings *cli.EnvSettings) (string, error) {\n\tf, err := repo.LoadFile(settings.RepositoryConfig)\n\tif err != nil || len(f.Repositories) == 0 {\n\t\treturn \"\", errors.Wrap(err, \"no repositories exist, need to add repo first\")\n\t}\n\n\te := f.Get(name)\n\tif e == nil {\n\t\treturn \"\", errors.Errorf(\"entry %s is not found\", name)\n\t}\n\treturn e.ExperimentFile, nil\n}",
  "func locateChartPath(repoURL, username, password, name, version string, verify bool, keyring,\n\tcertFile, keyFile, caFile string) (string, error) {\n\tname = strings.TrimSpace(name)\n\tversion = strings.TrimSpace(version)\n\tif fi, err := os.Stat(name); err == nil {\n\t\tabs, err := filepath.Abs(name)\n\t\tif err != nil {\n\t\t\treturn abs, err\n\t\t}\n\t\tif verify {\n\t\t\tif fi.IsDir() {\n\t\t\t\treturn \"\", errors.New(\"cannot verify a directory\")\n\t\t\t}\n\t\t\tif _, err := downloader.VerifyChart(abs, keyring); err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t}\n\t\treturn abs, nil\n\t}\n\tif filepath.IsAbs(name) || strings.HasPrefix(name, \".\") {\n\t\treturn name, fmt.Errorf(\"path %q not found\", name)\n\t}\n\n\tcrepo := filepath.Join(Settings.Home.Repository(), name)\n\tif _, err := os.Stat(crepo); err == nil {\n\t\treturn filepath.Abs(crepo)\n\t}\n\n\tdl := downloader.ChartDownloader{\n\t\tHelmHome: Settings.Home,\n\t\tOut:      os.Stdout,\n\t\tKeyring:  keyring,\n\t\tGetters:  getter.All(Settings),\n\t\tUsername: username,\n\t\tPassword: password,\n\t}\n\tif verify {\n\t\tdl.Verify = downloader.VerifyAlways\n\t}\n\tif repoURL != \"\" {\n\t\tchartURL, err := repo.FindChartInAuthRepoURL(repoURL, username, password, name, version,\n\t\t\tcertFile, keyFile, caFile, getter.All(Settings))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tname = chartURL\n\t}\n\n\tif _, err := os.Stat(Settings.Home.Archive()); os.IsNotExist(err) {\n\t\tos.MkdirAll(Settings.Home.Archive(), 0744)\n\t}\n\n\tfilename, _, err := dl.DownloadTo(name, version, Settings.Home.Archive())\n\tif err == nil {\n\t\tlname, err := filepath.Abs(filename)\n\t\tif err != nil {\n\t\t\treturn filename, err\n\t\t}\n\t\t//debug(\"Fetched %s to %s\\n\", name, filename)\n\t\treturn lname, nil\n\t} else if Settings.Debug {\n\t\treturn filename, err\n\t}\n\n\treturn filename, fmt.Errorf(\"failed to download %q (hint: running `helm repo update` may help)\", name)\n}",
  "func locateChartPath(settings *environment.EnvSettings, repoURL, username, password, name, version string, verify bool, keyring,\n\tcertFile, keyFile, caFile string) (string, error) {\n\tname = strings.TrimSpace(name)\n\tversion = strings.TrimSpace(version)\n\tif fi, err := os.Stat(name); err == nil {\n\t\tabs, err := filepath.Abs(name)\n\t\tif err != nil {\n\t\t\treturn abs, err\n\t\t}\n\t\tif verify {\n\t\t\tif fi.IsDir() {\n\t\t\t\treturn \"\", errors.New(\"cannot verify a directory\")\n\t\t\t}\n\t\t\tif _, err := downloader.VerifyChart(abs, keyring); err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t}\n\t\treturn abs, nil\n\t}\n\tif filepath.IsAbs(name) || strings.HasPrefix(name, \".\") {\n\t\treturn name, fmt.Errorf(\"path %q not found\", name)\n\t}\n\n\tcrepo := filepath.Join(settings.Home.Repository(), name)\n\tif _, err := os.Stat(crepo); err == nil {\n\t\treturn filepath.Abs(crepo)\n\t}\n\n\tdl := downloader.ChartDownloader{\n\t\tHelmHome: settings.Home,\n\t\tOut:      os.Stdout,\n\t\tKeyring:  keyring,\n\t\tGetters:  getter.All(*settings),\n\t\tUsername: username,\n\t\tPassword: password,\n\t}\n\tif verify {\n\t\tdl.Verify = downloader.VerifyAlways\n\t}\n\tif repoURL != \"\" {\n\t\tchartURL, err := repo.FindChartInAuthRepoURL(repoURL, username, password, name, version,\n\t\t\tcertFile, keyFile, caFile, getter.All(*settings))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tname = chartURL\n\t}\n\n\tif _, err := os.Stat(settings.Home.Archive()); os.IsNotExist(err) {\n\t\tos.MkdirAll(settings.Home.Archive(), 0744)\n\t}\n\n\tfilename, _, err := dl.DownloadTo(name, version, settings.Home.Archive())\n\tif err == nil {\n\t\tlname, err := filepath.Abs(filename)\n\t\tif err != nil {\n\t\t\treturn filename, err\n\t\t}\n\n\t\treturn lname, nil\n\t}\n\n\treturn filename, fmt.Errorf(\"failed to download %q (hint: running `helm repo update` may help)\", name)\n}",
  "func FindCharts(chartSearchDir string) ([]string, error) {\n\tfileList := []string{}\n\terr := filepath.Walk(chartSearchDir, func(path string, f os.FileInfo, err error) error {\n\t\tfileName := filepath.Base(path)\n\t\tif fileName == \"Chart.yaml\" {\n\t\t\tfileList = append(fileList, path)\n\t\t}\n\t\treturn nil\n\t})\n\treturn fileList, err\n}",
  "func LocateHelmChart(chartRepo, chartName, chartVersion string) (*chart.Chart, error) {\n\tclient := action.NewInstall(nil)\n\tclient.ChartPathOptions.RepoURL = chartRepo\n\tclient.ChartPathOptions.Version = chartVersion\n\n\tcp, err := client.ChartPathOptions.LocateChart(chartName, settings)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tklog.V(5).Infof(\"chart %s/%s:%s locates at: %s\", chartRepo, chartName, chartVersion, cp)\n\n\t// Check chart dependencies to make sure all are present in /charts\n\tchartRequested, err := loader.Load(cp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := CheckIfInstallable(chartRequested); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn chartRequested, nil\n}",
  "func findHelmCharts(root string) ([]string, error) {\n\tcharts := []string{}\n\n\terr := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {\n\t\tif info.IsDir() {\n\t\t\tif _, err := os.Stat(filepath.Join(path, \"Chart.yaml\")); err == nil {\n\t\t\t\tcharts = append(charts, path)\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tsort.Strings(charts)\n\n\treturn charts, err\n}",
  "func (c *helmWrapper) chartDir() string {\n\treturn filepath.Join(c.Workspace(), \"chart\")\n}",
  "func findChartInRepoIndex(repoIndex *repo.IndexFile, repoURL, chartName, chartVersion string) (string, error) {\n\terrMsg := fmt.Sprintf(\"chart %q\", chartName)\n\tif chartVersion != \"\" {\n\t\terrMsg = fmt.Sprintf(\"%s version %q\", errMsg, chartVersion)\n\t}\n\tcv, err := repoIndex.Get(chartName, chartVersion)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"%s not found in repository\", errMsg)\n\t}\n\tif len(cv.URLs) == 0 {\n\t\treturn \"\", fmt.Errorf(\"%s has no downloadable URLs\", errMsg)\n\t}\n\treturn resolveChartURL(repoURL, cv.URLs[0])\n}",
  "func (v Ver) GetChartsDir() string {\n\tif len(common.Config.Rendering.ChartsDir) == 0 {\n\t\treturn path.Join(common.Config.Rendering.ResourceDir, \"helm\", v.String())\n\t}\n\treturn path.Join(common.Config.Rendering.ChartsDir, v.String())\n}",
  "func ChartsRepoDir(relativeTo string) (string, error) {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\toutputDir := os.Getenv(\"OUTPUT_DIR\")\n\tif outputDir == \"\" {\n\t\treturn \"\", fmt.Errorf(\"output directory is not defined in environment\")\n\t}\n\treturn path.Join(cwd, path.Join(relativeTo, path.Join(outputDir, \"charts-repo\"))), nil\n}",
  "func normalizeChart(basePath, chart string) string {\n\tif !isLocalChart(chart) || chart[0] == '/' {\n\t\treturn chart\n\t}\n\treturn filepath.Join(basePath, chart)\n}",
  "func GetEtcdChartPath() string {\n\treturn filepath.Join(\"..\", \"..\", \"..\", \"..\", \"charts\", \"etcd\")\n}",
  "func CheckChartPathExist(chartPath string) bool {\n\tcheckPath := Exists(chartPath)\n\tif checkPath == true {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}",
  "func FindChartInAuthAndTLSRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, insecureSkipTLSverify bool, getters getter.Providers) (string, error) {\n\n\t// Download and write the index file to a temporary location\n\tbuf := make([]byte, 20)\n\trand.Read(buf)\n\tname := strings.ReplaceAll(base64.StdEncoding.EncodeToString(buf), \"/\", \"-\")\n\n\tc := repo.Entry{\n\t\tURL:                   repoURL,\n\t\tUsername:              username,\n\t\tPassword:              password,\n\t\tCertFile:              certFile,\n\t\tKeyFile:               keyFile,\n\t\tCAFile:                caFile,\n\t\tName:                  name,\n\t\tInsecureSkipTLSverify: insecureSkipTLSverify,\n\t}\n\tr, err := repo.NewChartRepository(&c, getters)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tidx, err := r.DownloadIndexFile()\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"looks like %q is not a valid chart repository or cannot be reached\", repoURL)\n\t}\n\n\t// Read the index file for the repository to get chart information and return chart URL\n\trepoIndex, err := repo.LoadIndexFile(idx)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terrMsg := fmt.Sprintf(\"chart %q\", chartName)\n\tif chartVersion != \"\" {\n\t\terrMsg = fmt.Sprintf(\"%s version %q\", errMsg, chartVersion)\n\t}\n\tcv, err := repoIndex.Get(chartName, chartVersion)\n\tif err != nil {\n\t\treturn \"\", errors.Errorf(\"%s not found in %s repository\", errMsg, repoURL)\n\t}\n\n\tif len(cv.URLs) == 0 {\n\t\treturn \"\", errors.Errorf(\"%s has no downloadable URLs\", errMsg)\n\t}\n\n\tchartURL := cv.URLs[0]\n\n\tabsoluteChartURL, err := repo.ResolveReferenceURL(repoURL, chartURL)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to make chart URL absolute\")\n\t}\n\n\treturn absoluteChartURL, nil\n}",
  "func FindConfig(path, dirname string) string {\n\t// dirs := []string{\".\", homeDir}\n\t// /etc/dot, $HOME/.dot/config, $HOME/.config/dot...\n\tif filepath.IsAbs(path) {\n\t\treturn path\n\t}\n\t// Current working directory\n\tif exists(path) {\n\t\treturn path\n\t}\n\t// Home directory\n\tif rc := filepath.Join(homeDir, path); exists(rc) {\n\t\treturn rc\n\t}\n\t// path = strings.TrimPrefix(path, \".\")\n\treturn filepath.Join(homeDir, dirname, \"config\")\n}",
  "func (x *XDGDir) Find(suffix string) (absPath string, err error) {\n\tvar firstError error = nil\n\tfor _, path := range x.Dirs() {\n\t\tname := filepath.Join(path, suffix)\n\t\t_, err = os.Stat(name)\n\t\tif err == nil {\n\t\t\treturn name, nil\n\t\t} else if firstError == nil {\n\t\t\tfirstError = err\n\t\t}\n\t}\n\treturn \"\", firstError\n}",
  "func FindInfo(basePath string) (string, error) {\n\tvar infoPath string\n\terr := filepath.Walk(basePath, func(subpath string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif strings.ToLower(info.Name()) == \"info.dat\" {\n\t\t\tinfoPath = subpath\n\t\t}\n\t\treturn nil\n\t})\n\treturn infoPath, err\n}",
  "func FindInSearchPath(searchPath, pkg string) string {\n\tpathsList := filepath.SplitList(searchPath)\n\tfor _, path := range pathsList {\n\t\tif evaluatedPath, err := filepath.EvalSymlinks(filepath.Join(path, \"src\", pkg)); err == nil {\n\t\t\tif _, err := os.Stat(evaluatedPath); err == nil {\n\t\t\t\treturn evaluatedPath\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}",
  "func (c *ChartOpts) LoadChart() (*chart.Chart, error) {\n\tif c.Chart != nil {\n\t\treturn c.Chart, nil\n\t}\n\tif c.LocalPath != \"\" {\n\t\t_, err := os.Stat(c.LocalPath)\n\t\tif err == nil {\n\t\t\treturn loader.Load(c.LocalPath)\n\t\t}\n\t}\n\tif c.ChartArchive != nil {\n\t\treturn loader.LoadArchive(c.ChartArchive)\n\t}\n\tif c.ChartURL != \"\" {\n\t\treturn utils.DownloadChartWithTLS(c.ChartURL, c.AuthInfo.Username, c.AuthInfo.Password, c.AuthInfo.RootCAPath,\n\t\t\tc.AuthInfo.CertPath, c.AuthInfo.PrivateKeyPath, c.InsecureSkipTLSVerify)\n\t}\n\tif c.RepoOptions != nil {\n\t\turl, err := FindChartInAuthAndTLSRepoURL(c.RepoOptions.RepoURL, c.RepoOptions.Username, c.RepoOptions.Password,\n\t\t\tc.ChartName, c.ChartVersion, c.RepoOptions.CertFile, c.RepoOptions.KeyFile, c.RepoOptions.CAFile, c.RepoOptions.InsecureSkipTLSVerify,\n\t\t\tgetter.All(&cli.EnvSettings{}))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn utils.DownloadChartWithTLS(url, c.RepoOptions.Username, c.RepoOptions.Password,\n\t\t\tc.RepoOptions.CAFile, c.RepoOptions.CertFile, c.RepoOptions.KeyFile, false)\n\t}\n\n\treturn nil, errors.New(\"load chart error ,chart load method not config\")\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	checkIfInstallable validates whether a chart can be installed. Only the Application chart type is installable. 
 | 
	func checkIfInstallable(ch *chart.Chart) error {
	switch ch.Metadata.Type {
	case "", "application":
		return nil
	}
	return errors.Errorf("%s charts are not installable", ch.Metadata.Type)
} 
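
A sketch of the usual guard before an install, assuming the chart was loaded with Helm's loader package (chartPath is a placeholder for a resolved chart directory or archive):

func installGuard(chartPath string) error {
	ch, err := loader.Load(chartPath)
	if err != nil {
		return err
	}
	// Reject library (or other non-application) charts up front,
	// e.g. "library charts are not installable".
	return checkIfInstallable(ch)
}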
 | 
	[
  "func CheckIfInstallable(ch *chart.Chart) error {\n\tswitch ch.Metadata.Type {\n\tcase \"\", \"application\":\n\t\treturn nil\n\t}\n\treturn errors.Errorf(\"%s charts are not installable\", ch.Metadata.Type)\n}",
  "func isChartInstallable(ch *chart.Chart) (bool, error) {\n\tswitch ch.Metadata.Type {\n\tcase \"\", \"application\":\n\t\treturn true, nil\n\t}\n\treturn false, liberrors.Errorf(\"%s charts are not installable\", ch.Metadata.Type)\n}",
  "func isChartInstallable(ch *chart.Chart) (bool, error) {\n\tswitch ch.Metadata.Type {\n\tcase \"\", \"application\":\n\t\treturn true, nil\n\t}\n\treturn false, errors.Errorf(\"%s charts are not installable\", ch.Metadata.Type)\n}",
  "func (c *Module) IsInstallableToApex() bool {\n\tif shared, ok := c.linker.(interface {\n\t\tshared() bool\n\t}); ok {\n\t\t// Stub libs and prebuilt libs in a versioned SDK are not\n\t\t// installable to APEX even though they are shared libs.\n\t\treturn shared.shared() && !c.IsStubs() && c.ContainingSdk().Unversioned()\n\t} else if _, ok := c.linker.(testPerSrc); ok {\n\t\treturn true\n\t}\n\treturn false\n}",
  "func (r *SelfDeploymentReconciler) checkCrdInstalled(groupVersion, kind string) (bool, error) {\n\tresources, err := r.DiscoveryClient.ServerResourcesForGroupVersion(groupVersion)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\tfor _, r := range resources.APIResources {\n\t\tif r.Kind == kind {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}",
  "func (app *App) CheckIsInstalled() error {\n\t// Can't use [whereis], because it doesn't return correct exit code\n\t// based on search results. Can use [type], as an option.\n\twhApp := NewApp(\"which\", app.cmd.Path)\n\tst := whApp.Run(nil, nil)\n\tif st.Error != nil {\n\t\treturn st.Error\n\t}\n\tif st.ExitCode != 0 {\n\t\treturn fmt.Errorf(\"App \\\"%s\\\" does not exist\", app.cmd.Path)\n\t}\n\treturn nil\n}",
  "func (c Initializer) verifyIsNotInstalled(client crdclient.CustomResourceDefinitionsGetter, crd *apiextv1.CustomResourceDefinition, result *verifier.Result) error {\n\t_, err := client.CustomResourceDefinitions().Get(context.TODO(), crd.Name, v1.GetOptions{})\n\tif err != nil {\n\t\tif kerrors.IsNotFound(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tresult.AddErrors(fmt.Sprintf(\"CRD %s is already installed. Did you mean to use --upgrade?\", crd.Name))\n\treturn nil\n}",
  "func checkAlreadyInstalled(\n\ttracer trace.Tracer,\n\tcontext context.T,\n\trepository localpackages.Repository,\n\tinstalledVersion string,\n\tinstallState localpackages.InstallState,\n\tinst installer.Installer,\n\tuninst installer.Installer,\n\toutput contracts.PluginOutputter) bool {\n\n\tcheckTrace := tracer.BeginSection(\"check if already installed\")\n\tdefer checkTrace.End()\n\n\tif inst != nil {\n\t\ttargetVersion := inst.Version()\n\t\tpackageName := inst.PackageName()\n\t\tvar instToCheck installer.Installer\n\n\t\t// TODO: When existing packages have idempotent installers and no reboot loops, remove this check for installing packages and allow the install to continue until it reports success without reboot\n\t\tif uninst != nil && installState == localpackages.RollbackInstall {\n\t\t\t// This supports rollback to a version whose installer contains an unconditional reboot\n\t\t\tinstToCheck = uninst\n\t\t}\n\t\tif (targetVersion == installedVersion &&\n\t\t\t(installState == localpackages.Installed || installState == localpackages.Unknown)) ||\n\t\t\tinstallState == localpackages.Installing || installState == localpackages.Updating {\n\t\t\tinstToCheck = inst\n\t\t}\n\t\tif instToCheck != nil {\n\t\t\tvalidateTrace := tracer.BeginSection(fmt.Sprintf(\"run validate for %s/%s\", instToCheck.PackageName(), instToCheck.Version()))\n\n\t\t\tvalidateOutput := instToCheck.Validate(tracer, context)\n\t\t\tvalidateTrace.WithExitcode(int64(validateOutput.GetExitCode()))\n\n\t\t\tif validateOutput.GetStatus() == contracts.ResultStatusSuccess {\n\t\t\t\tif installState == localpackages.Installing || installState == localpackages.Updating {\n\t\t\t\t\tvalidateTrace.AppendInfof(\"Successfully installed %v %v\", packageName, targetVersion)\n\t\t\t\t\tif uninst != nil {\n\t\t\t\t\t\tcleanupAfterUninstall(tracer, repository, uninst, output)\n\t\t\t\t\t}\n\t\t\t\t\toutput.MarkAsSucceeded()\n\t\t\t\t} else if installState == localpackages.RollbackInstall {\n\t\t\t\t\tvalidateTrace.AppendInfof(\"Failed to install %v %v, successfully rolled back to %v %v\", uninst.PackageName(), uninst.Version(), inst.PackageName(), inst.Version())\n\t\t\t\t\tcleanupAfterUninstall(tracer, repository, inst, output)\n\t\t\t\t\toutput.MarkAsFailed(nil, nil)\n\t\t\t\t} else if installState == localpackages.Unknown {\n\t\t\t\t\tvalidateTrace.AppendInfof(\"The package install state is Unknown. Continue to check if there are package files already downloaded.\")\n\t\t\t\t\tif err := repository.ValidatePackage(tracer, packageName, targetVersion); err != nil {\n\t\t\t\t\t\t// If the install state is Unkown and there's no package files downloaded previously, need to return false here so that the package can be downloaded and installed again.\n\t\t\t\t\t\t// This scenario happens when the installation of a package fails because package download fails due to lack of permissions (s3 bucket policy etc.)\n\t\t\t\t\t\tvalidateTrace.AppendInfof(\"There are no package files downloaded.\")\n\t\t\t\t\t\tvalidateTrace.End()\n\t\t\t\t\t\tcheckTrace.WithExitcode(1)\n\t\t\t\t\t\treturn false\n\t\t\t\t\t} else {\n\t\t\t\t\t\tvalidateTrace.AppendInfof(\"There are package files already downloaded. 
Considering the package has already been installed.\")\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tvalidateTrace.AppendInfof(\"%v %v is already installed\", packageName, targetVersion).End()\n\t\t\t\t\toutput.MarkAsSucceeded()\n\t\t\t\t}\n\t\t\t\tif installState != localpackages.Installed && installState != localpackages.Unknown {\n\t\t\t\t\trepository.SetInstallState(tracer, packageName, instToCheck.Version(), localpackages.Installed)\n\t\t\t\t}\n\n\t\t\t\tvalidateTrace.End()\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tvalidateTrace.AppendInfo(validateOutput.GetStdout())\n\t\t\tvalidateTrace.AppendError(validateOutput.GetStderr())\n\t\t\tvalidateTrace.End()\n\t\t}\n\t}\n\n\tcheckTrace.WithExitcode(1)\n\treturn false\n}",
  "func (mr *MockAPIMockRecorder) IsInstallable(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"IsInstallable\", reflect.TypeOf((*MockAPI)(nil).IsInstallable), arg0)\n}",
  "func (p *preImpl) checkUsable(ctx context.Context, pkgs map[string]struct{}) error {\n\tctx, st := timing.Start(ctx, \"check_arc\")\n\tdefer st.End()\n\n\tctx, cancel := context.WithTimeout(ctx, 5*time.Second)\n\tdefer cancel()\n\n\t// Check that the init process is the same as before. Otherwise, ARC was probably restarted.\n\tif pid, err := InitPID(); err != nil {\n\t\treturn err\n\t} else if pid != p.origInitPID {\n\t\treturn errors.Errorf(\"init process changed from %v to %v; probably crashed\", p.origInitPID, pid)\n\t}\n\n\t// Check that the package manager service is running.\n\tconst pkg = \"android\"\n\tif _, ok := pkgs[pkg]; !ok {\n\t\treturn errors.Errorf(\"pm didn't list %q among %d package(s)\", pkg, len(pkgs))\n\t}\n\n\t// TODO(nya): Should we also check that p.cr is still usable?\n\treturn nil\n}",
  "func checkAmbInstWithFlavor(kubectl Kubectl, flavor string) error {\n\tambInstallation, err := findAmbassadorInstallation(kubectl)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif ambInstallation.IsEmpty() {\n\t\treturn errors.New(\"no AmbassadorInstallation found\")\n\t}\n\n\tif !ambInstallation.IsInstalled() {\n\t\treturn errors.New(\"AmbassadorInstallation is not installed\")\n\t}\n\n\treason, message := ambInstallation.LastConditionExplain()\n\tif strings.Contains(reason, \"Error\") {\n\t\treturn LoopFailedError(message)\n\t}\n\n\tf, err := ambInstallation.GetFlavor()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif f != flavor {\n\t\treturn errors.New(fmt.Sprintf(\"AmbassadorInstallation is not a %s installation\", flavor))\n\t}\n\treturn nil\n}",
  "func (qi *QliksenseInstances) IsInstalled(crName string) bool {\n\tq := qi.InstanceMap[crName]\n\tif q == nil {\n\t\treturn false\n\t}\n\t// Get a config to talk to the apiserver\n\tcfg, err := config.GetConfig()\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tdynamicClient, err := dynamic.NewForConfig(cfg)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tengineRes := schema.GroupVersionResource{Group: \"qixmanager.qlik.com\", Version: \"v1\", Resource: \"engines\"}\n\n\tlist, err := dynamicClient.Resource(engineRes).Namespace(q.Namespace).List(metav1.ListOptions{})\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tfor _, d := range list.Items {\n\t\trls, _, err := unstructured.NestedString(d.Object, \"metadata\", \"labels\", searchingLabel)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\tif rls == q.Name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
  "func (m PlatformManager) IsInstalled(c Context, r Requirement) bool {\n\t_, lookErr := exec.LookPath(r.Package) // TODO: check version of package\n\treturn lookErr == nil\n}",
  "func (o *KubernetesAddonDefinitionAllOf) HasChartUrl() bool {\n\tif o != nil && o.ChartUrl != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
  "func isCommandCenterInstalled(envFile string) bool {\n\tif IsValueEmpty(environment(envFile).GpccInstanceName) {\n\t\treturn false\n\t} else {\n\t\treturn true\n\t}\n}",
  "func (c *Command) CheckApInterface() {\n\tcmd := exec.Command(\"ifconfig\", \"uap0\")\n\tgo c.Runner.ProcessCmd(\"ifconfig_uap0\", cmd)\n}",
  "func isPackageInstalled(pkgName string) bool {\n\tcmd := exec.Command(\"python\", \"-m\", \"pip\", \"show\", pkgName)\n\tif err := cmd.Run(); err != nil {\n\t\tif _, ok := err.(*exec.ExitError); ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
  "func (c *Module) EverInstallable() bool {\n\treturn c.installer != nil &&\n\t\t// Check to see whether the module is actually ever installable.\n\t\tc.installer.everInstallable()\n}",
  "func (r ChartGroupReconciler) installArmadaChartGroup(mgr armadaif.ArmadaChartGroupManager, instance *av1.ArmadaChartGroup) (bool, error) {\n\treclog := acglog.WithValues(\"namespace\", instance.Namespace, \"acg\", instance.Name)\n\treclog.Info(\"Updating\")\n\n\t// The concept of installing a chartgroup is kind of fuzzy since\n\t// the mgr can not really create chart. It can only check\n\t// that the charts are present before beeing able to proceed.\n\tinstalledResource, err := mgr.InstallResource(context.TODO())\n\tif err != nil {\n\t\tinstance.Status.RemoveCondition(av1.ConditionRunning)\n\n\t\thrc := av1.HelmResourceCondition{\n\t\t\tType:    av1.ConditionFailed,\n\t\t\tStatus:  av1.ConditionStatusTrue,\n\t\t\tReason:  av1.ReasonInstallError,\n\t\t\tMessage: err.Error(),\n\t\t}\n\t\tinstance.Status.SetCondition(hrc, instance.Spec.TargetState)\n\t\tr.logAndRecordFailure(instance, &hrc, err)\n\n\t\t_ = r.updateResourceStatus(instance)\n\t\treturn false, err\n\t}\n\tinstance.Status.RemoveCondition(av1.ConditionFailed)\n\n\tif err := r.watchArmadaCharts(instance, installedResource); err != nil {\n\t\treturn false, err\n\t}\n\n\thrc := av1.HelmResourceCondition{\n\t\tType:         av1.ConditionRunning,\n\t\tStatus:       av1.ConditionStatusTrue,\n\t\tReason:       av1.ReasonInstallSuccessful,\n\t\tMessage:      \"HardcodedMessage\",\n\t\tResourceName: installedResource.GetName(),\n\t}\n\tinstance.Status.SetCondition(hrc, instance.Spec.TargetState)\n\tr.logAndRecordSuccess(instance, &hrc)\n\n\terr = r.updateResourceStatus(instance)\n\treturn true, err\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Deprecated: Use ResponseListUnacceptedAgreements.ProtoReflect.Descriptor instead. 
 | 
	func (*ResponseListUnacceptedAgreements) Descriptor() ([]byte, []int) {
	return file_response_list_unaccepted_agreements_proto_rawDescGZIP(), []int{0}
} 
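
The replacement named by the deprecation note, for reference; ProtoReflect is generated by protoc-gen-go on every message type (fmt import assumed):

	msg := &ResponseListUnacceptedAgreements{}
	desc := msg.ProtoReflect().Descriptor() // a protoreflect.MessageDescriptor
	fmt.Println(desc.FullName())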
 | 
	[
  "func (*RequestListUnacceptedAgreements) Descriptor() ([]byte, []int) {\n\treturn file_request_list_unaccepted_agreements_proto_rawDescGZIP(), []int{0}\n}",
  "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}",
  "func (*UnWarnRequest) Descriptor() ([]byte, []int) {\n\treturn file_punishments_punishments_proto_rawDescGZIP(), []int{22}\n}",
  "func (*UnWarnResponse) Descriptor() ([]byte, []int) {\n\treturn file_punishments_punishments_proto_rawDescGZIP(), []int{23}\n}",
  "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{15}\n}",
  "func (*GetWarnRequest) Descriptor() ([]byte, []int) {\n\treturn file_punishments_punishments_proto_rawDescGZIP(), []int{35}\n}",
  "func (*DeleteTeamRequest) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_team_proto_rawDescGZIP(), []int{21}\n}",
  "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}",
  "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{1}\n}",
  "func (*Filter_DeprecatedV1) Descriptor() ([]byte, []int) {\n\treturn file_xds_envoy_base_proto_rawDescGZIP(), []int{8, 0}\n}",
  "func (*WarnRequest) Descriptor() ([]byte, []int) {\n\treturn file_punishments_punishments_proto_rawDescGZIP(), []int{12}\n}",
  "func (*ConfigRequest_V1_Deprecated) Descriptor() ([]byte, []int) {\n\treturn file_config_opensearch_config_request_proto_rawDescGZIP(), []int{0, 0, 23}\n}",
  "func (*ListProblemsRequest) Descriptor() ([]byte, []int) {\n\treturn file_problempb_service_proto_rawDescGZIP(), []int{20}\n}",
  "func (*DeleteTeamResponse) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_team_proto_rawDescGZIP(), []int{22}\n}",
  "func (*DropTeamRequest) Descriptor() ([]byte, []int) {\n\treturn file_mods_v1_mods_proto_rawDescGZIP(), []int{22}\n}",
  "func (*DeleteCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{162}\n}",
  "func (*DeleteFeedbackResponse) Descriptor() ([]byte, []int) {\n\treturn file_feedbackreq_proto_rawDescGZIP(), []int{7}\n}",
  "func (*ListProblemsResponse) Descriptor() ([]byte, []int) {\n\treturn file_problempb_service_proto_rawDescGZIP(), []int{21}\n}",
  "func (*CMsgDevDeleteEventActionsResponse) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{321}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Parse the given text into a Vault instance 
 | 
	func Parse(urlText string) (Vault, error) {
	// Serve a cached Vault when this URL was parsed before.
	result, ok := factory.Load(urlText)
	if ok {
		return result.(Vault), nil
	}
	u, err := url.Parse(urlText)
	if err != nil {
		return nil, err
	}
	if u.Scheme == "" {
		return nil, fmt.Errorf("url scheme is empty in secret.Parse(%q)", urlText)
	}
	switch u.Scheme {
	case "env":
		vault, newVaultErr := newEnvVault(u.Hostname())
		if newVaultErr != nil {
			return nil, newVaultErr
		}
		factory.Store(urlText, vault)
		return vault, nil
	case "passwd":
		vault := &passPhraseVault{passPhrase: u.Hostname()}
		factory.Store(urlText, vault)
		return vault, nil
	case "plain":
		return plainTextVault, nil
	default:
		return nil, fmt.Errorf("Unable to handle unknown scheme %q in %q", u.Scheme, u.String())
	}
} 
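
A hedged usage sketch; the passwd scheme follows the switch above, the pass phrase is a placeholder, and EncryptText is assumed to be part of the Vault interface (its env and passphrase implementations live elsewhere in this package; fmt/log imports assumed):

	v, err := Parse("passwd://my-pass-phrase")
	if err != nil {
		log.Fatalf("parsing vault URL: %v", err)
	}
	ciphertext, err := v.EncryptText("hello")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ciphertext)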
 | 
	[
  "func Parse(bytes []byte) (*List, error) {\n\tl := NewList(\"temp\")\n\terr := l.UnmarshalText(bytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to read list from text\")\n\t}\n\treturn l, nil\n}",
  "func (s *Safe) Vault(name, tag string) (*safe.Vault, error) {\n\th, err := s.hash()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tv := safe.NewVault(h, name, safe.NewTag(tag), nil)\n\treturn s.db.Vault(h, string(v.Key()))\n}",
  "func (c *CoreNLPClient) Parse(text string) (*Text, error) {\n\tresponse, err := http.Post(c.queryURL, \"text/plain\", strings.NewReader(text))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif response.StatusCode != http.StatusOK {\n\t\tb, _ := ioutil.ReadAll(response.Body)\n\t\treturn nil, fmt.Errorf(\"Received status %s, %s\", response.Status, b)\n\t}\n\n\tvar textObj Text\n\tjson.NewDecoder(response.Body).Decode(&textObj)\n\treturn &textObj, nil\n}",
  "func VaultInit(token, apiurl string) (*Vaulter, error) {\n\turlParts, err := url.Parse(apiurl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcfg := &vaulter.VaultAPIConfig{\n\t\tParentToken: token,\n\t\tHost:        urlParts.Hostname(),\n\t\tPort:        urlParts.Port(),\n\t\tScheme:      urlParts.Scheme,\n\t}\n\tapi := &vaulter.VaultAPI{}\n\tif err = vaulter.InitAPI(api, cfg, cfg.ParentToken); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Vaulter{\n\t\tapi: api,\n\t}, nil\n}",
  "func ParseText(content []byte) []interface{} {\n  jsonObject := []interface{}{}\n  if err := json.Unmarshal(content, &jsonObject); err != nil {\n    panic(err)\n  }\n  return parse(jsonObject)\n}",
  "func UnmarshalVault(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(Vault)\n\terr = core.UnmarshalPrimitive(m, \"id\", &obj.ID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"name\", &obj.Name)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"description\", &obj.Description)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"created_at\", &obj.CreatedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"updated_at\", &obj.UpdatedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"created_by\", &obj.CreatedBy)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"updated_by\", &obj.UpdatedBy)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"href\", &obj.Href)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}",
  "func (s *Safe) Vault(hash crypto.Hash, key string) (*safe.Vault, error) {\n\tres := safe.EmptyVault(hash)\n\terr := s.db.View(func(tx *bolt.Tx) error {\n\t\tbuf := tx.Bucket([]byte(VaultTable)).Get([]byte(key))\n\t\tif len(buf) == 0 {\n\t\t\treturn safe.ErrNotFound\n\t\t}\n\t\treturn json.Unmarshal(buf, &res)\n\t})\n\treturn res, err\n}",
  "func (h *handler) Load(state []byte) (core.Template, error) {\n\tdec := scale.NewDecoder(bytes.NewBuffer(state))\n\tvault := &Vault{}\n\tif _, err := vault.DecodeScale(dec); err != nil {\n\t\treturn nil, fmt.Errorf(\"%w: %w\", core.ErrInternal, err)\n\t}\n\treturn vault, nil\n}",
  "func New(cfg interface{}, cleanup bool) error {\n\t// load from vault\n\tvault, err := api.NewClient(nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif cleanup {\n\t\t// cleanup the env\n\t\terr = cleanEnv()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\terr = loadToken(vault)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = InitFromVault(cfg, vault)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
  "func (a *Block) UnmarshalText(text []byte) error {\n\ts := string(text)\n\tif len(s) == 0 { // this is no error condition\n\t\t*a = BlockZero\n\t\treturn nil\n\t}\n\n\tx, err := blockFromString(s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*a = x\n\treturn nil\n}",
  "func (ri *Remittance) Parse(record string) error {\n\tif utf8.RuneCountInString(record) < 6 {\n\t\treturn NewTagMinLengthErr(6, len(record))\n\t}\n\n\tri.tag = record[:6]\n\tlength := 6\n\n\tvalue, read, err := ri.parseVariableStringField(record[length:], 5)\n\tif err != nil {\n\t\treturn fieldError(\"SwiftFieldTag\", err)\n\t}\n\tri.CoverPayment.SwiftFieldTag = value\n\tlength += read\n\n\tvalue, read, err = ri.parseVariableStringField(record[length:], 35)\n\tif err != nil {\n\t\treturn fieldError(\"SwiftLineOne\", err)\n\t}\n\tri.CoverPayment.SwiftLineOne = value\n\tlength += read\n\n\tvalue, read, err = ri.parseVariableStringField(record[length:], 35)\n\tif err != nil {\n\t\treturn fieldError(\"SwiftLineTwo\", err)\n\t}\n\tri.CoverPayment.SwiftLineTwo = value\n\tlength += read\n\n\tvalue, read, err = ri.parseVariableStringField(record[length:], 35)\n\tif err != nil {\n\t\treturn fieldError(\"SwiftLineThree\", err)\n\t}\n\tri.CoverPayment.SwiftLineThree = value\n\tlength += read\n\n\tvalue, read, err = ri.parseVariableStringField(record[length:], 35)\n\tif err != nil {\n\t\treturn fieldError(\"SwiftLineFour\", err)\n\t}\n\tri.CoverPayment.SwiftLineFour = value\n\tlength += read\n\n\tif err := ri.verifyDataWithReadLength(record, length); err != nil {\n\t\treturn NewTagMaxLengthErr(err)\n\t}\n\n\treturn nil\n}",
  "func (rft *RemittanceFreeText) Parse(record string) error {\n\tif utf8.RuneCountInString(record) < 6 {\n\t\treturn NewTagMinLengthErr(6, len(record))\n\t}\n\n\trft.tag = record[:6]\n\tlength := 6\n\n\tvalue, read, err := rft.parseVariableStringField(record[length:], 140)\n\tif err != nil {\n\t\treturn fieldError(\"LineOne\", err)\n\t}\n\trft.LineOne = value\n\tlength += read\n\n\tvalue, read, err = rft.parseVariableStringField(record[length:], 140)\n\tif err != nil {\n\t\treturn fieldError(\"LineTwo\", err)\n\t}\n\trft.LineTwo = value\n\tlength += read\n\n\tvalue, read, err = rft.parseVariableStringField(record[length:], 140)\n\tif err != nil {\n\t\treturn fieldError(\"LineThree\", err)\n\t}\n\trft.LineThree = value\n\tlength += read\n\n\tif err := rft.verifyDataWithReadLength(record, length); err != nil {\n\t\treturn NewTagMaxLengthErr(err)\n\t}\n\n\treturn nil\n}",
  "func (h *handler) New(args any) (core.Template, error) {\n\tspawn := args.(*SpawnArguments)\n\tif spawn.InitialUnlockAmount > spawn.TotalAmount {\n\t\treturn nil, fmt.Errorf(\"initial %d should be less or equal to total %d\", spawn.InitialUnlockAmount, spawn.TotalAmount)\n\t}\n\tif spawn.VestingEnd.Before(spawn.VestingStart) {\n\t\treturn nil, fmt.Errorf(\"vesting end %s should be atleast equal to start %s\",\n\t\t\tspawn.VestingEnd, spawn.VestingStart)\n\t}\n\treturn &Vault{\n\t\tOwner:               spawn.Owner,\n\t\tTotalAmount:         spawn.TotalAmount,\n\t\tInitialUnlockAmount: spawn.InitialUnlockAmount,\n\t\tVestingStart:        spawn.VestingStart,\n\t\tVestingEnd:          spawn.VestingEnd,\n\t}, nil\n}",
  "func Parse(str string) (*Version, error) {\n\tif !versionRE.MatchString(str) {\n\t\treturn nil, fmt.Errorf(\"invalid version string '%s'\", str)\n\t}\n\n\tvar v Version\n\tr := strings.NewReader(str)\n\t// Read the major.minor.patch part.\n\t_, err := fmt.Fscanf(r, \"v%d.%d.%d\", &v.major, &v.minor, &v.patch)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"invalid version '%s' passed the regex: %s\", str, err))\n\t}\n\tremaining := str[len(str)-r.Len():]\n\t// Read the pre-release, if present.\n\tif len(remaining) > 0 && remaining[0] == '-' {\n\t\tp := strings.IndexRune(remaining, '+')\n\t\tif p == -1 {\n\t\t\tp = len(remaining)\n\t\t}\n\t\tv.preRelease = remaining[1:p]\n\t\tremaining = remaining[p:]\n\t}\n\t// Read the metadata, if present.\n\tif len(remaining) > 0 {\n\t\tif remaining[0] != '+' {\n\t\t\tpanic(fmt.Sprintf(\"invalid version '%s' passed the regex\", str))\n\t\t}\n\t\tv.metadata = remaining[1:]\n\t}\n\treturn &v, nil\n}",
  "func (klv *KLV) Parse(bytes []byte) error {\n\t// Check length\n\tif len(bytes) != 8 {\n\t\treturn errors.New(\"KLV: Invalid packet length\")\n\t}\n\n\t// Four CC\n\tklv.FourCC = bytes[0:4]\n\tfor _, c := range klv.FourCC {\n\t\tif c < 0x41 || c > 0x5A {\n\t\t\treturn errors.New(\"KLV: Invalid Four CC Character\")\n\t\t}\n\t}\n\n\t// Format\n\tklv.Format = bytes[4]\n\thasFormat := false\n\tfor _, f := range formats {\n\t\thasFormat = hasFormat || (f == klv.Format)\n\t}\n\tif !hasFormat {\n\t\treturn errors.New(\"KLV: Invalid Format Character\")\n\t}\n\n\t// Size & Count\n\tklv.Size = bytes[5]\n\tklv.Count = binary.BigEndian.Uint16(bytes[6:8])\n\n\t// No error\n\treturn nil\n}",
  "func Parse(rawtemplate string) (template *Template, err error) {\n\ttemplate = new(Template)\n\ttemplate.raw = rawtemplate\n\tsplit := strings.Split(rawtemplate, \"{\")\n\ttemplate.parts = make([]templatePart, len(split)*2-1)\n\tfor i, s := range split {\n\t\tif i == 0 {\n\t\t\tif strings.Contains(s, \"}\") {\n\t\t\t\terr = errors.New(\"unexpected }\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttemplate.parts[i].raw = s\n\t\t} else {\n\t\t\tsubsplit := strings.Split(s, \"}\")\n\t\t\tif len(subsplit) != 2 {\n\t\t\t\terr = errors.New(\"malformed template\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\texpression := subsplit[0]\n\t\t\ttemplate.parts[i*2-1], err = parseExpression(expression)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttemplate.parts[i*2].raw = subsplit[1]\n\t\t}\n\t}\n\tif err != nil {\n\t\ttemplate = nil\n\t}\n\treturn template, err\n}",
  "func Parse(s string) (SandID, error) {\n\tsID := SandID{}\n\treturn sID, sID.UnmarshalText([]byte(s))\n}",
  "func Parse(name, text string) (*Parser, error) {\n\tp := NewParser(name)\n\terr := p.Parse(text)\n\tif err != nil {\n\t\tp = nil\n\t}\n\treturn p, err\n}",
  "func New(config *api.Config, mountpoint string, root string, token string, authMethod string, authUser string, authSecret string) (*VaultFS, error) {\n\tclient, err := api.NewClient(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Prompt for a password if none is specified.\n\tif authMethod == \"ldap\" {\n\t\tif authSecret == \"\" {\n\t\t\tpasswordQuery := &survey.Password{\n\t\t\t\tMessage: \"Enter Password (will be hidden):\",\n\t\t\t}\n\t\t\tif err := survey.AskOne(passwordQuery, &authSecret ,nil) ; err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\t// preAuthBackend is used to authenticate\n\tpreAuthBackend := vaultapi.NewVaultLogicalBackend(client, token, authMethod, authUser, authSecret)\n\n\tif err := preAuthBackend.Auth(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &VaultFS{\n\t\tlogical:    preAuthBackend,\n\t\troot:       root,\n\t\tmountpoint: mountpoint,\n\t\tlogger:     log.WithField(\"address\", config.Address),\n\t}, nil\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	EncryptText does not encrypt, just sends the text back as plaintext 
 | 
	func (v nullVault) EncryptText(text string) (string, error) {
	return text, nil
} 
 | 
	[
  "func EncryptText(text, key []byte) ([]byte, error) {\n\tc, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgcm, err := cipher.NewGCM(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnonce := make([]byte, gcm.NonceSize())\n\t_, err = io.ReadFull(cryptoRand.Reader, nonce)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn gcm.Seal(nonce, nonce, text, nil), nil\n}",
  "func EncryptText(c context.Context, api KMSEncryptAPI, input *kms.EncryptInput) (*kms.EncryptOutput, error) {\n\treturn api.Encrypt(c, input)\n}",
  "func (v envVault) EncryptText(text string) (string, error) {\n\treturn EncryptText(text, v.passPhrase)\n}",
  "func Text(secret, plainText string) (*Msg, error) {\n\tsalt, err := Salt()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkey, h := Key(secret, salt)\n\tcipherText, err := text.Encrypt([]byte(plainText), key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm := &Msg{v: cipherText, s: salt, h: h}\n\tm.encode(true)\n\treturn m, nil\n}",
  "func (v passPhraseVault) EncryptText(text string) (string, error) {\n\treturn EncryptText(text, v.passPhrase)\n}",
  "func encrypt(key, text []byte) ([]byte, error) {\n\tblock, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb := base64.StdEncoding.EncodeToString(text)\n\tciphertext := make([]byte, aes.BlockSize+len(b))\n\tiv := ciphertext[:aes.BlockSize]\n\tif _, err := io.ReadFull(rand.Reader, iv); err != nil {\n\t\treturn nil, err\n\t}\n\tcfb := cipher.NewCFBEncrypter(block, iv)\n\tcfb.XORKeyStream(ciphertext[aes.BlockSize:], []byte(b))\n\treturn ciphertext, nil\n}",
  "func (kd *Keydoor) Encrypt(plainText string) (encryptBytes []byte, err error) {\n\tencryptBytes = make([]byte, aes.BlockSize+len(plainText))\n\tiv := encryptBytes[:aes.BlockSize]\n\tif _, err := io.ReadFull(rand.Reader, iv); err != nil {\n\t\treturn encryptBytes, err\n\t}\n\tstream := cipher.NewCTR(*kd.block, iv)\n\tstream.XORKeyStream(encryptBytes[aes.BlockSize:], []byte(plainText))\n\n\treturn encryptBytes, nil\n}",
  "func (e Encrypter) Encrypt(message cipher.PlainContent) (cipher.EncryptedContent, error) {\n\treturn bytesEncode(cipher.EncryptedContent(message)), nil\n}",
  "func (k *keeper) Encrypt(ctx context.Context, plaintext []byte) ([]byte, error) {\n\tsecret, err := k.client.Logical().Write(\n\t\tpath.Join(\"transit/encrypt\", k.keyID),\n\t\tmap[string]interface{}{\n\t\t\t\"plaintext\": plaintext,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn []byte(secret.Data[\"ciphertext\"].(string)), nil\n}",
  "func encrypt(aesKey []byte, plainText []byte, associatedData []byte) ([]byte, error) {\n\tcipher, err := subtle.NewAESGCM(aesKey)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to initialize cipher: %v\", err)\n\t}\n\treturn cipher.Encrypt(plainText, associatedData)\n}",
  "func (ae *AES) Encrypt(encryptText []byte, src []byte) []byte {\n\tae.mutex.Lock()\n\tdefer ae.mutex.Unlock()\n\tdstlen := ae.EncryptSize(len(src))\n\tif len(encryptText) < dstlen {\n\t\tencryptText = make([]byte, 0, dstlen)\n\t}\n\tencryptText = ae.BlockPadding(encryptText, src)\n\tae.blockEncrypt.CryptBlocks(encryptText, encryptText)\n\treturn encryptText\n}",
  "func enc(w string, id int, index int) {\n\tc1, c2 := userEncryption(w)\n\n\tCT := preServerEnc(c1, c2)\n\tX, Y := getOnChainCipher(CT)\n\t// X.Neg(X) \t\t\t\t\t\t\t\t// For bn256 in golang, X should not be the negative of original X\n\tvar indexItem CipherText\n\tindexItem.X = X\n\tindexItem.Y = Y\n\tindexItem.id = id\n\tcipher[index] = indexItem\n}",
  "func (e encryptionScheme) encrypt(plaintext string) (string, error) {\n\tciphertext, err := e.encryptor.Encrypt(e.hashedKey, []byte(plaintext))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn base64.StdEncoding.EncodeToString(ciphertext), nil\n}",
  "func Encrypt(pubKey *PublicKey, plainText []byte) ([]byte, error) {\n\tc, _, err := EncryptAndNonce(pubKey, plainText)\n\treturn c, err\n}",
  "func TestEncryptionEncryptByPassPhrase(t *testing.T) {\n\tpassPhrase := \"123\"\n\tplaintext := []byte{1, 2, 3, 4}\n\n\tciphertextStr, err := encryptByPassPhrase(passPhrase, plaintext)\n\tfmt.Println(\"ciphertextStr : \", ciphertextStr)\n\n\tassert.Equal(t, nil, err)\n\tassert.Greater(t, len(ciphertextStr), 0)\n\n\tplaintext2, err := decryptByPassPhrase(passPhrase, ciphertextStr)\n\tassert.Equal(t, plaintext, plaintext2)\n}",
  "func Encrypt(hash crypto.Hash, publicKeyPem, plainText string, keyParser key_parser.RsaPublicKeyParser,\n\tplainTextEncoder, cipherTextEncoder text_encoder.Encoder) (cipherText string, err error) {\n\tplainTextByte, err := plainTextEncoder.Decode(plainText)\n\tif err != nil {\n\t\terr = errors.New(ErrDecodePlainText + err.Error())\n\t\treturn\n\t}\n\tpublicKey, err := keyParser.ParsePublicKeyFromPemStr(publicKeyPem)\n\tif err != nil {\n\t\treturn\n\t}\n\n\trng := rand.Reader\n\tcipherTextBytes, err := rsa.EncryptOAEP(hash.New(), rng, publicKey, plainTextByte, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tcipherText = cipherTextEncoder.Encode(cipherTextBytes)\n\treturn\n}",
  "func (c AESCBC) Encrypt(plainText []byte) []byte {\n\t// CBC mode always works in whole blocks.\n\tplainText = PadToMultipleNBytes(plainText, len(c.key))\n\tencryptedText := make([]byte, len(plainText))\n\n\tmode := cipher.NewCBCEncrypter(c.block, c.iv)\n\n\tfor i := 0; i < len(plainText)/aes.BlockSize; i++ {\n\t\t// CryptBlocks can work in-place if the two arguments are the same.\n\t\tmode.CryptBlocks(encryptedText[i*aes.BlockSize:(i+1)*aes.BlockSize], plainText[i*aes.BlockSize:(i+1)*aes.BlockSize])\n\t}\n\treturn encryptedText\n}",
  "func Encriptar(text string) string {\n\thash, err := bcrypt.GenerateFromPassword([]byte(text), bcrypt.DefaultCost)\n\tif err != nil {\n\t\tfmt.Println(\"no se pudo encriptar el text\")\n\t\treturn text\n\t}\n\treturn string(hash)\n}",
  "func EncryptHandler(text string) error {\n\tenc, err := encrypt.Encrypt(text)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Info(\"encrypt text\", zap.String(\"encrypted\", string(enc)))\n\treturn nil\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
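A minimal sketch of the vault abstraction these methods plug into, assuming all three vault flavors satisfy one shared interface. The interface name, the package name, and nullVault's DecryptText counterpart are illustrative assumptions, not shown in this row:

package vault

// Vault abstracts text encryption so callers can swap a passphrase-backed
// implementation for a no-op one when encryption is disabled or under test.
// (Hypothetical interface; the dataset rows only show the method sets.)
type Vault interface {
	EncryptText(text string) (string, error)
	DecryptText(text string) (string, error)
}

// nullVault satisfies Vault without encrypting anything. EncryptText mirrors
// the row above; DecryptText is assumed to hand the text back untouched too.
type nullVault struct{}

func (v nullVault) EncryptText(text string) (string, error) { return text, nil }
func (v nullVault) DecryptText(text string) (string, error) { return text, nil }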
					
	EncryptText encrypts text using a passphrase taken from a specific env variable 
 | 
	func (v envVault) EncryptText(text string) (string, error) {
	return EncryptText(text, v.passPhrase)
} 
 | 
	[
  "func (v passPhraseVault) EncryptText(text string) (string, error) {\n\treturn EncryptText(text, v.passPhrase)\n}",
  "func (v nullVault) EncryptText(text string) (string, error) {\n\treturn text, nil\n}",
  "func EncryptText(text, key []byte) ([]byte, error) {\n\tc, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgcm, err := cipher.NewGCM(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnonce := make([]byte, gcm.NonceSize())\n\t_, err = io.ReadFull(cryptoRand.Reader, nonce)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn gcm.Seal(nonce, nonce, text, nil), nil\n}",
  "func EncryptText(c context.Context, api KMSEncryptAPI, input *kms.EncryptInput) (*kms.EncryptOutput, error) {\n\treturn api.Encrypt(c, input)\n}",
  "func TestEncryptionEncryptByPassPhrase(t *testing.T) {\n\tpassPhrase := \"123\"\n\tplaintext := []byte{1, 2, 3, 4}\n\n\tciphertextStr, err := encryptByPassPhrase(passPhrase, plaintext)\n\tfmt.Println(\"ciphertextStr : \", ciphertextStr)\n\n\tassert.Equal(t, nil, err)\n\tassert.Greater(t, len(ciphertextStr), 0)\n\n\tplaintext2, err := decryptByPassPhrase(passPhrase, ciphertextStr)\n\tassert.Equal(t, plaintext, plaintext2)\n}",
  "func Encriptar(text string) string {\n\thash, err := bcrypt.GenerateFromPassword([]byte(text), bcrypt.DefaultCost)\n\tif err != nil {\n\t\tfmt.Println(\"no se pudo encriptar el text\")\n\t\treturn text\n\t}\n\treturn string(hash)\n}",
  "func encrypt(key, text []byte) ([]byte, error) {\n\tblock, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb := base64.StdEncoding.EncodeToString(text)\n\tciphertext := make([]byte, aes.BlockSize+len(b))\n\tiv := ciphertext[:aes.BlockSize]\n\tif _, err := io.ReadFull(rand.Reader, iv); err != nil {\n\t\treturn nil, err\n\t}\n\tcfb := cipher.NewCFBEncrypter(block, iv)\n\tcfb.XORKeyStream(ciphertext[aes.BlockSize:], []byte(b))\n\treturn ciphertext, nil\n}",
  "func (w *KeystoreWallet) SignTextWithPassphrase(passphrase string, text []byte) ([]byte, error) {\n\t// Account seems valid, request the keystore to sign\n\treturn w.Keystore.SignHashWithPassphrase(w.Account, passphrase, accounts.TextHash(text))\n}",
  "func encrypt(aesKey []byte, plainText []byte, associatedData []byte) ([]byte, error) {\n\tcipher, err := subtle.NewAESGCM(aesKey)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to initialize cipher: %v\", err)\n\t}\n\treturn cipher.Encrypt(plainText, associatedData)\n}",
  "func Encrypt(str string) (string, error) {\n\tblock, err := aes.NewCipher([]byte(os.Getenv(\"ENC_KEY\")))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t//IV needs to be unique, but doesn't have to be secure.\n\t//It's common to put it at the beginning of the ciphertext.\n\tcipherText := make([]byte, aes.BlockSize+len(str))\n\tiv := cipherText[:aes.BlockSize]\n\tif _, err = io.ReadFull(rand.Reader, iv); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tstream := cipher.NewCFBEncrypter(block, iv)\n\tstream.XORKeyStream(cipherText[aes.BlockSize:], []byte(str))\n\n\t//returns to base64 encoded string\n\tencmess := base64.URLEncoding.EncodeToString(cipherText)\n\treturn encmess, nil\n}",
  "func SecretPhrase(w http.ResponseWriter, r *http.Request) {\n        unknown := \"Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkg\\n\" +\n                \"aGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBq\\n\" +\n                \"dXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUg\\n\" +\n                \"YnkK\"\n\n\tgetVars, err := url.ParseQuery(r.URL.RawQuery)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdata := getVars.Get(\"input\")\n\tfmt.Println(\"Input:\", data)\n        decoded, _ := base64.StdEncoding.DecodeString(unknown)\n        amended := append([]byte(data), decoded...)\n        padded := AddPadding(amended, keysize)\n\n        w.Write(ECBEncrypt(key, padded))\n}",
  "func Encrypt(pt string) string {\n\tvar (\n\t\tround_keys = make([]string, 16)\n\t)\n\tgenerate_keys(key, &round_keys)\n\tfmt.Printf(\"before encrtypting - %v\\n\", pt)\n\tct := DES(pt, round_keys)\n\tfmt.Printf(\"after  encrtypting - %v\\n\", ct)\n\treturn ct\n}",
  "func (k *keeper) Encrypt(ctx context.Context, plaintext []byte) ([]byte, error) {\n\tsecret, err := k.client.Logical().Write(\n\t\tpath.Join(\"transit/encrypt\", k.keyID),\n\t\tmap[string]interface{}{\n\t\t\t\"plaintext\": plaintext,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn []byte(secret.Data[\"ciphertext\"].(string)), nil\n}",
  "func (v envVault) DecryptText(text string) (string, error) {\n\treturn DecryptText(text, v.passPhrase)\n}",
  "func (o *CTROracle) Encrypt(plaintext string) []byte {\n\ttemp := strings.Replace(plaintext, \"=\", \"'='\", -1)\n\tsanitized := strings.Replace(temp, \";\", \"';'\", -1)\n\ttoEncrypt := \"comment1=cooking%20MCs;userdata=\" + sanitized + \";comment2=%20like%20a%20pound%20of%20bacon\"\n\n\tctr := stream.NewCTR(o.Key, o.Nonce)\n\treturn ctr.Encrypt([]byte(toEncrypt))\n}",
  "func Encrypt(passphrase string, plaintext []byte) (string, error) {\n\tnow := time.Now()\n\tkey, salt, err := deriveKey(passphrase, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tutils.LogDebug(fmt.Sprintf(\"PBKDF2 key derivation took %d ms\", time.Now().Sub(now).Milliseconds()))\n\n\tiv := make([]byte, 12)\n\t// http://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38d.pdf\n\t// Section 8.2\n\t_, err = rand.Read(iv)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tb, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\taesgcm, err := cipher.NewGCM(b)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdata := aesgcm.Seal(nil, iv, plaintext, nil)\n\treturn hex.EncodeToString(salt) + \"-\" + hex.EncodeToString(iv) + \"-\" + hex.EncodeToString(data), nil\n}",
  "func Encrypt(key string, c config.Config) error {\n\tthisVault, err := vault.New(c.VaultPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclr, err := cleartextFromCurses()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvs, err := clr.Encrypt(c.KMSClient, c.KMSKeyARN)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn thisVault.Add(key, vs)\n}",
  "func (i *GPG) Encrypt(\n\tkeys []string,\n\tb []byte,\n) ([]byte, error) {\n\treturn i.cli.Encrypt(i.ctx, b, keys)\n}",
  "func AESEncrypt(key, text []byte) ([]byte, error) {\n\tblock, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb := base64.StdEncoding.EncodeToString(text)\n\tciphertext := make([]byte, aes.BlockSize+len(b))\n\tiv := ciphertext[:aes.BlockSize]\n\tif _, err := io.ReadFull(rand.Reader, iv); err != nil {\n\t\treturn nil, err\n\t}\n\tcfb := cipher.NewCFBEncrypter(block, iv)\n\tcfb.XORKeyStream(ciphertext[aes.BlockSize:], []byte(b))\n\treturn ciphertext, nil\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
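Both envVault and passPhraseVault delegate to a package-level EncryptText(text, passPhrase string) helper that this row does not include. A minimal sketch of such a helper, assuming SHA-256 key derivation, AES-GCM sealing, and base64 output; all three choices are assumptions, not taken from the dataset:

package vault

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"crypto/sha256"
	"encoding/base64"
	"io"
)

// EncryptText derives a 32-byte key from the passphrase via SHA-256,
// seals the text with AES-GCM under a fresh random nonce, and returns
// base64(nonce || ciphertext). Hypothetical helper.
func EncryptText(text, passPhrase string) (string, error) {
	key := sha256.Sum256([]byte(passPhrase))
	block, err := aes.NewCipher(key[:])
	if err != nil {
		return "", err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return "", err
	}
	nonce := make([]byte, gcm.NonceSize())
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		return "", err
	}
	sealed := gcm.Seal(nonce, nonce, []byte(text), nil)
	return base64.StdEncoding.EncodeToString(sealed), nil
}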
					
	DecryptText decrypts text using a passphrase taken from a specific env variable 
 | 
	func (v envVault) DecryptText(text string) (string, error) {
	return DecryptText(text, v.passPhrase)
} 
 | 
	[
  "func (v passPhraseVault) DecryptText(text string) (string, error) {\n\treturn DecryptText(text, v.passPhrase)\n}",
  "func DecryptText(secret string, m *Msg) (string, error) {\n\terr := m.decode(true)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tkey, hash := Key(secret, m.s)\n\tif !hmac.Equal(hash, m.h) {\n\t\treturn \"\", ErrSecret\n\t}\n\tplainText, err := text.Decrypt(m.v, key)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(plainText), nil\n}",
  "func Decrypt(str string) (string, error) {\n\tcipherText, err := base64.URLEncoding.DecodeString(str)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tblock, err := aes.NewCipher([]byte(os.Getenv(\"ENC_KEY\")))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(cipherText) < aes.BlockSize {\n\t\terr = errors.New(\"Ciphertext block size is too short!\")\n\t\treturn \"\", err\n\t}\n\n\t//IV needs to be unique, but doesn't have to be secure.\n\t//It's common to put it at the beginning of the ciphertext.\n\tiv := cipherText[:aes.BlockSize]\n\tcipherText = cipherText[aes.BlockSize:]\n\n\tstream := cipher.NewCFBDecrypter(block, iv)\n\t// XORKeyStream can work in-place if the two arguments are the same.\n\tstream.XORKeyStream(cipherText, cipherText)\n\n\treturn string(cipherText), nil\n}",
  "func runDecrypt(key, data string) error {\n\tbuf := &bytes.Buffer{}\n\tkeyDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(key))\n\tif _, err := io.CopyN(buf, keyDecoder, 32); err != nil {\n\t\treturn fmt.Errorf(\"failed to decode key: %w\", err)\n\t}\n\n\taesCipher, err := aes.NewCipher(buf.Bytes())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create AES cipher: %w\", err)\n\t}\n\n\tbuf = &bytes.Buffer{}\n\tdataDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(data))\n\tif _, err := io.CopyN(buf, dataDecoder, aes.BlockSize); err != nil {\n\t\treturn fmt.Errorf(\"failed to decode IV: %w\", err)\n\t}\n\n\tenvSet := make(map[string]string)\n\treader := cipher.StreamReader{S: cipher.NewOFB(aesCipher, buf.Bytes()), R: dataDecoder}\n\tif err := json.NewDecoder(reader).Decode(&envSet); err != nil {\n\t\treturn fmt.Errorf(\"failed to decode JSON data: %w\", err)\n\t}\n\n\tfor name, value := range envSet {\n\t\tfmt.Printf(\"%s=%q\\n\", name, value)\n\t}\n\n\treturn nil\n}",
  "func (q MockService) Decrypt(encKey string, envVal string) (result string, err error) {\n\tresult = \"Q_Qesb1Z2hA7H94iXu3_buJeQ7416\"\n\terr = nil\n\n\treturn result, err\n}",
  "func Decrypt(encrypted string) (string, error) {\n\tdata, err := base64.StdEncoding.DecodeString(encrypted)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tsecret := os.Getenv(\"TEAM254_SECRET\")\n\tif secret == \"\" {\n\t\treturn \"\", errors.New(\"TEAM254_SECRET environment variable not set.\")\n\t}\n\tsecretDigest := sha256.Sum256([]byte(secret))\n\tblock, err := aes.NewCipher(secretDigest[:])\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tiv := make([]byte, aes.BlockSize)\n\tmode := cipher.NewCBCDecrypter(block, iv)\n\tmode.CryptBlocks(data, data)\n\t// Remove any PKCS#7 padding.\n\tpaddingSize := int(data[len(data)-1])\n\treturn string(data[:len(data)-paddingSize]), nil\n}",
  "func (v envVault) EncryptText(text string) (string, error) {\n\treturn EncryptText(text, v.passPhrase)\n}",
  "func Decrypt(input, passphrase string) (output []byte, decryptError error) {\n\n\tif len(passphrase) < 1 {\n\t\tdecryptError = &InvalidPassphraseError{\"Passphrase length must be greater than zero\"}\n\t\treturn\n\t}\n\n\titems := strings.Split(input, \"#\")\n\tif len(items) != 3 {\n\t\tdecryptError = &InvalidEncryptedDataError{\"Encrypted data must contain salt, iv and data\"}\n\t\treturn\n\t}\n\n\tsalt, err := base64.StdEncoding.DecodeString(items[0])\n\tif err != nil || len(salt) != SALT_LENGTH {\n\t\tdecryptError = &InvalidSaltError{\"Could not derive salt from input\"}\n\t\treturn\n\t}\n\n\tinputBytes, err := base64.StdEncoding.DecodeString(items[2])\n\tif err != nil {\n\t\tdecryptError = &InvalidEncryptedDataError{\"Could not derive cipher text from input\"}\n\t\treturn\n\t}\n\n\t// CBC mode always works in whole blocks.\n\tif len(inputBytes)%aes.BlockSize != 0 {\n\t\tdecryptError = &InvalidEncryptedDataError{\"Cipher text is not a multiple of AES block size\"}\n\t\treturn\n\t}\n\n\tkey := pbkdf2.Key([]byte(passphrase), salt, ITERATION_COUNT, KEY_LENGTH, sha1.New)\n\n\tblock, err := aes.NewCipher(key)\n\tif err != nil {\n\t\tdecryptError = &InvalidAESKeyError{err.Error()}\n\t\treturn\n\t}\n\n\t// Note - IV length is always 16 bytes for AES, regardless of key size\n\tiv, err := base64.StdEncoding.DecodeString(items[1])\n\tif err != nil || len(iv) != block.BlockSize() {\n\t\tdecryptError = &InvalidIVError{\"Could not derive IV from input\"}\n\t\treturn\n\t}\n\n\tcbc := cipher.NewCBCDecrypter(block, iv)\n\tcipherText := make([]byte, len(inputBytes))\n\tcopy(cipherText, inputBytes)\n\tcbc.CryptBlocks(cipherText, cipherText)\n\n\toutput, err = pkcs7.UnpadPKCS7(cipherText, block.BlockSize())\n\tif err != nil {\n\t\tdecryptError = &IncorrectPassphraseError{}\n\t\treturn\n\t}\n\n\treturn\n\n}",
  "func Decrypt(password []byte, decryptionKey string, ttl int) (string, error) {\n\tk, err := fernet.DecodeKeys(decryptionKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tmessage := fernet.VerifyAndDecrypt(password, time.Duration(ttl)*time.Second, k)\n\treturn string(message), err\n}",
  "func PwDecrypt(encrypted, byteSecret []byte) (string, error) {\n\n\tvar secretKey [32]byte\n\tcopy(secretKey[:], byteSecret)\n\n\tvar decryptNonce [24]byte\n\tcopy(decryptNonce[:], encrypted[:24])\n\tdecrypted, ok := secretbox.Open(nil, encrypted[24:], &decryptNonce, &secretKey)\n\tif !ok {\n\t\treturn \"\", errors.New(\"PwDecrypt(secretbox.Open)\")\n\t}\n\n\treturn string(decrypted), nil\n}",
  "func Decrypt(c []byte, q Poracle, l *log.Logger) (string, error) {\n\tn := len(c) / CipherBlockLen\n\tif n < 2 {\n\t\treturn \"\", ErrInvalidCiphertext\n\t}\n\tif len(c)%CipherBlockLen != 0 {\n\t\treturn \"\", ErrInvalidCiphertext\n\t}\n\t// The clear text have the same length as the cyphertext - 1\n\t// (the IV).\n\tvar m []byte\n\tfor i := 1; i < n; i++ {\n\t\tc0 := c[(i-1)*CipherBlockLen : CipherBlockLen*(i-1)+CipherBlockLen]\n\t\tc1 := c[CipherBlockLen*i : (CipherBlockLen*i)+CipherBlockLen]\n\t\tl.Printf(\"\\ndecripting block %d of %d\", i, n)\n\t\tmi, err := decryptBlock(c0, c1, q, l)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tm = append(m, mi...)\n\t}\n\treturn string(m), nil\n}",
  "func decrypt(aesKey []byte, cipherText []byte, associatedData []byte) ([]byte, error) {\n\tcipher, err := subtle.NewAESGCM(aesKey)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to initialize cipher: %v\", err)\n\t}\n\treturn cipher.Decrypt(cipherText, associatedData)\n}",
  "func Decrypt(cipherText string, privateKey *rsa.PrivateKey) ([]byte, error) {\n\tbytes, err := rsa.DecryptPKCS1v15(rand.Reader, privateKey, []byte(cipherText))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bytes, nil\n}",
  "func DecryptText(ciphertext, key []byte) ([]byte, error) {\n\tc, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgcm, err := cipher.NewGCM(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnonceSize := gcm.NonceSize()\n\tif len(ciphertext) < nonceSize {\n\t\treturn nil, errors.New(\"Invalid ciphertext\")\n\t}\n\tnonce, ciphertext := ciphertext[:nonceSize], ciphertext[nonceSize:]\n\tplaintext, err := gcm.Open(nil, nonce, ciphertext, nil)\n\n\treturn plaintext, err\n}",
  "func (v *Vault) Decipher(value string) string {\n\ts5Engine := s5Vault.Client{\n\t\tClient: v.Client,\n\t\tConfig: &s5Vault.Config{\n\t\t\tKey: s.Vault.TransitKey,\n\t\t},\n\t}\n\n\tparsedInput, err := s5.ParseInput(value)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdecipheredValue, err := s5Engine.Decipher(parsedInput)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn decipheredValue\n}",
  "func doDecrypt(ctx context.Context, data string, subscriptionID string, providerVaultName string, providerKeyName string, providerKeyVersion string) ([]byte, error) {\n\tkvClient, vaultBaseUrl, keyName, keyVersion, err := getKey(subscriptionID, providerVaultName, providerKeyName, providerKeyVersion)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tparameter := kv.KeyOperationsParameters {\n\t\tAlgorithm: kv.RSA15,\n\t\tValue: &data,\n\t}\n\t\n\tresult, err := kvClient.Decrypt(vaultBaseUrl, keyName, keyVersion, parameter)\n\tif err != nil {\n\t\tfmt.Print(\"failed to decrypt, error: \", err)\n\t\treturn nil, err\n\t}\n\tbytes, err := base64.RawURLEncoding.DecodeString(*result.Result)\n\treturn bytes, nil\n}",
  "func decrypt(c *ishell.Context) {\n\tps.Decrypt = !ps.Decrypt\n\tshell.Println(\"Decrypt is\", ps.Decrypt)\n}",
  "func Decrypt(key []byte) []byte {\n\tdecryptedbin, err := dpapi.DecryptBytes(key)\n\terror_log.Check(err, \"Unprotect String with DPAPI\", \"decrypter\")\n\treturn decryptedbin\n}",
  "func Decrypt(hash crypto.Hash, privateKeyPem, cipherText string, keyParser key_parser.RsaPrivateKeyParser,\n\tcipherTextEncoder, plainTextEncoder text_encoder.Encoder) (plainText string, err error) {\n\tcipherTextBytes, err := cipherTextEncoder.Decode(cipherText)\n\tif err != nil {\n\t\terr = errors.New(ErrDecodeCipherText + err.Error())\n\t\treturn\n\t}\n\tprivateKey, err := keyParser.ParsePrivateKeyFromPemStr(privateKeyPem)\n\tif err != nil {\n\t\treturn\n\t}\n\n\trng := rand.Reader\n\tplainTextBytes, err := rsa.DecryptOAEP(hash.New(), rng, privateKey, cipherTextBytes, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tplainText = plainTextEncoder.Encode(plainTextBytes)\n\treturn\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
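The matching package-level DecryptText helper is likewise not shown; under the same assumptions it would invert the EncryptText sketch above, base64-decoding the input, splitting off the GCM nonce, and opening the ciphertext with the SHA-256-derived key:

package vault

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/sha256"
	"encoding/base64"
	"errors"
)

// DecryptText is the hypothetical inverse of the EncryptText sketch above.
func DecryptText(text, passPhrase string) (string, error) {
	raw, err := base64.StdEncoding.DecodeString(text)
	if err != nil {
		return "", err
	}
	key := sha256.Sum256([]byte(passPhrase))
	block, err := aes.NewCipher(key[:])
	if err != nil {
		return "", err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return "", err
	}
	if len(raw) < gcm.NonceSize() {
		return "", errors.New("ciphertext too short")
	}
	nonce, ciphertext := raw[:gcm.NonceSize()], raw[gcm.NonceSize():]
	plain, err := gcm.Open(nil, nonce, ciphertext, nil)
	if err != nil {
		return "", err
	}
	return string(plain), nil
}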
					
	EncryptText encrypts text using a literal passphrase 
 | 
	func (v passPhraseVault) EncryptText(text string) (string, error) {
	return EncryptText(text, v.passPhrase)
} 
 | 
	[
  "func (v envVault) EncryptText(text string) (string, error) {\n\treturn EncryptText(text, v.passPhrase)\n}",
  "func (v nullVault) EncryptText(text string) (string, error) {\n\treturn text, nil\n}",
  "func EncryptText(text, key []byte) ([]byte, error) {\n\tc, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgcm, err := cipher.NewGCM(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnonce := make([]byte, gcm.NonceSize())\n\t_, err = io.ReadFull(cryptoRand.Reader, nonce)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn gcm.Seal(nonce, nonce, text, nil), nil\n}",
  "func EncryptText(c context.Context, api KMSEncryptAPI, input *kms.EncryptInput) (*kms.EncryptOutput, error) {\n\treturn api.Encrypt(c, input)\n}",
  "func (w *KeystoreWallet) SignTextWithPassphrase(passphrase string, text []byte) ([]byte, error) {\n\t// Account seems valid, request the keystore to sign\n\treturn w.Keystore.SignHashWithPassphrase(w.Account, passphrase, accounts.TextHash(text))\n}",
  "func TestEncryptionEncryptByPassPhrase(t *testing.T) {\n\tpassPhrase := \"123\"\n\tplaintext := []byte{1, 2, 3, 4}\n\n\tciphertextStr, err := encryptByPassPhrase(passPhrase, plaintext)\n\tfmt.Println(\"ciphertextStr : \", ciphertextStr)\n\n\tassert.Equal(t, nil, err)\n\tassert.Greater(t, len(ciphertextStr), 0)\n\n\tplaintext2, err := decryptByPassPhrase(passPhrase, ciphertextStr)\n\tassert.Equal(t, plaintext, plaintext2)\n}",
  "func Text(secret, plainText string) (*Msg, error) {\n\tsalt, err := Salt()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkey, h := Key(secret, salt)\n\tcipherText, err := text.Encrypt([]byte(plainText), key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm := &Msg{v: cipherText, s: salt, h: h}\n\tm.encode(true)\n\treturn m, nil\n}",
  "func encrypt(key, text []byte) ([]byte, error) {\n\tblock, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb := base64.StdEncoding.EncodeToString(text)\n\tciphertext := make([]byte, aes.BlockSize+len(b))\n\tiv := ciphertext[:aes.BlockSize]\n\tif _, err := io.ReadFull(rand.Reader, iv); err != nil {\n\t\treturn nil, err\n\t}\n\tcfb := cipher.NewCFBEncrypter(block, iv)\n\tcfb.XORKeyStream(ciphertext[aes.BlockSize:], []byte(b))\n\treturn ciphertext, nil\n}",
  "func Encriptar(text string) string {\n\thash, err := bcrypt.GenerateFromPassword([]byte(text), bcrypt.DefaultCost)\n\tif err != nil {\n\t\tfmt.Println(\"no se pudo encriptar el text\")\n\t\treturn text\n\t}\n\treturn string(hash)\n}",
  "func (kd *Keydoor) Encrypt(plainText string) (encryptBytes []byte, err error) {\n\tencryptBytes = make([]byte, aes.BlockSize+len(plainText))\n\tiv := encryptBytes[:aes.BlockSize]\n\tif _, err := io.ReadFull(rand.Reader, iv); err != nil {\n\t\treturn encryptBytes, err\n\t}\n\tstream := cipher.NewCTR(*kd.block, iv)\n\tstream.XORKeyStream(encryptBytes[aes.BlockSize:], []byte(plainText))\n\n\treturn encryptBytes, nil\n}",
  "func Encrypt(passphrase string, plaintext []byte) (string, error) {\n\tnow := time.Now()\n\tkey, salt, err := deriveKey(passphrase, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tutils.LogDebug(fmt.Sprintf(\"PBKDF2 key derivation took %d ms\", time.Now().Sub(now).Milliseconds()))\n\n\tiv := make([]byte, 12)\n\t// http://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38d.pdf\n\t// Section 8.2\n\t_, err = rand.Read(iv)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tb, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\taesgcm, err := cipher.NewGCM(b)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdata := aesgcm.Seal(nil, iv, plaintext, nil)\n\treturn hex.EncodeToString(salt) + \"-\" + hex.EncodeToString(iv) + \"-\" + hex.EncodeToString(data), nil\n}",
  "func (o *CTROracle) Encrypt(plaintext string) []byte {\n\ttemp := strings.Replace(plaintext, \"=\", \"'='\", -1)\n\tsanitized := strings.Replace(temp, \";\", \"';'\", -1)\n\ttoEncrypt := \"comment1=cooking%20MCs;userdata=\" + sanitized + \";comment2=%20like%20a%20pound%20of%20bacon\"\n\n\tctr := stream.NewCTR(o.Key, o.Nonce)\n\treturn ctr.Encrypt([]byte(toEncrypt))\n}",
  "func encrypt(aesKey []byte, plainText []byte, associatedData []byte) ([]byte, error) {\n\tcipher, err := subtle.NewAESGCM(aesKey)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to initialize cipher: %v\", err)\n\t}\n\treturn cipher.Encrypt(plainText, associatedData)\n}",
  "func EncryptString(plaintext string, key []byte) ([]byte, error) {\n\tblock, e := aes.NewCipher(key)\n\tif e != nil {\n\t\treturn []byte{}, e\n\t}\n\tiv, e := GenerateRandomBytes(aes.BlockSize)\n\n\tmode := cipher.NewCBCEncrypter(block, iv) //, iv []byte) NewGCM(block) //, iv)\n\n\tpt, e := Pkcs7Pad([]byte(plaintext), aes.BlockSize)\n\tct := make([]byte, len(pt))\n\n\tmode.CryptBlocks(ct, pt)\n\n\tct = append(iv, ct...)\n\n\t//authenticate\n\tmac := hmac.New(crypto.SHA256.New, key)\n\tmac.Write(ct)\n\tmmac := mac.Sum(nil)\n\n\tct = append(mmac, ct...)\n\n\treturn ct, nil\n}",
  "func Encrypt(pt string) string {\n\tvar (\n\t\tround_keys = make([]string, 16)\n\t)\n\tgenerate_keys(key, &round_keys)\n\tfmt.Printf(\"before encrtypting - %v\\n\", pt)\n\tct := DES(pt, round_keys)\n\tfmt.Printf(\"after  encrtypting - %v\\n\", ct)\n\treturn ct\n}",
  "func EncryptString(plainText string, key []byte, iv []byte) []byte {\n\tplainTextAsBytes := []byte(plainText)\n\treturn Encrypt(plainTextAsBytes, key, iv)\n}",
  "func (o *OpenSSL) EncryptString(passphrase, plaintextString string) ([]byte, error) {\n\tsalt := make([]byte, 8) // Generate an 8 byte salt\n\t_, err := io.ReadFull(rand.Reader, salt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn o.EncryptStringWithSalt(passphrase, salt, plaintextString)\n}",
  "func Encrypt(hash crypto.Hash, publicKeyPem, plainText string, keyParser key_parser.RsaPublicKeyParser,\n\tplainTextEncoder, cipherTextEncoder text_encoder.Encoder) (cipherText string, err error) {\n\tplainTextByte, err := plainTextEncoder.Decode(plainText)\n\tif err != nil {\n\t\terr = errors.New(ErrDecodePlainText + err.Error())\n\t\treturn\n\t}\n\tpublicKey, err := keyParser.ParsePublicKeyFromPemStr(publicKeyPem)\n\tif err != nil {\n\t\treturn\n\t}\n\n\trng := rand.Reader\n\tcipherTextBytes, err := rsa.EncryptOAEP(hash.New(), rng, publicKey, plainTextByte, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tcipherText = cipherTextEncoder.Encode(cipherTextBytes)\n\treturn\n}",
  "func encryptPassword(password string) {\n\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
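How a caller might choose among the three vault flavors: the struct fields below match the methods shown in these rows, but the factory itself and its precedence order are illustrative assumptions:

package vault

import "os"

type passPhraseVault struct{ passPhrase string }
type envVault struct{ passPhrase string }

// NewVault is a hypothetical factory: a literal passphrase wins, then a
// passphrase read from the named environment variable, and otherwise
// encryption is disabled via nullVault.
func NewVault(passPhrase, envVar string) Vault {
	switch {
	case passPhrase != "":
		return passPhraseVault{passPhrase: passPhrase}
	case envVar != "" && os.Getenv(envVar) != "":
		return envVault{passPhrase: os.Getenv(envVar)}
	default:
		return nullVault{}
	}
}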
					
	DecryptText decrypts text using a literal passphrase 
 | 
	func (v passPhraseVault) DecryptText(text string) (string, error) {
	return DecryptText(text, v.passPhrase)
} 
 | 
	[
  "func (v envVault) DecryptText(text string) (string, error) {\n\treturn DecryptText(text, v.passPhrase)\n}",
  "func DecryptText(secret string, m *Msg) (string, error) {\n\terr := m.decode(true)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tkey, hash := Key(secret, m.s)\n\tif !hmac.Equal(hash, m.h) {\n\t\treturn \"\", ErrSecret\n\t}\n\tplainText, err := text.Decrypt(m.v, key)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(plainText), nil\n}",
  "func DecryptText(ciphertext, key []byte) ([]byte, error) {\n\tc, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgcm, err := cipher.NewGCM(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnonceSize := gcm.NonceSize()\n\tif len(ciphertext) < nonceSize {\n\t\treturn nil, errors.New(\"Invalid ciphertext\")\n\t}\n\tnonce, ciphertext := ciphertext[:nonceSize], ciphertext[nonceSize:]\n\tplaintext, err := gcm.Open(nil, nonce, ciphertext, nil)\n\n\treturn plaintext, err\n}",
  "func Decrypt(input, passphrase string) (output []byte, decryptError error) {\n\n\tif len(passphrase) < 1 {\n\t\tdecryptError = &InvalidPassphraseError{\"Passphrase length must be greater than zero\"}\n\t\treturn\n\t}\n\n\titems := strings.Split(input, \"#\")\n\tif len(items) != 3 {\n\t\tdecryptError = &InvalidEncryptedDataError{\"Encrypted data must contain salt, iv and data\"}\n\t\treturn\n\t}\n\n\tsalt, err := base64.StdEncoding.DecodeString(items[0])\n\tif err != nil || len(salt) != SALT_LENGTH {\n\t\tdecryptError = &InvalidSaltError{\"Could not derive salt from input\"}\n\t\treturn\n\t}\n\n\tinputBytes, err := base64.StdEncoding.DecodeString(items[2])\n\tif err != nil {\n\t\tdecryptError = &InvalidEncryptedDataError{\"Could not derive cipher text from input\"}\n\t\treturn\n\t}\n\n\t// CBC mode always works in whole blocks.\n\tif len(inputBytes)%aes.BlockSize != 0 {\n\t\tdecryptError = &InvalidEncryptedDataError{\"Cipher text is not a multiple of AES block size\"}\n\t\treturn\n\t}\n\n\tkey := pbkdf2.Key([]byte(passphrase), salt, ITERATION_COUNT, KEY_LENGTH, sha1.New)\n\n\tblock, err := aes.NewCipher(key)\n\tif err != nil {\n\t\tdecryptError = &InvalidAESKeyError{err.Error()}\n\t\treturn\n\t}\n\n\t// Note - IV length is always 16 bytes for AES, regardless of key size\n\tiv, err := base64.StdEncoding.DecodeString(items[1])\n\tif err != nil || len(iv) != block.BlockSize() {\n\t\tdecryptError = &InvalidIVError{\"Could not derive IV from input\"}\n\t\treturn\n\t}\n\n\tcbc := cipher.NewCBCDecrypter(block, iv)\n\tcipherText := make([]byte, len(inputBytes))\n\tcopy(cipherText, inputBytes)\n\tcbc.CryptBlocks(cipherText, cipherText)\n\n\toutput, err = pkcs7.UnpadPKCS7(cipherText, block.BlockSize())\n\tif err != nil {\n\t\tdecryptError = &IncorrectPassphraseError{}\n\t\treturn\n\t}\n\n\treturn\n\n}",
  "func Decrypt(c []byte, q Poracle, l *log.Logger) (string, error) {\n\tn := len(c) / CipherBlockLen\n\tif n < 2 {\n\t\treturn \"\", ErrInvalidCiphertext\n\t}\n\tif len(c)%CipherBlockLen != 0 {\n\t\treturn \"\", ErrInvalidCiphertext\n\t}\n\t// The clear text have the same length as the cyphertext - 1\n\t// (the IV).\n\tvar m []byte\n\tfor i := 1; i < n; i++ {\n\t\tc0 := c[(i-1)*CipherBlockLen : CipherBlockLen*(i-1)+CipherBlockLen]\n\t\tc1 := c[CipherBlockLen*i : (CipherBlockLen*i)+CipherBlockLen]\n\t\tl.Printf(\"\\ndecripting block %d of %d\", i, n)\n\t\tmi, err := decryptBlock(c0, c1, q, l)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tm = append(m, mi...)\n\t}\n\treturn string(m), nil\n}",
  "func (v envVault) EncryptText(text string) (string, error) {\n\treturn EncryptText(text, v.passPhrase)\n}",
  "func Decrypt(hash crypto.Hash, privateKeyPem, cipherText string, keyParser key_parser.RsaPrivateKeyParser,\n\tcipherTextEncoder, plainTextEncoder text_encoder.Encoder) (plainText string, err error) {\n\tcipherTextBytes, err := cipherTextEncoder.Decode(cipherText)\n\tif err != nil {\n\t\terr = errors.New(ErrDecodeCipherText + err.Error())\n\t\treturn\n\t}\n\tprivateKey, err := keyParser.ParsePrivateKeyFromPemStr(privateKeyPem)\n\tif err != nil {\n\t\treturn\n\t}\n\n\trng := rand.Reader\n\tplainTextBytes, err := rsa.DecryptOAEP(hash.New(), rng, privateKey, cipherTextBytes, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tplainText = plainTextEncoder.Encode(plainTextBytes)\n\treturn\n}",
  "func (v passPhraseVault) EncryptText(text string) (string, error) {\n\treturn EncryptText(text, v.passPhrase)\n}",
  "func Decrypt(str string) (string, error) {\n\tcipherText, err := base64.URLEncoding.DecodeString(str)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tblock, err := aes.NewCipher([]byte(os.Getenv(\"ENC_KEY\")))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(cipherText) < aes.BlockSize {\n\t\terr = errors.New(\"Ciphertext block size is too short!\")\n\t\treturn \"\", err\n\t}\n\n\t//IV needs to be unique, but doesn't have to be secure.\n\t//It's common to put it at the beginning of the ciphertext.\n\tiv := cipherText[:aes.BlockSize]\n\tcipherText = cipherText[aes.BlockSize:]\n\n\tstream := cipher.NewCFBDecrypter(block, iv)\n\t// XORKeyStream can work in-place if the two arguments are the same.\n\tstream.XORKeyStream(cipherText, cipherText)\n\n\treturn string(cipherText), nil\n}",
  "func decrypt(aesKey []byte, cipherText []byte, associatedData []byte) ([]byte, error) {\n\tcipher, err := subtle.NewAESGCM(aesKey)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to initialize cipher: %v\", err)\n\t}\n\treturn cipher.Decrypt(cipherText, associatedData)\n}",
  "func Decrypt(cipherText string, privateKey *rsa.PrivateKey) ([]byte, error) {\n\tbytes, err := rsa.DecryptPKCS1v15(rand.Reader, privateKey, []byte(cipherText))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bytes, nil\n}",
  "func (v nullVault) EncryptText(text string) (string, error) {\n\treturn text, nil\n}",
  "func Decrypt(password []byte, decryptionKey string, ttl int) (string, error) {\n\tk, err := fernet.DecodeKeys(decryptionKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tmessage := fernet.VerifyAndDecrypt(password, time.Duration(ttl)*time.Second, k)\n\treturn string(message), err\n}",
  "func (ae *NonceAES) Decrypt(decryptText []byte, src []byte) ([]byte, error) {\n\tae.mutex.Lock()\n\tdefer ae.mutex.Unlock()\n\tsrclen := len(src) - ae.hdrlen\n\tif srclen%aes.BlockSize != 0 {\n\t\treturn nil, fmt.Errorf(\"NonceAES Decrypt, invalid input length, %d % %d = %d(should be zero, nonce cut off)\", srclen, aes.BlockSize, srclen%aes.BlockSize)\n\t}\n\tif len(decryptText) < srclen {\n\t\tdecryptText = make([]byte, srclen)\n\t}\n\tdecryptText = decryptText[:srclen]\n\t// get nonce, no error handle\n\tbinary.Read(NewBRWC(src[:ae.hdrlen]), binary.BigEndian, &ae.nonce)\n\t// update nonce iv\n\tae.iv, _ = ae.ivnonce(ae.nonce)\n\t//fmt.Printf(\"NonceAES, Decrypt IV(%d): %x\\n\", ae.nonce, ae.iv)\n\tae.blockDecrypt = cipher.NewCBCDecrypter(ae.block, ae.iv)\n\tae.blockDecrypt.CryptBlocks(decryptText, src[ae.hdrlen:])\n\tvar err error\n\tdecryptText, err = ae.BlockUnPadding(decryptText)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn decryptText, nil\n}",
  "func PwDecrypt(encrypted, byteSecret []byte) (string, error) {\n\n\tvar secretKey [32]byte\n\tcopy(secretKey[:], byteSecret)\n\n\tvar decryptNonce [24]byte\n\tcopy(decryptNonce[:], encrypted[:24])\n\tdecrypted, ok := secretbox.Open(nil, encrypted[24:], &decryptNonce, &secretKey)\n\tif !ok {\n\t\treturn \"\", errors.New(\"PwDecrypt(secretbox.Open)\")\n\t}\n\n\treturn string(decrypted), nil\n}",
  "func (ae *AES) Decrypt(decryptText []byte, src []byte) ([]byte, error) {\n\tae.mutex.Lock()\n\tdefer ae.mutex.Unlock()\n\tsrclen := len(src)\n\tif srclen%aes.BlockSize != 0 {\n\t\treturn nil, fmt.Errorf(\"AES Decrypt, invalid input length, %d % %d = %d(should be zero)\", srclen, aes.BlockSize, srclen%aes.BlockSize)\n\t}\n\tif len(decryptText) < srclen {\n\t\tdecryptText = make([]byte, srclen)\n\t}\n\tae.blockDecrypt.CryptBlocks(decryptText, src)\n\tvar err error\n\tdecryptText, err = ae.BlockUnPadding(decryptText)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn decryptText, nil\n}",
  "func (c AESCBC) Decrypt(cipherText []byte) []byte {\n\t// CBC mode always works in whole blocks.\n\tif len(cipherText)%aes.BlockSize != 0 {\n\t\tpanic(\"ciphertext is not a multiple of the block size\")\n\t}\n\n\tdecryptedText := make([]byte, len(cipherText))\n\n\tmode := cipher.NewCBCDecrypter(c.block, c.iv)\n\n\tfor i := 0; i < len(cipherText)/aes.BlockSize; i++ {\n\t\tmode.CryptBlocks(decryptedText[i*aes.BlockSize:(i+1)*aes.BlockSize], cipherText[i*aes.BlockSize:(i+1)*aes.BlockSize])\n\t}\n\treturn decryptedText\n}",
  "func Decrypt(passphrase string, ciphertext []byte) (string, error) {\n\tarr := strings.Split(string(ciphertext), \"-\")\n\tsalt, err := hex.DecodeString(arr[0])\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tiv, err := hex.DecodeString(arr[1])\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdata, err := hex.DecodeString(arr[2])\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tkey, _, err := deriveKey(passphrase, salt)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tb, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\taesgcm, err := cipher.NewGCM(b)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdata, err = aesgcm.Open(nil, iv, data, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(data), nil\n}",
  "func (k *keeper) Decrypt(ctx context.Context, ciphertext []byte) ([]byte, error) {\n\tout, err := k.client.Logical().Write(\n\t\tpath.Join(\"transit/decrypt\", k.keyID),\n\t\tmap[string]interface{}{\n\t\t\t\"ciphertext\": string(ciphertext),\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn base64.StdEncoding.DecodeString(out.Data[\"plaintext\"].(string))\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
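A short round-trip check tying the sketches together; it assumes the hypothetical NewVault factory above:

v := NewVault("correct horse battery staple", "")
ciphertext, err := v.EncryptText("hello")
if err != nil {
	log.Fatal(err)
}
plaintext, err := v.DecryptText(ciphertext)
if err != nil {
	log.Fatal(err)
}
fmt.Println(plaintext) // prints "hello"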
					
	GetChanHistory returns a slice of messages 
 | 
	func GetChanHistory(tdlibClient *client.Client, chatID int64, fromMessageID int64, toMessageID int64) (messages []*client.Message) {
	var totalMessages int
	messagesSet := make(map[int]*client.Message)
	totalLimit := 99999999999
	// Read the first (newest) message separately, because the messageReading
	// loop below does not return the fromMessageID message itself.
	if fromMessageID != 0 {
		lastMessage, err := tdlibClient.GetMessage(&client.GetMessageRequest{ChatId: chatID, MessageId: fromMessageID})
		checkError(err, "Getting chan history")
		messagesSet[int(lastMessage.Id)] = lastMessage
	}
messageReading:
	for {
		fmt.Println("Retriving messages from ", fromMessageID, "..")
		chanHistory, err := tdlibClient.GetChatHistory(&client.GetChatHistoryRequest{
			ChatId:        chatID,
			Limit:         100,
			OnlyLocal:     false,
			FromMessageId: fromMessageID,
		})
		checkError(err, "Getting chan history")
		if chanHistory.TotalCount == 0 {
			break
		}
		for _, m := range chanHistory.Messages {
			if totalLimit > 0 && totalMessages >= totalLimit {
				break messageReading
			}
			// Stop once the requested toMessageID is reached
			if toMessageID == m.Id {
				break messageReading
			}
			totalMessages++
			// Read next set of messages
			fromMessageID = m.Id
			messagesSet[int(m.Id)] = m
		}
	}
	messagesIDsSorted := make([]int, 0, len(messagesSet))
	for k := range messagesSet {
		messagesIDsSorted = append(messagesIDsSorted, k)
	}
	sort.Ints(messagesIDsSorted)
	for _, i := range messagesIDsSorted {
		messages = append(messages, messagesSet[i])
	}
	return
} 
 | 
	[
  "func History(ctx context.Context, channelName string) ([]*types.Message, error) {\n\tchannel := getChannel(channelName)\n\tmessages := messagesFromHistory(channel.History)\n\n\treturn messages, nil\n}",
  "func (m *ChatMessage) GetMessageHistory()([]ChatMessageHistoryItemable) {\n    val, err := m.GetBackingStore().Get(\"messageHistory\")\n    if err != nil {\n        panic(err)\n    }\n    if val != nil {\n        return val.([]ChatMessageHistoryItemable)\n    }\n    return nil\n}",
  "func (e *TarantoolEngine) history(chID ChannelID) (msgs []Message, err error) {\n\tconn, err := e.pool.get()\n\tif err != nil {\n\t\tlogger.ERROR.Printf(\"history tarantool pool error: %v\\n\", err.Error())\n\t\treturn nil, err\n\t}\n\n\thistory, err := conn.Call(\"notification_channel_history\", []interface{}{chID})\n\tif err != nil {\n\t\tlogger.ERROR.Printf(\"history error: %v\\n\", err.Error())\n\t\treturn nil, err\n\t}\n\n\treturn processHistory(history)\n}",
  "func (c *Client) GetChatHistory(ctx context.Context, request *GetChatHistoryRequest) (*Messages, error) {\n\tvar result Messages\n\n\tif err := c.rpc.Invoke(ctx, request, &result); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &result, nil\n}",
  "func (f *ConnSendFunc) History() []ConnSendFuncCall {\n\treturn f.history\n}",
  "func (f *GitserverClientRawContentsFunc) History() []GitserverClientRawContentsFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]GitserverClientRawContentsFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
  "func (f *BundleManagerClientSendDBFunc) History() []BundleManagerClientSendDBFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]BundleManagerClientSendDBFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
  "func (d *Discord) MessageHistory(channel string) []Message {\n\tc, err := d.Channel(channel)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tmessages := make([]Message, len(c.Messages))\n\tfor i := 0; i < len(c.Messages); i++ {\n\t\tmessages[i] = &DiscordMessage{\n\t\t\tDiscord:          d,\n\t\t\tDiscordgoMessage: c.Messages[i],\n\t\t\tMessageType:      MessageTypeCreate,\n\t\t}\n\t}\n\n\treturn messages\n}",
  "func (f *BundleManagerClientSendUploadFunc) History() []BundleManagerClientSendUploadFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]BundleManagerClientSendUploadFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
  "func messagesFromHistory(history *tailbuf.TailBuf) []*types.Message {\n\tdata := history.Read()\n\tresult := make([]*types.Message, len(data))\n\n\tfor i, value := range data {\n\t\tresult[i] = value.(*types.Message)\n\t}\n\n\treturn result\n}",
  "func (f *WorkerStoreDequeueFunc) History() []WorkerStoreDequeueFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]WorkerStoreDequeueFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
  "func (pub *Pubnub) History(channel string, limit int, start int64, end int64, reverse bool, callbackChannel chan []byte, errorChannel chan []byte) {\n\tcheckCallbackNil(callbackChannel, false, \"History\")\n\tcheckCallbackNil(errorChannel, true, \"History\")\n\n\tpub.executeHistory(channel, limit, start, end, reverse, callbackChannel, errorChannel, 0)\n}",
  "func (c Clipboard) GetHistory() ([]string, error) {\n\titems, err := c.Storage.GetHistory(c.HistorySize)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn items, nil\n}",
  "func (p *RestPresence) History(params *PaginateParams) (*PaginatedResult, error) {\n\tpath := \"/channels/\" + p.channel.uriName + \"/presence/history\"\n\treturn newPaginatedResult(presMsgType, path, params, query(p.client.get), p.logger())\n}",
  "func (f *WorkerStoreHeartbeatFunc) History() []WorkerStoreHeartbeatFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]WorkerStoreHeartbeatFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
  "func (f *WorkerStoreQueuedCountFunc) History() []WorkerStoreQueuedCountFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]WorkerStoreQueuedCountFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
  "func (f *BundleManagerClientGetUploadFunc) History() []BundleManagerClientGetUploadFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]BundleManagerClientGetUploadFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
  "func (s *Client) GetHistory(username string) (*sessions.History, error) {\n\tdata := &sessions.History{\n\t\tInput: []string{},\n\t\tReply: []string{},\n\t}\n\n\tfor i := 0; i < sessions.HistorySize; i++ {\n\t\tdata.Input = append(data.Input, \"undefined\")\n\t\tdata.Reply = append(data.Reply, \"undefined\")\n\t}\n\n\trows, err := s.db.Query(\"SELECT input,reply FROM history WHERE user_id = (SELECT id FROM users WHERE username = ?) ORDER BY timestamp ASC LIMIT 10;\", username)\n\tif err != nil {\n\t\treturn data, err\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tvar input, reply string\n\t\terr := rows.Scan(&input, &reply)\n\t\tif err != nil {\n\t\t\tlog.Println(\"[ERROR]\", err)\n\t\t\tcontinue\n\t\t}\n\t\tdata.Input = data.Input[:len(data.Input)-1]                            // Pop\n\t\tdata.Input = append([]string{strings.TrimSpace(input)}, data.Input...) // Unshift\n\t\tdata.Reply = data.Reply[:len(data.Reply)-1]                            // Pop\n\t\tdata.Reply = append([]string{strings.TrimSpace(reply)}, data.Reply...) // Unshift\n\n\t}\n\n\treturn data, nil\n}",
  "func (f *StoreQueuedCountFunc) History() []StoreQueuedCountFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]StoreQueuedCountFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
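A usage sketch for GetChanHistory: with fromMessageID=0 the loop starts at the newest message, and toMessageID=0 never matches, so it walks the whole channel. The tdlibClient and chatID values are assumed to come from an already-authorized go-tdlib session:

msgs := GetChanHistory(tdlibClient, chatID, 0, 0)
fmt.Println("fetched", len(msgs), "messages, oldest first")
for _, m := range msgs {
	fmt.Println(m.Id, m.Date)
}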
					
	NewBackend is the exported constructor by which the DEX will import the DCRBackend. The provided context.Context should be cancelled when the DEX application exits. If configPath is an empty string, the backend will attempt to read the settings directly from the dcrd config file in its default system location. 
 | 
	func NewBackend(ctx context.Context, configPath string, logger dex.Logger, network dex.Network) (*DCRBackend, error) {
	// loadConfig will set fields if defaults are used and set the chainParams
	// package variable.
	cfg, err := loadConfig(configPath, network)
	if err != nil {
		return nil, err
	}
	dcr := unconnectedDCR(ctx, logger)
	notifications := &rpcclient.NotificationHandlers{
		OnBlockConnected: dcr.onBlockConnected,
	}
	// When the exported constructor is used, the node will be an
	// rpcclient.Client.
	dcr.client, err = connectNodeRPC(cfg.RPCListen, cfg.RPCUser, cfg.RPCPass,
		cfg.RPCCert, notifications)
	if err != nil {
		return nil, err
	}
	err = dcr.client.NotifyBlocks()
	if err != nil {
		return nil, fmt.Errorf("error registering for block notifications")
	}
	dcr.node = dcr.client
	// Prime the cache with the best block.
	bestHash, _, err := dcr.client.GetBestBlock()
	if err != nil {
		return nil, fmt.Errorf("error getting best block from dcrd: %v", err)
	}
	if bestHash != nil {
		_, err := dcr.getDcrBlock(bestHash)
		if err != nil {
			return nil, fmt.Errorf("error priming the cache: %v", err)
		}
	}
	return dcr, nil
} 
 | 
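A usage sketch for NewBackend: the empty configPath falls back to the default dcrd config location per the docstring, and cancelling the context shuts the backend down when the DEX exits. The logger value is an assumption:

ctx, cancel := context.WithCancel(context.Background())
defer cancel() // cancelled when the DEX application exits

dcrBackend, err := NewBackend(ctx, "", logger, dex.Mainnet)
if err != nil {
	log.Fatalf("dcr backend: %v", err)
}
_ = dcrBackend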
	[
  "func NewBackend(cfg BackendConfig) (*Backend, error) {\n\t// Default the context.\n\tif cfg.Context == nil {\n\t\tcfg.Context = context.Background()\n\t}\n\n\t// If no client was specified, build one and configure the backend with it including waiting\n\t// for the caches to sync.\n\tif cfg.ClientConfig == nil {\n\t\tvar err error\n\t\tcfg, err = loadConfig(cfg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\topts := tinkcontrollers.GetServerOptions()\n\topts.Namespace = cfg.Namespace\n\n\t// Use a manager from the tink project so we can take advantage of the indexes and caching it\n\t// configures. Once started, we don't really need any of the manager capabilities hence we don't\n\t// store it in the Backend.\n\tmanager, err := tinkcontrollers.NewManager(cfg.ClientConfig, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// TODO(chrisdoherty4) Stop panicing on error. This will likely require exposing Start in\n\t// some capacity and allowing the caller to handle the error.\n\tgo func() {\n\t\tif err := manager.Start(cfg.Context); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\treturn &Backend{\n\t\tcloser:           cfg.Context.Done(),\n\t\tclient:           manager.GetClient(),\n\t\tWaitForCacheSync: manager.GetCache().WaitForCacheSync,\n\t}, nil\n}",
  "func NewBackend(configPath string, logger dex.Logger, network dex.Network) (asset.Backend, error) {\n\tvar params *dexbtc.CloneParams\n\tswitch network {\n\tcase dex.Mainnet:\n\t\tparams = MainNetParams\n\tcase dex.Testnet:\n\t\tparams = TestNet4Params\n\tcase dex.Regtest:\n\t\tparams = RegressionNetParams\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown network ID %v\", network)\n\t}\n\n\t// Designate the clone ports. These will be overwritten by any explicit\n\t// settings in the configuration file.\n\tports := dexbtc.NetPorts{\n\t\tMainnet: \"9332\",\n\t\tTestnet: \"19332\",\n\t\tSimnet:  \"19443\",\n\t}\n\n\tif configPath == \"\" {\n\t\tconfigPath = dexbtc.SystemConfigPath(\"litecoin\")\n\t}\n\n\treturn btc.NewBTCClone(assetName, configPath, logger, network, dexbtc.ReadCloneParams(params), ports)\n}",
  "func NewBackend(conf *conf.Conf) (*Backend, error) {\n\tbackend := &Backend{}\n\n\tcfg := api.DefaultConfig()\n\tcfg.Address = conf.ConsulAddr\n\n\tif conf.Timeout != 0 {\n\t\tcfg.HttpClient.Timeout = time.Duration(conf.Timeout) * time.Second\n\t}\n\n\tif conf.Token != \"\" {\n\t\tcfg.Token = conf.Token\n\t}\n\n\tif conf.AuthEnabled {\n\t\tcfg.HttpAuth = &api.HttpBasicAuth{\n\t\t\tUsername: conf.AuthUserName,\n\t\t\tPassword: conf.AuthPassword,\n\t\t}\n\t}\n\n\tcli, err := api.NewClient(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbackend.cli = cli\n\tbackend.agent = cli.Agent()\n\n\treturn backend, nil\n}",
  "func New(c *Config) (*Backend, error) {\n\tif err := validation.Validate.Struct(c); err != nil {\n\t\treturn nil, err\n\t}\n\tb := &Backend{\n\t\tkeys: make(chan *template.Key, 500),\n\t\tlog:  c.Logger,\n\t\tsvc:  c.SSM,\n\t}\n\treturn b, nil\n}",
  "func New(capsuleChan chan *capsule.Capsule) (*Backend, error) {\n\t// Loads a new structured configuration with the informations of a given\n\t// configuration file.\n\tproviderConfig, err := loadConfig()\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"initiliazing backend\")\n\t}\n\n\t// Loads backend providers defined as activated.\n\tp, err := loadProvider(providerConfig)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"initiliazing backend\")\n\t}\n\n\treturn &Backend{\n\t\tactivatedProvider: p,\n\t\tcapsule:           capsuleChan,\n\t\twg:                &sync.WaitGroup{},\n\t}, nil\n}",
  "func NewBackend(ipc string, logger dex.Logger, network dex.Network) (*Backend, error) {\n\tcfg, err := load(ipc, network)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn unconnectedETH(logger, cfg), nil\n}",
  "func NewWithConfig(ctx context.Context, cfg Config) (*Backend, error) {\n\tif err := cfg.CheckAndSetDefaults(); err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tconnectionURI := cfg.ConnectionURI()\n\tpath := filepath.Join(cfg.Path, defaultDBFile)\n\t// Ensure that the path to the root directory exists.\n\terr := os.MkdirAll(cfg.Path, os.ModeDir|defaultDirMode)\n\tif err != nil {\n\t\treturn nil, trace.ConvertSystemError(err)\n\t}\n\n\tsetPermissions := false\n\tif _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) {\n\t\tsetPermissions = true\n\t}\n\n\tdb, err := sql.Open(\"sqlite3\", cfg.ConnectionURI())\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err, \"error opening URI: %v\", connectionURI)\n\t}\n\n\tif setPermissions {\n\t\t// Ensure the database has restrictive access permissions.\n\t\terr = db.PingContext(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\terr = os.Chmod(path, dbMode)\n\t\tif err != nil {\n\t\t\treturn nil, trace.ConvertSystemError(err)\n\t\t}\n\t}\n\n\t// serialize access to sqlite, as we're using immediate transactions anyway,\n\t// and in-memory go locks are faster than sqlite locks\n\tdb.SetMaxOpenConns(1)\n\tbuf := backend.NewCircularBuffer(\n\t\tbackend.BufferCapacity(cfg.BufferSize),\n\t)\n\tcloseCtx, cancel := context.WithCancel(ctx)\n\tl := &Backend{\n\t\tConfig: cfg,\n\t\tdb:     db,\n\t\tEntry:  log.WithFields(log.Fields{trace.Component: BackendName}),\n\t\tclock:  cfg.Clock,\n\t\tbuf:    buf,\n\t\tctx:    closeCtx,\n\t\tcancel: cancel,\n\t}\n\tl.Debugf(\"Connected to: %v, poll stream period: %v\", connectionURI, cfg.PollStreamPeriod)\n\tif err := l.createSchema(); err != nil {\n\t\treturn nil, trace.Wrap(err, \"error creating schema: %v\", connectionURI)\n\t}\n\tif err := l.showPragmas(); err != nil {\n\t\tl.Warningf(\"Failed to show pragma settings: %v.\", err)\n\t}\n\tgo l.runPeriodicOperations()\n\treturn l, nil\n}",
  "func New(ctx context.Context, params backend.Params) (*Backend, error) {\n\tvar cfg *Config\n\terr := utils.ObjectToStruct(params, &cfg)\n\tif err != nil {\n\t\treturn nil, trace.BadParameter(\"SQLite configuration is invalid: %v\", err)\n\t}\n\treturn NewWithConfig(ctx, *cfg)\n}",
  "func New(driver, dsn string, extStore ExternalStore, opts Opts) (*Backend, error) {\n\tb := &Backend{\n\t\tfetchStmtsCache:       make(map[string]*sql.Stmt),\n\t\tflagsSearchStmtsCache: make(map[string]*sql.Stmt),\n\t\taddFlagsStmtsCache:    make(map[string]*sql.Stmt),\n\t\tremFlagsStmtsCache:    make(map[string]*sql.Stmt),\n\n\t\tsqliteOptimizeLoopStop: make(chan struct{}),\n\n\t\textStore: extStore,\n\t\tOpts:     opts,\n\n\t\tmngr: mess.NewManager(),\n\t}\n\tvar err error\n\n\tif b.Opts.CompressAlgo != \"\" {\n\t\timpl, ok := compressionAlgos[b.Opts.CompressAlgo]\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"New: unknown compression algorithm: %s\", b.Opts.CompressAlgo)\n\t\t}\n\n\t\tb.compressAlgo = impl\n\t} else {\n\t\tb.compressAlgo = nullCompression{}\n\t}\n\n\tb.Opts = opts\n\n\tif b.Opts.Log == nil {\n\t\tb.Opts.Log = globalLogger{}\n\t}\n\n\tif b.Opts.PRNG != nil {\n\t\tb.prng = opts.PRNG\n\t} else {\n\t\tb.prng = mathrand.New(mathrand.NewSource(time.Now().Unix()))\n\t}\n\n\tif driver == \"sqlite3\" {\n\t\tdsn = b.addSqlite3Params(dsn)\n\t}\n\n\tb.db.driver = driver\n\tb.db.dsn = dsn\n\n\tb.db.DB, err = sql.Open(driver, dsn)\n\tif err != nil {\n\t\treturn nil, wrapErr(err, \"NewBackend (open)\")\n\t}\n\tb.DB = b.db.DB\n\n\tver, err := b.schemaVersion()\n\tif err != nil {\n\t\treturn nil, wrapErr(err, \"NewBackend (schemaVersion)\")\n\t}\n\t// Zero version indicates \"empty database\".\n\tif ver > SchemaVersion {\n\t\treturn nil, fmt.Errorf(\"incompatible database schema, too new (%d > %d)\", ver, SchemaVersion)\n\t}\n\tif ver < SchemaVersion && ver != 0 {\n\t\tb.Opts.Log.Printf(\"Upgrading database schema (from %d to %d)\", ver, SchemaVersion)\n\t\tif err := b.upgradeSchema(ver); err != nil {\n\t\t\treturn nil, wrapErr(err, \"NewBackend (schemaUpgrade)\")\n\t\t}\n\t}\n\tif err := b.setSchemaVersion(SchemaVersion); err != nil {\n\t\treturn nil, wrapErr(err, \"NewBackend (setSchemaVersion)\")\n\t}\n\n\tif err := b.configureEngine(); err != nil {\n\t\treturn nil, wrapErr(err, \"NewBackend (configureEngine)\")\n\t}\n\n\tif err := b.initSchema(); err != nil {\n\t\treturn nil, wrapErr(err, \"NewBackend (initSchema)\")\n\t}\n\tif err := b.prepareStmts(); err != nil {\n\t\treturn nil, wrapErr(err, \"NewBackend (prepareStmts)\")\n\t}\n\n\tfor _, item := range [...]imap.FetchItem{\n\t\timap.FetchFlags, imap.FetchEnvelope,\n\t\timap.FetchBodyStructure, \"BODY[]\", \"BODY[HEADER.FIELDS (From To)]\"} {\n\n\t\tif _, err := b.getFetchStmt([]imap.FetchItem{item}); err != nil {\n\t\t\treturn nil, wrapErrf(err, \"fetchStmt prime (%s)\", item)\n\t\t}\n\t}\n\n\tif b.db.driver == \"sqlite3\" {\n\t\tgo b.sqliteOptimizeLoop()\n\t}\n\n\treturn b, nil\n}",
  "func (o *TridentOrchestrator) AddBackend(\n\tctx context.Context, configJSON, configRef string,\n) (backendExternal *storage.BackendExternal, err error) {\n\tctx = GenerateRequestContextForLayer(ctx, LogLayerCore)\n\n\tif o.bootstrapError != nil {\n\t\treturn nil, o.bootstrapError\n\t}\n\n\tdefer recordTiming(\"backend_add\", &err)()\n\n\to.mutex.Lock()\n\tdefer o.mutex.Unlock()\n\tdefer o.updateMetrics()\n\n\tbackend, err := o.addBackend(ctx, configJSON, uuid.New().String(), configRef)\n\tif err != nil {\n\t\treturn backend, err\n\t}\n\n\tb, err := o.getBackendByBackendUUID(backend.BackendUUID)\n\tif err != nil {\n\t\treturn backend, err\n\t}\n\terr = o.reconcileNodeAccessOnBackend(ctx, b)\n\tif err != nil {\n\t\treturn backend, err\n\t}\n\n\treturn backend, nil\n}",
  "func NewBackend(input *NewBackendInput) Backend {\n\treturn &backendImpl{\n\t\trecordDB: input.RecordDB,\n\t}\n}",
  "func NewBackend(backendType, uri string) (Backend, error) {\n\tif activeBackend != nil {\n\t\tif activebackendType != backendType {\n\t\t\treturn nil, errors.New(\"A backend with a different type has already been created\")\n\t\t}\n\n\t\treturn activeBackend, nil\n\t}\n\n\tbackendFactory, ok := stores[backendType]\n\n\tif !ok {\n\t\treturn nil, errors.New(\"backend type not registered\")\n\t}\n\n\tbackend, err := backendFactory(uri)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tactiveBackend = backend\n\tactivebackendType = backendType\n\n\treturn backend, nil\n}",
  "func NewBackend(backend string, azureOpts *AzureBlobOpts, localFileOpts *LocalFileOpts) (Backend, error) {\n\tswitch backend {\n\tcase AzureBlobStorage:\n\t\tif azureOpts == nil {\n\t\t\treturn nil, errors.New(\"azure storage options must be set when using azure as storage target\")\n\t\t}\n\t\treturn NewAzureBlob(azureOpts)\n\tcase FileStorage:\n\t\treturn NewLocalFile(localFileOpts.Path)\n\t}\n\treturn nil, fmt.Errorf(\"Unsupported storage backend type %s\", backend)\n}",
  "func (m *Meta) backendFromConfig(opts *BackendOpts) (backend.Backend, tfdiags.Diagnostics) {\n\t// Get the local backend configuration.\n\tc, cHash, diags := m.backendConfig(opts)\n\tif diags.HasErrors() {\n\t\treturn nil, diags\n\t}\n\n\t// ------------------------------------------------------------------------\n\t// For historical reasons, current backend configuration for a working\n\t// directory is kept in a *state-like* file, using the legacy state\n\t// structures in the Terraform package. It is not actually a Terraform\n\t// state, and so only the \"backend\" portion of it is actually used.\n\t//\n\t// The remainder of this code often confusingly refers to this as a \"state\",\n\t// so it's unfortunately important to remember that this is not actually\n\t// what we _usually_ think of as \"state\", and is instead a local working\n\t// directory \"backend configuration state\" that is never persisted anywhere.\n\t//\n\t// Since the \"real\" state has since moved on to be represented by\n\t// states.State, we can recognize the special meaning of state that applies\n\t// to this function and its callees by their continued use of the\n\t// otherwise-obsolete terraform.State.\n\t// ------------------------------------------------------------------------\n\n\t// Get the path to where we store a local cache of backend configuration\n\t// if we're using a remote backend. This may not yet exist which means\n\t// we haven't used a non-local backend before. That is okay.\n\tstatePath := filepath.Join(m.DataDir(), DefaultStateFilename)\n\tsMgr := &clistate.LocalState{Path: statePath}\n\tif err := sMgr.RefreshState(); err != nil {\n\t\tdiags = diags.Append(fmt.Errorf(\"Failed to load state: %s\", err))\n\t\treturn nil, diags\n\t}\n\n\t// Load the state, it must be non-nil for the tests below but can be empty\n\ts := sMgr.State()\n\tif s == nil {\n\t\tlog.Printf(\"[TRACE] Meta.Backend: backend has not previously been initialized in this working directory\")\n\t\ts = legacy.NewState()\n\t} else if s.Backend != nil {\n\t\tlog.Printf(\"[TRACE] Meta.Backend: working directory was previously initialized for %q backend\", s.Backend.Type)\n\t} else {\n\t\tlog.Printf(\"[TRACE] Meta.Backend: working directory was previously initialized but has no backend (is using legacy remote state?)\")\n\t}\n\n\t// if we want to force reconfiguration of the backend, we set the backend\n\t// state to nil on this copy. This will direct us through the correct\n\t// configuration path in the switch statement below.\n\tif m.reconfigure {\n\t\ts.Backend = nil\n\t}\n\n\t// Upon return, we want to set the state we're using in-memory so that\n\t// we can access it for commands.\n\tm.backendState = nil\n\tdefer func() {\n\t\tif s := sMgr.State(); s != nil && !s.Backend.Empty() {\n\t\t\tm.backendState = s.Backend\n\t\t}\n\t}()\n\n\tif !s.Remote.Empty() {\n\t\t// Legacy remote state is no longer supported. User must first\n\t\t// migrate with Terraform 0.11 or earlier.\n\t\tdiags = diags.Append(tfdiags.Sourceless(\n\t\t\ttfdiags.Error,\n\t\t\t\"Legacy remote state not supported\",\n\t\t\t\"This working directory is configured for legacy remote state, which is no longer supported from Terraform v0.12 onwards. 
To migrate this environment, first run \\\"terraform init\\\" under a Terraform 0.11 release, and then upgrade Terraform again.\",\n\t\t))\n\t\treturn nil, diags\n\t}\n\n\t// This switch statement covers all the different combinations of\n\t// configuring new backends, updating previously-configured backends, etc.\n\tswitch {\n\t// No configuration set at all. Pure local state.\n\tcase c == nil && s.Backend.Empty():\n\t\tlog.Printf(\"[TRACE] Meta.Backend: using default local state only (no backend configuration, and no existing initialized backend)\")\n\t\treturn nil, nil\n\n\t// We're unsetting a backend (moving from backend => local)\n\tcase c == nil && !s.Backend.Empty():\n\t\tlog.Printf(\"[TRACE] Meta.Backend: previously-initialized %q backend is no longer present in config\", s.Backend.Type)\n\n\t\tinitReason := fmt.Sprintf(\"Unsetting the previously set backend %q\", s.Backend.Type)\n\t\tif !opts.Init {\n\t\t\tdiags = diags.Append(tfdiags.Sourceless(\n\t\t\t\ttfdiags.Error,\n\t\t\t\t\"Backend initialization required, please run \\\"terraform init\\\"\",\n\t\t\t\tfmt.Sprintf(strings.TrimSpace(errBackendInit), initReason),\n\t\t\t))\n\t\t\treturn nil, diags\n\t\t}\n\n\t\tif s.Backend.Type != \"cloud\" && !m.migrateState {\n\t\t\tdiags = diags.Append(migrateOrReconfigDiag)\n\t\t\treturn nil, diags\n\t\t}\n\n\t\treturn m.backend_c_r_S(c, cHash, sMgr, true)\n\n\t// Configuring a backend for the first time or -reconfigure flag was used\n\tcase c != nil && s.Backend.Empty():\n\t\tlog.Printf(\"[TRACE] Meta.Backend: moving from default local state only to %q backend\", c.Type)\n\t\tif !opts.Init {\n\t\t\tif c.Type == \"cloud\" {\n\t\t\t\tinitReason := \"Initial configuration of Terraform Cloud\"\n\t\t\t\tdiags = diags.Append(tfdiags.Sourceless(\n\t\t\t\t\ttfdiags.Error,\n\t\t\t\t\t\"Terraform Cloud initialization required: please run \\\"terraform init\\\"\",\n\t\t\t\t\tfmt.Sprintf(strings.TrimSpace(errBackendInitCloud), initReason),\n\t\t\t\t))\n\t\t\t} else {\n\t\t\t\tinitReason := fmt.Sprintf(\"Initial configuration of the requested backend %q\", c.Type)\n\t\t\t\tdiags = diags.Append(tfdiags.Sourceless(\n\t\t\t\t\ttfdiags.Error,\n\t\t\t\t\t\"Backend initialization required, please run \\\"terraform init\\\"\",\n\t\t\t\t\tfmt.Sprintf(strings.TrimSpace(errBackendInit), initReason),\n\t\t\t\t))\n\t\t\t}\n\t\t\treturn nil, diags\n\t\t}\n\t\treturn m.backend_C_r_s(c, cHash, sMgr, opts)\n\t// Potentially changing a backend configuration\n\tcase c != nil && !s.Backend.Empty():\n\t\t// We are not going to migrate if...\n\t\t//\n\t\t// We're not initializing\n\t\t// AND the backend cache hash values match, indicating that the stored config is valid and completely unchanged.\n\t\t// AND we're not providing any overrides. An override can mean a change overriding an unchanged backend block (indicated by the hash value).\n\t\tif (uint64(cHash) == s.Backend.Hash) && (!opts.Init || opts.ConfigOverride == nil) {\n\t\t\tlog.Printf(\"[TRACE] Meta.Backend: using already-initialized, unchanged %q backend configuration\", c.Type)\n\t\t\tsavedBackend, diags := m.savedBackend(sMgr)\n\t\t\t// Verify that selected workspace exist. 
Otherwise prompt user to create one\n\t\t\tif opts.Init && savedBackend != nil {\n\t\t\t\tif err := m.selectWorkspace(savedBackend); err != nil {\n\t\t\t\t\tdiags = diags.Append(err)\n\t\t\t\t\treturn nil, diags\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn savedBackend, diags\n\t\t}\n\n\t\t// If our configuration (the result of both the literal configuration and given\n\t\t// -backend-config options) is the same, then we're just initializing a previously\n\t\t// configured backend. The literal configuration may differ, however, so while we\n\t\t// don't need to migrate, we update the backend cache hash value.\n\t\tif !m.backendConfigNeedsMigration(c, s.Backend) {\n\t\t\tlog.Printf(\"[TRACE] Meta.Backend: using already-initialized %q backend configuration\", c.Type)\n\t\t\tsavedBackend, moreDiags := m.savedBackend(sMgr)\n\t\t\tdiags = diags.Append(moreDiags)\n\t\t\tif moreDiags.HasErrors() {\n\t\t\t\treturn nil, diags\n\t\t\t}\n\n\t\t\t// It's possible for a backend to be unchanged, and the config itself to\n\t\t\t// have changed by moving a parameter from the config to `-backend-config`\n\t\t\t// In this case, we update the Hash.\n\t\t\tmoreDiags = m.updateSavedBackendHash(cHash, sMgr)\n\t\t\tif moreDiags.HasErrors() {\n\t\t\t\treturn nil, diags\n\t\t\t}\n\t\t\t// Verify that selected workspace exist. Otherwise prompt user to create one\n\t\t\tif opts.Init && savedBackend != nil {\n\t\t\t\tif err := m.selectWorkspace(savedBackend); err != nil {\n\t\t\t\t\tdiags = diags.Append(err)\n\t\t\t\t\treturn nil, diags\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn savedBackend, diags\n\t\t}\n\t\tlog.Printf(\"[TRACE] Meta.Backend: backend configuration has changed (from type %q to type %q)\", s.Backend.Type, c.Type)\n\n\t\tcloudMode := cloud.DetectConfigChangeType(s.Backend, c, false)\n\n\t\tif !opts.Init {\n\t\t\t//user ran another cmd that is not init but they are required to initialize because of a potential relevant change to their backend configuration\n\t\t\tinitDiag := m.determineInitReason(s.Backend.Type, c.Type, cloudMode)\n\t\t\tdiags = diags.Append(initDiag)\n\t\t\treturn nil, diags\n\t\t}\n\n\t\tif !cloudMode.InvolvesCloud() && !m.migrateState {\n\t\t\tdiags = diags.Append(migrateOrReconfigDiag)\n\t\t\treturn nil, diags\n\t\t}\n\n\t\tlog.Printf(\"[WARN] backend config has changed since last init\")\n\t\treturn m.backend_C_r_S_changed(c, cHash, sMgr, true, opts)\n\n\tdefault:\n\t\tdiags = diags.Append(fmt.Errorf(\n\t\t\t\"Unhandled backend configuration state. This is a bug. Please\\n\"+\n\t\t\t\t\"report this error with the following information.\\n\\n\"+\n\t\t\t\t\"Config Nil: %v\\n\"+\n\t\t\t\t\"Saved Backend Empty: %v\\n\",\n\t\t\tc == nil, s.Backend.Empty(),\n\t\t))\n\t\treturn nil, diags\n\t}\n}",
  "func New() *Backend {\n\treturn &Backend{\n\t\tmu: &sync.RWMutex{},\n\t}\n}",
  "func (o *TridentOrchestrator) validateAndCreateBackendFromConfig(\n\tctx context.Context, configJSON, configRef, backendUUID string,\n) (backendExternal storage.Backend, err error) {\n\tvar backendSecret map[string]string\n\n\tcommonConfig, configInJSON, err := factory.ValidateCommonSettings(ctx, configJSON)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// For backends created using CRD Controller ensure there are no forbidden fields\n\tif o.isCRDContext(ctx) {\n\t\tif err = factory.SpecOnlyValidation(ctx, commonConfig, configInJSON); err != nil {\n\t\t\treturn nil, errors.WrapUnsupportedConfigError(err)\n\t\t}\n\t}\n\n\t// If Credentials are set, fetch them and set them in the configJSON matching field names\n\tif len(commonConfig.Credentials) != 0 {\n\t\tsecretName, _, err := commonConfig.GetCredentials()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if secretName == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"credentials `name` field cannot be empty\")\n\t\t}\n\n\t\tif backendSecret, err = o.storeClient.GetBackendSecret(ctx, secretName); err != nil {\n\t\t\treturn nil, err\n\t\t} else if backendSecret == nil {\n\t\t\treturn nil, fmt.Errorf(\"backend credentials not found\")\n\t\t}\n\t}\n\n\treturn factory.NewStorageBackendForConfig(ctx, configInJSON, configRef, backendUUID, commonConfig, backendSecret)\n}",
  "func NewBackend(ctrl *gomock.Controller) *Backend {\n\tmock := &Backend{ctrl: ctrl}\n\tmock.recorder = &BackendMockRecorder{mock}\n\treturn mock\n}",
  "func New(d diag.Sink, cloudURL string, project *workspace.Project, insecure bool) (Backend, error) {\n\tcloudURL = ValueOrDefaultURL(cloudURL)\n\taccount, err := workspace.GetAccount(cloudURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"getting stored credentials: %w\", err)\n\t}\n\tapiToken := account.AccessToken\n\n\tclient := client.NewClient(cloudURL, apiToken, insecure, d)\n\tcapabilities := detectCapabilities(d, client)\n\n\treturn &cloudBackend{\n\t\td:              d,\n\t\turl:            cloudURL,\n\t\tclient:         client,\n\t\tcapabilities:   capabilities,\n\t\tcurrentProject: project,\n\t}, nil\n}",
  "func FromConfig(l log.Logger, backedType string, cfg Config) (Backend, error) {\n\tvar (\n\t\tb   Backend\n\t\terr error\n\t)\n\n\tswitch backedType {\n\tcase Azure:\n\t\tlevel.Warn(l).Log(\"msg\", \"using azure blob as backend\")\n\t\tb, err = azure.New(log.With(l, \"backend\", Azure), cfg.Azure)\n\tcase S3:\n\t\tlevel.Warn(l).Log(\"msg\", \"using aws s3 as backend\")\n\t\tb, err = s3.New(log.With(l, \"backend\", S3), cfg.S3, cfg.Debug)\n\tcase GCS:\n\t\tlevel.Warn(l).Log(\"msg\", \"using gc storage as backend\")\n\t\tb, err = gcs.New(log.With(l, \"backend\", GCS), cfg.GCS)\n\tcase FileSystem:\n\t\tlevel.Warn(l).Log(\"msg\", \"using filesystem as backend\")\n\t\tb, err = filesystem.New(log.With(l, \"backend\", FileSystem), cfg.FileSystem)\n\tcase SFTP:\n\t\tlevel.Warn(l).Log(\"msg\", \"using sftp as backend\")\n\t\tb, err = sftp.New(log.With(l, \"backend\", SFTP), cfg.SFTP)\n\tcase AliOSS:\n\t\tlevel.Warn(l).Log(\"msg\", \"using Alibaba OSS storage as backend\")\n\t\tb, err = alioss.New(log.With(l, \"backend\", AliOSS), cfg.Alioss, cfg.Debug)\n\tdefault:\n\t\treturn nil, errors.New(\"unknown backend\")\n\t}\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"initialize backend, %w\", err)\n\t}\n\n\treturn b, nil\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	InitTxSize is an asset.DEXAsset method that must produce the max size of a standardized atomic swap initialization transaction. 
 | 
	func (dcr *DCRBackend) InitTxSize() uint32 {
	return dexdcr.InitTxSize
} 
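
A minimal usage sketch (illustrative only; txSizer, requiredFunds, and stubBackend are assumed names, not part of the source above): a caller that knows the worst-case initialization-transaction size can budget the fees for one swap by multiplying that size by a fee rate.

package main

import "fmt"

// txSizer is a hypothetical narrow interface over backends like the one above.
type txSizer interface {
	InitTxSize() uint32
}

// requiredFunds computes what a client must commit for one swap: the swap
// value plus worst-case initialization-transaction fees.
func requiredFunds(b txSizer, swapVal, feeRatePerByte uint64) uint64 {
	return swapVal + uint64(b.InitTxSize())*feeRatePerByte
}

// stubBackend stands in for a real backend so the sketch runs standalone.
type stubBackend struct{ size uint32 }

func (s stubBackend) InitTxSize() uint32 { return s.size }

func main() {
	b := stubBackend{size: 302} // assumed size in bytes, for illustration only
	fmt.Println(requiredFunds(b, 1e8, 20)) // 1e8 atoms at 20 atoms/byte
}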
 | 
	[
  "func (eth *Backend) InitTxSize() uint32 {\n\treturn eth.initTxSize\n}",
  "func (eth *Backend) InitTxSizeBase() uint32 {\n\treturn eth.initTxSize\n}",
  "func GetMaxTxSize() int64 {\r\n\treturn converter.StrToInt64(SysString(MaxTxSize))\r\n}",
  "func updateTxSize(tx *transaction.Transaction) (*transaction.Transaction, error) {\n\tbw := io.NewBufBinWriter()\n\ttx.EncodeBinary(bw.BinWriter)\n\tif bw.Err != nil {\n\t\treturn nil, fmt.Errorf(\"encode binary: %w\", bw.Err)\n\t}\n\treturn transaction.NewTransactionFromBytes(tx.Bytes())\n}",
  "func InitGas(n int, contractVer uint32) uint64 {\n\tif n == 0 {\n\t\treturn 0\n\t}\n\tg, ok := VersionedGases[contractVer]\n\tif !ok {\n\t\treturn math.MaxUint64\n\t}\n\treturn g.InitGas + (uint64(n)-1)*g.AdditionalInitGas\n}",
  "func (d *AD9910) MaxTxSize() int {\n\tif c, ok := d.spiConn.(*spitest.LogConn); ok {\n\t\tif l, ok := c.Conn.(conn.Limits); ok {\n\t\t\treturn l.MaxTxSize()\n\t\t}\n\t}\n\tif l, ok := d.spiConn.(conn.Limits); ok {\n\t\treturn l.MaxTxSize()\n\t}\n\n\treturn d.config.Config.SPIMaxTxSize\n}",
  "func (t *TxContract) SerializeSize() int {\n\t// serialized int size for GasLimit\n\treturn 4\n}",
  "func (in *ActionIpAddressIndexInput) SetMaxTx(value int64) *ActionIpAddressIndexInput {\n\tin.MaxTx = value\n\n\tif in._selectedParameters == nil {\n\t\tin._selectedParameters = make(map[string]interface{})\n\t}\n\n\tin._selectedParameters[\"MaxTx\"] = nil\n\treturn in\n}",
  "func InitBufferSize(size int) Options {\n\treturn func(c *config) {\n\t\tif size > 0 {\n\t\t\tc.initBufferSize = size\n\t\t}\n\t}\n}",
  "func (msg *MsgTx) SerializeSize() int {\n\t// Version 4 bytes + LockTime 4 bytes + TxContract 4 bytes + Serialized varint size for the\n\t// number of transaction inputs and outputs.\n\tn := 12 + serialization.VarIntSerializeSize(uint64(len(msg.TxIn))) +\n\t\tserialization.VarIntSerializeSize(uint64(len(msg.TxOut)))\n\n\tfor _, txIn := range msg.TxIn {\n\t\tn += txIn.SerializeSize()\n\t}\n\n\tfor _, txOut := range msg.TxOut {\n\t\tn += txOut.SerializeSize()\n\t}\n\n\treturn n\n}",
  "func (_ConfigContract *ConfigContractCaller) ConfigTransactionSizeLimit(opts *bind.CallOpts, configIndex uint64) (uint64, error) {\n\tvar out []interface{}\n\terr := _ConfigContract.contract.Call(opts, &out, \"configTransactionSizeLimit\", configIndex)\n\n\tif err != nil {\n\t\treturn *new(uint64), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(uint64)).(*uint64)\n\n\treturn out0, err\n\n}",
  "func (t *Transaction) Size() int {\n\tif t.size == 0 {\n\t\tt.size = io.GetVarSize(t)\n\t}\n\treturn t.size\n}",
  "func (tx *Tx) Size() int {\r\n\treturn len(tx.Bytes())\r\n}",
  "func (_ConfigContract *ConfigContractCallerSession) ConfigTransactionSizeLimit(configIndex uint64) (uint64, error) {\n\treturn _ConfigContract.Contract.ConfigTransactionSizeLimit(&_ConfigContract.CallOpts, configIndex)\n}",
  "func (owner *WalletOwnerAPI) InitSendTx(initTxArgs libwallet.InitTxArgs) (*slateversions.SlateV4, error) {\n\tparams := struct {\n\t\tToken string               `json:\"token\"`\n\t\tArgs  libwallet.InitTxArgs `json:\"args\"`\n\t}{\n\t\tToken: owner.token,\n\t\tArgs:  initTxArgs,\n\t}\n\tparamsBytes, err := json.Marshal(params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tenvl, err := owner.client.EncryptedRequest(\"init_send_tx\", paramsBytes, owner.sharedSecret)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif envl == nil {\n\t\treturn nil, errors.New(\"WalletOwnerAPI: Empty RPC Response from grin-wallet\")\n\t}\n\tif envl.Error != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"code\":    envl.Error.Code,\n\t\t\t\"message\": envl.Error.Message,\n\t\t}).Error(\"WalletOwnerAPI: RPC Error during InitSendTx\")\n\t\treturn nil, errors.New(string(envl.Error.Code) + \"\" + envl.Error.Message)\n\t}\n\tvar result Result\n\tif err = json.Unmarshal(envl.Result, &result); err != nil {\n\t\treturn nil, err\n\t}\n\tif result.Err != nil {\n\t\treturn nil, errors.New(string(result.Err))\n\t}\n\n\tvar slate slateversions.SlateV4\n\tif err := json.Unmarshal(result.Ok, &slate); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &slate, nil\n}",
  "func initTtySize(ctx context.Context, cli ExecCli, isExec bool, resizeTtyFunc func(ctx context.Context, cli ExecCli, isExec bool) error) {\n\trttyFunc := resizeTtyFunc\n\tif rttyFunc == nil {\n\t\trttyFunc = resizeTty\n\t}\n\tif err := rttyFunc(ctx, cli, isExec); err != nil {\n\t\tgo func() {\n\t\t\tvar err error\n\t\t\tfor retry := 0; retry < 5; retry++ {\n\t\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\t\tif err = rttyFunc(ctx, cli, isExec); err == nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintln(cli.Err(), \"failed to resize tty, using default size\")\n\t\t\t}\n\t\t}()\n\t}\n}",
  "func (tx *Tx) Size() int64 {\n\treturn tx.size\n}",
  "func (vmctx *vmContext) checkTransactionSize() error {\n\tessence, _ := vmctx.BuildTransactionEssence(state.L1CommitmentNil, false)\n\ttx := transaction.MakeAnchorTransaction(essence, &iotago.Ed25519Signature{})\n\tif tx.Size() > parameters.L1().MaxPayloadSize {\n\t\treturn vmexceptions.ErrMaxTransactionSizeExceeded\n\t}\n\treturn nil\n}",
  "func (tp *TXPool) Init() {\n\ttp.Lock()\n\tdefer tp.Unlock()\n\ttp.txList = make(map[common.Uint256]*TXEntry)\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	BlockChannel creates and returns a new channel on which to receive block updates. If the returned channel is ever blocking, the update is dropped and an error is logged from the dcr package. Part of the asset.DEXAsset interface. 
 | 
	func (dcr *DCRBackend) BlockChannel(size int) chan uint32 {
	c := make(chan uint32, size)
	dcr.signalMtx.Lock()
	defer dcr.signalMtx.Unlock()
	dcr.blockChans = append(dcr.blockChans, c)
	return c
} 
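
To make the buffered, non-blocking semantics concrete, here is a self-contained sketch (hypothetical names; only the BlockChannel shape mirrors the source above) of the fan-out side and a consumer draining its subscription. Because sends use select with a default case, a full subscriber buffer drops the update instead of stalling the notifier.

package main

import (
	"fmt"
	"sync"
)

// backend mirrors just enough of the pattern above to run standalone.
type backend struct {
	mtx        sync.RWMutex
	blockChans []chan uint32
}

// BlockChannel registers a new buffered subscriber channel.
func (b *backend) BlockChannel(size int) chan uint32 {
	c := make(chan uint32, size)
	b.mtx.Lock()
	defer b.mtx.Unlock()
	b.blockChans = append(b.blockChans, c)
	return c
}

// notify fans a height out to all subscribers, dropping the update for any
// subscriber whose buffer is full rather than blocking the caller.
func (b *backend) notify(height uint32) {
	b.mtx.RLock()
	defer b.mtx.RUnlock()
	for _, c := range b.blockChans {
		select {
		case c <- height:
		default: // slow receiver: this update is dropped, not blocked on
		}
	}
}

func main() {
	b := &backend{}
	blocks := b.BlockChannel(8) // size the buffer for expected bursts
	b.notify(100)
	b.notify(101)
	close(blocks)
	for h := range blocks {
		fmt.Println("new block:", h)
	}
}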
 | 
	[
  "func (eth *Backend) BlockChannel(size int) <-chan *asset.BlockUpdate {\n\tc := make(chan *asset.BlockUpdate, size)\n\teth.blockChansMtx.Lock()\n\tdefer eth.blockChansMtx.Unlock()\n\teth.blockChans[c] = struct{}{}\n\treturn c\n}",
  "func Block(f *Feed) error {\n\tf.Channel.Block = ValueYes\n\treturn nil\n}",
  "func (mgr *ServiceManager) SetBlockChannel(ch chan *types.Block) {\n\tmgr.bld.onBlock = ch\n}",
  "func Block(config Config, params Params, ip string, duration time.Duration,\n\tcancelChannel <-chan struct{}, logger log.DebugLogger) error {\n\treturn block(config, params, ip, duration, cancelChannel, logger)\n}",
  "func (c *ChannelParticipant) Block() error {\n\tif err := c.FetchParticipant(); err != nil {\n\t\treturn err\n\t}\n\n\tc.StatusConstant = ChannelParticipant_STATUS_BLOCKED\n\tif err := c.Update(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
  "func (p *proxy) SetBlockChannel(ch chan *types.Block) {\n\tp.orc.setBlockChannel(ch)\n}",
  "func (c *Client) Block() *Block {\n\treturn &Block{c}\n}",
  "func Block(c *blocker.Client, h http.Handler) http.Handler {\n\treturn BlockWithCode(c, h, http.StatusForbidden)\n}",
  "func initBlock(blockID int, xIndex int, yIndex int, windowD int, blockDim int, percentColor float32) (b block) {\n\n\t//Configure fields for Block\n\tb.dimension = blockDim\n\tb.percentColor = percentColor\n\tb.isFilled = false\n\tb.owner = -1\n\tb.coloredPixels = 0\n\tb.blockID = blockID\n\tb.offsetX = xIndex\n\tb.offsetY = yIndex\n\n\tb.pixels = createPixelArray(b.offsetX, b.offsetY, b.dimension)\n\n\treturn b\n}",
  "func (c *Channel) GenesisBlock(request *fab.GenesisBlockRequest) (*common.Block, error) {\n\tlogger.Debug(\"GenesisBlock - start\")\n\n\t// verify that we have an orderer configured\n\tif len(c.Orderers()) == 0 {\n\t\treturn nil, fmt.Errorf(\"GenesisBlock - error: Missing orderer assigned to this channel for the GenesisBlock request\")\n\t}\n\t// verify that we have transaction id\n\tif request.TxnID.ID == \"\" {\n\t\treturn nil, fmt.Errorf(\"GenesisBlock - error: Missing txId input parameter with the required transaction identifier\")\n\t}\n\t// verify that we have the nonce\n\tif request.TxnID.Nonce == nil {\n\t\treturn nil, fmt.Errorf(\"GenesisBlock - error: Missing nonce input parameter with the required single use number\")\n\t}\n\n\tif c.clientContext.GetUserContext() == nil {\n\t\treturn nil, fmt.Errorf(\"User context needs to be set\")\n\t}\n\tcreator, err := c.clientContext.GetUserContext().Identity()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error getting creator: %v\", err)\n\t}\n\n\t// now build the seek info , will be used once the channel is created\n\t// to get the genesis block back\n\tseekStart := fc.NewSpecificSeekPosition(0)\n\tseekStop := fc.NewSpecificSeekPosition(0)\n\tseekInfo := &ab.SeekInfo{\n\t\tStart:    seekStart,\n\t\tStop:     seekStop,\n\t\tBehavior: ab.SeekInfo_BLOCK_UNTIL_READY,\n\t}\n\tprotos_utils.MakeChannelHeader(common.HeaderType_DELIVER_SEEK_INFO, 1, c.Name(), 0)\n\tseekInfoHeader, err := BuildChannelHeader(common.HeaderType_DELIVER_SEEK_INFO, c.Name(), request.TxnID.ID, 0, \"\", time.Now())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error building channel header: %v\", err)\n\t}\n\tseekHeader, err := fc.BuildHeader(creator, seekInfoHeader, request.TxnID.Nonce)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error building header: %v\", err)\n\t}\n\tseekPayload := &common.Payload{\n\t\tHeader: seekHeader,\n\t\tData:   fc.MarshalOrPanic(seekInfo),\n\t}\n\tseekPayloadBytes := fc.MarshalOrPanic(seekPayload)\n\n\tsignedEnvelope, err := c.SignPayload(seekPayloadBytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error signing payload: %v\", err)\n\t}\n\n\tblock, err := c.SendEnvelope(signedEnvelope)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error sending envelope: %v\", err)\n\t}\n\treturn block, nil\n}",
  "func NewBlock(x, y, z, w, h, d int, blocking bool, name string) Collider {\n\tb := &Block{w: w, h: h, d: d}\n\tb.x, b.y, b.z = x, y, z\n\tb.name = name\n\tb.xyshape = resolv.Shape(resolv.NewRectangle(int32(x), int32(y), int32(w), int32(h)))\n\tb.xzshape = resolv.Shape(resolv.NewRectangle(int32(x), int32(z), int32(w), int32(d)))\n\tb.zyshape = resolv.Shape(resolv.NewRectangle(int32(z), int32(y), int32(d), int32(h)))\n\tb.ref = -1\n\tb.bodyType = &BodyType{blocking: blocking}\n\tb.reactionHub = events.NewReactionHub()\n\treturn b\n}",
  "func (c *Channel) block(pos *ab.SeekPosition) (*common.Block, error) {\n\tnonce, err := fc.GenerateRandomNonce()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error when generating nonce: %v\", err)\n\t}\n\n\tif c.clientContext.GetUserContext() == nil {\n\t\treturn nil, fmt.Errorf(\"User context needs to be set\")\n\t}\n\tcreator, err := c.clientContext.GetUserContext().Identity()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error when serializing identity: %v\", err)\n\t}\n\n\ttxID, err := protos_utils.ComputeProposalTxID(nonce, creator)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error when generating TX ID: %v\", err)\n\t}\n\n\tseekInfoHeader, err := BuildChannelHeader(common.HeaderType_DELIVER_SEEK_INFO, c.Name(), txID, 0, \"\", time.Now())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error when building channel header: %v\", err)\n\t}\n\n\tseekInfoHeaderBytes, err := proto.Marshal(seekInfoHeader)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error when marshalling channel header: %v\", err)\n\t}\n\n\tsignatureHeader := &common.SignatureHeader{\n\t\tCreator: creator,\n\t\tNonce:   nonce,\n\t}\n\n\tsignatureHeaderBytes, err := proto.Marshal(signatureHeader)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error when marshalling signature header: %v\", err)\n\t}\n\n\tseekHeader := &common.Header{\n\t\tChannelHeader:   seekInfoHeaderBytes,\n\t\tSignatureHeader: signatureHeaderBytes,\n\t}\n\n\tseekInfo := &ab.SeekInfo{\n\t\tStart:    pos,\n\t\tStop:     pos,\n\t\tBehavior: ab.SeekInfo_BLOCK_UNTIL_READY,\n\t}\n\n\tseekInfoBytes, err := proto.Marshal(seekInfo)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error when marshalling seek info: %v\", err)\n\t}\n\n\tseekPayload := &common.Payload{\n\t\tHeader: seekHeader,\n\t\tData:   seekInfoBytes,\n\t}\n\n\tseekPayloadBytes, err := proto.Marshal(seekPayload)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsignedEnvelope, err := c.SignPayload(seekPayloadBytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error when signing payload: %v\", err)\n\t}\n\n\treturn c.SendEnvelope(signedEnvelope)\n}",
  "func NewBlockCapability(\n\tmode csi.VolumeCapability_AccessMode_Mode) *csi.VolumeCapability {\n\n\treturn &csi.VolumeCapability{\n\t\tAccessMode: &csi.VolumeCapability_AccessMode{\n\t\t\tMode: mode,\n\t\t},\n\t\tAccessType: &csi.VolumeCapability_Block{\n\t\t\tBlock: &csi.VolumeCapability_BlockVolume{},\n\t\t},\n\t}\n}",
  "func (api *API) NewBlockLoop(blockChan chan<- *NewBlockMessage, balanceChan chan<- *BalancesChangedMessage, done <-chan bool, start uint32) {\n\tblockNum := start\n\n\tconfig, err := api.client.Database.GetConfig()\n\tif err != nil {\n\t\tlog.Printf(\"get config: %s\", err)\n\t\treturn\n\t}\n\n\tfor {\n\t\tprops, err := api.client.Database.GetDynamicGlobalProperties()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"get global properties: %s\", err)\n\t\t\ttime.Sleep(time.Duration(config.SteemitBlockInterval) * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tif blockNum == 0 {\n\t\t\tblockNum = props.HeadBlockNumber\n\t\t}\n\t\t// maybe LastIrreversibleBlockNum, cause possible microforks\n\t\tif props.HeadBlockNumber-blockNum > 0 {\n\t\t\tblock, err := api.client.Database.GetBlock(blockNum + 1)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"get block: %s\", err)\n\t\t\t\ttime.Sleep(time.Duration(config.SteemitBlockInterval) * time.Second)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmsg := &NewBlockMessage{\n\t\t\t\tHeight:       block.Number,\n\t\t\t\tTime:         block.Timestamp.Unix(),\n\t\t\t\tTransactions: block.Transactions,\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\tclose(blockChan)\n\t\t\t\tclose(balanceChan)\n\t\t\t\tlog.Println(\"end new block loop\")\n\t\t\t\treturn\n\t\t\tcase blockChan <- msg:\n\t\t\t\t// process block, now its only balance change check\n\t\t\t\tgo api.processBalance(block, balanceChan, done)\n\t\t\t}\n\t\t\tblockNum++\n\t\t} else {\n\t\t\ttime.Sleep(time.Duration(config.SteemitBlockInterval) * time.Second)\n\t\t}\n\t}\n}",
  "func New() *block {\n\treturn &block{\n\t\tBroadcastChan: make(chan Message, broadcastChanSize),\n\t\tbroadcastSeen: map[string]struct{}{},\n\t}\n}",
  "func (api CoreHTTP) Block() coreiface.BlockAPI {\n\treturn (BlockAPI)(api)\n}",
  "func (w *FilteredBlockWrapper) Block() *pb.FilteredBlock {\r\n\treturn w.block\r\n}",
  "func New() *block {\n\treturn &block{\n\t\tbroadcastChan:    make(chan Message, broadcastChanSize),\n\t\tbroadcastMsgSeen: map[string]struct{}{},\n\t}\n}",
  "func (obj Events) Block() Block {\n\treturn Block(obj)\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	UnspentDetails gets the recipient address, value, and confs of an unspent P2PKH transaction output. If the utxo does not exist or has a pubkey script of the wrong type, an error will be returned. 
 | 
	func (dcr *DCRBackend) UnspentDetails(txid string, vout uint32) (string, uint64, int64, error) {
	txHash, err := chainhash.NewHashFromStr(txid)
	if err != nil {
		return "", 0, -1, fmt.Errorf("error decoding tx ID %s: %v", txid, err)
	}
	txOut, pkScript, err := dcr.getUnspentTxOut(txHash, vout)
	if err != nil {
		return "", 0, -1, err
	}
	scriptType := dexdcr.ParseScriptType(dexdcr.CurrentScriptVersion, pkScript, nil)
	if scriptType == dexdcr.ScriptUnsupported {
		return "", 0, -1, dex.UnsupportedScriptError
	}
	if !scriptType.IsP2PKH() {
		return "", 0, -1, dex.UnsupportedScriptError
	}
	scriptAddrs, err := dexdcr.ExtractScriptAddrs(pkScript, chainParams)
	if err != nil {
		return "", 0, -1, fmt.Errorf("error parsing utxo script addresses")
	}
	if scriptAddrs.NumPK != 0 {
		return "", 0, -1, fmt.Errorf("pubkey addresses not supported for P2PKHDetails")
	}
	if scriptAddrs.NumPKH != 1 {
		return "", 0, -1, fmt.Errorf("multi-sig not supported for P2PKHDetails")
	}
	return scriptAddrs.PkHashes[0].String(), toAtoms(txOut.Value), txOut.Confirmations, nil
} 
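
A hedged caller-side sketch (utxoDetailer, describeUTXO, and the stub are assumptions for illustration, not the source's API): any error from UnspentDetails means the output is not a spendable single-signature P2PKH UTXO and should be rejected.

package main

import "fmt"

// utxoDetailer is an assumed interface capturing the method shape above.
type utxoDetailer interface {
	UnspentDetails(txid string, vout uint32) (string, uint64, int64, error)
}

// describeUTXO shows the caller's side: treat any error as "unusable output".
func describeUTXO(d utxoDetailer, txid string, vout uint32) (string, error) {
	addr, val, confs, err := d.UnspentDetails(txid, vout)
	if err != nil {
		return "", fmt.Errorf("unusable output %s:%d: %w", txid, vout, err)
	}
	return fmt.Sprintf("%d atoms to %s (%d confs)", val, addr, confs), nil
}

// stub returns canned details so the sketch runs without a node; the address,
// txid, and values are fabricated for illustration.
type stub struct{}

func (stub) UnspentDetails(string, uint32) (string, uint64, int64, error) {
	return "DsExampleAddrXXXX", 5_000_000, 3, nil
}

func main() {
	s, err := describeUTXO(stub{}, "feedbeef", 0)
	fmt.Println(s, err)
}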
 | 
	[
  "func (dcr *DCRBackend) getUnspentTxOut(txHash *chainhash.Hash, vout uint32) (*chainjson.GetTxOutResult, []byte, error) {\n\ttxOut, err := dcr.node.GetTxOut(txHash, vout, true)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"GetTxOut error for output %s:%d: %v\", txHash, vout, err)\n\t}\n\tif txOut == nil {\n\t\treturn nil, nil, fmt.Errorf(\"UTXO - no unspent txout found for %s:%d\", txHash, vout)\n\t}\n\tpkScript, err := hex.DecodeString(txOut.ScriptPubKey.Hex)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to decode pubkey script from '%s' for output %s:%d\", txOut.ScriptPubKey.Hex, txHash, vout)\n\t}\n\treturn txOut, pkScript, nil\n}",
  "func (am *AccountManager) ListUnspent(minconf, maxconf int,\n\taddresses map[string]bool) ([]map[string]interface{}, error) {\n\tbs, err := GetCurBlock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfos := []map[string]interface{}{}\n\tfor _, a := range am.AllAccounts() {\n\t\tfor _, record := range a.TxStore.UnspentOutputs() {\n\t\t\tinfo := record.TxInfo(a.name, bs.Height, cfg.Net())[0]\n\t\t\tinfos = append(infos, info)\n\t\t}\n\n\t}\n\treturn infos, nil\n}",
  "func (c *Client) AddressUnspentTransactionDetails(ctx context.Context, address string, maxTransactions int) (history AddressHistory, err error) {\n\n\t// Get the address UTXO history\n\tvar utxos AddressHistory\n\tif utxos, err = c.AddressUnspentTransactions(ctx, address); err != nil {\n\t\treturn\n\t} else if len(utxos) == 0 {\n\t\treturn\n\t}\n\n\t// Do we have a \"custom max\" amount?\n\tif maxTransactions > 0 {\n\t\ttotal := len(utxos)\n\t\tif total > maxTransactions {\n\t\t\tutxos = utxos[:total-(total-maxTransactions)]\n\t\t}\n\t}\n\n\t// Break up the UTXOs into batches\n\tvar batches []AddressHistory\n\tchunkSize := MaxTransactionsUTXO\n\n\tfor i := 0; i < len(utxos); i += chunkSize {\n\t\tend := i + chunkSize\n\n\t\tif end > len(utxos) {\n\t\t\tend = len(utxos)\n\t\t}\n\n\t\tbatches = append(batches, utxos[i:end])\n\t}\n\n\t// todo: use channels/wait group to fire all requests at the same time (rate limiting)\n\n\t// Loop Batches - and get each batch (multiple batches of MaxTransactionsUTXO)\n\tfor _, batch := range batches {\n\n\t\ttxHashes := new(TxHashes)\n\n\t\t// Loop the batch (max MaxTransactionsUTXO)\n\t\tfor _, utxo := range batch {\n\n\t\t\t// Append to the list to send and return\n\t\t\ttxHashes.TxIDs = append(txHashes.TxIDs, utxo.TxHash)\n\t\t\thistory = append(history, utxo)\n\t\t}\n\n\t\t// Get the tx details (max of MaxTransactionsUTXO)\n\t\tvar txList TxList\n\t\tif txList, err = c.BulkTransactionDetails(ctx, txHashes); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t// Add to the history list\n\t\tfor index, tx := range txList {\n\t\t\tfor _, utxo := range history {\n\t\t\t\tif utxo.TxHash == tx.TxID {\n\t\t\t\t\tutxo.Info = txList[index]\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}",
  "func GetAddressUnspent(addr string, page int, pagesize int) (*model.AddressUnspent, error) {\n\turl := fmt.Sprintf(bchapi.AddressUnspentUrl, addr, page, pagesize)\n\tresult, err := bchapi.HttpGet(url, bchapi.ConnTimeoutMS, bchapi.ServeTimeoutMS)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\taddressUnspent, err := model.StringToAddressUnspent(result)\n\treturn addressUnspent, err\n}",
  "func (ds DataStore) GetUnspentForWallet(ctx sdk.Context, wallet Wallet) (utxos []TxOutput) {\n\tfor _, p := range wallet.Unspent {\n\t\toutput, ok := ds.GetOutput(ctx, p)\n\t\tif !ok {\n\t\t\tpanic(fmt.Sprintf(\"Corrupted store: Wallet contains unspent position (%v) that doesn't exist in store\", p))\n\t\t}\n\t\ttx, ok := ds.GetTxWithPosition(ctx, p)\n\t\tif !ok {\n\t\t\tpanic(fmt.Sprintf(\"Corrupted store: Wallet contains unspent position (%v) that doesn't have corresponding tx\", p))\n\t\t}\n\n\t\ttxo := NewTxOutput(output.Output, p, tx.ConfirmationHash, tx.Transaction.TxHash(), output.Spent, output.SpenderTx)\n\t\tutxos = append(utxos, txo)\n\t}\n\treturn utxos\n}",
  "func GetUnspentOutputCoins(rpcClient *rpcclient.HttpClient, keyWallet *wallet.KeyWallet) ([]*crypto.OutputCoin, error) {\n\tprivateKey := &keyWallet.KeySet.PrivateKey\n\tpaymentAddressStr := keyWallet.Base58CheckSerialize(wallet.PaymentAddressType)\n\tviewingKeyStr := keyWallet.Base58CheckSerialize(wallet.ReadonlyKeyType)\n\n\toutputCoins, err := GetListOutputCoins(rpcClient, paymentAddressStr, viewingKeyStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tserialNumbers, err := DeriveSerialNumbers(privateKey, outputCoins)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tisExisted, err := CheckExistenceSerialNumber(rpcClient, paymentAddressStr, serialNumbers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tutxos := make([]*crypto.OutputCoin, 0)\n\tfor i, out := range outputCoins {\n\t\tif !isExisted[i] {\n\t\t\tutxos = append(utxos, out)\n\t\t}\n\t}\n\n\treturn utxos, nil\n}",
  "func CreateUnspent(bh BlockHeader, txn Transaction, outIndex int) (UxOut, error) {\n\tif outIndex < 0 || outIndex >= len(txn.Out) {\n\t\treturn UxOut{}, fmt.Errorf(\"Transaction out index overflows transaction outputs\")\n\t}\n\n\tvar h cipher.SHA256\n\t// The genesis block uses the null hash as the SrcTransaction [FIXME hardfork]\n\tif bh.BkSeq != 0 {\n\t\th = txn.Hash()\n\t}\n\n\treturn UxOut{\n\t\tHead: UxHead{\n\t\t\tTime:  bh.Time,\n\t\t\tBkSeq: bh.BkSeq,\n\t\t},\n\t\tBody: UxBody{\n\t\t\tSrcTransaction: h,\n\t\t\tAddress:        txn.Out[outIndex].Address,\n\t\t\tCoins:          txn.Out[outIndex].Coins,\n\t\t\tHours:          txn.Out[outIndex].Hours,\n\t\t},\n\t}, nil\n}",
  "func (wc *rpcClient) listUnspent() ([]*ListUnspentResult, error) {\n\tunspents := make([]*ListUnspentResult, 0)\n\t// TODO: listunspent 0 9999999 []string{}, include_unsafe=false\n\treturn unspents, wc.call(methodListUnspent, anylist{uint8(0)}, &unspents)\n}",
  "func CreateUnspent(bh BlockHeader, tx Transaction, outIndex int) (UxOut, error) {\n\tif len(tx.Out) <= outIndex {\n\t\treturn UxOut{}, fmt.Errorf(\"Transaction out index is overflow\")\n\t}\n\n\tvar h cipher.SHA256\n\tif bh.BkSeq != 0 {\n\t\th = tx.Hash()\n\t}\n\n\treturn UxOut{\n\t\tHead: UxHead{\n\t\t\tTime:  bh.Time,\n\t\t\tBkSeq: bh.BkSeq,\n\t\t},\n\t\tBody: UxBody{\n\t\t\tSrcTransaction: h,\n\t\t\tAddress:        tx.Out[outIndex].Address,\n\t\t\tCoins:          tx.Out[outIndex].Coins,\n\t\t\tHours:          tx.Out[outIndex].Hours,\n\t\t},\n\t}, nil\n}",
  "func valueUnspentCredit(cred *credit) ([]byte, error) {\n\tif len(cred.scriptHash) != 32 {\n\t\treturn nil, fmt.Errorf(\"short script hash (expect 32 bytes)\")\n\t}\n\tv := make([]byte, 45)\n\tbinary.BigEndian.PutUint64(v, cred.amount.UintValue())\n\tif cred.flags.Change {\n\t\tv[8] |= 1 << 1\n\t}\n\tif cred.flags.Class == ClassStakingUtxo {\n\t\tv[8] |= 1 << 2\n\t}\n\tif cred.flags.Class == ClassBindingUtxo {\n\t\tv[8] |= 1 << 3\n\t}\n\tbinary.BigEndian.PutUint32(v[9:13], cred.maturity)\n\tcopy(v[13:45], cred.scriptHash)\n\treturn v, nil\n}",
  "func (s *Store) UnspentOutputs() []*RecvTxOut {\n\tunspent := make([]*RecvTxOut, 0, len(s.unspent))\n\tfor _, record := range s.unspent {\n\t\tunspent = append(unspent, record.record(s).(*RecvTxOut))\n\t}\n\treturn unspent\n}",
  "func AddressUnspentOutputs(t *testing.T, node *framework.Node, address devnetvm.Address, numOfExpectedOuts int) []jsonmodels.WalletOutput {\n\tresp, err := node.PostAddressUnspentOutputs([]string{address.Base58()})\n\trequire.NoErrorf(t, err, \"node=%s, address=%s, PostAddressUnspentOutputs failed\", node, address.Base58())\n\trequire.Lenf(t, resp.UnspentOutputs, numOfExpectedOuts, \"invalid response\")\n\trequire.Equalf(t, address.Base58(), resp.UnspentOutputs[0].Address.Base58, \"invalid response\")\n\n\treturn resp.UnspentOutputs[0].Outputs\n}",
  "func ToUTXO(utxos []Unspent, privs string) (tx.UTXOs, error) {\n\t//prepare private key.\n\tpriv, err := address.FromWIF(privs, address.BitcoinMain)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttxs := make(tx.UTXOs, len(utxos))\n\tfor i, utxo := range utxos {\n\t\thash, err := hex.DecodeString(utxo.Tx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thash = tx.Reverse(hash)\n\t\tscript, err := hex.DecodeString(utxo.Script)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttxs[i] = &tx.UTXO{\n\t\t\tValue:   utxo.Amount,\n\t\t\tKey:     priv,\n\t\t\tTxHash:  hash,\n\t\t\tTxIndex: utxo.N,\n\t\t\tScript:  script,\n\t\t}\n\t}\n\treturn txs, nil\n}",
  "func (c *Client) AddressUnspentTransactions(ctx context.Context, address string) (history AddressHistory, err error) {\n\n\tvar resp string\n\t// https://api.whatsonchain.com/v1/bsv/<network>/address/<address>/unspent\n\tif resp, err = c.request(\n\t\tctx,\n\t\tfmt.Sprintf(\"%s%s/address/%s/unspent\", apiEndpoint, c.Network(), address),\n\t\thttp.MethodGet, nil,\n\t); err != nil {\n\t\treturn\n\t}\n\tif len(resp) == 0 {\n\t\treturn nil, ErrAddressNotFound\n\t}\n\terr = json.Unmarshal([]byte(resp), &history)\n\treturn\n}",
  "func (u UTXOSet) FindUnspentTransactionOutputs(pubKeyHash []byte) []TxOutput {\n\tvar UTXOs []TxOutput\n\n\tdb := u.BlockChain.Database\n\n\terr := db.View(func(txn *badger.Txn) error {\n\t\topts := badger.DefaultIteratorOptions\n\n\t\tit := txn.NewIterator(opts)\n\t\tdefer it.Close()\n\n\t\t// iterate through all transactions with UTXOs\n\t\tfor it.Seek(utxoPrefix); it.ValidForPrefix(utxoPrefix); it.Next() {\n\t\t\titem := it.Item()\n\t\t\tv, err := item.Value()\n\t\t\tHandle(err)\n\t\t\touts := DeserializeOutputs(v)\n\t\t\t// go through all outputs of that transaction\n\t\t\tfor _, out := range outs.Outputs {\n\t\t\t\t// check the output was locked with this address (belongs to this receiver and can be unlocked by this address to use as new input)\n\t\t\t\tif out.IsLockedWithKey(pubKeyHash) {\n\t\t\t\t\tUTXOs = append(UTXOs, out)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tHandle(err)\n\treturn UTXOs\n}",
  "func (bav *UtxoView) GetUnspentUtxoEntrysForPublicKey(pkBytes []byte) ([]*UtxoEntry, error) {\n\t// Fetch the relevant utxos for this public key from the db. We do this because\n\t// the db could contain utxos that are not currently loaded into the view.\n\tutxoEntriesForPublicKey, err := DbGetUtxosForPubKey(pkBytes, bav.Handle)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"UtxoView.GetUnspentUtxoEntrysForPublicKey: Problem fetching \"+\n\t\t\t\"utxos for public key %s\", PkToString(pkBytes, bav.Params))\n\t}\n\n\t// Load all the utxos associated with this public key into\n\t// the view. This makes it so that the view can enumerate all of the utxoEntries\n\t// known for this public key. To put it another way, it allows the view to\n\t// contain the union of:\n\t// - utxos in the db\n\t// - utxos in the view from previously-connected transactions\n\tfor _, utxoEntry := range utxoEntriesForPublicKey {\n\t\tbav.GetUtxoEntryForUtxoKey(utxoEntry.UtxoKey)\n\t}\n\n\t// Now that all of the utxos for this key have been loaded, filter the\n\t// ones for this public key and return them.\n\tutxoEntriesToReturn := []*UtxoEntry{}\n\tfor utxoKeyTmp, utxoEntry := range bav.UtxoKeyToUtxoEntry {\n\t\t// Make a copy of the iterator since it might change from underneath us\n\t\t// if we take its pointer.\n\t\tutxoKey := utxoKeyTmp\n\t\tutxoEntry.UtxoKey = &utxoKey\n\t\tif !utxoEntry.isSpent && reflect.DeepEqual(utxoEntry.PublicKey, pkBytes) {\n\t\t\tutxoEntriesToReturn = append(utxoEntriesToReturn, utxoEntry)\n\t\t}\n\t}\n\n\treturn utxoEntriesToReturn, nil\n}",
  "func FilterUnspentTransactionOutput(unspentTransactionOutputs []*transactions.UnspentTransactionOutput, address string) []*transactions.UnspentTransactionOutput {\n\n\t// initialize result\n\tvar filteredUnspentOutputs []*transactions.UnspentTransactionOutput\n\n\t// loop through and add matching outputs\n\tfor _, unspentTransaction := range unspentTransactionOutputs {\n\n\t\t// check address\n\t\tif unspentTransaction.Address == address {\n\t\t\tfilteredUnspentOutputs = append(filteredUnspentOutputs, unspentTransaction)\n\t\t}\n\t}\n\n\t// return what was found\n\treturn filteredUnspentOutputs\n}",
  "func (s *Client) ListUnspent(ctx context.Context, scripthash string) ([]*ListUnspentResult, error) {\n\tvar resp ListUnspentResp\n\n\terr := s.request(ctx, \"blockchain.scripthash.listunspent\", []interface{}{scripthash}, &resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp.Result, err\n}",
  "func (w *rpcWallet) Unspents(ctx context.Context, account string) ([]walletjson.ListUnspentResult, error) {\n\tvar unspents []walletjson.ListUnspentResult\n\t// minconf, maxconf (rpcdefault=9999999), [address], account\n\tparams := anylist{0, 9999999, nil, account}\n\terr := w.rpcClientRawRequest(ctx, methodListUnspent, params, &unspents)\n\treturn unspents, err\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	unconnectedDCR returns a DCRBackend without a node. The node should be set before use. 
 | 
	func unconnectedDCR(ctx context.Context, logger dex.Logger) *DCRBackend {
	dcr := &DCRBackend{
		ctx:        ctx,
		blockChans: make([]chan uint32, 0),
		blockCache: newBlockCache(logger),
		anyQ:       make(chan interface{}, 128), // way bigger than needed.
		log:        logger,
	}
	go dcr.superQueue()
	return dcr
} 
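
The construct-then-connect pattern above can be sketched generically (all names below are hypothetical): build the backend node-free so the queue and any notification handlers can reference it, then inject and sanity-check the node before use.

package main

import (
	"errors"
	"fmt"
)

// rpcNode is a hypothetical stand-in for the RPC client set after construction.
type rpcNode interface {
	BestBlockHeight() (int64, error)
}

type backend struct {
	node rpcNode
	anyQ chan interface{}
}

func unconnected() *backend {
	// Everything that doesn't issue RPCs can be initialized here.
	return &backend{anyQ: make(chan interface{}, 128)}
}

func (b *backend) connect(n rpcNode) error {
	if n == nil {
		return errors.New("nil node")
	}
	b.node = n
	_, err := b.node.BestBlockHeight() // sanity-check the connection
	return err
}

type fakeNode struct{}

func (fakeNode) BestBlockHeight() (int64, error) { return 42, nil }

func main() {
	b := unconnected()
	fmt.Println(b.connect(fakeNode{})) // <nil>
}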
 | 
	[
  "func NewBackend(ctx context.Context, configPath string, logger dex.Logger, network dex.Network) (*DCRBackend, error) {\n\t// loadConfig will set fields if defaults are used and set the chainParams\n\t// package variable.\n\tcfg, err := loadConfig(configPath, network)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdcr := unconnectedDCR(ctx, logger)\n\tnotifications := &rpcclient.NotificationHandlers{\n\t\tOnBlockConnected: dcr.onBlockConnected,\n\t}\n\t// When the exported constructor is used, the node will be an\n\t// rpcclient.Client.\n\tdcr.client, err = connectNodeRPC(cfg.RPCListen, cfg.RPCUser, cfg.RPCPass,\n\t\tcfg.RPCCert, notifications)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = dcr.client.NotifyBlocks()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error registering for block notifications\")\n\t}\n\tdcr.node = dcr.client\n\t// Prime the cache with the best block.\n\tbestHash, _, err := dcr.client.GetBestBlock()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting best block from dcrd: %v\", err)\n\t}\n\tif bestHash != nil {\n\t\t_, err := dcr.getDcrBlock(bestHash)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error priming the cache: %v\", err)\n\t\t}\n\t}\n\treturn dcr, nil\n}",
  "func unconnectedETH(logger dex.Logger, cfg *config) *Backend {\n\tctx, cancel := context.WithCancel(context.Background())\n\t// TODO: At some point multiple contracts will need to be used, at\n\t// least for transitory periods when updating the contract, and\n\t// possibly a random contract setup, and so this section will need to\n\t// change to support multiple contracts.\n\tvar contractAddr common.Address\n\tswitch cfg.network {\n\tcase dex.Simnet:\n\t\tcontractAddr = common.HexToAddress(simnetContractAddr)\n\tcase dex.Testnet:\n\t\tcontractAddr = common.HexToAddress(testnetContractAddr)\n\tcase dex.Mainnet:\n\t\tcontractAddr = common.HexToAddress(mainnetContractAddr)\n\t}\n\treturn &Backend{\n\t\trpcCtx:       ctx,\n\t\tcancelRPCs:   cancel,\n\t\tcfg:          cfg,\n\t\tlog:          logger,\n\t\tblockChans:   make(map[chan *asset.BlockUpdate]struct{}),\n\t\tcontractAddr: contractAddr,\n\t\tinitTxSize:   uint32(dexeth.InitGas(1, version)),\n\t}\n}",
  "func (vm *VM) Disconnected(id ids.ShortID) error {\n\treturn nil\n}",
  "func (s *eremeticScheduler) Disconnected(sched.SchedulerDriver) {\n\tlog.Debugf(\"Framework disconnected with master\")\n}",
  "func unconnectedWallet(cfg *asset.WalletConfig, dcrCfg *Config, logger dex.Logger) *ExchangeWallet {\n\t// If set in the user config, the fallback fee will be in units of DCR/kB.\n\t// Convert to atoms/B.\n\tfallbackFeesPerByte := toAtoms(dcrCfg.FallbackFeeRate / 1000)\n\tif fallbackFeesPerByte == 0 {\n\t\tfallbackFeesPerByte = defaultFee\n\t}\n\tlogger.Tracef(\"Fallback fees set at %d atoms/byte\", fallbackFeesPerByte)\n\n\tredeemConfTarget := dcrCfg.RedeemConfTarget\n\tif redeemConfTarget == 0 {\n\t\tredeemConfTarget = defaultRedeemConfTarget\n\t}\n\tlogger.Tracef(\"Redeem conf target set to %d blocks\", redeemConfTarget)\n\n\treturn &ExchangeWallet{\n\t\tlog:                 logger,\n\t\tacct:                cfg.Settings[\"account\"],\n\t\ttipChange:           cfg.TipChange,\n\t\tfundingCoins:        make(map[outPoint]*fundingCoin),\n\t\tfindRedemptionQueue: make(map[outPoint]*findRedemptionReq),\n\t\tfallbackFeeRate:     fallbackFeesPerByte,\n\t\tredeemConfTarget:    redeemConfTarget,\n\t\tuseSplitTx:          dcrCfg.UseSplitTx,\n\t}\n}",
  "func (w *rpcWallet) Disconnected() bool {\n\treturn w.rpcConnector.Disconnected()\n}",
  "func (k *KubernetesExecutor) Disconnected(driver bindings.ExecutorDriver) {\n\tif k.isDone() {\n\t\treturn\n\t}\n\tlog.Infof(\"Slave is disconnected\\n\")\n\tif !k.swapState(connectedState, disconnectedState) {\n\t\t//programming error?\n\t\tpanic(\"already disconnected?!\")\n\t}\n}",
  "func (k *KubernetesExecutor) Disconnected(driver bindings.ExecutorDriver) {\n\tif k.isDone() {\n\t\treturn\n\t}\n\tlog.Infof(\"Slave is disconnected\\n\")\n\tif !(&k.state).transition(connectedState, disconnectedState) {\n\t\tlog.Errorf(\"failed to disconnect/transition to a disconnected state\")\n\t}\n}",
  "func (er *EventRelay) Disconnected(err error) {\n\tlogger.Warnf(\"Disconnected: %s. Attempting to reconnect...\\n\", err)\n\n\ter.ehmutex.Lock()\n\tdefer er.ehmutex.Unlock()\n\n\ter.eventHub = nil\n\n\tgo er.connectEventHub()\n}",
  "func (k *Executor) Disconnected(driver bindings.ExecutorDriver) {\n\tif k.isDone() {\n\t\treturn\n\t}\n\tlog.Infof(\"Slave is disconnected\\n\")\n\tif !(&k.state).transition(connectedState, disconnectedState) {\n\t\tlog.Errorf(\"failed to disconnect/transition to a disconnected state\")\n\t}\n}",
  "func (dc *Dynamic) disconnected() {\n\tif dc.cancel != nil {\n\t\tdc.cancel()\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdc.cancel = cancel\n\tdc.c = NewDisconnected(ctx)\n\tdc.stat = machine.Status{}\n}",
  "func (shard *GRShard) disconnectedInstance() (*grInstance, error) {\n\tprimaryInstance := shard.findShardPrimaryTablet()\n\t// if there is no primary, we should recover from DiagnoseTypeWrongPrimaryTablet\n\tif primaryInstance == nil {\n\t\treturn nil, fmt.Errorf(\"%v does not have primary\", formatKeyspaceShard(shard.KeyspaceShard))\n\t}\n\t// Up to this check, we know:\n\t// - shard has an agreed group\n\t// - shard has a primary tablet\n\t// - shard primary tablet is running on the same node as mysql\n\trand.Shuffle(len(shard.instances), func(i, j int) {\n\t\tshard.instances[i], shard.instances[j] = shard.instances[j], shard.instances[i]\n\t})\n\tfor _, instance := range shard.instances {\n\t\t// Skip instance without hostname because they are not up and running\n\t\t// also skip instances that raised unrecoverable errors\n\t\tif shard.shardStatusCollector.isUnreachable(instance) {\n\t\t\tshard.logger.Infof(\"Skip %v to check disconnectedInstance because it is unhealthy\", instance.alias)\n\t\t\tcontinue\n\t\t}\n\t\tisUnconnected := shard.sqlGroup.IsUnconnectedReplica(instance.instanceKey)\n\t\tif isUnconnected {\n\t\t\treturn instance, nil\n\t\t}\n\t}\n\treturn nil, nil\n}",
  "func (k *KubernetesScheduler) Disconnected(driver mesos.SchedulerDriver) {\n\tlog.Infof(\"Master disconnected!\\n\")\n\tk.registered = false\n\n\tk.Lock()\n\tdefer k.Unlock()\n\n\t// discard all cached offers to avoid unnecessary TASK_LOST updates\n\tfor offerId := range k.offers {\n\t\tk.deleteOffer(offerId)\n\t}\n\n\t// TODO(jdef): it's possible that a task is pending, in between Schedule() and\n\t// Bind(), such that it's offer is now invalid. We should check for that and\n\t// clearing the offer from the task (along with a related check in Bind())\n}",
  "func (eventHub *EventHub) Disconnected(err error) {\n\tif !eventHub.connected {\n\t\treturn\n\t}\n\teventHub.client.Stop()\n\teventHub.connected = false\n\n}",
  "func (m *peerManager) Disconnected(peerID NodeID) error {\n\tm.mtx.Lock()\n\tdefer m.mtx.Unlock()\n\tdelete(m.connected, peerID)\n\tm.broadcast(PeerUpdate{\n\t\tPeerID: peerID,\n\t\tStatus: PeerStatusDown,\n\t})\n\treturn nil\n}",
  "func NoOpBackendConnector(b Backend) BackendConnector {\n\treturn noOpConnectedBackend{backend: b}\n}",
  "func GetEmptyBackend() BackEnd {\n\treturn emptyBackEnd{}\n}",
  "func (tc *TorConn) unusedCircID() []byte {\n\tif tc.linkProtoVersion != 4 {\n\t\tpanic(\"unusedCircID: reimplement for other protocols by tor-spec.txt section 4.1\")\n\t}\n\tvar d [4]byte\n\tfor {\n\t\trand.Read(d[:])\n\t\td[0] |= (1 << 7) // protocol version 4: the node that initiated the connection sets the big-endian MSB to 1\n\t\tif _, used := tc.circuits[string(d[:])]; !used {\n\t\t\treturn d[:]\n\t\t}\n\t}\n}",
  "func (currentLabel *Label) removeNode(removeLabel *Label, g *Graph) uint16 {\n    Assert(nilGraph, g != nil)\n    Assert(nilLabelStore, g.labelStore != nil)\n    Assert(nilLabel, removeLabel != nil)\n    \n    // make sure we haven't reached the end of the road\n    if (currentLabel == nil) {  // TODO should this cause an error?\n        return uint16(0)\n    }\n    \n    // this is the one we want\n    if removeLabel.Id == currentLabel.Id {\n        // remove this label\n        cl, _ := currentLabel.left(g) // TODO do not ignore error\n        cr, _ := currentLabel.right(g) // TODO do not ignore error\n        \n        if cl == nil && cr == nil { // no descendents\n            return uint16(0)\n        } else if cl == nil { // one descendent\n            return cr.Id\n        } else if cr == nil { // one descendent\n            return cl.Id\n        } else if cl.height() > cr.height() {\n            // get the right most node of the left branch\n            rLabel := cl.rightmostNode(g)\n            rLabel.l = cl.removeNode(rLabel, g)\n            rLabel.r = currentLabel.r\n            g.labelStore.writes[rLabel.Id] = rLabel\n            return rLabel.balance(g)\n        } else {\n            // get the left most node of the right branch\n            lLabel := cr.leftmostNode(g)\n            lLabel.r = cl.removeNode(lLabel, g)\n            lLabel.l = currentLabel.l\n            g.labelStore.writes[lLabel.Id] = lLabel\n            return lLabel.balance(g)\n        }\n       \n    // keep looking\n    } else if removeLabel.Value(g) < currentLabel.Value(g) {\n        left, _ := currentLabel.left(g) // TODO do not ignore error\n        l := left.removeNode(removeLabel, g)\n        if (l != currentLabel.l) {\n            g.labelStore.writes[currentLabel.Id] = currentLabel\n        }\n        currentLabel.l = l\n    } else {\n        right, _ := currentLabel.right(g) // TODO do not ignore error\n        r := right.removeNode(removeLabel, g)\n        if (r != currentLabel.r) {\n            g.labelStore.writes[currentLabel.Id] = currentLabel\n        }\n        currentLabel.r = r\n    }\n    \n    return currentLabel.balance(g)\n    \n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	superQueue should be run as a goroutine. The dcrd-registered handlers should perform any necessary type conversion and then deposit the payload into the anyQ channel. superQueue processes the queue and monitors the application context. 
 | 
	func (dcr *DCRBackend) superQueue() {
out:
	for {
		select {
		case rawMsg := <-dcr.anyQ:
			switch msg := rawMsg.(type) {
			case *chainhash.Hash:
				// This is a new block notification.
				blockHash := msg
				dcr.log.Debugf("superQueue: Processing new block %s", blockHash)
				blockVerbose, err := dcr.node.GetBlockVerbose(blockHash, false)
				if err != nil {
					dcr.log.Errorf("onBlockConnected error retrieving block %s: %v", blockHash, err)
					return
				}
				// Check if this forces a reorg.
				currentTip := int64(dcr.blockCache.tipHeight())
				if blockVerbose.Height <= currentTip {
					dcr.blockCache.reorg(blockVerbose)
				}
				block, err := dcr.blockCache.add(blockVerbose)
				if err != nil {
					dcr.log.Errorf("error adding block to cache")
				}
				dcr.signalMtx.RLock()
				for _, c := range dcr.blockChans {
					select {
					case c <- block.height:
					default:
						dcr.log.Errorf("tried sending block update on blocking channel")
					}
				}
				dcr.signalMtx.RUnlock()
			default:
				dcr.log.Warn("unknown message type in superQueue: %T", rawMsg)
			}
		case <-dcr.ctx.Done():
			dcr.shutdown()
			break out
		}
	}
} 
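
A runnable sketch of the producer/consumer split this implies (hypothetical names throughout; shutdown here is driven by closing the queue rather than a context, purely to keep the example deterministic): handlers only convert types and enqueue, while a single superQueue goroutine serializes all the real work.

package main

import "fmt"

type blockHash [32]byte

type backend struct {
	anyQ chan interface{}
	done chan struct{}
}

// onBlockConnected is the cheap handler side: convert the payload and enqueue.
func (b *backend) onBlockConnected(h blockHash) { b.anyQ <- &h }

// superQueue drains anyQ until it is closed, serializing all processing in
// one goroutine, then signals done.
func (b *backend) superQueue() {
	defer close(b.done)
	for raw := range b.anyQ {
		switch msg := raw.(type) {
		case *blockHash:
			fmt.Printf("processing block %x...\n", msg[:4])
		default:
			fmt.Printf("unknown message type in superQueue: %T\n", raw)
		}
	}
}

func main() {
	b := &backend{anyQ: make(chan interface{}, 128), done: make(chan struct{})}
	go b.superQueue()
	b.onBlockConnected(blockHash{0xde, 0xad, 0xbe, 0xef})
	close(b.anyQ) // real code would instead select on a context's Done channel
	<-b.done
}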
 | 
	[
  "func (h *DataJobHandler) HandleQueue(ctx context.Context, data []byte) error {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tblog.Errorf(\"data job handle panic: %v\\n\", r)\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\tblog.Errorf(\"queue handler timeout ctx done.\")\n\tcase <-h.stopCtx.Done():\n\t\tblog.Errorf(\"data job handler has been closed.\")\n\t\treturn nil\n\tdefault:\n\t}\n\tdataJobHandlerData := &msgqueue.HandlerData{}\n\terr := json.Unmarshal(data, dataJobHandlerData)\n\tif err != nil {\n\t\tblog.Errorf(\"Unmarshal handler data failed: %v\", err)\n\t\treturn err\n\t}\n\tdataJob := &datajob.DataJob{}\n\terr = json.Unmarshal(dataJobHandlerData.Body, dataJob)\n\tif err != nil {\n\t\tblog.Errorf(\"unmarshal job error: %v\", err)\n\t\treturn fmt.Errorf(\"unmarshal job error: %v\", err)\n\t}\n\tswitch dataJob.Opts.ObjectType {\n\tcase types.ProjectType, types.PublicType:\n\t\tif _, ok := h.chanMap.Load(\"public\"); !ok {\n\t\t\tpublicChan := chanx.NewUnboundedChan(100)\n\t\t\th.jobChanList <- *publicChan\n\t\t\th.chanMap.Store(\"public\", *publicChan)\n\t\t\tblog.Infof(\"[handler] add public chan\")\n\t\t}\n\t\tpublicCh, _ := h.chanMap.Load(\"public\")\n\t\tpublicChan, ok := publicCh.(chanx.UnboundedChan)\n\t\tif !ok {\n\t\t\tblog.Errorf(\"trans publicChan to chanx.UnboundedChan error\")\n\t\t\treturn fmt.Errorf(\"trans publicChan to chanx.UnboundedChan error\")\n\t\t}\n\t\tpublicChan.In <- *dataJob\n\tdefault:\n\t\tif _, ok := h.chanMap.Load(dataJob.Opts.ClusterID); !ok {\n\t\t\tclusterChan := chanx.NewUnboundedChan(100)\n\t\t\th.jobChanList <- *clusterChan\n\t\t\th.chanMap.Store(dataJob.Opts.ClusterID, *clusterChan)\n\t\t\tblog.Infof(\"[handler] add cluster chan:%s\", dataJob.Opts.ClusterID)\n\t\t}\n\t\tclusterCh, _ := h.chanMap.Load(dataJob.Opts.ClusterID)\n\t\tclusterChan, ok := clusterCh.(chanx.UnboundedChan)\n\t\tif !ok {\n\t\t\tblog.Errorf(\"trans clusterChan to chanx.UnboundedChan error\")\n\t\t\treturn fmt.Errorf(\"trans clusterChan to chanx.UnboundedChan error\")\n\t\t}\n\t\tclusterChan.In <- *dataJob\n\t}\n\treturn nil\n}",
  "func (pc *partitionConsumer) dispatcher() {\n\tdefer func() {\n\t\tpc.log.Debug(\"exiting dispatch loop\")\n\t}()\n\tvar messages []*message\n\tfor {\n\t\tvar queueCh chan []*message\n\t\tvar messageCh chan ConsumerMessage\n\t\tvar nextMessage ConsumerMessage\n\t\tvar nextMessageSize int\n\n\t\t// are there more messages to send?\n\t\tif len(messages) > 0 {\n\t\t\tnextMessage = ConsumerMessage{\n\t\t\t\tConsumer: pc.parentConsumer,\n\t\t\t\tMessage:  messages[0],\n\t\t\t}\n\t\t\tnextMessageSize = messages[0].size()\n\n\t\t\tif pc.dlq.shouldSendToDlq(&nextMessage) {\n\t\t\t\t// pass the message to the DLQ router\n\t\t\t\tpc.metrics.DlqCounter.Inc()\n\t\t\t\tmessageCh = pc.dlq.Chan()\n\t\t\t} else {\n\t\t\t\t// pass the message to application channel\n\t\t\t\tmessageCh = pc.messageCh\n\t\t\t}\n\n\t\t\tpc.metrics.PrefetchedMessages.Dec()\n\t\t\tpc.metrics.PrefetchedBytes.Sub(float64(len(messages[0].payLoad)))\n\t\t} else {\n\t\t\tqueueCh = pc.queueCh\n\t\t}\n\n\t\tselect {\n\t\tcase <-pc.closeCh:\n\t\t\treturn\n\n\t\tcase _, ok := <-pc.connectedCh:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpc.log.Debug(\"dispatcher received connection event\")\n\n\t\t\tmessages = nil\n\n\t\t\t// reset available permits\n\t\t\tpc.availablePermits.reset()\n\n\t\t\tvar initialPermits uint32\n\t\t\tif pc.options.autoReceiverQueueSize {\n\t\t\t\tinitialPermits = uint32(pc.currentQueueSize.Load())\n\t\t\t} else {\n\t\t\t\tinitialPermits = uint32(pc.maxQueueSize)\n\t\t\t}\n\n\t\t\tpc.log.Debugf(\"dispatcher requesting initial permits=%d\", initialPermits)\n\t\t\t// send initial permits\n\t\t\tif err := pc.internalFlow(initialPermits); err != nil {\n\t\t\t\tpc.log.WithError(err).Error(\"unable to send initial permits to broker\")\n\t\t\t}\n\n\t\tcase msgs, ok := <-queueCh:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// we only read messages here after the consumer has processed all messages\n\t\t\t// in the previous batch\n\t\t\tmessages = msgs\n\n\t\t// if the messageCh is nil or the messageCh is full this will not be selected\n\t\tcase messageCh <- nextMessage:\n\t\t\t// allow this message to be garbage collected\n\t\t\tmessages[0] = nil\n\t\t\tmessages = messages[1:]\n\n\t\t\tpc.availablePermits.inc()\n\n\t\t\tif pc.options.autoReceiverQueueSize {\n\t\t\t\tpc.incomingMessages.Dec()\n\t\t\t\tpc.client.memLimit.ReleaseMemory(int64(nextMessageSize))\n\t\t\t\tpc.expectMoreIncomingMessages()\n\t\t\t}\n\n\t\tcase clearQueueCb := <-pc.clearQueueCh:\n\t\t\t// drain the message queue on any new connection by sending a\n\t\t\t// special nil message to the channel so we know when to stop dropping messages\n\t\t\tvar nextMessageInQueue *trackingMessageID\n\t\t\tgo func() {\n\t\t\t\tpc.queueCh <- nil\n\t\t\t}()\n\n\t\t\tfor m := range pc.queueCh {\n\t\t\t\t// the queue has been drained\n\t\t\t\tif m == nil {\n\t\t\t\t\tbreak\n\t\t\t\t} else if nextMessageInQueue == nil {\n\t\t\t\t\tnextMessageInQueue = toTrackingMessageID(m[0].msgID)\n\t\t\t\t}\n\t\t\t\tif pc.options.autoReceiverQueueSize {\n\t\t\t\t\tpc.incomingMessages.Sub(int32(len(m)))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tmessages = nil\n\n\t\t\tclearQueueCb(nextMessageInQueue)\n\t\t}\n\t}\n}",
  "func Dispatcher(jobQueue chan<- Job) {\n\tStat.IncrementValue(\"alert-dispatcher.ticks-skipped-due-to-slow-tickqueue\", 0)\n\tgo dispatchJobs(jobQueue)\n\tfor {\n\t\tticker := getAlignedTicker()\n\t\tselect {\n\t\tcase t := <-ticker.C:\n\t\t\tStat.Gauge(\"alert-tickqueue.items\", int64(len(tickQueue)))\n\t\t\tStat.Gauge(\"alert-tickqueue.size\", int64(tickQueueSize))\n\t\t\tselect {\n\t\t\tcase tickQueue <- t:\n\t\t\tdefault:\n\t\t\t\t// TODO: alert when this happens\n\t\t\t\tStat.Increment(\"alert-dispatcher.ticks-skipped-due-to-slow-tickqueue\")\n\t\t\t}\n\t\t}\n\t}\n}",
  "func dispatchQueue(loader *DataLoader) {\n\t// Take the current loader queue, replacing it with an empty queue.\n\titems := loader.queue.DumpItems()\n\n\t// Collect all keys to be loaded in this dispatch\n\tkeys := make([]interface{}, len(items))\n\tfor i, item := range items {\n\t\tkeys[i] = item.Key\n\t}\n\n\t// Call the provided batchLoadFn for this loader with the loader queue's keys.\n\tvalues, err := loader.batchLoadFn(keys)\n\n\tif err != nil {\n\t\tfailedDispatch(loader, items, err)\n\t\treturn\n\t}\n\n\tif len(values) != len(keys) {\n\t\tfailedDispatch(loader, items, fmt.Errorf(\n\t\t\t\"DataLoader must be constructed with a function which accepts \"+\n\t\t\t\t\"Array<key> and returns Future<Array<value>>, but the function did \"+\n\t\t\t\t\"not return a Future of an Array of the same length as the Array \"+\n\t\t\t\t\"of keys. \\n\\nKeys:\\n%s \\n\\nValues:\\n%s\", keys, values))\n\t\treturn\n\t}\n\n\t// Step through the values, resolving or rejecting each Future in the\n\t// loaded queue.\n\tfor i, item := range items {\n\t\tswitch value := values[i].(type) {\n\t\tcase error:\n\t\t\titem.Resolve(nil, value)\n\t\tdefault:\n\t\t\titem.Resolve(value, nil)\n\t\t}\n\t}\n}",
  "func fncSubbedToRedis(c configData, q *dque.DQue) {\n\n\t//log.Println(\"Redis Addr: \" + c.getRedisClientAddr())\n\t//log.Println(\"Redis port: \" + c.getRedisClientPort())\n\t//log.Println(\"Redis pass: \" + c.getRedisClientPass())\n\t//log.Println(\"Redis DB  : \" + strconv.Itoa(c.getRedisClientDB()))\n\t//Create new Redis Client\n\tredisClient := redis.NewClient(&redis.Options{\n\t\tAddr:     c.getRedisClientAddr() + \":\" + c.getRedisClientPort(),\n\t\tPassword: c.getRedisClientPass(),\n\t\tDB:       c.getRedisClientDB(),\n\t})\n\n\t//Ping redis server and see if we had any errors.\n\terr := redisClient.Ping().Err()\n\n\tif err != nil {\n\t\t//Sleep 3, try again.\n\t\ttime.Sleep(3 * time.Second)\n\n\t\t//try again\n\t\terr := redisClient.Ping().Err()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tlog.Println(\"Subscribing to Topic: \" + c.getRedisSubscribeTopic())\n\t//Subscribe to that topic thing\n\ttopic := redisClient.Subscribe(c.getRedisSubscribeTopic())\n\n\t//We're getting a channel\n\t//Channel is basically a built-in for pinging and pulling messages nicely.\n\tchannel := topic.Channel()\n\n\t//For messages in channel\n\tfor msg := range channel {\n\t\t//instantiate a copy of the struct where we're storing data.\n\t\t//Unmarshal the data into the user.\n\t\t//For Debug\n\t\tlog.Println(\"enq:\" + msg.Payload)\n\n\t\t//\"wrap\" the msg.payload in a \"queueThisData Struct\"\n\t\teqr := &queueThisData{IngestData: msg.Payload}\n\t\t//Drop the message to queue.\n\t\terr := q.Enqueue(eqr)\n\t\tif err != nil {\n\t\t\tlog.Println(\"EQ Err: \", err)\n\t\t}\n\t\t//log.Println(\"Queue Size\", q.Size())\n\t\t//engageTurbo(c, q)\n\n\t\t//log.Println(\"Sub to Redis side: Queue Size\", q.Size())\n\t}\n}",
  "func (c *commander) dispatcher() {\n\tfor {\n\t\tif len(c.queue) != 0 {\n\t\t\tc.dispatch_chan <- c.queue[0]\n\t\t\tif c.settings&(1<<LOG_OUT) != 0 {\n\t\t\t\tfmt.Fprintf(c.log, \"Dispatch\\t%v\\n\", c.queue[0])\n\t\t\t}\n\t\t\tc.queue = c.queue[1:]\n\t\t} else {\n\t\t\ttime.Sleep(PAUSE * time.Millisecond)\n\t\t}\n\t}\n}",
  "func (d dispatcher) startDispatcher(ctx context.Context) {\n\tlogger, _ := zap.NewDevelopment()\n\n\tfor {\n\t\tselect {\n\t\tcase survey := <-d.surveys:\n\t\t\td.jobs <- survey\n\t\tcase <-ctx.Done():\n\t\t\tlogger.Info(\"*************************Dispatcher is closing jobs and surveys channels!***************************\\n\")\n\t\t\tclose(d.surveys)\n\n\t\t\tclose(d.jobs)\n\n\t\t\treturn\n\t\t}\n\t}\n}",
  "func (runner *TestRunner) handleQueue (queueControl <-chan batchExecQueueControl) {\n\texecEndSignal := make(chan string)\n\n\texecute := func (enq batchExecQueueControlEnqueue, stopRequest <-chan struct{}, executionLogQuery <-chan chan<- string) {\n\t\t// Handle the execution log in a separate goroutine rather than in\n\t\t// the main loop of runner.executeBatch, so that any stalls in\n\t\t// executeBatch don't delay execution log queries.\n\t\t// The handler is controlled via the following channels.\n\t\texecutionLogAppend\t:= make(chan string)\t// Append a string to the log; don't commit to DB yet.\n\t\texecutionLogCommit\t:= make(chan struct{})\t// Commit uncommitted changes to DB.\n\t\texecutionLogStop\t:= make(chan struct{})\t// Stop the goroutine.\n\t\tgo func() {\n\t\t\texecutionLog := bytes.Buffer{}\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\t\tcase dst := <-executionLogQuery:\n\t\t\t\t\t\tdst <- runner.getBatchResultPastExecutionLog(enq.batchResultId) + executionLog.String()\n\t\t\t\t\tcase str := <-executionLogAppend:\n\t\t\t\t\t\texecutionLog.WriteString(str)\n\t\t\t\t\tcase <-executionLogCommit:\n\t\t\t\t\t\topSet := rtdb.NewOpSet()\n\t\t\t\t\t\topSet.Call(typeBatchResultExecutionLog, enq.batchResultId, \"Append\", executionLog.String())\n\t\t\t\t\t\terr := runner.rtdbServer.ExecuteOpSet(opSet)\n\t\t\t\t\t\tif err != nil { panic(err) }\n\t\t\t\t\t\texecutionLog.Reset()\n\t\t\t\t\tcase <-executionLogStop:\n\t\t\t\t\t\tif executionLog.Len() > 0 { panic(\"Execution log handler stopped, but non-committed data remains\") }\n\t\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\n\t\texecutionLogAppend <- fmt.Sprintf(\"Batch reached front of its queue at %v\\n\", time.Now().Format(defaultHumanReadableTimeFormat))\n\n\t\tvar batchResult BatchResult\n\t\terr := runner.rtdbServer.GetObject(enq.batchResultId, &batchResult)\n\t\tif err != nil { panic(err) }\n\n\t\tvar testCaseList TestCaseList\n\t\terr = runner.rtdbServer.GetObject(enq.batchResultId, &testCaseList)\n\t\tif err != nil { panic(err) }\n\n\t\tcasePaths := testCaseList.Paths\n\t\tif runner.isPartiallyExecutedBatch(enq.batchResultId) {\n\t\t\texecutionLogAppend <- fmt.Sprintf(\"Batch is partially executed, filtering pending cases\\n\")\n\t\t\tcasePaths = runner.filterPendingCasePaths(enq.batchResultId, casePaths)\n\t\t}\n\n\t\trunner.executeBatch(enq.batchResultId, batchResult.ExecParams, casePaths, stopRequest, executionLogAppend)\n\n\t\texecutionLogCommit <- struct{}{}\n\t\texecEndSignal <- enq.queueId\n\t\texecutionLogStop <- struct{}{}\n\t}\n\n\tqueueStopRequest\t\t:= make(map[string]chan<- struct{})\n\tqueueExecutionLogQuery\t:= make(map[string]chan<- chan<- string)\n\n\tlaunch := func (enq batchExecQueueControlEnqueue) {\n\t\tstopRequest\t\t\t:= make(chan struct{}, 1)\n\t\texecutionLogQuery\t:= make(chan chan<- string, 1)\n\t\tqueueStopRequest[enq.queueId]\t\t\t= stopRequest\n\t\tqueueExecutionLogQuery[enq.queueId]\t\t= executionLogQuery\n\t\tgo execute(enq, stopRequest, executionLogQuery)\n\t}\n\n\tfor {\n\t\tselect {\n\t\t\tcase command := <-queueControl:\n\t\t\t\tswitch cmd := command.(type) {\n\t\t\t\t\tcase batchExecQueueControlEnqueue:\n\t\t\t\t\t\tvar queue DeviceBatchQueue\n\t\t\t\t\t\terr := runner.rtdbServer.GetObject(cmd.queueId, &queue)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t// Queue does not exist; create it.\n\n\t\t\t\t\t\t\topSet := rtdb.NewOpSet()\n\t\t\t\t\t\t\topSet.Call(typeDeviceBatchQueueList, \"deviceBatchQueueList\", \"Append\", cmd.queueId)\n\t\t\t\t\t\t\topSet.Call(typeDeviceBatchQueue, 
cmd.queueId, \"Init\")\n\t\t\t\t\t\t\terr = runner.rtdbServer.ExecuteOpSet(opSet)\n\t\t\t\t\t\t\tif err != nil { panic(err) }\n\n\t\t\t\t\t\t\tlog.Printf(\"[runner] created queue '%s'\", cmd.queueId)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\topSet := rtdb.NewOpSet()\n\t\t\t\t\t\topSet.Call(typeDeviceBatchQueue, cmd.queueId, \"Append\", cmd.batchResultId)\n\t\t\t\t\t\terr = runner.rtdbServer.ExecuteOpSet(opSet)\n\t\t\t\t\t\tif err != nil { panic(err) }\n\n\t\t\t\t\t\tif len(queue.BatchResultIds) == 0 { // \\note queue is the queue before appending.\n\t\t\t\t\t\t\tlaunch(cmd);\n\t\t\t\t\t\t}\n\n\t\t\t\t\tcase batchExecQueueControlStopBatch:\n\t\t\t\t\t\tvar queue DeviceBatchQueue\n\t\t\t\t\t\terr := runner.rtdbServer.GetObject(cmd.queueId, &queue)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Printf(\"[runner] WARNING: stop request for non-existent queue '%s'\", cmd.queueId)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfound := false\n\t\t\t\t\t\tfor ndx, enqueuedId := range queue.BatchResultIds {\n\t\t\t\t\t\t\tif enqueuedId == cmd.batchResultId {\n\t\t\t\t\t\t\t\tif ndx == 0 {\n\t\t\t\t\t\t\t\t\tselect {\n\t\t\t\t\t\t\t\t\t\tcase queueStopRequest[cmd.queueId] <- struct{}{}:\n\t\t\t\t\t\t\t\t\t\t\tlog.Printf(\"[runner] stop request sent for batch '%s'\\n\", cmd.batchResultId)\n\t\t\t\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\t\t\t\tlog.Printf(\"[runner] stop request already sent for batch '%s'\\n\", cmd.batchResultId)\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\tlog.Printf(\"[runner] cancelled pending batch '%s'\\n\", cmd.batchResultId)\n\n\t\t\t\t\t\t\t\t\t// Set batch status, and remove it from the queue and active batch list.\n\t\t\t\t\t\t\t\t\topSet := rtdb.NewOpSet()\n\t\t\t\t\t\t\t\t\topSet.Call(typeBatchResult, cmd.batchResultId, \"SetStatus\", BATCH_STATUS_CODE_CANCELED)\n\t\t\t\t\t\t\t\t\topSet.Call(typeActiveBatchResultList, \"activeBatchResultList\", \"Remove\", cmd.batchResultId)\n\t\t\t\t\t\t\t\t\topSet.Call(typeDeviceBatchQueue, cmd.queueId, \"Remove\", cmd.batchResultId)\n\t\t\t\t\t\t\t\t\terr = runner.rtdbServer.ExecuteOpSet(opSet)\n\t\t\t\t\t\t\t\t\tif err != nil { panic(err) }\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tfound = true\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif !found {\n\t\t\t\t\t\t\tlog.Printf(\"[runner] WARNING: stop request for batch '%s', does not exist in queue '%s'\\n\", cmd.batchResultId, cmd.queueId)\n\t\t\t\t\t\t}\n\n\t\t\t\t\tcase batchExecQueueControlMove:\n\t\t\t\t\t\tvar queue DeviceBatchQueue\n\t\t\t\t\t\terr := runner.rtdbServer.GetObject(cmd.queueId, &queue)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Printf(\"[runner] WARNING: move command for non-existent queue '%s'\", cmd.queueId)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfound := false\n\t\t\t\t\t\tfor srcNdx, enqueuedId := range queue.BatchResultIds {\n\t\t\t\t\t\t\tif enqueuedId == cmd.batchResultId {\n\t\t\t\t\t\t\t\tdstNdx := srcNdx + cmd.offset\n\t\t\t\t\t\t\t\tif srcNdx == 0 || dstNdx == 0 {\n\t\t\t\t\t\t\t\t\t// \\todo [nuutti] Support moving running batch? 
We'd have to automatically\n\t\t\t\t\t\t\t\t\t//\t\t\t\t  stop it first, which can be slow, so it could get confusing?\n\t\t\t\t\t\t\t\t\tlog.Printf(\"[runner] WARNING: trying to move currently to/from running batch in queue\\n\")\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\tif dstNdx < 0 || dstNdx >= len(queue.BatchResultIds) {\n\t\t\t\t\t\t\t\t\t\tlog.Printf(\"[runner] WARNING: trying to move batch to position %d\\n\", dstNdx)\n\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\topSet := rtdb.NewOpSet()\n\t\t\t\t\t\t\t\t\t\topSet.Call(typeDeviceBatchQueue, cmd.queueId, \"Move\", srcNdx, dstNdx)\n\t\t\t\t\t\t\t\t\t\terr := runner.rtdbServer.ExecuteOpSet(opSet)\n\t\t\t\t\t\t\t\t\t\tif err != nil { panic(err) }\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tfound = true\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif !found {\n\t\t\t\t\t\t\tlog.Printf(\"[runner] WARNING: move command for batch '%s', does not exist in queue '%s'\\n\", cmd.batchResultId, cmd.queueId)\n\t\t\t\t\t\t}\n\n\t\t\t\t\tcase batchExecQueueControlExecutionLogQuery:\n\t\t\t\t\t\tvar queue DeviceBatchQueue\n\t\t\t\t\t\terr := runner.rtdbServer.GetObject(cmd.queueId, &queue)\n\t\t\t\t\t\tif err != nil { cmd.dst <- runner.getBatchResultPastExecutionLog(cmd.batchResultId); continue }\n\n\t\t\t\t\t\tquerySent := false\n\t\t\t\t\t\tfor ndx, enqueueId := range queue.BatchResultIds {\n\t\t\t\t\t\t\tif enqueueId == cmd.batchResultId {\n\t\t\t\t\t\t\t\tif ndx == 0 {\n\t\t\t\t\t\t\t\t\tqueueExecutionLogQuery[cmd.queueId] <- cmd.dst\n\t\t\t\t\t\t\t\t\tquerySent = true\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif !querySent {\n\t\t\t\t\t\t\tcmd.dst <- runner.getBatchResultPastExecutionLog(cmd.batchResultId)\n\t\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tcase queueId := <-execEndSignal:\n\t\t\t\tvar queue DeviceBatchQueue\n\t\t\t\terr := runner.rtdbServer.GetObject(queueId, &queue)\n\t\t\t\tif err != nil { panic(err) } // \\note This shouldn't happen (a batch run ends while it's not even in the queue).\n\n\t\t\t\topSet := rtdb.NewOpSet()\n\t\t\t\topSet.Call(typeDeviceBatchQueue, queueId, \"Remove\", queue.BatchResultIds[0])\n\t\t\t\terr = runner.rtdbServer.ExecuteOpSet(opSet)\n\t\t\t\tif err != nil { panic(err) }\n\n\t\t\t\tif len(queue.BatchResultIds) > 1 { // \\note queue is the queue before removal.\n\t\t\t\t\tlaunch(batchExecQueueControlEnqueue{\n\t\t\t\t\t\tbatchResultId:\tqueue.BatchResultIds[1],\n\t\t\t\t\t\tqueueId:\t\tqueueId,\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t}\n\t}\n}",
  "func (t *UdpClient) runRequestQueue() {\r\n\tfor req := range t.requestQueue {\r\n\t\tmessage := req.message\r\n\t\tresponse_ch := req.response_ch\r\n\r\n\t\tmsg, err := t.execRequest(message)\r\n\r\n\t\tresponse_ch <- response{msg, err}\r\n\t}\r\n}",
  "func (t *Terminal) queue(data []byte) {\n\tt.outBuf = append(t.outBuf, data...)\n}",
  "func (w *Whisper) processQueue() {\n\tvar e *Envelope\n\tfor {\n\t\tselect {\n\t\tcase <-w.quit:\n\t\t\treturn\n\n\t\tcase e = <-w.messageQueue:\n\t\t\tw.filters.NotifyWatchers(e, false)\n\n\t\tcase e = <-w.p2pMsgQueue:\n\t\t\tw.filters.NotifyWatchers(e, true)\n\t\t}\n\t}\n}",
  "func (m *wsNotificationManager) queueHandler() {\n\tqueueHandler(m.queueNotification, m.notificationMsgs, m.quit)\n\tm.wg.Done()\n}",
  "func (s *PostfixQueueCollectScheduler) Collect() {\n\tlevel.Debug(s.collector.logger).Log(\"msg\", \"Start collecting\")\n\tnow := time.Now()\n\n\ts.collector.mu.Lock()\n\tdefer s.collector.mu.Unlock()\n\n\ts.collector.sizeBytesHistogram.Reset()\n\ts.collector.ageSecondsHistogram.Reset()\n\ts.collector.scrapeSuccessGauge.Reset()\n\ts.collector.scrapeDurationGauge.Reset()\n\n\tcnt := 0\n\tmu := sync.Mutex{}\n\terr := s.collector.postqueue.EachProduce(func(message *showq.Message) {\n\t\tfor i := 0; i < len(message.Recipients); i++ {\n\t\t\tmessage.Recipients[i].Address = util.EmailMask(message.Recipients[i].Address)\n\t\t}\n\t\tb, _ := json.Marshal(message)\n\t\tlevel.Debug(s.collector.logger).Log(\"msg\", \"Collected items\", \"item\", b)\n\n\t\tmu.Lock()\n\t\tdefer mu.Unlock()\n\n\t\ts.collector.sizeBytesHistogram.WithLabelValues(message.QueueName).Observe(float64(message.MessageSize))\n\t\ts.collector.ageSecondsHistogram.WithLabelValues(message.QueueName).Observe(now.Sub(time.Time(message.ArrivalTime)).Seconds())\n\t\tcnt++\n\t})\n\n\tif err != nil {\n\t\tif e, ok := err.(*showq.ParseError); ok {\n\t\t\tlevel.Error(s.collector.logger).Log(\"err\", err, \"line\", util.EmailMask(e.Line()))\n\t\t} else {\n\t\t\tlevel.Error(s.collector.logger).Log(\"err\", err)\n\t\t}\n\t\ts.collector.scrapeSuccessGauge.WithLabelValues(\"postfix_queue\").Set(0)\n\t} else {\n\t\ts.collector.scrapeSuccessGauge.WithLabelValues(\"postfix_queue\").Set(1)\n\t}\n\ts.collector.scrapeDurationGauge.WithLabelValues(\"postfix_queue\").Set(time.Now().Sub(now).Seconds())\n\n\t_, nextTime := gocron.NextRun()\n\tlevel.Debug(s.collector.logger).Log(\"msg\", \"Finish collecting\", \"length\", cnt, \"duration\", time.Now().Sub(now).Seconds(), \"next\", nextTime)\n}",
  "func (ca *channelAccessor) filterQueue() {\n\tif len(ca.fundsReqQueue) == 0 {\n\t\treturn\n\t}\n\n\t// Remove cancelled requests\n\ti := 0\n\tfor _, r := range ca.fundsReqQueue {\n\t\tif r.isActive() {\n\t\t\tca.fundsReqQueue[i] = r\n\t\t\ti++\n\t\t}\n\t}\n\n\t// Allow GC of remaining slice elements\n\tfor rem := i; rem < len(ca.fundsReqQueue); rem++ {\n\t\tca.fundsReqQueue[i] = nil\n\t}\n\n\t// Resize slice\n\tca.fundsReqQueue = ca.fundsReqQueue[:i]\n}",
  "func (d *Deployer) processDeployQueueWorker() {\n\tlog.Debug(\"Deployer process deploy queue: starting\")\n\n\t// invoke pprocessDeployQueueNextItem to fetch and consume the next change\n\t// to a watched or listed resource\n\tfor d.processDeployQueueNextItem() {\n\t\tlog.Debug(\"Deployer.runWorker: processing next item\")\n\t}\n\n\tlog.Debug(\"Deployer process deploy queue: completed\")\n}",
  "func (h *Handler) Run() (err error) {\n\n\t// Connect to activemq\n\tclient, err := h.getAMQPClient()\n\tif err != nil {\n\t\tlog.Logger.Fatal(\"Dialing AMQP server:\", err)\n\t\treturn\n\t}\n\tlog.Logger.Info(\"Connected\")\n\n\t// Open queue channel for reading\n\tchannel, err := client.Channel()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tqueue, err := channel.Consume(\n\t\th.RabbitMQ.Queue,\n\t\th.RabbitMQ.Consumer,\n\t\tfalse,\n\t\tfalse,\n\t\ttrue,\n\t\tfalse,\n\t\tnil,\n\t)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// process messages\n\tfor message := range queue {\n\n\t\tfiksMsg := fiksMessage{&message}\n\n\t\tlog := fiksMsg.LoggerWithFields()\n\t\terr = h.handleAMQMessage(fiksMsg)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to handle AMQP message: %s\", fiksMsg.GetMeldingID(), err)\n\t\t} else {\n\t\t\tlog.Infof(\"Successfully handled message\")\n\t\t}\n\n\t\t// Always ack.\n\t\t//TODO: Consider alternate processing on errors\n\t\t//\t\tI.e. if report failure fails.\n\t\tmessage.Ack(true)\n\t}\n\n\t// TODO: consider reconnecting rather than shutting down.\n\t// connection broken, return err.\n\treturn\n}",
  "func (ca *channelAccessor) enqueue(task *fundsReq) {\n\tca.lk.Lock()\n\tdefer ca.lk.Unlock()\n\n\tca.fundsReqQueue = append(ca.fundsReqQueue, task)\n\tgo ca.processQueue(\"\") // nolint: errcheck\n}",
  "func (d *D) Start() {\n\t// inChan we be accessed via closure\n\tvar inChan chan<- []byte = d.psChan\n\n\t// create a context to cancel our Receive call\n\tctx := context.Background()\n\tctx, cancel := context.WithCancel(ctx)\n\n\t// keep handle to cancel func for Stop() method\n\td.cancel = cancel\n\n\t// TODO: Should this be put into a retry loop?\n\terr := d.sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {\n\t\t// Send data onto inChan\n\t\t// IN THE ACT OF SLOW CONSUMERS MESSAGES WILL BE REQUEUED TO PUBSUB IF DOWNSTREAM BUFFER IS FULL\n\t\tselect {\n\t\tcase inChan <- m.Data:\n\t\t\t// Successfully pushed message downstream, ACK message\n\t\t\tm.Ack()\n\t\t\tlog.Printf(\"enqueued message onto internal channel. current channel length: %d\", len(d.psChan))\n\t\tdefault:\n\t\t\t// Could not push message downstream, requeue to pubsub server\n\t\t\tm.Nack()\n\t\t\tlog.Printf(\"could not enqueue received message. current channel buffer: %d. indicates slow consumers. Message has been requeued to pubsub\", len(d.psChan))\n\t\t}\n\n\t\treturn\n\t})\n\tif err != nil {\n\t\tlog.Printf(\"received error on pubsub Receive, Dequeuer is not receiving messages: %s\", err)\n\t}\n\n}",
  "func (t *TcpClient) runRequestQueue() {\n\tfor req := range t.requestQueue {\n\t\tmessage := req.message\n\t\tresponse_ch := req.response_ch\n\n\t\tmsg, err := t.execRequest(message)\n\n\t\tresponse_ch <- response{msg, err}\n\t}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Get information for an unspent transaction output. 
 | 
	func (dcr *DCRBackend) getUnspentTxOut(txHash *chainhash.Hash, vout uint32) (*chainjson.GetTxOutResult, []byte, error) {
	txOut, err := dcr.node.GetTxOut(txHash, vout, true)
	if err != nil {
		return nil, nil, fmt.Errorf("GetTxOut error for output %s:%d: %v", txHash, vout, err)
	}
	if txOut == nil {
		return nil, nil, fmt.Errorf("UTXO - no unspent txout found for %s:%d", txHash, vout)
	}
	pkScript, err := hex.DecodeString(txOut.ScriptPubKey.Hex)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to decode pubkey script from '%s' for output %s:%d", txOut.ScriptPubKey.Hex, txHash, vout)
	}
	return txOut, pkScript, nil
} 
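A hedged usage sketch, modeled loosely on the UnspentDetails method that appears in the negatives below; txid and vout are assumed inputs:

txHash, err := chainhash.NewHashFromStr(txid)
if err != nil {
	return fmt.Errorf("error decoding tx ID %s: %v", txid, err)
}
txOut, pkScript, err := dcr.getUnspentTxOut(txHash, vout)
if err != nil {
	return err
}
// txOut carries the value and confirmation count; pkScript is the raw
// script bytes, ready for script-type and address extraction.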
 | 
	[
  "func (entry *UtxoEntry) UnspendOutput() {\n\tentry.Spent = false\n}",
  "func (s *Store) UnspentOutputs() []*RecvTxOut {\n\tunspent := make([]*RecvTxOut, 0, len(s.unspent))\n\tfor _, record := range s.unspent {\n\t\tunspent = append(unspent, record.record(s).(*RecvTxOut))\n\t}\n\treturn unspent\n}",
  "func (s *Store) GetUnspentOutputs(ns walletdb.ReadBucket) ([]Credit, er.R) {\n\tvar unspent []Credit\n\terr := s.ForEachUnspentOutput(ns, nil, func(_ []byte, c *Credit) er.R {\n\t\tunspent = append(unspent, *c)\n\t\treturn nil\n\t})\n\treturn unspent, err\n}",
  "func (am *AccountManager) ListUnspent(minconf, maxconf int,\n\taddresses map[string]bool) ([]map[string]interface{}, error) {\n\tbs, err := GetCurBlock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfos := []map[string]interface{}{}\n\tfor _, a := range am.AllAccounts() {\n\t\tfor _, record := range a.TxStore.UnspentOutputs() {\n\t\t\tinfo := record.TxInfo(a.name, bs.Height, cfg.Net())[0]\n\t\t\tinfos = append(infos, info)\n\t\t}\n\n\t}\n\treturn infos, nil\n}",
  "func (ds DataStore) GetUnspentForWallet(ctx sdk.Context, wallet Wallet) (utxos []TxOutput) {\n\tfor _, p := range wallet.Unspent {\n\t\toutput, ok := ds.GetOutput(ctx, p)\n\t\tif !ok {\n\t\t\tpanic(fmt.Sprintf(\"Corrupted store: Wallet contains unspent position (%v) that doesn't exist in store\", p))\n\t\t}\n\t\ttx, ok := ds.GetTxWithPosition(ctx, p)\n\t\tif !ok {\n\t\t\tpanic(fmt.Sprintf(\"Corrupted store: Wallet contains unspent position (%v) that doesn't have corresponding tx\", p))\n\t\t}\n\n\t\ttxo := NewTxOutput(output.Output, p, tx.ConfirmationHash, tx.Transaction.TxHash(), output.Spent, output.SpenderTx)\n\t\tutxos = append(utxos, txo)\n\t}\n\treturn utxos\n}",
  "func GetUnspentOutputCoins(rpcClient *rpcclient.HttpClient, keyWallet *wallet.KeyWallet) ([]*crypto.OutputCoin, error) {\n\tprivateKey := &keyWallet.KeySet.PrivateKey\n\tpaymentAddressStr := keyWallet.Base58CheckSerialize(wallet.PaymentAddressType)\n\tviewingKeyStr := keyWallet.Base58CheckSerialize(wallet.ReadonlyKeyType)\n\n\toutputCoins, err := GetListOutputCoins(rpcClient, paymentAddressStr, viewingKeyStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tserialNumbers, err := DeriveSerialNumbers(privateKey, outputCoins)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tisExisted, err := CheckExistenceSerialNumber(rpcClient, paymentAddressStr, serialNumbers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tutxos := make([]*crypto.OutputCoin, 0)\n\tfor i, out := range outputCoins {\n\t\tif !isExisted[i] {\n\t\t\tutxos = append(utxos, out)\n\t\t}\n\t}\n\n\treturn utxos, nil\n}",
  "func (b *BlockChain) GetUnspentTxns(address string) []Transaction {\n\tvar unspentTxns []Transaction\n\tvar spentTxnMap = make(map[string][]int) // map txnID -> output index\n\n\t// go over blocks one by one\n\titer := b.GetIterator()\n\tfor {\n\t\tblck := iter.Next()\n\n\t\t// go over all Transactions in this block\n\t\tfor _, txn := range blck.Transactions {\n\t\t\t// get string identifying this transaction\n\t\t\ttxID := hex.EncodeToString(txn.ID)\n\n\t\tOutputLoop:\n\t\t\t// go over all outputs in this Txn\n\t\t\tfor outIndex, output := range txn.Out {\n\n\t\t\t\t// check if this output is spent.\n\t\t\t\tif spentTxnMap[txID] != nil {\n\t\t\t\t\tfor _, indx := range spentTxnMap[txID] {\n\t\t\t\t\t\tif indx == outIndex {\n\t\t\t\t\t\t\tcontinue OutputLoop\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// check if this output belongs to this address\n\t\t\t\tif output.CheckOutputUnlock(address) {\n\t\t\t\t\tunspentTxns = append(unspentTxns, *txn)\n\t\t\t\t}\n\n\t\t\t\t// if this is not genesis block, go over all inputs\n\t\t\t\t// that refers to output that belongs to this address\n\t\t\t\t// and mark them as unspent\n\t\t\t\tif txn.IsCoinbase() == false {\n\t\t\t\t\tfor _, inp := range txn.In {\n\t\t\t\t\t\tif inp.CheckInputUnlock(address) {\n\t\t\t\t\t\t\tspentTxnMap[txID] = append(spentTxnMap[txID], inp.Out)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(blck.PrevBlockHash) == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn unspentTxns\n}",
  "func FilterUnspentTransactionOutput(unspentTransactionOutputs []*transactions.UnspentTransactionOutput, address string) []*transactions.UnspentTransactionOutput {\n\n\t// initialize result\n\tvar filteredUnspentOutputs []*transactions.UnspentTransactionOutput\n\n\t// loop through and add matching outputs\n\tfor _, unspentTransaction := range unspentTransactionOutputs {\n\n\t\t// check address\n\t\tif unspentTransaction.Address == address {\n\t\t\tfilteredUnspentOutputs = append(filteredUnspentOutputs, unspentTransaction)\n\t\t}\n\t}\n\n\t// return what was found\n\treturn filteredUnspentOutputs\n}",
  "func (dcr *DCRBackend) UnspentDetails(txid string, vout uint32) (string, uint64, int64, error) {\n\ttxHash, err := chainhash.NewHashFromStr(txid)\n\tif err != nil {\n\t\treturn \"\", 0, -1, fmt.Errorf(\"error decoding tx ID %s: %v\", txid, err)\n\t}\n\ttxOut, pkScript, err := dcr.getUnspentTxOut(txHash, vout)\n\tif err != nil {\n\t\treturn \"\", 0, -1, err\n\t}\n\tscriptType := dexdcr.ParseScriptType(dexdcr.CurrentScriptVersion, pkScript, nil)\n\tif scriptType == dexdcr.ScriptUnsupported {\n\t\treturn \"\", 0, -1, dex.UnsupportedScriptError\n\t}\n\tif !scriptType.IsP2PKH() {\n\t\treturn \"\", 0, -1, dex.UnsupportedScriptError\n\t}\n\n\tscriptAddrs, err := dexdcr.ExtractScriptAddrs(pkScript, chainParams)\n\tif err != nil {\n\t\treturn \"\", 0, -1, fmt.Errorf(\"error parsing utxo script addresses\")\n\t}\n\tif scriptAddrs.NumPK != 0 {\n\t\treturn \"\", 0, -1, fmt.Errorf(\"pubkey addresses not supported for P2PKHDetails\")\n\t}\n\tif scriptAddrs.NumPKH != 1 {\n\t\treturn \"\", 0, -1, fmt.Errorf(\"multi-sig not supported for P2PKHDetails\")\n\t}\n\treturn scriptAddrs.PkHashes[0].String(), toAtoms(txOut.Value), txOut.Confirmations, nil\n}",
  "func (n *Client) RequestUnspentAliasOutput(addr *ledgerstate.AliasAddress) {\n\tn.sendMessage(&txstream.MsgGetUnspentAliasOutput{\n\t\tAliasAddress: addr,\n\t})\n}",
  "func (u UTXOSet) FindUnspentTransactionOutputs(pubKeyHash []byte) []TxOutput {\n\tvar UTXOs []TxOutput\n\n\tdb := u.BlockChain.Database\n\n\terr := db.View(func(txn *badger.Txn) error {\n\t\topts := badger.DefaultIteratorOptions\n\n\t\tit := txn.NewIterator(opts)\n\t\tdefer it.Close()\n\n\t\t// iterate through all transactions with UTXOs\n\t\tfor it.Seek(utxoPrefix); it.ValidForPrefix(utxoPrefix); it.Next() {\n\t\t\titem := it.Item()\n\t\t\tv, err := item.Value()\n\t\t\tHandle(err)\n\t\t\touts := DeserializeOutputs(v)\n\t\t\t// go through all outputs of that transaction\n\t\t\tfor _, out := range outs.Outputs {\n\t\t\t\t// check the output was locked with this address (belongs to this receiver and can be unlocked by this address to use as new input)\n\t\t\t\tif out.IsLockedWithKey(pubKeyHash) {\n\t\t\t\t\tUTXOs = append(UTXOs, out)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tHandle(err)\n\treturn UTXOs\n}",
  "func (s *State) output(id OutputID) (output Output, err error) {\n\toutput, exists := s.unspentOutputs[id]\n\tif exists {\n\t\treturn\n\t}\n\n\terr = errors.New(\"output not in utxo set\")\n\treturn\n}",
  "func GetUnspentOutputs(nodeAddr string, addrs []string) ([]Utxo, error) {\n\tvar url string\n\tif len(addrs) == 0 {\n\t\treturn []Utxo{}, nil\n\t}\n\n\taddrParam := strings.Join(addrs, \",\")\n\turl = fmt.Sprintf(\"http://%s/outputs?addrs=%s\", nodeAddr, addrParam)\n\n\trsp, err := http.Get(url)\n\tif err != nil {\n\t\treturn []Utxo{}, errors.New(\"get outputs failed\")\n\t}\n\tdefer rsp.Body.Close()\n\toutputSet := visor.ReadableOutputSet{}\n\tif err := json.NewDecoder(rsp.Body).Decode(&outputSet); err != nil {\n\t\treturn []Utxo{}, err\n\t}\n\n\tspendableOuts := outputSet.SpendableOutputs()\n\tux := make([]Utxo, len(spendableOuts))\n\tfor i, u := range spendableOuts {\n\t\tux[i] = SkyUtxo{u}\n\t}\n\treturn ux, nil\n}",
  "func (*UnspentTxOut) Descriptor() ([]byte, []int) {\n\treturn file_mobilecoind_api_proto_rawDescGZIP(), []int{1}\n}",
  "func (wc *rpcClient) listUnspent() ([]*ListUnspentResult, error) {\n\tunspents := make([]*ListUnspentResult, 0)\n\t// TODO: listunspent 0 9999999 []string{}, include_unsafe=false\n\treturn unspents, wc.call(methodListUnspent, anylist{uint8(0)}, &unspents)\n}",
  "func (blockchain *Blockchain) FindUTXO() map[string]TXOutputs {\n\tUTXO := make(map[string]TXOutputs)\n\tspentTXOs := make(map[string][]int)\n\titer := blockchain.Iterator()\n\n\tfor {\n\t\tblock := iter.Next()\n\n\t\tfor _, tx := range block.Transactions {\n\t\t\tfmt.Printf(\"transaction ID: %x\\n\", tx.ID)\n\t\t\ttxID := hex.EncodeToString(tx.ID)\n\n\t\tOutputs:\n\t\t\tfor outIdx, out := range tx.Vout {\n\t\t\t\t// Was the output spent?\n\t\t\t\tif spentTXOs[txID] != nil {\n\t\t\t\t\tfor _, spentOutIdx := range spentTXOs[txID] {\n\t\t\t\t\t\tif spentOutIdx == outIdx {\n\t\t\t\t\t\t\tcontinue Outputs\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tfmt.Printf(\"unspent TXOutput: %s\\n\", out)\n\n\t\t\t\touts := UTXO[txID]\n\t\t\t\touts.Outputs = append(outs.Outputs, out)\n\t\t\t\tUTXO[txID] = outs\n\t\t\t}\n\n\t\t\tif tx.IsCoinbase() == false {\n\t\t\t\tfor _, in := range tx.Vin {\n\t\t\t\t\tinTxID := hex.EncodeToString(in.Txid)\n\t\t\t\t\tspentTXOs[inTxID] = append(spentTXOs[inTxID], in.Vout)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(block.PrevBlockHash) == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn UTXO\n}",
  "func (gw *Gateway) GetUnspentOutputsSummary(filters []visor.OutputsFilter) (*visor.UnspentOutputsSummary, error) {\n\tvar summary *visor.UnspentOutputsSummary\n\tvar err error\n\tgw.strand(\"GetUnspentOutputsSummary\", func() {\n\t\tsummary, err = gw.v.GetUnspentOutputsSummary(filters)\n\t})\n\treturn summary, err\n}",
  "func (w *Wallet) GetUnspentBlockStakeOutputs() (unspent []types.UnspentBlockStakeOutput, err error) {\n\tw.mu.RLock()\n\tdefer w.mu.RUnlock()\n\n\tif !w.unlocked {\n\t\terr = modules.ErrLockedWallet\n\t\treturn\n\t}\n\n\tunspent = make([]types.UnspentBlockStakeOutput, 0)\n\n\t// prepare fulfillable context\n\tctx := w.getFulfillableContextForLatestBlock()\n\n\t// collect all fulfillable block stake outputs\n\tfor usbsoid, output := range w.blockstakeOutputs {\n\t\tif output.Condition.Fulfillable(ctx) {\n\t\t\tunspent = append(unspent, w.unspentblockstakeoutputs[usbsoid])\n\t\t}\n\t}\n\treturn\n}",
  "func GetAddressUnspent(addr string, page int, pagesize int) (*model.AddressUnspent, error) {\n\turl := fmt.Sprintf(bchapi.AddressUnspentUrl, addr, page, pagesize)\n\tresult, err := bchapi.HttpGet(url, bchapi.ConnTimeoutMS, bchapi.ServeTimeoutMS)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\taddressUnspent, err := model.StringToAddressUnspent(result)\n\treturn addressUnspent, err\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Get the block information, checking the cache first. Same as getDcrBlock, but takes a string argument. 
 | 
	func (dcr *DCRBackend) getBlockInfo(blockid string) (*dcrBlock, error) {
	blockHash, err := chainhash.NewHashFromStr(blockid)
	if err != nil {
		return nil, fmt.Errorf("unable to decode block hash from %s", blockid)
	}
	return dcr.getDcrBlock(blockHash)
} 
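For context, a minimal sketch of the getDcrBlock counterpart it delegates to, assuming the block cache exposes a hash-keyed lookup analogous to atHeight (the block method name is an assumption):

func (dcr *DCRBackend) getDcrBlock(blockHash *chainhash.Hash) (*dcrBlock, error) {
	cachedBlock, found := dcr.blockCache.block(blockHash)
	if found {
		return cachedBlock, nil
	}
	blockVerbose, err := dcr.node.GetBlockVerbose(blockHash, false)
	if err != nil {
		return nil, fmt.Errorf("error retrieving block %s: %v", blockHash, err)
	}
	return dcr.blockCache.add(blockVerbose)
}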
 | 
	[
  "func (cache *BlockCache) Get(blockID BlockID) (string, error) {\n\tcache.mu.Lock()\n\tdefer cache.mu.Unlock()\n\tif entry, ok := cache.entries[blockID]; ok {\n\t\tcache.callCount++\n\t\tentry.lastUsed = cache.callCount\n\t\treturn entry.block, nil\n\t}\n\treturn \"\", errors.New(\"Block \" + string(blockID) + \" is not cached\")\n}",
  "func GetBlock(cache *BlockCache, height int) (*walletrpc.CompactBlock, error) {\n\t// First, check the cache to see if we have the block\n\tblock := cache.Get(height)\n\tif block != nil {\n\t\treturn block, nil\n\t}\n\n\t// Not in the cache, ask zcashd\n\tblock, err := getBlockFromRPC(height)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif block == nil {\n\t\t// Block height is too large\n\t\treturn nil, errors.New(\"block requested is newer than latest block\")\n\t}\n\treturn block, nil\n}",
  "func (s *State) getCachedBlock(blkID ids.ID) (snowman.Block, bool) {\n\tif blk, ok := s.verifiedBlocks[blkID]; ok {\n\t\treturn blk, true\n\t}\n\n\tif blk, ok := s.decidedBlocks.Get(blkID); ok {\n\t\treturn blk.(snowman.Block), true\n\t}\n\n\tif blk, ok := s.unverifiedBlocks.Get(blkID); ok {\n\t\treturn blk.(snowman.Block), true\n\t}\n\n\treturn nil, false\n}",
  "func (c *BlockCache) Get(height int) *walletrpc.CompactBlock {\n\tc.mutex.RLock()\n\tdefer c.mutex.RUnlock()\n\n\tif height < c.firstBlock || height >= c.nextBlock {\n\t\treturn nil\n\t}\n\tblock := c.readBlock(height)\n\tif block == nil {\n\t\tgo func() {\n\t\t\t// We hold only the read lock, need the exclusive lock.\n\t\t\tc.mutex.Lock()\n\t\t\tc.recoverFromCorruption(height - 10000)\n\t\t\tc.mutex.Unlock()\n\t\t}()\n\t\treturn nil\n\t}\n\treturn block\n}",
  "func (e *offlineExchange) GetBlock(_ context.Context, k u.Key) (*blocks.Block, error) {\n\treturn e.bs.Get(k)\n}",
  "func (bidb *BlockInfoStorage) GetBlockInfo(atTime time.Time) (common.BlockInfo, error) {\n\tvar (\n\t\tlogger = bidb.sugar.With(\n\t\t\t\"func\", caller.GetCurrentFunctionName(),\n\t\t\t\"time\", atTime.String(),\n\t\t)\n\t\tresult common.BlockInfo\n\t)\n\tconst selectStmt = `SELECT block, time FROM %[1]s WHERE time>$1 AND time<$2 Limit 1`\n\tquery := fmt.Sprintf(selectStmt, bidb.tableNames[blockInfoTable])\n\tlogger.Debugw(\"querying blockInfo...\", \"query\", query)\n\tif err := bidb.db.Get(&result, query, timeutil.Midnight(atTime), timeutil.Midnight(atTime).AddDate(0, 0, 1)); err != nil {\n\t\treturn common.BlockInfo{}, err\n\t}\n\treturn result, nil\n}",
  "func (cache *diskBlockCacheWrapped) Get(ctx context.Context, tlfID tlf.ID,\n\tblockID kbfsblock.ID) (\n\tbuf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf,\n\tprefetchStatus PrefetchStatus, err error) {\n\tcache.mtx.RLock()\n\tdefer cache.mtx.RUnlock()\n\tprimaryCache := cache.workingSetCache\n\tsecondaryCache := cache.syncCache\n\tif cache.config.IsSyncedTlf(tlfID) && cache.syncCache != nil {\n\t\tprimaryCache, secondaryCache = secondaryCache, primaryCache\n\t}\n\t// Check both caches if the primary cache doesn't have the block.\n\tbuf, serverHalf, prefetchStatus, err =\n\t\tprimaryCache.Get(ctx, tlfID, blockID)\n\tif _, isNoSuchBlockError := err.(NoSuchBlockError); isNoSuchBlockError &&\n\t\tsecondaryCache != nil {\n\t\treturn secondaryCache.Get(ctx, tlfID, blockID)\n\t}\n\treturn buf, serverHalf, prefetchStatus, err\n}",
  "func (dao *Simple) GetBlock(hash util.Uint256) (*block.Block, error) {\n\tkey := storage.AppendPrefix(storage.DataExecutable, hash.BytesBE())\n\tb, err := dao.Store.Get(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := io.NewBinReaderFromBuf(b)\n\tif r.ReadB() != storage.ExecBlock {\n\t\treturn nil, errors.New(\"internal DB inconsistency\")\n\t}\n\tblock, err := block.NewTrimmedFromReader(dao.Version.StateRootInHeader, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn block, nil\n}",
  "func GetBlock(\n\tctx context.Context,\n\tencStrat encryption.Strategy,\n\tdbm db.Db,\n\tblockRef *storageref.StorageRef,\n) (*Block, error) {\n\tblk, err := FollowBlockRef(ctx, blockRef, encStrat)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tblkHeader, err := FollowBlockHeaderRef(ctx, blk.GetBlockHeaderRef(), encStrat)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb := &Block{\n\t\tid:       base64.StdEncoding.EncodeToString(blockRef.GetObjectDigest()),\n\t\tdbm:      dbm,\n\t\tblk:      blk,\n\t\tencStrat: encStrat,\n\t\theader:   blkHeader,\n\t\tblkRef:   blockRef,\n\t}\n\tb.dbKey = []byte(fmt.Sprintf(\"/%s\", b.id))\n\n\tif err := b.ReadState(ctx); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn b, nil\n}",
  "func (cache *diskBlockCacheWrapped) Get(\n\tctx context.Context, tlfID tlf.ID, blockID kbfsblock.ID,\n\tpreferredCacheType DiskBlockCacheType) (\n\tbuf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf,\n\tprefetchStatus PrefetchStatus, err error) {\n\tcache.mtx.RLock()\n\tdefer cache.mtx.RUnlock()\n\tprimaryCache, secondaryCache := cache.rankCachesLocked(preferredCacheType)\n\t// Check both caches if the primary cache doesn't have the block.\n\tbuf, serverHalf, prefetchStatus, err = primaryCache.Get(ctx, tlfID, blockID)\n\tif _, isNoSuchBlockError := errors.Cause(err).(data.NoSuchBlockError); isNoSuchBlockError &&\n\t\tsecondaryCache != nil {\n\t\tbuf, serverHalf, prefetchStatus, err = secondaryCache.Get(\n\t\t\tctx, tlfID, blockID)\n\t\tif err != nil {\n\t\t\treturn nil, kbfscrypto.BlockCryptKeyServerHalf{}, NoPrefetch, err\n\t\t}\n\t\tif preferredCacheType != DiskBlockAnyCache {\n\t\t\tcache.moveBetweenCachesWithBlockLocked(\n\t\t\t\tctx, tlfID, blockID, buf, serverHalf, prefetchStatus,\n\t\t\t\tpreferredCacheType)\n\t\t}\n\t}\n\treturn buf, serverHalf, prefetchStatus, err\n}",
  "func (b *AbstractBaseEntity) GetBlock(parent string) (string, error) {\n\tparent = `(?m)^` + parent + `$`\n\treturn b.node.GetSection(parent, \"running-config\")\n}",
  "func (db *Bolt) GetBlock(hash []byte) (bloc *block.Block, err error) {\n\terr = db.client.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(db.table))\n\t\traw := b.Get(hash)\n\t\tvar bl block.Block\n\t\terr := json.Unmarshal(raw, &bl)\n\t\tbloc = &bl\n\t\treturn err\n\t})\n\treturn bloc, err\n}",
  "func GetBlock(hostURL string, hostPort int, hash string) *bytes.Buffer {\n\tparams := make(map[string]interface{})\n\tparams[\"hash\"] = hash\n\treturn makePostRequest(hostURL, hostPort, \"f_block_json\", params)\n}",
  "func (c *Cache) getBlock(aoffset int64, locked bool) *cacheBlock {\n\tif !locked {\n\t\tc.mu.Lock()\n\t\tdefer c.mu.Unlock()\n\t}\n\n\tif blk, ok := c.blocks[aoffset]; ok {\n\t\tc.lru.MoveToFront(blk.lru)\n\t\treturn blk\n\t}\n\n\treturn nil\n}",
  "func (api *APIClient) GetBlockByRepoName(repoPieces RepoPieces) (Block, error) {\n\tu, err := url.Parse(fmt.Sprintf(\"%s/api/v1/blocks\", api.baseURL))\n\tif err != nil {\n\t\treturn Block{}, errors.New(\"unable to parse Learn remote\")\n\t}\n\tv := url.Values{}\n\tv.Set(\"repo_name\", repoPieces.RepoName)\n\tv.Set(\"org\", repoPieces.Org)\n\tv.Set(\"origin\", repoPieces.Origin)\n\tu.RawQuery = v.Encode()\n\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn Block{}, err\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"Source\", \"gLearn_cli\")\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", api.Credentials.token))\n\n\tres, err := api.client.Do(req)\n\tif err != nil {\n\t\treturn Block{}, err\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != http.StatusOK {\n\t\treturn Block{}, fmt.Errorf(\"Error: response status: %d\", res.StatusCode)\n\t}\n\n\tvar blockResp blockResponse\n\terr = json.NewDecoder(res.Body).Decode(&blockResp)\n\tif err != nil {\n\t\treturn Block{}, err\n\t}\n\n\tif len(blockResp.Blocks) == 1 {\n\t\treturn blockResp.Blocks[0], nil\n\t}\n\treturn Block{}, nil\n}",
  "func (c *Cache) GetBlock(k Key) Block {\n\tidx := uint64(0)\n\tif len(c.shards) > 1 {\n\t\th := k.hashUint64()\n\t\tidx = h % uint64(len(c.shards))\n\t}\n\tshard := c.shards[idx]\n\treturn shard.GetBlock(k)\n}",
  "func (db *SqliteDb) fetchBlockCache(sha *btcwire.ShaHash) (*blockCacheObj, bool) {\n\n\tdb.blockCache.cacheLock.RLock()\n\tdefer db.blockCache.cacheLock.RUnlock()\n\n\tblkobj, ok := db.blockCache.blockMap[*sha]\n\tif !ok { // could this just return the map deref?\n\t\treturn nil, false\n\t}\n\treturn blkobj, true\n}",
  "func (c *DaemonClient) GetBlock(height uint, hash string) (Block, error) {\n\tvar b Block\n\treq := struct {\n\t\theight uint   `json:\"height, omitempty\"`\n\t\thash   string `json:\"hash, omitempty\"`\n\t}{\n\t\theight,\n\t\thash,\n\t}\n\tif err := call(c.endpoint, \"getblock\", req, &b); err != nil {\n\t\treturn b, err\n\t}\n\treturn b, nil\n}",
  "func (c *Client) GetBlock(hash string) (resp *Block, e error) {\n\tif hash == \"\" || len(hash) != 64 {\n\t\treturn nil, c.err(ErrBHW)\n\t}\n\n\tresp = &Block{}\n\treturn resp, c.Do(\"/rawblock/\"+hash, resp, nil)\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Get the mainchain block at the given height, checking the cache first. 
 | 
	func (dcr *DCRBackend) getMainchainDcrBlock(height uint32) (*dcrBlock, error) {
	cachedBlock, found := dcr.blockCache.atHeight(height)
	if found {
		return cachedBlock, nil
	}
	hash, err := dcr.node.GetBlockHash(int64(height))
	if err != nil {
		// Likely not mined yet. Not an error.
		return nil, nil
	}
	return dcr.getDcrBlock(hash)
} 
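Note the (nil, nil) return when no block exists at the height yet: callers must check the returned pointer as well as the error. A hypothetical call site:

block, err := dcr.getMainchainDcrBlock(height)
if err != nil {
	return err
}
if block == nil {
	// Not mined yet; wait for the next block notification.
	return nil
}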
 | 
	[
  "func (c *BlockCache) Get(height int) *walletrpc.CompactBlock {\n\tc.mutex.RLock()\n\tdefer c.mutex.RUnlock()\n\n\tif height < c.firstBlock || height >= c.nextBlock {\n\t\treturn nil\n\t}\n\tblock := c.readBlock(height)\n\tif block == nil {\n\t\tgo func() {\n\t\t\t// We hold only the read lock, need the exclusive lock.\n\t\t\tc.mutex.Lock()\n\t\t\tc.recoverFromCorruption(height - 10000)\n\t\t\tc.mutex.Unlock()\n\t\t}()\n\t\treturn nil\n\t}\n\treturn block\n}",
  "func GetBlock(cache *BlockCache, height int) (*walletrpc.CompactBlock, error) {\n\t// First, check the cache to see if we have the block\n\tblock := cache.Get(height)\n\tif block != nil {\n\t\treturn block, nil\n\t}\n\n\t// Not in the cache, ask zcashd\n\tblock, err := getBlockFromRPC(height)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif block == nil {\n\t\t// Block height is too large\n\t\treturn nil, errors.New(\"block requested is newer than latest block\")\n\t}\n\treturn block, nil\n}",
  "func (g *Geth) GetBlockAtHeight(height uint64) (WrkChainBlockHeader, error) {\n\n\tqueryUrl := viper.GetString(types.FlagWrkchainRpc)\n\n\tatHeight := \"latest\"\n\n\tif height > 0 {\n\t\tatHeight = \"0x\" + strconv.FormatUint(height, 16)\n\t}\n\n\tvar jsonStr = []byte(`{\"jsonrpc\":\"2.0\",\"method\":\"eth_getBlockByNumber\",\"params\":[\"` + atHeight + `\",false],\"id\":1}`)\n\n\tresp, err := http.Post(queryUrl, \"application/json\", bytes.NewBuffer(jsonStr))\n\tif err != nil {\n\t\treturn WrkChainBlockHeader{}, err\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn WrkChainBlockHeader{}, err\n\t}\n\n\tvar res GethBlockHeaderResult\n\terr = json.Unmarshal(body, &res)\n\tif err != nil {\n\t\treturn WrkChainBlockHeader{}, err\n\t}\n\n\theader := res.Result\n\tcleanedHeight := strings.Replace(header.Number, \"0x\", \"\", -1)\n\tblockNumber, err := strconv.ParseUint(cleanedHeight, 16, 64)\n\n\tif err != nil {\n\t\treturn WrkChainBlockHeader{}, err\n\t}\n\n\tblockHash := header.Hash\n\tparentHash := \"\"\n\thash1 := \"\"\n\thash2 := \"\"\n\thash3 := \"\"\n\tblockHeight := blockNumber\n\n\tif height == 0 {\n\t\tg.lastHeight = blockNumber\n\t}\n\n\tif viper.GetBool(types.FlagParentHash) {\n\t\tparentHash = header.ParentHash\n\t}\n\n\thash1Ref := viper.GetString(types.FlagHash1)\n\thash2Ref := viper.GetString(types.FlagHash2)\n\thash3Ref := viper.GetString(types.FlagHash3)\n\n\tif len(hash1Ref) > 0 {\n\t\thash1 = g.getHash(header, hash1Ref)\n\t}\n\n\tif len(hash2Ref) > 0 {\n\t\thash2 = g.getHash(header, hash2Ref)\n\t}\n\n\tif len(hash3Ref) > 0 {\n\t\thash3 = g.getHash(header, hash3Ref)\n\t}\n\n\twrkchainBlock := NewWrkChainBlockHeader(blockHeight, blockHash, parentHash, hash1, hash2, hash3)\n\n\treturn wrkchainBlock, nil\n}",
  "func (db *SqliteDb) fetchBlockHeightCache(height int64) (*blockCacheObj, bool) {\n\n\tdb.blockCache.cacheLock.RLock()\n\tdefer db.blockCache.cacheLock.RUnlock()\n\n\tblkobj, ok := db.blockCache.blockHeightMap[height]\n\tif !ok { // could this just return the map deref?\n\t\treturn nil, false\n\t}\n\treturn blkobj, true\n}",
  "func (s *SKYScanner) getBlockAtHeight(seq int64) (*CommonBlock, error) {\n\tb, err := s.getBlock(seq)\n\treturn b, err\n}",
  "func (blockChain *BlockChain) Get(height int32) ([]Block, bool) {\n\tfound := false\n\tif blockChain.Chain[height] != nil {\n\t\tfound = true\n\t\treturn blockChain.Chain[height], found\n\t}\n\treturn blockChain.Chain[height], found\n}",
  "func (exp *MockExplorer) GetBlockchainHeight() (int64, error) {\n\treturn randInt64(), nil\n}",
  "func (rpc *Client) GetBlockByHeight(h uint64) ([]byte, error) {\n\tvar (\n\t\tblockHash string\n\t\tblockData []byte\n\t\terr       error\n\t)\n\tblockHash, err = rpc.GetBlockHash(h)\n\tif err != nil {\n\t\treturn blockData, err\n\t}\n\tblockData, err = rpc.GetBlockByHash(blockHash)\n\treturn blockData, err\n}",
  "func (dao *Simple) GetBlock(hash util.Uint256) (*block.Block, error) {\n\tkey := storage.AppendPrefix(storage.DataExecutable, hash.BytesBE())\n\tb, err := dao.Store.Get(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := io.NewBinReaderFromBuf(b)\n\tif r.ReadB() != storage.ExecBlock {\n\t\treturn nil, errors.New(\"internal DB inconsistency\")\n\t}\n\tblock, err := block.NewTrimmedFromReader(dao.Version.StateRootInHeader, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn block, nil\n}",
  "func getBlockChainLatestHeight() (int64, error) {\n\tclient := helper.GetClient()\n\tdefer func() {\n\t\tclient.Release()\n\t}()\n\tstatus, err := client.Status()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tcurrentBlockHeight := status.SyncInfo.LatestBlockHeight\n\n\treturn currentBlockHeight, nil\n}",
  "func (s *State) blockAtHeight(height BlockHeight) (b Block, err error) {\n\tif bn, ok := s.blockMap[s.currentPath[height]]; ok {\n\t\tb = bn.Block\n\t\treturn\n\t}\n\terr = fmt.Errorf(\"no block at height %v found.\", height)\n\treturn\n}",
  "func (c *Client) GetBlockAt(height uint64) (block *rpctypes.ResultBlock, err error) {\n\tblock = new(rpctypes.ResultBlock)\n\tvar url string\n\tif height == 0 {\n\t\turl = c.URL(\"block/current\")\n\t} else {\n\t\turl = c.URL(\"block/height/%d\", height)\n\t}\n\terr = c.get(block, url)\n\terr = errors.Wrap(err, \"getting block by height\")\n\treturn\n}",
  "func (c cacheProvider) GetByHeight(h int) (fc FullCommit, err error) {\n\tfor _, p := range c.Providers {\n\t\tvar tfc FullCommit\n\t\ttfc, err = p.GetByHeight(h)\n\t\tif err == nil {\n\t\t\tif tfc.Height() > fc.Height() {\n\t\t\t\tfc = tfc\n\t\t\t}\n\t\t\tif tfc.Height() == h {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\t// even if the last one had an error, if any was a match, this is good\n\tif fc.Height() > 0 {\n\t\terr = nil\n\t}\n\treturn fc, err\n}",
  "func (b *Backend) GetLatestBlock(ctx context.Context, isSealed bool) (*flowgo.Block, error) {\n\tblock, err := b.emulator.GetLatestBlock()\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\tblockID := block.ID()\n\n\tb.logger.WithFields(logrus.Fields{\n\t\t\"blockHeight\": block.Header.Height,\n\t\t\"blockID\":     hex.EncodeToString(blockID[:]),\n\t}).Debug(\"🎁  GetLatestBlock called\")\n\n\treturn block, nil\n}",
  "func (db *DBlock) Get(c *Client) error {\n\tif db.IsPopulated() {\n\t\treturn nil\n\t}\n\n\tresult := struct{ DBlock *DBlock }{DBlock: db}\n\tif err := c.FactomdRequest(\"dblock-by-height\", db, &result); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
  "func (d *AddressCacheItem) BlockHeight() int64 {\n\td.mtx.RLock()\n\tdefer d.mtx.RUnlock()\n\treturn d.height\n}",
  "func (c *Chain) Get(h []byte) (*block.Block, error) {\n\tfor _, blk := range c.blocks {\n\t\tif bytes.Compare(h, blk.Hash) == 0 {\n\t\t\treturn blk, nil\n\t\t}\n\t}\n\treturn nil, errors.New(\"could not find block\")\n}",
  "func (dao *blockDAO) getBlockchainHeight() (uint64, error) {\n\tvalue, err := dao.kvstore.Get(blockNS, topHeightKey)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"failed to get top height\")\n\t}\n\tif len(value) == 0 {\n\t\treturn 0, errors.Wrap(db.ErrNotExist, \"blockchain height missing\")\n\t}\n\treturn enc.MachineEndian.Uint64(value), nil\n}",
  "func (c *BlockCache) GetFirstHeight() int {\n\tc.mutex.RLock()\n\tdefer c.mutex.RUnlock()\n\treturn c.firstBlock\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	connectNodeRPC attempts to create a new websocket connection to a dcrd node with the given credentials and notification handlers. 
 | 
	func connectNodeRPC(host, user, pass, cert string,
	notifications *rpcclient.NotificationHandlers) (*rpcclient.Client, error) {
	dcrdCerts, err := ioutil.ReadFile(cert)
	if err != nil {
		return nil, fmt.Errorf("TLS certificate read error: %v", err)
	}
	config := &rpcclient.ConnConfig{
		Host:         host,
		Endpoint:     "ws", // websocket
		User:         user,
		Pass:         pass,
		Certificates: dcrdCerts,
	}
	dcrdClient, err := rpcclient.New(config, notifications)
	if err != nil {
		return nil, fmt.Errorf("Failed to start dcrd RPC client: %v", err)
	}
	return dcrdClient, nil
} 
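A hypothetical call site; the host, credentials, and certificate path are placeholders, and the handler body is elided:

ntfnHandlers := &rpcclient.NotificationHandlers{
	OnBlockConnected: func(serializedHeader []byte, _ [][]byte) {
		// convert and deposit into the backend's queue, as in superQueue above
	},
}
dcrdClient, err := connectNodeRPC("127.0.0.1:9109", "rpcuser", "rpcpass",
	"dcrd/rpc.cert", ntfnHandlers)
if err != nil {
	return err
}
defer dcrdClient.Shutdown()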
 | 
	[
  "func (w *rpcWallet) Connect(ctx context.Context) error {\n\tw.rpcMtx.Lock()\n\tdefer w.rpcMtx.Unlock()\n\n\t// NOTE: rpcclient.(*Client).Disconnected() returns false prior to connect,\n\t// so we cannot block incorrect Connect calls on that basis. However, it is\n\t// always safe to call Shutdown, so do it just in case.\n\tw.rpcConnector.Shutdown()\n\n\t// Prepare a fresh RPC client.\n\tntfnHandlers := &rpcclient.NotificationHandlers{\n\t\t// Setup an on-connect handler for logging (re)connects.\n\t\tOnClientConnected: func() { w.handleRPCClientReconnection(ctx) },\n\t}\n\tnodeRPCClient, err := rpcclient.New(w.rpcCfg, ntfnHandlers)\n\tif err != nil { // should never fail since we validated the config in newRPCWallet\n\t\treturn fmt.Errorf(\"failed to create dcrwallet RPC client: %w\", err)\n\t}\n\n\tatomic.StoreUint32(&w.connectCount, 0) // handleRPCClientReconnection should skip checkRPCConnection on first Connect\n\n\tw.rpcConnector = nodeRPCClient\n\tw.rpcClient = newCombinedClient(nodeRPCClient, w.chainParams)\n\n\terr = nodeRPCClient.Connect(ctx, false) // no retry\n\tif err != nil {\n\t\treturn fmt.Errorf(\"dcrwallet connect error: %w\", err)\n\t}\n\n\tnet, err := w.rpcClient.GetCurrentNet(ctx)\n\tif err != nil {\n\t\treturn translateRPCCancelErr(err)\n\t}\n\tif net != w.chainParams.Net {\n\t\treturn fmt.Errorf(\"unexpected wallet network %s, expected %s\", net, w.chainParams.Net)\n\t}\n\n\t// The websocket client is connected now, so if the following check\n\t// fails and we return with a non-nil error, we must shutdown the\n\t// rpc client otherwise subsequent reconnect attempts will be met\n\t// with \"websocket client has already connected\".\n\tspv, err := checkRPCConnection(ctx, w.rpcConnector, w.rpcClient, w.log)\n\tif err != nil {\n\t\t// The client should still be connected, but if not, do not try to\n\t\t// shutdown and wait as it could hang.\n\t\tif !errors.Is(err, rpcclient.ErrClientShutdown) {\n\t\t\t// Using w.Disconnect would deadlock with rpcMtx already locked.\n\t\t\tw.rpcConnector.Shutdown()\n\t\t\tw.rpcConnector.WaitForShutdown()\n\t\t}\n\t\treturn err\n\t}\n\n\tw.spvMode = spv\n\n\treturn nil\n}",
  "func connectRPC(host, user, pass, certPath string) (*rpcclient.Client, error) {\n\t// Attempt to read certs\n\tcerts := []byte{}\n\tvar readCerts []byte\n\tvar err error\n\tif len(certPath) > 0 {\n\t\treadCerts, err = ioutil.ReadFile(certPath)\n\t} else {\n\t\t// Try a default cert path\n\t\tsoterdDir := soterutil.AppDataDir(\"soterd\", false)\n\t\treadCerts, err = ioutil.ReadFile(filepath.Join(soterdDir, \"rpc.cert\"))\n\t}\n\tif err == nil {\n\t\tcerts = readCerts\n\t}\n\n\tcfg := rpcclient.ConnConfig{\n\t\tHost: host,\n\t\tEndpoint: \"ws\",\n\t\tUser: user,\n\t\tPass: pass,\n\t\tCertificates: certs,\n\t\tDisableAutoReconnect: true,\n\t}\n\n\tclient, err := rpcclient.New(&cfg, nil)\n\tif err != nil {\n\t\treturn client, err\n\t}\n\n\treturn client, nil\n}",
  "func ConnectTransferNode(url string, auth string) *TransferNodeConnection {\n\tconn, _, err := websocket.DefaultDialer.Dial(url, http.Header{})\n\tnewConn := &TransferNodeConnection{\n\t\tConn:                  conn,\n\t\tError:                 err,\n\t\tAuthKey:               auth,\n\t\tChunksRequests:        make(chan ChunksRequest, 10), // buffered so we can write to it before listening to it\n\t\tReceivedFileID:        make(chan string),\n\t\tReceivedProgressBytes: make(chan uint64),\n\t\tReceivedError:         make(chan error),\n\t\tReceivedRecipients:    make(chan []*protobufs.RecipientsData_Recipient),\n\t\tDone:                  make(chan bool),\n\t}\n\tgo newConn.readLoop()\n\treturn newConn\n}",
  "func (mgr *connectionsManager) Connect(opts ConnectOpts) {\n\tctx, cancel := context.WithCancel(context.Background())\n\n\tconn := &connection{\n\t\tctx:       ctx,\n\t\tcancel:    cancel,\n\t\tconnected: false,\n\t}\n\n\tmgr.wgClosed.Add(1)\n\n\tmgr.mu.Lock()\n\tmgr.connections[opts.FeedsManagerID] = conn\n\tmgr.mu.Unlock()\n\n\tgo recovery.WrapRecover(mgr.lggr, func() {\n\t\tdefer mgr.wgClosed.Done()\n\n\t\tmgr.lggr.Infow(\"Connecting to Feeds Manager...\", \"feedsManagerID\", opts.FeedsManagerID)\n\n\t\tclientConn, err := wsrpc.DialWithContext(conn.ctx, opts.URI,\n\t\t\twsrpc.WithTransportCreds(opts.Privkey, ed25519.PublicKey(opts.Pubkey)),\n\t\t\twsrpc.WithBlock(),\n\t\t\twsrpc.WithLogger(mgr.lggr),\n\t\t)\n\t\tif err != nil {\n\t\t\t// We only want to log if there was an error that did not occur\n\t\t\t// from a context cancel.\n\t\t\tif conn.ctx.Err() == nil {\n\t\t\t\tmgr.lggr.Warnf(\"Error connecting to Feeds Manager server: %v\", err)\n\t\t\t} else {\n\t\t\t\tmgr.lggr.Infof(\"Closing wsrpc websocket connection: %v\", err)\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\t\tdefer clientConn.Close()\n\n\t\tmgr.lggr.Infow(\"Connected to Feeds Manager\", \"feedsManagerID\", opts.FeedsManagerID)\n\n\t\t// Initialize a new wsrpc client to make RPC calls\n\t\tmgr.mu.Lock()\n\t\tconn.connected = true\n\t\tconn.client = pb.NewFeedsManagerClient(clientConn)\n\t\tmgr.connections[opts.FeedsManagerID] = conn\n\t\tmgr.mu.Unlock()\n\n\t\t// Initialize RPC call handlers on the client connection\n\t\tpb.RegisterNodeServiceServer(clientConn, opts.Handlers)\n\n\t\tif opts.OnConnect != nil {\n\t\t\topts.OnConnect(conn.client)\n\t\t}\n\n\t\t// Detect changes in connection status\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\ts := clientConn.GetState()\n\n\t\t\t\tclientConn.WaitForStateChange(conn.ctx, s)\n\n\t\t\t\ts = clientConn.GetState()\n\n\t\t\t\t// Exit the goroutine if we shutdown the connection\n\t\t\t\tif s == connectivity.Shutdown {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tmgr.mu.Lock()\n\t\t\t\tconn.connected = s == connectivity.Ready\n\t\t\t\tmgr.mu.Unlock()\n\t\t\t}\n\t\t}()\n\n\t\t// Wait for close\n\t\t<-conn.ctx.Done()\n\t})\n}",
  "func DiscordConnect() (err error) {\n\tdg, err = discordgo.New(\"Bot \" + o.DiscordToken)\n\tif err != nil {\n\t\tlog.Println(\"FATA: error creating Discord session,\", err)\n\t\treturn\n\t}\n\tlog.Println(\"INFO: Bot is Opening\")\n\tdg.AddHandler(MessageCreateHandler)\n\tdg.AddHandler(GuildCreateHandler)\n\t// dg.AddHandler(GuildDeleteHandler)\n\tdg.AddHandler(VoiceStatusUpdateHandler)\n\tdg.AddHandler(ConnectHandler)\n\tif o.DiscordNumShard > 1 {\n\t\tdg.ShardCount = o.DiscordNumShard\n\t\tdg.ShardID = o.DiscordShardID\n\t}\n\n\tif o.Debug {\n\t\tdg.LogLevel = discordgo.LogDebug\n\t}\n\t// Open Websocket\n\terr = dg.Open()\n\tif err != nil {\n\t\tlog.Println(\"FATA: Error Open():\", err)\n\t\treturn\n\t}\n\t_, err = dg.User(\"@me\")\n\tif err != nil {\n\t\t// Login unsuccessful\n\t\tlog.Println(\"FATA:\", err)\n\t\treturn\n\t} // Login successful\n\tlog.Println(\"INFO: Bot is now running. Press CTRL-C to exit.\")\n\tinitRoutine()\n\tdg.UpdateGameStatus(0, o.DiscordStatus)\n\treturn nil\n}",
  "func (h *Harness) connectRPCClient() error {\n\tvar client *rpc.Client\n\tvar err error\n\n\trpcConf := h.node.config.rpcConnConfig()\n\tfor i := 0; i < h.maxConnRetries; i++ {\n\t\tif client, err = rpc.New(&rpcConf, h.handlers); err != nil {\n\t\t\ttime.Sleep(time.Duration(math.Log(float64(i+3))) * 50 * time.Millisecond)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\n\tif client == nil {\n\t\treturn fmt.Errorf(\"connection timed out: %v\", err)\n\t}\n\n\terr = client.NotifyBlocks()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\th.Node = client\n\treturn nil\n}",
  "func (cm *RPCConnManager) Connect(addr string, permanent bool) error {\n\treplyChan := make(chan error)\n\tcm.server.query <- connectNodeMsg{\n\t\taddr:      addr,\n\t\tpermanent: permanent,\n\t\treply:     replyChan,\n\t}\n\treturn <-replyChan\n}",
  "func (cm *rpcConnManager) Connect(addr string, permanent bool) error {\n\treplyChan := make(chan error)\n\tcm.server.query <- connectNodeMsg{\n\t\taddr:      addr,\n\t\tpermanent: permanent,\n\t\treply:     replyChan,\n\t}\n\treturn <-replyChan\n}",
  "func (h *Harness) connectRPCClient() (e error) {\n\tvar client *rpcclient.Client\n\trpcConf := h.node.config.rpcConnConfig()\n\tfor i := 0; i < h.maxConnRetries; i++ {\n\t\tif client, e = rpcclient.New(&rpcConf, h.handlers, qu.T()); E.Chk(e) {\n\t\t\ttime.Sleep(time.Duration(i) * 50 * time.Millisecond)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\tif client == nil {\n\t\treturn fmt.Errorf(\"connection timeout\")\n\t}\n\th.Node = client\n\th.wallet.SetRPCClient(client)\n\treturn nil\n}",
  "func ws_Server_Connect(server_url string) (ws *websocket.Conn, err error) {\n\n\tvar wss_server_url = wss_prefix + server_url\n\tws, err = wsProxyDial(wss_server_url, \"tcp\", wss_server_url)\n\n\tif err != nil {\n\t\tlog.Printf(\"[%s] wsProxyDial : \", __FILE__, err)\n\t\treturn nil, err\n\t}\n\n\tname := getUniqueID()\n\n\terr = wsReqeustConnection(ws, name)\n\tif err != nil {\n\t\tlog.Printf(\"[%s] ws_Server_Connect error = \", err)\n\t\treturn ws, err\n\t}\n\n\treturn ws, nil\n\n}",
  "func connectWebsocket(port string) {\n\tfor {\n\t\tvar err error\n\t\tServerIP = utils.DiscoverServer()\n\t\tif ServerIP == \"\" {\n\t\t\tlogger.WithFields(logger.Fields{}).Debugf(\"Wait 5 seconds to redail...\")\n\t\t\ttime.Sleep(time.Second * 5)\n\t\t\tcontinue\n\t\t}\n\t\tu := url.URL{Scheme: \"ws\", Host: ServerIP + \":\" + os.Getenv(\"CASA_SERVER_PORT\"), Path: \"/v1/ws\"}\n\t\tWS, _, err = websocket.DefaultDialer.Dial(u.String(), nil)\n\t\tif err != nil {\n\t\t\tlogger.WithFields(logger.Fields{\"code\": \"CGGWCCW001\"}).Errorf(\"%s\", err.Error())\n\t\t\tlogger.WithFields(logger.Fields{}).Debugf(\"Wait 5 seconds to redail...\")\n\t\t\ttime.Sleep(time.Second * 5)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\n\taddr := strings.Split(WS.LocalAddr().String(), \":\")[0]\n\tmessage := WebsocketMessage{\n\t\tAction: \"newConnection\",\n\t\tBody:   []byte(addr + \":\" + port),\n\t}\n\tbyteMessage, _ := json.Marshal(message)\n\terr := WS.WriteMessage(websocket.TextMessage, byteMessage)\n\tif err != nil {\n\t\tlogger.WithFields(logger.Fields{\"code\": \"CGGWCCW002\"}).Errorf(\"%s\", err.Error())\n\t\treturn\n\t}\n\tlogger.WithFields(logger.Fields{}).Debugf(\"Websocket connected!\")\n}",
  "func connectConsumer(client *http.Client) (*websocket.Conn, error) {\n\n\ttransport, ok := client.Transport.(*http.Transport)\n\tif !ok {\n\t\treturn nil, errors.New(\"HTTP client doens't have http.Transport\")\n\t}\n\n\tsocket := &websocket.Dialer{\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tRootCAs: transport.TLSClientConfig.RootCAs,\n\t\t\tCertificates: []tls.Certificate{\n\t\t\t\ttransport.TLSClientConfig.Certificates[0]},\n\t\t\tServerName: common.Cfg.EaaCommonName,\n\t\t\tMinVersion: tls.VersionTLS12,\n\t\t},\n\t}\n\n\thostHeader := http.Header{}\n\thostHeader.Add(\"Host\", common.Cfg.Namespace+\":\"+common.Cfg.ConsumerAppID)\n\n\tconn, resp, err := socket.Dial(\"wss://\"+common.Cfg.EdgeNodeEndpoint+\n\t\t\"/notifications\", hostHeader)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Couldn't dial to wss\")\n\t}\n\tdefer func() {\n\t\tif err := resp.Body.Close(); err != nil {\n\t\t\tlog.Println(\"Failed to close response body\")\n\t\t}\n\t}()\n\n\treturn conn, nil\n}",
  "func (c *Client) connect(ctx context.Context) error {\n\theader := http.Header{}\n\t// Disable compression by requiring \"identity\"\n\theader.Set(\"Accept-Encoding\", \"identity\")\n\theader.Set(\"User-Agent\", useragent.GetEncodedUserAgent())\n\theader.Set(\"X-Stripe-Client-User-Agent\", useragent.GetEncodedStripeUserAgent())\n\theader.Set(\"Websocket-Id\", c.WebSocketID)\n\n\turl := c.URL\n\tif c.cfg.NoWSS && strings.HasPrefix(url, \"wss\") {\n\t\turl = \"ws\" + strings.TrimPrefix(c.URL, \"wss\")\n\t}\n\n\turl = url + \"?websocket_feature=\" + c.WebSocketAuthorizedFeature\n\n\tc.cfg.Log.WithFields(log.Fields{\n\t\t\"prefix\": \"websocket.Client.connect\",\n\t\t\"url\":    url,\n\t}).Debug(\"Dialing websocket\")\n\n\tconn, resp, err := c.cfg.Dialer.DialContext(ctx, url, header)\n\tif err != nil {\n\t\tmessage := readWSConnectErrorMessage(resp)\n\t\tc.cfg.Log.WithFields(log.Fields{\n\t\t\t\"prefix\":  \"websocket.Client.connect\",\n\t\t\t\"error\":   err,\n\t\t\t\"message\": message,\n\t\t}).Debug(\"Websocket connection error\")\n\t\tif message == unknownIDMessage {\n\t\t\treturn ErrUnknownID\n\t\t}\n\t\treturn err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tc.changeConnection(conn)\n\tc.isConnected = true\n\n\tc.wg = &sync.WaitGroup{}\n\tc.wg.Add(2)\n\n\tgo c.readPump()\n\n\tgo c.writePump()\n\n\tc.cfg.Log.WithFields(log.Fields{\n\t\t\"prefix\": \"websocket.client.connect\",\n\t}).Debug(\"Connected!\")\n\n\treturn err\n}",
  "func ConnectWebsocketServer(addr, origin string) (c net.Conn, err error) {\n\tcfg, err := websocket.NewConfig(addr, origin)\n\tif err != nil {\n\t\treturn\n\t}\n\tcfg.Dialer = &net.Dialer{\n\t\tTimeout: time.Second * 10,\n\t}\n\n\tconn, err := websocket.DialConfig(cfg)\n\tif err != nil {\n\t\treturn\n\t}\n\tc = NewWebSocketConn(conn)\n\treturn\n}",
  "func (c *WebsocketController) Connect(ctx *app.ConnectWebsocketContext) error {\n\tc.ConnectWSHandler(ctx).ServeHTTP(ctx.ResponseWriter, ctx.Request)\n\treturn nil\n}",
  "func (s *Server) ConnectNode(w http.ResponseWriter, req *http.Request) {\n\tnode := req.Context().Value(\"node\").(*peer.Node)\n\tpeer := req.Context().Value(\"peer\").(*peer.Node)\n\n\tif err := s.network.Connect(node.ID, peer.ID); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\ts.JSON(w, http.StatusOK, node.ID)\n}",
  "func newRPCWallet(settings map[string]string, logger dex.Logger, net dex.Network) (*rpcWallet, error) {\n\tcfg, chainParams, err := loadRPCConfig(settings, net)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error parsing config: %w\", err)\n\t}\n\n\t// Check rpc connection config values\n\tmissing := \"\"\n\tif cfg.RPCUser == \"\" {\n\t\tmissing += \" username\"\n\t}\n\tif cfg.RPCPass == \"\" {\n\t\tmissing += \" password\"\n\t}\n\tif missing != \"\" {\n\t\treturn nil, fmt.Errorf(\"missing dcrwallet rpc credentials:%s\", missing)\n\t}\n\n\tlog := logger.SubLogger(\"RPC\")\n\trpcw := &rpcWallet{\n\t\tchainParams: chainParams,\n\t\tlog:         log,\n\t}\n\n\tcerts, err := os.ReadFile(cfg.RPCCert)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"TLS certificate read error: %w\", err)\n\t}\n\n\tlog.Infof(\"Setting up rpc client to communicate with dcrwallet at %s with TLS certificate %q.\",\n\t\tcfg.RPCListen, cfg.RPCCert)\n\trpcw.rpcCfg = &rpcclient.ConnConfig{\n\t\tHost:                cfg.RPCListen,\n\t\tEndpoint:            \"ws\",\n\t\tUser:                cfg.RPCUser,\n\t\tPass:                cfg.RPCPass,\n\t\tCertificates:        certs,\n\t\tDisableConnectOnNew: true, // don't start until Connect\n\t}\n\t// Validate the RPC client config, and create a placeholder (non-nil) RPC\n\t// connector and client that will be replaced on Connect. Any method calls\n\t// prior to Connect will be met with rpcclient.ErrClientNotConnected rather\n\t// than a panic.\n\tnodeRPCClient, err := rpcclient.New(rpcw.rpcCfg, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error setting up rpc client: %w\", err)\n\t}\n\trpcw.rpcConnector = nodeRPCClient\n\trpcw.rpcClient = newCombinedClient(nodeRPCClient, chainParams)\n\n\treturn rpcw, nil\n}",
  "func (c *Client) connectCmd(req *protocol.ConnectRequest, cmd *protocol.Command, started time.Time, rw *replyWriter) (*protocol.ConnectResult, error) {\n\tc.mu.RLock()\n\tauthenticated := c.authenticated\n\tclosed := c.status == statusClosed\n\tc.mu.RUnlock()\n\n\tif closed {\n\t\treturn nil, DisconnectConnectionClosed\n\t}\n\n\tif authenticated {\n\t\treturn nil, c.logDisconnectBadRequest(\"client already authenticated\")\n\t}\n\n\tconfig := c.node.config\n\tversion := config.Version\n\tuserConnectionLimit := config.UserConnectionLimit\n\tchannelLimit := config.ClientChannelLimit\n\n\tvar (\n\t\tcredentials       *Credentials\n\t\tauthData          protocol.Raw\n\t\tsubscriptions     map[string]SubscribeOptions\n\t\tclientSideRefresh bool\n\t)\n\n\tif c.node.clientEvents.connectingHandler != nil {\n\t\te := ConnectEvent{\n\t\t\tClientID:  c.ID(),\n\t\t\tData:      req.Data,\n\t\t\tToken:     req.Token,\n\t\t\tName:      req.Name,\n\t\t\tVersion:   req.Version,\n\t\t\tTransport: c.transport,\n\t\t}\n\t\tif len(req.Subs) > 0 {\n\t\t\tchannels := make([]string, 0, len(req.Subs))\n\t\t\tfor ch := range req.Subs {\n\t\t\t\tchannels = append(channels, ch)\n\t\t\t}\n\t\t\te.Channels = channels\n\t\t}\n\t\treply, err := c.node.clientEvents.connectingHandler(c.ctx, e)\n\t\tif err != nil {\n\t\t\tc.startWriter(0, 0, 0)\n\t\t\treturn nil, err\n\t\t}\n\t\tif reply.PingPongConfig != nil {\n\t\t\tc.pingInterval, c.pongTimeout = getPingPongPeriodValues(*reply.PingPongConfig)\n\t\t} else {\n\t\t\tc.pingInterval, c.pongTimeout = getPingPongPeriodValues(c.transport.PingPongConfig())\n\t\t}\n\t\tc.replyWithoutQueue = reply.ReplyWithoutQueue\n\t\tc.startWriter(reply.WriteDelay, reply.MaxMessagesInFrame, reply.QueueInitialCap)\n\n\t\tif reply.Credentials != nil {\n\t\t\tcredentials = reply.Credentials\n\t\t}\n\t\tc.storage = reply.Storage\n\t\tif reply.Context != nil {\n\t\t\tc.mu.Lock()\n\t\t\tc.ctx = reply.Context\n\t\t\tc.mu.Unlock()\n\t\t}\n\t\tif reply.Data != nil {\n\t\t\tauthData = reply.Data\n\t\t}\n\t\tclientSideRefresh = reply.ClientSideRefresh\n\t\tif len(reply.Subscriptions) > 0 {\n\t\t\tsubscriptions = make(map[string]SubscribeOptions, len(reply.Subscriptions))\n\t\t\tfor ch, opts := range reply.Subscriptions {\n\t\t\t\tif ch == \"\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tsubscriptions[ch] = opts\n\t\t\t}\n\t\t}\n\t} else {\n\t\tc.startWriter(0, 0, 0)\n\t\tc.pingInterval, c.pongTimeout = getPingPongPeriodValues(c.transport.PingPongConfig())\n\t}\n\n\tif channelLimit > 0 && len(subscriptions) > channelLimit {\n\t\treturn nil, DisconnectChannelLimit\n\t}\n\n\tif credentials == nil {\n\t\t// Try to find Credentials in context.\n\t\tif cred, ok := GetCredentials(c.ctx); ok {\n\t\t\tcredentials = cred\n\t\t}\n\t}\n\n\tvar (\n\t\texpires bool\n\t\tttl     uint32\n\t)\n\n\tc.mu.Lock()\n\tc.clientSideRefresh = clientSideRefresh\n\tc.mu.Unlock()\n\n\tif credentials == nil {\n\t\treturn nil, c.logDisconnectBadRequest(\"client credentials not found\")\n\t}\n\n\tc.mu.Lock()\n\tc.user = credentials.UserID\n\tc.info = credentials.Info\n\tc.exp = credentials.ExpireAt\n\n\tuser := c.user\n\texp := c.exp\n\tclosed = c.status == statusClosed\n\tc.mu.Unlock()\n\n\tif closed {\n\t\treturn nil, DisconnectConnectionClosed\n\t}\n\n\tif c.node.LogEnabled(LogLevelDebug) {\n\t\tc.node.logger.log(newLogEntry(LogLevelDebug, \"client authenticated\", map[string]any{\"client\": c.uid, \"user\": c.user}))\n\t}\n\n\tif userConnectionLimit > 0 && user != \"\" && len(c.node.hub.UserConnections(user)) >= userConnectionLimit 
{\n\t\tc.node.logger.log(newLogEntry(LogLevelInfo, \"limit of connections for user reached\", map[string]any{\"user\": user, \"client\": c.uid, \"limit\": userConnectionLimit}))\n\t\treturn nil, DisconnectConnectionLimit\n\t}\n\n\tc.mu.RLock()\n\tif exp > 0 {\n\t\texpires = true\n\t\tnow := time.Now().Unix()\n\t\tif exp < now {\n\t\t\tc.mu.RUnlock()\n\t\t\tc.node.logger.log(newLogEntry(LogLevelInfo, \"connection expiration must be greater than now\", map[string]any{\"client\": c.uid, \"user\": c.UserID()}))\n\t\t\treturn nil, ErrorExpired\n\t\t}\n\t\tttl = uint32(exp - now)\n\t}\n\tc.mu.RUnlock()\n\n\tres := &protocol.ConnectResult{\n\t\tVersion: version,\n\t\tExpires: expires,\n\t\tTtl:     ttl,\n\t}\n\n\tif c.pingInterval > 0 {\n\t\tres.Ping = uint32(c.pingInterval.Seconds())\n\t}\n\tif !c.transport.Unidirectional() && c.pongTimeout > 0 {\n\t\tres.Pong = true\n\t}\n\n\tif c.transport.Unidirectional() || c.transport.Emulation() {\n\t\tres.Session = c.session\n\t}\n\tif c.transport.Emulation() {\n\t\tres.Node = c.node.ID()\n\t}\n\n\t// Client successfully connected.\n\tc.mu.Lock()\n\tc.authenticated = true\n\tc.mu.Unlock()\n\n\terr := c.node.addClient(c)\n\tif err != nil {\n\t\tc.node.logger.log(newLogEntry(LogLevelError, \"error adding client\", map[string]any{\"client\": c.uid, \"error\": err.Error()}))\n\t\treturn nil, DisconnectServerError\n\t}\n\n\tif !clientSideRefresh {\n\t\t// Server will do refresh itself.\n\t\tres.Expires = false\n\t\tres.Ttl = 0\n\t}\n\n\tres.Client = c.uid\n\tif authData != nil {\n\t\tres.Data = authData\n\t}\n\n\tvar subCtxMap map[string]subscribeContext\n\tif len(subscriptions) > 0 {\n\t\tvar subMu sync.Mutex\n\t\tsubCtxMap = make(map[string]subscribeContext, len(subscriptions))\n\t\tsubs := make(map[string]*protocol.SubscribeResult, len(subscriptions))\n\t\tvar subDisconnect *Disconnect\n\t\tvar subError *Error\n\t\tvar wg sync.WaitGroup\n\n\t\twg.Add(len(subscriptions))\n\t\tfor ch, opts := range subscriptions {\n\t\t\tgo func(ch string, opts SubscribeOptions) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tsubCmd := &protocol.SubscribeRequest{\n\t\t\t\t\tChannel: ch,\n\t\t\t\t}\n\t\t\t\tif subReq, ok := req.Subs[ch]; ok {\n\t\t\t\t\tsubCmd.Recover = subReq.Recover\n\t\t\t\t\tsubCmd.Offset = subReq.Offset\n\t\t\t\t\tsubCmd.Epoch = subReq.Epoch\n\t\t\t\t}\n\t\t\t\tsubCtx := c.subscribeCmd(subCmd, SubscribeReply{Options: opts}, nil, true, started, nil)\n\t\t\t\tsubMu.Lock()\n\t\t\t\tsubs[ch] = subCtx.result\n\t\t\t\tsubCtxMap[ch] = subCtx\n\t\t\t\tif subCtx.disconnect != nil {\n\t\t\t\t\tsubDisconnect = subCtx.disconnect\n\t\t\t\t}\n\t\t\t\tif subCtx.err != nil {\n\t\t\t\t\tsubError = subCtx.err\n\t\t\t\t}\n\t\t\t\tsubMu.Unlock()\n\t\t\t}(ch, opts)\n\t\t}\n\t\twg.Wait()\n\n\t\tif subDisconnect != nil || subError != nil {\n\t\t\tc.unlockServerSideSubscriptions(subCtxMap)\n\t\t\tfor channel := range subCtxMap {\n\t\t\t\tc.onSubscribeError(channel)\n\t\t\t}\n\t\t\tif subDisconnect != nil {\n\t\t\t\treturn nil, subDisconnect\n\t\t\t}\n\t\t\treturn nil, subError\n\t\t}\n\t\tres.Subs = subs\n\t}\n\n\tif c.transport.Unidirectional() {\n\t\tif !hasFlag(c.transport.DisabledPushFlags(), PushFlagConnect) {\n\t\t\tprotoReply, err := c.getConnectPushReply(res)\n\t\t\tif err != nil {\n\t\t\t\tc.unlockServerSideSubscriptions(subCtxMap)\n\t\t\t\tc.node.logger.log(newLogEntry(LogLevelError, \"error encoding connect\", map[string]any{\"error\": err.Error()}))\n\t\t\t\treturn nil, DisconnectServerError\n\t\t\t}\n\t\t\tc.writeEncodedPush(protoReply, rw)\n\t\t}\n\t} else {\n\t\tprotoReply, 
err := c.getConnectCommandReply(res)\n\t\tif err != nil {\n\t\t\tc.unlockServerSideSubscriptions(subCtxMap)\n\t\t\tc.node.logger.log(newLogEntry(LogLevelError, \"error encoding connect\", map[string]any{\"error\": err.Error()}))\n\t\t\treturn nil, DisconnectServerError\n\t\t}\n\t\tc.writeEncodedCommandReply(commandConnect, cmd, protoReply, rw)\n\t\tdefer c.releaseConnectCommandReply(protoReply)\n\t\tdefer c.handleCommandFinished(cmd, commandConnect, nil, protoReply, started)\n\t}\n\n\tc.mu.Lock()\n\tfor channel, subCtx := range subCtxMap {\n\t\tc.channels[channel] = subCtx.channelContext\n\t}\n\tc.mu.Unlock()\n\n\tc.unlockServerSideSubscriptions(subCtxMap)\n\n\tif len(subCtxMap) > 0 {\n\t\tfor channel, subCtx := range subCtxMap {\n\t\t\tgo func(channel string, subCtx subscribeContext) {\n\t\t\t\tif channelHasFlag(subCtx.channelContext.flags, flagEmitJoinLeave) && subCtx.clientInfo != nil {\n\t\t\t\t\t_ = c.node.publishJoin(channel, subCtx.clientInfo)\n\t\t\t\t}\n\t\t\t}(channel, subCtx)\n\t\t}\n\t}\n\n\treturn res, nil\n}",
  "func BtcdConnect(certificates []byte) (*BtcdRPCConn, error) {\n\t// Open websocket connection.\n\tws, err := BtcdWS(certificates)\n\tif err != nil {\n\t\tlog.Errorf(\"Cannot open websocket connection to btcd: %v\", err)\n\t\treturn nil, err\n\t}\n\n\t// Create and start RPC connection using the btcd websocket.\n\trpc := NewBtcdRPCConn(ws)\n\trpc.Start()\n\treturn rpc, nil\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	NewHuffmanEncoder creates an encoder from the input io.ReadSeeker and prepares it for writing to the output io.Writer. It builds the dictionary by counting the frequency of each byte in the input. 
 | 
	func NewHuffmanEncoder(inp io.ReadSeeker, wc io.Writer) *HuffmanEncoder {
	he := new(HuffmanEncoder)
	freq := make(map[byte]int)
	var b [1]byte
	// using the reader, count the frequency of each byte
	for {
		_, err := inp.Read(b[:])
		if err != nil {
			if err == io.EOF {
				break
			}
			panic(err)
		}
		freq[b[0]]++ // map lookups default to zero, so no explicit initialization is needed
	}
	// rewind so the caller can re-read the input when encoding
	_, err := inp.Seek(0, io.SeekStart)
	if err != nil {
		panic(err)
	}
	pQ := make(PriorityQueue, len(freq))
	i := 0
	for v, f := range freq {
		pQ[i] = NewHNode(v, f)
		i++
	}
	heap.Init(&pQ)
	// merge the two lowest-priority nodes until a single tree remains;
	// heap.Pop must be used here: heap.Interface's Pop method alone only
	// returns the last slice element, not the heap minimum
	for pQ.Len() > 1 {
		zero := heap.Pop(&pQ)
		l := zero.(Item)
		one := heap.Pop(&pQ)
		r := one.(Item)
		ht := NewHTree(l, r)
		heap.Push(&pQ, ht)
	}
	htree := heap.Pop(&pQ)
	root, ok := htree.(*HTree)
	if !ok {
		panic("Huffman Tree")
	}
	he.root = root
	he.dict = make(map[byte]Huffcode)
	filldict(he.root, "", he.dict)
	he.bw = bs.NewWriter(wc)
	return he
}
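
A minimal usage sketch for the constructor above (not part of the original source): os.File satisfies io.ReadSeeker, bytes.Buffer satisfies io.Writer, and WriteHeader is the header routine documented later in this file. The input file name and the panic-based error handling are illustrative only.

f, err := os.Open("input.bin") // hypothetical input file
if err != nil {
	panic(err)
}
defer f.Close()

var out bytes.Buffer
enc := NewHuffmanEncoder(f, &out) // counts byte frequencies, then seeks back to the start
if err := enc.WriteHeader(); err != nil { // emit the code tree before any data
	panic(err)
}
// the caller would then re-read f and emit the code for each byte from enc.dict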
 | 
	[
  "func Encode(in, out *os.File) {\n\tcounts := count(in)\n\tp := makePQ(counts)\n\th := makeHuffman(p)\n\tm := make(map[byte]string)\n\tfillMap(h, m, \"\")\n\tfor k, v := range m {\n\t\tfmt.Printf(\"k: %c, v: %s\\n\", k, v)\n\t}\n}",
  "func newEncoder(out io.Writer, order binary.ByteOrder, fds []int) *encoder {\n\tenc := newEncoderAtOffset(out, 0, order, fds)\n\treturn enc\n}",
  "func createFrequencyTable(bytes *vector.Vector) *dictionary.Dictionary {\n\tdict := dictionary.New()\n\tfor i := 0; i < bytes.Size(); i++ {\n\t\tbyt := bytes.MustGet(i)\n\n\t\tif frequency, exists := dict.Get(byt); !exists {\n\t\t\tdict.Set(byt, 1)\n\t\t} else {\n\t\t\tdict.Set(byt, frequency.(int)+1)\n\t\t}\n\t}\n\n\treturn dict\n}",
  "func BuildHuffmanCode(depth []byte, counts, values []int) {\n\tfor i := 0; i < JpegHistogram_kSize; i++ {\n\t\tif depth[i] > 0 {\n\t\t\tcounts[depth[i]]++\n\t\t}\n\t}\n\tvar offset [kJpegHuffmanMaxBitLength + 1]int\n\tfor i := 1; i <= kJpegHuffmanMaxBitLength; i++ {\n\t\toffset[i] = offset[i-1] + counts[i-1]\n\t}\n\tfor i := 0; i < JpegHistogram_kSize; i++ {\n\t\tif depth[i] > 0 {\n\t\t\tvalues[offset[depth[i]]] = i\n\t\t\toffset[depth[i]]++\n\t\t}\n\t}\n}",
  "func NewHuffmanEncoderWithDict(wc io.Writer, dict []byte) *HuffmanEncoder {\n\the := new(HuffmanEncoder)\n\n\tpQ := make(PriorityQueue, len(dict))\n\tMaxPri := len(dict)\n\tfor i, v := range dict {\n\t\tpQ[i] = NewHNode(v, MaxPri - i)\t// prioritize in order of dict\n\t}\n\n\theap.Init(&pQ)\n\n\tfor pQ.Len() > 1 {\n\t\tzero := pQ.Pop()\n\t\tl := zero.(Item)\n\t\tone := pQ.Pop()\n\t\tr := one.(Item)\n\t\tht := NewHTree(l, r)\n\t\theap.Push(&pQ, ht)\n\t}\n\n\thtree := pQ.Pop()\n\troot, ok := htree.(*HTree)\n\tif !ok {\n\t\tpanic(\"Huffman Tree\")\n\t}\n\the.root = root\n\the.dict = make(map[byte]Huffcode)\n\tfilldict(he.root, \"\", he.dict)\n\the.bw = bs.NewWriter(wc)\n\treturn he\n}",
  "func NewEncoder(w io.WriteSeeker, sampleRate, bitDepth, numChans, audioFormat int) *Encoder {\n\treturn &Encoder{\n\t\tw:              w,\n\t\tbuf:            bytes.NewBuffer(make([]byte, 0, bytesNumFromDuration(time.Minute, sampleRate, bitDepth)*numChans)),\n\t\tSampleRate:     sampleRate,\n\t\tBitDepth:       bitDepth,\n\t\tNumChans:       numChans,\n\t\tWavAudioFormat: audioFormat,\n\t}\n}",
  "func NewEncodingTree(freq map[uint8]uint) *Node {\n\tvar head Node // Fictitious head\n\n\tfor i, v := range freq {\n\t\tnode := &Node{\n\t\t\tvalue:  i,\n\t\t\tweight: v,\n\t\t}\n\t\thead.insert(node)\n\t}\n\n\tfor head.next != nil && head.next.next != nil {\n\t\tl := head.popFirst()\n\t\tr := head.popFirst()\n\n\t\tnode := join(l, r)\n\t\thead.insert(node)\n\t}\n\n\t// Fictitious head point to tree root\n\tif head.next != nil {\n\t\thead.next.prev = nil\n\t}\n\treturn head.next\n}",
  "func NewEncoder(enc *Encoding, w io.Writer) io.WriteCloser {}",
  "func NewEncoder(w io.Writer) io.WriteCloser {}",
  "func newCountHashWriter(w io.Writer) *countHashWriter {\n\treturn &countHashWriter{w: w}\n}",
  "func buildPrefixTree(byteFrequencies *dictionary.Dictionary) *huffmanTreeNode {\n\ttree := new(priorityqueue.PriorityQueue)\n\tkeys := byteFrequencies.Keys()\n\n\tfor i := 0; i < keys.Size(); i++ {\n\t\tbyt := keys.MustGet(i)\n\t\tfrequency, _ := byteFrequencies.Get(byt)\n\n\t\ttree.Enqueue(frequency.(int), &huffmanTreeNode{frequency: frequency.(int), value: byt.(byte)})\n\t}\n\n\tfor tree.Size() > 1 {\n\t\taPrio, a := tree.Dequeue()\n\t\tbPrio, b := tree.Dequeue()\n\n\t\tnewPrio := aPrio + bPrio\n\n\t\tnode := &huffmanTreeNode{frequency: newPrio, left: a.(*huffmanTreeNode), right: b.(*huffmanTreeNode)}\n\n\t\ttree.Enqueue(newPrio, node)\n\t}\n\n\t_, root := tree.Dequeue()\n\n\treturn root.(*huffmanTreeNode)\n}",
  "func newEncoderDict(dictCap, bufSize int) (d *encoderDict, err error) {\n\tif !(1 <= dictCap && int64(dictCap) <= MaxDictCap) {\n\t\treturn nil, errors.New(\n\t\t\t\"lzma: dictionary capacity out of range\")\n\t}\n\tif bufSize < 1 {\n\t\treturn nil, errors.New(\n\t\t\t\"lzma: buffer size must be larger then zero\")\n\t}\n\tm, err := newHashTable(dictCap, 4)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbuf, err := newBuffer(dictCap + bufSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\topbuf, err := newOpBuffer(bufSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\td = &encoderDict{\n\t\tbuf:      *buf,\n\t\tops:      *opbuf,\n\t\tm:        m,\n\t\tcapacity: dictCap,\n\t}\n\treturn d, nil\n}",
  "func main() {\r\n\ttest := \"abcdefghijklmnopqrstuvwxyz\"\r\n\r\n\tsymFreqs := make(map[rune]int)\r\n\t// read each symbol and record the frequencies\r\n\tfor _, c := range test {\r\n\t\tsymFreqs[c]++\r\n\t}\r\n\r\n\t// example tree\r\n\texampleTree := buildTree(symFreqs)\r\n\r\n\t// print out results\r\n\tfmt.Println(\"SYMBOL\\tWEIGHT\\tHUFFMAN CODE\")\r\n\tprintCodes(exampleTree, []byte{})\r\n}",
  "func NewEncoder() Encoder { return Encoder{} }",
  "func NewEntropyEncoder(obs kanzi.OutputBitStream, ctx map[string]interface{},\n\tentropyType uint32) (kanzi.EntropyEncoder, error) {\n\tswitch entropyType {\n\n\tcase HUFFMAN_TYPE:\n\t\treturn NewHuffmanEncoder(obs)\n\n\tcase ANS0_TYPE:\n\t\treturn NewANSRangeEncoder(obs, 0)\n\n\tcase ANS1_TYPE:\n\t\treturn NewANSRangeEncoder(obs, 1)\n\n\tcase RANGE_TYPE:\n\t\treturn NewRangeEncoder(obs)\n\n\tcase FPAQ_TYPE:\n\t\treturn NewFPAQEncoder(obs)\n\n\tcase CM_TYPE:\n\t\tpredictor, _ := NewCMPredictor()\n\t\treturn NewBinaryEntropyEncoder(obs, predictor)\n\n\tcase TPAQ_TYPE:\n\t\tpredictor, _ := NewTPAQPredictor(&ctx)\n\t\treturn NewBinaryEntropyEncoder(obs, predictor)\n\n\tcase TPAQX_TYPE:\n\t\tpredictor, _ := NewTPAQPredictor(&ctx)\n\t\treturn NewBinaryEntropyEncoder(obs, predictor)\n\n\tcase NONE_TYPE:\n\t\treturn NewNullEntropyEncoder(obs)\n\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Unsupported entropy codec type: '%c'\", entropyType)\n\t}\n}",
  "func BuildJpegHuffmanTable(count_in, symbols []int, lut []HuffmanTableEntry) int {\n\tvar (\n\t\tcode       HuffmanTableEntry   // current table entry\n\t\ttable      []HuffmanTableEntry // next available space in table\n\t\tlength     int                 // current code length\n\t\tidx        int                 // symbol index\n\t\tkey        int                 // prefix code\n\t\treps       int                 // number of replicate key values in current table\n\t\tlow        int                 // low bits for current root entry\n\t\ttable_bits int                 // key length of current table\n\t\ttable_size int                 // size of current table\n\t\ttotal_size int                 // sum of root table size and 2nd level table sizes\n\t)\n\n\t// Make a local copy of the input bit length histogram.\n\tvar count [kJpegHuffmanMaxBitLength + 1]int\n\ttotal_count := 0\n\tfor length = 1; length <= kJpegHuffmanMaxBitLength; length++ {\n\t\tcount[length] = count_in[length]\n\t\ttotal_count += count[length]\n\t}\n\n\ttable = lut\n\t// table_delta used in go version, to work around pointer arithmetic\n\ttable_delta := 0\n\ttable_bits = kJpegHuffmanRootTableBits\n\ttable_size = 1 << uint(table_bits)\n\ttotal_size = table_size\n\n\t// Special case code with only one value.\n\tif total_count == 1 {\n\t\tcode.bits = 0\n\t\tcode.value = uint16(symbols[0])\n\t\tfor key = 0; key < total_size; key++ {\n\t\t\ttable[key] = code\n\t\t}\n\t\treturn total_size\n\t}\n\n\t// Fill in root table.\n\tkey = 0\n\tidx = 0\n\tfor length = 1; length <= kJpegHuffmanRootTableBits; length++ {\n\t\tfor ; count[length] > 0; count[length]-- {\n\t\t\tcode.bits = uint8(length)\n\t\t\tcode.value = uint16(symbols[idx])\n\t\t\tidx++\n\t\t\treps = 1 << uint(kJpegHuffmanRootTableBits-length)\n\t\t\tfor ; reps > 0; reps-- {\n\t\t\t\ttable[key] = code\n\t\t\t\tkey++\n\t\t\t}\n\t\t}\n\t}\n\n\t// Fill in 2nd level tables and add pointers to root table.\n\ttable = table[table_size:]\n\ttable_delta += table_size\n\ttable_size = 0\n\tlow = 0\n\tfor length = kJpegHuffmanRootTableBits + 1; length <= kJpegHuffmanMaxBitLength; length++ {\n\t\tfor ; count[length] > 0; count[length]-- {\n\t\t\t// Start a new sub-table if the previous one is full.\n\t\t\tif low >= table_size {\n\t\t\t\ttable = table[table_size:]\n\t\t\t\ttable_delta += table_size\n\t\t\t\ttable_bits = NextTableBitSize(count[:], length)\n\t\t\t\ttable_size = 1 << uint(table_bits)\n\t\t\t\ttotal_size += table_size\n\t\t\t\tlow = 0\n\t\t\t\tlut[key].bits = uint8(table_bits + kJpegHuffmanRootTableBits)\n\t\t\t\tlut[key].value = uint16(table_delta - key)\n\t\t\t\tkey++\n\t\t\t}\n\t\t\tcode.bits = uint8(length - kJpegHuffmanRootTableBits)\n\t\t\tcode.value = uint16(symbols[idx])\n\t\t\tidx++\n\t\t\treps = 1 << uint(table_bits-int(code.bits))\n\t\t\tfor ; reps > 0; reps-- {\n\t\t\t\ttable[low] = code\n\t\t\t\tlow++\n\t\t\t}\n\t\t}\n\t}\n\n\treturn total_size\n}",
  "func NewEncoder(f *Format, w *os.File) (*Encoder, error) {\n\tenc := &Encoder{w: w, f: f}\n\th := &hdr{}\n\tenc.h = h\n\tif e := h.Write(w); e != nil {\n\t\treturn nil, e\n\t}\n\tif e := f.Write(w); e != nil {\n\t\treturn nil, e\n\t}\n\td := &chunk{fourCc: _dat4Cc}\n\tif e := d.writeHdr(w); e != nil {\n\t\treturn nil, e\n\t}\n\tenc.p = 0\n\tenc.buf = make([]byte, f.Bytes()*f.Channels()*1024)\n\t//ef := f.Encoder()\n\t//enc.eFunc = ef\n\tenc.w = w\n\treturn enc, nil\n}",
  "func NewEncoder() Encoder {\n    return &encoder{}\n}",
  "func newHasher() *hasher {\n\th := &hasher{\n\t\th:       sha256.New(),\n\t\tvisited: map[uintptr]bool{},\n\t}\n\th.bw = bufio.NewWriterSize(h.h, h.h.BlockSize())\n\treturn h\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	NewHuffmanEncoderWithDict creates a new encoder given an existing dictionary. It prepares the encoder to write to the provided io.Writer. The order of the dictionary slice determines each byte's priority: earlier entries rank higher. 
 | 
	func NewHuffmanEncoderWithDict(wc io.Writer, dict []byte) *HuffmanEncoder {
	he := new(HuffmanEncoder)
	pQ := make(PriorityQueue, len(dict))
	MaxPri := len(dict)
	for i, v := range dict {
		pQ[i] = NewHNode(v, MaxPri-i) // prioritize in order of dict: earlier bytes rank higher
	}
	heap.Init(&pQ)
	// merge the two lowest-priority nodes until a single tree remains;
	// heap.Pop must be used here: heap.Interface's Pop method alone only
	// returns the last slice element, not the heap minimum
	for pQ.Len() > 1 {
		zero := heap.Pop(&pQ)
		l := zero.(Item)
		one := heap.Pop(&pQ)
		r := one.(Item)
		ht := NewHTree(l, r)
		heap.Push(&pQ, ht)
	}
	htree := heap.Pop(&pQ)
	root, ok := htree.(*HTree)
	if !ok {
		panic("Huffman Tree")
	}
	he.root = root
	he.dict = make(map[byte]Huffcode)
	filldict(he.root, "", he.dict)
	he.bw = bs.NewWriter(wc)
	return he
}
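
A hypothetical usage sketch: since priorities are assigned as MaxPri - i, bytes earlier in dict rank higher and, under the min-heap Huffman construction above, should receive the shorter codes. The dictionary contents are illustrative.

dict := []byte{'e', 't', 'a', 'o', 'i', 'n'} // 'e' gets the highest priority, hence the shortest code
var out bytes.Buffer
enc := NewHuffmanEncoderWithDict(&out, dict)
if err := enc.WriteHeader(); err != nil {
	panic(err)
}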
 | 
	[
  "func NewHuffmanEncoder(inp io.ReadSeeker, wc io.Writer) *HuffmanEncoder {\n\the := new(HuffmanEncoder)\n\tfreq := make(map[byte]int)\n\n\tvar b [1]byte\n\t// using the reader, count the frequency of bytes\n\tfor {\n\t\t_, err := inp.Read(b[:])\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tpanic(err)\n\t\t}\n\n\t\t_, ok := freq[b[0]]\n\t\tif !ok {\n\t\t\tfreq[b[0]] = 0\n\t\t}\n\t\tfreq[b[0]]++\n\t}\n\t_, err := inp.Seek(0, io.SeekStart)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tpQ := make(PriorityQueue, len(freq))\n\ti := 0\n\tfor v, f := range freq {\n\t\tpQ[i] = NewHNode(v, f)\n\t\ti++\n\t}\n\n\theap.Init(&pQ)\n\n\tfor pQ.Len() > 1 {\n\t\tzero := pQ.Pop()\n\t\tl := zero.(Item)\n\t\tone := pQ.Pop()\n\t\tr := one.(Item)\n\t\tht := NewHTree(l, r)\n\t\theap.Push(&pQ, ht)\n\t}\n\n\thtree := pQ.Pop()\n\troot, ok := htree.(*HTree)\n\tif !ok {\n\t\tpanic(\"Huffman Tree\")\n\t}\n\the.root = root\n\the.dict = make(map[byte]Huffcode)\n\tfilldict(he.root, \"\", he.dict)\n\the.bw = bs.NewWriter(wc)\n\treturn he\n}",
  "func newEncoder(out io.Writer, order binary.ByteOrder, fds []int) *encoder {\n\tenc := newEncoderAtOffset(out, 0, order, fds)\n\treturn enc\n}",
  "func NewEncoder(w io.Writer) io.WriteCloser {}",
  "func NewEncoder(enc *Encoding, w io.Writer) io.WriteCloser {}",
  "func Encode(in, out *os.File) {\n\tcounts := count(in)\n\tp := makePQ(counts)\n\th := makeHuffman(p)\n\tm := make(map[byte]string)\n\tfillMap(h, m, \"\")\n\tfor k, v := range m {\n\t\tfmt.Printf(\"k: %c, v: %s\\n\", k, v)\n\t}\n}",
  "func newEncoderDict(dictCap, bufSize int) (d *encoderDict, err error) {\n\tif !(1 <= dictCap && int64(dictCap) <= MaxDictCap) {\n\t\treturn nil, errors.New(\n\t\t\t\"lzma: dictionary capacity out of range\")\n\t}\n\tif bufSize < 1 {\n\t\treturn nil, errors.New(\n\t\t\t\"lzma: buffer size must be larger then zero\")\n\t}\n\tm, err := newHashTable(dictCap, 4)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbuf, err := newBuffer(dictCap + bufSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\topbuf, err := newOpBuffer(bufSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\td = &encoderDict{\n\t\tbuf:      *buf,\n\t\tops:      *opbuf,\n\t\tm:        m,\n\t\tcapacity: dictCap,\n\t}\n\treturn d, nil\n}",
  "func NewEncoder() Encoder { return Encoder{} }",
  "func NewEncoder(w io.Writer, compressed bool) *Encoder {\n\tif compressed {\n\t\treturn &Encoder{w: gzip.NewWriter(w), compressed: true}\n\t}\n\treturn &Encoder{w: w}\n}",
  "func NewEncoder(w io.WriteSeeker, sampleRate, bitDepth, numChans, audioFormat int) *Encoder {\n\treturn &Encoder{\n\t\tw:              w,\n\t\tbuf:            bytes.NewBuffer(make([]byte, 0, bytesNumFromDuration(time.Minute, sampleRate, bitDepth)*numChans)),\n\t\tSampleRate:     sampleRate,\n\t\tBitDepth:       bitDepth,\n\t\tNumChans:       numChans,\n\t\tWavAudioFormat: audioFormat,\n\t}\n}",
  "func NewEncoder() Encoder {\n    return &encoder{}\n}",
  "func NewWriterLevelDict(w io.Writer, level int, dict []byte) (*Writer, error) {\n\tif level < HuffmanOnly || level > BestCompression {\n\t\treturn nil, fmt.Errorf(\"zlib: invalid compression level: %d\", level)\n\t}\n\treturn &Writer{\n\t\tw:     w,\n\t\tlevel: level,\n\t\tdict:  dict,\n\t}, nil\n}",
  "func (d Digester) NewWriterShort() Writer {\n\thw := crypto.Hash(d).New()\n\treturn Writer{h: crypto.Hash(d), hw: hw, w: bufio.NewWriterSize(hw, digesterBufferSize)}\n}",
  "func NewEncoder(H *http.Header) *Encoder {\n\tif H == nil {\n\t\tH = &http.Header{}\n\t}\n\treturn &Encoder{h: *H}\n}",
  "func BuildHuffmanCode(depth []byte, counts, values []int) {\n\tfor i := 0; i < JpegHistogram_kSize; i++ {\n\t\tif depth[i] > 0 {\n\t\t\tcounts[depth[i]]++\n\t\t}\n\t}\n\tvar offset [kJpegHuffmanMaxBitLength + 1]int\n\tfor i := 1; i <= kJpegHuffmanMaxBitLength; i++ {\n\t\toffset[i] = offset[i-1] + counts[i-1]\n\t}\n\tfor i := 0; i < JpegHistogram_kSize; i++ {\n\t\tif depth[i] > 0 {\n\t\t\tvalues[offset[depth[i]]] = i\n\t\t\toffset[depth[i]]++\n\t\t}\n\t}\n}",
  "func NewEncoder(id uint32, w io.Writer) *Encoder {\n\treturn &Encoder{serial: id, w: w}\n}",
  "func NewEncodingTree(freq map[uint8]uint) *Node {\n\tvar head Node // Fictitious head\n\n\tfor i, v := range freq {\n\t\tnode := &Node{\n\t\t\tvalue:  i,\n\t\t\tweight: v,\n\t\t}\n\t\thead.insert(node)\n\t}\n\n\tfor head.next != nil && head.next.next != nil {\n\t\tl := head.popFirst()\n\t\tr := head.popFirst()\n\n\t\tnode := join(l, r)\n\t\thead.insert(node)\n\t}\n\n\t// Fictitious head point to tree root\n\tif head.next != nil {\n\t\thead.next.prev = nil\n\t}\n\treturn head.next\n}",
  "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{w}\n}",
  "func NewEncoder(enc *Encoding, w io.Writer) io.WriteCloser {\n\treturn &encoder{enc: enc, w: w}\n}",
  "func NewEncoder(f *Format, w *os.File) (*Encoder, error) {\n\tenc := &Encoder{w: w, f: f}\n\th := &hdr{}\n\tenc.h = h\n\tif e := h.Write(w); e != nil {\n\t\treturn nil, e\n\t}\n\tif e := f.Write(w); e != nil {\n\t\treturn nil, e\n\t}\n\td := &chunk{fourCc: _dat4Cc}\n\tif e := d.writeHdr(w); e != nil {\n\t\treturn nil, e\n\t}\n\tenc.p = 0\n\tenc.buf = make([]byte, f.Bytes()*f.Channels()*1024)\n\t//ef := f.Encoder()\n\t//enc.eFunc = ef\n\tenc.w = w\n\treturn enc, nil\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Writes the Huffman tree that will be used for decoding. XXX: the header should probably also store the uncompressed file size (in bytes), which would let the reader know how big the decoded file should be. It might make sense to include the compressed size as well, as a sanity check that junk isn't being appended to the end of the file. 
 | 
	func (enc *HuffmanEncoder) WriteHeader() error {
	// for iterative tree walking use savedict; for recursive walking use rsavedict:
	// if err := savedict(enc.bw, enc.root); err != nil {
	if err := rsavedict(enc.bw, enc.root); err != nil { // recursive version
		return err
	}
	return enc.bw.WriteBit(bs.Zero) // end-of-dictionary indicator
} 
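
A sketch of the size-prefixed header suggested by the XXX note above. Since only WriteBit and bs.Zero appear in the original, this assumes a matching bs.One constant and emits the 64-bit length bit by bit; the method name and layout are hypothetical, not part of the original API.

// WriteHeaderWithSize is a hypothetical variant that prefixes the header with
// the uncompressed input length so a decoder knows how many bytes to expect.
func (enc *HuffmanEncoder) WriteHeaderWithSize(uncompressed uint64) error {
	// emit the length MSB-first through the bit writer; bs.One is assumed
	// to exist alongside the bs.Zero constant used elsewhere in this file
	for i := 63; i >= 0; i-- {
		bit := bs.Zero
		if uncompressed&(1<<uint(i)) != 0 {
			bit = bs.One
		}
		if err := enc.bw.WriteBit(bit); err != nil {
			return err
		}
	}
	return enc.WriteHeader() // then the tree and its end-of-dictionary bit
}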
 | 
	[
  "func (head *Node) WriteHeader(w *bitio.Writer, freq map[uint8]uint) (err error) {\n\tvar nEncoded uint32\n\tfor _, v := range freq {\n\t\tnEncoded += uint32(v)\n\t}\n\n\t// Write total number of encoded symbols\n\tw.TryWriteBitsUnsafe(uint64(nEncoded), 32)\n\n\t// Write total number of symbols in graph\n\tw.TryWriteBitsUnsafe(uint64(len(freq)), 8)\n\n\t// Write encoding tree information\n\tif err = head.writeHeader(w); err != nil {\n\t\treturn err\n\t}\n\tw.TryWriteBitsUnsafe(0, 1)\n\treturn w.TryError\n}",
  "func Encode(in, out *os.File) {\n\tcounts := count(in)\n\tp := makePQ(counts)\n\th := makeHuffman(p)\n\tm := make(map[byte]string)\n\tfillMap(h, m, \"\")\n\tfor k, v := range m {\n\t\tfmt.Printf(\"k: %c, v: %s\\n\", k, v)\n\t}\n}",
  "func (decTree *Tree) WriteTree(filename string) {\n\tfile, err := os.Create(filename)\n\tif err != nil {\n\t\tfmt.Println(\"Error opening output file: \", filename)\n\t\treturn\n\t}\n\n\tcurrNode := decTree\n\tvar treeStack []*Tree\n\n\ttreeLen := 1\n\tfor treeLen != 0 {\n\t\tfile.WriteString(nodeToStr(currNode.Details))\n\n\t\tif currNode.Details.Leaf == false {\n\t\t\ttreeStack = append(treeStack, currNode.Right)\n\t\t\tcurrNode = currNode.Left\n\t\t\ttreeLen++\n\t\t} else {\n\t\t\t//get the length of the tree and set curr to the last element in the list\n\t\t\ttreeLen--\n\n\t\t\tif treeLen > 0 {\n\t\t\t\tcurrNode, treeStack = treeStack[treeLen-1], treeStack[:treeLen-1]\n\t\t\t}\n\t\t}\n\t}\n\n\tfile.Close()\n}",
  "func NewHuffmanEncoder(inp io.ReadSeeker, wc io.Writer) *HuffmanEncoder {\n\the := new(HuffmanEncoder)\n\tfreq := make(map[byte]int)\n\n\tvar b [1]byte\n\t// using the reader, count the frequency of bytes\n\tfor {\n\t\t_, err := inp.Read(b[:])\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tpanic(err)\n\t\t}\n\n\t\t_, ok := freq[b[0]]\n\t\tif !ok {\n\t\t\tfreq[b[0]] = 0\n\t\t}\n\t\tfreq[b[0]]++\n\t}\n\t_, err := inp.Seek(0, io.SeekStart)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tpQ := make(PriorityQueue, len(freq))\n\ti := 0\n\tfor v, f := range freq {\n\t\tpQ[i] = NewHNode(v, f)\n\t\ti++\n\t}\n\n\theap.Init(&pQ)\n\n\tfor pQ.Len() > 1 {\n\t\tzero := pQ.Pop()\n\t\tl := zero.(Item)\n\t\tone := pQ.Pop()\n\t\tr := one.(Item)\n\t\tht := NewHTree(l, r)\n\t\theap.Push(&pQ, ht)\n\t}\n\n\thtree := pQ.Pop()\n\troot, ok := htree.(*HTree)\n\tif !ok {\n\t\tpanic(\"Huffman Tree\")\n\t}\n\the.root = root\n\the.dict = make(map[byte]Huffcode)\n\tfilldict(he.root, \"\", he.dict)\n\the.bw = bs.NewWriter(wc)\n\treturn he\n}",
  "func BuildHuffmanCode(depth []byte, counts, values []int) {\n\tfor i := 0; i < JpegHistogram_kSize; i++ {\n\t\tif depth[i] > 0 {\n\t\t\tcounts[depth[i]]++\n\t\t}\n\t}\n\tvar offset [kJpegHuffmanMaxBitLength + 1]int\n\tfor i := 1; i <= kJpegHuffmanMaxBitLength; i++ {\n\t\toffset[i] = offset[i-1] + counts[i-1]\n\t}\n\tfor i := 0; i < JpegHistogram_kSize; i++ {\n\t\tif depth[i] > 0 {\n\t\t\tvalues[offset[depth[i]]] = i\n\t\t\toffset[depth[i]]++\n\t\t}\n\t}\n}",
  "func (e *encoder) writeDHT(nComponent int) {\n\tmarkerlen := 2\n\tspecs := theHuffmanSpec[:]\n\tif nComponent == 1 {\n\t\t// Drop the Chrominance tables.\n\t\tspecs = specs[:2]\n\t}\n\tfor _, s := range specs {\n\t\tmarkerlen += 1 + 16 + len(s.value)\n\t}\n\te.writeMarkerHeader(dhtMarker, markerlen)\n\tfor i, s := range specs {\n\t\te.writeByte(\"\\x00\\x10\\x01\\x11\"[i])\n\t\te.write(s.count[:])\n\t\te.write(s.value)\n\t}\n}",
  "func (z *Writer) writeHeader() (err error) {\n\tz.wroteHeader = true\n\t// ZLIB has a two-byte header (as documented in RFC 1950).\n\t// The first four bits is the CINFO (compression info), which is 7 for the default deflate window size.\n\t// The next four bits is the CM (compression method), which is 8 for deflate.\n\tz.scratch[0] = 0x78\n\t// The next two bits is the FLEVEL (compression level). The four values are:\n\t// 0=fastest, 1=fast, 2=default, 3=best.\n\t// The next bit, FDICT, is set if a dictionary is given.\n\t// The final five FCHECK bits form a mod-31 checksum.\n\tswitch z.level {\n\tcase -2, 0, 1:\n\t\tz.scratch[1] = 0 << 6\n\tcase 2, 3, 4, 5:\n\t\tz.scratch[1] = 1 << 6\n\tcase 6, -1:\n\t\tz.scratch[1] = 2 << 6\n\tcase 7, 8, 9:\n\t\tz.scratch[1] = 3 << 6\n\tdefault:\n\t\tpanic(\"unreachable\")\n\t}\n\tif z.dict != nil {\n\t\tz.scratch[1] |= 1 << 5\n\t}\n\tz.scratch[1] += uint8(31 - (uint16(z.scratch[0])<<8+uint16(z.scratch[1]))%31)\n\tif _, err = z.w.Write(z.scratch[0:2]); err != nil {\n\t\treturn err\n\t}\n\tif z.dict != nil {\n\t\t// The next four bytes are the Adler-32 checksum of the dictionary.\n\t\tbinary.BigEndian.PutUint32(z.scratch[:], adler32.Checksum(z.dict))\n\t\tif _, err = z.w.Write(z.scratch[0:4]); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
  "func (fmt *FixedMerkleTree) Write(b []byte) (int, error) {\n\n\tfmt.writeLock.Lock()\n\tdefer fmt.writeLock.Unlock()\n\tif fmt.isFinal {\n\t\treturn 0, goError.New(\"cannot write. Tree is already finalized\")\n\t}\n\n\tfor i, j := 0, MaxMerkleLeavesSize-fmt.writeCount; i < len(b); i, j = j, j+MaxMerkleLeavesSize {\n\t\tif j > len(b) {\n\t\t\tj = len(b)\n\t\t}\n\t\tprevWriteCount := fmt.writeCount\n\t\tfmt.writeCount += int(j - i)\n\t\tcopy(fmt.writeBytes[prevWriteCount:fmt.writeCount], b[i:j])\n\n\t\tif fmt.writeCount == MaxMerkleLeavesSize {\n\t\t\t// data fragment reached 64KB, so send this slice to write to leaf hashes\n\t\t\terr := fmt.writeToLeaves(fmt.writeBytes)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tfmt.writeCount = 0 // reset writeCount\n\t\t}\n\t}\n\treturn len(b), nil\n}",
  "func (z *Writer) writeHeader() (err error) {\n\tz.wroteHeader = true\n\t// ZLIB has a two-byte header (as documented in RFC 1950).\n\t// The first four bits is the CINFO (compression info), which is 7 for the default deflate window size.\n\t// The next four bits is the CM (compression method), which is 8 for deflate.\n\tz.scratch[0] = 0x78\n\t// The next two bits is the FLEVEL (compression level). The four values are:\n\t// 0=fastest, 1=fast, 2=default, 3=best.\n\t// The next bit, FDICT, is set if a dictionary is given.\n\t// The final five FCHECK bits form a mod-31 checksum.\n\tswitch z.level {\n\tcase -2, 0, 1:\n\t\tz.scratch[1] = 0 << 6\n\tcase 2, 3, 4, 5:\n\t\tz.scratch[1] = 1 << 6\n\tcase 6, -1:\n\t\tz.scratch[1] = 2 << 6\n\tcase 7, 8, 9:\n\t\tz.scratch[1] = 3 << 6\n\tdefault:\n\t\tpanic(\"unreachable\")\n\t}\n\tif z.dict != nil {\n\t\tz.scratch[1] |= 1 << 5\n\t}\n\tz.scratch[1] += uint8(31 - (uint16(z.scratch[0])<<8+uint16(z.scratch[1]))%31)\n\tif _, err = z.w.Write(z.scratch[0:2]); err != nil {\n\t\treturn err\n\t}\n\tif z.dict != nil {\n\t\t// The next four bytes are the Adler-32 checksum of the dictionary.\n\t\tchecksum := adler32.Checksum(z.dict)\n\t\tz.scratch[0] = uint8(checksum >> 24)\n\t\tz.scratch[1] = uint8(checksum >> 16)\n\t\tz.scratch[2] = uint8(checksum >> 8)\n\t\tz.scratch[3] = uint8(checksum >> 0)\n\t\tif _, err = z.w.Write(z.scratch[0:4]); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif z.compressor == nil {\n\t\t// Initialize deflater unless the Writer is being reused\n\t\t// after a Reset call.\n\t\tz.compressor, err = flate.NewWriterDict(z.w, z.level, z.dict)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tz.digest = adler32.New()\n\t}\n\treturn nil\n}",
  "func (p *GameTree) writeTree(w *bufio.Writer, n TreeNodeIdx, needs bool, nMov int, nMovPerLine int) (err error) {\n\tdefer u(tr(\"writeTree\"))\n\tif needs == true {\n\t\tif nMov > 0 {\n\t\t\terr = w.WriteByte('\\n')\n\t\t\tnMov = 0\n\t\t}\n\t\terr = w.WriteByte('(')\n\t}\n\tif err == nil {\n\t\tif nMov == nMovPerLine {\n\t\t\terr = w.WriteByte('\\n')\n\t\t\tnMov = 0\n\t\t}\n\t\terr = w.WriteByte(';')\n\t\t// write the node\n\t\ttyp := p.treeNodes[n].TNodType\n\t\tswitch typ {\n\t\tcase GameInfoNode:\n\t\t\t//           fmt.Println(\"writing GameInfoNode\\n\")\n\t\t\terr = p.writeProperties(w, n, true)\n\t\tcase InteriorNode:\n\t\t\t//           fmt.Println(\"writing InteriorNode\\n\")\n\t\t\terr = p.writeProperties(w, n, false)\n\t\tcase BlackMoveNode:\n\t\t\t_, err = w.WriteString(\"B[\")\n\t\t\t_, err = w.Write(SGFCoords(ah.NodeLoc(p.treeNodes[n].propListOrNodeLoc), p.IsFF4()))\n\t\t\terr = w.WriteByte(']')\n\t\t\tnMov += 1\n\t\tcase WhiteMoveNode:\n\t\t\t_, err = w.WriteString(\"W[\")\n\t\t\t_, err = w.Write(SGFCoords(ah.NodeLoc(p.treeNodes[n].propListOrNodeLoc), p.IsFF4()))\n\t\t\terr = w.WriteByte(']')\n\t\t\tnMov += 1\n\t\tdefault:\n\t\t\tfmt.Println(\"*** unsupported TreeNodeType in writeTree\")\n\t\t\terr = errors.New(\"writeTree: unsupported TreeNodeType\" + strconv.FormatInt(int64(typ), 10))\n\t\t\treturn err\n\t\t}\n\t\tif err == nil {\n\t\t\t// write the children\n\t\t\tlastCh := p.treeNodes[n].Children\n\t\t\tif lastCh != nilTreeNodeIdx && err == nil {\n\t\t\t\tch := p.treeNodes[lastCh].NextSib\n\t\t\t\tchNeeds := (lastCh != ch)\n\t\t\t\terr = p.writeTree(w, ch, chNeeds, nMov, nMovPerLine)\n\t\t\t\tfor ch != lastCh && err == nil {\n\t\t\t\t\tch = p.treeNodes[ch].NextSib\n\t\t\t\t\t//\t\t\t\t\tnMov += 1\n\t\t\t\t\terr = p.writeTree(w, ch, chNeeds, nMov, nMovPerLine)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (err == nil) && (needs == true) {\n\t\t\t\terr = w.WriteByte(')')\n\t\t\t}\n\t\t}\n\t}\n\treturn err\n}",
  "func BuildJpegHuffmanTable(count_in, symbols []int, lut []HuffmanTableEntry) int {\n\tvar (\n\t\tcode       HuffmanTableEntry   // current table entry\n\t\ttable      []HuffmanTableEntry // next available space in table\n\t\tlength     int                 // current code length\n\t\tidx        int                 // symbol index\n\t\tkey        int                 // prefix code\n\t\treps       int                 // number of replicate key values in current table\n\t\tlow        int                 // low bits for current root entry\n\t\ttable_bits int                 // key length of current table\n\t\ttable_size int                 // size of current table\n\t\ttotal_size int                 // sum of root table size and 2nd level table sizes\n\t)\n\n\t// Make a local copy of the input bit length histogram.\n\tvar count [kJpegHuffmanMaxBitLength + 1]int\n\ttotal_count := 0\n\tfor length = 1; length <= kJpegHuffmanMaxBitLength; length++ {\n\t\tcount[length] = count_in[length]\n\t\ttotal_count += count[length]\n\t}\n\n\ttable = lut\n\t// table_delta used in go version, to work around pointer arithmetic\n\ttable_delta := 0\n\ttable_bits = kJpegHuffmanRootTableBits\n\ttable_size = 1 << uint(table_bits)\n\ttotal_size = table_size\n\n\t// Special case code with only one value.\n\tif total_count == 1 {\n\t\tcode.bits = 0\n\t\tcode.value = uint16(symbols[0])\n\t\tfor key = 0; key < total_size; key++ {\n\t\t\ttable[key] = code\n\t\t}\n\t\treturn total_size\n\t}\n\n\t// Fill in root table.\n\tkey = 0\n\tidx = 0\n\tfor length = 1; length <= kJpegHuffmanRootTableBits; length++ {\n\t\tfor ; count[length] > 0; count[length]-- {\n\t\t\tcode.bits = uint8(length)\n\t\t\tcode.value = uint16(symbols[idx])\n\t\t\tidx++\n\t\t\treps = 1 << uint(kJpegHuffmanRootTableBits-length)\n\t\t\tfor ; reps > 0; reps-- {\n\t\t\t\ttable[key] = code\n\t\t\t\tkey++\n\t\t\t}\n\t\t}\n\t}\n\n\t// Fill in 2nd level tables and add pointers to root table.\n\ttable = table[table_size:]\n\ttable_delta += table_size\n\ttable_size = 0\n\tlow = 0\n\tfor length = kJpegHuffmanRootTableBits + 1; length <= kJpegHuffmanMaxBitLength; length++ {\n\t\tfor ; count[length] > 0; count[length]-- {\n\t\t\t// Start a new sub-table if the previous one is full.\n\t\t\tif low >= table_size {\n\t\t\t\ttable = table[table_size:]\n\t\t\t\ttable_delta += table_size\n\t\t\t\ttable_bits = NextTableBitSize(count[:], length)\n\t\t\t\ttable_size = 1 << uint(table_bits)\n\t\t\t\ttotal_size += table_size\n\t\t\t\tlow = 0\n\t\t\t\tlut[key].bits = uint8(table_bits + kJpegHuffmanRootTableBits)\n\t\t\t\tlut[key].value = uint16(table_delta - key)\n\t\t\t\tkey++\n\t\t\t}\n\t\t\tcode.bits = uint8(length - kJpegHuffmanRootTableBits)\n\t\t\tcode.value = uint16(symbols[idx])\n\t\t\tidx++\n\t\t\treps = 1 << uint(table_bits-int(code.bits))\n\t\t\tfor ; reps > 0; reps-- {\n\t\t\t\ttable[low] = code\n\t\t\t\tlow++\n\t\t\t}\n\t\t}\n\t}\n\n\treturn total_size\n}",
  "func WriteTree(writer io.Writer, hierarchy *Hierarchy, includeEmpty bool) {\n\ttree := assembleTree(hierarchy)\n\tkeys := make([]string, len(tree))\n\ti := 0\n\tfor k := range tree {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\tsort.Strings(keys)\n\tfor _, key := range keys {\n\t\tb := tree[key]\n\t\twriteBranch(writer, b, \"\", hierarchy, includeEmpty)\n\t}\n}",
  "func (fs *FileSystem) Finalize(options FinalizeOptions) error {\n\tif fs.workspace == \"\" {\n\t\treturn fmt.Errorf(\"cannot finalize an already finalized filesystem\")\n\t}\n\n\t/*\n\t\tThere is nothing we can find about the order of files/directories, for any of:\n\t\t- inodes in inode table\n\t\t- entries in directory table\n\t\t- data in data section\n\t\t- fragments in fragment section\n\n\t\tto keep it simple, we will follow what mksquashfs on linux does, in the following order:\n\t\t- superblock at byte 0\n\t\t- compression options, if any, at byte 96\n\t\t- file data immediately following compression options (or superblock, if no compression options)\n\t\t- fragments immediately following file data\n\t\t- inode table\n\t\t- directory table\n\t\t- fragment table\n\t\t- export table\n\t\t- uid/gid lookup table\n\t\t- xattr table\n\n\t\tNote that until we actually copy and compress each section, we do not know the position of each subsequent\n\t\tsection. So we have to write one, keep track of it, then the next, etc.\n\n\n\t*/\n\n\tf := fs.file\n\tblocksize := int(fs.blocksize)\n\tcomp := compressionNone\n\tif options.Compression != nil {\n\t\tcomp = options.Compression.flavour()\n\t}\n\n\t// build out file and directory tree\n\t// this returns a slice of *finalizeFileInfo, each of which represents a directory\n\t// or file\n\tfileList, err := walkTree(fs.Workspace())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error walking tree: %v\", err)\n\t}\n\n\t// location holds where we are writing in our file\n\tvar (\n\t\tlocation int64\n\t\tb        []byte\n\t)\n\tlocation += superblockSize\n\tif options.Compression != nil {\n\t\tb = options.Compression.optionsBytes()\n\t\tif len(b) > 0 {\n\t\t\t_, _ = f.WriteAt(b, location)\n\t\t\tlocation += int64(len(b))\n\t\t}\n\t}\n\n\t// next write the file blocks\n\tcompressor := options.Compression\n\tif options.NoCompressData {\n\t\tcompressor = nil\n\t}\n\n\t// write file data blocks\n\t//\n\tdataWritten, err := writeDataBlocks(fileList, f, fs.workspace, blocksize, compressor, location)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error writing file data blocks: %v\", err)\n\t}\n\tlocation += int64(dataWritten)\n\n\t//\n\t// write file fragments\n\t//\n\tfragmentBlockStart := location\n\tfragmentBlocks, fragsWritten, err := writeFragmentBlocks(fileList, f, fs.workspace, blocksize, options, fragmentBlockStart)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error writing file fragment blocks: %v\", err)\n\t}\n\tlocation += fragsWritten\n\n\t// extract extended attributes, and save them for later; these are written at the very end\n\t// this must be done *before* creating inodes, as inodes reference these\n\txattrs := extractXattrs(fileList)\n\n\t// Now we need to write the inode table and directory table. But\n\t// we have a chicken and an egg problem.\n\t//\n\t// * On the one hand, inodes are written to the disk before the directories, so we need to know\n\t// the size of the inode data.\n\t// * On the other hand, inodes for directories point to directories, specifically, the block and offset\n\t// where the pointed-at directory resides in the directory table.\n\t//\n\t// So we need inode table to create directory table, and directory table to create inode table.\n\t//\n\t// Further complicating matters is that the data in the\n\t// directory inodes relies on having the directory data ready. Specifically,\n\t// it includes:\n\t// - index of the block in the directory table where the dir info starts. 
Note\n\t//   that this is not just the directory *table* index, but the *block* index.\n\t// - offset within the block in the directory table where the dir info starts.\n\t//   Same notes as previous entry.\n\t// - size of the directory table entries for this directory, all of it. Thus,\n\t//   you have to have converted it all to bytes to get the information.\n\t//\n\t// The only possible way to do this is to run one, then the other, then\n\t// modify them. Until you generate both, you just don't know.\n\t//\n\t// Something that eases it a bit is that the block index in directory inodes\n\t// is from the start of the directory table, rather than start of archive.\n\t//\n\t// Order of execution:\n\t// 1. Write the file (not directory) data and fragments to disk.\n\t// 2. Create inodes for the files. We cannot write them yet because we need to\n\t//    add the directory entries before compression.\n\t// 3. Convert the directories to a directory table. And no, we cannot just\n\t//    calculate it based on the directory size, since some directories have\n\t//    one header, some have multiple, so the size of each directory, even\n\t//    given the number of files, can change.\n\t// 4. Create inodes for the directories and write them to disk\n\t// 5. Update the directory entries based on the inodes.\n\t// 6. Write directory table to disk\n\t//\n\t// if storing the inodes and directory table entirely in memory becomes\n\t// burdensome, use temporary scratch disk space to cache data in flight\n\n\t//\n\t// Build inodes for files. They are saved onto the fileList items themselves.\n\t//\n\t// build up a table of uids/gids we can store later\n\tidtable := map[uint32]uint16{}\n\t// get the inodes in order as a slice\n\tif err := createInodes(fileList, idtable, options); err != nil {\n\t\treturn fmt.Errorf(\"error creating file inodes: %v\", err)\n\t}\n\n\t// convert the inodes to data, while keeping track of where each\n\t// one is, so we can update the directory entries\n\tupdateInodeLocations(fileList)\n\n\t// create the directory table. We already have every inode and its position,\n\t// so we do not need to dip back into the inodes. The only changes will be\n\t// the block/offset references into the directory table, but those sizes do\n\t// not change. 
However, we will have to break out the headers, so this is not\n\t// completely finalized yet.\n\tdirectories := createDirectories(fileList[0])\n\n\t// create the final version of the directory table by creating the headers\n\t// and entries.\n\tpopulateDirectoryLocations(directories)\n\n\tif err := updateInodesFromDirectories(directories); err != nil {\n\t\treturn fmt.Errorf(\"error updating inodes with final directory data: %v\", err)\n\t}\n\n\t// write the inodes to the file\n\tinodesWritten, inodeTableLocation, err := writeInodes(fileList, f, compressor, location)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error writing inode data blocks: %v\", err)\n\t}\n\tlocation += int64(inodesWritten)\n\n\t// write directory data\n\tdirsWritten, dirTableLocation, err := writeDirectories(directories, f, compressor, location)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error writing directory data blocks: %v\", err)\n\t}\n\tlocation += int64(dirsWritten)\n\n\t// write fragment table\n\n\t/*\n\t\tThe indexCount is used for indexed lookups.\n\n\t\tThe index is stored at the end of the inode (after the filename) for extended directory\n\t\tThere is one entry for each block after the 0th, so if there is just one block, then there is no index\n\t\tThe filenames in the directory are sorted alphabetically. Each entry gives the first filename found in\n\t\tthe respective block, so if the name found is larger than yours, it is in the previous block\n\n\t\tb[0:4] uint32 index - number of bytes where this entry is from the beginning of this directory\n\t\tb[4:8] uint32 startBlock - number of bytes in the filesystem from the start of the directory table that this block is\n\t\tb[8:12] uint32 size - size of the name (-1)\n\t\tb[12:12+size] string name\n\n\t\tHere is an example of 1 entry:\n\n\t\tf11f 0000 0000 0000 0b00 0000 6669 6c65 6e61 6d65 5f34 3638\n\n\t\tb[0:4] index 0x1ff1\n\t\tb[4:8] startBlock 0x00\n\t\tb[8:12] size 0x0b (+1 for a total of 0x0c = 12)\n\t\tb[12:24] name filename_468\n\t*/\n\n\t// TODO:\n\t/*\n\t\t FILL IN:\n\t\t - xattr table\n\n\t\tALSO:\n\t\t- we have been treating every file like it is a normal file, but need to handle all of the special cases:\n\t\t\t\t- symlink, IPC, block/char device, hardlink\n\t\t- deduplicate values in xattrs\n\t\t- utilize options to: not add xattrs; not compress things; etc.\n\t\t- blockPosition calculations appear to be off\n\n\t*/\n\n\t// write the fragment table and its index\n\tfragmentTableWritten, fragmentTableLocation, err := writeFragmentTable(fragmentBlocks, fragmentBlockStart, f, compressor, location)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error writing fragment table: %v\", err)\n\t}\n\tlocation += int64(fragmentTableWritten)\n\n\t// write the export table\n\tvar (\n\t\texportTableLocation uint64\n\t\texportTableWritten  int\n\t)\n\tif !options.NonExportable {\n\t\texportTableWritten, exportTableLocation, err = writeExportTable(fileList, f, compressor, location)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error writing export table: %v\", err)\n\t\t}\n\t\tlocation += int64(exportTableWritten)\n\t}\n\n\t// write the uidgid table\n\tidTableWritten, idTableLocation, err := writeIDTable(idtable, f, compressor, location)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error writing uidgid table: %v\", err)\n\t}\n\tlocation += int64(idTableWritten)\n\n\t// write the xattrs\n\tvar xAttrsLocation uint64\n\tif len(xattrs) == 0 {\n\t\txAttrsLocation = noXattrSuperblockFlag\n\t} else {\n\t\tvar xAttrsWritten int\n\t\txAttrsWritten, xAttrsLocation, err = writeXattrs(xattrs, f, compressor, location)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error writing xattrs table: %v\", err)\n\t\t}\n\t\tlocation += int64(xAttrsWritten)\n\t}\n\n\t// update and write the superblock\n\t// keep in mind that the superblock always needs to have a valid compression.\n\t// if there is no compression used, mark it as option gzip, and set all of the\n\t// flags to indicate that nothing is compressed.\n\tif comp == compressionNone {\n\t\tcomp = compressionGzip\n\t\toptions.NoCompressData = true\n\t\toptions.NoCompressInodes = true\n\t\toptions.NoCompressFragments = true\n\t\toptions.NoCompressXattrs = true\n\t}\n\tsb := &superblock{\n\t\tblocksize:           uint32(blocksize),\n\t\tcompression:         comp,\n\t\tinodes:              uint32(len(fileList)),\n\t\txattrTableStart:     xAttrsLocation,\n\t\tfragmentCount:       uint32(len(fragmentBlocks)),\n\t\tmodTime:             time.Now(),\n\t\tsize:                uint64(location),\n\t\tversionMajor:        4,\n\t\tversionMinor:        0,\n\t\tidTableStart:        idTableLocation,\n\t\texportTableStart:    exportTableLocation,\n\t\tinodeTableStart:     inodeTableLocation,\n\t\tidCount:             uint16(len(idtable)),\n\t\tdirectoryTableStart: dirTableLocation,\n\t\tfragmentTableStart:  fragmentTableLocation,\n\t\trootInode:           &inodeRef{fileList[0].inodeLocation.block, fileList[0].inodeLocation.offset},\n\t\tsuperblockFlags: superblockFlags{\n\t\t\tuncompressedInodes:    options.NoCompressInodes,\n\t\t\tuncompressedData:      options.NoCompressData,\n\t\t\tuncompressedFragments: options.NoCompressFragments,\n\t\t\tuncompressedXattrs:    options.NoCompressXattrs,\n\t\t\tnoFragments:           options.NoFragments,\n\t\t\tnoXattrs:              !options.Xattrs,\n\t\t\texportable:            !options.NonExportable,\n\t\t},\n\t}\n\n\t// write the superblock\n\tsbBytes := sb.toBytes()\n\tif _, err := f.WriteAt(sbBytes, 0); err != nil {\n\t\treturn fmt.Errorf(\"failed to write superblock: %v\", err)\n\t}\n\n\t// finish by setting as finalized\n\tfs.workspace = \"\"\n\treturn nil\n}",
  "func NewEncodingTree(freq map[uint8]uint) *Node {\n\tvar head Node // Fictitious head\n\n\tfor i, v := range freq {\n\t\tnode := &Node{\n\t\t\tvalue:  i,\n\t\t\tweight: v,\n\t\t}\n\t\thead.insert(node)\n\t}\n\n\tfor head.next != nil && head.next.next != nil {\n\t\tl := head.popFirst()\n\t\tr := head.popFirst()\n\n\t\tnode := join(l, r)\n\t\thead.insert(node)\n\t}\n\n\t// Fictitious head point to tree root\n\tif head.next != nil {\n\t\thead.next.prev = nil\n\t}\n\treturn head.next\n}",
  "func (enc *HuffmanEncoder) ShowHuffTree() {\n\ttraverse(enc.root, \"\")\n}",
  "func CreateBinaryTree() {\n\tfmt.Fprintln(os.Stderr, \"CreateBinaryTree\")\n\tvar min1i, min2i, pos1, pos2 int\n\tvar point []int = make([]int, MAX_CODE_LENGTH)\n\tvar code []byte = make([]byte, MAX_CODE_LENGTH)\n\tvar count []int64 = make([]int64, vocab_size*2+1)\n\tvar binaryt []int = make([]int, vocab_size*2+1)\n\tvar parent_node []int = make([]int, vocab_size*2+1)\n\tfor a := 0; a < vocab_size; a++ {\n\t\tcount[a] = int64(vocab[a].cn)\n\t}\n\tfor a := vocab_size; a < vocab_size*2; a++ {\n\t\tcount[a] = 1e15\n\t}\n\tpos1 = vocab_size - 1\n\tpos2 = vocab_size\n\t// Following algorithm constructs the Huffman tree by adding one node at a time\n\tfor a := 0; a < vocab_size-1; a++ {\n\t\t// First, find two smallest nodes 'min1, min2'\n\t\tif pos1 >= 0 {\n\t\t\tif count[pos1] < count[pos2] {\n\t\t\t\tmin1i = pos1\n\t\t\t\tpos1--\n\t\t\t} else {\n\t\t\t\tmin1i = pos2\n\t\t\t\tpos2++\n\t\t\t}\n\t\t} else {\n\t\t\tmin1i = pos2\n\t\t\tpos2++\n\t\t}\n\t\tif pos1 >= 0 {\n\t\t\tif count[pos1] < count[pos2] {\n\t\t\t\tmin2i = pos1\n\t\t\t\tpos1--\n\t\t\t} else {\n\t\t\t\tmin2i = pos2\n\t\t\t\tpos2++\n\t\t\t}\n\t\t} else {\n\t\t\tmin2i = pos2\n\t\t\tpos2++\n\t\t}\n\t\tcount[vocab_size+a] = count[min1i] + count[min2i]\n\t\tparent_node[min1i] = vocab_size + a\n\t\tparent_node[min2i] = vocab_size + a\n\t\tbinaryt[min2i] = 1\n\t}\n\t// Now assign binary code to each vocabulary character\n\tfor a := 0; a < vocab_size; a++ {\n\t\tb := a\n\t\ti := 0\n\t\tfor {\n\t\t\tcode[i] = byte(binaryt[b])\n\t\t\tpoint[i] = b\n\t\t\ti++\n\t\t\tb = parent_node[b]\n\t\t\tif b == vocab_size*2-2 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tvocab[a].codelen = byte(i)\n\t\tvocab[a].point[0] = vocab_size - 2\n\t\tfor b = 0; b < i; b++ {\n\t\t\tvocab[a].code[i-b-1] = code[b]\n\t\t\tvocab[a].point[i-b] = point[b] - vocab_size\n\t\t}\n\t}\n}",
  "func (s *ShortenBlock) Write(offset int, data []byte) (int, error) {\n\tsize := len(data)\n\tlog.Debugf(\"writing %d bytes at offset %d\", size, offset)\n\tstartLeafIdx := offset / s.shortener.NodeSize()\n\tendLeafIdx := int(math.Ceil(float64(offset+size) / float64(s.shortener.NodeSize())))\n\n\tbytesWritten := 0\n\n\tfor leafIdx := startLeafIdx; leafIdx < endLeafIdx; leafIdx++ {\n\t\tlog.Debugf(\"writing to leaf %d of range (%d, %d)\", leafIdx, startLeafIdx, endLeafIdx)\n\t\tvar leaf *Node\n\t\tvar err error\n\t\tif leaf, err = s.getLeaf(int(leafIdx)); err != nil {\n\t\t\tlog.Debugf(\"could not retrieve leaf: %s\", err.Error())\n\t\t\treturn 0, nil\n\t\t}\n\n\t\tvar subWriteStart int\n\t\tvar subWriteEnd int\n\t\tif leafIdx == startLeafIdx {\n\t\t\tsubWriteStart = offset % s.shortener.NodeSize()\n\t\t} else {\n\t\t\tsubWriteStart = 0\n\t\t}\n\t\tif leafIdx == endLeafIdx-1 {\n\t\t\tsubWriteEnd = (offset + size) % s.shortener.NodeSize()\n\t\t} else {\n\t\t\tsubWriteEnd = s.shortener.NodeSize()\n\t\t}\n\n\t\tvar leafData []byte\n\t\tif leaf.id != \"\" {\n\t\t\tif leafData, err = s.cachedNodeRead(leaf.id); err != nil {\n\t\t\t\tlog.Errorf(\"could not read leaf data: %s\", err.Error())\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t} else {\n\t\t\tleafData = bytes.Repeat([]byte{0}, s.shortener.NodeSize())\n\t\t}\n\t\tlog.Tracef(\"concatenating old leaf data of length %d with range (%d, %d)\", len(leafData), subWriteStart, subWriteEnd)\n\t\tnewLeafData := leafData[:subWriteStart]\n\t\tnewLeafData = append(newLeafData, data[:subWriteEnd-subWriteStart]...)\n\t\tnewLeafData = append(newLeafData, leafData[int(math.Min(float64(subWriteEnd), float64(len(leafData)))):]...)\n\t\tdata = data[subWriteEnd-subWriteStart:]\n\n\t\terr = s.nodeWrite(leaf, newLeafData)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"fs.nodeWrite returned error: %s\", err.Error())\n\t\t}\n\t\tbytesWritten += subWriteEnd - subWriteStart\n\t}\n\n\treturn bytesWritten, nil\n}",
  "func compressPrefixTree(root *huffmanTreeNode, to *vector.Vector) {\n\tswitch isLeafNode(root) {\n\tcase true:\n\t\tto.Append(byte(1))\n\t\tto.Append(root.value)\n\tcase false:\n\t\tto.Append(byte(0))\n\t\tcompressPrefixTree(root.left, to)\n\t\tcompressPrefixTree(root.right, to)\n\t}\n}",
  "func (f Frame) Encode(w io.Writer, timestamp time.Time, compress bool) error {\n\tf.CorrectTimestamps(timestamp)\n\tf.Length = f.correctLength()\n\tf.Count = uint16(len(f.Blocks))\n\tf.Reserved1 = 1\n\n\tvar compressedBlockData []byte\n\tif compress {\n\t\tvar err error\n\t\tcompressedBlockData, err = f.compressBlocks()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tf.Compression = 1\n\t\tf.Length = uint32(40 + len(compressedBlockData))\n\t} else {\n\t\tf.Compression = 0\n\t}\n\n\tbuf := make([]byte, 40)\n\tcopy(buf[0:16], f.Preamble[:])\n\ttime := uint64(f.Time.UnixNano() / 1000000)\n\tbinary.LittleEndian.PutUint64(buf[16:24], time)\n\tbinary.LittleEndian.PutUint32(buf[24:28], f.Length)\n\tbinary.LittleEndian.PutUint16(buf[28:30], f.ConnectionType)\n\tbinary.LittleEndian.PutUint16(buf[30:32], f.Count)\n\tbuf[32] = f.Reserved1\n\tbuf[33] = f.Compression\n\tbinary.LittleEndian.PutUint16(buf[34:36], f.Reserved2)\n\tbinary.LittleEndian.PutUint32(buf[36:40], f.DecompressedLength)\n\n\t_, err := w.Write(buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif compress {\n\t\t_, err := w.Write(compressedBlockData)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tfor _, b := range f.Blocks {\n\t\t\terr := b.Encode(w)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\treturn nil\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Print the constructed Huffman tree 
 | 
	func (enc *HuffmanEncoder) ShowHuffTree() {
	traverse(enc.root, "")
} 
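
// A minimal usage sketch for ShowHuffTree. NewHuffmanEncoder is a
// hypothetical constructor (only ShowHuffTree and the traverse helper
// appear in this record), so treat this as illustrative, not the real API.
func exampleShowHuffTree() {
	enc := NewHuffmanEncoder([]byte("abracadabra")) // hypothetical constructor
	enc.ShowHuffTree()                              // walks from enc.root, printing one prefix code per leaf
}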
 | 
	[
  "func printCodes(tree HuffmanTree, prefix []byte) {\n\tswitch i := tree.(type) {\n\tcase HuffmanLeaf:\n\t\t// If this is a leaf node, then it contains one of the input\n\t\t// characters, print the character and its code from byte[]\n\t\tfmt.Printf(\"%c\\t%d\\t%s\\n\", i.value, i.freq, string(prefix))\n\tcase HuffmanNode:\n\t\t// Assign 0 to left edge and recur\n\t\tprefix = append(prefix, '0')\n\t\tprintCodes(i.left, prefix)\n\t\tprefix = prefix[:len(prefix)-1]\n\n\t\t// Assign 1 to right edge and recur\n\t\tprefix = append(prefix, '1')\n\t\tprintCodes(i.right, prefix)\n\t\tprefix = prefix[:len(prefix)-1]\n\t}\n}",
  "func printCodes(tree HuffmanTree, prefix []byte, codes *[]HuffmanCode){\n\tswitch i := tree.(type) {\n\tcase HuffmanLeaf:\n\t\t// If this is a leaf node, then it contains one of the input\n\t\t// characters, print the character and its code from byte[]\n\t\tc := HuffmanCode{\n\t\t\tvalue: i.value,\n\t\t\tfreq:  i.freq,\n\t\t\tcode:  string(prefix),\n\t\t}\n\t\t*codes = append(*codes, c)\n\tcase HuffmanNode:\n\t\t// Assign 0 to left edge and recur\n\t\tprefix = append(prefix, '0')\n\t\tprintCodes(i.left, prefix, codes)\n\t\tprefix = prefix[:len(prefix)-1]\n\n\t\t// Assign 1 to right edge and recur\n\t\tprefix = append(prefix, '1')\n\t\tprintCodes(i.right, prefix, codes)\n\t\tprefix = prefix[:len(prefix)-1]\n\t}\n}",
  "func PrintTree(tree *ToTree) {\n  k := len(tree.Branches)\n  n := 0\n  for n < k {\n    s := fmt.Sprintf(\"%%d %s %%%ds\\n\", tree.Branches[n].State, tree.Branches[n].Depth+len(tree.Branches[n].Content))\n    fmt.Printf(s, tree.Branches[n].LineStart, tree.Branches[n].Content)\n    PrintTree(tree.Branches[n])\n    n++\n  }\n}",
  "func (t *ASCIITree) PrintTree(w io.Writer) {\n\tancestorPrefix := \"\"\n\tfor _, parent := range t.Ancestors() {\n\t\tif parent.Level() <= 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif parent.Last() {\n\t\t\tancestorPrefix += \"  \"\n\t\t} else {\n\t\t\tancestorPrefix += \" │\"\n\t\t}\n\t}\n\n\tmyPrefix := \"\"\n\tmultilinePrefix := \"\"\n\tif t.Level() > 0 {\n\t\tif t.Last() {\n\t\t\tif t.Empty() {\n\t\t\t\tmyPrefix += \" └── \"\n\t\t\t\tmultilinePrefix += \"     \"\n\t\t\t} else {\n\t\t\t\tmyPrefix += \" └─┬ \"\n\t\t\t\tmultilinePrefix += \" └─┬ \"\n\t\t\t}\n\t\t} else {\n\t\t\tif t.Empty() {\n\t\t\t\tmyPrefix += \" ├── \"\n\t\t\t\tmultilinePrefix += \" │   \"\n\t\t\t} else {\n\t\t\t\tmyPrefix += \" ├─┬ \"\n\t\t\t\tmultilinePrefix += \" │ │ \"\n\t\t\t}\n\t\t}\n\t}\n\n\tif t.Text != \"\" {\n\t\tlines := strings.Split(t.Text, \"\\n\")\n\t\tfmt.Fprintf(w, \"%s%s%s\\n\", ancestorPrefix, myPrefix, lines[0])\n\t\tfor _, line := range lines[1:] {\n\t\t\tfmt.Fprintf(w, \"%s%s%s\\n\", ancestorPrefix, multilinePrefix, line)\n\t\t}\n\t}\n\n\tfor _, child := range t.children {\n\t\tchild.PrintTree(w)\n\t}\n}",
  "func (t *Tree) Print(w io.Writer, f IterateFunc, itemSiz int) {\n\n\tfmt.Fprintf(w, \"treeNode-+-Left \\t / Left High\\n\")\n\tfmt.Fprintf(w, \"     |      \\t = Equal\\n\")\n\tfmt.Fprintf(w, \"     +-Right\\t \\\\ Right High\\n\\n\")\n\n\tmaxHeight := t.Height()\n\n\tif f != nil && t.root != nil {\n\t\td := &printData{0, itemSiz, make([]byte, maxHeight), 0, f, w}\n\t\td.printer(t.root)\n\t}\n}",
  "func Print(root *Node) {\n\t// traverse traverses a subtree from the given node,\n\t// using the prefix code leading to this node, having the number of bits specified.\n\tvar traverse func(n *Node, code uint64, bits byte)\n\n\ttraverse = func(n *Node, code uint64, bits byte) {\n\t\tif n.Left == nil {\n\t\t\t// Leaf\n\t\t\tfmt.Printf(\"'%c': %0\"+strconv.Itoa(int(bits))+\"b\\n\", n.Value, code)\n\t\t\treturn\n\t\t}\n\t\tbits++\n\t\ttraverse(n.Left, code<<1, bits)\n\t\ttraverse(n.Right, code<<1+1, bits)\n\t}\n\n\ttraverse(root, 0, 0)\n}",
  "func print(tree *Tree) {\n\tif tree != nil {\n\t\tfmt.Println(\" Value\",tree.Value)\n\t\tfmt.Printf(\"Tree Node Left\")\n\t\tprint(tree.LeftNode)\n\t\tfmt.Printf(\"Tree Node Right\")\n\t\tprint(tree.RightNode)\n\t} else {\n\n\t}\n\tfmt.Printf(\"Nil\\n\")\n}",
  "func print(tree *Tree) {\n\tif tree != nil {\n\n\t\tfmt.Println(\" Value\", tree.Value)\n\t\tfmt.Printf(\"Tree Node Left\")\n\t\tprint(tree.LeftNode)\n\t\tfmt.Printf(\"Tree Node Right\")\n\t\tprint(tree.RightNode)\n\t} else {\n\t\tfmt.Printf(\"Nil\\n\")\n\t}\n}",
  "func print(tree *BinarySearchTree) {\n\tif tree != nil {\n\n\t\tfmt.Println(\" Value\", tree.rootNode.value)\n\t\tfmt.Printf(\"Root Tree Node\")\n\t\tprintTreeNode(tree.rootNode)\n\t\t//fmt.Printf(\"Tree Node Right\")\n\t\t//print(tree.rootNode.rightNode)\n\t} else {\n\t\tfmt.Printf(\"Nil\\n\")\n\t}\n}",
  "func main() {\r\n\ttest := \"abcdefghijklmnopqrstuvwxyz\"\r\n\r\n\tsymFreqs := make(map[rune]int)\r\n\t// read each symbol and record the frequencies\r\n\tfor _, c := range test {\r\n\t\tsymFreqs[c]++\r\n\t}\r\n\r\n\t// example tree\r\n\texampleTree := buildTree(symFreqs)\r\n\r\n\t// print out results\r\n\tfmt.Println(\"SYMBOL\\tWEIGHT\\tHUFFMAN CODE\")\r\n\tprintCodes(exampleTree, []byte{})\r\n}",
  "func (n *TreeNode) String() string {\n\tif n.leafnode {\n\t\treturn fmt.Sprintf(\"(leaf   [%3d %7d: %v %v])\\n\", n.byteid, n.freq, n.left, n.right)\n\t} else {\n\t\treturn fmt.Sprintf(\"(parent [%3d %7d: ...  ...])\", n.byteid, n.freq) +\n\t\t\tfmt.Sprintf(\"  \\t%v\", n.left) +\n\t\t\tfmt.Sprintf(\"  \\t%v\", n.right)\n\t}\n}",
  "func (bh *BinomialHeap) print() {\n    if bh.forest_head == nil {\n        fmt.Print(\"heap is empty.\")\n    }\n\n    for _, node := range nodeIterator(bh.forest_head) {\n        node.print_recursive(0)\n    }\n}",
  "func (p *CanonicalPrinter) tree(n syntax.Node) error {\n\n\t// Prime the position tracker with the first token to avoid printing\n\t// white-spaces before the first token.\n\tp.firstToken = true\n\n\tn.Inspect(func(n syntax.Node) bool {\n\n\t\t// We figure out spacing by looking at the token stream only.\n\t\t// Because the parser for creating a syntax tree is not yet\n\t\t// implemented.\n\t\tif !n.IsToken() {\n\t\t\treturn true\n\t\t}\n\n\t\t// Incorporate user-defined line breaks and token separators\n\t\t// into output stream.\n\t\tcurrPos := n.Span()\n\t\tswitch {\n\t\tcase currPos.Begin.Line > p.lastPos.Line:\n\t\t\tp.print(newline)\n\t\t\tif currPos.Begin.Line-p.lastPos.Line > 1 {\n\t\t\t\tp.print(newline)\n\t\t\t}\n\t\tcase currPos.Begin.Column > p.lastPos.Column:\n\t\t\tp.print(blank)\n\t\t}\n\t\tp.lastPos = currPos.End\n\n\t\tswitch k, s := n.Kind(), n.Text(); {\n\n\t\t// Increment indentation after opening {, [, (.\n\t\tcase k == syntax.LeftBrace, k == syntax.LeftBracket, k == syntax.LeftParen:\n\t\t\tp.print(s, indent)\n\n\t\t// Decrement indentation before closing }, ], ).\n\t\tcase k == syntax.RightBrace, k == syntax.RightBracket, k == syntax.RightParen:\n\t\t\tp.print(unindent, s)\n\n\t\t// Add space after comma\n\t\tcase k == syntax.Comma:\n\t\t\tp.print(\",\", blank)\n\n\t\t// Align assignments.\n\t\tcase k == syntax.Assign:\n\t\t\tp.print(blank, \":=\", blank)\n\n\t\t// Every line of a comment has to be indented individually.\n\t\tcase k == syntax.Comment:\n\t\t\tp.print(cell)\n\n\t\t\t// Before we split a comment into its lines, we have to\n\t\t\t// remove the trailing newline of `//` comments.\n\t\t\t//\n\t\t\t// This makes the logic of p.comment easier, because\n\t\t\t// printing `//` comments is then identical to printing\n\t\t\t// single line `/*` comments.\n\t\t\tp.comment(currPos.Begin.Column-1, strings.Split(strings.TrimSpace(s), \"\\n\"))\n\t\t\tif strings.HasSuffix(s, \"\\n\") {\n\t\t\t\tp.print(newline)\n\t\t\t}\n\n\t\t// Only literals may contain newlines and \\t and need to be quoted.\n\t\tcase n.Kind().IsLiteral():\n\t\t\tp.print(quote(s))\n\n\t\t// All other tokens are printed as is.\n\t\tdefault:\n\t\t\tp.print(s)\n\t\t}\n\t\treturn true\n\t})\n\n\t// Terminate the last line with a newline.\n\tif !p.firstToken {\n\t\tfmt.Fprint(p.w, \"\\n\")\n\t}\n\n\tif tw, ok := p.w.(*tabwriter.Writer); ok {\n\t\treturn tw.Flush()\n\t}\n\n\treturn nil\n}",
  "func printNode(n *node) {\n\tvar buf bytes.Buffer\n\n\tbuf.WriteString(fmt.Sprintf(\"[prefix: %s] \", n.Prefix))\n\tif n.Children != nil {\n\t\tbuf.WriteString(fmt.Sprintf(\"[child: %s] \", n.Children.Prefix))\n\t}\n\tif n.Next != nil {\n\t\tbuf.WriteString(fmt.Sprintf(\"[next: %s] \", n.Next.Prefix))\n\t}\n\tif n.Leaf != nil {\n\t\tbuf.WriteString(fmt.Sprintf(\"[value(key): %s]\", n.Leaf.Key))\n\t}\n\n\tprint(buf.String())\n}",
  "func (hr *HashRing) Print() string {\n\tkeys := hr.getSortedKeys()\n\ts := \"\"\n\tfor _, key := range keys {\n\t\tnode := hr.ring[key]\n\t\ts += fmt.Sprintf(\"%s<Degree: %v>\\n\", node.Print(), strconv.Itoa(key))\n\t}\n\treturn s\n}",
  "func (t *Tree) Print() {\n\tdfs(t.Root)\n}",
  "func (t *Trie) Show() {\n\tfmt.Println(\"root\")\n\tfmt.Println(\"|\")\n\tfor _, n := range t.root.getChilds() {\n\t\tn.show()\n\t}\n}",
  "func (node *Node) PrintStructure(indent int, character string) {\n\tfor i := 0; i < indent; i++ {\n\t\tfmt.Print(character)\n\t}\n\tfmt.Println(node.Data)\n\tfor _, child := range node.Children {\n\t\tchild.PrintStructure(indent+1, character)\n\t}\n\tif len(node.Children) == 0 {\n\t\treturn\n\t}\n\tfor i := 0; i < indent; i++ {\n\t\tfmt.Print(character)\n\t}\n\tfmt.Println(node.Data)\n}",
  "func (t *BuildTree) PrintTree(noColor bool) {\n\tfor _, node := range t.rootNodes {\n\t\tt.printTree(node, 0, noColor)\n\t}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	GenerateRequestToken generates a token for a check request 
 | 
	func GenerateRequestToken(proxy, uid, checkid int) (string, error) {
	claims := struct {
		Proxy   int `json:"proxy"`
		ID      int `json:"id"`
		CheckID int `json:"checkid"`
		jwt.StandardClaims
	}{
		proxy,
		uid,
		checkid,
		jwt.StandardClaims{
			ExpiresAt: time.Now().Add(time.Minute * 10).Unix(),
			Issuer:    "Server",
		},
	}
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	return token.SignedString([]byte(os.Getenv("JWTSecret")))
} 
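
// A minimal usage sketch: GenerateRequestToken signs with the JWTSecret
// environment variable, so it must be set before calling. The secret and
// the proxy/user/check IDs below are illustrative values, not real ones.
func exampleGenerateRequestToken() {
	_ = os.Setenv("JWTSecret", "dev-only-secret")
	token, err := GenerateRequestToken(1, 42, 7)
	if err != nil {
		log.Fatalf("could not sign token: %v", err)
	}
	fmt.Println(token) // a compact JWT: three dot-separated base64url segments
}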
 | 
	[
  "func GenerateToken(id int, request int, expiresAfter time.Duration) (string, error) {\n\n\tclaims := MyJwtClaims{\n\t\tID:      id,\n\t\tRequest: request,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(expiresAfter).Unix(),\n\t\t\tIssuer:    \"test\",\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\treturn token.SignedString(mySigningKey)\n}",
  "func GenerateToken(res http.ResponseWriter, req *http.Request) {\n\tlog.Printf(\"GenerateToken Received request: %v\", req)\n\t// only valid with PUT\n\tif req.Method != http.MethodPut {\n\t\treturn\n\t}\n\t// check that header contains valid ttl\n\trequestedTTL := req.Header.Get(tokenTTLHeader)\n\tvalidTTL, err := extractValidTTL(requestedTTL)\n\tif err != nil {\n\t\tlog.Printf(\"Something went wrong with ttl validation: %v with requested TTL: %v\", err.Error(), requestedTTL)\n\t\tserver.ReturnBadRequestResponse(res)\n\t\treturn\n\t}\n\n\tkey := make([]byte, 32)\n\t_, err = rand.Read(key)\n\tif err != nil {\n\t\tserver.FormatAndReturnTextResponse(res, \"Something went wrong with token creation\")\n\t\treturn\n\t}\n\n\ttokenValue := base64.StdEncoding.EncodeToString(key)\n\ttoken := v2Token{\n\t\tValue:     tokenValue,\n\t\tTTL:       validTTL,\n\t\tCreatedAt: time.Now(),\n\t}\n\tgeneratedTokens[token.Value] = token\n\tres.Header().Set(tokenTTLHeader, strconv.Itoa(token.TTL))\n\tserver.FormatAndReturnTextResponse(res, token.Value)\n}",
  "func RequestToken(w http.ResponseWriter, r *http.Request) {\n\t// get the token from the body\n\tvar requestData requestTokenRequest\n\terr := json.NewDecoder(r.Body).Decode(&requestData)\n\tif err != nil {\n\t\ttemplates.JSONError(w, err)\n\t\treturn\n\t}\n\n\t// read and validate the token\n\ttoken, err := jwt.ParseWithClaims(requestData.Token, &CustomClaims{}, func(token *jwt.Token) (interface{}, error) {\n\t\treturn []byte(AccessTokenKey), nil\n\t})\n\n\tif claims, ok := token.Claims.(*CustomClaims); ok && token.Valid {\n\t\tnow := time.Now().Unix()\n\t\tif now > claims.Expires {\n\t\t\ttemplates.JSONErrorExpiredToken(w)\n\t\t\treturn\n\t\t}\n\t\tif claims.TokenType != \"access-token\" {\n\t\t\ttemplates.JSONErrorMessage(w, \"The token is not valid\")\n\t\t\treturn\n\t\t}\n\n\t\t// create a request and refresh token\n\t\trequestToken, _ := createToken(claims.Username, claims.Namespace, \"request-token\")\n\t\trefreshToken, _ := createToken(claims.Username, claims.Namespace, \"refresh-token\")\n\t\tdata := requestTokenResponse{RequestToken: requestToken, RefreshToken: refreshToken}\n\t\ttemplates.JSON(w, data)\n\t} else {\n\t\ttemplates.JSONError(w, err)\n\t\treturn\n\t}\n\n}",
  "func CreateVerifyIdTokenRequest() (request *VerifyIdTokenRequest) {\n\trequest = &VerifyIdTokenRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"idaas-doraemon\", \"2021-05-20\", \"VerifyIdToken\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}",
  "func generateAppAuthTokenRequest(clientID string, clientSecret string) *github.AuthorizationRequest {\n\n\trand := randString()\n\tauth := github.AuthorizationRequest{\n\t\tNote:         github.String(\"App token: Note generated by test: \" + rand),\n\t\tScopes:       []github.Scope{github.ScopePublicRepo},\n\t\tFingerprint:  github.String(\"App token: Fingerprint generated by test: \" + rand),\n\t\tClientID:     github.String(clientID),\n\t\tClientSecret: github.String(clientSecret),\n\t}\n\n\treturn &auth\n}",
  "func GenerateToken(s *Server) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tvar data TokenParameter\n\n\t\tif err := c.BindJSON(&data); err != nil {\n\t\t\tc.JSON(http.StatusBadRequest, gin.H{\"status\": \"JSON Body is missing fields\"})\n\t\t\treturn\n\t\t}\n\n\t\tif err := data.Validate(); err != nil {\n\t\t\tc.JSON(http.StatusBadRequest, gin.H{\"status\": \"JSON Body has invalid data\"})\n\t\t\treturn\n\t\t}\n\n\t\tdeviceId := GetDeviceId(data.Device.Serial)\n\t\ttokenStr := GetTokenString(deviceId)\n\n\t\tif _, err := s.Redis.Do(\"SETEX\", tokenStr, LocalConfig.tokenLifetime, tokenStr); err != nil {\n\t\t\tc.JSON(http.StatusInternalServerError, gin.H{\"status\": \"Internal error\"})\n\t\t\treturn\n\t\t}\n\n\t\tc.JSON(http.StatusOK, gin.H{\n\t\t\t\"deviceid\": deviceId,\n\t\t\t\"token\":    tokenStr,\n\t\t\t\"ttl\":      LocalConfig.tokenLifetime,\n\t\t})\n\t}\n}",
  "func GenerateToken(m *models.User) (*AuthToken, error) {\n\tnowTime := time.Now()\n\texpireTime := nowTime.Add(24 * time.Hour)\n\n\tclaims := userStdClaims{\n\t\tUser: m,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expireTime.Unix(),\n\t\t\tIssuedAt:  time.Now().Unix(),\n\t\t\tIssuer:    \"gin-server-api\",\n\t\t},\n\t}\n\n\ttokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttoken, err := tokenClaims.SignedString(jwtSecret)\n\n\tauthToken := &AuthToken{Token: token, ExpiresAt: expireTime.Format(\"2006-01-02 15:04:05\")}\n\treturn authToken, err\n}",
  "func generateToken(message, privateKey string) (string, error) {\n\tkey, err := base64.URLEncoding.DecodeString(privateKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\th := hmac.New(sha256.New, key)\n\th.Write([]byte(message))\n\treturn base64.URLEncoding.EncodeToString(h.Sum(nil)), nil\n}",
  "func (t Token) generateToken() string {\n\tutoken, err := gonanoid.Nanoid(tokenlenght)\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n\treturn utoken\n}",
  "func (c *Cache) requestToken(req *http.Request) (string, error) {\n\tauthorization := req.Header.Get(\"Authorization\")\n\tif authorization == \"\" {\n\t\treturn \"\", failure.New(\"request contains no authorization header\")\n\t}\n\tfields := strings.Fields(authorization)\n\tif len(fields) != 2 || fields[0] != \"Bearer\" {\n\t\treturn \"\", failure.New(\"invalid authorization header: %q\", authorization)\n\t}\n\treturn fields[1], nil\n}",
  "func generateToken(user models.User) (string, error) {\n\tnow := time.Now()\n\texpiry := time.Now().Add(constants.AuthenticationTimeout)\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, datatransfers.JWTClaims{\n\t\tID:        user.ID,\n\t\tExpiresAt: expiry.Unix(),\n\t\tIssuedAt:  now.Unix(),\n\t})\n\treturn token.SignedString([]byte(config.AppConfig.JWTSecret))\n}",
  "func generateToken() (string, time.Time) {\n   now := time.Now();\n\n   randData := make([]byte, TOKEN_RANDOM_BYTE_LEN);\n   rand.Read(randData);\n\n   timeBinary := make([]byte, 8);\n   binary.LittleEndian.PutUint64(timeBinary, uint64(now.UnixNano() / 1000));\n\n   tokenData := bytes.Join([][]byte{timeBinary, randData}, []byte{});\n\n   return base64.URLEncoding.EncodeToString(tokenData), now;\n}",
  "func generatePersonalAuthTokenRequest() *github.AuthorizationRequest {\n\n\trand := randString()\n\tauth := github.AuthorizationRequest{\n\t\tNote:        github.String(\"Personal token: Note generated by test: \" + rand),\n\t\tScopes:      []github.Scope{github.ScopePublicRepo},\n\t\tFingerprint: github.String(\"Personal token: Fingerprint generated by test: \" + rand),\n\t}\n\n\treturn &auth\n}",
  "func GenerateActionsRunnerToken(ctx *context.PrivateContext) {\n\tvar genRequest private.GenerateTokenRequest\n\trd := ctx.Req.Body\n\tdefer rd.Close()\n\n\tif err := json.NewDecoder(rd).Decode(&genRequest); err != nil {\n\t\tlog.Error(\"%v\", err)\n\t\tctx.JSON(http.StatusInternalServerError, private.Response{\n\t\t\tErr: err.Error(),\n\t\t})\n\t\treturn\n\t}\n\n\towner, repo, err := parseScope(ctx, genRequest.Scope)\n\tif err != nil {\n\t\tlog.Error(\"%v\", err)\n\t\tctx.JSON(http.StatusInternalServerError, private.Response{\n\t\t\tErr: err.Error(),\n\t\t})\n\t}\n\n\ttoken, err := actions_model.GetUnactivatedRunnerToken(ctx, owner, repo)\n\tif errors.Is(err, util.ErrNotExist) {\n\t\ttoken, err = actions_model.NewRunnerToken(ctx, owner, repo)\n\t\tif err != nil {\n\t\t\terr := fmt.Sprintf(\"error while creating runner token: %v\", err)\n\t\t\tlog.Error(\"%v\", err)\n\t\t\tctx.JSON(http.StatusInternalServerError, private.Response{\n\t\t\t\tErr: err,\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t} else if err != nil {\n\t\terr := fmt.Sprintf(\"could not get unactivated runner token: %v\", err)\n\t\tlog.Error(\"%v\", err)\n\t\tctx.JSON(http.StatusInternalServerError, private.Response{\n\t\t\tErr: err,\n\t\t})\n\t\treturn\n\t}\n\n\tctx.PlainText(http.StatusOK, token.Token)\n}",
  "func generateNewAPIToken(ctx context.Context, serverURL string, verbose bool, decorator func(req *http.Request)) (string, error) {\n\tclient := http.Client{}\n\treq, err := http.NewRequest(http.MethodPost, util.UrlJoin(serverURL, fmt.Sprintf(\"/me/descriptorByName/jenkins.security.ApiTokenProperty/generateNewToken?newTokenName=%s\", url.QueryEscape(\"jx create jenkins token\"))), nil)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"building request to generate the API token\")\n\t}\n\treq = req.WithContext(ctx)\n\tdecorator(req)\n\tif verbose {\n\t\treq.Write(os.Stderr) //nolint:errcheck\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"execute generate API token request\")\n\t}\n\tdefer resp.Body.Close() //nolint:errcheck\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"reading API token from response body\")\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn \"\", fmt.Errorf(\"generate API token status code: %d, error: %s\", resp.StatusCode, string(body))\n\t}\n\n\ttype TokenData struct {\n\t\tTokenName  string `json:\"tokenName\"`\n\t\tTokenUuid  string `json:\"tokenUuid\"`\n\t\tTokenValue string `json:\"tokenValue\"`\n\t}\n\n\ttype TokenResponse struct {\n\t\tStatus string    `json:\"status\"`\n\t\tData   TokenData `json:\"data\"`\n\t}\n\ttokenResponse := &TokenResponse{}\n\tif err := json.Unmarshal(body, tokenResponse); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"parsing the API token from response\")\n\t}\n\treturn tokenResponse.Data.TokenValue, nil\n}",
  "func (p *portworxClient) tokenGenerator() (string, error) {\n\tif len(p.jwtSharedSecret) == 0 {\n\t\treturn \"\", nil\n\t}\n\n\tclaims := &auth.Claims{\n\t\tIssuer: p.jwtIssuer,\n\t\tName:   \"Stork\",\n\n\t\t// Unique id for stork\n\t\t// this id must be unique across all accounts accessing the px system\n\t\tSubject: p.jwtIssuer + \".\" + uniqueID,\n\n\t\t// Only allow certain calls\n\t\tRoles: []string{\"system.admin\"},\n\n\t\t// Be in all groups to have access to all resources\n\t\tGroups: []string{\"*\"},\n\t}\n\n\t// This never returns an error, but just in case, check the value\n\tsignature, err := auth.NewSignatureSharedSecret(p.jwtSharedSecret)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Set the token expiration\n\toptions := &auth.Options{\n\t\tExpiration:  time.Now().Add(time.Hour * 1).Unix(),\n\t\tIATSubtract: 1 * time.Minute,\n\t}\n\n\ttoken, err := auth.Token(claims, signature, options)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn token, nil\n}",
  "func NewRequestToken(index uint32) RequestToken {\n\treturn RequestToken{\n\t\tindex:    index,\n\t\tcreateAt: time.Now().UTC(),\n\t}\n}",
  "func GetRequestToken(r *http.Request) (string, error) {\n\t// Check for Authorization HTTP header\n\tif authorization := r.Header.Get(\"Authorization\"); len(authorization) > 7 && authorization[:7] == \"Bearer \" {\n\t\treturn authorization[7:], nil\n\t}\n\n\t// Check for session cookie\n\tcookie, err := r.Cookie(ocgateSessionCookieName)\n\tif err != nil || cookie.Value == \"\" {\n\t\treturn \"\", err\n\t}\n\treturn cookie.Value, nil\n}",
  "func GenerateRequestID() string {\n  h := sha1.New()\n  h.Write([]byte(time.Now().String()))\n  id := hex.EncodeToString(h.Sum(nil))\n  return id\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	DecodeRequestToken decodes a token for a check request 
 | 
	func DecodeRequestToken(ptoken string) (int, int, int, error) { // TODO: Return Values to Struct!
	token, err := jwt.Parse(ptoken, func(token *jwt.Token) (interface{}, error) {
		if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
		}
		return []byte(os.Getenv("JWTSecret")), nil
	})
	if err != nil {
		return 0, 0, 0, err
	}
	if claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {
		return int(claims["proxy"].(float64)), int(claims["id"].(float64)), int(claims["checkid"].(float64)), nil
	}
	// A parsed token that fails validation must not be reported as success.
	return 0, 0, 0, fmt.Errorf("invalid request token")
} 
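
// A minimal round-trip sketch: DecodeRequestToken must read the same
// JWTSecret environment variable that GenerateRequestToken signed with.
// The claim values are illustrative.
func exampleDecodeRequestToken() {
	token, err := GenerateRequestToken(1, 42, 7)
	if err != nil {
		log.Fatal(err)
	}
	proxy, uid, checkid, err := DecodeRequestToken(token)
	if err != nil {
		log.Fatalf("token rejected: %v", err)
	}
	fmt.Println(proxy, uid, checkid) // 1 42 7
}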
 | 
	[
  "func decodeVerifyRequest(_ context.Context, r interface{}) (interface{}, error) {\n\trq := r.(*pb.VerifyRequest)\n\n\treturn endpoint.VerifyRequest{\n\t\tToken: rq.Token,\n\t\tType:  rq.Type,\n\t\tCode:  rq.Code,\n\t}, nil\n}",
  "func (t *ExpireDelegationTokenRequest) Decode(d *Decoder, version int16) error {\n\tvar err error\n\tt.Hmac, err = d.Bytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\tt.ExpiryTimePeriodMs, err = d.Int64()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn err\n}",
  "func DecodeGrpcReqTokenSecretRequest(ctx context.Context, request interface{}) (interface{}, error) {\n\treq := request.(*TokenSecretRequest)\n\treturn req, nil\n}",
  "func DecodeVerifyJWTRequest(_ context.Context, r interface{}) (interface{}, error) {\n\treq := r.(*pb.VerifyJWTRequest)\n\treturn VerifyJWTRequest{\n\t\tJwt: req.Jwt,\n\t}, nil\n}",
  "func DecodeToken() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\n\t\ttokenStr := c.Request.Header.Get(\"Authorization\")\n\n\t\tuid, b := token.DecodeToken(tokenStr)\n\n\t\tif b {\n\t\t\tc.Set(common.TokenUid, uid)\n\t\t}\n\t\tc.Next()\n\t}\n}",
  "func DecodeGrpcRespTokenSecretRequest(ctx context.Context, response interface{}) (interface{}, error) {\n\treturn response, nil\n}",
  "func (t *RenewDelegationTokenRequest) Decode(d *Decoder, version int16) error {\n\tvar err error\n\tt.Hmac, err = d.Bytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\tt.RenewPeriodMs, err = d.Int64()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn err\n}",
  "func (m *Module) Decode(token string) (*csrfPayload, error) {\n\tobj, err := jose.ParseEncrypted(token)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb, err := obj.Decrypt(m.decryptionKey)\n\tcsrfPayload := &csrfPayload{}\n\tif err = json.Unmarshal(b, csrfPayload); err != nil {\n\t\treturn nil, err\n\t}\n\tif time.Now().After(csrfPayload.ExpireAfter) {\n\t\treturn nil, errors.New(\"csrf token expired\")\n\t}\n\treturn csrfPayload, nil\n}",
  "func decodeToken(tokenString string) (*jwt.Token, error) {\n\treturn jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"unexpected signing method: %v\",\n\t\t\t\ttoken.Header[\"alg\"])\n\t\t}\n\n\t\treturn signingSecret, nil\n\t})\n}",
  "func DecodeToken(token string) (name, filter string, err error) {\n\tvar encodedToken []byte\n\tif encodedToken, err = base64.RawURLEncoding.DecodeString(token); err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\ttokenByte := make([]byte, base64.RawURLEncoding.DecodedLen(len(encodedToken)))\n\tif _, err = base64.RawURLEncoding.Decode(tokenByte, encodedToken); err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tpi := &pb.ListPageIdentifier{}\n\tif err = proto.Unmarshal(tokenByte, pi); err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\treturn pi.GetName(), pi.GetFilter(), err\n}",
  "func (t *DescribeDelegationTokenRequest) Decode(d *Decoder, version int16) error {\n\tvar err error\n\t// Owners\n\tif n, err := d.ArrayLength(); err != nil {\n\t\treturn err\n\t} else if n >= 0 {\n\t\tt.Owners = make([]DescribeDelegationTokenOwner41, n)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tvar item DescribeDelegationTokenOwner41\n\t\t\tif err := (&item).Decode(d, version); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt.Owners[i] = item\n\t\t}\n\t}\n\treturn err\n}",
  "func decodeRequest(_ context.Context, r *http.Request) (request interface{}, err error) {\n\tdefer r.Body.Close()\n\treturn nil, nil\n}",
  "func decodeToken(encoded string) *oauth2.Token {\n\tt := new(oauth2.Token)\n\tf := strings.Fields(encoded)\n\tif len(f) > 0 {\n\t\tt.AccessToken = f[0]\n\t}\n\tif len(f) > 1 {\n\t\tt.RefreshToken = f[1]\n\t}\n\tif len(f) > 2 && f[2] != \"0\" {\n\t\tsec, err := strconv.ParseInt(f[2], 10, 64)\n\t\tif err == nil {\n\t\t\tt.Expiry = time.Unix(sec, 0)\n\t\t}\n\t}\n\treturn t\n}",
  "func (c *Client) DecodeToken(resp *http.Response) (*Token, error) {\n\tvar decoded Token\n\terr := c.Decoder.Decode(&decoded, resp.Body, resp.Header.Get(\"Content-Type\"))\n\treturn &decoded, err\n}",
  "func DecodeGRPCBanTokenRequest(_ context.Context, grpcReq interface{}) (interface{}, error) {\n\treq := grpcReq.(*pb.BanTokenReq)\n\treturn req, nil\n}",
  "func decodeDeleteTagRequest(_ context.Context, r interface{}) (interface{}, error) {\n\trq := r.(*pb.DeleteTagRequest)\n\n\treturn endpoint.DeleteTagRequest{\n\t\tName:  rq.Name,\n\t\tToken: rq.Token,\n\t}, nil\n}",
  "func DecodeGRPCGetTokenRequest(_ context.Context, grpcReq interface{}) (interface{}, error) {\n\treq := grpcReq.(*pb.GetTokenReq)\n\treturn req, nil\n}",
  "func decodeHeader(t *Token, s string) error {\n\tb, err := base64.RawURLEncoding.DecodeString(s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\theader := make(map[string]interface{})\n\tif err := json.Unmarshal(b, &header); err != nil {\n\t\treturn err\n\t}\n\n\tif v, ok := header[\"typ\"]; ok {\n\t\tif _, ok := v.(string); !ok {\n\t\t\treturn ErrInvalidToken\n\t\t}\n\t\tif _, ok := supportedTypes[Type(v.(string))]; !ok {\n\t\t\treturn ErrUnsupportedTokenType\n\t\t}\n\t\tt.Type = Type(v.(string))\n\t}\n\n\tif v, ok := header[\"alg\"]; ok {\n\t\tif _, ok := v.(string); !ok {\n\t\t\treturn ErrInvalidToken\n\t\t}\n\t\tt.Algorithm = Algorithm(v.(string))\n\t}\n\n\tif v, ok := header[\"kid\"]; ok {\n\t\tif _, ok := v.(string); !ok {\n\t\t\treturn ErrInvalidToken\n\t\t}\n\t\tt.KeyID = v.(string)\n\t}\n\n\treturn nil\n}",
  "func DecodeListenerRequest(mux goahttp.Muxer, decoder func(*http.Request) goahttp.Decoder) func(*http.Request) (any, error) {\n\treturn func(r *http.Request) (any, error) {\n\t\tvar (\n\t\t\ttoken string\n\t\t\terr   error\n\t\t)\n\t\ttoken = r.Header.Get(\"Authorization\")\n\t\tif token == \"\" {\n\t\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"token\", \"header\"))\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpayload := NewListenerPayload(token)\n\t\tif strings.Contains(payload.Token, \" \") {\n\t\t\t// Remove authorization scheme prefix (e.g. \"Bearer\")\n\t\t\tcred := strings.SplitN(payload.Token, \" \", 2)[1]\n\t\t\tpayload.Token = cred\n\t\t}\n\n\t\treturn payload, nil\n\t}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	LoadOffset loads the last offset associated with the given source application key sk. ak is the 'owner' application key. If there is no offset associated with the given source application key, a zero offset and a nil error are returned. 
 | 
	func (driver) LoadOffset(
	ctx context.Context,
	db *sql.DB,
	ak, sk string,
) (uint64, error) {
	row := db.QueryRowContext(
		ctx,
		`SELECT
			next_offset
		FROM stream_offset
		WHERE app_key = ?
		AND source_app_key = ?`,
		ak,
		sk,
	)
	var o uint64
	err := row.Scan(&o)
	if err == sql.ErrNoRows {
		err = nil
	}
	return o, err
} 
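
// A minimal usage sketch, assuming a MySQL database that already contains
// the stream_offset table and a registered driver (e.g.
// github.com/go-sql-driver/mysql); the DSN and application keys are
// illustrative.
func exampleLoadOffset(ctx context.Context) {
	db, err := sql.Open("mysql", "user:pass@tcp(localhost:3306)/example")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var d driver
	next, err := d.LoadOffset(ctx, db, "owner-app", "source-app")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(next) // 0 when no row exists for this key pair yet
}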
 | 
	[
  "func recoveredDataOffset(chunkFetchOffset uint64, rs modules.ErasureCoder) uint64 {\n\t// If partialDecoding is not available we downloaded the whole sector and\n\t// recovered the whole chunk which means the offset and length are actually\n\t// equal to the chunkFetchOffset and chunkFetchLength.\n\tif !rs.SupportsPartialEncoding() {\n\t\treturn chunkFetchOffset\n\t}\n\t// Else we need to adjust the offset a bit.\n\trecoveredSegmentSize := uint64(rs.MinPieces() * crypto.SegmentSize)\n\treturn chunkFetchOffset % recoveredSegmentSize\n}",
  "func FetchOffset() int {\n\toffset := intFromEnvVar(\"FETCH_OFFSET\", -1)\n\tif offset < 0 {\n\t\tpanic(errors.New(\"FETCH_OFFSET could not be parsed or is a negative number\"))\n\t}\n\treturn offset\n}",
  "func (t *tOps) offsetOf(f *tFile, key []byte) (offset int64, err error) {\n\tch, err := t.open(f)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer ch.Release()\n\treturn ch.Value().(*table.Reader).OffsetOf(key)\n}",
  "func (r *Reader) findOffset(i int, f *os.File) (dataOffset int64, err error) {\n\tif r.cacheIndex {\n\t\treturn r.indexCache[i], nil\n\t}\n\n\t// We do not have an index that is storing our offsets.\n\toffset := r.header.indexOffset + (int64(i) * entryLen)\n\tif _, err := f.Seek(offset, 0); err != nil {\n\t\treturn 0, fmt.Errorf(\"cannot reach offset for key supplied by the index: %s\", err)\n\t}\n\tif err := binary.Read(f, endian, &dataOffset); err != nil {\n\t\treturn 0, fmt.Errorf(\"cannot read a data offset in index: %s\", err)\n\t}\n\n\treturn dataOffset, nil\n}",
  "func (s *SinceDB) RessourceOffset(ressource string) (int, error) {\n\n\t// If a value not already stored exists\n\tif value, ok := s.offsets.Load(ressource); ok {\n\t\toffset, err := strconv.Atoi(string(value.([]byte)))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn offset, nil\n\t}\n\n\t// Try to find value in storage\n\tv, err := s.options.Storage.Get(ressource, s.options.Identifier)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\toffset, _ := strconv.Atoi(string(v))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn offset, nil\n}",
  "func (c *CodeGenerator) loadAddressOfPreviousRecord(dest string, n int, m int) {\n\tc.emitMove(dest, \"$fp\")\n\tfor i := 0; i < n; i++ { // If we need to go back.\n\t\tc.emitLoadWord(dest, dest, 4) // Load old frame pointer.\n\t}\n\t// Load var at position m.\n\tc.emitSubUnsigned(dest, dest, 4*m)\n}",
  "func (b MockBroker) FetchOffset(request *sarama.OffsetFetchRequest) (*sarama.OffsetFetchResponse, error) {\n\targs := b.Called(request)\n\treturn args.Get(0).(*sarama.OffsetFetchResponse), args.Error(1)\n}",
  "func (m *FlatMemory) LoadAddress(addr uint16) uint16 {\n\tif (addr & 0xff) == 0xff {\n\t\treturn uint16(m.b[addr]) | uint16(m.b[addr-0xff])<<8\n\t}\n\treturn uint16(m.b[addr]) | uint16(m.b[addr+1])<<8\n}",
  "func (kp *KeyPool) loadKey(loadKey *signkeys.PublicKey) (*[signkeys.KeyIDSize]byte, error) {\n\tif kp.Generator.Usage != \"\" && loadKey.Usage != kp.Generator.Usage {\n\t\t// Don't load if usage is a mismatch\n\t\treturn nil, ErrBadUsage\n\t}\n\tif loadKey.Expire < times.Now() {\n\t\t// Don't load expired keys\n\t\treturn nil, ErrExpired\n\t}\n\tif !kp.HasVerifyKey(&loadKey.Signer, true) {\n\t\t// Don't load keys without matching signature\n\t\treturn nil, ErrBadSigner\n\t}\n\tif !loadKey.Verify(&loadKey.Signer) {\n\t\t// Don't load keys without matching signature\n\t\treturn nil, ErrBadSigner\n\t}\n\tif _, exists := kp.keys[loadKey.KeyID]; exists {\n\t\treturn &loadKey.KeyID, ErrExists\n\t}\n\tkp.keys[loadKey.KeyID] = loadKey\n\treturn &loadKey.KeyID, nil\n}",
  "func (f *factory) chooseStartingOffset(topic string, partition int32, offset int64) (int64, error) {\n\tnewestOffset, err := f.kafkaClt.GetOffset(topic, partition, sarama.OffsetNewest)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\toldestOffset, err := f.kafkaClt.GetOffset(topic, partition, sarama.OffsetOldest)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tswitch {\n\tcase offset == sarama.OffsetNewest || offset > newestOffset:\n\t\treturn newestOffset, nil\n\tcase offset == sarama.OffsetOldest || offset < oldestOffset:\n\t\treturn oldestOffset, nil\n\tdefault:\n\t\treturn offset, nil\n\t}\n}",
  "func (m *Ints) Load(addr uint) (int, error) {\n\tif err := m.checkLimit(addr, \"load\"); err != nil {\n\t\treturn 0, err\n\t}\n\n\tif m.PageSize == 0 || len(m.pages) == 0 {\n\t\treturn 0, nil\n\t}\n\n\tpageID := m.findPage(addr)\n\tbase := m.bases[pageID]\n\tpage := m.pages[pageID]\n\tif i := int(addr) - int(base); 0 <= i && i < len(page) {\n\t\treturn page[i], nil\n\t}\n\n\treturn 0, nil\n}",
  "func (q *throughputUsagePerNamespace) offset() *throughputUsageDataPointsRecoder {\n\toldRecorder := q.dpRecorder()\n\tq.dpRecorderv.Store(newThroughputUsageDataPointsRecorder())\n\n\treturn oldRecorder\n}",
  "func (o *GetModerationRulesParams) SetOffset(offset *int64) {\n\to.Offset = offset\n}",
  "func (key *SharedAuthenticationKey) Load(otherKey string) {\n\n\tif len(otherKey) != SharedAuthenticationKeyEncodedSize {\n\t\tpanic(\"Encoded key is not the correct size\")\n\t}\n\n\tdecoded, err := Base64UrlDecode(otherKey)\n\n\tif err != nil {\n\t\tpanic(\"Failed to decode the key:\" + err.Error())\n\t}\n\n\tkey.Copy(decoded[:])\n}",
  "func (a *app) LoadKey(p string) error {\n\tif buf, err := ioutil.ReadFile(p); err != nil {\n\t\tError(\"Unable to find key: %s\", p)\n\t\treturn err\n\t} else {\n\t\ta.key = string(buf)\n\t}\n\n\treturn nil\n}",
  "func getOffset(n string, offset uint32, fb byte) ([4]byte, bool) {\n\tfilesL.RLock()\n\tdefer filesL.RUnlock()\n\n\t/* Get hold of the file */\n\tf, ok := files[n]\n\tif !ok {\n\t\tlog.Panicf(\"no file %q for offset\", n)\n\t}\n\n\t/* Make sure we have enough file */\n\tvar a [4]byte\n\tif uint32(len(f.contents)-1) < offset {\n\t\treturn a, false\n\t}\n\ta[0] = fb\n\tcopy(a[1:], f.contents[offset:])\n\treturn a, true\n}",
  "func (o *AdminGetBannedUsersV3Params) SetOffset(offset *int64) {\n\to.Offset = offset\n}",
  "func LoadRALedgerMarker(bid, lid, raid int64, dt *time.Time) LedgerMarker {\n\tlm := GetRALedgerMarkerOnOrBefore(bid, lid, raid, dt)\n\tif lm.LMID == 0 {\n\t\tlm.LID = lid\n\t\tlm.BID = bid\n\t\tlm.RAID = raid\n\t\tlm.Dt = *dt\n\t\tlm.State = MARKERSTATEORIGIN\n\t\terr := InsertLedgerMarker(&lm)\n\t\tif nil != err {\n\t\t\tfmt.Printf(\"LoadRALedgerMarker: Error creating LedgerMarker: %s\\n\", err.Error())\n\t\t}\n\t}\n\treturn lm\n}",
  "func (o *RetrieveAllUsersByPolicyVersionParams) SetOffset(offset *int32) {\n\to.Offset = offset\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	InsertOffset inserts a new offset associated with the given source application key sk. ak is the 'owner' application key. It returns false if the row already exists. 
 | 
	func (driver) InsertOffset(
	ctx context.Context,
	tx *sql.Tx,
	ak, sk string,
	n uint64,
) (_ bool, err error) {
	defer sqlx.Recover(&err)
	return sqlx.TryExecRow(
		ctx,
		tx,
		`INSERT INTO stream_offset SET
			app_key = ?,
			source_app_key = ?,
			next_offset = ?
		ON DUPLICATE KEY UPDATE
			app_key = app_key`, // do nothing
		ak,
		sk,
		n,
	), nil
} 
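
// A minimal usage sketch inside a transaction; db and ctx are assumed to be
// set up as in the LoadOffset sketch, and the application keys are
// illustrative.
func exampleInsertOffset(ctx context.Context, db *sql.DB) {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer tx.Rollback()

	var d driver
	ok, err := d.InsertOffset(ctx, tx, "owner-app", "source-app", 1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ok) // false if a row for this key pair already existed
	_ = tx.Commit()
}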
 | 
	[
  "func (driver) UpdateOffset(\n\tctx context.Context,\n\ttx *sql.Tx,\n\tak, sk string,\n\tc, n uint64,\n) (_ bool, err error) {\n\tdefer sqlx.Recover(&err)\n\n\treturn sqlx.TryExecRow(\n\t\tctx,\n\t\ttx,\n\t\t`UPDATE stream_offset SET\n\t\t\tnext_offset = ?\n\t\tWHERE app_key = ?\n\t\tAND source_app_key = ?\n\t\tAND next_offset = ?`,\n\t\tn,\n\t\tak,\n\t\tsk,\n\t\tc,\n\t), nil\n}",
  "func (cf *Filter64S) insertAt(idx uint64, fp fingerprintS) bool {\n\tu := cf.buckets[idx]\n\tfor i := 0; i < bucketNumS; i++ {\n\t\ts := uint(i * fpBitsNumS)\n\t\tif u>>s&fpMaskS == 0 {\n\t\t\tcf.buckets[idx] |= bucketS(fp) << s\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
  "func (driver) LoadOffset(\n\tctx context.Context,\n\tdb *sql.DB,\n\tak, sk string,\n) (uint64, error) {\n\trow := db.QueryRowContext(\n\t\tctx,\n\t\t`SELECT\n\t\t\tnext_offset\n\t\tFROM stream_offset\n\t\tWHERE app_key = ?\n\t\tAND source_app_key = ?`,\n\t\tak,\n\t\tsk,\n\t)\n\n\tvar o uint64\n\terr := row.Scan(&o)\n\tif err == sql.ErrNoRows {\n\t\terr = nil\n\t}\n\n\treturn o, err\n}",
  "func (t *strideTable[T]) insert(addr uint8, prefixLen int, val *T) {\n\tidx := prefixIndex(addr, prefixLen)\n\told := t.entries[idx].value\n\toldIdx := t.entries[idx].prefixIndex\n\tif oldIdx == idx && old == val {\n\t\t// This exact prefix+value is already in the table.\n\t\treturn\n\t}\n\tt.allot(idx, oldIdx, idx, val)\n\tif oldIdx != idx {\n\t\t// This route entry was freshly created (not just updated), that's a new\n\t\t// reference.\n\t\tt.refs++\n\t}\n\treturn\n}",
  "func (e *ObservableEditableBuffer) InsertAt(rp0 int, rs []rune) {\n\tp0 := e.f.RuneTuple(rp0)\n\ts, nr := RunesToBytes(rs)\n\n\te.Insert(p0, s, nr)\n}",
  "func (ust *UsersShopTrace) Insert(ctx context.Context, key ...interface{}) error {\n\tvar err error\n\tvar dbConn *sql.DB\n\tvar res sql.Result\n\t// if already exist, bail\n\tif ust._exists {\n\t\treturn errors.New(\"insert failed: already exists\")\n\t}\n\n\ttx, err := components.M.GetConnFromCtx(ctx)\n\tif err != nil {\n\t\tdbConn, err = components.M.GetMasterConn()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttableName, err := GetUsersShopTraceTableName(key...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// sql insert query, primary key provided by autoincrement\n\tsqlstr := `INSERT INTO ` + tableName +\n\t\t` (` +\n\t\t`openid, unionid, appid, uid, fid, sid, updated` +\n\t\t`) VALUES (` +\n\t\t`?, ?, ?, ?, ?, ?, ?` +\n\t\t`)`\n\n\t// run query\n\tutils.GetTraceLog(ctx).Debug(\"DB\", zap.String(\"SQL\", fmt.Sprint(sqlstr, ust.Openid, ust.Unionid, ust.Appid, ust.UID, ust.Fid, ust.Sid, ust.Updated)))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif tx != nil {\n\t\tres, err = tx.Exec(sqlstr, ust.Openid, ust.Unionid, ust.Appid, ust.UID, ust.Fid, ust.Sid, ust.Updated)\n\t} else {\n\t\tres, err = dbConn.Exec(sqlstr, ust.Openid, ust.Unionid, ust.Appid, ust.UID, ust.Fid, ust.Sid, ust.Updated)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// retrieve id\n\tid, err := res.LastInsertId()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// set primary key and existence\n\tust.ID = int(id)\n\tust._exists = true\n\n\treturn nil\n}",
  "func NewAccessEntryPK(publisher string, salt []byte) (*AccessEntry, error) {\n\tif len(publisher) != 66 {\n\t\treturn nil, fmt.Errorf(\"publisher should be 66 characters long, got %d\", len(publisher))\n\t}\n\tif len(salt) != 32 {\n\t\treturn nil, fmt.Errorf(\"salt should be 32 bytes long\")\n\t}\n\treturn &AccessEntry{\n\t\tType:      AccessTypePK,\n\t\tPublisher: publisher,\n\t\tSalt:      salt,\n\t}, nil\n}",
  "func insert(buf, insert []byte, offset int) []byte {\n\tn := len(insert)\n\tbuf = buf[:len(buf)+n]\n\tcopy(buf[offset+n:], buf[offset:])\n\tcopy(buf[offset:offset+n], insert)\n\treturn buf\n}",
  "func (m sketchMap) insert(ts int64, ck ckey.ContextKey, v float64, sampleRate float64) bool {\n\tif math.IsInf(v, 0) || math.IsNaN(v) {\n\t\treturn false\n\t}\n\n\tm.getOrCreate(ts, ck).Insert(v, sampleRate)\n\treturn true\n}",
  "func existingSAWithOwnerRef(tc *v1alpha1.TektonConfig) bool {\n\ttcLabels := tc.GetLabels()\n\t_, ok := tcLabels[serviceAccountCreationLabel]\n\treturn !ok\n}",
  "func (_Permission *PermissionTransactorSession) Insert(table_name string, addr string) (*types.RawTransaction, error) {\n\treturn _Permission.Contract.Insert(&_Permission.TransactOpts, table_name, addr)\n}",
  "func SliceInsert(sl *[]Ki, k Ki, idx int) {\n\tkl := len(*sl)\n\tif idx < 0 {\n\t\tidx = kl + idx\n\t}\n\tif idx < 0 { // still?\n\t\tidx = 0\n\t}\n\tif idx > kl { // last position allowed for insert\n\t\tidx = kl\n\t}\n\t// this avoids extra garbage collection\n\t*sl = append(*sl, nil)\n\tif idx < kl {\n\t\tcopy((*sl)[idx+1:], (*sl)[idx:kl])\n\t}\n\t(*sl)[idx] = k\n}",
  "func (mr *MockDatabaseMockRecorder) InsertTopicKey(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"InsertTopicKey\", reflect.TypeOf((*MockDatabase)(nil).InsertTopicKey), arg0, arg1)\n}",
  "func (s *segmentKeysIndex) add(keyIdx int, key []byte) bool {\n\tif s.numKeys >= s.numIndexableKeys {\n\t\t// All keys that can be indexed already have been,\n\t\t// return false indicating that there's no room for\n\t\t// anymore.\n\t\treturn false\n\t}\n\n\tif len(key) > (len(s.data) - s.numKeyBytes) {\n\t\t// No room for any more keys.\n\t\treturn false\n\t}\n\n\tif keyIdx%(s.hop) != 0 {\n\t\t// Key does not satisfy the hop condition.\n\t\treturn true\n\t}\n\n\ts.offsets[s.numKeys] = uint32(s.numKeyBytes)\n\tcopy(s.data[s.numKeyBytes:], key)\n\ts.numKeys++\n\ts.numKeyBytes += len(key)\n\n\treturn true\n}",
  "func (_Permission *PermissionTransactor) Insert(opts *bind.TransactOpts, table_name string, addr string) (*types.RawTransaction, error) {\n\treturn _Permission.contract.Transact(opts, \"insert\", table_name, addr)\n}",
  "func (s *Segment) isPKExist(pk primaryKey) bool {\n\ts.statLock.Lock()\n\tdefer s.statLock.Unlock()\n\tif s.currentStat != nil && s.currentStat.PkExist(pk) {\n\t\treturn true\n\t}\n\n\tfor _, historyStats := range s.historyStats {\n\t\tif historyStats.PkExist(pk) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
  "func (am AttributeMap) InsertNull(k string) {\n\tif _, existing := am.Get(k); !existing {\n\t\t*am.orig = append(*am.orig, newAttributeKeyValueNull(k))\n\t}\n}",
  "func (dl *DataLocation) RequiresPK() bool {\n\treturn dl.Format == DoltDB\n}",
  "func (c *Cache) incrementOffset(key string, initial, offset, ttl int64) error {\n\tc.client.Do(\"WATCH\", key)\n\n\tif err := c.exists(key); err != nil {\n\t\tc.client.Do(\"MULTI\")\n\t\tdefer c.client.Do(\"EXEC\")\n\t\treturn c.Set(key, encoding.Int64Bytes(initial), ttl)\n\t}\n\n\tgetValue, _, err := c.Get(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tval, ok := encoding.BytesInt64(getValue)\n\n\tif !ok {\n\t\treturn errors.NewEncoding(key)\n\t}\n\n\t// We are watching our key. With using a transaction, we can check that this\n\t// increment doesn't inflect with another concurrent request that might\n\t// happen.\n\tc.client.Do(\"MULTI\")\n\tdefer c.client.Do(\"EXEC\")\n\n\tval += offset\n\tif val < 0 {\n\t\treturn errors.NewValueBelowZero(key)\n\t}\n\n\treturn c.Set(key, encoding.Int64Bytes(val), ttl)\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	UpdateOffset updates the offset associated with the given source application key sk. ak is the 'owner' application key. It returns false if the row does not exist, or if c is not the current offset associated with the given source application key. 
 | 
	func (driver) UpdateOffset(
	ctx context.Context,
	tx *sql.Tx,
	ak, sk string,
	c, n uint64,
) (_ bool, err error) {
	defer sqlx.Recover(&err)
	return sqlx.TryExecRow(
		ctx,
		tx,
		`UPDATE stream_offset SET
			next_offset = ?
		WHERE app_key = ?
		AND source_app_key = ?
		AND next_offset = ?`,
		n,
		ak,
		sk,
		c,
	), nil
} 
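
// A minimal compare-and-swap sketch: the stored next_offset only advances
// from 1 to 2 if it is still exactly 1 when the UPDATE runs. db, ctx and
// the application keys are illustrative, as in the earlier sketches.
func exampleUpdateOffset(ctx context.Context, db *sql.DB) {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer tx.Rollback()

	var d driver
	ok, err := d.UpdateOffset(ctx, tx, "owner-app", "source-app", 1, 2)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ok) // false if the row is missing or another consumer already advanced it
	_ = tx.Commit()
}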
 | 
	[
  "func (driver) InsertOffset(\n\tctx context.Context,\n\ttx *sql.Tx,\n\tak, sk string,\n\tn uint64,\n) (_ bool, err error) {\n\tdefer sqlx.Recover(&err)\n\n\treturn sqlx.TryExecRow(\n\t\tctx,\n\t\ttx,\n\t\t`INSERT INTO stream_offset SET\n\t\t\tapp_key = ?,\n\t\t\tsource_app_key = ?,\n\t\t\tnext_offset = ?\n\t\tON DUPLICATE KEY UPDATE\n\t\t\tapp_key = app_key`, // do nothing\n\t\tak,\n\t\tsk,\n\t\tn,\n\t), nil\n}",
  "func (oom *OwnerOffsetMoveHelper) Update(key string, offset int, timestamp int64) {\n\t// It works for the current case. ie. concurrent competition only exists in different key.\n\t// It doesn't work for \"concurrent competition exists in the same key at the same time\".\n\tpartitionOffsetMove := oom.GetSyncMap().GetChild(key, protocol.PartitionOffsetMove{}).(protocol.PartitionOffsetMove)\n\n\tpartitionOffsetMove.LastOffset = partitionOffsetMove.CurtOffset\n\tpartitionOffsetMove.LastTimestamp = partitionOffsetMove.CurtTimestamp\n\n\tpartitionOffsetMove.CurtOffset = offset\n\tpartitionOffsetMove.CurtTimestamp = timestamp\n\n\toom.GetSyncMap().PutChild(key, partitionOffsetMove)\n}",
  "func (s skuPKE) updateSK(usk, sk []byte) ([]byte, error) {\n\tprivate := new(big.Int).SetBytes(sk)\n\tuprivate := new(big.Int).SetBytes(usk)\n\n\tupd := new(big.Int).Add(private, uprivate)\n\treturn new(big.Int).Mod(upd, s.curve.Params().N).Bytes(), nil\n}",
  "func updateEntryOffset(file *ast.File, entryOffKey uint32) {\n\t// Note that this field could be renamed in future Go versions.\n\tconst nameOffField = \"nameOff\"\n\tentryOffUpdated := false\n\n\t// During linker stage we encrypt funcInfo.entryoff using a random number and funcInfo.nameOff,\n\t// for correct program functioning we must decrypt funcInfo.entryoff at any access to it.\n\t// In runtime package all references to funcInfo.entryOff are made through one method entry():\n\t// func (f funcInfo) entry() uintptr {\n\t//\treturn f.datap.textAddr(f.entryoff)\n\t// }\n\t// It is enough to inject decryption into entry() method for program to start working transparently with encrypted value of funcInfo.entryOff:\n\t// func (f funcInfo) entry() uintptr {\n\t//\treturn f.datap.textAddr(f.entryoff ^ (uint32(f.nameOff) * <random int>))\n\t// }\n\tupdateEntryOff := func(node ast.Node) bool {\n\t\tcallExpr, ok := node.(*ast.CallExpr)\n\t\tif !ok {\n\t\t\treturn true\n\t\t}\n\n\t\ttextSelExpr, ok := callExpr.Fun.(*ast.SelectorExpr)\n\t\tif !ok || textSelExpr.Sel.Name != \"textAddr\" {\n\t\t\treturn true\n\t\t}\n\n\t\tselExpr, ok := callExpr.Args[0].(*ast.SelectorExpr)\n\t\tif !ok {\n\t\t\treturn true\n\t\t}\n\n\t\tcallExpr.Args[0] = &ast.BinaryExpr{\n\t\t\tX:  selExpr,\n\t\t\tOp: token.XOR,\n\t\t\tY: &ast.ParenExpr{X: &ast.BinaryExpr{\n\t\t\t\tX: ah.CallExpr(ast.NewIdent(\"uint32\"), &ast.SelectorExpr{\n\t\t\t\t\tX:   selExpr.X,\n\t\t\t\t\tSel: ast.NewIdent(nameOffField),\n\t\t\t\t}),\n\t\t\t\tOp: token.MUL,\n\t\t\t\tY: &ast.BasicLit{\n\t\t\t\t\tKind:  token.INT,\n\t\t\t\t\tValue: strconv.FormatUint(uint64(entryOffKey), 10),\n\t\t\t\t},\n\t\t\t}},\n\t\t}\n\t\tentryOffUpdated = true\n\t\treturn false\n\t}\n\n\tvar entryFunc *ast.FuncDecl\n\tfor _, decl := range file.Decls {\n\t\tdecl, ok := decl.(*ast.FuncDecl)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif decl.Name.Name == \"entry\" {\n\t\t\tentryFunc = decl\n\t\t\tbreak\n\t\t}\n\t}\n\tif entryFunc == nil {\n\t\tpanic(\"entry function not found\")\n\t}\n\n\tast.Inspect(entryFunc, updateEntryOff)\n\tif !entryOffUpdated {\n\t\tpanic(\"entryOff not found\")\n\t}\n}",
  "func (c *Cache) incrementOffset(key string, initial, offset, ttl int64) error {\n\tc.client.Do(\"WATCH\", key)\n\n\tif err := c.exists(key); err != nil {\n\t\tc.client.Do(\"MULTI\")\n\t\tdefer c.client.Do(\"EXEC\")\n\t\treturn c.Set(key, encoding.Int64Bytes(initial), ttl)\n\t}\n\n\tgetValue, _, err := c.Get(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tval, ok := encoding.BytesInt64(getValue)\n\n\tif !ok {\n\t\treturn errors.NewEncoding(key)\n\t}\n\n\t// We are watching our key. With using a transaction, we can check that this\n\t// increment doesn't inflect with another concurrent request that might\n\t// happen.\n\tc.client.Do(\"MULTI\")\n\tdefer c.client.Do(\"EXEC\")\n\n\tval += offset\n\tif val < 0 {\n\t\treturn errors.NewValueBelowZero(key)\n\t}\n\n\treturn c.Set(key, encoding.Int64Bytes(val), ttl)\n}",
  "func (mk *MockStore) UpdateSubOffset(name string, offset int64) {\n\n}",
  "func Update(crc uint64, tab *Table, p []byte) uint64 {}",
  "func (o *offsetMap) UpdateOffsetIfBigger(id string, inoffset int64) bool {\n\to.ofLock.RLock()\n\tdefer o.ofLock.RUnlock()\n\n\tif _, ok := o.offsets[id]; ok {\n\t\tif inoffset > o.offsets[id].Offset {\n\t\t\to.offsets[id].Offset = inoffset\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\treturn false\n}",
  "func (s skuPKE) updatePK(upk, pk []byte) ([]byte, error) {\n\tpkx, pky := elliptic.Unmarshal(s.curve, pk)\n\tif pkx == nil {\n\t\treturn nil, errors.New(\"unable to unmarshal sku-PKE public key\")\n\t}\n\tupkx, upky := elliptic.Unmarshal(s.curve, upk)\n\tif upkx == nil {\n\t\treturn nil, errors.New(\"unable to unmarshal sku-PKE update information\")\n\t}\n\n\tx, y := s.curve.Add(pkx, pky, upkx, upky)\n\treturn elliptic.Marshal(s.curve, x, y), nil\n}",
  "func checkMergeAndUpdate(c *Client, opt ReadTreeOptions, origidx map[IndexPath]*IndexEntry, newidx *Index) error {\n\tif opt.Update && opt.Prefix == \"\" && !opt.Merge && !opt.Reset {\n\t\treturn fmt.Errorf(\"-u is meaningless without -m, --reset or --prefix\")\n\t}\n\tif (opt.Prefix != \"\" && (opt.Merge || opt.Reset)) ||\n\t\t(opt.Merge && (opt.Prefix != \"\" || opt.Reset)) ||\n\t\t(opt.Reset && (opt.Prefix != \"\" || opt.Merge)) {\n\t\treturn fmt.Errorf(\"Can only specify one of -u, --reset, or --prefix\")\n\t}\n\n\t// Keep a list of index entries to be updated by CheckoutIndex.\n\tfiles := make([]File, 0, len(newidx.Objects))\n\n\tif opt.Merge {\n\t\t// Verify that merge won't overwrite anything that's been modified locally.\n\t\tfor _, entry := range newidx.Objects {\n\t\t\tif entry.Stage() != Stage0 {\n\t\t\t\t// Don't check unmerged entries. One will always\n\t\t\t\t// conflict, which means that -u won't work\n\t\t\t\t// if we check them.\n\t\t\t\t// (We also don't add them to files, so they won't\n\t\t\t\t// make it to checkoutindex\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\torig, ok := origidx[entry.PathName]\n\t\t\tif !ok {\n\t\t\t\t// If it wasn't in the original index, make sure\n\t\t\t\t// we check it out.\n\t\t\t\tfile, err := entry.PathName.FilePath(c)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfiles = append(files, file)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif orig.Sha1 == entry.Sha1 {\n\t\t\t\t// Nothing was modified, so don't bother checking\n\t\t\t\t// anything out\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif entry.PathName.IsClean(c, orig.Sha1) {\n\t\t\t\t// it hasn't been modified locally, so we want to\n\t\t\t\t// make sure the newidx version is checked out.\n\t\t\t\tfile, err := entry.PathName.FilePath(c)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfiles = append(files, file)\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\t// There are local unmodified changes on the filesystem\n\t\t\t\t// from the original that would be lost by -u, so return\n\t\t\t\t// an error unless --reset is specified.\n\t\t\t\tif !opt.Reset {\n\t\t\t\t\treturn fmt.Errorf(\"%s has local changes. 
Can not merge.\", entry.PathName)\n\t\t\t\t} else {\n\t\t\t\t\t// with --reset, checkout the file anyways.\n\t\t\t\t\tfile, err := entry.PathName.FilePath(c)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tfiles = append(files, file)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif opt.Update || opt.Reset {\n\t\tif err := CheckoutIndexUncommited(c, newidx, CheckoutIndexOptions{Quiet: true, Force: true}, files); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Convert to a map for constant time lookup in our loop..\n\t\tnewidxMap := newidx.GetMap()\n\n\t\t// Before returning, delete anything that was in the old index, removed\n\t\t// from the new index, and hasn't been changed on the filesystem.\n\t\tfor path, entry := range origidx {\n\t\t\tif _, ok := newidxMap[path]; ok {\n\t\t\t\t// It was already handled by checkout-index\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// It was deleted from the new index, but was in the\n\t\t\t// original index, so delete it if it hasn't been\n\t\t\t// changed on the filesystem.\n\t\t\tif path.IsClean(c, entry.Sha1) {\n\t\t\t\tfile, err := path.FilePath(c)\n\t\t\t\tif err != nil {\n\t\t\t\t\t// Don't error out since we've already\n\t\t\t\t\t// mucked up other stuff, just carry\n\t\t\t\t\t// on to the next file.\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\t\t\t\tcontinue\n\n\t\t\t\t}\n\t\t\t\tif err := file.Remove(); err != nil {\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\treturn nil\n}",
  "func (c *KafkaClient) CommitOffset(group string, topic string, partition int32, offset int64) error {\n\tfor i := 0; i <= c.config.CommitOffsetRetries; i++ {\n\t\terr := c.tryCommitOffset(group, topic, partition, offset)\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tlog.Debugf(\"Failed to commit offset %d for group %s, topic %s, partition %d after %d try: %s\", offset, group, topic, partition, i, err)\n\t\ttime.Sleep(c.config.CommitOffsetBackoff)\n\t}\n\n\treturn fmt.Errorf(\"Could not get commit offset %d for group %s, topic %s, partition %d after %d retries\", offset, group, topic, partition, c.config.CommitOffsetRetries)\n}",
  "func UpdateSourceControlEntry(iq IQ, applicationID, repositoryURL, token string) error {\n\tdoError := func(err error) error {\n\t\treturn fmt.Errorf(\"source control entry not updated for '%s': %v\", applicationID, err)\n\t}\n\n\tappInfo, err := GetApplicationByPublicID(iq, applicationID)\n\tif err != nil {\n\t\treturn doError(err)\n\t}\n\n\trequest, err := json.Marshal(SourceControlEntry{\"\", appInfo.ID, repositoryURL, token})\n\tif err != nil {\n\t\treturn doError(err)\n\t}\n\n\tendpoint := fmt.Sprintf(restSourceControl, appInfo.ID)\n\tif _, _, err = iq.Put(endpoint, bytes.NewBuffer(request)); err != nil {\n\t\treturn doError(err)\n\t}\n\n\treturn nil\n}",
  "func (c *Coordinator) CommitOffset(topic string, partition int32, offset int64) error {\n\tb, err := c.client.Coordinator(c.cfg.GroupID)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// OffsetCommitRequest retention time should be -1 to signify to use the broker default.\n\tvar rt int64 = -1\n\tif c.cfg.RetentionTime.Nanoseconds() != 0 {\n\t\trt = c.cfg.RetentionTime.Nanoseconds() / int64(time.Millisecond)\n\t}\n\treq := &sarama.OffsetCommitRequest{\n\t\tConsumerGroup:           c.cfg.GroupID,\n\t\tConsumerGroupGeneration: c.gid,\n\t\tConsumerID:              c.mid,\n\t\tRetentionTime:           rt,\n\t\tVersion:                 offsetCommitRequestVersion,\n\t}\n\treq.AddBlock(topic, partition, offset, 0, \"\")\n\tresp, err := b.CommitOffset(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// return first error we happen to iterate into (if any).\n\tfor _, topicErrs := range resp.Errors {\n\t\tfor _, partitionErr := range topicErrs {\n\t\t\tif partitionErr == sarama.ErrNoError {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn partitionErr\n\t\t}\n\t}\n\treturn nil\n}",
  "func (t *Testzzz) Update(ctx context.Context, key ...interface{}) error {\n\tvar err error\n\tvar dbConn *sql.DB\n\n\t// if deleted, bail\n\tif t._deleted {\n\t\treturn errors.New(\"update failed: marked for deletion\")\n\t}\n\n\ttx, err := components.M.GetConnFromCtx(ctx)\n\tif err != nil {\n\t\tdbConn, err = components.M.GetMasterConn()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttableName, err := GetTestzzzTableName(key...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// sql query\n\tsqlstr := `UPDATE ` + tableName + ` SET ` +\n\t\t`a = ?, b = ?, c = ?` +\n\t\t` WHERE id = ?`\n\n\t// run query\n\tutils.GetTraceLog(ctx).Debug(\"DB\", zap.String(\"SQL\", fmt.Sprint(sqlstr, t.A, t.B, t.C, t.ID)))\n\tif tx != nil {\n\t\t_, err = tx.Exec(sqlstr, t.A, t.B, t.C, t.ID)\n\t} else {\n\t\t_, err = dbConn.Exec(sqlstr, t.A, t.B, t.C, t.ID)\n\t}\n\treturn err\n}",
  "func (ust *UsersShopTrace) Update(ctx context.Context, key ...interface{}) error {\n\tvar err error\n\tvar dbConn *sql.DB\n\n\t// if deleted, bail\n\tif ust._deleted {\n\t\treturn errors.New(\"update failed: marked for deletion\")\n\t}\n\n\ttx, err := components.M.GetConnFromCtx(ctx)\n\tif err != nil {\n\t\tdbConn, err = components.M.GetMasterConn()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttableName, err := GetUsersShopTraceTableName(key...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// sql query\n\tsqlstr := `UPDATE ` + tableName + ` SET ` +\n\t\t`openid = ?, unionid = ?, appid = ?, uid = ?, fid = ?, sid = ?, updated = ?` +\n\t\t` WHERE id = ?`\n\n\t// run query\n\tutils.GetTraceLog(ctx).Debug(\"DB\", zap.String(\"SQL\", fmt.Sprint(sqlstr, ust.Openid, ust.Unionid, ust.Appid, ust.UID, ust.Fid, ust.Sid, ust.Updated, ust.ID)))\n\tif tx != nil {\n\t\t_, err = tx.Exec(sqlstr, ust.Openid, ust.Unionid, ust.Appid, ust.UID, ust.Fid, ust.Sid, ust.Updated, ust.ID)\n\t} else {\n\t\t_, err = dbConn.Exec(sqlstr, ust.Openid, ust.Unionid, ust.Appid, ust.UID, ust.Fid, ust.Sid, ust.Updated, ust.ID)\n\t}\n\treturn err\n}",
  "func (o *Source) Update(exec boil.Executor, columns boil.Columns) (int64, error) {\n\tvar err error\n\tkey := makeCacheKey(columns, nil)\n\tsourceUpdateCacheMut.RLock()\n\tcache, cached := sourceUpdateCache[key]\n\tsourceUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := columns.UpdateColumnSet(\n\t\t\tsourceAllColumns,\n\t\t\tsourcePrimaryKeyColumns,\n\t\t)\n\t\tif len(wl) == 0 {\n\t\t\treturn 0, errors.New(\"mdbmodels: unable to update sources, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE \\\"sources\\\" SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, wl),\n\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", len(wl)+1, sourcePrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(sourceType, sourceMapping, append(wl, sourcePrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, cache.query)\n\t\tfmt.Fprintln(boil.DebugWriter, values)\n\t}\n\tvar result sql.Result\n\tresult, err = exec.Exec(cache.query, values...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"mdbmodels: unable to update sources row\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"mdbmodels: failed to get rows affected by update for sources\")\n\t}\n\n\tif !cached {\n\t\tsourceUpdateCacheMut.Lock()\n\t\tsourceUpdateCache[key] = cache\n\t\tsourceUpdateCacheMut.Unlock()\n\t}\n\n\treturn rowsAff, nil\n}",
  "func (a *AweGlobalTunnelIdOffset) Update(client sophos.ClientInterface, options ...sophos.Option) (err error) {\n\treturn put(client, \"/api/nodes/awe.global.tunnel_id_offset\", a.Value, options...)\n}",
  "func (m *Map) Inc(k uint64, d uint64) bool {\n\t// 0 is our value that indicates an empty slot so we can't accept\n\t// it as a valid key\n\tif k == 0 {\n\t\treturn false\n\t}\n\tx := -1\n\ti := 0\n\tfor {\n\t\tx = m.pos(k, i)\n\n\t\t// Is our key here?\n\t\tkexist := atomic.LoadUint64(&m.keys[x])\n\t\tif kexist == k {\n\t\t\tbreak\n\t\t}\n\n\t\t// Is it an empty slot?\n\t\tif kexist == 0 {\n\t\t\t// Try and store key into slot\n\t\t\tif atomic.CompareAndSwapUint64(&m.keys[x], 0, k) {\n\t\t\t\t// Stored key successfully\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// Did another updater set the key while we were checking?\n\t\tkexist = atomic.LoadUint64(&m.keys[x])\n\t\tif kexist == k {\n\t\t\tbreak\n\t\t}\n\n\t\ti++\n\t\tif i > maxReprobe {\n\t\t\t// too many attempts to find an empty slot\n\t\t\treturn false\n\t\t}\n\t}\n\n\tatomic.AddUint64(&m.values[x], d)\n\treturn true\n}",
  "func (n *node) casNextNodeOffset(level uint8, old uint32, new uint32) bool {\n\treturn atomic.CompareAndSwapUint32(&n.layers[level], old, new)\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	createOffsetSchema creates the schema elements for stream offsets. 
 | 
	func createOffsetSchema(ctx context.Context, db *sql.DB) {
	sqlx.Exec(
		ctx,
		db,
		`CREATE TABLE IF NOT EXISTS stream_offset (
			app_key VARBINARY(255) NOT NULL,
			source_app_key VARBINARY(255) NOT NULL,
			next_offset    BIGINT NOT NULL,
			PRIMARY KEY (app_key, source_app_key)
		) ENGINE=InnoDB`,
	)
} 
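
A minimal usage sketch (the DSN is hypothetical; it assumes the MySQL driver is registered via a blank import of github.com/go-sql-driver/mysql, and that sqlx.Exec panics on failure, as the error-free signature suggests):

func exampleCreateOffsetSchema() {
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/app")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// CREATE TABLE IF NOT EXISTS makes repeated calls idempotent.
	createOffsetSchema(context.Background(), db)
}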
 | 
	[
  "func dropOffsetSchema(ctx context.Context, db *sql.DB) {\n\tsqlx.Exec(ctx, db, `DROP TABLE IF EXISTS stream_offset`)\n}",
  "func newSchema(schemaScan []string) (schema isql.CreateSchema, err error) {\n\tif len(schemaScan) < 3 {\n\t\treturn schema, errors.New(\"wrong schema sql\")\n\t}\n\n\tschemaName := tr(schemaScan[2])\n\n\tif schemaName == \"\" {\n\t\terr = errors.New(\"empty schema name\")\n\t}\n\n\tschemas := isql.CreateSchema{}\n\tschemas.Name = schemaName\n\n\treturn schemas, err\n}",
  "func NewSchema(m ...interface{}) *Schema {\n\tif len(m) > 0 {\n\t\tsche := &Schema{}\n\t\tstack := toMiddleware(m)\n\t\tfor _, s := range stack {\n\t\t\t*sche = append(*sche, s)\n\t\t}\n\t\treturn sche\n\t}\n\treturn nil\n}",
  "func (w *writer) createSchema(db *sql.DB) {\n\tstore.Logger.Printf(\"Creating Schema :: %s\", w.schema)\n\tcreateSchema, err := db.Prepare(fmt.Sprintf(\"CREATE SCHEMA IF NOT EXISTS %s\", w.schema))\n\tif err == nil {\n\t\tcreateSchema.Exec() //nolint\n\t}\n}",
  "func New() *types.Schema {\n\ts := &types.Schema{\n\t\tEntryPointNames: make(map[string]string),\n\t\tTypes:           make(map[string]types.NamedType),\n\t\tDirectives:      make(map[string]*types.DirectiveDefinition),\n\t}\n\tm := newMeta()\n\tfor n, t := range m.Types {\n\t\ts.Types[n] = t\n\t}\n\tfor n, d := range m.Directives {\n\t\ts.Directives[n] = d\n\t}\n\treturn s\n}",
  "func NewOffsetStore(path string) cqrs.OffsetStore {\n\tdb, err := bolt.Open(path, 0600, nil)\n\tif err != nil {\n\t\tglog.Fatal(\"Error while opening bolt db\", err)\n\t}\n\tdb.Update(func(tx *bolt.Tx) error {\n\t\tcreateBucket(tx, OFFSET_BUCKET)\n\t\treturn nil\n\t})\n\treturn &BoltOffsetStore{db}\n}",
  "func emitDocsSchema(pkgSpec *schema.PackageSpec, outputPath string) error {\n\tschemaJSON, err := json.MarshalIndent(pkgSpec, \"\", \"    \")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"marshaling Pulumi schema\")\n\t}\n\n\treturn gen.EmitFile(outputPath, schemaJSON)\n}",
  "func SchemaCreate(w http.ResponseWriter, r *http.Request) {\n\n\t// Add content type header to the response\n\tcontentType := \"application/json\"\n\tcharset := \"utf-8\"\n\tw.Header().Add(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\t// Get url path variables\n\turlVars := mux.Vars(r)\n\tschemaName := urlVars[\"schema\"]\n\n\t// Grab context references\n\trefStr := gorillaContext.Get(r, \"str\").(stores.Store)\n\n\t// Get project UUID First to use as reference\n\tprojectUUID := gorillaContext.Get(r, \"auth_project_uuid\").(string)\n\n\tschemaUUID := uuid.NewV4().String()\n\n\tschema := schemas.Schema{}\n\n\terr := json.NewDecoder(r.Body).Decode(&schema)\n\tif err != nil {\n\t\terr := APIErrorInvalidArgument(\"Schema\")\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\tschema, err = schemas.Create(projectUUID, schemaUUID, schemaName, schema.Type, schema.RawSchema, refStr)\n\tif err != nil {\n\t\tif err.Error() == \"exists\" {\n\t\t\terr := APIErrorConflict(\"Schema\")\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\n\t\t}\n\n\t\tif err.Error() == \"unsupported\" {\n\t\t\terr := APIErrorInvalidData(schemas.UnsupportedSchemaError)\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\n\t\t}\n\n\t\terr := APIErrorInvalidData(err.Error())\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\toutput, _ := json.MarshalIndent(schema, \"\", \" \")\n\trespondOK(w, output)\n}",
  "func setupSchema(cli *cli.Context) error {\n\tparams, err := parseConnectParams(cli)\n\tif err != nil {\n\t\treturn handleErr(schema.NewConfigError(err.Error()))\n\t}\n\tconn, err := newConn(params)\n\tif err != nil {\n\t\treturn handleErr(err)\n\t}\n\tdefer conn.Close()\n\tif err := schema.Setup(cli, conn); err != nil {\n\t\treturn handleErr(err)\n\t}\n\treturn nil\n}",
  "func generateSchema(models map[string]SourceModel, objectName string, fileName string) {\n\tnode := formatHead(buildObject(models, objectName))\n\n\tschema, err := json.MarshalIndent(node, \"\", \"  \")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\toutput, err := os.Create(filepath.Join(homeDir(), \"go\", \"src\", \"github.com\", \"twuillemin\", \"kuboxy\", \"docs\", \"json_schemas\", fileName))\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tdefer func() {\n\t\terr = output.Close()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t}()\n\n\t_, err = output.Write(schema)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}",
  "func newEncoderAtOffset(out io.Writer, offset int, order binary.ByteOrder, fds []int) *encoder {\n\tenc := new(encoder)\n\tenc.out = out\n\tenc.order = order\n\tenc.pos = offset\n\tenc.fds = fds\n\treturn enc\n}",
  "func createSchema() graphql.Schema {\n\tschema, err := graphql.NewSchema(graphql.SchemaConfig{\n\t\tQuery: createQuery(),\n\t})\n\tif err != nil {\n\t\tlog.Println(\"Error creating Schema\")\n\t\tpanic(err)\n\t}\n\treturn schema\n}",
  "func SchemaRegister(svc string, cluster string, sdb string, table string, inputType string, output string, version int, formatType string, dst string, createTopic bool) error {\n\tavroSchema, err := schema.ConvertToAvro(&db.Loc{Cluster: cluster, Service: svc, Name: sdb}, table, inputType, formatType)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\toutputSchemaName, err := encoder.GetOutputSchemaName(svc, sdb, table, inputType, output, version)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif dst == \"state\" || dst == \"all\" {\n\t\terr = state.InsertSchema(outputSchemaName, formatType, string(avroSchema))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif createTopic {\n\t\ttm := time.Now()\n\t\tc, err := config.Get().GetChangelogTopicName(svc, sdb, table, inputType, \"kafka\", version, tm)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = createKafkaTopic(c, inputType, svc, sdb, table)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\to, err := config.Get().GetOutputTopicName(svc, sdb, table, inputType, \"kafka\", version, tm)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = createKafkaTopic(o, inputType, svc, sdb, table)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlog.Infof(\"AvroSchema registered for(%v,%v, %v,%v,%v,%v,%v) = %s\", svc, cluster, sdb, table, inputType, output, version, avroSchema)\n\treturn nil\n}",
  "func (ipamStore *ipamStore) CreateSchemaPostProcess() error {\n\tdb := ipamStore.Db\n\tlog.Printf(\"ipamStore.CreateSchemaPostProcess(), DB is %v\", db)\n\tdb.Model(&Endpoint{}).AddUniqueIndex(\"idx_tenant_segment_host_network_id\", \"tenant_id\", \"segment_id\", \"host_id\", \"network_id\")\n\tdb.Model(&Endpoint{}).AddUniqueIndex(\"idx_ip\", \"ip\")\n\terr := common.MakeMultiError(db.GetErrors())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
  "func emitSchema(pkgSpec *pschema.PackageSpec, version, outDir string, goPackageName string) error {\n\tschemaJSON, err := json.MarshalIndent(pkgSpec, \"\", \"    \")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"marshaling Pulumi schema\")\n\t}\n\n\t// Ensure the spec is stamped with a version.\n\tpkgSpec.Version = version\n\n\tcompressedSchema := bytes.Buffer{}\n\tcompressedWriter := gzip.NewWriter(&compressedSchema)\n\terr = json.NewEncoder(compressedWriter).Encode(pkgSpec)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"marshaling metadata\")\n\t}\n\tif err = compressedWriter.Close(); err != nil {\n\t\treturn err\n\t}\n\n\terr = emitFile(outDir, \"schema.go\", []byte(fmt.Sprintf(`package %s\nvar pulumiSchema = %#v\n`, goPackageName, compressedSchema.Bytes())))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"saving metadata\")\n\t}\n\n\treturn emitFile(outDir, \"schema.json\", schemaJSON)\n}",
  "func CreateListDataSourceSchemaDatabaseRequest() (request *ListDataSourceSchemaDatabaseRequest) {\n\trequest = &ListDataSourceSchemaDatabaseRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Emr\", \"2016-04-08\", \"ListDataSourceSchemaDatabase\", \"emr\", \"openAPI\")\n\treturn\n}",
  "func StructFromSchema(schema avro.Schema, w io.Writer, cfg Config) error {\n\trec, ok := schema.(*avro.RecordSchema)\n\tif !ok {\n\t\treturn errors.New(\"can only generate Go code from Record Schemas\")\n\t}\n\n\topts := []OptsFunc{\n\t\tWithFullName(cfg.FullName),\n\t\tWithEncoders(cfg.Encoders),\n\t}\n\tg := NewGenerator(strcase.ToSnake(cfg.PackageName), cfg.Tags, opts...)\n\tg.Parse(rec)\n\n\tbuf := &bytes.Buffer{}\n\tif err := g.Write(buf); err != nil {\n\t\treturn err\n\t}\n\n\tformatted, err := format.Source(buf.Bytes())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not format code: %w\", err)\n\t}\n\n\t_, err = w.Write(formatted)\n\treturn err\n}",
  "func payloadFromSchema(schema *arrow.Schema, mem memory.Allocator, memo *dictutils.Mapper) payloads {\n\tps := make(payloads, 1)\n\tps[0].msg = MessageSchema\n\tps[0].meta = writeSchemaMessage(schema, mem, memo)\n\n\treturn ps\n}",
  "func (svc *ServiceDefinition) createSchemas() *brokerapi.ServiceSchemas {\n\treturn &brokerapi.ServiceSchemas{\n\t\tInstance: brokerapi.ServiceInstanceSchema{\n\t\t\tCreate: brokerapi.Schema{\n\t\t\t\tParameters: createJsonSchema(svc.ProvisionInputVariables),\n\t\t\t},\n\t\t},\n\t\tBinding: brokerapi.ServiceBindingSchema{\n\t\t\tCreate: brokerapi.Schema{\n\t\t\t\tParameters: createJsonSchema(svc.BindInputVariables),\n\t\t\t},\n\t\t},\n\t}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	dropOffsetSchema drops the schema elements for stream offsets. 
 | 
	func dropOffsetSchema(ctx context.Context, db *sql.DB) {
	sqlx.Exec(ctx, db, `DROP TABLE IF EXISTS stream_offset`)
} 
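
In a test harness, dropping before creating would plausibly give each run a clean slate; a sketch (the helper name resetOffsetSchema is hypothetical):

func resetOffsetSchema(ctx context.Context, db *sql.DB) {
	// Drop first so any change to the CREATE TABLE definition takes effect.
	dropOffsetSchema(ctx, db)
	createOffsetSchema(ctx, db)
}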
 | 
	[
  "func (t *Telly) DropOffsets() error {\n\t_, err := t.metaTable().Get(t.offsetKey()).Delete().RunWrite(t.Executor())\n\treturn err\n}",
  "func createOffsetSchema(ctx context.Context, db *sql.DB) {\n\tsqlx.Exec(\n\t\tctx,\n\t\tdb,\n\t\t`CREATE TABLE IF NOT EXISTS stream_offset (\n\t\t\tapp_key VARBINARY(255) NOT NULL,\n\t\t\tsource_app_key VARBINARY(255) NOT NULL,\n\t\t\tnext_offset    BIGINT NOT NULL,\n\n\t\t\tPRIMARY KEY (app_key, source_app_key)\n\t\t) ENGINE=InnoDB`,\n\t)\n}",
  "func dropProcessSchema(ctx context.Context, db *sql.DB) {\n\tsqlx.Exec(ctx, db, `DROP TABLE IF EXISTS process_instance`)\n}",
  "func (sb *SchemaBuilder) Drop() string {\n\treturn fmt.Sprintf(`DROP SCHEMA %v`, sb.QualifiedName())\n}",
  "func DropSchema(ctx context.Context, db Execer, schema string) error {\n\t_, err := db.ExecContext(ctx, `DROP SCHEMA `+QuoteSchema(schema)+` CASCADE;`)\n\treturn err\n}",
  "func DropSchema(db *pg.DB) error {\n\tfor _, model := range []interface{}{\n\t\t&User{},\n\t\t&Message{},\n\t\t&Connection{},\n\t\t&Resource{},\n\t\t&Comment{},\n\t\t&Collection{},\n\t\t&Tag{},\n\t\t&ResourceTag{},\n\t\t&CollectionTag{},\n\t\t&UserConnection{},\n\t\t&Recommendation{},\n\t\t&ResourceCollection{},\n\t} {\n\t\tif err := db.DropTable(\n\t\t\tmodel,\n\t\t\t&orm.DropTableOptions{IfExists: true, Cascade: true},\n\t\t); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
  "func (o *TransferEventListRequest) UnsetOffset() {\n\to.Offset.Unset()\n}",
  "func (adapter *PostgresAdapter) DropSchemaQuery() string {\n\treturn fmt.Sprintf(\"DROP SCHEMA %s CASCADE;\", adapter.Schema)\n}",
  "func DropSchema(db *sql.DB, schema string) error {\n\t_, err := db.Exec(fmt.Sprintf(\"drop schema %s cascade\", strconv.Quote(schema)))\n\treturn err\n}",
  "func (query *Query) CleanOffset() *Query {\n\treturn query.clean(OFFSET)\n}",
  "func TestDropSchema(cfg DBConfig, schema string) error {\n\tpoolcfg := pgx.ConnPoolConfig{\n\t\tConnConfig: pgx.ConnConfig{\n\t\t\tHost:     \"localhost\",\n\t\t\tUser:     cfg.User,\n\t\t\tDatabase: cfg.DBName,\n\t\t\tPort:     5432,\n\t\t},\n\t}\n\tdb, err := pgx.NewConnPool(poolcfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\t_, err = db.Exec(fmt.Sprintf(\"DROP SCHEMA %s CASCADE\", schema))\n\tif err != nil {\n\t\tlog.Printf(\"failed to create test schema: %s\", schema)\n\t\treturn err\n\t}\n\treturn nil\n}",
  "func (c ViewSchema) Drop() {\n\tvar quoteRegex = regexp.MustCompile(`(^\\w*)(\\.)(\\w*)`)\n\tfmt.Printf(\"DROP VIEW %s;\\n\\n\", quoteRegex.ReplaceAllString(c.get(\"viewname\"), `\"$1\"$2\"$3\"`))\n}",
  "func destroyTestSchema() {\n\tdsn := os.Getenv(\"GOSPEL_MARIADB_DSN\")\n\tif dsn == \"\" {\n\t\tdsn = gospelmaria.DefaultDSN\n\t}\n\n\tcfg, err := mysql.ParseDSN(dsn)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdb, err := sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer db.Close()\n\n\t_, err = db.Exec(`DROP SCHEMA ` + cfg.DBName)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t_, err = db.Exec(`CREATE SCHEMA IF NOT EXISTS ` + cfg.DBName)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}",
  "func DropCols(data Dstream, dropvars ...string) Dstream {\n\td := &drop{\n\t\txform: xform{\n\t\t\tsource: data,\n\t\t},\n\t\tdropVars: dropvars,\n\t}\n\td.init()\n\treturn d\n}",
  "func DestroySchema(tx *sql.Tx) error {\n\tfor i := len(allSQL) - 1; i >= 0; i-- {\n\t\tif _, err := tx.Exec(allSQL[i].DropSQL()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
  "func dropLockSchema(ctx context.Context, db *sql.DB) {\n\tsqlx.Exec(ctx, db, `DROP TABLE IF EXISTS app_lock`)\n}",
  "func (m *Migrator) MigrateSchemaDownFully(ctx context.Context, schema string) error {\n\ttx, deferFunc, err := m.migratePreamble(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer deferFunc(err)\n\terr = m.migrateDownFully(ctx, schema, false, tx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = tx.Commit(ctx)\n\treturn err\n}",
  "func RemoveSchema(connDetail ConnectionDetails, schemaName string) error {\n\n\tvar db *sql.DB\n\tvar err error\n\n\tif db, err = connect(connDetail); err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\t_, err = db.Exec(fmt.Sprintf(\"DROP SCHEMA IF EXISTS %s CASCADE;\", schemaName))\n\treturn err\n}",
  "func ClearSchema(p interface{}) {\n\tschema := p.(*Schema)\n\tschema.Indices = make(map[string]*Index)\n\n\ttemp2 := schema.Triggers\n\tschema.Triggers = make(map[string]*Trigger)\n\tfor _, t := range temp2 {\n\t\t(*sqlite3)(nil).DeleteTrigger(t)\n\t}\n\n\ttemp1 := schema.Tables\n\tschema.Tables = make(map[string]*Table)\n\tfor _, t := range temp1 {\n\t\t(*sqlite3)(nil).DeleteTable(0, t)\n\t}\n\n\tschema.ForeignKeys = make(map[string]*ForeignKey)\n\tschema.pSeqTab = nil\n\tif schema.flags & DB_SchemaLoaded {\n\t\tschema.iGeneration++\n\t\tschema.flags &= ~DB_SchemaLoaded\n\t}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Deprecated: Use SumSaleValuesRequest.ProtoReflect.Descriptor instead. 
 | 
	func (*SumSaleValuesRequest) Descriptor() ([]byte, []int) {
	return file_sale_proto_rawDescGZIP(), []int{0}
} 
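
The deprecation note points at the protoreflect API; a minimal sketch of the replacement call, assuming the generated package is imported as pb (the descriptor is typed instead of raw bytes plus an index path):

var req pb.SumSaleValuesRequest
md := req.ProtoReflect().Descriptor()
fmt.Println(md.FullName()) // e.g. "sale.SumSaleValuesRequest"; the proto package name is assumed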
 | 
	[
  "func (*SumSaleValuesResponse) Descriptor() ([]byte, []int) {\n\treturn file_sale_proto_rawDescGZIP(), []int{1}\n}",
  "func (*SumRequest) Descriptor() ([]byte, []int) {\n\treturn file_calculator_calculatorpb_calculator_proto_rawDescGZIP(), []int{0}\n}",
  "func (*SumRequest) Descriptor() ([]byte, []int) {\n\treturn file_calculatorpb_calculator_proto_rawDescGZIP(), []int{0}\n}",
  "func (*GetSaleOrderRequest) Descriptor() ([]byte, []int) {\n\treturn file_sandbox_sales_v1_proto_rawDescGZIP(), []int{0}\n}",
  "func (*GetSaleOrdersRequest) Descriptor() ([]byte, []int) {\n\treturn file_sandbox_sales_v1_proto_rawDescGZIP(), []int{2}\n}",
  "func (*CreateSaleOrderRequest) Descriptor() ([]byte, []int) {\n\treturn file_sandbox_sales_v1_proto_rawDescGZIP(), []int{4}\n}",
  "func (*MetricsRequest) Descriptor() ([]byte, []int) {\n\treturn file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{11}\n}",
  "func (*PriceRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_grpc_examples_wallet_stats_stats_proto_rawDescGZIP(), []int{0}\n}",
  "func (*DeviceDecommissioningStreamRequest) Descriptor() ([]byte, []int) {\n\treturn file_arista_inventory_v1_services_gen_proto_rawDescGZIP(), []int{6}\n}",
  "func (*SVRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_drkey_mgmt_v1_mgmt_proto_rawDescGZIP(), []int{0}\n}",
  "func (*MetricsServiceRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{18}\n}",
  "func (*ServicesDistinctValuesReq) Descriptor() ([]byte, []int) {\n\treturn file_external_applications_applications_proto_rawDescGZIP(), []int{1}\n}",
  "func (*ValuesRequest) Descriptor() ([]byte, []int) {\n\treturn file_parca_query_v1alpha1_query_proto_rawDescGZIP(), []int{28}\n}",
  "func (*StreamingReadFeatureValuesRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1_featurestore_online_service_proto_rawDescGZIP(), []int{5}\n}",
  "func (*LinkedPropertyValuesRequest) Descriptor() ([]byte, []int) {\n\treturn file_v1_property_values_proto_rawDescGZIP(), []int{5}\n}",
  "func (*ListNodeMetricsRequest) Descriptor() ([]byte, []int) {\n\treturn file_alameda_api_v1alpha1_datahub_metrics_services_proto_rawDescGZIP(), []int{14}\n}",
  "func (*AddRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_calculator_proto_calc_proto_rawDescGZIP(), []int{0}\n}",
  "func (*ReadFeatureValuesRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1_featurestore_online_service_proto_rawDescGZIP(), []int{3}\n}",
  "func (RequestPnLPositionUpdates_Request) EnumDescriptor() ([]byte, []int) {\n\treturn file_request_pnl_position_updates_proto_rawDescGZIP(), []int{0, 0}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Deprecated: Use SumSaleValuesResponse.ProtoReflect.Descriptor instead. 
 | 
	func (*SumSaleValuesResponse) Descriptor() ([]byte, []int) {
	return file_sale_proto_rawDescGZIP(), []int{1}
} 
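
For reference, the two return values are legacy artifacts: gzipped FileDescriptorProto bytes for sale.proto, plus an index path ([]int{1} selects the second top-level message). A sketch of decoding the raw bytes by hand, using bytes, compress/gzip, io, google.golang.org/protobuf/proto, and google.golang.org/protobuf/types/descriptorpb:

func decodeRawDesc(gz []byte) (*descriptorpb.FileDescriptorProto, error) {
	// The deprecated Descriptor() returns a gzipped, serialized FileDescriptorProto.
	r, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		return nil, err
	}
	raw, err := io.ReadAll(r)
	if err != nil {
		return nil, err
	}
	fd := &descriptorpb.FileDescriptorProto{}
	if err := proto.Unmarshal(raw, fd); err != nil {
		return nil, err
	}
	return fd, nil
}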
 | 
	[
  "func (*SumSaleValuesRequest) Descriptor() ([]byte, []int) {\n\treturn file_sale_proto_rawDescGZIP(), []int{0}\n}",
  "func (*ItemSaleDayStatsResponse) Descriptor() ([]byte, []int) {\n\treturn file_itemSaleStatsService_proto_rawDescGZIP(), []int{3}\n}",
  "func (*GetOnlineFeaturesResponse_FieldValues) Descriptor() ([]byte, []int) {\n\treturn file_feast_serving_ServingService_proto_rawDescGZIP(), []int{6, 0}\n}",
  "func (*ItemSaleStatsInfoResponse) Descriptor() ([]byte, []int) {\n\treturn file_itemSaleStatsService_proto_rawDescGZIP(), []int{5}\n}",
  "func (*ItemSaleStatsResponse) Descriptor() ([]byte, []int) {\n\treturn file_itemSaleStatsService_proto_rawDescGZIP(), []int{1}\n}",
  "func (*GetSaleOrderResponse) Descriptor() ([]byte, []int) {\n\treturn file_sandbox_sales_v1_proto_rawDescGZIP(), []int{1}\n}",
  "func (*GetSaleOrdersResponse) Descriptor() ([]byte, []int) {\n\treturn file_sandbox_sales_v1_proto_rawDescGZIP(), []int{3}\n}",
  "func (*ItemSaleDayStats) Descriptor() ([]byte, []int) {\n\treturn file_itemSaleStatsService_proto_rawDescGZIP(), []int{2}\n}",
  "func (*ItemSaleStatsInfo) Descriptor() ([]byte, []int) {\n\treturn file_itemSaleStatsService_proto_rawDescGZIP(), []int{4}\n}",
  "func (*ItemSaleStats) Descriptor() ([]byte, []int) {\n\treturn file_itemSaleStatsService_proto_rawDescGZIP(), []int{0}\n}",
  "func (*CreateSaleOrderResponse) Descriptor() ([]byte, []int) {\n\treturn file_sandbox_sales_v1_proto_rawDescGZIP(), []int{5}\n}",
  "func (ThirdPartyPaymentType) EnumDescriptor() ([]byte, []int) {\n\treturn file_api_proto_global_Global_proto_rawDescGZIP(), []int{19}\n}",
  "func (*GetSaleOrderRequest) Descriptor() ([]byte, []int) {\n\treturn file_sandbox_sales_v1_proto_rawDescGZIP(), []int{0}\n}",
  "func (*MetricsRequest) Descriptor() ([]byte, []int) {\n\treturn file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{11}\n}",
  "func (*SVResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_drkey_mgmt_v1_mgmt_proto_rawDescGZIP(), []int{1}\n}",
  "func (*DeviceDecommissioningStreamResponse) Descriptor() ([]byte, []int) {\n\treturn file_arista_inventory_v1_services_gen_proto_rawDescGZIP(), []int{7}\n}",
  "func (*GetSaleOrdersRequest) Descriptor() ([]byte, []int) {\n\treturn file_sandbox_sales_v1_proto_rawDescGZIP(), []int{2}\n}",
  "func (*ListNodeMetricsResponse) Descriptor() ([]byte, []int) {\n\treturn file_alameda_api_v1alpha1_datahub_metrics_services_proto_rawDescGZIP(), []int{15}\n}",
  "func (*SVRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_drkey_mgmt_v1_mgmt_proto_rawDescGZIP(), []int{0}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	Send sends the webpush request with the push subscription. 
 | 
	func (c *PushServiceClient) Send(subscription *pb.PushSubscription, request *pb.WebpushRequest) (*WebpushResponse, error) {
	content, err := c.encrypt(subscription, request)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest("POST", subscription.Endpoint, bytes.NewReader(content))
	if err != nil {
		return nil, err
	}
	req.Header.Add("TTL", "30")
	req.Header.Add("Content-Encoding", "aes128gcm")
	subject := "mailto:nokamoto.engr@gmail.com"
	expiry := time.Now().Add(12 * time.Hour).Unix()
	addAuthorizationHeader(req, subscription.Endpoint, subject, expiry, c.KeyPair)
	res, err := c.Client.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	b, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	retryAfter, err := parseRetryAfter(time.Now(), &res.Header)
	if err != nil {
		// todo: a malformed Retry-After header is silently ignored here,
		// leaving retryAfter at its zero value.
	}
	wr := &WebpushResponse{
		Status:     res.Status,
		StatusCode: res.StatusCode,
		Text:       string(b),
		RetryAfter: retryAfter,
	}
	return wr, nil
} 
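
parseRetryAfter is referenced above but not shown; RFC 7231 allows Retry-After to be either delta-seconds or an HTTP-date, so a plausible sketch looks like the following (not necessarily the author's implementation; the time.Duration return type is assumed from the struct assignment):

func parseRetryAfter(now time.Time, h *http.Header) (time.Duration, error) {
	v := h.Get("Retry-After")
	if v == "" {
		return 0, nil
	}
	// Delta-seconds form, e.g. "Retry-After: 120".
	if secs, err := strconv.Atoi(v); err == nil {
		return time.Duration(secs) * time.Second, nil
	}
	// HTTP-date form, e.g. "Retry-After: Wed, 21 Oct 2015 07:28:00 GMT".
	if t, err := http.ParseTime(v); err == nil {
		return t.Sub(now), nil
	}
	return 0, fmt.Errorf("unparseable Retry-After header: %q", v)
}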
 | 
	[
  "func PushToWeb(req PushNotification) bool {\n\tLogAccess.Debug(\"Start push notification for Web\")\n\n\tvar retryCount = 0\n\tvar maxRetry = PushConf.Web.MaxRetry\n\n\tif req.Retry > 0 && req.Retry < maxRetry {\n\t\tmaxRetry = req.Retry\n\t}\n\n\t// check message\n\terr := CheckMessage(req)\n\n\tif err != nil {\n\t\tLogError.Error(\"request error: \" + err.Error())\n\t\treturn false\n\t}\n\n\tvar apiKey = PushConf.Web.APIKey\n\tif req.APIKey != \"\" {\n\t\tapiKey = req.APIKey\n\t}\n\nRetry:\n\tvar isError = false\n\n\tsuccessCount := 0\n\tfailureCount := 0\n\n\tfor _, subscription := range req.Subscriptions {\n\t\tnotification := getWebNotification(req, &subscription)\n\t\tresponse, err := WebClient.Push(notification, apiKey)\n\n\t\tif err != nil {\n\t\t\tisError = true\n\t\t\tfailureCount++\n\t\t\tLogPush(FailedPush, subscription.Endpoint, req, err)\n\t\t\tif PushConf.Core.Sync {\n\t\t\t\tif response == nil {\n\t\t\t\t\treq.AddLog(getLogPushEntry(FailedPush, subscription.Endpoint, req, err))\n\t\t\t\t} else {\n\t\t\t\t\tvar errorText = response.Body\n\t\t\t\t\t/*var browser web.Browser\n\t\t\t\t\tvar found = false\n\t\t\t\t\tfor _, current := range web.Browsers {\n\t\t\t\t\t\tif current.ReDetect.FindString(subscription.Endpoint) != \"\" {\n\t\t\t\t\t\t\tbrowser = current\n\t\t\t\t\t\t\tfound = true\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif found {\n\t\t\t\t\t\tmatch := browser.ReError.FindStringSubmatch(errorText)\n\t\t\t\t\t\tif match != nil && len(match) > 1 && match[1] != \"\" {\n\t\t\t\t\t\t\terrorText = match[1]\n\t\t\t\t\t\t}\n\t\t\t\t\t}*/\n\t\t\t\t\terrorText = strconv.Itoa(response.StatusCode)\n\t\t\t\t\tvar errorObj = errors.New(errorText)\n\t\t\t\t\treq.AddLog(getLogPushEntry(FailedPush, subscription.Endpoint, req, errorObj))\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tisError = false\n\t\t\tsuccessCount++\n\t\t\tLogPush(SucceededPush, subscription.Endpoint, req, nil)\n\t\t}\n\t}\n\n\tLogAccess.Debug(fmt.Sprintf(\"Web Success count: %d, Failure count: %d\", successCount, failureCount))\n\tStatStorage.AddWebSuccess(int64(successCount))\n\tStatStorage.AddWebError(int64(failureCount))\n\n\tif isError && retryCount < maxRetry {\n\t\tretryCount++\n\n\t\tgoto Retry\n\t}\n\n\treturn isError\n}",
  "func sendPush(apiKey string, name string, url string, newStatus string, oldStatus string) {\n\tlogging.MustGetLogger(\"\").Debug(\"Sending Push about \\\"\" + url + \"\\\"...\")\n\n\tpb := pushbullet.New(apiKey)\n\n\tpush := requests.NewLink()\n\tpush.Title = GetConfiguration().Application.Title + \" - Status Change\"\n\tpush.Body = name + \" went from \\\"\" + oldStatus + \"\\\" to \\\"\" + newStatus + \"\\\".\"\n\tpush.Url = url\n\n\t_, err := pb.PostPushesLink(push)\n\tif err != nil {\n\t\tlogging.MustGetLogger(\"\").Error(\"Unable to send Push: \", err)\n\t}\n}",
  "func (h *HitBTC) wsSend(data interface{}) error {\n\th.wsRequestMtx.Lock()\n\tdefer h.wsRequestMtx.Unlock()\n\tjson, err := common.JSONEncode(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif h.Verbose {\n\t\tlog.Debugf(\"%v sending message to websocket %v\", h.Name, data)\n\t}\n\treturn h.WebsocketConn.WriteMessage(websocket.TextMessage, json)\n}",
  "func (session *session) webSocketSend(response configs.WsMessage) error {\n\tsession.writeMutex.Lock()\n\tdefer session.writeMutex.Unlock()\n\tresponse.Timestamp = time.Now().UnixNano() / 1000000\n\tresponse.SessionID = session.sessionID\n\n\treturn session.ws.WriteJSON(response)\n}",
  "func SendWebhook(wh data.WebhookServices, event string, data interface{}) {\n\theaders := make(map[string]string)\n\theaders[\"X-Webhook-Event\"] = event\n\n\tsubscribers, err := wh.AllSubscriptions(event)\n\tif err != nil {\n\t\tlog.Println(\"unable to get webhook subscribers for \", event)\n\t\treturn\n\t}\n\n\tfor _, sub := range subscribers {\n\t\tgo func(sub model.Webhook, headers map[string]string) {\n\t\t\tif err := post(sub.TargetURL, data, nil, headers); err != nil {\n\t\t\t\tlog.Println(\"error calling URL\", sub.TargetURL, err)\n\t\t\t}\n\t\t}(sub, headers)\n\t}\n}",
  "func (cli *Client) Push(c context.Context, p *Payload) error {\n\treturn cli.do(http.MethodPost, \"/push\", p)\n}",
  "func PushToHuawei(req *PushNotification, cfg *config.ConfYaml) (resp *ResponsePush, err error) {\n\tlogx.LogAccess.Debug(\"Start push notification for Huawei\")\n\n\tvar (\n\t\tclient     *client.HMSClient\n\t\tretryCount = 0\n\t\tmaxRetry   = cfg.Huawei.MaxRetry\n\t)\n\n\tif req.Retry > 0 && req.Retry < maxRetry {\n\t\tmaxRetry = req.Retry\n\t}\n\n\t// check message\n\terr = CheckMessage(req)\n\tif err != nil {\n\t\tlogx.LogError.Error(\"request error: \" + err.Error())\n\t\treturn nil, err\n\t}\n\n\tclient, err = InitHMSClient(cfg, cfg.Huawei.AppSecret, cfg.Huawei.AppID)\n\n\tif err != nil {\n\t\t// HMS server error\n\t\tlogx.LogError.Error(\"HMS server error: \" + err.Error())\n\t\treturn nil, err\n\t}\n\n\tresp = &ResponsePush{}\n\nRetry:\n\tisError := false\n\n\tnotification, _ := GetHuaweiNotification(req)\n\n\tres, err := client.SendMessage(context.Background(), notification)\n\tif err != nil {\n\t\t// Send Message error\n\t\terrLog := logPush(cfg, core.FailedPush, req.To, req, err)\n\t\tresp.Logs = append(resp.Logs, errLog)\n\t\tlogx.LogError.Error(\"HMS server send message error: \" + err.Error())\n\t\treturn resp, err\n\t}\n\n\t// Huawei Push Send API does not support exact results for each token\n\tif res.Code == \"80000000\" {\n\t\tstatus.StatStorage.AddHuaweiSuccess(int64(1))\n\t\tlogx.LogAccess.Debug(\"Huwaei Send Notification is completed successfully!\")\n\t} else {\n\t\tisError = true\n\t\tstatus.StatStorage.AddHuaweiError(int64(1))\n\t\tlogx.LogAccess.Debug(\"Huawei Send Notification is failed! Code: \" + res.Code)\n\t}\n\n\tif isError && retryCount < maxRetry {\n\t\tretryCount++\n\n\t\t// resend all tokens\n\t\tgoto Retry\n\t}\n\n\treturn resp, nil\n}",
  "func (s UniqushSubscriber) Push(message string) (response string, err error) {\n\n\tendpointUrl := fmt.Sprintf(\"%s/push\", s.UniqushService.UniqushClient.UniqushURL)\n\tformValues := url.Values{\n\t\t\"service\":    {s.UniqushService.Name},\n\t\t\"subscriber\": {s.Name},\n\t\t\"msg\":        {message},\n\t}\n\n\tresp, err := http.PostForm(endpointUrl, formValues)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error in form POST sending push to: %+v. Error: %v\", s, err)\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error reaading response body sending push to: %+v. Error: %v\", s, err)\n\t}\n\n\treturn string(body), nil\n\n}",
  "func push(w http.ResponseWriter, r *http.Request) {\n\tfun := \"rest.push\"\n\t//debug_show_request(r)\n\tif r.Method != \"POST\" {\n\t\twriteRestErr(w, \"method err\")\n\t\treturn\n\t}\n\n\tslog.Infof(\"%s %s\", fun, r.URL.Path)\n\tpath := strings.Split(r.URL.Path, \"/\")\n\t//slog.Info(\"%q\", path)\n\n\tif len(path) != 5 {\n\t\twriteRestErr(w, \"uri err\")\n\t\treturn\n\t}\n\n\t// path[0] \"\", path[1] push\n\tclientid := path[2]\n\n\tziptype, err := strconv.Atoi(path[3])\n\tif err != nil {\n\t\twriteRestErr(w, \"ziptype err\")\n\t\treturn\n\t}\n\n\tdatatype, err := strconv.Atoi(path[4])\n\tif err != nil {\n\t\twriteRestErr(w, \"datatype err\")\n\t\treturn\n\t}\n\n\tdata, err := ioutil.ReadAll(r.Body);\n\tif err != nil {\n\t\twriteRestErr(w, \"data err\")\n\t\treturn\n\t}\n\n\tif len(data) == 0 {\n\t\twriteRestErr(w, \"data empty\")\n\t\treturn\n\t}\n\n\n\tmsgid, link := connman.Send(clientid, int32(ziptype), int32(datatype), data)\n\tslog.Debugf(\"%s msgid:%d link:%s\", fun, msgid, link)\n\tjs, _ := json.Marshal(&RestReturn{Code: 0, Msgid: msgid, Link: link})\n\tfmt.Fprintf(w, \"%s\", js)\n\n\n}",
  "func (c *Client)Push(data string) error{\n\treturn c.con.SendData([]byte(data))\n}",
  "func (r PutSubscriptionFilterRequest) Send(ctx context.Context) (*PutSubscriptionFilterResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &PutSubscriptionFilterResponse{\n\t\tPutSubscriptionFilterOutput: r.Request.Data.(*PutSubscriptionFilterOutput),\n\t\tresponse:                    &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}",
  "func (gs *GRPCClient) SendPush(userID string, frontendSv *Server, push *protos.Push) error {\n\tvar svID string\n\tvar err error\n\tif frontendSv.ID != \"\" {\n\t\tsvID = frontendSv.ID\n\t} else {\n\t\tif gs.bindingStorage == nil {\n\t\t\treturn constants.ErrNoBindingStorageModule\n\t\t}\n\t\tsvID, err = gs.bindingStorage.GetUserFrontendID(userID, frontendSv.Type)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif c, ok := gs.clientMap.Load(svID); ok {\n\t\tctxT, done := context.WithTimeout(context.Background(), gs.reqTimeout)\n\t\tdefer done()\n\t\terr := c.(*grpcClient).pushToUser(ctxT, push)\n\t\treturn err\n\t}\n\treturn constants.ErrNoConnectionToServer\n}",
  "func (c *Client) Push(n *Notification, signer RequestSigner, ctx context.Context, callback chan<- *Result) error {\n\tc.mu.RLock()\n\tstate := c.state\n\tc.mu.RUnlock()\n\tif state < stateStarting || state > stateRunning {\n\t\treturn ErrClientNotRunning\n\t}\n\t// Ensure that authentication is possible\n\tif c.Certificate == nil && (signer == NoSigner || !c.HasSigner() && signer == DefaultSigner) {\n\t\treturn ErrMissingAuth\n\t}\n\t// Everything else is done asynchronously\n\treq := &Request{\n\t\tNotification: n,\n\t\tSigner:       signer,\n\t\tContext:      ctx,\n\t\tCallback:     callback,\n\t}\n\terr := c.submit(req)\n\treturn err\n}",
  "func SendPushNotification(userID uint32, p PushNotification) error {\n\n\tvar l = logger.WithFields(logrus.Fields{\n\t\t\"method\":        \"Sending push notification\",\n\t\t\"param_data\":    p,\n\t\t\"param_user_id\": userID,\n\t})\n\n\tl.Infof(\"Sending push notifications to the users\")\n\n\tdb := getDB()\n\n\tif p.LogoUrl == \"\" {\n\t\tp.LogoUrl = fmt.Sprintf(\"/public/src/images/dalalfavicon.png\")\n\t}\n\n\tvar subscriptions []UserSubscription\n\n\tif userID == 0 {\n\t\t// broadcast notif\n\t\tl.Infof(\"A broadcast notification was requested\")\n\t\tif err := db.Table(\"UserSubscription\").Find(&subscriptions).Error; err != nil {\n\t\t\tl.Errorf(\"Error while finding the user subscription, %+v\", err)\n\t\t\treturn err\n\t\t}\n\t\tl.Debugf(\"Found a total of %v subscriptions were found\", len(subscriptions))\n\t} else {\n\t\t// single notif\n\t\tl.Infof(\"Notification for a specific user was requested\")\n\t\tif err := db.Table(\"UserSubscription\").Where(\"userId = ?\", userID).Find(&subscriptions).Error; err != nil {\n\t\t\tl.Errorf(\"Error while finding the user subscription, %+v\", err)\n\t\t\treturn err\n\t\t}\n\t\tl.Debugf(\"Found a total of %v subscriptions for the user\", len(subscriptions))\n\t}\n\n\tfor i, sub := range subscriptions {\n\t\tl.Debugf(\"Sending notif to the %v-th subscription, %+v\", i, sub)\n\t\tmessage, err := json.Marshal(p)\n\t\tif err != nil {\n\t\t\tl.Errorf(\"Error while marshalling payload, %+v . Error, %+v\", p, err)\n\t\t}\n\t\tresp, err := sendPushNotification(message, &sub, &options{\n\t\t\tSubscriber:      config.PushNotificationEmail,\n\t\t\tVAPIDPublicKey:  config.PushNotificationVAPIDPublicKey,\n\t\t\tVAPIDPrivateKey: config.PushNotificationVAPIDPrivateKey,\n\t\t})\n\t\tif err != nil {\n\t\t\tl.Errorf(\"Couldn't send notification to the subscription, %+v. Error : %+v\", sub, err)\n\t\t}\n\t\tdefer resp.Body.Close()\n\t}\n\tl.Infof(\"Successfully sent push notification to the user\")\n\n\treturn nil\n}",
  "func publish(r *http.Request) {\n\tr.ParseForm()\n\tmessage := r.Form.Get(\"message\")\n\thubURL := r.Form.Get(\"hub.url\")\n\tmessageJSON := []byte(`{\"message\":\"` + message + `\"}`)\n\n\tfor cb, sub := range subscriptions {\n\t\tif sub.topic == hubURL {\n\t\t\tsignature := encryptHMAC(messageJSON, sub.secret)\n\t\t\treq, err := http.NewRequest(\"POST\", cb, bytes.NewBuffer(messageJSON))\n\t\t\treq.Header.Set(\"X-Hub-Signature\", \"sha256=\"+signature)\n\t\t\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\t\t\tclient := &http.Client{}\n\t\t\tresp, err := client.Do(req)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"client: \"+cb, err)\n\t\t\t\tfmt.Println(\"client: \", resp.Status)\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t}\n}",
  "func SubModPush(w http.ResponseWriter, r *http.Request) {\n\n\t// Init output\n\toutput := []byte(\"\")\n\n\t// Add content type header to the response\n\tcontentType := \"application/json\"\n\tcharset := \"utf-8\"\n\tw.Header().Add(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\t// Grab url path variables\n\turlVars := mux.Vars(r)\n\tsubName := urlVars[\"subscription\"]\n\n\t// Get project UUID First to use as reference\n\tprojectUUID := gorillaContext.Get(r, \"auth_project_uuid\").(string)\n\n\t// Grab context references\n\trefStr := gorillaContext.Get(r, \"str\").(stores.Store)\n\n\t// Read POST JSON body\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\terr := APIErrorInvalidRequestBody()\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Parse pull options\n\tpostBody, err := subscriptions.GetFromJSON(body)\n\tif err != nil {\n\t\tAPIErrorInvalidArgument(\"Subscription\")\n\t\tlog.Error(string(body[:]))\n\t\treturn\n\t}\n\n\tpushEnd := \"\"\n\trPolicy := \"\"\n\trPeriod := 0\n\tvhash := \"\"\n\tverified := false\n\tmaxMessages := int64(0)\n\tpushWorker := auth.User{}\n\tpwToken := gorillaContext.Get(r, \"push_worker_token\").(string)\n\n\tif postBody.PushCfg != (subscriptions.PushConfig{}) {\n\n\t\tpushEnabled := gorillaContext.Get(r, \"push_enabled\").(bool)\n\n\t\t// check the state of the push functionality\n\t\tif !pushEnabled {\n\t\t\terr := APIErrorPushConflict()\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tpushWorker, err = auth.GetPushWorker(pwToken, refStr)\n\t\tif err != nil {\n\t\t\terr := APIErrInternalPush()\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tpushEnd = postBody.PushCfg.Pend\n\t\t// Check if push endpoint is not a valid https:// endpoint\n\t\tif !(isValidHTTPS(pushEnd)) {\n\t\t\terr := APIErrorInvalidData(\"Push endpoint should be addressed by a valid https url\")\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\t\trPolicy = postBody.PushCfg.RetPol.PolicyType\n\t\trPeriod = postBody.PushCfg.RetPol.Period\n\t\tmaxMessages = postBody.PushCfg.MaxMessages\n\n\t\tif rPolicy == \"\" {\n\t\t\trPolicy = subscriptions.LinearRetryPolicyType\n\t\t}\n\t\tif rPeriod <= 0 {\n\t\t\trPeriod = 3000\n\t\t}\n\n\t\tif !subscriptions.IsRetryPolicySupported(rPolicy) {\n\t\t\terr := APIErrorInvalidData(subscriptions.UnSupportedRetryPolicyError)\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Get Result Object\n\tres, err := subscriptions.Find(projectUUID, \"\", subName, \"\", 0, refStr)\n\n\tif err != nil {\n\t\terr := APIErrGenericBackend()\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\tif res.Empty() {\n\t\terr := APIErrorNotFound(\"Subscription\")\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\texistingSub := res.Subscriptions[0]\n\n\tif maxMessages == 0 {\n\t\tif existingSub.PushCfg.MaxMessages == 0 {\n\t\t\tmaxMessages = int64(1)\n\t\t} else {\n\t\t\tmaxMessages = existingSub.PushCfg.MaxMessages\n\t\t}\n\t}\n\n\t// if the request wants to transform a pull subscription to a push one\n\t// we need to begin the verification process\n\tif postBody.PushCfg != (subscriptions.PushConfig{}) {\n\n\t\t// if the endpoint in not the same with the old one, we need to verify it again\n\t\tif postBody.PushCfg.Pend != existingSub.PushCfg.Pend {\n\t\t\tvhash, err = auth.GenToken()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Could not generate verification hash for subscription %v, %v\", urlVars[\"subscription\"], err.Error())\n\t\t\t\terr := APIErrGenericInternal(\"Could not generate verification hash\")\n\t\t\t\trespondErr(w, 
err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// else keep the already existing data\n\t\t} else {\n\t\t\tvhash = existingSub.PushCfg.VerificationHash\n\t\t\tverified = existingSub.PushCfg.Verified\n\t\t}\n\t}\n\n\terr = subscriptions.ModSubPush(projectUUID, subName, pushEnd, maxMessages, rPolicy, rPeriod, vhash, verified, refStr)\n\n\tif err != nil {\n\t\tif err.Error() == \"not found\" {\n\t\t\terr := APIErrorNotFound(\"Subscription\")\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\t\terr := APIErrGenericInternal(err.Error())\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// if this is an deactivate request, try to retrieve the push worker in order to remove him from the sub's acl\n\tif existingSub.PushCfg != (subscriptions.PushConfig{}) && postBody.PushCfg == (subscriptions.PushConfig{}) {\n\t\tpushWorker, _ = auth.GetPushWorker(pwToken, refStr)\n\t}\n\n\t// if the sub, was push enabled before the update and the endpoint was verified\n\t// we need to deactivate it on the push server\n\tif existingSub.PushCfg != (subscriptions.PushConfig{}) {\n\t\tif existingSub.PushCfg.Verified {\n\t\t\t// deactivate the subscription on the push backend\n\t\t\tapsc := gorillaContext.Get(r, \"apsc\").(push.Client)\n\t\t\tapsc.DeactivateSubscription(context.TODO(), existingSub.FullName)\n\n\t\t\t// remove the push worker user from the sub's acl\n\t\t\terr = auth.RemoveFromACL(projectUUID, \"subscriptions\", existingSub.Name, []string{pushWorker.Name}, refStr)\n\t\t\tif err != nil {\n\t\t\t\terr := APIErrGenericInternal(err.Error())\n\t\t\t\trespondErr(w, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\t// if the update on push configuration is not intended to stop the push functionality\n\t// activate the subscription with the new values\n\tif postBody.PushCfg != (subscriptions.PushConfig{}) {\n\n\t\t// reactivate only if the push endpoint hasn't changed and it wes already verified\n\t\t// otherwise we need to verify the ownership again before wee activate it\n\t\tif postBody.PushCfg.Pend == existingSub.PushCfg.Pend && existingSub.PushCfg.Verified {\n\n\t\t\t// activate the subscription on the push backend\n\t\t\tapsc := gorillaContext.Get(r, \"apsc\").(push.Client)\n\t\t\tapsc.ActivateSubscription(context.TODO(), existingSub.FullName, existingSub.FullTopic,\n\t\t\t\tpushEnd, rPolicy, uint32(rPeriod), maxMessages)\n\n\t\t\t// modify the sub's acl with the push worker's uuid\n\t\t\terr = auth.AppendToACL(projectUUID, \"subscriptions\", existingSub.Name, []string{pushWorker.Name}, refStr)\n\t\t\tif err != nil {\n\t\t\t\terr := APIErrGenericInternal(err.Error())\n\t\t\t\trespondErr(w, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// link the sub's project with the push worker\n\t\t\terr = auth.AppendToUserProjects(pushWorker.UUID, projectUUID, refStr)\n\t\t\tif err != nil {\n\t\t\t\terr := APIErrGenericInternal(err.Error())\n\t\t\t\trespondErr(w, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\t// Write empty response if everything's ok\n\trespondOK(w, output)\n}",
  "func (c *gcmSimpleClient) SendHTTP(m HTTPMessage) (*HTTPResponse, error) {\n\treturn c.httpClient.Send(m)\n}",
  "func (s *streamTx) PushRequest(req *http.Request, extra *RequestExtra) (resp *http.Response, err error) {\n\treturn s.connection.startRequest((*stream)(s), req, extra)\n}",
  "func (minion Minion) Send(message Message) {\n\t// minion.outgoing <- message\n\terr := websocket.JSON.Send(minion.websocket, message)\n\tif err != nil {\n\t\tminion.done <- true\n\t}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	GetIllegalLoginUsersAndIp Main purpose is to detect employees who have not logged in from the office for 30 days 
 | 
	func (action ReportAction) GetIllegalLoginUsersAndIp(activities []*admin.Activity, officeIPs []string) error {
	data := make(map[string]*LoginInformation)
	for _, activity := range activities {
		email := activity.Actor.Email
		ip := activity.IpAddress
		if value, ok := data[email]; ok {
			if !value.OfficeLogin {
				// If a user has so far only logged in from unverified IPs,
				// check whether the new IP is an office IP.
				value.OfficeLogin = containIP(officeIPs, ip)
			}
			value.LoginIPs = append(value.LoginIPs, ip)
		} else {
			data[email] = &LoginInformation{
				email,
				containIP(officeIPs, ip),
				[]string{ip}}
		}
	}
	for key, value := range data {
		if !value.OfficeLogin {
			fmt.Println(key)
			fmt.Print("     IP: ")
			fmt.Println(value.LoginIPs)
		}
	}
	return nil
} 
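
containIP is used above but not shown; a minimal sketch under the assumption that office IPs are listed as exact dotted-quad strings (plain membership test, no CIDR matching):

func containIP(ips []string, ip string) bool {
	for _, candidate := range ips {
		if candidate == ip {
			return true
		}
	}
	return false
}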
 | 
	[
  "func IsLimitedAccess(ipaddr string, ref db.DBClient) bool {\n\tresults, err := ref.Fetch(ipaddr)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tjst, _ := time.LoadLocation(\"Asia/Tokyo\")\n\tnow := time.Now().In(jst)\n\tisLimited := false\n\tfor _, r := range results {\n\t\tvar u User\n\t\tif err := r.Unmarshal(&u); err != nil {\n\t\t\tlog.Fatalln(\"Error unmarshaling result:\", err)\n\t\t}\n\t\tintervalTime := getIntervalTime(u.Amount)\n\n\t\tdbTime, _ := time.Parse(\"2006-01-02 15:04:05 -0700 MST\", u.Time)\n\t\tdbTime = dbTime.Add(time.Duration(intervalTime) * time.Hour)\n\t\tisLimited = now.Unix() < dbTime.Unix()\n\t}\n\n\treturn isLimited\n}",
  "func internalIPs(node corev1.Node) sets.String {\n\tips := sets.NewString()\n\t// check the node.Status.Addresses\n\tfor _, address := range node.Status.Addresses {\n\t\tif address.Type == \"InternalIP\" {\n\t\t\tips.Insert(address.Address)\n\t\t}\n\t}\n\treturn ips\n}",
  "func (f *Finding) BadIPs() []string {\n\treturn f.badNetwork.JSONPayload.Properties.IP\n}",
  "func extractNodesInternalIps(nodes []*v1.Node) []string {\n\tvar nodesList []string\n\tfor _, node := range nodes {\n\t\tfor _, address := range node.Status.Addresses {\n\t\t\tif address.Type == v1.NodeInternalIP {\n\t\t\t\tnodesList = append(nodesList, address.Address)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nodesList\n}",
  "func (c *certRotateFlow) getAllIPs(infra *AutomateHAInfraDetails) []string {\n\tips := append(infra.Outputs.AutomatePrivateIps.Value, infra.Outputs.ChefServerPrivateIps.Value...)\n\tips = append(ips, infra.Outputs.PostgresqlPrivateIps.Value...)\n\tips = append(ips, infra.Outputs.OpensearchPrivateIps.Value...)\n\treturn ips\n}",
  "func internalIP(node corev1.Node) string {\n\tfor _, address := range node.Status.Addresses {\n\t\tif address.Type == \"InternalIP\" {\n\t\t\treturn address.Address\n\t\t}\n\t}\n\treturn \"\"\n}",
  "func (p *Provider) logIllegalServices(task marathon.Task, application marathon.Application) {\n\tfor _, serviceName := range p.getServiceNames(application) {\n\t\t// Check for illegal/missing ports.\n\t\tif _, err := p.processPorts(application, task, serviceName); err != nil {\n\t\t\tlog.Warnf(\"%s has an illegal configuration: no proper port available\", identifier(application, task, serviceName))\n\t\t\tcontinue\n\t\t}\n\n\t\t// Check for illegal port label combinations.\n\t\t_, hasPortLabel := p.getLabel(application, types.LabelPort, serviceName)\n\t\t_, hasPortIndexLabel := p.getLabel(application, types.LabelPortIndex, serviceName)\n\t\tif hasPortLabel && hasPortIndexLabel {\n\t\t\tlog.Warnf(\"%s has both port and port index specified; port will take precedence\", identifier(application, task, serviceName))\n\t\t}\n\t}\n}",
  "func hideMyIP(URL string) []string {\n\n\thmiRegExp := regexp.MustCompile(`<td>([0-9]{2,}.[0-9]{2,}.[0-9]{1,}.[0-9]{1,})</td><td>([0-9]{2,})</td><td>([A-Z]{2})</td>`)\n\n\tvar (\n\t\tbody    []byte\n\t\terr     error\n\t\tresults = make([]string, 0, 256)\n\t\tclient  = client.New(5 * time.Second)\n\t)\n\n\tif body, err = client.Read(URL); err != nil {\n\t\tlog.Printf(\"Error Requesting: %v\\n\", err)\n\t\treturn results\n\t}\n\n\tfor _, m := range hmiRegExp.FindAllSubmatch(body, -1) {\n\t\tresults = append(results, string(m[1])+\":\"+string(m[2]))\n\t}\n\treturn results\n}",
  "func UsersBusy(thismeet User) error {\r\n\tcollection := client.Database(\"appointytask\").Collection(\"users\")\r\n\tctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)\r\n\tdefer cancel()\r\n\tvar meet User\r\n\tfor _, thisperson := range thismeet.Users {\r\n\t\tif thisperson.Password == \"Yes\" {\r\n\t\t\tfilter := bson.M{\r\n\t\t\t\t\"users.email\": thisperson.Email,\r\n\t\t\t\t\"users.password\":  \"Yes\",\r\n\t\t\t\t\"endtime\":            bson.M{\"$gt\": string(time.Now().Format(time.RFC3339))},\r\n\t\t\t}\r\n\t\t\tcursor, _ := collection.Find(ctx, filter)\r\n\t\t\tfor cursor.Next(ctx) {\r\n\t\t\t\tcursor.Decode(&meet)\r\n\t\t\t\tif (thismeet.Starttime >= meet.Starttime && thismeet.Starttime <= meet.Endtime) ||\r\n\t\t\t\t\t(thismeet.Endtime >= meet.Starttime && thismeet.Endtime <= meet.Endtime) {\r\n\t\t\t\t\treturnerror := \"Error 400: User \" + thisperson.Name + \" Password Clash\"\r\n\t\t\t\t\treturn errors.New(returnerror)\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn nil\r\n}",
  "func (udp *UdpServer) getItfIps(itf *net.Interface) ([]net.IP, error) {\n    var ip net.IP\n    var ips []net.IP\n\n    addresses, err := itf.Addrs()\n    if err != nil {\n        return ips, err\n    }\n\n    for _, address := range addresses {\n        switch v := address.(type) {\n        case *net.IPNet:\n            ip = v.IP\n        case *net.IPAddr:\n            ip = v.IP\n        }\n    }\n\n    ip = ip.To4()\n    if ip != nil {\n        ips = append(ips, ip)\n    }\n\n    return ips, nil\n}",
  "func uniqLoginIPs(loginRecords []LoginData) []string {\n\tvar loginRecordMap = make(map[string]int)\n\tfor _, record := range loginRecords {\n\t\tloginRecordMap[strings.Split(record.RemoteIP, \":\")[0]] = 1\n\t}\n\n\tkeys := make([]string, 0, len(loginRecordMap))\n\tfor key, _ := range loginRecordMap {\n\t\tkeys = append(keys, key)\n\t}\n\treturn keys\n}",
  "func (n *Notifier) getCurrentDayNonReporters(channelID string) ([]model.StandupUser, error) {\n\ttimeFrom := time.Date(time.Now().Year(), time.Now().Month(), time.Now().Day(), 0, 0, 0, 0, time.UTC)\n\tnonReporters, err := n.DB.GetNonReporters(channelID, timeFrom, time.Now())\n\tif err != nil && err != errors.New(\"no rows in result set\") {\n\t\tlogrus.Errorf(\"notifier: GetNonReporters failed: %v\\n\", err)\n\t\treturn nil, err\n\t}\n\treturn nonReporters, nil\n}",
  "func getIPRangesForPort(isAllow bool, sg secGroup, myIP string, userName *string, port int64) (ipr []*ec2.IpRange) {\n\tif isAllow {\n\t\tif !strings.Contains(myIP, \"/\") {\n\t\t\tmyIP += \"/32\"\n\t\t}\n\t\tfor _, cidr := range sg.portToMyIPs[port] {\n\t\t\tif cidr == myIP {\n\t\t\t\tout.Highlight(out.WARN, \"skipping existing access for %s - IP %s to port %s in SG %s (%s)\", *userName, cidr, strconv.Itoa(int(port)), sg.name, sg.id)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tipr = append(ipr, &ec2.IpRange{\n\t\t\tCidrIp:      aws.String(myIP),\n\t\t\tDescription: aws.String(*userName),\n\t\t})\n\t} else {\n\t\tfor _, cidr := range sg.portToMyIPs[port] {\n\t\t\tipr = append(ipr, &ec2.IpRange{\n\t\t\t\tCidrIp:      aws.String(cidr),\n\t\t\t\tDescription: aws.String(*userName),\n\t\t\t})\n\t\t}\n\t}\n\treturn\n}",
  "func (u *MockUserRecord) NumLoginDays() int { return 0 }",
  "func extractNodesExternalIps(nodes []*v1.Node) []string {\n\tvar nodesList []string\n\tfor _, node := range nodes {\n\t\tfor _, address := range node.Status.Addresses {\n\t\t\tif address.Type == v1.NodeExternalIP {\n\t\t\t\tnodesList = append(nodesList, address.Address)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nodesList\n}",
  "func (r Hardware_Server) GetNetworkComponentFirewallProtectableIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Hardware_Server\", \"getNetworkComponentFirewallProtectableIpAddresses\", nil, &r.Options, &resp)\n\treturn\n}",
  "func getAllowedIP(ip string) []net.IPNet {\n\t_, ipnet, err := net.ParseCIDR(\"0.0.0.0/32\")\n\tcheck(err)\n\n\tnetwork := *ipnet\n\tnetwork.IP = net.ParseIP(ip)\n\n\treturn []net.IPNet{network}\n}",
  "func FindInactiveNodes(hours int, host string, protocol string, uri string, ignoreCertErrors bool, includeFilters []string) ([]string, error) {\n\tt := time.Now().Add(time.Hour * time.Duration(hours*-1)).Format(time.RFC3339)\n\n\turl := fmt.Sprintf(\"%s://%s%s\", protocol, host, uri)\n\n\tdata := fmt.Sprintf(\n\t\t\"{ \\\"query\\\": \\\"nodes[certname]{ report_timestamp < \\\\\\\"%s\\\\\\\" %s }\\\"}\",\n\t\tt,\n\t\tstrings.Join(includeFilters, \" \"),\n\t)\n\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: ignoreCertErrors},\n\t}\n\tclient := &http.Client{Transport: tr}\n\n\tvar list []string\n\tresp, err := client.Post(url, \"application/json\", strings.NewReader(data))\n\tif err != nil {\n\t\treturn list, errors.Wrap(err, \"Post Error: \")\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\treturn list, errors.New(fmt.Sprintf(\"Unable to download: %d\", resp.StatusCode))\n\t}\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\n\tvar l []PuppetDBNode\n\tif err := json.Unmarshal(body, &l); err != nil {\n\t\treturn list, errors.Wrap(err, \"Unmarshal Error: \")\n\t}\n\n\tfor _, val := range l {\n\t\tlist = append(list, val.Certname)\n\t}\n\n\treturn list, nil\n}",
  "func (j *JoinHelper) getAllIPs () ([]string, error){\n\n\tvpnName := j.getVPNNicName()\n\tips := make ([]string, 0)\n\n\tinterfaces, err := net.Interfaces()\n\tif err != nil {\n\t\treturn ips, err\n\t}\n\tfor _, iface := range interfaces {\n\t\tif iface.Name != vpnName {\n\t\t\taddresses, err := iface.Addrs()\n\t\t\tif err != nil {\n\t\t\t\treturn ips, err\n\t\t\t}\n\t\t\tfor _, addr := range addresses {\n\t\t\t\tnetIP, ok := addr.(*net.IPNet)\n\t\t\t\tif ok && !netIP.IP.IsLoopback() && netIP.IP.To4() != nil {\n\t\t\t\t\tip := netIP.IP.String()\n\t\t\t\t\tips = append(ips, ip)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ips, nil\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	ConvertManifest changes application/octet-stream to the schema2 config media type if needed. NOTE: 1. the original manifest will be deleted by the next gc round. 2. manifest lists are not covered. 
 | 
	func ConvertManifest(ctx context.Context, store content.Store, desc ocispec.Descriptor) (ocispec.Descriptor, error) {
	if !(desc.MediaType == images.MediaTypeDockerSchema2Manifest ||
		desc.MediaType == ocispec.MediaTypeImageManifest) {
		log.G(ctx).Warnf("do nothing for media type: %s", desc.MediaType)
		return desc, nil
	}
	// read manifest data
	mb, err := content.ReadBlob(ctx, store, desc)
	if err != nil {
		return ocispec.Descriptor{}, fmt.Errorf("failed to read index data: %w", err)
	}
	var manifest ocispec.Manifest
	if err := json.Unmarshal(mb, &manifest); err != nil {
		return ocispec.Descriptor{}, fmt.Errorf("failed to unmarshal data into manifest: %w", err)
	}
	// check config media type
	if manifest.Config.MediaType != LegacyConfigMediaType {
		return desc, nil
	}
	manifest.Config.MediaType = images.MediaTypeDockerSchema2Config
	data, err := json.MarshalIndent(manifest, "", "   ")
	if err != nil {
		return ocispec.Descriptor{}, fmt.Errorf("failed to marshal manifest: %w", err)
	}
	// update manifest with gc labels
	desc.Digest = digest.Canonical.FromBytes(data)
	desc.Size = int64(len(data))
	labels := map[string]string{}
	for i, c := range append([]ocispec.Descriptor{manifest.Config}, manifest.Layers...) {
		labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = c.Digest.String()
	}
	ref := remotes.MakeRefKey(ctx, desc)
	if err := content.WriteBlob(ctx, store, ref, bytes.NewReader(data), desc, content.WithLabels(labels)); err != nil {
		return ocispec.Descriptor{}, fmt.Errorf("failed to update content: %w", err)
	}
	return desc, nil
} 
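// NOTE (added sketch): a hypothetical call site for ConvertManifest, assuming
// a containerd client; the wiring below (ContentStore, ImageService().Update)
// is illustrative and not part of the original entry.

func convertImageConfig(ctx context.Context, client *containerd.Client, img images.Image) (images.Image, error) {
	desc, err := ConvertManifest(ctx, client.ContentStore(), img.Target)
	if err != nil {
		return images.Image{}, err
	}
	// Point the image record at the rewritten manifest; the old blob is
	// left for the next gc round, as noted above.
	img.Target = desc
	return client.ImageService().Update(ctx, img)
}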
 | 
	[
  "func MakeSchema2Manifest(config distribution.Descriptor, layers []distribution.Descriptor) (string, distribution.Manifest, error) {\n\tm := schema2.Manifest{\n\t\tVersioned: schema2.SchemaVersion,\n\t\tConfig:    config,\n\t\tLayers:    make([]distribution.Descriptor, 0, len(layers)),\n\t}\n\tm.Config.MediaType = schema2.MediaTypeConfig\n\n\tfor _, layer := range layers {\n\t\tlayer.MediaType = schema2.MediaTypeLayer\n\t\tm.Layers = append(m.Layers, layer)\n\t}\n\n\tmanifest, err := schema2.FromStruct(m)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\t_, payload, err := manifest.Payload()\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\treturn string(payload), manifest, nil\n}",
  "func (c *converter) Convert(ctx context.Context, desc ocispec.Descriptor) (ocispec.Descriptor, error) {\n\tmfst, err := images.Manifest(ctx, c.provider, desc, platforms.Default())\n\tif err != nil {\n\t\treturn ocispec.Descriptor{}, errors.Wrap(err, \"failed to get manifest\")\n\t}\n\n\torigMfstJSON, err := json.MarshalIndent(mfst, \"\", \"   \")\n\tif err != nil {\n\t\treturn ocispec.Descriptor{}, errors.Wrap(err, \"failed to marshal manifest JSON\")\n\t}\n\tlog.Printf(\"Original Manifest [%d] %s:\\n%s\", len(origMfstJSON), desc.Digest, origMfstJSON)\n\n\torigMfstConfigJSON, err := content.ReadBlob(ctx, c.provider, mfst.Config)\n\tif err != nil {\n\t\treturn ocispec.Descriptor{}, errors.Wrap(err, \"failed to get original manifest config JSON\")\n\t}\n\tlog.Printf(\"Original Manifest Config [%d] %s:\\n%s\", len(origMfstConfigJSON), mfst.Config.Digest, origMfstConfigJSON)\n\n\tmfst.Config.Digest, err = copyFile(ctx, c.api, c.provider, mfst.Config)\n\tif err != nil {\n\t\treturn ocispec.Descriptor{}, errors.Wrapf(err, \"failed to upload manifest config blob %q\", mfst.Config.Digest)\n\t}\n\n\tfor i, layer := range mfst.Layers {\n\t\tmfst.Layers[i].Digest, err = copyFile(ctx, c.api, c.provider, layer)\n\t\tif err != nil {\n\t\t\treturn ocispec.Descriptor{}, errors.Wrapf(err, \"failed to upload blob %q\", layer.Digest)\n\t\t}\n\t}\n\n\tmfstJSON, err := json.MarshalIndent(mfst, \"\", \"   \")\n\tif err != nil {\n\t\treturn ocispec.Descriptor{}, errors.Wrap(err, \"failed to marshal manifest JSON\")\n\t}\n\n\tmfstDigest, err := addFile(ctx, c.api, files.NewBytesFile(mfstJSON))\n\tif err != nil {\n\t\treturn ocispec.Descriptor{}, errors.Wrap(err, \"failed to upload manifest\")\n\t}\n\tlog.Printf(\"Converted Manifest [%d] %s:\\n%s\", len(mfstJSON), mfstDigest, mfstJSON)\n\n\treturn ocispec.Descriptor{\n\t\tMediaType: ocispec.MediaTypeImageManifest,\n\t\tDigest:    mfstDigest,\n\t\tSize:      int64(len(mfstJSON)),\n\t}, nil\n}",
  "func GenerateManifestObj(manifestBytes []byte, manifestType string, osFilterList, archFilterList []string,\n\ti *ImageSource, parent *manifest.Schema2List) (interface{}, []byte, []*ManifestInfo, error) {\n\n\tswitch manifestType {\n\tcase manifest.DockerV2Schema2MediaType:\n\t\tmanifestObj, err := manifest.Schema2FromManifest(manifestBytes)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\n\t\t// platform info stored in config blob\n\t\tif parent == nil && manifestObj.ConfigInfo().Digest != \"\" {\n\t\t\tblob, _, err := i.GetABlob(manifestObj.ConfigInfo())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, err\n\t\t\t}\n\t\t\tdefer blob.Close()\n\t\t\tbytes, err := io.ReadAll(blob)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, err\n\t\t\t}\n\t\t\tresults := gjson.GetManyBytes(bytes, \"architecture\", \"os\")\n\n\t\t\tif !platformValidate(osFilterList, archFilterList,\n\t\t\t\t&manifest.Schema2PlatformSpec{Architecture: results[0].String(), OS: results[1].String()}) {\n\t\t\t\treturn nil, nil, nil, nil\n\t\t\t}\n\t\t}\n\n\t\treturn manifestObj, manifestBytes, nil, nil\n\tcase manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:\n\t\tmanifestObj, err := manifest.Schema1FromManifest(manifestBytes)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\n\t\t// v1 only support architecture and this field is for information purposes and not currently used by the engine.\n\t\tif parent == nil && !platformValidate(osFilterList, archFilterList,\n\t\t\t&manifest.Schema2PlatformSpec{Architecture: manifestObj.Architecture}) {\n\t\t\treturn nil, nil, nil, nil\n\t\t}\n\n\t\treturn manifestObj, manifestBytes, nil, nil\n\tcase specsv1.MediaTypeImageManifest:\n\t\t//TODO: platform filter?\n\t\tmanifestObj, err := manifest.OCI1FromManifest(manifestBytes)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\treturn manifestObj, manifestBytes, nil, nil\n\tcase manifest.DockerV2ListMediaType:\n\t\tvar subManifestInfoSlice []*ManifestInfo\n\n\t\tmanifestSchemaListObj, err := manifest.Schema2ListFromManifest(manifestBytes)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\n\t\tvar filteredDescriptors []manifest.Schema2ManifestDescriptor\n\n\t\tfor index, manifestDescriptorElem := range manifestSchemaListObj.Manifests {\n\t\t\t// select os and arch\n\t\t\tif !platformValidate(osFilterList, archFilterList, &manifestDescriptorElem.Platform) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfilteredDescriptors = append(filteredDescriptors, manifestDescriptorElem)\n\t\t\tmfstBytes, mfstType, err := i.source.GetManifest(i.ctx, &manifestDescriptorElem.Digest)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, err\n\t\t\t}\n\n\t\t\t//TODO: will the sub manifest be list-type?\n\t\t\tsubManifest, _, _, err := GenerateManifestObj(mfstBytes, mfstType,\n\t\t\t\tarchFilterList, osFilterList, i, manifestSchemaListObj)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, err\n\t\t\t}\n\n\t\t\tif subManifest != nil {\n\t\t\t\tsubManifestInfoSlice = append(subManifestInfoSlice, &ManifestInfo{\n\t\t\t\t\tObj: subManifest.(manifest.Manifest),\n\n\t\t\t\t\t// cannot use &manifestDescriptorElem.Digest here, because manifestDescriptorElem is a fixed copy object\n\t\t\t\t\tDigest: &manifestSchemaListObj.Manifests[index].Digest,\n\t\t\t\t\tBytes:  mfstBytes,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\t// no sub manifests need to transport\n\t\tif len(filteredDescriptors) == 0 {\n\t\t\treturn nil, nil, nil, nil\n\t\t}\n\n\t\t// return a new Schema2List\n\t\tif 
len(filteredDescriptors) != len(manifestSchemaListObj.Manifests) {\n\t\t\tmanifestSchemaListObj.Manifests = filteredDescriptors\n\t\t}\n\n\t\tnewManifestBytes, _ := manifestSchemaListObj.Serialize()\n\n\t\treturn manifestSchemaListObj, newManifestBytes, subManifestInfoSlice, nil\n\tcase specsv1.MediaTypeImageIndex:\n\t\tvar subManifestInfoSlice []*ManifestInfo\n\n\t\tociIndexesObj, err := manifest.OCI1IndexFromManifest(manifestBytes)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\n\t\tvar filteredDescriptors []specsv1.Descriptor\n\n\t\tfor index, descriptor := range ociIndexesObj.Manifests {\n\t\t\t// select os and arch\n\t\t\tif !platformValidate(osFilterList, archFilterList, &manifest.Schema2PlatformSpec{\n\t\t\t\tArchitecture: descriptor.Platform.Architecture,\n\t\t\t\tOS:           descriptor.Platform.OS,\n\t\t\t}) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfilteredDescriptors = append(filteredDescriptors, descriptor)\n\n\t\t\tmfstBytes, mfstType, innerErr := i.source.GetManifest(i.ctx, &descriptor.Digest)\n\t\t\tif innerErr != nil {\n\t\t\t\treturn nil, nil, nil, innerErr\n\t\t\t}\n\n\t\t\t//TODO: will the sub manifest be list-type?\n\t\t\tsubManifest, _, _, innerErr := GenerateManifestObj(mfstBytes, mfstType,\n\t\t\t\tarchFilterList, osFilterList, i, nil)\n\t\t\tif innerErr != nil {\n\t\t\t\treturn nil, nil, nil, err\n\t\t\t}\n\n\t\t\tif subManifest != nil {\n\t\t\t\tsubManifestInfoSlice = append(subManifestInfoSlice, &ManifestInfo{\n\t\t\t\t\tObj: subManifest.(manifest.Manifest),\n\n\t\t\t\t\t// cannot use &descriptor.Digest here, because descriptor is a fixed copy object\n\t\t\t\t\tDigest: &ociIndexesObj.Manifests[index].Digest,\n\t\t\t\t\tBytes:  mfstBytes,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\t// no sub manifests need to transport\n\t\tif len(filteredDescriptors) == 0 {\n\t\t\treturn nil, nil, nil, nil\n\t\t}\n\n\t\t// return a new Schema2List\n\t\tif len(filteredDescriptors) != len(ociIndexesObj.Manifests) {\n\t\t\tociIndexesObj.Manifests = filteredDescriptors\n\t\t}\n\n\t\tnewManifestBytes, _ := ociIndexesObj.Serialize()\n\n\t\treturn ociIndexesObj, newManifestBytes, subManifestInfoSlice, nil\n\tdefault:\n\t\treturn nil, nil, nil, fmt.Errorf(\"unsupported manifest type: %v\", manifestType)\n\t}\n}",
  "func convertManifestIfRequiredWithUpdate(ctx context.Context, options types.ManifestUpdateOptions, converters map[string]manifestConvertFn) (types.Image, error) {\n\tif options.ManifestMIMEType == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tconverter, ok := converters[options.ManifestMIMEType]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"Unsupported conversion type: %v\", options.ManifestMIMEType)\n\t}\n\n\toptionsCopy := options\n\tconvertedManifest, err := converter(ctx, &optionsCopy)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconvertedImage := memoryImageFromManifest(convertedManifest)\n\n\toptionsCopy.ManifestMIMEType = \"\"\n\treturn convertedImage.UpdatedImage(ctx, optionsCopy)\n}",
  "func MakeSchema2Manifest(repository distribution.Repository, digests []digest.Digest) (distribution.Manifest, error) {\n\tctx := context.Background()\n\tblobStore := repository.Blobs(ctx)\n\tbuilder := schema2.NewManifestBuilder(blobStore, schema2.MediaTypeImageConfig, []byte{})\n\tfor _, digest := range digests {\n\t\tbuilder.AppendReference(distribution.Descriptor{Digest: digest})\n\t}\n\n\tmanifest, err := builder.Build(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unexpected error generating manifest: %v\", err)\n\t}\n\n\treturn manifest, nil\n}",
  "func (registry *Registry) manifest(repository, reference string, acceptedMediaTypes []string) (distribution.Manifest, error) {\n\tresp, err := registry.fetchManifest(repository, reference, \"GET\", acceptedMediaTypes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tactualMediaType := resp.Header.Get(\"Content-Type\")\n\tacceptable := false\n\tfor i := range acceptedMediaTypes {\n\t\tif actualMediaType == acceptedMediaTypes[i] {\n\t\t\tacceptable = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !acceptable && actualMediaType != manifestlist.MediaTypeManifestList {\n\t\t// NOTE: a manifest list may be legally returned even it wasn't asked for\n\t\treturn nil, fmt.Errorf(\"unexpected manifest schema was received from registry: %s (registry may not support the manifest type(s) you want: %v)\", actualMediaType, acceptedMediaTypes)\n\t}\n\tdecoder := json.NewDecoder(resp.Body)\n\tswitch actualMediaType {\n\n\tcase schema1.MediaTypeSignedManifest:\n\t\tdeserialized := &schema1.SignedManifest{}\n\t\terr = decoder.Decode(&deserialized)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn deserialized, nil\n\n\tcase schema2.MediaTypeManifest:\n\t\tdeserialized := &schema2.DeserializedManifest{}\n\t\terr = decoder.Decode(&deserialized)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif deserialized.MediaType != actualMediaType {\n\t\t\treturn nil, fmt.Errorf(\"mediaType in manifest should be '%s' not '%s'\", actualMediaType, deserialized.MediaType)\n\t\t}\n\t\treturn deserialized, nil\n\n\tcase manifestlist.MediaTypeManifestList:\n\t\tdeserialized := &manifestlist.DeserializedManifestList{}\n\t\terr = decoder.Decode(&deserialized)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif deserialized.MediaType != actualMediaType {\n\t\t\treturn nil, fmt.Errorf(\"mediaType in manifest should be '%s' not '%s'\", actualMediaType, deserialized.MediaType)\n\t\t}\n\t\tif acceptable {\n\t\t\treturn deserialized, nil\n\t\t}\n\n\t\t// if `reference` is an image digest, a manifest list may be received even if a schema2 Manifest was requested.\n\t\t// (since the Docker Image Digest is often the hash of a manifest list (a.k.a. fat manifest), not a schema2 manifest)\n\t\t// if that's the case: select and unwrap the default referred manifest in this case\n\t\tif len(deserialized.Manifests) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"empty manifest list: repository=%s reference=%s\", repository, reference)\n\t\t}\n\n\t\t// use linux/amd64 manifest by default\n\t\t// TODO: query current platform architecture, OS and Variant and use those as selection criteria\n\t\tfor _, m := range deserialized.Manifests {\n\t\t\tif strings.ToLower(m.Platform.Architecture) == \"amd64\" && strings.ToLower(m.Platform.OS) == \"linux\" {\n\t\t\t\t// address the manifest explicitly with its digest\n\t\t\t\treturn registry.manifest(repository, m.Digest.String(), acceptedMediaTypes)\n\t\t\t}\n\t\t}\n\t\t// fallback: use the first manifest in the list\n\t\t// NOTE: emptiness of the list was checked above\n\t\treturn registry.manifest(repository, deserialized.Manifests[0].Digest.String(), acceptedMediaTypes)\n\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unexpected manifest schema was received from registry: %s\", actualMediaType)\n\t}\n\n}",
  "func transformManifest(obj runtime.Object, retrieveImageConfiguration configurationRetriever) bool {\n\tone := int32(1)\n\tswitch o := obj.(type) {\n\tcase *v1.Pod:\n\t\treturn transformPodSpec(&o.ObjectMeta, &o.Spec, retrieveImageConfiguration)\n\tcase *v1.PodList:\n\t\tchanged := false\n\t\tfor i := range o.Items {\n\t\t\tif transformPodSpec(&o.Items[i].ObjectMeta, &o.Items[i].Spec, retrieveImageConfiguration) {\n\t\t\t\tchanged = true\n\t\t\t}\n\t\t}\n\t\treturn changed\n\tcase *v1.ReplicationController:\n\t\tif o.Spec.Replicas != nil {\n\t\t\to.Spec.Replicas = &one\n\t\t}\n\t\treturn transformPodSpec(&o.Spec.Template.ObjectMeta, &o.Spec.Template.Spec, retrieveImageConfiguration)\n\tcase *appsv1.Deployment:\n\t\tif o.Spec.Replicas != nil {\n\t\t\to.Spec.Replicas = &one\n\t\t}\n\t\treturn transformPodSpec(&o.Spec.Template.ObjectMeta, &o.Spec.Template.Spec, retrieveImageConfiguration)\n\tcase *appsv1.DaemonSet:\n\t\treturn transformPodSpec(&o.Spec.Template.ObjectMeta, &o.Spec.Template.Spec, retrieveImageConfiguration)\n\tcase *appsv1.ReplicaSet:\n\t\tif o.Spec.Replicas != nil {\n\t\t\to.Spec.Replicas = &one\n\t\t}\n\t\treturn transformPodSpec(&o.Spec.Template.ObjectMeta, &o.Spec.Template.Spec, retrieveImageConfiguration)\n\tcase *appsv1.StatefulSet:\n\t\tif o.Spec.Replicas != nil {\n\t\t\to.Spec.Replicas = &one\n\t\t}\n\t\treturn transformPodSpec(&o.Spec.Template.ObjectMeta, &o.Spec.Template.Spec, retrieveImageConfiguration)\n\tcase *batchv1.Job:\n\t\treturn transformPodSpec(&o.Spec.Template.ObjectMeta, &o.Spec.Template.Spec, retrieveImageConfiguration)\n\n\tdefault:\n\t\tlogrus.Debugf(\"skipping unknown object: %T (%v)\\n\", obj.GetObjectKind(), obj)\n\t\treturn false\n\t}\n}",
  "func descriptorFromManifest(manifest v1.Manifest, platform *v1.Platform) (v1.Descriptor, error) {\n\t_, size, hash, err := contentSizeAndHash(manifest)\n\tif err != nil {\n\t\treturn v1.Descriptor{}, trace.Wrap(err)\n\t}\n\treturn v1.Descriptor{\n\t\tMediaType: types.DockerManifestSchema2,\n\t\tSize:      size,\n\t\tDigest:    hash,\n\t\tPlatform:  platform,\n\t}, err\n\n}",
  "func (a *ACBuild) ReplaceManifest(manifestPath string) (err error) {\n\tif err = a.lock(); err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err1 := a.unlock(); err == nil {\n\t\t\terr = err1\n\t\t}\n\t}()\n\n\tfinfo, err := os.Stat(manifestPath)\n\tswitch {\n\tcase os.IsNotExist(err):\n\t\treturn fmt.Errorf(\"no such file or directory: %s\", manifestPath)\n\tcase err != nil:\n\t\treturn err\n\tcase finfo.IsDir():\n\t\treturn fmt.Errorf(\"%s is a directory\", manifestPath)\n\tdefault:\n\t\tbreak\n\t}\n\n\tmanblob, err := ioutil.ReadFile(manifestPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Marshal and Unmarshal the manifest to assert that it's valid and to\n\t// strip any whitespace\n\n\tvar man schema.ImageManifest\n\terr = man.UnmarshalJSON(manblob)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanblob, err = man.MarshalJSON()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ioutil.WriteFile(path.Join(a.CurrentACIPath, aci.ManifestFile), manblob, 0755)\n}",
  "func (li *legacyImage) genManifest() error {\n\tli.manifest = &v1.Manifest{\n\t\tSchemaVersion: 2,\n\t\tMediaType:     types.DockerManifestSchema2,\n\t\tConfig: v1.Descriptor{\n\t\t\tMediaType: types.DockerConfigJSON,\n\t\t\tSize:      int64(len(li.rawConfig)),\n\t\t\tDigest:    li.configDigest,\n\t\t},\n\t}\n\tfor _, l := range li.layers {\n\t\tmediaType, err := l.MediaType()\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to get media type of layer\")\n\t\t}\n\t\tdigest, err := l.Digest()\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to get digest of layer\")\n\t\t}\n\t\tsize, err := l.Size()\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to get size of layer\")\n\t\t}\n\t\tvar urls []string\n\t\tif fl, ok := l.(*foreignLayer); ok {\n\t\t\t// The size returned by the Size method on foreign layers is always\n\t\t\t// zero. But the implementation has a private variable with the real\n\t\t\t// size which we need to report in the manifest.\n\t\t\tsize = fl.size\n\t\t\turls = fl.urls\n\t\t}\n\t\tli.manifest.Layers = append(li.manifest.Layers, v1.Descriptor{\n\t\t\tMediaType: mediaType,\n\t\t\tDigest:    digest,\n\t\t\tSize:      size,\n\t\t\tURLs:      urls,\n\t\t})\n\t}\n\tmanifestBlob, err := json.Marshal(li.manifest)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to encode generate manifest to JSON\")\n\t}\n\tli.rawManifest = manifestBlob\n\tli.digest = v1.Hash{\n\t\tAlgorithm: \"sha256\",\n\t\tHex:       sha256Blob(li.rawManifest),\n\t}\n\treturn nil\n}",
  "func (d *DockerV2Manifest) PutManifest() {\n\treponame := d.Ctx.Input.Param(\":splat\")\n\ttags := d.Ctx.Input.Param(\":tags\")\n\tlogs.Debug(\"PutManifest of '%s:%s'.\", reponame, tags)\n\n\tdata := d.Ctx.Input.CopyBody(utils.MaxSize)\n\tlogs.Debug(\"The manifest is <%s>\", data)\n\terr := storage.PutManifest(d.Ctx, reponame, tags, \"docker\", \"v2\", data)\n\tif err != nil {\n\t\tCtxErrorWrap(d.Ctx, http.StatusInternalServerError, err, fmt.Sprintf(\"Failed to put manifest of '%s:%s'.\", reponame, tags))\n\t\treturn\n\t}\n\n\t//TODO: rollback the storage.. add error checks\n\t_, err = models.AddImage(reponame, tags, \"docker\", \"v2\")\n\tif err != nil {\n\t\tCtxErrorWrap(d.Ctx, http.StatusInternalServerError, err, fmt.Sprintf(\"Failed to add image '%s:%s' to db.\", reponame, tags))\n\t\treturn\n\t}\n\n\tdigest, _ := utils.DigestManifest(data)\n\theader := make(map[string]string)\n\theader[\"Docker-Content-Digest\"] = digest\n\tCtxSuccessWrap(d.Ctx, http.StatusOK, \"{}\", header)\n}",
  "func (m *Manifest) WriteManifest(dst io.Writer) error {\n\tdata, err := json.Marshal(m)\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error marshaling manifest\")\n\t}\n\n\tbWriter := brotli.NewWriter(dst)\n\n\tdefer func() {\n\t\tif closeErr := bWriter.Close(); closeErr != nil {\n\t\t\tzap.S().Errorf(\"error closing brotli writer: %v\", closeErr)\n\t\t}\n\t}()\n\tnumWritten, err := bWriter.Write(data)\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error compressing marshaled manifest\")\n\t}\n\n\tdataLength := len(data)\n\tif dataLength != numWritten {\n\t\treturn fmt.Errorf(\"%d bytes written, expected %d during manifest compression\", numWritten, dataLength)\n\t}\n\n\treturn nil\n}",
  "func toImageManifest(m *schema.ImageManifest) *aciManifest {\n\treturn &aciManifest{\n\t\tACKind:        aciKind(m.ACKind),\n\t\tACVersion:     m.ACVersion,\n\t\tName:          aciName(m.Name),\n\t\tLabels:        aciLabels(m.Labels),\n\t\tApp:           (*aciApp)(m.App),\n\t\tAnnotations:   aciAnnotations(m.Annotations),\n\t\tDependencies:  aciDependencies(m.Dependencies),\n\t\tPathWhitelist: m.PathWhitelist,\n\t}\n}",
  "func ManifestUnpack([]byte) Manifest { panic(\"\") }",
  "func (h *HLSFilter) FilterManifest(filters *parsers.MediaFilters) (string, error) {\n\tm, manifestType, err := m3u8.DecodeFrom(strings.NewReader(h.manifestContent), true)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif manifestType != m3u8.MASTER {\n\t\treturn \"\", errors.New(\"manifest type is wrong\")\n\t}\n\n\t// convert into the master playlist type\n\tmanifest := m.(*m3u8.MasterPlaylist)\n\tfilteredManifest := m3u8.NewMasterPlaylist()\n\n\tfor _, v := range manifest.Variants {\n\t\tabsoluteURL, _ := filepath.Split(h.manifestURL)\n\t\tabsolute, aErr := url.Parse(absoluteURL)\n\t\tif aErr != nil {\n\t\t\treturn h.manifestContent, aErr\n\t\t}\n\t\tnormalizedVariant, err := h.normalizeVariant(v, *absolute)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tvalidatedFilters, err := h.validateVariants(filters, normalizedVariant)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif validatedFilters {\n\t\t\tcontinue\n\t\t}\n\n\t\tfilteredManifest.Append(normalizedVariant.URI, normalizedVariant.Chunklist, normalizedVariant.VariantParams)\n\t}\n\n\treturn filteredManifest.String(), nil\n}",
  "func (registry *Registry) ManifestV2(repository, reference string) (*schema2.DeserializedManifest, error) {\n\tmediaTypes := []string{schema2.MediaTypeManifest}\n\tm, err := registry.manifest(repository, reference, mediaTypes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdeserializedManifest, ok := m.(*schema2.DeserializedManifest)\n\tif !ok {\n\t\tmediaType, _, _ := m.Payload()\n\t\treturn nil, fmt.Errorf(\"unexpected manifest schema was received from registry: %s\", mediaType)\n\t}\n\treturn deserializedManifest, nil\n}",
  "func revertToManifest(kv *DB, mf *Manifest, idMap map[uint64]struct{}) error {\n\t// 1. Check all files in manifest exist.\n\tfor id := range mf.Tables {\n\t\tif _, ok := idMap[id]; !ok {\n\t\t\treturn fmt.Errorf(\"file does not exist for table %d\", id)\n\t\t}\n\t}\n\n\t// 2. Delete files that shouldn't exist.\n\tfor id := range idMap {\n\t\tif _, ok := mf.Tables[id]; !ok {\n\t\t\tkv.elog.Printf(\"Table file %d not referenced in MANIFEST\\n\", id)\n\t\t\tfilename := table.NewFilename(id, kv.opt.Dir)\n\t\t\tif err := os.Remove(filename); err != nil {\n\t\t\t\treturn y.Wrapf(err, \"While removing table %d\", id)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
  "func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) {\n\tswitch manifest.NormalizedMIMEType(mt) {\n\tcase manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:\n\t\treturn manifestSchema1FromManifest(manblob)\n\tcase imgspecv1.MediaTypeImageManifest:\n\t\treturn manifestOCI1FromManifest(src, manblob)\n\tcase manifest.DockerV2Schema2MediaType:\n\t\treturn manifestSchema2FromManifest(src, manblob)\n\tcase manifest.DockerV2ListMediaType:\n\t\treturn manifestSchema2FromManifestList(ctx, sys, src, manblob)\n\tcase imgspecv1.MediaTypeImageIndex:\n\t\treturn manifestOCI1FromImageIndex(ctx, sys, src, manblob)\n\tdefault: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values.\n\t\treturn nil, fmt.Errorf(\"Unimplemented manifest MIME type %s\", mt)\n\t}\n}",
  "func (h *HLSFilter) FilterManifest(filters *parsers.MediaFilters) (string, error) {\n\tm, manifestType, err := m3u8.DecodeFrom(strings.NewReader(h.manifestContent), true)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif manifestType != m3u8.MASTER {\n\t\treturn h.filterRenditionManifest(filters, m.(*m3u8.MediaPlaylist))\n\t}\n\n\t// convert into the master playlist type\n\tmanifest := m.(*m3u8.MasterPlaylist)\n\tfilteredManifest := m3u8.NewMasterPlaylist()\n\n\tfor _, v := range manifest.Variants {\n\t\tabsolute, aErr := getAbsoluteURL(h.manifestURL)\n\t\tif aErr != nil {\n\t\t\treturn h.manifestContent, aErr\n\t\t}\n\n\t\tnormalizedVariant, err := h.normalizeVariant(v, *absolute)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tvalidatedFilters, err := h.validateVariants(filters, normalizedVariant)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif validatedFilters {\n\t\t\tcontinue\n\t\t}\n\n\t\turi := normalizedVariant.URI\n\t\tif filters.Trim != nil {\n\t\t\turi, err = h.normalizeTrimmedVariant(filters, uri)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t}\n\n\t\tfilteredManifest.Append(uri, normalizedVariant.Chunklist, normalizedVariant.VariantParams)\n\t}\n\n\treturn filteredManifest.String(), nil\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	NewCloudManager creates a cloud manager. 
 | 
	func NewCloudManager(dataStore *store.DataStore) (CloudManager, error) {
	if dataStore == nil {
		return nil, fmt.Errorf("Fail to new cloud manager as data store is nil.")
	}
	return &cloudManager{dataStore}, nil
} 
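// NOTE (added sketch): a small usage example for the constructor above; only
// the signature shown in this entry is assumed, and the fail-fast policy
// (stdlib log) is illustrative.

func mustCloudManager(ds *store.DataStore) CloudManager {
	m, err := NewCloudManager(ds)
	if err != nil {
		// A nil data store is a programming error, so exit early.
		log.Fatalf("failed to create cloud manager: %v", err)
	}
	return m
}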
 | 
	[
  "func NewManager(system Resumer, name string, kubeClient kubernetes.Interface) *Manager {\n\treturn &Manager{\n\t\tstop:       make(chan struct{}),\n\t\tsystem:     system,\n\t\tname:       name,\n\t\tkubeClient: kubeClient,\n\t}\n}",
  "func NewGCEManager(cn string) GCE {\n\tif isCommandAvailable(\"gcloud\") {\n\t\tlog.Info(\"Commands found: gcloud\")\n\t} else {\n\n\t\tlog.Critical(\"gcloud not found\")\n\n\t}\n\taccountName, _ := exec.Command(\"sh\", \"-c\", getAccount).Output()\n\tlog.Debug(\"Authenticated as: %s\", string(accountName))\n\n\tconfig, err := clientcmd.BuildConfigFromFlags(\"\", filepath.Join(homedir.HomeDir(), \".kube\", \"config\"))\n\tif err != nil {\n\t\tlog.Fatal(\"Couldn't build config from file\")\n\t}\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\tlog.Fatal(\"Cannot connect to kubernetes cluster , exiting...\")\n\t}\n\tcs := GCE{\n\t\tclusterName: cn,\n\t\tkube:        clientset,\n\t}\n\tcs.lastDeployedState = cs.GetActiveState()\n\tlog.Info(\"GCE manager created\")\n\treturn cs\n}",
  "func NewManager(client *clientv3.Client, target string) (Manager, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"invalid etcd client\")\n\t}\n\n\tif target == \"\" {\n\t\treturn nil, errors.New(\"invalid target\")\n\t}\n\n\tem := &endpointManager{\n\t\tclient: client,\n\t\ttarget: target,\n\t}\n\treturn em, nil\n}",
  "func NewManager(service domain.ProjectService) (Manager, error) {\n\tif service == nil {\n\t\treturn nil, errors.New(\"service is required\")\n\t}\n\n\treturn &manager{\n\t\tservice: service,\n\t}, nil\n}",
  "func NewManager(cfg cgroups.Config) Manager {\n\tif cfg.Count > 0 {\n\t\treturn newV1Manager(cfg)\n\t}\n\n\treturn &NoopManager{}\n}",
  "func NewManager(c config.Config) (m *Manager, err error) {\n\tverifications := make([]*Verification, 0)\n\tsts := make([]*Status, 0)\n\tk8sm, err := k8s.New(c.Namespace)\n\tif err != nil {\n\t\treturn\n\t}\n\tdb, err := database.NewDatabase(c, nil, k8sm)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tstgM, err := storage.NewManager(c.Storages, c.ServiceName, c.Backup.RestoreDir, db.GetLogPosition().Format)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor st, svc := range stgM.GetStorageServices() {\n\t\tif !svc.Verify() {\n\t\t\tcontinue\n\t\t}\n\t\tv := NewVerification(c.ServiceName, stgM.GetStorage(st), c.Verification, db, k8sm)\n\t\tif err != nil {\n\t\t\treturn m, err\n\t\t}\n\t\tverifications = append(verifications, v)\n\t\tsts = append(sts, v.status)\n\t}\n\n\tif len(verifications) == 0 {\n\t\treturn nil, fmt.Errorf(\"no verifications created\")\n\t}\n\n\tprometheus.MustRegister(NewMetricsCollector(sts))\n\treturn &Manager{\n\t\tcfg:           c,\n\t\tverifications: verifications,\n\t}, err\n}",
  "func NewManager(config MultusConf, multusAutoconfigDir string, forceCNIVersion bool) (*Manager, error) {\n\tdefaultCNIPluginName, err := getPrimaryCNIPluginName(multusAutoconfigDir)\n\tif err != nil {\n\t\t_ = logging.Errorf(\"failed to find the primary CNI plugin: %v\", err)\n\t\treturn nil, err\n\t}\n\treturn newManager(config, multusAutoconfigDir, defaultCNIPluginName, forceCNIVersion)\n}",
  "func New(log logr.Logger, k8sClient client.Client, dataDir string) Manager {\n\tm := &manager{\n\t\tmut:         sync.Mutex{},\n\t\tlog:         log,\n\t\tclient:      k8sClient,\n\t\tdataDir:     dataDir,\n\t\tcontrollers: make(map[string]*telemetry.Telemetry),\n\t}\n\tm.startCleanupInterval()\n\treturn m\n}",
  "func NewManager(ctx context.Context, config interface{}) (Manager, error) {\n\tswitch cfg := config.(type) {\n\tcase EtcdConfig:\n\t\treturn NewETCDManager(ctx, cfg)\n\t}\n\treturn nil, errors.Errorf(\"no KV manager found for config type %T\", config)\n}",
  "func New(ctx context.Context, h host.Host, peerstorePath string) *Manager {\n\treturn &Manager{\n\t\tctx:           ctx,\n\t\thost:          h,\n\t\tpeerstorePath: peerstorePath,\n\t}\n}",
  "func NewManager(run, shutdown func(context.Context) error) *Manager {\n\tmgr := &Manager{\n\t\trunFunc:      run,\n\t\tshutdownFunc: shutdown,\n\n\t\trunDone:      make(chan struct{}),\n\t\tstartupDone:  make(chan struct{}),\n\t\tshutdownDone: make(chan struct{}),\n\t\tpauseStart:   make(chan struct{}),\n\t\tstatus:       make(chan Status, 1),\n\t}\n\tmgr.status <- StatusUnknown\n\treturn mgr\n}",
  "func NewManager() *Manager {\n\treturn &Manager{\n\t\ttransfers: make(map[string]*Transfer),\n\t}\n}",
  "func NewManager(opts ...ManagerOpts) (*Manager, error) {\n\tm := &Manager{}\n\n\tfor _, opt := range opts {\n\t\topt(m)\n\t}\n\n\tif err := m.init(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn m, nil\n}",
  "func NewManager() *Manager {\n\treturn &Manager{}\n}",
  "func NewManager(client *rest.Client) *Manager {\n\treturn &Manager{\n\t\tClient: client,\n\t}\n}",
  "func NewManager() *Manager {\n\tm := new(Manager)\n\n\terr := m.Releases.LoadChannels()\n\tif err != nil {\n\t\tlog.Error(\"error: could not load list of k3s channels\")\n\t}\n\terr = m.Releases.LoadReleases()\n\tif err != nil {\n\t\tlog.Error(\"could not load list of k3s releases\")\n\t}\n\n\treturn m\n}",
  "func New(mgr manager.Manager) *Manager {\n\tam := &Manager{\n\t\tmgr: mgr,\n\t}\n\treturn am\n}",
  "func New() datastore.Manager {\n\treturn manager{}\n}",
  "func New() Manager {\n\treturn &manager{}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	CreateCloud creates a cloud. 
 | 
	func (m *cloudManager) CreateCloud(c *api.Cloud) (*api.Cloud, error) {
	cloudName := c.Name
	// Reject duplicates: a nil error means a cloud with this name already exists.
	if _, err := m.ds.FindCloudByName(cloudName); err == nil {
		return nil, httperror.ErrorAlreadyExist.Format(cloudName)
	}
	// Check auth info: construct a provider and ping the cloud to validate credentials.
	cp, err := cloud.NewCloudProvider(c)
	if err != nil {
		return nil, httperror.ErrorValidationFailed.Format("cloud body", err)
	}
	err = cp.Ping()
	if err != nil {
		return nil, httperror.ErrorValidationFailed.Format("cloud body", err)
	}
	if err := m.ds.InsertCloud(c); err != nil {
		return nil, err
	}
	return c, nil
} 
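// NOTE (added sketch): CreateCloud only exercises two things from the cloud
// package: a constructor and a reachability check. The minimal provider
// contract it implies looks like the interface below; the real interface in
// the cloud package likely carries more methods.

type CloudProvider interface {
	// Ping verifies that the stored credentials can reach the cloud.
	Ping() error
}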
 | 
	[
  "func (c *Client) CloudCreateInstance(projectID, name, pubkeyID, flavorID, imageID, region string) (instance *types.CloudInstance, err error) {\n\tinstanceReq := types.CloudInstance{\n\t\tName:     name,\n\t\tSSHKeyID: pubkeyID,\n\t\tFlavorID: flavorID,\n\t\tImageID:  imageID,\n\t\tRegion:   region,\n\t}\n\terr = c.Post(queryEscape(\"/cloud/project/%s/instance\", projectID), instanceReq, &instance)\n\treturn instance, err\n}",
  "func New(ip string, user string, name string) *Cloud {\n\treturn &Cloud{\n\t\tIP:   ip,\n\t\tUser: user,\n\t\tName: name,\n\t\tType: types.CloudTypeDocker,\n\t}\n}",
  "func (o *Enterprise) CreateAzureCloud(child *AzureCloud) *bambou.Error {\n\n\treturn bambou.CurrentSession().CreateChild(o, child)\n}",
  "func (p *provider) Create(_ *v1alpha1.Machine, _ *cloud.MachineCreateDeleteData, _ string) (instance.Instance, error) {\n\treturn CloudProviderInstance{}, nil\n}",
  "func (client StorageGatewayClient) CreateCloudSync(ctx context.Context, request CreateCloudSyncRequest) (response CreateCloudSyncResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\n\tif !(request.OpcRetryToken != nil && *request.OpcRetryToken != \"\") {\n\t\trequest.OpcRetryToken = common.String(common.RetryToken())\n\t}\n\n\tociResponse, err = common.Retry(ctx, request, client.createCloudSync, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = CreateCloudSyncResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = CreateCloudSyncResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(CreateCloudSyncResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into CreateCloudSyncResponse\")\n\t}\n\treturn\n}",
  "func PostClouds(resp http.ResponseWriter, req *http.Request, params routing.Params) {\n\n\tcloud := &clouds.Cloud{}\n\tdecoder := json.NewDecoder(req.Body)\n\n\tif err := decoder.Decode(cloud); err != nil {\n\t\thttp.Error(resp, \"bad Request: \"+err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif cloud.ID == \"\" {\n\t\tcloud.ID = bson.NewObjectId().Hex()\n\t}\n\n\tif _, err := url.Parse(cloud.REST); err != nil {\n\t\thttp.Error(resp, \"bad request: mal formatted REST address\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif cloud.MQTT != \"\" {\n\t\tif _, err := url.Parse(cloud.MQTT); err != nil {\n\t\t\thttp.Error(resp, \"bad request: mal formatted MQTT address\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif err := clouds.AddCloud(cloud); err != nil {\n\t\thttp.Error(resp, \"bad request: \"+err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tlog.Printf(\"[CLOUD] Created %q.\", cloud.ID)\n\n\twriteCloudFile()\n\tresp.Write([]byte(cloud.ID))\n}",
  "func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {\n\tb, err := opts.ToRegionCreateMap()\n\tif err != nil {\n\t\tr.Err = err\n\t\treturn\n\t}\n\tresp, err := client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{\n\t\tOkCodes: []int{201},\n\t})\n\t_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)\n\treturn\n}",
  "func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {\n\tb, err := opts.ToTrustCreateMap()\n\tif err != nil {\n\t\tr.Err = err\n\t\treturn\n\t}\n\tresp, err := client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{\n\t\tOkCodes: []int{201},\n\t})\n\t_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)\n\treturn\n}",
  "func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {\n\tb, err := opts.ToTrustCreateMap()\n\tif err != nil {\n\t\tr.Err = err\n\t\treturn\n\t}\n\t_, r.Err = client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{\n\t\tOkCodes: []int{201},\n\t})\n\treturn\n}",
  "func NewCloud(cfg CloudConfig, metricsRegisterer prometheus.Registerer) (Cloud, error) {\n\tmetadataSess := session.Must(session.NewSession(aws.NewConfig()))\n\tmetadata := services.NewEC2Metadata(metadataSess)\n\tif len(cfg.Region) == 0 {\n\t\tregion, err := metadata.Region()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to introspect region from EC2Metadata, specify --aws-region instead if EC2Metadata is unavailable\")\n\t\t}\n\t\tcfg.Region = region\n\t}\n\n\tif len(cfg.VpcID) == 0 {\n\t\tvpcId, err := metadata.VpcID()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to introspect vpcID from EC2Metadata, specify --aws-vpc-id instead if EC2Metadata is unavailable\")\n\t\t}\n\t\tcfg.VpcID = vpcId\n\t}\n\n\tawsCFG := aws.NewConfig().WithRegion(cfg.Region).WithSTSRegionalEndpoint(endpoints.RegionalSTSEndpoint).WithMaxRetries(cfg.MaxRetries)\n\tsess := session.Must(session.NewSession(awsCFG))\n\tinjectUserAgent(&sess.Handlers)\n\n\tif cfg.ThrottleConfig != nil {\n\t\tthrottler := throttle.NewThrottler(cfg.ThrottleConfig)\n\t\tthrottler.InjectHandlers(&sess.Handlers)\n\t}\n\tif metricsRegisterer != nil {\n\t\tmetricsCollector, err := metrics.NewCollector(metricsRegisterer)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to initialize sdk metrics collector\")\n\t\t}\n\t\tmetricsCollector.InjectHandlers(&sess.Handlers)\n\t}\n\n\treturn &defaultCloud{\n\t\tcfg:         cfg,\n\t\tec2:         services.NewEC2(sess),\n\t\telbv2:       services.NewELBV2(sess),\n\t\tacm:         services.NewACM(sess),\n\t\twafv2:       services.NewWAFv2(sess),\n\t\twafRegional: services.NewWAFRegional(sess, cfg.Region),\n\t\tshield:      services.NewShield(sess),\n\t\trgt:         services.NewRGT(sess),\n\t}, nil\n}",
  "func CreateCloudEvent(cloudEventVersion string) *event.Event {\n\tcloudEvent := event.New(cloudEventVersion)\n\tcloudEvent.SetID(EventId)\n\tcloudEvent.SetType(EventType)\n\tcloudEvent.SetSource(EventSource)\n\tcloudEvent.SetDataContentType(EventDataContentType)\n\tcloudEvent.SetSubject(EventSubject)\n\tcloudEvent.SetDataSchema(EventDataSchema)\n\tcloudEvent.SetExtension(constants.ExtensionKeyPartitionKey, PartitionKey)\n\t_ = cloudEvent.SetData(EventDataContentType, EventDataJson)\n\treturn &cloudEvent\n}",
  "func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {\n\tb, err := opts.ToResourceTypeCreateMap()\n\tif err != nil {\n\t\tr.Err = err\n\t\treturn\n\t}\n\t_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{\n\t\tOkCodes: []int{201},\n\t})\n\treturn\n}",
  "func CreateCloudClient() ([]*http.Client, error) {\n\treturn CreateCloudClientForService(defaultServiceRegistryName)\n}",
  "func CreateController(cloudName string, cloudOptionsFile string, instanceMap map[string]*model.ServiceInstance, bindingMap map[string]*model.ServiceBinding) (*Controller, error) {\n\tcloudClient, err := createCloudClient(cloudName, cloudOptionsFile)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"controller.CreateController: Could not create cloud: %s client, message: %s\", cloudName, err.Error())\n\t}\n\n\tcontroller := &Controller{\n\t\tcloudName:   cloudName,\n\t\tcloudClient: cloudClient,\n\n\t\tinstanceMap: instanceMap,\n\t\tbindingMap:  bindingMap,\n\t}\n\n\terr = controller.loadCatalog()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"controller.CreateController: Could not load catalog for cloud %s client, message: %s\", cloudName, err.Error())\n\t}\n\treturn controller, nil\n}",
  "func Create(c *eclcloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {\n\tb, err := opts.ToServerCreateMap()\n\tif err != nil {\n\t\tr.Err = err\n\t\treturn\n\t}\n\t_, r.Err = c.Post(createURL(c), b, &r.Body, &eclcloud.RequestOpts{\n\t\tOkCodes: []int{200},\n\t})\n\treturn\n}",
  "func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {\n\tvar res CreateResult\n\n\treqBody, err := opts.ToProjectCreateMap()\n\tif err != nil {\n\t\tres.Err = err\n\t\treturn res\n\t}\n\t_, res.Err = client.Post(createURL(client), reqBody, &res.Body, nil)\n\treturn res\n}",
  "func (f *IBMPICloudConnectionClient) Create(pclouddef *p_cloud_cloud_connections.PcloudCloudconnectionsPostParams, powerinstanceid string) (*models.CloudConnection, error) {\n\n\tparams := p_cloud_cloud_connections.NewPcloudCloudconnectionsPostParamsWithTimeout(postTimeOut).WithCloudInstanceID(powerinstanceid).WithBody(pclouddef.Body)\n\tpostok, postcreated, err, _ := f.session.Power.PCloudCloudConnections.PcloudCloudconnectionsPost(params, ibmpisession.NewAuth(f.session, powerinstanceid))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to create cloud connection %s\", err)\n\t}\n\tif postok != nil {\n\t\treturn postok.Payload, nil\n\t}\n\tif postcreated != nil {\n\t\treturn postcreated.Payload, nil\n\t}\n\treturn nil, nil\n}",
  "func (ci CloudIntegrations) Create(cloudIntegration *CloudIntegration) error {\n\treturn doRest(\n\t\t\"POST\",\n\t\tbaseCloudIntegrationPath,\n\t\tci.client,\n\t\tdoPayload(cloudIntegration),\n\t\tdoResponse(cloudIntegration))\n}",
  "func CreateCloudCredential(provider, name string, uid, orgID string) {\n\tStep(fmt.Sprintf(\"Create cloud credential [%s] in org [%s]\", name, orgID), func() {\n\t\tlogrus.Printf(\"Create credential name %s for org %s provider %s\", name, orgID, provider)\n\t\tbackupDriver := Inst().Backup\n\t\tswitch provider {\n\t\tcase drivers.ProviderAws:\n\t\t\tlogrus.Infof(\"Create creds for aws\")\n\t\t\tid := os.Getenv(\"AWS_ACCESS_KEY_ID\")\n\t\t\texpect(id).NotTo(equal(\"\"),\n\t\t\t\t\"AWS_ACCESS_KEY_ID Environment variable should not be empty\")\n\n\t\t\tsecret := os.Getenv(\"AWS_SECRET_ACCESS_KEY\")\n\t\t\texpect(secret).NotTo(equal(\"\"),\n\t\t\t\t\"AWS_SECRET_ACCESS_KEY Environment variable should not be empty\")\n\n\t\t\tcredCreateRequest := &api.CloudCredentialCreateRequest{\n\t\t\t\tCreateMetadata: &api.CreateMetadata{\n\t\t\t\t\tName:  name,\n\t\t\t\t\tUid:   uid,\n\t\t\t\t\tOrgId: orgID,\n\t\t\t\t},\n\t\t\t\tCloudCredential: &api.CloudCredentialInfo{\n\t\t\t\t\tType: api.CloudCredentialInfo_AWS,\n\t\t\t\t\tConfig: &api.CloudCredentialInfo_AwsConfig{\n\t\t\t\t\t\tAwsConfig: &api.AWSConfig{\n\t\t\t\t\t\t\tAccessKey: id,\n\t\t\t\t\t\t\tSecretKey: secret,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\t//ctx, err := backup.GetPxCentralAdminCtx()\n\t\t\tctx, err := backup.GetAdminCtxFromSecret()\n\t\t\texpect(err).NotTo(haveOccurred(),\n\t\t\t\tfmt.Sprintf(\"Failed to fetch px-central-admin ctx: [%v]\",\n\t\t\t\t\terr))\n\t\t\t_, err = backupDriver.CreateCloudCredential(ctx, credCreateRequest)\n\t\t\tif err != nil && strings.Contains(err.Error(), \"already exists\") {\n\t\t\t\treturn\n\t\t\t}\n\t\t\texpect(err).NotTo(haveOccurred(),\n\t\t\t\tfmt.Sprintf(\"Failed to create cloud credential [%s] in org [%s]\", name, orgID))\n\t\t// TODO: validate CreateCloudCredentialResponse also\n\t\tcase drivers.ProviderAzure:\n\t\t\tlogrus.Infof(\"Create creds for azure\")\n\t\t\ttenantID, clientID, clientSecret, subscriptionID, accountName, accountKey := GetAzureCredsFromEnv()\n\t\t\tcredCreateRequest := &api.CloudCredentialCreateRequest{\n\t\t\t\tCreateMetadata: &api.CreateMetadata{\n\t\t\t\t\tName:  name,\n\t\t\t\t\tUid:   uid,\n\t\t\t\t\tOrgId: orgID,\n\t\t\t\t},\n\t\t\t\tCloudCredential: &api.CloudCredentialInfo{\n\t\t\t\t\tType: api.CloudCredentialInfo_Azure,\n\t\t\t\t\tConfig: &api.CloudCredentialInfo_AzureConfig{\n\t\t\t\t\t\tAzureConfig: &api.AzureConfig{\n\t\t\t\t\t\t\tTenantId:       tenantID,\n\t\t\t\t\t\t\tClientId:       clientID,\n\t\t\t\t\t\t\tClientSecret:   clientSecret,\n\t\t\t\t\t\t\tAccountName:    accountName,\n\t\t\t\t\t\t\tAccountKey:     accountKey,\n\t\t\t\t\t\t\tSubscriptionId: subscriptionID,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\t//ctx, err := backup.GetPxCentralAdminCtx()\n\t\t\tctx, err := backup.GetAdminCtxFromSecret()\n\t\t\texpect(err).NotTo(haveOccurred(),\n\t\t\t\tfmt.Sprintf(\"Failed to fetch px-central-admin ctx: [%v]\",\n\t\t\t\t\terr))\n\t\t\t_, err = backupDriver.CreateCloudCredential(ctx, credCreateRequest)\n\t\t\tif err != nil && strings.Contains(err.Error(), \"already exists\") {\n\t\t\t\treturn\n\t\t\t}\n\t\t\texpect(err).NotTo(haveOccurred(),\n\t\t\t\tfmt.Sprintf(\"Failed to create cloud credential [%s] in org [%s]\", name, orgID))\n\t\t\t// TODO: validate CreateCloudCredentialResponse also\n\t\t}\n\t})\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	ListClouds lists all clouds. 
 | 
	func (m *cloudManager) ListClouds() ([]api.Cloud, error) {
	return m.ds.FindAllClouds()
} 
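// NOTE (added sketch): a hypothetical caller for ListClouds; the printing
// format is an assumption, but the Name field matches the one CreateCloud
// reads above.

func printClouds(m CloudManager) error {
	clouds, err := m.ListClouds()
	if err != nil {
		return err
	}
	for _, c := range clouds {
		fmt.Println(c.Name)
	}
	return nil
}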
 | 
	[
  "func (s *CloudsService) ListAll(ctx context.Context, org string) ([]*Cloud, *http.Response, error) {\n\tif org == \"\" {\n\t\treturn nil, nil, errors.New(\"org name must be non-empty\")\n\t}\n\toc := fmt.Sprintf(\"%v/clouds\", org)\n\n\treq, err := s.client.NewRequest(\"GET\", oc, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar orgClouds []*Cloud\n\tresp, err := s.client.Do(ctx, req, &orgClouds)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn orgClouds, resp, nil\n}",
  "func (c *client) GetClouds() ([]*models.AviCloud, error) {\n\tif c.Cloud == nil {\n\t\treturn nil, errors.Errorf(\"unable to make API calls before authentication\")\n\t}\n\n\tvar page = 1\n\tclouds := make([]*models.AviCloud, 0)\n\tfor {\n\t\tall, err := c.Cloud.GetAll(session.SetParams(map[string]string{\"fields\": \"name,uuid\", \"page\": strconv.Itoa(page), \"page_size\": pageSizeMax}))\n\t\tif err != nil {\n\t\t\tif page == 1 {\n\t\t\t\treturn nil, errors.Wrap(err, \"unable to get all clouds from avi controller due to error\")\n\t\t\t}\n\t\t\tbreak // end of result set reached\n\t\t}\n\n\t\tfor _, c := range all {\n\t\t\tclouds = append(clouds, &models.AviCloud{\n\t\t\t\tUUID:     *c.UUID,\n\t\t\t\tName:     *c.Name,\n\t\t\t\tLocation: *c.URL,\n\t\t\t})\n\t\t}\n\n\t\tpage++\n\t}\n\n\treturn clouds, nil\n}",
  "func GetClouds(resp http.ResponseWriter, req *http.Request, params routing.Params) {\n\n\tdata, err := json.Marshal(clouds.GetClouds())\n\tif err != nil {\n\t\tlog.Printf(\"[ERR  ] Error %v\", err)\n\t\thttp.Error(resp, \"internal server error\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tresp.Header().Set(\"Content-Type\", \"application/json\")\n\tresp.Write(data)\n}",
  "func (client StorageGatewayClient) listCloudSyncs(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/storageGateways/{storageGatewayId}/cloudSyncs\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListCloudSyncsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}",
  "func (client StorageGatewayClient) ListCloudSyncs(ctx context.Context, request ListCloudSyncsRequest) (response ListCloudSyncsResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.listCloudSyncs, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = ListCloudSyncsResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = ListCloudSyncsResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(ListCloudSyncsResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into ListCloudSyncsResponse\")\n\t}\n\treturn\n}",
  "func (svc *DatacentersService) List(ctx context.Context) ([]Datacenter, *http.Response, error) {\n\tret := make([]Datacenter, 0)\n\tresp, err := svc.client.resourceList(ctx, datacentersPath, &ret)\n\treturn ret, resp, err\n}",
  "func ListImageOnCloud(client *s3.Client) {\n\tbucket := &bucketName\n\tinput := &s3.ListObjectsV2Input{\n\t\tBucket: bucket,\n\t}\n\n\tresp, err := GetObjects(context.TODO(), client, input)\n\tif err != nil {\n\t\tfmt.Println(\"Got error retrieving list of Images:\")\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tfmt.Printf(\"Images:\\n\")\n\n\tfor i, item := range resp.Contents {\n\t\tfmt.Printf(\"=== Image %06d Begin ===\\n\", i)\n\t\tfmt.Println(\"Name:          \", *item.Key)\n\t\tfmt.Println(\"Last modified: \", *item.LastModified)\n\t\tfmt.Println(\"Size:          \", item.Size)\n\t\tfmt.Println(\"Storage class: \", item.StorageClass)\n\t\tfmt.Printf(\"=== Image %06d End ===\\n\", i)\n\n\t}\n\n\tfmt.Println(\"Found\", len(resp.Contents), \"images\", *bucket)\n}",
  "func (o *ClusterUninstaller) listCloudInstances() (cloudResources, error) {\n\to.Logger.Debugf(\"Listing virtual Cloud service instances\")\n\n\tctx, cancel := o.contextWithTimeout()\n\tdefer cancel()\n\n\toptions := o.vpcSvc.NewListInstancesOptions()\n\n\t// https://raw.githubusercontent.com/IBM/vpc-go-sdk/master/vpcv1/vpc_v1.go\n\tresources, _, err := o.vpcSvc.ListInstancesWithContext(ctx, options)\n\tif err != nil {\n\t\to.Logger.Warnf(\"Error o.vpcSvc.ListInstancesWithContext: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tvar foundOne = false\n\n\tresult := []cloudResource{}\n\tfor _, instance := range resources.Instances {\n\t\tif strings.Contains(*instance.Name, o.InfraID) {\n\t\t\tfoundOne = true\n\t\t\to.Logger.Debugf(\"listCloudInstances: FOUND: %s, %s, %s\", *instance.ID, *instance.Name, *instance.Status)\n\t\t\tresult = append(result, cloudResource{\n\t\t\t\tkey:      *instance.ID,\n\t\t\t\tname:     *instance.Name,\n\t\t\t\tstatus:   *instance.Status,\n\t\t\t\ttypeName: cloudInstanceTypeName,\n\t\t\t\tid:       *instance.ID,\n\t\t\t})\n\t\t}\n\t}\n\tif !foundOne {\n\t\to.Logger.Debugf(\"listCloudInstances: NO matching virtual instance against: %s\", o.InfraID)\n\t\tfor _, instance := range resources.Instances {\n\t\t\to.Logger.Debugf(\"listCloudInstances: only found virtual instance: %s\", *instance.Name)\n\t\t}\n\t}\n\n\treturn cloudResources{}.insert(result...), nil\n}",
  "func (c *Client) ListDatacenter() ([]models.Datacenter, error) {\n\tvar err error\n\tresult := make([]models.Datacenter, 0)\n\n\trequestURL := fmt.Sprintf(\"/%s\", datacenterPath)\n\t_, err = c.Get(requestURL, &result, V1API)\n\treturn result, err\n}",
  "func (s *StorageClusterAPI) List(w http.ResponseWriter, r *http.Request) {\n\tclusters, err := s.storageClusterService.List()\n\tif err != nil {\n\t\tapi.Error(w, err)\n\t\treturn\n\t}\n\tapi.OK(w, clusters)\n}",
  "func (o *Enterprise) AzureClouds(info *bambou.FetchingInfo) (AzureCloudsList, *bambou.Error) {\n\n\tvar list AzureCloudsList\n\terr := bambou.CurrentSession().FetchChildren(o, AzureCloudIdentity, &list, info)\n\treturn list, err\n}",
  "func (c *Client) CloudListInstance(projectID string) ([]types.CloudInstance, error) {\n\tinstances := []types.CloudInstance{}\n\terr := c.Get(queryEscape(\"/cloud/project/%s/instance\", projectID), &instances)\n\treturn instances, err\n}",
  "func (c *carClient) List() ([]*api.Car, error) {\n\tlog.Tracef(\"Client.List; GVK: %v\", c.gvk)\n\tlist, err := c.storage.List(c.gvk)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresults := make([]*api.Car, 0, len(list))\n\tfor _, item := range list {\n\t\tresults = append(results, item.(*api.Car))\n\t}\n\n\treturn results, nil\n}",
  "func (a *Client) ListDatacenters(params *ListDatacentersParams, authInfo runtime.ClientAuthInfoWriter) (*ListDatacentersOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListDatacentersParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID:                 \"listDatacenters\",\n\t\tMethod:             \"GET\",\n\t\tPathPattern:        \"/api/v1/dc\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes:            []string{\"https\"},\n\t\tParams:             params,\n\t\tReader:             &ListDatacentersReader{formats: a.formats},\n\t\tAuthInfo:           authInfo,\n\t\tContext:            params.Context,\n\t\tClient:             params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*ListDatacentersOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*ListDatacentersDefault)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}",
  "func (c *Client) CloudProjectFlavorsList(projectID, region string) ([]types.CloudFlavor, error) {\n\tvar path string\n\tif region == \"\" {\n\t\tpath = queryEscape(\"/cloud/project/%s/flavor\", projectID)\n\n\t} else {\n\t\tpath = queryEscape(\"/cloud/project/%s/flavor?region=%s\", projectID, region)\n\t}\n\tf := []types.CloudFlavor{}\n\treturn f, c.Get(path, &f)\n}",
  "func (k *Kubeclient) List(opts metav1.ListOptions) (*storagev1.StorageClassList, error) {\n\tcli, err := k.getClientsetOrCached()\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to list storageclasses\")\n\t}\n\treturn k.list(cli, opts)\n}",
  "func listDatacenters(c context.Context, names stringset.Set) ([]*crimson.Datacenter, error) {\n\tdb := database.Get(c)\n\trows, err := db.QueryContext(c, `\n\t\tSELECT name, description, state\n\t\tFROM datacenters\n\t`)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"failed to fetch datacenters\").Err()\n\t}\n\tdefer rows.Close()\n\n\tvar datacenters []*crimson.Datacenter\n\tfor rows.Next() {\n\t\tdc := &crimson.Datacenter{}\n\t\tif err = rows.Scan(&dc.Name, &dc.Description, &dc.State); err != nil {\n\t\t\treturn nil, errors.Annotate(err, \"failed to fetch datacenter\").Err()\n\t\t}\n\t\tif matches(dc.Name, names) {\n\t\t\tdatacenters = append(datacenters, dc)\n\t\t}\n\t}\n\treturn datacenters, nil\n}",
  "func (cm *ClusterManager) ListBKCloud(ctx context.Context,\n\treq *cmproto.ListBKCloudRequest, resp *cmproto.CommonListResp) error {\n\treqID, err := requestIDFromContext(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tstart := time.Now()\n\tla := thirdparty.NewListBKCloudAction()\n\tla.Handle(ctx, req, resp)\n\tmetrics.ReportAPIRequestMetric(\"ListBKCloud\", \"grpc\", strconv.Itoa(int(resp.Code)), start)\n\tblog.V(3).Infof(\"reqID: %s, action: ListBKCloud, req %v\", reqID, req)\n\treturn nil\n}",
  "func (c *TestClient) ListDisks(project, zone string, opts ...ListCallOption) ([]*compute.Disk, error) {\n\tif c.ListDisksFn != nil {\n\t\treturn c.ListDisksFn(project, zone, opts...)\n\t}\n\treturn c.client.ListDisks(project, zone, opts...)\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	DeleteCloud deletes the cloud. 
 | 
	func (m *cloudManager) DeleteCloud(name string) error {
	return m.ds.DeleteCloudByName(name)
} 
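A minimal usage sketch (hypothetical caller; the helper name and the cloud name are illustrative assumptions, not part of this entry):

func deleteCloudExample(m *cloudManager) error {
	// Delete a cloud by name and wrap the error with context for the caller.
	if err := m.DeleteCloud("my-cloud"); err != nil {
		return fmt.Errorf("delete cloud %q: %w", "my-cloud", err)
	}
	return nil
}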
 | 
	[
  "func (cc *Controller) DeleteCloud(name string) {\n\tdelete(cc.Clouds, name)\n}",
  "func (client StorageGatewayClient) DeleteCloudSync(ctx context.Context, request DeleteCloudSyncRequest) (response DeleteCloudSyncResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.deleteCloudSync, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = DeleteCloudSyncResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = DeleteCloudSyncResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(DeleteCloudSyncResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into DeleteCloudSyncResponse\")\n\t}\n\treturn\n}",
  "func (client StorageGatewayClient) deleteCloudSync(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodDelete, \"/storageGateways/{storageGatewayId}/cloudSyncs/{cloudSyncName}\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response DeleteCloudSyncResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}",
  "func (f *IBMPICloudConnectionClient) Delete(pclouddef *p_cloud_cloud_connections.PcloudCloudconnectionsDeleteParams) (models.Object, error) {\n\tparams := p_cloud_cloud_connections.NewPcloudCloudconnectionsDeleteParams().WithCloudInstanceID(pclouddef.CloudInstanceID).WithCloudConnectionID(pclouddef.CloudConnectionID)\n\trespok, _, err := f.session.Power.PCloudCloudConnections.PcloudCloudconnectionsDelete(params, ibmpisession.NewAuth(f.session, pclouddef.CloudInstanceID))\n\n\tif err != nil || respok.Payload == nil {\n\t\treturn nil, fmt.Errorf(\"Failed to Delete all cloud connection %s\", err)\n\t}\n\treturn respok.Payload, nil\n}",
  "func (ci CloudIntegrations) Delete(cloudIntegration *CloudIntegration, skipTrash bool) error {\n\tif cloudIntegration.Id == \"\" {\n\t\treturn fmt.Errorf(\"cloud integration id must be specified\")\n\t}\n\n\tparams := map[string]string{\n\t\t\"skipTrash\": strconv.FormatBool(skipTrash),\n\t}\n\n\terr := doRest(\n\t\t\"DELETE\",\n\t\tfmt.Sprintf(\"%s/%s\", baseCloudIntegrationPath, cloudIntegration.Id),\n\t\tci.client,\n\t\tdoParams(params))\n\tif err == nil {\n\t\tcloudIntegration.Id = \"\"\n\t}\n\treturn err\n}",
  "func DeleteCloudCredential(name string, orgID string, cloudCredUID string) {\n\tStep(fmt.Sprintf(\"Delete cloud credential [%s] in org [%s]\", name, orgID), func() {\n\t\tbackupDriver := Inst().Backup\n\n\t\tcredDeleteRequest := &api.CloudCredentialDeleteRequest{\n\t\t\tName:  name,\n\t\t\tOrgId: orgID,\n\t\t\tUid:   cloudCredUID,\n\t\t}\n\t\t//ctx, err := backup.GetPxCentralAdminCtx()\n\t\tctx, err := backup.GetAdminCtxFromSecret()\n\t\texpect(err).NotTo(haveOccurred(),\n\t\t\tfmt.Sprintf(\"Failed to fetch px-central-admin ctx: [%v]\",\n\t\t\t\terr))\n\t\tbackupDriver.DeleteCloudCredential(ctx, credDeleteRequest)\n\t\t// Best effort cleanup, dont fail test, if deletion fails\n\t\t// expect(err).NotTo(haveOccurred(),\n\t\t//  fmt.Sprintf(\"Failed to delete cloud credential [%s] in org [%s]\", name, orgID))\n\t\t// TODO: validate CreateCloudCredentialResponse also\n\t})\n}",
  "func (c *Client) CloudDeleteInstance(projectID, instanceID string) error {\n\terr := c.Delete(queryEscape(\"/cloud/project/%s/instance/%s\", projectID, instanceID), nil)\n\tif apierror, ok := err.(*APIError); ok && apierror.Code == 404 {\n\t\terr = nil\n\t}\n\treturn err\n}",
  "func Delete(client *gophercloud.ServiceClient, regionID string) (r DeleteResult) {\n\tresp, err := client.Delete(deleteURL(client, regionID), nil)\n\t_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)\n\treturn\n}",
  "func Delete(name string) error {\n\tinstance, err := Get(name)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to find a cluster named '%s': %s\", name, err.Error())\n\t}\n\terr = instance.Delete()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to delete infrastructure of cluster '%s': %s\", name, err.Error())\n\t}\n\n\t// Deletes the network and related stuff\n\tutils.DeleteNetwork(instance.GetNetworkID())\n\n\t// Cleanup Object Storage data\n\treturn instance.RemoveDefinition()\n}",
  "func DeleteDatabase(name, instanceURL string) (*pb.GenericResponse, error) {\n\tconn, err := grpc.Dial(\n\t\tinstanceURL,\n\t\tgrpc.WithInsecure(),\n\t\tgrpc.WithPerRPCCredentials(authCredentials),\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer conn.Close()\n\tclient := pb.NewDatabaseFactoryClient(conn)\n\n\tctx, cancel := context.WithTimeout(context.Background(), timeout)\n\tdefer cancel()\n\n\tres, err := client.Delete(ctx, &pb.NameHolder{Name: name})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res, nil\n}",
  "func Delete(t *testing.T, knFunc *TestShellCmdRunner, project *FunctionTestProject) {\n\t// Invoke delete command\n\tresult := knFunc.Exec(\"delete\", project.FunctionName)\n\tif result.Error != nil && project.IsDeployed {\n\t\tt.Fail()\n\t}\n\tproject.IsDeployed = false\n\n\t// Invoke list to verify project was deleted\n\tList(t, knFunc, *project)\n}",
  "func Delete(client *gophercloud.ServiceClient, instanceID, dbName string) (r DeleteResult) {\n\tresp, err := client.Delete(dbURL(client, instanceID, dbName), nil)\n\t_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)\n\treturn\n}",
  "func (s *API) DeleteDatabase(req *DeleteDatabaseRequest, opts ...scw.RequestOption) error {\n\tvar err error\n\n\tif req.Region == \"\" {\n\t\tdefaultRegion, _ := s.client.GetDefaultRegion()\n\t\treq.Region = defaultRegion\n\t}\n\n\tif fmt.Sprint(req.Region) == \"\" {\n\t\treturn errors.New(\"field Region cannot be empty in request\")\n\t}\n\n\tif fmt.Sprint(req.InstanceID) == \"\" {\n\t\treturn errors.New(\"field InstanceID cannot be empty in request\")\n\t}\n\n\tif fmt.Sprint(req.Name) == \"\" {\n\t\treturn errors.New(\"field Name cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod:  \"DELETE\",\n\t\tPath:    \"/rdb/v1/regions/\" + fmt.Sprint(req.Region) + \"/instances/\" + fmt.Sprint(req.InstanceID) + \"/databases/\" + fmt.Sprint(req.Name) + \"\",\n\t\tHeaders: http.Header{},\n\t}\n\n\terr = s.client.Do(scwReq, nil, opts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
  "func (o kubernetesClient) Delete(projectName string, wait bool) error {\n\tif projectName == \"\" {\n\t\treturn errors.New(\"no project name given\")\n\t}\n\n\tprojectSupport, err := o.client.IsProjectSupported()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to detect project support: %w\", err)\n\t}\n\n\tif projectSupport {\n\t\terr = o.client.DeleteProject(projectName, wait)\n\t} else {\n\t\terr = o.client.DeleteNamespace(projectName, wait)\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to delete project %q: %w\", projectName, err)\n\t}\n\treturn nil\n}",
  "func (k *Kubeclient) Delete(name string, opts *metav1.DeleteOptions) error {\n\tcli, err := k.getClientsetOrCached()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to delete the storageclass: {%s}\", name)\n\t}\n\treturn k.del(cli, name, opts)\n}",
  "func (m *MultiDB) Delete() (ErrCode, error) {\n\tm.Close()\n\tm.system.Close()\n\tif err := removeContents(m.BaseDir()); err != nil {\n\t\treturn ErrFileSystem, err\n\t}\n\treturn OK, nil\n}",
  "func Delete(client *gophercloud.ServiceClient, trustID string) (r DeleteResult) {\n\tresp, err := client.Delete(deleteURL(client, trustID), nil)\n\t_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)\n\treturn\n}",
  "func Delete(client *gophercloud.ServiceClient, trustID string) (r DeleteResult) {\n\t_, r.Err = client.Delete(deleteURL(client, trustID), nil)\n\treturn\n}",
  "func (client *Client) Destroy() error {\n\n\tconf, err := client.configClient.Load()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttfInputVars := client.tfInputVarsFactory.NewInputVars(conf)\n\n\tvar volumesToDelete []string\n\n\tswitch client.provider.IAAS() {\n\n\tcase iaas.AWS:\n\t\ttfOutputs, err1 := client.tfCLI.BuildOutput(tfInputVars)\n\t\tif err1 != nil {\n\t\t\treturn err1\n\t\t}\n\t\tvpcID, err2 := tfOutputs.Get(\"VPCID\")\n\t\tif err2 != nil {\n\t\t\treturn err2\n\t\t}\n\t\tvolumesToDelete, err1 = client.provider.DeleteVMsInVPC(vpcID)\n\t\tif err1 != nil {\n\t\t\treturn err1\n\t\t}\n\n\tcase iaas.GCP:\n\t\tproject, err1 := client.provider.Attr(\"project\")\n\t\tif err1 != nil {\n\t\t\treturn err1\n\t\t}\n\t\tzone := client.provider.Zone(\"\", \"\")\n\t\terr1 = client.provider.DeleteVMsInDeployment(zone, project, conf.GetDeployment())\n\t\tif err1 != nil {\n\t\t\treturn err1\n\t\t}\n\t}\n\n\terr = client.tfCLI.Destroy(tfInputVars)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif client.provider.IAAS() == iaas.AWS {\n\t\tif len(volumesToDelete) > 0 {\n\t\t\tfmt.Printf(\"Scheduling to delete %v volumes\\n\", len(volumesToDelete))\n\t\t}\n\t\tif err1 := client.provider.DeleteVolumes(volumesToDelete, iaas.DeleteVolume); err1 != nil {\n\t\t\treturn err1\n\t\t}\n\t}\n\n\tif err = client.configClient.DeleteAll(conf); err != nil {\n\t\treturn err\n\t}\n\n\treturn writeDestroySuccessMessage(client.stdout)\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	PingCloud pings the cloud to check its health. 
 | 
	func (m *cloudManager) PingCloud(name string) error {
	c, err := m.ds.FindCloudByName(name)
	if err != nil {
		return httperror.ErrorContentNotFound.Format(name)
	}
	cp, err := cloud.NewCloudProvider(c)
	if err != nil {
		return err
	}
	return cp.Ping()
} 
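A hedged usage sketch of the health check above (the helper name is an assumption; only PingCloud itself comes from this entry):

func ensureCloudHealthy(m *cloudManager, name string) error {
	// Gate further work on the cloud passing its provider-level ping.
	if err := m.PingCloud(name); err != nil {
		return fmt.Errorf("cloud %q failed health check: %w", name, err)
	}
	return nil
}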
 | 
	[
  "func (c *Client) Ping(ctx context.Context) error {\n\terr := c.Authorize(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Makes a simple http request to health check endpoint.\n\tresp, err := c.Request(ctx).Get(\"/v1/health\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.StatusCode() != http.StatusOK {\n\t\treturn errors.New(\"not healthy\")\n\t}\n\n\treturn nil\n}",
  "func (c *Connector) Ping() (err error) {\n\turl, err := c.getURL(\"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treq, _ := http.NewRequest(\"GET\", url, nil)\n\treq.Header.Add(\"content-type\", \"application/json\")\n\treq.Header.Add(\"cache-control\", \"no-cache\")\n\n\tres, err := c.getHTTPClient().Do(req)\n\tif err != nil {\n\t\treturn err\n\t} else if res.StatusCode != http.StatusOK {\n\t\tdefer res.Body.Close()\n\t\tbody, _ := ioutil.ReadAll(res.Body)\n\t\terr = fmt.Errorf(\"%s\", string(body))\n\t}\n\treturn err\n}",
  "func (c *Client) Ping(name string, message string) []error {\n\turl := c.buildURL(\"/v1/ping\")\n\n\t_, _, errs := gorequest.New().\n\t\tPost(url).\n\t\tSet(\"Authorization\", c.buildAuthHeader()).\n\t\tSend(requests.PingRequest{\n\t\tName:    name,\n\t\tMessage: message,\n\t}).\n\t\tEnd()\n\n\tif errs != nil {\n\t\treturn errs\n\t}\n\n\treturn nil\n}",
  "func (c *Client) Ping() error {\n\tu := c.endpoint\n\tu.Path = `/`\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tio.Copy(os.Stdout, resp.Body)\n\treturn nil\n}",
  "func (c *HTTPClient) Ping() (bool, error) {\n\tres, err := utils.HTTPRequest(\"GET\",\n\t\tfmt.Sprintf(\"%v/health\", c.serverEndpoint),\n\t\tnil,\n\t\tnil,\n\t\tc.ticket,\n\t)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode == http.StatusOK {\n\t\treturn true, nil\n\t} else {\n\t\treturn false, fmt.Errorf(ErrorMsg(res))\n\t}\n}",
  "func (cl *Client) Ping() error {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Minute)\n\tdefer cancel()\n\tif _, err := cl.grafana.GetHealth(ctx); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
  "func (s *Client) Ping(ctx context.Context) error {\n\terr := s.request(ctx, \"server.ping\", []interface{}{}, nil)\n\n\treturn err\n}",
  "func (c *client) Ping() (bool, error) {\n\tif _, err := c.RbacV1().Roles(\"\").List(metav1.ListOptions{}); err != nil {\n\t\treturn false, nil\n\t}\n\n\tif _, err := c.AppsV1().Deployments(\"\").List(metav1.ListOptions{}); err != nil {\n\t\treturn false, nil\n\t}\n\n\tif _, err := c.PolicyV1beta1().PodSecurityPolicies().List(metav1.ListOptions{}); err != nil {\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}",
  "func (p *KiteHTTPPinger) Ping() Status {\n\tres, err := p.Client.Get(p.Address)\n\tif err != nil {\n\t\treturn Failure\n\t}\n\tdefer res.Body.Close()\n\n\tresData, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn Failure\n\t}\n\n\tif string(resData) != kiteHTTPResponse {\n\t\treturn Failure\n\t}\n\n\treturn Success\n}",
  "func (c *Client) Ping(checkAllMetaServers bool) error {\n\tc.mu.RLock()\n\tserver := c.metaServers[0]\n\tc.mu.RUnlock()\n\turl := c.url(server) + \"/ping\"\n\tif checkAllMetaServers {\n\t\turl = url + \"?all=true\"\n\t}\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.StatusCode == http.StatusOK {\n\t\treturn nil\n\t}\n\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn fmt.Errorf(string(b))\n}",
  "func (r *vtmClient) Ping() (bool, error) {\n\tif err := r.apiGet(vtmAPIPing, nil, nil); err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}",
  "func (c *Client) Ping() bool {\n\tendpoint := fmt.Sprintf(\"%s/_ping\", baseAddr)\n\tr, err := c.http.Get(endpoint)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn statusCode(r.StatusCode, http.StatusOK) == nil\n}",
  "func (c *Conn) Ping() error {\n\tresponse := c.client.Cmd(cmdPing)\n\tif !isOK(response) {\n\t\treturn errx.Errorf(\"ping command failed\")\n\t}\n\treturn nil\n}",
  "func (c *HttpController) Ping(writer http.ResponseWriter, request *http.Request) {\n\tresponse := common.NewPingResponse()\n\tc.sendResponse(writer, request, contracts.ApiPingRoute, response, http.StatusOK)\n}",
  "func (p *Provider) Ping() (bool, error) {\n\tres, err := p.get(fmt.Sprintf(\"%s/data/testAuthentication\", baseURL), \"\")\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif msg, exists := res[\"message\"]; exists {\n\t\tif msg.(string) == \"Congratulations! You are communicating with the Pinata API!\" {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t}\n\n\treturn false, errors.New(\"unexpected failure\")\n}",
  "func (registry *Registry) Ping(ctx context.Context) error {\n\turl := registry.url(\"/v2/\")\n\tregistry.Logf(\"registry.ping url=%s\", url)\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := registry.Client.Do(req.WithContext(ctx))\n\tif resp != nil {\n\t\tdefer resp.Body.Close()\n\t}\n\treturn err\n}",
  "func (sbd *State) PingChecker(updateChannel chan ServiceUpdate, shutdownPingSignal chan interface{}) {\n\tif sbd.Config.PingHosts { // The ping option was set\n\t\tilog.Println(\"Started the Ping Check Provider\")\n\n\t\ttotalWaitDuration := sbd.Config.TimeBetweenPingChecks / 1 * time.Second\n\t\tcurrentWaitDuration := totalWaitDuration\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-shutdownPingSignal:\n\t\t\t\tilog.Println(\"Shutting down the Ping Check Provider\")\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\t// Sleep before testing these hosts again\n\t\t\t\tif currentWaitDuration < totalWaitDuration {\n\t\t\t\t\tcurrentWaitDuration += 1 * time.Second\n\t\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tsbd.serviceLock.RLock()\n\t\t\t\tfor i := range sbd.Hosts {\n\t\t\t\t\thost := sbd.Hosts[i]\n\t\t\t\t\t// Asyncronously ping hosts so we don't wait full timeouts and can ping faster.\n\t\t\t\t\tgo host.PingHost(updateChannel, sbd.Config.PingTimeout)\n\t\t\t\t}\n\n\t\t\t\tsbd.serviceLock.RUnlock()\n\n\t\t\t\tcurrentWaitDuration -= totalWaitDuration\n\t\t\t}\n\t\t}\n\t}\n}",
  "func (c *Client) Ping(ctx context.Context) error {\n\tunixZeroTimestamp, err := ptypes.TimestampProto(time.Unix(0, 0))\n\tif err != nil {\n\t\treturn err\n\t}\n\tent := &logpb.LogEntry{\n\t\tPayload:   &logpb.LogEntry_TextPayload{TextPayload: \"ping\"},\n\t\tTimestamp: unixZeroTimestamp, // Identical timestamps and insert IDs are both\n\t\tInsertId:  \"ping\",            // necessary for the service to dedup these entries.\n\t}\n\t_, err = c.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{\n\t\tLogName:  internal.LogPath(c.parent, \"ping\"),\n\t\tResource: monitoredResource(c.parent),\n\t\tEntries:  []*logpb.LogEntry{ent},\n\t})\n\treturn err\n}",
  "func (c *Connection) Ping(ctx context.Context) (time.Duration, error) {\n\tresp, err := c.Request(ctx).\n\t\tGet(\"/status\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn resp.Time(), nil\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	ListWorkers lists all workers. 
 | 
	func (m *cloudManager) ListWorkers(name string, extendInfo string) ([]api.WorkerInstance, error) {
	c, err := m.ds.FindCloudByName(name)
	if err != nil {
		return nil, httperror.ErrorContentNotFound.Format(name)
	}
	if c.Type == api.CloudTypeKubernetes && extendInfo == "" {
		err := fmt.Errorf("query parameter namespace cannot be empty because cloud type is %v",
			api.CloudTypeKubernetes)
		return nil, err
	}
	if c.Kubernetes != nil {
		if c.Kubernetes.InCluster {
			// default cluster, get default namespace.
			c.Kubernetes.Namespace = cloud.DefaultNamespace
		} else {
			c.Kubernetes.Namespace = extendInfo
		}
	}
	cp, err := cloud.NewCloudProvider(c)
	if err != nil {
		return nil, httperror.ErrorUnknownInternal.Format(err)
	}
	return cp.ListWorkers()
} 
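A hypothetical call for a Kubernetes cloud; per the check above, the second argument must carry the namespace when c.Type == api.CloudTypeKubernetes (the cloud and namespace names here are assumptions):

func countWorkers(m *cloudManager) (int, error) {
	// extendInfo doubles as the namespace for out-of-cluster Kubernetes clouds.
	workers, err := m.ListWorkers("my-k8s-cloud", "build-namespace")
	if err != nil {
		return 0, err
	}
	return len(workers), nil
}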
 | 
	[
  "func (db *InMem) ListWorkers() ([]Worker, error) {\n\tvar workers []Worker\n\n\tfor _, worker := range db.workers {\n\t\tworkers = append(workers, worker)\n\t}\n\n\treturn workers, nil\n}",
  "func (taskBolt *TaskBolt) ListWorkers(ctx context.Context, req *pbf.ListWorkersRequest) (*pbf.ListWorkersResponse, error) {\n\tresp := &pbf.ListWorkersResponse{}\n\tresp.Workers = []*pbf.Worker{}\n\n\terr := taskBolt.db.Update(func(tx *bolt.Tx) error {\n\n\t\tbucket := tx.Bucket(Workers)\n\t\tc := bucket.Cursor()\n\n\t\tfor k, _ := c.First(); k != nil; k, _ = c.Next() {\n\t\t\tworker := getWorker(tx, string(k))\n\t\t\tresp.Workers = append(resp.Workers, worker)\n\t\t}\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}",
  "func (db *Postgres) ListWorkers() ([]Worker, error) {\n\tvar workers []Worker\n\n\trows, err := db.db.Query(`SELECT id, queue, max_job_count, attributes FROM junction.workers`)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tvar worker Worker\n\t\tvar attributes hstore.Hstore\n\t\terr := rows.Scan(&worker.ID, &worker.Queue, &worker.MaxJobCount, &attributes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tworker.Attributes = make(map[string]string)\n\t\tif attributes.Map != nil {\n\t\t\tfor key, value := range attributes.Map {\n\t\t\t\tif value.Valid {\n\t\t\t\t\tworker.Attributes[key] = value.String\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tworkers = append(workers, worker)\n\t}\n\tif err = rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn workers, nil\n}",
  "func listWorker(ctx context.Context) {\n\tdefer utils.Recover()\n\n\tfor j := range jc {\n\t\tlogrus.Infof(\"Start listing job %s.\", j.Path)\n\n\t\terr := listJob(ctx, j)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tlogrus.Infof(\"Job %s listed.\", j.Path)\n\t}\n}",
  "func (md *ManagementNode) GetWorkerList(ctx context.Context, req *pb.DummyReq) (*pb.WorkerNodeList, error) {\n\n\tlog.Info(\"GetWorkerList\")\n\t// for now it gets all recorded workers\n\topts := []clientv3.OpOption{\n\t\tclientv3.WithPrefix(),\n\t}\n\n\teCtx, cancel := context.WithTimeout(ctx,\n\t\tmd.cfg.EtcdDialTimeout)\n\tgr, err := md.etcd.Get(eCtx, EtcdWorkerPrefix, opts...)\n\tcancel()\n\tif err != nil {\n\t\tcommon.PrintDebugErr(err)\n\t\treturn nil, err\n\t}\n\n\twnl := pb.WorkerNodeList{}\n\n\tfor _, item := range gr.Kvs {\n\t\tvar wn wrpc.WorkerRPCClient\n\t\terr := json.Unmarshal([]byte(item.Value), &wn)\n\t\tif err != nil {\n\t\t\tcommon.PrintDebugErr(err)\n\t\t\treturn nil, err\n\t\t}\n\t\twnl.Nodes = append(wnl.Nodes, wn.WN)\n\t}\n\n\treturn &wnl, nil\n}",
  "func (bq *InMemoryBuildQueue) ListWorkers(ctx context.Context, request *buildqueuestate.ListWorkersRequest) (*buildqueuestate.ListWorkersResponse, error) {\n\tsizeClassKey, err := newSizeClassKeyFromName(request.SizeClassQueueName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar startAfterWorkerKey *string\n\tif startAfter := request.StartAfter; startAfter != nil {\n\t\tworkerKey := string(newWorkerKey(startAfter.WorkerId))\n\t\tstartAfterWorkerKey = &workerKey\n\t}\n\n\tbq.enter(bq.clock.Now())\n\tdefer bq.leave()\n\n\tscq, ok := bq.sizeClassQueues[sizeClassKey]\n\tif !ok {\n\t\treturn nil, status.Error(codes.NotFound, \"No workers for this instance name, platform and size class exist\")\n\t}\n\n\t// Obtain IDs of all workers in sorted order.\n\tvar keyList []string\n\tfor workerKey, w := range scq.workers {\n\t\tif !request.JustExecutingWorkers || w.currentTask != nil {\n\t\t\tkeyList = append(keyList, string(workerKey))\n\t\t}\n\t}\n\tsort.Strings(keyList)\n\tpaginationInfo, endIndex := getPaginationInfo(len(keyList), request.PageSize, func(i int) bool {\n\t\treturn startAfterWorkerKey == nil || keyList[i] > *startAfterWorkerKey\n\t})\n\n\t// Extract status.\n\tkeyListRegion := keyList[paginationInfo.StartIndex:endIndex]\n\tworkers := make([]*buildqueuestate.WorkerState, 0, len(keyListRegion))\n\tfor _, key := range keyListRegion {\n\t\tworkerKey := workerKey(key)\n\t\tw := scq.workers[workerKey]\n\t\tvar currentOperation *buildqueuestate.OperationState\n\t\tif t := w.currentTask; t != nil {\n\t\t\t// A task may have more than one operation\n\t\t\t// associated with it, in case deduplication of\n\t\t\t// in-flight requests occurred. For the time\n\t\t\t// being, let's not expose the concept of tasks\n\t\t\t// through the web UI yet. Just show one of the\n\t\t\t// operations.\n\t\t\t//\n\t\t\t// Do make this deterministic by picking the\n\t\t\t// operation with the lowest name,\n\t\t\t// alphabetically.\n\t\t\tvar o *operation\n\t\t\tfor _, oCheck := range t.operations {\n\t\t\t\tif o == nil || o.name > oCheck.name {\n\t\t\t\t\to = oCheck\n\t\t\t\t}\n\t\t\t}\n\t\t\tcurrentOperation = o.getOperationState(bq)\n\t\t\tcurrentOperation.SizeClassQueueName = nil\n\t\t\tcurrentOperation.Stage = nil\n\t\t}\n\t\tworkerID := workerKey.getWorkerID()\n\t\tworkers = append(workers, &buildqueuestate.WorkerState{\n\t\t\tId:               workerID,\n\t\t\tTimeout:          bq.cleanupQueue.getTimestamp(w.cleanupKey),\n\t\t\tCurrentOperation: currentOperation,\n\t\t\tDrained:          w.isDrained(scq, workerID),\n\t\t})\n\t}\n\treturn &buildqueuestate.ListWorkersResponse{\n\t\tWorkers:        workers,\n\t\tPaginationInfo: paginationInfo,\n\t}, nil\n}",
  "func (gores *Gores) Workers() []string {\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, err := conn.Do(\"SMEMBERS\", watchedWorkers)\n\tif data == nil || err != nil {\n\t\treturn nil\n\t}\n\n\tworkers := make([]string, len(data.([]interface{})))\n\tfor i, w := range data.([]interface{}) {\n\t\tworkers[i] = string(w.([]byte))\n\t}\n\n\treturn workers\n}",
  "func (s *Backend) getWorkers() []*pbf.Worker {\n\tworkers := []*pbf.Worker{}\n\tresp, rerr := s.client.ListWorkers(context.Background(), &pbf.ListWorkersRequest{})\n\n\tif rerr != nil {\n\t\tlog.Error(\"Error getting workers. Recovering.\", rerr)\n\t\treturn workers\n\t}\n\n\tfor _, w := range resp.Workers {\n\t\tif w.Id != s.workerID || w.State != pbf.WorkerState_ALIVE {\n\t\t\t// Ignore workers that aren't alive\n\t\t\tcontinue\n\t\t}\n\t\tworkers = append(workers, w)\n\t}\n\treturn workers\n}",
  "func (s *Backend) getWorkers() []*pbf.Worker {\n\n\t// Get the workers from the funnel server\n\tworkers := []*pbf.Worker{}\n\treq := &pbf.ListWorkersRequest{}\n\tresp, err := s.client.ListWorkers(context.Background(), req)\n\n\t// If there's an error, return an empty list\n\tif err != nil {\n\t\tlog.Error(\"Failed ListWorkers request. Recovering.\", err)\n\t\treturn workers\n\t}\n\n\tworkers = resp.Workers\n\n\t// Include unprovisioned (template) workers.\n\t// This is how the scheduler can schedule tasks to workers that\n\t// haven't been started yet.\n\tfor _, t := range s.gce.Templates() {\n\t\tt.Id = scheduler.GenWorkerID(\"funnel\")\n\t\tworkers = append(workers, &t)\n\t}\n\n\treturn workers\n}",
  "func (mr *MapReduce) KillWorkers() *list.List {\n  l := list.New()\n  for _, w := range mr.Workers {\n    DPrintf(\"DoWork: shutdown %s\\n\", w.address)\n    args := &ShutdownArgs{}\n    var reply ShutdownReply;\n    ok := call(w.address, \"Worker.Shutdown\", args, &reply)\n    if ok == false {\n      fmt.Printf(\"DoWork: RPC %s shutdown error\\n\", w.address)\n    } else {\n      l.PushBack(reply.Njobs)\n    }\n  }\n  return l\n}",
  "func (p *localWorkerPool) GetAllWorkers() []api.LocalWorker {\n\tp.mutex.RLock()\n\tdefer p.mutex.RUnlock()\n\n\tresult := make([]api.LocalWorker, 0, len(p.workers))\n\tfor _, entry := range p.workers {\n\t\tresult = append(result, entry.LocalWorker)\n\t}\n\tsort.Slice(result, func(i, j int) bool {\n\t\treturn result[i].Id < result[j].Id\n\t})\n\treturn result\n}",
  "func (mr *MapReduce) KillWorkers() *list.List {\n\tl := list.New()\n\tfor _, w := range mr.Workers {\n\t\tDPrintf(\"DoWork: shutdown %s\\n\", w.address)\n\t\targs := &ShutdownArgs{}\n\t\tvar reply ShutdownReply\n\t\tok := call(w.address, \"Worker.Shutdown\", args, &reply)\n\t\tif ok == false {\n\t\t\tfmt.Printf(\"DoWork: RPC shutdown error\\n\")\n\t\t} else {\n\t\t\tl.PushBack(reply.Njobs)\n\t\t}\n\t}\n\treturn l\n}",
  "func (a *App) GetWorkers() map[string]*Worker {\n\ta.scheduler.mu.Lock()\n\tdefer a.scheduler.mu.Unlock()\n\treturn a.scheduler.workers\n}",
  "func (d Dispatcher) GetWorkers() map[string]*WorkerInfo {\n\treturn d.workers\n}",
  "func GetWorkers(token string) ([]*Worker, error) {\n\t// declarations\n\tvar workers []*Worker\n\trows, err := db.Query(\"SELECT * FROM workers \"+\n\t\t\"WHERE uid = (SELECT id FROM users WHERE token = $1)\", token)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// fetch workers\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tworker := Worker{}\n\n\t\terr := rows.Scan(&worker.Id, &worker.Uid, &worker.Token, &worker.Name,\n\t\t\t&worker.Last_contact, &worker.Active, &worker.Shared)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tworkers = append(workers, &worker)\n\t}\n\n\treturn workers, nil\n}",
  "func (client *NginxClient) GetWorkers() ([]*Workers, error) {\n\tvar workers []*Workers\n\tif client.version < 9 {\n\t\treturn workers, nil\n\t}\n\terr := client.get(\"workers\", &workers)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get workers: %w\", err)\n\t}\n\treturn workers, nil\n}",
  "func getWorkers(c *gin.Context) {\n\tdata := []byte(WorkersResp)\n\n\tvar body []library.Worker\n\t_ = json.Unmarshal(data, &body)\n\n\tc.JSON(http.StatusOK, body)\n}",
  "func (p *Pool) Workers() (workers []*Worker) {\n\tp.muw.RLock()\n\tdefer p.muw.RUnlock()\n\n\tfor _, w := range p.workers {\n\t\tworkers = append(workers, w)\n\t}\n\n\treturn workers\n}",
  "func WorkersAll() []Worker {\n\treturn []Worker{\n\t\tNewBlockonomics(),\n\t\tNewBlockcypher(),\n\t}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	NewSummary creates a new tracing summary that can be passed to component constructors for adding traces. 
 | 
	func NewSummary() *Summary {
	return &Summary{}
} 
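A hedged wiring sketch: one Summary is shared by all traced components so events collected during a run can be read back afterwards (the function name is an assumption; component constructor signatures are not shown in this entry):

func runWithTracing() *Summary {
	s := NewSummary()
	// Pass s to component constructors here; the constructor names are
	// specific to the streaming library and are not part of this entry.
	return s
}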
 | 
	[
  "func NewSummary(db database.Writer) *Summary {\n\treturn &Summary{\n\t\tID:     \"summary\",\n\t\tdb:     db,\n\t\tchecks: make(map[string]State),\n\t}\n}",
  "func NewSummary(namespace, subsystem, name, help string, labelMap map[string]string, objectives map[float64]float64) prometheus.Summary {\n\treturn prometheus.NewSummary(prometheus.SummaryOpts{\n\t\tNamespace:   namespace,\n\t\tSubsystem:   subsystem,\n\t\tName:        name,\n\t\tHelp:        help,\n\t\tConstLabels: labelMap,\n\t\tObjectives:  objectives,\n\t},\n\t)\n}",
  "func NewTeamSummary()(*TeamSummary) {\n    m := &TeamSummary{\n    }\n    m.SetAdditionalData(make(map[string]interface{}));\n    return m\n}",
  "func NewSummary(a *Analysis, appliedRuleset *rulesets.AppliedRulesetSummary) *Summary {\n\tif a != nil {\n\t\trulesetName := \"N/A\"\n\t\trisk := \"high\"\n\t\tpassed := false\n\n\t\tif appliedRuleset != nil {\n\t\t\trisk, passed = appliedRuleset.SummarizeEvaluation()\n\n\t\t\tif appliedRuleset.RuleEvaluationSummary != nil && appliedRuleset.RuleEvaluationSummary.RulesetName != \"\" {\n\t\t\t\trulesetName = appliedRuleset.RuleEvaluationSummary.RulesetName\n\t\t\t}\n\t\t}\n\n\t\treturn &Summary{\n\t\t\tID:            a.ID,\n\t\t\tAnalysisID:    a.ID,\n\t\t\tTeamID:        a.TeamID,\n\t\t\tBranch:        a.Branch,\n\t\t\tDescription:   a.Description,\n\t\t\tRisk:          risk,\n\t\t\tSummary:       \"\",\n\t\t\tPassed:        passed,\n\t\t\tRulesetID:     a.RulesetID,\n\t\t\tRulesetName:   rulesetName,\n\t\t\tDuration:      a.Duration,\n\t\t\tCreatedAt:     a.CreatedAt,\n\t\t\tTriggerHash:   a.TriggerHash,\n\t\t\tTriggerText:   a.TriggerText,\n\t\t\tTriggerAuthor: a.TriggerAuthor,\n\t\t\tTrigger:       \"source commit\",\n\t\t}\n\t}\n\n\treturn &Summary{}\n}",
  "func NewSummary(h Holding) *Summary {\n\tmetric := &SummaryMetric{Price: h.Buy.Price, Date: h.Buy.Date}\n\treturn &Summary{\n\t\tName: h.Name, N: 0, Volume: h.Volume, AvgBid: &h.Buy.Price, AvgAsk: &h.Buy.Price,\n\t\tMaxBid: metric, MaxAsk: metric, MinBid: metric, MinAsk: metric, LastAsk: metric, LastBid: metric,\n\t}\n}",
  "func (ac *Accumulator) AddSummary(measurement string, fields map[string]interface{},\n\ttags map[string]string, t ...time.Time) {\n\t// as of right now metric always returns a nil error\n\tm, _ := metric.New(measurement, tags, fields, getTime(t), telegraf.Summary)\n\tac.AddMetric(m)\n}",
  "func (cm *customMetrics) AddSummary(\n\tnamespace, subsystem, name, help, internalKey string,\n\tmaxAge time.Duration, constLabels prometheus.Labels,\n\tobjectives map[float64]float64, ageBuckets, bufCap uint32) {\n\n\tcm.summaries[internalKey] = promauto.NewSummary(prometheus.SummaryOpts{\n\t\tNamespace:   namespace,\n\t\tSubsystem:   subsystem,\n\t\tName:        name,\n\t\tHelp:        help,\n\t\tMaxAge:      maxAge,\n\t\tConstLabels: constLabels,\n\t\tObjectives:  objectives,\n\t\tAgeBuckets:  ageBuckets,\n\t\tBufCap:      bufCap,\n\t})\n}",
  "func NewTaskReportSummary()(*TaskReportSummary) {\n    m := &TaskReportSummary{\n    }\n    m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n    m.SetAdditionalData(make(map[string]any))\n    return m\n}",
  "func NewSummaryListener() Summary {\n\treturn Summary{triggerInterval: 10 * time.Second}\n}",
  "func NewLabelSummary(subsystem, name, help string, labels ...string) *prometheus.SummaryVec {\n\treturn prometheus.NewSummaryVec(\n\t\tprometheus.SummaryOpts{\n\t\t\tNamespace:   namespace,\n\t\t\tSubsystem:   subsystem,\n\t\t\tName:        name,\n\t\t\tHelp:        help,\n\t\t\tConstLabels: nil,\n\t\t}, labels)\n}",
  "func newSummaryCache(retain time.Duration) *summaryCache {\n\treturn &summaryCache{\n\t\tretain: retain,\n\t\titems:  make(map[string]*eventsStatementsSummaryByDigest),\n\t\tadded:  make(map[string]time.Time),\n\t}\n}",
  "func NewProvisioningObjectSummary()(*ProvisioningObjectSummary) {\n    m := &ProvisioningObjectSummary{\n        Entity: *NewEntity(),\n    }\n    return m\n}",
  "func NewTagSummary(tagName string, articleIDs []string) *TagSummary {\n\treturn &TagSummary{\n\t\tCount:      1,\n\t\tArticleIDs: articleIDs,\n\t\tTag:        tagName,\n\t}\n}",
  "func New(storages *storage.Storage) *Summaries {\n\treturn &Summaries{\n\t\tstorages: storages,\n\t}\n}",
  "func NewApplicationSignInDetailedSummary()(*ApplicationSignInDetailedSummary) {\n    m := &ApplicationSignInDetailedSummary{\n        Entity: *NewEntity(),\n    }\n    return m\n}",
  "func newChatSummary(vres *chatterviews.ChatSummaryView) *ChatSummary {\n\tres := &ChatSummary{\n\t\tLength: vres.Length,\n\t}\n\tif vres.Message != nil {\n\t\tres.Message = *vres.Message\n\t}\n\tif vres.SentAt != nil {\n\t\tres.SentAt = *vres.SentAt\n\t}\n\treturn res\n}",
  "func NewSummaryNoOp() Summary {\n\treturn summaryNoOp{}\n}",
  "func NewSummaryBuilder() *SummaryBuilder {\n\tr := SummaryBuilder{\n\t\t&Summary{},\n\t}\n\n\treturn &r\n}",
  "func Summary(attrs []htmlgo.Attribute, children ...HTML) HTML {\n\treturn &htmlgo.Tree{Tag: \"summary\", Attributes: attrs, Children: children}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	InputEvents returns a map of input labels to events traced during the execution of a stream pipeline. 
 | 
	func (s *Summary) InputEvents() map[string][]NodeEvent {
	m := map[string][]NodeEvent{}
	s.inputEvents.Range(func(key, value any) bool {
		m[key.(string)] = value.(*events).Extract()
		return true
	})
	return m
} 
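A minimal post-run inspection sketch (the helper name is an assumption), counting traced events per input label:

func summarizeInputs(s *Summary) {
	// Each key is an input label; each value is the slice of traced events.
	for label, evts := range s.InputEvents() {
		fmt.Printf("input %s: %d events\n", label, len(evts))
	}
}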
 | 
	[
  "func (e *Engine) EventsInput() chan<- *InEvent {\n\treturn e.inEvents\n}",
  "func (linux *linuxSystemObject) GetInputEvents() ([]gin.OsEvent, int64) {\n\tvar first_event *C.GlopKeyEvent\n\tcp := (*unsafe.Pointer)(unsafe.Pointer(&first_event))\n\tvar length C.int\n\tvar horizon C.longlong\n\tC.GlopGetInputEvents(cp, unsafe.Pointer(&length), unsafe.Pointer(&horizon))\n\tlinux.horizon = int64(horizon)\n\tc_events := (*[1000]C.GlopKeyEvent)(unsafe.Pointer(first_event))[:length]\n\tevents := make([]gin.OsEvent, length)\n\tfor i := range c_events {\n\t\twx, wy := linux.rawCursorToWindowCoords(int(c_events[i].cursor_x), int(c_events[i].cursor_y))\n\t\tevents[i] = gin.OsEvent{\n\t\t\tKeyId:     gin.KeyId(c_events[i].index),\n\t\t\tPress_amt: float64(c_events[i].press_amt),\n\t\t\tTimestamp: int64(c_events[i].timestamp),\n\t\t\tX:         wx,\n\t\t\tY:         wy,\n\t\t}\n\t}\n\treturn events, linux.horizon\n}",
  "func (s *Summary) ProcessorEvents() map[string][]NodeEvent {\n\tm := map[string][]NodeEvent{}\n\ts.processorEvents.Range(func(key, value any) bool {\n\t\tm[key.(string)] = value.(*events).Extract()\n\t\treturn true\n\t})\n\treturn m\n}",
  "func (c *Client) Inputs() (map[int]Input, error) {\n\treq, err := http.NewRequest(http.MethodGet, \"/menu_native/dynamic/tv_settings/devices/name_input\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resp respInputs\n\tif err = c.do(req, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\n\tinputs := make(map[int]Input, len(resp.Items))\n\tfor _, i := range resp.Items {\n\t\tinputs[i.Hash] = Input{\n\t\t\tDisplayName: i.Value.Name,\n\t\t\tHash:        i.Hash,\n\t\t\tName:        i.Name,\n\t\t}\n\t}\n\n\treturn inputs, nil\n}",
  "func Input() *Event {\n\treturn NewEvent(\"input\")\n\n}",
  "func getDataEvents(events []*input.Event) []common.MapStr {\n\tdataEvents := make([]common.MapStr, 0, len(events))\n\tfor _, event := range events {\n\t\tif event.HasData() {\n\t\t\tdataEvents = append(dataEvents, event.ToMapStr())\n\t\t}\n\t}\n\treturn dataEvents\n}",
  "func (u *InputUnifi) Events(filter *poller.Filter) (*poller.Events, error) {\n\tif u.Disable {\n\t\treturn nil, nil\n\t}\n\n\tlogs := []any{}\n\n\tif filter == nil {\n\t\tfilter = &poller.Filter{}\n\t}\n\n\tfor _, c := range u.Controllers {\n\t\tif filter.Path != \"\" && !strings.EqualFold(c.URL, filter.Path) {\n\t\t\tcontinue\n\t\t}\n\n\t\tevents, err := u.collectControllerEvents(c)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tlogs = append(logs, events...)\n\t}\n\n\treturn &poller.Events{Logs: logs}, nil\n}",
  "func (s *Summary) OutputEvents() map[string][]NodeEvent {\n\tm := map[string][]NodeEvent{}\n\ts.outputEvents.Range(func(key, value any) bool {\n\t\tm[key.(string)] = value.(*events).Extract()\n\t\treturn true\n\t})\n\treturn m\n}",
  "func eventMapFromEvents(events []*Event) map[string]*Event {\n\tr := make(map[string]*Event, len(events))\n\tfor _, e := range events {\n\t\tif _, ok := r[e.EventID()]; !ok {\n\t\t\tr[e.EventID()] = e\n\t\t}\n\t}\n\treturn r\n}",
  "func eventStream(s []string) []event {\n\tvar id int\n\tevents := make([]event, len(s))\n\n\tfor i, l := range s {\n\t\tt, _ := time.Parse(\"2006-01-02 15:04\", l[1:17])\n\t\te := event{ts: t}\n\n\t\tswitch l[19:24] {\n\t\tcase \"Guard\":\n\t\t\te.typ = beginShift\n\t\t\tfmt.Sscanf(l[26:], \"%d\", &id)\n\t\t\te.id = id\n\t\tcase \"falls\":\n\t\t\te.typ = sleep\n\t\t\te.id = id\n\t\tcase \"wakes\":\n\t\t\te.typ = wake\n\t\t\te.id = id\n\t\t}\n\n\t\tevents[i] = e\n\t}\n\n\treturn events\n}",
  "func ParseEvents(in <-chan KeyEvent) (<-chan Report, <-chan error) {\n\tout := make(chan Report)\n\tchErr := make(chan error)\n\n\tgo func() {\n\t\tdefer close(out)\n\t\tdefer close(chErr)\n\n\t\tvar e KeyEvent\n\t\tvar r Report\n\t\tvar err error\n\n\t\tfor e = range in {\n\t\t\tswitch e.State {\n\t\t\tcase evdev.KeyDown:\n\t\t\t\terr = r.addKeyEvent(e)\n\t\t\t\tif err != nil {\n\t\t\t\t\tchErr <- err\n\t\t\t\t} else {\n\t\t\t\t\tout <- r\n\t\t\t\t}\n\t\t\t\tr.zero()\n\n\t\t\tcase evdev.KeyUp:\n\t\t\t\t// Send a zero report to indicate that no keys are pressed\n\t\t\t\tr.zero()\n\t\t\t\tout <- r\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn out, chErr\n}",
  "func Inputs(spec *logging.ClusterLogForwarderSpec, o Options) []Element {\n\tel := []Element{}\n\n\ttypes := GatherSources(spec, o)\n\t// route container_logs based on type\n\tif types.Has(logging.InputNameApplication) || types.Has(logging.InputNameInfrastructure) {\n\t\tr := Route{\n\t\t\tComponentID: \"route_container_logs\",\n\t\t\tInputs:      helpers.MakeInputs(InputContainerLogs),\n\t\t\tRoutes:      map[string]string{},\n\t\t}\n\t\tif types.Has(logging.InputNameApplication) {\n\t\t\tr.Routes[\"app\"] = Quote(AppContainerLogs)\n\t\t}\n\t\tif types.Has(logging.InputNameInfrastructure) {\n\t\t\tr.Routes[\"infra\"] = Quote(InfraContainerLogs)\n\t\t}\n\t\tel = append(el, r)\n\t}\n\n\tif types.Has(logging.InputNameApplication) {\n\t\tel = append(el, Remap{\n\t\t\tDesc:        `Set log_type to \"application\"`,\n\t\t\tComponentID: logging.InputNameApplication,\n\t\t\tInputs:      helpers.MakeInputs(\"route_container_logs.app\"),\n\t\t\tVRL:         AddLogTypeApp,\n\t\t})\n\t}\n\tif types.Has(logging.InputNameInfrastructure) {\n\t\tel = append(el, Remap{\n\t\t\tDesc:        `Set log_type to \"infrastructure\"`,\n\t\t\tComponentID: logging.InputNameInfrastructure,\n\t\t\tInputs:      helpers.MakeInputs(\"route_container_logs.infra\", InputJournalLogs),\n\t\t\tVRL:         AddLogTypeInfra,\n\t\t})\n\t}\n\tif types.Has(logging.InputNameAudit) {\n\t\tel = append(el,\n\t\t\tRemap{\n\t\t\t\tDesc:        `Set log_type to \"audit\"`,\n\t\t\t\tComponentID: logging.InputNameAudit,\n\t\t\t\tInputs:      helpers.MakeInputs(HostAuditLogs, K8sAuditLogs, OpenshiftAuditLogs, OvnAuditLogs),\n\t\t\t\tVRL: strings.Join(helpers.TrimSpaces([]string{\n\t\t\t\t\tAddLogTypeAudit,\n\t\t\t\t\tFixHostname,\n\t\t\t\t\tnormalize.FixTimestampField,\n\t\t\t\t}), \"\\n\"),\n\t\t\t})\n\t}\n\n\tuserDefinedAppRouteMap := UserDefinedAppRouting(spec, o)\n\tif len(userDefinedAppRouteMap) != 0 {\n\t\tel = append(el, Route{\n\t\t\tComponentID: RouteApplicationLogs,\n\t\t\tInputs:      helpers.MakeInputs(logging.InputNameApplication),\n\t\t\tRoutes:      userDefinedAppRouteMap,\n\t\t})\n\n\t\tuserDefined := spec.InputMap()\n\t\tfor inRef := range userDefinedAppRouteMap {\n\t\t\tif input, ok := userDefined[inRef]; ok && input.HasPolicy() && input.GetMaxRecordsPerSecond() > 0 {\n\t\t\t\t// Vector Throttle component cannot have zero threshold\n\t\t\t\tel = append(el, AddThrottle(input)...)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn el\n}",
  "func AvailableEvents() (map[string][]string, error) {\n\tevents := map[string][]string{}\n\t// BUG(hodgesds): this should ideally check mounts for debugfs\n\trawEvents, err := fileToStrings(TracingDir + \"/available_events\")\n\t// Events are colon delimited by type so parse the type and add sub\n\t// events appropriately.\n\tif err != nil {\n\t\treturn events, err\n\t}\n\tfor _, rawEvent := range rawEvents {\n\t\tsplits := strings.Split(rawEvent, \":\")\n\t\tif len(splits) <= 1 {\n\t\t\tcontinue\n\t\t}\n\t\teventTypeEvents, found := events[splits[0]]\n\t\tif found {\n\t\t\tevents[splits[0]] = append(eventTypeEvents, splits[1])\n\t\t\tcontinue\n\t\t}\n\t\tevents[splits[0]] = []string{splits[1]}\n\t}\n\treturn events, err\n}",
  "func (t *tScreen) collectEventsFromInput(buf *bytes.Buffer, expire bool) []tcell.Event {\r\n\tres := make([]tcell.Event, 0, 20)\r\n\r\n\tt.Lock()\r\n\tdefer t.Unlock()\r\n\r\n\tfor {\r\n\t\tb := buf.Bytes()\r\n\t\tif len(b) == 0 {\r\n\t\t\tbuf.Reset()\r\n\t\t\treturn res\r\n\t\t}\r\n\r\n\t\tpartials := 0\r\n\r\n\t\tif part, comp := t.parseRune(buf, &res); comp {\r\n\t\t\tcontinue\r\n\t\t} else if part {\r\n\t\t\tpartials++\r\n\t\t}\r\n\r\n\t\tif part, comp := t.parseFunctionKey(buf, &res); comp {\r\n\t\t\tcontinue\r\n\t\t} else if part {\r\n\t\t\tpartials++\r\n\t\t}\r\n\r\n\t\t// Only parse mouse records if this term claims to have\r\n\t\t// mouse support\r\n\r\n\t\tif t.ti.Mouse != \"\" {\r\n\t\t\tif part, comp := t.parseXtermMouse(buf, &res); comp {\r\n\t\t\t\tcontinue\r\n\t\t\t} else if part {\r\n\t\t\t\tpartials++\r\n\t\t\t}\r\n\r\n\t\t\tif part, comp := t.parseSgrMouse(buf, &res); comp {\r\n\t\t\t\tcontinue\r\n\t\t\t} else if part {\r\n\t\t\t\tpartials++\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\tif partials == 0 || expire {\r\n\t\t\tif b[0] == '\\x1b' {\r\n\t\t\t\tif len(b) == 1 {\r\n\t\t\t\t\tres = append(res, tcell.NewEventKey(tcell.KeyEsc, 0, tcell.ModNone))\r\n\t\t\t\t\tt.escaped = false\r\n\t\t\t\t} else {\r\n\t\t\t\t\tt.escaped = true\r\n\t\t\t\t}\r\n\t\t\t\tbuf.ReadByte()\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\t\t\t// Nothing was going to match, or we timed out\r\n\t\t\t// waiting for more data -- just deliver the characters\r\n\t\t\t// to the app & let them sort it out.  Possibly we\r\n\t\t\t// should only do this for control characters like ESC.\r\n\t\t\tby, _ := buf.ReadByte()\r\n\t\t\tmod := tcell.ModNone\r\n\t\t\tif t.escaped {\r\n\t\t\t\tt.escaped = false\r\n\t\t\t\tmod = tcell.ModAlt\r\n\t\t\t}\r\n\t\t\tres = append(res, tcell.NewEventKey(tcell.KeyRune, rune(by), mod))\r\n\t\t\tcontinue\r\n\t\t}\r\n\r\n\t\t// well we have some partial data, wait until we get\r\n\t\t// some more\r\n\t\tbreak\r\n\t}\r\n\r\n\treturn res\r\n}",
  "func (r *ProcessorResolver) Inputs(ctx context.Context, processor *model.Processor) ([]*model.ProcessorInput, error) {\n\tresult, err := r.DataLoaders.ProcessorLoader(ctx).InputsByProcessor(processor.ID)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to get inputs from loader\")\n\t}\n\treturn result, nil\n}",
  "func (emitter *EventEmitter) EventNames() []string {\n\treturn getListenerMapKeys(emitter.listenerMap)\n}",
  "func (sc *SimpleProjection) Inputs() ([]Primitive, []map[string]any) {\n\treturn []Primitive{sc.Input}, nil\n}",
  "func NewInputs(inputsCfg config.Inputs) *Inputs {\n\tinputs := Inputs{\n\t\tRW:  *new(sync.RWMutex),\n\t\tMap: make(map[string]Input),\n\t}\n\n\tinputs.RW.Lock()\n\tdefer inputs.RW.Unlock()\n\n\tfor _, in := range inputsCfg {\n\t\tinputs.Map[in.Name] = NewInput(in.IO.Name, in.IO.Type, msgs.Representation(in.IO.Representation), in.IO.Channel, NewDefaultMessage(in.Type, in.Default))\n\t}\n\treturn &inputs\n}",
  "func (a *Action) InputNames() (names []string) {\n\tnames = []string{}\n\n\tfor k := range a.Input {\n\t\tnames = append(names, k)\n\t}\n\n\tsort.Strings(names)\n\n\treturn names\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	ProcessorEvents returns a map of processor labels to events traced during the execution of a stream pipeline. 
 | 
	func (s *Summary) ProcessorEvents() map[string][]NodeEvent {
	m := map[string][]NodeEvent{}
	s.processorEvents.Range(func(key, value any) bool {
		m[key.(string)] = value.(*events).Extract()
		return true
	})
	return m
} 
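The same post-run pattern as the InputEvents sketch, keyed by processor label (helper name again an assumption):

func summarizeProcessors(s *Summary) {
	// Each key is a processor label; each value is the slice of traced events.
	for label, evts := range s.ProcessorEvents() {
		fmt.Printf("processor %s: %d events\n", label, len(evts))
	}
}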
 | 
	[
  "func (s *Summary) InputEvents() map[string][]NodeEvent {\n\tm := map[string][]NodeEvent{}\n\ts.inputEvents.Range(func(key, value any) bool {\n\t\tm[key.(string)] = value.(*events).Extract()\n\t\treturn true\n\t})\n\treturn m\n}",
  "func (p *Producer) GetEventProcessingStats() map[string]uint64 {\n\taggStats := make(map[string]uint64)\n\n\tfor _, consumer := range p.getConsumers() {\n\t\tstats := consumer.GetEventProcessingStats()\n\t\tfor stat, value := range stats {\n\t\t\tif _, ok := aggStats[stat]; !ok {\n\t\t\t\taggStats[stat] = 0\n\t\t\t}\n\t\t\taggStats[stat] += value\n\t\t}\n\t}\n\n\tif p.workerSpawnCounter > 0 {\n\t\taggStats[\"worker_spawn_counter\"] = p.workerSpawnCounter\n\t}\n\n\treturn aggStats\n}",
  "func eventMapFromEvents(events []*Event) map[string]*Event {\n\tr := make(map[string]*Event, len(events))\n\tfor _, e := range events {\n\t\tif _, ok := r[e.EventID()]; !ok {\n\t\t\tr[e.EventID()] = e\n\t\t}\n\t}\n\treturn r\n}",
  "func AvailableEvents() (map[string][]string, error) {\n\tevents := map[string][]string{}\n\t// BUG(hodgesds): this should ideally check mounts for debugfs\n\trawEvents, err := fileToStrings(TracingDir + \"/available_events\")\n\t// Events are colon delimited by type so parse the type and add sub\n\t// events appropriately.\n\tif err != nil {\n\t\treturn events, err\n\t}\n\tfor _, rawEvent := range rawEvents {\n\t\tsplits := strings.Split(rawEvent, \":\")\n\t\tif len(splits) <= 1 {\n\t\t\tcontinue\n\t\t}\n\t\teventTypeEvents, found := events[splits[0]]\n\t\tif found {\n\t\t\tevents[splits[0]] = append(eventTypeEvents, splits[1])\n\t\t\tcontinue\n\t\t}\n\t\tevents[splits[0]] = []string{splits[1]}\n\t}\n\treturn events, err\n}",
  "func registerAllProcessors( eventProcessors []eventprocessors.EventProcessor) ( []EventProcessorChannelPair, map[string][]chan client.ResolvedEvent) {\n\tppMap := make(map[string][]EventProcessorChannelPair)\n\tppArray := []EventProcessorChannelPair{}\n\teventTypeChannelMap := make(map[string][]chan client.ResolvedEvent)\n\n\tfor _,p := range eventProcessors {\n\t\tch := make(chan client.ResolvedEvent , 10000)\n\n\t\t// make a map of event types to an array of channels that\n\t\t// need to receive that eventtype.\n\t\tfor _, et := range p.GetRegisteredEventTypes() {\n\t\t\tchannelArray, ok := eventTypeChannelMap[et]\n\t\t\tif !ok {\n\t\t\t\tchannelArray = []chan client.ResolvedEvent{}\n\t\t\t}\n\t\t\teventTypeChannelMap[et] = append(channelArray, ch)\n\t\t}\n\n\t\t\t// make sure create for correct count of instances.\n\t\tfor count:=0 ;count < p.GetNumberOfInstances(); count++ {\n\t\t\tpp := EventProcessorChannelPair{processor: p}\n\t\t\tpp.channel = ch\n\t\t\tppArray = append(ppArray, pp)\n\n\t\t\t// put\n\t\t\tfor _, et := range p.GetRegisteredEventTypes() {\n\t\t\t\tppArray, ok := ppMap[et]\n\t\t\t\tif !ok {\n\t\t\t\t\tppArray = []EventProcessorChannelPair{}\n\t\t\t\t}\n\t\t\t\tppMap[et] = append(ppArray, pp)\n\t\t\t}\n\t\t}\n\t}\n\treturn ppArray, eventTypeChannelMap\n}",
  "func (sec *sensorExecutionCtx) extractEvents(params []v1alpha1.TriggerParameter) map[string]apicommon.Event {\n\tevents := make(map[string]apicommon.Event)\n\tfor _, param := range params {\n\t\tif param.Src != nil {\n\t\t\tlog := sec.log.WithFields(\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"param-src\":  param.Src.Event,\n\t\t\t\t\t\"param-dest\": param.Dest,\n\t\t\t\t})\n\t\t\tnode := sn.GetNodeByName(sec.sensor, param.Src.Event)\n\t\t\tif node == nil {\n\t\t\t\tlog.Warn(\"event dependency node does not exist, cannot apply parameter\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif node.Event == nil {\n\t\t\t\tlog.Warn(\"event in event dependency does not exist, cannot apply parameter\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tevents[param.Src.Event] = *node.Event\n\t\t}\n\t}\n\treturn events\n}",
  "func NewEventProcessor(state StateType, stateKeyField StateKeyField, events []EventBuilder) (EventProcessor, error) {\n\terr := VerifyEventParameters(state, stateKeyField, events)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstateType := reflect.TypeOf(state)\n\n\tem := eventProcessor{\n\t\tstateType:     stateType,\n\t\tstateKeyField: stateKeyField,\n\t\tcallbacks:     make(map[EventName]callback),\n\t\ttransitions:   make(map[eKey]StateKey),\n\t}\n\n\t// Build transition map and store sets of all events and states.\n\tfor _, evtIface := range events {\n\t\tevt := evtIface.(eventBuilder)\n\n\t\tname := evt.name\n\n\t\tvar argumentTypes []reflect.Type\n\t\tif evt.action != nil {\n\t\t\tatType := reflect.TypeOf(evt.action)\n\t\t\targumentTypes = make([]reflect.Type, atType.NumIn()-1)\n\t\t\tfor i := range argumentTypes {\n\t\t\t\targumentTypes[i] = atType.In(i + 1)\n\t\t\t}\n\t\t}\n\t\tem.callbacks[name] = callback{\n\t\t\targumentTypes: argumentTypes,\n\t\t\taction:        evt.action,\n\t\t}\n\t\tfor src, dst := range evt.transitionsSoFar {\n\t\t\tem.transitions[eKey{name, src}] = dst\n\t\t}\n\t}\n\treturn em, nil\n}",
  "func (s *Summary) OutputEvents() map[string][]NodeEvent {\n\tm := map[string][]NodeEvent{}\n\ts.outputEvents.Range(func(key, value any) bool {\n\t\tm[key.(string)] = value.(*events).Extract()\n\t\treturn true\n\t})\n\treturn m\n}",
  "func (state PipelineRunState) ToMap() map[string]*ResolvedPipelineRunTask {\n\tm := make(map[string]*ResolvedPipelineRunTask)\n\tfor _, rprt := range state {\n\t\tm[rprt.PipelineTask.Name] = rprt\n\t}\n\treturn m\n}",
  "func policyEvents(us resource.PolicyUpdates, now time.Time) map[string]event.Event {\n\teventsByType := map[string]event.Event{}\n\tfor workloadID, update := range us {\n\t\tfor _, eventType := range policyEventTypes(update) {\n\t\t\te, ok := eventsByType[eventType]\n\t\t\tif !ok {\n\t\t\t\te = event.Event{\n\t\t\t\t\tServiceIDs: []resource.ID{},\n\t\t\t\t\tType:       eventType,\n\t\t\t\t\tStartedAt:  now,\n\t\t\t\t\tEndedAt:    now,\n\t\t\t\t\tLogLevel:   event.LogLevelInfo,\n\t\t\t\t}\n\t\t\t}\n\t\t\te.ServiceIDs = append(e.ServiceIDs, workloadID)\n\t\t\teventsByType[eventType] = e\n\t\t}\n\t}\n\treturn eventsByType\n}",
  "func GetEventMap() map[string]int {\n\treturn map[string]int{\n\t\t\"usercommand\":        0,\n\t\t\"systemevent\":        1,\n\t\t\"errorevent\":         2,\n\t\t\"quoteserver\":        3,\n\t\t\"accounttransaction\": 4,\n\t}\n}",
  "func (p *Producer) GetEventingConsumerPids() map[string]int {\n\tworkerPidMapping := make(map[string]int)\n\n\tfor _, consumer := range p.getConsumers() {\n\t\tworkerPidMapping[consumer.ConsumerName()] = consumer.Pid()\n\t}\n\n\treturn workerPidMapping\n}",
  "func TaskProcessInfoEvents(taskId string, ts time.Time, limit, sort int) db.Q {\n\tfilter := bson.M{\n\t\tDataKey + \".\" + ResourceTypeKey: EventTaskProcessInfo,\n\t\tResourceIdKey:                   taskId,\n\t\tTypeKey:                         EventTaskProcessInfo,\n\t}\n\n\tsortSpec := TimestampKey\n\n\tif sort < 0 {\n\t\tsortSpec = \"-\" + sortSpec\n\t\tfilter[TimestampKey] = bson.M{\"$lte\": ts}\n\t} else {\n\t\tfilter[TimestampKey] = bson.M{\"$gte\": ts}\n\t}\n\n\treturn db.Query(filter).Sort([]string{sortSpec}).Limit(limit)\n}",
  "func (e *EventLogger) ProcessEvents() {\n\te.CreateEventLogs()\n\te.WriteEventLogs()\n}",
  "func getCallbacks(m *mesosManager) map[string]schedulerEventCallback {\n\tprocedures := make(map[string]schedulerEventCallback)\n\tcallbacks := map[sched.Event_Type]schedulerEventCallback{\n\t\tsched.Event_SUBSCRIBED: m.Subscribed,\n\t\tsched.Event_MESSAGE:    m.Message,\n\t\tsched.Event_FAILURE:    m.Failure,\n\t\tsched.Event_ERROR:      m.Error,\n\t\tsched.Event_HEARTBEAT:  m.Heartbeat,\n\t\tsched.Event_UNKNOWN:    m.Unknown,\n\t}\n\tfor typ, hdl := range callbacks {\n\t\tname := typ.String()\n\t\tprocedures[name] = hdl\n\t}\n\treturn procedures\n}",
  "func getDataEvents(events []*input.Event) []common.MapStr {\n\tdataEvents := make([]common.MapStr, 0, len(events))\n\tfor _, event := range events {\n\t\tif event.HasData() {\n\t\t\tdataEvents = append(dataEvents, event.ToMapStr())\n\t\t}\n\t}\n\treturn dataEvents\n}",
  "func upstreamProcsForProc(node Node) map[string]Node {\n\tprocs := map[string]Node{}\n\tfor _, inp := range node.InPorts() {\n\t\tfor _, rpt := range inp.RemotePorts {\n\t\t\tprocs[rpt.Process().Name()] = rpt.Process()\n\t\t\tmergeWFMaps(procs, upstreamProcsForProc(rpt.Process()))\n\t\t}\n\t}\n\treturn procs\n}",
  "func aggregateEvents(events []*docker.ContainerEvent, filteredActions []string) map[string]*dockerEventBundle {\n\t// Pre-aggregate container events by image\n\teventsByImage := make(map[string]*dockerEventBundle)\n\tfilteredByType := make(map[string]int)\n\n\tfor _, event := range events {\n\t\tif matchFilter(event.Action, filteredActions) {\n\t\t\tfilteredByType[event.Action] = filteredByType[event.Action] + 1\n\t\t\tcontinue\n\t\t}\n\t\tbundle, found := eventsByImage[event.ImageName]\n\t\tif found == false {\n\t\t\tbundle = newDockerEventBundler(event.ImageName)\n\t\t\teventsByImage[event.ImageName] = bundle\n\t\t}\n\t\tbundle.addEvent(event) //nolint:errcheck\n\t}\n\n\tif len(filteredByType) > 0 {\n\t\tlog.Debugf(\"filtered out the following events: %s\", formatStringIntMap(filteredByType))\n\t}\n\treturn eventsByImage\n}",
  "func Events(payloadCh <-chan []byte, registryCh chan<- client.RegistryFunc) {\n\tpackets := make(map[uint64]client.RegistryFunc)\n\n\tgo func(payloadCh <-chan []byte, registryCh chan<- client.RegistryFunc) {\n\t\tvar index uint64 = startingIndex\n\n\t\tdefer close(registryCh)\n\n\t\tfor payload := range payloadCh {\n\t\t\tpkt, err := event.Parse(payload)\n\t\t\tif err != nil {\n\t\t\t\t// TODO(tmrts): might try to read the packet sequence no and skip that packet\n\t\t\t\t//              to make sure the flow continues.\n\t\t\t\tlog.Debug(fmt.Sprintf(\"event.Parse(%#q) got error %#q\", string(payload), err))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tseq := pkt.Sequence()\n\t\t\t// Ignores packets with same sequence numbers or\n\t\t\t// lower than current index numbers.\n\t\t\tif _, ok := packets[seq]; !ok && seq >= index {\n\t\t\t\tpackets[seq] = notify.FuncFor(pkt)\n\t\t\t}\n\n\t\t\tfor {\n\t\t\t\tpkt, ok := packets[index]\n\t\t\t\tif !ok {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tregistryCh <- pkt\n\n\t\t\t\t// Evicts used event packets\n\t\t\t\t// NOTE: Bulk delete might increase performance\n\t\t\t\tdelete(packets, index)\n\n\t\t\t\tindex++\n\t\t\t}\n\t\t}\n\n\t\t// Send the remaning events\n\t\tfor {\n\t\t\tpkt, ok := packets[index]\n\t\t\tif !ok {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tregistryCh <- pkt\n\t\t\tindex++\n\t\t}\n\t}(payloadCh, registryCh)\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	OutputEvents returns a map of output labels to events traced during the execution of a stream pipeline. 
 | 
	func (s *Summary) OutputEvents() map[string][]NodeEvent {
	m := map[string][]NodeEvent{}
	// Range over the concurrent map, taking a snapshot of the traced
	// events for each output label
	s.outputEvents.Range(func(key, value any) bool {
		m[key.(string)] = value.(*events).Extract()
		return true
	})
	return m
} 
 | 
	[
  "func (e *Engine) EventsOutput() <-chan *OutEvent {\n\treturn e.outEvents\n}",
  "func (s *Summary) ProcessorEvents() map[string][]NodeEvent {\n\tm := map[string][]NodeEvent{}\n\ts.processorEvents.Range(func(key, value any) bool {\n\t\tm[key.(string)] = value.(*events).Extract()\n\t\treturn true\n\t})\n\treturn m\n}",
  "func (s *Summary) InputEvents() map[string][]NodeEvent {\n\tm := map[string][]NodeEvent{}\n\ts.inputEvents.Range(func(key, value any) bool {\n\t\tm[key.(string)] = value.(*events).Extract()\n\t\treturn true\n\t})\n\treturn m\n}",
  "func (ts Timers) Events() chan schedule.Schedule {\n\treturn ts.scheduleFilter.output\n}",
  "func (w *Window) Events() <-chan Event { return w.out }",
  "func AvailableEvents() (map[string][]string, error) {\n\tevents := map[string][]string{}\n\t// BUG(hodgesds): this should ideally check mounts for debugfs\n\trawEvents, err := fileToStrings(TracingDir + \"/available_events\")\n\t// Events are colon delimited by type so parse the type and add sub\n\t// events appropriately.\n\tif err != nil {\n\t\treturn events, err\n\t}\n\tfor _, rawEvent := range rawEvents {\n\t\tsplits := strings.Split(rawEvent, \":\")\n\t\tif len(splits) <= 1 {\n\t\t\tcontinue\n\t\t}\n\t\teventTypeEvents, found := events[splits[0]]\n\t\tif found {\n\t\t\tevents[splits[0]] = append(eventTypeEvents, splits[1])\n\t\t\tcontinue\n\t\t}\n\t\tevents[splits[0]] = []string{splits[1]}\n\t}\n\treturn events, err\n}",
  "func (p *StreamToSubStream) OutPorts() map[string]*scipipe.OutPort {\n\treturn map[string]*scipipe.OutPort{\n\t\tp.OutSubStream.Name(): p.OutSubStream,\n\t}\n}",
  "func (engine *DockerTaskEngine) TaskEvents() (<-chan api.ContainerStateChange, <-chan error) {\n\treturn engine.container_events, engine.event_errors\n}",
  "func getDataEvents(events []*input.Event) []common.MapStr {\n\tdataEvents := make([]common.MapStr, 0, len(events))\n\tfor _, event := range events {\n\t\tif event.HasData() {\n\t\t\tdataEvents = append(dataEvents, event.ToMapStr())\n\t\t}\n\t}\n\treturn dataEvents\n}",
  "func OutputAllE(t testing.TestingT, options *Options) (map[string]interface{}, error) {\n\treturn OutputForKeysE(t, options, nil)\n}",
  "func GetPluginOuts(plugins []*PluginConfiguration) map[string]string {\n\touts := make(map[string]string)\n\tfor _, plugin := range plugins {\n\t\tif plugin.Out == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\touts[plugin.Label.String()] = plugin.Out\n\t}\n\treturn outs\n}",
  "func (emitter *EventEmitter) EventNames() []string {\n\treturn getListenerMapKeys(emitter.listenerMap)\n}",
  "func (o *Output) ToMap() map[string]interface{} {\n\n\treturn map[string]interface{}{\n\t\t\"output\": o.Output,\n\t}\n}",
  "func (o *Output) ToMap() map[string]interface{} {\n\treturn map[string]interface{}{\n\t\t\"output\": o.Output,\n\t}\n}",
  "func (o *Output) ToMap() map[string]interface{} {\n\treturn map[string]interface{}{\n\t\t\"code\":    o.Code,\n\t\t\"message\": o.Message,\n\t\t\"result\":  o.Result,\n\t}\n}",
  "func OutputTypes() []string {\n\tvar ts []string\n\tfor k := range streamable {\n\t\tts = append(ts, k)\n\t}\n\tfor k := range outFuns {\n\t\tts = append(ts, k)\n\t}\n\tsort.Strings(ts)\n\n\treturn ts\n}",
  "func (a *Action) OutputMappings() []string {\n\treturn a.outputMappings\n}",
  "func OutputAll(t testing.TestingT, options *Options) map[string]interface{} {\n\tout, err := OutputAllE(t, options)\n\trequire.NoError(t, err)\n\treturn out\n}",
  "func (l *Logger) output(ctx context.Context, sev Severity, msg interface{}, options []Option) {\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\n\tt := l.time()\n\te := Event{\n\t\tTime:     &t,\n\t\tSeverity: sev,\n\t\tBody:     msg,\n\t}\n\n\tfor _, h := range l.hooks {\n\t\th(ctx, &e)\n\t}\n\n\tfor _, o := range options {\n\t\to(&e)\n\t}\n\n\ts, err := l.outputFormatter(&e)\n\tif err == nil {\n\t\tif len(s) == 0 || s[len(s)-1] != '\\n' {\n\t\t\ts = append(s, '\\n')\n\t\t}\n\t\tl.out.Write(s)\n\t}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	preConfigureUI preconfigures the UI based on information about the user's terminal 
 | 
	func preConfigureUI() {
	term := os.Getenv("TERM")
	// Colors are disabled by default and enabled only for terminals
	// that are known to support them
	fmtc.DisableColors = true
	if term != "" {
		switch {
		case strings.Contains(term, "xterm"),
			strings.Contains(term, "color"),
			term == "screen":
			fmtc.DisableColors = false
		}
	}
	// Disable colors if stdout is not a character device (e.g. output
	// is piped), unless a fake TTY is explicitly requested
	if !fsutil.IsCharacterDevice("/dev/stdout") && os.Getenv("FAKETTY") == "" {
		fmtc.DisableColors = true
	}
	// Respect the NO_COLOR convention (https://no-color.org)
	if os.Getenv("NO_COLOR") != "" {
		fmtc.DisableColors = true
	}
} 
 | 
	[
  "func configureUI() {\n\tterminal.Prompt = \"› \"\n\tterminal.TitleColorTag = \"{s}\"\n\n\tif options.GetB(OPT_NO_COLOR) {\n\t\tfmtc.DisableColors = true\n\t}\n\n\tswitch {\n\tcase fmtc.IsTrueColorSupported():\n\t\tcolorTagApp, colorTagVer = \"{#CC1E2C}\", \"{#CC1E2C}\"\n\tcase fmtc.Is256ColorsSupported():\n\t\tcolorTagApp, colorTagVer = \"{#160}\", \"{#160}\"\n\tdefault:\n\t\tcolorTagApp, colorTagVer = \"{r}\", \"{r}\"\n\t}\n}",
  "func RunUI(cfg Config, inStatusCh chan inputStats, outputStatusChannel chan outputStats, cfgSource string) {\n\t// Let the goroutines initialize before starting GUI\n\ttime.Sleep(50 * time.Millisecond)\n\tif err := ui.Init(); err != nil {\n\t\tlog.Fatalf(\"failed to initialize termui: %v\", err)\n\t}\n\tdefer ui.Close()\n\n\ty := 0\n\theight := 5\n\twidth := 120\n\thalfWidth := width / 2\n\n\tp := widgets.NewParagraph()\n\tp.Title = applicationName()\n\tp.Text = fmt.Sprintf(\"PRESS q TO QUIT.\\nConfig from: %s\\n\", cfgSource)\n\n\tp.SetRect(0, y, width, height)\n\tp.TextStyle.Fg = ui.ColorWhite\n\tp.BorderStyle.Fg = ui.ColorCyan\n\n\ty += height\n\theight = 10\n\tinSrcHeight := height\n\tif cfg.RetransmitEnabled() {\n\t\tinSrcHeight = height * 2\n\n\t}\n\n\tinpSrcStatus := widgets.NewParagraph()\n\tinpSrcStatus.Title = \"GPS/GPS Compass in\"\n\tif cfg.InputEnabled() {\n\t\tinpSrcStatus.Text = \"Waiting for data\"\n\t} else {\n\t\tinpSrcStatus.Text = \"Input not enabled\"\n\t}\n\n\tinpSrcStatus.SetRect(0, y, halfWidth, y+inSrcHeight)\n\tinpSrcStatus.TextStyle.Fg = ui.ColorGreen\n\tinpSrcStatus.BorderStyle.Fg = ui.ColorCyan\n\n\tinpArrow := widgets.NewParagraph()\n\tinpArrow.Border = false\n\tinpArrow.Text = \"=>\"\n\tinpArrow.SetRect(halfWidth, y, halfWidth+5, y+height)\n\n\tinpDestStatus := widgets.NewParagraph()\n\tinpDestStatus.Title = \"GPS/GPS Compass out to UGPS\"\n\n\tinpDestStatus.SetRect(halfWidth+5, y, width, y+height)\n\tinpDestStatus.TextStyle.Fg = ui.ColorGreen\n\tinpDestStatus.BorderStyle.Fg = ui.ColorCyan\n\n\tinpRetransmitStatus := widgets.NewParagraph()\n\tif cfg.RetransmitEnabled() {\n\t\tinpRetransmitStatus.Title = \"Retransmit Input\"\n\n\t\tinpRetransmitStatus.SetRect(halfWidth+5, y+height, width, y+inSrcHeight)\n\t\tinpRetransmitStatus.TextStyle.Fg = ui.ColorGreen\n\t\tinpRetransmitStatus.BorderStyle.Fg = ui.ColorCyan\n\t}\n\n\t//y += height\n\ty += inSrcHeight\n\theight = 10\n\n\toutSrcStatus := widgets.NewParagraph()\n\toutSrcStatus.Title = \"Locator Position in from UGPS\"\n\toutSrcStatus.Text = \"Waiting for data\"\n\tif !cfg.OutputEnabled() {\n\t\toutSrcStatus.Text = \"Output not enabled\"\n\t}\n\toutSrcStatus.SetRect(0, y, halfWidth, y+height)\n\toutSrcStatus.TextStyle.Fg = ui.ColorGreen\n\toutSrcStatus.BorderStyle.Fg = ui.ColorCyan\n\n\toutArrow := widgets.NewParagraph()\n\toutArrow.Border = false\n\toutArrow.Text = \"=>\"\n\toutArrow.SetRect(halfWidth, y, halfWidth+5, y+height)\n\n\toutDestStatus := widgets.NewParagraph()\n\toutDestStatus.Title = \"Locator Position out to NMEA\"\n\toutDestStatus.Text = \"Waiting for data\"\n\tif !cfg.OutputEnabled() {\n\t\toutDestStatus.Text = \"Output not enabled\"\n\t}\n\toutDestStatus.SetRect(halfWidth+5, y, width, y+height)\n\toutDestStatus.TextStyle.Fg = ui.ColorGreen\n\toutDestStatus.BorderStyle.Fg = ui.ColorCyan\n\n\ty += height\n\theight = 15\n\n\tdbgText := widgets.NewList()\n\tdbgText.Title = \"Debug\"\n\tdbgText.Rows = dbgMsg\n\tdbgText.WrapText = true\n\tdbgText.SetRect(0, y, width, y+height)\n\tdbgText.BorderStyle.Fg = ui.ColorCyan\n\n\thideDebug := widgets.NewParagraph()\n\thideDebug.Text = \"\"\n\thideDebug.SetRect(0, y, width, y+height)\n\thideDebug.Border = false\n\n\tdraw := func() {\n\t\tui.Render(p, inpSrcStatus, inpArrow, inpDestStatus, outSrcStatus, outArrow, outDestStatus, inpRetransmitStatus)\n\t\tif debug {\n\t\t\tdbgText.Rows = dbgMsg\n\t\t\tui.Render(dbgText)\n\t\t} else {\n\t\t\tui.Render(hideDebug)\n\t\t}\n\t}\n\n\t// Initial draw before any events have occurred\n\tdraw()\n\n\tuiEvents := 
ui.PollEvents()\n\n\tfor {\n\t\tselect {\n\t\tcase inStats := <-inStatusCh:\n\t\t\tinpSrcStatus.TextStyle.Fg = ui.ColorGreen\n\t\t\tinpSrcStatus.Text = fmt.Sprintf(\"Source: %s\\n\\n\", cfg.Input.Device) +\n\t\t\t\t\"Supported NMEA sentences received:\\n\" +\n\t\t\t\tfmt.Sprintf(\" * Topside Position   : %s\\n\", inStats.src.posDesc) +\n\t\t\t\tfmt.Sprintf(\" * Topside Heading    : %s\\n\", inStats.src.headDesc) +\n\t\t\t\tfmt.Sprintf(\" * Parse error: %d\\n\\n\", inStats.src.unparsableCount) +\n\t\t\t\tinStats.src.errorMsg\n\t\t\tif inStats.src.errorMsg != \"\" {\n\t\t\t\tinpSrcStatus.TextStyle.Fg = ui.ColorRed\n\t\t\t}\n\t\t\tinpDestStatus.TextStyle.Fg = ui.ColorGreen\n\t\t\tinpDestStatus.Text = fmt.Sprintf(\"Destination: %s\\n\\n\", cfg.BaseURL) +\n\t\t\t\tfmt.Sprintf(\"Sent successfully to\\n Underwater GPS: %d\\n\\n\", inStats.dst.sendOk) +\n\t\t\t\tinStats.dst.errorMsg\n\t\t\tif inStats.dst.errorMsg != \"\" {\n\t\t\t\tinpDestStatus.TextStyle.Fg = ui.ColorRed\n\t\t\t}\n\n\t\t\tinpRetransmitStatus.Text = fmt.Sprintf(\"Destination: %s\\n\\n\", cfg.Input.Retransmit) +\n\t\t\t\tfmt.Sprintf(\"Count: %d\\n%s\", inStats.retransmit.count, inStats.retransmit.errorMsg)\n\t\t\tinpRetransmitStatus.TextStyle.Fg = ui.ColorGreen\n\t\t\tif inStats.retransmit.errorMsg != \"\" {\n\t\t\t\tinpRetransmitStatus.TextStyle.Fg = ui.ColorRed\n\t\t\t}\n\t\t\tdraw()\n\t\tcase outStats := <-outputStatusChannel:\n\t\t\toutSrcStatus.Text = fmt.Sprintf(\"Source: %s\\n\\n\", cfg.BaseURL) +\n\t\t\t\tfmt.Sprintf(\"Positions from Underwater GPS:\\n  %d\\n\", outStats.src.getCount)\n\t\t\toutSrcStatus.TextStyle.Fg = ui.ColorGreen\n\n\t\t\tif outStats.src.errMsg != \"\" {\n\t\t\t\toutSrcStatus.TextStyle.Fg = ui.ColorRed\n\t\t\t\toutSrcStatus.Text += fmt.Sprintf(\"\\n\\n%v (%d)\", outStats.src.errMsg, outStats.src.getErr)\n\t\t\t}\n\n\t\t\toutDestStatus.Text = fmt.Sprintf(\"Destination: %s\\n\\n\", cfg.Output.Device) +\n\t\t\t\t\"Sent:\\n\" +\n\t\t\t\tfmt.Sprintf(\" * Locator/ROV Position : %s: %d\\n\", strings.ToUpper(cfg.Output.PositionSentence), outStats.dst.sendOk)\n\t\t\toutDestStatus.TextStyle.Fg = ui.ColorGreen\n\n\t\t\tif outStats.dst.errMsg != \"\" {\n\t\t\t\toutDestStatus.TextStyle.Fg = ui.ColorRed\n\t\t\t\toutDestStatus.Text += fmt.Sprintf(\"\\n\\n%s\", outStats.dst.errMsg)\n\t\t\t}\n\t\t\tdraw()\n\t\tcase e := <-uiEvents:\n\t\t\tswitch e.ID {\n\t\t\tcase \"q\", \"<C-c>\":\n\t\t\t\treturn\n\t\t\tcase \"d\":\n\t\t\t\tdbgMsg = nil\n\t\t\t\tdbgText.Rows = dbgMsg\n\t\t\t\tdebug = !debug\n\n\t\t\t\tdraw()\n\t\t\t}\n\t\t}\n\t}\n}",
  "func configureUI() {\n\tif options.GetB(OPT_NO_COLOR) {\n\t\tfmtc.DisableColors = true\n\t}\n\n\tswitch {\n\tcase fmtc.IsTrueColorSupported():\n\t\tcolorTagApp, colorTagVer = \"{#BCCF00}\", \"{#BCCF00}\"\n\tcase fmtc.Is256ColorsSupported():\n\t\tcolorTagApp, colorTagVer = \"{#148}\", \"{#148}\"\n\tdefault:\n\t\tcolorTagApp, colorTagVer = \"{g}\", \"{g}\"\n\t}\n}",
  "func InitUI(c *processor.Console) {\n\tconsole = c\n\tframe = image.NewRGBA(image.Rect(0, 0, width, height))\n\n\t// Call ebiten.Run to start your game loop.\n\tif err := ebiten.Run(update, width, height, scale, title); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}",
  "func CustomizeScreen(w fyne.Window) fyne.CanvasObject {\n\tc := conf.Get()\n\ttb := widget.NewTabContainer()\n\td, _ := ioutil.ReadFile(c.DynamicUIFile)\n\tvar info uiConf\n\terr := json.Unmarshal(d, &info)\n\tif err != nil {\n\t\t// fmt.Println(\"dReadUI:\", err)\n\t\ttb.Append(widget.NewTabItem(\"error\", widget.NewLabel(fmt.Sprint(err))))\n\t\ttb.SelectTabIndex(0)\n\t\treturn tb\n\t}\n\tfor _, it := range info.RunUI {\n\t\tif it.Hide {\n\t\t\tcontinue\n\t\t}\n\t\ttb.Append(widget.NewTabItem(\"R_\"+it.Name, newRunUI(w, it)))\n\t}\n\tfor _, it := range info.ViewUI {\n\t\tif it.Hide {\n\t\t\tcontinue\n\t\t}\n\t\ttb.Append(widget.NewTabItem(\"V_\"+it.Name, newReadUI(w, it)))\n\t}\n\ttb.SelectTabIndex(0)\n\treturn tb\n}",
  "func (gn *Gen) ConfigGui() *gi.Window {\n\twidth := 1600\n\theight := 1200\n\n\tgi.SetAppName(\"Gen\")\n\tgi.SetAppAbout(`Gen concatenated strings of syllables`)\n\n\twin := gi.NewMainWindow(\"one\", \"Gen ...\", width, height)\n\n\tvp := win.WinViewport2D()\n\tupdt := vp.UpdateStart()\n\n\tmfr := win.SetMainFrame()\n\n\ttbar := gi.AddNewToolBar(mfr, \"tbar\")\n\ttbar.SetStretchMaxWidth()\n\t// vi.ToolBar = tbar\n\n\tsplit := gi.AddNewSplitView(mfr, \"split\")\n\tsplit.Dim = gi.X\n\tsplit.SetStretchMaxWidth()\n\tsplit.SetStretchMaxHeight()\n\n\tsv := giv.AddNewStructView(split, \"sv\")\n\tsv.SetStruct(gn)\n\tgn.StructView = sv\n\n\t// tv := gi.AddNewTabView(split, \"tv\")\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Gen cat string\", Icon: \"new\", Tooltip: \"Generate a new initial random seed to get different results.  By default, Init re-establishes the same initial seed every time.\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.CatNoRepeat(gn.syls1)\n\t\t})\n\n\tvp.UpdateEndNoSig(updt)\n\n\t// main menu\n\tappnm := gi.AppName()\n\tmmen := win.MainMenu\n\tmmen.ConfigMenus([]string{appnm, \"File\", \"Edit\", \"Window\"})\n\n\tamen := win.MainMenu.ChildByName(appnm, 0).(*gi.Action)\n\tamen.Menu.AddAppMenu(win)\n\n\temen := win.MainMenu.ChildByName(\"Edit\", 1).(*gi.Action)\n\temen.Menu.AddCopyCutPaste(win)\n\n\tvp.UpdateEndNoSig(updt)\n\n\twin.MainMenuUpdated()\n\treturn win\n}",
  "func InitTui(instanceRunning bool) bool {\n\tinitialModel.instanceAlreadyRunning = instanceRunning\n\tinitialModel.updateMenuItemsHomePage()\n\tp := tea.NewProgram(initialModel)\n\tif err := p.Start(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn execBot\n}",
  "func (gn *Gen) ConfigGui() *gi.Window {\n\twidth := 1600\n\theight := 1200\n\n\tgi.SetAppName(\"Gen\")\n\tgi.SetAppAbout(`Gen concatenated strings of syllables`)\n\n\twin := gi.NewMainWindow(\"one\", \"Gen ...\", width, height)\n\n\tvp := win.WinViewport2D()\n\tupdt := vp.UpdateStart()\n\n\tmfr := win.SetMainFrame()\n\n\ttbar := gi.AddNewToolBar(mfr, \"tbar\")\n\ttbar.SetStretchMaxWidth()\n\t// vi.ToolBar = tbar\n\n\tsplit := gi.AddNewSplitView(mfr, \"split\")\n\tsplit.Dim = gi.X\n\tsplit.SetStretchMaxWidth()\n\tsplit.SetStretchMaxHeight()\n\n\tsv := giv.AddNewStructView(split, \"sv\")\n\tsv.SetStruct(gn)\n\tgn.StructView = sv\n\n\t// tv := gi.AddNewTabView(split, \"tv\")\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Reset\", Icon: \"new\", Tooltip: \"\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.Reset()\n\t\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Load Params\", Icon: \"new\", Tooltip: \"\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.LoadParams()\n\t\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Gen Wavs\", Icon: \"new\", Tooltip: \"Generate the .wav files\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.GenWavs()\n\t\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Split Wavs\", Icon: \"new\", Tooltip: \"\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.SplitWavs()\n\t\t})\n\n\tvp.UpdateEndNoSig(updt)\n\n\t// main menu\n\tappnm := gi.AppName()\n\tmmen := win.MainMenu\n\tmmen.ConfigMenus([]string{appnm, \"File\", \"Edit\", \"Window\"})\n\n\tamen := win.MainMenu.ChildByName(appnm, 0).(*gi.Action)\n\tamen.Menu.AddAppMenu(win)\n\n\temen := win.MainMenu.ChildByName(\"Edit\", 1).(*gi.Action)\n\temen.Menu.AddCopyCutPaste(win)\n\n\tvp.UpdateEndNoSig(updt)\n\n\twin.MainMenuUpdated()\n\treturn win\n}",
  "func (c *guiClient) torPromptUI() error {\n\tui := VBox{\n\t\twidgetBase: widgetBase{padding: 40, expand: true, fill: true, name: \"vbox\"},\n\t\tchildren: []Widget{\n\t\t\tLabel{\n\t\t\t\twidgetBase: widgetBase{font: \"DejaVu Sans 30\"},\n\t\t\t\ttext:       \"Cannot find Tor\",\n\t\t\t},\n\t\t\tLabel{\n\t\t\t\twidgetBase: widgetBase{\n\t\t\t\t\tpadding: 20,\n\t\t\t\t\tfont:    \"DejaVu Sans 14\",\n\t\t\t\t},\n\t\t\t\ttext: \"Please start Tor or the Tor Browser Bundle. Looking for a SOCKS proxy on port 9050 or 9150...\",\n\t\t\t\twrap: 600,\n\t\t\t},\n\t\t},\n\t}\n\n\tc.gui.Actions() <- SetBoxContents{name: \"body\", child: ui}\n\tc.gui.Signal()\n\n\tticker := time.NewTicker(1 * time.Second)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase _, ok := <-c.gui.Events():\n\t\t\tif !ok {\n\t\t\t\tc.ShutdownAndSuspend()\n\t\t\t}\n\t\tcase <-ticker.C:\n\t\t\tif c.detectTor() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
  "func StartUI(api *API) {\n\terr := termui.Init()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer termui.Close()\n\n\tui := &UI{\n\t\tapi:            api,\n\t\tsearchBox:      newSearchBox(),\n\t\tinstancesTable: newInstancesTable(),\n\t\thelpBox:        newHelpTable(),\n\t\tselectedRow:    -1,\n\t}\n\n\tgo func() {\n\t\tfor instances := range api.instancesChan {\n\t\t\ttermui.SendCustomEvt(\"/usr/instances\", instances)\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor errors := range api.errChan {\n\t\t\ttermui.SendCustomEvt(\"/usr/errors\", errors)\n\t\t}\n\t}()\n\n\ttermui.Body.AddRows(\n\t\ttermui.NewRow(\n\t\t\ttermui.NewCol(12, 0, ui.searchBox),\n\t\t),\n\t\ttermui.NewRow(\n\t\t\ttermui.NewCol(12, 0, ui.instancesTable),\n\t\t),\n\t\ttermui.NewRow(\n\t\t\ttermui.NewCol(12, 0, ui.helpBox),\n\t\t),\n\t)\n\ttermui.Body.Align()\n\ttermui.Render(termui.Body)\n\n\tui.SetEvents()\n\tui.triggerInstancesUpdate()\n\n\ttermui.Loop()\n}",
  "func CreateMainWindow() {\n\n\tvBox := tui.NewVBox()\n\tvBox.SetSizePolicy(tui.Minimum, tui.Minimum)\n\tSidebar := tui.NewVBox()\n\tSidebar.SetSizePolicy(tui.Minimum, tui.Minimum)\n\n\tfor _, cmd := range strings.Split(libs.Cmds, \",\") {\n\t\tSidebar.Append(tui.NewLabel(wordwrap.WrapString(cmd, 50)))\n\t}\n\n\tSidebar.SetBorder(true)\n\tSidebar.Prepend(tui.NewLabel(\"***COMMANDS***\"))\n\n\tInput.SetFocused(true)\n\tInput.SetSizePolicy(tui.Expanding, tui.Maximum)\n\n\tinputBox := tui.NewHBox(Input)\n\tinputBox.SetBorder(true)\n\tinputBox.SetSizePolicy(tui.Expanding, tui.Maximum)\n\n\thistoryScroll := tui.NewScrollArea(History)\n\thistoryScroll.SetAutoscrollToBottom(true)\n\thistoryBox := tui.NewVBox(historyScroll)\n\thistoryBox.SetBorder(true)\n\n\tchat := tui.NewVBox(historyBox, inputBox)\n\tchat.SetSizePolicy(tui.Expanding, tui.Expanding)\n\n\t// create root window and add all windows\n\troot := tui.NewHBox(Sidebar, chat)\n\tui, err := tui.New(root)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tui.SetKeybinding(\"Esc\", func() { ui.Quit() })\n\n\tInput.OnSubmit(func(e *tui.Entry) {\n\t\t// this is just to see what command given\n\t\tuserCommand := e.Text()\n\t\tif userCommand == \"\" {\n\t\t\tHistory.Append(tui.NewLabel(\"that is not acceptable command\"))\n\t\t\tHistory.Append(tui.NewLabel(libs.PrintHelp()))\n\t\t} else {\n\t\t\tHistory.Append(tui.NewHBox(\n\t\t\t\ttui.NewLabel(\"Your Command: \" + userCommand),\n\t\t\t))\n\t\t\tHistory.Append(tui.NewHBox(tui.NewLabel(\"\")))\n\n\t\t\tif strings.HasPrefix(userCommand, \"\\\\\") {\n\t\t\t\t// then this is command ..\n\t\t\t\tswitch userCommand {\n\t\t\t\tcase \"\\\\help\":\n\t\t\t\t\tHistory.Append(tui.NewLabel(libs.PrintHelp()))\n\t\t\t\tcase \"\\\\monitor\":\n\t\t\t\t\tHistory.Append(tui.NewLabel(\"Switching to MONITOR mode for device \" + DeviceName))\n\t\t\t\t\tChangeToMonitorMode()\n\t\t\t\tcase \"\\\\managed\":\n\t\t\t\t\tHistory.Append(tui.NewLabel(\"Switching to MANAGED mode for device \" + DeviceName))\n\t\t\t\t\tChangeToManagedMode()\n\t\t\t\tcase \"\\\\exit\":\n\t\t\t\t\tHistory.Append(tui.NewHBox(tui.NewLabel(\"quitting...\")))\n\t\t\t\t\ttime.Sleep(1000 * time.Millisecond)\n\t\t\t\t\t// os.Exit(0)\n\n\t\t\t\t}\n\t\t\t} else if strings.Contains(userCommand, \":\") {\n\t\t\t\t// then this is declaration\n\t\t\t\tcmdSplit := strings.Split(userCommand, \":\")\n\t\t\t\tif cmdSplit[1] == \"\" {\n\t\t\t\t\tHistory.Append(tui.NewLabel(\"that is not acceptable command\"))\n\t\t\t\t\tHistory.Append(tui.NewLabel(libs.PrintHelp()))\n\t\t\t\t} else {\n\t\t\t\t\tswitch cmdSplit[0] {\n\t\t\t\t\tcase \"device\":\n\t\t\t\t\t\tSetDeviceName(cmdSplit[1])\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tHistory.Append(tui.NewLabel(\"there is no such declaration or command\"))\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\tHistory.Append(tui.NewHBox(tui.NewLabel(userCommand + \" is not command or a declaration\")))\n\t\t\t}\n\t\t}\n\t\tInput.SetText(\"\")\n\t})\n\n\tif err := ui.Run(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}",
  "func (u Util) MakeGUI(config Configuration.Config) {\n\n\tfmt.Println()\n\ttimeStart = time.Now()\n\tvar wg sync.WaitGroup\n\n\tfor _, t := range config.Tool {\n\t\tRelativePath = config.RelativePath\n\t\tif t.Enabled {\n\t\t\tdefaultFunc(t, &wg)\n\t\t}\n\t}\n\n\twg.Wait()\n\n\tfmt.Println(\"Triage Tool Done!\")\n\n}",
  "func (s *User) SettingsUI(title string, editors []string) {\n\tapp := tview.NewApplication()\n\n\tform := tview.NewForm().\n\t\tAddCheckbox(\"Update on starting katbox\", s.AutoUpdate, nil).\n\t\tAddDropDown(\"Editor\", editors, 0, nil).\n\t\tAddInputField(\"(optional) Custom editor Path\", s.Editor, 30, nil, nil).\n\t\tAddInputField(\"Git clone path\", s.GitPath, 30, nil, nil).\n\t\tAddCheckbox(\"Open URLs in Browser\", s.OpenURL, nil).\n\t\tAddButton(\"Save Settings\", func() { app.Stop() })\n\n\tform.SetBorder(true).SetTitle(title).SetTitleAlign(tview.AlignLeft)\n\tif err := app.SetRoot(form, true).Run(); err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Retrieve values and update settings\n\n\t_, s.Editor = form.GetFormItemByLabel(\"Editor\").(*tview.DropDown).GetCurrentOption()\n\t// If a custom editor has been selected then set the value from the custom Editor field\n\tif s.Editor == \"Custom\" {\n\t\ts.CustomEditor = form.GetFormItemByLabel(\"Editor Path\").(*tview.InputField).GetText()\n\t}\n\n\t// TODO - do a OS/Editor lookup and set the path accordingly\n\n\ts.OpenURL = form.GetFormItemByLabel(\"Open URLs in Browser\").(*tview.Checkbox).IsChecked()\n}",
  "func (ui *ReplApp) setupCommands() {\n\tui.commands = commanddefs{\n\t\t\"help\": {\n\t\t\tcmd:      \"help\",\n\t\t\thelptext: \"show information about commands\",\n\t\t},\n\n\t\t\"exit\": {\n\t\t\tcmd:      \"exit\",\n\t\t\thelptext: \"exit the chat client\",\n\t\t},\n\n\t\t\"ip\": {\n\t\t\tcmd:      \"ip\",\n\t\t\thelptext: \"display your current external IP and port chat client is using\",\n\t\t},\n\n\t\t\"me\": {\n\t\t\tcmd:        \"me\",\n\t\t\thelptext:   \"view and change user profile\",\n\t\t\tdefaultSub: \"show\",\n\t\t\tsubcmds: commanddefs{\n\t\t\t\t\"show\": {\n\t\t\t\t\tcmd:      \"show\",\n\t\t\t\t\thelptext: \"display user information\",\n\t\t\t\t},\n\t\t\t\t\"edit\": {\n\t\t\t\t\tcmd:      \"edit\",\n\t\t\t\t\thelptext: \"modify user information\",\n\t\t\t\t\targs: []argdef{\n\t\t\t\t\t\t{\"PROFILE\", re(profile)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t\"contacts\": {\n\t\t\tcmd:        \"contacts\",\n\t\t\thelptext:   \"manage contacts\",\n\t\t\tdefaultSub: \"list\",\n\t\t\tsubcmds: commanddefs{\n\t\t\t\t\"list\": {\n\t\t\t\t\tcmd:        \"list\",\n\t\t\t\t\thelptext:   \"list all contacts\",\n\t\t\t\t\tdefaultSub: \"all\",\n\t\t\t\t},\n\t\t\t\t\"add\": {\n\t\t\t\t\tcmd:      \"add\",\n\t\t\t\t\thelptext: \"add a new contact from an existing session or profile\",\n\t\t\t\t\targs: []argdef{\n\t\t\t\t\t\t{\"PROFILE\", re(profile)},\n\t\t\t\t\t\t{\"SESSION_NUMBER\", re(integer)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"delete\": {\n\t\t\t\t\tcmd:      \"delete\",\n\t\t\t\t\thelptext: \"delete a contacts\",\n\t\t\t\t\targs: []argdef{\n\t\t\t\t\t\t{\"CONTACT_NUMBER\", re(integer)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t\"requests\": {\n\t\t\tcmd:        \"requests\",\n\t\t\thelptext:   \"manage requests for chat\",\n\t\t\tdefaultSub: \"list\",\n\t\t\tsubcmds: commanddefs{\n\t\t\t\t\"list\": {\n\t\t\t\t\tcmd:      \"list\",\n\t\t\t\t\thelptext: \"display waiting requests\",\n\t\t\t\t},\n\t\t\t\t\"accept\": {\n\t\t\t\t\tcmd:      \"accept\",\n\t\t\t\t\thelptext: \"accept chat request and begin a session\",\n\t\t\t\t\targs: []argdef{\n\t\t\t\t\t\t{\"REQUEST_NUMBER\", re(integer)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"reject\": {\n\t\t\t\t\tcmd:      \"reject\",\n\t\t\t\t\thelptext: \"refuse a chat request\",\n\t\t\t\t\targs: []argdef{\n\t\t\t\t\t\t{\"REQUEST_NUMBER\", re(integer)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t\"sessions\": {\n\t\t\tcmd:        \"sessions\",\n\t\t\thelptext:   \"manage chat sessions\",\n\t\t\tdefaultSub: \"list\",\n\t\t\tsubcmds: commanddefs{\n\t\t\t\t\"list\": {\n\t\t\t\t\tcmd:      \"list\",\n\t\t\t\t\thelptext: \"display all pending and active sessions\",\n\t\t\t\t},\n\t\t\t\t\"start\": {\n\t\t\t\t\tcmd:      \"start\",\n\t\t\t\t\thelptext: \"ping another user to a session\",\n\t\t\t\t\targs: []argdef{\n\t\t\t\t\t\t{\"CONTACT_NUMBER\", re(integer)},\n\t\t\t\t\t\t{\"PROFILE\", re(profile)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"drop\": {\n\t\t\t\t\tcmd:      \"drop\",\n\t\t\t\t\thelptext: \"end a session\",\n\t\t\t\t\targs: []argdef{\n\t\t\t\t\t\t{\"SESSION_NUMBER\", re(integer)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t\"msg\": {\n\t\t\tcmd:      \"msg\",\n\t\t\thelptext: \"sends a message\",\n\t\t\targs: []argdef{\n\t\t\t\t{\"SESSION_NUMBER MESSAGE\", re(integer, rest)},\n\t\t\t},\n\t\t},\n\n\t\t\"show\": {\n\t\t\tcmd:      \"show\",\n\t\t\thelptext: \"show last few messages for a particular session\",\n\t\t\targs: []argdef{\n\t\t\t\t{\"SESSION_NUMBER\", re(integer)},\n\t\t\t},\n\t\t},\n\t}\n}",
  "func preFlightSetup(CommandDefinition *cobra.Command, _ []string) {\n\tif !cfg.Install && !cfg.Uninstall {\n\t\tfmt.Printf(\"ERROR: using '--install' or '--uninstall' options are mandatory\\n\")\n\t\t_ = CommandDefinition.Help()\n\t\tos.Exit(1)\n\t}\n\n\tif cfg.Install == cfg.Uninstall {\n\t\tfmt.Printf(\"ERROR: can't use '--install' and '--uninstall' options together\\n\")\n\t\t_ = CommandDefinition.Help()\n\t\tos.Exit(1)\n\t}\n}",
  "func (ss *Sim) ConfigGui() *gi.Window {\n\twidth := 1600\n\theight := 1200\n\n\tgi.SetAppName(\"hip\")\n\tgi.SetAppAbout(`This demonstrates a basic Hippocampus model in Leabra. See <a href=\"https://github.com/emer/emergent\">emergent on GitHub</a>.</p>`)\n\n\twin := gi.NewMainWindow(\"hip\", \"Hippocampus Close-Far\", width, height)\n\tss.Win = win\n\n\tvp := win.WinViewport2D()\n\tupdt := vp.UpdateStart()\n\n\tmfr := win.SetMainFrame()\n\n\ttbar := gi.AddNewToolBar(mfr, \"tbar\")\n\ttbar.SetStretchMaxWidth()\n\tss.ToolBar = tbar\n\n\tsplit := gi.AddNewSplitView(mfr, \"split\")\n\tsplit.Dim = gi.X\n\tsplit.SetStretchMax()\n\n\tsv := giv.AddNewStructView(split, \"sv\")\n\tsv.SetStruct(ss)\n\n\ttv := gi.AddNewTabView(split, \"tv\")\n\n\tnv := tv.AddNewTab(netview.KiT_NetView, \"NetView\").(*netview.NetView)\n\tnv.Var = \"Act\"\n\t// nv.Params.ColorMap = \"Jet\" // default is ColdHot\n\t// which fares pretty well in terms of discussion here:\n\t// https://matplotlib.org/tutorials/colors/colormaps.html\n\tnv.SetNet(ss.Net)\n\tss.NetView = nv\n\tnv.ViewDefaults()\n\n\tplt := tv.AddNewTab(eplot.KiT_Plot2D, \"TrnTrlPlot\").(*eplot.Plot2D)\n\tss.TrnTrlPlot = ss.ConfigTrnTrlPlot(plt, ss.TrnTrlLog)\n\n\tplt = tv.AddNewTab(eplot.KiT_Plot2D, \"TrnEpcPlot\").(*eplot.Plot2D)\n\tss.TrnEpcPlot = ss.ConfigTrnEpcPlot(plt, ss.TrnEpcLog)\n\n\tplt = tv.AddNewTab(eplot.KiT_Plot2D, \"TstTrlPlot\").(*eplot.Plot2D)\n\tss.TstTrlPlot = ss.ConfigTstTrlPlot(plt, ss.TstTrlLog)\n\n\tplt = tv.AddNewTab(eplot.KiT_Plot2D, \"TstEpcPlot\").(*eplot.Plot2D)\n\tss.TstEpcPlot = ss.ConfigTstEpcPlot(plt, ss.TstEpcLog)\n\n\tplt = tv.AddNewTab(eplot.KiT_Plot2D, \"TstCycPlot\").(*eplot.Plot2D)\n\tss.TstCycPlot = ss.ConfigTstCycPlot(plt, ss.TstCycLog)\n\n\tplt = tv.AddNewTab(eplot.KiT_Plot2D, \"RunPlot\").(*eplot.Plot2D)\n\tss.RunPlot = ss.ConfigRunPlot(plt, ss.RunLog)\n\n\tsplit.SetSplits(.3, .7)\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Init\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tss.Init()\n\t\tvp.SetNeedsFullRender()\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"AllOn\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tss.AllOn()\n\t})\n\n\tsmen := gi.AddNewMenuButton(tbar, \"load\")\n\tsmen.SetText(\"Load\")\n\n\tsmen.Menu.AddAction(gi.ActOpts{Label: \"LoadSem\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tss.OpenPatsSem()\n\t})\n\n\tsmen.Menu.AddAction(gi.ActOpts{Label: \"LoadStudy\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  
Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tss.OpenPats()\n\t})\n\n\tsmen.Menu.AddAction(gi.ActOpts{Label: \"LoadRP\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tss.OpenPatsRP()\n\t})\n\n\tsmen.Menu.AddAction(gi.ActOpts{Label: \"Sleep\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tss.OpenPatsSleep()\n\t})\n\n\ttrmen := gi.AddNewMenuButton(tbar, \"train\")\n\ttrmen.SetText(\"Train\")\n\n\ttrmen.Menu.AddAction(gi.ActOpts{Label: \"TrainHip\", Icon: \"run\", Tooltip: \"Starts the network training, picking up from wherever it may have left off.  If not stopped, training will complete the specified number of Runs through the full number of Epochs of training, with testing automatically occuring at the specified interval.\",\n\t\tUpdateFunc: func(act *gi.Action) {\n\t\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\t// ss.Train()\n\t\t\tgo ss.TrainHip()\n\t\t}\n\t})\n\n\ttrmen.Menu.AddAction(gi.ActOpts{Label: \"TrainSem\", Icon: \"run\", Tooltip: \"Starts the network training, picking up from wherever it may have left off.  If not stopped, training will complete the specified number of Runs through the full number of Epochs of training, with testing automatically occuring at the specified interval.\",\n\t\tUpdateFunc: func(act *gi.Action) {\n\t\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\t// ss.Train()\n\t\t\tgo ss.TrainSem()\n\t\t}\n\t})\n\n\ttrmen.Menu.AddAction(gi.ActOpts{Label: \"TrainRP\", Icon: \"run\", Tooltip: \"Starts the network training, picking up from wherever it may have left off.  If not stopped, training will complete the specified number of Runs through the full number of Epochs of training, with testing automatically occuring at the specified interval.\",\n\t\tUpdateFunc: func(act *gi.Action) {\n\t\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\t// ss.Train()\n\t\t\tgo ss.TrainRetrievalPractice()\n\t\t}\n\t})\n\n\ttrmen.Menu.AddAction(gi.ActOpts{Label: \"TrainSleep\", Icon: \"run\", Tooltip: \"Starts the network training, picking up from wherever it may have left off.  
If not stopped, training will complete the specified number of Runs through the full number of Epochs of training, with testing automatically occuring at the specified interval.\",\n\t\tUpdateFunc: func(act *gi.Action) {\n\t\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\t// ss.Train()\n\t\t\tgo ss.TrainSleep()\n\t\t}\n\t})\n\n\texmen := gi.AddNewMenuButton(tbar, \"Exp\")\n\texmen.SetText(\"Exp\")\n\n\texmen.Menu.AddAction(gi.ActOpts{Label: \"CloseUn\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\t// ss.Train()\n\t\t\tgo ss.CloseUnrelated()\n\t\t}\n\t})\n\n\texmen.Menu.AddAction(gi.ActOpts{Label: \"CloseRe\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\t// ss.Train()\n\t\t\tgo ss.CloseRelated()\n\t\t}\n\t})\n\n\texmen.Menu.AddAction(gi.ActOpts{Label: \"FarUn\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\t// ss.Train()\n\t\t\tgo ss.FarUnrelated()\n\t\t}\n\t})\n\n\texmen.Menu.AddAction(gi.ActOpts{Label: \"FarRe\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\t// ss.Train()\n\t\t\tgo ss.FarRelated()\n\t\t}\n\t})\n\n\texmen.Menu.AddAction(gi.ActOpts{Label: \"FullSet\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\t// ss.Train()\n\t\t\tss.ViewOn = false\n\t\t\tss.CloseUnrelated()\n\t\t\tss.CloseRelated()\n\t\t\tss.FarUnrelated()\n\t\t\tss.FarRelated()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Stop\", Icon: \"stop\", Tooltip: \"Interrupts running.  
Hitting Train again will pick back up where it left off.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tss.Stop()\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"StepHip\", Icon: \"step-fwd\", Tooltip: \"Advances one training trial at a time.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\tss.TrainTrialHip()\n\t\t\tss.IsRunning = false\n\t\t\tvp.SetNeedsFullRender()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"StepSem\", Icon: \"step-fwd\", Tooltip: \"Advances one training trial at a time.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\tss.TrainTrialSem()\n\t\t\tss.IsRunning = false\n\t\t\tvp.SetNeedsFullRender()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Step Epoch\", Icon: \"fast-fwd\", Tooltip: \"Advances one epoch (complete set of training patterns) at a time.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\tgo ss.TrainEpoch()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Step Run\", Icon: \"fast-fwd\", Tooltip: \"Advances one full training Run at a time.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\tgo ss.TrainRun()\n\t\t}\n\t})\n\n\ttbar.AddSeparator(\"test\")\n\n\ttbar.AddAction(gi.ActOpts{Label: \"TestTrialSem\", Icon: \"step-fwd\", Tooltip: \"Runs the next testing trial.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\tss.TestTrialSem(false) // don't return on trial -- wrap\n\t\t\tss.IsRunning = false\n\t\t\tvp.SetNeedsFullRender()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"TestTrialH\", Icon: \"step-fwd\", Tooltip: \"Runs the next testing trial.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\tss.TestTrialHip(false) // don't return on trial -- wrap\n\t\t\tss.IsRunning = false\n\t\t\tvp.SetNeedsFullRender()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Test Item\", Icon: \"step-fwd\", Tooltip: \"Prompts for a specific input pattern name to run, and runs it in testing mode.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tgi.StringPromptDialog(vp, \"\", \"Test Item\",\n\t\t\tgi.DlgOpts{Title: \"Test Item\", Prompt: \"Enter the Name of a given input pattern to test (case insensitive, contains given string.\"},\n\t\t\twin.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\t\tdlg := send.(*gi.Dialog)\n\t\t\t\tif sig == int64(gi.DialogAccepted) {\n\t\t\t\t\tval := 
gi.StringPromptDialogValue(dlg)\n\t\t\t\t\tidxs := ss.TestEnv.Table.RowsByString(\"Name\", val, true, true) // contains, ignoreCase\n\t\t\t\t\tif len(idxs) == 0 {\n\t\t\t\t\t\tgi.PromptDialog(nil, gi.DlgOpts{Title: \"Name Not Found\", Prompt: \"No patterns found containing: \" + val}, true, false, nil, nil)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tif !ss.IsRunning {\n\t\t\t\t\t\t\tss.IsRunning = true\n\t\t\t\t\t\t\tfmt.Printf(\"testing index: %v\\n\", idxs[0])\n\t\t\t\t\t\t\tss.TestItem(idxs[0])\n\t\t\t\t\t\t\tss.IsRunning = false\n\t\t\t\t\t\t\tvp.SetNeedsFullRender()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"TestTrialF\", Icon: \"step-fwd\", Tooltip: \"Runs the next testing trial.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\tss.TestTrialFull(false) // don't return on trial -- wrap\n\t\t\tss.IsRunning = false\n\t\t\tvp.SetNeedsFullRender()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Test AllSem\", Icon: \"fast-fwd\", Tooltip: \"Tests all of the testing trials.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\tgo ss.RunTestAllSem()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Test AllH\", Icon: \"fast-fwd\", Tooltip: \"Tests all of the testing trials.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\tgo ss.RunTestAllHip()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Test AllHPure\", Icon: \"fast-fwd\", Tooltip: \"Tests all of the testing trials.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\tgo ss.RunTestAllHipPure()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Test AllF\", Icon: \"fast-fwd\", Tooltip: \"Tests all of the testing trials.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\tgo ss.RunTestAllFull()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Env\", Icon: \"gear\", Tooltip: \"select training input patterns: Close or Far.\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgiv.CallMethod(ss, \"SetEnv\", vp)\n\t\t})\n\n\ttbar.AddSeparator(\"log\")\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Reset RunLog\", Icon: \"reset\", Tooltip: \"Reset the accumulated log of all Runs, which are tagged with the ParamSet used\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tss.RunLog.SetNumRows(0)\n\t\t\tss.RunPlot.Update()\n\t\t})\n\n\ttbar.AddSeparator(\"misc\")\n\n\ttbar.AddAction(gi.ActOpts{Label: \"New Seed\", Icon: \"new\", Tooltip: \"Generate a new initial random seed to get different results.  
By default, Init re-establishes the same initial seed every time.\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tss.NewRndSeed()\n\t\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"README\", Icon: \"file-markdown\", Tooltip: \"Opens your browser on the README file that contains instructions for how to run this model.\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgi.OpenURL(\"https://github.com/emer/leabra/blob/master/examples/ra25/README.md\")\n\t\t})\n\n\tvp.UpdateEndNoSig(updt)\n\n\t// main menu\n\tappnm := gi.AppName()\n\tmmen := win.MainMenu\n\tmmen.ConfigMenus([]string{appnm, \"File\", \"Edit\", \"Window\"})\n\n\tamen := win.MainMenu.ChildByName(appnm, 0).(*gi.Action)\n\tamen.Menu.AddAppMenu(win)\n\n\temen := win.MainMenu.ChildByName(\"Edit\", 1).(*gi.Action)\n\temen.Menu.AddCopyCutPaste(win)\n\n\t// note: Command in shortcuts is automatically translated into Control for\n\t// Linux, Windows or Meta for MacOS\n\t// fmen := win.MainMenu.ChildByName(\"File\", 0).(*gi.Action)\n\t// fmen.Menu.AddAction(gi.ActOpts{Label: \"Open\", Shortcut: \"Command+O\"},\n\t// \twin.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t// \t\tFileViewOpenSVG(vp)\n\t// \t})\n\t// fmen.Menu.AddSeparator(\"csep\")\n\t// fmen.Menu.AddAction(gi.ActOpts{Label: \"Close Window\", Shortcut: \"Command+W\"},\n\t// \twin.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t// \t\twin.Close()\n\t// \t})\n\n\tinQuitPrompt := false\n\tgi.SetQuitReqFunc(func() {\n\t\tif inQuitPrompt {\n\t\t\treturn\n\t\t}\n\t\tinQuitPrompt = true\n\t\tgi.PromptDialog(vp, gi.DlgOpts{Title: \"Really Quit?\",\n\t\t\tPrompt: \"Are you <i>sure</i> you want to quit and lose any unsaved params, weights, logs, etc?\"}, true, true,\n\t\t\twin.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\t\tif sig == int64(gi.DialogAccepted) {\n\t\t\t\t\tgi.Quit()\n\t\t\t\t} else {\n\t\t\t\t\tinQuitPrompt = false\n\t\t\t\t}\n\t\t\t})\n\t})\n\n\t// gi.SetQuitCleanFunc(func() {\n\t// \tfmt.Printf(\"Doing final Quit cleanup here..\\n\")\n\t// })\n\n\tinClosePrompt := false\n\twin.SetCloseReqFunc(func(w *gi.Window) {\n\t\tif inClosePrompt {\n\t\t\treturn\n\t\t}\n\t\tinClosePrompt = true\n\t\tgi.PromptDialog(vp, gi.DlgOpts{Title: \"Really Close Window?\",\n\t\t\tPrompt: \"Are you <i>sure</i> you want to close the window?  This will Quit the App as well, losing all unsaved params, weights, logs, etc\"}, true, true,\n\t\t\twin.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\t\tif sig == int64(gi.DialogAccepted) {\n\t\t\t\t\tgi.Quit()\n\t\t\t\t} else {\n\t\t\t\t\tinClosePrompt = false\n\t\t\t\t}\n\t\t\t})\n\t})\n\n\twin.SetCloseCleanFunc(func(w *gi.Window) {\n\t\tgo gi.Quit() // once main window is closed, quit\n\t})\n\n\twin.MainMenuUpdated()\n\treturn win\n}",
  "func (a *AppProvider) UI(ctx context.Context) ui.UI {\n\tcfg := Config(ctx)\n\treturn ui.NewStdout(cfg.UI.Colors, cfg.UI.Animations)\n}",
  "func InitUI() UI {\n\tUI76 := UI{}\n\tUI76.allowInterface = true\n\tUI76.mousePressed = false\n\tUI76.panels = append(UI76.panels, MenuMainMenu, MenuLanded, MenuFlying, MenuMap, MenuEvent)\n\tInitMenuMainMenu()\n\tInitMenuLanded()\n\tInitMenuFlying()\n\tInitMenuShop()\n\tInitMenuMap()\n\tInitMenuEvent()\n\tUI76.currentPanel = MenuMainMenu\n\tUI76.prevPanel = MenuMainMenu\n\t// Fonts\n\ttt, err := truetype.Parse(fonts.MPlus1pRegular_ttf)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfont76 = truetype.NewFace(tt, &truetype.Options{\n\t\tSize:    8,\n\t\tDPI:     96,\n\t\tHinting: font.HintingFull,\n\t})\n\treturn UI76\n}",
  "func (i *Installation) installGPCCUI(args []string) error {\n\tinstallGPCCWebFile := Config.CORE.TEMPDIR + \"install_web_ui.sh\"\n\toptions := []string{\n\t\t\"source \" + i.EnvFile,\n\t\t\"source \" + i.GPCC.GpPerfmonHome + \"/gpcc_path.sh\",\n\t\t\"echo\",\n\t\t\"gpcmdr --setup << EOF\",\n\t}\n\tfor _, arg := range args {\n\t\toptions = append(options, arg)\n\t}\n\toptions = append(options, \"echo\")\n\tgenerateBashFileAndExecuteTheBashFile(installGPCCWebFile, \"/bin/sh\", options)\n\n\treturn nil\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	configureUI configures the user interface 
 | 
	func configureUI() {
	terminal.Prompt = "› "
	terminal.TitleColorTag = "{s}"
	// Honor the --no-color option
	if options.GetB(OPT_NO_COLOR) {
		fmtc.DisableColors = true
	}
	// Pick application color tags based on the terminal's color support
	switch {
	case fmtc.IsTrueColorSupported():
		colorTagApp, colorTagVer = "{#CC1E2C}", "{#CC1E2C}"
	case fmtc.Is256ColorsSupported():
		colorTagApp, colorTagVer = "{#160}", "{#160}"
	default:
		colorTagApp, colorTagVer = "{r}", "{r}"
	}
} 
 | 
	[
  "func configureUI() {\n\tif options.GetB(OPT_NO_COLOR) {\n\t\tfmtc.DisableColors = true\n\t}\n\n\tswitch {\n\tcase fmtc.IsTrueColorSupported():\n\t\tcolorTagApp, colorTagVer = \"{#BCCF00}\", \"{#BCCF00}\"\n\tcase fmtc.Is256ColorsSupported():\n\t\tcolorTagApp, colorTagVer = \"{#148}\", \"{#148}\"\n\tdefault:\n\t\tcolorTagApp, colorTagVer = \"{g}\", \"{g}\"\n\t}\n}",
  "func RunUI(cfg Config, inStatusCh chan inputStats, outputStatusChannel chan outputStats, cfgSource string) {\n\t// Let the goroutines initialize before starting GUI\n\ttime.Sleep(50 * time.Millisecond)\n\tif err := ui.Init(); err != nil {\n\t\tlog.Fatalf(\"failed to initialize termui: %v\", err)\n\t}\n\tdefer ui.Close()\n\n\ty := 0\n\theight := 5\n\twidth := 120\n\thalfWidth := width / 2\n\n\tp := widgets.NewParagraph()\n\tp.Title = applicationName()\n\tp.Text = fmt.Sprintf(\"PRESS q TO QUIT.\\nConfig from: %s\\n\", cfgSource)\n\n\tp.SetRect(0, y, width, height)\n\tp.TextStyle.Fg = ui.ColorWhite\n\tp.BorderStyle.Fg = ui.ColorCyan\n\n\ty += height\n\theight = 10\n\tinSrcHeight := height\n\tif cfg.RetransmitEnabled() {\n\t\tinSrcHeight = height * 2\n\n\t}\n\n\tinpSrcStatus := widgets.NewParagraph()\n\tinpSrcStatus.Title = \"GPS/GPS Compass in\"\n\tif cfg.InputEnabled() {\n\t\tinpSrcStatus.Text = \"Waiting for data\"\n\t} else {\n\t\tinpSrcStatus.Text = \"Input not enabled\"\n\t}\n\n\tinpSrcStatus.SetRect(0, y, halfWidth, y+inSrcHeight)\n\tinpSrcStatus.TextStyle.Fg = ui.ColorGreen\n\tinpSrcStatus.BorderStyle.Fg = ui.ColorCyan\n\n\tinpArrow := widgets.NewParagraph()\n\tinpArrow.Border = false\n\tinpArrow.Text = \"=>\"\n\tinpArrow.SetRect(halfWidth, y, halfWidth+5, y+height)\n\n\tinpDestStatus := widgets.NewParagraph()\n\tinpDestStatus.Title = \"GPS/GPS Compass out to UGPS\"\n\n\tinpDestStatus.SetRect(halfWidth+5, y, width, y+height)\n\tinpDestStatus.TextStyle.Fg = ui.ColorGreen\n\tinpDestStatus.BorderStyle.Fg = ui.ColorCyan\n\n\tinpRetransmitStatus := widgets.NewParagraph()\n\tif cfg.RetransmitEnabled() {\n\t\tinpRetransmitStatus.Title = \"Retransmit Input\"\n\n\t\tinpRetransmitStatus.SetRect(halfWidth+5, y+height, width, y+inSrcHeight)\n\t\tinpRetransmitStatus.TextStyle.Fg = ui.ColorGreen\n\t\tinpRetransmitStatus.BorderStyle.Fg = ui.ColorCyan\n\t}\n\n\t//y += height\n\ty += inSrcHeight\n\theight = 10\n\n\toutSrcStatus := widgets.NewParagraph()\n\toutSrcStatus.Title = \"Locator Position in from UGPS\"\n\toutSrcStatus.Text = \"Waiting for data\"\n\tif !cfg.OutputEnabled() {\n\t\toutSrcStatus.Text = \"Output not enabled\"\n\t}\n\toutSrcStatus.SetRect(0, y, halfWidth, y+height)\n\toutSrcStatus.TextStyle.Fg = ui.ColorGreen\n\toutSrcStatus.BorderStyle.Fg = ui.ColorCyan\n\n\toutArrow := widgets.NewParagraph()\n\toutArrow.Border = false\n\toutArrow.Text = \"=>\"\n\toutArrow.SetRect(halfWidth, y, halfWidth+5, y+height)\n\n\toutDestStatus := widgets.NewParagraph()\n\toutDestStatus.Title = \"Locator Position out to NMEA\"\n\toutDestStatus.Text = \"Waiting for data\"\n\tif !cfg.OutputEnabled() {\n\t\toutDestStatus.Text = \"Output not enabled\"\n\t}\n\toutDestStatus.SetRect(halfWidth+5, y, width, y+height)\n\toutDestStatus.TextStyle.Fg = ui.ColorGreen\n\toutDestStatus.BorderStyle.Fg = ui.ColorCyan\n\n\ty += height\n\theight = 15\n\n\tdbgText := widgets.NewList()\n\tdbgText.Title = \"Debug\"\n\tdbgText.Rows = dbgMsg\n\tdbgText.WrapText = true\n\tdbgText.SetRect(0, y, width, y+height)\n\tdbgText.BorderStyle.Fg = ui.ColorCyan\n\n\thideDebug := widgets.NewParagraph()\n\thideDebug.Text = \"\"\n\thideDebug.SetRect(0, y, width, y+height)\n\thideDebug.Border = false\n\n\tdraw := func() {\n\t\tui.Render(p, inpSrcStatus, inpArrow, inpDestStatus, outSrcStatus, outArrow, outDestStatus, inpRetransmitStatus)\n\t\tif debug {\n\t\t\tdbgText.Rows = dbgMsg\n\t\t\tui.Render(dbgText)\n\t\t} else {\n\t\t\tui.Render(hideDebug)\n\t\t}\n\t}\n\n\t// Initial draw before any events have occurred\n\tdraw()\n\n\tuiEvents := 
ui.PollEvents()\n\n\tfor {\n\t\tselect {\n\t\tcase inStats := <-inStatusCh:\n\t\t\tinpSrcStatus.TextStyle.Fg = ui.ColorGreen\n\t\t\tinpSrcStatus.Text = fmt.Sprintf(\"Source: %s\\n\\n\", cfg.Input.Device) +\n\t\t\t\t\"Supported NMEA sentences received:\\n\" +\n\t\t\t\tfmt.Sprintf(\" * Topside Position   : %s\\n\", inStats.src.posDesc) +\n\t\t\t\tfmt.Sprintf(\" * Topside Heading    : %s\\n\", inStats.src.headDesc) +\n\t\t\t\tfmt.Sprintf(\" * Parse error: %d\\n\\n\", inStats.src.unparsableCount) +\n\t\t\t\tinStats.src.errorMsg\n\t\t\tif inStats.src.errorMsg != \"\" {\n\t\t\t\tinpSrcStatus.TextStyle.Fg = ui.ColorRed\n\t\t\t}\n\t\t\tinpDestStatus.TextStyle.Fg = ui.ColorGreen\n\t\t\tinpDestStatus.Text = fmt.Sprintf(\"Destination: %s\\n\\n\", cfg.BaseURL) +\n\t\t\t\tfmt.Sprintf(\"Sent successfully to\\n Underwater GPS: %d\\n\\n\", inStats.dst.sendOk) +\n\t\t\t\tinStats.dst.errorMsg\n\t\t\tif inStats.dst.errorMsg != \"\" {\n\t\t\t\tinpDestStatus.TextStyle.Fg = ui.ColorRed\n\t\t\t}\n\n\t\t\tinpRetransmitStatus.Text = fmt.Sprintf(\"Destination: %s\\n\\n\", cfg.Input.Retransmit) +\n\t\t\t\tfmt.Sprintf(\"Count: %d\\n%s\", inStats.retransmit.count, inStats.retransmit.errorMsg)\n\t\t\tinpRetransmitStatus.TextStyle.Fg = ui.ColorGreen\n\t\t\tif inStats.retransmit.errorMsg != \"\" {\n\t\t\t\tinpRetransmitStatus.TextStyle.Fg = ui.ColorRed\n\t\t\t}\n\t\t\tdraw()\n\t\tcase outStats := <-outputStatusChannel:\n\t\t\toutSrcStatus.Text = fmt.Sprintf(\"Source: %s\\n\\n\", cfg.BaseURL) +\n\t\t\t\tfmt.Sprintf(\"Positions from Underwater GPS:\\n  %d\\n\", outStats.src.getCount)\n\t\t\toutSrcStatus.TextStyle.Fg = ui.ColorGreen\n\n\t\t\tif outStats.src.errMsg != \"\" {\n\t\t\t\toutSrcStatus.TextStyle.Fg = ui.ColorRed\n\t\t\t\toutSrcStatus.Text += fmt.Sprintf(\"\\n\\n%v (%d)\", outStats.src.errMsg, outStats.src.getErr)\n\t\t\t}\n\n\t\t\toutDestStatus.Text = fmt.Sprintf(\"Destination: %s\\n\\n\", cfg.Output.Device) +\n\t\t\t\t\"Sent:\\n\" +\n\t\t\t\tfmt.Sprintf(\" * Locator/ROV Position : %s: %d\\n\", strings.ToUpper(cfg.Output.PositionSentence), outStats.dst.sendOk)\n\t\t\toutDestStatus.TextStyle.Fg = ui.ColorGreen\n\n\t\t\tif outStats.dst.errMsg != \"\" {\n\t\t\t\toutDestStatus.TextStyle.Fg = ui.ColorRed\n\t\t\t\toutDestStatus.Text += fmt.Sprintf(\"\\n\\n%s\", outStats.dst.errMsg)\n\t\t\t}\n\t\t\tdraw()\n\t\tcase e := <-uiEvents:\n\t\t\tswitch e.ID {\n\t\t\tcase \"q\", \"<C-c>\":\n\t\t\t\treturn\n\t\t\tcase \"d\":\n\t\t\t\tdbgMsg = nil\n\t\t\t\tdbgText.Rows = dbgMsg\n\t\t\t\tdebug = !debug\n\n\t\t\t\tdraw()\n\t\t\t}\n\t\t}\n\t}\n}",
  "func SetUIConfig() error {\n\tf, err := os.Open(UIConfigFile)\n\tif err != nil {\n\t\treturn checkConfigs()\n\t}\n\tdefer f.Close()\n\n\tbytes, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(bytes, &UIConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn checkConfigs()\n}",
  "func InitUI(c *processor.Console) {\n\tconsole = c\n\tframe = image.NewRGBA(image.Rect(0, 0, width, height))\n\n\t// Call ebiten.Run to start your game loop.\n\tif err := ebiten.Run(update, width, height, scale, title); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}",
  "func (gn *Gen) ConfigGui() *gi.Window {\n\twidth := 1600\n\theight := 1200\n\n\tgi.SetAppName(\"Gen\")\n\tgi.SetAppAbout(`Gen concatenated strings of syllables`)\n\n\twin := gi.NewMainWindow(\"one\", \"Gen ...\", width, height)\n\n\tvp := win.WinViewport2D()\n\tupdt := vp.UpdateStart()\n\n\tmfr := win.SetMainFrame()\n\n\ttbar := gi.AddNewToolBar(mfr, \"tbar\")\n\ttbar.SetStretchMaxWidth()\n\t// vi.ToolBar = tbar\n\n\tsplit := gi.AddNewSplitView(mfr, \"split\")\n\tsplit.Dim = gi.X\n\tsplit.SetStretchMaxWidth()\n\tsplit.SetStretchMaxHeight()\n\n\tsv := giv.AddNewStructView(split, \"sv\")\n\tsv.SetStruct(gn)\n\tgn.StructView = sv\n\n\t// tv := gi.AddNewTabView(split, \"tv\")\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Reset\", Icon: \"new\", Tooltip: \"\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.Reset()\n\t\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Load Params\", Icon: \"new\", Tooltip: \"\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.LoadParams()\n\t\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Gen Wavs\", Icon: \"new\", Tooltip: \"Generate the .wav files\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.GenWavs()\n\t\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Split Wavs\", Icon: \"new\", Tooltip: \"\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.SplitWavs()\n\t\t})\n\n\tvp.UpdateEndNoSig(updt)\n\n\t// main menu\n\tappnm := gi.AppName()\n\tmmen := win.MainMenu\n\tmmen.ConfigMenus([]string{appnm, \"File\", \"Edit\", \"Window\"})\n\n\tamen := win.MainMenu.ChildByName(appnm, 0).(*gi.Action)\n\tamen.Menu.AddAppMenu(win)\n\n\temen := win.MainMenu.ChildByName(\"Edit\", 1).(*gi.Action)\n\temen.Menu.AddCopyCutPaste(win)\n\n\tvp.UpdateEndNoSig(updt)\n\n\twin.MainMenuUpdated()\n\treturn win\n}",
  "func (s *Slacker) SetupUI() error {\n\n\t// black toolbars are cool.\n\ttb := widgets.NewToolBar(\"toolbar\", &color.RGBA{0, 0, 0, 0xff})\n\tquitButton := widgets.NewToolbarItem(\"quitTBI\", s.QuitSlacker)\n\ttb.AddToolBarItem(quitButton)\n\ttb.SetSize(800, 30)\n\n\t// Main VPanel of the app. Will have 2 entries in it. The first is the top level toolbar\n\t// secondly will be a HPanel to have contacts and messages.\n\tvpanel := widgets.NewVPanelWithSize(\"main-vpanel\", 800, 600, &color.RGBA{0, 0, 0, 0xff})\n\tvpanel.AddWidget(tb)\n\ts.window.AddPanel(vpanel)\n\n\t// main Horizontal panel... first element is channels + people\n\t// second element is actual chat.\n\tmainHPanel := widgets.NewHPanel(\"hpanel1\", &color.RGBA{0, 100, 0, 255})\n\n\t// 2 main sections added to mainHPanel\n\t// contactsVPanel goes down complete left side, 100 width, 600-30 (toolbar) in height\n\ts.contactsChannelsVPanel = widgets.NewVPanelWithSize(\"contactsVPanel\", 150, 570, &color.RGBA{0, 0, 100, 0xff})\n\n\t// In messagesTypingVPanel we will have 2 vpanels.\n\tmessagesTypingVPanel := widgets.NewVPanelWithSize(\"messagesTypingVPanel\", 650, 570, &color.RGBA{0, 50, 50, 0xff})\n\n\t// The first for messages the second for typing widget.\n\ts.messagesVPanel = widgets.NewVPanelWithSize(\"messagesVPanel\", 650, 540, &color.RGBA{10, 50, 50, 0xff})\n\ttypingVPanel := widgets.NewVPanelWithSize(\"typingVPanel\", 650, 30, &color.RGBA{50, 50, 50, 0xff})\n\tmessagesTypingVPanel.AddWidget(s.messagesVPanel)\n\tmessagesTypingVPanel.AddWidget(typingVPanel)\n\n\tmainHPanel.AddWidget(s.contactsChannelsVPanel)\n\tmainHPanel.AddWidget(messagesTypingVPanel)\n\n\t// now add mainHPanel to VPanel.\n\tvpanel.AddWidget(mainHPanel)\n\n\treturn nil\n}",
  "func (s *User) SettingsUI(title string, editors []string) {\n\tapp := tview.NewApplication()\n\n\tform := tview.NewForm().\n\t\tAddCheckbox(\"Update on starting katbox\", s.AutoUpdate, nil).\n\t\tAddDropDown(\"Editor\", editors, 0, nil).\n\t\tAddInputField(\"(optional) Custom editor Path\", s.Editor, 30, nil, nil).\n\t\tAddInputField(\"Git clone path\", s.GitPath, 30, nil, nil).\n\t\tAddCheckbox(\"Open URLs in Browser\", s.OpenURL, nil).\n\t\tAddButton(\"Save Settings\", func() { app.Stop() })\n\n\tform.SetBorder(true).SetTitle(title).SetTitleAlign(tview.AlignLeft)\n\tif err := app.SetRoot(form, true).Run(); err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Retrieve values and update settings\n\n\t_, s.Editor = form.GetFormItemByLabel(\"Editor\").(*tview.DropDown).GetCurrentOption()\n\t// If a custom editor has been selected then set the value from the custom Editor field\n\tif s.Editor == \"Custom\" {\n\t\ts.CustomEditor = form.GetFormItemByLabel(\"Editor Path\").(*tview.InputField).GetText()\n\t}\n\n\t// TODO - do a OS/Editor lookup and set the path accordingly\n\n\ts.OpenURL = form.GetFormItemByLabel(\"Open URLs in Browser\").(*tview.Checkbox).IsChecked()\n}",
  "func (gn *Gen) ConfigGui() *gi.Window {\n\twidth := 1600\n\theight := 1200\n\n\tgi.SetAppName(\"Gen\")\n\tgi.SetAppAbout(`Gen concatenated strings of syllables`)\n\n\twin := gi.NewMainWindow(\"one\", \"Gen ...\", width, height)\n\n\tvp := win.WinViewport2D()\n\tupdt := vp.UpdateStart()\n\n\tmfr := win.SetMainFrame()\n\n\ttbar := gi.AddNewToolBar(mfr, \"tbar\")\n\ttbar.SetStretchMaxWidth()\n\t// vi.ToolBar = tbar\n\n\tsplit := gi.AddNewSplitView(mfr, \"split\")\n\tsplit.Dim = gi.X\n\tsplit.SetStretchMaxWidth()\n\tsplit.SetStretchMaxHeight()\n\n\tsv := giv.AddNewStructView(split, \"sv\")\n\tsv.SetStruct(gn)\n\tgn.StructView = sv\n\n\t// tv := gi.AddNewTabView(split, \"tv\")\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Gen cat string\", Icon: \"new\", Tooltip: \"Generate a new initial random seed to get different results.  By default, Init re-establishes the same initial seed every time.\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.CatNoRepeat(gn.syls1)\n\t\t})\n\n\tvp.UpdateEndNoSig(updt)\n\n\t// main menu\n\tappnm := gi.AppName()\n\tmmen := win.MainMenu\n\tmmen.ConfigMenus([]string{appnm, \"File\", \"Edit\", \"Window\"})\n\n\tamen := win.MainMenu.ChildByName(appnm, 0).(*gi.Action)\n\tamen.Menu.AddAppMenu(win)\n\n\temen := win.MainMenu.ChildByName(\"Edit\", 1).(*gi.Action)\n\temen.Menu.AddCopyCutPaste(win)\n\n\tvp.UpdateEndNoSig(updt)\n\n\twin.MainMenuUpdated()\n\treturn win\n}",
  "func NewUI(e *environment.Environment, listenaddr string) *UI {\n\t// if it's already running an UI instance is not created a new one\n\n\t// set environment\n\tenv = e\n\n\tindex := path.Join(\"ui\",\"html\", \"index.html\")\n\theader := path.Join(\"ui\",\"html\", \"header.html\")\n\tcontent := path.Join(\"ui\",\"html\", \"content.html\")\n\tfooter := path.Join(\"ui\",\"html\", \"footer.html\")\n\tjsLibJQquery := path.Join(\"ui\",\"scripts\",\"lib\",\"jquery-3.1.1.min.js\")\n\tjsLibUnderscore := path.Join(\"ui\",\"scripts\", \"lib\", \"underscore-min.js\")\n\tjsUtils := path.Join(\"ui\",\"scripts\", \"utils.js\")\n\tjsMenu := path.Join(\"ui\",\"scripts\", \"menu.js\")\n\tjsClusterlist := path.Join(\"ui\",\"scripts\", \"clusterlist.js\")\n\tjsClusterlistitemdetails := path.Join(\"ui\",\"scripts\", \"itemdetails.js\")\n\tjsChecks := path.Join(\"ui\",\"scripts\", \"checks.js\")\n\tjsCheckDetails := path.Join(\"ui\",\"scripts\", \"checkdetails.js\")\n\tjsVerdmell := path.Join(\"ui\",\"scripts\", \"verdmell.js\")\n\tstyle := path.Join(\"ui\",\"style\", \"verdmell.css\")\n\n\tif ui == nil {\n\t\tui = new(UI)\n\t\tui.SetListenaddr(listenaddr)\n\t\tui.SetClientStormControlPeriod(20)\n\t\tui.SetRouter(mux.NewRouter().StrictSlash(true))\n\t\tui.SetTemplates(template.Must(template.ParseFiles(index,jsLibJQquery,jsLibUnderscore,jsUtils,jsMenu,jsClusterlist,jsClusterlistitemdetails,jsChecks,jsCheckDetails,jsVerdmell,style,header,content,footer)))\n\t\t//ui.SetTemplates(template.Must(template.ParseFiles(index,jsLibJQquery,jsLibUnderscore,jsUtils,jsMenu,jsClusterlist,jsClusterlistitemdetails,jsVerdmell,style,header,content,footer)))\n\t\tui.SetInputChannel(make(chan []byte))\n\t\tui.StartReceiver()\n\t\t\n\t\tenv.Output.WriteChDebug(\"(UI::server::NewUI) New UI listening at: \"+ui.Listenaddr)\n\t\n\t\tui.clients = make( map[chan []byte]bool)\n\t\tui.newClients = make( chan chan []byte)\n\t\tui.defunctClients = make( chan chan []byte)\n\n\t}\n\n\treturn ui\n}",
  "func InitUI() UI {\n\tUI76 := UI{}\n\tUI76.allowInterface = true\n\tUI76.mousePressed = false\n\tUI76.panels = append(UI76.panels, MenuMainMenu, MenuLanded, MenuFlying, MenuMap, MenuEvent)\n\tInitMenuMainMenu()\n\tInitMenuLanded()\n\tInitMenuFlying()\n\tInitMenuShop()\n\tInitMenuMap()\n\tInitMenuEvent()\n\tUI76.currentPanel = MenuMainMenu\n\tUI76.prevPanel = MenuMainMenu\n\t// Fonts\n\ttt, err := truetype.Parse(fonts.MPlus1pRegular_ttf)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfont76 = truetype.NewFace(tt, &truetype.Options{\n\t\tSize:    8,\n\t\tDPI:     96,\n\t\tHinting: font.HintingFull,\n\t})\n\treturn UI76\n}",
  "func (this *Ui_AddFriendDialog) SetupUi(AddFriendDialog *qtwidgets.QDialog) {\n\tthis.AddFriendDialog = AddFriendDialog\n\t// { // 126\n\tif AddFriendDialog.ObjectName() == \"\" {\n\t\tAddFriendDialog.SetObjectName(\"AddFriendDialog\")\n\t}\n\tAddFriendDialog.Resize(606, 246)\n\tthis.VerticalLayout_2 = qtwidgets.NewQVBoxLayout1(this.AddFriendDialog)                                  // 111\n\tthis.VerticalLayout_2.SetObjectName(\"VerticalLayout_2\")                                                  // 112\n\tthis.VerticalLayout = qtwidgets.NewQVBoxLayout()                                                         // 111\n\tthis.VerticalLayout.SetObjectName(\"VerticalLayout\")                                                      // 112\n\tthis.Label = qtwidgets.NewQLabel(this.AddFriendDialog, 0)                                                // 111\n\tthis.Label.SetObjectName(\"Label\")                                                                        // 112\n\tthis.Label.SetTextInteractionFlags(qtcore.Qt__LinksAccessibleByMouse | qtcore.Qt__TextSelectableByMouse) // 114\n\n\tthis.VerticalLayout.Layout().AddWidget(this.Label) // 115\n\n\tthis.LineEdit = qtwidgets.NewQLineEdit(this.AddFriendDialog) // 111\n\tthis.LineEdit.SetObjectName(\"LineEdit\")                      // 112\n\n\tthis.VerticalLayout.Layout().AddWidget(this.LineEdit) // 115\n\n\tthis.Label_2 = qtwidgets.NewQLabel(this.AddFriendDialog, 0) // 111\n\tthis.Label_2.SetObjectName(\"Label_2\")                       // 112\n\n\tthis.VerticalLayout.Layout().AddWidget(this.Label_2) // 115\n\n\tthis.TextEdit = qtwidgets.NewQTextEdit(this.AddFriendDialog) // 111\n\tthis.TextEdit.SetObjectName(\"TextEdit\")                      // 112\n\tthis.TextEdit.SetAcceptRichText(false)                       // 114\n\n\tthis.VerticalLayout.Layout().AddWidget(this.TextEdit) // 115\n\n\tthis.VerticalLayout_2.AddLayout(this.VerticalLayout, 0) // 115\n\n\tthis.HorizontalLayout = qtwidgets.NewQHBoxLayout()      // 111\n\tthis.HorizontalLayout.SetObjectName(\"HorizontalLayout\") // 112\n\tthis.HorizontalSpacer = qtwidgets.NewQSpacerItem(40, 20, qtwidgets.QSizePolicy__Expanding, qtwidgets.QSizePolicy__Minimum)\n\tqtrt.ReleaseOwnerToQt(this.HorizontalSpacer)\n\n\tthis.HorizontalLayout.AddItem(this.HorizontalSpacer) // 115\n\n\tthis.ButtonBox = qtwidgets.NewQDialogButtonBox(this.AddFriendDialog)                                   // 111\n\tthis.ButtonBox.SetObjectName(\"ButtonBox\")                                                              // 112\n\tthis.ButtonBox.SetOrientation(qtcore.Qt__Horizontal)                                                   // 114\n\tthis.ButtonBox.SetStandardButtons(qtwidgets.QDialogButtonBox__Cancel | qtwidgets.QDialogButtonBox__Ok) // 114\n\n\tthis.HorizontalLayout.Layout().AddWidget(this.ButtonBox) // 115\n\n\tthis.VerticalLayout_2.AddLayout(this.HorizontalLayout, 0) // 115\n\n\tthis.RetranslateUi(AddFriendDialog)\n\t// QObject::connect(buttonBox, SIGNAL(accepted()), AddFriendDialog, SLOT(accept())); // 126\n\t// QObject::connect(buttonBox, SIGNAL(rejected()), AddFriendDialog, SLOT(reject())); // 126\n\n\tqtcore.QMetaObject_ConnectSlotsByName(AddFriendDialog) // 100111\n\t// } // setupUi // 126\n\n}",
  "func (ss *Sim) ConfigGui() *gi.Window {\n\twidth := 1600\n\theight := 1200\n\n\tgi.SetAppName(\"hip\")\n\tgi.SetAppAbout(`This demonstrates a basic Hippocampus model in Leabra. See <a href=\"https://github.com/emer/emergent\">emergent on GitHub</a>.</p>`)\n\n\twin := gi.NewMainWindow(\"hip\", \"Hippocampus Close-Far\", width, height)\n\tss.Win = win\n\n\tvp := win.WinViewport2D()\n\tupdt := vp.UpdateStart()\n\n\tmfr := win.SetMainFrame()\n\n\ttbar := gi.AddNewToolBar(mfr, \"tbar\")\n\ttbar.SetStretchMaxWidth()\n\tss.ToolBar = tbar\n\n\tsplit := gi.AddNewSplitView(mfr, \"split\")\n\tsplit.Dim = gi.X\n\tsplit.SetStretchMax()\n\n\tsv := giv.AddNewStructView(split, \"sv\")\n\tsv.SetStruct(ss)\n\n\ttv := gi.AddNewTabView(split, \"tv\")\n\n\tnv := tv.AddNewTab(netview.KiT_NetView, \"NetView\").(*netview.NetView)\n\tnv.Var = \"Act\"\n\t// nv.Params.ColorMap = \"Jet\" // default is ColdHot\n\t// which fares pretty well in terms of discussion here:\n\t// https://matplotlib.org/tutorials/colors/colormaps.html\n\tnv.SetNet(ss.Net)\n\tss.NetView = nv\n\tnv.ViewDefaults()\n\n\tplt := tv.AddNewTab(eplot.KiT_Plot2D, \"TrnTrlPlot\").(*eplot.Plot2D)\n\tss.TrnTrlPlot = ss.ConfigTrnTrlPlot(plt, ss.TrnTrlLog)\n\n\tplt = tv.AddNewTab(eplot.KiT_Plot2D, \"TrnEpcPlot\").(*eplot.Plot2D)\n\tss.TrnEpcPlot = ss.ConfigTrnEpcPlot(plt, ss.TrnEpcLog)\n\n\tplt = tv.AddNewTab(eplot.KiT_Plot2D, \"TstTrlPlot\").(*eplot.Plot2D)\n\tss.TstTrlPlot = ss.ConfigTstTrlPlot(plt, ss.TstTrlLog)\n\n\tplt = tv.AddNewTab(eplot.KiT_Plot2D, \"TstEpcPlot\").(*eplot.Plot2D)\n\tss.TstEpcPlot = ss.ConfigTstEpcPlot(plt, ss.TstEpcLog)\n\n\tplt = tv.AddNewTab(eplot.KiT_Plot2D, \"TstCycPlot\").(*eplot.Plot2D)\n\tss.TstCycPlot = ss.ConfigTstCycPlot(plt, ss.TstCycLog)\n\n\tplt = tv.AddNewTab(eplot.KiT_Plot2D, \"RunPlot\").(*eplot.Plot2D)\n\tss.RunPlot = ss.ConfigRunPlot(plt, ss.RunLog)\n\n\tsplit.SetSplits(.3, .7)\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Init\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tss.Init()\n\t\tvp.SetNeedsFullRender()\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"AllOn\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tss.AllOn()\n\t})\n\n\tsmen := gi.AddNewMenuButton(tbar, \"load\")\n\tsmen.SetText(\"Load\")\n\n\tsmen.Menu.AddAction(gi.ActOpts{Label: \"LoadSem\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tss.OpenPatsSem()\n\t})\n\n\tsmen.Menu.AddAction(gi.ActOpts{Label: \"LoadStudy\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  
Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tss.OpenPats()\n\t})\n\n\tsmen.Menu.AddAction(gi.ActOpts{Label: \"LoadRP\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tss.OpenPatsRP()\n\t})\n\n\tsmen.Menu.AddAction(gi.ActOpts{Label: \"Sleep\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tss.OpenPatsSleep()\n\t})\n\n\ttrmen := gi.AddNewMenuButton(tbar, \"train\")\n\ttrmen.SetText(\"Train\")\n\n\ttrmen.Menu.AddAction(gi.ActOpts{Label: \"TrainHip\", Icon: \"run\", Tooltip: \"Starts the network training, picking up from wherever it may have left off.  If not stopped, training will complete the specified number of Runs through the full number of Epochs of training, with testing automatically occuring at the specified interval.\",\n\t\tUpdateFunc: func(act *gi.Action) {\n\t\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\t// ss.Train()\n\t\t\tgo ss.TrainHip()\n\t\t}\n\t})\n\n\ttrmen.Menu.AddAction(gi.ActOpts{Label: \"TrainSem\", Icon: \"run\", Tooltip: \"Starts the network training, picking up from wherever it may have left off.  If not stopped, training will complete the specified number of Runs through the full number of Epochs of training, with testing automatically occuring at the specified interval.\",\n\t\tUpdateFunc: func(act *gi.Action) {\n\t\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\t// ss.Train()\n\t\t\tgo ss.TrainSem()\n\t\t}\n\t})\n\n\ttrmen.Menu.AddAction(gi.ActOpts{Label: \"TrainRP\", Icon: \"run\", Tooltip: \"Starts the network training, picking up from wherever it may have left off.  If not stopped, training will complete the specified number of Runs through the full number of Epochs of training, with testing automatically occuring at the specified interval.\",\n\t\tUpdateFunc: func(act *gi.Action) {\n\t\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\t// ss.Train()\n\t\t\tgo ss.TrainRetrievalPractice()\n\t\t}\n\t})\n\n\ttrmen.Menu.AddAction(gi.ActOpts{Label: \"TrainSleep\", Icon: \"run\", Tooltip: \"Starts the network training, picking up from wherever it may have left off.  
If not stopped, training will complete the specified number of Runs through the full number of Epochs of training, with testing automatically occuring at the specified interval.\",\n\t\tUpdateFunc: func(act *gi.Action) {\n\t\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\t// ss.Train()\n\t\t\tgo ss.TrainSleep()\n\t\t}\n\t})\n\n\texmen := gi.AddNewMenuButton(tbar, \"Exp\")\n\texmen.SetText(\"Exp\")\n\n\texmen.Menu.AddAction(gi.ActOpts{Label: \"CloseUn\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\t// ss.Train()\n\t\t\tgo ss.CloseUnrelated()\n\t\t}\n\t})\n\n\texmen.Menu.AddAction(gi.ActOpts{Label: \"CloseRe\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\t// ss.Train()\n\t\t\tgo ss.CloseRelated()\n\t\t}\n\t})\n\n\texmen.Menu.AddAction(gi.ActOpts{Label: \"FarUn\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\t// ss.Train()\n\t\t\tgo ss.FarUnrelated()\n\t\t}\n\t})\n\n\texmen.Menu.AddAction(gi.ActOpts{Label: \"FarRe\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\t// ss.Train()\n\t\t\tgo ss.FarRelated()\n\t\t}\n\t})\n\n\texmen.Menu.AddAction(gi.ActOpts{Label: \"FullSet\", Icon: \"update\", Tooltip: \"Initialize everything including network weights, and start over.  Also applies current params.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\t// ss.Train()\n\t\t\tss.ViewOn = false\n\t\t\tss.CloseUnrelated()\n\t\t\tss.CloseRelated()\n\t\t\tss.FarUnrelated()\n\t\t\tss.FarRelated()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Stop\", Icon: \"stop\", Tooltip: \"Interrupts running.  
Hitting Train again will pick back up where it left off.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tss.Stop()\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"StepHip\", Icon: \"step-fwd\", Tooltip: \"Advances one training trial at a time.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\tss.TrainTrialHip()\n\t\t\tss.IsRunning = false\n\t\t\tvp.SetNeedsFullRender()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"StepSem\", Icon: \"step-fwd\", Tooltip: \"Advances one training trial at a time.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\tss.TrainTrialSem()\n\t\t\tss.IsRunning = false\n\t\t\tvp.SetNeedsFullRender()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Step Epoch\", Icon: \"fast-fwd\", Tooltip: \"Advances one epoch (complete set of training patterns) at a time.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\tgo ss.TrainEpoch()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Step Run\", Icon: \"fast-fwd\", Tooltip: \"Advances one full training Run at a time.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\tgo ss.TrainRun()\n\t\t}\n\t})\n\n\ttbar.AddSeparator(\"test\")\n\n\ttbar.AddAction(gi.ActOpts{Label: \"TestTrialSem\", Icon: \"step-fwd\", Tooltip: \"Runs the next testing trial.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\tss.TestTrialSem(false) // don't return on trial -- wrap\n\t\t\tss.IsRunning = false\n\t\t\tvp.SetNeedsFullRender()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"TestTrialH\", Icon: \"step-fwd\", Tooltip: \"Runs the next testing trial.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\tss.TestTrialHip(false) // don't return on trial -- wrap\n\t\t\tss.IsRunning = false\n\t\t\tvp.SetNeedsFullRender()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Test Item\", Icon: \"step-fwd\", Tooltip: \"Prompts for a specific input pattern name to run, and runs it in testing mode.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tgi.StringPromptDialog(vp, \"\", \"Test Item\",\n\t\t\tgi.DlgOpts{Title: \"Test Item\", Prompt: \"Enter the Name of a given input pattern to test (case insensitive, contains given string.\"},\n\t\t\twin.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\t\tdlg := send.(*gi.Dialog)\n\t\t\t\tif sig == int64(gi.DialogAccepted) {\n\t\t\t\t\tval := 
gi.StringPromptDialogValue(dlg)\n\t\t\t\t\tidxs := ss.TestEnv.Table.RowsByString(\"Name\", val, true, true) // contains, ignoreCase\n\t\t\t\t\tif len(idxs) == 0 {\n\t\t\t\t\t\tgi.PromptDialog(nil, gi.DlgOpts{Title: \"Name Not Found\", Prompt: \"No patterns found containing: \" + val}, true, false, nil, nil)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tif !ss.IsRunning {\n\t\t\t\t\t\t\tss.IsRunning = true\n\t\t\t\t\t\t\tfmt.Printf(\"testing index: %v\\n\", idxs[0])\n\t\t\t\t\t\t\tss.TestItem(idxs[0])\n\t\t\t\t\t\t\tss.IsRunning = false\n\t\t\t\t\t\t\tvp.SetNeedsFullRender()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"TestTrialF\", Icon: \"step-fwd\", Tooltip: \"Runs the next testing trial.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\tss.TestTrialFull(false) // don't return on trial -- wrap\n\t\t\tss.IsRunning = false\n\t\t\tvp.SetNeedsFullRender()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Test AllSem\", Icon: \"fast-fwd\", Tooltip: \"Tests all of the testing trials.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\tgo ss.RunTestAllSem()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Test AllH\", Icon: \"fast-fwd\", Tooltip: \"Tests all of the testing trials.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\tgo ss.RunTestAllHip()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Test AllHPure\", Icon: \"fast-fwd\", Tooltip: \"Tests all of the testing trials.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\tgo ss.RunTestAllHipPure()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Test AllF\", Icon: \"fast-fwd\", Tooltip: \"Tests all of the testing trials.\", UpdateFunc: func(act *gi.Action) {\n\t\tact.SetActiveStateUpdt(!ss.IsRunning)\n\t}}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\tif !ss.IsRunning {\n\t\t\tss.IsRunning = true\n\t\t\ttbar.UpdateActions()\n\t\t\tgo ss.RunTestAllFull()\n\t\t}\n\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Env\", Icon: \"gear\", Tooltip: \"select training input patterns: Close or Far.\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgiv.CallMethod(ss, \"SetEnv\", vp)\n\t\t})\n\n\ttbar.AddSeparator(\"log\")\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Reset RunLog\", Icon: \"reset\", Tooltip: \"Reset the accumulated log of all Runs, which are tagged with the ParamSet used\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tss.RunLog.SetNumRows(0)\n\t\t\tss.RunPlot.Update()\n\t\t})\n\n\ttbar.AddSeparator(\"misc\")\n\n\ttbar.AddAction(gi.ActOpts{Label: \"New Seed\", Icon: \"new\", Tooltip: \"Generate a new initial random seed to get different results.  
By default, Init re-establishes the same initial seed every time.\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tss.NewRndSeed()\n\t\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"README\", Icon: \"file-markdown\", Tooltip: \"Opens your browser on the README file that contains instructions for how to run this model.\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgi.OpenURL(\"https://github.com/emer/leabra/blob/master/examples/ra25/README.md\")\n\t\t})\n\n\tvp.UpdateEndNoSig(updt)\n\n\t// main menu\n\tappnm := gi.AppName()\n\tmmen := win.MainMenu\n\tmmen.ConfigMenus([]string{appnm, \"File\", \"Edit\", \"Window\"})\n\n\tamen := win.MainMenu.ChildByName(appnm, 0).(*gi.Action)\n\tamen.Menu.AddAppMenu(win)\n\n\temen := win.MainMenu.ChildByName(\"Edit\", 1).(*gi.Action)\n\temen.Menu.AddCopyCutPaste(win)\n\n\t// note: Command in shortcuts is automatically translated into Control for\n\t// Linux, Windows or Meta for MacOS\n\t// fmen := win.MainMenu.ChildByName(\"File\", 0).(*gi.Action)\n\t// fmen.Menu.AddAction(gi.ActOpts{Label: \"Open\", Shortcut: \"Command+O\"},\n\t// \twin.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t// \t\tFileViewOpenSVG(vp)\n\t// \t})\n\t// fmen.Menu.AddSeparator(\"csep\")\n\t// fmen.Menu.AddAction(gi.ActOpts{Label: \"Close Window\", Shortcut: \"Command+W\"},\n\t// \twin.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t// \t\twin.Close()\n\t// \t})\n\n\tinQuitPrompt := false\n\tgi.SetQuitReqFunc(func() {\n\t\tif inQuitPrompt {\n\t\t\treturn\n\t\t}\n\t\tinQuitPrompt = true\n\t\tgi.PromptDialog(vp, gi.DlgOpts{Title: \"Really Quit?\",\n\t\t\tPrompt: \"Are you <i>sure</i> you want to quit and lose any unsaved params, weights, logs, etc?\"}, true, true,\n\t\t\twin.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\t\tif sig == int64(gi.DialogAccepted) {\n\t\t\t\t\tgi.Quit()\n\t\t\t\t} else {\n\t\t\t\t\tinQuitPrompt = false\n\t\t\t\t}\n\t\t\t})\n\t})\n\n\t// gi.SetQuitCleanFunc(func() {\n\t// \tfmt.Printf(\"Doing final Quit cleanup here..\\n\")\n\t// })\n\n\tinClosePrompt := false\n\twin.SetCloseReqFunc(func(w *gi.Window) {\n\t\tif inClosePrompt {\n\t\t\treturn\n\t\t}\n\t\tinClosePrompt = true\n\t\tgi.PromptDialog(vp, gi.DlgOpts{Title: \"Really Close Window?\",\n\t\t\tPrompt: \"Are you <i>sure</i> you want to close the window?  This will Quit the App as well, losing all unsaved params, weights, logs, etc\"}, true, true,\n\t\t\twin.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\t\tif sig == int64(gi.DialogAccepted) {\n\t\t\t\t\tgi.Quit()\n\t\t\t\t} else {\n\t\t\t\t\tinClosePrompt = false\n\t\t\t\t}\n\t\t\t})\n\t})\n\n\twin.SetCloseCleanFunc(func(w *gi.Window) {\n\t\tgo gi.Quit() // once main window is closed, quit\n\t})\n\n\twin.MainMenuUpdated()\n\treturn win\n}",
  "func (sc *ServiceConfig) ServeUI() {\n\tmime.AddExtensionType(\".json\", \"application/json\")\n\tmime.AddExtensionType(\".woff\", \"application/font-woff\")\n\n\thandler := rest.ResourceHandler{\n\t\tEnableRelaxedContentType: true,\n\t}\n\n\troutes := sc.getRoutes()\n\thandler.SetRoutes(routes...)\n\n\t// FIXME: bubble up these errors to the caller\n\tif err := http.ListenAndServe(\":7878\", &handler); err != nil {\n\t\tglog.Fatalf(\"could not setup internal web server: %s\", err)\n\t}\n}",
  "func (a *AppProvider) UI(ctx context.Context) ui.UI {\n\tcfg := Config(ctx)\n\treturn ui.NewStdout(cfg.UI.Colors, cfg.UI.Animations)\n}",
  "func NewUi(w *app.Window) *Ui {\n\tu := Ui{\n\t\tw:  w,\n\t\tth: material.NewTheme(gofont.Collection()),\n\t\tga: engine.NewGame(),\n\t}\n\tu.th.TextSize = unit.Dp(topMenuPx / 5)\n\tu.ga.ScaleOffset(WidthPx)\n\tu.nameEditor = &widget.Editor{\n\t\tSingleLine: true,\n\t\tSubmit:     true,\n\t}\n\tu.menuBtn.pressed = true\n\tu.titleScreen = true\n\treturn &u\n}",
  "func NewUIConfig(jaeger *v1alpha1.Jaeger) *UIConfig {\n\treturn &UIConfig{jaeger: jaeger}\n}",
  "func CreateUIConfig(context echo.Context) error {\n\troomID := fmt.Sprintf(\"%s-%s\", context.Param(\"building\"), context.Param(\"room\"))\n\tvar ui structs.UIConfig\n\n\terr := context.Bind(&ui)\n\n\ttoReturn, err := db.GetDB().CreateUIConfig(roomID, ui)\n\tif err != nil {\n\t\tlog.L.Errorf(\"[ui-config] Failed to create config file for %s : %s\", roomID, err.Error())\n\t\treturn context.JSON(http.StatusInternalServerError, err.Error())\n\t}\n\n\tlog.L.Infof(\"[ui-config] Successfully created ui config for %s\", roomID)\n\n\treturn context.JSON(http.StatusOK, toReturn)\n}",
  "func (a *myApp) buildUI() {\n\n\t// ##### TOP #####\n\n\tlabel1 := widget.NewLabel(\"Hello there\")\n\tlabel2 := widget.NewLabel(\"(right aligned)\")\n\tcontentTitle := container.NewHBox(label1, layout.NewSpacer(), label2)\n\n\t// ##### SIDEBAR #####\n\n\ta.sidebar = newSidebar(a)\n\tcontSidebar := a.sidebar.buildSidebar()\n\n\t// ##### IMAGE #####\n\n\timg := image.NewRGBA(image.Rect(0, 0, 400, 400))\n\tdraw.Draw(img, img.Bounds(), &image.Uniform{color.RGBA{10, 10, 10, 255}}, image.Point{0, 0}, draw.Src)\n\n\tcImg := canvas.NewImageFromImage(img)\n\tcImg.FillMode = canvas.ImageFillContain\n\tcImg.ScaleMode = canvas.ImageScaleFastest\n\tcImg.SetMinSize(fyne.NewSize(200, 200))\n\n\tallBlack := canvas.NewRectangle(color.RGBA{30, 30, 30, 255})\n\timageBorder := container.NewBorder(nil, nil, nil, nil, allBlack, cImg)\n\n\t// ##### ASSEMBLE #####\n\n\tborderCont := container.NewBorder(contentTitle, nil, contSidebar, nil,\n\t\timageBorder,\n\t)\n\n\ta.mainWin.SetContent(borderCont)\n}",
  "func getUiConfig(config *ini.File) (UiConfig, error) {\n\tuiConfig := UiConfig{}\n\n\t// Get route columns\n\troutesColumns, err := getRoutesColumns(config)\n\tif err != nil {\n\t\treturn uiConfig, err\n\t}\n\n\t// Get rejections and reasons\n\trejections, err := getRoutesRejections(config)\n\tif err != nil {\n\t\treturn uiConfig, err\n\t}\n\n\tnoexports, err := getRoutesNoexports(config)\n\tif err != nil {\n\t\treturn uiConfig, err\n\t}\n\n\t// Make config\n\tuiConfig = UiConfig{\n\t\tRoutesColumns:    routesColumns,\n\t\tRoutesRejections: rejections,\n\t\tRoutesNoexports:  noexports,\n\t}\n\n\treturn uiConfig, nil\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
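	Every row in this dump follows the same four-column layout: a short natural-language query, the matching Go function as the positive document, a list of hard-negative functions, and a metadata object whose objective.triplet entry names the (query, document, negatives) training triple. Below is a minimal Go sketch of how one such record could be modeled and loaded; the JSONL layout and the exact field names are assumptions based on the rendered table, not something the dump itself guarantees.

	// Record mirrors the columns shown in this dump.
	// Assumes: import ("bufio"; "encoding/json"; "os")
	type Record struct {
		Query     string   `json:"query"`
		Document  string   `json:"document"`
		Negatives []string `json:"negatives"`
		Metadata  struct {
			Objective struct {
				Paired  [][]string `json:"paired"`
				Self    [][]string `json:"self"`
				Triplet [][]string `json:"triplet"`
			} `json:"objective"`
		} `json:"metadata"`
	}

	// loadRecords reads records from an assumed JSON Lines export,
	// one JSON object per line.
	func loadRecords(path string) ([]Record, error) {
		f, err := os.Open(path)
		if err != nil {
			return nil, err
		}
		defer f.Close()

		var records []Record
		dec := json.NewDecoder(bufio.NewReader(f))
		for dec.More() {
			var r Record
			if err := dec.Decode(&r); err != nil {
				return nil, err
			}
			records = append(records, r)
		}
		return records, nil
	}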
					
	cloneRepository start repository clone process 
 | 
	func cloneRepository(url, dir string) {
	fmtc.Printf("Fetching index from {*}%s{!}…\n", url)

	i, err := fetchIndex(url)

	if err != nil {
		printErrorAndExit(err.Error())
	}

	if i.Meta.Items == 0 {
		printErrorAndExit("Repository is empty")
	}

	printRepositoryInfo(i)

	// Skip cloning if the local index has the same UUID as the remote one
	uuid := getCurrentIndexUUID(dir)

	if uuid == i.UUID {
		fmtc.Println("{g}Looks like you already have the same set of data{!}")
		return
	}

	// Ask for confirmation unless the OPT_YES option is set
	if !options.GetB(OPT_YES) {
		ok, err := terminal.ReadAnswer("Clone this repository?", "N")
		fmtc.NewLine()

		if !ok || err != nil {
			os.Exit(0)
		}
	}

	downloadRepositoryData(i, url, dir)
	saveIndex(i, dir)

	fmtc.NewLine()
	fmtc.Printf("{g}Repository successfully cloned to {g*}%s{!}\n", dir)
}
 | 
	[
  "func cloneRepo(URI string, destdir string, conf *Configuration) error {\n\t// NOTE: cloneRepo changes the working directory to the cloned repository\n\t// See: https://github.com/G-Node/gin-cli/issues/225\n\t// This will need to change when that issue is fixed\n\torigdir, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Printf(\"%s: Failed to get working directory when cloning repository. Was our working directory removed?\", lpStorage)\n\t\treturn err\n\t}\n\tdefer os.Chdir(origdir)\n\terr = os.Chdir(destdir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"Cloning %s\", URI)\n\n\tclonechan := make(chan git.RepoFileStatus)\n\tgo conf.GIN.Session.CloneRepo(strings.ToLower(URI), clonechan)\n\tfor stat := range clonechan {\n\t\tlog.Print(stat)\n\t\tif stat.Err != nil {\n\t\t\tlog.Printf(\"Repository cloning failed: %s\", stat.Err)\n\t\t\treturn stat.Err\n\t\t}\n\t}\n\n\tdownloadchan := make(chan git.RepoFileStatus)\n\tgo conf.GIN.Session.GetContent(nil, downloadchan)\n\tfor stat := range downloadchan {\n\t\tlog.Print(stat)\n\t\tif stat.Err != nil {\n\t\t\tlog.Printf(\"Repository cloning failed during annex get: %s\", stat.Err)\n\t\t\treturn stat.Err\n\t\t}\n\t}\n\treturn nil\n}",
  "func cloneRepo(repo string) {\n\tfmt.Println(\"starting clone:\", repo)\n\tcmd := exec.Command(\"git\", \"clone\", repo)\n\tif err := cmd.Run(); err != nil {\n\t\tfmt.Printf(\"The repo %s could not be cloned. Error: %v\\n\", repo, err)\n\t}\n\tfmt.Println(\"Completed\", repo)\n}",
  "func clone(p provision.Provisioner, app provision.App) ([]byte, error) {\n\tvar buf bytes.Buffer\n\tpath, err := repository.GetPath()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"tsuru is misconfigured: %s\", err)\n\t}\n\tcmd := fmt.Sprintf(\"git clone %s %s --depth 1\", repository.ReadOnlyURL(app.GetName()), path)\n\terr = p.ExecuteCommand(&buf, &buf, app, cmd)\n\tb := buf.Bytes()\n\tlog.Debugf(`\"git clone\" output: %s`, b)\n\treturn b, err\n}",
  "func (m Manager) Clone(url string, revision string, dir string) error {\n\tlog.Printf(\"Initializing repo %s into %s\\n\", url, dir)\n\trepo, err := vcs.NewRepo(url, dir)\n\tif err != nil {\n\t\treturn fmt.Errorf(errorContainer, repoInitFailed, url, revision, dir, err.Error())\n\t}\n\tlog.Printf(\"Cloning %s into %s\\n\", url, dir)\n\terr = repo.Get()\n\tif err != nil {\n\t\treturn fmt.Errorf(errorContainer, repoCloneFailed, url, revision, dir, err.Error())\n\t}\n\tif revision != \"\" {\n\t\tlog.Printf(\"Checking out revision %s for repo %s\\n\", revision, url)\n\t\terr = repo.UpdateVersion(revision)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(errorContainer, repoCheckoutFailed, url, revision, dir, err.Error())\n\t\t}\n\t} else {\n\t\tlog.Printf(\"Assuming default revision for repo %s\\n\", url)\n\t}\n\treturn nil\n}",
  "func cloneRepos() {\n\tsliceLength := len(gitHubProjects)\n\thomePath := getUserDir()\n\tpath := homePath + FRONTEND_APPS_BASE_DIR\n\tvar wg sync.WaitGroup\n\twg.Add(sliceLength)\n\tfor i := 0; i < sliceLength; i++ {\n\t\tgo func(i int) {\n\t\t\tdefer wg.Done()\n\t\t\tfmt.Println(\"Git Cloning \", gitHubProjects[i])\n\t\t\tcmd := exec.Command(\"git\", \"clone\", gitHubProjects[i])\n\t\t\tcmd.Dir = path\n\t\t\t_, err := cmd.Output()\n\t\t\tcheck(err)\n\t\t}(i)\n\t}\n\twg.Wait()\n}",
  "func rawClone(secrets configure.SecretsOutline, repo api.Repo, path string) {\n\terr := os.MkdirAll(path, 0777)\n\tif err != nil {\n\t\tstatuser.Error(\"Failed to create folder at \"+path, err, 1)\n\t}\n\n\tspin := spinner.New(utils.SpinnerCharSet, utils.SpinnerSpeed)\n\tspin.Suffix = fmt.Sprintf(\" Cloning %v/%v\", repo.Owner, repo.Name)\n\tspin.Start()\n\n\t_, err = git.PlainClone(path, false, &git.CloneOptions{\n\t\tURL: fmt.Sprintf(\"https://github.com/%v/%v.git\", repo.Owner, repo.Name),\n\t\tAuth: &http.BasicAuth{\n\t\t\tUsername: secrets.Username,\n\t\t\tPassword: secrets.PAT,\n\t\t},\n\t})\n\n\tspin.Stop()\n\tif err != nil {\n\t\tstatuser.Error(\"Failed to clone repo\", err, 1)\n\t}\n}",
  "func cloneWorker(repo, dirName, token string, wg *sync.WaitGroup, bar *progressbar.ProgressBar) {\n\t// fmt.Printf(\"[gobackup] cloning %s\\n\", repo)\n\n\t// Decrement the waitgroup count when we are finished cloning the repository and increment our progress bar\n\tdefer bar.Add(1)\n\tdefer wg.Done()\n\t// Get the name of the repo we are cloning\n\trepoName := path.Base(repo)\n\n\trepoName = strings.TrimSuffix(repoName, filepath.Ext(repoName))\n\t// Dirname which will be <github_username>/<repo_name>\n\tdirName = dirName + \"/\" + repoName\n\n\t// Setup auth if we have a token\n\tvar auth *http.BasicAuth\n\tif token != \"\" {\n\t\t// If we have a token\n\t\tauth = &http.BasicAuth{\n\t\t\tUsername: \"gobackup\",\n\t\t\tPassword: token,\n\t\t}\n\t} else {\n\t\t// If we have no token, we dont want to use any auth\n\t\tauth = nil\n\t}\n\t// Clone the repository\n\t_, err := git.PlainClone(dirName, false, &git.CloneOptions{\n\t\tAuth: auth,\n\t\tURL:  repo,\n\t})\n\n\tcheckIfError(err)\n}",
  "func (am *AutogitManager) Clone(\n\tctx context.Context, srcTLF *libkbfs.TlfHandle, srcRepo, branchName string,\n\tdstTLF *libkbfs.TlfHandle, dstDir string) (\n\tdoneCh <-chan struct{}, err error) {\n\tam.log.CDebugf(ctx, \"Autogit clone request from %s/%s:%s to %s/%s\",\n\t\tsrcTLF.GetCanonicalPath(), srcRepo, branchName,\n\t\tdstTLF.GetCanonicalPath(), dstDir)\n\tdefer func() {\n\t\tam.deferLog.CDebugf(ctx, \"Clone request processed: %+v\", err)\n\t}()\n\n\tdstFS, err := libfs.NewFS(\n\t\tctx, am.config, dstTLF, dstDir, \"\", keybase1.MDPriorityNormal)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Take dst lock and create \"CLONING\" file if needed.\n\tlockFile, err := dstFS.Create(autogitLockName(srcRepo))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\tcloseErr := lockFile.Close()\n\t\tif err == nil {\n\t\t\terr = closeErr\n\t\t}\n\t}()\n\terr = lockFile.Lock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = dstFS.MkdirAll(srcRepo, 0600)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdstRepoFS, err := dstFS.Chroot(srcRepo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfis, err := dstRepoFS.ReadDir(\"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(fis) == 0 {\n\t\terr = am.makeCloningFile(ctx, dstRepoFS, srcTLF, srcRepo, branchName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// Sync the CLONING file before starting the reset.\n\t\terr = lockFile.Unlock()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treq := resetReq{\n\t\tsrcTLF, srcRepo, branchName, dstTLF, dstDir, make(chan struct{}),\n\t}\n\treturn am.queueReset(ctx, req)\n}",
  "func Clone(repoURL string, checkout string, cloneToDir string, noInput bool) string {\n\tcloneToDir, err := homedir.Expand(cloneToDir)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tMakeSurePathExist(cloneToDir)\n\t// repoType, repoUrl, err := IdentifyRepo(repoURL)\n\t// if err != nil {\n\t// log.Panic(err)\n\t// }\n\t// \"github.com/go-git/go-git/v5/plumbing/transport/http\"\n\t// Auth: &http.BasicAuth{\n\t// \tUsername: username,\n\t// \tPassword: password,\n\t// },\n\t// \"github.com/go-git/go-git/v5/plumbing/transport/http\"\n\t// Auth: &http.BasicAuth{\n\t// \tUsername: \"abc123\", // yes, this can be anything except an empty string\n\t// \tPassword: token,\n\t// },\n\tpublicKeys, _ := ssh.NewPublicKeysFromFile(\"git\", \"privateKeyFile\", \"\")\n\tr, _ := git.PlainClone(cloneToDir, false, &git.CloneOptions{\n\t\tURL:               repoURL,\n\t\tProgress:          os.Stdout,\n\t\tAuth:              publicKeys,\n\t\tRecurseSubmodules: git.DefaultSubmoduleRecursionDepth,\n\t})\n\tref, _ := r.Head()\n\tcommit, _ := r.CommitObject(ref.Hash())\n\n\treturn commit.String()\n}",
  "func (service *Service) CloneRepository(destination, repositoryURL, referenceName, username, password string, tlsSkipVerify bool) error {\n\toptions := cloneOption{\n\t\tfetchOption: fetchOption{\n\t\t\tbaseOption: baseOption{\n\t\t\t\trepositoryUrl: repositoryURL,\n\t\t\t\tusername:      username,\n\t\t\t\tpassword:      password,\n\t\t\t\ttlsSkipVerify: tlsSkipVerify,\n\t\t\t},\n\t\t\treferenceName: referenceName,\n\t\t},\n\t\tdepth: 1,\n\t}\n\n\treturn service.cloneRepository(destination, options)\n}",
  "func cloneRepo(r Repository) (string, removeDir, error) {\n\tdir, err := ioutil.TempDir(\"\", \"clone\")\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tremoveDir := func() {\n\t\tfunc(path string) {\n\t\t\terr := os.RemoveAll(path)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"unable to remove dir: \", err)\n\t\t\t}\n\t\t}(dir)\n\t}\n\n\tcloneCmd := exec.Command(\"git\", \"clone\", r.Url, dir)\n\tlog.Println(\"running command: \" + strings.Join(cloneCmd.Args, \" \"))\n\tif err := cloneCmd.Run(); err != nil {\n\t\tremoveDir()\n\t\tlog.Fatal(\"unable to git clone \"+r.Name, err)\n\t}\n\n\treturn dir, removeDir, nil\n}",
  "func cloneRepository(defaultBaseDir, url string) (string, error) {\n\tif err := os.RemoveAll(defaultBaseDir); err != nil {\n\t\treturn \"\", fmt.Errorf(\"error cloning the repository. error removing previous directory: %q\", err)\n\t}\n\n\tlog.Printf(\"Cloning the repository [%s] into [%s]\\n\\n\", url, defaultBaseDir)\n\tr, err := git.PlainClone(defaultBaseDir, false, &git.CloneOptions{\n\t\tURL:      url,\n\t\tProgress: os.Stdout,\n\t})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error cloning the repository: %q\", err)\n\t}\n\n\tref, err := r.Head()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error cloning the repository. error getting the HEAD reference of the repository: %q\", err)\n\t}\n\n\tcommit, err := r.CommitObject(ref.Hash())\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error cloning the repository. error getting the lattest commit of the repository: %q\", err)\n\t}\n\treturn commit.Hash.String(), nil\n}",
  "func (g Git) Clone(path, url string, opt *CloneOptions) (*Repository, error) {\n\tif err := os.MkdirAll(path, 0777); err != nil {\n\t\treturn nil, err\n\t}\n\tif opt == nil {\n\t\topt = &CloneOptions{}\n\t}\n\turl, err := opt.Credentials.addToURL(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := &Repository{g, path}\n\targs := []string{\"clone\", url, \".\"}\n\tif opt.Branch != \"\" {\n\t\targs = append(args, \"--branch\", opt.Branch)\n\t}\n\tif _, err := r.run(nil, opt.Timeout, args...); err != nil {\n\t\treturn nil, err\n\t}\n\treturn r, nil\n}",
  "func Clone(repo Repo, destination string) error {\n\tsem <- true\n\tdefer func() {\n\t\t<-sem\n\t}()\n\n\t// nolint: gosec\n\tcmd := exec.Command(\n\t\t\"git\", \"clone\", \"--depth\", \"1\", repo.URL,\n\t\tfilepath.Join(destination, repo.Name),\n\t)\n\tif bts, err := cmd.CombinedOutput(); err != nil {\n\t\treturn fmt.Errorf(\"%w: %v: %v\", ErrClone, repo.Name, string(bts))\n\t}\n\treturn nil\n}",
  "func (b *Bzr) Clone(d *Dependency) (err error) {\n\tif !util.Exists(d.Path()) {\n\t\terr = util.RunCommand(\"go get -u \" + d.Repo)\n\t}\n\treturn\n}",
  "func TestCloneRepo(t *testing.T) {\n\tdefer removeTempRepos()\n\trs := NewRepoService(testConf, gklog.NewNopLogger(), &statsd.Client{})\n\tif err := rs.CloneRepo(\"/tmp\", \"github.com/briandowns/smile\"); err != nil {\n\t\tt.Error(err)\n\t}\n}",
  "func Clone(gitURL string, targetPath string) (err error) {\n\tcloneCmd := fmt.Sprintf(\"git clone %s\", gitURL)\n\tcmd := exec.Command(cloneCmd)\n\terr = cmd.Start()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn\n}",
  "func gitClone(repo, dir string) error {\n\tlog.Printf(\"cloning %s\\n\", repo)\n\n\t_, err := git.PlainClone(dir, false, &git.CloneOptions{\n\t\tURL:      repo,\n\t\tProgress: os.Stdout,\n\t})\n\treturn err\n}",
  "func (repo *TestRepo) Clone(t *testing.T, pattern string) *TestRepo {\n\tt.Helper()\n\n\tpath, err := ioutil.TempDir(\"\", pattern)\n\trequire.NoError(t, err)\n\n\terr = repo.GitCommand(\n\t\tt, \"clone\", \"--bare\", \"--mirror\", repo.Path, path,\n\t).Run()\n\trequire.NoError(t, err)\n\n\treturn &TestRepo{\n\t\tPath: path,\n\t}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	printRepositoryInfo prints basic info about repository data 
 | 
	func printRepositoryInfo(i *index.Index) {
	fmtutil.Separator(false, "REPOSITORY INFO")

	updated := timeutil.Format(time.Unix(i.Meta.Created, 0), "%Y/%m/%d %H:%M:%S")

	fmtc.Printf("     {*}UUID{!}: %s\n", i.UUID)
	fmtc.Printf("  {*}Updated{!}: %s\n\n", updated)

	for _, distName := range i.Data.Keys() {
		for archName, arch := range i.Data[distName] {
			// Reset counters for every architecture, so each line shows
			// per-architecture stats instead of a running total
			size, items := int64(0), 0

			for _, category := range arch {
				for _, version := range category {
					size += version.Size
					items++

					// Every variation counts as a separate item
					if len(version.Variations) != 0 {
						for _, variation := range version.Variations {
							items++
							size += variation.Size
						}
					}
				}
			}

			fmtc.Printf(
				"  {c*}%s{!}{c}/%s:{!} %3s {s-}|{!} %s\n", distName, archName,
				fmtutil.PrettyNum(items), fmtutil.PrettySize(size, " "),
			)
		}
	}

	fmtc.NewLine()

	fmtc.Printf(
		"  {*}Total:{!} %s {s-}|{!} %s\n",
		fmtutil.PrettyNum(i.Meta.Items),
		fmtutil.PrettySize(i.Meta.Size, " "),
	)

	fmtutil.Separator(false)
}
 | 
	[
  "func (r *Repository) PrintInfo() {\n\tif r == nil {\n\t\treturn\n\t}\n\tgitLog := kvLogger{ValuePainter: magenta}\n\n\tgitLog.Println(\"Git Branch:\", r.BranchName)\n\tgitLog.Println(\"Git Commit:\", r.Commit)\n\tgitLog.Println(\"Git Tag:\", r.Tag)\n\tgitLog.Println(\"Git Build:\", r.Build)\n\tfmt.Println(\"\")\n}",
  "func (r *Repository) Info() string {\n\treturn fmt.Sprintf(\"Git Branch: %s\\nGit Commit: %s\\nGit Tag: %s\\nGit Build: %s\\n\", r.BranchName, r.Commit, r.Tag, r.Build)\n}",
  "func (c *Client) RepositoryInfo() (*SDRRepositoryInfoResponse, error) {\n\treq := &Request{\n\t\tNetworkFunctionStorge,\n\t\tCommandGetSDRRepositoryInfo,\n\t\t&SDRRepositoryInfoRequest{},\n\t}\n\tres := &SDRRepositoryInfoResponse{}\n\treturn res, c.Send(req, res)\n}",
  "func (r *GitRegistry) Info(ct *out.Container) {\n\tct.Add(out.NewValue(\"Repository\", r.Repository))\n}",
  "func (r repo) print(verbose bool) {\n\tif verbose {\n\t\tfmt.Printf(\"%s (%s)\\n\\t%s\\n\\t%s\\n\", r.Name, r.Desc, r.HTTP, r.SSH)\n\t\treturn\n\t}\n\tfmt.Println(r.SSH)\n}",
  "func (g *GitLocal) Info(dir string) (*GitRepository, error) {\n\treturn g.GitCLI.Info(dir)\n}",
  "func DisplayInformation(g *gocui.Gui, ag *askgit.AskGit, length time.Duration) error {\n\tout, err := g.View(\"Info\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tw := tabwriter.NewWriter(out, 0, 0, 1, ' ', 0)\n\tout.Clear()\n\tpath, err := filepath.Abs(usrInpt)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintln(w, \"Repo \\t \"+path+\"\\t\")\n\n\trow := ag.DB().QueryRow(\"select count(*) from commits\")\n\tvar commitCount int\n\terr = row.Scan(&commitCount)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintln(w, \"# Commits \\t\", commitCount, \"\\t\")\n\n\trow = ag.DB().QueryRow(\"select count(distinct author_name) from commits\")\n\tvar distinctAuthorCount int\n\terr = row.Scan(&distinctAuthorCount)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintln(w, \"# Authors \\t\", distinctAuthorCount, \"\\t\")\n\n\trow = ag.DB().QueryRow(\"select count(distinct name) from branches where remote = 1\")\n\tvar distinctRemotes int\n\terr = row.Scan(&distinctRemotes)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintln(w, \"# Remote branches \\t\", distinctRemotes, \"\\t\")\n\n\trow = ag.DB().QueryRow(\"select count(distinct name) from branches where remote = 0\")\n\tvar distinctLocals int\n\terr = row.Scan(&distinctLocals)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintln(w, \"# Local branches \\t\", distinctLocals, \"\\t\")\n\n\tfmt.Fprintln(w, \"Query time (ms)\\t\", length.String(), \"\\t\")\n\tw.Flush()\n\treturn nil\n\n}",
  "func (pkg *MCPMPackage) PrintInfo() {\n\tfmt.Printf(\" ID: %d\\n Name: %s (%s)\\n Type: %s\\n\", pkg.id, pkg.title, pkg.name, pkg.ptype)\n}",
  "func describeRepository(flags *pflag.FlagSet, image string) error {\n\torg, _, err := dockerhub.GetFlags(flags)\n\tif err != nil {\n\t\tcolor.Red(\"Error: %s\", err)\n\t}\n\n\trepoInfo, err := dockerhub.NewClient(org, \"\").DescribeRepository(image)\n\tif err != nil {\n\t\tcolor.Red(\"Error: %s\", err)\n\t}\n\n\tcolor.Blue(\"User: \" + repoInfo.User +\n\t\t\"\\nName: \" + repoInfo.Name +\n\t\t\"\\nNamespace: \" + repoInfo.Namespace +\n\t\t\"\\nRepositoryType: \" + repoInfo.RepositoryType +\n\t\t\"\\nStatus: \" + fmt.Sprintf(\"%d\", repoInfo.Status) +\n\t\t\"\\nDescription: \" + repoInfo.Description +\n\t\t\"\\nIsPrivate: \" + fmt.Sprintf(\"%t\", repoInfo.IsPrivate) +\n\t\t\"\\nIsAutomated: \" + fmt.Sprintf(\"%t\", repoInfo.IsAutomated) +\n\t\t\"\\nCanEdit: \" + fmt.Sprintf(\"%t\", repoInfo.CanEdit) +\n\t\t\"\\nStarCount: \" + fmt.Sprintf(\"%d\", repoInfo.StarCount) +\n\t\t\"\\nPullCount: \" + fmt.Sprintf(\"%d\", repoInfo.PullCount) +\n\t\t\"\\nLastUpdated: \" + fmt.Sprint(repoInfo.LastUpdated) +\n\t\t\"\\nIsMigrated: \" + fmt.Sprintf(\"%t\", repoInfo.IsMigrated) +\n\t\t\"\\nCollaboratorCount: \" + fmt.Sprintf(\"%d\", repoInfo.CollaboratorCount) +\n\t\t\"\\nAffiliation: \" + repoInfo.Affiliation +\n\t\t\"\\nHubUser: \" + repoInfo.HubUser)\n\n\treturn nil\n}",
  "func (r *Repositories) ShowRepos() {\n\tfor _, repo := range r.Repos {\n\t\tfmt.Printf(\"* Name: %s\\n\", repo.Name)\n\t\tfmt.Printf(\"  URL: %s\\n\", repo.URL)\n\t\tfmt.Printf(\"  Complexity: %d\\n\", repo.Complexity)\n\t}\n}",
  "func PrintInfo(descriptorDir string, status *tor.RouterStatus) {\n\n\tdesc, err := tor.LoadDescriptorFromDigest(descriptorDir, status.Digest, status.Publication)\n\tif err == nil {\n\t\tif !printedBanner {\n\t\t\tfmt.Println(\"fingerprint,nickname,ip_addr,or_port,dir_port,flags,published,version,platform,bandwidthavg,bandwidthburst,uptime,familysize\")\n\t\t\tprintedBanner = true\n\t\t}\n\t\tfmt.Printf(\"%s,%s,%d,%d,%d,%d\\n\", status, desc.OperatingSystem, desc.BandwidthAvg, desc.BandwidthBurst, desc.Uptime, len(desc.Family))\n\t} else {\n\t\tif !printedBanner {\n\t\t\tfmt.Println(\"fingerprint,nickname,ip_addr,or_port,dir_port,flags,published,version\")\n\t\t\tprintedBanner = true\n\t\t}\n\t\tfmt.Println(status)\n\t}\n}",
  "func (r *UserRepoImpl) Print() string {\n\treturn \"userrepo\" + fmt.Sprintf(\"%v\", r.TContext.Runtime())\n}",
  "func TestPrintStarredRepos(t *testing.T) {\n\tPrintStarredRepos()\n}",
  "func printRepos(repos []repo.Repo, limit uint32, sortFields []string) {\n\tvar str strings.Builder\n\tfor _, repo := range repos {\n\t\tfor _, sortField := range sortFields {\n\t\t\tif sortField == \"Commits\" {\n\t\t\t\tfmt.Fprintf(&str, \"%s:%d \", sortField, repo.CommitCount)\n\t\t\t} else if strings.Contains(sortField, \"Event\") {\n\t\t\t\tfmt.Fprintf(&str, \"%s:%d \", sortField, repo.EventTypeCount[sortField])\n\t\t\t}\n\t\t}\n\t\tfmt.Fprintf(&str, \"ID:%s Name:%s \\n\", repo.ID, repo.Name)\n\t}\n\tfmt.Printf(\"Top %d Repos by %v \\n --- \\n%s --- \\n\", limit, sortFields, str.String())\n}",
  "func PrintRawInfo(app string) {\n\tfmt.Printf(\"Release Version (%s): %s\\n\", app, Version)\n\tfmt.Printf(\"Git Commit Hash: %s\\n\", GitHash)\n\tfmt.Printf(\"Git Branch: %s\\n\", GitBranch)\n\tfmt.Printf(\"UTC Build Time: %s\\n\", BuildTS)\n}",
  "func printRepositories(allRepositories [1000]string) {\n\tfor _, repo := range allRepositories {\n\t\tif len(repo) != 0 {\n\t\t\tfmt.Println(repo)\n\t\t}\n\t}\n}",
  "func (r *Repository) Info() (ReleaseInfo, error) {\n\ti := ReleaseInfo{}\n\n\t//\n\tlastVersion, lastVersionCommit, err := r.getLatestVersion()\n\tif err != nil {\n\t\treturn i, err\n\t}\n\ti.LatestVersion = lastVersion\n\n\t//\n\tnextVersion, commits, err := r.getNextVersion(lastVersion, lastVersionCommit)\n\tif err != nil {\n\t\treturn i, err\n\t}\n\ti.NextVersion = nextVersion\n\ti.NextCommits = commits\n\n\t//\n\theadCommit, isDirty, err := r.getTreeStatus()\n\tif err != nil {\n\t\treturn i, err\n\t}\n\tvar hash string\n\tif lastVersionCommit != headCommit {\n\t\thash = headCommit\n\t}\n\ti.CurrentTag = createTag(lastVersion, hash, isDirty)\n\n\treturn i, nil\n}",
  "func PrintVersionInfo() {\n\tfmt.Println(\"Release Version:\", ShortVersion)\n\tfmt.Println(\"Git Commit Hash:\", GitSha1Version)\n\tfmt.Println(\"Build Time:\", BuildDate)\n}",
  "func (c *Coin) PrintCoinInfo(prefix string) error {\n\tphelper := func(s string, ok bool) string {\n\t\tst := \"MISSING\"\n\t\tif ok {\n\t\t\tst = \"     OK\"\n\t\t}\n\t\treturn fmt.Sprintf(\"[ %s ] %s\", st, s)\n\t}\n\n\tfmt.Printf(`%s\n  * Base directory:     %s\n  * Binary directory:   %s\n  * Coin daemon binary: %s\n  * Coin status binary: %s\n  * Data directory:     %s\n  * Config file:        %s\n`,\n\t\tprefix,\n\t\tphelper(c.state.walletPath, c.state.walletPathExists),\n\t\tphelper(c.state.binPath, c.state.binPathExists),\n\t\tphelper(c.state.daemonBinPath, c.state.daemonBinExists),\n\t\tphelper(c.state.statusBinPath, c.state.statusBinExists),\n\t\tphelper(c.state.dataPath, c.state.dataPathExists),\n\t\tphelper(c.state.configFilePath, c.state.configFileExists))\n\n\treturn nil\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	fetchIndex downloads remote repository index 
 | 
	func fetchIndex(url string) (*index.Index, error) {
	resp, err := req.Request{URL: url + "/" + INDEX_NAME}.Get()
	if err != nil {
		return nil, fmtc.Errorf("Can't fetch repository index: %v", err)
	}
	if resp.StatusCode != 200 {
		return nil, fmtc.Errorf("Can't fetch repository index: server return status code %d", resp.StatusCode)
	}
	repoIndex := &index.Index{}
	err = resp.JSON(repoIndex)
	if err != nil {
		return nil, fmtc.Errorf("Can't decode repository index: %v", err)
	}
	return repoIndex, nil
} 
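
A minimal caller sketch for fetchIndex; the repository URL below is a placeholder, and printErrorAndExit is the same helper assumed by downloadRepositoryData further down:

// Sketch: fetch the remote index and list the operating systems it covers
repoIndex, err := fetchIndex("https://repo.example.com")
if err != nil {
	printErrorAndExit("%v", err)
}

for _, osName := range repoIndex.Data.Keys() {
	fmtc.Printf("%s\n", osName)
}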
 | 
	[
  "func fetchRepoIndex(netClient *HTTPClient, repoURL string, authHeader string) (*repo.IndexFile, error) {\n\treq, err := getReq(repoURL, authHeader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres, err := (*netClient).Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdata, err := readResponseBody(res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn parseIndex(data)\n}",
  "func (hrsi *SubscriberItem) getHelmRepoIndex(client rest.HTTPClient, repoURL string) (indexFile *repo.IndexFile, hash string, err error) {\n\tcleanRepoURL := strings.TrimSuffix(repoURL, \"/\")\n\treq, err := http.NewRequest(http.MethodGet, cleanRepoURL+\"/index.yaml\", nil)\n\n\tif err != nil {\n\t\tklog.Error(err, \"Can not build request: \", cleanRepoURL)\n\t\treturn nil, \"\", err\n\t}\n\n\tif hrsi.ChannelSecret != nil && hrsi.ChannelSecret.Data != nil {\n\t\tif authHeader, ok := hrsi.ChannelSecret.Data[\"authHeader\"]; ok {\n\t\t\treq.Header.Set(\"Authorization\", string(authHeader))\n\t\t} else if user, ok := hrsi.ChannelSecret.Data[\"user\"]; ok {\n\t\t\tif password, ok := hrsi.ChannelSecret.Data[\"password\"]; ok {\n\t\t\t\treq.SetBasicAuth(string(user), string(password))\n\t\t\t} else {\n\t\t\t\treturn nil, \"\", fmt.Errorf(\"password not found in secret for basic authentication\")\n\t\t\t}\n\t\t}\n\t}\n\n\tklog.V(5).Info(req)\n\tresp, err := client.Do(req)\n\n\tif err != nil {\n\t\tklog.Error(err, \"Http request failed: \", cleanRepoURL)\n\t\treturn nil, \"\", err\n\t}\n\n\tklog.V(5).Info(\"Get succeeded: \", cleanRepoURL)\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tklog.Error(err, \"Unable to read body: \", cleanRepoURL)\n\t\treturn nil, \"\", err\n\t}\n\n\tdefer resp.Body.Close()\n\n\thash = hashKey(body)\n\tindexfile, err := loadIndex(body)\n\n\tif err != nil {\n\t\tklog.Error(err, \"Unable to parse the indexfile: \", cleanRepoURL)\n\t\treturn nil, \"\", err\n\t}\n\n\terr = hrsi.filterCharts(indexfile)\n\n\treturn indexfile, hash, err\n}",
  "func (hrsi *SubscriberItem) getHelmRepoIndex(client rest.HTTPClient, repoURL string) (indexFile *repo.IndexFile, hash string, err error) {\n\tcleanRepoURL := strings.TrimSuffix(repoURL, \"/\") + \"/index.yaml\"\n\treq, err := http.NewRequest(http.MethodGet, cleanRepoURL, nil)\n\n\tif err != nil {\n\t\tklog.Error(err, \"Can not build request: \", cleanRepoURL)\n\t\treturn nil, \"\", err\n\t}\n\n\tif hrsi.ChannelSecret != nil && hrsi.ChannelSecret.Data != nil {\n\t\tif authHeader, ok := hrsi.ChannelSecret.Data[\"authHeader\"]; ok {\n\t\t\treq.Header.Set(\"Authorization\", string(authHeader))\n\t\t} else if user, ok := hrsi.ChannelSecret.Data[\"user\"]; ok {\n\t\t\tif password, ok := hrsi.ChannelSecret.Data[\"password\"]; ok {\n\t\t\t\treq.SetBasicAuth(string(user), string(password))\n\t\t\t} else {\n\t\t\t\treturn nil, \"\", fmt.Errorf(\"password not found in secret for basic authentication\")\n\t\t\t}\n\t\t}\n\t}\n\n\tklog.V(5).Info(req)\n\tresp, err := client.Do(req)\n\n\tif err != nil {\n\t\tklog.Error(err, \"Http request failed: \", cleanRepoURL)\n\t\treturn nil, \"\", err\n\t}\n\n\tklog.V(5).Info(\"Get succeeded: \", cleanRepoURL)\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tklog.Error(err, \"Unable to read body: \", cleanRepoURL)\n\t\treturn nil, \"\", err\n\t}\n\n\tdefer resp.Body.Close()\n\n\thash = hashKey(body)\n\tindexfile, err := loadIndex(body)\n\n\tif err != nil {\n\t\tklog.Error(err, \"Unable to parse the indexfile: \", cleanRepoURL)\n\t\treturn nil, \"\", err\n\t}\n\n\terr = utils.FilterCharts(hrsi.Subscription, indexfile)\n\n\treturn indexfile, hash, err\n}",
  "func downloadCacheIndex(ctx context.Context, cacheFile, stableRepositoryURL string, providers getter.Providers) (*repo.Entry, error) {\n\tc := repo.Entry{\n\t\tName:  stableRepository,\n\t\tURL:   stableRepositoryURL,\n\t\tCache: cacheFile,\n\t}\n\n\tr, err := repo.NewChartRepository(&c, providers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := r.DownloadIndexFile(\"\"); err != nil {\n\t\treturn nil, fmt.Errorf(\"looks like %q is not a valid chart repository or cannot be reached: %s\", stableRepositoryURL, err.Error())\n\t}\n\treturn &c, nil\n}",
  "func FetchIndex(addr string) (res *http.Response, err error) {\n\thc := newClientForIndex()\n\theaders := make(map[string]string)\n\theaders[\"Accept\"] = \"application/json\"\n\treq, err := newGetRequest(addr, headers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn hc.Do(req)\n}",
  "func fetchIndexPage(ctx context.Context, t time.Time) ([]IndexedModule, error) {\n\tvar q = make(url.Values)\n\tif !t.IsZero() {\n\t\tq.Set(\"since\", t.Format(time.RFC3339Nano))\n\t}\n\turl := (&url.URL{Scheme: \"https\", Host: \"index.golang.org\", Path: \"/index\", RawQuery: q.Encode()}).String()\n\tresp, err := ctxhttp.Get(ctx, nil, url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\treturn nil, fmt.Errorf(\"non-200 OK status code: %v body: %q\", resp.Status, body)\n\t}\n\tvar mods []IndexedModule\n\tfor dec := json.NewDecoder(resp.Body); ; {\n\t\tvar v struct {\n\t\t\tmodule.Version\n\t\t\tIndex time.Time `json:\"Timestamp\"`\n\t\t}\n\t\terr := dec.Decode(&v)\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmods = append(mods, IndexedModule(v))\n\t}\n\treturn mods, nil\n}",
  "func (s *Sync) fetchRemoteIdx() (*index.Index, error) {\n\tspv := client.NewSupervised(s.opts.ClientFunc, 30*time.Second)\n\treturn spv.MountGetIndex(s.m.RemotePath)\n}",
  "func (w *Worker) index(repoName api.RepoName, rev string, isPrimary bool) (err error) {\n\trepo, commit, err := resolveRevision(w.Ctx, repoName, rev)\n\tif err != nil {\n\t\t// Avoid infinite loop for always cloning test.\n\t\tif repo != nil && repo.Name == \"github.com/sourcegraphtest/AlwaysCloningTest\" {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\t// Check if index is already up-to-date\n\tif repo.IndexedRevision != nil && (repo.FreezeIndexedRevision || *repo.IndexedRevision == commit) {\n\t\treturn nil\n\t}\n\n\t// Get language\n\tinv, err := api.InternalClient.ReposGetInventoryUncached(w.Ctx, repo.ID, commit)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Repos.GetInventory failed: %s\", err)\n\t}\n\tlang := inv.PrimaryProgrammingLanguage()\n\n\t// Update global refs & packages index\n\tif !repo.Fork() {\n\t\tvar errs []error\n\t\tif err := api.InternalClient.DefsRefreshIndex(w.Ctx, repo.Name, commit); err != nil {\n\t\t\terrs = append(errs, fmt.Errorf(\"Defs.RefreshIndex failed: %s\", err))\n\t\t}\n\n\t\tif err := api.InternalClient.PkgsRefreshIndex(w.Ctx, repo.Name, commit); err != nil {\n\t\t\terrs = append(errs, fmt.Errorf(\"Pkgs.RefreshIndex failed: %s\", err))\n\t\t}\n\n\t\tif err := makeMultiErr(errs...); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = api.InternalClient.ReposUpdateIndex(w.Ctx, repo.ID, commit, lang)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
  "func GetIndex() (Index, error) {\n    token := utils.Config()[\"DROPBOX_CENTRAL_ACCOUNT_TOKEN\"]\n    resp, err := utils.MakeRequest(\n        \"https://content.dropboxapi.com/2/files/download\",\n        \"POST\",\n        []byte{},\n        map[string]string{\n            \"Dropbox-API-Arg\": \"{ \\\"path\\\": \\\"/dbpedia_index.json\\\" }\",\n            \"Authorization\": \"Bearer \" + token,\n        },\n    )\n    defer resp.Body.Close()\n\n    readBytes, err := ioutil.ReadAll(resp.Body)\n    if err != nil {\n        utils.Error(\"Error reading JSON for index\")\n        return Index{}, err\n    }\n\n    var result Index\n    err = json.Unmarshal(readBytes, &result)\n    if err != nil {\n        utils.Error(\"Error decoding JSON for index\")\n        return Index{}, err\n    }\n\n    return result, nil\n}",
  "func fetchPackedObject(sha1 string) error {\n\t// search for all index files\n\tpackPath := filepath.Join(ipfsRepoPath, \"objects\", \"pack\")\n\tlinks, err := ipfsShell.List(packPath)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"shell FileList(%q) failed\", packPath)\n\t}\n\tvar indexes []string\n\tfor _, lnk := range links {\n\t\tif lnk.Type == 2 && strings.HasSuffix(lnk.Name, \".idx\") {\n\t\t\tindexes = append(indexes, filepath.Join(packPath, lnk.Name))\n\t\t}\n\t}\n\tif len(indexes) == 0 {\n\t\treturn errors.New(\"fetchPackedObject: no idx files found\")\n\t}\n\tfor _, idx := range indexes {\n\t\tidxF, err := ipfsShell.Cat(idx)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"fetchPackedObject: idx<%s> cat(%s) failed\", sha1, idx)\n\t\t}\n\t\t// using external git show-index < idxF for now\n\t\t// TODO: parse index file in go to make this portable\n\t\tvar b bytes.Buffer\n\t\tshowIdx := exec.Command(\"git\", \"show-index\")\n\t\tshowIdx.Stdin = idxF\n\t\tshowIdx.Stdout = &b\n\t\tshowIdx.Stderr = &b\n\t\tif err := showIdx.Run(); err != nil {\n\t\t\treturn errors.Wrapf(err, \"fetchPackedObject: idx<%s> show-index start failed\", sha1)\n\t\t}\n\t\tcmdOut := b.String()\n\t\tif !strings.Contains(cmdOut, sha1) {\n\t\t\tlog.Log(\"idx\", filepath.Base(idx), \"event\", \"debug\", \"msg\", \"git show-index: sha1 not in index, next idx file\")\n\t\t\tcontinue\n\t\t}\n\t\t// we found an index with our hash inside\n\t\tpack := strings.Replace(idx, \".idx\", \".pack\", 1)\n\t\tpackF, err := ipfsShell.Cat(pack)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"fetchPackedObject: pack<%s> open() failed\", sha1)\n\t\t}\n\t\tb.Reset()\n\t\tunpackIdx := exec.Command(\"git\", \"unpack-objects\")\n\t\tunpackIdx.Dir = thisGitRepo // GIT_DIR\n\t\tunpackIdx.Stdin = packF\n\t\tunpackIdx.Stdout = &b\n\t\tunpackIdx.Stderr = &b\n\t\tif err := unpackIdx.Run(); err != nil {\n\t\t\treturn errors.Wrapf(err, \"fetchPackedObject: pack<%s> 'git unpack-objects' failed\\nOutput: %s\", sha1, b.String())\n\t\t}\n\t\treturn nil\n\t}\n\treturn errors.Errorf(\"did not find sha1<%s> in %d index files\", sha1, len(indexes))\n}",
  "func (api *API) GetIndex(w http.ResponseWriter, r *http.Request) {\n\n\tinfo := Info{Port: api.Session.Config.API.Port, Versions: Version}\n\td := Metadata{Info: info}\n\n\tres := CodeToResult[CodeOK]\n\tres.Data = d\n\tres.Message = \"Documentation available at https://github.com/netm4ul/netm4ul\"\n\tw.WriteHeader(res.HTTPCode)\n\tjson.NewEncoder(w).Encode(res)\n}",
  "func loadIndexFile(r *hub.ChartRepository) (*repo.IndexFile, error) {\n\trepoConfig := &repo.Entry{\n\t\tName: r.Name,\n\t\tURL:  r.URL,\n\t}\n\tgetters := getter.All(&cli.EnvSettings{})\n\tchartRepository, err := repo.NewChartRepository(repoConfig, getters)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpath, err := chartRepository.DownloadIndexFile()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tindexFile, err := repo.LoadIndexFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn indexFile, nil\n}",
  "func GetIdxFile(ctx *context.Context) {\n\th := httpBase(ctx)\n\tif h != nil {\n\t\th.setHeaderCacheForever()\n\t\th.sendFile(\"application/x-git-packed-objects-toc\", \"objects/pack/pack-\"+ctx.Params(\"file\")+\".idx\")\n\t}\n}",
  "func (is *ImageStoreLocal) GetIndexContent(repo string) ([]byte, error) {\n\tdir := path.Join(is.rootDir, repo)\n\n\tbuf, err := os.ReadFile(path.Join(dir, \"index.json\"))\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tis.log.Debug().Err(err).Str(\"dir\", dir).Msg(\"index.json doesn't exist\")\n\n\t\t\treturn []byte{}, zerr.ErrRepoNotFound\n\t\t}\n\n\t\tis.log.Error().Err(err).Str(\"dir\", dir).Msg(\"failed to read index.json\")\n\n\t\treturn []byte{}, err\n\t}\n\n\treturn buf, nil\n}",
  "func (r *ChartRepoReconciler) GetIndex(cr *v1beta1.ChartRepo, ctx context.Context) (*repo.IndexFile, error) {\n\tvar username string\n\tvar password string\n\n\tif cr.Spec.Secret != nil {\n\t\tns := cr.Spec.Secret.Namespace\n\t\tif ns == \"\" {\n\t\t\tns = cr.Namespace\n\t\t}\n\n\t\tkey := client.ObjectKey{Namespace: ns, Name: cr.Spec.Secret.Name}\n\n\t\tvar secret corev1.Secret\n\t\tif err := r.Get(ctx, key, &secret); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdata := secret.Data\n\t\tusername = string(data[\"username\"])\n\t\tpassword = string(data[\"password\"])\n\n\t}\n\n\tlink := strings.TrimSuffix(cr.Spec.URL, \"/\") + \"/index.yaml\"\n\treq, err := http.NewRequest(\"GET\", link, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif username != \"\" && password != \"\" {\n\t\treq.SetBasicAuth(username, password)\n\t}\n\n\treturn getRepoIndexFile(req)\n\n}",
  "func Index(\n\tstore *model.RepositoryStore,\n\ttxer repository.RootedTransactioner,\n\toutputFile string,\n\tworkers int,\n\tlimit uint64,\n\toffset uint64,\n\tlist []string,\n) {\n\tf, err := createOutputFile(outputFile)\n\tif err != nil {\n\t\tlogrus.WithField(\"file\", outputFile).WithField(\"err\", err).\n\t\t\tFatal(\"unable to create file\")\n\t}\n\tdefer f.Close()\n\n\tw := csv.NewWriter(f)\n\tif err := w.Write(csvHeader); err != nil {\n\t\tlogrus.WithField(\"err\", err).Fatal(\"unable to write csv header\")\n\t}\n\tw.Flush()\n\n\trs, total, err := getResultSet(store, limit, offset, list)\n\tif err != nil {\n\t\tlogrus.WithField(\"err\", err).Fatal(\"unable to get result set\")\n\t}\n\n\tsignals := make(chan os.Signal, 1)\n\tsignal.Notify(signals, os.Interrupt)\n\trepos := processRepos(workers, txer, rs)\n\tvar processed int\n\tfor {\n\t\tselect {\n\t\tcase repo, ok := <-repos:\n\t\t\tif !ok {\n\t\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\t\"processed\": processed,\n\t\t\t\t\t\"failed\":    total - int64(processed),\n\t\t\t\t\t\"total\":     total,\n\t\t\t\t}).Info(\"finished processing all repositories\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlogrus.WithField(\"repo\", repo.URL).Debug(\"writing record to CSV\")\n\t\t\tif err := w.Write(repo.toRecord()); err != nil {\n\t\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\t\"err\":  err,\n\t\t\t\t\t\"repo\": repo.URL,\n\t\t\t\t}).Fatal(\"unable to write csv record\")\n\t\t\t}\n\t\t\tw.Flush()\n\t\t\tprocessed++\n\t\tcase <-signals:\n\t\t\tlogrus.Warn(\"received an interrupt signal, stopping\")\n\t\t\treturn\n\t\t}\n\t}\n}",
  "func GetIndexByRepo(repo *Repo, downloadIndex IndexDownloader) (*Index, error) {\n\tif repo.Config.Name != \"\" {\n\t\treturn GetIndexByDownloader(func() ([]byte, error) {\n\t\t\treturn os.ReadFile(filepath.Join(repo.CachePath, fmt.Sprintf(\"%s-index.yaml\", repo.Config.Name)))\n\t\t})\n\t}\n\treturn GetIndexByDownloader(downloadIndex)\n}",
  "func downloadRepositoryData(i *index.Index, url, dir string) {\n\titems := getItems(i, url)\n\n\tpb := progress.New(int64(len(items)), \"Starting…\")\n\n\tpbs := progress.DefaultSettings\n\tpbs.IsSize = false\n\tpbs.ShowSpeed = false\n\tpbs.ShowRemaining = false\n\tpbs.ShowName = false\n\tpbs.NameColorTag = \"{*}\"\n\tpbs.BarFgColorTag = colorTagApp\n\tpbs.PercentColorTag = \"\"\n\tpbs.RemainingColorTag = \"{s}\"\n\n\tpb.UpdateSettings(pbs)\n\tpb.Start()\n\n\tfmtc.Printf(\n\t\t\"Downloading %s %s from remote repository…\\n\",\n\t\tfmtutil.PrettyNum(len(items)),\n\t\tpluralize.Pluralize(len(items), \"file\", \"files\"),\n\t)\n\n\tfor _, item := range items {\n\t\tfileDir := path.Join(dir, item.OS, item.Arch)\n\t\tfilePath := path.Join(dir, item.OS, item.Arch, item.File)\n\n\t\tif !fsutil.IsExist(fileDir) {\n\t\t\terr := os.MkdirAll(fileDir, 0755)\n\n\t\t\tif err != nil {\n\t\t\t\tpb.Finish()\n\t\t\t\tfmtc.NewLine()\n\t\t\t\tprintErrorAndExit(\"Can't create directory %s: %v\", fileDir, err)\n\t\t\t}\n\t\t}\n\n\t\tif fsutil.IsExist(filePath) {\n\t\t\tfileSize := fsutil.GetSize(filePath)\n\n\t\t\tif fileSize == item.Size {\n\t\t\t\tpb.Add(1)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\terr := downloadFile(item.URL, filePath)\n\n\t\tif err != nil {\n\t\t\tpb.Finish()\n\t\t\tfmtc.NewLine()\n\t\t\tprintErrorAndExit(\"%v\", err)\n\t\t}\n\n\t\tpb.Add(1)\n\t}\n\n\tpb.Finish()\n\n\tfmtc.Printf(\"\\n{g}Repository successfully cloned into %s{!}\\n\")\n}",
  "func (is *ObjectStorage) GetIndexContent(repo string) ([]byte, error) {\n\tdir := path.Join(is.rootDir, repo)\n\n\tbuf, err := is.store.GetContent(context.Background(), path.Join(dir, \"index.json\"))\n\tif err != nil {\n\t\tif errors.Is(err, driver.PathNotFoundError{}) {\n\t\t\tis.log.Error().Err(err).Str(\"dir\", dir).Msg(\"index.json doesn't exist\")\n\n\t\t\treturn []byte{}, zerr.ErrRepoNotFound\n\t\t}\n\n\t\tis.log.Error().Err(err).Str(\"dir\", dir).Msg(\"failed to read index.json\")\n\n\t\treturn []byte{}, err\n\t}\n\n\treturn buf, nil\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	downloadRepositoryData downloads all files from repository 
 | 
	func downloadRepositoryData(i *index.Index, url, dir string) {
	items := getItems(i, url)
	pb := progress.New(int64(len(items)), "Starting…")
	pbs := progress.DefaultSettings
	pbs.IsSize = false
	pbs.ShowSpeed = false
	pbs.ShowRemaining = false
	pbs.ShowName = false
	pbs.NameColorTag = "{*}"
	pbs.BarFgColorTag = colorTagApp
	pbs.PercentColorTag = ""
	pbs.RemainingColorTag = "{s}"
	pb.UpdateSettings(pbs)
	pb.Start()
	fmtc.Printf(
		"Downloading %s %s from remote repository…\n",
		fmtutil.PrettyNum(len(items)),
		pluralize.Pluralize(len(items), "file", "files"),
	)
	for _, item := range items {
		fileDir := path.Join(dir, item.OS, item.Arch)
		filePath := path.Join(dir, item.OS, item.Arch, item.File)
		if !fsutil.IsExist(fileDir) {
			err := os.MkdirAll(fileDir, 0755)
			if err != nil {
				pb.Finish()
				fmtc.NewLine()
				printErrorAndExit("Can't create directory %s: %v", fileDir, err)
			}
		}
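		// Skip files that already exist on disk with the expected size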
		if fsutil.IsExist(filePath) {
			fileSize := fsutil.GetSize(filePath)
			if fileSize == item.Size {
				pb.Add(1)
				continue
			}
		}
		err := downloadFile(item.URL, filePath)
		if err != nil {
			pb.Finish()
			fmtc.NewLine()
			printErrorAndExit("%v", err)
		}
		pb.Add(1)
	}
	pb.Finish()
	fmtc.Printf("\n{g}Repository successfully cloned into %s{!}\n")
} 
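
downloadRepositoryData calls a downloadFile helper that is not shown here; a plausible standard-library sketch (the assumed signature is inferred from the call site above, and the original may instead use ek's req package like fetchIndex):

// downloadFile fetches url and writes the response body to output
// (hypothetical implementation; needs the net/http, os and io imports)
func downloadFile(url, output string) error {
	resp, err := http.Get(url)
	if err != nil {
		return fmtc.Errorf("Can't download file: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return fmtc.Errorf("Can't download file: server returned status code %d", resp.StatusCode)
	}
	fd, err := os.OpenFile(output, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
	if err != nil {
		return fmtc.Errorf("Can't create file %s: %v", output, err)
	}
	defer fd.Close()
	_, err = io.Copy(fd, resp.Body)
	return err
}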
 | 
	[
  "func (repo *RemoteRepo) Download(d utils.Downloader, db database.Storage, packageRepo *Repository) error {\n\tlist := NewPackageList()\n\n\t// Download and parse all Release files\n\tfor _, component := range repo.Components {\n\t\tfor _, architecture := range repo.Architectures {\n\t\t\tpackagesReader, packagesFile, err := utils.DownloadTryCompression(d, repo.BinaryURL(component, architecture).String())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer packagesFile.Close()\n\n\t\t\tparas, err := debc.Parse(packagesReader)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfor _, para := range paras {\n\t\t\t\tp := NewPackageFromControlFile(para)\n\n\t\t\t\tlist.Add(p)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Save package meta information to DB\n\tlist.ForEach(func(p *Package) {\n\t\tdb.Put(p.Key(), p.Encode())\n\t})\n\n\t// Download all package files\n\tch := make(chan error, list.Len())\n\tcount := 0\n\n\tlist.ForEach(func(p *Package) {\n\t\tpoolPath, err := packageRepo.PoolPath(p.Filename)\n\t\tif err == nil {\n\t\t\tif !p.VerifyFile(poolPath) {\n\t\t\t\td.Download(repo.PackageURL(p.Filename).String(), poolPath, ch)\n\t\t\t\tcount++\n\t\t\t}\n\t\t}\n\t})\n\n\t// Wait for all downloads to finish\n\t// TODO: report errors\n\tfor count > 0 {\n\t\t_ = <-ch\n\t\tcount--\n\t}\n\n\trepo.LastDownloadDate = time.Now()\n\trepo.packageRefs = NewPackageRefListFromPackageList(list)\n\n\treturn nil\n}",
  "func DownloadSelectedRepositoryFiles(c *http.Client, owner, repo, branch string, include Matcher) map[string]RepositoryFile {\n\tu := fmt.Sprintf(\"https://api.github.com/repos/%s/%s/tarball/%s\", owner, repo, branch)\n\tcore.Debugf(\"Downloading tarball for repo: %s\", u)\n\treq, err := http.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\tcore.Warningf(\"failed to download repository: %v\", err)\n\t\treturn nil\n\t}\n\tauthorize(req)\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\tcore.Warningf(\"failed to download repository: %v\", err)\n\t\treturn nil\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\tcore.Warningf(\"failed to download repository: unexpected code %d\", resp.StatusCode)\n\t\treturn nil\n\t}\n\tdefer resp.Body.Close()\n\tvar body io.Reader = resp.Body\n\tswitch resp.Header.Get(\"Content-Type\") {\n\tcase \"application/gzip\", \"application/x-gzip\":\n\t\tbody, err = gzip.NewReader(body)\n\t\tif err != nil {\n\t\t\tcore.Warningf(\"failed to download repository: %v\", err)\n\t\t\treturn nil\n\t\t}\n\t}\n\tfiles := map[string]RepositoryFile{}\n\ttr := tar.NewReader(body)\n\tfor {\n\t\thdr, err := tr.Next()\n\t\tif err == io.EOF {\n\t\t\tbreak // End of archive\n\t\t}\n\t\tif err != nil {\n\t\t\tcore.Warningf(\"failed to download repository: %v\", err)\n\t\t\treturn nil\n\t\t}\n\t\tif hdr.Format == tar.FormatPAX || hdr.FileInfo().IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tsplittedName := strings.SplitN(hdr.Name, \"/\", 2)\n\t\tif len(splittedName) > 1 {\n\t\t\tname := splittedName[1]\n\t\t\tif include(name) {\n\t\t\t\tcore.Debugf(\"Downloading %v\", hdr.Name)\n\t\t\t\tb := bytes.NewBuffer(nil)\n\t\t\t\tif _, err := io.Copy(b, tr); err != nil {\n\t\t\t\t\tcore.Warningf(\"failed to download repository: %v\", err)\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tfiles[name] = RepositoryFile{\n\t\t\t\t\tPath:     name,\n\t\t\t\t\tFileInfo: hdr.FileInfo(),\n\t\t\t\t\tData:     b.Bytes(),\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn files\n}",
  "func (r *Repositories) Download() error {\n\tfor _, repo := range r.Repos {\n\t\tif repo.Complexity > r.config.Complexity {\n\t\t\tcontinue\n\t\t}\n\n\t\tlogger := log.New(log.Fields{\"name\": repo.Name})\n\n\t\tpath := filepath.Join(r.config.RepositoriesCache, repo.Name)\n\t\texist, err := fileExist(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif exist {\n\t\t\tlogger.Debugf(\"Repository already downloaded\")\n\t\t\tcontinue\n\t\t}\n\n\t\tlogger = logger.New(log.Fields{\n\t\t\t\"url\":  repo.URL,\n\t\t\t\"path\": path,\n\t\t})\n\n\t\tlogger.Debugf(\"Downloading repository\")\n\t\terr = os.MkdirAll(r.config.RepositoriesCache, 0755)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = downloadRepo(logger, repo.URL, path)\n\t\tif err != nil {\n\t\t\tlogger.Errorf(err, \"Could not download repository\")\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
  "func download(p int, repos []*github.Repository) []string {\n\tLogIfVerbose(\"Downloading %d number of repos with %d parallel threads.\\n\", len(repos), p)\n\tsema := make(chan struct{}, p)\n\tarchiveList := make([]string, 0)\n\tvar wg sync.WaitGroup\n\tclient := NewClient()\n\tfor _, r := range repos {\n\t\twg.Add(1)\n\t\tgo func(repo *github.Repository) {\n\t\t\tdefer wg.Done()\n\t\t\tsema <- struct{}{}\n\t\t\tdefer func() { <-sema }()\n\t\t\tLogIfVerbose(\"Downloading archive for repository: %s\\n\", *repo.URL)\n\t\t\tdownloadURL := \"https://github.com/\" + repo.GetOwner().GetLogin() + \"/\" + repo.GetName() + \"/archive/master.zip\"\n\t\t\tarchiveName := repo.GetName() + \".zip\"\n\t\t\tout, err := os.Create(archiveName)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"failed downloading repo: \", repo.GetName())\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer out.Close()\n\t\t\tLogIfVerbose(\"Started downloading archive for: %s\\n\", downloadURL)\n\t\t\tresp, err := client.client.Get(downloadURL)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"failed to get zip archive for repo: \", repo.GetName())\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\tlog.Println(\"status was not 200: \", resp.Status)\n\t\t\t}\n\t\t\t_, _ = io.Copy(out, resp.Body)\n\t\t\tarchiveList = append(archiveList, archiveName)\n\t\t}(r)\n\t}\n\twg.Wait()\n\treturn archiveList\n}",
  "func downloadRemoteArtifactWorker(artDetails *jfauth.ServiceDetails, chFiles <-chan string, tgtDir string) {\n\trtBase := (*artDetails).GetUrl()\n\tdlcount := 0\n\tfor f := range chFiles {\n\t\trtURL := rtBase + f\n\t\tjflog.Debug(\"Getting '\" + rtURL + \"' details ...\")\n\t\t// fmt.Printf(\"Fetching : %s\\n\", rtURL)\n\t\treq, err := http.NewRequest(\"GET\", rtURL, nil)\n\t\tif err != nil {\n\t\t\tjflog.Error(\"http.NewRequest failed\")\n\t\t}\n\t\treq.SetBasicAuth((*artDetails).GetUser(), (*artDetails).GetApiKey())\n\n\t\tresp, err := http.DefaultClient.Do(req)\n\t\tif err != nil {\n\t\t\tjflog.Error(\"http.DefaultClient.Do failed\")\n\t\t\tcontinue\n\t\t}\n\n\t\tfpath := tgtDir + \"/\" + f\n\t\tfdir, _ := filepath.Split(fpath)\n\t\tif _, err := os.Stat(fpath); os.IsNotExist(err) {\n\t\t\tos.MkdirAll(fdir, 0700) // Create directory\n\t\t}\n\n\t\t// Create the file\n\t\tout, err := os.Create(fpath)\n\t\tif err != nil {\n\t\t\tjflog.Error(\"Failed to create file : %s\", fpath)\n\t\t\tresp.Body.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\t// Write the body to file\n\t\t_, err = io.Copy(out, resp.Body)\n\t\tif err != nil {\n\t\t\tjflog.Error(\"Failed to copy download to file : %s\", fpath)\n\t\t}\n\t\t//fmt.Printf(\"downloading to complete: %s\\n\", fpath)\n\t\tdlcount++\n\t\tresp.Body.Close()\n\t\tout.Close()\n\t}\n\t//fmt.Printf(\"downloadRemoteArtifactWorker() complete, downloaded %d files\\n\", dlcount)\n\tjflog.Info(fmt.Sprintf(\"downloadRemoteArtifactWorker() complete, downloaded %d files\", dlcount))\n}",
  "func DownloadandSaveFiles(elkdemoinstance Elkdemo) {\n\t{\n\t\tfor _, element := range elkdemoinstance.Filesets {\n\n\t\t\tfmt.Printf(\"Fileset Name : %v \\n\", element.Filepersona)\n\t\t\tif element.Action.Download == \"yes\" { //Download only if true\n\t\t\t\tsuccess, filename := getFileFromURL(element.Sourceurl, element.Savefileas)\n\t\t\t\tfmt.Printf(\"%v, %v\", success, filename)\n\t\t\t}\n\t\t}\n\t}\n\n}",
  "func Fetch(c *cli.Context) error {\n\tvar numDownloaded uint32\n\tvar numSkipped uint32\n\tvar ctx = context.Background()\n\tvar fileName = \"Jenkinsfile\"\n\tvar orgName = c.String(\"org\")\n\tvar numRepos = 0\n\tvar down = make(chan uint32, 1)\n\tvar skip = make(chan uint32, 1)\n\tvar path = c.String(\"directory\")\n\tvar debug = c.Bool(\"debug\")\n\tvar skippedRepos = make(chan string, 10000)\n\tif c.Args().First() != \"\" {\n\t\tfileName = c.Args().First()\n\t}\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: c.String(\"token\")},\n\t)\n\ttc := oauth2.NewClient(ctx, ts)\n\tclient := github.NewClient(tc)\n\topt := &github.RepositoryListByOrgOptions{\n\t\tListOptions: github.ListOptions{PerPage: 100},\n\t}\n\tdown <- numDownloaded\n\tskip <- numSkipped\n\tvar wg sync.WaitGroup\n\tfor {\n\t\trepos, resp, err := client.Repositories.ListByOrg(ctx, orgName, opt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tnumRepos = numRepos + len(repos)\n\t\tfor _, repo := range repos {\n\t\t\trepoName := repo.GetName()\n\t\t\tbranchName := repo.GetDefaultBranch()\n\t\t\tif c.String(\"branch\") != \"\" {\n\t\t\t\tbranchName = c.String(\"branch\")\n\t\t\t}\n\t\t\ttime.Sleep(5 * time.Millisecond)\n\t\t\twg.Add(1)\n\t\t\tgo func(wg *sync.WaitGroup, repoName string, branchName string) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\trawContent, _, _, err := client.Repositories.GetContents(ctx, orgName, repoName, fileName, &github.RepositoryContentGetOptions{Ref: branchName})\n\t\t\t\tif err != nil {\n\t\t\t\t\tif debug {\n\t\t\t\t\t\tfmt.Fprintf(c.App.Writer, \"Error getting file from %v: %v\\n\", repoName, err)\n\t\t\t\t\t}\n\t\t\t\t\tselect {\n\t\t\t\t\tcase current := <-skip:\n\t\t\t\t\t\tskip <- (current + 1)\n\t\t\t\t\tcase skippedRepos <- repoName:\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcontent, err := rawContent.GetContent()\n\t\t\t\tif err != nil {\n\t\t\t\t\tif debug {\n\t\t\t\t\t\tfmt.Fprintf(c.App.Writer, \"Error getting content from %v/%v: %v\\n\", repoName, fileName, err)\n\t\t\t\t\t}\n\t\t\t\t\tselect {\n\t\t\t\t\tcase current := <-skip:\n\t\t\t\t\t\tskip <- (current + 1)\n\t\t\t\t\tcase skippedRepos <- repoName:\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdirLoc := filepath.Join(path, repoName)\n\t\t\t\tfileLoc := filepath.Join(path, repoName, fileName)\n\t\t\t\terr = os.MkdirAll(dirLoc, os.ModePerm)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif debug {\n\t\t\t\t\t\tfmt.Fprintf(c.App.Writer, \"Error making directory: %v\\n\", dirLoc)\n\t\t\t\t\t}\n\t\t\t\t\tselect {\n\t\t\t\t\tcase current := <-skip:\n\t\t\t\t\t\tskip <- (current + 1)\n\t\t\t\t\tcase skippedRepos <- repoName:\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\terr = ioutil.WriteFile(fileLoc, []byte(content), 0644)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif debug {\n\t\t\t\t\t\tfmt.Fprintf(c.App.Writer, \"Error writing file: %v\\n\", fileLoc)\n\t\t\t\t\t}\n\t\t\t\t\tselect {\n\t\t\t\t\tcase current := <-skip:\n\t\t\t\t\t\tskip <- (current + 1)\n\t\t\t\t\tcase skippedRepos <- repoName:\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif debug {\n\t\t\t\t\tfmt.Fprintf(c.App.Writer, \"File delivered: %v\\n\", fileLoc)\n\t\t\t\t}\n\t\t\t\tselect {\n\t\t\t\tcase current := <-down:\n\t\t\t\t\tdown <- (current + 1)\n\t\t\t\t}\n\t\t\t}(&wg, repoName, branchName)\n\t\t}\n\t\tif resp.NextPage == 0 {\n\t\t\tbreak\n\t\t}\n\t\topt.Page = resp.NextPage\n\t}\n\twg.Wait()\n\tclose(skippedRepos)\n\tif debug {\n\t\tfmt.Fprintf(c.App.Writer, \"\\n\\n\\nSkipped Repositories:\\n\")\n\t\tfor r := range skippedRepos 
{\n\t\t\tfmt.Fprintf(c.App.Writer, \"%v\\n\", r)\n\t\t}\n\t}\n\tfmt.Fprintf(c.App.Writer, \"\\n\\n\\n\")\n\tfmt.Fprintf(c.App.Writer, \"total number of repositories found %v\\n\", numRepos)\n\tfmt.Fprintf(c.App.Writer, \"total number of files delivered:  %v\\n\", <-down)\n\tfmt.Fprintf(c.App.Writer, \"total number of repositories skipped:  %v\\n\", <-skip)\n\treturn nil\n}",
  "func (v *vcsCmd) Download(dir string) error {\n\tfor _, cmd := range v.downloadCmd {\n\t\tif err := v.run(dir, cmd); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
  "func (p *FileInf) initDownload(fileList *fileListDl) error {\r\n\tvar err error\r\n\tif p.Progress {\r\n\t\tfmt.Printf(\"Download files from a folder '%s'.\\n\", fileList.SearchedFolder.Name)\r\n\t\tfmt.Printf(\"There are %d files and %d folders in the folder.\\n\", fileList.TotalNumberOfFiles, fileList.TotalNumberOfFolders-1)\r\n\t\tfmt.Println(\"Starting download.\")\r\n\t}\r\n\tidToName := map[string]interface{}{}\r\n\tfor i, e := range fileList.FolderTree.Folders {\r\n\t\tidToName[e] = fileList.FolderTree.Names[i]\r\n\t}\r\n\tfor _, e := range fileList.FileList {\r\n\t\tpath := p.Workdir\r\n\t\tfor _, dir := range e.FolderTree {\r\n\t\t\tpath = filepath.Join(path, idToName[dir].(string))\r\n\t\t}\r\n\t\terr = p.makeDirByCondition(path)\r\n\t\tif err != nil {\r\n\t\t\treturn err\r\n\t\t}\r\n\t\tfor _, file := range e.Files {\r\n\t\t\tfile.Path = path\r\n\t\t\tsize, _ := strconv.ParseInt(file.Size, 10, 64)\r\n\t\t\tp.Size = size\r\n\t\t\terr = p.makeFileByCondition(file)\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn err\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\tp.Msgar = append(p.Msgar, fmt.Sprintf(\"There were %d files and %d folders in the folder.\", fileList.TotalNumberOfFiles, fileList.TotalNumberOfFolders-1))\r\n\treturn nil\r\n}",
  "func downloadContent(wg *sync.WaitGroup, linkTo string) {\n\tdefer wg.Done()\n\n\tsetDownloadFolder()\n\n\tresp, err := http.Get(linkTo)\n\tfmt.Println(\"Downloading... Please wait!\")\n\tcolor.Green(resp.Status)\n\tdefer resp.Body.Close()\n\n\tif err != nil {\n\t\tlog.Fatal(\"Trouble making GET request!\")\n\t}\n\n\tcontents, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Fatal(\"Trouble reading response body!\")\n\t}\n\n\tfilename := path.Base(linkTo)\n\tif filename == \"\" {\n\t\tlog.Fatalf(\"Trouble deriving file name for %s\", linkTo)\n\t}\n\n\terr = ioutil.WriteFile(filename, contents, 0644)\n\tif err != nil {\n\t\tlog.Fatal(\"Trouble creating file! -- \", err)\n\t}\n}",
  "func Download() error {\n\tfor i := 2008; i < time.Now().Year(); i++ {\n\t\terr := DownloadYear(i)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr := DownloadFile(\"/pub/data/noaa/isd-history.csv\", \"data/isd-history.csv\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = DownloadFile(\"/pub/data/noaa/isd-inventory.csv.z\", \"data/isd-history.csv.z\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
  "func downloadFiles(files *[]synth.File) {\n\tvar wg sync.WaitGroup\n\twg.Add(len(*files))\n\n\tfmt.Println()\n\tfor _, file := range *files {\n\t\tparams := make(map[string]string)\n\t\tparams[\"file\"] = file.Path\n\n\t\tconn := contact.NewConnection(bufferSize)\n\t\tconn.Dial(serverHost, \"/download\", params)\n\n\t\tgo saveFile(conn, file.Path, &wg)\n\t}\n\n\twg.Wait()\n\tfmt.Println()\n}",
  "func downloadFaces() error {\n\n\tstorer = memory.NewStorage()\n\torigin = memfs.New()\n\n\t//clone the repo\n\t_, err := git.Clone(storer, origin, &git.CloneOptions{\n\t\tURL: facesURL,\n\t})\n\tif err != nil {\n\t\tfmt.Println(\"Error from clone  \", err)\n\t\tlog.Fatal(err)\n\t}\n\n\t//read in list of images from repo file system\n\tmemFaces, err := origin.ReadDir(repoDir)\n\tif err != nil {\n\t\tfmt.Println(\"Error from read dir  \", err)\n\t\tlog.Fatal(err)\n\t}\n\n\t//create tmp dir\n\tif _, err := os.Stat(imagesDir); os.IsNotExist(err) {\n\t\terr := os.Mkdir(imagesDir, 0755)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"error mkdir for \" + imagesDir)\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tfor _, f := range memFaces {\n\t\t//only interested in png files\n\t\text := filepath.Ext(f.Name())\n\t\tif ext != \".png\" {\n\t\t\tbreak\n\t\t}\n\n\t\t//open a file from repo file system\n\t\tsrc, err := origin.Open(repoDir + \"/\" + f.Name())\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error from open: \", err)\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\t//create a new file in tmp\n\t\tdst, err := os.Create(imagesDir + \"/\" + f.Name())\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error from create: \", err)\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\t//copy file to tmp\n\t\t_, err = io.Copy(dst, src)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error from copy: \", err)\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\t//close tmp file\n\t\tif err := dst.Close(); err != nil {\n\t\t\tfmt.Println(\"Error from close (dist): \", err)\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\t//close repo file\n\t\tif err := src.Close(); err != nil {\n\t\t\tfmt.Println(\"Error from close (src): \", err)\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\treturn nil\n}",
  "func TestDownloadAllFiles(t *testing.T) {\n\n\t//Test parsing a valid file\n\tfiles, err := parseFromFile(\"example.json\")\n\tif err != nil {\n\t\tt.Fatal(\"error should have  not occurred\", err.Error())\n\t} else if len(files) == 0 {\n\t\tt.Fatal(\"no files found, there should be at least 1\")\n\t}\n\n\t//Download all the files\n\terr = downloadAllFiles(files)\n\tif err != nil {\n\t\tt.Fatal(\"failed in downloading all files: \", err.Error())\n\t}\n}",
  "func (r Renderer) downloadRepositoryCRDs(ctx context.Context, repo RemoteRepositoryDefinition) ([]v1.CustomResourceDefinition, error) {\n\trefString := fmt.Sprintf(\"tags/%s\", repo.Reference)\n\tref, response, err := r.GithubClient.Git.GetRef(ctx, repo.Owner, repo.Name, refString)\n\tif err != nil && response.StatusCode == 404 {\n\t\trefString = fmt.Sprintf(\"heads/%s\", repo.Reference)\n\t\tref, _, err = r.GithubClient.Git.GetRef(ctx, repo.Owner, repo.Name, refString)\n\t}\n\tif err != nil {\n\t\treturn nil, microerror.Mask(err)\n\t}\n\n\tcommit, _, err := r.GithubClient.Git.GetCommit(ctx, repo.Owner, repo.Name, ref.Object.GetSHA())\n\tif err != nil {\n\t\treturn nil, microerror.Mask(err)\n\t}\n\n\ttree, _, err := r.GithubClient.Git.GetTree(ctx, repo.Owner, repo.Name, commit.Tree.GetSHA(), true)\n\tif err != nil {\n\t\treturn nil, microerror.Mask(err)\n\t}\n\n\tvar targetEntries []*github.TreeEntry\n\tfor _, entry := range tree.Entries {\n\t\tif entry.GetType() == \"blob\" && strings.HasPrefix(entry.GetPath(), repo.Path) {\n\t\t\ttargetEntries = append(targetEntries, entry)\n\t\t}\n\t}\n\tif targetEntries == nil {\n\t\treturn nil, notFoundError\n\t}\n\n\tvar allCrds []v1.CustomResourceDefinition\n\tfor _, entry := range targetEntries {\n\t\tblob, _, err := r.GithubClient.Git.GetBlob(ctx, repo.Owner, repo.Name, entry.GetSHA())\n\t\tif err != nil {\n\t\t\treturn nil, microerror.Mask(err)\n\t\t}\n\n\t\tcontent, err := base64.StdEncoding.DecodeString(blob.GetContent())\n\t\tif err != nil {\n\t\t\treturn nil, microerror.Mask(err)\n\t\t}\n\n\t\tcontentReader := io.NopCloser(bytes.NewReader(content))\n\n\t\tcrds, err := decodeCRDs(contentReader)\n\t\tif err != nil {\n\t\t\treturn nil, microerror.Mask(err)\n\t\t}\n\n\t\tallCrds = append(allCrds, crds...)\n\t}\n\n\treturn allCrds, nil\n}",
  "func downloadDataFile(dataDir string, filename string, url string, c chan error) {\n\tfullPath := fmt.Sprintf(\"%s/%s\", dataDir, filename)\n\tout, err := os.Create(fullPath)\n\tif err != nil {\n\t\tc <- err\n\t\treturn\n\t}\n\tdefer out.Close()\n\n\tresponse, err := http.Get(url)\n\tif err != nil {\n\t\tc <- err\n\t\treturn\n\t}\n\tif response.StatusCode != 200 {\n\t\tc <- fmt.Errorf(\"response code %d for %s\", response.StatusCode, url)\n\t\t//c <- DownloadError{message: fmt.Sprintf(\"response code %d for %s\", response.StatusCode, url)}\n\t\terr = out.Close()\n\t\tif err != nil {\n\t\t\tc <- err\n\t\t\treturn\n\t\t}\n\t\terr = os.Remove(fullPath)\n\t\tif err != nil {\n\t\t\tc <- err\n\t\t}\n\t\treturn\n\t}\n\tdefer response.Body.Close()\n\n\t_, err = io.Copy(out, response.Body)\n\tif err != nil {\n\t\tc <- err\n\t\treturn\n\t}\n\n\tc <- nil\n}",
  "func downloadFiles(ctx *log.Context, dir string, cfg handlerSettings) error {\n\t// - prepare the output directory for files and the command output\n\t// - create the directory if missing\n\tctx.Log(\"event\", \"creating output directory\", \"path\", dir)\n\tif err := os.MkdirAll(dir, 0700); err != nil {\n\t\treturn errors.Wrap(err, \"failed to prepare output directory\")\n\t}\n\tctx.Log(\"event\", \"created output directory\")\n\n\t// - download files\n\tctx.Log(\"files\", len(cfg.FileURLs))\n\tfor i, f := range cfg.FileURLs {\n\t\tctx := ctx.With(\"file\", i)\n\t\tctx.Log(\"event\", \"download start\")\n\t\tif err := downloadAndProcessURL(ctx, f, dir, cfg.StorageAccountName, cfg.StorageAccountKey); err != nil {\n\t\t\tctx.Log(\"event\", \"download failed\", \"error\", err)\n\t\t\treturn errors.Wrapf(err, \"failed to download file[%d]\", i)\n\t\t}\n\t\tctx.Log(\"event\", \"download complete\", \"output\", dir)\n\t}\n\treturn nil\n}",
  "func (d *HTTPDownloader) Download(wd string) error {\n\tlog.Printf(\"Downloading %s\\n\", d.ProjectURL)\n\n\tclient := resty.New()\n\tclient.SetHeader(\"User-Agent\", \"overleaf2git - https://github.com/asasmoyo/overleaf2git\")\n\tclient.SetRedirectPolicy(resty.FlexibleRedirectPolicy(3))\n\tclient.SetOutputDirectory(wd)\n\tclient.SetCookie(&http.Cookie{\n\t\tName:  sessionKey,\n\t\tValue: d.SessionKey,\n\t})\n\n\tif os.Getenv(\"DEBUG\") != \"\" {\n\t\tclient.Debug = true\n\t}\n\n\t// download the project\n\tprojectID := parseProjectID(d.ProjectURL)\n\tprojectURL := fmt.Sprintf(projectZipURLFormat, projectID)\n\t_, err := client.R().\n\t\tSetOutput(output).\n\t\tGet(projectURL)\n\treturn err\n}",
  "func (gc *recursiveGetContents) download(ctx context.Context) error {\n\tgc.wg.Add(1)\n\tgc.check(gc.recursive(ctx, gc.path))\n\tgc.wg.Wait()\n\n\tselect {\n\tcase err := <-gc.errors:\n\t\treturn err\n\tdefault:\n\t\treturn nil\n\t}\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	getItems returns slice with info about items in repository 
 | 
	func getItems(repoIndex *index.Index, url string) []FileInfo {
	var items []FileInfo
	for _, os := range repoIndex.Data.Keys() {
		for _, arch := range repoIndex.Data[os].Keys() {
			for _, category := range repoIndex.Data[os][arch].Keys() {
				for _, version := range repoIndex.Data[os][arch][category] {
					items = append(items, FileInfo{
						File: version.File,
						URL:  url + "/" + version.Path + "/" + version.File,
						OS:   os,
						Arch: arch,
						Size: version.Size,
					})
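					// A version may carry variations, each with its own downloadable file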
					if len(version.Variations) != 0 {
						for _, subVersion := range version.Variations {
							items = append(items, FileInfo{
								File: subVersion.File,
								URL:  url + "/" + subVersion.Path + "/" + subVersion.File,
								OS:   os,
								Arch: arch,
								Size: subVersion.Size,
							})
						}
					}
				}
			}
		}
	}
	return items
} 
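
getItems fills a FileInfo struct that is not declared in this snippet; a plausible shape reconstructed from the fields used above (the Size type is assumed to be int64 to match the fsutil.GetSize comparison in downloadRepositoryData):

// FileInfo describes a single downloadable file from the repository index
// (assumed declaration, inferred from usage)
type FileInfo struct {
	File string // File name
	URL  string // Full download URL
	OS   string // Target operating system
	Arch string // Target architecture
	Size int64  // Expected file size in bytes
}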
 | 
	[
  "func (c *Contentful) Items(skip int) (rc []byte, err error) {\n\turl := fmt.Sprintf(itemsURLTemplate, c.ReadConfig.SpaceID, c.ReadConfig.Environment, c.ReadConfig.AccessToken, c.ReadConfig.Locale, skip)\n\treturn c.get(url)\n}",
  "func (m *Site) GetItems()([]BaseItemable) {\n    val, err := m.GetBackingStore().Get(\"items\")\n    if err != nil {\n        panic(err)\n    }\n    if val != nil {\n        return val.([]BaseItemable)\n    }\n    return nil\n}",
  "func (c *container) Items(prefix, cursor string, count int) ([]stow.Item, string, error) {\n\tvar entries []entry\n\tentries, cursor, err := c.getFolderItems([]entry{}, prefix, \"\", filepath.Join(c.location.config.basePath, c.name), cursor, count, false)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\n\t// Convert entries to []stow.Item\n\tsItems := make([]stow.Item, len(entries))\n\tfor i := range entries {\n\t\tsItems[i] = entries[i].item\n\t}\n\n\treturn sItems, cursor, nil\n}",
  "func (c *Container) Items(prefix string, cursor string, count int) ([]stow.Item, string, error) {\n\tquery := &storage.Query{Prefix: prefix}\n\tcall := c.Bucket().Objects(c.ctx, query)\n\n\tp := iterator.NewPager(call, count, cursor)\n\tvar results []*storage.ObjectAttrs\n\tnextPageToken, err := p.NextPage(&results)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\n\tvar items []stow.Item\n\tfor _, item := range results {\n\t\ti, err := c.convertToStowItem(item)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\n\t\titems = append(items, i)\n\t}\n\n\treturn items, nextPageToken, nil\n}",
  "func (h *History) Items(opts ItemsOptions) ([]Item, bool, error) {\n\treq, err := http.NewRequest(\"GET\", fmt.Sprintf(\"/version/1/history/%s/items\", h.ID), nil)\n\n\tvalues := req.URL.Query()\n\tvalues.Set(\"start-index\", strconv.Itoa(opts.StartIndex))\n\treq.URL.RawQuery = values.Encode()\n\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\tresp, err := h.Client.do(req)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, false, fmt.Errorf(\"http response code: %s\", resp.Status)\n\t}\n\n\tbs, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\tvar v itemsResponse\n\tif err := json.Unmarshal(bs, &v); err != nil {\n\t\treturn nil, false, err\n\t}\n\tvar items = []Item{}\n\tfor _, m := range v.Items {\n\t\tfor id, item := range m {\n\t\t\titem.UUID = id\n\t\t\titems = append(items, item)\n\t\t}\n\t}\n\th.LoadedServerIndex = h.LoadedServerIndex + len(v.Items)\n\th.LatestServerIndex = v.CurrentItemIndex\n\th.EndTotalContentSize = v.EndTotalContentSize\n\th.LatestTotalContentSize = v.LatestTotalContentSize\n\thasMoreItems := h.LoadedServerIndex < h.LatestServerIndex\n\treturn items, hasMoreItems, nil\n}",
  "func (m *Drive) GetItems()([]DriveItemable) {\n    return m.items\n}",
  "func getItems(w http.ResponseWriter, r *http.Request) {\n\titems, err := supermart.GetItems(r.Context(), r)\n\tif err != nil {\n\t\tutils.WriteErrorResponse(http.StatusInternalServerError, err, w)\n\t\treturn\n\t}\n\tutils.WriteResponse(http.StatusOK, items, w)\n}",
  "func (c *Client) GetItems(ctx context.Context, owner string) (Items, error) {\n\tresponse, err := c.sendRequest(ctx, owner, http.MethodGet, fmt.Sprintf(\"%s/%s\", c.storeBaseURL, c.bucket), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif response.Code != http.StatusOK {\n\t\tlevel.Error(c.getLogger(ctx)).Log(xlog.MessageKey(), \"Argus responded with non-200 response for GetItems request\",\n\t\t\t\"code\", response.Code, \"ErrorHeader\", response.ArgusErrorHeader)\n\t\treturn nil, fmt.Errorf(errStatusCodeFmt, response.Code, translateNonSuccessStatusCode(response.Code))\n\t}\n\n\tvar items Items\n\n\terr = json.Unmarshal(response.Body, &items)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"GetItems: %w: %s\", errJSONUnmarshal, err.Error())\n\t}\n\n\treturn items, nil\n}",
  "func getGitHubRepoItems(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\titemType := vars[\"itemType\"]\n\n\tif itemType == \"branch\" || itemType == \"commit\" {\n\t\towner := vars[\"owner\"]\n\t\trepo := vars[\"repo\"]\n\t\ttoken, err := parseTokenFromCookie(r)\n\t\thelpers.HandleError(err)\n\n\t\tvar res interface{}\n\n\t\tif itemType == \"branch\" {\n\t\t\tres, err = github.GetBranches(token, owner, repo)\n\t\t\thelpers.HandleError(err)\n\t\t} else {\n\t\t\tfrom, err := time.Parse(dtParamLayout, r.URL.Query().Get(\"from\"))\n\t\t\tif err != nil {\n\t\t\t\tfrom = time.Now().AddDate(0, 0, -7)\n\t\t\t}\n\t\t\tto, err := time.Parse(dtParamLayout, r.URL.Query().Get(\"to\"))\n\t\t\tif err != nil {\n\t\t\t\tto = time.Now()\n\t\t\t}\n\n\t\t\tres, err = github.GetCommits(token, owner, repo, from, to)\n\t\t\thelpers.HandleError(err)\n\t\t}\n\n\t\tapiResponse(map[string]interface{}{\"data\": res}, w)\n\t} else {\n\t\thelpers.HandleError(errors.New(\"Invalid itemType param\"))\n\t}\n}",
  "func (api *API) ItemsGet(params Params) (res Items, err error) {\n\tif _, present := params[\"output\"]; !present {\n\t\tparams[\"output\"] = \"extend\"\n\t}\n\tresponse, err := api.CallWithError(\"item.get\", params)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treflector.MapsToStructs2(response.Result.([]interface{}), &res, reflector.Strconv, \"json\")\n\treturn\n}",
  "func (_CraftingI *CraftingICallerSession) Items(arg0 *big.Int) (struct {\n\tBaseType uint8\n\tItemType uint8\n\tCrafted  uint32\n\tCrafter  *big.Int\n}, error) {\n\treturn _CraftingI.Contract.Items(&_CraftingI.CallOpts, arg0)\n}",
  "func (_CraftingI *CraftingISession) Items(arg0 *big.Int) (struct {\n\tBaseType uint8\n\tItemType uint8\n\tCrafted  uint32\n\tCrafter  *big.Int\n}, error) {\n\treturn _CraftingI.Contract.Items(&_CraftingI.CallOpts, arg0)\n}",
  "func getItems(ctx context.Context, request events.APIGatewayProxyRequest,\n\tih *item.Handler) (events.APIGatewayProxyResponse, error) {\n\n\tvar list *item.List\n\n\tlist, err := ih.List(ctx, request.PathParameters[PathParamCategoryID])\n\tif err != nil {\n\t\treturn web.GetResponse(ctx, err.Error(), http.StatusInternalServerError)\n\t}\n\n\treturn web.GetResponse(ctx, list, http.StatusOK)\n}",
  "func getItems(keyword string, page int) *elastic.SearchHits {\n\t// search result from the tags,title,content of the article item\n\tquery := elastic.NewMultiMatchQuery(keyword, \"tags\", \"title\", \"content\")\n\tresult, err := client.Search().\n\t\t\tIndex(\"juejin\").\n\t\t\tQuery(query).\n\t\t\tFrom((page - 1) * 10).Size(10).\n\t\t\tDo(ctx)\n\tif ok := ErrorPrint(err); ok {\n\t\treturn result.Hits\n\t}\n\treturn nil\n}",
  "func (c *Client) GetItems(id int) (Item, error) {\n\tc.defaultify()\n\tvar item Item\n\tresp, err := http.Get(fmt.Sprintf(\"%s/item/%d.json\", c.apiBase, id))\n\tif err != nil {\n\t\treturn item, err\n\t}\n\tdefer resp.Body.Close()\n\tdec := json.NewDecoder(resp.Body)\n\terr = dec.Decode(&item)\n\tif err != nil {\n\t\treturn item, err\n\t}\n\treturn item, nil\n}",
  "func GetItems(da DataAccess, username string) (*ItemList, error) {\n\t// find the user and verify they exist\n\tuser, err := FindUserByName(da, username)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// try to get their items\n\titems, err := da.GetItemsByUser(context.Background(), user.Id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// calculate net worth, asset total, liability total\n\tvar net, asset, liability int64\n\tfor i := range *items {\n\t\tif (*items)[i].Type == ItemTypeAsset {\n\t\t\tnet += (*items)[i].Value\n\t\t\tasset += (*items)[i].Value\n\t\t} else if (*items)[i].Type == ItemTypeLiability {\n\t\t\tnet -= (*items)[i].Value\n\t\t\tliability += (*items)[i].Value\n\t\t}\n\t}\n\n\treturn &ItemList{Username: username, Items: items, NetWorth: net, AssetTotal: asset, LiabilityTotal: liability}, nil\n}",
  "func (j *DSGitHub) FetchItemsRepository(ctx *Ctx) (err error) {\n\titems := []interface{}{}\n\titem, err := j.githubRepo(ctx, j.Org, j.Repo)\n\tFatalOnError(err)\n\tif item == nil {\n\t\tFatalf(\"there is no such repo %s/%s\", j.Org, j.Repo)\n\t}\n\titem[\"fetched_on\"] = fmt.Sprintf(\"%.6f\", float64(time.Now().UnixNano())/1.0e9)\n\tesItem := j.AddMetadata(ctx, item)\n\tif ctx.Project != \"\" {\n\t\titem[\"project\"] = ctx.Project\n\t}\n\tesItem[\"data\"] = item\n\titems = append(items, esItem)\n\terr = SendToElastic(ctx, j, true, UUID, items)\n\tif err != nil {\n\t\tPrintf(\"%s/%s: Error %v sending %d messages to ES\\n\", j.URL, j.Category, err, len(items))\n\t}\n\treturn\n}",
  "func getItems(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\treq := &struct {\n\t\tKeys []string `codec:\"keys\"`\n\t}{}\n\n\terr := codec.NewDecoder(r.Body, msgpack).Decode(req)\n\tif err != nil {\n\t\tfailErr(w, err)\n\t\treturn\n\t}\n\n\tresults := make([]*keyval, 0, len(req.Keys))\n\tfor _, key := range req.Keys {\n\t\tval, err := db.Get([]byte(key), nil)\n\t\tif err == leveldb.ErrNotFound {\n\t\t\tcontinue\n\t\t} else if err != nil {\n\t\t\tfailErr(w, err)\n\t\t\treturn\n\t\t} else if val != nil {\n\t\t\tresults = append(results, &keyval{key, string(val)})\n\t\t}\n\t}\n\n\tw.Header().Set(\"Content-Type\", msgpackCType)\n\tcodec.NewEncoder(w, msgpack).Encode(multiResponse{nil, results})\n}",
  "func GetItems() map[int]*models.Item {\n\treturn ItemDB\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	saveIndex encodes index to JSON format and saves it into the file 
 | 
	func saveIndex(repoIndex *index.Index, dir string) {
	indexPath := path.Join(dir, INDEX_NAME)
	fmtc.Printf("Saving index… ")
	err := jsonutil.Write(indexPath, repoIndex)
	if err != nil {
		fmtc.Println("{r}ERROR{!}")
		printErrorAndExit("Can't save index as %s: %v", indexPath, err)
	}
	fmtc.Println("{g}DONE{!}")
} 
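
A round-trip sketch for saveIndex, assuming ek's jsonutil package exposes a Read counterpart to the Write call above (otherwise os.ReadFile plus encoding/json does the same job):

// Sketch: read the saved index back from disk
repoIndex := &index.Index{}
err := jsonutil.Read(path.Join(dir, INDEX_NAME), repoIndex)
if err != nil {
	printErrorAndExit("Can't read index: %v", err)
}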
 | 
	[
  "func (gh *Storage) writeIndex(index map[string]int64, sha string) error {\n\tcontents, err := json.Marshal(index)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn gh.writeFile(fs.IndexName, sha, contents)\n}",
  "func SaveIndex(idx *Index, path string) (err error) {\n\tf, err := ioutil.TempFile(filepath.Split(path))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tos.Remove(f.Name())\n\t\t}\n\t}()\n\n\tif err = json.NewEncoder(f).Encode(idx); err != nil {\n\t\treturn err\n\t}\n\n\tif err = f.Sync(); err != nil {\n\t\treturn err\n\t}\n\n\tif err = f.Close(); err != nil {\n\t\treturn err\n\t}\n\n\treturn os.Rename(f.Name(), path)\n}",
  "func SaveIndex(target string, source QueryList, verbose bool) {\n\tlogm(\"INFO\", fmt.Sprintf(\"saving index to %s...\", target), verbose)\n\tfile, err := os.Create(target)\n\tcheckResult(err)\n\tdefer file.Close()\n\n\tgr := gzip.NewWriter(file)\n\tdefer gr.Close()\n\n\tencoder := gob.NewEncoder(gr)\n\n\terr = encoder.Encode(source.Names)\n\tcheckResult(err)\n\tlogm(\"INFO\", fmt.Sprintf(\"%v sequence names saved\", len(source.Names)), verbose)\n\n\terr = encoder.Encode(source.SeedSize)\n\tcheckResult(err)\n\n\terr = encoder.Encode(source.Cgst)\n\tcheckResult(err)\n\n\t// save the index, but go has a size limit\n\tindexSize := len(source.Index)\n\terr = encoder.Encode(indexSize)\n\tcheckResult(err)\n\tlogm(\"INFO\", fmt.Sprintf(\"%v queries to save...\", indexSize), verbose)\n\n\tcount := 0\n\tfor key, value := range source.Index {\n\t\terr = encoder.Encode(key)\n\t\tcheckResult(err)\n\t\terr = encoder.Encode(value)\n\t\tcheckResult(err)\n\t\tcount++\n\t\tif count%10000 == 0 {\n\t\t\tlogm(\"INFO\", fmt.Sprintf(\"processing: saved %v items\", count), false)\n\t\t}\n\t}\n\n\tlogm(\"INFO\", fmt.Sprintf(\"saving index to %s: done\", target), verbose)\n}",
  "func (index *ind) writeIndexFile() {\n\tif _, err := os.Stat(index.name); os.IsNotExist(err) {\n\t\tindexFile, err := os.Create(index.name)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\tindexFile.Close()\n\t}\n\n\tb := new(bytes.Buffer)\n\te := gob.NewEncoder(b)\n\n\terr := e.Encode(index)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tioutil.WriteFile(index.name, b.Bytes(), 7777)\n}",
  "func WriteIndex(index common.Index) error {\n\tbytes, err := json.Marshal(index)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(indexCachePath, bytes, 0600)\n\treturn err\n}",
  "func (g Index) WriteIndex(file io.Writer) error {\n\tsort.Sort(ByPath(g.Objects))\n\ts := sha1.New()\n\tw := io.MultiWriter(file, s)\n\tbinary.Write(w, binary.BigEndian, g.fixedGitIndex)\n\tfor _, entry := range g.Objects {\n\t\tbinary.Write(w, binary.BigEndian, entry.FixedIndexEntry)\n\t\tbinary.Write(w, binary.BigEndian, []byte(entry.PathName))\n\t\tpadding := 8 - ((82 + len(entry.PathName) + 4) % 8)\n\t\tp := make([]byte, padding)\n\t\tbinary.Write(w, binary.BigEndian, p)\n\t}\n\tbinary.Write(w, binary.BigEndian, s.Sum(nil))\n\treturn nil\n}",
  "func SaveIndex(indexKey string, stub shim.ChaincodeStubInterface) error {\n\t//  Note - passing a 'nil' value will effectively delete the key from state, therefore we pass null character as value\n\t//  Save index entry to state. Only the key Name is needed, no need to store a duplicate copy of the marble.\n\tvalue := []byte{0x00}\n\t// index save\n\tputStateError := stub.PutState(indexKey, value)\n\tif putStateError != nil {\n\t\treturn errors.New(putStateError.Error())\n\t}\n\treturn nil\n}",
  "func (i *Index) SaveJSON() ([]byte, error) {\n\treturn json.Marshal(i.Objects)\n}",
  "func saveIndex(indexName string, path string, documentFileName string) {\n\tfileSquence := 0\n\tindexPath := indexName + \"/\"\n\tpattern := filepath.Join(indexPath, \"*.idx\")\n\n\tif _, err := os.Stat(indexName); os.IsNotExist(err) {\n\t\tos.Mkdir(indexName, 0700) //Write from the same program\n\t}\n\n\t// Folder Created. Now we have to check the next available sequence of file\n\n\texistingIndexFiles, err := filepath.Glob(pattern)\n\n\tfileSquence = len(existingIndexFiles)\n\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t}\n\n\tif len(existingIndexFiles) > 0 {\n\t\tfileSquence = len(existingIndexFiles)\n\t}\n\n\tindexFileName := indexPath + strconv.Itoa(fileSquence) + \".idx\"\n\tdocFileName := indexPath + documentFileName + \".document\"\n\n\t// Save the Document File with .document extension\n\tdocFile, err := os.OpenFile(docFileName, os.O_CREATE|os.O_RDWR, 0644)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t}\n\n\tindexFile, err := os.OpenFile(indexFileName, os.O_CREATE|os.O_RDWR, 0644)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t}\n\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t}\n\n\t_, err = save(entries, tokenized, docFile, indexFile)\n\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t}\n}",
  "func WriteJSON(logger log.Logger, indexFn string, fn string) error {\n\tindexFile, err := fileutil.OpenMmapFile(indexFn)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"open mmap index file %s\", indexFn)\n\t}\n\tdefer runutil.CloseWithLogOnErr(logger, indexFile, \"close index cache mmap file from %s\", indexFn)\n\n\tb := realByteSlice(indexFile.Bytes())\n\tindexr, err := index.NewReader(b)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"open index reader\")\n\t}\n\tdefer runutil.CloseWithLogOnErr(logger, indexr, \"load index cache reader\")\n\n\t// We assume reader verified index already.\n\tsymbols, err := getSymbolTable(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf, err := os.Create(fn)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"create index cache file\")\n\t}\n\tdefer runutil.CloseWithLogOnErr(logger, f, \"index cache writer\")\n\n\tv := indexCache{\n\t\tVersion:      indexr.Version(),\n\t\tCacheVersion: JSONVersion1,\n\t\tSymbols:      symbols,\n\t\tLabelValues:  map[string][]string{},\n\t}\n\n\t// Extract label value indices.\n\tlnames, err := indexr.LabelNames()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"read label indices\")\n\t}\n\tfor _, ln := range lnames {\n\t\tvals, err := indexr.LabelValues(ln)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"get label values\")\n\t\t}\n\t\tv.LabelValues[ln] = vals\n\t}\n\n\t// Extract postings ranges.\n\tpranges, err := indexr.PostingsRanges()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"read postings ranges\")\n\t}\n\tfor l, rng := range pranges {\n\t\tv.Postings = append(v.Postings, postingsRange{\n\t\t\tName:  l.Name,\n\t\t\tValue: l.Value,\n\t\t\tStart: rng.Start,\n\t\t\tEnd:   rng.End,\n\t\t})\n\t}\n\n\tif err := json.NewEncoder(f).Encode(&v); err != nil {\n\t\treturn errors.Wrap(err, \"encode file\")\n\t}\n\treturn nil\n}",
  "func (idx *Index) ToBytes(w io.Writer) error {\n\tedx := encodableTypeIndex{\n\t\tTextIndex: idx.textIndex,\n\t\tResults:   idx.results,\n\t}\n\n\tgw, err := gzip.NewWriterLevel(w, gzip.NoCompression)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"create gzip writer: %v\", err)\n\t}\n\n\tenc := gob.NewEncoder(gw)\n\terr = enc.Encode(edx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"encode index: %v\", err)\n\t}\n\n\terr = gw.Close()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"close gzip writer: %v\", err)\n\t}\n\n\treturn nil\n}",
  "func (x *Index) Write(w io.Writer) error",
  "func (db *Access) WriteIndex(ie *datatypes.IndexEntry) int64 {\r\n\t//TODO fix this func with appropriate seeking based on ie.\r\n\t//Also, need to update map and not insert when ID already there (update).\r\n\tlog.Printf(\"ie object: %v\", ie)\r\n\t//Get write start (value to be returned)\r\n\tvar offset int64\r\n\tif ie.GetIndexData().IndexFileOffset == -1 {\r\n\t\toffset, _ = db.fileHandles.indexFile.Seek(0, 2)\r\n\t} else {\r\n\t\toffset, _ = db.fileHandles.indexFile.Seek(ie.GetIndexData().IndexFileOffset, 0)\r\n\t}\r\n\r\n\t//Write to disk but ALSO to in-memory table\r\n\tdb.fileHandles.indexFile.Write(ie.WriteableRepr())\r\n\tdb.fileHandles.indexFile.Sync()\r\n\r\n\t//Update indexEntry object with obtained offset\r\n\tie.SetIndexFileOffset(offset)\r\n\r\n\t//in-memory table\r\n\tdb.indexTable.Insert(ie)\r\n\r\n\treturn offset\r\n}",
  "func encodeIndex(d *Index) *internal.Index {\n\treturn &internal.Index{\n\t\tName: d.name,\n\t\tMeta: &internal.IndexMeta{\n\t\t\tColumnLabel: d.columnLabel,\n\t\t\tTimeQuantum: string(d.timeQuantum),\n\t\t},\n\t\tMaxSlice: d.MaxSlice(),\n\t\tFrames:   encodeFrames(d.Frames()),\n\t}\n}",
  "func IndexWrite(x *suffixarray.Index, w io.Writer) error",
  "func (i *Index) saveMeta() error {\n\t// Marshal metadata.\n\tbuf, err := proto.Marshal(&internal.IndexMeta{\n\t\tKeys:           i.keys,\n\t\tTrackExistence: i.trackExistence,\n\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"marshalling\")\n\t}\n\n\t// Write to meta file.\n\tif err := ioutil.WriteFile(filepath.Join(i.path, \".meta\"), buf, 0666); err != nil {\n\t\treturn errors.Wrap(err, \"writing\")\n\t}\n\n\treturn nil\n}",
  "func (w *Writer) writeIndex() (int64, error) {\n\tw.written = true\n\n\tbuf := new(bytes.Buffer)\n\tst := sst.NewWriter(buf)\n\n\tw.spaceIds.Sort()\n\n\t// For each defined space, we index the space's\n\t// byte offset in the file and the length in bytes\n\t// of all data in the space.\n\tfor _, spaceId := range w.spaceIds {\n\t\tb := new(bytes.Buffer)\n\n\t\tbinary.WriteInt64(b, w.spaceOffsets[spaceId])\n\t\tbinary.WriteInt64(b, w.spaceLengths[spaceId])\n\n\t\tif err := st.Set([]byte(spaceId), b.Bytes()); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tif err := st.Close(); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn buf.WriteTo(w.file)\n}",
  "func SerializeReverseIndex(hs HashSign, ri ReverseIndex) []byte {\n\n\tbuf := new(bytes.Buffer)\n\n\tbinary.Write(buf, binary.LittleEndian, hs)\n\tbinary.Write(buf, binary.LittleEndian, ri.KeyOffset)\n\tbinary.Write(buf, binary.LittleEndian, ri.DocListOffset)\n\tbinary.Write(buf, binary.LittleEndian, ri.DocListLen)\n\tbinary.Write(buf, binary.LittleEndian, ri.DocListCap)\n\n\treturn buf.Bytes()\n}",
  "func (c *Collection) saveIndexes() error {\n\tib, err := json.Marshal(c.indexes)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.db.datastore.Put(dsIndexes.ChildString(c.name), ib)\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					
	getCurrentIndexUUID returns the current index UUID (if it exists) 
 | 
	func getCurrentIndexUUID(dir string) string {
	indexFile := path.Join(dir, INDEX_NAME)
	if !fsutil.IsExist(indexFile) {
		return ""
	}
	i := &index.Index{}
	if jsonutil.Read(indexFile, i) != nil {
		return ""
	}
	return i.UUID
} 
 | 
	[
  "func GetCurrentIndexForProduct(conn redis.Conn, key string, field string) string {\n\tproductExis, errExist := redis.Int(conn.Do(\"hexists\", key, field))\n\tif errExist != nil {\n\t\treturn \"\"\n\t}\n\tif productExis == 0 {\n\t\treturn \"\"\n\t}\n\tvalueIndex, _ := redis.String(conn.Do(\"hget\", key, field))\n\treturn valueIndex\n}",
  "func GetCurrentIndexForUser(conn redis.Conn, key string, field string) string {\n\tuserExis, errExist := redis.Int(conn.Do(\"hexists\", key, field))\n\tif errExist != nil {\n\t\treturn \"\"\n\t}\n\tif userExis == 0 {\n\t\treturn \"\"\n\t}\n\tvalueIndex, _ := redis.String(conn.Do(\"hget\", key, field))\n\treturn valueIndex\n}",
  "func (instance *Instance) GetUuid() (uuid uint64) {\n\tif val := instance.GetIndexInstance(); val != nil {\n\t\treturn val.GetInstId()\n\t} else {\n\t\t// TODO: should we panic ?\n\t}\n\treturn\n}",
  "func GetIndexID(t *testing.T, tk *testkit.TestKit, dbName, tblName, idxName string) int64 {\n\tis := domain.GetDomain(tk.Session()).InfoSchema()\n\ttt, err := is.TableByName(model.NewCIStr(dbName), model.NewCIStr(tblName))\n\trequire.NoError(t, err)\n\n\tfor _, idx := range tt.Indices() {\n\t\tif idx.Meta().Name.L == idxName {\n\t\t\treturn idx.Meta().ID\n\t\t}\n\t}\n\n\trequire.FailNow(t, fmt.Sprintf(\"index %s not found(db: %s, tbl: %s)\", idxName, dbName, tblName))\n\treturn -1\n}",
  "func (sm *SeedManager) CurrentIndex() uint64 {\n\treturn sm.maxUseIndex\n}",
  "func (c *gcsCore) getContainerIDFromIndex(index uint32) string {\n\tc.containerIndexMutex.Lock()\n\tdefer c.containerIndexMutex.Unlock()\n\n\tif int(index) < len(c.containerIndex) {\n\t\treturn c.containerIndex[index]\n\t}\n\n\treturn \"\"\n}",
  "func (_UsersData *UsersDataCallerSession) GetUuidByIndex(index *big.Int) ([16]byte, error) {\n\treturn _UsersData.Contract.GetUuidByIndex(&_UsersData.CallOpts, index)\n}",
  "func (_UsersData *UsersDataCaller) GetUuidByIndex(opts *bind.CallOpts, index *big.Int) ([16]byte, error) {\n\tvar (\n\t\tret0 = new([16]byte)\n\t)\n\tout := ret0\n\terr := _UsersData.contract.Call(opts, out, \"getUuidByIndex\", index)\n\treturn *ret0, err\n}",
  "func (t *ArticleHistory) GetIndexID() string {\n\treturn fmt.Sprintf(\"%s.%d\", t.ArticleID, t.Version)\n}",
  "func (c *CodeShipProvider) GetProjectIDFromIndex(index int) (string, error) {\n\treturn c.Projects[index].UUID, nil\n}",
  "func (l *Log) CurrentIndexWithOutLock() uint64 {\n\tif len(l.entries) == 0 {\n\t\treturn 0\n\t}\n\treturn uint64(l.entries[len(l.entries)-1].Logsn)\n}",
  "func (t *Article) GetIndexID() string {\n\treturn fmt.Sprintf(\"%s.%d\", t.ID, t.Version)\n}",
  "func (i *Indexio) Current(key string) (idx uint32, err error) {\n\terr = i.db.Read(func(txn turtleDB.Txn) (err error) {\n\t\tvar bkt turtleDB.Bucket\n\t\tif bkt, err = txn.Get(\"indexes\"); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tidx, err = getCurrent(bkt, key)\n\t\treturn\n\t})\n\n\tif idx == 0 {\n\t\terr = ErrKeyUnset\n\t\treturn\n\t}\n\n\tidx--\n\treturn\n}",
  "func (l *List) GetCurrentIdx() int {\n\treturn l.currentIdx\n}",
  "func (room Room) GetCurrentIdx() int {\n\tid := -1\n\tif room.State >= IdxTurn {\n\t\tid = room.State - IdxTurn\n\t}\n\treturn id\n}",
  "func (r *raftLog) getIndex(first bool) (uint64, error) {\n\ttx, err := r.conn.Begin(false)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tvar (\n\t\tkey []byte\n\t\tidx uint64\n\t)\n\tcurs := tx.Bucket(logsBucket).Cursor()\n\tif first {\n\t\tkey, _ = curs.First()\n\t} else {\n\t\tkey, _ = curs.Last()\n\t}\n\tif key != nil {\n\t\tidx = binary.BigEndian.Uint64(key)\n\t}\n\ttx.Rollback()\n\treturn idx, nil\n}",
  "func (c *context) CurrentIndex() uint64 {\n\treturn c.currentIndex\n}",
  "func GetIndexOIDForResource(resource string) string {\n\treturn resourceIndex[resource]\n}",
  "func (tq *TodoQuery) FirstIDX(ctx context.Context) uuid.UUID {\n\tid, err := tq.FirstID(ctx)\n\tif err != nil && !IsNotFound(err) {\n\t\tpanic(err)\n\t}\n\treturn id\n}"
]  | 
	{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} 
 | 
					