query
stringlengths
7
3.85k
document
stringlengths
11
430k
metadata
dict
negatives
sequencelengths
0
101
negative_scores
sequencelengths
0
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Returns the value of the 'go_package' option of the first .proto file found in the same directory as projectFile
func detectGoPackageForProject(projectFile string) (string, error) { var goPkg string projectDir := filepath.Dir(projectFile) if err := filepath.Walk(projectDir, func(protoFile string, info os.FileInfo, err error) error { // already set if goPkg != "" { return nil } if !strings.HasSuffix(protoFile, ".proto") { return nil } // search for go_package on protos in the same dir as the project.json if projectDir != filepath.Dir(protoFile) { return nil } content, err := ioutil.ReadFile(protoFile) if err != nil { return err } lines := strings.Split(string(content), "\n") for _, line := range lines { goPackage := goPackageStatementRegex.FindStringSubmatch(line) if len(goPackage) == 0 { continue } if len(goPackage) != 2 { return errors.Errorf("parsing go_package error: from %v found %v", line, goPackage) } goPkg = goPackage[1] break } return nil }); err != nil { return "", err } if goPkg == "" { return "", errors.Errorf("no go_package statement found in root dir of project %v", projectFile) } return goPkg, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (g *Generator) GoFilePackage(depfile *fdep.DepFile) string {\n\treturn fproto_wrap.BaseName(g.GoWrapPackage(depfile))\n}", "func (g *Generator) GoPackage(depfile *fdep.DepFile) string {\n\tfor _, o := range depfile.ProtoFile.Options {\n\t\tif o.Name == \"go_package\" {\n\t\t\treturn o.Value.String()\n\t\t}\n\t}\n\treturn path.Dir(depfile.FilePath)\n}", "func (c *common) GetPackage() string { return c.file.GetPackage() }", "func (pkg *goPackage) firstGoFile() string {\n\tgoSrcs := []platformStringsBuilder{\n\t\tpkg.library.sources,\n\t\tpkg.binary.sources,\n\t\tpkg.test.sources,\n\t}\n\tfor _, sb := range goSrcs {\n\t\tif sb.strs != nil {\n\t\t\tfor s := range sb.strs {\n\t\t\t\tif strings.HasSuffix(s, \".go\") {\n\t\t\t\t\treturn s\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}", "func (fd *File) GoPackagePath() string {\n\treturn fd.builder.GoPackagePath\n}", "func GoPackage(packageName string) string {\n\tif packageName == \"\" {\n\t\treturn \"\"\n\t}\n\tsplit := strings.Split(packageName, \".\")\n\treturn split[len(split)-1] + \"pb\"\n}", "func goPackageName(d *descriptor.FileDescriptorProto) (name string, explicit bool) {\n\t// Does the file have a \"go_package\" option?\n\tif _, pkg, ok := goPackageOption(d); ok {\n\t\treturn pkg, true\n\t}\n\n\t// Does the file have a package clause?\n\tif pkg := d.GetPackage(); pkg != \"\" {\n\t\treturn pkg, false\n\t}\n\t// Use the file base name.\n\treturn baseName(d.GetName()), false\n}", "func (d *FileDescriptor) goPackageName() (name string, explicit bool) {\n\t// Does the file have a \"go_package\" option?\n\tif _, pkg, ok := d.goPackageOption(); ok {\n\t\treturn pkg, true\n\t}\n\n\t// Does the file have a package clause?\n\tif pkg := d.GetPackage(); pkg != \"\" {\n\t\treturn pkg, false\n\t}\n\t// Use the file base name.\n\treturn baseName(d.GetName()), false\n}", "func (d *FileDescriptor) PackageName() string { return uniquePackageOf(d.FileDescriptorProto) }", "func (c *common) PackageName() string { return 
uniquePackageOf(c.file) }", "func (d *FileDescriptor) goFileName(pathType pathType) string {\n\tname := *d.Name\n\tif ext := path.Ext(name); ext == \".proto\" || ext == \".protodevel\" {\n\t\tname = name[:len(name)-len(ext)]\n\t}\n\tname += \".cobra.pb.go\"\n\n\tif pathType == pathTypeSourceRelative {\n\t\treturn name\n\t}\n\n\t// Does the file have a \"go_package\" option?\n\t// If it does, it may override the filename.\n\tif impPath, _, ok := d.goPackageOption(); ok && impPath != \"\" {\n\t\t// Replace the existing dirname with the declared import path.\n\t\t_, name = path.Split(name)\n\t\tname = path.Join(impPath, name)\n\t\treturn name\n\t}\n\n\treturn name\n}", "func (*GetProjectRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{25}\n}", "func goFileName(d *descriptor.FileDescriptorProto) string {\n\tname := *d.Name\n\tif ext := path.Ext(name); ext == \".proto\" || ext == \".protodevel\" {\n\t\tname = name[:len(name)-len(ext)]\n\t}\n\tname += \".nrpc.go\"\n\n\t// Does the file have a \"go_package\" option?\n\t// If it does, it may override the filename.\n\tif impPath, _, ok := goPackageOption(d); ok && impPath != \"\" {\n\t\t// Replace the existing dirname with the declared import path.\n\t\t_, name = path.Split(name)\n\t\tname = path.Join(impPath, name)\n\t\treturn name\n\t}\n\n\treturn name\n}", "func (pp *protoPackage) pkgPath() string {\n\treturn strings.Replace(pp.Pkg, \".\", \"/\", -1)\n}", "func goPkg(fileName string) (string, error) {\n\tcontent, err := os.ReadFile(fileName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar pkgName string\n\tif match := goPkgOptRe.FindSubmatch(content); len(match) > 0 {\n\t\tpn, err := strconv.Unquote(string(match[1]))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tpkgName = pn\n\t}\n\tif p := strings.IndexRune(pkgName, ';'); p > 0 {\n\t\tpkgName = pkgName[:p]\n\t}\n\treturn pkgName, nil\n}", "func (project Project) Package() (string, error) {\n\n\tif 
project.packageName != \"\" {\n\t\treturn project.packageName, nil\n\t}\n\n\tgoModPath := project.RelPath(GoModFileName)\n\tif !project.FileExists(goModPath) {\n\t\treturn \"\", errors.New(\"Failed to determine the package name for this project\")\n\t}\n\n\tb, err := ioutil.ReadFile(goModPath)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Failed to read the go.mod file\")\n\t}\n\n\tmod, err := gomod.Parse(goModPath, b)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Failed to parse the go.mod file\")\n\t}\n\n\tproject.packageName = strings.TrimSuffix(mod.Name, \"/\")\n\n\treturn project.packageName, nil\n\n}", "func (g *Generator) GoWrapFilePackage(depfile *fdep.DepFile) string {\n\tif g.PkgSource != nil {\n\t\tif p, ok := g.PkgSource.GetFilePkg(g, depfile); ok {\n\t\t\treturn p\n\t\t}\n\t}\n\n\treturn \"fw\" + fproto_wrap.BaseName(g.GoWrapPackage(depfile))\n}", "func (f *FileStruct) GetPersistPackageOption() string {\n\tif f.Desc == nil || f.Desc.GetOptions() == nil {\n\t\treturn \"\"\n\t}\n\tif proto.HasExtension(f.Desc.GetOptions(), persist.E_Package) {\n\t\tpkg, err := proto.GetExtension(f.Desc.GetOptions(), persist.E_Package)\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Debug(\"Error\")\n\t\t\treturn \"\"\n\t\t}\n\t\t//logrus.WithField(\"pkg\", *pkg.(*string)).Info(\"Package\")\n\t\treturn *pkg.(*string)\n\t}\n\tlogrus.WithField(\"File Options\", f.Desc.GetOptions()).Debug(\"file options\")\n\treturn \"\"\n}", "func Which(s protoreflect.FullName) ProtoFile {\r\n\treturn wellKnownTypes[s]\r\n}", "func GetPackageName(source string) string {\n\tfileNode, err := parser.ParseFile(\"\", source, nil, parser.ImportsOnly)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn fileNode.Name.Name()\n}", "func (*GetProjectResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{26}\n}", "func (*Project) Descriptor() ([]byte, []int) {\n\treturn file_proto_carbon_proto_rawDescGZIP(), []int{0}\n}", "func 
ProtoFromFileDescriptor(d protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto {\n\tif imp, ok := d.(protoreflect.FileImport); ok {\n\t\td = imp.FileDescriptor\n\t}\n\ttype canProto interface {\n\t\tFileDescriptorProto() *descriptorpb.FileDescriptorProto\n\t}\n\tif res, ok := d.(canProto); ok {\n\t\treturn res.FileDescriptorProto()\n\t}\n\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\tif fd, ok := res.AsProto().(*descriptorpb.FileDescriptorProto); ok {\n\t\t\treturn fd\n\t\t}\n\t}\n\treturn protodesc.ToFileDescriptorProto(d)\n}", "func goPackageOption(d *descriptor.FileDescriptorProto) (impPath, pkg string, ok bool) {\n\tpkg = d.GetOptions().GetGoPackage()\n\tif pkg == \"\" {\n\t\treturn\n\t}\n\tok = true\n\t// The presence of a slash implies there's an import path.\n\tslash := strings.LastIndex(pkg, \"/\")\n\tif slash < 0 {\n\t\treturn\n\t}\n\timpPath, pkg = pkg, pkg[slash+1:]\n\t// A semicolon-delimited suffix overrides the package name.\n\tsc := strings.IndexByte(impPath, ';')\n\tif sc < 0 {\n\t\treturn\n\t}\n\timpPath, pkg = impPath[:sc], impPath[sc+1:]\n\treturn\n}", "func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) {\n\treturn file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2, 0}\n}", "func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto {\n\tp := &descriptorpb.FileDescriptorProto{\n\t\tName: proto.String(file.Path()),\n\t\tOptions: proto.Clone(file.Options()).(*descriptorpb.FileOptions),\n\t}\n\tif file.Package() != \"\" {\n\t\tp.Package = proto.String(string(file.Package()))\n\t}\n\tfor i, imports := 0, file.Imports(); i < imports.Len(); i++ {\n\t\timp := imports.Get(i)\n\t\tp.Dependency = append(p.Dependency, imp.Path())\n\t\tif imp.IsPublic {\n\t\t\tp.PublicDependency = append(p.PublicDependency, int32(i))\n\t\t}\n\t\tif imp.IsWeak {\n\t\t\tp.WeakDependency = append(p.WeakDependency, int32(i))\n\t\t}\n\t}\n\tfor i, locs := 0, file.SourceLocations(); i < locs.Len(); 
i++ {\n\t\tloc := locs.Get(i)\n\t\tl := &descriptorpb.SourceCodeInfo_Location{}\n\t\tl.Path = append(l.Path, loc.Path...)\n\t\tif loc.StartLine == loc.EndLine {\n\t\t\tl.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndColumn)}\n\t\t} else {\n\t\t\tl.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndLine), int32(loc.EndColumn)}\n\t\t}\n\t\tl.LeadingDetachedComments = append([]string(nil), loc.LeadingDetachedComments...)\n\t\tif loc.LeadingComments != \"\" {\n\t\t\tl.LeadingComments = proto.String(loc.LeadingComments)\n\t\t}\n\t\tif loc.TrailingComments != \"\" {\n\t\t\tl.TrailingComments = proto.String(loc.TrailingComments)\n\t\t}\n\t\tif p.SourceCodeInfo == nil {\n\t\t\tp.SourceCodeInfo = &descriptorpb.SourceCodeInfo{}\n\t\t}\n\t\tp.SourceCodeInfo.Location = append(p.SourceCodeInfo.Location, l)\n\n\t}\n\tfor i, messages := 0, file.Messages(); i < messages.Len(); i++ {\n\t\tp.MessageType = append(p.MessageType, ToDescriptorProto(messages.Get(i)))\n\t}\n\tfor i, enums := 0, file.Enums(); i < enums.Len(); i++ {\n\t\tp.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i)))\n\t}\n\tfor i, services := 0, file.Services(); i < services.Len(); i++ {\n\t\tp.Service = append(p.Service, ToServiceDescriptorProto(services.Get(i)))\n\t}\n\tfor i, exts := 0, file.Extensions(); i < exts.Len(); i++ {\n\t\tp.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i)))\n\t}\n\tif syntax := file.Syntax(); syntax != protoreflect.Proto2 {\n\t\tp.Syntax = proto.String(file.Syntax().String())\n\t}\n\treturn p\n}", "func (*PatchProject) Descriptor() ([]byte, []int) {\n\treturn file_determined_project_v1_project_proto_rawDescGZIP(), []int{4}\n}", "func (*Project) Descriptor() ([]byte, []int) {\n\treturn file_determined_project_v1_project_proto_rawDescGZIP(), []int{2}\n}", "func (d *FileDescriptor) goPackageOption() (impPath, pkg string, ok bool) {\n\tpkg = d.GetOptions().GetGoPackage()\n\tif pkg == \"\" 
{\n\t\treturn\n\t}\n\tok = true\n\t// The presence of a slash implies there's an import path.\n\tslash := strings.LastIndex(pkg, \"/\")\n\tif slash < 0 {\n\t\treturn\n\t}\n\timpPath, pkg = pkg, pkg[slash+1:]\n\t// A semicolon-delimited suffix overrides the package name.\n\tsc := strings.IndexByte(impPath, ';')\n\tif sc < 0 {\n\t\treturn\n\t}\n\timpPath, pkg = impPath[:sc], impPath[sc+1:]\n\treturn\n}", "func (*ProjectSimple) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{29}\n}", "func (*GoPackageInfo) Descriptor() ([]byte, []int) {\n\treturn file_kythe_proto_go_proto_rawDescGZIP(), []int{1}\n}", "func deduceGenPkgName(genFiles []*descriptor.FileDescriptorProto) (string, error) {\n\tvar genPkgName string\n\tfor _, f := range genFiles {\n\t\tname, explicit := goPackageName(f)\n\t\tif explicit {\n\t\t\tname = stringutils.CleanIdentifier(name)\n\t\t\tif genPkgName != \"\" && genPkgName != name {\n\t\t\t\t// Make sure they're all set consistently.\n\t\t\t\treturn \"\", errors.Errorf(\"files have conflicting go_package settings, must be the same: %q and %q\", genPkgName, name)\n\t\t\t}\n\t\t\tgenPkgName = name\n\t\t}\n\t}\n\tif genPkgName != \"\" {\n\t\treturn genPkgName, nil\n\t}\n\n\t// If there is no explicit setting, then check the implicit package name\n\t// (derived from the protobuf package name) of the files and make sure it's\n\t// consistent.\n\tfor _, f := range genFiles {\n\t\tname, _ := goPackageName(f)\n\t\tname = stringutils.CleanIdentifier(name)\n\t\tif genPkgName != \"\" && genPkgName != name {\n\t\t\treturn \"\", errors.Errorf(\"files have conflicting package names, must be the same or overridden with go_package: %q and %q\", genPkgName, name)\n\t\t}\n\t\tgenPkgName = name\n\t}\n\n\t// All the files have the same name, so we're good.\n\treturn genPkgName, nil\n}", "func GetFirstGoPath() string {\n\treturn strings.Split(os.Getenv(\"GOPATH\"), \":\")[0]\n}", "func lookupProjPath(protoAbs string) (result string) 
{\n\tlastIndex := len(protoAbs)\n\tcurPath := protoAbs\n\n\tfor lastIndex > 0 {\n\t\tif fileExist(curPath+\"/cmd\") && fileExist(curPath+\"/api\") {\n\t\t\tresult = curPath\n\t\t\treturn\n\t\t}\n\t\tlastIndex = strings.LastIndex(curPath, string(os.PathSeparator))\n\t\tcurPath = protoAbs[:lastIndex]\n\t}\n\tresult = \"\"\n\treturn\n}", "func Namespace(file *descriptor.FileDescriptorProto) string {\n\toptions := file.GetOptions()\n\n\t// When there is a namespace option defined we use it\n\tif options.PhpNamespace != nil {\n\t\treturn options.GetPhpNamespace()\n\t}\n\n\treturn Name(file.GetPackage())\n}", "func protobufName(f *ast.Field) string {\n\tfor _, attr := range f.Attrs {\n\t\tif strings.HasPrefix(attr.Text, \"@protobuf\") {\n\t\t\tfor _, str := range strings.Split(attr.Text[10:len(attr.Text)-1], \",\") {\n\t\t\t\tif strings.HasPrefix(str, \"name=\") {\n\t\t\t\t\treturn str[5:]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}", "func (*Project) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{79}\n}", "func (*Project) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{76}\n}", "func (*ExistingFile) Descriptor() ([]byte, []int) {\n\treturn file_protoconfig_go_kingpinv2_v1_extensions_proto_rawDescGZIP(), []int{1}\n}", "func getTmplFileDesc(fds []*descriptor.FileDescriptorProto) (string, *descriptor.FileDescriptorProto, error) {\n\tvar templateDescriptorProto *descriptor.FileDescriptorProto\n\tfor _, fd := range fds {\n\t\tif fd.GetOptions() == nil || !proto.HasExtension(fd.GetOptions(), tmpl.E_TemplateVariety) {\n\t\t\tcontinue\n\t\t}\n\t\tif templateDescriptorProto != nil {\n\t\t\treturn \"\", nil, fmt.Errorf(\n\t\t\t\t\"proto files %s and %s, both have the option %s. 
Only one proto file is allowed with this options\",\n\t\t\t\tfd.GetName(), templateDescriptorProto.GetName(), tmpl.E_TemplateVariety.Name)\n\t\t}\n\t\ttemplateDescriptorProto = fd\n\t}\n\n\tif templateDescriptorProto == nil {\n\t\treturn \"\", nil, fmt.Errorf(\"there has to be one proto file that has the extension %s\", tmpl.E_TemplateVariety.Name)\n\t}\n\n\tvar tmplName string\n\tif nameExt, err := proto.GetExtension(templateDescriptorProto.GetOptions(), tmpl.E_TemplateName); err != nil {\n\t\treturn \"\", nil, fmt.Errorf(\n\t\t\t\"proto files %s is missing required template_name option\", templateDescriptorProto.GetName())\n\t} else if err := validateTmplName(*(nameExt.(*string))); err != nil {\n\t\treturn \"\", nil, err\n\t} else {\n\t\ttmplName = *(nameExt.(*string))\n\t}\n\n\treturn tmplName, templateDescriptorProto, nil\n}", "func generateFile(gen *protogen.Plugin, file *protogen.File) {\n\tfilename := file.GeneratedFilenamePrefix + \"_message.pb.go\"\n\tg := gen.NewGeneratedFile(filename, file.GoImportPath)\n\n\tg.P(\"// Code generated by protoc-gen-message-validator. 
DO NOT EDIT.\")\n\tg.P()\n\tg.P(\"package \", file.GoPackageName)\n\tg.P()\n\n\tfor _, message := range file.Messages {\n\t\tstructName := string(message.Desc.Name())\n\t\tprefix := strings.ToLower(string(structName[0]))\n\n\t\tfor _, subMessage := range message.Messages {\n\t\t\tif subMessage.Desc.IsMapEntry() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tsubStructName := string(subMessage.Desc.Name())\n\t\t\tgenerateMessage(fmt.Sprintf(\"%s_%s\", structName, subStructName), prefix, subMessage, g)\n\t\t}\n\n\t\tgenerateMessage(structName, prefix, message, g)\n\t}\n}", "func (*AppGroup) Descriptor() ([]byte, []int) {\n\treturn file_common_proto_rawDescGZIP(), []int{1}\n}", "func (*UpdateProjectRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{21}\n}", "func (*ProjectModel) Descriptor() ([]byte, []int) {\n\treturn file_determined_project_v1_project_proto_rawDescGZIP(), []int{3}\n}", "func (*Project) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_project_api_ocp_project_api_proto_rawDescGZIP(), []int{12}\n}", "func (s *Stub) GetProject() string {\n\treturn \"\"\n}", "func (*UpdateProjectResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{22}\n}", "func guessPackageName(b *util.BuildCtxt, base string) string {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"main\"\n\t}\n\n\tpkg, err := b.Import(base, cwd, 0)\n\tif err != nil {\n\t\t// There may not be any top level Go source files but the project may\n\t\t// still be within the GOPATH.\n\t\tif strings.HasPrefix(base, b.GOPATH) {\n\t\t\tp := strings.TrimPrefix(base, b.GOPATH)\n\t\t\treturn strings.Trim(p, string(os.PathSeparator))\n\t\t}\n\t}\n\n\treturn pkg.ImportPath\n}", "func GetGoPackage(url string) string {\n\tswitch {\n\tcase strings.Contains(url, \";\"):\n\t\tidx := strings.LastIndex(url, \";\")\n\t\treturn url[idx+1:]\n\tcase strings.Contains(url, \"/\"):\n\t\tidx := strings.LastIndex(url, \"/\")\n\t\treturn 
url[idx+1:]\n\tdefault:\n\t\treturn url\n\t}\n}", "func (g *Generator) GoWrapPackage(depfile *fdep.DepFile) string {\n\tif g.PkgSource != nil {\n\t\tif p, ok := g.PkgSource.GetPkg(g, depfile); ok {\n\t\t\treturn p\n\t\t}\n\t}\n\n\tfor _, o := range depfile.ProtoFile.Options {\n\t\tif o.Name == \"gowrap_package\" {\n\t\t\treturn o.Value.String()\n\t\t}\n\t}\n\n\t// prepend \"fpwrap\"\n\tfor _, o := range depfile.ProtoFile.Options {\n\t\tif o.Name == \"go_package\" {\n\t\t\treturn path.Join(\"fpwrap\", o.Value.String())\n\t\t}\n\t}\n\treturn path.Join(\"fpwrap\", path.Dir(depfile.FilePath))\n}", "func getFileExtensionBySdk(precompiledObjectPath string) (string, error) {\n\tsdk := strings.Split(precompiledObjectPath, string(os.PathSeparator))[0]\n\tvar extension string\n\tswitch sdk {\n\tcase pb.Sdk_SDK_JAVA.String():\n\t\textension = javaExtension\n\tcase pb.Sdk_SDK_PYTHON.String():\n\t\textension = pyExtension\n\tcase pb.Sdk_SDK_GO.String():\n\t\textension = goExtension\n\tcase pb.Sdk_SDK_SCIO.String():\n\t\textension = scioExtension\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"\")\n\t}\n\treturn extension, nil\n}", "func (p *Provider) GetProject() string {\n\to := p.opts\n\tif len(o.projects) > 1 {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"multiple projects not supported (%d specified)\", len(o.projects)))\n\t}\n\treturn o.projects[0]\n}", "func (*ListProjectsResponse_Project) Descriptor() ([]byte, []int) {\n\treturn file_web_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*ProjectMember) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{78}\n}", "func (*GetUserProjectsRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{27}\n}", "func (t *Type) fullGoPackageName() string {\n\tif t.qname.namespace != t.Namespace {\n\t\treturn t.qname.namespace.fullGoPackageName\n\t}\n\treturn \"\"\n}", "func (i Import) Package() string {\n\tif v := i.Alias; len(v) != 0 {\n\t\treturn v\n\t}\n\n\tif v := i.Path; len(v) != 0 
{\n\t\tparts := strings.Split(v, \"/\")\n\t\tpkg := parts[len(parts)-1]\n\t\treturn pkg\n\t}\n\n\treturn \"\"\n}", "func (*CreateProjectResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{18}\n}", "func (*CreateProjectRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{17}\n}", "func (*Projects) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{78}\n}", "func (b *Buffer) FileType() string {\n\treturn b.Settings[\"filetype\"].(string)\n}", "func (*ProjectMember) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{81}\n}", "func (x *fastReflection_ModuleOptions) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value {\n\tswitch descriptor.FullName() {\n\tcase \"cosmos.autocli.v1.ModuleOptions.tx\":\n\t\tvalue := x.Tx\n\t\treturn protoreflect.ValueOfMessage(value.ProtoReflect())\n\tcase \"cosmos.autocli.v1.ModuleOptions.query\":\n\t\tvalue := x.Query\n\t\treturn protoreflect.ValueOfMessage(value.ProtoReflect())\n\tdefault:\n\t\tif descriptor.IsExtension() {\n\t\t\tpanic(fmt.Errorf(\"proto3 declared messages do not support extensions: cosmos.autocli.v1.ModuleOptions\"))\n\t\t}\n\t\tpanic(fmt.Errorf(\"message cosmos.autocli.v1.ModuleOptions does not contain field %s\", descriptor.FullName()))\n\t}\n}", "func (*DescribeProjectRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_project_api_ocp_project_api_proto_rawDescGZIP(), []int{8}\n}", "func GetProto(src string) (string, error) {\n\tparsed, err := url.Parse(src)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(parsed.Scheme) > 0 {\n\t\treturn parsed.Scheme, nil\n\t}\n\n\treturn \"\", nil\n}", "func (*NewProject) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_project_api_ocp_project_api_proto_rawDescGZIP(), []int{13}\n}", "func goPath() string {\n\tgpDefault := build.Default.GOPATH\n\tgps := filepath.SplitList(gpDefault)\n\n\treturn gps[0]\n}", "func 
(p *plugin) analyzeFile(f *descriptor.FileDescriptorProto) error {\n\tif f.GetSyntax() != \"proto3\" {\n\t\treturn fmt.Errorf(\"unsupported syntax '%s', must be 'proto3'\", f.GetSyntax())\n\t}\n\n\tfile := goFile{structs: map[string]goStruct{}}\n\n\tfor _, m := range f.GetMessageType() {\n\t\tif err := p.analyzeMessageType(file, []string{}, m); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to analyze message type '%s': %s\", m.GetName(), err.Error())\n\t\t}\n\t}\n\n\tif len(file.structs) > 0 {\n\t\tn := filepath.Base(f.GetName())\n\t\tn = strings.TrimSuffix(n, filepath.Ext(n))\n\t\tp.targetFiles[n+\".pb.go\"] = file\n\t}\n\n\treturn nil\n}", "func (resolver *NpmResolver) ParsePkgFile(pkgFile string) (*Package, error) {\n\tcontent, err := ioutil.ReadFile(pkgFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar packageInfo Package\n\tif err := json.Unmarshal(content, &packageInfo); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &packageInfo, nil\n}", "func getPackageName(datatypeName string) string {\n\tparts := strings.Split(datatypeName, \".\")\n\tif len(parts) == 1 {\n\t\treturn \"\" // no package name\n\t}\n\n\toffset := 0\n\tfor i, p := range parts {\n\t\tif unicode.IsUpper(rune(p[0])) {\n\t\t\tbreak\n\t\t}\n\n\t\toffset += len(p)\n\t\tif i > 0 {\n\t\t\toffset += 1 // also account for the '.'\n\t\t}\n\t}\n\n\treturn datatypeName[:offset]\n}", "func (pp *protoPackage) absPath() string {\n\treturn path.Join(pp.Path, pp.pkgPath())\n}", "func (f *tmplFuncs) resolvePkgPath(pkg string) string {\n\t// Test this proto file itself:\n\tif stripExt(filepath.Base(*f.f.Name)) == pkg {\n\t\treturn *f.f.Name\n\t}\n\n\t// Test each dependency:\n\tfor _, p := range f.f.Dependency {\n\t\tif stripExt(filepath.Base(p)) == pkg {\n\t\t\treturn p\n\t\t}\n\t}\n\treturn \"\"\n}", "func (*MyCompany) Descriptor() ([]byte, []int) {\n\treturn file_parser_company_proto_rawDescGZIP(), []int{21}\n}", "func IsCommonProto(f *desc.FileDescriptor) bool {\n\tp := f.GetPackage()\n\tfor _, 
prefix := range []string{\"google.api\", \"google.protobuf\", \"google.rpc\", \"google.longrunning\"} {\n\t\tif strings.HasPrefix(p, prefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (i *Import) GetTFVCProject() string {\n\tif i == nil || i.TFVCProject == nil {\n\t\treturn \"\"\n\t}\n\treturn *i.TFVCProject\n}", "func (*Program) Descriptor() ([]byte, []int) {\n\treturn file_proto_common_proto_rawDescGZIP(), []int{1}\n}", "func (*DescribeProjectResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_project_api_ocp_project_api_proto_rawDescGZIP(), []int{9}\n}", "func (*DcsProject) Descriptor() ([]byte, []int) {\n\treturn file_dcs_model_proto_rawDescGZIP(), []int{2}\n}", "func (o *ProformaArray) GetProject() string {\n\tif o == nil || o.Project == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Project\n}", "func (*CheckProjectTokenResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{33}\n}", "func (*ProjectColumn) Descriptor() ([]byte, []int) {\n\treturn file_determined_project_v1_project_proto_rawDescGZIP(), []int{0}\n}", "func getCallerPackage() string {\n\tconst replayModeRecordModeCaller = 3\n\tc := caller.Get(replayModeRecordModeCaller)\n\tpkg := strings.SplitN(c, \".\", 2)[0]\n\treturn path.Base(pkg)\n}", "func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath }", "func (b *baseBuilder) GetFile() *FileBuilder {\n\tp := b.parent\n\tfor p != nil {\n\t\tif fb, ok := p.(*FileBuilder); ok {\n\t\t\treturn fb\n\t\t}\n\t\tp = p.GetParent()\n\t}\n\treturn nil\n}", "func GoServicePackagePath(name string) string {\n\treturn filepath.Join(PaceBase, ServiceBase, name)\n}", "func (*UpdateProjectRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_project_api_ocp_project_api_proto_rawDescGZIP(), []int{10}\n}", "func GetPkgName() string {\n\t_, filePath, _, _ := runtime.Caller(0)\n\tfile, _ := os.Open(filePath)\n\tr := bufio.NewReader(file)\n\tline, _, _ := r.ReadLine()\n\tpkgName := 
bytes.TrimPrefix(line, []byte(\"package \"))\n\n\treturn string(pkgName)\n}", "func (p *Parser) Package() string {\n\treturn p.asTree.Name.Name\n}", "func ProtoServiceName(fullname string) Option {\n\treturn func(a *appOptions) {\n\t\ta.protoService = fullname\n\t}\n}", "func (*Projects) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{75}\n}", "func packageFilename(pwd, relativePath string) string {\n\tfullPath := filepath.Join(pwd, relativePath)\n\treturn strings.TrimPrefix(strings.TrimPrefix(fullPath, filepath.Join(gopath(), \"src\")), \"/\")\n}", "func (*CheckProjectTokenRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{32}\n}", "func (*ProjectID) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{6}\n}", "func (*ProjectID) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{6}\n}", "func (o *ProformaArray) GetProjectOk() (*string, bool) {\n\tif o == nil || o.Project == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Project, true\n}", "func getPackageName(f string) string {\n\tfor {\n\t\tlastPeriod := strings.LastIndex(f, \".\")\n\t\tlastSlash := strings.LastIndex(f, \"/\")\n\t\tif lastPeriod > lastSlash {\n\t\t\tf = f[:lastPeriod]\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn f\n}", "func Project(ctx context.Context, project string) (*configpb.ProjectConfig, error) {\n\tconfigs, err := Projects(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif c, ok := configs[project]; ok {\n\t\treturn c, nil\n\t}\n\treturn nil, ErrNotFoundProjectConfig\n}", "func (*GetMyRequest) Descriptor() ([]byte, []int) {\n\treturn file_parser_company_proto_rawDescGZIP(), []int{6}\n}", "func (*GlobalOptions) Descriptor() ([]byte, []int) {\n\treturn file_github_com_google_cloudprober_targets_gce_proto_config_proto_rawDescGZIP(), []int{3}\n}", "func (pi *PackageInfo) FileVName(file *ast.File) *spb.VName {\n\tif v := pi.fileVName[file]; v != nil 
{\n\t\treturn v\n\t}\n\tv := proto.Clone(pi.VName).(*spb.VName)\n\tv.Language = \"\"\n\tv.Signature = \"\"\n\tv.Path = pi.FileSet.Position(file.Pos()).Filename\n\treturn v\n}" ]
[ "0.6514599", "0.6418956", "0.62568474", "0.6100058", "0.6059389", "0.6033528", "0.5760218", "0.571393", "0.56766814", "0.56747204", "0.55886865", "0.55810106", "0.55710924", "0.5528127", "0.5502348", "0.55021805", "0.5456921", "0.5454185", "0.54367715", "0.54307157", "0.54025465", "0.53424656", "0.5329483", "0.5327733", "0.53020066", "0.52819157", "0.52795607", "0.52257407", "0.52240294", "0.51456493", "0.5137637", "0.5134006", "0.5133966", "0.51120484", "0.5107064", "0.50847185", "0.5082025", "0.507457", "0.50681937", "0.5033462", "0.50315183", "0.50245786", "0.5019538", "0.5017327", "0.50053775", "0.50039554", "0.49943617", "0.49909803", "0.49890676", "0.49804884", "0.49707755", "0.4931849", "0.49237567", "0.4921064", "0.4905122", "0.49045667", "0.49015555", "0.49012348", "0.4892581", "0.4889273", "0.48874435", "0.48817107", "0.48771524", "0.4876309", "0.48685285", "0.48641378", "0.4861054", "0.48610055", "0.48546317", "0.4849436", "0.48461854", "0.48250479", "0.4822479", "0.48205945", "0.48192737", "0.48135585", "0.48060772", "0.4804189", "0.48035768", "0.48003185", "0.47990838", "0.4787936", "0.47851935", "0.47817233", "0.4781146", "0.47775137", "0.47767115", "0.4776245", "0.47737917", "0.47712553", "0.476954", "0.47676048", "0.47668436", "0.47668436", "0.47661626", "0.47600132", "0.47533566", "0.47468054", "0.47438252", "0.4743572" ]
0.7273096
0
NewQueueManager instantiates a new QueueManager object. This constructor assigns default values to properties that define them and ensures the properties required by the API are set; note that the set of constructor arguments will change whenever the set of required properties changes.
func NewQueueManager(name string, clusters []string, aliasQueues []AliasQueue, remoteQueues []RemoteQueue, clusterQueues []ClusterQueue, ) *QueueManager { this := QueueManager{} this.Name = name this.Clusters = clusters this.AliasQueues = aliasQueues this.RemoteQueues = remoteQueues this.ClusterQueues = clusterQueues return &this }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewQueueManager(logger log.Logger, cfg config.QueueConfig, externalLabels labels.Labels, relabelConfigs []*relabel.Config, client StorageClient, flushDeadline time.Duration) *QueueManager {\n\tif logger == nil {\n\t\tlogger = log.NewNopLogger()\n\t} else {\n\t\tlogger = log.With(logger, \"queue\", client.Name())\n\t}\n\tt := &QueueManager{\n\t\tlogger: logger,\n\t\tflushDeadline: flushDeadline,\n\t\tcfg: cfg,\n\t\texternalLabels: externalLabels,\n\t\trelabelConfigs: relabelConfigs,\n\t\tclient: client,\n\t\tqueueName: client.Name(),\n\n\t\tlogLimiter: rate.NewLimiter(logRateLimit, logBurst),\n\t\tnumShards: cfg.MinShards,\n\t\treshardChan: make(chan int),\n\t\tquit: make(chan struct{}),\n\n\t\tsamplesIn: newEWMARate(ewmaWeight, shardUpdateDuration),\n\t\tsamplesOut: newEWMARate(ewmaWeight, shardUpdateDuration),\n\t\tsamplesOutDuration: newEWMARate(ewmaWeight, shardUpdateDuration),\n\t}\n\tt.shards = t.newShards(t.numShards)\n\tnumShards.WithLabelValues(t.queueName).Set(float64(t.numShards))\n\tshardCapacity.WithLabelValues(t.queueName).Set(float64(t.cfg.Capacity))\n\n\t// Initialize counter labels to zero.\n\tsentBatchDuration.WithLabelValues(t.queueName)\n\tsucceededSamplesTotal.WithLabelValues(t.queueName)\n\tfailedSamplesTotal.WithLabelValues(t.queueName)\n\tdroppedSamplesTotal.WithLabelValues(t.queueName)\n\n\treturn t\n}", "func NewQueueManager(q amboy.Queue) Manager {\n\treturn &queueManager{\n\t\tqueue: q,\n\t}\n}", "func NewQueue() *Queue {\n return &Queue{member: make([]interface{}, 0)}\n}", "func NewQueueManagerWithDefaults() *QueueManager {\n\tthis := QueueManager{}\n\treturn &this\n}", "func NewQueue() Queue {\n\treturn Queue{}\n}", "func NewQueue(name string, itemType reflect.Type, maxQueueSize uint32) Queue {\n\tq := queue{\n\t\tname: name,\n\t\titemType: itemType,\n\t\tchannel: make(chan interface{}, maxQueueSize),\n\t}\n\treturn &q\n}", "func NewManager(h Handler,\n\tusername string,\n\tpassword string,\n\tbrokerIp string,\n\tbrokerPort 
int,\n\texchange string,\n\tqueueName string,\n\tworkers int,\n\tallocate bool,\n\tmanagerName string,\n\thandleFunction handlerFunction,\n\tlogLevel string,\n\tnet catalogue.BaseNetworkInt,\n\timg catalogue.BaseImageInt) (*Manager, error) {\n\n\tmanager := &Manager{\n\t\tConnection: nil,\n\t\tChannel: nil,\n\t\tallocate: allocate,\n\t\tworkers: workers,\n\t\terrorChan: make(chan error),\n\t\tlogger: GetLogger(managerName, logLevel),\n\t\thandlerFunction: handleFunction,\n\t\thandler: h,\n\t\timage: img,\n\t\tnetwork: net,\n\t}\n\n\terr := setupManager(username, password, brokerIp, brokerPort, manager, exchange, queueName)\n\tif err != nil {\n\t\tmanager.logger.Errorf(\"Error while setup the amqp thing: %v\", err)\n\t\treturn nil, err\n\t}\n\tmanager.queueName = queueName\n\treturn manager, nil\n}", "func NewQueue(name string) *Queue {\n\tredisClient := GetRedisClientFromConfig()\n\tqueue := &Queue{Name: name, RedisClient: redisClient}\n\treturn queue\n}", "func NewQueue(args []func(http.ResponseWriter, *http.Request) (http.ResponseWriter, *http.Request)) *Queue {\n\tq := &Queue{}\n\tfor _, f := range args {\n\t\tq.list = append(q.list, f)\n\t}\n\treturn q\n}", "func newQueueMeta(conf *Conf) *queueMeta {\n\treturn &queueMeta{conf: conf}\n}", "func NewQueue(ctx context.Context, queueID string, db *sql.DB, conf QueueConfig) (*Queue, error) {\n\tq := &Queue{ID: queueID}\n\tq.repo = repo.NewRepository(db)\n\tq.PollRate = 100 * time.Millisecond // Default\n\tq.queueSize = 10000 // Default\n\tq.retries = 3 // Default\n\tq.IsMultiQueue = conf.IsMultiQueue\n\tq.baseDelay = 3 * time.Second // Default\n\n\tif conf.PollingRate > 0 {\n\t\tq.PollRate = conf.PollingRate\n\t}\n\tif conf.Qsize > 0 {\n\t\tq.queueSize = conf.Qsize\n\t}\n\tif conf.BaseDelay > 0 {\n\t\tq.baseDelay = conf.BaseDelay\n\t}\n\tif conf.Retries >= 0 {\n\t\tq.retries = conf.Retries\n\t}\n\t// Multilevel Queue/channel created\n\ttemp := mlQueue{}\n\ttemp.notifier = make([]chan JobChan, 1)\n\ttemp.notifier[0] = 
make(chan JobChan, q.queueSize)\n\ttemp.total = 1\n\tq.mq = temp\n\n\tm := make(map[string][]worker.Worker)\n\tq.workers = m\n\tvar wg sync.WaitGroup\n\tq.wg = &wg\n\n\t// resume stopped jobs\n\terr := q.ResumePendingJobs(ctx)\n\tif err != nil {\n\t\tlogger.Log.Error(\"Unable to resume jobs from bucket: %s\", zap.Error(err))\n\t\t// Don't fail out, this isn't really fatal. But maybe it should be?\n\t}\n\treturn q, nil\n}", "func MyQueueConstructor() MyQueue {\n\treturn MyQueue{}\n}", "func NewQueue(maximumCapacity int, initialCapacity int, factory TokenFactory) *Queue {\n\tq := &Queue{\n\t\tmaxCapacity: maximumCapacity,\n\t\tavailableTokens: make(chan (Token), maximumCapacity),\n\t\tcommittedTokens: make(chan (Token), maximumCapacity),\n\t\tdiscardTokens: make(chan (Token), maximumCapacity),\n\t\tcloseTokens: make(chan (Token)),\n\t}\n\n\tfor i := 0; i < maximumCapacity; i++ {\n\t\ttoken := factory()\n\t\tif token == nil {\n\t\t\treturn nil\n\t\t}\n\t\tq.discardTokens <- token\n\t\tq.validTokens = append(q.validTokens, token)\n\t}\n\n\tq.EnableDisableTokens(initialCapacity)\n\n\treturn q\n}", "func NewQueue(l int) *Queue {\n\tif l == -1 {\n\t\treturn &Queue{\n\t\t\tQueue: make([]types.Event, 0),\n\t\t\tL: int(^uint(0) >> 1), // max integer value, architecture independent\n\t\t}\n\t}\n\tq := &Queue{\n\t\tQueue: make([]types.Event, 0, l),\n\t\tL: l,\n\t}\n\tlog.WithFields(log.Fields{\"Capacity\": q.L}).Debugf(\"Creating queue\")\n\treturn q\n}", "func New() Manager {\n\treturn Manager{\n\t\tState: make(map[string]string),\n\t\tClientHolder: make(map[string]utils.Set),\n\t\tClientQueue: make(map[string]utils.Queue),\n\t}\n}", "func setupManager(username string, password string, brokerIp string, brokerPort int, manager *Manager, exchange string, queueName string) error {\n\tamqpURI := getAmqpUri(username, password, brokerIp, brokerPort)\n\tmanager.logger.Debugf(\"dialing %s\", amqpURI)\n\tvar err error\n\tmanager.Connection, err = amqp.Dial(amqpURI)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\n\tmanager.logger.Debugf(\"got Connection, getting Channel\")\n\tmanager.Channel, err = manager.Connection.Channel()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debugf(\"got Channel, declaring Exchange (%q)\", exchange)\n\n\tmanager.logger.Debugf(\"declared Exchange, declaring Queue %q\", queueName)\n\tqueue, err := manager.Channel.QueueDeclare(\n\t\tqueueName,\n\t\ttrue,\n\t\ttrue,\n\t\tfalse,\n\t\tfalse,\n\t\tnil,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debugf(\"declared Queue (%q, %d messages, %d consumers), binding to Exchange\",\n\t\tqueue.Name, queue.Messages, queue.Consumers)\n\n\tif err = manager.Channel.QueueBind(\n\t\tqueue.Name, // name of the queue\n\t\tqueue.Name, // bindingKey\n\t\texchange, // sourceExchange\n\t\tfalse, // noWait\n\t\tnil, // arguments\n\t); err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debug(\"Queue bound to Exchange, starting Consume\")\n\treturn nil\n}", "func New(name string, c config.Config) *Queue {\n\treturn &Queue{\n\t\tname: name,\n\t\tconf: c,\n\t}\n}", "func NewQueue(action func(interface{}) error) *QueueWorker {\n\treturn &QueueWorker{\n\t\taction: action,\n\t\tlatch: &Latch{},\n\t\tmaxWork: DefaultQueueWorkerMaxWork,\n\t}\n}", "func New(cfg Config, pubSub pubSub, metrics metricsProvider) (*Queue, error) {\n\tmsgChan, err := pubSub.SubscribeWithOpts(context.Background(), topic, spi.WithPool(cfg.PoolSize))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"subscribe to topic [%s]: %w\", topic, err)\n\t}\n\n\tq := &Queue{\n\t\tpubSub: pubSub,\n\t\tmsgChan: msgChan,\n\t\tjsonMarshal: json.Marshal,\n\t\tjsonUnmarshal: json.Unmarshal,\n\t\tmetrics: metrics,\n\t}\n\n\tq.Lifecycle = lifecycle.New(\"operation-queue\",\n\t\tlifecycle.WithStart(q.start),\n\t\tlifecycle.WithStop(q.stop),\n\t)\n\n\tq.Start()\n\n\treturn q, nil\n}", "func NewQueue(maxWorkers int, maxQueue int) *Queue {\n\tq := make(chan Job, maxQueue)\n\treturn 
&Queue{\n\t\tq,\n\t\ttrue,\n\t\t&Dispatcher{\n\t\t\tjobQueue: q,\n\t\t\tworkerPool: make(chan chan Job, maxWorkers),\n\t\t\tMaxWorkers: maxWorkers,\n\t\t},\n\t}\n}", "func (queue *Queue) Init() (err error) {\n\tclient := queue.GetClient()\n\n\tparams := &sqs.CreateQueueInput{\n\t\tQueueName: aws.String(queue.Name + deadLetterQueueSuffix),\n\t\tAttributes: map[string]*string{\n\t\t\t\"MessageRetentionPeriod\": aws.String(\"1209600\"),\n\t\t},\n\t}\n\tresp, err := client.CreateQueue(params)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"queueName\": queue.Name,\n\t\t\t\"error\": err,\n\t\t}).Error(\"Createing the dead letter queue\")\n\t\treturn\n\t}\n\n\tqueue.DeadLetterQueueURL = *resp.QueueUrl\n\tlog.WithFields(log.Fields{\n\t\t\"QueueUrl\": queue.DeadLetterQueueURL,\n\t}).Info(\"Dead Letter Queue initialized\")\n\n\tqueueArnAttributeName := \"QueueArn\"\n\tdeadLetterQueueAttributes, err := queue.GetAttributesByQueueURL(queue.DeadLetterQueueURL, []*string{&queueArnAttributeName})\n\tif err != nil {\n\t\treturn\n\t}\n\tredrivePolicy := &RedrivePolicy{\n\t\tMaxReceiveCount: MaxReceiveCountBeforeDead,\n\t\tDeadLetterTargetArn: *deadLetterQueueAttributes.Attributes[queueArnAttributeName],\n\t}\n\tredrivePolicyString, err := redrivePolicy.GetAsAWSString()\n\tif err != nil {\n\t\treturn\n\t}\n\tparams = &sqs.CreateQueueInput{\n\t\tQueueName: aws.String(queue.Name),\n\t\tAttributes: map[string]*string{\n\t\t\t\"RedrivePolicy\": redrivePolicyString,\n\t\t\t\"MessageRetentionPeriod\": aws.String(\"1209600\"),\n\t\t},\n\t}\n\tresp, err = client.CreateQueue(params)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"queueName\": queue.Name,\n\t\t\t\"error\": err,\n\t\t}).Error(\"Createing the queue\")\n\t\treturn\n\t}\n\n\tqueue.URL = *resp.QueueUrl\n\tlog.WithFields(log.Fields{\n\t\t\"QueueUrl\": queue.URL,\n\t}).Info(\"Queue initialized\")\n\n\treturn\n}", "func NewQueue(maxQueueSize, maxFlowSize uint64, helper Interface) *Queue {\n\tif maxFlowSize > 
maxQueueSize {\n\t\tpanic(\"MaxFlowSize > MaxQueueSize\")\n\t}\n\n\tif helper == nil {\n\t\tpanic(\"helper is nil\")\n\t}\n\n\tq := new(Queue)\n\tq.cond.L = &q.lock\n\tq.maxQueueSize = maxQueueSize\n\tq.maxFlowSize = maxFlowSize\n\tq.helper = helper\n\tq.flows = make(map[uint64]*flowInfo)\n\n\treturn q\n}", "func (t *OpenconfigQos_Qos_Queues) NewQueue(Name string) (*OpenconfigQos_Qos_Queues_Queue, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.Queue == nil {\n\t\tt.Queue = make(map[string]*OpenconfigQos_Qos_Queues_Queue)\n\t}\n\n\tkey := Name\n\n\t// Ensure that this key has not already been used in the\n\t// list. Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.Queue[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list Queue\", key)\n\t}\n\n\tt.Queue[key] = &OpenconfigQos_Qos_Queues_Queue{\n\t\tName: &Name,\n\t}\n\n\treturn t.Queue[key], nil\n}", "func NewQueue(action WorkAction, options ...QueueOption) *Queue {\n\tq := Queue{\n\t\tLatch: NewLatch(),\n\t\tAction: action,\n\t\tContext: context.Background(),\n\t\tMaxWork: DefaultQueueMaxWork,\n\t\tParallelism: runtime.NumCPU(),\n\t}\n\tfor _, option := range options {\n\t\toption(&q)\n\t}\n\treturn &q\n}", "func New() *Queue {\r\n\treturn &Queue{nil,nil,0}\r\n}", "func NewQueue(ctx *pulumi.Context,\n\tname string, args *QueueArgs, opts ...pulumi.ResourceOption) (*Queue, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.HoursOfOperationArn == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'HoursOfOperationArn'\")\n\t}\n\tif args.InstanceArn == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'InstanceArn'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Queue\n\terr := ctx.RegisterResource(\"aws-native:connect:Queue\", name, args, &resource, opts...)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func newQueueService(c *orgbot.Config) (*queueService, error) {\n\tsess, err := cmd.NewAWSSession()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &queueService{\n\t\tconfig: c,\n\t\tsqsClient: sqs.New(sess),\n\t}, nil\n\n}", "func NewQueue(storage Storage, reQueueTimeout time.Duration) Queue {\n\tif reQueueTimeout < 1 {\n\t\treQueueTimeout = time.Minute * 30\n\t}\n\n\tname := \"gocelery\"\n\tq := &queue{\n\t\tstorage: storage,\n\t\thead: 0,\n\t\ttail: 0,\n\t\trequeueTimeout: reQueueTimeout,\n\t\tqueuePrefix: fmt.Sprintf(\"%s-queue-\", name),\n\t\tqueueAckPrefix: fmt.Sprintf(\"%s-ack-\", name),\n\t}\n\n\t// restore the old state from the DB\n\tq.loadHeadTail()\n\treturn q\n}", "func New(mqURL string) (models.MessageQueue, error) {\n\tmq, err := newmq(mqURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &metricMQ{mq}, nil\n}", "func NewQueue(id string, persistent bool, conn net.Conn) (Queue, error) {\n\tservice := broker.GetService(ServiceName).(*QueueService)\n\treturn service.newQueue(id, persistent, conn)\n}", "func NewQueue(newNode *node.Node) *Queue {\n\tq := Queue{size: 0, final: newNode}\n\treturn &q\n}", "func New(cb Done, transport http.RoundTripper) *Manager {\n\treturn &Manager{\n\t\tkeys: sets.NewString(),\n\t\tcb: cb,\n\t\ttransport: transport,\n\t}\n}", "func NewQueue() *Queue {\n\treturn &Queue{}\n}", "func NewQueue() *Queue {\n\treturn &Queue{}\n}", "func NewQueue() *Queue {\n\treturn &Queue{nil, nil, 0}\n}", "func New() Queue {\n\treturn Queue{list: linkedlist.New()}\n}", "func New(name string) (*Queue, error) {\n\tqueue := Queue{Name: name}\n\terr := queue.Init()\n\n\treturn &queue, err\n}", "func NewQueue(\n\tservers []string,\n\topts QueueOptions,\n) (Queue, error) {\n\tq, err := newQueue(servers, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tq.initConnections(servers)\n\tgo q.reportMetrics()\n\n\treturn q, nil\n}", "func New(mqURL string) 
(models.MessageQueue, error) {\n\t// Play with URL schemes here: https://play.golang.org/p/xWAf9SpCBW\n\tu, err := url.Parse(mqURL)\n\tif err != nil {\n\t\tlogrus.WithError(err).WithFields(logrus.Fields{\"url\": mqURL}).Fatal(\"bad MQ URL\")\n\t}\n\tlogrus.WithFields(logrus.Fields{\"mq\": u.Scheme}).Debug(\"selecting MQ\")\n\tswitch u.Scheme {\n\tcase \"memory\":\n\t\treturn NewMemoryMQ(), nil\n\tcase \"redis\":\n\t\treturn NewRedisMQ(u)\n\tcase \"bolt\":\n\t\treturn NewBoltMQ(u)\n\t}\n\tif strings.HasPrefix(u.Scheme, \"ironmq\") {\n\t\treturn NewIronMQ(u), nil\n\t}\n\n\treturn nil, fmt.Errorf(\"mq type not supported %v\", u.Scheme)\n}", "func newQueue() *Queue {\n\tl := list.New()\n\treturn &Queue{Elements: l}\n}", "func New() *Queue {\n\treturn &Queue{nil, nil, 0}\n}", "func New() *Queue {\n\treturn &Queue{nil, nil, 0}\n}", "func New(hint int) *Queue {\n\treturn &Queue{\n\t\titems: make([]interface{}, 0, hint),\n\t}\n}", "func Constructor() MyQueue {\n\treturn Myqueue{list: listNew()}\n}", "func New(delegate Delegate, settings Settings) (*Queue, error) {\n\tconst op = \"pq/new\"\n\n\tif delegate == nil {\n\t\treturn nil, errOp(op).of(InvalidParam).report(\"delegate must not be nil\")\n\t}\n\n\taccessor, errKind := makeAccess(delegate)\n\tif errKind != NoError {\n\t\treturn nil, errOp(op).of(errKind)\n\t}\n\n\tpageSize := delegate.PageSize()\n\n\tq := &Queue{\n\t\taccessor: accessor,\n\t\tsettings: settings,\n\t\tpagePool: newPagePool(pageSize),\n\t}\n\n\t// use pointer address as ID for correlating error messages\n\tq.id = queueID(uintptr(unsafe.Pointer(q)))\n\taccessor.quID = q.id\n\n\trootBuf, err := q.accessor.ReadRoot()\n\tif err != nil {\n\t\treturn nil, wrapErr(op, err).of(InitFailed).\n\t\t\treport(\"failed to read queue header\")\n\t}\n\n\troot := castQueueRootPage(rootBuf[:])\n\tif root.version.Get() != queueVersion {\n\t\tcause := &Error{\n\t\t\tkind: InitFailed,\n\t\t\tmsg: fmt.Sprintf(\"queue version %v\", root.version.Get()),\n\t\t}\n\t\treturn nil, 
wrapErr(op, cause).of(InitFailed)\n\t}\n\n\ttracef(\"open queue: %p (pageSize: %v)\\n\", q, pageSize)\n\ttraceQueueHeader(root)\n\n\tq.version = root.version.Get()\n\tq.hdrOffset = q.accessor.RootFileOffset()\n\tq.onInit()\n\treturn q, nil\n}", "func New() *Queue {\n\tq := new(Queue)\n\tq.length = 0\n\tq.s1 = stack.New()\n\tq.s2 = stack.New()\n\n\treturn q\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_Queues) NewQueue(Name string) (*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues_Queue, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.Queue == nil {\n\t\tt.Queue = make(map[string]*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues_Queue)\n\t}\n\n\tkey := Name\n\n\t// Ensure that this key has not already been used in the\n\t// list. Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.Queue[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list Queue\", key)\n\t}\n\n\tt.Queue[key] = &OpenconfigQos_Qos_Interfaces_Interface_Input_Queues_Queue{\n\t\tName: &Name,\n\t}\n\n\treturn t.Queue[key], nil\n}", "func NewQueue(queueCapacity int) (*Queue, error) {\n\tif queueCapacity < 0 {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"negative capacity value: %d\", queueCapacity)\n\t}\n\n\tdata := make([]interface{}, 0, queueCapacity)\n\n\treturn &Queue{\n\t\tdata: data,\n\t}, nil\n}", "func NewQueue() *Queue {\n\treturn new(Queue).InitQueueNode()\n}", "func New(ctx context.Context, cfg models.Config) (*Queue, error) {\n\tconn, err := connect(ctx, cfg)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to connect to RabbitMQ \")\n\t}\n\n\tch, err := conn.Channel()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to open a channel \")\n\t}\n\n\t_, err = ch.QueueDeclare(\"ItemQueue\", false, false, false, false, nil)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to declare a queue \")\n\t}\n\n\treturn &Queue{ch, conn}, nil\n}", 
"func NewQueue() *Queue {\n\tqueue := &Queue{\n\t\tcontents: make([]interface{}, 1),\n\t\tstart: -1,\n\t\tend: -1,\n\t\tlength: 0,\n\t}\n\n\treturn queue\n}", "func NewAPIRequestManager() *APIRequestManager {\n\treturn &APIRequestManager{\n\t\tqueue: make(chan *WorkerItem, 10),\n\t}\n}", "func NewQueue(queueID uint16, handler PacketHandler, cfg *QueueConfig) *Queue {\n\tif cfg == nil {\n\t\tcfg = &QueueConfig{}\n\t}\n\tq := &Queue{\n\t\tID: queueID,\n\t\thandler: handler,\n\t\tcfg: cfg,\n\t}\n\tqueueRegistry.Register(queueID, q)\n\treturn q\n}", "func newJobQueue(ctx context.Context, consumerCount, length int) *jobQueue {\n\teg, selfCtx := errgroup.WithContext(ctx)\n\treturn &jobQueue{\n\t\tctx: selfCtx,\n\t\tmsgq: make(chan *restoreSchemaJob, length),\n\t\tconsumerCount: consumerCount,\n\t\teg: eg,\n\t}\n}", "func NewManager(ps common.PubsubInterface, primaryCapacity int, overflowCapacity int) *Manager {\n\tshelves := map[string]*primaryShelf{\n\t\t\"hot\": NewPrimaryShelf(primaryCapacity),\n\t\t\"cold\": NewPrimaryShelf(primaryCapacity),\n\t\t\"frozen\": NewPrimaryShelf(primaryCapacity),\n\t}\n\toverflow := NewOverflowShelf(overflowCapacity, []string{\"hot\", \"cold\", \"frozen\"})\n\treturn &Manager{shelves, overflow, ps}\n}", "func Constructor() MyQueue {\n\treturn MyQueue{}\n}", "func Constructor() MyQueue {\n\treturn MyQueue{}\n}", "func New() *queue {\n\treturn &queue{\n\t\titems: make([]item, DefaultCapacity),\n\t\tcapacity: DefaultCapacity,\n\t}\n}", "func New(cfg config.Queue, n notifier) *Queue {\n\tq := &Queue{\n\t\taddCh: make(chan struct{}, cfg.QueueSize),\n\t\tpopCh: make(chan struct{}, cfg.GoRoutinesSize),\n\t\taddMessage: make(chan entity.NotifierMessage, 1),\n\t\tpopMessage: make(chan entity.NotifierMessage, 1),\n\t\tnotifier: n,\n\t}\n\n\tgo q.pop()\n\tgo q.add()\n\n\treturn q\n}", "func New(queueName string) bus.MessageQueue {\n\tmq := new(messageQueue)\n\tmq.queue = queueFactory(queueName)\n\n\treturn mq\n}", "func NewQueue(qname string, taskId 
string) (*Queue, error) {\n q := &Queue{}\n q.StdoutChan = make(chan []byte, QUEUE_SIZE)\n q.StderrChan = make(chan []byte, QUEUE_SIZE)\n q.exitChan = make(chan string)\n q.finishChan = make(chan bool)\n\n s, err := aws.NewSqs(qname, taskId)\n q.awsSqs = s\n\n return q, err\n}", "func New(opt *Options) *Queue {\n\tif client == nil {\n\t\tredisOpt := &redis.Options{\n\t\t\tAddr: opt.Connection.Addr,\n\t\t\tPassword: opt.Connection.Password,\n\t\t\tDB: opt.Connection.DB,\n\t\t\tMaxRetries: opt.Connection.MaxRetries,\n\t\t\tDialTimeout: opt.Connection.DialTimeout,\n\t\t\tReadTimeout: opt.Connection.ReadTimeout,\n\t\t\tWriteTimeout: opt.Connection.WriteTimeout,\n\t\t\tPoolSize: opt.Connection.PoolSize,\n\t\t\tPoolTimeout: opt.Connection.PoolTimeout,\n\t\t\tIdleTimeout: opt.Connection.IdleTimeout,\n\t\t}\n\t\tclient = redis.NewClient(redisOpt)\n\t}\n\n\treturn &Queue{\n\t\tjobChannel: make(chan string, 1000),\n\t\tconcurrency: opt.Concurrency,\n\t\tqueueName: opt.QueueName,\n\t\tprocessor: opt.Processor,\n\t\terrorHandler: opt.ErrorHandler,\n\t}\n}", "func New() *Queue {\n\titems := []*item.Item{}\n\tlock := &sync.Mutex{}\n\treturn &Queue{items, lock}\n}", "func NewQueue(ch *amqp.Channel, n string) amqp.Queue {\n\tq, err := ch.QueueDeclare(\n\t\tn,\n\t\tfalse,\n\t\tfalse,\n\t\tfalse,\n\t\tfalse,\n\t\tnil,\n\t)\n\tfailOnError(err, \"Failed to declare a queue\")\n\treturn q\n}", "func NewQueueListener(ea *EventAggregator, url string) *QueueListener {\n\n\tql := QueueListener{\n\t\tsources: make(map[string]*dto.Sensor),\n\t\tea: ea,\n\t}\n\n\tql.conn, ql.ch = qutils.GetChannel(url)\n\n\treturn &ql\n}", "func NewManager(ctx context.Context, config *Config) *Manager {\n\tmanager := &Manager{\n\t\tctx: ctx,\n\t\tconfig: config,\n\t\tEntries: map[string]*Entry{},\n\t}\n\tfor _, config := range config.APICalls {\n\t\tapiCall := NewAPICall(config.URI, config.Key)\n\t\tjob := NewFnJob(config.Name, config.Schedule, apiCall.Call)\n\t\tmanager.AddJob(job)\n\t}\n\treturn manager\n}", 
"func NewQueue() Queue {\r\n\tvar empty []int\r\n\treturn Queue{empty, len(empty)}\r\n}", "func NewQueue() *queue {\n q := new(queue)\n q.head = new(task)\n q.tail = q.head\n\treturn q\n}", "func NewQueue(client *elastic.Client, options ...Option) *Queue {\n\tqueue := &Queue{\n\t\terrorHandler: defaultErrorHandler,\n\t\tcloser: make(chan struct{}),\n\t\ttimeout: time.Second * 5,\n\t}\n\n\tfor _, option := range options {\n\t\toption(queue)\n\t}\n\n\tif queue.requester == nil {\n\t\tqueue.requester = &ClientRequester{Client: client, Timeout: queue.timeout}\n\t}\n\n\tif len(queue.conditions) == 0 {\n\t\tpanic(\"mixer/elasticqueue: write conditions were passed, the client will buffer \" +\n\t\t\t\"infinitely! Use WithCondition() to pass one or more options to NewQueue()\")\n\t}\n\n\tgo queue.listenToConditions()\n\n\treturn queue\n}", "func NewQueue() *Queue {\n\treturn &Queue{\n\t\tdata: []*QueueNode{},\n\t}\n}", "func NewQueue() *Queue {\n\tq := new(Queue)\n\tq.ready = make(ReadyHeap, 0)\n\theap.Init(&q.ready)\n\tq.delay = make(DelayHeap, 0)\n\theap.Init(&q.delay)\n\tq.reserve = NewReserve(new(Item), false)\n\tq.list = make(map[string]bool)\n\tgo q.watchReady()\n\tgo q.watchDelay()\n\treturn q\n}", "func NewQueue() *SubmitQueue {\n\treturn &SubmitQueue{\n\t\tsorted: false,\n\t\titems: nil,\n\t}\n}", "func NewBasicMockMessageQueue() lanternmq.MessageQueue {\n\tmq := BasicMockMessageQueue{}\n\tmq.Queue = make(chan []byte, 20)\n\n\tmq.ConnectFn = func(username string, password string, host string, port string) error {\n\t\treturn nil\n\t}\n\n\tmq.CreateChannelFn = func() (lanternmq.ChannelID, error) {\n\t\treturn 1, nil\n\t}\n\n\tmq.NumConcurrentMsgsFn = func(chID lanternmq.ChannelID, num int) error {\n\t\treturn nil\n\t}\n\n\tmq.QueueExistsFn = func(chId lanternmq.ChannelID, qName string) (bool, error) {\n\t\treturn true, nil\n\t}\n\n\tmq.DeclareQueueFn = func(chID lanternmq.ChannelID, name string) error {\n\t\treturn nil\n\t}\n\n\tmq.PublishToQueueFn = func(chID 
lanternmq.ChannelID, qName string, message string) error {\n\t\tif len(mq.Queue) < 20 {\n\t\t\tmq.Queue <- []byte(message)\n\t\t} else {\n\t\t\treturn errors.New(\"queue full - unable to add new message\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tmq.ConsumeFromQueueFn = func(chID lanternmq.ChannelID, qName string) (lanternmq.Messages, error) {\n\t\treturn nil, nil\n\t}\n\n\tmq.ProcessMessagesFn = func(ctx context.Context, msgs lanternmq.Messages, handler lanternmq.MessageHandler, args *map[string]interface{}, errs chan<- error) {\n\t\tfor msg := range mq.Queue {\n\t\t\terr := handler(msg, args)\n\t\t\tif err != nil {\n\t\t\t\terrs <- err\n\t\t\t}\n\t\t}\n\t}\n\n\tmq.CloseFn = func() {}\n\treturn &mq\n}", "func New() *JobManager {\n\tjm := JobManager{\n\t\theartbeatInterval: DefaultHeartbeatInterval,\n\t\tjobs: map[string]*JobMeta{},\n\t\ttasks: map[string]*TaskMeta{},\n\t}\n\tjm.schedulerWorker = async.NewInterval(jm.runDueJobs, DefaultHeartbeatInterval)\n\tjm.killHangingTasksWorker = async.NewInterval(jm.killHangingTasks, DefaultHeartbeatInterval)\n\treturn &jm\n}", "func New() *Queue {\r\n\treturn &Queue{\r\n\t\tdata: []int{},\r\n\t}\r\n}", "func NewQueue() Queue{\n\treturn Queue{values: make([]*unsafe.Pointer,50), end: 0,}\n}", "func NewManager(endPoint, slug, shConnStr, fetcherBackendVersion, enricherBackendVersion string, fetch bool, enrich bool, eSUrl string, esUser string, esPassword string, esIndex string, fromDate *time.Time, project string, fetchSize int, enrichSize int, affBaseURL, esCacheURL, esCacheUsername, esCachePassword, authGrantType, authClientID, authClientSecret, authAudience, auth0URL, env, webHookURL string) (*Manager, error) {\n\tmng := &Manager{\n\t\tEndpoint: endPoint,\n\t\tSlug: slug,\n\t\tSHConnString: shConnStr,\n\t\tFetcherBackendVersion: fetcherBackendVersion,\n\t\tEnricherBackendVersion: enricherBackendVersion,\n\t\tFetch: fetch,\n\t\tEnrich: enrich,\n\t\tESUrl: eSUrl,\n\t\tESUsername: esUser,\n\t\tESPassword: esPassword,\n\t\tESIndex: 
esIndex,\n\t\tFromDate: fromDate,\n\t\tHTTPTimeout: 60 * time.Second,\n\t\tProject: project,\n\t\tFetchSize: fetchSize,\n\t\tEnrichSize: enrichSize,\n\t\tAffBaseURL: affBaseURL,\n\t\tESCacheURL: esCacheURL,\n\t\tESCacheUsername: esCacheUsername,\n\t\tESCachePassword: esCachePassword,\n\t\tAuthGrantType: authGrantType,\n\t\tAuthClientID: authClientID,\n\t\tAuthClientSecret: authClientSecret,\n\t\tAuthAudience: authAudience,\n\t\tAuth0URL: auth0URL,\n\t\tEnvironment: env,\n\t\tesClientProvider: nil,\n\t\tfetcher: nil,\n\t\tenricher: nil,\n\t\tWebHookURL: webHookURL,\n\t\tMaxWorkers: 1000,\n\t}\n\n\tfetcher, enricher, esClientProvider, err := buildServices(mng)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgroupName, err := getGroupName(endPoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmng.fetcher = fetcher\n\tmng.enricher = enricher\n\tmng.esClientProvider = esClientProvider\n\tmng.GroupName = groupName\n\tmng.workerPool = &workerPool{\n\t\tMaxWorker: MaxConcurrentRequests,\n\t\tqueuedTaskC: make(chan func()),\n\t}\n\n\treturn mng, nil\n}", "func NewLLQueue(data interface{}) *LLQueue {\n\tthis := &LLQueue{\n\t\tHead: linkedlist.NewNode(data),\n\t}\n\tthis.Tail = this.Head\n\treturn this\n}", "func NewQueue() *Queue {\n\treturn &Queue{\n\t\titems: []Lit{},\n\t}\n}", "func LocalQueueFactory(size int) Option {\n\treturn func(env Environment) {\n\t\tif size < 5 {\n\t\t\tsize = 5\n\t\t}\n\t\tQueueFactory(makeLocalEventQueueFactory(size))\n\t}\n}", "func (q *QueueMethod) Setup() error {\n\tif err := q.declareExchange(q.Provider.AMQP.DeadLetterExchange, \"direct\", nil); err != nil {\n\t\treturn fmt.Errorf(\"declare exchange failed: name: %s, type: %s, message: %v\", q.Provider.AMQP.DeadLetterExchange, \"direct\", err)\n\t}\n\tif err := q.declareExchange(q.Provider.AMQP.ShardingQueueExchange, \"x-modulus-hash\", nil); err != nil {\n\t\treturn fmt.Errorf(\"declare exchange failed: name: %s, type: %s, message: %v\", q.Provider.AMQP.ShardingQueueExchange, 
\"x-modulus-hash\", err)\n\t}\n\tif err := q.declareExchange(q.Provider.AMQP.DelayMessageExchange, \"x-delayed-message\", amqp.Table{\"x-delayed-type\": \"x-modulus-hash\"}); err != nil {\n\t\treturn fmt.Errorf(\"declare exchange failed: name: %s, type: %s, message: %v\", q.Provider.AMQP.DelayMessageExchange, \"x-delayed-message\", err)\n\t}\n\tif err := q.bindExchange(q.Provider.AMQP.DelayMessageExchange, q.Provider.AMQP.ShardingQueueExchange, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"bind exchange failed: name: %s to %s, message: %v\", q.Provider.AMQP.DelayMessageExchange, q.Provider.AMQP.ShardingQueueExchange, err)\n\t}\n\n\tif err := q.createPolicy(RabbitMQHaMode, \"\", \"queues\", Definition{\n\t\tHaMode: \"exactly\",\n\t\tHaParam: 2,\n\t\tHaSyncMode: \"automatic\",\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"create policy failed: mode: %s, message: %v\", RabbitMQHaMode, err)\n\t}\n\n\tif err := q.createPolicy(RabbitMQShardMode, \"^sns.sharding$\", \"exchanges\", Definition{\n\t\tShardsPerNode: 2,\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"create policy failed: mode: %s, message: %v\", RabbitMQShardMode, err)\n\t}\n\n\treturn nil\n}", "func New(maxSize int, dropBehavior DropBehavior) *Queue {\n\treturn &Queue{\n\t\tmaxSize: maxSize,\n\t\tdropBehavior: dropBehavior,\n\t}\n}", "func NewMockQueueManager(ctrl *gomock.Controller) *MockQueueManager {\n\tmock := &MockQueueManager{ctrl: ctrl}\n\tmock.recorder = &MockQueueManagerMockRecorder{mock}\n\treturn mock\n}", "func NewQueue() *Queue {\n\tstorageListener := storage.NewListener()\n\tw := window.New(storageListener, 1, time.Microsecond)\n\tgo w.Exec()\n\treturn &Queue{\n\t\tdata: map[int][]byte{},\n\t\tfront: -1,\n\t\tback: 0,\n\t\tstorageW: window.New(storageListener, 64, 500*time.Nanosecond),\n\t}\n}", "func New(dir string) (*Queue, error) {\n\tentries, err := readKeys(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar current int\n\tif len(entries) > 0 {\n\t\tcurrent = 
entries[len(entries)-1]\n\t}\n\treturn &Queue{\n\t\tdir: dir,\n\t\tentries: entries,\n\t\tcurrent: current,\n\t}, nil\n}", "func (r *yandexMessageQueueReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&connectorsv1.YandexMessageQueue{}).\n\t\tComplete(r)\n}", "func newManager(id int, wg *sync.WaitGroup, pr int64, cr float64, ps float64) *Manager {\n\tvar weather Weather\n\tweather.initializeWeather()\n\tweather.generateWeather()\n\tforecast, multiplier := weather.getWeather()\n\tfmt.Printf(\"\\nCURRENT FORECAST: %s\\n\", forecast)\n\n\tproductsRate = pr\n\tcustomerRate = cr * multiplier\n\tprocessSpeed = ps\n\n\tcustomerStatusChan = make(chan int, 256)\n\tcheckoutChangeStatusChan = make(chan int, 256)\n\n\t// Default to 1 Checkout when the store opens\n\tnumberOfCheckoutsOpen = 1\n\n\treturn &Manager{id: id, wg: wg}\n}", "func NewQueue() *Queue {\n\treturn &Queue{\n\t\telements: list.New(),\n\t}\n}", "func NewQueue() *Queue {\n\treturn &Queue{\n\t\telements: list.New(),\n\t}\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Output_Queues) NewQueue(Name string) (*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.Queue == nil {\n\t\tt.Queue = make(map[string]*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue)\n\t}\n\n\tkey := Name\n\n\t// Ensure that this key has not already been used in the\n\t// list. 
Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.Queue[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list Queue\", key)\n\t}\n\n\tt.Queue[key] = &OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue{\n\t\tName: &Name,\n\t}\n\n\treturn t.Queue[key], nil\n}", "func New() *Queue {\n\treturn &Queue{\n\t\tidleCh: make(chan interface{}),\n\t\tclosed: false,\n\t}\n}", "func newQuotasManager() Manager {\n\treturn &quotasManager{}\n}", "func (q *queueImp) Create(queue string) error {\n\t// 1. check queue name valid\n\tif utils.BlankString(queue) {\n\t\treturn errors.NotValidf(\"CreateQueue queue:%s\", queue)\n\t}\n\n\t// 2. check kafka whether the queue exists\n\texist, err := q.manager.ExistTopic(queue, true)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tif exist {\n\t\treturn errors.AlreadyExistsf(\"CreateQueue queue:%s \", queue)\n\t}\n\n\t// 3. check metadata whether the queue exists\n\texist, err = q.extendManager.ExistQueue(queue)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tif exist {\n\t\treturn errors.AlreadyExistsf(\"CreateQueue queue:%s \", queue)\n\t}\n\t// 4. create kafka topic\n\tif err = q.manager.CreateTopic(queue, q.conf.KafkaReplications,\n\t\tq.conf.KafkaPartitions, q.conf.KafkaZKAddr); err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\t// 5. 
add metadata of queue\n\tif err = q.extendManager.AddQueue(queue); err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\treturn nil\n}", "func NewQueue(dirPath string, dataSizeLimit int64) (Queue, error) {\n\tif err := mkDirFunc(dirPath); err != nil {\n\t\treturn nil, err\n\t}\n\tlock := &sync.RWMutex{}\n\tq := &queue{\n\t\tdirPath: dirPath,\n\t\tdataSizeLimit: dataSizeLimit,\n\t\trwMutex: lock,\n\t\tnotEmpty: sync.NewCond(lock),\n\t}\n\n\t// if data size limit < default limit, need reset\n\tif q.dataSizeLimit < defaultDataSizeLimit {\n\t\tq.dataSizeLimit = defaultDataSizeLimit\n\t}\n\n\tvar err error\n\n\tdefer func() {\n\t\t// if init queue failure, need release resource(like file/map file etc.)\n\t\tif err != nil {\n\t\t\tq.Close()\n\t\t}\n\t}()\n\n\t// init data page factory\n\tvar dataPageFct page.Factory\n\tdataPageFct, err = newPageFactoryFunc(filepath.Join(dirPath, dataPath), dataPageSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tq.dataPageFct = dataPageFct\n\n\t// init index page factory\n\tvar indexPageFct page.Factory\n\tindexPageFct, err = newPageFactoryFunc(filepath.Join(dirPath, indexPath), indexPageSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tq.indexPageFct = indexPageFct\n\n\thasMeta := fileutil.Exist(filepath.Join(dirPath, metaPath, fmt.Sprintf(\"%d.bat\", metaPageIndex)))\n\n\t// init meta page factory\n\tvar metaPageFct page.Factory\n\tmetaPageFct, err = newPageFactoryFunc(filepath.Join(dirPath, metaPath), metaPageSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tq.metaPageFct = metaPageFct\n\n\tq.metaPage, err = q.metaPageFct.AcquirePage(metaPageIndex)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif hasMeta {\n\t\t// initialize sequence\n\t\tq.initSequence()\n\t} else {\n\t\tq.appendedSeq.Store(SeqNoNewMessageAvailable)\n\t\tq.acknowledgedSeq.Store(SeqNoNewMessageAvailable)\n\n\t\t// persist metadata\n\t\tq.metaPage.PutUint64(uint64(q.AppendedSeq()), 
queueAppendedSeqOffset)\n\t\tq.metaPage.PutUint64(uint64(q.AcknowledgedSeq()), queueAcknowledgedSeqOffset)\n\n\t\terr = q.metaPage.Sync()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// initialize data page indexes\n\terr = q.initDataPageIndex()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn q, nil\n}", "func NewRequestManager(ttl time.Duration) *RequestManager {\n\treturn &RequestManager{\n\t\trequests: map[interface{}]*requestQueue{},\n\t\tttl: ttl,\n\t\tnow: metav1.Now,\n\t}\n}", "func NewQueue(url, exchangeName, queueName string) (*Queue, error) {\n\tqueue := Queue{\n\t\turl: url,\n\t\texchangeName: exchangeName,\n\t\tqueueName: queueName,\n\t}\n\terr := queue.Connect()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = queue.Declare()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &queue, nil\n}", "func NewManager(system Resumer, name string, kubeClient kubernetes.Interface) *Manager {\n\treturn &Manager{\n\t\tstop: make(chan struct{}),\n\t\tsystem: system,\n\t\tname: name,\n\t\tkubeClient: kubeClient,\n\t}\n}", "func newQuotasManager() QuotaManager {\n\treturn &quotasManager{}\n}", "func NewQueue(name string) *Queue {\n\tctx, cancel := context.WithCancel(context.Background())\n\treturn &Queue{\n\t\tjobs: make(chan Job),\n\t\tname: name,\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t}\n}", "func NewManager(\n\tlogger logging.LoggerInterface,\n\tsynchronizer synchronizerInterface,\n\tcfg *conf.AdvancedConfig,\n\tfeedbackLoop chan<- int64,\n\tauthAPI service.AuthClient,\n\truntimeTelemetry storage.TelemetryRuntimeProducer,\n\tmetadata dtos.Metadata,\n\tclientKey *string,\n\thcMonitor application.MonitorProducerInterface, // Deprecated: This is no longer used, left here only to avoid a breaking change\n) (*ManagerImpl, error) {\n\n\tprocessor, err := NewProcessor(cfg.SplitUpdateQueueSize, cfg.SegmentUpdateQueueSize, synchronizer, logger)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error instantiating processor: %w\", 
err)\n\t}\n\n\tstatusTracker := NewStatusTracker(logger, runtimeTelemetry)\n\tparser := &NotificationParserImpl{\n\t\tlogger: logger,\n\t\tonSplitUpdate: processor.ProcessSplitChangeUpdate,\n\t\tonSplitKill: processor.ProcessSplitKillUpdate,\n\t\tonSegmentUpdate: processor.ProcessSegmentChangeUpdate,\n\t\tonControlUpdate: statusTracker.HandleControl,\n\t\tonOccupancyMesage: statusTracker.HandleOccupancy,\n\t\tonAblyError: statusTracker.HandleAblyError,\n\t}\n\n\tmanager := &ManagerImpl{\n\t\tauthAPI: authAPI,\n\t\tsseClient: sse.NewStreamingClient(cfg, logger, metadata, clientKey),\n\t\tstatusTracker: statusTracker,\n\t\tfeedback: feedbackLoop,\n\t\tprocessor: processor,\n\t\tparser: parser,\n\t\tlogger: logger,\n\t\truntimeTelemetry: runtimeTelemetry,\n\t}\n\tmanager.lifecycle.Setup()\n\treturn manager, nil\n}" ]
[ "0.6894487", "0.6460588", "0.6222042", "0.61513555", "0.6091006", "0.606205", "0.60386145", "0.6006943", "0.59639966", "0.594565", "0.5942067", "0.59363455", "0.5910915", "0.5891163", "0.5883823", "0.5868089", "0.5867437", "0.58120775", "0.579116", "0.57845575", "0.5739835", "0.5729333", "0.57284796", "0.5727378", "0.57215804", "0.5712212", "0.57040274", "0.5703663", "0.570017", "0.5695105", "0.5686319", "0.56860936", "0.5670982", "0.5670982", "0.56662583", "0.5644873", "0.5636126", "0.5625117", "0.5620882", "0.5615961", "0.5586366", "0.5586366", "0.5585547", "0.55763906", "0.55730265", "0.5562347", "0.5558622", "0.554701", "0.55468494", "0.5542528", "0.5534157", "0.5519224", "0.54966015", "0.54926205", "0.5488962", "0.5475239", "0.5475239", "0.54693717", "0.5466636", "0.54603916", "0.54525185", "0.5438298", "0.5435606", "0.54334134", "0.5428155", "0.54264826", "0.5426083", "0.5412998", "0.54060096", "0.5405864", "0.5382502", "0.5380629", "0.5378693", "0.5369328", "0.5361096", "0.5356475", "0.5354758", "0.5353707", "0.53503066", "0.5349652", "0.53444856", "0.53388375", "0.53265595", "0.5317479", "0.53142416", "0.5310355", "0.53072613", "0.5302812", "0.5302812", "0.5290566", "0.52859527", "0.5285241", "0.52829885", "0.5282758", "0.52773714", "0.52661717", "0.525159", "0.52441984", "0.5235611", "0.5232728" ]
0.6490378
1
NewQueueManagerWithDefaults instantiates a new QueueManager object This constructor will only assign default values to properties that have it defined, but it doesn't guarantee that properties required by API are set
func NewQueueManagerWithDefaults() *QueueManager { this := QueueManager{} return &this }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewQueueManager(logger log.Logger, cfg config.QueueConfig, externalLabels labels.Labels, relabelConfigs []*relabel.Config, client StorageClient, flushDeadline time.Duration) *QueueManager {\n\tif logger == nil {\n\t\tlogger = log.NewNopLogger()\n\t} else {\n\t\tlogger = log.With(logger, \"queue\", client.Name())\n\t}\n\tt := &QueueManager{\n\t\tlogger: logger,\n\t\tflushDeadline: flushDeadline,\n\t\tcfg: cfg,\n\t\texternalLabels: externalLabels,\n\t\trelabelConfigs: relabelConfigs,\n\t\tclient: client,\n\t\tqueueName: client.Name(),\n\n\t\tlogLimiter: rate.NewLimiter(logRateLimit, logBurst),\n\t\tnumShards: cfg.MinShards,\n\t\treshardChan: make(chan int),\n\t\tquit: make(chan struct{}),\n\n\t\tsamplesIn: newEWMARate(ewmaWeight, shardUpdateDuration),\n\t\tsamplesOut: newEWMARate(ewmaWeight, shardUpdateDuration),\n\t\tsamplesOutDuration: newEWMARate(ewmaWeight, shardUpdateDuration),\n\t}\n\tt.shards = t.newShards(t.numShards)\n\tnumShards.WithLabelValues(t.queueName).Set(float64(t.numShards))\n\tshardCapacity.WithLabelValues(t.queueName).Set(float64(t.cfg.Capacity))\n\n\t// Initialize counter labels to zero.\n\tsentBatchDuration.WithLabelValues(t.queueName)\n\tsucceededSamplesTotal.WithLabelValues(t.queueName)\n\tfailedSamplesTotal.WithLabelValues(t.queueName)\n\tdroppedSamplesTotal.WithLabelValues(t.queueName)\n\n\treturn t\n}", "func NewDefaultClient() QueueClient {\n\treturn &inMemoryQueue{queues: make(map[string][]string)}\n}", "func DefaultQueue(queue string) func(*Locker) error {\n\treturn func(l *Locker) error {\n\t\tl.DefaultQueue = queue\n\t\treturn nil\n\t}\n}", "func (m ManagedConsumerConfig) setDefaults() ManagedConsumerConfig {\n\tif m.NewConsumerTimeout <= 0 {\n\t\tm.NewConsumerTimeout = 5 * time.Second\n\t}\n\tif m.InitialReconnectDelay <= 0 {\n\t\tm.InitialReconnectDelay = 1 * time.Second\n\t}\n\tif m.MaxReconnectDelay <= 0 {\n\t\tm.MaxReconnectDelay = 5 * time.Minute\n\t}\n\t// unbuffered queue not allowed\n\tif m.QueueSize <= 0 
{\n\t\tm.QueueSize = 128\n\t}\n\n\treturn m\n}", "func Default() *JobManager {\n\tif _default == nil {\n\t\t_defaultLock.Lock()\n\t\tdefer _defaultLock.Unlock()\n\n\t\tif _default == nil {\n\t\t\t_default = New()\n\t\t}\n\t}\n\treturn _default\n}", "func NewDefault(db *bolt.DB) (q queue.WaitQueue, err error) {\n\treturn New(db, DefaultBucket, DefaultMemQueueSize, DefaultBufSize)\n}", "func DefaultQueue(queue string) func(*Config) error {\n\treturn func(c *Config) error {\n\t\tc.DefaultQueue = queue\n\t\treturn nil\n\t}\n}", "func (obj *RabbitQueue) Default() {\n\trabbitQueueLog.Info(\"default\", \"name\", obj.Name, \"namespace\", obj.Namespace)\n\n\tif obj.Spec.QueueName == \"\" {\n\t\tobj.Spec.QueueName = obj.Name\n\t}\n}", "func NewQueueManager(name string, clusters []string, aliasQueues []AliasQueue, remoteQueues []RemoteQueue, clusterQueues []ClusterQueue, ) *QueueManager {\n\tthis := QueueManager{}\n\tthis.Name = name\n\tthis.Clusters = clusters\n\tthis.AliasQueues = aliasQueues\n\tthis.RemoteQueues = remoteQueues\n\tthis.ClusterQueues = clusterQueues\n\treturn &this\n}", "func DefaultQueueSettings() QueueSettings {\n\treturn QueueSettings{\n\t\tEnabled: true,\n\t\tNumConsumers: 10,\n\t\t// For 5000 queue elements at 100 requests/sec gives about 50 sec of survival of destination outage.\n\t\t// This is a pretty decent value for production.\n\t\t// User should calculate this from the perspective of how many seconds to buffer in case of a backend outage,\n\t\t// multiply that by the number of requests per seconds.\n\t\tQueueSize: 5000,\n\t\tPersistentStorageEnabled: false,\n\t}\n}", "func NewQueueManager(q amboy.Queue) Manager {\n\treturn &queueManager{\n\t\tqueue: q,\n\t}\n}", "func NewDefault(m map[string]interface{}) (share.Manager, error) {\n\tc := &config{}\n\tif err := mapstructure.Decode(m, c); err != nil {\n\t\terr = errors.Wrap(err, \"error creating a new manager\")\n\t\treturn nil, err\n\t}\n\n\ts, err := metadata.NewCS3Storage(c.GatewayAddr, 
c.ProviderAddr, c.ServiceUserID, c.ServiceUserIdp, c.MachineAuthAPIKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tindexer := indexer.CreateIndexer(s)\n\n\tclient, err := pool.GetGatewayServiceClient(c.GatewayAddr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn New(client, s, indexer)\n}", "func NewQueue(maxQueueSize, maxFlowSize uint64, helper Interface) *Queue {\n\tif maxFlowSize > maxQueueSize {\n\t\tpanic(\"MaxFlowSize > MaxQueueSize\")\n\t}\n\n\tif helper == nil {\n\t\tpanic(\"helper is nil\")\n\t}\n\n\tq := new(Queue)\n\tq.cond.L = &q.lock\n\tq.maxQueueSize = maxQueueSize\n\tq.maxFlowSize = maxFlowSize\n\tq.helper = helper\n\tq.flows = make(map[uint64]*flowInfo)\n\n\treturn q\n}", "func New(mqURL string) (models.MessageQueue, error) {\n\tmq, err := newmq(mqURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &metricMQ{mq}, nil\n}", "func NewDefaultMQService() *mqServiceImpl {\n\treturn &mqServiceImpl{}\n}", "func New() *queue {\n\treturn &queue{\n\t\titems: make([]item, DefaultCapacity),\n\t\tcapacity: DefaultCapacity,\n\t}\n}", "func newDefaultPodManager() *podManager {\n\treturn &podManager{\n\t\trunningPods: make(map[string]*runningPod),\n\t\trequests: make(chan *cniserver.PodRequest, 20),\n\t\treattachPods: make(map[string]*corev1.Pod),\n\t}\n}", "func NewDefaultManager() *Manager {\n\tm := NewManager()\n\n\t// default config\n\tm.SetAuthorizeCodeExp(time.Minute * 10)\n\tm.SetImplicitTokenCfg(&Config{AccessTokenExp: time.Hour * 1})\n\tm.SetClientTokenCfg(&Config{AccessTokenExp: time.Hour * 2})\n\tm.SetAuthorizeCodeTokenCfg(&Config{IsGenerateRefresh: true, AccessTokenExp: time.Hour * 2, RefreshTokenExp: time.Hour * 24 * 3})\n\tm.SetPasswordTokenCfg(&Config{IsGenerateRefresh: true, AccessTokenExp: time.Hour * 2, RefreshTokenExp: time.Hour * 24 * 7})\n\n\tm.MapTokenModel(models.NewToken())\n\tm.MapAuthorizeGenerate(generates.NewAuthorizeGenerate())\n\tm.MapAccessGenerate(generates.NewAccessGenerate())\n\n\treturn m\n}", "func 
NewQueue(maximumCapacity int, initialCapacity int, factory TokenFactory) *Queue {\n\tq := &Queue{\n\t\tmaxCapacity: maximumCapacity,\n\t\tavailableTokens: make(chan (Token), maximumCapacity),\n\t\tcommittedTokens: make(chan (Token), maximumCapacity),\n\t\tdiscardTokens: make(chan (Token), maximumCapacity),\n\t\tcloseTokens: make(chan (Token)),\n\t}\n\n\tfor i := 0; i < maximumCapacity; i++ {\n\t\ttoken := factory()\n\t\tif token == nil {\n\t\t\treturn nil\n\t\t}\n\t\tq.discardTokens <- token\n\t\tq.validTokens = append(q.validTokens, token)\n\t}\n\n\tq.EnableDisableTokens(initialCapacity)\n\n\treturn q\n}", "func NewBasicMockMessageQueue() lanternmq.MessageQueue {\n\tmq := BasicMockMessageQueue{}\n\tmq.Queue = make(chan []byte, 20)\n\n\tmq.ConnectFn = func(username string, password string, host string, port string) error {\n\t\treturn nil\n\t}\n\n\tmq.CreateChannelFn = func() (lanternmq.ChannelID, error) {\n\t\treturn 1, nil\n\t}\n\n\tmq.NumConcurrentMsgsFn = func(chID lanternmq.ChannelID, num int) error {\n\t\treturn nil\n\t}\n\n\tmq.QueueExistsFn = func(chId lanternmq.ChannelID, qName string) (bool, error) {\n\t\treturn true, nil\n\t}\n\n\tmq.DeclareQueueFn = func(chID lanternmq.ChannelID, name string) error {\n\t\treturn nil\n\t}\n\n\tmq.PublishToQueueFn = func(chID lanternmq.ChannelID, qName string, message string) error {\n\t\tif len(mq.Queue) < 20 {\n\t\t\tmq.Queue <- []byte(message)\n\t\t} else {\n\t\t\treturn errors.New(\"queue full - unable to add new message\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tmq.ConsumeFromQueueFn = func(chID lanternmq.ChannelID, qName string) (lanternmq.Messages, error) {\n\t\treturn nil, nil\n\t}\n\n\tmq.ProcessMessagesFn = func(ctx context.Context, msgs lanternmq.Messages, handler lanternmq.MessageHandler, args *map[string]interface{}, errs chan<- error) {\n\t\tfor msg := range mq.Queue {\n\t\t\terr := handler(msg, args)\n\t\t\tif err != nil {\n\t\t\t\terrs <- err\n\t\t\t}\n\t\t}\n\t}\n\n\tmq.CloseFn = func() {}\n\treturn &mq\n}", 
"func New() *Queue {\r\n\treturn &Queue{nil,nil,0}\r\n}", "func New(mqURL string) (models.MessageQueue, error) {\n\t// Play with URL schemes here: https://play.golang.org/p/xWAf9SpCBW\n\tu, err := url.Parse(mqURL)\n\tif err != nil {\n\t\tlogrus.WithError(err).WithFields(logrus.Fields{\"url\": mqURL}).Fatal(\"bad MQ URL\")\n\t}\n\tlogrus.WithFields(logrus.Fields{\"mq\": u.Scheme}).Debug(\"selecting MQ\")\n\tswitch u.Scheme {\n\tcase \"memory\":\n\t\treturn NewMemoryMQ(), nil\n\tcase \"redis\":\n\t\treturn NewRedisMQ(u)\n\tcase \"bolt\":\n\t\treturn NewBoltMQ(u)\n\t}\n\tif strings.HasPrefix(u.Scheme, \"ironmq\") {\n\t\treturn NewIronMQ(u), nil\n\t}\n\n\treturn nil, fmt.Errorf(\"mq type not supported %v\", u.Scheme)\n}", "func NewQueue() *Queue {\n return &Queue{member: make([]interface{}, 0)}\n}", "func New() Manager {\n\treturn Manager{\n\t\tState: make(map[string]string),\n\t\tClientHolder: make(map[string]utils.Set),\n\t\tClientQueue: make(map[string]utils.Queue),\n\t}\n}", "func NewManager(logger logging.Logger) SessionManager {\n\treturn &defaultSessionManager{\n\t\tlogger: logger,\n\t\ttasks: make(map[string]exec.Execer),\n\n\t\tquit: make(chan struct{}),\n\t}\n}", "func (queue *Queue) Init() (err error) {\n\tclient := queue.GetClient()\n\n\tparams := &sqs.CreateQueueInput{\n\t\tQueueName: aws.String(queue.Name + deadLetterQueueSuffix),\n\t\tAttributes: map[string]*string{\n\t\t\t\"MessageRetentionPeriod\": aws.String(\"1209600\"),\n\t\t},\n\t}\n\tresp, err := client.CreateQueue(params)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"queueName\": queue.Name,\n\t\t\t\"error\": err,\n\t\t}).Error(\"Createing the dead letter queue\")\n\t\treturn\n\t}\n\n\tqueue.DeadLetterQueueURL = *resp.QueueUrl\n\tlog.WithFields(log.Fields{\n\t\t\"QueueUrl\": queue.DeadLetterQueueURL,\n\t}).Info(\"Dead Letter Queue initialized\")\n\n\tqueueArnAttributeName := \"QueueArn\"\n\tdeadLetterQueueAttributes, err := queue.GetAttributesByQueueURL(queue.DeadLetterQueueURL, 
[]*string{&queueArnAttributeName})\n\tif err != nil {\n\t\treturn\n\t}\n\tredrivePolicy := &RedrivePolicy{\n\t\tMaxReceiveCount: MaxReceiveCountBeforeDead,\n\t\tDeadLetterTargetArn: *deadLetterQueueAttributes.Attributes[queueArnAttributeName],\n\t}\n\tredrivePolicyString, err := redrivePolicy.GetAsAWSString()\n\tif err != nil {\n\t\treturn\n\t}\n\tparams = &sqs.CreateQueueInput{\n\t\tQueueName: aws.String(queue.Name),\n\t\tAttributes: map[string]*string{\n\t\t\t\"RedrivePolicy\": redrivePolicyString,\n\t\t\t\"MessageRetentionPeriod\": aws.String(\"1209600\"),\n\t\t},\n\t}\n\tresp, err = client.CreateQueue(params)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"queueName\": queue.Name,\n\t\t\t\"error\": err,\n\t\t}).Error(\"Createing the queue\")\n\t\treturn\n\t}\n\n\tqueue.URL = *resp.QueueUrl\n\tlog.WithFields(log.Fields{\n\t\t\"QueueUrl\": queue.URL,\n\t}).Info(\"Queue initialized\")\n\n\treturn\n}", "func New() *Queue {\n\treturn &Queue{nil, nil, 0}\n}", "func New() *Queue {\n\treturn &Queue{nil, nil, 0}\n}", "func MyQueueConstructor() MyQueue {\n\treturn MyQueue{}\n}", "func New() *JobManager {\n\tjm := JobManager{\n\t\theartbeatInterval: DefaultHeartbeatInterval,\n\t\tjobs: map[string]*JobMeta{},\n\t\ttasks: map[string]*TaskMeta{},\n\t}\n\tjm.schedulerWorker = async.NewInterval(jm.runDueJobs, DefaultHeartbeatInterval)\n\tjm.killHangingTasksWorker = async.NewInterval(jm.killHangingTasks, DefaultHeartbeatInterval)\n\treturn &jm\n}", "func WithDefaultMaxTries(n int) Option {\n\treturn func(q *Queue) {\n\t\tq.maxTries = n\n\t}\n}", "func NewDefaultRestrictionManager(maxValueLength int) *DefaultRestrictionManager {\n\tif maxValueLength == 0 {\n\t\tmaxValueLength = defaultMaxValueLength\n\t}\n\treturn &DefaultRestrictionManager{\n\t\tdefaultRestriction: &Restriction{keyAllowed: true, maxValueLength: maxValueLength},\n\t}\n}", "func (manager *Manager) SetDefaults() {\n\tmanager.viperConfig.SetDefault(workspace, \"No 
name\")\n\n\tmanager.viperConfig.SetDefault(tcpAddress, \"localhost:8888\")\n\tmanager.viperConfig.SetDefault(tcpConnectionType, \"tcp\")\n\n\tmanager.viperConfig.SetDefault(httpAddress, \":8080\")\n\n\tmanager.viperConfig.SetDefault(shutdownTimeout, 15*time.Second)\n\tmanager.viperConfig.SetDefault(readTimeout, 10*time.Second)\n\tmanager.viperConfig.SetDefault(writeTimeout, 10*time.Second)\n\n\tmanager.viperConfig.SetDefault(websocketReadBufferSize, 1024)\n\tmanager.viperConfig.SetDefault(websocketWriteBufferSize, 1024)\n\tmanager.viperConfig.SetDefault(websocketMaxMessageSize, 512)\n\tmanager.viperConfig.SetDefault(websocketWriteWait, 10*time.Second)\n\tmanager.viperConfig.SetDefault(websocketPongWait, 60*time.Second)\n\tmanager.viperConfig.SetDefault(websocketPingPeriod, 60*0.9*time.Second)\n\n\tmanager.viperConfig.SetDefault(httpTimeout, 1*time.Second)\n\n\tmanager.viperConfig.SetDefault(logLevel, \"debug\")\n}", "func NewQueue() Queue {\n\treturn Queue{}\n}", "func NewQueue(name string) *Queue {\n\tredisClient := GetRedisClientFromConfig()\n\tqueue := &Queue{Name: name, RedisClient: redisClient}\n\treturn queue\n}", "func NewQueue(name string, itemType reflect.Type, maxQueueSize uint32) Queue {\n\tq := queue{\n\t\tname: name,\n\t\titemType: itemType,\n\t\tchannel: make(chan interface{}, maxQueueSize),\n\t}\n\treturn &q\n}", "func setupManager(username string, password string, brokerIp string, brokerPort int, manager *Manager, exchange string, queueName string) error {\n\tamqpURI := getAmqpUri(username, password, brokerIp, brokerPort)\n\tmanager.logger.Debugf(\"dialing %s\", amqpURI)\n\tvar err error\n\tmanager.Connection, err = amqp.Dial(amqpURI)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debugf(\"got Connection, getting Channel\")\n\tmanager.Channel, err = manager.Connection.Channel()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debugf(\"got Channel, declaring Exchange (%q)\", exchange)\n\n\tmanager.logger.Debugf(\"declared 
Exchange, declaring Queue %q\", queueName)\n\tqueue, err := manager.Channel.QueueDeclare(\n\t\tqueueName,\n\t\ttrue,\n\t\ttrue,\n\t\tfalse,\n\t\tfalse,\n\t\tnil,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debugf(\"declared Queue (%q, %d messages, %d consumers), binding to Exchange\",\n\t\tqueue.Name, queue.Messages, queue.Consumers)\n\n\tif err = manager.Channel.QueueBind(\n\t\tqueue.Name, // name of the queue\n\t\tqueue.Name, // bindingKey\n\t\texchange, // sourceExchange\n\t\tfalse, // noWait\n\t\tnil, // arguments\n\t); err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debug(\"Queue bound to Exchange, starting Consume\")\n\treturn nil\n}", "func NewQueue(maxWorkers int, maxQueue int) *Queue {\n\tq := make(chan Job, maxQueue)\n\treturn &Queue{\n\t\tq,\n\t\ttrue,\n\t\t&Dispatcher{\n\t\t\tjobQueue: q,\n\t\t\tworkerPool: make(chan chan Job, maxWorkers),\n\t\t\tMaxWorkers: maxWorkers,\n\t\t},\n\t}\n}", "func LocalQueueFactory(size int) Option {\n\treturn func(env Environment) {\n\t\tif size < 5 {\n\t\t\tsize = 5\n\t\t}\n\t\tQueueFactory(makeLocalEventQueueFactory(size))\n\t}\n}", "func NewFakeQueueDispatcher() (dispatcher *FakeQueueDispatcher) {\n\tdispatcher = &FakeQueueDispatcher{}\n\tdispatcher.Messages = make([]interface{}, 0)\n\treturn\n}", "func setupDefaults() {\n\tclient.DefaultClient = grpcCli.NewClient()\n\tserver.DefaultServer = grpcSvr.NewServer()\n\tnetwork.DefaultNetwork = mucpNet.NewNetwork()\n\tmetrics.DefaultMetricsReporter = noopMet.New()\n\n\t// setup rpc implementations after the client is configured\n\tauth.DefaultAuth = authSrv.NewAuth()\n\tbroker.DefaultBroker = brokerSrv.NewBroker()\n\tevents.DefaultStream = eventsSrv.NewStream()\n\tevents.DefaultStore = eventsSrv.NewStore()\n\tregistry.DefaultRegistry = registrySrv.NewRegistry()\n\trouter.DefaultRouter = routerSrv.NewRouter()\n\tstore.DefaultStore = storeSrv.NewStore()\n\tstore.DefaultBlobStore = storeSrv.NewBlobStore()\n\truntime.DefaultRuntime = 
runtimeSrv.NewRuntime()\n}", "func SetDefault(jm *JobManager) {\n\t_defaultLock.Lock()\n\t_default = jm\n\t_defaultLock.Unlock()\n}", "func NewDefaultPriorityQueue() *PriorityQueue {\n\treturn NewPriorityQueue(func(interface{}) bool { return false })\n}", "func NewManager(h Handler,\n\tusername string,\n\tpassword string,\n\tbrokerIp string,\n\tbrokerPort int,\n\texchange string,\n\tqueueName string,\n\tworkers int,\n\tallocate bool,\n\tmanagerName string,\n\thandleFunction handlerFunction,\n\tlogLevel string,\n\tnet catalogue.BaseNetworkInt,\n\timg catalogue.BaseImageInt) (*Manager, error) {\n\n\tmanager := &Manager{\n\t\tConnection: nil,\n\t\tChannel: nil,\n\t\tallocate: allocate,\n\t\tworkers: workers,\n\t\terrorChan: make(chan error),\n\t\tlogger: GetLogger(managerName, logLevel),\n\t\thandlerFunction: handleFunction,\n\t\thandler: h,\n\t\timage: img,\n\t\tnetwork: net,\n\t}\n\n\terr := setupManager(username, password, brokerIp, brokerPort, manager, exchange, queueName)\n\tif err != nil {\n\t\tmanager.logger.Errorf(\"Error while setup the amqp thing: %v\", err)\n\t\treturn nil, err\n\t}\n\tmanager.queueName = queueName\n\treturn manager, nil\n}", "func NewQueue(ctx context.Context, queueID string, db *sql.DB, conf QueueConfig) (*Queue, error) {\n\tq := &Queue{ID: queueID}\n\tq.repo = repo.NewRepository(db)\n\tq.PollRate = 100 * time.Millisecond // Default\n\tq.queueSize = 10000 // Default\n\tq.retries = 3 // Default\n\tq.IsMultiQueue = conf.IsMultiQueue\n\tq.baseDelay = 3 * time.Second // Default\n\n\tif conf.PollingRate > 0 {\n\t\tq.PollRate = conf.PollingRate\n\t}\n\tif conf.Qsize > 0 {\n\t\tq.queueSize = conf.Qsize\n\t}\n\tif conf.BaseDelay > 0 {\n\t\tq.baseDelay = conf.BaseDelay\n\t}\n\tif conf.Retries >= 0 {\n\t\tq.retries = conf.Retries\n\t}\n\t// Multilevel Queue/channel created\n\ttemp := mlQueue{}\n\ttemp.notifier = make([]chan JobChan, 1)\n\ttemp.notifier[0] = make(chan JobChan, q.queueSize)\n\ttemp.total = 1\n\tq.mq = temp\n\n\tm := 
make(map[string][]worker.Worker)\n\tq.workers = m\n\tvar wg sync.WaitGroup\n\tq.wg = &wg\n\n\t// resume stopped jobs\n\terr := q.ResumePendingJobs(ctx)\n\tif err != nil {\n\t\tlogger.Log.Error(\"Unable to resume jobs from bucket: %s\", zap.Error(err))\n\t\t// Don't fail out, this isn't really fatal. But maybe it should be?\n\t}\n\treturn q, nil\n}", "func New(name string, c config.Config) *Queue {\n\treturn &Queue{\n\t\tname: name,\n\t\tconf: c,\n\t}\n}", "func New() Queue {\n\treturn Queue{list: linkedlist.New()}\n}", "func NewDefaultManager(\n\tdemands *cache.SafeDemandCache,\n\tbinpacker *binpacker.Binpacker,\n\tinstanceGroupLabel string) Manager {\n\treturn &defaultManager{\n\t\tdemands: demands,\n\t\tbinpacker: binpacker,\n\t\tinstanceGroupLabel: instanceGroupLabel,\n\t}\n}", "func NewQueue(action WorkAction, options ...QueueOption) *Queue {\n\tq := Queue{\n\t\tLatch: NewLatch(),\n\t\tAction: action,\n\t\tContext: context.Background(),\n\t\tMaxWork: DefaultQueueMaxWork,\n\t\tParallelism: runtime.NumCPU(),\n\t}\n\tfor _, option := range options {\n\t\toption(&q)\n\t}\n\treturn &q\n}", "func NewDefaults() map[string]interface{} {\n\tdefaults := make(map[string]interface{})\n\n\tdefaults[authPostgresURI] = \"postgresql://postgres:postgres@localhost:5432/test?sslmode=disable\"\n\tdefaults[authMigrationVersion] = 0\n\n\tdefaults[gatewayAddr] = \":10000\"\n\tdefaults[gatewayEndpoint] = \"/graphql\"\n\tdefaults[gatewayServePlayground] = true\n\tdefaults[gatewayPlaygroundEndpoint] = \"/playground\"\n\tdefaults[gatewayEnableIntrospection] = true\n\n\tdefaults[seedUserLogin] = \"root\"\n\tdefaults[seedUserPassword] = \"root\"\n\tdefaults[seedRoleTitle] = \"ROOT\"\n\tdefaults[seedRoleSuper] = true\n\n\tdefaults[sessionAccessTokenTTL] = 1000000\n\tdefaults[sessionRefreshTokenTTl] = 5000000\n\n\treturn defaults\n}", "func New(opt *Options) *Queue {\n\tif client == nil {\n\t\tredisOpt := &redis.Options{\n\t\t\tAddr: opt.Connection.Addr,\n\t\t\tPassword: 
opt.Connection.Password,\n\t\t\tDB: opt.Connection.DB,\n\t\t\tMaxRetries: opt.Connection.MaxRetries,\n\t\t\tDialTimeout: opt.Connection.DialTimeout,\n\t\t\tReadTimeout: opt.Connection.ReadTimeout,\n\t\t\tWriteTimeout: opt.Connection.WriteTimeout,\n\t\t\tPoolSize: opt.Connection.PoolSize,\n\t\t\tPoolTimeout: opt.Connection.PoolTimeout,\n\t\t\tIdleTimeout: opt.Connection.IdleTimeout,\n\t\t}\n\t\tclient = redis.NewClient(redisOpt)\n\t}\n\n\treturn &Queue{\n\t\tjobChannel: make(chan string, 1000),\n\t\tconcurrency: opt.Concurrency,\n\t\tqueueName: opt.QueueName,\n\t\tprocessor: opt.Processor,\n\t\terrorHandler: opt.ErrorHandler,\n\t}\n}", "func (o *SendJobCommandParams) SetDefaults() {\n\t// no default values defined for this parameter\n}", "func New() *Queue {\n\tq := new(Queue)\n\tq.length = 0\n\tq.s1 = stack.New()\n\tq.s2 = stack.New()\n\n\treturn q\n}", "func NewQueue() *Queue {\n\treturn &Queue{nil, nil, 0}\n}", "func defaultConsumerOptions() *consumerOptions {\n\treturn &consumerOptions{\n\t\tqueueDepth: 10000,\n\t\tconcurrency: 10,\n\t\tStats: &NilConsumerStatsCollector{},\n\t}\n}", "func NewQueue(action func(interface{}) error) *QueueWorker {\n\treturn &QueueWorker{\n\t\taction: action,\n\t\tlatch: &Latch{},\n\t\tmaxWork: DefaultQueueWorkerMaxWork,\n\t}\n}", "func (o *GetGPUArchitectureParams) SetDefaults() {\n\tvar (\n\t\tallowUnstableDefault = bool(false)\n\t)\n\n\tval := GetGPUArchitectureParams{\n\t\tAllowUnstable: &allowUnstableDefault,\n\t}\n\n\tval.timeout = o.timeout\n\tval.Context = o.Context\n\tval.HTTPClient = o.HTTPClient\n\t*o = val\n}", "func NewQueue(l int) *Queue {\n\tif l == -1 {\n\t\treturn &Queue{\n\t\t\tQueue: make([]types.Event, 0),\n\t\t\tL: int(^uint(0) >> 1), // max integer value, architecture independent\n\t\t}\n\t}\n\tq := &Queue{\n\t\tQueue: make([]types.Event, 0, l),\n\t\tL: l,\n\t}\n\tlog.WithFields(log.Fields{\"Capacity\": q.L}).Debugf(\"Creating queue\")\n\treturn q\n}", "func NewDefaults() *Client {\n\treturn 
&Client{\n\t\tsigkil: make(chan os.Signal, 1),\n\t\tsighup: make(chan os.Signal, 1),\n\t\tmenu: make(map[string]ui.MenuItem),\n\t\tplex: &logs.Timer{},\n\t\talert: &logs.Cooler{},\n\t\tLogger: logs.New(),\n\t\tConfig: &configfile.Config{\n\t\t\tApps: &apps.Apps{\n\t\t\t\tURLBase: \"/\",\n\t\t\t},\n\t\t\tServices: &services.Config{\n\t\t\t\tInterval: cnfg.Duration{Duration: services.DefaultSendInterval},\n\t\t\t\tParallel: 1,\n\t\t\t},\n\t\t\tBindAddr: configfile.DefaultBindAddr,\n\t\t\tSnapshot: &snapshot.Config{\n\t\t\t\tTimeout: cnfg.Duration{Duration: snapshot.DefaultTimeout},\n\t\t\t},\n\t\t\tLogs: &logs.Logs{\n\t\t\t\tLogFiles: DefaultLogFiles,\n\t\t\t\tLogFileMb: DefaultLogFileMb,\n\t\t\t},\n\t\t\tTimeout: cnfg.Duration{Duration: configfile.DefaultTimeout},\n\t\t}, Flags: &Flags{\n\t\t\tFlagSet: flag.NewFlagSet(DefaultName, flag.ExitOnError),\n\t\t\tConfigFile: os.Getenv(DefaultEnvPrefix + \"_CONFIG_FILE\"),\n\t\t\tEnvPrefix: DefaultEnvPrefix,\n\t\t},\n\t}\n}", "func New(hint int) *Queue {\n\treturn &Queue{\n\t\titems: make([]interface{}, 0, hint),\n\t}\n}", "func initMailQueueProducer() (err error) {\n\tnsqCfg := nsq.NewConfig()\n\tnsqCfg.UserAgent = \"tmail.queue\"\n\tNsqQueueProducer, err = nsq.NewProducer(\"127.0.0.1:4150\", nsqCfg)\n\tif Cfg.GetDebugEnabled() {\n\t\tNsqQueueProducer.SetLogger(Log, 0)\n\t} else {\n\t\tNsqQueueProducer.SetLogger(Log, 4)\n\t}\n\treturn err\n}", "func New(name string) (*Queue, error) {\n\tqueue := Queue{Name: name}\n\terr := queue.Init()\n\n\treturn &queue, err\n}", "func New(maxSize int, dropBehavior DropBehavior) *Queue {\n\treturn &Queue{\n\t\tmaxSize: maxSize,\n\t\tdropBehavior: dropBehavior,\n\t}\n}", "func New(delegate Delegate, settings Settings) (*Queue, error) {\n\tconst op = \"pq/new\"\n\n\tif delegate == nil {\n\t\treturn nil, errOp(op).of(InvalidParam).report(\"delegate must not be nil\")\n\t}\n\n\taccessor, errKind := makeAccess(delegate)\n\tif errKind != NoError {\n\t\treturn nil, 
errOp(op).of(errKind)\n\t}\n\n\tpageSize := delegate.PageSize()\n\n\tq := &Queue{\n\t\taccessor: accessor,\n\t\tsettings: settings,\n\t\tpagePool: newPagePool(pageSize),\n\t}\n\n\t// use pointer address as ID for correlating error messages\n\tq.id = queueID(uintptr(unsafe.Pointer(q)))\n\taccessor.quID = q.id\n\n\trootBuf, err := q.accessor.ReadRoot()\n\tif err != nil {\n\t\treturn nil, wrapErr(op, err).of(InitFailed).\n\t\t\treport(\"failed to read queue header\")\n\t}\n\n\troot := castQueueRootPage(rootBuf[:])\n\tif root.version.Get() != queueVersion {\n\t\tcause := &Error{\n\t\t\tkind: InitFailed,\n\t\t\tmsg: fmt.Sprintf(\"queue version %v\", root.version.Get()),\n\t\t}\n\t\treturn nil, wrapErr(op, cause).of(InitFailed)\n\t}\n\n\ttracef(\"open queue: %p (pageSize: %v)\\n\", q, pageSize)\n\ttraceQueueHeader(root)\n\n\tq.version = root.version.Get()\n\tq.hdrOffset = q.accessor.RootFileOffset()\n\tq.onInit()\n\treturn q, nil\n}", "func NewPrinterDefaults()(*PrinterDefaults) {\n m := &PrinterDefaults{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func (r *yandexMessageQueueReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&connectorsv1.YandexMessageQueue{}).\n\t\tComplete(r)\n}", "func (o *RTRCheckAdminCommandStatusParams) SetDefaults() {\n\tvar (\n\t\tsequenceIDDefault = int64(0)\n\t)\n\n\tval := RTRCheckAdminCommandStatusParams{\n\t\tSequenceID: sequenceIDDefault,\n\t}\n\n\tval.timeout = o.timeout\n\tval.Context = o.Context\n\tval.HTTPClient = o.HTTPClient\n\t*o = val\n}", "func New(cfg Config, pubSub pubSub, metrics metricsProvider) (*Queue, error) {\n\tmsgChan, err := pubSub.SubscribeWithOpts(context.Background(), topic, spi.WithPool(cfg.PoolSize))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"subscribe to topic [%s]: %w\", topic, err)\n\t}\n\n\tq := &Queue{\n\t\tpubSub: 
pubSub,\n\t\tmsgChan: msgChan,\n\t\tjsonMarshal: json.Marshal,\n\t\tjsonUnmarshal: json.Unmarshal,\n\t\tmetrics: metrics,\n\t}\n\n\tq.Lifecycle = lifecycle.New(\"operation-queue\",\n\t\tlifecycle.WithStart(q.start),\n\t\tlifecycle.WithStop(q.stop),\n\t)\n\n\tq.Start()\n\n\treturn q, nil\n}", "func NewQueue() Queue {\r\n\tvar empty []int\r\n\treturn Queue{empty, len(empty)}\r\n}", "func New() *Queue {\n\titems := []*item.Item{}\n\tlock := &sync.Mutex{}\n\treturn &Queue{items, lock}\n}", "func NewQueue(\n\tservers []string,\n\topts QueueOptions,\n) (Queue, error) {\n\tq, err := newQueue(servers, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tq.initConnections(servers)\n\tgo q.reportMetrics()\n\n\treturn q, nil\n}", "func NewQueue(storage Storage, reQueueTimeout time.Duration) Queue {\n\tif reQueueTimeout < 1 {\n\t\treQueueTimeout = time.Minute * 30\n\t}\n\n\tname := \"gocelery\"\n\tq := &queue{\n\t\tstorage: storage,\n\t\thead: 0,\n\t\ttail: 0,\n\t\trequeueTimeout: reQueueTimeout,\n\t\tqueuePrefix: fmt.Sprintf(\"%s-queue-\", name),\n\t\tqueueAckPrefix: fmt.Sprintf(\"%s-ack-\", name),\n\t}\n\n\t// restore the old state from the DB\n\tq.loadHeadTail()\n\treturn q\n}", "func (s *Store) CreateQueue(name string, overriddenSettings ...QueueSetting) (QueueMeta, QueueSettings, error) {\n\tif !isValidQueueName(name) {\n\t\treturn QueueMeta{}, QueueSettings{}, ErrInvalidQueueName\n\t}\n\n\tmeta := QueueMeta{Name: name, Created: time.Now()}\n\tsettings := defaultQueueSettings()\n\n\tfor _, setting := range overriddenSettings {\n\t\tif err := setting(&settings); err != nil {\n\t\t\treturn QueueMeta{}, QueueSettings{}, err\n\t\t}\n\t}\n\n\treturn meta, settings, s.db.Update(func(tx *bolt.Tx) error {\n\t\tqueues := tx.Bucket([]byte(\"Queues\"))\n\n\t\tbucket, err := queues.CreateBucket([]byte(name))\n\t\tif err != nil {\n\t\t\tif err == bolt.ErrBucketExists {\n\t\t\t\treturn ErrQueueExists\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\t// Meta\n\n\t\tmetaBucket, err := 
bucket.CreateBucketIfNotExists([]byte(\"Meta\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err = metaBucket.Put([]byte(\"Name\"), []byte(name)); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err = metaBucket.Put([]byte(\"Created\"), encodeTime(meta.Created)); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Settings\n\n\t\tsettingsBucket, err := bucket.CreateBucketIfNotExists([]byte(\"Settings\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err = settingsBucket.Put([]byte(\"LeaseDuration\"), encodeInt(settings.LeaseDuration)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = settingsBucket.Put([]byte(\"MessageRetentionPeriod\"), encodeInt(settings.MessageRetentionPeriod)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = settingsBucket.Put([]byte(\"DelaySeconds\"), encodeInt(settings.DelaySeconds)); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Message Buckets\n\n\t\tmessages, err := bucket.CreateBucketIfNotExists([]byte(\"Messages\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif _, err := messages.CreateBucketIfNotExists([]byte(\"Visible\")); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif _, err := messages.CreateBucketIfNotExists([]byte(\"Leased\")); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif _, err := messages.CreateBucketIfNotExists([]byte(\"Delayed\")); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}", "func DefaultQueueKeysFunc(_ runtime.Object) []string {\n\treturn []string{DefaultQueueKey}\n}", "func New(cfg config.Queue, n notifier) *Queue {\n\tq := &Queue{\n\t\taddCh: make(chan struct{}, cfg.QueueSize),\n\t\tpopCh: make(chan struct{}, cfg.GoRoutinesSize),\n\t\taddMessage: make(chan entity.NotifierMessage, 1),\n\t\tpopMessage: make(chan entity.NotifierMessage, 1),\n\t\tnotifier: n,\n\t}\n\n\tgo q.pop()\n\tgo q.add()\n\n\treturn q\n}", "func New(cb Done, transport http.RoundTripper) *Manager {\n\treturn &Manager{\n\t\tkeys: sets.NewString(),\n\t\tcb: cb,\n\t\ttransport: 
transport,\n\t}\n}", "func newDefaultContainerConfig() ContainerConfig {\n\treturn ContainerConfig{\n\t\tCPU: newMinMaxAllocation(),\n\t\tMemory: newMinMaxAllocation(),\n\t\tBlockRead: newMinMaxAllocation(),\n\t\tBlockWrite: newMinMaxAllocation(),\n\t\tNetworkRx: newMinMaxAllocation(),\n\t\tNetworkTx: newMinMaxAllocation(),\n\t}\n}", "func NewQueue(ctx *pulumi.Context,\n\tname string, args *QueueArgs, opts ...pulumi.ResourceOption) (*Queue, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.HoursOfOperationArn == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'HoursOfOperationArn'\")\n\t}\n\tif args.InstanceArn == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'InstanceArn'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Queue\n\terr := ctx.RegisterResource(\"aws-native:connect:Queue\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (o *ListHetznerSizesParams) SetDefaults() {\n\t// no default values defined for this parameter\n}", "func (t *OpenconfigQos_Qos_Queues) NewQueue(Name string) (*OpenconfigQos_Qos_Queues_Queue, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.Queue == nil {\n\t\tt.Queue = make(map[string]*OpenconfigQos_Qos_Queues_Queue)\n\t}\n\n\tkey := Name\n\n\t// Ensure that this key has not already been used in the\n\t// list. 
Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.Queue[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list Queue\", key)\n\t}\n\n\tt.Queue[key] = &OpenconfigQos_Qos_Queues_Queue{\n\t\tName: &Name,\n\t}\n\n\treturn t.Queue[key], nil\n}", "func GetDefaultManager() *Manager {\n\treturn defaultManager\n}", "func (o *GetFqdnCacheParams) SetDefaults() {\n\t// no default values defined for this parameter\n}", "func NewAPIRequestManager() *APIRequestManager {\n\treturn &APIRequestManager{\n\t\tqueue: make(chan *WorkerItem, 10),\n\t}\n}", "func New() *Queue {\r\n\treturn &Queue{\r\n\t\tdata: []int{},\r\n\t}\r\n}", "func setDefault(c *Config) {\n\tc.Token = \"\"\n\tc.GuildID = \"\"\n}", "func New(ctx context.Context, cfg models.Config) (*Queue, error) {\n\tconn, err := connect(ctx, cfg)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to connect to RabbitMQ \")\n\t}\n\n\tch, err := conn.Channel()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to open a channel \")\n\t}\n\n\t_, err = ch.QueueDeclare(\"ItemQueue\", false, false, false, false, nil)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to declare a queue \")\n\t}\n\n\treturn &Queue{ch, conn}, nil\n}", "func (c *InitManagementClusterInput) Defaults(ctx context.Context) {\n\tc.Config.Defaults()\n\tif c.Scheme == nil {\n\t\tc.Scheme = runtime.NewScheme()\n\t}\n\tif c.NewManagementClusterFn == nil {\n\t\tc.NewManagementClusterFn = func() (ManagementCluster, error) {\n\t\t\treturn kind.NewCluster(ctx, c.ManagementClusterName, c.Scheme)\n\t\t}\n\t}\n}", "func (c *InitManagementClusterInput) Defaults(ctx context.Context) {\n\tc.Config.Defaults()\n\tif c.Scheme == nil {\n\t\tc.Scheme = runtime.NewScheme()\n\t}\n\tif c.NewManagementClusterFn == nil {\n\t\tc.NewManagementClusterFn = func() (ManagementCluster, error) {\n\t\t\treturn kind.NewCluster(ctx, c.ManagementClusterName, c.Scheme)\n\t\t}\n\t}\n}", "func NewSmsTrackingWithDefaults() 
*SmsTracking {\n\tthis := SmsTracking{}\n\treturn &this\n}", "func (cc *ConstructionCreate) defaults() {\n\tif _, ok := cc.mutation.RawProduction(); !ok {\n\t\tv := construction.DefaultRawProduction\n\t\tcc.mutation.SetRawProduction(v)\n\t}\n\tif _, ok := cc.mutation.Production(); !ok {\n\t\tv := construction.DefaultProduction\n\t\tcc.mutation.SetProduction(v)\n\t}\n\tif _, ok := cc.mutation.GetType(); !ok {\n\t\tv := construction.DefaultType\n\t\tcc.mutation.SetType(v)\n\t}\n\tif _, ok := cc.mutation.Level(); !ok {\n\t\tv := construction.DefaultLevel\n\t\tcc.mutation.SetLevel(v)\n\t}\n\tif _, ok := cc.mutation.Modifier(); !ok {\n\t\tv := construction.DefaultModifier\n\t\tcc.mutation.SetModifier(v)\n\t}\n\tif _, ok := cc.mutation.LastUpdated(); !ok {\n\t\tv := construction.DefaultLastUpdated()\n\t\tcc.mutation.SetLastUpdated(v)\n\t}\n\tif _, ok := cc.mutation.NeedRefresh(); !ok {\n\t\tv := construction.DefaultNeedRefresh\n\t\tcc.mutation.SetNeedRefresh(v)\n\t}\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_Queues) NewQueue(Name string) (*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues_Queue, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.Queue == nil {\n\t\tt.Queue = make(map[string]*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues_Queue)\n\t}\n\n\tkey := Name\n\n\t// Ensure that this key has not already been used in the\n\t// list. 
Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.Queue[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list Queue\", key)\n\t}\n\n\tt.Queue[key] = &OpenconfigQos_Qos_Interfaces_Interface_Input_Queues_Queue{\n\t\tName: &Name,\n\t}\n\n\treturn t.Queue[key], nil\n}", "func (o *StorageServiceMetricsHintsInProgressGetParams) SetDefaults() {\n\t// no default values defined for this parameter\n}", "func NewQueue(args []func(http.ResponseWriter, *http.Request) (http.ResponseWriter, *http.Request)) *Queue {\n\tq := &Queue{}\n\tfor _, f := range args {\n\t\tq.list = append(q.list, f)\n\t}\n\treturn q\n}", "func Constructor() MyQueue {\n\treturn Myqueue{list: listNew()}\n}", "func NewQueue() *Queue {\n\treturn &Queue{}\n}", "func NewQueue() *Queue {\n\treturn &Queue{}\n}", "func (o *GetBundleByKeyParams) SetDefaults() {\n\tvar (\n\t\tauditDefault = string(\"NONE\")\n\n\t\tincludedDeletedDefault = bool(false)\n\t)\n\n\tval := GetBundleByKeyParams{\n\t\tAudit: &auditDefault,\n\t\tIncludedDeleted: &includedDeletedDefault,\n\t}\n\n\tval.timeout = o.timeout\n\tval.Context = o.Context\n\tval.HTTPClient = o.HTTPClient\n\t*o = val\n}", "func New(\n\tlogger *zap.SugaredLogger,\n\tflushFunc, closeFunc func(),\n\topts Options,\n) *Queue {\n\tif flushFunc == nil {\n\t\tflushFunc = func() {}\n\t}\n\tif closeFunc == nil {\n\t\tcloseFunc = func() {}\n\t}\n\tif opts.Rate == 0 {\n\t\topts.Rate = 5 * time.Second\n\t}\n\n\tvar counter = int32(0)\n\treturn &Queue{\n\t\tl: logger,\n\n\t\tcloseFunc: closeFunc,\n\t\tflushFunc: flushFunc,\n\n\t\tpendingC: make(chan func(), 3*opts.BatchSize),\n\t\tpending: &counter,\n\t\trate: opts.Rate,\n\t\tbatchSize: opts.BatchSize,\n\n\t\tstopC: make(chan bool, 1),\n\t\tstopped: false,\n\t}\n}", "func (o *QtreeCollectionGetParams) SetDefaults() {\n\tvar (\n\t\treturnRecordsDefault = bool(true)\n\n\t\treturnTimeoutDefault = int64(15)\n\t)\n\n\tval := QtreeCollectionGetParams{\n\t\tReturnRecords: 
&returnRecordsDefault,\n\t\tReturnTimeout: &returnTimeoutDefault,\n\t}\n\n\tval.timeout = o.timeout\n\tval.Context = o.Context\n\tval.HTTPClient = o.HTTPClient\n\t*o = val\n}", "func newQueue() *Queue {\n\tl := list.New()\n\treturn &Queue{Elements: l}\n}" ]
[ "0.6368561", "0.5929313", "0.5928396", "0.59103316", "0.5819523", "0.58130354", "0.5753316", "0.5739753", "0.568538", "0.5661701", "0.5606484", "0.5487868", "0.54643965", "0.5463153", "0.54630005", "0.5449085", "0.544472", "0.54301214", "0.540289", "0.5370558", "0.535047", "0.5341004", "0.5334277", "0.5295785", "0.52576846", "0.52551335", "0.52515256", "0.52515256", "0.52440447", "0.5240541", "0.52186775", "0.52153325", "0.5206354", "0.51808494", "0.5172658", "0.5166101", "0.5157915", "0.5146065", "0.5141454", "0.5139676", "0.5139047", "0.51379216", "0.51322454", "0.5129915", "0.5124572", "0.51156694", "0.5109505", "0.5105227", "0.5103686", "0.51033306", "0.5093908", "0.50899905", "0.5077126", "0.50655735", "0.5062785", "0.5044816", "0.50357693", "0.50288504", "0.5013021", "0.50087065", "0.50034255", "0.49959683", "0.49950668", "0.49800548", "0.4976274", "0.49653354", "0.49580646", "0.49579832", "0.49559307", "0.49503115", "0.49397472", "0.4929721", "0.49250063", "0.4920364", "0.49179143", "0.49174854", "0.49174672", "0.49137354", "0.4901583", "0.48928303", "0.48868117", "0.4878816", "0.48771858", "0.48752692", "0.48736435", "0.48721433", "0.48712355", "0.48712355", "0.48708367", "0.48632345", "0.4862766", "0.48562503", "0.4856162", "0.48510158", "0.4849206", "0.4849206", "0.48466817", "0.48451757", "0.48441622", "0.48435262" ]
0.7690877
0
GetName returns the Name field value
func (o *QueueManager) GetName() string { if o == nil { var ret string return ret } return o.Name }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *SingleSelectFieldField) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (f *Field) GetName() string {\n\treturn formatGoName(f.Name)\n}", "func (e *Entry) GetName() string {\n\tif len(e.NameRaw) > 0 {\n\t\treturn string(e.NameRaw)\n\t}\n\treturn e.Name\n}", "func (o *FormField) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (p *GetField) Name() string { return p.name }", "func (f BinaryField) GetName() string {\n\treturn f.name\n}", "func (s *RecordSchema) GetName() string {\n\treturn s.Name\n}", "func (o *ContentProviderReadDetailed) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (t *Type) GetName() string {\n\treturn formatGoName(t.Name)\n}", "func (m *metadata) GetName() string {\n\treturn m.name\n}", "func (f *Fieldx) Name() string {\n\treturn f.data.Name\n}", "func (d UserData) Name() string {\n\tval := d.ModelData.Get(models.NewFieldName(\"Name\", \"name\"))\n\tif !d.Has(models.NewFieldName(\"Name\", \"name\")) {\n\t\treturn *new(string)\n\t}\n\treturn val.(string)\n}", "func (o *SiteMapReadDetailed) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (o FieldOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v Field) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (l *Label) GetName() string {\n\tif l == nil || l.Name == nil {\n\t\treturn \"\"\n\t}\n\treturn *l.Name\n}", "func (cli *SetWrapper) GetName() (string, error) {\n\tvals, err := cli.set.GetValues()\n\tif nil != err {\n\t\treturn \"\", err\n\t}\n\treturn vals.String(fieldSetName), nil\n}", "func (f *Field) Name() string {\n\treturn f.field.Name\n}", "func (o *AddOn) GetName() (value string, ok bool) {\n\tok = o != nil && o.bitmap_&2048 != 0\n\tif ok {\n\t\tvalue = o.name\n\t}\n\treturn\n}", "func (o *CreateInstance) 
GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (o *DisplayInfo) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (o *Credit1099Payer) GetName() string {\n\tif o == nil || o.Name.Get() == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name.Get()\n}", "func (meta *Metadata) GetName() string {\n\treturn meta.Name\n}", "func (m *Metadata) GetName() string {\n\treturn m.Name\n}", "func (inst *InstUIToFP) GetName() string {\n\treturn inst.Name\n}", "func (f BooleanField) GetName() string {\n\treturn f.name\n}", "func (n Normalizer) GetName() string {\n\treturn n.name\n}", "func (inst *InstFPToUI) GetName() string {\n\treturn inst.Name\n}", "func (t *Type) GetName() string { return t.Name }", "func (s *StickerSet) GetName() (value string) {\n\tif s == nil {\n\t\treturn\n\t}\n\treturn s.Name\n}", "func (f *DialectMessageField) GetName() string {\n\treturn f.name\n}", "func (o *FifoCreateReqWeb) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (o *Content) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (o *Ga4ghChemotherapy) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (request *DomainRegisterRequest) GetName() *string {\n\treturn request.GetStringProperty(\"Name\")\n}", "func (o *StorageSasExpander) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (t *NamedType) GetName() string {\n\treturn t.Name\n}", "func (o *Tag) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (x DashboardEntity) GetName() string {\n\treturn x.Name\n}", "func (o *GovChainMetadata) GetName() string {\n\tif o == nil {\n\t\tvar ret 
string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (o *EventsScalarQuery) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (inst *InstFPExt) GetName() string {\n\treturn inst.Name\n}", "func (o *CredentialsResponseElement) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (ds *Datasource) GetName() string {\n\treturn ds.name\n}", "func (b *Base) GetName() string {\n\treturn b.Name\n}", "func (b *Base) GetName() string {\n\treturn b.Name\n}", "func (m *Metric) GetName() string {\n\tif m == nil || m.Name == nil {\n\t\treturn \"\"\n\t}\n\treturn *m.Name\n}", "func (r Source) GetName() string {\n\treturn r.Name\n}", "func (o *Member) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (l *LabelResult) GetName() string {\n\tif l == nil || l.Name == nil {\n\t\treturn \"\"\n\t}\n\treturn *l.Name\n}", "func (o *Metric) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (o *GridViewUpdate) GetName() string {\n\tif o == nil || IsNil(o.Name) {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (x GenericEntity) GetName() string {\n\treturn x.Name\n}", "func (inst *InstFPToSI) GetName() string {\n\treturn inst.Name\n}", "func (x ExternalEntity) GetName() string {\n\treturn x.Name\n}", "func (b *Being) GetName() string {\n\treturn b.Name.Display\n}", "func (o *BaseItem) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (inst *InstTrunc) GetName() string {\n\treturn inst.Name\n}", "func (c FieldsCollection) Name() *models.Field {\n\treturn c.MustGet(\"Name\")\n}", "func (m *Resource) GetName() string {\n\tif m != nil {\n\t\treturn m.Name\n\t}\n\treturn \"\"\n}", "func (c *Definition) GetName() string {\n\treturn c.Name\n}", 
"func (o *ResourceEntry) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (ct *ComposedTemplate) GetName() string {\n\tif ct.Name != nil {\n\t\treturn *ct.Name\n\t}\n\treturn \"\"\n}", "func (field Field) Name() string {\n\tif len(field.Names) > 0 {\n\t\treturn field.Names[0].String()\n\t}\n\n\t// The field has no name, so we use Type name as the field name.\n\treturn itemTypeName(field.TypeValue.Type).Name\n}", "func (inst *InstFPTrunc) GetName() string {\n\treturn inst.Name\n}", "func (o *LocalDatabaseProvider) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (t *SentryTaggedStruct) GetName() string {\n\treturn \"\"\n}", "func (o *RelatedAssetSerializerWithPermission) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (inst *InstBitCast) GetName() string {\n\treturn inst.Name\n}", "func (p *Plan) GetName() string {\n\tif p == nil || p.Name == nil {\n\t\treturn \"\"\n\t}\n\treturn *p.Name\n}", "func (i *Identity) GetName() string {\n\treturn i.Name\n}", "func (o *Ga4ghTumourboard) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (entry *Entry) GetName() (name ndn.Name) {\n\tc := (*CEntry)(entry)\n\tname.UnmarshalBinary(c.NameV[:c.NameL])\n\treturn name\n}", "func (o *LogsPipelineProcessor) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (x ApmDatabaseInstanceEntity) GetName() string {\n\treturn x.Name\n}", "func (o *EventTypeIn) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (p *Person) GetName() string {\n\treturn p.name\n}", "func (o *SecurityMonitoringRuleCase) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn 
*o.Name\n}", "func (obj *ObjectBase) GetName() string {\n\treturn obj.name\n}", "func (s *FixedSchema) GetName() string {\n\treturn s.Name\n}", "func (o *LogicalDatabaseResponse) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (s *StructField) Name() string {\n\treturn s.name\n}", "func (o *EquipmentBaseSensor) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (s *Source) GetName() string {\n\treturn s.Name\n}", "func (o *EmbeddedUnitModel) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (o *ComponentReferenceDTO) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (o *CreateTemplateRequestEntity) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (o *Replication) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (c *STableField) Name() string {\n\tif len(c.alias) > 0 {\n\t\treturn c.alias\n\t}\n\treturn c.spec.Name()\n}", "func (o *Site3) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (e *EnumEntry) GetName() string { return e.Name }", "func (o *LimitRate) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (inst *InstSExt) GetName() string {\n\treturn inst.Name\n}", "func (x WorkloadEntity) GetName() string {\n\treturn x.Name\n}", "func (v *TypeReference) GetName() (o string) {\n\tif v != nil {\n\t\to = v.Name\n\t}\n\treturn\n}", "func (n Node) GetName() string {\n\treturn n.Name\n}", "func (s *RecursiveSchema) GetName() string {\n\treturn s.Actual.GetName()\n}", "func (o *Account) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn 
ret\n\t}\n\n\treturn o.Name\n}", "func (inst *InstSIToFP) GetName() string {\n\treturn inst.Name\n}", "func (o *View) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (o *RoleWithAccess) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (obj *BlobAttribute) GetName() string {\n\treturn obj.getName()\n}" ]
[ "0.8270152", "0.79907626", "0.7765679", "0.76368356", "0.7604266", "0.7600852", "0.75710016", "0.7564642", "0.7548394", "0.75191027", "0.7517443", "0.7511258", "0.7505484", "0.74717504", "0.7460151", "0.7448915", "0.7438394", "0.7437716", "0.7437274", "0.74279296", "0.74176025", "0.7417425", "0.73988014", "0.7398225", "0.73829347", "0.73791754", "0.7377273", "0.73704046", "0.73656774", "0.7355937", "0.7354654", "0.7351853", "0.73467225", "0.7343459", "0.7326997", "0.73269904", "0.73185426", "0.73109293", "0.7305797", "0.7305369", "0.7301732", "0.7299439", "0.72957164", "0.7289352", "0.7289352", "0.7284731", "0.7284502", "0.72793746", "0.7276065", "0.7272541", "0.72633016", "0.7261334", "0.7261052", "0.72606975", "0.7257407", "0.72564834", "0.72544855", "0.7248087", "0.7244419", "0.7244409", "0.72382206", "0.7232118", "0.7232057", "0.7215937", "0.7212889", "0.72091377", "0.7207112", "0.7204379", "0.720419", "0.72028446", "0.7201746", "0.71989995", "0.7198318", "0.71961045", "0.7195568", "0.7193702", "0.719241", "0.7187218", "0.7186939", "0.7183314", "0.71782213", "0.71694314", "0.7166108", "0.7165342", "0.716414", "0.7163785", "0.7163008", "0.71605873", "0.7160267", "0.71582985", "0.7156023", "0.71534425", "0.7149648", "0.71479595", "0.71464264", "0.7144716", "0.71443707", "0.7144208", "0.7140781", "0.71401983", "0.7139454" ]
0.0
-1
GetNameOk returns a tuple with the Name field value and a boolean to check if the value has been set.
func (o *QueueManager) GetNameOk() (*string, bool) { if o == nil { return nil, false } return &o.Name, true }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *SingleSelectFieldField) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *FormField) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *Credit1099Payer) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name.Get(), o.Name.IsSet()\n}", "func (o *CreateInstance) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *Ga4ghChemotherapy) GetNameOk() (string, bool) {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.Name, true\n}", "func (o *SharedSecretSet3) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *SharedSecretSetCreate) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *Member) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *ConnectorTypeAllOf) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *ConnectorTypeAllOf) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *User) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *EditBankConnectionParams) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *VersionedConnection) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *EnvironmentVariablePair1) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn 
&o.Name, true\n}", "func (o *ApiKey) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *ParameterContextDTO) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *Name) GetNameOk() (int32, bool) {\n\tif o == nil || o.Name == nil {\n\t\tvar ret int32\n\t\treturn ret, false\n\t}\n\treturn *o.Name, true\n}", "func (o *Tag) GetNameOk() (string, bool) {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.Name, true\n}", "func (o *WafPolicyGroup) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *Ga4ghTumourboard) GetNameOk() (string, bool) {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.Name, true\n}", "func (o *GeoipConfig2) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *GovChainMetadata) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *CredentialsResponseElement) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *GridViewUpdate) GetNameOk() (*string, bool) {\n\tif o == nil || IsNil(o.Name) {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *LocalDatabaseProvider) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *CloudInstanceTypeAllOf) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *EventsScalarQuery) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *Ga4ghFeature) GetNameOk() (string, bool) {\n\tif o == nil || o.Name == 
nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.Name, true\n}", "func (o *UpdateRole) GetNameOk() (string, bool) {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.Name, true\n}", "func (o *Commitstatus) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *IamServiceProviderAllOf) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *ProjectApiKey) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *LogicalDatabaseResponse) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *EquipmentBaseSensor) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *Replication) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *UiNodeInputAttributes) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *Account) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *FileversionFileversion) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *WorkflowWorkflowDefinitionAllOf) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *SecurityMonitoringRuleCase) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *EventTypeIn) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *RoleWithAccess) GetNameOk() (*string, 
bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *V0037JobProperties) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *DisplayInfo) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *ComponentReferenceDTO) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *Platform) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *FifoCreateReqWeb) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *BackupUnitProperties) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\n\treturn o.Name, true\n}", "func (o *SyntheticMonitorUpdate) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *ResourceProperties) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\n\treturn o.Name, true\n}", "func (o *Vm) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *ContentProvider2) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *LogsPipelineProcessor) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *BaseItem) GetNameOk() (string, bool) {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.Name, true\n}", "func (o *ViewSampleProject) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *IdentityAccount) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, 
false\n\t}\n\treturn o.Name, true\n}", "func (o *EmbeddedUnitModel) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *OutputProblemAllOf) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *WorkflowServiceItemDefinitionAllOf) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *Site3) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *RackUnitPersonality) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *View) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *Workloadv1Location) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *ResourceEntry) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *KubernetesAddonDefinitionAllOf) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *Transaction) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *Channel) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *Content) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *Project) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *EtherPhysicalPort) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o 
*ContentProviderReadDetailed) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *WorkflowBuildTaskMeta) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *TokenCard) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *CreateProjectApiKeyRequest) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *MicrosoftGraphVerifiedDomain) GetNameOk() (string, bool) {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.Name, true\n}", "func (o *ManualDependency) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *ViewTag) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *RelatedAssetSerializerWithPermission) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *IamUserAuthorization) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *FlowBreadcrumbDTO) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *CreateEventPayloadActions) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *SyntheticsBrowserTest) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *UpdateServerCertificateRequest) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *IamProjectRoleCreate) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn 
&o.Name, true\n}", "func (o *ModelsBackupJobStatusResponse) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *ApplicationLoadBalancerForwardingRuleProperties) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\n\treturn o.Name, true\n}", "func (o *SiteMapReadDetailed) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *V0037Node) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *HyperflexHealthCheckPackageChecksum) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *ViewMilestone) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *OnpremUpgradePhase) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *Product) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *ShowSystem) GetNameOk() (*string, bool) {\n\tif o == nil || IsNil(o.Name) {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *Metric) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *PluginMount) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *MonitorSearchResult) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *IncidentTeamResponseAttributes) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *HttpDelivery) GetNameOk() (*string, bool) {\n\tif o == nil 
{\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *VersionedControllerService) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *CreateTemplateRequestEntity) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *StorageSasExpander) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}" ]
[ "0.78737646", "0.7743277", "0.76780003", "0.76587296", "0.7625746", "0.7576448", "0.7566445", "0.754901", "0.7547995", "0.7547995", "0.7546563", "0.7531728", "0.74989444", "0.7477683", "0.7444606", "0.74311113", "0.74224156", "0.7415727", "0.73950857", "0.73914826", "0.7377958", "0.73696923", "0.7369173", "0.73578656", "0.7353992", "0.7343812", "0.7337855", "0.7334837", "0.73294985", "0.7326691", "0.7326075", "0.73203164", "0.7310711", "0.7306018", "0.7303261", "0.73022515", "0.7293332", "0.7260063", "0.72574806", "0.72475743", "0.72444725", "0.72408175", "0.7237908", "0.7235848", "0.72357357", "0.7235004", "0.7234653", "0.7234264", "0.7230227", "0.7228573", "0.7224436", "0.721976", "0.72129697", "0.72088903", "0.7207785", "0.7206651", "0.72049356", "0.7202382", "0.71983904", "0.7194746", "0.71889794", "0.7187908", "0.7187718", "0.7185701", "0.7182476", "0.7172748", "0.71694", "0.7165475", "0.71627665", "0.71518505", "0.7151123", "0.7149924", "0.71391815", "0.71388054", "0.7131289", "0.71307176", "0.71282786", "0.71266574", "0.71239126", "0.7123541", "0.71223587", "0.71211797", "0.7120866", "0.7118214", "0.711135", "0.71099347", "0.710965", "0.71078134", "0.71058035", "0.71012187", "0.71009016", "0.70980823", "0.70923406", "0.709159", "0.70859045", "0.70719177", "0.7071204", "0.7065237", "0.7059648", "0.7056911", "0.7049086" ]
0.0
-1
SetName sets field value
func (o *QueueManager) SetName(v string) { o.Name = v }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (cli *SetWrapper) SetName(name string) error {\n\treturn cli.set.SetValue(fieldSetName, name)\n}", "func (m *ModelStructRecord) SetField(name string, value reflect.Value) {\n\tif name == \"\" {\n\t\treturn\n\t}\n\tfieldValue := m.FieldValues[name]\n\t//if value.Kind() == reflect.Ptr {\n\t//\tpanic(\"RecordFieldSetError: value cannot be a ptr\")\n\t//}\n\tif fieldValue.IsValid() == false {\n\t\tm.VirtualFieldValues[name] = reflect.New(m.model.GetFieldWithName(name).StructField().Type).Elem()\n\t\tfieldValue = m.VirtualFieldValues[name]\n\t}\n\t//fieldValue = LoopIndirectAndNew(fieldValue)\n\tsafeSet(fieldValue, value)\n}", "func (me *TdtypeType) Set(s string) { (*xsdt.Nmtoken)(me).Set(s) }", "func (o *Wfmagent) SetField(field string, fieldValue interface{}) {\n\t// Get Value object for field\n\ttarget := reflect.ValueOf(o)\n\ttargetField := reflect.Indirect(target).FieldByName(field)\n\n\t// Set value\n\tif fieldValue != nil {\n\t\ttargetField.Set(reflect.ValueOf(fieldValue))\n\t} else {\n\t\t// Must create a new Value (creates **type) then get its element (*type), which will be nil pointer of the appropriate type\n\t\tx := reflect.Indirect(reflect.New(targetField.Type()))\n\t\ttargetField.Set(x)\n\t}\n\n\t// Add field to set field names list\n\tif o.SetFieldNames == nil {\n\t\to.SetFieldNames = make(map[string]bool)\n\t}\n\to.SetFieldNames[field] = true\n}", "func (o *Createshareresponse) SetField(field string, fieldValue interface{}) {\n\t// Get Value object for field\n\ttarget := reflect.ValueOf(o)\n\ttargetField := reflect.Indirect(target).FieldByName(field)\n\n\t// Set value\n\tif fieldValue != nil {\n\t\ttargetField.Set(reflect.ValueOf(fieldValue))\n\t} else {\n\t\t// Must create a new Value (creates **type) then get its element (*type), which will be nil pointer of the appropriate type\n\t\tx := reflect.Indirect(reflect.New(targetField.Type()))\n\t\ttargetField.Set(x)\n\t}\n\n\t// Add field to set field names list\n\tif o.SetFieldNames == nil 
{\n\t\to.SetFieldNames = make(map[string]bool)\n\t}\n\to.SetFieldNames[field] = true\n}", "func (s *Structx) Set(name string, value interface{}) error {\n\tf, ok := s.Field(name)\n\tif !ok {\n\t\treturn ErrNotField\n\t}\n\n\treturn f.Set(value)\n}", "func (r *Wrapper) Set(name string, val any) error {\n\tfv := r.rv.FieldByName(name)\n\tif !fv.IsValid() {\n\t\treturn errors.New(\"field not found\")\n\t}\n\n\tif !fv.CanSet() {\n\t\treturn errors.New(\"field can not set value\")\n\t}\n\n\tfv.Set(reflect.ValueOf(val))\n\treturn nil\n}", "func Set(target, source interface{}) error {\n\tconverter := &Converter{\n\t\tTagName: \"field\",\n\t}\n\n\treturn converter.Convert(source, target)\n}", "func (f *Flow) setName(n string) {\n\tf.Name = n\n}", "func (o *Workitemwrapup) SetField(field string, fieldValue interface{}) {\n\t// Get Value object for field\n\ttarget := reflect.ValueOf(o)\n\ttargetField := reflect.Indirect(target).FieldByName(field)\n\n\t// Set value\n\tif fieldValue != nil {\n\t\ttargetField.Set(reflect.ValueOf(fieldValue))\n\t} else {\n\t\t// Must create a new Value (creates **type) then get its element (*type), which will be nil pointer of the appropriate type\n\t\tx := reflect.Indirect(reflect.New(targetField.Type()))\n\t\ttargetField.Set(x)\n\t}\n\n\t// Add field to set field names list\n\tif o.SetFieldNames == nil {\n\t\to.SetFieldNames = make(map[string]bool)\n\t}\n\to.SetFieldNames[field] = true\n}", "func (o *Actionmap) SetField(field string, fieldValue interface{}) {\n\t// Get Value object for field\n\ttarget := reflect.ValueOf(o)\n\ttargetField := reflect.Indirect(target).FieldByName(field)\n\n\t// Set value\n\tif fieldValue != nil {\n\t\ttargetField.Set(reflect.ValueOf(fieldValue))\n\t} else {\n\t\t// Must create a new Value (creates **type) then get its element (*type), which will be nil pointer of the appropriate type\n\t\tx := reflect.Indirect(reflect.New(targetField.Type()))\n\t\ttargetField.Set(x)\n\t}\n\n\t// Add field to set field names 
list\n\tif o.SetFieldNames == nil {\n\t\to.SetFieldNames = make(map[string]bool)\n\t}\n\to.SetFieldNames[field] = true\n}", "func (df *DataFrame) SetField(name string, val starlark.Value) error {\n\tif df.frozen {\n\t\treturn fmt.Errorf(\"cannot set, DataFrame is frozen\")\n\t}\n\n\tif name == \"columns\" {\n\t\tidx, ok := val.(*Index)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"cannot assign to 'columns', wrong type\")\n\t\t}\n\t\tdf.columns = idx\n\t\treturn nil\n\t}\n\treturn starlark.NoSuchAttrError(name)\n}", "func (me *TactionType) Set(s string) { (*xsdt.Nmtoken)(me).Set(s) }", "func (me *TSAFTPTPaymentType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func SetStrByName(o interface{}, name string, val string) {\n\tif fd := reflect.ValueOf(o).Elem().FieldByName(name); fd.IsValid() {\n\t\tfd.SetString(val)\n\t}\n}", "func (me *TxsdType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TxsdType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func setterName(typeName string) string {\n\treturn fmt.Sprintf(\"Set%s\", accessorName(typeName))\n}", "func (me *TClipValueType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (o *Wfmbushorttermforecastimportcompletetopicbuforecastmodification) SetField(field string, fieldValue interface{}) {\n\t// Get Value object for field\n\ttarget := reflect.ValueOf(o)\n\ttargetField := reflect.Indirect(target).FieldByName(field)\n\n\t// Set value\n\tif fieldValue != nil {\n\t\ttargetField.Set(reflect.ValueOf(fieldValue))\n\t} else {\n\t\t// Must create a new Value (creates **type) then get its element (*type), which will be nil pointer of the appropriate type\n\t\tx := reflect.Indirect(reflect.New(targetField.Type()))\n\t\ttargetField.Set(x)\n\t}\n\n\t// Add field to set field names list\n\tif o.SetFieldNames == nil {\n\t\to.SetFieldNames = make(map[string]bool)\n\t}\n\to.SetFieldNames[field] = true\n}", "func (m *DiseaseMutation) SetField(name string, value ent.Value) error {\n\tswitch name {\n\tcase 
disease.FieldName:\n\t\tv, ok := value.(string)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"unexpected type %T for field %s\", value, name)\n\t\t}\n\t\tm.SetName(v)\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"unknown Disease field %s\", name)\n}", "func (me *TPointsType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TxsdContactType) Set(s string) { (*xsdt.Nmtoken)(me).Set(s) }", "func (me *TFilterValueType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TSAFPTtextTypeMandatoryMax100Car) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TAttlistSupplMeshNameType) Set(s string) { (*xsdt.Token)(me).Set(s) }", "func (me *TrestrictionType) Set(s string) { (*xsdt.Nmtoken)(me).Set(s) }", "func (me *TViewBoxSpecType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (f *Field) Set(l *Location, val string, seps *Delimeters) error {\n\tloc := l.Comp\n\tif loc < 0 {\n\t\tloc = 0\n\t}\n\tif x := loc - len(f.Components) + 1; x > 0 {\n\t\tf.Components = append(f.Components, make([]Component, x)...)\n\t}\n\terr := f.Components[loc].Set(l, val, seps)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.Value = f.encode(seps)\n\treturn nil\n}", "func (o *Oauthclientrequest) SetField(field string, fieldValue interface{}) {\n\t// Get Value object for field\n\ttarget := reflect.ValueOf(o)\n\ttargetField := reflect.Indirect(target).FieldByName(field)\n\n\t// Set value\n\tif fieldValue != nil {\n\t\ttargetField.Set(reflect.ValueOf(fieldValue))\n\t} else {\n\t\t// Must create a new Value (creates **type) then get its element (*type), which will be nil pointer of the appropriate type\n\t\tx := reflect.Indirect(reflect.New(targetField.Type()))\n\t\ttargetField.Set(x)\n\t}\n\n\t// Add field to set field names list\n\tif o.SetFieldNames == nil {\n\t\to.SetFieldNames = make(map[string]bool)\n\t}\n\to.SetFieldNames[field] = true\n}", "func (me *TxsdInvoiceType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TSAFPTtextTypeMandatoryMax90Car) Set(s string) { 
(*xsdt.String)(me).Set(s) }", "func (me *TKerningValue) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TSAFPTtextTypeMandatoryMax50Car) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (f *Fields) Set(s []*Field)", "func (me *TMaskValueType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (o *Integrationtype) SetField(field string, fieldValue interface{}) {\n\t// Get Value object for field\n\ttarget := reflect.ValueOf(o)\n\ttargetField := reflect.Indirect(target).FieldByName(field)\n\n\t// Set value\n\tif fieldValue != nil {\n\t\ttargetField.Set(reflect.ValueOf(fieldValue))\n\t} else {\n\t\t// Must create a new Value (creates **type) then get its element (*type), which will be nil pointer of the appropriate type\n\t\tx := reflect.Indirect(reflect.New(targetField.Type()))\n\t\ttargetField.Set(x)\n\t}\n\n\t// Add field to set field names list\n\tif o.SetFieldNames == nil {\n\t\to.SetFieldNames = make(map[string]bool)\n\t}\n\to.SetFieldNames[field] = true\n}", "func (me *TSAFPTtextTypeMandatoryMax35Car) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TSAFPTtextTypeMax40Car) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TSAFPTtextTypeMandatoryMax255Car) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TSAFPTtextTypeMandatoryMax200Car) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TNumberOrPercentageType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (o *Contentmanagementworkspacedocumentstopicdocumentdatav2) SetField(field string, fieldValue interface{}) {\n\t// Get Value object for field\n\ttarget := reflect.ValueOf(o)\n\ttargetField := reflect.Indirect(target).FieldByName(field)\n\n\t// Set value\n\tif fieldValue != nil {\n\t\ttargetField.Set(reflect.ValueOf(fieldValue))\n\t} else {\n\t\t// Must create a new Value (creates **type) then get its element (*type), which will be nil pointer of the appropriate type\n\t\tx := reflect.Indirect(reflect.New(targetField.Type()))\n\t\ttargetField.Set(x)\n\t}\n\n\t// 
Add field to set field names list\n\tif o.SetFieldNames == nil {\n\t\to.SetFieldNames = make(map[string]bool)\n\t}\n\to.SetFieldNames[field] = true\n}", "func (me *TSAFPTtextTypeMandatoryMax254Car) Set(s string) { (*xsdt.String)(me).Set(s) }", "func ExamplePerson_SetName() {\n\tperson := NewPerson(\"alice\")\n\tperson.SetName(\"bob\")\n}", "func (m *DiseasetypeMutation) SetField(name string, value ent.Value) error {\n\tswitch name {\n\tcase diseasetype.FieldDiseaseTypeName:\n\t\tv, ok := value.(string)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"unexpected type %T for field %s\", value, name)\n\t\t}\n\t\tm.SetDiseaseTypeName(v)\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"unknown Diseasetype field %s\", name)\n}", "func (me *TSAFPTtextTypeMandatoryMax210Car) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (m *SeriesMutation) SetField(name string, value ent.Value) error {\n\tswitch name {\n\tcase series.FieldName:\n\t\tv, ok := value.(string)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"unexpected type %T for field %s\", value, name)\n\t\t}\n\t\tm.SetName(v)\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"unknown Series field %s\", name)\n}", "func (o *Webchatmemberinfo) SetField(field string, fieldValue interface{}) {\n\t// Get Value object for field\n\ttarget := reflect.ValueOf(o)\n\ttargetField := reflect.Indirect(target).FieldByName(field)\n\n\t// Set value\n\tif fieldValue != nil {\n\t\ttargetField.Set(reflect.ValueOf(fieldValue))\n\t} else {\n\t\t// Must create a new Value (creates **type) then get its element (*type), which will be nil pointer of the appropriate type\n\t\tx := reflect.Indirect(reflect.New(targetField.Type()))\n\t\ttargetField.Set(x)\n\t}\n\n\t// Add field to set field names list\n\tif o.SetFieldNames == nil {\n\t\to.SetFieldNames = make(map[string]bool)\n\t}\n\to.SetFieldNames[field] = true\n}", "func (me *TSAFTaxonomyCode) Set(s string) { (*xsdt.Integer)(me).Set(s) }", "func (me *TSAFPTHashControl) Set(s string) { (*xsdt.String)(me).Set(s) }", "func 
SetField(obj interface{}, name string, value interface{}) error {\n\tstructValue := reflect.ValueOf(obj).Elem()\n\tstructFieldValue := structValue.FieldByName(name)\n\n\tif !structFieldValue.IsValid() {\n\t\treturn fmt.Errorf(\"No such field: %s in obj\", name)\n\t}\n\n\tif !structFieldValue.CanSet() {\n\t\treturn fmt.Errorf(\"Cannot set %s field value\", name)\n\t}\n\n\tstructFieldType := structFieldValue.Type()\n\tval := reflect.ValueOf(value)\n\tif structFieldType != val.Type() {\n\t\treturn errors.New(\"Provided value type didn't match obj field type\")\n\t}\n\n\tstructFieldValue.Set(val)\n\treturn nil\n}", "func (me *TStrokeMiterLimitValueType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (f *Field) Set(val interface{}) error {\n\t// we can't set unexported fields, so be sure this field is exported\n\tif !f.IsExported() {\n\t\treturn errNotExported\n\t}\n\n\t// do we get here? not sure...\n\tif !f.value.CanSet() {\n\t\treturn errNotSettable\n\t}\n\n\tgiven := reflect.ValueOf(val)\n\n\tif f.value.Kind() != given.Kind() {\n\t\treturn fmt.Errorf(\"wrong kind. 
got: %s want: %s\", given.Kind(), f.value.Kind())\n\t}\n\n\tf.value.Set(given)\n\treturn nil\n}", "func (me *TSAFPTtextTypeMandatoryMax70Car) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TdurationType) Set(s string) { (*xsdt.Nmtoken)(me).Set(s) }", "func (m *NametitleMutation) SetField(name string, value ent.Value) error {\n\tswitch name {\n\tcase nametitle.FieldTitle:\n\t\tv, ok := value.(string)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"unexpected type %T for field %s\", value, name)\n\t\t}\n\t\tm.SetTitle(v)\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"unknown Nametitle field %s\", name)\n}", "func (me *TSAFPTtextTypeMandatoryMax20Car) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TshapeEnumType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TClipFillRuleType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (e *Engine) setName(name string) error {\n\tindex, err := naming.ExtractIndex(name, \"-\", 1)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"couldn't get index from device name: %v\", err)\n\t\treturn err\n\t}\n\te.Name = name\n\te.Index = index\n\treturn nil\n}", "func (o *Directrouting) SetField(field string, fieldValue interface{}) {\n\t// Get Value object for field\n\ttarget := reflect.ValueOf(o)\n\ttargetField := reflect.Indirect(target).FieldByName(field)\n\n\t// Set value\n\tif fieldValue != nil {\n\t\ttargetField.Set(reflect.ValueOf(fieldValue))\n\t} else {\n\t\t// Must create a new Value (creates **type) then get its element (*type), which will be nil pointer of the appropriate type\n\t\tx := reflect.Indirect(reflect.New(targetField.Type()))\n\t\ttargetField.Set(x)\n\t}\n\n\t// Add field to set field names list\n\tif o.SetFieldNames == nil {\n\t\to.SetFieldNames = make(map[string]bool)\n\t}\n\to.SetFieldNames[field] = true\n}", "func (o *Posttextresponse) SetField(field string, fieldValue interface{}) {\n\t// Get Value object for field\n\ttarget := reflect.ValueOf(o)\n\ttargetField := 
reflect.Indirect(target).FieldByName(field)\n\n\t// Set value\n\tif fieldValue != nil {\n\t\ttargetField.Set(reflect.ValueOf(fieldValue))\n\t} else {\n\t\t// Must create a new Value (creates **type) then get its element (*type), which will be nil pointer of the appropriate type\n\t\tx := reflect.Indirect(reflect.New(targetField.Type()))\n\t\ttargetField.Set(x)\n\t}\n\n\t// Add field to set field names list\n\tif o.SetFieldNames == nil {\n\t\to.SetFieldNames = make(map[string]bool)\n\t}\n\to.SetFieldNames[field] = true\n}", "func (me *TxsdThoroughfareDependentThoroughfares) Set(s string) { (*xsdt.Nmtoken)(me).Set(s) }", "func (me *TartIdType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TClipPathValueType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (o *Digitalcondition) SetField(field string, fieldValue interface{}) {\n\t// Get Value object for field\n\ttarget := reflect.ValueOf(o)\n\ttargetField := reflect.Indirect(target).FieldByName(field)\n\n\t// Set value\n\tif fieldValue != nil {\n\t\ttargetField.Set(reflect.ValueOf(fieldValue))\n\t} else {\n\t\t// Must create a new Value (creates **type) then get its element (*type), which will be nil pointer of the appropriate type\n\t\tx := reflect.Indirect(reflect.New(targetField.Type()))\n\t\ttargetField.Set(x)\n\t}\n\n\t// Add field to set field names list\n\tif o.SetFieldNames == nil {\n\t\to.SetFieldNames = make(map[string]bool)\n\t}\n\to.SetFieldNames[field] = true\n}", "func (me *TNumbersType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TAttlistDescriptorNameType) Set(s string) { (*xsdt.Token)(me).Set(s) }", "func (me *TSAFPTtextTypeMandatoryMax10Car) Set(s string) { (*xsdt.String)(me).Set(s) }", "func setField(obj interface{}, name string, value interface{}) error {\n\tstructValue := reflect.ValueOf(obj).Elem()\n\tstructType := reflect.TypeOf(obj).Elem()\n\tstructFieldValue := structValue.FieldByName(name)\n\n\tfor i := 0; i < structType.NumField(); i++ {\n\t\tfield := 
structType.Field(i)\n\t\ttag := field.Tag.Get(\"query\")\n\n\t\tif tag == name {\n\t\t\tstructFieldValue = structValue.Field(i)\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !structFieldValue.IsValid() || !structFieldValue.CanSet() {\n\t\treturn errors.New(fmt.Sprintf(\"%s is not allowed\", name))\n\t}\n\n\tstructFieldType := structFieldValue.Type()\n\tval := reflect.ValueOf(value)\n\n\tif structFieldType.Kind() == reflect.Bool {\n\t\tswitch val.String() {\n\t\tcase \"false\":\n\t\t\tstructFieldValue.SetBool(false)\n\t\t\treturn nil\n\t\tcase \"true\":\n\t\t\tstructFieldValue.SetBool(true)\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn errors.New(fmt.Sprintf(\"%s must be a boolean\", name))\n\t\t}\n\t} else {\n\t\tstructFieldValue.Set(val)\n\t\treturn nil\n\t}\n}", "func (m *ClassifierMutation) SetField(name string, value ent.Value) error {\n\tswitch name {\n\tcase classifier.FieldEQUIPMENTCLASSIFIER:\n\t\tv, ok := value.(string)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"unexpected type %T for field %s\", value, name)\n\t\t}\n\t\tm.SetEQUIPMENTCLASSIFIER(v)\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"unknown Classifier field %s\", name)\n}", "func (me *TPortlistType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TMediaDescType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (m *CompanyMutation) SetField(name string, value ent.Value) error {\n\tswitch name {\n\tcase company.FieldName:\n\t\tv, ok := value.(string)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"unexpected type %T for field %s\", value, name)\n\t\t}\n\t\tm.SetName(v)\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"unknown Company field %s\", name)\n}", "func (o *Outcomequantilecondition) SetField(field string, fieldValue interface{}) {\n\t// Get Value object for field\n\ttarget := reflect.ValueOf(o)\n\ttargetField := reflect.Indirect(target).FieldByName(field)\n\n\t// Set value\n\tif fieldValue != nil {\n\t\ttargetField.Set(reflect.ValueOf(fieldValue))\n\t} else {\n\t\t// Must create a new Value (creates 
**type) then get its element (*type), which will be nil pointer of the appropriate type\n\t\tx := reflect.Indirect(reflect.New(targetField.Type()))\n\t\ttargetField.Set(x)\n\t}\n\n\t// Add field to set field names list\n\tif o.SetFieldNames == nil {\n\t\to.SetFieldNames = make(map[string]bool)\n\t}\n\to.SetFieldNames[field] = true\n}", "func (me *TxsdAnimateTransformTypeType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (o *Wfmintradaydataupdatetopicintradayhistoricalqueuedata) SetField(field string, fieldValue interface{}) {\n\t// Get Value object for field\n\ttarget := reflect.ValueOf(o)\n\ttargetField := reflect.Indirect(target).FieldByName(field)\n\n\t// Set value\n\tif fieldValue != nil {\n\t\ttargetField.Set(reflect.ValueOf(fieldValue))\n\t} else {\n\t\t// Must create a new Value (creates **type) then get its element (*type), which will be nil pointer of the appropriate type\n\t\tx := reflect.Indirect(reflect.New(targetField.Type()))\n\t\ttargetField.Set(x)\n\t}\n\n\t// Add field to set field names list\n\tif o.SetFieldNames == nil {\n\t\to.SetFieldNames = make(map[string]bool)\n\t}\n\to.SetFieldNames[field] = true\n}", "func (m *SettlementMutation) SetField(name string, value ent.Value) error {\n\tswitch name {\n\tcase settlement.FieldX:\n\t\tv, ok := value.(int)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"unexpected type %T for field %s\", value, name)\n\t\t}\n\t\tm.SetX(v)\n\t\treturn nil\n\tcase settlement.FieldY:\n\t\tv, ok := value.(int)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"unexpected type %T for field %s\", value, name)\n\t\t}\n\t\tm.SetY(v)\n\t\treturn nil\n\tcase settlement.FieldIsCity:\n\t\tv, ok := value.(bool)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"unexpected type %T for field %s\", value, name)\n\t\t}\n\t\tm.SetIsCity(v)\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"unknown Settlement field %s\", name)\n}", "func (me *TSAFPTtextTypeMandatoryMax30Car) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TSAFPTtextTypeMandatoryMax60Car) 
Set(s string) { (*xsdt.String)(me).Set(s) }", "func (o *Appevent) SetField(field string, fieldValue interface{}) {\n\t// Get Value object for field\n\ttarget := reflect.ValueOf(o)\n\ttargetField := reflect.Indirect(target).FieldByName(field)\n\n\t// Set value\n\tif fieldValue != nil {\n\t\ttargetField.Set(reflect.ValueOf(fieldValue))\n\t} else {\n\t\t// Must create a new Value (creates **type) then get its element (*type), which will be nil pointer of the appropriate type\n\t\tx := reflect.Indirect(reflect.New(targetField.Type()))\n\t\ttargetField.Set(x)\n\t}\n\n\t// Add field to set field names list\n\tif o.SetFieldNames == nil {\n\t\to.SetFieldNames = make(map[string]bool)\n\t}\n\to.SetFieldNames[field] = true\n}", "func setField(obj interface{}, name string, value interface{}) error {\n\tstructValue := reflect.ValueOf(obj).Elem()\n\tstructFieldValue := structValue.FieldByName(name)\n\n\tif !structFieldValue.IsValid() {\n\t\treturn fmt.Errorf(\"No such field: %s in obj\", name)\n\t}\n\n\tif !structFieldValue.CanSet() {\n\t\treturn fmt.Errorf(\"Cannot set %s field value\", name)\n\t}\n\n\tstructFieldType := structFieldValue.Type()\n\n\tvar val reflect.Value\n\tswitch structFieldType.String() {\n\tcase \"int\":\n\t\ti, _ := strconv.Atoi(value.(js.Value).String())\n\t\tval = reflect.ValueOf(i)\n\t\tbreak\n\tcase \"float64\":\n\t\ti, _ := strconv.ParseFloat(value.(js.Value).String(), 64)\n\t\tval = reflect.ValueOf(i)\n\t\tbreak\n\tcase \"bool\":\n\t\ti, _ := strconv.ParseBool(value.(js.Value).String())\n\t\tval = reflect.ValueOf(i)\n\t\tbreak\n\tcase \"string\":\n\t\tval = reflect.ValueOf(value.(js.Value).String())\n\t\tbreak\n\tdefault:\n\t\tval = reflect.ValueOf(value)\n\t\tbreak\n\t}\n\n\tstructFieldValue.Set(val)\n\treturn nil\n}", "func (me *TOpacityValueType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TScriptType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TContentTypeType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me 
*TSAFTPTSourceBilling) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TSAFPTGLAccountID) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (s *StructField) Set(v interface{}) error {\n\tif s.field.PkgPath != \"\" {\n\t\treturn errors.New(\"Field is not exported\")\n\t}\n\n\tif !s.CanSet() {\n\t\treturn errors.New(\"Field cannot be set\")\n\t}\n\n\tgiven := reflect.ValueOf(v)\n\n\tif s.value.Kind() != given.Kind() {\n\t\treturn errors.New(\"Field and value kind don't match\")\n\t}\n\n\ts.value.Set(given)\n\treturn nil\n}", "func (me *TransformListType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (blood *bloodGeneral) Set(name string, value float64) {\n\te := reflect.ValueOf(blood).Elem()\n\tfield := e.FieldByName(name)\n\tif field.IsValid() && field.CanSet() && field.Kind() == reflect.Float64 {\n\t\tfield.SetFloat(value)\n\t} else {\n\t\tpanic(\"Cannot find \" + name + \" in BloodGeneral struct\")\n\t}\n\n\treturn\n}", "func (m *StreetMutation) SetField(name string, value ent.Value) error {\n\tswitch name {\n\tcase street.FieldName:\n\t\tv, ok := value.(string)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"unexpected type %T for field %s\", value, name)\n\t\t}\n\t\tm.SetName(v)\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"unknown Street field %s\", name)\n}", "func (m *BedtypeMutation) SetField(name string, value ent.Value) error {\n\tswitch name {\n\tcase bedtype.FieldBedtypename:\n\t\tv, ok := value.(string)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"unexpected type %T for field %s\", value, name)\n\t\t}\n\t\tm.SetBedtypename(v)\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"unknown Bedtype field %s\", name)\n}", "func (me *TxsdImpactSimpleContentExtensionType) Set(s string) { (*xsdt.Nmtoken)(me).Set(s) }", "func (me *TxsdMovementType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (m *CategoryMutation) SetField(name string, value ent.Value) error {\n\tswitch name {\n\tcase category.FieldName:\n\t\tv, ok := value.(string)\n\t\tif !ok {\n\t\t\treturn 
fmt.Errorf(\"unexpected type %T for field %s\", value, name)\n\t\t}\n\t\tm.SetName(v)\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"unknown Category field %s\", name)\n}", "func (me *TcoordinatesType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TxsdFeMorphologyTypeOperator) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (me *TxsdPremiseNumberNumberType) Set(s string) { (*xsdt.Nmtoken)(me).Set(s) }", "func (f *FieldHeaderNames) Set(value string) error {\n\t// When arguments are passed through YAML, escaped double quotes\n\t// might be added to this string, and they would break the last\n\t// key/value pair. This ensures the string is clean.\n\tvalue = strings.Trim(value, \"\\\"\")\n\n\tfields := strings.Fields(value)\n\n\tfor _, field := range fields {\n\t\tn := strings.SplitN(field, \"=\", 2)\n\t\t(*f)[n[0]] = n[1]\n\t}\n\n\treturn nil\n}", "func (m *LabelActionBase) SetName(value *string)() {\n err := m.GetBackingStore().Set(\"name\", value)\n if err != nil {\n panic(err)\n }\n}" ]
[ "0.6978644", "0.67127097", "0.6554158", "0.6535701", "0.648949", "0.6488116", "0.64872724", "0.64611167", "0.641035", "0.63914317", "0.6386742", "0.6377272", "0.63651955", "0.6351144", "0.6337588", "0.6311778", "0.6311778", "0.6304745", "0.62829113", "0.6272751", "0.6268671", "0.6266285", "0.6265865", "0.6253639", "0.62447006", "0.6232831", "0.62251157", "0.6221833", "0.62189144", "0.6212537", "0.62094635", "0.6209313", "0.6204735", "0.6202101", "0.61937445", "0.6192035", "0.6189462", "0.61840737", "0.61809164", "0.6176772", "0.61750937", "0.61733145", "0.6157459", "0.61512333", "0.61440337", "0.61427486", "0.61419475", "0.6137599", "0.6128708", "0.6127297", "0.612435", "0.61233217", "0.6111386", "0.61098164", "0.610855", "0.61011183", "0.6099003", "0.60969484", "0.6093712", "0.6089691", "0.6086111", "0.60800856", "0.60796374", "0.60786676", "0.60735166", "0.6071955", "0.6071905", "0.60712224", "0.60553825", "0.6054341", "0.6053531", "0.6053113", "0.6051501", "0.60495913", "0.6046556", "0.60453755", "0.6043941", "0.6043066", "0.60420644", "0.60407996", "0.6040585", "0.6040419", "0.6040081", "0.6036135", "0.6031209", "0.6027033", "0.602659", "0.6026208", "0.6025597", "0.60237217", "0.6018427", "0.60161626", "0.601538", "0.6014008", "0.6012201", "0.6012091", "0.6008574", "0.600484", "0.60046756", "0.6004388", "0.6003529" ]
0.0
-1
GetClusters returns the Clusters field value
func (o *QueueManager) GetClusters() []string { if o == nil { var ret []string return ret } return o.Clusters }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Config) GetClusters(ctx context.Context, quiet bool, filterMap map[string]string, clustersName ...string) (string, error) {\n\tc.Logger.Debugf(\"Sending parameters to server to get the clusters %q\", strings.Join(clustersName, \", \"))\n\n\tfilter := MapToSlice(filterMap)\n\n\treturn c.RunGRPCnRESTFunc(\"get\", true,\n\t\tfunc() (string, error) {\n\t\t\treturn c.getClustersGRPC(ctx, quiet, filter, clustersName...)\n\t\t},\n\t\tfunc() (string, error) {\n\t\t\treturn c.getClustersHTTP(quiet, filter, clustersName...)\n\t\t})\n}", "func (c *ClientImpl) GetClusters(ctx context.Context, hcpHostURL string) (models.ClusterResp, error) {\n\tspan, _ := opentracing.StartSpanFromContext(ctx, \"Get Clusters\")\n\tdefer span.Finish()\n\n\tsession, err := c.getSession(ctx, hcpHostURL, hcpUserName, hcpPassword)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, err\n\t}\n\n\tstatus = Failure\n\tmonitor := metrics.StartExternalCall(externalSvcName, \"Fetch Clusters\")\n\tdefer func() { monitor.RecordWithStatus(status) }()\n\n\tresp, err := mlopsHttp.ExecuteHTTPRequest(\n\t\tctx,\n\t\tc.client,\n\t\thcpHostURL+clusterPathV2,\n\t\thttp.MethodGet,\n\t\tmap[string]string{sessionHeader: session},\n\t\tbytes.NewReader(nil),\n\t)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, errors.Wrapf(err, \"while fetching clusters in MLOps controller platform.\")\n\t}\n\tresp.Body.Close()\n\n\tstatus = Success\n\n\terr = c.deleteSession(ctx, hcpHostURL, session)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, err\n\t}\n\n\tclustersResp := models.ClusterResp{}\n\tjson.NewDecoder(resp.Body).Decode(&clustersResp)\n\n\treturn clustersResp, nil\n}", "func (a *Client) GetClusters(params *GetClustersParams, opts ...ClientOption) (*GetClustersOK, *GetClustersMultiStatus, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetClustersParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"GetClusters\",\n\t\tMethod: 
\"GET\",\n\t\tPathPattern: \"/kubernetes-protection/entities/kubernetes/clusters/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\", \"application/octet-stream\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetClustersReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tswitch value := result.(type) {\n\tcase *GetClustersOK:\n\t\treturn value, nil, nil\n\tcase *GetClustersMultiStatus:\n\t\treturn nil, value, nil\n\t}\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for kubernetes_protection: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (s *RaftDatabase) Clusters() int {\n\treturn GetArg(s.name, \"clusters\").Int(s.clusters)\n}", "func (o AppProjectSpecSyncWindowsOutput) Clusters() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v AppProjectSpecSyncWindows) []string { return v.Clusters }).(pulumi.StringArrayOutput)\n}", "func Clusters() (clusters map[string][]string) {\n\tclusters = make(map[string][]string)\n\tif addr := AccessConsulAddr(); addr != \"\" && Region() != \"\" {\n\t\treturn getClustersFromConsul(addr, Region())\n\t}\n\tcs := Get(\"Key-ClusterMgrCluster\").(map[string]string)\n\tfor key, value := range cs {\n\t\tclusters[key] = strings.Split(value, \" \")\n\t}\n\treturn\n}", "func (a ClustersAPI) Get(clusterID string) (httpmodels.GetResp, error) {\n\tvar clusterInfo httpmodels.GetResp\n\n\tdata := struct {\n\t\tClusterID string `json:\"cluster_id,omitempty\" url:\"cluster_id,omitempty\"`\n\t}{\n\t\tclusterID,\n\t}\n\tresp, err := a.Client.performQuery(http.MethodGet, 
\"/clusters/get\", data, nil)\n\tif err != nil {\n\t\treturn clusterInfo, err\n\t}\n\n\terr = json.Unmarshal(resp, &clusterInfo)\n\treturn clusterInfo, err\n}", "func (c *cloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func (e *ECS) ListClusters(req *ListClustersReq) (\n\t*ListClustersResp, error) {\n\tif req == nil {\n\t\treturn nil, fmt.Errorf(\"The req params cannot be nil\")\n\t}\n\n\tparams := makeParams(\"ListClusters\")\n\tif req.MaxResults > 0 {\n\t\tparams[\"maxResults\"] = strconv.Itoa(int(req.MaxResults))\n\t}\n\tif req.NextToken != \"\" {\n\t\tparams[\"nextToken\"] = req.NextToken\n\t}\n\n\tresp := new(ListClustersResp)\n\tif err := e.query(params, resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func (h *httpCloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func (c *cloud) Clusters() (cloudprovider.Clusters, bool) {\n\tklog.V(4).Infof(\"Clusters called\")\n\treturn nil, false\n}", "func (a ClustersAPI) List() ([]httpmodels.GetResp, error) {\n\tvar clusterList = struct {\n\t\tClusters []httpmodels.GetResp `json:\"clusters,omitempty\" url:\"clusters,omitempty\"`\n\t}{}\n\n\tresp, err := a.Client.performQuery(http.MethodGet, \"/clusters/list\", nil, nil)\n\tif err != nil {\n\t\treturn clusterList.Clusters, err\n\t}\n\n\terr = json.Unmarshal(resp, &clusterList)\n\treturn clusterList.Clusters, err\n}", "func (ch *ClusterHandler) GetClusters() app.Adapter {\n\treturn func(h http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tcontext := app.GetRequestContext(r)\n\n\t\t\tlogger := log.WithFields(log.Fields{\"package\": \"handlers\", \"event\": \"get_clusters\", \"request\": context.RequestId()})\n\n\t\t\tclusters, err := ch.service.GetClusters(context.RequestId())\n\t\t\tif err != nil {\n\t\t\t\tresponse := ErrorResponseAttributes{Title: \"get_clusters_error\", Detail: 
err.Error()}\n\t\t\t\tlogger.Error(err.Error())\n\t\t\t\trespondWithJson(w, newErrorResponse(&response, context.RequestId()), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trespondWithJson(w, newClustersResponse(clusters, context.RequestId()), http.StatusOK)\n\t\t})\n\t}\n}", "func (c *Client) GetClusters(ctx context.Context) <-chan GetClusterResult {\n\t// TODO Make the concurrency configurable\n\tconcurrency := int(math.Min(5, float64(runtime.NumCPU())))\n\tresults := make(chan GetClusterResult, concurrency)\n\n\tclusterNames, err := c.GetClusterNames(ctx)\n\tif err != nil {\n\t\tclose(results)\n\t\treturn results\n\t}\n\n\tvar wg sync.WaitGroup\n\n\tgo func() {\n\t\tdefer close(results)\n\t\tfor _, clusterName := range clusterNames {\n\t\t\twg.Add(1)\n\t\t\tgo func(name string) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tcluster, err := c.GetCluster(ctx, name)\n\t\t\t\tresult := GetClusterResult{Cluster: cluster, Error: err}\n\t\t\t\tresults <- result\n\t\t\t}(clusterName)\n\t\t}\n\t\twg.Wait()\n\t}()\n\n\treturn results\n}", "func (cloud *Cloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func (o *QueueManager) GetClustersOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Clusters, true\n}", "func (client OpenShiftManagedClustersClient) Get(ctx context.Context, resourceGroupName string, resourceName string) (result v20180930preview.OpenShiftManagedCluster, err error) {\n\treq, err := client.GetPreparer(ctx, resourceGroupName, resourceName)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"containerservice.OpenShiftManagedClustersClient\", \"Get\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.GetSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"containerservice.OpenShiftManagedClustersClient\", \"Get\", resp, \"Failure sending 
request\")\n\t\treturn\n\t}\n\n\tresult, err = client.GetResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"containerservice.OpenShiftManagedClustersClient\", \"Get\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func FetchClusters(c *gin.Context) {\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, \"Start listing clusters\")\n\n\tvar clusters []banzaiSimpleTypes.ClusterSimple\n\tvar response []*cloud.ClusterRepresentation\n\tdatabase.Find(&clusters)\n\n\tif len(clusters) <= 0 {\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, \"No clusters found\")\n\t\tcloud.SetResponseBodyJson(c, http.StatusNotFound, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusNotFound,\n\t\t\tcloud.JsonKeyMessage: \"No clusters found!\",\n\t\t})\n\t\treturn\n\t}\n\n\tfor _, cl := range clusters {\n\t\tclust := cloud.GetClusterRepresentation(&cl)\n\t\tif clust != nil {\n\t\t\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, fmt.Sprintf(\"Append %#v cluster representation to response\", clust))\n\t\t\tresponse = append(response, clust)\n\t\t}\n\n\t}\n\tcloud.SetResponseBodyJson(c, http.StatusOK, gin.H{\n\t\tcloud.JsonKeyStatus: http.StatusOK,\n\t\tcloud.JsonKeyData: response,\n\t})\n}", "func (a *DefaultApiService) ListClusters(ctx _context.Context, localVarOptionals *ListClustersOpts) (Clusters, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue Clusters\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/clusters\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif localVarOptionals != nil && localVarOptionals.Id.IsSet() {\n\t\tt:=localVarOptionals.Id.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice 
{\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"id[]\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"id[]\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.NotId.IsSet() {\n\t\tt:=localVarOptionals.NotId.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"!id[]\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"!id[]\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.StoryCountMin.IsSet() {\n\t\tlocalVarQueryParams.Add(\"story_count.min\", parameterToString(localVarOptionals.StoryCountMin.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.StoryCountMax.IsSet() {\n\t\tlocalVarQueryParams.Add(\"story_count.max\", parameterToString(localVarOptionals.StoryCountMax.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.TimeStart.IsSet() {\n\t\tlocalVarQueryParams.Add(\"time.start\", parameterToString(localVarOptionals.TimeStart.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.TimeEnd.IsSet() {\n\t\tlocalVarQueryParams.Add(\"time.end\", parameterToString(localVarOptionals.TimeEnd.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.EarliestStoryStart.IsSet() {\n\t\tlocalVarQueryParams.Add(\"earliest_story.start\", parameterToString(localVarOptionals.EarliestStoryStart.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.EarliestStoryEnd.IsSet() {\n\t\tlocalVarQueryParams.Add(\"earliest_story.end\", parameterToString(localVarOptionals.EarliestStoryEnd.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.LatestStoryStart.IsSet() {\n\t\tlocalVarQueryParams.Add(\"latest_story.start\", 
parameterToString(localVarOptionals.LatestStoryStart.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.LatestStoryEnd.IsSet() {\n\t\tlocalVarQueryParams.Add(\"latest_story.end\", parameterToString(localVarOptionals.LatestStoryEnd.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.LocationCountry.IsSet() {\n\t\tt:=localVarOptionals.LocationCountry.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"location.country\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"location.country\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.NotLocationCountry.IsSet() {\n\t\tt:=localVarOptionals.NotLocationCountry.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"!location.country\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"!location.country\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Return_.IsSet() {\n\t\tt:=localVarOptionals.Return_.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"return[]\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"return[]\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.SortBy.IsSet() {\n\t\tlocalVarQueryParams.Add(\"sort_by\", parameterToString(localVarOptionals.SortBy.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.SortDirection.IsSet() {\n\t\tlocalVarQueryParams.Add(\"sort_direction\", 
parameterToString(localVarOptionals.SortDirection.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Cursor.IsSet() {\n\t\tlocalVarQueryParams.Add(\"cursor\", parameterToString(localVarOptionals.Cursor.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.PerPage.IsSet() {\n\t\tlocalVarQueryParams.Add(\"per_page\", parameterToString(localVarOptionals.PerPage.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\", \"text/xml\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"X-AYLIEN-NewsAPI-Application-ID\"] = key\n\t\t}\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"X-AYLIEN-NewsAPI-Application-Key\"] = key\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn 
localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 422 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 429 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn 
localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (az *Cloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func ExampleClustersClient_Get() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicefabric.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewClustersClient().Get(ctx, \"resRg\", \"myCluster\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.Cluster = armservicefabric.Cluster{\n\t// \tName: to.Ptr(\"myCluster\"),\n\t// \tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \tEtag: to.Ptr(\"W/\\\"636462502169240745\\\"\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster\"),\n\t// \tLocation: to.Ptr(\"eastus\"),\n\t// \tTags: map[string]*string{\n\t// \t},\n\t// \tProperties: &armservicefabric.ClusterProperties{\n\t// \t\tAddOnFeatures: []*armservicefabric.AddOnFeatures{\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesRepairManager),\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesDNSService),\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesBackupRestoreService),\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesResourceMonitorService)},\n\t// \t\t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\t\t\tEnvironment: to.Ptr(armservicefabric.ClusterEnvironmentWindows),\n\t// \t\t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t\t}},\n\t// \t\t\tAzureActiveDirectory: &armservicefabric.AzureActiveDirectory{\n\t// \t\t\t\tClientApplication: to.Ptr(\"d151ad89-4bce-4ae8-b3d1-1dc79679fa75\"),\n\t// \t\t\t\tClusterApplication: to.Ptr(\"5886372e-7bf4-4878-a497-8098aba608ae\"),\n\t// \t\t\t\tTenantID: to.Ptr(\"6abcc6a0-8666-43f1-87b8-172cf86a9f9c\"),\n\t// \t\t\t},\n\t// \t\t\tCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t}},\n\t// \t\t\t\tX509StoreName: 
to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t},\n\t// \t\t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\tIsAdmin: to.Ptr(true),\n\t// \t\t\t}},\n\t// \t\t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCertificateThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\tIsAdmin: to.Ptr(true),\n\t// \t\t\t}},\n\t// \t\t\tClusterCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\t\tClusterID: to.Ptr(\"92584666-9889-4ae8-8d02-91902923d37f\"),\n\t// \t\t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t// \t\t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t\t},\n\t// \t\t\tFabricSettings: []*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// \t\t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t\t}},\n\t// \t\t\t}},\n\t// \t\t\tManagementEndpoint: to.Ptr(\"https://myCluster.eastus.cloudapp.azure.com:19080\"),\n\t// \t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// 
\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\t\tIsPrimary: to.Ptr(true),\n\t// \t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t\t}},\n\t// \t\t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelSilver),\n\t// \t\t\tReverseProxyCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t}},\n\t// \t\t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t},\n\t// \t\t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\t\tApplicationDeltaHealthPolicies: map[string]*armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\tDefaultServiceTypeDeltaHealthPolicy: &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tServiceTypeDeltaHealthPolicies: map[string]*armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// 
\t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t},\n\t// \t\t\t\tForceRestart: to.Ptr(false),\n\t// \t\t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:05:00\"),\n\t// \t\t\t\tHealthCheckStableDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\tHealthCheckWaitDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\t\tApplicationHealthPolicies: map[string]*armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\tDefaultServiceTypeHealthPolicy: &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tServiceTypeHealthPolicies: map[string]*armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](100),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tMaxPercentUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t},\n\t// \t\t\t\tUpgradeDomainTimeout: to.Ptr(\"00:15:00\"),\n\t// \t\t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"00:10:00\"),\n\t// \t\t\t\tUpgradeTimeout: to.Ptr(\"01:00:00\"),\n\t// \t\t\t},\n\t// \t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeManual),\n\t// \t\t\tVMImage: to.Ptr(\"Windows\"),\n\t// \t\t},\n\t// \t}\n}", "func (a *Client) ListClusters(params 
*ListClustersParams, authInfo runtime.ClientAuthInfoWriter) (*ListClustersOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListClustersParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"ListClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/v1/clusters\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ListClustersReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ListClustersOK), nil\n\n}", "func (s *Server) GetClusters() []*api.Cluster {\n\tinstances := s.doGetClusters()\n\tclusters := make([]*api.Cluster, len(instances))\n\tfor i, instance := range instances {\n\t\tclusters[i] = convertClusterToAPI(instance)\n\t}\n\treturn clusters\n}", "func (bc *Baiducloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func (adm Admin) ListClusters() (string, error) {\n\tconn := newConnection(adm.ZkSvr)\n\terr := conn.Connect()\n\tif err != nil {\n\t\tfmt.Println(\"Failed to connect to zookeeper.\")\n\t\treturn \"\", err\n\t}\n\tdefer conn.Disconnect()\n\n\tvar clusters []string\n\n\tchildren, err := conn.Children(\"/\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, cluster := range children {\n\t\tif ok, err := conn.IsClusterSetup(cluster); ok && err == nil {\n\t\t\tclusters = append(clusters, cluster)\n\t\t}\n\t}\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"Existing clusters: \\n\")\n\n\tfor _, cluster := range clusters {\n\t\tbuffer.WriteString(\" \" + cluster + \"\\n\")\n\t}\n\treturn buffer.String(), nil\n}", "func (q *QueryResolver) Clusters(ctx context.Context) ([]*ClusterInfoResolver, error) {\n\tgrpcAPI := q.Env.VizierClusterInfo\n\tresp, err := 
grpcAPI.GetClusterInfo(ctx, &cloudpb.GetClusterInfoRequest{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar res []*ClusterInfoResolver\n\tfor _, cluster := range resp.Clusters {\n\t\tresolver, err := clusterInfoToResolver(cluster)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tres = append(res, resolver)\n\t}\n\treturn res, nil\n}", "func (r *ProjectsInstancesClustersService) Get(name string) *ProjectsInstancesClustersGetCall {\n\tc := &ProjectsInstancesClustersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}", "func (a *Client) GetCombinedCloudClusters(params *GetCombinedCloudClustersParams, opts ...ClientOption) (*GetCombinedCloudClustersOK, *GetCombinedCloudClustersMultiStatus, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetCombinedCloudClustersParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"GetCombinedCloudClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/kubernetes-protection/entities/cloud_cluster/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\", \"application/octet-stream\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetCombinedCloudClustersReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tswitch value := result.(type) {\n\tcase *GetCombinedCloudClustersOK:\n\t\treturn value, nil, nil\n\tcase *GetCombinedCloudClustersMultiStatus:\n\t\treturn nil, value, nil\n\t}\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for kubernetes_protection: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (c *Client) GetClustersSync(ctx context.Context) ([]*Cluster, error) {\n\tclusters := make([]*Cluster, 0)\n\n\tfor result := range c.GetClusters(ctx) {\n\t\tif result.Error != nil {\n\t\t\treturn nil, result.Error\n\t\t}\n\t\tclusters = append(clusters, result.Cluster)\n\t}\n\n\treturn clusters, nil\n}", "func (adm Admin) ListClusters() (string, error) {\n\tvar clusters []string\n\n\tchildren, err := adm.zkClient.Children(\"/\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, cluster := range children {\n\t\tif ok, err := adm.isClusterSetup(cluster); ok && err == nil {\n\t\t\tclusters = append(clusters, cluster)\n\t\t}\n\t}\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"Existing clusters: \\n\")\n\n\tfor _, cluster := range clusters {\n\t\tbuffer.WriteString(\" \" + cluster + \"\\n\")\n\t}\n\treturn buffer.String(), nil\n}", "func handleGetClusters(c *Context, w http.ResponseWriter, r *http.Request) {\n\tpaging, err := parsePaging(r.URL)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"failed to parse paging parameters\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tfilter := &model.ClusterFilter{\n\t\tPaging: paging,\n\t}\n\n\tclusters, err := c.Store.GetClusterDTOs(filter)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"failed to query clusters\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif clusters == nil {\n\t\tclusters = []*model.ClusterDTO{}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\toutputJSON(c, w, clusters)\n}", "func (s *clusterService) Clusters(ctx context.Context, options ...rest.HTTPClientOption) ([]cluster.Cluster, error) {\n\t_, err := Start(ctx, s.Factories().ClusterCacheFactory(), options...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclusterCache.RLock()\n\tdefer clusterCache.RUnlock()\n\n\treturn 
Clusters(clusterCache.Clusters()), nil\n}", "func (c Client) ListClusters() (ClusterList, error) {\n\tbody, err := c.watsonClient.MakeRequest(\"GET\", c.version+\"/solr_clusters\", nil, nil)\n\tif err != nil {\n\t\treturn ClusterList{}, err\n\t}\n\tvar response ClusterList\n\terr = json.Unmarshal(body, &response)\n\treturn response, err\n}", "func (c *krakenClusters) Get(name string, options v1.GetOptions) (result *v1alpha1.KrakenCluster, err error) {\n\tresult = &v1alpha1.KrakenCluster{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"krakenclusters\").\n\t\tName(name).\n\t\tVersionedParams(&options, scheme.ParameterCodec).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}", "func (c starterClusterServiceOp) List(ctx context.Context) (*[]models.Cluster, *Response, error) {\n\tvar clusterList []models.Cluster\n\tgraphqlRequest := models.GraphqlRequest{\n\t\tName: \"clusters\",\n\t\tOperation: models.Query,\n\t\tInput: clusterList,\n\t\tArgs: models.ClusterListInput{\n\t\t\tProductType: models.Starter,\n\t\t},\n\t\tResponse: clusterList,\n\t}\n\treq, err := c.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := c.client.Do(ctx, req, &clusterList)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn &clusterList, resp, err\n}", "func getClusters(kubeconfig string) ([]string, error) {\n\tkubectlArgs := []string{\"kubectl\"}\n\tif kubeconfig != \"\" {\n\t\tkubectlArgs = append(kubectlArgs, fmt.Sprintf(\"--kubeconfig=%s\", kubeconfig))\n\t}\n\tcontextArgs := append(kubectlArgs, []string{\"config\", \"get-contexts\", \"-o=name\"}...)\n\toutput, err := runCommand(contextArgs)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error in getting contexts from kubeconfig: %s\", err)\n\t}\n\treturn strings.Split(output, \"\\n\"), nil\n}", "func (nh *NodeHost) Clusters() []*node {\n\tresult := make([]*node, 0)\n\tnh.clusterMu.RLock()\n\tnh.clusterMu.clusters.Range(func(k, v interface{}) bool {\n\t\tresult = 
append(result, v.(*node))\n\t\treturn true\n\t})\n\tnh.clusterMu.RUnlock()\n\n\treturn result\n}", "func (a *ClustersApiService) ListClusters(ctx _context.Context, space string) ApiListClustersRequest {\n\treturn ApiListClustersRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tspace: space,\n\t}\n}", "func GetMultipleClustersName(cmd *cobra.Command, args []string) ([]string, error) {\n\tif len(args) == 0 {\n\t\treturn nil, UserErrorf(\"requires a cluster name\")\n\t}\n\treturn args, nil\n}", "func (bc *Baiducloud) ListClusters(ctx context.Context) ([]string, error) {\n\treturn nil, fmt.Errorf(\"ListClusters unimplemented\")\n}", "func (a *ClustersApiService) ListClustersExecute(r ApiListClustersRequest) (ListClustersResponse, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue ListClustersResponse\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"ClustersApiService.ListClusters\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/spaces/{space}/clusters\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"space\"+\"}\", _neturl.PathEscape(parameterToString(r.space, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept 
header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (svc ServerlessClusterService) List(ctx context.Context) (*[]models.Cluster, *Response, error) {\n\tvar clusterList []models.Cluster\n\tgraphqlRequest := models.GraphqlRequest{\n\t\tName: \"clusters\",\n\t\tOperation: models.Query,\n\t\tInput: nil,\n\t\tArgs: models.ClusterListInput{\n\t\t\tProductType: models.Starter,\n\t\t},\n\t\tResponse: clusterList,\n\t}\n\treq, err := svc.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := 
svc.client.Do(ctx, req, &clusterList)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &clusterList, resp, err\n}", "func (m *MockBuilder) Clusters() []string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Clusters\")\n\tret0, _ := ret[0].([]string)\n\treturn ret0\n}", "func (cc *CloudComb) GetClustersImages() (string, error) {\n\tresult, _, err := cc.doRESTRequest(\"GET\", \"/api/v1/apps/images\", \"\", nil, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn result, nil\n}", "func (ds *DiscoveryService) ListClusters(request *restful.Request, response *restful.Response) {\n\tkey := request.Request.URL.String()\n\tout, cached := ds.cdsCache.cachedDiscoveryResponse(key)\n\tif !cached {\n\t\tif sc := request.PathParameter(ServiceCluster); sc != ds.mesh.IstioServiceCluster {\n\t\t\terrorResponse(response, http.StatusNotFound,\n\t\t\t\tfmt.Sprintf(\"Unexpected %s %q\", ServiceCluster, sc))\n\t\t\treturn\n\t\t}\n\n\t\t// service-node holds the IP address\n\t\tip := request.PathParameter(ServiceNode)\n\t\t// CDS computes clusters that are referenced by RDS routes for a particular proxy node\n\t\t// TODO: this implementation is inefficient as it is recomputing all the routes for all proxies\n\t\t// There is a lot of potential to cache and reuse cluster definitions across proxies and also\n\t\t// skip computing the actual HTTP routes\n\t\tinstances := ds.services.HostInstances(map[string]bool{ip: true})\n\t\tservices := ds.services.Services()\n\t\thttpRouteConfigs := buildOutboundHTTPRoutes(instances, services, &ProxyContext{\n\t\t\tDiscovery: ds.services,\n\t\t\tConfig: ds.config,\n\t\t\tMeshConfig: ds.mesh,\n\t\t\tIPAddress: ip,\n\t\t})\n\n\t\t// de-duplicate and canonicalize clusters\n\t\tclusters := httpRouteConfigs.clusters().normalize()\n\n\t\t// apply custom policies for HTTP clusters\n\t\tfor _, cluster := range clusters {\n\t\t\tinsertDestinationPolicy(ds.config, cluster)\n\t\t}\n\n\t\tvar err error\n\t\tif out, err = 
json.MarshalIndent(ClusterManager{Clusters: clusters}, \" \", \" \"); err != nil {\n\t\t\terrorResponse(response, http.StatusInternalServerError, err.Error())\n\t\t\treturn\n\t\t}\n\t\tds.cdsCache.updateCachedDiscoveryResponse(key, out)\n\t}\n\twriteResponse(response, out)\n}", "func (e *ECS) DescribeClusters(req *DescribeClustersReq) (*DescribeClustersResp, error) {\n\tif req == nil {\n\t\treturn nil, fmt.Errorf(\"The req params cannot be nil\")\n\t}\n\n\tparams := makeParams(\"DescribeClusters\")\n\tif len(req.Clusters) > 0 {\n\t\taddParamsList(params, \"clusters.member\", req.Clusters)\n\t}\n\n\tresp := new(DescribeClustersResp)\n\tif err := e.query(params, resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func (c *ClustersController) List(ctx *app.ListClustersContext) error {\n\t// return a single cluster given its URL\n\tif ctx.ClusterURL != nil {\n\t\t// authorization is checked at the service level for more consistency accross the codebase.\n\t\tclustr, err := c.app.ClusterService().FindByURL(ctx, *ctx.ClusterURL)\n\t\tif err != nil {\n\t\t\tif ok, _ := errors.IsNotFoundError(err); ok {\n\t\t\t\t// no result found, return an empty array\n\t\t\t\treturn ctx.OK(&app.ClusterList{\n\t\t\t\t\tData: []*app.ClusterData{},\n\t\t\t\t})\n\t\t\t}\n\t\t\t// something wrong happened, return the error\n\t\t\treturn app.JSONErrorResponse(ctx, err)\n\t\t}\n\t\treturn ctx.OK(&app.ClusterList{\n\t\t\tData: []*app.ClusterData{convertToClusterData(*clustr)},\n\t\t})\n\t}\n\t// otherwise, list all clusters\n\tclusters, err := c.app.ClusterService().List(ctx, ctx.Type)\n\tif err != nil {\n\t\treturn app.JSONErrorResponse(ctx, err)\n\t}\n\tvar data []*app.ClusterData\n\tfor _, clustr := range clusters {\n\t\tdata = append(data, convertToClusterData(clustr))\n\t}\n\treturn ctx.OK(&app.ClusterList{\n\t\tData: data,\n\t})\n}", "func ListClusters(c *cli.Context) error {\n\tif err := printClusters(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func 
GetClusterNodes(cs *framework.ClientSet) (int, error) {\n\tnodes, err := getNodesByLabel(cs, \"\")\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"unable to get the number of cluster nodes: %v\", err)\n\t}\n\n\treturn len(nodes), nil\n}", "func (c *ClientIMPL) GetCluster(ctx context.Context) (resp Cluster, err error) {\n\tvar systemList []Cluster\n\tcluster := Cluster{}\n\tqp := c.APIClient().QueryParamsWithFields(&cluster)\n\n\tmajorMinorVersion, err := c.GetSoftwareMajorMinorVersion(ctx)\n\tif err != nil {\n\t\tlog.Errorf(\"Couldn't find the array version %s\", err.Error())\n\t} else {\n\t\tif majorMinorVersion >= 3.0 {\n\t\t\tqp.Select(\"nvm_subsystem_nqn\")\n\t\t}\n\t}\n\t_, err = c.APIClient().Query(\n\t\tctx,\n\t\tRequestConfig{\n\t\t\tMethod: \"GET\",\n\t\t\tEndpoint: clusterURL,\n\t\t\tQueryParams: qp,\n\t\t},\n\t\t&systemList)\n\terr = WrapErr(err)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\treturn systemList[0], err\n}", "func (c starterClusterServiceOp) Get(ctx context.Context, input *models.GetStarterClusterInput) (*models.Cluster, *Response, error) {\n\tvar cluster models.Cluster\n\tvar graphqlRequest = models.GraphqlRequest{\n\t\tName: \"cluster\",\n\t\tOperation: models.Query,\n\t\tInput: nil,\n\t\tArgs: *input,\n\t\tResponse: cluster,\n\t}\n\treq, err := c.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := c.client.Do(ctx, req, &cluster)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn &cluster, resp, err\n}", "func ExampleClustersClient_List() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicefabric.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewClustersClient().List(ctx, nil)\n\tif err != nil 
{\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.ClusterListResult = armservicefabric.ClusterListResult{\n\t// \tValue: []*armservicefabric.Cluster{\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"myCluster\"),\n\t// \t\t\tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \t\t\tEtag: to.Ptr(\"W/\\\"636462502169240745\\\"\"),\n\t// \t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster\"),\n\t// \t\t\tLocation: to.Ptr(\"eastus\"),\n\t// \t\t\tTags: map[string]*string{\n\t// \t\t\t},\n\t// \t\t\tProperties: &armservicefabric.ClusterProperties{\n\t// \t\t\t\tAddOnFeatures: []*armservicefabric.AddOnFeatures{\n\t// \t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesRepairManager),\n\t// \t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesDNSService),\n\t// \t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesBackupRestoreService),\n\t// \t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesResourceMonitorService)},\n\t// \t\t\t\t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\t\t\t\t\tEnvironment: to.Ptr(armservicefabric.ClusterEnvironmentWindows),\n\t// \t\t\t\t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tAzureActiveDirectory: &armservicefabric.AzureActiveDirectory{\n\t// \t\t\t\t\t\tClientApplication: to.Ptr(\"d151ad89-4bce-4ae8-b3d1-1dc79679fa75\"),\n\t// \t\t\t\t\t\tClusterApplication: to.Ptr(\"5886372e-7bf4-4878-a497-8098aba608ae\"),\n\t// \t\t\t\t\t\tTenantID: 
to.Ptr(\"6abcc6a0-8666-43f1-87b8-172cf86a9f9c\"),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\t\t\tIsAdmin: to.Ptr(true),\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tCertificateThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\t\t\tIsAdmin: to.Ptr(false),\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tClusterCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\t\t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\t\t\t\tClusterID: to.Ptr(\"92584666-9889-4ae8-8d02-91902923d37f\"),\n\t// \t\t\t\t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\t\t\t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\t\t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\t\t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\t\t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\t\t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t// \t\t\t\t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tFabricSettings: 
[]*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// \t\t\t\t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t\t\t\t}},\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tManagementEndpoint: to.Ptr(\"https://myCluster.eastus.cloudapp.azure.com:19080\"),\n\t// \t\t\t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// \t\t\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\t\t\t\tIsPrimary: to.Ptr(true),\n\t// \t\t\t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\t\t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelSilver),\n\t// \t\t\t\t\tReverseProxyCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tX509StoreName: 
to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\t\t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\tApplicationDeltaHealthPolicies: map[string]*armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tDefaultServiceTypeDeltaHealthPolicy: &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\tServiceTypeDeltaHealthPolicies: map[string]*armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tForceRestart: to.Ptr(false),\n\t// \t\t\t\t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:05:00\"),\n\t// \t\t\t\t\t\tHealthCheckStableDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\tHealthCheckWaitDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\t\t\t\tApplicationHealthPolicies: map[string]*armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tDefaultServiceTypeHealthPolicy: &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t\t},\n\t// 
\t\t\t\t\t\t\t\t\tServiceTypeHealthPolicies: map[string]*armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](100),\n\t// \t\t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tMaxPercentUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tUpgradeDomainTimeout: to.Ptr(\"00:15:00\"),\n\t// \t\t\t\t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"00:10:00\"),\n\t// \t\t\t\t\t\tUpgradeTimeout: to.Ptr(\"01:00:00\"),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeManual),\n\t// \t\t\t\t\tVMImage: to.Ptr(\"Windows\"),\n\t// \t\t\t\t},\n\t// \t\t\t},\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"myCluster2\"),\n\t// \t\t\t\tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \t\t\t\tEtag: to.Ptr(\"W/\\\"636462502164040075\\\"\"),\n\t// \t\t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster2\"),\n\t// \t\t\t\tLocation: to.Ptr(\"eastus\"),\n\t// \t\t\t\tTags: map[string]*string{\n\t// \t\t\t\t},\n\t// \t\t\t\tProperties: &armservicefabric.ClusterProperties{\n\t// \t\t\t\t\tAddOnFeatures: []*armservicefabric.AddOnFeatures{\n\t// \t\t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesRepairManager)},\n\t// \t\t\t\t\t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tCodeVersion: to.Ptr(\"6.1.187.1\"),\n\t// \t\t\t\t\t\t\t\tEnvironment: to.Ptr(armservicefabric.ClusterEnvironmentLinux),\n\t// \t\t\t\t\t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// 
\t\t\t\t\t\t},\n\t// \t\t\t\t\t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tClusterCodeVersion: to.Ptr(\"6.1.187.1\"),\n\t// \t\t\t\t\t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\t\t\t\t\tClusterID: to.Ptr(\"2747e469-b24e-4039-8a0a-46151419523f\"),\n\t// \t\t\t\t\t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\t\t\t\t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\t\t\t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\t\t\t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\t\t\t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\t\t\t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t// \t\t\t\t\t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tFabricSettings: []*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// \t\t\t\t\t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t\t\t\t\t}},\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tManagementEndpoint: to.Ptr(\"http://myCluster2.eastus.cloudapp.azure.com:19080\"),\n\t// \t\t\t\t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// \t\t\t\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\t\t\t\t\tDurabilityLevel: 
to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\t\t\t\t\tIsPrimary: to.Ptr(true),\n\t// \t\t\t\t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\t\t\t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelSilver),\n\t// \t\t\t\t\t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\t\t\t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tForceRestart: to.Ptr(false),\n\t// \t\t\t\t\t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:05:00\"),\n\t// \t\t\t\t\t\t\tHealthCheckStableDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\t\tHealthCheckWaitDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tUpgradeDomainTimeout: to.Ptr(\"00:15:00\"),\n\t// \t\t\t\t\t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"00:10:00\"),\n\t// \t\t\t\t\t\t\tUpgradeTimeout: to.Ptr(\"01:00:00\"),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeManual),\n\t// \t\t\t\t\t\tVMImage: to.Ptr(\"Ubuntu\"),\n\t// \t\t\t\t\t},\n\t// \t\t\t}},\n\t// \t\t}\n}", "func List() ([]clusterapi.Cluster, error) {\n\tvar 
clusterList []clusterapi.Cluster\n\terr := utils.BrowseMetadataContent(clusterapi.ClusterMetadataPrefix, func(buf *bytes.Buffer) error {\n\t\tvar c clusterapi.Cluster\n\t\terr := gob.NewDecoder(buf).Decode(&c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tclusterList = append(clusterList, c)\n\t\treturn nil\n\t})\n\treturn clusterList, err\n}", "func (store *CenterStore) GetCenters(data []core.Elemt, space core.Space, k int, clust core.Clust) (core.Clust, error) {\n\tvar centers, ok = store.centers[k]\n\n\tif !ok {\n\t\treturn store.genCenters(data, space, k, clust)\n\t}\n\n\treturn centers, nil\n}", "func (s *ocmClient) GetCluster() (*ClusterInfo, error) {\n\n\t// fetch the clusterversion, which contains the internal ID\n\tcv := &configv1.ClusterVersion{}\n\terr := s.client.Get(context.TODO(), types.NamespacedName{Name: \"version\"}, cv)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't get clusterversion: %v\", err)\n\t}\n\texternalID := cv.Spec.ClusterID\n\n\tcsUrl, err := url.Parse(s.ocmBaseUrl.String())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't parse OCM API url: %v\", err)\n\t}\n\tcsUrl.Path = path.Join(csUrl.Path, CLUSTERS_V1_PATH)\n\n\tresponse, err := s.httpClient.R().\n\t\tSetQueryParams(map[string]string{\n\t\t\t\"page\": \"1\",\n\t\t\t\"size\": \"1\",\n\t\t\t\"search\": fmt.Sprintf(\"external_id = '%s'\", externalID),\n\t\t}).\n\t\tSetResult(&ClusterList{}).\n\t\tExpectContentType(\"application/json\").\n\t\tGet(csUrl.String())\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't query OCM cluster service: request to '%v' returned error '%v'\", csUrl.String(), err)\n\t}\n\n\toperationId := response.Header().Get(OPERATION_ID_HEADER)\n\tif response.IsError() {\n\t\treturn nil, fmt.Errorf(\"request to '%v' received error code %v, operation id '%v'\", csUrl.String(), response.StatusCode(), operationId)\n\t}\n\n\tlog.Info(fmt.Sprintf(\"request to '%v' received response code %v, operation id: '%v'\", csUrl.String(), 
response.StatusCode(), operationId))\n\n\tlistResponse := response.Result().(*ClusterList)\n\tif listResponse.Size != 1 || len(listResponse.Items) != 1 {\n\t\treturn nil, ErrClusterIdNotFound\n\t}\n\n\treturn &listResponse.Items[0], nil\n}", "func RetrieveClusters(manifests string) cluster.Map {\n\tklog.V(1).Info(\"retrieving clusters from manifests\")\n\tclusters := cluster.Map{}\n\tdocuments := yamlutils.SplitDocuments(manifests)\n\tscheme := runtime.NewScheme()\n\tif err := clusterv1alpha1.AddToScheme(scheme); err != nil {\n\t\treturn cluster.Map{}\n\t}\n\tserializer := json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme, json.SerializerOptions{Yaml: true})\n\tfor _, document := range documents {\n\t\tclusterObj := clusterv1alpha1.Cluster{}\n\t\tif _, _, err := serializer.Decode([]byte(document), nil, &clusterObj); err != nil || clusterObj.TypeMeta.Kind != \"Cluster\" {\n\t\t\tcontinue\n\t\t}\n\t\tinternalCluster, err := cluster.NewClusterFromv1alpha1(&clusterObj)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tclusters[internalCluster.Name] = internalCluster\n\t}\n\treturn clusters\n}", "func (p *Provider) List() ([]string, error) {\n\treturn p.provider.ListClusters()\n}", "func Clusters(api API) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tclusters := api.Clusters()\n\t\tm := make(map[string]map[string]any, len(clusters))\n\t\tfor _, c := range clusters {\n\t\t\tm[c.ID] = c.Debug()\n\t\t}\n\n\t\tdata, err := json.Marshal(m)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tfmt.Fprintf(w, \"could not marshal cluster debug map: %s\\n\", err)\n\t\t\treturn\n\t\t}\n\n\t\tw.Write(data)\n\t\tw.Write([]byte(\"\\n\"))\n\t}\n}", "func (svc ServerlessClusterService) Get(ctx context.Context,\n\tinput *models.GetServerlessClusterInput) (*models.Cluster, *Response, error) {\n\tvar cluster models.Cluster\n\tvar graphqlRequest = models.GraphqlRequest{\n\t\tName: \"cluster\",\n\t\tOperation: 
models.Query,\n\t\tInput: nil,\n\t\tArgs: *input,\n\t\tResponse: cluster,\n\t}\n\treq, err := svc.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := svc.client.Do(ctx, req, &cluster)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &cluster, resp, err\n}", "func Clusters(clusters map[string]cluster.Cluster) []cluster.Cluster {\n\tcs := make([]cluster.Cluster, 0, len(clusters))\n\tfor _, cls := range clusters {\n\t\tcs = append(cs, cls)\n\t}\n\treturn cs\n}", "func (p *v1Provider) GetCluster(w http.ResponseWriter, r *http.Request) {\n\thttpapi.IdentifyEndpoint(r, \"/v1/clusters/current\")\n\ttoken := p.CheckToken(r)\n\tif !token.Require(w, \"cluster:show_basic\") {\n\t\treturn\n\t}\n\tshowBasic := !token.Check(\"cluster:show\")\n\n\tfilter := reports.ReadFilter(r, p.Cluster.GetServiceTypesForArea)\n\tif showBasic {\n\t\tfilter.IsSubcapacityAllowed = func(serviceType, resourceName string) bool {\n\t\t\ttoken.Context.Request[\"service\"] = serviceType\n\t\t\ttoken.Context.Request[\"resource\"] = resourceName\n\t\t\treturn token.Check(\"cluster:show_subcapacity\")\n\t\t}\n\t}\n\n\tcluster, err := reports.GetClusterResources(p.Cluster, p.DB, filter)\n\tif respondwith.ErrorText(w, err) {\n\t\treturn\n\t}\n\trespondwith.JSON(w, 200, map[string]interface{}{\"cluster\": cluster})\n}", "func listClusters(w http.ResponseWriter, r *http.Request, t auth.Token) (err error) {\n\tctx := r.Context()\n\tallowed := permission.Check(t, permission.PermClusterRead)\n\tif !allowed {\n\t\treturn permission.ErrUnauthorized\n\t}\n\tclusters, err := servicemanager.Cluster.List(ctx)\n\tif err != nil {\n\t\tif err == provTypes.ErrNoCluster {\n\t\t\tw.WriteHeader(http.StatusNoContent)\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tadmin := permission.Check(t, permission.PermClusterAdmin)\n\tif !admin {\n\t\tfor i := range clusters {\n\t\t\tclusters[i].CleanUpSensitive()\n\t\t}\n\t}\n\tw.Header().Set(\"Content-Type\", 
\"application/json\")\n\treturn json.NewEncoder(w).Encode(clusters)\n}", "func AzureGetClusters(subscriptionID, clientID, clientSecret, tenantID, resourceGroupName string, admin bool) (string, error) {\n\tctx := context.Background()\n\tclient := containerservice.NewManagedClustersClient(subscriptionID)\n\n\tauthorizer, err := getAzureAuthorizer(clientID, clientSecret, tenantID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tclient.Authorizer = authorizer\n\n\tvar clusters []string\n\n\tfor list, err := client.ListComplete(ctx); list.NotDone(); err = list.Next() {\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tvar res containerservice.CredentialResults\n\t\tname := *list.Value().Name\n\n\t\tif admin {\n\t\t\tres, err = client.ListClusterAdminCredentials(ctx, resourceGroupName, name)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t} else {\n\t\t\tres, err = client.ListClusterUserCredentials(ctx, resourceGroupName, name)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t}\n\n\t\tfor _, kubeconfig := range *res.Kubeconfigs {\n\t\t\tvar kubeconfigJSON interface{}\n\t\t\terr := yaml.Unmarshal(*kubeconfig.Value, &kubeconfigJSON)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\tkubeconfigJSON = convert(kubeconfigJSON)\n\t\t\tkubeconfigJSONString, err := json.Marshal(kubeconfigJSON)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\tclusters = append(clusters, fmt.Sprintf(\"{\\\"name\\\": \\\"%s_%s_%s\\\", \\\"kubeconfig\\\": %s}\", *kubeconfig.Name, resourceGroupName, name, kubeconfigJSONString))\n\t\t}\n\t}\n\n\treturn fmt.Sprintf(\"[%s]\", strings.Join(clusters, \",\")), nil\n}", "func (a *Client) VirtualizationClustersRead(params *VirtualizationClustersReadParams) (*VirtualizationClustersReadOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewVirtualizationClustersReadParams()\n\t}\n\n\tresult, err := 
a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"virtualization_clusters_read\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/virtualization/clusters/{id}/\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &VirtualizationClustersReadReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*VirtualizationClustersReadOK), nil\n\n}", "func GetClusterCIDRs(lister configlistersv1.NetworkLister, recorder events.Recorder) ([]string, error) {\n\tnetwork, err := lister.Get(\"cluster\")\n\tif errors.IsNotFound(err) {\n\t\trecorder.Warningf(\"ObserveRestrictedCIDRFailed\", \"Required networks.%s/cluster not found\", configv1.GroupName)\n\t\treturn nil, nil\n\t}\n\tif err != nil {\n\t\trecorder.Warningf(\"ObserveRestrictedCIDRFailed\", \"error getting networks.%s/cluster: %v\", configv1.GroupName, err)\n\t\treturn nil, err\n\t}\n\n\tif len(network.Status.ClusterNetwork) == 0 {\n\t\trecorder.Warningf(\"ObserveClusterCIDRFailed\", \"Required status.clusterNetwork field is not set in networks.%s/cluster\", configv1.GroupName)\n\t\treturn nil, fmt.Errorf(\"networks.%s/cluster: status.clusterNetwork not found\", configv1.GroupName)\n\t}\n\n\tvar clusterCIDRs []string\n\tfor i, clusterNetwork := range network.Status.ClusterNetwork {\n\t\tif len(clusterNetwork.CIDR) == 0 {\n\t\t\trecorder.Warningf(\"ObserveRestrictedCIDRFailed\", \"Required status.clusterNetwork[%d].cidr field is not set in networks.%s/cluster\", i, configv1.GroupName)\n\t\t\treturn nil, fmt.Errorf(\"networks.%s/cluster: status.clusterNetwork[%d].cidr not found\", configv1.GroupName, i)\n\t\t}\n\t\tclusterCIDRs = append(clusterCIDRs, clusterNetwork.CIDR)\n\t}\n\t// TODO fallback to podCIDR? 
is that still a thing?\n\treturn clusterCIDRs, nil\n}", "func (a *ClusterControllerApiService) GetClustersUsingGET(ctx _context.Context, account string, application string, clusterName string) apiGetClustersUsingGETRequest {\n\treturn apiGetClustersUsingGETRequest{\n\t\tapiService: a,\n\t\tctx: ctx,\n\t\taccount: account,\n\t\tapplication: application,\n\t\tclusterName: clusterName,\n\t}\n}", "func NewClusters(db *gorm.DB) *Clusters {\n\treturn &Clusters{db: db}\n}", "func (o *ResourceLimits) GetK8sClustersProvisioned() *int32 {\n\tif o == nil {\n\t\treturn nil\n\t}\n\n\treturn o.K8sClustersProvisioned\n}", "func ExampleSnowball_ListClusters_shared00() {\n\tsvc := snowball.New(session.New())\n\tinput := &snowball.ListClustersInput{}\n\n\tresult, err := svc.ListClusters(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase snowball.ErrCodeInvalidNextTokenException:\n\t\t\t\tfmt.Println(snowball.ErrCodeInvalidNextTokenException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (a *ClustersApiService) ClusterServiceListClusters(ctx context.Context, body Servicev1ClusterQuery) (V1Clusterlist, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Post\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue V1Clusterlist\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/gitops/api/v1/clusters\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type 
header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tlocalVarQueryParams.Add(\"routingId\", body.AccountIdentifier)\n\t// body params\n\tlocalVarPostBody = &body\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"x-api-key\"] = key\n\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode < 300 {\n\t\t// If we succeed, return the data, otherwise pass on to decode error.\n\t\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\tif err == nil {\n\t\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t\t}\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: 
localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v V1Clusterlist\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 0 {\n\t\t\tvar v GatewayruntimeError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func AWSGetClusters(accessKeyId, secretAccessKey, region string) (string, error) {\n\tvar clusters []*eks.Cluster\n\tvar names []*string\n\tvar nextToken *string\n\n\tcred := credentials.NewStaticCredentials(accessKeyId, secretAccessKey, \"\")\n\n\tsess, err := session.NewSession(&aws.Config{Region: aws.String(region), Credentials: cred})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\teksClient := eks.New(sess)\n\n\tfor {\n\t\tc, err := eksClient.ListClusters(&eks.ListClustersInput{NextToken: nextToken})\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tnames = append(names, c.Clusters...)\n\n\t\tif c.NextToken == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tnextToken = c.NextToken\n\t}\n\n\tfor _, name := range names {\n\t\tcluster, err := eksClient.DescribeCluster(&eks.DescribeClusterInput{Name: name})\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif *cluster.Cluster.Status == eks.ClusterStatusActive {\n\t\t\tclusters = append(clusters, 
cluster.Cluster)\n\t\t}\n\t}\n\n\tif clusters != nil {\n\t\tb, err := json.Marshal(clusters)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\treturn string(b), nil\n\t}\n\n\treturn \"\", nil\n}", "func (a *Client) VirtualizationClustersList(params *VirtualizationClustersListParams) (*VirtualizationClustersListOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewVirtualizationClustersListParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"virtualization_clusters_list\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/virtualization/clusters/\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &VirtualizationClustersListReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*VirtualizationClustersListOK), nil\n\n}", "func NewGetClustersOK() *GetClustersOK {\n\treturn &GetClustersOK{}\n}", "func getClusterNameForMultiVC(ctx context.Context, vs *multiVCvSphere,\n\tclientIndex int) ([]*object.ClusterComputeResource,\n\t*VsanClient, error) {\n\n\tvar vsanHealthClient *VsanClient\n\tvar err error\n\tc := newClientForMultiVC(ctx, vs)\n\n\tdatacenter := strings.Split(multiVCe2eVSphere.multivcConfig.Global.Datacenters, \",\")\n\n\tfor i, client := range c {\n\t\tif clientIndex == i {\n\t\t\tvsanHealthClient, err = newVsanHealthSvcClient(ctx, client.Client)\n\t\t\tgomega.Expect(err).NotTo(gomega.HaveOccurred())\n\t\t}\n\t}\n\n\tfinder := find.NewFinder(vsanHealthClient.vim25Client, false)\n\tdc, err := finder.Datacenter(ctx, datacenter[0])\n\tgomega.Expect(err).NotTo(gomega.HaveOccurred())\n\tfinder.SetDatacenter(dc)\n\n\tclusterComputeResource, err := finder.ClusterComputeResourceList(ctx, \"*\")\n\tframework.Logf(\"clusterComputeResource %v\", 
clusterComputeResource)\n\tgomega.Expect(err).NotTo(gomega.HaveOccurred())\n\n\treturn clusterComputeResource, vsanHealthClient, err\n}", "func (o GetClustersResultOutput) ClusterIdentifiers() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetClustersResult) []string { return v.ClusterIdentifiers }).(pulumi.StringArrayOutput)\n}", "func (elementConfiguration *ElementConfiguration) ListClusters() ([]string, error) {\n\t// collect names\n\tclusterConfigurations := []string{}\n\n elementConfiguration.ClustersX.RLock()\n\tfor clusterConfiguration := range elementConfiguration.Clusters {\n\t\tclusterConfigurations = append(clusterConfigurations, clusterConfiguration)\n\t}\n\telementConfiguration.ClustersX.RUnlock()\n\n\t// success\n\treturn clusterConfigurations, nil\n}", "func (a *Client) GetMsgVpnDistributedCacheClusters(params *GetMsgVpnDistributedCacheClustersParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnDistributedCacheClustersOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnDistributedCacheClustersParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnDistributedCacheClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/distributedCaches/{cacheName}/clusters\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnDistributedCacheClustersReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnDistributedCacheClustersOK), nil\n\n}", "func (cb *clientBase) GetCluster() string {\n\treturn cb.cluster\n}", "func (s *RpcClient) GetClusterNodes(ctx context.Context) ([]GetClusterNodesResponse, error) {\n\tres := struct 
{\n\t\tGeneralResponse\n\t\tResult []GetClusterNodesResponse `json:\"result\"`\n\t}{}\n\terr := s.request(ctx, \"getClusterNodes\", []interface{}{}, &res)\n\tif err != nil {\n\t\treturn []GetClusterNodesResponse{}, err\n\t}\n\tif res.Error != nil {\n\t\treturn []GetClusterNodesResponse{}, errors.New(res.Error.Message)\n\t}\n\treturn res.Result, nil\n}", "func (op *outputProvider) GetRemoteClusters(opts ...services.MarshalOption) ([]types.RemoteCluster, error) {\n\treturn op.impersonatedClient.GetRemoteClusters(opts...)\n}", "func (page ClusterListResultPage) Values() []Cluster {\n\tif page.clr.IsEmpty() {\n\t\treturn nil\n\t}\n\treturn *page.clr.Value\n}", "func (m *RedisProxy) GetCluster() string {\n\tif m != nil {\n\t\treturn m.Cluster\n\t}\n\treturn \"\"\n}", "func (connection *Connection) GetClusterNodes() []*URL {\n\tif connection.IsCluster() {\n\t\treturn connection.adabasToData.transactions.clusterNodes\n\t}\n\treturn make([]*URL, 0)\n}", "func GetClusterId() string {\n\treturn axClusterId\n}", "func (a *Client) V2ListClusters(ctx context.Context, params *V2ListClustersParams) (*V2ListClustersOK, error) {\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"v2ListClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/v2/clusters\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &V2ListClustersReader{formats: a.formats},\n\t\tAuthInfo: a.authInfo,\n\t\tContext: ctx,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*V2ListClustersOK), nil\n\n}", "func ListAllClusters(response *JsonListClustersMap) *JsonListClustersMap {\n\tvar SIDCluster int\n\tvar SName string\n\tvar SAWSAccount int64\n\tvar SAWSRegion string\n\tvar SAWSEnvironment string\n\tvar SK8sVersion string\n\n\tvar SNodeType string\n\tvar SNodeInstance string\n\tvar STotalInstances 
int\n\n\tvar totalInstances int\n\n\tdescription := make(DescriptionMap)\n\n\tdb, err := sql.Open(\"mysql\", UserDB+\":\"+PassDB+\"@tcp(\"+HostDB+\":\"+PortDB+\")/\"+DatabaseDB+\"?charset=utf8\")\n\tcheckErr(err)\n\n\tdefer db.Close()\n\n\trows, err := db.Query(\"SELECT id_cluster, nome, aws_account, aws_region, aws_env, k8s_version FROM clusters ORDER BY nome\")\n\tcheckErr(err)\n\n\tfor rows.Next() {\n\t\terr = rows.Scan(&SIDCluster, &SName, &SAWSAccount, &SAWSRegion, &SAWSEnvironment, &SK8sVersion)\n\t\tcheckErr(err)\n\n\t\tdescription = DescriptionMap{}\n\t\ttotalInstances = 0\n\n\t\trows1, err := db.Query(\"SELECT node_type, node_instance, total_instances FROM nodes WHERE id_cluster=?\", SIDCluster)\n\t\tcheckErr(err)\n\n\t\tfor rows1.Next() {\n\t\t\terr = rows1.Scan(&SNodeType, &SNodeInstance, &STotalInstances)\n\t\t\tcheckErr(err)\n\n\t\t\tdescription[SNodeType] = append(\n\t\t\t\tdescription[SNodeType],\n\t\t\t\tDescriptionStruct{\n\t\t\t\t\tDescription{\n\t\t\t\t\t\tType: SNodeInstance,\n\t\t\t\t\t\tTotalTypeInstances: STotalInstances,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t)\n\n\t\t\ttotalInstances = totalInstances + STotalInstances\n\t\t}\n\n\t\t*response = append(\n\t\t\t*response,\n\t\t\tjsonListClusters{\n\t\t\t\tClusterName: SName,\n\t\t\t\tAws: AWS{\n\t\t\t\t\tAccount: SAWSAccount,\n\t\t\t\t\tRegion: SAWSRegion,\n\t\t\t\t\tEnvironment: SAWSEnvironment,\n\t\t\t\t},\n\t\t\t\tK8SVersion: SK8sVersion,\n\t\t\t\tInstances: Instances{\n\t\t\t\t\tTotalInstances: totalInstances,\n\t\t\t\t\tDescription: description,\n\t\t\t\t},\n\t\t\t},\n\t\t)\n\t}\n\n\treturn response\n}", "func (m *Manager) GetClusterList() ([]Cluster, error) {\n\tnames, err := m.specManager.List()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar clusters = []Cluster{}\n\n\tfor _, name := range names {\n\t\tmetadata, err := m.meta(name)\n\t\tif err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) &&\n\t\t\t!errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) {\n\t\t\treturn nil, 
perrs.Trace(err)\n\t\t}\n\n\t\tbase := metadata.GetBaseMeta()\n\n\t\tclusters = append(clusters, Cluster{\n\t\t\tName: name,\n\t\t\tUser: base.User,\n\t\t\tVersion: base.Version,\n\t\t\tPath: m.specManager.Path(name),\n\t\t\tPrivateKey: m.specManager.Path(name, \"ssh\", \"id_rsa\"),\n\t\t})\n\t}\n\n\treturn clusters, nil\n}", "func (r *ProjectsInstancesClustersService) List(parent string) *ProjectsInstancesClustersListCall {\n\tc := &ProjectsInstancesClustersListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.parent = parent\n\treturn c\n}", "func GetClusterMode() string {\n\treturn masterRTCfg.clusterMode\n}", "func (p PGSQLConnection) GetAllClusters() ([]ClusterModel, error) {\n\tclusters := []ClusterModel{}\n\tif err := p.connection.Select(&clusters, \"SELECT * FROM clusters\"); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn clusters, nil\n}", "func (o GetClustersClusterOutput) ClusterId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetClustersCluster) string { return v.ClusterId }).(pulumi.StringOutput)\n}", "func clusterList() []string {\n\tif c := envy.String(\"DQLITED_CLUSTER\"); c != \"\" {\n\t\treturn strings.Split(c, \",\")\n\t}\n\treturn defaultCluster\n}", "func (d *Dao) OverlordClusters(c context.Context, zone, appid string) (ocs []*model.OverlordCluster, err error) {\n\tvar res struct {\n\t\tData []*model.OverlordApiserver `json:\"grouped_clusters\"`\n\t}\n\tif err = d.client.RESTfulGet(c, apiserverURI, \"\", nil, &res, appid); err != nil {\n\t\tlog.Error(\"overlord cluster url(%s) appid(%s) error(%v)\", apiserverURI, appid, err)\n\t\treturn\n\t}\nGETALL:\n\tfor _, oa := range res.Data {\n\t\tif zone == \"\" || oa.Group == zone {\n\t\t\tfor _, oc := range oa.Clusters {\n\t\t\t\tcluster := &model.OverlordCluster{\n\t\t\t\t\tName: oc.Name,\n\t\t\t\t\tType: oc.Type,\n\t\t\t\t\tZone: zone,\n\t\t\t\t\tHashMethod: \"fnv1a_64\",\n\t\t\t\t\tHashDistribution: \"ketama\",\n\t\t\t\t\tHashTag: \"{}\",\n\t\t\t\t\tListenProto: 
\"tcp\",\n\t\t\t\t\tListenAddr: net.JoinHostPort(\"0.0.0.0\", strconv.Itoa(oc.FrontEndPort)),\n\t\t\t\t\tDailTimeout: 1000,\n\t\t\t\t\tReadTimeout: 1000,\n\t\t\t\t\tWriteTimeout: 1000,\n\t\t\t\t\tNodeConn: 2,\n\t\t\t\t\tPingFailLimit: 3,\n\t\t\t\t\tPingAutoEject: true,\n\t\t\t\t}\n\t\t\t\tfor _, oci := range oc.Instances {\n\t\t\t\t\tif oc.Type == \"redis_cluster\" && oci.Role != \"master\" {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\ton := &model.OverlordNode{\n\t\t\t\t\t\tAlias: oci.Alias,\n\t\t\t\t\t\tAddr: net.JoinHostPort(oci.IP, strconv.Itoa(oci.Port)),\n\t\t\t\t\t\tWeight: oci.Weight,\n\t\t\t\t\t}\n\t\t\t\t\tcluster.Nodes = append(cluster.Nodes, on)\n\t\t\t\t}\n\t\t\t\tocs = append(ocs, cluster)\n\t\t\t}\n\t\t}\n\t}\n\tif len(ocs) == 0 && zone != \"\" {\n\t\tzone = \"\"\n\t\tgoto GETALL\n\t}\n\treturn\n}", "func fetchCluster(c *gin.Context) string {\n\tconst key = \"cluster\"\n\n\tswitch {\n\tcase len(c.Param(key)) > 0:\n\t\treturn c.Param(key)\n\tcase len(c.Query(key)) > 0:\n\t\treturn c.Query(key)\n\tcase len(c.PostForm(key)) > 0:\n\t\treturn c.PostForm(key)\n\tdefault:\n\t\treturn \"\"\n\t}\n}", "func (a *Client) GetClusterCredentials(params *GetClusterCredentialsParams, authInfo runtime.ClientAuthInfoWriter) (*GetClusterCredentialsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetClusterCredentialsParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"GetClusterCredentials\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/v1/clusters/{name}/credentials\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetClusterCredentialsReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetClusterCredentialsOK), 
nil\n\n}", "func (o LookupResponsePolicyResultOutput) GkeClusters() ResponsePolicyGKEClusterResponseArrayOutput {\n\treturn o.ApplyT(func(v LookupResponsePolicyResult) []ResponsePolicyGKEClusterResponse { return v.GkeClusters }).(ResponsePolicyGKEClusterResponseArrayOutput)\n}", "func GetManegementCluster(version, capiImage, capdImage string) ([]runtime.Object, error) {\n\tcapiObjects, err := GetCAPI(version, capiImage)\n\tif err != nil {\n\t\treturn []runtime.Object{}, err\n\t}\n\n\tnamespaceObj := GetNamespace()\n\tstatefulSet := GetStatefulSet(capdImage)\n\tclusterRole := GetClusterRole()\n\tclusterRoleBinding := GetClusterRoleBinding()\n\n\treturn append(capiObjects,\n\t\t&namespaceObj,\n\t\t&statefulSet,\n\t\t&clusterRole,\n\t\t&clusterRoleBinding,\n\t), nil\n}", "func (c *Client) Cluster(ctx context.Context) ([]NodeInfo, error) {\n\trequest := protocol.Message{}\n\trequest.Init(16)\n\tresponse := protocol.Message{}\n\tresponse.Init(512)\n\n\tprotocol.EncodeCluster(&request, protocol.ClusterFormatV1)\n\n\tif err := c.protocol.Call(ctx, &request, &response); err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to send Cluster request\")\n\t}\n\n\tservers, err := protocol.DecodeNodes(&response)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to parse Node response\")\n\t}\n\n\treturn servers, nil\n}", "func GetClusterIPs(service *corev1.Service) []string {\n\tclusterIPs := []string{service.Spec.ClusterIP}\n\tif len(service.Spec.ClusterIPs) > 0 {\n\t\tclusterIPs = service.Spec.ClusterIPs\n\t}\n\n\t// Same IPv6 could be represented differently (as from rfc5952):\n\t// 2001:db8:0:0:aaaa::1\n\t// 2001:db8::aaaa:0:0:1\n\t// 2001:db8:0::aaaa:0:0:1\n\t// net.ParseIP(ip).String() output is used as a normalization form\n\t// for all cases above it returns 2001:db8::aaaa:0:0:1\n\t// without the normalization there could be mismatches in key lookups e.g. 
for PTR\n\tnormalized := make([]string, 0, len(clusterIPs))\n\tfor _, ip := range clusterIPs {\n\t\tnormalized = append(normalized, net.ParseIP(ip).String())\n\t}\n\n\treturn normalized\n}", "func (a *Client) ListAvailableClusters(ctx context.Context, params *ListAvailableClustersParams) (*ListAvailableClustersOK, error) {\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"listAvailableClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/heappe/ClusterInformation/ListAvailableClusters\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &ListAvailableClustersReader{formats: a.formats},\n\t\tAuthInfo: a.authInfo,\n\t\tContext: ctx,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ListAvailableClustersOK), nil\n\n}" ]
[ "0.7150111", "0.70848936", "0.7050372", "0.69830257", "0.69812804", "0.68225586", "0.6785897", "0.67075115", "0.67008924", "0.66717374", "0.6653548", "0.6628853", "0.65589386", "0.6548702", "0.6535888", "0.6530604", "0.6489139", "0.6475645", "0.6451163", "0.6437961", "0.64292145", "0.6420646", "0.64115655", "0.6351999", "0.6342829", "0.63368356", "0.63260955", "0.63172734", "0.6294134", "0.6274878", "0.62505454", "0.62502795", "0.6240707", "0.6152818", "0.61403", "0.61298174", "0.6111065", "0.6086631", "0.60699946", "0.605888", "0.6055227", "0.6039949", "0.60248417", "0.60127914", "0.5990848", "0.5971463", "0.5963538", "0.5935251", "0.59316564", "0.59266365", "0.59207803", "0.59110713", "0.5910659", "0.590048", "0.5897609", "0.58610535", "0.58484685", "0.5844702", "0.58280903", "0.5825456", "0.58106434", "0.5751794", "0.5746291", "0.5745103", "0.57434946", "0.5741837", "0.5721784", "0.57198244", "0.57118505", "0.5692283", "0.567878", "0.5669264", "0.56662285", "0.5651515", "0.56448567", "0.563336", "0.56233066", "0.56175965", "0.5593433", "0.55922747", "0.5577536", "0.55719817", "0.55662704", "0.5562364", "0.5552811", "0.55524254", "0.5536599", "0.55298865", "0.5524156", "0.5522188", "0.5513524", "0.55062175", "0.55043143", "0.5485468", "0.5481368", "0.5479709", "0.5474633", "0.54723483", "0.5467098", "0.5448341" ]
0.76226926
0
GetClustersOk returns a tuple with the Clusters field value and a boolean to check if the value has been set.
func (o *QueueManager) GetClustersOk() (*[]string, bool) { if o == nil { return nil, false } return &o.Clusters, true }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewGetClustersOK() *GetClustersOK {\n\treturn &GetClustersOK{}\n}", "func (o *ResourceLimits) GetK8sClustersProvisionedOk() (*int32, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.K8sClustersProvisioned, true\n}", "func (o *VirtualizationVmwareVirtualMachineAllOf) GetClusterOk() (*VirtualizationVmwareClusterRelationship, bool) {\n\tif o == nil || o.Cluster == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Cluster, true\n}", "func NewDescribeClustersOK() *DescribeClustersOK {\n\n\treturn &DescribeClustersOK{}\n}", "func (c *cloud) Clusters() (cloudprovider.Clusters, bool) {\n\tklog.V(4).Infof(\"Clusters called\")\n\treturn nil, false\n}", "func (c *Config) GetClusters(ctx context.Context, quiet bool, filterMap map[string]string, clustersName ...string) (string, error) {\n\tc.Logger.Debugf(\"Sending parameters to server to get the clusters %q\", strings.Join(clustersName, \", \"))\n\n\tfilter := MapToSlice(filterMap)\n\n\treturn c.RunGRPCnRESTFunc(\"get\", true,\n\t\tfunc() (string, error) {\n\t\t\treturn c.getClustersGRPC(ctx, quiet, filter, clustersName...)\n\t\t},\n\t\tfunc() (string, error) {\n\t\t\treturn c.getClustersHTTP(quiet, filter, clustersName...)\n\t\t})\n}", "func (o *ListClustersOnEndpointUsingGETOK) IsSuccess() bool {\n\treturn true\n}", "func (c *cloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func (o AppProjectSpecSyncWindowsOutput) Clusters() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v AppProjectSpecSyncWindows) []string { return v.Clusters }).(pulumi.StringArrayOutput)\n}", "func (a *Client) GetClusters(params *GetClustersParams, opts ...ClientOption) (*GetClustersOK, *GetClustersMultiStatus, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetClustersParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"GetClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: 
\"/kubernetes-protection/entities/kubernetes/clusters/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\", \"application/octet-stream\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetClustersReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tswitch value := result.(type) {\n\tcase *GetClustersOK:\n\t\treturn value, nil, nil\n\tcase *GetClustersMultiStatus:\n\t\treturn nil, value, nil\n\t}\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for kubernetes_protection: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (o *NiatelemetryNexusDashboardsAllOf) GetClusterNameOk() (*string, bool) {\n\tif o == nil || o.ClusterName == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterName, true\n}", "func (o *ProjectDeploymentRuleResponse) GetClusterOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Cluster, true\n}", "func (h *httpCloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func (cloud *Cloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func (o *VirtualizationIweClusterAllOf) GetClusterNameOk() (*string, bool) {\n\tif o == nil || o.ClusterName == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterName, true\n}", "func (o *ClusterSummaryDTO) GetClusteredOk() (*bool, bool) {\n\tif o == nil || o.Clustered == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Clustered, true\n}", "func (az *Cloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func compareClusters(got, want *types.Cluster) bool 
{\n\tresult := false\n\tif reflect.DeepEqual(got.Status, want.Status) {\n\t\tresult = true\n\t}\n\n\treturn result\n}", "func (bc *Baiducloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func (o *VirtualizationBaseHostPciDeviceAllOf) GetClusterOk() (*VirtualizationBaseClusterRelationship, bool) {\n\tif o == nil || o.Cluster == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Cluster, true\n}", "func NewListClustersOK() *ListClustersOK {\n\treturn &ListClustersOK{}\n}", "func (o *VirtualizationIweVirtualMachine) GetClusterOk() (*VirtualizationIweClusterRelationship, bool) {\n\tif o == nil || o.Cluster == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Cluster, true\n}", "func (o *QueueManager) GetClusters() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\n\treturn o.Clusters\n}", "func (o *HyperflexEncryption) GetClusterOk() (*HyperflexClusterRelationship, bool) {\n\tif o == nil || o.Cluster == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Cluster, true\n}", "func (o *VirtualizationIweHost) GetClusterOk() (*VirtualizationIweClusterRelationship, bool) {\n\tif o == nil || o.Cluster == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Cluster, true\n}", "func (o *StorageHyperFlexStorageContainer) GetClusterOk() (*HyperflexClusterRelationship, bool) {\n\tif o == nil || o.Cluster == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Cluster, true\n}", "func (o *ListClustersOnEndpointUsingGETForbidden) IsSuccess() bool {\n\treturn false\n}", "func (o *HyperflexMapClusterIdToProtectionInfoAllOf) GetClusterIdOk() (*string, bool) {\n\tif o == nil || o.ClusterId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterId, true\n}", "func (o *ClusterSummaryDTO) GetConnectedToClusterOk() (*bool, bool) {\n\tif o == nil || o.ConnectedToCluster == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ConnectedToCluster, true\n}", "func (o *MoveClustersAccepted) IsSuccess() bool {\n\treturn true\n}", "func (s *RaftDatabase) Clusters() int {\n\treturn 
GetArg(s.name, \"clusters\").Int(s.clusters)\n}", "func (c *ClientImpl) GetClusters(ctx context.Context, hcpHostURL string) (models.ClusterResp, error) {\n\tspan, _ := opentracing.StartSpanFromContext(ctx, \"Get Clusters\")\n\tdefer span.Finish()\n\n\tsession, err := c.getSession(ctx, hcpHostURL, hcpUserName, hcpPassword)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, err\n\t}\n\n\tstatus = Failure\n\tmonitor := metrics.StartExternalCall(externalSvcName, \"Fetch Clusters\")\n\tdefer func() { monitor.RecordWithStatus(status) }()\n\n\tresp, err := mlopsHttp.ExecuteHTTPRequest(\n\t\tctx,\n\t\tc.client,\n\t\thcpHostURL+clusterPathV2,\n\t\thttp.MethodGet,\n\t\tmap[string]string{sessionHeader: session},\n\t\tbytes.NewReader(nil),\n\t)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, errors.Wrapf(err, \"while fetching clusters in MLOps controller platform.\")\n\t}\n\tresp.Body.Close()\n\n\tstatus = Success\n\n\terr = c.deleteSession(ctx, hcpHostURL, session)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, err\n\t}\n\n\tclustersResp := models.ClusterResp{}\n\tjson.NewDecoder(resp.Body).Decode(&clustersResp)\n\n\treturn clustersResp, nil\n}", "func (a *Client) ListClusters(params *ListClustersParams, authInfo runtime.ClientAuthInfoWriter) (*ListClustersOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListClustersParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"ListClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/v1/clusters\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ListClustersReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ListClustersOK), nil\n\n}", "func (o *QueueManager) 
GetClusterQueuesOk() (*[]ClusterQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ClusterQueues, true\n}", "func (o *V0037Node) GetCoresOk() (*int32, bool) {\n\tif o == nil || o.Cores == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Cores, true\n}", "func (o *ClusterSummaryDTO) GetConnectedNodesOk() (*string, bool) {\n\tif o == nil || o.ConnectedNodes == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ConnectedNodes, true\n}", "func (e *ECS) ListClusters(req *ListClustersReq) (\n\t*ListClustersResp, error) {\n\tif req == nil {\n\t\treturn nil, fmt.Errorf(\"The req params cannot be nil\")\n\t}\n\n\tparams := makeParams(\"ListClusters\")\n\tif req.MaxResults > 0 {\n\t\tparams[\"maxResults\"] = strconv.Itoa(int(req.MaxResults))\n\t}\n\tif req.NextToken != \"\" {\n\t\tparams[\"nextToken\"] = req.NextToken\n\t}\n\n\tresp := new(ListClustersResp)\n\tif err := e.query(params, resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func (o *V0037JobProperties) GetClusterConstraintsOk() (*string, bool) {\n\tif o == nil || o.ClusterConstraints == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterConstraints, true\n}", "func (o *NiatelemetryNexusDashboardsAllOf) GetIsClusterHealthyOk() (*string, bool) {\n\tif o == nil || o.IsClusterHealthy == nil {\n\t\treturn nil, false\n\t}\n\treturn o.IsClusterHealthy, true\n}", "func (m *MockBuilder) Clusters() []string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Clusters\")\n\tret0, _ := ret[0].([]string)\n\treturn ret0\n}", "func (o *MoveClustersForbidden) IsSuccess() bool {\n\treturn false\n}", "func (o *NiatelemetryNexusDashboardsAllOf) GetClusterUuidOk() (*string, bool) {\n\tif o == nil || o.ClusterUuid == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterUuid, true\n}", "func (o *VirtualizationIweClusterAllOf) GetHxClusterOk() (*StorageBaseClusterRelationship, bool) {\n\tif o == nil || o.HxCluster == nil {\n\t\treturn nil, false\n\t}\n\treturn o.HxCluster, true\n}", "func (o 
*V2GetClusterDefaultConfigOK) IsSuccess() bool {\n\treturn true\n}", "func (o *MetroclusterSvmGetOK) IsSuccess() bool {\n\treturn true\n}", "func (s *Snapshot) NumClusters(ns core.Namespace) int {\n\tif val, ok := s.clusters[ns]; ok && val != nil {\n\t\treturn len(val)\n\t\t//return val.Len()\n\t}\n\treturn 0\n}", "func (adm Admin) ListClusters() (string, error) {\n\tvar clusters []string\n\n\tchildren, err := adm.zkClient.Children(\"/\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, cluster := range children {\n\t\tif ok, err := adm.isClusterSetup(cluster); ok && err == nil {\n\t\t\tclusters = append(clusters, cluster)\n\t\t}\n\t}\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"Existing clusters: \\n\")\n\n\tfor _, cluster := range clusters {\n\t\tbuffer.WriteString(\" \" + cluster + \"\\n\")\n\t}\n\treturn buffer.String(), nil\n}", "func (o *NiatelemetryNexusDashboardsAllOf) GetNdClusterSizeOk() (*int64, bool) {\n\tif o == nil || o.NdClusterSize == nil {\n\t\treturn nil, false\n\t}\n\treturn o.NdClusterSize, true\n}", "func (a *DefaultApiService) ListClusters(ctx _context.Context, localVarOptionals *ListClustersOpts) (Clusters, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue Clusters\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/clusters\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif localVarOptionals != nil && localVarOptionals.Id.IsSet() {\n\t\tt:=localVarOptionals.Id.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"id[]\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else 
{\n\t\t\tlocalVarQueryParams.Add(\"id[]\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.NotId.IsSet() {\n\t\tt:=localVarOptionals.NotId.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"!id[]\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"!id[]\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.StoryCountMin.IsSet() {\n\t\tlocalVarQueryParams.Add(\"story_count.min\", parameterToString(localVarOptionals.StoryCountMin.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.StoryCountMax.IsSet() {\n\t\tlocalVarQueryParams.Add(\"story_count.max\", parameterToString(localVarOptionals.StoryCountMax.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.TimeStart.IsSet() {\n\t\tlocalVarQueryParams.Add(\"time.start\", parameterToString(localVarOptionals.TimeStart.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.TimeEnd.IsSet() {\n\t\tlocalVarQueryParams.Add(\"time.end\", parameterToString(localVarOptionals.TimeEnd.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.EarliestStoryStart.IsSet() {\n\t\tlocalVarQueryParams.Add(\"earliest_story.start\", parameterToString(localVarOptionals.EarliestStoryStart.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.EarliestStoryEnd.IsSet() {\n\t\tlocalVarQueryParams.Add(\"earliest_story.end\", parameterToString(localVarOptionals.EarliestStoryEnd.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.LatestStoryStart.IsSet() {\n\t\tlocalVarQueryParams.Add(\"latest_story.start\", parameterToString(localVarOptionals.LatestStoryStart.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.LatestStoryEnd.IsSet() 
{\n\t\tlocalVarQueryParams.Add(\"latest_story.end\", parameterToString(localVarOptionals.LatestStoryEnd.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.LocationCountry.IsSet() {\n\t\tt:=localVarOptionals.LocationCountry.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"location.country\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"location.country\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.NotLocationCountry.IsSet() {\n\t\tt:=localVarOptionals.NotLocationCountry.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"!location.country\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"!location.country\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Return_.IsSet() {\n\t\tt:=localVarOptionals.Return_.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"return[]\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"return[]\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.SortBy.IsSet() {\n\t\tlocalVarQueryParams.Add(\"sort_by\", parameterToString(localVarOptionals.SortBy.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.SortDirection.IsSet() {\n\t\tlocalVarQueryParams.Add(\"sort_direction\", parameterToString(localVarOptionals.SortDirection.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Cursor.IsSet() {\n\t\tlocalVarQueryParams.Add(\"cursor\", 
parameterToString(localVarOptionals.Cursor.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.PerPage.IsSet() {\n\t\tlocalVarQueryParams.Add(\"per_page\", parameterToString(localVarOptionals.PerPage.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\", \"text/xml\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"X-AYLIEN-NewsAPI-Application-ID\"] = key\n\t\t}\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"X-AYLIEN-NewsAPI-Application-Key\"] = key\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, 
localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 422 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 429 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif 
localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (o *ClusterNtpKeysGetOK) IsSuccess() bool {\n\treturn true\n}", "func ExampleSnowball_ListClusters_shared00() {\n\tsvc := snowball.New(session.New())\n\tinput := &snowball.ListClustersInput{}\n\n\tresult, err := svc.ListClusters(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase snowball.ErrCodeInvalidNextTokenException:\n\t\t\t\tfmt.Println(snowball.ErrCodeInvalidNextTokenException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (a *ClustersApiService) ListClusters(ctx _context.Context, space string) ApiListClustersRequest {\n\treturn ApiListClustersRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tspace: space,\n\t}\n}", "func (o *VirtualizationIweClusterAllOf) GetComputeNodeCountOk() (*int64, bool) {\n\tif o == nil || o.ComputeNodeCount == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ComputeNodeCount, true\n}", "func (adm Admin) ListClusters() (string, error) {\n\tconn := newConnection(adm.ZkSvr)\n\terr := conn.Connect()\n\tif 
err != nil {\n\t\tfmt.Println(\"Failed to connect to zookeeper.\")\n\t\treturn \"\", err\n\t}\n\tdefer conn.Disconnect()\n\n\tvar clusters []string\n\n\tchildren, err := conn.Children(\"/\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, cluster := range children {\n\t\tif ok, err := conn.IsClusterSetup(cluster); ok && err == nil {\n\t\t\tclusters = append(clusters, cluster)\n\t\t}\n\t}\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"Existing clusters: \\n\")\n\n\tfor _, cluster := range clusters {\n\t\tbuffer.WriteString(\" \" + cluster + \"\\n\")\n\t}\n\treturn buffer.String(), nil\n}", "func (o *V2ResetClusterNotFound) IsSuccess() bool {\n\treturn false\n}", "func (o *HyperflexSoftwareVersionPolicy) GetClusterProfilesOk() ([]HyperflexClusterProfileRelationship, bool) {\n\tif o == nil || o.ClusterProfiles == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterProfiles, true\n}", "func (o *V2ImportClusterCreated) IsSuccess() bool {\n\treturn true\n}", "func (o *V2GetClusterDefaultConfigUnauthorized) IsSuccess() bool {\n\treturn false\n}", "func (o *V2ResetClusterAccepted) IsSuccess() bool {\n\treturn true\n}", "func (o *HyperflexHxapDvUplink) GetClusterOk() (*HyperflexHxapClusterRelationship, bool) {\n\tif o == nil || o.Cluster == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Cluster, true\n}", "func (o *MoveClustersBadRequest) IsSuccess() bool {\n\treturn false\n}", "func (m *MockEKSServiceInterface) ListClusters(input *eks.ListClustersInput) (*eks.ListClustersOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListClusters\", input)\n\tret0, _ := ret[0].(*eks.ListClustersOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mr *MockBuilderMockRecorder) Clusters() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Clusters\", reflect.TypeOf((*MockBuilder)(nil).Clusters))\n}", "func NewGetClusterOK() *GetClusterOK {\n\n\treturn &GetClusterOK{}\n}", "func (o 
*VirtualizationIweClusterAllOf) GetConvergedNodeCountOk() (*int64, bool) {\n\tif o == nil || o.ConvergedNodeCount == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ConvergedNodeCount, true\n}", "func (o *Cluster) GetCommunicationUrisOk() (*[]string, bool) {\n\tif o == nil || o.CommunicationUris == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CommunicationUris, true\n}", "func (o *ResourceLimits) HasK8sClustersProvisioned() bool {\n\tif o != nil && o.K8sClustersProvisioned != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (bc *Baiducloud) ListClusters(ctx context.Context) ([]string, error) {\n\treturn nil, fmt.Errorf(\"ListClusters unimplemented\")\n}", "func (o *V2GetPresignedForClusterCredentialsOK) IsSuccess() bool {\n\treturn true\n}", "func (c *client) ClusterExists() (bool, error) {\n\tclusterJSON, err := c.runCmd(\"cluster\", \"list\", \"-o\", \"json\")\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tclusterList := &ClusterList{}\n\tif err := clusterList.Unmarshal([]byte(clusterJSON)); err != nil {\n\t\treturn false, err\n\t}\n\n\tfor _, cluster := range clusterList.Clusters {\n\t\tif cluster.Name == c.clusterName {\n\t\t\tif c.verbose {\n\t\t\t\tfmt.Printf(\"k3d cluster '%s' exists\", c.clusterName)\n\t\t\t}\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\tif c.verbose {\n\t\tfmt.Printf(\"k3d cluster '%s' does not exist\", c.clusterName)\n\t}\n\treturn false, nil\n}", "func ExampleClustersClient_Get() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicefabric.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewClustersClient().Get(ctx, \"resRg\", \"myCluster\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. 
We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.Cluster = armservicefabric.Cluster{\n\t// \tName: to.Ptr(\"myCluster\"),\n\t// \tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \tEtag: to.Ptr(\"W/\\\"636462502169240745\\\"\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster\"),\n\t// \tLocation: to.Ptr(\"eastus\"),\n\t// \tTags: map[string]*string{\n\t// \t},\n\t// \tProperties: &armservicefabric.ClusterProperties{\n\t// \t\tAddOnFeatures: []*armservicefabric.AddOnFeatures{\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesRepairManager),\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesDNSService),\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesBackupRestoreService),\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesResourceMonitorService)},\n\t// \t\t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\t\t\tEnvironment: to.Ptr(armservicefabric.ClusterEnvironmentWindows),\n\t// \t\t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t\t}},\n\t// \t\t\tAzureActiveDirectory: &armservicefabric.AzureActiveDirectory{\n\t// \t\t\t\tClientApplication: to.Ptr(\"d151ad89-4bce-4ae8-b3d1-1dc79679fa75\"),\n\t// \t\t\t\tClusterApplication: to.Ptr(\"5886372e-7bf4-4878-a497-8098aba608ae\"),\n\t// \t\t\t\tTenantID: to.Ptr(\"6abcc6a0-8666-43f1-87b8-172cf86a9f9c\"),\n\t// \t\t\t},\n\t// \t\t\tCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// 
\t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t}},\n\t// \t\t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t},\n\t// \t\t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\tIsAdmin: to.Ptr(true),\n\t// \t\t\t}},\n\t// \t\t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCertificateThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\tIsAdmin: to.Ptr(true),\n\t// \t\t\t}},\n\t// \t\t\tClusterCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\t\tClusterID: to.Ptr(\"92584666-9889-4ae8-8d02-91902923d37f\"),\n\t// \t\t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t// \t\t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t\t},\n\t// \t\t\tFabricSettings: []*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// \t\t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t\t}},\n\t// \t\t\t}},\n\t// \t\t\tManagementEndpoint: 
to.Ptr(\"https://myCluster.eastus.cloudapp.azure.com:19080\"),\n\t// \t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// \t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\t\tIsPrimary: to.Ptr(true),\n\t// \t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t\t}},\n\t// \t\t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelSilver),\n\t// \t\t\tReverseProxyCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t}},\n\t// \t\t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t},\n\t// \t\t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\t\tApplicationDeltaHealthPolicies: map[string]*armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\tDefaultServiceTypeDeltaHealthPolicy: &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// 
\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tServiceTypeDeltaHealthPolicies: map[string]*armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t},\n\t// \t\t\t\tForceRestart: to.Ptr(false),\n\t// \t\t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:05:00\"),\n\t// \t\t\t\tHealthCheckStableDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\tHealthCheckWaitDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\t\tApplicationHealthPolicies: map[string]*armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\tDefaultServiceTypeHealthPolicy: &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tServiceTypeHealthPolicies: map[string]*armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](100),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tMaxPercentUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t},\n\t// \t\t\t\tUpgradeDomainTimeout: to.Ptr(\"00:15:00\"),\n\t// \t\t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"00:10:00\"),\n\t// \t\t\t\tUpgradeTimeout: 
to.Ptr(\"01:00:00\"),\n\t// \t\t\t},\n\t// \t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeManual),\n\t// \t\t\tVMImage: to.Ptr(\"Windows\"),\n\t// \t\t},\n\t// \t}\n}", "func (o *ClusteSummaryEntity) GetClusterSummaryOk() (*ClusterSummaryDTO, bool) {\n\tif o == nil || o.ClusterSummary == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterSummary, true\n}", "func (c Client) ListClusters() (ClusterList, error) {\n\tbody, err := c.watsonClient.MakeRequest(\"GET\", c.version+\"/solr_clusters\", nil, nil)\n\tif err != nil {\n\t\treturn ClusterList{}, err\n\t}\n\tvar response ClusterList\n\terr = json.Unmarshal(body, &response)\n\treturn response, err\n}", "func NewPostClustersMulticlusterConfigOK() *PostClustersMulticlusterConfigOK {\n\treturn &PostClustersMulticlusterConfigOK{}\n}", "func (o *SubmitReplayRequestEntity) GetClusterNodeIdOk() (*string, bool) {\n\tif o == nil || o.ClusterNodeId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterNodeId, true\n}", "func (o *V2UpdateClusterUISettingsOK) IsSuccess() bool {\n\treturn true\n}", "func (o *KubernetesContainerRuntimePolicy) GetClusterProfilesOk() ([]KubernetesClusterProfileRelationship, bool) {\n\tif o == nil || o.ClusterProfiles == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterProfiles, true\n}", "func (a ClustersAPI) Get(clusterID string) (httpmodels.GetResp, error) {\n\tvar clusterInfo httpmodels.GetResp\n\n\tdata := struct {\n\t\tClusterID string `json:\"cluster_id,omitempty\" url:\"cluster_id,omitempty\"`\n\t}{\n\t\tclusterID,\n\t}\n\tresp, err := a.Client.performQuery(http.MethodGet, \"/clusters/get\", data, nil)\n\tif err != nil {\n\t\treturn clusterInfo, err\n\t}\n\n\terr = json.Unmarshal(resp, &clusterInfo)\n\treturn clusterInfo, err\n}", "func (o *NetgroupsSettingsCollectionGetOK) IsSuccess() bool {\n\treturn true\n}", "func NewGetVSphereDatacentersOK() *GetVSphereDatacentersOK {\n\treturn &GetVSphereDatacentersOK{}\n}", "func (_m *ComputeAPI) LookupClusters(project string) 
([]*container.Cluster, error) {\n\tret := _m.Called(project)\n\n\tvar r0 []*container.Cluster\n\tif rf, ok := ret.Get(0).(func(string) []*container.Cluster); ok {\n\t\tr0 = rf(project)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]*container.Cluster)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = rf(project)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (o *UcsdBackupInfoAllOf) GetConnectorsOk() ([]UcsdConnectorPack, bool) {\n\tif o == nil || o.Connectors == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Connectors, true\n}", "func (k Kind) ClusterExists() (bool, error) {\n\tcmd := kindCommand(\"kind get clusters\")\n\tcmd.Stdout = nil\n\tcmd.Stderr = nil\n\toutput, err := cmd.Output()\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn strings.Contains(string(output), \"kind\"), nil\n}", "func (c *Client) GetClusters(ctx context.Context) <-chan GetClusterResult {\n\t// TODO Make the concurrency configurable\n\tconcurrency := int(math.Min(5, float64(runtime.NumCPU())))\n\tresults := make(chan GetClusterResult, concurrency)\n\n\tclusterNames, err := c.GetClusterNames(ctx)\n\tif err != nil {\n\t\tclose(results)\n\t\treturn results\n\t}\n\n\tvar wg sync.WaitGroup\n\n\tgo func() {\n\t\tdefer close(results)\n\t\tfor _, clusterName := range clusterNames {\n\t\t\twg.Add(1)\n\t\t\tgo func(name string) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tcluster, err := c.GetCluster(ctx, name)\n\t\t\t\tresult := GetClusterResult{Cluster: cluster, Error: err}\n\t\t\t\tresults <- result\n\t\t\t}(clusterName)\n\t\t}\n\t\twg.Wait()\n\t}()\n\n\treturn results\n}", "func (o *V2ImportClusterForbidden) IsSuccess() bool {\n\treturn false\n}", "func (a ClustersAPI) List() ([]httpmodels.GetResp, error) {\n\tvar clusterList = struct {\n\t\tClusters []httpmodels.GetResp `json:\"clusters,omitempty\" url:\"clusters,omitempty\"`\n\t}{}\n\n\tresp, err := a.Client.performQuery(http.MethodGet, 
\"/clusters/list\", nil, nil)\n\tif err != nil {\n\t\treturn clusterList.Clusters, err\n\t}\n\n\terr = json.Unmarshal(resp, &clusterList)\n\treturn clusterList.Clusters, err\n}", "func NewGetClusterInfoOK() *GetClusterInfoOK {\n\treturn &GetClusterInfoOK{}\n}", "func (o *ClusterNtpKeysGetDefault) IsSuccess() bool {\n\treturn o._statusCode/100 == 2\n}", "func (o *ClusterRequest) GetMaxRunningNodesOk() (*int32, bool) {\n\tif o == nil || o.MaxRunningNodes == nil {\n\t\treturn nil, false\n\t}\n\treturn o.MaxRunningNodes, true\n}", "func (o *NvmeServiceCollectionGetOK) IsSuccess() bool {\n\treturn true\n}", "func (o *VirtualizationVmwareVirtualMachineAllOf) HasCluster() bool {\n\tif o != nil && o.Cluster != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *PcloudVpnconnectionsNetworksGetOK) IsSuccess() bool {\n\treturn true\n}", "func (a *ClustersApiService) ListClustersExecute(r ApiListClustersRequest) (ListClustersResponse, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue ListClustersResponse\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"ClustersApiService.ListClusters\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/spaces/{space}/clusters\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"space\"+\"}\", _neturl.PathEscape(parameterToString(r.space, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" 
{\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func FetchClusters(c *gin.Context) {\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, \"Start listing clusters\")\n\n\tvar clusters []banzaiSimpleTypes.ClusterSimple\n\tvar response []*cloud.ClusterRepresentation\n\tdatabase.Find(&clusters)\n\n\tif len(clusters) <= 0 
{\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, \"No clusters found\")\n\t\tcloud.SetResponseBodyJson(c, http.StatusNotFound, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusNotFound,\n\t\t\tcloud.JsonKeyMessage: \"No clusters found!\",\n\t\t})\n\t\treturn\n\t}\n\n\tfor _, cl := range clusters {\n\t\tclust := cloud.GetClusterRepresentation(&cl)\n\t\tif clust != nil {\n\t\t\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, fmt.Sprintf(\"Append %#v cluster representation to response\", clust))\n\t\t\tresponse = append(response, clust)\n\t\t}\n\n\t}\n\tcloud.SetResponseBodyJson(c, http.StatusOK, gin.H{\n\t\tcloud.JsonKeyStatus: http.StatusOK,\n\t\tcloud.JsonKeyData: response,\n\t})\n}", "func (p KopsProvisioner) clusterConfigExists(sc *kapp.StackConfig, providerImpl provider.Provider) (bool, error) {\n\n\tproviderVars := provider.GetVars(providerImpl)\n\tlog.Debugf(\"Checking if Kops cluster config exists for values: %#v\", providerVars)\n\n\tprovisionerValues := providerVars[PROVISIONER_KEY].(map[interface{}]interface{})\n\tkopsConfig, err := getKopsConfig(provisionerValues)\n\tif err != nil {\n\t\treturn false, errors.WithStack(err)\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel() // The cancel should be deferred so resources are cleaned up\n\n\targs := []string{\n\t\t\"get\",\n\t\t\"clusters\",\n\t}\n\n\targs = parameteriseValues(args, kopsConfig.Params.Global)\n\n\tcmd := exec.CommandContext(ctx, KOPS_PATH, args...)\n\tcmd.Env = os.Environ()\n\n\terr = cmd.Run()\n\tif ctx.Err() == context.DeadlineExceeded {\n\t\treturn false, errors.New(\"Timed out trying to retrieve kops cluster config. 
Check your credentials.\")\n\t}\n\tif err != nil {\n\t\tif _, ok := err.(*exec.ExitError); ok {\n\t\t\tlog.Debug(\"Cluster config doesn't exist\")\n\t\t\treturn false, nil\n\t\t} else {\n\t\t\treturn false, errors.Wrap(err, \"Error fetching kops clusters\")\n\t\t}\n\t}\n\n\treturn true, nil\n}", "func (o *TechsupportmanagementEndPointAllOf) GetClusterMemberOk() (*AssetClusterMemberRelationship, bool) {\n\tif o == nil || o.ClusterMember == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterMember, true\n}", "func (o *ClusterMetricsNodes) GetComputeOk() (*float64, bool) {\n\tif o == nil || o.Compute == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Compute, true\n}", "func (j *Juju) ClusterReady() (bool, error) {\n\ttmp := \"JUJU_DATA=\" + JujuDataPrefix + j.Name\n\tcmd := exec.Command(\"juju\", \"status\", \"--format=json\")\n\tcmd.Env = append(os.Environ(), tmp)\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"ClusterReady error: %v: %s\", err, err.(*exec.ExitError).Stderr)\n\t}\n\n\terr = json.Unmarshal([]byte(out), &jStats)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"ClusterReady error: %v: %s\", err, err.(*exec.ExitError).Stderr)\n\t}\n\n\tfor k := range jStats.Machines {\n\t\tmachineStatus := jStats.Machines[k].MachStatus[\"current\"]\n\t\tif machineStatus != \"started\" {\n\t\t\tlog.WithFields(logrus.Fields{\"name\": j.Name}).Info(\"Cluster Not Ready\")\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\tfor k := range jStats.ApplicationResults {\n\t\tappStatus := jStats.ApplicationResults[k].AppStatus[\"current\"]\n\t\tif appStatus != \"active\" {\n\t\t\tlog.WithFields(logrus.Fields{\"name\": j.Name}).Info(\"Cluster Not Ready\")\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\tlog.WithFields(logrus.Fields{\"name\": j.Name}).Info(\"Cluster Ready\")\n\treturn true, nil\n}", "func Clusters() (clusters map[string][]string) {\n\tclusters = make(map[string][]string)\n\tif addr := AccessConsulAddr(); addr != \"\" && Region() != \"\" 
{\n\t\treturn getClustersFromConsul(addr, Region())\n\t}\n\tcs := Get(\"Key-ClusterMgrCluster\").(map[string]string)\n\tfor key, value := range cs {\n\t\tclusters[key] = strings.Split(value, \" \")\n\t}\n\treturn\n}", "func (c *Client) GetClustersSync(ctx context.Context) ([]*Cluster, error) {\n\tclusters := make([]*Cluster, 0)\n\n\tfor result := range c.GetClusters(ctx) {\n\t\tif result.Error != nil {\n\t\t\treturn nil, result.Error\n\t\t}\n\t\tclusters = append(clusters, result.Cluster)\n\t}\n\n\treturn clusters, nil\n}" ]
[ "0.6719827", "0.65017885", "0.6465093", "0.646219", "0.6398727", "0.63660103", "0.63552487", "0.63049424", "0.627333", "0.62654936", "0.62586135", "0.6258082", "0.6212132", "0.6196596", "0.6170194", "0.61492944", "0.611987", "0.60981774", "0.60886943", "0.6081035", "0.6077892", "0.6050239", "0.6047554", "0.60216284", "0.59879977", "0.59566593", "0.5946033", "0.59301275", "0.5927767", "0.5911564", "0.58934337", "0.5866069", "0.58345604", "0.5772485", "0.5769356", "0.5660747", "0.56599987", "0.56461686", "0.5645801", "0.56294334", "0.55949396", "0.5581085", "0.55757517", "0.5563501", "0.5559378", "0.55457735", "0.5543135", "0.5541259", "0.5540796", "0.5539327", "0.55194145", "0.55074257", "0.5496589", "0.5492242", "0.5481603", "0.5480428", "0.5463602", "0.545469", "0.54539853", "0.54432994", "0.54390264", "0.54226196", "0.54190487", "0.5414241", "0.539737", "0.5394293", "0.5378104", "0.536012", "0.53583074", "0.5351078", "0.5350199", "0.5337369", "0.53363717", "0.5322175", "0.5313674", "0.53130996", "0.5307607", "0.53044075", "0.5302095", "0.5289414", "0.52863485", "0.5278594", "0.5277239", "0.5264858", "0.5254316", "0.5239916", "0.52364874", "0.5236181", "0.5233157", "0.52274984", "0.5223034", "0.5221388", "0.5219198", "0.52090365", "0.5195012", "0.51941526", "0.51929855", "0.51912737", "0.5186529", "0.5186496" ]
0.8224033
0
SetClusters sets field value
func (o *QueueManager) SetClusters(v []string) { o.Clusters = v }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *RaftDatabase) SetClusters(clusters int) {\n\ts.clusters = clusters\n}", "func (s *RaftDatabase) Clusters() int {\n\treturn GetArg(s.name, \"clusters\").Int(s.clusters)\n}", "func (store *CenterStore) SetCenters(clust core.Clust) {\n\tstore.centers[len(clust)] = clust\n}", "func (s *ListClustersOutput) SetClusters(v []*string) *ListClustersOutput {\n\ts.Clusters = v\n\treturn s\n}", "func (cc cacheCluster) Set(key string, val any) error {\n\treturn cc.SetCtx(context.Background(), key, val)\n}", "func (tr *Cluster) SetParameters(params map[string]interface{}) error {\n\tp, err := json.TFParser.Marshal(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)\n}", "func (c *Cluster) SetServerCoordinates(url string, serverCA []byte, user, password string, clientCert, clientKey []byte) error {\n\tc.Server = url\n\n\t// Create kube config\n\tu := &api.AuthInfo{}\n\tif password != \"\" {\n\t\tu.Username = user\n\t\tu.Password = password\n\t} else {\n\t\tu.ClientCertificateData = clientCert\n\t\tu.ClientKeyData = clientKey\n\t}\n\n\tkc := api.Config{\n\t\tKind: \"Config\",\n\t\tAPIVersion: \"v1\",\n\t\tPreferences: api.Preferences{},\n\t\tClusters: map[string]*api.Cluster{\n\t\t\tc.Name: {\n\t\t\t\tServer: c.Server,\n\t\t\t\tCertificateAuthorityData: serverCA,\n\t\t\t},\n\t\t},\n\t\tAuthInfos: map[string]*api.AuthInfo{\n\t\t\tuser: u,\n\t\t},\n\t\tContexts: map[string]*api.Context{\n\t\t\t\"default\": &api.Context{\n\t\t\t\tCluster: c.Name,\n\t\t\t\tAuthInfo: user,\n\t\t\t},\n\t\t},\n\t\tCurrentContext: \"default\",\n\t}\n\n\td, err := clientcmd.Write(kc)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp := filepath.Join(c.Path, \".kube\", \"config\")\n\terr = ioutil.WriteFile(p, d, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.log.V(2).Info(\"Write file\", \"path\", p)\n\n\t// Create clientset from kube/config\n\tconfig, err := clientcmd.BuildConfigFromFlags(\"\", p)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\tc.log.V(2).Info(\"Read config\", \"path\", p)\n\t// create the clientset\n\tclient, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.log.V(3).Info(\"Created client\")\n\n\tc.client = client\n\n\treturn nil\n}", "func (s *DescribeClustersInput) SetClusters(v []*string) *DescribeClustersInput {\n\ts.Clusters = v\n\treturn s\n}", "func setClusterRoles() cmds.StartupHook {\n\treturn func(ctx context.Context, wg *sync.WaitGroup, args cmds.StartupHookArgs) error {\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t<-args.APIServerReady\n\t\t\tlogrus.Info(\"Applying Cluster Role Bindings\")\n\n\t\t\tcs, err := newClient(args.KubeConfigAdmin, nil)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatalf(\"clusterrole: new k8s client: %s\", err.Error())\n\t\t\t}\n\n\t\t\tif err := setKubeletAPIServerRoleBinding(ctx, cs); err != nil {\n\t\t\t\tlogrus.Fatalf(\"psp: set kubeletAPIServerRoleBinding: %s\", err.Error())\n\t\t\t}\n\n\t\t\tif err := setKubeProxyServerRoleBinding(ctx, cs); err != nil {\n\t\t\t\tlogrus.Fatalf(\"psp: set kubeProxyServerRoleBinding: %s\", err.Error())\n\t\t\t}\n\n\t\t\tif err := setTunnelControllerRoleBinding(ctx, cs); err != nil {\n\t\t\t\tlogrus.Fatalf(\"psp: set tunnelControllerRoleBinding: %s\", err.Error())\n\t\t\t}\n\n\t\t\tif err := setCloudControllerManagerRoleBinding(ctx, cs); err != nil {\n\t\t\t\tlogrus.Fatalf(\"ccm: set cloudControllerManagerRoleBinding: %s\", err.Error())\n\t\t\t}\n\n\t\t\tlogrus.Info(\"Cluster Role Bindings applied successfully\")\n\t\t}()\n\t\treturn nil\n\t}\n}", "func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) {\n\tdaemon.clusterProvider = clusterProvider\n\tdaemon.netController.SetClusterProvider(clusterProvider)\n\tdaemon.attachableNetworkLock = locker.New()\n}", "func (cg *CGroup) SetCPUShare(limit int64) error {\n\tversion := cgControllers[\"cpu\"]\n\tswitch version {\n\tcase Unavailable:\n\t\treturn ErrControllerMissing\n\tcase V1:\n\t\treturn 
cg.rw.Set(version, \"cpu\", \"cpu.shares\", fmt.Sprintf(\"%d\", limit))\n\tcase V2:\n\t\treturn cg.rw.Set(version, \"cpu\", \"cpu.weight\", fmt.Sprintf(\"%d\", limit))\n\t}\n\n\treturn ErrUnknownVersion\n}", "func (x *Secp256k1N) Set(y *Secp256k1N) {\n\tx.limbs = y.limbs\n}", "func (c *Client) SetSlaves(v []interface{}) {\n\tc.slaves = make([]string,0,len(v))\n\tfor _, vv := range v {\n\t\tc.slaves = append(c.slaves, vv.(string))\n\t}\n}", "func (o AppProjectSpecSyncWindowsOutput) Clusters() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v AppProjectSpecSyncWindows) []string { return v.Clusters }).(pulumi.StringArrayOutput)\n}", "func (a *DefaultApiService) ListClusters(ctx _context.Context, localVarOptionals *ListClustersOpts) (Clusters, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue Clusters\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/clusters\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif localVarOptionals != nil && localVarOptionals.Id.IsSet() {\n\t\tt:=localVarOptionals.Id.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"id[]\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"id[]\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.NotId.IsSet() {\n\t\tt:=localVarOptionals.NotId.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"!id[]\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else 
{\n\t\t\tlocalVarQueryParams.Add(\"!id[]\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.StoryCountMin.IsSet() {\n\t\tlocalVarQueryParams.Add(\"story_count.min\", parameterToString(localVarOptionals.StoryCountMin.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.StoryCountMax.IsSet() {\n\t\tlocalVarQueryParams.Add(\"story_count.max\", parameterToString(localVarOptionals.StoryCountMax.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.TimeStart.IsSet() {\n\t\tlocalVarQueryParams.Add(\"time.start\", parameterToString(localVarOptionals.TimeStart.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.TimeEnd.IsSet() {\n\t\tlocalVarQueryParams.Add(\"time.end\", parameterToString(localVarOptionals.TimeEnd.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.EarliestStoryStart.IsSet() {\n\t\tlocalVarQueryParams.Add(\"earliest_story.start\", parameterToString(localVarOptionals.EarliestStoryStart.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.EarliestStoryEnd.IsSet() {\n\t\tlocalVarQueryParams.Add(\"earliest_story.end\", parameterToString(localVarOptionals.EarliestStoryEnd.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.LatestStoryStart.IsSet() {\n\t\tlocalVarQueryParams.Add(\"latest_story.start\", parameterToString(localVarOptionals.LatestStoryStart.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.LatestStoryEnd.IsSet() {\n\t\tlocalVarQueryParams.Add(\"latest_story.end\", parameterToString(localVarOptionals.LatestStoryEnd.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.LocationCountry.IsSet() {\n\t\tt:=localVarOptionals.LocationCountry.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"location.country\", 
parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"location.country\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.NotLocationCountry.IsSet() {\n\t\tt:=localVarOptionals.NotLocationCountry.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"!location.country\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"!location.country\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Return_.IsSet() {\n\t\tt:=localVarOptionals.Return_.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"return[]\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"return[]\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.SortBy.IsSet() {\n\t\tlocalVarQueryParams.Add(\"sort_by\", parameterToString(localVarOptionals.SortBy.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.SortDirection.IsSet() {\n\t\tlocalVarQueryParams.Add(\"sort_direction\", parameterToString(localVarOptionals.SortDirection.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Cursor.IsSet() {\n\t\tlocalVarQueryParams.Add(\"cursor\", parameterToString(localVarOptionals.Cursor.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.PerPage.IsSet() {\n\t\tlocalVarQueryParams.Add(\"per_page\", parameterToString(localVarOptionals.PerPage.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif 
localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\", \"text/xml\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"X-AYLIEN-NewsAPI-Application-ID\"] = key\n\t\t}\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"X-AYLIEN-NewsAPI-Application-Key\"] = key\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v 
Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 422 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 429 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, 
localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (m *MockBuilder) Clusters() []string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Clusters\")\n\tret0, _ := ret[0].([]string)\n\treturn ret0\n}", "func (o *V0037Node) SetCores(v int32) {\n\to.Cores = &v\n}", "func (d *DefaultDriver) SetClusterOpts(n node.Node, rtOpts map[string]string) error {\n\treturn &errors.ErrNotSupported{\n\t\tType: \"Function\",\n\t\tOperation: \"SetClusterOpts()\",\n\t}\n}", "func (coc *CoClustering) SetParams(params base.Params) {\n\tcoc.Base.SetParams(params)\n\t// Setup hyper-parameters\n\tcoc.nUserClusters = coc.Params.GetInt(base.NUserClusters, 3)\n\tcoc.nItemClusters = coc.Params.GetInt(base.NItemClusters, 3)\n\tcoc.nEpochs = coc.Params.GetInt(base.NEpochs, 20)\n}", "func (coc *CoClustering) SetParams(params base.Params) {\n\tcoc.Base.SetParams(params)\n\t// Setup hyper-parameters\n\tcoc.nUserClusters = coc.Params.GetInt(base.NUserClusters, 3)\n\tcoc.nItemClusters = coc.Params.GetInt(base.NItemClusters, 3)\n\tcoc.nEpochs = coc.Params.GetInt(base.NEpochs, 20)\n}", "func (a *ClustersApiService) ListClustersExecute(r ApiListClustersRequest) (ListClustersResponse, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue ListClustersResponse\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"ClustersApiService.ListClusters\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/spaces/{space}/clusters\"\n\tlocalVarPath = strings.Replace(localVarPath, 
\"{\"+\"space\"+\"}\", _neturl.PathEscape(parameterToString(r.space, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: 
localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func ExampleClustersClient_BeginCreateOrUpdate_putAClusterWithMinimumParameters() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicefabric.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := clientFactory.NewClustersClient().BeginCreateOrUpdate(ctx, \"resRg\", \"myCluster\", armservicefabric.Cluster{\n\t\tLocation: to.Ptr(\"eastus\"),\n\t\tTags: map[string]*string{},\n\t\tProperties: &armservicefabric.ClusterProperties{\n\t\t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t\t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t\t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t\t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t\t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t\t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t\t\t},\n\t\t\tFabricSettings: []*armservicefabric.SettingsSectionDescription{\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t\t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t\t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t\t\t\t\t\t}},\n\t\t\t\t}},\n\t\t\tManagementEndpoint: to.Ptr(\"http://myCluster.eastus.cloudapp.azure.com:19080\"),\n\t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t\t\t\t\t\tStartPort: 
to.Ptr[int32](20000),\n\t\t\t\t\t},\n\t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t\t\t\t\t},\n\t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t\t\t\t\tIsPrimary: to.Ptr(true),\n\t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t\t\t\t}},\n\t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelSilver),\n\t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeAutomatic),\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.Cluster = armservicefabric.Cluster{\n\t// \tName: to.Ptr(\"myCluster\"),\n\t// \tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \tEtag: to.Ptr(\"W/\\\"636462502169240743\\\"\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster\"),\n\t// \tLocation: to.Ptr(\"eastus\"),\n\t// \tTags: map[string]*string{\n\t// \t},\n\t// \tProperties: &armservicefabric.ClusterProperties{\n\t// \t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t{\n\t// \t\t\t\tCodeVersion: to.Ptr(\"7.0.470.9590\"),\n\t// \t\t\t\tEnvironment: to.Ptr(armservicefabric.ClusterEnvironmentWindows),\n\t// \t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t}},\n\t// \t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// \t\t},\n\t// \t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t},\n\t// \t\tClusterCodeVersion: to.Ptr(\"7.0.470.9590\"),\n\t// \t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\tClusterID: to.Ptr(\"92584666-9889-4ae8-8d02-91902923d37f\"),\n\t// \t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t// \t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t},\n\t// \t\tFabricSettings: []*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// 
\t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t}},\n\t// \t\t}},\n\t// \t\tManagementEndpoint: to.Ptr(\"http://myCluster.eastus.cloudapp.azure.com:19080\"),\n\t// \t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// \t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t},\n\t// \t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t},\n\t// \t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\tIsPrimary: to.Ptr(true),\n\t// \t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t}},\n\t// \t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelSilver),\n\t// \t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t},\n\t// \t\t\tForceRestart: to.Ptr(false),\n\t// \t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:45:00\"),\n\t// \t\t\tHealthCheckStableDuration: to.Ptr(\"00:05:00\"),\n\t// \t\t\tHealthCheckWaitDuration: to.Ptr(\"00:05:00\"),\n\t// \t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\tMaxPercentUnhealthyApplications: 
to.Ptr[int32](100),\n\t// \t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](100),\n\t// \t\t\t},\n\t// \t\t\tUpgradeDomainTimeout: to.Ptr(\"02:00:00\"),\n\t// \t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"10675199.02:48:05.4775807\"),\n\t// \t\t\tUpgradeTimeout: to.Ptr(\"12:00:00\"),\n\t// \t\t},\n\t// \t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeAutomatic),\n\t// \t},\n\t// }\n}", "func ExampleClustersClient_BeginUpdate() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicefabric.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := clientFactory.NewClustersClient().BeginUpdate(ctx, \"resRg\", \"myCluster\", armservicefabric.ClusterUpdateParameters{\n\t\tProperties: &armservicefabric.ClusterPropertiesUpdateParameters{\n\t\t\tEventStoreServiceEnabled: to.Ptr(true),\n\t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t\t\t\t\t},\n\t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t\t\t\t\t},\n\t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t\t\t\t\tIsPrimary: to.Ptr(true),\n\t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"testnt1\"),\n\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t\t\t\t\t\tEndPort: to.Ptr[int32](2000),\n\t\t\t\t\t\tStartPort: 
to.Ptr[int32](1000),\n\t\t\t\t\t},\n\t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](0),\n\t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t\t\t\t\t\tEndPort: to.Ptr[int32](4000),\n\t\t\t\t\t\tStartPort: to.Ptr[int32](3000),\n\t\t\t\t\t},\n\t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](0),\n\t\t\t\t\tIsPrimary: to.Ptr(false),\n\t\t\t\t\tVMInstanceCount: to.Ptr[int32](3),\n\t\t\t\t}},\n\t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelBronze),\n\t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeAutomatic),\n\t\t\tUpgradePauseEndTimestampUTC: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2021-06-25T22:00:00Z\"); return t }()),\n\t\t\tUpgradePauseStartTimestampUTC: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2021-06-21T22:00:00Z\"); return t }()),\n\t\t\tUpgradeWave: to.Ptr(armservicefabric.ClusterUpgradeCadence(\"Wave\")),\n\t\t},\n\t\tTags: map[string]*string{\n\t\t\t\"a\": to.Ptr(\"b\"),\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.Cluster = armservicefabric.Cluster{\n\t// \tName: to.Ptr(\"myCluster\"),\n\t// \tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \tEtag: to.Ptr(\"W/\\\"636462502169240744\\\"\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster\"),\n\t// \tLocation: to.Ptr(\"eastus\"),\n\t// \tTags: map[string]*string{\n\t// \t\t\"a\": to.Ptr(\"b\"),\n\t// \t},\n\t// \tProperties: &armservicefabric.ClusterProperties{\n\t// \t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t{\n\t// \t\t\t\tCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\t\tEnvironment: to.Ptr(armservicefabric.ClusterEnvironmentWindows),\n\t// \t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t}},\n\t// \t\tCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t}},\n\t// \t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t},\n\t// \t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// \t\t},\n\t// \t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t},\n\t// \t\tClusterCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\tClusterID: to.Ptr(\"92584666-9889-4ae8-8d02-91902923d37f\"),\n\t// \t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\tBlobEndpoint: 
to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t// \t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t},\n\t// \t\tEventStoreServiceEnabled: to.Ptr(true),\n\t// \t\tFabricSettings: []*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// \t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t}},\n\t// \t\t}},\n\t// \t\tManagementEndpoint: to.Ptr(\"http://myCluster.eastus.cloudapp.azure.com:19080\"),\n\t// \t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// \t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t},\n\t// \t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t},\n\t// \t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\tIsPrimary: to.Ptr(true),\n\t// \t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t\t},\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"testnt1\"),\n\t// \t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\tEndPort: to.Ptr[int32](2000),\n\t// \t\t\t\t\tStartPort: to.Ptr[int32](1000),\n\t// \t\t\t\t},\n\t// \t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](0),\n\t// \t\t\t\tDurabilityLevel: 
to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\tEndPort: to.Ptr[int32](4000),\n\t// \t\t\t\t\tStartPort: to.Ptr[int32](3000),\n\t// \t\t\t\t},\n\t// \t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](0),\n\t// \t\t\t\tIsPrimary: to.Ptr(false),\n\t// \t\t\t\tVMInstanceCount: to.Ptr[int32](3),\n\t// \t\t}},\n\t// \t\tNotifications: []*armservicefabric.Notification{\n\t// \t\t\t{\n\t// \t\t\t\tIsEnabled: to.Ptr(true),\n\t// \t\t\t\tNotificationCategory: to.Ptr(armservicefabric.NotificationCategoryWaveProgress),\n\t// \t\t\t\tNotificationLevel: to.Ptr(armservicefabric.NotificationLevelCritical),\n\t// \t\t\t\tNotificationTargets: []*armservicefabric.NotificationTarget{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tNotificationChannel: to.Ptr(armservicefabric.NotificationChannelEmailUser),\n\t// \t\t\t\t\t\tReceivers: []*string{\n\t// \t\t\t\t\t\t\tto.Ptr(\"****@microsoft.com\"),\n\t// \t\t\t\t\t\t\tto.Ptr(\"****@microsoft.com\")},\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tNotificationChannel: to.Ptr(armservicefabric.NotificationChannelEmailSubscription),\n\t// \t\t\t\t\t\t\tReceivers: []*string{\n\t// \t\t\t\t\t\t\t\tto.Ptr(\"Owner\"),\n\t// \t\t\t\t\t\t\t\tto.Ptr(\"AccountAdmin\")},\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tIsEnabled: to.Ptr(true),\n\t// \t\t\t\t\t\tNotificationCategory: to.Ptr(armservicefabric.NotificationCategoryWaveProgress),\n\t// \t\t\t\t\t\tNotificationLevel: to.Ptr(armservicefabric.NotificationLevelAll),\n\t// \t\t\t\t\t\tNotificationTargets: []*armservicefabric.NotificationTarget{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tNotificationChannel: to.Ptr(armservicefabric.NotificationChannelEmailUser),\n\t// \t\t\t\t\t\t\t\tReceivers: []*string{\n\t// \t\t\t\t\t\t\t\t\tto.Ptr(\"****@microsoft.com\"),\n\t// \t\t\t\t\t\t\t\t\tto.Ptr(\"****@microsoft.com\")},\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t{\n\t// 
\t\t\t\t\t\t\t\t\tNotificationChannel: to.Ptr(armservicefabric.NotificationChannelEmailSubscription),\n\t// \t\t\t\t\t\t\t\t\tReceivers: []*string{\n\t// \t\t\t\t\t\t\t\t\t\tto.Ptr(\"Owner\"),\n\t// \t\t\t\t\t\t\t\t\t\tto.Ptr(\"AccountAdmin\")},\n\t// \t\t\t\t\t\t\t\t}},\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\t\t\t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelBronze),\n\t// \t\t\t\t\t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\t\t\t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tForceRestart: to.Ptr(false),\n\t// \t\t\t\t\t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:05:00\"),\n\t// \t\t\t\t\t\t\tHealthCheckStableDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\t\tHealthCheckWaitDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tUpgradeDomainTimeout: to.Ptr(\"00:15:00\"),\n\t// \t\t\t\t\t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"00:10:00\"),\n\t// \t\t\t\t\t\t\tUpgradeTimeout: to.Ptr(\"01:00:00\"),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeAutomatic),\n\t// \t\t\t\t\t\tUpgradePauseEndTimestampUTC: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2021-06-25T22:00:00Z\"); return t}()),\n\t// \t\t\t\t\t\tUpgradePauseStartTimestampUTC: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2021-06-21T22:00:00Z\"); return t}()),\n\t// 
\t\t\t\t\t\tUpgradeWave: to.Ptr(armservicefabric.ClusterUpgradeCadenceWave2),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t}\n}", "func (ds *DiscoveryService) ListClusters(request *restful.Request, response *restful.Response) {\n\tkey := request.Request.URL.String()\n\tout, cached := ds.cdsCache.cachedDiscoveryResponse(key)\n\tif !cached {\n\t\tif sc := request.PathParameter(ServiceCluster); sc != ds.mesh.IstioServiceCluster {\n\t\t\terrorResponse(response, http.StatusNotFound,\n\t\t\t\tfmt.Sprintf(\"Unexpected %s %q\", ServiceCluster, sc))\n\t\t\treturn\n\t\t}\n\n\t\t// service-node holds the IP address\n\t\tip := request.PathParameter(ServiceNode)\n\t\t// CDS computes clusters that are referenced by RDS routes for a particular proxy node\n\t\t// TODO: this implementation is inefficient as it is recomputing all the routes for all proxies\n\t\t// There is a lot of potential to cache and reuse cluster definitions across proxies and also\n\t\t// skip computing the actual HTTP routes\n\t\tinstances := ds.services.HostInstances(map[string]bool{ip: true})\n\t\tservices := ds.services.Services()\n\t\thttpRouteConfigs := buildOutboundHTTPRoutes(instances, services, &ProxyContext{\n\t\t\tDiscovery: ds.services,\n\t\t\tConfig: ds.config,\n\t\t\tMeshConfig: ds.mesh,\n\t\t\tIPAddress: ip,\n\t\t})\n\n\t\t// de-duplicate and canonicalize clusters\n\t\tclusters := httpRouteConfigs.clusters().normalize()\n\n\t\t// apply custom policies for HTTP clusters\n\t\tfor _, cluster := range clusters {\n\t\t\tinsertDestinationPolicy(ds.config, cluster)\n\t\t}\n\n\t\tvar err error\n\t\tif out, err = json.MarshalIndent(ClusterManager{Clusters: clusters}, \" \", \" \"); err != nil {\n\t\t\terrorResponse(response, http.StatusInternalServerError, err.Error())\n\t\t\treturn\n\t\t}\n\t\tds.cdsCache.updateCachedDiscoveryResponse(key, out)\n\t}\n\twriteResponse(response, out)\n}", "func (s *RedisClusterStore) Set(ctx context.Context, key interface{}, value interface{}, options *Options) error {\n\tif options 
== nil {\n\t\toptions = s.options\n\t}\n\n\terr := s.clusclient.Set(ctx, key.(string), value, options.ExpirationValue()).Err()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif tags := options.TagsValue(); len(tags) > 0 {\n\t\ts.setTags(ctx, key, tags)\n\t}\n\n\treturn nil\n}", "func (s *ListClustersOutput) SetClusters(v []*Cluster) *ListClustersOutput {\n\ts.Clusters = v\n\treturn s\n}", "func UpdateCluster(c *gin.Context) {\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagGetClusterInfo, \"Bind json into UpdateClusterRequest struct\")\n\n\t// bind request body to UpdateClusterRequest struct\n\tvar updateRequest banzaiTypes.UpdateClusterRequest\n\tif err := c.BindJSON(&updateRequest); err != nil {\n\t\t// bind failed, required field(s) empty\n\t\tbanzaiUtils.LogWarn(banzaiConstants.TagGetClusterInfo, \"Bind failed.\", err.Error())\n\t\tcloud.SetResponseBodyJson(c, http.StatusBadRequest, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusBadRequest,\n\t\t\tcloud.JsonKeyMessage: \"Required field is empty\",\n\t\t\tcloud.JsonKeyError: err,\n\t\t})\n\t\treturn\n\t}\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagGetClusterInfo, \"Load cluster from database\")\n\n\t// load cluster from db\n\tcl, err := cloud.GetClusterFromDB(c)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagGetClusterInfo, \"Start updating cluster:\", cl.Name)\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagGetClusterInfo, \"Update request: \", updateRequest)\n\tcloudType := cl.Cloud\n\n\tswitch cloudType {\n\tcase banzaiConstants.Amazon:\n\t\t// read amazon props from amazon_cluster_properties table\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagGetClusterInfo, \"Load amazon props from db\")\n\t\tdatabase.SelectFirstWhere(&cl.Amazon, banzaiSimpleTypes.AmazonClusterSimple{ClusterSimpleId: cl.ID})\n\tcase banzaiConstants.Azure:\n\t\t// read azure props from azure_cluster_properties table\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagGetClusterInfo, \"Load azure props from 
db\")\n\t\tdatabase.SelectFirstWhere(&cl.Azure, banzaiSimpleTypes.AzureClusterSimple{ClusterSimpleId: cl.ID})\n\tdefault:\n\t\t// not supported cloud type\n\t\tbanzaiUtils.LogWarn(banzaiConstants.TagGetClusterInfo, \"Not supported cloud type\")\n\t\tcloud.SendNotSupportedCloudResponse(c, banzaiConstants.TagUpdateCluster)\n\t\treturn\n\t}\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagGetClusterInfo, \"Cluster to modify: \", cl)\n\n\tif isValid, err := updateRequest.Validate(*cl); isValid && len(err) == 0 {\n\t\t// validation OK\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagGetClusterInfo, \"Validate is OK\")\n\t\tif cloud.UpdateClusterInCloud(c, &updateRequest, *cl) {\n\t\t\t// cluster updated successfully in cloud\n\t\t\t// update prometheus config..\n\t\t\tupdatePrometheus()\n\t\t}\n\t} else {\n\t\t// validation failed\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagGetClusterInfo, \"Validation failed\")\n\t\tcloud.SetResponseBodyJson(c, http.StatusBadRequest, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusBadRequest,\n\t\t\tcloud.JsonKeyMessage: err,\n\t\t})\n\t}\n\n}", "func (m *CIDRMap) Set(machineSetName, s string) {\n\tcidrs := splitCIDRs(s)\n\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\n\tm.entries[machineSetName] = cidrs\n}", "func TestClusterConfigSet(t *testing.T) {\n\t_, etcds, err := CreateCluster(3, &os.ProcAttr{Files: []*os.File{nil, os.Stdout, os.Stderr}}, false)\n\tassert.NoError(t, err)\n\tdefer DestroyCluster(etcds)\n\n\tresp, _ := tests.Put(\"http://localhost:7001/v2/admin/config\", \"application/json\", bytes.NewBufferString(`{\"activeSize\":3, \"removeDelay\":60}`))\n\tassert.Equal(t, resp.StatusCode, 200)\n\n\ttime.Sleep(1 * time.Second)\n\n\tresp, _ = tests.Get(\"http://localhost:7002/v2/admin/config\")\n\tbody := tests.ReadBodyJSON(resp)\n\tassert.Equal(t, resp.StatusCode, 200)\n\tassert.Equal(t, resp.Header.Get(\"Content-Type\"), \"application/json\")\n\tassert.Equal(t, body[\"activeSize\"], 3)\n\tassert.Equal(t, body[\"removeDelay\"], 60)\n}", 
"func ExampleClusterManagersClient_Update() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armnetworkcloud.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewClusterManagersClient().Update(ctx, \"resourceGroupName\", \"clusterManagerName\", armnetworkcloud.ClusterManagerPatchParameters{\n\t\tTags: map[string]*string{\n\t\t\t\"key1\": to.Ptr(\"myvalue1\"),\n\t\t\t\"key2\": to.Ptr(\"myvalue2\"),\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.ClusterManager = armnetworkcloud.ClusterManager{\n\t// \tName: to.Ptr(\"clusterManagerName\"),\n\t// \tType: to.Ptr(\"Microsoft.NetworkCloud/clusterManagers\"),\n\t// \tID: to.Ptr(\"/subscriptions/123e4567-e89b-12d3-a456-426655440000/resourceGroups/resourceGroupName/providers/Microsoft.NetworkCloud/clusterManagers/clusterManagerName\"),\n\t// \tSystemData: &armnetworkcloud.SystemData{\n\t// \t\tCreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2021-01-22T13:27:03.008Z\"); return t}()),\n\t// \t\tCreatedBy: to.Ptr(\"identityA\"),\n\t// \t\tCreatedByType: to.Ptr(armnetworkcloud.CreatedByTypeApplication),\n\t// \t\tLastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2021-01-22T13:29:03.001Z\"); return t}()),\n\t// \t\tLastModifiedBy: to.Ptr(\"identityB\"),\n\t// \t\tLastModifiedByType: to.Ptr(armnetworkcloud.CreatedByTypeUser),\n\t// \t},\n\t// 
\tLocation: to.Ptr(\"location\"),\n\t// \tTags: map[string]*string{\n\t// \t\t\"key1\": to.Ptr(\"myvalue1\"),\n\t// \t\t\"key2\": to.Ptr(\"myvalue2\"),\n\t// \t},\n\t// \tProperties: &armnetworkcloud.ClusterManagerProperties{\n\t// \t\tAnalyticsWorkspaceID: to.Ptr(\"/subscriptions/123e4567-e89b-12d3-a456-426655440000/resourceGroups/resourceGroupName/providers/microsoft.operationalInsights/workspaces/logAnalyticsWorkspaceName\"),\n\t// \t\tClusterVersions: []*armnetworkcloud.ClusterAvailableVersion{\n\t// \t\t\t{\n\t// \t\t\t\tSupportExpiryDate: to.Ptr(\"2023-04-29\"),\n\t// \t\t\t\tTargetClusterVersion: to.Ptr(\"1.0.0\"),\n\t// \t\t\t},\n\t// \t\t\t{\n\t// \t\t\t\tSupportExpiryDate: to.Ptr(\"2025-01-01\"),\n\t// \t\t\t\tTargetClusterVersion: to.Ptr(\"1.0.2\"),\n\t// \t\t}},\n\t// \t\tDetailedStatus: to.Ptr(armnetworkcloud.ClusterManagerDetailedStatusAvailable),\n\t// \t\tDetailedStatusMessage: to.Ptr(\"cluster manager is up and running\"),\n\t// \t\tFabricControllerID: to.Ptr(\"/subscriptions/123e4567-e89b-12d3-a456-426655440000/resourceGroups/resourceGroupName/providers/Microsoft.ManagedNetworkFabric/networkFabricControllers/fabricControllerName\"),\n\t// \t\tManagedResourceGroupConfiguration: &armnetworkcloud.ManagedResourceGroupConfiguration{\n\t// \t\t\tName: to.Ptr(\"my-managed-rg\"),\n\t// \t\t\tLocation: to.Ptr(\"East US\"),\n\t// \t\t},\n\t// \t\tManagerExtendedLocation: &armnetworkcloud.ExtendedLocation{\n\t// \t\t\tName: to.Ptr(\"/subscriptions/123e4567-e89b-12d3-a456-426655440000/resourceGroups/resourceGroupName/providers/Microsoft.ExtendedLocation/customLocations/clusterManagerExtendedLocationName\"),\n\t// \t\t\tType: to.Ptr(\"CustomLocation\"),\n\t// \t\t},\n\t// \t\tProvisioningState: to.Ptr(armnetworkcloud.ClusterManagerProvisioningStateSucceeded),\n\t// \t},\n\t// }\n}", "func (a *LocalKeyAgent) UpdateCluster(cluster string) {\n\ta.siteName = cluster\n}", "func (r *Cluster) ApplyTo(m *model.Cluster) {\n\tm.Name = r.Name\n\tm.Description = 
r.Description\n\tm.DataCenter = r.DataCenter.ID\n\tm.HaReservation = r.bool(r.HaReservation)\n\tm.KsmEnabled = r.bool(r.KSM.Enabled)\n}", "func ExampleClustersClient_BeginCreateOrUpdate_putAClusterWithMaximumParameters() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicefabric.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := clientFactory.NewClustersClient().BeginCreateOrUpdate(ctx, \"resRg\", \"myCluster\", armservicefabric.Cluster{\n\t\tLocation: to.Ptr(\"eastus\"),\n\t\tTags: map[string]*string{},\n\t\tProperties: &armservicefabric.ClusterProperties{\n\t\t\tAddOnFeatures: []*armservicefabric.AddOnFeatures{\n\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesRepairManager),\n\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesDNSService),\n\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesBackupRestoreService),\n\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesResourceMonitorService)},\n\t\t\tApplicationTypeVersionsCleanupPolicy: &armservicefabric.ApplicationTypeVersionsCleanupPolicy{\n\t\t\t\tMaxUnusedVersionsToKeep: to.Ptr[int64](2),\n\t\t\t},\n\t\t\tAzureActiveDirectory: &armservicefabric.AzureActiveDirectory{\n\t\t\t\tClientApplication: to.Ptr(\"d151ad89-4bce-4ae8-b3d1-1dc79679fa75\"),\n\t\t\t\tClusterApplication: to.Ptr(\"5886372e-7bf4-4878-a497-8098aba608ae\"),\n\t\t\t\tTenantID: to.Ptr(\"6abcc6a0-8666-43f1-87b8-172cf86a9f9c\"),\n\t\t\t},\n\t\t\tCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t\t\t\t\t{\n\t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t\t\t\t\t}},\n\t\t\t\tX509StoreName: 
to.Ptr(armservicefabric.StoreNameMy),\n\t\t\t},\n\t\t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t\t\t\t{\n\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t\t\t\t\tIsAdmin: to.Ptr(true),\n\t\t\t\t}},\n\t\t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t\t\t\t{\n\t\t\t\t\tCertificateThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t\t\t\t\tIsAdmin: to.Ptr(true),\n\t\t\t\t}},\n\t\t\tClusterCodeVersion: to.Ptr(\"7.0.470.9590\"),\n\t\t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t\t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t\t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t\t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t\t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t\t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t\t\t},\n\t\t\tEventStoreServiceEnabled: to.Ptr(true),\n\t\t\tFabricSettings: []*armservicefabric.SettingsSectionDescription{\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t\t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t\t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t\t\t\t\t\t}},\n\t\t\t\t}},\n\t\t\tInfrastructureServiceManager: to.Ptr(true),\n\t\t\tManagementEndpoint: to.Ptr(\"https://myCluster.eastus.cloudapp.azure.com:19080\"),\n\t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t\t\t\t\t},\n\t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t\t\t\t\tDurabilityLevel: 
to.Ptr(armservicefabric.DurabilityLevelSilver),\n\t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t\t\t\t\t},\n\t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t\t\t\t\tIsPrimary: to.Ptr(true),\n\t\t\t\t\tIsStateless: to.Ptr(false),\n\t\t\t\t\tMultipleAvailabilityZones: to.Ptr(true),\n\t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t\t\t\t}},\n\t\t\tNotifications: []*armservicefabric.Notification{\n\t\t\t\t{\n\t\t\t\t\tIsEnabled: to.Ptr(true),\n\t\t\t\t\tNotificationCategory: to.Ptr(armservicefabric.NotificationCategoryWaveProgress),\n\t\t\t\t\tNotificationLevel: to.Ptr(armservicefabric.NotificationLevelCritical),\n\t\t\t\t\tNotificationTargets: []*armservicefabric.NotificationTarget{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tNotificationChannel: to.Ptr(armservicefabric.NotificationChannelEmailUser),\n\t\t\t\t\t\t\tReceivers: []*string{\n\t\t\t\t\t\t\t\tto.Ptr(\"****@microsoft.com\"),\n\t\t\t\t\t\t\t\tto.Ptr(\"****@microsoft.com\")},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tNotificationChannel: to.Ptr(armservicefabric.NotificationChannelEmailSubscription),\n\t\t\t\t\t\t\tReceivers: []*string{\n\t\t\t\t\t\t\t\tto.Ptr(\"Owner\"),\n\t\t\t\t\t\t\t\tto.Ptr(\"AccountAdmin\")},\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tIsEnabled: to.Ptr(true),\n\t\t\t\t\tNotificationCategory: to.Ptr(armservicefabric.NotificationCategoryWaveProgress),\n\t\t\t\t\tNotificationLevel: to.Ptr(armservicefabric.NotificationLevelAll),\n\t\t\t\t\tNotificationTargets: []*armservicefabric.NotificationTarget{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tNotificationChannel: to.Ptr(armservicefabric.NotificationChannelEmailUser),\n\t\t\t\t\t\t\tReceivers: []*string{\n\t\t\t\t\t\t\t\tto.Ptr(\"****@microsoft.com\"),\n\t\t\t\t\t\t\t\tto.Ptr(\"****@microsoft.com\")},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tNotificationChannel: 
to.Ptr(armservicefabric.NotificationChannelEmailSubscription),\n\t\t\t\t\t\t\tReceivers: []*string{\n\t\t\t\t\t\t\t\tto.Ptr(\"Owner\"),\n\t\t\t\t\t\t\t\tto.Ptr(\"AccountAdmin\")},\n\t\t\t\t\t\t}},\n\t\t\t\t}},\n\t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelPlatinum),\n\t\t\tReverseProxyCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t\t\t\t\t{\n\t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t\t\t\t\t}},\n\t\t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t\t\t},\n\t\t\tSfZonalUpgradeMode: to.Ptr(armservicefabric.SfZonalUpgradeModeHierarchical),\n\t\t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t\t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t\t\t\t\tApplicationDeltaHealthPolicies: map[string]*armservicefabric.ApplicationDeltaHealthPolicy{\n\t\t\t\t\t\t\"fabric:/myApp1\": {\n\t\t\t\t\t\t\tDefaultServiceTypeDeltaHealthPolicy: &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tServiceTypeDeltaHealthPolicies: map[string]*armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t\t\t\t\t\t\t\t\"myServiceType1\": {\n\t\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t\t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t\t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t\t\t\t},\n\t\t\t\tForceRestart: to.Ptr(false),\n\t\t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:05:00\"),\n\t\t\t\tHealthCheckStableDuration: to.Ptr(\"00:00:30\"),\n\t\t\t\tHealthCheckWaitDuration: to.Ptr(\"00:00:30\"),\n\t\t\t\tHealthPolicy: 
&armservicefabric.ClusterHealthPolicy{\n\t\t\t\t\tApplicationHealthPolicies: map[string]*armservicefabric.ApplicationHealthPolicy{\n\t\t\t\t\t\t\"fabric:/myApp1\": {\n\t\t\t\t\t\t\tDefaultServiceTypeHealthPolicy: &armservicefabric.ServiceTypeHealthPolicy{\n\t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](0),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tServiceTypeHealthPolicies: map[string]*armservicefabric.ServiceTypeHealthPolicy{\n\t\t\t\t\t\t\t\t\"myServiceType1\": {\n\t\t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](100),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tMaxPercentUnhealthyApplications: to.Ptr[int32](0),\n\t\t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](0),\n\t\t\t\t},\n\t\t\t\tUpgradeDomainTimeout: to.Ptr(\"00:15:00\"),\n\t\t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"00:10:00\"),\n\t\t\t\tUpgradeTimeout: to.Ptr(\"01:00:00\"),\n\t\t\t},\n\t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeManual),\n\t\t\tUpgradePauseEndTimestampUTC: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2021-06-25T22:00:00Z\"); return t }()),\n\t\t\tUpgradePauseStartTimestampUTC: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2021-06-21T22:00:00Z\"); return t }()),\n\t\t\tUpgradeWave: to.Ptr(armservicefabric.ClusterUpgradeCadenceWave1),\n\t\t\tVMImage: to.Ptr(\"Windows\"),\n\t\t\tVmssZonalUpgradeMode: to.Ptr(armservicefabric.VmssZonalUpgradeModeParallel),\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.Cluster = armservicefabric.Cluster{\n\t// \tName: to.Ptr(\"myCluster\"),\n\t// \tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \tEtag: to.Ptr(\"W/\\\"636462502169240745\\\"\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster\"),\n\t// \tLocation: to.Ptr(\"eastus\"),\n\t// \tTags: map[string]*string{\n\t// \t},\n\t// \tProperties: &armservicefabric.ClusterProperties{\n\t// \t\tAddOnFeatures: []*armservicefabric.AddOnFeatures{\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesRepairManager),\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesDNSService),\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesBackupRestoreService),\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesResourceMonitorService)},\n\t// \t\t\tApplicationTypeVersionsCleanupPolicy: &armservicefabric.ApplicationTypeVersionsCleanupPolicy{\n\t// \t\t\t\tMaxUnusedVersionsToKeep: to.Ptr[int64](2),\n\t// \t\t\t},\n\t// \t\t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCodeVersion: to.Ptr(\"7.0.470.9590\"),\n\t// \t\t\t\t\tEnvironment: to.Ptr(armservicefabric.ClusterEnvironmentWindows),\n\t// \t\t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t\t}},\n\t// \t\t\tAzureActiveDirectory: &armservicefabric.AzureActiveDirectory{\n\t// \t\t\t\tClientApplication: to.Ptr(\"d151ad89-4bce-4ae8-b3d1-1dc79679fa75\"),\n\t// \t\t\t\tClusterApplication: to.Ptr(\"5886372e-7bf4-4878-a497-8098aba608ae\"),\n\t// \t\t\t\tTenantID: to.Ptr(\"6abcc6a0-8666-43f1-87b8-172cf86a9f9c\"),\n\t// \t\t\t},\n\t// \t\t\tCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// 
\t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t}},\n\t// \t\t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t},\n\t// \t\t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\tIsAdmin: to.Ptr(true),\n\t// \t\t\t}},\n\t// \t\t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCertificateThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\tIsAdmin: to.Ptr(false),\n\t// \t\t\t}},\n\t// \t\t\tClusterCodeVersion: to.Ptr(\"7.0.470.9590\"),\n\t// \t\t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\t\tClusterID: to.Ptr(\"92584666-9889-4ae8-8d02-91902923d37f\"),\n\t// \t\t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t// \t\t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t\t},\n\t// \t\t\tEventStoreServiceEnabled: to.Ptr(true),\n\t// \t\t\tFabricSettings: []*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// \t\t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t\t}},\n\t// \t\t\t}},\n\t// 
\t\t\tInfrastructureServiceManager: to.Ptr(true),\n\t// \t\t\tManagementEndpoint: to.Ptr(\"https://myCluster.eastus.cloudapp.azure.com:19080\"),\n\t// \t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// \t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelSilver),\n\t// \t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\t\tIsPrimary: to.Ptr(true),\n\t// \t\t\t\t\tIsStateless: to.Ptr(false),\n\t// \t\t\t\t\tMultipleAvailabilityZones: to.Ptr(true),\n\t// \t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t\t}},\n\t// \t\t\tNotifications: []*armservicefabric.Notification{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tIsEnabled: to.Ptr(true),\n\t// \t\t\t\t\tNotificationCategory: to.Ptr(armservicefabric.NotificationCategoryWaveProgress),\n\t// \t\t\t\t\tNotificationLevel: to.Ptr(armservicefabric.NotificationLevelCritical),\n\t// \t\t\t\t\tNotificationTargets: []*armservicefabric.NotificationTarget{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tNotificationChannel: to.Ptr(armservicefabric.NotificationChannelEmailUser),\n\t// \t\t\t\t\t\t\tReceivers: []*string{\n\t// \t\t\t\t\t\t\t\tto.Ptr(\"****@microsoft.com\"),\n\t// \t\t\t\t\t\t\t\tto.Ptr(\"****@microsoft.com\")},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tNotificationChannel: to.Ptr(armservicefabric.NotificationChannelEmailSubscription),\n\t// \t\t\t\t\t\t\t\tReceivers: []*string{\n\t// \t\t\t\t\t\t\t\t\tto.Ptr(\"Owner\"),\n\t// 
\t\t\t\t\t\t\t\t\tto.Ptr(\"AccountAdmin\")},\n\t// \t\t\t\t\t\t\t}},\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tIsEnabled: to.Ptr(true),\n\t// \t\t\t\t\t\t\tNotificationCategory: to.Ptr(armservicefabric.NotificationCategoryWaveProgress),\n\t// \t\t\t\t\t\t\tNotificationLevel: to.Ptr(armservicefabric.NotificationLevelAll),\n\t// \t\t\t\t\t\t\tNotificationTargets: []*armservicefabric.NotificationTarget{\n\t// \t\t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\t\tNotificationChannel: to.Ptr(armservicefabric.NotificationChannelEmailUser),\n\t// \t\t\t\t\t\t\t\t\tReceivers: []*string{\n\t// \t\t\t\t\t\t\t\t\t\tto.Ptr(\"****@microsoft.com\"),\n\t// \t\t\t\t\t\t\t\t\t\tto.Ptr(\"****@microsoft.com\")},\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\t\t\tNotificationChannel: to.Ptr(armservicefabric.NotificationChannelEmailSubscription),\n\t// \t\t\t\t\t\t\t\t\t\tReceivers: []*string{\n\t// \t\t\t\t\t\t\t\t\t\t\tto.Ptr(\"Owner\"),\n\t// \t\t\t\t\t\t\t\t\t\t\tto.Ptr(\"AccountAdmin\")},\n\t// \t\t\t\t\t\t\t\t\t}},\n\t// \t\t\t\t\t\t\t}},\n\t// \t\t\t\t\t\t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\t\t\t\t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelPlatinum),\n\t// \t\t\t\t\t\t\tReverseProxyCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\t\t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t\t\t\t\t}},\n\t// \t\t\t\t\t\t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tSfZonalUpgradeMode: to.Ptr(armservicefabric.SfZonalUpgradeModeHierarchical),\n\t// \t\t\t\t\t\t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\t\t\t\t\t\tDeltaHealthPolicy: 
&armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tApplicationDeltaHealthPolicies: map[string]*armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\tDefaultServiceTypeDeltaHealthPolicy: &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\t\t\tServiceTypeDeltaHealthPolicies: map[string]*armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\tForceRestart: to.Ptr(true),\n\t// \t\t\t\t\t\t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:05:00\"),\n\t// \t\t\t\t\t\t\t\tHealthCheckStableDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\t\t\tHealthCheckWaitDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tApplicationHealthPolicies: map[string]*armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\tDefaultServiceTypeHealthPolicy: &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\t\t\tServiceTypeHealthPolicies: 
map[string]*armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](100),\n\t// \t\t\t\t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\tMaxPercentUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\tUpgradeDomainTimeout: to.Ptr(\"00:15:00\"),\n\t// \t\t\t\t\t\t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"00:10:00\"),\n\t// \t\t\t\t\t\t\t\tUpgradeTimeout: to.Ptr(\"00:15:00\"),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeManual),\n\t// \t\t\t\t\t\t\tUpgradePauseEndTimestampUTC: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2021-06-25T22:00:00Z\"); return t}()),\n\t// \t\t\t\t\t\t\tUpgradePauseStartTimestampUTC: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2021-06-21T22:00:00Z\"); return t}()),\n\t// \t\t\t\t\t\t\tUpgradeWave: to.Ptr(armservicefabric.ClusterUpgradeCadenceWave1),\n\t// \t\t\t\t\t\t\tVMImage: to.Ptr(\"Windows\"),\n\t// \t\t\t\t\t\t\tVmssZonalUpgradeMode: to.Ptr(armservicefabric.VmssZonalUpgradeModeParallel),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t}\n}", "func Clusters() (clusters map[string][]string) {\n\tclusters = make(map[string][]string)\n\tif addr := AccessConsulAddr(); addr != \"\" && Region() != \"\" {\n\t\treturn getClustersFromConsul(addr, Region())\n\t}\n\tcs := Get(\"Key-ClusterMgrCluster\").(map[string]string)\n\tfor key, value := range cs {\n\t\tclusters[key] = strings.Split(value, \" \")\n\t}\n\treturn\n}", "func (c *SetClusterConfigCommand) Apply(context raft.Context) (interface{}, error) {\n\tps, _ := context.Server().Context().(*PeerServer)\n\tps.SetClusterConfig(c.Config)\n\treturn nil, nil\n}", "func 
Update(setValuesFlag, valuesYamlFile, chartLocation, version string) error {\n\t_ = utils.CreateDirIfNotExist(utils.GetSpaceCloudDirectory())\n\n\tcharList, err := utils.HelmList(model.HelmSpaceCloudNamespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(charList) < 1 {\n\t\tutils.LogInfo(\"Space cloud cluster not found, setup a new cluster using the setup command\")\n\t\treturn nil\n\t}\n\n\tclusterID := charList[0].Name\n\tisOk := false\n\tprompt := &survey.Confirm{\n\t\tMessage: fmt.Sprintf(\"Space cloud cluster with id (%s) will be upgraded, Do you want to continue\", clusterID),\n\t}\n\tif err := survey.AskOne(prompt, &isOk); err != nil {\n\t\treturn err\n\t}\n\tif !isOk {\n\t\treturn nil\n\t}\n\n\tvaluesFileObj, err := utils.ExtractValuesObj(setValuesFlag, valuesYamlFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// set clusterId of existing cluster\n\tcharInfo, err := utils.HelmGet(clusterID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvaluesFileObj[\"clusterId\"] = charInfo.Config[\"clusterId\"]\n\n\t_, err = utils.HelmUpgrade(clusterID, chartLocation, utils.GetHelmChartDownloadURL(model.HelmSpaceCloudChartDownloadURL, version), \"\", valuesFileObj)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Println()\n\tutils.LogInfo(fmt.Sprintf(\"Space Cloud (cluster id: \\\"%s\\\") has been successfully upgraded! 
👍\", charList[0].Name))\n\treturn nil\n}", "func (context *context) SetThreads(v uint) {\n\tcontext.params.SetThreads(int(v))\n}", "func NewClusters(db *gorm.DB) *Clusters {\n\treturn &Clusters{db: db}\n}", "func (m *VirtualEndpoint) SetCloudPCs(value []CloudPCable)() {\n err := m.GetBackingStore().Set(\"cloudPCs\", value)\n if err != nil {\n panic(err)\n }\n}", "func (a *ClustersApiService) ListClusters(ctx _context.Context, space string) ApiListClustersRequest {\n\treturn ApiListClustersRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tspace: space,\n\t}\n}", "func (daemon *Daemon) SetCluster(cluster Cluster) {\n\tdaemon.cluster = cluster\n}", "func (a *NamespacesApiService) SetNamespaceReplicationClusters(ctx _context.Context, tenant string, namespace string) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/namespaces/{tenant}/{namespace}/replication\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"tenant\"+\"}\", _neturl.QueryEscape(fmt.Sprintf(\"%v\", tenant)), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"namespace\"+\"}\", _neturl.QueryEscape(fmt.Sprintf(\"%v\", namespace)), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := 
selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func SetNamespaces(namespaces []string) UpdateSettingsFunc {\n\treturn func(cache *clusterCache) {\n\t\tif !reflect.DeepEqual(cache.namespaces, namespaces) {\n\t\t\tlog.WithField(\"server\", cache.config.Host).Infof(\"Changing cluster namespaces to: %v\", namespaces)\n\t\t\tcache.namespaces = namespaces\n\t\t}\n\t}\n}", "func SetClusterInitializedStatus(restclient *rest.RESTClient, clusterName,\n\tnamespace string) error {\n\n\tcluster := crv1.Pgcluster{}\n\tif _, err := kubeapi.Getpgcluster(restclient, &cluster, clusterName,\n\t\tnamespace); err != nil {\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\tmessage := \"Cluster has been initialized\"\n\tif err := kubeapi.PatchpgclusterStatus(restclient, crv1.PgclusterStateInitialized, message,\n\t\t&cluster, namespace); err != nil {\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (o *QueueManager) GetClusters() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\n\treturn o.Clusters\n}", "func (kvs *KeyValService) 
Set(args *api.SetArgs, reply *api.ValReply) error {\n\treturn nodeChain.Set(args, reply)\n}", "func (mr *MockBuilderMockRecorder) Clusters() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Clusters\", reflect.TypeOf((*MockBuilder)(nil).Clusters))\n}", "func (r *ControlPlaneReplicas) Set(obj *unstructured.Unstructured, value int64) error {\n\tif err := unstructured.SetNestedField(obj.UnstructuredContent(), value, r.Path()...); err != nil {\n\t\treturn errors.Wrap(err, \"failed to set control plane replicas\")\n\t}\n\treturn nil\n}", "func (a *ClustersApiService) ClusterServiceListClusters(ctx context.Context, body Servicev1ClusterQuery) (V1Clusterlist, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Post\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue V1Clusterlist\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/gitops/api/v1/clusters\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tlocalVarQueryParams.Add(\"routingId\", body.AccountIdentifier)\n\t// body params\n\tlocalVarPostBody = &body\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif 
auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"x-api-key\"] = key\n\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode < 300 {\n\t\t// If we succeed, return the data, otherwise pass on to decode error.\n\t\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\tif err == nil {\n\t\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t\t}\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v V1Clusterlist\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 0 {\n\t\t\tvar v GatewayruntimeError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error 
= err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func SetClusterContext(clusterConfigPath string) {\n\terr := Inst().S.SetConfig(clusterConfigPath)\n\texpect(err).NotTo(haveOccurred(),\n\t\tfmt.Sprintf(\"Failed to switch to context. Error: [%v]\", err))\n\n\terr = Inst().S.RefreshNodeRegistry()\n\texpect(err).NotTo(haveOccurred())\n\n\terr = Inst().V.RefreshDriverEndpoints()\n\texpect(err).NotTo(haveOccurred())\n}", "func setupCluster(synkPath string, cluster *cluster) error {\n\tkindcfg := &kindconfig.Cluster{\n\t\tNodes: []kindconfig.Node{\n\t\t\t{\n\t\t\t\tRole: kindconfig.ControlPlaneRole,\n\t\t\t\tImage: kinddefaults.Image,\n\t\t\t}, {\n\t\t\t\tRole: kindconfig.WorkerRole,\n\t\t\t\tImage: kinddefaults.Image,\n\t\t\t},\n\t\t},\n\t}\n\tcluster.kind = kindcluster.NewProvider()\n\n\t// Create kubeconfig file for use by synk or the dev.\n\tkubeConfig, err := ioutil.TempFile(\"\", \"kubeconfig-\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"create temp kubeconfig\")\n\t}\n\tcluster.kubeConfigPath = kubeConfig.Name()\n\tif err := kubeConfig.Close(); err != nil {\n\t\treturn errors.Wrap(err, \"close temp kubeconfig\")\n\t}\n\n\tif err := cluster.kind.Create(\n\t\tcluster.genName,\n\t\tkindcluster.CreateWithV1Alpha4Config(kindcfg),\n\t\tkindcluster.CreateWithKubeconfigPath(cluster.kubeConfigPath),\n\t); err != nil {\n\t\treturn errors.Wrapf(err, \"create cluster %q\", cluster.genName)\n\t}\n\tkubecfgRaw, err := ioutil.ReadFile(cluster.kubeConfigPath)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"read kube config\")\n\t}\n\tkubecfg, err := clientcmd.NewClientConfigFromBytes(kubecfgRaw)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"decode kube config\")\n\t}\n\tcluster.restCfg, err = 
kubecfg.ClientConfig()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"get rest config\")\n\t}\n\tlog.Printf(\"To use the cluster, run KUBECONFIG=%s kubectl cluster-info\", cluster.kubeConfigPath)\n\n\t// Setup permissive binding we also have in cloud and robot clusters.\n\tctx := context.Background()\n\n\tc, err := client.New(cluster.restCfg, client.Options{})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"create client\")\n\t}\n\tif err := c.Create(ctx, &rbac.ClusterRoleBinding{\n\t\tObjectMeta: meta.ObjectMeta{\n\t\t\tName: \"permissive-binding\",\n\t\t},\n\t\tRoleRef: rbac.RoleRef{\n\t\t\tAPIGroup: \"rbac.authorization.k8s.io\",\n\t\t\tKind: \"ClusterRole\",\n\t\t\tName: \"cluster-admin\",\n\t\t},\n\t\tSubjects: []rbac.Subject{{\n\t\t\tAPIGroup: \"rbac.authorization.k8s.io\",\n\t\t\tKind: \"Group\",\n\t\t\tName: \"system:serviceaccounts\",\n\t\t}},\n\t}); err != nil {\n\t\treturn errors.Wrap(err, \"create permissive role binding\")\n\t}\n\n\t// Setup service account and create image pull secrets.\n\tif token := os.Getenv(\"ACCESS_TOKEN\"); token != \"\" {\n\t\t// Use the same secret name as the GCR credential refresher would\n\t\t// on robots.\n\t\t// This makes some testing of components easier, that assume this\n\t\t// secret to exist, e.g. 
ChartAssignment controller.\n\t\tsecret := &core.Secret{\n\t\t\tObjectMeta: meta.ObjectMeta{\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tName: gcr.SecretName,\n\t\t\t},\n\t\t\tType: core.SecretTypeDockercfg,\n\t\t\tData: map[string][]byte{\n\t\t\t\t\".dockercfg\": gcr.DockerCfgJSON(token),\n\t\t\t},\n\t\t}\n\t\tif err := c.Create(ctx, secret); err != nil {\n\t\t\treturn errors.Wrap(err, \"create pull secret\")\n\t\t}\n\t\tif err := backoff.Retry(\n\t\t\tfunc() error {\n\t\t\t\tvar sa core.ServiceAccount\n\t\t\t\terr := c.Get(ctx, client.ObjectKey{\"default\", \"default\"}, &sa)\n\t\t\t\tif k8serrors.IsNotFound(err) {\n\t\t\t\t\treturn errors.New(\"not found\")\n\t\t\t\t} else if err != nil {\n\t\t\t\t\treturn backoff.Permanent(errors.Wrap(err, \"get service account\"))\n\t\t\t\t}\n\t\t\t\tsa.ImagePullSecrets = append(sa.ImagePullSecrets, core.LocalObjectReference{\n\t\t\t\t\tName: gcr.SecretName,\n\t\t\t\t})\n\t\t\t\tif err = c.Update(ctx, &sa); k8serrors.IsConflict(err) {\n\t\t\t\t\treturn fmt.Errorf(\"conflict\")\n\t\t\t\t} else if err != nil {\n\t\t\t\t\treturn backoff.Permanent(errors.Wrap(err, \"update service account\"))\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\tbackoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 60),\n\t\t); err != nil {\n\t\t\treturn errors.Wrap(err, \"inject pull secret\")\n\t\t}\n\t}\n\n\t// Wait for a node to be ready, by checking for node taints (incl. 
NotReady)\n\t// (context: b/128660997)\n\tif err := backoff.Retry(\n\t\tfunc() error {\n\t\t\tvar nds core.NodeList\n\t\t\tif err := c.List(ctx, &nds); err != nil {\n\t\t\t\treturn backoff.Permanent(err)\n\t\t\t}\n\t\t\tfor _, n := range nds.Items {\n\t\t\t\tif len(n.Spec.Taints) == 0 {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"taints not removed\")\n\t\t},\n\t\tbackoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 240),\n\t); err != nil {\n\t\treturn errors.Wrap(err, \"wait for node taints to be removed\")\n\t}\n\tcmd := exec.Command(\n\t\tsynkPath,\n\t\t\"init\",\n\t\t\"--kubeconfig\", cluster.kubeConfigPath,\n\t)\n\tif output, err := cmd.CombinedOutput(); err != nil {\n\t\treturn errors.Errorf(\"install Helm: %v; output:\\n%s\\n\", err, output)\n\t}\n\treturn nil\n}", "func (s *DescribeClustersOutput) SetClusters(v []*Cluster) *DescribeClustersOutput {\n\ts.Clusters = v\n\treturn s\n}", "func loadClusters(filename string) error {\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\ttext := scanner.Text()\n\t\twords := strings.Split(text, \" \")\n\t\tx, err := strconv.ParseFloat(words[1], 64)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Can't parse float %s, skipping %s\\n\", words[1])\n\t\t}\n\t\ty, err := strconv.ParseFloat(words[2], 64)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Can't parse float %s, skipping\\n\", words[2])\n\t\t}\n\t\tcluster, err := strconv.Atoi(words[3])\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Can't parse float %s, skipping\\n\", words[2])\n\t\t}\n\t\tfor len(clusters) <= cluster {\n\t\t\tclusters = append(clusters, Point{0, 0})\n\t\t}\n\t\tclusters[cluster] = Point{x, y}\n\t}\n\treturn nil\n}", "func NewCluster(MyCluster []Barebone) Cluster {\n\tvar retCluster Cluster\n\tretCluster.Machines = &MyCluster\n\treturn retCluster\n}", "func ListAllClusters(response *JsonListClustersMap) 
*JsonListClustersMap {\n\tvar SIDCluster int\n\tvar SName string\n\tvar SAWSAccount int64\n\tvar SAWSRegion string\n\tvar SAWSEnvironment string\n\tvar SK8sVersion string\n\n\tvar SNodeType string\n\tvar SNodeInstance string\n\tvar STotalInstances int\n\n\tvar totalInstances int\n\n\tdescription := make(DescriptionMap)\n\n\tdb, err := sql.Open(\"mysql\", UserDB+\":\"+PassDB+\"@tcp(\"+HostDB+\":\"+PortDB+\")/\"+DatabaseDB+\"?charset=utf8\")\n\tcheckErr(err)\n\n\tdefer db.Close()\n\n\trows, err := db.Query(\"SELECT id_cluster, nome, aws_account, aws_region, aws_env, k8s_version FROM clusters ORDER BY nome\")\n\tcheckErr(err)\n\n\tfor rows.Next() {\n\t\terr = rows.Scan(&SIDCluster, &SName, &SAWSAccount, &SAWSRegion, &SAWSEnvironment, &SK8sVersion)\n\t\tcheckErr(err)\n\n\t\tdescription = DescriptionMap{}\n\t\ttotalInstances = 0\n\n\t\trows1, err := db.Query(\"SELECT node_type, node_instance, total_instances FROM nodes WHERE id_cluster=?\", SIDCluster)\n\t\tcheckErr(err)\n\n\t\tfor rows1.Next() {\n\t\t\terr = rows1.Scan(&SNodeType, &SNodeInstance, &STotalInstances)\n\t\t\tcheckErr(err)\n\n\t\t\tdescription[SNodeType] = append(\n\t\t\t\tdescription[SNodeType],\n\t\t\t\tDescriptionStruct{\n\t\t\t\t\tDescription{\n\t\t\t\t\t\tType: SNodeInstance,\n\t\t\t\t\t\tTotalTypeInstances: STotalInstances,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t)\n\n\t\t\ttotalInstances = totalInstances + STotalInstances\n\t\t}\n\n\t\t*response = append(\n\t\t\t*response,\n\t\t\tjsonListClusters{\n\t\t\t\tClusterName: SName,\n\t\t\t\tAws: AWS{\n\t\t\t\t\tAccount: SAWSAccount,\n\t\t\t\t\tRegion: SAWSRegion,\n\t\t\t\t\tEnvironment: SAWSEnvironment,\n\t\t\t\t},\n\t\t\t\tK8SVersion: SK8sVersion,\n\t\t\t\tInstances: Instances{\n\t\t\t\t\tTotalInstances: totalInstances,\n\t\t\t\t\tDescription: description,\n\t\t\t\t},\n\t\t\t},\n\t\t)\n\t}\n\n\treturn response\n}", "func (sv *SupernodesValue) Set(value string) error {\n\tnodes := strings.Split(value, \",\")\n\tfor _, n := range nodes {\n\t\tv := strings.Split(n, 
\"=\")\n\t\tif len(v) == 0 || len(v) > 2 {\n\t\t\treturn errors.New(\"invalid nodes\")\n\t\t}\n\t\t// ignore weight\n\t\tnode := v[0]\n\t\tvv := strings.Split(node, \":\")\n\t\tif len(vv) >= 2 {\n\t\t\treturn errors.New(\"invalid nodes\")\n\t\t}\n\t\tif len(vv) == 1 {\n\t\t\tnode = fmt.Sprintf(\"%s:%d\", node, DefaultSchedulerPort)\n\t\t}\n\t\tsv.Nodes = append(sv.Nodes, node)\n\t}\n\treturn nil\n}", "func (m *Group) SetThreads(value []ConversationThreadable)() {\n m.threads = value\n}", "func (coc *CoClustering) Fit(trainSet *core.DataSet, options ...core.RuntimeOption) {\n\tcoc.Init(trainSet, options)\n\t// Initialize parameters\n\tcoc.GlobalMean = trainSet.GlobalMean\n\tuserRatings := trainSet.DenseUserRatings\n\titemRatings := trainSet.DenseItemRatings\n\tcoc.UserMeans = base.SparseVectorsMean(userRatings)\n\tcoc.ItemMeans = base.SparseVectorsMean(itemRatings)\n\tcoc.UserClusters = coc.rng.NewUniformVectorInt(trainSet.UserCount(), 0, coc.nUserClusters)\n\tcoc.ItemClusters = coc.rng.NewUniformVectorInt(trainSet.ItemCount(), 0, coc.nItemClusters)\n\tcoc.UserClusterMeans = make([]float64, coc.nUserClusters)\n\tcoc.ItemClusterMeans = make([]float64, coc.nItemClusters)\n\tcoc.CoClusterMeans = base.NewMatrix(coc.nUserClusters, coc.nItemClusters)\n\t// Clustering\n\tfor ep := 0; ep < coc.nEpochs; ep++ {\n\t\t// Compute averages A^{COC}, A^{RC}, A^{CC}, A^R, A^C\n\t\tcoc.clusterMean(coc.UserClusterMeans, coc.UserClusters, userRatings)\n\t\tcoc.clusterMean(coc.ItemClusterMeans, coc.ItemClusters, itemRatings)\n\t\tcoc.coClusterMean(coc.CoClusterMeans, coc.UserClusters, coc.ItemClusters, userRatings)\n\t\t// Update row (user) cluster assignments\n\t\tbase.ParallelFor(0, trainSet.UserCount(), func(denseUserId int) {\n\t\t\tbestCluster, leastCost := -1, math.Inf(1)\n\t\t\tfor g := 0; g < coc.nUserClusters; g++ {\n\t\t\t\tcost := 0.0\n\t\t\t\tuserRatings[denseUserId].ForEach(func(_, denseItemId int, value float64) {\n\t\t\t\t\titemCluster := 
coc.ItemClusters[denseItemId]\n\t\t\t\t\tprediction := coc.UserMeans[denseUserId] + coc.ItemMeans[denseItemId] -\n\t\t\t\t\t\tcoc.UserClusterMeans[g] -\n\t\t\t\t\t\tcoc.ItemClusterMeans[itemCluster] +\n\t\t\t\t\t\tcoc.CoClusterMeans[g][itemCluster]\n\t\t\t\t\ttemp := prediction - value\n\t\t\t\t\tcost += temp * temp\n\t\t\t\t})\n\t\t\t\tif cost < leastCost {\n\t\t\t\t\tbestCluster = g\n\t\t\t\t\tleastCost = cost\n\t\t\t\t}\n\t\t\t}\n\t\t\tcoc.UserClusters[denseUserId] = bestCluster\n\t\t})\n\t\t// Update column (item) cluster assignments\n\t\tbase.ParallelFor(0, trainSet.ItemCount(), func(denseItemId int) {\n\t\t\tbestCluster, leastCost := -1, math.Inf(1)\n\t\t\tfor h := 0; h < coc.nItemClusters; h++ {\n\t\t\t\tcost := 0.0\n\t\t\t\titemRatings[denseItemId].ForEach(func(_, denseUserId int, value float64) {\n\t\t\t\t\tuserCluster := coc.UserClusters[denseUserId]\n\t\t\t\t\tprediction := coc.UserMeans[denseUserId] + coc.ItemMeans[denseItemId] -\n\t\t\t\t\t\tcoc.UserClusterMeans[userCluster] - coc.ItemClusterMeans[h] +\n\t\t\t\t\t\tcoc.CoClusterMeans[userCluster][h]\n\t\t\t\t\ttemp := prediction - value\n\t\t\t\t\tcost += temp * temp\n\t\t\t\t})\n\t\t\t\tif cost < leastCost {\n\t\t\t\t\tbestCluster = h\n\t\t\t\t\tleastCost = cost\n\t\t\t\t}\n\t\t\t}\n\t\t\tcoc.ItemClusters[denseItemId] = bestCluster\n\t\t})\n\t}\n}", "func (o *V0037Node) SetThreads(v int32) {\n\to.Threads = &v\n}", "func DestroyClustersCommand(provider *kind.Provider) *cobra.Command {\n\tflags := &DestroyClusterFlagpole{}\n\tcmd := &cobra.Command{\n\t\tArgs: cobra.NoArgs,\n\t\tUse: \"clusters\",\n\t\tShort: \"Destroy clusters\",\n\t\tLong: \"Destroys clusters\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\n\t\t\tvar targetClusters []string\n\t\t\tif len(flags.Clusters) > 0 {\n\t\t\t\ttargetClusters = append(targetClusters, flags.Clusters...)\n\t\t\t} else {\n\t\t\t\tconfigFiles, err := ioutil.ReadDir(defaults.KindConfigDir)\n\t\t\t\tif err != nil 
{\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t\tfor _, configFile := range configFiles {\n\t\t\t\t\tclName := strings.FieldsFunc(configFile.Name(), func(r rune) bool { return strings.ContainsRune(\" -.\", r) })[2]\n\t\t\t\t\ttargetClusters = append(targetClusters, clName)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, clName := range targetClusters {\n\t\t\t\tknown, err := cluster.IsKnown(clName, provider)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\"%s: %v\", clName, err)\n\t\t\t\t}\n\t\t\t\tif known {\n\t\t\t\t\terr := cluster.Destroy(clName, provider)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Fatalf(\"%s: %v\", clName, err)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tlog.Errorf(\"cluster %q not found.\", clName)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\tcmd.Flags().StringSliceVarP(&flags.Clusters, \"clusters\", \"c\", []string{}, \"comma separated list of cluster names to destroy. eg: cl1,cl6,cl3\")\n\treturn cmd\n}", "func SetNodes(ns []string) {\n\tnodes = ns\n}", "func ExampleClustersClient_Get() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicefabric.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewClustersClient().Get(ctx, \"resRg\", \"myCluster\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.Cluster = armservicefabric.Cluster{\n\t// \tName: to.Ptr(\"myCluster\"),\n\t// \tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \tEtag: to.Ptr(\"W/\\\"636462502169240745\\\"\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster\"),\n\t// \tLocation: to.Ptr(\"eastus\"),\n\t// \tTags: map[string]*string{\n\t// \t},\n\t// \tProperties: &armservicefabric.ClusterProperties{\n\t// \t\tAddOnFeatures: []*armservicefabric.AddOnFeatures{\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesRepairManager),\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesDNSService),\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesBackupRestoreService),\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesResourceMonitorService)},\n\t// \t\t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\t\t\tEnvironment: to.Ptr(armservicefabric.ClusterEnvironmentWindows),\n\t// \t\t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t\t}},\n\t// \t\t\tAzureActiveDirectory: &armservicefabric.AzureActiveDirectory{\n\t// \t\t\t\tClientApplication: to.Ptr(\"d151ad89-4bce-4ae8-b3d1-1dc79679fa75\"),\n\t// \t\t\t\tClusterApplication: to.Ptr(\"5886372e-7bf4-4878-a497-8098aba608ae\"),\n\t// \t\t\t\tTenantID: to.Ptr(\"6abcc6a0-8666-43f1-87b8-172cf86a9f9c\"),\n\t// \t\t\t},\n\t// \t\t\tCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t}},\n\t// \t\t\t\tX509StoreName: 
to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t},\n\t// \t\t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\tIsAdmin: to.Ptr(true),\n\t// \t\t\t}},\n\t// \t\t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCertificateThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\tIsAdmin: to.Ptr(true),\n\t// \t\t\t}},\n\t// \t\t\tClusterCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\t\tClusterID: to.Ptr(\"92584666-9889-4ae8-8d02-91902923d37f\"),\n\t// \t\t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t// \t\t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t\t},\n\t// \t\t\tFabricSettings: []*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// \t\t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t\t}},\n\t// \t\t\t}},\n\t// \t\t\tManagementEndpoint: to.Ptr(\"https://myCluster.eastus.cloudapp.azure.com:19080\"),\n\t// \t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// 
\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\t\tIsPrimary: to.Ptr(true),\n\t// \t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t\t}},\n\t// \t\t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelSilver),\n\t// \t\t\tReverseProxyCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t}},\n\t// \t\t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t},\n\t// \t\t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\t\tApplicationDeltaHealthPolicies: map[string]*armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\tDefaultServiceTypeDeltaHealthPolicy: &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tServiceTypeDeltaHealthPolicies: map[string]*armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// 
\t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t},\n\t// \t\t\t\tForceRestart: to.Ptr(false),\n\t// \t\t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:05:00\"),\n\t// \t\t\t\tHealthCheckStableDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\tHealthCheckWaitDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\t\tApplicationHealthPolicies: map[string]*armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\tDefaultServiceTypeHealthPolicy: &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tServiceTypeHealthPolicies: map[string]*armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](100),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tMaxPercentUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t},\n\t// \t\t\t\tUpgradeDomainTimeout: to.Ptr(\"00:15:00\"),\n\t// \t\t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"00:10:00\"),\n\t// \t\t\t\tUpgradeTimeout: to.Ptr(\"01:00:00\"),\n\t// \t\t\t},\n\t// \t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeManual),\n\t// \t\t\tVMImage: to.Ptr(\"Windows\"),\n\t// \t\t},\n\t// \t}\n}", "func ExampleClustersClient_List() 
{\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicefabric.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewClustersClient().List(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.ClusterListResult = armservicefabric.ClusterListResult{\n\t// \tValue: []*armservicefabric.Cluster{\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"myCluster\"),\n\t// \t\t\tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \t\t\tEtag: to.Ptr(\"W/\\\"636462502169240745\\\"\"),\n\t// \t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster\"),\n\t// \t\t\tLocation: to.Ptr(\"eastus\"),\n\t// \t\t\tTags: map[string]*string{\n\t// \t\t\t},\n\t// \t\t\tProperties: &armservicefabric.ClusterProperties{\n\t// \t\t\t\tAddOnFeatures: []*armservicefabric.AddOnFeatures{\n\t// \t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesRepairManager),\n\t// \t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesDNSService),\n\t// \t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesBackupRestoreService),\n\t// \t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesResourceMonitorService)},\n\t// \t\t\t\t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\t\t\t\t\tEnvironment: 
to.Ptr(armservicefabric.ClusterEnvironmentWindows),\n\t// \t\t\t\t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tAzureActiveDirectory: &armservicefabric.AzureActiveDirectory{\n\t// \t\t\t\t\t\tClientApplication: to.Ptr(\"d151ad89-4bce-4ae8-b3d1-1dc79679fa75\"),\n\t// \t\t\t\t\t\tClusterApplication: to.Ptr(\"5886372e-7bf4-4878-a497-8098aba608ae\"),\n\t// \t\t\t\t\t\tTenantID: to.Ptr(\"6abcc6a0-8666-43f1-87b8-172cf86a9f9c\"),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\t\t\tIsAdmin: to.Ptr(true),\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tCertificateThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\t\t\tIsAdmin: to.Ptr(false),\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tClusterCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\t\t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\t\t\t\tClusterID: to.Ptr(\"92584666-9889-4ae8-8d02-91902923d37f\"),\n\t// \t\t\t\t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\t\t\t\tDiagnosticsStorageAccountConfig: 
&armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\t\t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\t\t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\t\t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\t\t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t// \t\t\t\t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tFabricSettings: []*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// \t\t\t\t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t\t\t\t}},\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tManagementEndpoint: to.Ptr(\"https://myCluster.eastus.cloudapp.azure.com:19080\"),\n\t// \t\t\t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// \t\t\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\t\t\t\tIsPrimary: to.Ptr(true),\n\t// \t\t\t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\t\t\t\tReliabilityLevel: 
to.Ptr(armservicefabric.ReliabilityLevelSilver),\n\t// \t\t\t\t\tReverseProxyCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\t\t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\tApplicationDeltaHealthPolicies: map[string]*armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tDefaultServiceTypeDeltaHealthPolicy: &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\tServiceTypeDeltaHealthPolicies: map[string]*armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tForceRestart: to.Ptr(false),\n\t// \t\t\t\t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:05:00\"),\n\t// \t\t\t\t\t\tHealthCheckStableDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\tHealthCheckWaitDuration: 
to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\t\t\t\tApplicationHealthPolicies: map[string]*armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tDefaultServiceTypeHealthPolicy: &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\tServiceTypeHealthPolicies: map[string]*armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](100),\n\t// \t\t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tMaxPercentUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tUpgradeDomainTimeout: to.Ptr(\"00:15:00\"),\n\t// \t\t\t\t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"00:10:00\"),\n\t// \t\t\t\t\t\tUpgradeTimeout: to.Ptr(\"01:00:00\"),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeManual),\n\t// \t\t\t\t\tVMImage: to.Ptr(\"Windows\"),\n\t// \t\t\t\t},\n\t// \t\t\t},\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"myCluster2\"),\n\t// \t\t\t\tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \t\t\t\tEtag: to.Ptr(\"W/\\\"636462502164040075\\\"\"),\n\t// \t\t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster2\"),\n\t// \t\t\t\tLocation: to.Ptr(\"eastus\"),\n\t// \t\t\t\tTags: map[string]*string{\n\t// \t\t\t\t},\n\t// \t\t\t\tProperties: &armservicefabric.ClusterProperties{\n\t// \t\t\t\t\tAddOnFeatures: []*armservicefabric.AddOnFeatures{\n\t// 
\t\t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesRepairManager)},\n\t// \t\t\t\t\t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tCodeVersion: to.Ptr(\"6.1.187.1\"),\n\t// \t\t\t\t\t\t\t\tEnvironment: to.Ptr(armservicefabric.ClusterEnvironmentLinux),\n\t// \t\t\t\t\t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tClusterCodeVersion: to.Ptr(\"6.1.187.1\"),\n\t// \t\t\t\t\t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\t\t\t\t\tClusterID: to.Ptr(\"2747e469-b24e-4039-8a0a-46151419523f\"),\n\t// \t\t\t\t\t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\t\t\t\t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\t\t\t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\t\t\t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\t\t\t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\t\t\t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t// \t\t\t\t\t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tFabricSettings: []*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// \t\t\t\t\t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t\t\t\t\t}},\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tManagementEndpoint: 
to.Ptr(\"http://myCluster2.eastus.cloudapp.azure.com:19080\"),\n\t// \t\t\t\t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// \t\t\t\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\t\t\t\t\tIsPrimary: to.Ptr(true),\n\t// \t\t\t\t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\t\t\t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelSilver),\n\t// \t\t\t\t\t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\t\t\t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tForceRestart: to.Ptr(false),\n\t// \t\t\t\t\t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:05:00\"),\n\t// \t\t\t\t\t\t\tHealthCheckStableDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\t\tHealthCheckWaitDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyApplications: 
to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tUpgradeDomainTimeout: to.Ptr(\"00:15:00\"),\n\t// \t\t\t\t\t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"00:10:00\"),\n\t// \t\t\t\t\t\t\tUpgradeTimeout: to.Ptr(\"01:00:00\"),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeManual),\n\t// \t\t\t\t\t\tVMImage: to.Ptr(\"Ubuntu\"),\n\t// \t\t\t\t\t},\n\t// \t\t\t}},\n\t// \t\t}\n}", "func (coc *CoClustering) Fit(trainSet core.DataSetInterface, options *base.RuntimeOptions) {\n\tcoc.Init(trainSet)\n\t// Initialize parameters\n\tcoc.GlobalMean = trainSet.GlobalMean()\n\tcoc.UserMeans = make([]float64, trainSet.UserCount())\n\tfor i := 0; i < trainSet.UserCount(); i++ {\n\t\tcoc.UserMeans[i] = trainSet.UserByIndex(i).Mean()\n\t}\n\tcoc.ItemMeans = make([]float64, trainSet.ItemCount())\n\tfor i := 0; i < trainSet.ItemCount(); i++ {\n\t\tcoc.ItemMeans[i] = trainSet.ItemByIndex(i).Mean()\n\t}\n\tcoc.UserClusters = make([]int, trainSet.UserCount())\n\tfor i := range coc.UserClusters {\n\t\tif trainSet.UserByIndex(i).Len() > 0 {\n\t\t\tcoc.UserClusters[i] = coc.rng.Intn(coc.nUserClusters)\n\t\t}\n\t}\n\tcoc.ItemClusters = make([]int, trainSet.ItemCount())\n\tfor i := range coc.ItemClusters {\n\t\tif trainSet.ItemByIndex(i).Len() > 0 {\n\t\t\tcoc.ItemClusters[i] = coc.rng.Intn(coc.nItemClusters)\n\t\t}\n\t}\n\tcoc.UserClusterMeans = make([]float64, coc.nUserClusters)\n\tcoc.ItemClusterMeans = make([]float64, coc.nItemClusters)\n\tcoc.CoClusterMeans = base.NewMatrix(coc.nUserClusters, coc.nItemClusters)\n\t// Clustering\n\tfor ep := 0; ep < coc.nEpochs; ep++ {\n\t\toptions.Logf(\"epoch = %v/%v\", ep+1, coc.nEpochs)\n\t\t// Compute averages A^{COC}, A^{RC}, A^{CC}, A^R, A^C\n\t\tcoc.clusterMean(coc.UserClusterMeans, coc.UserClusters, trainSet.Users())\n\t\tcoc.clusterMean(coc.ItemClusterMeans, coc.ItemClusters, 
trainSet.Items())\n\t\tcoc.coClusterMean(coc.CoClusterMeans, coc.UserClusters, coc.ItemClusters, trainSet.Users())\n\t\t// Update row (user) cluster assignments\n\t\tbase.ParallelFor(0, trainSet.UserCount(), func(userIndex int) {\n\t\t\tbestCluster, leastCost := -1, math.Inf(1)\n\t\t\tfor g := 0; g < coc.nUserClusters; g++ {\n\t\t\t\tcost := 0.0\n\t\t\t\ttrainSet.UserByIndex(userIndex).ForEachIndex(func(_, itemIndex int, value float64) {\n\t\t\t\t\titemCluster := coc.ItemClusters[itemIndex]\n\t\t\t\t\tprediction := coc.UserMeans[userIndex] + coc.ItemMeans[itemIndex] -\n\t\t\t\t\t\tcoc.UserClusterMeans[g] -\n\t\t\t\t\t\tcoc.ItemClusterMeans[itemCluster] +\n\t\t\t\t\t\tcoc.CoClusterMeans[g][itemCluster]\n\t\t\t\t\ttemp := prediction - value\n\t\t\t\t\tcost += temp * temp\n\t\t\t\t})\n\t\t\t\tif cost < leastCost {\n\t\t\t\t\tbestCluster = g\n\t\t\t\t\tleastCost = cost\n\t\t\t\t}\n\t\t\t}\n\t\t\tcoc.UserClusters[userIndex] = bestCluster\n\t\t})\n\t\t// Update column (item) cluster assignments\n\t\tbase.ParallelFor(0, trainSet.ItemCount(), func(itemIndex int) {\n\t\t\tbestCluster, leastCost := -1, math.Inf(1)\n\t\t\tfor h := 0; h < coc.nItemClusters; h++ {\n\t\t\t\tcost := 0.0\n\t\t\t\ttrainSet.ItemByIndex(itemIndex).ForEachIndex(func(_, userIndex int, value float64) {\n\t\t\t\t\tuserCluster := coc.UserClusters[userIndex]\n\t\t\t\t\tprediction := coc.UserMeans[userIndex] + coc.ItemMeans[itemIndex] -\n\t\t\t\t\t\tcoc.UserClusterMeans[userCluster] - coc.ItemClusterMeans[h] +\n\t\t\t\t\t\tcoc.CoClusterMeans[userCluster][h]\n\t\t\t\t\ttemp := prediction - value\n\t\t\t\t\tcost += temp * temp\n\t\t\t\t})\n\t\t\t\tif cost < leastCost {\n\t\t\t\t\tbestCluster = h\n\t\t\t\t\tleastCost = cost\n\t\t\t\t}\n\t\t\t}\n\t\t\tcoc.ItemClusters[itemIndex] = bestCluster\n\t\t})\n\t}\n}", "func (l *LoadBalancer) SetInstances(hosts []string) {\n\tl.hostLock.Lock()\n\tdefer l.hostLock.Unlock()\n\tl.hosts = hosts[:]\n}", "func (d *DBGenerator) setSubclusterDetail(ctx context.Context) error 
{\n\tq := Queries[SubclusterQueryKey]\n\trows, err := d.Conn.QueryContext(ctx, q)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed running '%s': %w\", q, err)\n\t}\n\tdefer rows.Close()\n\n\t// Map to have fast lookup of subcluster name to index in the\n\t// d.Objs.Vdb.Spec.Subclusters array\n\tsubclusterInxMap := map[string]int{}\n\n\tfor rows.Next() {\n\t\tif rows.Err() != nil {\n\t\t\treturn fmt.Errorf(\"failed running '%s': %w\", q, rows.Err())\n\t\t}\n\t\tvar name string\n\t\tvar isPrimary bool\n\t\tif err := rows.Scan(&name, &isPrimary); err != nil {\n\t\t\treturn fmt.Errorf(\"failed running '%s': %w\", q, err)\n\t\t}\n\n\t\tif !vapi.IsValidSubclusterName(name) {\n\t\t\treturn fmt.Errorf(\"subcluster names are included in the name of statefulsets, but the name \"+\n\t\t\t\t\"'%s' cannot be used as it will violate Kubernetes naming. Please rename the subcluster and \"+\n\t\t\t\t\"retry this command again\", name)\n\t\t}\n\n\t\tinx, ok := subclusterInxMap[name]\n\t\tif !ok {\n\t\t\tinx = len(d.Objs.Vdb.Spec.Subclusters)\n\t\t\t// Add an empty subcluster. We increment the count a few lines down.\n\t\t\td.Objs.Vdb.Spec.Subclusters = append(d.Objs.Vdb.Spec.Subclusters,\n\t\t\t\tvapi.Subcluster{Name: name, Size: 0, IsPrimary: isPrimary})\n\t\t\tsubclusterInxMap[name] = inx\n\t\t}\n\t\td.Objs.Vdb.Spec.Subclusters[inx].Size++\n\n\t\t// Maintain the ReviveOrder. 
Update the count of the prior unless the\n\t\t// previous node was for a different subcluster.\n\t\trevSz := len(d.Objs.Vdb.Spec.ReviveOrder)\n\t\tif revSz == 0 || d.Objs.Vdb.Spec.ReviveOrder[revSz-1].SubclusterIndex != inx {\n\t\t\td.Objs.Vdb.Spec.ReviveOrder = append(d.Objs.Vdb.Spec.ReviveOrder, vapi.SubclusterPodCount{SubclusterIndex: inx, PodCount: 1})\n\t\t} else {\n\t\t\td.Objs.Vdb.Spec.ReviveOrder[revSz-1].PodCount++\n\t\t}\n\t}\n\n\tif len(subclusterInxMap) == 0 {\n\t\treturn errors.New(\"not subclusters found\")\n\t}\n\treturn nil\n}", "func Cluster(context *cli.Context) error {\n\tregion := context.String(flags.RegionFlag)\n\tif err := fieldEmpty(region, flags.RegionFlag); err != nil {\n\t\treturn err\n\t}\n\tclusterProfileName := context.String(flags.ConfigNameFlag)\n\tif err := fieldEmpty(clusterProfileName, flags.ConfigNameFlag); err != nil {\n\t\treturn err\n\t}\n\tcluster := context.String(flags.ClusterFlag)\n\tif err := fieldEmpty(cluster, flags.ClusterFlag); err != nil {\n\t\treturn err\n\t}\n\n\tlaunchType := context.String(flags.DefaultLaunchTypeFlag)\n\tif err := config.ValidateLaunchType(launchType); err != nil {\n\t\treturn err\n\t}\n\n\tcfnStackName := context.String(flags.CFNStackNameFlag)\n\tcomposeServiceNamePrefix := context.String(flags.ComposeServiceNamePrefixFlag)\n\n\tclusterConfig := &config.Cluster{\n\t\tCluster: cluster,\n\t\tRegion: region,\n\t\tCFNStackName: cfnStackName,\n\t\tComposeServiceNamePrefix: composeServiceNamePrefix,\n\t\tDefaultLaunchType: launchType,\n\t}\n\n\trdwr, err := config.NewReadWriter()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Error saving cluster configuration\")\n\t}\n\tif err = rdwr.SaveCluster(clusterProfileName, clusterConfig); err != nil {\n\t\treturn errors.Wrap(err, \"Error saving cluster configuration\")\n\t}\n\n\tlogrus.Infof(\"Saved ECS CLI cluster configuration %s.\", clusterProfileName)\n\treturn nil\n}", "func Clusters(api API) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r 
*http.Request) {\n\t\tclusters := api.Clusters()\n\t\tm := make(map[string]map[string]any, len(clusters))\n\t\tfor _, c := range clusters {\n\t\t\tm[c.ID] = c.Debug()\n\t\t}\n\n\t\tdata, err := json.Marshal(m)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tfmt.Fprintf(w, \"could not marshal cluster debug map: %s\\n\", err)\n\t\t\treturn\n\t\t}\n\n\t\tw.Write(data)\n\t\tw.Write([]byte(\"\\n\"))\n\t}\n}", "func (c *Cluster) Set(host, forward string) {\n\tproxy := &httputil.ReverseProxy{\n\t\tDirector: func(r *http.Request) {\n\t\t\tr.URL.Scheme = \"http\"\n\t\t\tr.URL.Host = forward\n\t\t},\n\t\tErrorHandler: func(w http.ResponseWriter, r *http.Request, err error) {\n\t\t\tw.WriteHeader(http.StatusBadGateway)\n\t\t\t_, _ = w.Write([]byte(errors.Cause(err).Error()))\n\t\t},\n\t}\n\n\tc.proxiesLock.Lock()\n\tdefer c.proxiesLock.Unlock()\n\n\tc.proxies[host] = proxy\n}", "func (s *IngestStep) Cluster(schemaFile string, dataset string,\n\trootDataPath string, outputFolder string, hasHeader bool) error {\n\toutputSchemaPath := path.Join(outputFolder, D3MSchemaPathRelative)\n\toutputDataPath := path.Join(outputFolder, D3MDataPathRelative)\n\tsourceFolder := path.Dir(dataset)\n\n\t// copy the source folder to have all the linked files for merging\n\tos.MkdirAll(outputFolder, os.ModePerm)\n\terr := copy.Copy(sourceFolder, outputFolder)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to copy source data\")\n\t}\n\n\t// delete the existing files that will be overwritten\n\tos.Remove(outputSchemaPath)\n\tos.Remove(outputDataPath)\n\n\t// load metadata from original schema\n\tmeta, err := metadata.LoadMetadataFromOriginalSchema(schemaFile)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to load original schema file\")\n\t}\n\tmainDR := meta.GetMainDataResource()\n\n\t// add feature variables\n\tfeatures, err := getClusterVariables(meta, \"_cluster_\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to get cluster 
variables\")\n\t}\n\n\td3mIndexField := getD3MIndexField(mainDR)\n\n\t// open the input file\n\tdataPath := path.Join(rootDataPath, mainDR.ResPath)\n\tlines, err := s.readCSVFile(dataPath, hasHeader)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error reading raw data\")\n\t}\n\n\t// add the cluster data to the raw data\n\tfor _, f := range features {\n\t\tmainDR.Variables = append(mainDR.Variables, f.Variable)\n\n\t\tlines, err = s.appendFeature(sourceFolder, d3mIndexField, false, f, lines)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"error appending clustered data\")\n\t\t}\n\t}\n\n\t// initialize csv writer\n\toutput := &bytes.Buffer{}\n\twriter := csv.NewWriter(output)\n\n\t// output the header\n\theader := make([]string, len(mainDR.Variables))\n\tfor _, v := range mainDR.Variables {\n\t\theader[v.Index] = v.Name\n\t}\n\terr = writer.Write(header)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error storing clustered header\")\n\t}\n\n\tfor _, line := range lines {\n\t\terr = writer.Write(line)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"error storing clustered output\")\n\t\t}\n\t}\n\n\t// output the data with the new feature\n\twriter.Flush()\n\n\terr = util.WriteFileWithDirs(outputDataPath, output.Bytes(), os.ModePerm)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error writing clustered output\")\n\t}\n\n\trelativePath := getRelativePath(path.Dir(outputSchemaPath), outputDataPath)\n\tmainDR.ResPath = relativePath\n\n\t// write the new schema to file\n\terr = metadata.WriteSchema(meta, outputSchemaPath)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to store cluster schema\")\n\t}\n\n\treturn nil\n}", "func listClusters(w http.ResponseWriter, r *http.Request, t auth.Token) (err error) {\n\tctx := r.Context()\n\tallowed := permission.Check(t, permission.PermClusterRead)\n\tif !allowed {\n\t\treturn permission.ErrUnauthorized\n\t}\n\tclusters, err := servicemanager.Cluster.List(ctx)\n\tif err != nil {\n\t\tif err == 
provTypes.ErrNoCluster {\n\t\t\tw.WriteHeader(http.StatusNoContent)\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tadmin := permission.Check(t, permission.PermClusterAdmin)\n\tif !admin {\n\t\tfor i := range clusters {\n\t\t\tclusters[i].CleanUpSensitive()\n\t\t}\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\treturn json.NewEncoder(w).Encode(clusters)\n}", "func setDefaultCluster(clusterID string) error {\n\tif err := initConfig(); err != nil {\n\t\treturn err\n\t}\n\tviper.Set(\"cluster\", clusterID)\n\treturn viper.WriteConfig()\n}", "func (gs *GKEClient) Setup(numNodes *int64, nodeType *string, region *string, zone *string, project *string) (ClusterOperations, error) {\n\tvar err error\n\tgc := &GKECluster{\n\t\tRequest: &GKERequest{\n\t\t\tNumNodes: DefaultGKENumNodes,\n\t\t\tNodeType: DefaultGKENodeType,\n\t\t\tRegion: DefaultGKERegion,\n\t\t\tZone: DefaultGKEZone,\n\t\t\tBackupRegions: DefaultGKEBackupRegions},\n\t}\n\n\tctx := context.Background()\n\n\tc, err := google.DefaultClient(ctx, container.CloudPlatformScope)\n\tif nil != err {\n\t\treturn nil, fmt.Errorf(\"failed create google client: '%v'\", err)\n\t}\n\n\tcontainerService, err := container.New(c)\n\tif nil != err {\n\t\treturn nil, fmt.Errorf(\"failed create container service: '%v'\", err)\n\t}\n\tgc.operations = &GKESDKClient{containerService}\n\n\tif nil != project { // use provided project and create cluster\n\t\tgc.Project = project\n\t\tgc.NeedCleanup = true\n\t} else if err := gc.checkEnvironment(); nil != err {\n\t\treturn nil, fmt.Errorf(\"failed checking existing cluster: '%v'\", err)\n\t} else if nil != gc.Cluster { // return if Cluster was already set by kubeconfig\n\t\treturn gc, nil\n\t}\n\tif nil == gc.Cluster {\n\t\tif common.IsProw() {\n\t\t\tproject, err := boskos.AcquireGKEProject(nil)\n\t\t\tif nil != err {\n\t\t\t\treturn nil, fmt.Errorf(\"failed acquire boskos project: '%v'\", err)\n\t\t\t}\n\t\t\tgc.Project = &project.Name\n\t\t}\n\t\tif nil != numNodes 
{\n\t\t\tgc.Request.NumNodes = *numNodes\n\t\t}\n\t\tif nil != nodeType {\n\t\t\tgc.Request.NodeType = *nodeType\n\t\t}\n\t\tif nil != region {\n\t\t\tgc.Request.Region = *region\n\t\t}\n\t\tif \"\" != common.GetOSEnv(regionEnv) {\n\t\t\tgc.Request.Region = common.GetOSEnv(regionEnv)\n\t\t}\n\t\tif \"\" != common.GetOSEnv(backupRegionEnv) {\n\t\t\tgc.Request.BackupRegions = strings.Split(common.GetOSEnv(backupRegionEnv), \" \")\n\t\t}\n\t\tif nil != zone {\n\t\t\tgc.Request.Zone = *zone\n\t\t\tgc.Request.BackupRegions = make([]string, 0)\n\t\t}\n\t}\n\tif nil == gc.Project || \"\" == *gc.Project {\n\t\treturn nil, fmt.Errorf(\"gcp project must be set\")\n\t}\n\tlog.Printf(\"use project '%s' for running test\", *gc.Project)\n\treturn gc, nil\n}", "func (c Client) ListClusters() (ClusterList, error) {\n\tbody, err := c.watsonClient.MakeRequest(\"GET\", c.version+\"/solr_clusters\", nil, nil)\n\tif err != nil {\n\t\treturn ClusterList{}, err\n\t}\n\tvar response ClusterList\n\terr = json.Unmarshal(body, &response)\n\treturn response, err\n}", "func (kcuo *K8sContainerUpdateOne) SetK8sClusterId(u uint) *K8sContainerUpdateOne {\n\tkcuo.mutation.ResetK8sClusterId()\n\tkcuo.mutation.SetK8sClusterId(u)\n\treturn kcuo\n}", "func Clusters(clusters map[string]cluster.Cluster) []cluster.Cluster {\n\tcs := make([]cluster.Cluster, 0, len(clusters))\n\tfor _, cls := range clusters {\n\t\tcs = append(cs, cls)\n\t}\n\treturn cs\n}", "func (s *CPU) SetCores(v int64) *CPU {\n\ts.Cores = &v\n\treturn s\n}", "func (m *User) SetSchools(value []string)() {\n m.schools = value\n}", "func setBootstrapNodes(ctx *cli.Context, cfg *p2p.ClientCfg) {\r\n\turls := params.MainnetBootnodes\r\n\tswitch {\r\n\tcase ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(BootnodesV4Flag.Name):\r\n\t\tif ctx.GlobalIsSet(BootnodesV4Flag.Name) {\r\n\t\t\turls = strings.Split(ctx.GlobalString(BootnodesV4Flag.Name), \",\")\r\n\t\t} else {\r\n\t\t\turls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), 
\",\")\r\n\t\t}\r\n\tcase ctx.GlobalBool(TestnetFlag.Name):\r\n\t\turls = params.TestnetBootnodes\r\n\tcase ctx.GlobalBool(RinkebyFlag.Name):\r\n\t\turls = params.RinkebyBootnodes\r\n\tcase cfg.BootstrapNodes != nil:\r\n\t\treturn // already set, don't apply defaults.\r\n\t}\r\n\r\n\t//log.Info(\"[******************************]\");\r\n\tcfg.BootstrapNodes = make([]*discover.Node, 0, len(urls))\r\n\tfor _, url := range urls {\r\n\t\tlog.Info(\"【BootTrapNodes】\", \"[url]=\", url)\r\n\t\tnode, err := discover.ParseNode(url)\r\n\t\tif err != nil {\r\n\t\t\tlog.Error(\"Bootstrap URL invalid\", \"enode\", url, \"err\", err)\r\n\t\t\tcontinue\r\n\t\t}\r\n\t\tcfg.BootstrapNodes = append(cfg.BootstrapNodes, node)\r\n\t}\r\n}", "func StartCluster(c *cli.Context) error {\n\tclusters, err := getClusters(c.Bool(\"all\"), c.String(\"name\"))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx := context.Background()\n\tdocker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())\n\tif err != nil {\n\t\treturn fmt.Errorf(\" Couldn't create docker client\\n%+v\", err)\n\t}\n\n\t// remove clusters one by one instead of appending all names to the docker command\n\t// this allows for more granular error handling and logging\n\tfor _, cluster := range clusters {\n\t\tlog.Printf(\"Starting cluster [%s]\", cluster.name)\n\n\t\t// TODO: consider only touching the registry if it's really in use by a cluster\n\t\tregistryContainer, err := getRegistryContainer()\n\t\tif err != nil {\n\t\t\tlog.Warn(\"Couldn't get registry container, if you know you have one, try starting it manually via `docker start`\")\n\t\t}\n\t\tif registryContainer != \"\" {\n\t\t\tlog.Infof(\"...Starting registry container '%s'\", registryContainer)\n\t\t\tif err := docker.ContainerStart(ctx, registryContainer, types.ContainerStartOptions{}); err != nil {\n\t\t\t\tlog.Warnf(\"Failed to start the registry container '%s', try starting it manually via `docker start %s`\", registryContainer, 
registryContainer)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Debugln(\"No registry container found. Proceeding.\")\n\t\t}\n\n\t\tlog.Println(\"...Starting server\")\n\t\tif err := docker.ContainerStart(ctx, cluster.server.ID, types.ContainerStartOptions{}); err != nil {\n\t\t\treturn fmt.Errorf(\" Couldn't start server for cluster %s\\n%+v\", cluster.name, err)\n\t\t}\n\n\t\tif len(cluster.workers) > 0 {\n\t\t\tlog.Printf(\"...Starting %d workers\\n\", len(cluster.workers))\n\t\t\tfor _, worker := range cluster.workers {\n\t\t\t\tif err := docker.ContainerStart(ctx, worker.ID, types.ContainerStartOptions{}); err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tlog.Printf(\"SUCCESS: Started cluster [%s]\", cluster.name)\n\t}\n\n\treturn nil\n}", "func (o *PersistConfig) SetClusterVersion(v *semver.Version) {\n\tatomic.StorePointer(&o.clusterVersion, unsafe.Pointer(v))\n}", "func Configure(p *config.Provider) {\n\tp.AddResourceConfigurator(\"aws_neptune_cluster\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\n\t\tr.ExternalName = config.ExternalName{\n\t\t\tSetIdentifierArgumentFn: func(base map[string]interface{}, name string) {\n\t\t\t\tbase[\"cluster_identifier\"] = name\n\t\t\t},\n\n\t\t\tOmittedFields: []string{\n\t\t\t\t\"cluster_identifier\",\n\t\t\t\t\"cluster_identifier_prefix\",\n\t\t\t},\n\n\t\t\tGetExternalNameFn: config.IDAsExternalName,\n\t\t\tGetIDFn: config.ExternalNameAsID,\n\t\t}\n\n\t\tr.UseAsync = true\n\n\t\tr.References[\"snapshot_identifier\"] = config.Reference{\n\t\t\tType: \"ClusterSnapshot\",\n\t\t}\n\n\t\tr.References[\"replication_source_identifier\"] = config.Reference{\n\t\t\tType: \"Cluster\",\n\t\t}\n\n\t\tr.References[\"neptune_subnet_group_name\"] = config.Reference{\n\t\t\tType: \"SubnetGroup\",\n\t\t}\n\n\t\tr.References[\"neptune_cluster_parameter_group_name\"] = config.Reference{\n\t\t\tType: \"ClusterParameterGroup\",\n\t\t}\n\n\t\tr.References[\"iam_roles\"] = 
config.Reference{\n\t\t\tType: \"github.com/crossplane-contrib/provider-jet-aws/apis/iam/v1alpha2.Role\",\n\t\t\tRefFieldName: \"IAMRoleIdRefs\",\n\t\t\tSelectorFieldName: \"IAMRoleIdSelector\",\n\t\t}\n\t})\n\n\tp.AddResourceConfigurator(\"aws_neptune_cluster_endpoint\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\n\t\tr.ExternalName = config.ExternalName{\n\t\t\tSetIdentifierArgumentFn: func(base map[string]interface{}, name string) {\n\t\t\t\tbase[\"cluster_endpoint_identifier\"] = name\n\t\t\t},\n\n\t\t\tOmittedFields: []string{\n\t\t\t\t\"cluster_endpoint_identifier\",\n\t\t\t},\n\n\t\t\tGetExternalNameFn: func(tfstate map[string]interface{}) (string, error) {\n\t\t\t\tid, ok := tfstate[\"id\"].(string)\n\t\t\t\tif !ok || id == \"\" {\n\t\t\t\t\treturn \"\", errors.New(\"cannot get id from tfstate\")\n\t\t\t\t}\n\n\t\t\t\t// my_cluster:my_cluster_endpoint\n\t\t\t\tw := strings.Split(id, \":\")\n\t\t\t\tif len(w) != 2 {\n\t\t\t\t\treturn \"\", errors.New(\"format of id should be my_cluster:my_cluster_endpoint\")\n\t\t\t\t}\n\t\t\t\treturn w[len(w)-1], nil\n\t\t\t},\n\n\t\t\tGetIDFn: func(_ context.Context, externalName string, parameters map[string]interface{}, _ map[string]interface{}) (string, error) {\n\t\t\t\tci, ok := parameters[\"cluster_identifier\"].(string)\n\t\t\t\tif !ok || ci == \"\" {\n\t\t\t\t\treturn \"\", errors.New(\"cannot get cluster_identifier from parameters\")\n\t\t\t\t}\n\t\t\t\treturn fmt.Sprintf(\"%s:%s\", ci, externalName), nil\n\t\t\t},\n\t\t}\n\n\t\tr.References[\"cluster_identifier\"] = config.Reference{\n\t\t\tType: \"Cluster\",\n\t\t}\n\t})\n\n\tp.AddResourceConfigurator(\"aws_neptune_cluster_instance\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\n\t\tr.ExternalName = config.ExternalName{\n\t\t\tSetIdentifierArgumentFn: func(base map[string]interface{}, name string) {\n\t\t\t\tbase[\"identifier\"] = name\n\t\t\t},\n\n\t\t\tOmittedFields: 
[]string{\n\t\t\t\t\"identifier\",\n\t\t\t\t\"identifier_prefix\",\n\t\t\t},\n\n\t\t\tGetExternalNameFn: config.IDAsExternalName,\n\t\t\tGetIDFn: config.ExternalNameAsID,\n\t\t}\n\n\t\tr.UseAsync = true\n\n\t\tr.References[\"cluster_identifier\"] = config.Reference{\n\t\t\tType: \"Cluster\",\n\t\t}\n\n\t\tr.References[\"neptune_parameter_group_name\"] = config.Reference{\n\t\t\tType: \"ParameterGroup\",\n\t\t}\n\n\t\tr.References[\"neptune_subnet_group_name\"] = config.Reference{\n\t\t\tType: \"SubnetGroup\",\n\t\t}\n\t})\n\n\tp.AddResourceConfigurator(\"aws_neptune_cluster_parameter_group\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\n\t\tr.ExternalName = config.NameAsIdentifier\n\t})\n\n\tp.AddResourceConfigurator(\"aws_neptune_cluster_snapshot\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\n\t\tr.ExternalName = config.ExternalName{\n\t\t\tSetIdentifierArgumentFn: func(base map[string]interface{}, name string) {\n\t\t\t\tbase[\"db_cluster_snapshot_identifier\"] = name\n\t\t\t},\n\n\t\t\tOmittedFields: []string{\n\t\t\t\t\"db_cluster_snapshot_identifier\",\n\t\t\t},\n\n\t\t\tGetExternalNameFn: config.IDAsExternalName,\n\t\t\tGetIDFn: config.ExternalNameAsID,\n\t\t}\n\n\t\tr.UseAsync = true\n\n\t\tr.References[\"db_cluster_identifier\"] = config.Reference{\n\t\t\tType: \"Cluster\",\n\t\t}\n\t})\n\n\tp.AddResourceConfigurator(\"aws_neptune_event_subscription\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\n\t\tr.ExternalName = config.NameAsIdentifier\n\t})\n\n\tp.AddResourceConfigurator(\"aws_neptune_parameter_group\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\n\t\tr.ExternalName = config.NameAsIdentifier\n\t})\n\n\tp.AddResourceConfigurator(\"aws_neptune_subnet_group\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\n\t\tr.ExternalName = config.NameAsIdentifier\n\n\t\tr.References[\"subnet_ids\"] = config.Reference{\n\t\t\tType: 
\"github.com/crossplane-contrib/provider-jet-aws/apis/ec2/v1alpha2.Subnet\",\n\t\t\tRefFieldName: \"SubnetIdRefs\",\n\t\t\tSelectorFieldName: \"SubnetIdSelector\",\n\t\t}\n\t})\n}", "func PutClusterConfig(req *restful.Request, resp *restful.Response) {\n\tconst (\n\t\thandler = \"PutClusterConfig\"\n\t)\n\tspan := v1http.SetHTTPSpanContextInfo(req, handler)\n\tdefer span.Finish()\n\n\tif err := putClsConfig(req); err != nil {\n\t\tutils.SetSpanLogTagError(span, err)\n\t\tblog.Errorf(\"%s | err: %v\", common.BcsErrStoragePutResourceFailStr, err)\n\t\tlib.ReturnRest(&lib.RestResponse{\n\t\t\tResp: resp,\n\t\t\tErrCode: common.BcsErrStoragePutResourceFail,\n\t\t\tMessage: common.BcsErrStoragePutResourceFailStr})\n\t\treturn\n\t}\n\tr, err := generateData(req, getCls)\n\tif err != nil {\n\t\tutils.SetSpanLogTagError(span, err)\n\t\tblog.Errorf(\"%s | err: %v\", common.BcsErrStorageGetResourceFailStr, err)\n\t\tlib.ReturnRest(&lib.RestResponse{\n\t\t\tResp: resp,\n\t\t\tErrCode: common.BcsErrStorageGetResourceFail,\n\t\t\tMessage: common.BcsErrStorageGetResourceFailStr})\n\t\treturn\n\t}\n\tlib.ReturnRest(&lib.RestResponse{Resp: resp, Data: r})\n}", "func SetKubeConfig(project, zone, cluster, kubeconfig string) error {\n\tif err := os.Setenv(\"KUBECONFIG\", kubeconfig); err != nil {\n\t\treturn err\n\t}\n\n\tclusterJSON, err := util.ShellSilent(\n\t\t\"gcloud container clusters describe %s --project=%s --zone=%s --format=json\",\n\t\tcluster, project, zone)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclusterObj := container.Cluster{}\n\tif err = json.Unmarshal([]byte(clusterJSON), &clusterObj); err != nil {\n\t\treturn err\n\t}\n\n\tif clusterObj.MasterAuth == nil ||\n\t\t(len(clusterObj.MasterAuth.ClientCertificate) == 0 && len(clusterObj.MasterAuth.ClientKey) == 0) {\n\t\t_, err := util.ShellSilent(\n\t\t\t\"gcloud container clusters get-credentials %s --project=%s --zone=%s\",\n\t\t\tcluster, project, zone)\n\t\treturn err\n\t}\n\n\tca, err := 
base64.StdEncoding.DecodeString(clusterObj.MasterAuth.ClusterCaCertificate)\n\tif err != nil {\n\t\treturn err\n\t}\n\tclientCert, err := base64.StdEncoding.DecodeString(clusterObj.MasterAuth.ClientCertificate)\n\tif err != nil {\n\t\treturn err\n\t}\n\tclientKey, err := base64.StdEncoding.DecodeString(clusterObj.MasterAuth.ClientKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconfig := clientapi.Config{\n\t\tAPIVersion: \"v1\",\n\t\tKind: \"Config\",\n\t\tClusters: []clientapi.NamedCluster{\n\t\t\t{\n\t\t\t\tName: cluster,\n\t\t\t\tCluster: clientapi.Cluster{\n\t\t\t\t\tServer: \"https://\" + clusterObj.Endpoint,\n\t\t\t\t\tCertificateAuthorityData: ca,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tAuthInfos: []clientapi.NamedAuthInfo{\n\t\t\t{\n\t\t\t\tName: cluster,\n\t\t\t\tAuthInfo: clientapi.AuthInfo{\n\t\t\t\t\tClientCertificateData: clientCert,\n\t\t\t\t\tClientKeyData: clientKey,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tContexts: []clientapi.NamedContext{\n\t\t\t{\n\t\t\t\tName: cluster,\n\t\t\t\tContext: clientapi.Context{\n\t\t\t\t\tCluster: cluster,\n\t\t\t\t\tAuthInfo: cluster,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tCurrentContext: cluster,\n\t}\n\n\tkubeconfigData, err := yaml.Marshal(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(kubeconfig, kubeconfigData, 0666)\n}", "func (c *cloud) Clusters() (cloudprovider.Clusters, bool) {\n\tklog.V(4).Infof(\"Clusters called\")\n\treturn nil, false\n}", "func (a ClustersApi) ClustersUuidPut(uuid string, body ClusterIntentInput) (*ClusterIntentResponse, *APIResponse, error) {\n\n\tvar httpMethod = \"Put\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + \"/clusters/{uuid}\"\n\tpath = strings.Replace(path, \"{\"+\"uuid\"+\"}\", fmt.Sprintf(\"%v\", uuid), -1)\n\n\theaderParams := make(map[string]string)\n\tqueryParams := url.Values{}\n\tformParams := make(map[string]string)\n\tvar postBody interface{}\n\tvar fileName string\n\tvar fileBytes []byte\n\t// authentication (basicAuth) 
required\n\n\t// http basic authentication required\n\tif a.Configuration.Username != \"\" || a.Configuration.Password != \"\" {\n\t\theaderParams[\"Authorization\"] = \"Basic \" + a.Configuration.GetBasicAuthEncodedString()\n\t}\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tswitch reflect.TypeOf(body) {\n\tcase reflect.TypeOf(\"\"):\n\t\tpostBody = body\n\tdefault:\n\t\tpostBody = &body\n\t}\n\n\tvar successPayload = new(ClusterIntentResponse)\n\thttpResponse, err := a.Configuration.APIClient.CallAPI(path, httpMethod, postBody, headerParams, queryParams, formParams, fileName, fileBytes)\n\tif err != nil {\n\t\treturn successPayload, NewAPIResponse(httpResponse.RawResponse), err\n\t}\n\terr = json.Unmarshal(httpResponse.Body(), &successPayload)\n\treturn successPayload, NewAPIResponse(httpResponse.RawResponse), err\n}", "func (c *cloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func (s *Snapshot) NumClusters(ns core.Namespace) int {\n\tif val, ok := s.clusters[ns]; ok && val != nil {\n\t\treturn len(val)\n\t\t//return val.Len()\n\t}\n\treturn 0\n}", "func EKSCluster() Option {\n\treturn 
KubernetesCluster(\"/api/v1/namespaces/amazon-cloudwatch/configmaps/cluster-info\", \"cluster.name\")\n}", "func (bc *Baiducloud) ListClusters(ctx context.Context) ([]string, error) {\n\treturn nil, fmt.Errorf(\"ListClusters unimplemented\")\n}", "func (e *ECS) ListClusters(req *ListClustersReq) (\n\t*ListClustersResp, error) {\n\tif req == nil {\n\t\treturn nil, fmt.Errorf(\"The req params cannot be nil\")\n\t}\n\n\tparams := makeParams(\"ListClusters\")\n\tif req.MaxResults > 0 {\n\t\tparams[\"maxResults\"] = strconv.Itoa(int(req.MaxResults))\n\t}\n\tif req.NextToken != \"\" {\n\t\tparams[\"nextToken\"] = req.NextToken\n\t}\n\n\tresp := new(ListClustersResp)\n\tif err := e.query(params, resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func (h *httpCloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func (c Cube) Set(x, y, z int, val []float64) {\n\tc.Data[x][y][z] = val\n}", "func FetchClusters(c *gin.Context) {\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, \"Start listing clusters\")\n\n\tvar clusters []banzaiSimpleTypes.ClusterSimple\n\tvar response []*cloud.ClusterRepresentation\n\tdatabase.Find(&clusters)\n\n\tif len(clusters) <= 0 {\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, \"No clusters found\")\n\t\tcloud.SetResponseBodyJson(c, http.StatusNotFound, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusNotFound,\n\t\t\tcloud.JsonKeyMessage: \"No clusters found!\",\n\t\t})\n\t\treturn\n\t}\n\n\tfor _, cl := range clusters {\n\t\tclust := cloud.GetClusterRepresentation(&cl)\n\t\tif clust != nil {\n\t\t\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, fmt.Sprintf(\"Append %#v cluster representation to response\", clust))\n\t\t\tresponse = append(response, clust)\n\t\t}\n\n\t}\n\tcloud.SetResponseBodyJson(c, http.StatusOK, gin.H{\n\t\tcloud.JsonKeyStatus: http.StatusOK,\n\t\tcloud.JsonKeyData: response,\n\t})\n}", "func (cc cacheCluster) SetCtx(ctx context.Context, key string, val any) 
error {\n\tc, ok := cc.dispatcher.Get(key)\n\tif !ok {\n\t\treturn cc.errNotFound\n\t}\n\n\treturn c.(Cache).SetCtx(ctx, key, val)\n}", "func (a *Client) ListClusters(params *ListClustersParams, authInfo runtime.ClientAuthInfoWriter) (*ListClustersOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListClustersParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"ListClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/v1/clusters\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ListClustersReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ListClustersOK), nil\n\n}", "func (km *KMeans) Cluster(\n\tdata []Point, numberOfClusters uint,\n\tinitializer ClusterInitializer) ([]Cluster, error) {\n\n\terr := km.load(data, numberOfClusters)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tkm.maxLoops = 50\n\tkm.tolerance = .01\n\n\tinitializer.initialize(&km.Clusterer)\n\tkm.assign()\n\n\tloopCount := 0\n\tfor {\n\t\tloopCount += 1\n\t\tif loopCount > km.maxLoops {\n\t\t\terr = errors.New(fmt.Sprintf(\"Maximum loops reached before convergence: %v\", loopCount))\n\t\t\treturn km.clusters, err\n\t\t}\n\n\t\tif km.recalcCenters() {\n\t\t\tfmt.Printf(\"Stopped after %v iterations.\", loopCount)\n\t\t\treturn km.clusters, nil\n\t\t}\n\n\t\tkm.assign()\n\t}\n}", "func (nn *FeedForward) SetContexts(nContexts int, initValues [][]*seal.Ciphertext) {\n\tif initValues == nil {\n\t\tinitValues = make([][]*seal.Ciphertext, nContexts)\n\n\t\tfor i := 0; i < nContexts; i++ {\n\t\t\tinitValues[i] = nn.vector(nn.NHiddens, 0.5)\n\t\t}\n\t}\n\n\tnn.Contexts = initValues\n}", "func WorkerClustersSelector() string {\n\treturn 
LabelClusterOn + \"=\" + LabelClusterOnValue\n}", "func (c *clusterApi) setSecret(w http.ResponseWriter, r *http.Request) {\n\tmethod := \"setSecret\"\n\tvar secReq secrets.SetSecretRequest\n\tparams := r.URL.Query()\n\tsecretID := params[secrets.SecretKey]\n\n\tif len(secretID) == 0 || secretID[0] == \"\" {\n\t\tc.sendError(c.name, method, w, \"Missing secret ID\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tif err := json.NewDecoder(r.Body).Decode(&secReq); err != nil {\n\t\tc.sendError(c.name, method, w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tinst, err := clustermanager.Inst()\n\tif err != nil {\n\t\tc.sendError(c.name, method, w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\terr = inst.SecretSet(secretID[0], secReq.SecretValue)\n\tif err != nil {\n\t\tc.sendError(c.name, method, w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n}" ]
[ "0.623222", "0.5735698", "0.55403745", "0.5447475", "0.5413994", "0.5402886", "0.53507334", "0.5320656", "0.5263007", "0.5236094", "0.52265227", "0.52218485", "0.52067995", "0.51981896", "0.51796174", "0.5176415", "0.5163213", "0.5159419", "0.5066361", "0.5066361", "0.50452363", "0.5023948", "0.50224626", "0.5021142", "0.50125504", "0.5009232", "0.5005155", "0.50035095", "0.49919844", "0.4977796", "0.4970905", "0.49587336", "0.49570334", "0.49301648", "0.49242905", "0.49002284", "0.48916116", "0.48904333", "0.48639575", "0.48623544", "0.4857413", "0.4856471", "0.48537827", "0.48389778", "0.4838257", "0.48337394", "0.48295385", "0.48277488", "0.4823459", "0.48217618", "0.4809163", "0.48033962", "0.48000363", "0.47996438", "0.47855598", "0.47694686", "0.47616512", "0.4745739", "0.4740817", "0.4735783", "0.47300962", "0.4722891", "0.47207114", "0.47152337", "0.471396", "0.47090268", "0.4695454", "0.4687172", "0.46849898", "0.46820202", "0.46782172", "0.46708375", "0.46459532", "0.46414497", "0.46384355", "0.46367666", "0.46295547", "0.4628828", "0.46278825", "0.46237108", "0.46219113", "0.4621874", "0.46198767", "0.4614171", "0.46109232", "0.46090987", "0.46089962", "0.4608294", "0.46075982", "0.45808175", "0.4580352", "0.4579932", "0.45797235", "0.45797232", "0.45779496", "0.45730728", "0.45712999", "0.4567822", "0.4567317", "0.4567078" ]
0.680183
0
GetAliasQueues returns the AliasQueues field value
func (o *QueueManager) GetAliasQueues() []AliasQueue { if o == nil { var ret []AliasQueue return ret } return o.AliasQueues }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AliasQueues, true\n}", "func (o *QueueManager) SetAliasQueues(v []AliasQueue) {\n\to.AliasQueues = v\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) {\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}", "func GetQueues(c *gin.Context) {\n\t//TODO: create a while both back and front until value is != nil\n\tsize := len(queue)\n\tlog.Printf(\"squeue: %v\", queue)\n\tif size == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue don't have any item!\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"queues\": queue,\n\t})\n\tlog.Printf(\"equeue: %v\", queue)\n}", "func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}", "func listQueues(ENV string) []string {\n \t// Using the SDK's 
default configuration, loading additional config\n\t// and credentials values from the environment variables, shared\n\t// credentials, and shared configuration files\n\n\tsess, err := session.NewSession(&aws.Config{\n\t Region: aws.String(\"us-east-1\")},\n\t)\n\n // Create a SQS service client.\n svc := sqs.New(sess)\n\n\t//have to create a session object first\n\toutput, err := svc.ListQueues(&sqs.ListQueuesInput{\n\t QueueNamePrefix: aws.String(ENV),\n })\n\tif err != nil { panic(err) }\n\n\tqueues := output.QueueUrls\n\tfinal_queues := []string{}\n\n\tfor _, i := range queues {\n\t fmt.Println(string(*i))\n\t final_queues = append(final_queues, *i)\n }\n\treturn final_queues\n}", "func (i *Inspector) Queues() ([]string, error) {\n\treturn i.rdb.AllQueues()\n}", "func (s *API) ListQueues(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"ListQueues\")\n\n\tqueueNamePrefix := req.FormValue(\"QueueNamePrefix\")\n\tvar queues []string\n\tfor k, v := range s.sqs.queues {\n\t\tif strings.HasPrefix(k, queueNamePrefix) {\n\t\t\tqueues = append(queues, v.url)\n\t\t}\n\t}\n\n\tresponse := ListQueuesResponse{\n\t\tResult: ListQueuesResult{queues},\n\t\tMetaData: ResponseMetaData{\"00000000-0000-0000-0000-000000000000\"},\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\tenc := xml.NewEncoder(w)\n\tenc.Indent(\" \", \" \")\n\tif err := enc.Encode(response); err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t}\n}", "func (t *TopicCache) GetQueue(projectName, serviceName string) []string {\n\tt.RLock()\n\tdefer t.RUnlock()\n\n\tif len(t.inQueue[projectName+serviceName]) >= 100 {\n\t\treturn t.inQueue[projectName+serviceName][:99]\n\t}\n\n\treturn t.inQueue[projectName+serviceName]\n}", "func GetAvailableQueues() ([]string, error) {\n\tclient := &http.Client{}\n\n\tres, err := client.Get(Host + \"/queues\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trespBody := res.Body\n\tdefer respBody.Close()\n\n\tavailableQueues := struct {\n\t\tQueues 
[]string `json:\"queues\"`\n\t}{}\n\tif err := json.NewDecoder(respBody).Decode(&availableQueues); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn availableQueues.Queues, nil\n}", "func (h *Hospital) ConsumeQueues(ctx context.Context, t *testing.T) (events int, messages []string) {\n\tt.Helper()\n\treturn h.ConsumeQueuesWithLimit(ctx, t, -1, true)\n}", "func (a *adapter) queueLookup(queueName string) (*sqs.GetQueueUrlOutput, error) {\n\treturn a.sqsClient.GetQueueUrl(&sqs.GetQueueUrlInput{\n\t\tQueueName: &queueName,\n\t})\n}", "func (bs *BeanstalkdConnectionPool) ListQueues() (queueNames []string, err error) {\n\tqueueNames, err = bs.getGlobalConnect().ListTubes()\n\treturn\n}", "func QueuedAsinsGet(c *gin.Context) {\n\tvar asins []models.QueuedAsin\n\tmodels.DB.Order(\"id asc\").Preload(\"Feed\").Find(&asins)\n\n\tH := DefaultH(c)\n\tH[\"Title\"] = \"Queued Asins\"\n\tH[\"Asins\"] = asins\n\tc.HTML(200, \"admin/asins/queued_index\", H)\n}", "func (db *BotDB) GetAliases(user uint64) []string {\n\tq, err := db.sqlGetAliases.Query(user)\n\tif db.CheckError(\"GetAliases\", err) != nil {\n\t\treturn []string{}\n\t}\n\tdefer q.Close()\n\treturn db.parseStringResults(q)\n}", "func (nd *Node) GetAliases() []*Host {\n\tnd.mutex.RLock()\n\taliases := nd.aliases\n\tnd.mutex.RUnlock()\n\n\treturn aliases\n}", "func (m *Endpoint) GetAliases() []string {\n\tif m != nil {\n\t\treturn m.Aliases\n\t}\n\treturn nil\n}", "func (q VariadicQuery) GetAlias() string {\n\treturn q.Alias\n}", "func (p *Project) Queues() (*[]Queue, error) {\n qs := make([]Queue, 0)\n err := Mongo.Get(\"queue\", bson.M{\"project\": p.ID}, MaxQueuesPerProject, &qs)\n return &qs, err\n}", "func (a AliasedName) GetAlias() string { return a.Alias }", "func (q *Queue) GetQueue() []types.Event {\n\treturn q.Queue\n}", "func (a *Admin) GetChainAliases(_ *http.Request, args *GetChainAliasesArgs, reply *GetChainAliasesReply) error {\n\ta.Log.Debug(\"API called\",\n\t\tzap.String(\"service\", 
\"admin\"),\n\t\tzap.String(\"method\", \"getChainAliases\"),\n\t\tlogging.UserString(\"chain\", args.Chain),\n\t)\n\n\tid, err := ids.FromString(args.Chain)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treply.Aliases, err = a.ChainManager.Aliases(id)\n\treturn err\n}", "func (o EndpointsResponseOutput) Queue() pulumi.StringOutput {\n\treturn o.ApplyT(func(v EndpointsResponse) string { return v.Queue }).(pulumi.StringOutput)\n}", "func (s *Store) GetQueueNames() ([]string, error) {\n\tvar names []string\n\treturn names, s.db.View(func(tx *bolt.Tx) error {\n\t\treturn s.queues(tx).ForEach(func(key, value []byte) error {\n\t\t\tnames = append(names, string(key))\n\t\t\treturn nil\n\t\t})\n\t})\n}", "func (b *backend) QueueStats(ctx context.Context, qq *entroq.QueuesQuery) (map[string]*entroq.QueueStat, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).QueueStats(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get queue stats over gRPC: %w\", err)\n\t}\n\tqs := make(map[string]*entroq.QueueStat)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = &entroq.QueueStat{\n\t\t\tName: q.Name,\n\t\t\tSize: int(q.NumTasks),\n\t\t\tClaimed: int(q.NumClaimed),\n\t\t\tAvailable: int(q.NumAvailable),\n\t\t\tMaxClaims: int(q.MaxClaims),\n\t\t}\n\t}\n\treturn qs, nil\n}", "func (o *Project) GetAlias() []ProjectAlias {\n\tif o == nil || o.Alias == nil {\n\t\tvar ret []ProjectAlias\n\t\treturn ret\n\t}\n\treturn *o.Alias\n}", "func groomQueues(queues *Queues) (err kv.Error) {\n\tfor qName, qDetails := range *queues {\n\t\t// If we have enough runners drop the queue as it needs nothing done to it\n\t\tif len(qDetails.NodeGroup) == 0 || qDetails.Running >= qDetails.Ready+qDetails.NotVisible {\n\t\t\tif logger.IsTrace() {\n\t\t\t\tlogger.Trace(\"queue already handled\", \"queue\", qName, \"stack\", 
stack.Trace().TrimRuntime())\n\t\t\t}\n\t\t\tdelete(*queues, qName)\n\t\t}\n\t}\n\treturn nil\n}", "func (o EndpointsResponsePtrOutput) Queue() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *EndpointsResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Queue\n\t}).(pulumi.StringPtrOutput)\n}", "func (s QueueSetSpy) Queues() map[DeploymentID]*R11nQueue {\n\tres := s.Called()\n\treturn res.Get(0).(map[DeploymentID]*R11nQueue)\n}", "func (o TopicRuleSqsOutput) QueueUrl() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TopicRuleSqs) string { return v.QueueUrl }).(pulumi.StringOutput)\n}", "func (p *Process) CmdGetQueue(pac teoapi.Packet) (err error) {\n\tdata := pac.RemoveTrailingZero(pac.Data())\n\trequest := cdb.KeyValue{Cmd: pac.Cmd()}\n\tif err = request.UnmarshalText(data); err != nil {\n\t\treturn\n\t}\n\t// Return only Value for text requests and all fields for json\n\tresponce := request\n\tif responce.Value, err = p.tcdb.GetQueue(request.Key); err != nil {\n\t\treturn\n\t} else if !request.RequestInJSON {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), responce.Value)\n\t} else if retdata, err := responce.MarshalText(); err == nil {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), retdata)\n\t}\n\treturn\n}", "func QueueARN() reference.ExtractValueFn {\n\treturn func(mg resource.Managed) string {\n\t\tcr, ok := mg.(*Queue)\n\t\tif !ok {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn cr.Status.AtProvider.ARN\n\t}\n}", "func (q *queueImp) Lookup(queue string, group string) ([]*model.QueueInfo, error) {\n\n\tqueueInfos := make([]*model.QueueInfo, 0)\n\tswitch {\n\tcase queue == \"\":\n\t\t//Get all queue's information\n\t\tqueueMap, err := q.extendManager.GetQueueMap()\n\t\tif err != nil {\n\t\t\treturn queueInfos, errors.Trace(err)\n\t\t}\n\t\tfor queueName, groupNames := range queueMap {\n\t\t\tgroupConfigs := make([]*model.GroupConfig, 0)\n\t\t\tfor _, groupName := range groupNames {\n\t\t\t\tconfig, err := 
q.extendManager.GetGroupConfig(groupName, queueName)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn queueInfos, errors.Trace(err)\n\t\t\t\t}\n\t\t\t\tif config != nil {\n\t\t\t\t\tgroupConfigs = append(groupConfigs, &model.GroupConfig{\n\t\t\t\t\t\tGroup: config.Group,\n\t\t\t\t\t\tWrite: config.Write,\n\t\t\t\t\t\tRead: config.Read,\n\t\t\t\t\t\tUrl: config.Url,\n\t\t\t\t\t\tIps: config.Ips,\n\t\t\t\t\t})\n\t\t\t\t} else {\n\t\t\t\t\tlog.Warnf(\"config is nil queue:%s, group:%s\", queueName, groupName)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tctime, _ := q.extendManager.QueueCreateTime(queueName)\n\t\t\tqueueInfos = append(queueInfos, &model.QueueInfo{\n\t\t\t\tQueue: queueName,\n\t\t\t\tCtime: ctime,\n\t\t\t\tLength: 0,\n\t\t\t\tGroups: groupConfigs,\n\t\t\t})\n\t\t}\n\tcase queue != \"\" && group == \"\":\n\t\t//Get a queue's all groups information\n\t\tqueueMap, err := q.extendManager.GetQueueMap()\n\t\tif err != nil {\n\t\t\treturn queueInfos, errors.Trace(err)\n\t\t}\n\t\tgroupNames, exists := queueMap[queue]\n\t\tif !exists {\n\t\t\tbreak\n\t\t}\n\t\tgroupConfigs := make([]*model.GroupConfig, 0)\n\t\tfor _, gName := range groupNames {\n\t\t\tconfig, err := q.extendManager.GetGroupConfig(gName, queue)\n\t\t\tif err != nil {\n\t\t\t\treturn queueInfos, errors.Trace(err)\n\t\t\t}\n\t\t\tif config != nil {\n\t\t\t\tgroupConfigs = append(groupConfigs, &model.GroupConfig{\n\t\t\t\t\tGroup: config.Group,\n\t\t\t\t\tWrite: config.Write,\n\t\t\t\t\tRead: config.Read,\n\t\t\t\t\tUrl: config.Url,\n\t\t\t\t\tIps: config.Ips,\n\t\t\t\t})\n\t\t\t} else {\n\t\t\t\tlog.Warnf(\"config is nil queue:%s, group:%s\", queue, gName)\n\t\t\t}\n\t\t}\n\n\t\tctime, _ := q.extendManager.QueueCreateTime(queue)\n\t\tqueueInfos = append(queueInfos, &model.QueueInfo{\n\t\t\tQueue: queue,\n\t\t\tCtime: ctime,\n\t\t\tLength: 0,\n\t\t\tGroups: groupConfigs,\n\t\t})\n\tdefault:\n\t\t//Get group's information by queue and group's name\n\t\tconfig, err := q.extendManager.GetGroupConfig(group, queue)\n\t\tif 
err != nil {\n\t\t\treturn queueInfos, errors.Trace(err)\n\t\t}\n\t\tgroupConfigs := make([]*model.GroupConfig, 0)\n\t\tif config != nil {\n\t\t\tgroupConfigs = append(groupConfigs, &model.GroupConfig{\n\t\t\t\tGroup: config.Group,\n\t\t\t\tWrite: config.Write,\n\t\t\t\tRead: config.Read,\n\t\t\t\tUrl: config.Url,\n\t\t\t\tIps: config.Ips,\n\t\t\t})\n\t\t}\n\n\t\tctime, _ := q.extendManager.QueueCreateTime(queue)\n\t\tqueueInfos = append(queueInfos, &model.QueueInfo{\n\t\t\tQueue: queue,\n\t\t\tCtime: ctime,\n\t\t\tLength: 0,\n\t\t\tGroups: groupConfigs,\n\t\t})\n\t}\n\treturn queueInfos, nil\n}", "func (s *SQSServer) pollQueues(pollctx, taskctx context.Context, queues []QueueConf) error {\n\tfor _, qconf := range queues {\n\t\tq, err := s.getQueue(pollctx, qconf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq := &sqs.GetQueueAttributesInput{\n\t\t\tAttributeNames: []types.QueueAttributeName{(\"VisibilityTimeout\")},\n\t\t\tQueueUrl: &q.url,\n\t\t}\n\t\tresp, err := s.sqsSrv(q.QueueConf).GetQueueAttributes(pollctx, req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to get queue attributes for '%s' - %s\", q.Name, err.Error())\n\t\t}\n\t\tto := resp.Attributes[\"VisibilityTimeout\"]\n\t\tif to == \"\" {\n\t\t\treturn fmt.Errorf(\"No visibility timeout returned by SQS for queue '%s'\", q.Name)\n\t\t}\n\t\tvisTimeout, err := strconv.Atoi(to)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to convert visibility timeout from '%s' to int - '%s'\", to, err.Error())\n\t\t}\n\t\t// Each queue runs in a dedicated go routine.\n\t\tgo func(vt int32) {\n\t\t\ts.queuePollers.Add(1)\n\t\t\tdefer s.queuePollers.Done()\n\t\t\ts.run(pollctx, taskctx, q, vt)\n\t\t}(int32(visTimeout))\n\t}\n\n\treturn nil\n}", "func (client *Client) GetQueueURL(name string) (string, error) {\n\tvar parsedResponse GetQueueURLResult\n\turl := NewGetQueueURLRequest(client.EndPointURL, name).URL()\n\n\tresp, err := client.Get(url)\n\tif err != nil {\n\t\treturn \"\", 
err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\treturn \"\", errors.New(string(body))\n\t}\n\n\terr = xml.Unmarshal(body, &parsedResponse)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn parsedResponse.QueueURL, nil\n}", "func (o QueueOutput) QueueArn() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Queue) pulumi.StringOutput { return v.QueueArn }).(pulumi.StringOutput)\n}", "func (ClearTrans) GetQueue() string {\n\treturn \"cy_rubik_clearTrans\"\n}", "func GetQueueEndpoint(baseUri string, accountName string) string {\n\treturn fmt.Sprintf(\"https://%s.queue.%s\", accountName, baseUri)\n}", "func (svc *AdminBuildService) GetQueue(opt *GetQueueOptions) (*[]library.BuildQueue, *Response, error) {\n\t// set the API endpoint path we send the request to\n\tu := \"/api/v1/admin/builds/queue\"\n\n\t// add optional arguments if supplied\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// BuildQueue type we want to return\n\tv := new([]library.BuildQueue)\n\n\tresp, err := svc.client.Call(\"GET\", u, nil, v)\n\n\treturn v, resp, err\n}", "func (s *Filters) SetQueues(v []*string) *Filters {\n\ts.Queues = v\n\treturn s\n}", "func (a *Client) GetMsgVpnJndiQueues(params *GetMsgVpnJndiQueuesParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnJndiQueuesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnJndiQueuesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnJndiQueues\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/jndiQueues\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnJndiQueuesReader{formats: a.formats},\n\t\tAuthInfo: 
authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnJndiQueuesOK), nil\n\n}", "func (o DotnetSettingsOutput) ForcedNamespaceAliases() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v DotnetSettings) []string { return v.ForcedNamespaceAliases }).(pulumi.StringArrayOutput)\n}", "func (c *Client) GetBotAliases(ctx context.Context, params *GetBotAliasesInput, optFns ...func(*Options)) (*GetBotAliasesOutput, error) {\n\tif params == nil {\n\t\tparams = &GetBotAliasesInput{}\n\t}\n\n\tresult, metadata, err := c.invokeOperation(ctx, \"GetBotAliases\", params, optFns, addOperationGetBotAliasesMiddlewares)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*GetBotAliasesOutput)\n\tout.ResultMetadata = metadata\n\treturn out, nil\n}", "func (s *UserDataFilters) SetQueues(v []*string) *UserDataFilters {\n\ts.Queues = v\n\treturn s\n}", "func (player *musicPlayer) getQueueInfo() ([]string, error) {\n\tplayer.Lock()\n\tdefer player.Unlock()\n\tif len(player.state.queue) == 0 {\n\t\treturn nil, errors.New(cannot_get_queue_info_msg)\n\t}\n\t//make a copy to the queue\n\tcopy := make([]string, 0, len(player.state.queue))\n\tfor _, el := range player.state.queue {\n\t\tcopy = append(copy, el)\n\t}\n\treturn copy, nil\n}", "func (o ServiceBusQueueOutputDataSourceOutput) QueueName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ServiceBusQueueOutputDataSource) *string { return v.QueueName }).(pulumi.StringPtrOutput)\n}", "func (o DotnetSettingsPtrOutput) ForcedNamespaceAliases() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *DotnetSettings) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ForcedNamespaceAliases\n\t}).(pulumi.StringArrayOutput)\n}", "func (o DiagnosticsStorageAccountConfigOutput) QueueEndpoint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DiagnosticsStorageAccountConfig) string { return v.QueueEndpoint 
}).(pulumi.StringOutput)\n}", "func (o DotnetSettingsResponseOutput) ForcedNamespaceAliases() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v DotnetSettingsResponse) []string { return v.ForcedNamespaceAliases }).(pulumi.StringArrayOutput)\n}", "func GetHostAliases(ctx context.Context) ([]string, error) {\n\tname, err := GetHostname(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"couldn't extract a host alias from the kubelet: %w\", err)\n\t}\n\tif err := validate.ValidHostname(name); err != nil {\n\t\treturn nil, fmt.Errorf(\"host alias from kubelet is not valid: %w\", err)\n\t}\n\treturn []string{name}, nil\n}", "func PopulateQueues(c *gin.Context) {\n\tif queue == nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue doesn't exist, please create it!!!\",\n\t\t})\n\t\treturn\n\t}\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"roberto\",\n\t\tEMAIL: \"roberto@rr.com\",\n\t\tUUID: \"1\",\n\t\tMSG: \"lindo\",\n\t})\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"alex\",\n\t\tEMAIL: \"alex@rr.com\",\n\t\tUUID: \"2\",\n\t\tMSG: \"lindox\",\n\t})\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"ale\",\n\t\tEMAIL: \"ale@rr.com\",\n\t\tUUID: \"3\",\n\t\tMSG: \"linduxo\",\n\t})\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"msg\": queue,\n\t})\n}", "func (s *Store) GetQueueSettings(name string) (QueueSettings, error) {\n\tvar settings QueueSettings\n\treturn settings, s.db.View(func(tx *bolt.Tx) error {\n\t\ts, err := s.getQueueSettings(tx, name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsettings = s\n\t\treturn nil\n\t})\n}", "func (o LookupQueueResultOutput) AppEngineHttpQueue() AppEngineHttpQueueResponseOutput {\n\treturn o.ApplyT(func(v LookupQueueResult) AppEngineHttpQueueResponse { return v.AppEngineHttpQueue }).(AppEngineHttpQueueResponseOutput)\n}", "func (psc *PartitionSchedulingContext) GetQueue(queueName string) *SchedulingQueue {\n psc.lock.RLock()\n defer psc.lock.RUnlock()\n\n return psc.queues[queueName]\n}", "func 
(_PlasmaFramework *PlasmaFrameworkCaller) ExitsQueues(opts *bind.CallOpts, arg0 [32]byte) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _PlasmaFramework.contract.Call(opts, out, \"exitsQueues\", arg0)\n\treturn *ret0, err\n}", "func (this *Queue) GetQueue() (val Mensaje, err error) {\n\t// Primero determina si la cola está vacía\n\tif this.rear == this.front {\n\t\treturn Mensaje{0, \"0\", \"0\"}, errors.New(\"Cola de Mensajes Vacia\")\n\t}\n\tthis.front++\n\tval = this.array[this.front]\n\treturn val, err\n}", "func (storage *SrvStorage) GetVhostQueues(vhost string) []*queue.Queue {\n\tvar queues []*queue.Queue\n\tstorage.db.Iterate(\n\t\tfunc(key []byte, value []byte) {\n\t\t\tif !bytes.HasPrefix(key, []byte(queuePrefix)) || getVhostFromKey(string(key)) != vhost {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tq := &queue.Queue{}\n\t\t\tq.Unmarshal(value, storage.protoVersion)\n\t\t\tqueues = append(queues, q)\n\t\t},\n\t)\n\n\treturn queues\n}", "func (o DiagnosticsStorageAccountConfigResponseOutput) QueueEndpoint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DiagnosticsStorageAccountConfigResponse) string { return v.QueueEndpoint }).(pulumi.StringOutput)\n}", "func DeclareQueues(ch *amqp.Channel, queueName string) (amqp.Queue, amqp.Queue) {\n\treturn declareQueue(ch, queueName), declareResponseQueue(ch, queueName)\n}", "func (o ServiceBusQueueOutputDataSourceResponseOutput) QueueName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ServiceBusQueueOutputDataSourceResponse) *string { return v.QueueName }).(pulumi.StringPtrOutput)\n}", "func (c *apiConsumers) TeamsQueue() <-chan *TeamDTO {\n\treturn c.queue\n}", "func (o TopicRuleErrorActionSqsOutput) QueueUrl() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TopicRuleErrorActionSqs) string { return v.QueueUrl }).(pulumi.StringOutput)\n}", "func (_Rootchain *RootchainCaller) ExitsQueues(opts *bind.CallOpts, arg0 common.Address) (common.Address, error) {\n\tvar 
(\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _Rootchain.contract.Call(opts, out, \"exitsQueues\", arg0)\n\treturn *ret0, err\n}", "func GetHostAliases(ctx context.Context) ([]string, error) {\n\treturn nil, fmt.Errorf(\"Kubernetes support not build: couldn't extract a host alias from the kubelet\")\n}", "func (b *backend) QueueStats(ctx context.Context, qq *entroq.QueuesQuery) (map[string]*entroq.QueueStat, error) {\n\tdefer un(lock(b))\n\n\tnow, err := b.Time(ctx)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"tasks current time\")\n\t}\n\n\tqs := make(map[string]*entroq.QueueStat)\n\tfor q, heap := range b.heaps {\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) {\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\t// Find out how many tasks are claimed. This means they both have a\n\t\t// claimant and their arrival time is in the future.\n\t\tclaimed := 0\n\t\tavailable := 0\n\t\tmaxClaims := 0\n\t\tfor _, item := range heap.Items() {\n\t\t\tif item.task.At.After(now) {\n\t\t\t\tif item.task.Claimant != uuid.Nil {\n\t\t\t\t\tclaimed++\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tavailable++\n\t\t\t}\n\t\t\tif int(item.task.Claims) > maxClaims {\n\t\t\t\tmaxClaims = int(item.task.Claims)\n\t\t\t}\n\t\t}\n\t\tqs[q] = &entroq.QueueStat{\n\t\t\tName: q,\n\t\t\tSize: heap.Len(),\n\t\t\tClaimed: claimed,\n\t\t\tAvailable: available,\n\t\t\tMaxClaims: maxClaims,\n\t\t}\n\t}\n\treturn qs, nil\n}", "func (s *API) GetQueueURL(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"GetQueueURL\")\n\n\tqueueName := req.FormValue(\"QueueName\")\n\tqueue, ok := s.sqs.queues[queueName]\n\tif !ok {\n\t\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\terror := ErrorResponse{\n\t\t\tError: ErrorResult{\n\t\t\t\tType: \"Not 
Found\",\n\t\t\t\tCode: \"AWS.SimpleQueueService.NonExistentQueue\",\n\t\t\t\tMessage: \"The specified queue does not exist for this wsdl version.\",\n\t\t\t},\n\t\t\tRequestId: \"00000000-0000-0000-0000-000000000000\",\n\t\t}\n\t\tenc := xml.NewEncoder(w)\n\t\tenc.Indent(\" \", \" \")\n\t\tif err := enc.Encode(error); err != nil {\n\t\t\tlog.Errorf(\"error: %s\", err)\n\t\t}\n\t\treturn\n\t}\n\n\tresponse := GetQueueURLResponse{\n\t\tResult: GetQueueURLResult{queue.url},\n\t\tMetaData: ResponseMetaData{\"00000000-0000-0000-0000-000000000000\"},\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\tenc := xml.NewEncoder(w)\n\tenc.Indent(\" \", \" \")\n\tif err := enc.Encode(response); err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t}\n\n}", "func (_PlasmaFramework *PlasmaFrameworkSession) ExitsQueues(arg0 [32]byte) (common.Address, error) {\n\treturn _PlasmaFramework.Contract.ExitsQueues(&_PlasmaFramework.CallOpts, arg0)\n}", "func DeleteTorrentFromQueues(torrentHash string, db *storm.DB) {\n\ttorrentQueues := Storage.FetchQueues(db)\n\tfor x, torrentHashActive := range torrentQueues.ActiveTorrents { //FOR EXTRA CAUTION deleting it from both queues in case a mistake occurred.\n\t\tif torrentHash == torrentHashActive {\n\t\t\ttorrentQueues.ActiveTorrents = append(torrentQueues.ActiveTorrents[:x], torrentQueues.ActiveTorrents[x+1:]...)\n\t\t\tLogger.Info(\"Removing Torrent from Active: \", torrentHash)\n\t\t}\n\t}\n\tfor x, torrentHashQueued := range torrentQueues.QueuedTorrents { //FOR EXTRA CAUTION deleting it from both queues in case a mistake occurred.\n\t\tif torrentHash == torrentHashQueued {\n\t\t\ttorrentQueues.QueuedTorrents = append(torrentQueues.QueuedTorrents[:x], torrentQueues.QueuedTorrents[x+1:]...)\n\t\t\tLogger.Info(\"Removing Torrent from Queued\", torrentHash)\n\t\t}\n\t}\n\tfor x, torrentHashActive := range torrentQueues.ForcedTorrents { //FOR EXTRA CAUTION deleting it from all queues in case a mistake occurred.\n\t\tif 
torrentHash == torrentHashActive {\n\t\t\ttorrentQueues.ForcedTorrents = append(torrentQueues.ForcedTorrents[:x], torrentQueues.ForcedTorrents[x+1:]...)\n\t\t\tLogger.Info(\"Removing Torrent from Forced: \", torrentHash)\n\t\t}\n\t}\n\tStorage.UpdateQueues(db, torrentQueues)\n\tLogger.WithFields(logrus.Fields{\"Torrent Hash\": torrentHash, \"TorrentQueues\": torrentQueues}).Info(\"Removing Torrent from all Queues\")\n}", "func GetInfoFromQueue(q amqp.Queue) QueueInfo {\n\n\treturn QueueInfo{\n\t\tName: q.Name,\n\t\tConsumers: q.Consumers,\n\t\tMessages: q.Messages,\n\t}\n}", "func (a *Client) GetMsgVpnQueues(params *GetMsgVpnQueuesParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnQueuesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnQueuesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnQueues\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/queues\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnQueuesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnQueuesOK), nil\n\n}", "func (b *Backend) GetLeagueByQueue(league string, queue string) (*riotclient.LeagueListDTO, error) {\n\treturn nil, fmt.Errorf(\"Not implemented\")\n}", "func RemoveDuplicatesFromQueues(db *storm.DB) {\n\ttorrentQueues := Storage.FetchQueues(db)\n\tfor _, torrentHash := range torrentQueues.ActiveTorrents {\n\t\tfor i, queuedHash := range torrentQueues.QueuedTorrents {\n\t\t\tif torrentHash == queuedHash {\n\t\t\t\ttorrentQueues.QueuedTorrents = append(torrentQueues.QueuedTorrents[:i], 
torrentQueues.QueuedTorrents[i+1:]...)\n\t\t\t}\n\t\t}\n\t}\n\tStorage.UpdateQueues(db, torrentQueues)\n}", "func (_PlasmaFramework *PlasmaFrameworkCallerSession) ExitsQueues(arg0 [32]byte) (common.Address, error) {\n\treturn _PlasmaFramework.Contract.ExitsQueues(&_PlasmaFramework.CallOpts, arg0)\n}", "func getQueueUrl(id string) (queueUrl string, retErr error) {\n\n\t//Creazione client DynamoDB\n\tsvc := dynamodb.New(common.Sess)\n\n\tresult, err := svc.GetItem(&dynamodb.GetItemInput{\n\t\tTableName: aws.String(subTableName),\n\t\tKey: map[string]*dynamodb.AttributeValue{\n\t\t\t\"SubID\": {\n\t\t\t\tS: aws.String(id),\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\tcommon.Warning(\"[BROKER] Errore nel retreive del subscriber con ID: \" + id + \".\\n\" + err.Error())\n\t\treturn \"\", err\n\t}\n\n\titem := common.SubscriberEntry{}\n\n\terr = dynamodbattribute.UnmarshalMap(result.Item, &item)\n\tif err != nil {\n\t\tcommon.Warning(\"[BROKER] Errore nell'unmarshaling del risultato\")\n\t\treturn \"\", err\n\t}\n\tif item.SubID == \"\" {\n\t\tcommon.Warning(\"[BROKER] Nessun subscriber trovato con id \" + id)\n\t\treturn \"\", errors.New(\"no item found\")\n\t}\n\n\tcommon.Info(\"[BROKER] Subscriber trovato: \" + item.SubID + \"\\n\\t\" + item.QueueURL)\n\n\treturn item.QueueURL, nil\n}", "func ListMatchmakingQueues(settings *playfab.Settings, postData *ListMatchmakingQueuesRequestModel, entityToken string) (*ListMatchmakingQueuesResultModel, error) {\n if entityToken == \"\" {\n return nil, playfab.NewCustomError(\"entityToken should not be an empty string\", playfab.ErrorGeneric)\n }\n b, errMarshal := json.Marshal(postData)\n if errMarshal != nil {\n return nil, playfab.NewCustomError(errMarshal.Error(), playfab.ErrorMarshal)\n }\n\n sourceMap, err := playfab.Request(settings, b, \"/Match/ListMatchmakingQueues\", \"X-EntityToken\", entityToken)\n if err != nil {\n return nil, err\n }\n \n result := &ListMatchmakingQueuesResultModel{}\n\n config := 
mapstructure.DecoderConfig{\n DecodeHook: playfab.StringToDateTimeHook,\n Result: result,\n }\n \n decoder, errDecoding := mapstructure.NewDecoder(&config)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n \n errDecoding = decoder.Decode(sourceMap)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n\n return result, nil\n}", "func queueName(name string, withoutPrefix bool) string {\n\t// Allow using a nameless queue\n\tif name == \"\" || withoutPrefix {\n\t\treturn name\n\t}\n\n\treturn \"relay.\" + name\n}", "func (m *MatchInfo) GetQueue(client *static.Client) (static.Queue, error) {\n\treturn client.GetQueue(m.QueueID)\n}", "func (o DiagnosticsStorageAccountConfigResponsePtrOutput) QueueEndpoint() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DiagnosticsStorageAccountConfigResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.QueueEndpoint\n\t}).(pulumi.StringPtrOutput)\n}", "func (o *QueueManager) GetClusterQueues() []ClusterQueue {\n\tif o == nil {\n\t\tvar ret []ClusterQueue\n\t\treturn ret\n\t}\n\n\treturn o.ClusterQueues\n}", "func (taskBolt *TaskBolt) ReadQueue(n int) []*ga4gh_task_exec.Job {\n\tjobs := make([]*ga4gh_task_exec.Job, 0)\n\ttaskBolt.db.View(func(tx *bolt.Tx) error {\n\n\t\t// Iterate over the JobsQueued bucket, reading the first `n` jobs\n\t\tc := tx.Bucket(JobsQueued).Cursor()\n\t\tfor k, _ := c.First(); k != nil && len(jobs) < n; k, _ = c.Next() {\n\t\t\tid := string(k)\n\t\t\tjob := getJob(tx, id)\n\t\t\tjobs = append(jobs, job)\n\t\t}\n\t\treturn nil\n\t})\n\treturn jobs\n}", "func queueName(src *v1alpha1.AWSS3Source) string {\n\treturn \"s3-events_\" + src.Spec.ARN.Resource\n}", "func (s *Service) Queue() amboy.Queue {\n\treturn s.queue\n}", "func (svc *SQS) XGetQueueURL(ctx context.Context, queueName string) (queueURL string, err error) {\n\tresp, err := svc.GetQueueURL(ctx, 
GetQueueURLRequest{\n\t\tQueueName: queueName,\n\t})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn resp.QueueURL, nil\n}", "func (o DiagnosticsStorageAccountConfigPtrOutput) QueueEndpoint() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DiagnosticsStorageAccountConfig) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.QueueEndpoint\n\t}).(pulumi.StringPtrOutput)\n}", "func (a *Alias) ToString() string {\n\tif len(a.AdvancedAliases) != 0 {\n\t\taliases := make([]string, len(a.AdvancedAliases))\n\t\tfor i, advancedAlias := range a.AdvancedAliases {\n\t\t\taliases[i] = aws.StringValue(advancedAlias.Alias)\n\t\t}\n\t\treturn strings.Join(aliases, \",\")\n\t}\n\tif a.StringSliceOrString.String != nil {\n\t\treturn aws.StringValue(a.StringSliceOrString.String)\n\t}\n\treturn strings.Join(a.StringSliceOrString.StringSlice, \",\")\n}", "func (mb *client) ReadFIFOQueue(address uint16) (results []byte, err error) {\n\trequest := ProtocolDataUnit{\n\t\tFunctionCode: FuncCodeReadFIFOQueue,\n\t\tData: dataBlock(address),\n\t}\n\tresponse, err := mb.send(&request)\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(response.Data) < 4 {\n\t\terr = fmt.Errorf(\"modbus: response data size '%v' is less than expected '%v'\", len(response.Data), 4)\n\t\treturn\n\t}\n\tcount := int(binary.BigEndian.Uint16(response.Data))\n\tif count != (len(response.Data) - 1) {\n\t\terr = fmt.Errorf(\"modbus: response data size '%v' does not match count '%v'\", len(response.Data)-1, count)\n\t\treturn\n\t}\n\tcount = int(binary.BigEndian.Uint16(response.Data[2:]))\n\tif count > 31 {\n\t\terr = fmt.Errorf(\"modbus: fifo count '%v' is greater than expected '%v'\", count, 31)\n\t\treturn\n\t}\n\tresults = response.Data[4:]\n\treturn\n}", "func (s *Store) GetQueueStatistics(name string) (QueueStatistics, error) {\n\treturn QueueStatistics{}, nil\n}", "func (i *aliasTarget) getAliasTargetHostedZoneID() string {\n\tif i == nil {\n\t\treturn \"\"\n\t}\n\treturn i.HostedZoneID\n}", 
"func (o RuleTargetDeadLetterQueueOutput) Arn() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v RuleTargetDeadLetterQueue) *string { return v.Arn }).(pulumi.StringPtrOutput)\n}", "func (i *aliasTarget) getAliasDNSName() string {\n\tif i == nil {\n\t\treturn \"\"\n\t}\n\treturn i.DNSName\n}", "func UConverterGetAliases(arg1 string, arg2 *UErrorCode) (_swig_ret []string)", "func (c *EmployeeClient) QueryQueue(e *Employee) *QueueQuery {\n\tquery := &QueueQuery{config: c.config}\n\tquery.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {\n\t\tid := e.ID\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(employee.Table, employee.FieldID, id),\n\t\t\tsqlgraph.To(queue.Table, queue.FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.O2M, false, employee.QueueTable, employee.QueueColumn),\n\t\t)\n\t\tfromV = sqlgraph.Neighbors(e.driver.Dialect(), step)\n\t\treturn fromV, nil\n\t}\n\treturn query\n}", "func deleteQueues(ctx *TestContext) {\n\tfor _, q := range ctx.Queues {\n\t\tDeleteQueue(ctx, q)\n\t}\n}", "func (s *RedisDeviceStore) DownlinkQueue(appID, devID string) (DownlinkQueue, error) {\n\treturn &RedisDownlinkQueue{\n\t\tappID: appID,\n\t\tdevID: devID,\n\t\tqueues: s.queues,\n\t}, nil\n}", "func (o *SamlConfigurationProperties) GetSpPrivateKeyAlias() SamlConfigurationPropertyItemsString {\n\tif o == nil || o.SpPrivateKeyAlias == nil {\n\t\tvar ret SamlConfigurationPropertyItemsString\n\t\treturn ret\n\t}\n\treturn *o.SpPrivateKeyAlias\n}", "func (s *azureServiceBusScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) {\n\tqueuelen, err := s.GetAzureServiceBusLength(ctx)\n\n\tif err != nil {\n\t\tazureServiceBusLog.Error(err, \"error getting service bus entity length\")\n\t\treturn []external_metrics.ExternalMetricValue{}, err\n\t}\n\n\tmetric := external_metrics.ExternalMetricValue{\n\t\tMetricName: metricName,\n\t\tValue: *resource.NewQuantity(int64(queuelen), 
resource.DecimalSI),\n\t\tTimestamp: metav1.Now(),\n\t}\n\n\treturn append([]external_metrics.ExternalMetricValue{}, metric), nil\n}", "func (q *queue) GetName() string {\n\treturn q.name\n}", "func (pub *Publisher) QueueName() string {\r\n\treturn pub.queueName\r\n}", "func (c *restClient) ListQueues(ctx context.Context, req *cloudtaskspb.ListQueuesRequest, opts ...gax.CallOption) *QueueIterator {\n\tit := &QueueIterator{}\n\treq = proto.Clone(req).(*cloudtaskspb.ListQueuesRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*cloudtaskspb.Queue, string, error) {\n\t\tresp := &cloudtaskspb.ListQueuesResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v2beta3/%v/queues\", req.GetParent())\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetFilter() != \"\" {\n\t\t\tparams.Add(\"filter\", fmt.Sprintf(\"%v\", req.GetFilter()))\n\t\t}\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\t\tif req.GetReadMask() != nil {\n\t\t\treadMask, err := protojson.Marshal(req.GetReadMask())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t\tparams.Add(\"readMask\", string(readMask[1:len(readMask)-1]))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := 
gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetQueues(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}" ]
[ "0.69151425", "0.6435573", "0.6247431", "0.6227677", "0.6098082", "0.59897524", "0.58581173", "0.5692911", "0.56441885", "0.5639317", "0.5635707", "0.54951936", "0.54639095", "0.5432769", "0.53856283", "0.53819096", "0.53484887", "0.5290492", "0.5287443", "0.5241385", "0.52201736", "0.5193116", "0.51760435", "0.5118323", "0.51103204", "0.5089046", "0.5078399", "0.50702375", "0.5046697", "0.5034584", "0.50287783", "0.5027905", "0.5007089", "0.49887496", "0.49882758", "0.4979757", "0.49775475", "0.49771214", "0.4973581", "0.49435443", "0.49400744", "0.49381882", "0.49256247", "0.49200523", "0.4919063", "0.48988461", "0.48888874", "0.4886248", "0.4876699", "0.4871946", "0.48709017", "0.48704988", "0.48689255", "0.48380634", "0.48321846", "0.48276863", "0.48178735", "0.48153856", "0.48122102", "0.48113808", "0.48036042", "0.4803099", "0.47833392", "0.47506607", "0.47491416", "0.47476757", "0.4719639", "0.47042498", "0.4702077", "0.46949622", "0.46943215", "0.46889198", "0.46811938", "0.4680082", "0.4679647", "0.4675511", "0.46735367", "0.46548513", "0.46544385", "0.46489146", "0.46485028", "0.46439692", "0.46436137", "0.46430463", "0.46403083", "0.4640157", "0.46350938", "0.46334526", "0.4632484", "0.46324128", "0.46283746", "0.46224973", "0.46193987", "0.46156996", "0.4608924", "0.4605449", "0.46037555", "0.46032417", "0.4595018", "0.45928654" ]
0.7781025
0
GetAliasQueuesOk returns a tuple with the AliasQueues field value and a boolean to check if the value has been set.
func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) { if o == nil { return nil, false } return &o.AliasQueues, true }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *QueueManager) GetAliasQueues() []AliasQueue {\n\tif o == nil {\n\t\tvar ret []AliasQueue\n\t\treturn ret\n\t}\n\n\treturn o.AliasQueues\n}", "func (o *QueueManager) SetAliasQueues(v []AliasQueue) {\n\to.AliasQueues = v\n}", "func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ClusterQueues, true\n}", "func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueues, true\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues) IsYANGGoStruct() {}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues) IsYANGGoStruct() {}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) 
{\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}", "func (*OpenconfigQos_Qos_Queues) IsYANGGoStruct() {}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}", "func (c *Context) HasQueuesMap(key string) bool {\n\treturn c.makross.HasQueuesMap(key)\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue) IsYANGGoStruct() {}", "func (m *Makross) HasQueuesMap(key string) bool {\n\tif value, okay := m.QueuesMap.Load(key); okay {\n\t\tif pqueue, okay := value.(*prior.PriorityQueue); okay {\n\t\t\tif pqueue.Length() > 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (o *VnicEthAdapterPolicyInventory) GetTxQueueSettingsOk() (*VnicEthTxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.TxQueueSettings.Get(), o.TxQueueSettings.IsSet()\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues) IsYANGGoStruct() {}", "func (o *VnicEthAdapterPolicyInventory) GetRxQueueSettingsOk() (*VnicEthRxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RxQueueSettings.Get(), o.RxQueueSettings.IsSet()\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue) IsYANGGoStruct() {}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Output_Queues) Validate(opts ...ygot.ValidationOption) error {\n\tif err := 
ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Output_Queues\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue_Config) IsYANGGoStruct() {}", "func (o *Platform) GetAliasOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Alias, true\n}", "func (u *Unpackerr) haveSonarrQitem(name string) bool {\n\tfor _, server := range u.Sonarr {\n\t\tfor _, q := range server.Queue.Records {\n\t\t\tif q.Title == name {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func (_PlasmaFramework *PlasmaFrameworkCaller) ExitsQueues(opts *bind.CallOpts, arg0 [32]byte) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _PlasmaFramework.contract.Call(opts, out, \"exitsQueues\", arg0)\n\treturn *ret0, err\n}", "func (o *VnicEthAdapterPolicyInventory) GetCompletionQueueSettingsOk() (*VnicCompletionQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CompletionQueueSettings.Get(), o.CompletionQueueSettings.IsSet()\n}", "func (s *API) ListQueues(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"ListQueues\")\n\n\tqueueNamePrefix := req.FormValue(\"QueueNamePrefix\")\n\tvar queues []string\n\tfor k, v := range s.sqs.queues {\n\t\tif strings.HasPrefix(k, queueNamePrefix) {\n\t\t\tqueues = append(queues, v.url)\n\t\t}\n\t}\n\n\tresponse := ListQueuesResponse{\n\t\tResult: ListQueuesResult{queues},\n\t\tMetaData: ResponseMetaData{\"00000000-0000-0000-0000-000000000000\"},\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\tenc := xml.NewEncoder(w)\n\tenc.Indent(\" \", \" \")\n\tif err := enc.Encode(response); err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t}\n}", "func (o *VulnUpdateNotification) GetQueueIdOk() (*string, bool) {\n\tif o == nil || o.QueueId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.QueueId, 
true\n}", "func (o *Project) GetAliasOk() (*[]ProjectAlias, bool) {\n\tif o == nil || o.Alias == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Alias, true\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue_Config) IsYANGGoStruct() {}", "func (_PlasmaFramework *PlasmaFrameworkSession) ExitsQueues(arg0 [32]byte) (common.Address, error) {\n\treturn _PlasmaFramework.Contract.ExitsQueues(&_PlasmaFramework.CallOpts, arg0)\n}", "func (_Rootchain *RootchainCaller) ExitsQueues(opts *bind.CallOpts, arg0 common.Address) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _Rootchain.contract.Call(opts, out, \"exitsQueues\", arg0)\n\treturn *ret0, err\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue_State) IsYANGGoStruct() {}", "func (h *Hospital) ConsumeQueues(ctx context.Context, t *testing.T) (events int, messages []string) {\n\tt.Helper()\n\treturn h.ConsumeQueuesWithLimit(ctx, t, -1, true)\n}", "func groomQueues(queues *Queues) (err kv.Error) {\n\tfor qName, qDetails := range *queues {\n\t\t// If we have enough runners drop the queue as it needs nothing done to it\n\t\tif len(qDetails.NodeGroup) == 0 || qDetails.Running >= qDetails.Ready+qDetails.NotVisible {\n\t\t\tif logger.IsTrace() {\n\t\t\t\tlogger.Trace(\"queue already handled\", \"queue\", qName, \"stack\", stack.Trace().TrimRuntime())\n\t\t\t}\n\t\t\tdelete(*queues, qName)\n\t\t}\n\t}\n\treturn nil\n}", "func (_PlasmaFramework *PlasmaFrameworkCallerSession) ExitsQueues(arg0 [32]byte) (common.Address, error) {\n\treturn _PlasmaFramework.Contract.ExitsQueues(&_PlasmaFramework.CallOpts, arg0)\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue_State) IsYANGGoStruct() {}", "func (q *QueueWatcher) checkQueueEmpty(jobName string) (bool, error) {\n\tqueues, err := q.client.Queues()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tfor _, queue := range queues {\n\t\tif 
queue.JobName == jobName {\n\t\t\treturn queue.Count == 0, nil\n\t\t}\n\t}\n\n\t// If queue does not exist consider it empty.\n\t// QueueWatcher is not active in the initial phase during which no items\n\t// have been enqueued yet.\n\t// E.g. when active checks start, the ProductionExhausted channel has been\n\t// closed.\n\treturn true, nil\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *UserDataFilters) SetQueues(v []*string) *UserDataFilters {\n\ts.Queues = v\n\treturn s\n}", "func (o *Environment) GetQuotasOk() (*EnvironmentQuotas, bool) {\n\tif o == nil || o.Quotas == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Quotas, true\n}", "func (s *Filters) SetQueues(v []*string) *Filters {\n\ts.Queues = v\n\treturn s\n}", "func (o *SamlConfigurationProperties) GetSpPrivateKeyAliasOk() (*SamlConfigurationPropertyItemsString, bool) {\n\tif o == nil || o.SpPrivateKeyAlias == nil {\n\t\treturn nil, false\n\t}\n\treturn o.SpPrivateKeyAlias, true\n}", "func GetAvailableQueues() ([]string, error) {\n\tclient := &http.Client{}\n\n\tres, err := client.Get(Host + \"/queues\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trespBody := res.Body\n\tdefer respBody.Close()\n\n\tavailableQueues := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{}\n\tif err := json.NewDecoder(respBody).Decode(&availableQueues); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn availableQueues.Queues, nil\n}", "func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}", "func (ss *SqsService) 
IsQueueEmpty(ctx context.Context) (isEmpty bool) {\n\tisEmpty = false\n\tinput := &sqs.GetQueueAttributesInput{\n\t\tQueueUrl: &ss.queueURL,\n\t\tAttributeNames: []types.QueueAttributeName{\n\t\t\t\"ApproximateNumberOfMessages\",\n\t\t\t\"ApproximateNumberOfMessagesNotVisible\",\n\t\t},\n\t}\n\toutput, err := ss.client.GetQueueAttributes(ctx, input)\n\n\tif err != nil {\n\t\tlog.Printf(\"Faided to get queue attributes from Queue %s, please try again later - %s\", ss.queueName, err.Error())\n\t\treturn\n\t}\n\n\tvisible, _ := strconv.Atoi(output.Attributes[\"ApproximateNumberOfMessages\"])\n\tnotVisible, _ := strconv.Atoi(output.Attributes[\"ApproximateNumberOfMessagesNotVisible\"])\n\n\tlog.Printf(\"Queue %s has %d not visible message(s) and %d visable message(s)\\n\", ss.queueName, notVisible, visible)\n\n\tif visible+notVisible <= 1 {\n\t\tisEmpty = true\n\t}\n\treturn\n}", "func (stats *APTQueueStats) HasErrors() bool {\n\treturn len(stats.Errors) > 0\n}", "func (i *Inspector) Queues() ([]string, error) {\n\treturn i.rdb.AllQueues()\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues_Queue) IsYANGGoStruct() {}", "func (o *FiltersApiLog) GetQueryApiNamesOk() ([]string, bool) {\n\tif o == nil || o.QueryApiNames == nil {\n\t\tvar ret []string\n\t\treturn ret, false\n\t}\n\treturn *o.QueryApiNames, true\n}", "func (_Rootchain *RootchainSession) ExitsQueues(arg0 common.Address) (common.Address, error) {\n\treturn _Rootchain.Contract.ExitsQueues(&_Rootchain.CallOpts, arg0)\n}", "func (o *FiltersApiLog) GetQueryCallNamesOk() ([]string, bool) {\n\tif o == nil || o.QueryCallNames == nil {\n\t\tvar ret []string\n\t\treturn ret, false\n\t}\n\treturn *o.QueryCallNames, true\n}", "func IsQueueExist(name string, ch *amqp.Channel) bool {\n\tvar exist bool\n\t_, err := ch.QueueInspect(name)\n\tif err == nil {\n\t\texist = true\n\t}\n\n\treturn exist\n}", "func (_Rootchain *RootchainCallerSession) ExitsQueues(arg0 common.Address) (common.Address, error) {\n\treturn 
_Rootchain.Contract.ExitsQueues(&_Rootchain.CallOpts, arg0)\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (o *QueueManager) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (s *SQSServer) pollQueues(pollctx, taskctx context.Context, queues []QueueConf) error {\n\tfor _, qconf := range queues {\n\t\tq, err := s.getQueue(pollctx, qconf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq := &sqs.GetQueueAttributesInput{\n\t\t\tAttributeNames: []types.QueueAttributeName{(\"VisibilityTimeout\")},\n\t\t\tQueueUrl: &q.url,\n\t\t}\n\t\tresp, err := s.sqsSrv(q.QueueConf).GetQueueAttributes(pollctx, req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to get queue attributes for '%s' - %s\", q.Name, err.Error())\n\t\t}\n\t\tto := resp.Attributes[\"VisibilityTimeout\"]\n\t\tif to == \"\" {\n\t\t\treturn fmt.Errorf(\"No visibility timeout returned by SQS for queue '%s'\", q.Name)\n\t\t}\n\t\tvisTimeout, err := strconv.Atoi(to)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to convert visibility timeout from '%s' to int - '%s'\", to, err.Error())\n\t\t}\n\t\t// Each queue runs in a dedicated go routine.\n\t\tgo func(vt int32) {\n\t\t\ts.queuePollers.Add(1)\n\t\t\tdefer s.queuePollers.Done()\n\t\t\ts.run(pollctx, taskctx, q, vt)\n\t\t}(int32(visTimeout))\n\t}\n\n\treturn nil\n}", "func (*OpenconfigQos_Qos_Queues_Queue) IsYANGGoStruct() {}", "func (c *checkQueueAttributeImpl) CheckQueueAttributeQuery(options CheckQueueAttributeOptions) icinga.Result {\n\tname := \"Queue.Attributes\"\n\n\tstatusCheck, err := icinga.NewStatusCheck(options.ThresholdWarning, options.ThresholdCritical)\n\tif err != nil 
{\n\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't check status: %v\", err))\n\t}\n\n\tif len(options.OkIfQueueIsMissing) > 0 {\n\t\tproperty := \"broker=\\\"0.0.0.0\\\"\"\n\t\tattribute := \"QueueNames\"\n\t\tqueueSearchResult, err := c.JolokiaClient.GetAttr(options.Domain, []string{property}, attribute)\n\t\tif err != nil {\n\t\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't query QueueNames in Jolokia: %v\", err))\n\t\t}\n\t\tif queueSearchResult == nil {\n\t\t\tif (options.Verbose > 0) {\n\t\t\t\tlog.Printf(\"No queues found: [%v]\", queueSearchResult)\n\t\t\t}\n\t\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't find QueueNames for [%v]\", property))\n\t\t}\n\n\t\tif !queueExists(queueSearchResult.([] interface{}), options.OkIfQueueIsMissing) {\n\t\t\tif (options.Verbose > 0) {\n\t\t\t\tlog.Printf(\"Queue [%v] not in queue list [%v]\", options.OkIfQueueIsMissing, queueSearchResult.([] interface{}))\n\t\t\t}\n\t\t\treturn icinga.NewResult(name, icinga.ServiceStatusOk, fmt.Sprintf(\"queue [%v] does not exist\", options.OkIfQueueIsMissing))\n\t\t}\n\t}\n\n\tsearchResult, err := c.JolokiaClient.GetAttr(options.Domain, []string{options.Queue}, options.Attribute)\n\tif err != nil {\n\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't query Jolokia: %v\", err))\n\t}\n\n\tresult, err := utils.ToFloat(searchResult)\n\tif err != nil {\n\t\tif (options.Verbose > 0) {\n\t\t\tlog.Printf(\"An error occured with result [%v]\", searchResult)\n\t\t}\n\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"query result is invalid: %v\", err))\n\t}\n\n\tmessage := fmt.Sprintf(\"Search produced: %v\", searchResult)\n\tstatus := statusCheck.Check(result)\n\n\treturn icinga.NewResult(name, status, message)\n}", "func GetQueues(c *gin.Context) {\n\t//TODO: create a while both back and front until value is != nil\n\tsize := 
len(queue)\n\tlog.Printf(\"squeue: %v\", queue)\n\tif size == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue don't have any item!\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"queues\": queue,\n\t})\n\tlog.Printf(\"equeue: %v\", queue)\n}", "func (o *V0037JobProperties) GetRequeueOk() (*bool, bool) {\n\tif o == nil || o.Requeue == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Requeue, true\n}", "func (t *OpenconfigQos_Qos_Queues) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Queues\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func ValidateQueues(db *storm.DB, config Settings.FullClientSettings, tclient *torrent.Client) {\n\ttorrentQueues := Storage.FetchQueues(db)\n\tfor len(torrentQueues.ActiveTorrents) > config.MaxActiveTorrents {\n\t\tremoveTorrent := torrentQueues.ActiveTorrents[:1]\n\t\tfor _, singleTorrent := range tclient.Torrents() {\n\t\t\tif singleTorrent.InfoHash().String() == removeTorrent[0] {\n\t\t\t\tsingleTorrentFromStorage := Storage.FetchTorrentFromStorage(db, removeTorrent[0])\n\t\t\t\tRemoveTorrentFromActive(&singleTorrentFromStorage, singleTorrent, db)\n\t\t\t}\n\t\t}\n\t}\n\ttorrentQueues = Storage.FetchQueues(db)\n\tfor _, singleTorrent := range tclient.Torrents() {\n\t\tsingleTorrentFromStorage := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())\n\t\tif singleTorrentFromStorage.TorrentStatus == \"Stopped\" {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, queuedTorrent := range torrentQueues.QueuedTorrents { //If we have a queued torrent that is missing data, and an active torrent that is seeding, then prioritize the missing data one\n\t\t\tif singleTorrent.InfoHash().String() == queuedTorrent {\n\t\t\t\tif singleTorrent.BytesMissing() > 0 {\n\t\t\t\t\tfor _, activeTorrent := range torrentQueues.ActiveTorrents {\n\t\t\t\t\t\tfor _, singleActiveTorrent := range tclient.Torrents() {\n\t\t\t\t\t\t\tif 
activeTorrent == singleActiveTorrent.InfoHash().String() {\n\t\t\t\t\t\t\t\tif singleActiveTorrent.Seeding() == true {\n\t\t\t\t\t\t\t\t\tsingleActiveTFS := Storage.FetchTorrentFromStorage(db, activeTorrent)\n\t\t\t\t\t\t\t\t\tLogger.WithFields(logrus.Fields{\"TorrentName\": singleActiveTFS.TorrentName}).Info(\"Seeding, Removing from active to add queued\")\n\t\t\t\t\t\t\t\t\tRemoveTorrentFromActive(&singleActiveTFS, singleActiveTorrent, db)\n\t\t\t\t\t\t\t\t\tsingleQueuedTFS := Storage.FetchTorrentFromStorage(db, queuedTorrent)\n\t\t\t\t\t\t\t\t\tLogger.WithFields(logrus.Fields{\"TorrentName\": singleQueuedTFS.TorrentName}).Info(\"Adding torrent to the queue, not active\")\n\t\t\t\t\t\t\t\t\tAddTorrentToActive(&singleQueuedTFS, singleTorrent, db)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues_Queue_Config) IsYANGGoStruct() {}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue_Config) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue_Config\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (mr *MockSQSAPIMockRecorder) ListQueues(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListQueues\", reflect.TypeOf((*MockSQSAPI)(nil).ListQueues), arg0)\n}", "func (o *NetworkDns) GetNameServersOk() ([]string, bool) {\n\tif o == nil || o.NameServers == nil {\n\t\treturn nil, false\n\t}\n\treturn o.NameServers, true\n}", "func (sub *subState) isQueueSubscriber() bool {\n\treturn sub.QGroup != \"\"\n}", "func (o *URL) GetAliasOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Alias, true\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue) Validate(opts 
...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues) IsYANGGoStruct() {}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues_Queue_State) IsYANGGoStruct() {}", "func (target *ElasticsearchTarget) HasQueueStore() bool {\n\treturn target.store != nil\n}", "func (q *execQueue) canQueue() bool {\n\tq.mu.Lock()\n\tok := !q.isClosed() && len(q.funcs) < cap(q.funcs)\n\tq.mu.Unlock()\n\treturn ok\n}", "func (_PlasmaFramework *PlasmaFrameworkCaller) HasExitQueue(opts *bind.CallOpts, vaultId *big.Int, token common.Address) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _PlasmaFramework.contract.Call(opts, out, \"hasExitQueue\", vaultId, token)\n\treturn *ret0, err\n}", "func (*OpenconfigQos_Qos_Queues_Queue_Config) IsYANGGoStruct() {}", "func (stats *APTQueueStats) HasWarnings() bool {\n\treturn len(stats.Warnings) > 0\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue_Config) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue_Config\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (*OpenconfigQos_Qos_Queues_Queue_State) IsYANGGoStruct() {}", "func (*OpenconfigQos_Qos_Queues_Queue_Red) IsYANGGoStruct() {}", "func (o DotnetSettingsPtrOutput) ForcedNamespaceAliases() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *DotnetSettings) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ForcedNamespaceAliases\n\t}).(pulumi.StringArrayOutput)\n}", "func (qc *QueueConfig) Exists() bool {\n\treturn qc._exists\n}", "func (t *TopicCache) IsQueueEmpty(projectName, 
serviceName string) bool {\n\tt.RLock()\n\tdefer t.RUnlock()\n\n\t_, ok := t.inQueue[projectName+serviceName]\n\n\treturn !ok\n}", "func (o *VulnUpdateNotification) HasQueueId() bool {\n\tif o != nil && o.QueueId != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *IpamAliasEditInput) GetAliasNameOk() (*string, bool) {\n\tif o == nil || o.AliasName == nil {\n\t\treturn nil, false\n\t}\n\treturn o.AliasName, true\n}", "func DeclareQueues(ch *amqp.Channel, queueName string) (amqp.Queue, amqp.Queue) {\n\treturn declareQueue(ch, queueName), declareResponseQueue(ch, queueName)\n}", "func setupValidQueueNames() {\n\tfor _, jType := range models.ValidJobTypes {\n\t\tvar jt = string(jType)\n\t\tvalidQueues[jt] = true\n\t\tvalidQueueList = append(validQueueList, jt)\n\t}\n}", "func (o DotnetSettingsResponseOutput) ForcedNamespaceAliases() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v DotnetSettingsResponse) []string { return v.ForcedNamespaceAliases }).(pulumi.StringArrayOutput)\n}", "func (o *LocalDatabaseProvider) GetDnsServersOk() ([]string, bool) {\n\tif o == nil || o.DnsServers == nil {\n\t\treturn nil, false\n\t}\n\treturn o.DnsServers, true\n}", "func (o *VnicEthAdapterPolicyInventory) HasTxQueueSettings() bool {\n\tif o != nil && o.TxQueueSettings.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s QueueSetSpy) Queues() map[DeploymentID]*R11nQueue {\n\tres := s.Called()\n\treturn res.Get(0).(map[DeploymentID]*R11nQueue)\n}", "func NewGetCallQueueitemsOK() *GetCallQueueitemsOK {\n\treturn &GetCallQueueitemsOK{}\n}", "func (o *DnsEventAllOf) GetQtypeOk() (*string, bool) {\n\tif o == nil || o.Qtype == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Qtype, true\n}", "func IsAlias(name, alias string) bool {\n\td := registry.Driver(name)\n\tfor _, da := range d.Alias {\n\t\tif da == alias {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (o *Replication) GetMaxQueueSizeBytesOk() (*int64, bool) {\n\tif o == nil {\n\t\treturn nil, 
false\n\t}\n\treturn &o.MaxQueueSizeBytes, true\n}", "func (o *User) GetMailboxSettingsOk() (AnyOfmicrosoftGraphMailboxSettings, bool) {\n\tif o == nil || o.MailboxSettings == nil {\n\t\tvar ret AnyOfmicrosoftGraphMailboxSettings\n\t\treturn ret, false\n\t}\n\treturn *o.MailboxSettings, true\n}", "func (o *W2) GetAllocatedTipsOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.AllocatedTips.Get(), o.AllocatedTips.IsSet()\n}", "func QueueMatch(ctx context.Context, t *testing.T, client *entroq.EntroQ, qPrefix string) {\n\tqueue1 := path.Join(qPrefix, \"queue-1\")\n\tqueue2 := path.Join(qPrefix, \"queue-2\")\n\tqueue3 := path.Join(qPrefix, \"queue-3\")\n\tquirkyQueue := path.Join(qPrefix, \"quirky=queue\")\n\n\twantQueues := map[string]int{\n\t\tqueue1: 1,\n\t\tqueue2: 2,\n\t\tqueue3: 3,\n\t\tquirkyQueue: 1,\n\t}\n\n\t// Add tasks so that queues have a certain number of things in them, as above.\n\tvar toInsert []entroq.ModifyArg\n\tfor q, n := range wantQueues {\n\t\tfor i := 0; i < n; i++ {\n\t\t\ttoInsert = append(toInsert, entroq.InsertingInto(q))\n\t\t}\n\t}\n\tinserted, _, err := client.Modify(ctx, toInsert...)\n\tif err != nil {\n\t\tt.Fatalf(\"in QueueMatch - inserting empty tasks: %v\", err)\n\t}\n\n\t// Check that we got everything inserted.\n\tif want, got := len(inserted), len(toInsert); want != got {\n\t\tt.Fatalf(\"in QueueMatch - want %d inserted, got %d\", want, got)\n\t}\n\n\t// Check that we can get exact numbers for all of the above using MatchExact.\n\tfor q, n := range wantQueues {\n\t\tqs, err := client.Queues(ctx, entroq.MatchExact(q))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"QueueMatch single - getting queue: %v\", err)\n\t\t}\n\t\tif len(qs) != 1 {\n\t\t\tt.Errorf(\"QueueMatch single - expected 1 entry, got %d\", len(qs))\n\t\t}\n\t\tif want, got := n, qs[q]; want != got {\n\t\t\tt.Errorf(\"QueueMatch single - expected %d values in queue %q, got %d\", want, q, got)\n\t\t}\n\t}\n\n\t// Check that passing multiple exact 
matches works properly.\n\tmultiExactCases := []struct {\n\t\tq1 string\n\t\tq2 string\n\t}{\n\t\t{queue1, queue2},\n\t\t{queue1, queue3},\n\t\t{quirkyQueue, queue2},\n\t\t{\"bogus\", queue3},\n\t}\n\n\tfor _, c := range multiExactCases {\n\t\tqs, err := client.Queues(ctx, entroq.MatchExact(c.q1), entroq.MatchExact(c.q2))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"QueueMatch multi - getting multiple queues: %v\", err)\n\t\t}\n\t\tif len(qs) > 2 {\n\t\t\tt.Errorf(\"QueueMatch multi - expected no more than 2 entries, got %d\", len(qs))\n\t\t}\n\t\twant1, want2 := wantQueues[c.q1], wantQueues[c.q2]\n\t\tif got1, got2 := qs[c.q1], qs[c.q2]; want1 != got1 || want2 != got2 {\n\t\t\tt.Errorf(\"QueueMatch multi - wanted %q:%d, %q:%d, got %q:%d, %q:%d\", c.q1, want1, c.q2, want2, c.q1, got1, c.q2, got2)\n\t\t}\n\t}\n\n\t// Check prefix matching.\n\tprefixCases := []struct {\n\t\tprefix string\n\t\tqn int\n\t\tn int\n\t}{\n\t\t{path.Join(qPrefix, \"queue-\"), 3, 6},\n\t\t{path.Join(qPrefix, \"qu\"), 4, 7},\n\t\t{path.Join(qPrefix, \"qui\"), 1, 1},\n\t}\n\n\tfor _, c := range prefixCases {\n\t\tqs, err := client.Queues(ctx, entroq.MatchPrefix(c.prefix))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"QueueMatch prefix - queues error: %v\", err)\n\t\t}\n\t\tif want, got := c.qn, len(qs); want != got {\n\t\t\tt.Errorf(\"QueueMatch prefix - want %d queues, got %d\", want, got)\n\t\t}\n\t\ttot := 0\n\t\tfor _, n := range qs {\n\t\t\ttot += n\n\t\t}\n\t\tif want, got := c.n, tot; want != got {\n\t\t\tt.Errorf(\"QueueMatch prefix - want %d total items, got %d\", want, got)\n\t\t}\n\t}\n}", "func (o DotnetSettingsOutput) ForcedNamespaceAliases() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v DotnetSettings) []string { return v.ForcedNamespaceAliases }).(pulumi.StringArrayOutput)\n}", "func (o *Project) HasAlias() bool {\n\tif o != nil && o.Alias != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (bs *BeanstalkdConnectionPool) ListQueues() (queueNames []string, err error) 
{\n\tqueueNames, err = bs.getGlobalConnect().ListTubes()\n\treturn\n}", "func (j *Job) DestinationMQ() bool {\n\treturn j.Publish != \"\" && (j.Destination == \"mq\" || j.Destination == \"both\" || j.Destination == \"\")\n}", "func (_PlasmaFramework *PlasmaFrameworkCallerSession) HasExitQueue(vaultId *big.Int, token common.Address) (bool, error) {\n\treturn _PlasmaFramework.Contract.HasExitQueue(&_PlasmaFramework.CallOpts, vaultId, token)\n}", "func (*OpenconfigQos_Qos_Queues_Queue_Wred) IsYANGGoStruct() {}" ]
[ "0.62684894", "0.6182187", "0.6036427", "0.5732854", "0.55811864", "0.5505796", "0.5485279", "0.54428786", "0.535559", "0.5352854", "0.52992433", "0.5274927", "0.5233722", "0.5213937", "0.5182979", "0.51760995", "0.51753753", "0.51179737", "0.510792", "0.5093728", "0.50886714", "0.5077195", "0.5077019", "0.5073604", "0.5070931", "0.5055009", "0.50076914", "0.49961957", "0.49958527", "0.4994109", "0.49924216", "0.4980261", "0.4964561", "0.4950706", "0.49246693", "0.49225292", "0.49120596", "0.48996034", "0.48967153", "0.48941848", "0.48781726", "0.48721504", "0.4869644", "0.48496425", "0.48418292", "0.48400497", "0.48394054", "0.483484", "0.48331857", "0.48250756", "0.48206323", "0.4805135", "0.47896424", "0.47882307", "0.47721645", "0.47683075", "0.47668794", "0.47585508", "0.47560215", "0.474742", "0.47449997", "0.4740495", "0.4736876", "0.4723869", "0.46963224", "0.46940932", "0.46837297", "0.4681356", "0.46779615", "0.4663454", "0.4651283", "0.46473032", "0.46464393", "0.46454248", "0.46380576", "0.46341732", "0.46036628", "0.45887274", "0.45851693", "0.45845893", "0.45833793", "0.45824075", "0.4581199", "0.45500588", "0.45257246", "0.450709", "0.45051086", "0.45019165", "0.44997787", "0.44928822", "0.44921127", "0.44908398", "0.44847196", "0.44809794", "0.44780758", "0.44757226", "0.4475278", "0.44647294", "0.44577336", "0.44524524" ]
0.8438502
0
SetAliasQueues sets field value
func (o *QueueManager) SetAliasQueues(v []AliasQueue) { o.AliasQueues = v }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *Process) CmdSetQueue(pac teoapi.Packet) (err error) {\n\tdata := pac.RemoveTrailingZero(pac.Data())\n\trequest := cdb.KeyValue{Cmd: pac.Cmd()}\n\tif err = request.UnmarshalText(data); err != nil {\n\t\treturn\n\t} else if err = p.tcdb.SetQueue(request.Key, request.Value); err != nil {\n\t\treturn\n\t}\n\t// Return only Value for text requests and all fields for json\n\tresponce := request\n\tresponce.Value = nil\n\tif !request.RequestInJSON {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), responce.Value)\n\t} else if retdata, err := responce.MarshalText(); err == nil {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), retdata)\n\t}\n\treturn\n}", "func (s *Filters) SetQueues(v []*string) *Filters {\n\ts.Queues = v\n\treturn s\n}", "func (o *QueueManager) GetAliasQueues() []AliasQueue {\n\tif o == nil {\n\t\tvar ret []AliasQueue\n\t\treturn ret\n\t}\n\n\treturn o.AliasQueues\n}", "func (tcdb *Teocdb) SetQueue(key string, value []byte) (err error) {\n\treturn tcdb.session.Query(`UPDATE queue SET lock = '', data = ? WHERE key = ? 
AND time = toTimestamp(now()) AND random = UUID()`,\n\t\tvalue, key).Exec()\n}", "func (c *Consumer) SetQueueBind(bind *QueueBind) *Consumer {\n\tif bind != nil {\n\t\tc.mutex.Lock()\n\t\tc.bind = bind\n\t\tc.mutex.Unlock()\n\t}\n\treturn c\n}", "func (s *UserDataFilters) SetQueues(v []*string) *UserDataFilters {\n\ts.Queues = v\n\treturn s\n}", "func (s *Service) SetQueue(q amboy.Queue) error {\n\tif s.closer != nil {\n\t\treturn errors.New(\"cannot set a new queue, Service is already open\")\n\t}\n\n\ts.queue = q\n\treturn nil\n}", "func (c *Consumer) SetQueueName(withPrefix bool, name string) *Consumer {\n\tif name == \"\" {\n\t\tname = c.getExchangeTopic()\n\t}\n\tnewQueueName := GenerateQueueName(withPrefix, name)\n\tc.mutex.Lock()\n\tc.declare.SetName(newQueueName)\n\tc.bind.SetName(newQueueName)\n\tc.mutex.Unlock()\n\treturn c\n}", "func (q *Queue) Set(ctx context.Context, ds *linux.MsqidDS) error {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\n\tcreds := auth.CredentialsFromContext(ctx)\n\tif ds.MsgQbytes > maxQueueBytes && !creds.HasCapabilityIn(linux.CAP_SYS_RESOURCE, q.obj.UserNS) {\n\t\t// \"An attempt (IPC_SET) was made to increase msg_qbytes beyond the\n\t\t// system parameter MSGMNB, but the caller is not privileged (Linux:\n\t\t// does not have the CAP_SYS_RESOURCE capability).\"\n\t\treturn linuxerr.EPERM\n\t}\n\n\tif err := q.obj.Set(ctx, &ds.MsgPerm); err != nil {\n\t\treturn err\n\t}\n\n\tq.maxBytes = ds.MsgQbytes\n\tq.changeTime = ktime.NowFromContext(ctx)\n\treturn nil\n}", "func SetQueueSettings(ctx *context.Context) {\n\tqid := ctx.ParamsInt64(\"qid\")\n\tmq := queue.GetManager().GetManagedQueue(qid)\n\tif mq == nil {\n\t\tctx.Status(http.StatusNotFound)\n\t\treturn\n\t}\n\tif _, ok := mq.Managed.(queue.ManagedPool); !ok {\n\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.pool.none\"))\n\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\treturn\n\t}\n\n\tmaxNumberStr := 
ctx.FormString(\"max-number\")\n\tnumberStr := ctx.FormString(\"number\")\n\ttimeoutStr := ctx.FormString(\"timeout\")\n\n\tvar err error\n\tvar maxNumber, number int\n\tvar timeout time.Duration\n\tif len(maxNumberStr) > 0 {\n\t\tmaxNumber, err = strconv.Atoi(maxNumberStr)\n\t\tif err != nil {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.maxnumberworkers.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t\tif maxNumber < -1 {\n\t\t\tmaxNumber = -1\n\t\t}\n\t} else {\n\t\tmaxNumber = mq.MaxNumberOfWorkers()\n\t}\n\n\tif len(numberStr) > 0 {\n\t\tnumber, err = strconv.Atoi(numberStr)\n\t\tif err != nil || number < 0 {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.numberworkers.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tnumber = mq.BoostWorkers()\n\t}\n\n\tif len(timeoutStr) > 0 {\n\t\ttimeout, err = time.ParseDuration(timeoutStr)\n\t\tif err != nil {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.timeout.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t} else {\n\t\ttimeout = mq.BoostTimeout()\n\t}\n\n\tmq.SetPoolSettings(maxNumber, number, timeout)\n\tctx.Flash.Success(ctx.Tr(\"admin.monitor.queue.settings.changed\"))\n\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n}", "func (c *Client) QueueBind(\n\texchange, queue, key string,\n\topts *QueueBindOpts,\n\tconnOpts *ConnectOpts) error {\n\n\tdefaultOpts := DefaultQueueBindOpts()\n\n\tif opts != nil {\n\t\tdefaultOpts = opts\n\t}\n\n\tdefaultConnOpts := DefaultConnectOpts()\n\tif connOpts != nil {\n\t\tdefaultConnOpts = connOpts\n\t}\n\n\tconn, err := c.connect(defaultConnOpts)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\tch, err := 
conn.Channel()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer ch.Close()\n\n\terr = ch.QueueBind(\n\t\tqueue,\n\t\tkey,\n\t\texchange,\n\t\tdefaultOpts.NoWait,\n\t\tdefaultOpts.Args,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}", "func (router *EventRouter) BindQueue(queue string, exchange string) {\n\tif router.lastError == nil {\n\t\trouter.DeclareExchange(exchange)\n\t}\n\tif router.lastError == nil {\n\t\trouter.DeclareQueue(queue)\n\t}\n\tif router.lastError == nil {\n\t\trouter.lastError = router.channel.QueueBind(queue, \"\", exchange, false, nil)\n\t}\n}", "func (e *LifecycleEvent) SetQueueURL(url string) { e.queueURL = url }", "func (rm *RouterMux) SetAlias(route string, aliases ...string) {\n\tfor _, alias := range aliases {\n\t\trm.aliases[alias] = route\n\t}\n}", "func (c *Consumer) SetQueueDeclare(declare *QueueDeclare) *Consumer {\n\tif declare != nil {\n\t\tc.mutex.Lock()\n\t\tc.declare = declare\n\t\tc.mutex.Unlock()\n\t}\n\treturn c\n}", "func (acnl *Channel) setupQueues(cnl *amqp.Channel) error {\n\t/*if _, err := cnl.QueueDeclare(QueueVNFMRegister, true, acnl.cfg.queues.autodelete,\n\t\tacnl.cfg.queues.exclusive, false, nil); err != nil {\n\n\t\treturn err\n\t}\n\n\tif err := cnl.QueueBind(QueueVNFMRegister, QueueVNFMRegister, acnl.cfg.exchange.name, false, nil); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := cnl.QueueDeclare(QueueVNFMUnregister, true, acnl.cfg.queues.autodelete,\n\t\tacnl.cfg.queues.exclusive, false, nil); err != nil 
{\n\n\t\treturn err\n\t}\n\n\tif err := cnl.QueueBind(QueueVNFMUnregister, QueueVNFMUnregister, acnl.cfg.exchange.name, false, nil); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := cnl.QueueDeclare(QueueVNFMCoreActions, true, acnl.cfg.queues.autodelete,\n\t\tacnl.cfg.queues.exclusive, false, nil); err != nil {\n\n\t\treturn err\n\t}\n\n\tif err := cnl.QueueBind(QueueVNFMCoreActions, QueueVNFMCoreActions, acnl.cfg.exchange.name, false, nil); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := cnl.QueueDeclare(QueueVNFMCoreActionsReply, true, acnl.cfg.queues.autodelete,\n\t\tacnl.cfg.queues.exclusive, false, nil); err != nil {\n\n\t\treturn err\n\t}\n\n\tif err := cnl.QueueBind(QueueVNFMCoreActionsReply, QueueVNFMCoreActionsReply, acnl.cfg.exchange.name, false, nil); err != nil {\n\t\treturn err\n\t}*/\n\n\t// is this needed?\n\tif _, err := cnl.QueueDeclare(acnl.cfg.queues.generic, true, acnl.cfg.queues.autodelete,\n\t\tacnl.cfg.queues.exclusive, false, nil); err != nil {\n\n\t\treturn err\n\t}\n\n\tif err := cnl.QueueBind(acnl.cfg.queues.generic, acnl.cfg.queues.generic, acnl.cfg.exchange.name, false, nil); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func QueueBind(config ConfigQueue, ch *amqp.Channel) error {\n\tlog.Println(\"config: %+v\", config.Bind)\n\n\tif err := ch.QueueBind(\n\t\tconfig.Name,\n\t\tconfig.Bind.RoutingKey,\n\t\tconfig.Bind.ExchangeName,\n\t\tconfig.NoWait,\n\t\tnil,\n\t); err != nil {\n\t\treturn errors.New(\"[QueueBind]: unable to queue bind\" + err.Error())\n\t}\n\n\treturn nil\n}", "func (s *DefaultSubscriber) QueueBind(name, key string) error {\n\terr := s.channel.QueueBind(name, key, s.exchange, false, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to bind queue %s with key %s on exchange '%s' (%s)\", name, key, s.exchange, err)\n\t}\n\treturn nil\n}", "func (w *Worker) SetQueue(q Queue) {\n\tw.queue = q\n}", "func (q *QLearning) SetQ(a Action, qv float64) {\n\tq.qt[q.state.Get()][a] = qv\n}", "func (r *RPC) 
SetQueueClient(c queue.Client) {\n\tgapi := NewGRpcServer(c, r.api)\n\tjapi := NewJSONRPCServer(c, r.api)\n\tr.gapi = gapi\n\tr.japi = japi\n\tr.c = c\n\t//注册系统rpc\n\tpluginmgr.AddRPC(r)\n\tr.Listen()\n}", "func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AliasQueues, true\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) {\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}", "func (s *segment) setOwner(ep *endpoint, qFlags queueFlags) {\n\tswitch qFlags {\n\tcase recvQ:\n\t\tep.updateReceiveMemUsed(s.segMemSize())\n\tcase sendQ:\n\t\t// no memory account for sendQ yet.\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unexpected queue flag %b\", qFlags))\n\t}\n\ts.ep = ep\n\ts.qFlags = qFlags\n}", "func (r *RPC) SetQueueClient(c queue.Client) {\r\n\tgapi := NewGRpcServer(c, r.api)\r\n\tjapi := NewJSONRPCServer(c, r.api)\r\n\tr.gapi = gapi\r\n\tr.japi = japi\r\n\tr.c = c\r\n\t//注册系统rpc\r\n\tpluginmgr.AddRPC(r)\r\n\tr.Listen()\r\n}", "func QueueBind(ch *amqp.Channel, qName, rKey, exchange string, noWait bool) error {\n\terr := ch.QueueBind(\n\t\tqName, // queue name\n\t\trKey, // routing key\n\t\texchange, // exchange\n\t\tnoWait,\n\t\tnil,\n\t)\n\treturn err\n}", "func WithQueues(queues []string) Option {\n\treturn func(opts *Options) {\n\t\topts.Queues = queues\n\t}\n}", "func (c *Connection) queueBind(queue string, routingKey string, exchange string, opts *QueueBindOpts) error {\n\terr := c.Channel.QueueBind(\n\t\tqueue,\n\t\troutingKey,\n\t\texchange,\n\t\topts.NoWait,\n\t\topts.Args,\n\t)\n\n\treturn 
err\n}", "func (mq *MessageQueue) SetQueue(queueName string) error {\n\tif mq.Channel == nil {\n\t\tnewCH, err := mq.NewChannel()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmq.Channel = newCH\n\t}\n\tmq.Channel.Qos(mq.Prefetch, 0, false)\n\tif _, err := mq.Channel.QueueDeclare(\n\t\tqueueName, // name\n\t\ttrue, // durable\n\t\tfalse, // delete when unused\n\t\tfalse, // exclusive\n\t\tfalse, // no-wait\n\t\tnil, // arguments\n\t); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func PopulateQueues(c *gin.Context) {\n\tif queue == nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue doesn't exist, please create it!!!\",\n\t\t})\n\t\treturn\n\t}\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"roberto\",\n\t\tEMAIL: \"roberto@rr.com\",\n\t\tUUID: \"1\",\n\t\tMSG: \"lindo\",\n\t})\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"alex\",\n\t\tEMAIL: \"alex@rr.com\",\n\t\tUUID: \"2\",\n\t\tMSG: \"lindox\",\n\t})\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"ale\",\n\t\tEMAIL: \"ale@rr.com\",\n\t\tUUID: \"3\",\n\t\tMSG: \"linduxo\",\n\t})\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"msg\": queue,\n\t})\n}", "func SetConfig(c QueueConfig) error {\n\t// is name unique?\n\tif _, ok := configList[c.Name]; ok {\n\t\treturn ErrQueueIsExist\n\t}\n\n\t// is contener unique?\n\tfor _, v := range configList {\n\t\tif v.Contener == reflect.ValueOf(c.JobContener).Type() {\n\t\t\treturn ErrContenerIsNotUnique\n\t\t}\n\t}\n\n\treturn setConfig(c)\n}", "func (p *Patch) SetTriggerAliases() error {\n\ttriggersKey := bsonutil.GetDottedKeyName(TriggersKey, TriggerInfoAliasesKey)\n\treturn UpdateOne(\n\t\tbson.M{IdKey: p.Id},\n\t\tbson.M{\n\t\t\t\"$addToSet\": bson.M{triggersKey: bson.M{\"$each\": p.Triggers.Aliases}},\n\t\t},\n\t)\n}", "func SetRabbitMQ(bk *RabbitMQBroker) OptionFunc {\n\treturn func(bi *brokerInstance) {\n\t\tbi.rabbitmq = bk\n\t}\n}", "func SetMasqToPort(port ...int) (*RuleAction, error) {\n\tra := &RuleAction{}\n\tra.masq = 
&masquerade{}\n\tif len(port) == 0 {\n\t\treturn nil, fmt.Errorf(\"no port provided\")\n\t}\n\tif len(port) > 2 {\n\t\treturn nil, fmt.Errorf(\"more than maximum of 2 ports provided\")\n\t}\n\tports := [2]*uint16{}\n\tp := uint16(port[0])\n\tports[0] = &p\n\tif len(port) == 2 {\n\t\tp := uint16(port[1])\n\t\tports[1] = &p\n\t}\n\tra.masq.toPort = ports\n\n\treturn ra, nil\n}", "func (s *API) SetQueueAttributes(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"SetQueueAttributes\")\n\tw.WriteHeader(http.StatusNotImplemented)\n}", "func (client *Client) QueueBind(queueName, exchangeName, bindingKey string) error {\n\treturn client.amqpChannel.QueueBind(queueName, exchangeName, bindingKey)\n}", "func (_Container *ContainerTransactor) SetAddressKMS(opts *bind.TransactOpts, address_KMS common.Address) (*types.Transaction, error) {\n\treturn _Container.contract.Transact(opts, \"setAddressKMS\", address_KMS)\n}", "func SetMasq(random, fullyRandom, persistent bool) (*RuleAction, error) {\n\tra := &RuleAction{}\n\tra.masq = &masquerade{}\n\tra.masq.random = &random\n\tra.masq.fullyRandom = &fullyRandom\n\tra.masq.persistent = &persistent\n\n\treturn ra, nil\n}", "func (a *SequenceNumber) SetSQN(sQN uint8) {}", "func (bk *AddressBook) enqueueAddrs(addrQueue *chan *Address) {\n\tbk.addrState.RLock()\n\tdefer bk.addrState.RUnlock()\n\n\t*addrQueue = make(chan *Address, len(bk.peers))\n\tfor _, v := range bk.peers {\n\t\t*addrQueue <- v\n\t}\n}", "func (_BaseContentSpace *BaseContentSpaceTransactor) SetAddressKMS(opts *bind.TransactOpts, address_KMS common.Address) (*types.Transaction, error) {\n\treturn _BaseContentSpace.contract.Transact(opts, \"setAddressKMS\", address_KMS)\n}", "func (_BaseLibrary *BaseLibraryTransactor) SetAddressKMS(opts *bind.TransactOpts, address_KMS common.Address) (*types.Transaction, error) {\n\treturn _BaseLibrary.contract.Transact(opts, \"setAddressKMS\", address_KMS)\n}", "func SetAlertSubscribe(name string) {\n\talertSubscribeQueue = 
name\n}", "func setupValidQueueNames() {\n\tfor _, jType := range models.ValidJobTypes {\n\t\tvar jt = string(jType)\n\t\tvalidQueues[jt] = true\n\t\tvalidQueueList = append(validQueueList, jt)\n\t}\n}", "func (m *AudioRoutingGroup) SetReceivers(value []string)() {\n err := m.GetBackingStore().Set(\"receivers\", value)\n if err != nil {\n panic(err)\n }\n}", "func (q *Queue) EnQueue(val interface{}) {\r\n\r\n\ttemp, _ := CreateNew(val)\r\n\r\n\tq.QueueList = append(q.QueueList, temp.QueueList...)\r\n}", "func (s *SearchQueuesOutput) SetQueues(v []*Queue) *SearchQueuesOutput {\n\ts.Queues = v\n\treturn s\n}", "func setupQueue(client *redis.Client) error {\n\t// ping the queue\n\terr := pingQueue(client)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (xa XAir) Set(address string, arguments osc.Arguments) {\n\tmsg := osc.Message{Address: address, Arguments: arguments}\n\tlog.Info.Printf(\"Set on %s: %s\", xa.Name, msg)\n\txa.set <- msg\n\txa.send(msg)\n}", "func (backend *RedisBackend) RegisterQueue(queuename string) {\n\tbackend.queues = append(backend.queues, queuename)\n\tbackend.Subscribe(queuename)\n}", "func (o *MarkClaimedTaskDoneParams) SetQueue(queue string) {\n\to.Queue = queue\n}", "func (spriteBatch *SpriteBatch) Setq(index int, quad *Quad, args ...float32) error {\n\treturn spriteBatch.addv(quad.getVertices(), generateModelMatFromArgs(args), index)\n}", "func DeclareQueues(ch *amqp.Channel, queueName string) (amqp.Queue, amqp.Queue) {\n\treturn declareQueue(ch, queueName), declareResponseQueue(ch, queueName)\n}", "func SetQueueReclaimable(ctx *TestContext, queues []string, reclaimable bool) {\n\tBy(\"Setting Queue reclaimable\")\n\n\tfor _, q := range queues {\n\t\tqueue, err := ctx.Vcclient.SchedulingV1beta1().Queues().Get(context.TODO(), q, metav1.GetOptions{})\n\t\tExpect(err).NotTo(HaveOccurred(), \"failed to get queue %s\", q)\n\n\t\tqueue.Spec.Reclaimable = &reclaimable\n\t\t_, err = 
ctx.Vcclient.SchedulingV1beta1().Queues().Update(context.TODO(), queue, metav1.UpdateOptions{})\n\t\tExpect(err).NotTo(HaveOccurred(), \"failed to update queue %s\", q)\n\t}\n}", "func (s *API) ListQueues(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"ListQueues\")\n\n\tqueueNamePrefix := req.FormValue(\"QueueNamePrefix\")\n\tvar queues []string\n\tfor k, v := range s.sqs.queues {\n\t\tif strings.HasPrefix(k, queueNamePrefix) {\n\t\t\tqueues = append(queues, v.url)\n\t\t}\n\t}\n\n\tresponse := ListQueuesResponse{\n\t\tResult: ListQueuesResult{queues},\n\t\tMetaData: ResponseMetaData{\"00000000-0000-0000-0000-000000000000\"},\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\tenc := xml.NewEncoder(w)\n\tenc.Indent(\" \", \" \")\n\tif err := enc.Encode(response); err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t}\n}", "func DeleteTorrentFromQueues(torrentHash string, db *storm.DB) {\n\ttorrentQueues := Storage.FetchQueues(db)\n\tfor x, torrentHashActive := range torrentQueues.ActiveTorrents { //FOR EXTRA CAUTION deleting it from both queues in case a mistake occurred.\n\t\tif torrentHash == torrentHashActive {\n\t\t\ttorrentQueues.ActiveTorrents = append(torrentQueues.ActiveTorrents[:x], torrentQueues.ActiveTorrents[x+1:]...)\n\t\t\tLogger.Info(\"Removing Torrent from Active: \", torrentHash)\n\t\t}\n\t}\n\tfor x, torrentHashQueued := range torrentQueues.QueuedTorrents { //FOR EXTRA CAUTION deleting it from both queues in case a mistake occurred.\n\t\tif torrentHash == torrentHashQueued {\n\t\t\ttorrentQueues.QueuedTorrents = append(torrentQueues.QueuedTorrents[:x], torrentQueues.QueuedTorrents[x+1:]...)\n\t\t\tLogger.Info(\"Removing Torrent from Queued\", torrentHash)\n\t\t}\n\t}\n\tfor x, torrentHashActive := range torrentQueues.ForcedTorrents { //FOR EXTRA CAUTION deleting it from all queues in case a mistake occurred.\n\t\tif torrentHash == torrentHashActive {\n\t\t\ttorrentQueues.ForcedTorrents = 
append(torrentQueues.ForcedTorrents[:x], torrentQueues.ForcedTorrents[x+1:]...)\n\t\t\tLogger.Info(\"Removing Torrent from Forced: \", torrentHash)\n\t\t}\n\t}\n\tStorage.UpdateQueues(db, torrentQueues)\n\tLogger.WithFields(logrus.Fields{\"Torrent Hash\": torrentHash, \"TorrentQueues\": torrentQueues}).Info(\"Removing Torrent from all Queues\")\n}", "func (_BaseContent *BaseContentTransactor) SetAddressKMS(opts *bind.TransactOpts, address_KMS common.Address) (*types.Transaction, error) {\n\treturn _BaseContent.contract.Transact(opts, \"setAddressKMS\", address_KMS)\n}", "func queueName(name string, withoutPrefix bool) string {\n\t// Allow using a nameless queue\n\tif name == \"\" || withoutPrefix {\n\t\treturn name\n\t}\n\n\treturn \"relay.\" + name\n}", "func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}", "func (s *SQSServer) ListenAndServeQueues(queues ...QueueConf) error {\n\tif len(queues) == 0 {\n\t\treturn fmt.Errorf(\"Must specify at least one SQS queue to poll\")\n\t}\n\tpollctx, pollcancel := context.WithCancel(context.Background())\n\ttaskctx, taskcancel := context.WithCancel(context.Background())\n\ts.stopPolling = pollcancel\n\ts.stopTasks = taskcancel\n\tfor i := range queues {\n\t\tif queues[i].Name == \"\" {\n\t\t\treturn fmt.Errorf(\"Queue configuration must have a Name\")\n\t\t}\n\t\tif queues[i].Region == \"\" {\n\t\t\tqueues[i].Region = s.defaultRegion\n\t\t}\n\t\tif queues[i].ReadBatch == 0 {\n\t\t\tqueues[i].ReadBatch = defaultReadBatchSize\n\t\t}\n\t\tif queues[i].Metrics == nil {\n\t\t\tqueues[i].Metrics = func(MetricType, float64, int) {}\n\t\t}\n\t}\n\treturn s.pollQueues(pollctx, taskctx, queues)\n}", "func (o *GetLolCareerStatsV1ChampionAveragesByChampionIDByPositionByTierByQueueParams) 
SetQueue(queue string) {\n\to.Queue = queue\n}", "func SetMatchmakingQueue(settings *playfab.Settings, postData *SetMatchmakingQueueRequestModel, entityToken string) (*SetMatchmakingQueueResultModel, error) {\n if entityToken == \"\" {\n return nil, playfab.NewCustomError(\"entityToken should not be an empty string\", playfab.ErrorGeneric)\n }\n b, errMarshal := json.Marshal(postData)\n if errMarshal != nil {\n return nil, playfab.NewCustomError(errMarshal.Error(), playfab.ErrorMarshal)\n }\n\n sourceMap, err := playfab.Request(settings, b, \"/Match/SetMatchmakingQueue\", \"X-EntityToken\", entityToken)\n if err != nil {\n return nil, err\n }\n \n result := &SetMatchmakingQueueResultModel{}\n\n config := mapstructure.DecoderConfig{\n DecodeHook: playfab.StringToDateTimeHook,\n Result: result,\n }\n \n decoder, errDecoding := mapstructure.NewDecoder(&config)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n \n errDecoding = decoder.Decode(sourceMap)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n\n return result, nil\n}", "func SetSocksHost(s string) func(*Manager) error {\n\treturn func(c *Manager) error {\n\t\tc.host = s\n\t\treturn nil\n\t}\n}", "func (h *Homebrew) SendQueue(q []*dmr.Packet, toPeer *Peer) error {\n\tfor _, packet := range q {\n\t\tdata := buildData(packet, h.ID)\n\t\tif err := h.WriteToPeer(data, toPeer); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (policy *ticketPolicy) OnSetQueueClient() {\n\n}", "func (mq *MessageQueue) BindExchangeQueue(exchangeName, routingKey, queueName string) error {\n\tch, _ := mq.NewChannel()\n\tdefer ch.Close()\n\treturn ch.QueueBind(queueName, routingKey, exchangeName, false, nil)\n}", "func (queue *Queue) SetBufferSize(bufferSize int) error {\n\tif bufferSize < 0 {\n\t\treturn fmt.Errorf(\n\t\t\t\"buffer size is less than 0: %d\", bufferSize)\n\t}\n\n\tif bufferSize 
< len(queue.data) {\n\t\treturn fmt.Errorf(\n\t\t\t\"buffer size is less than the length of the queue: %d\",\n\t\t\tbufferSize)\n\t}\n\n\tdata := make([]interface{}, len(queue.data), bufferSize)\n\n\tcopy(data, queue.data)\n\tqueue.data = data\n\n\treturn nil\n}", "func (a *Admin) SetSlots(addr, action string, slots []Slot, nodeID string) error {\n\tif len(slots) == 0 {\n\t\treturn nil\n\t}\n\tc, err := a.Connections().Get(addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, slot := range slots {\n\t\tif nodeID == \"\" {\n\t\t\tc.PipeAppend(\"CLUSTER\", \"SETSLOT\", slot, action)\n\t\t} else {\n\t\t\tc.PipeAppend(\"CLUSTER\", \"SETSLOT\", slot, action, nodeID)\n\t\t}\n\t}\n\tif !a.Connections().ValidatePipeResp(c, addr, \"Cannot SETSLOT\") {\n\t\treturn fmt.Errorf(\"Error occured during CLUSTER SETSLOT %s\", action)\n\t}\n\tc.PipeClear()\n\n\treturn nil\n}", "func (bk *AddressBook) enqueueBlacklist(addrQueue *chan *Address) {\n\tbk.addrState.RLock()\n\tdefer bk.addrState.RUnlock()\n\n\t*addrQueue = make(chan *Address, len(bk.blacklist))\n\tfor _, v := range bk.blacklist {\n\t\t*addrQueue <- v\n\t}\n}", "func (_m *ControllerInterface) SetArrays(_a0 map[string]*array.PowerStoreArray) {\n\t_m.Called(_a0)\n}", "func jobQAssign(ctx context.Context, cfg *Config, cluster string, queues *Queues) (err kv.Error) {\n\n\t// Obtain a list of all of the known node groups in the cluster and the machine types they\n\t// are provisioning\n\tgroups, err := getGroups(ctx, cfg, cluster)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinstances := map[string][]string{}\n\t// Create a map from the groups, node group major, for a ec2 instance type major collection\n\tfor aGroup, instTypes := range groups {\n\t\tfor _, instType := range instTypes {\n\t\t\taddCatalog(instType, aGroup, instances)\n\t\t}\n\t}\n\n\tif logger.IsTrace() {\n\t\tlogger.Trace(spew.Sdump(groups), \"stack\", stack.Trace().TrimRuntime())\n\t\tlogger.Trace(spew.Sdump(instances), \"stack\", 
stack.Trace().TrimRuntime())\n\t}\n\n\t// Assign the known machine types based on the Queues and then match them up\n\tif err = loadNodeGroups(ctx, cfg, cluster, queues, instances); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (o EndpointsResponseOutput) Queue() pulumi.StringOutput {\n\treturn o.ApplyT(func(v EndpointsResponse) string { return v.Queue }).(pulumi.StringOutput)\n}", "func setupManager(username string, password string, brokerIp string, brokerPort int, manager *Manager, exchange string, queueName string) error {\n\tamqpURI := getAmqpUri(username, password, brokerIp, brokerPort)\n\tmanager.logger.Debugf(\"dialing %s\", amqpURI)\n\tvar err error\n\tmanager.Connection, err = amqp.Dial(amqpURI)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debugf(\"got Connection, getting Channel\")\n\tmanager.Channel, err = manager.Connection.Channel()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debugf(\"got Channel, declaring Exchange (%q)\", exchange)\n\n\tmanager.logger.Debugf(\"declared Exchange, declaring Queue %q\", queueName)\n\tqueue, err := manager.Channel.QueueDeclare(\n\t\tqueueName,\n\t\ttrue,\n\t\ttrue,\n\t\tfalse,\n\t\tfalse,\n\t\tnil,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debugf(\"declared Queue (%q, %d messages, %d consumers), binding to Exchange\",\n\t\tqueue.Name, queue.Messages, queue.Consumers)\n\n\tif err = manager.Channel.QueueBind(\n\t\tqueue.Name, // name of the queue\n\t\tqueue.Name, // bindingKey\n\t\texchange, // sourceExchange\n\t\tfalse, // noWait\n\t\tnil, // arguments\n\t); err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debug(\"Queue bound to Exchange, starting Consume\")\n\treturn nil\n}", "func Queue(opt queue.Queue) Option {\n\treturn func(o *Options) {\n\t\to.Queue = opt\n\t}\n}", "func RemoveDuplicatesFromQueues(db *storm.DB) {\n\ttorrentQueues := Storage.FetchQueues(db)\n\tfor _, torrentHash := range torrentQueues.ActiveTorrents {\n\t\tfor i, queuedHash := 
range torrentQueues.QueuedTorrents {\n\t\t\tif torrentHash == queuedHash {\n\t\t\t\ttorrentQueues.QueuedTorrents = append(torrentQueues.QueuedTorrents[:i], torrentQueues.QueuedTorrents[i+1:]...)\n\t\t\t}\n\t\t}\n\t}\n\tStorage.UpdateQueues(db, torrentQueues)\n}", "func SetHost(s string) func(*Manager) error {\n\treturn func(c *Manager) error {\n\t\tc.samhost = s\n\t\treturn nil\n\t}\n}", "func ValidateQueues(db *storm.DB, config Settings.FullClientSettings, tclient *torrent.Client) {\n\ttorrentQueues := Storage.FetchQueues(db)\n\tfor len(torrentQueues.ActiveTorrents) > config.MaxActiveTorrents {\n\t\tremoveTorrent := torrentQueues.ActiveTorrents[:1]\n\t\tfor _, singleTorrent := range tclient.Torrents() {\n\t\t\tif singleTorrent.InfoHash().String() == removeTorrent[0] {\n\t\t\t\tsingleTorrentFromStorage := Storage.FetchTorrentFromStorage(db, removeTorrent[0])\n\t\t\t\tRemoveTorrentFromActive(&singleTorrentFromStorage, singleTorrent, db)\n\t\t\t}\n\t\t}\n\t}\n\ttorrentQueues = Storage.FetchQueues(db)\n\tfor _, singleTorrent := range tclient.Torrents() {\n\t\tsingleTorrentFromStorage := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())\n\t\tif singleTorrentFromStorage.TorrentStatus == \"Stopped\" {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, queuedTorrent := range torrentQueues.QueuedTorrents { //If we have a queued torrent that is missing data, and an active torrent that is seeding, then prioritize the missing data one\n\t\t\tif singleTorrent.InfoHash().String() == queuedTorrent {\n\t\t\t\tif singleTorrent.BytesMissing() > 0 {\n\t\t\t\t\tfor _, activeTorrent := range torrentQueues.ActiveTorrents {\n\t\t\t\t\t\tfor _, singleActiveTorrent := range tclient.Torrents() {\n\t\t\t\t\t\t\tif activeTorrent == singleActiveTorrent.InfoHash().String() {\n\t\t\t\t\t\t\t\tif singleActiveTorrent.Seeding() == true {\n\t\t\t\t\t\t\t\t\tsingleActiveTFS := Storage.FetchTorrentFromStorage(db, activeTorrent)\n\t\t\t\t\t\t\t\t\tLogger.WithFields(logrus.Fields{\"TorrentName\": 
singleActiveTFS.TorrentName}).Info(\"Seeding, Removing from active to add queued\")\n\t\t\t\t\t\t\t\t\tRemoveTorrentFromActive(&singleActiveTFS, singleActiveTorrent, db)\n\t\t\t\t\t\t\t\t\tsingleQueuedTFS := Storage.FetchTorrentFromStorage(db, queuedTorrent)\n\t\t\t\t\t\t\t\t\tLogger.WithFields(logrus.Fields{\"TorrentName\": singleQueuedTFS.TorrentName}).Info(\"Adding torrent to the queue, not active\")\n\t\t\t\t\t\t\t\t\tAddTorrentToActive(&singleQueuedTFS, singleTorrent, db)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (p *AuroraAdminClient) SetQuota(ctx context.Context, ownerRole string, quota *ResourceAggregate) (r *Response, err error) {\n var _args317 AuroraAdminSetQuotaArgs\n _args317.OwnerRole = ownerRole\n _args317.Quota = quota\n var _result318 AuroraAdminSetQuotaResult\n if err = p.Client_().Call(ctx, \"setQuota\", &_args317, &_result318); err != nil {\n return\n }\n return _result318.GetSuccess(), nil\n}", "func (t *Topology) AddQueueBind(arg QueueBind) *Topology {\n\tt.mutex.Lock()\n\tt.queueBind = append(t.queueBind, arg)\n\tt.mutex.Unlock()\n\tt.update()\n\treturn t\n}", "func (p *AuroraAdminClient) SetQuota(ctx context.Context, ownerRole string, quota *ResourceAggregate) (r *Response, err error) {\n var _args367 AuroraAdminSetQuotaArgs\n _args367.OwnerRole = ownerRole\n _args367.Quota = quota\n var _result368 AuroraAdminSetQuotaResult\n var meta thrift.ResponseMeta\n meta, err = p.Client_().Call(ctx, \"setQuota\", &_args367, &_result368)\n p.SetLastResponseMeta_(meta)\n if err != nil {\n return\n }\n return _result368.GetSuccess(), nil\n}", "func (s *Subscriber) AddQueueArg(key string, value interface{}) *Subscriber {\n\tif s.QueueOpt.Args == nil {\n\t\ts.QueueOpt.Args = make(map[string]interface{})\n\t}\n\ts.QueueOpt.Args[key] = value\n\treturn s\n}", "func (_BaseAccessWallet *BaseAccessWalletTransactor) SetAddressKMS(opts *bind.TransactOpts, address_KMS common.Address) 
(*types.Transaction, error) {\n\treturn _BaseAccessWallet.contract.Transact(opts, \"setAddressKMS\", address_KMS)\n}", "func groomQueues(queues *Queues) (err kv.Error) {\n\tfor qName, qDetails := range *queues {\n\t\t// If we have enough runners drop the queue as it needs nothing done to it\n\t\tif len(qDetails.NodeGroup) == 0 || qDetails.Running >= qDetails.Ready+qDetails.NotVisible {\n\t\t\tif logger.IsTrace() {\n\t\t\t\tlogger.Trace(\"queue already handled\", \"queue\", qName, \"stack\", stack.Trace().TrimRuntime())\n\t\t\t}\n\t\t\tdelete(*queues, qName)\n\t\t}\n\t}\n\treturn nil\n}", "func (m *AudioRoutingGroup) SetSources(value []string)() {\n err := m.GetBackingStore().Set(\"sources\", value)\n if err != nil {\n panic(err)\n }\n}", "func (s *SQSServer) pollQueues(pollctx, taskctx context.Context, queues []QueueConf) error {\n\tfor _, qconf := range queues {\n\t\tq, err := s.getQueue(pollctx, qconf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq := &sqs.GetQueueAttributesInput{\n\t\t\tAttributeNames: []types.QueueAttributeName{(\"VisibilityTimeout\")},\n\t\t\tQueueUrl: &q.url,\n\t\t}\n\t\tresp, err := s.sqsSrv(q.QueueConf).GetQueueAttributes(pollctx, req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to get queue attributes for '%s' - %s\", q.Name, err.Error())\n\t\t}\n\t\tto := resp.Attributes[\"VisibilityTimeout\"]\n\t\tif to == \"\" {\n\t\t\treturn fmt.Errorf(\"No visibility timeout returned by SQS for queue '%s'\", q.Name)\n\t\t}\n\t\tvisTimeout, err := strconv.Atoi(to)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to convert visibility timeout from '%s' to int - '%s'\", to, err.Error())\n\t\t}\n\t\t// Each queue runs in a dedicated go routine.\n\t\tgo func(vt int32) {\n\t\t\ts.queuePollers.Add(1)\n\t\t\tdefer s.queuePollers.Done()\n\t\t\ts.run(pollctx, taskctx, q, vt)\n\t\t}(int32(visTimeout))\n\t}\n\n\treturn nil\n}", "func (_Contract *ContractTransactor) SetDNSRecords(opts *bind.TransactOpts, node [32]byte, data []byte) 
(*types.Transaction, error) {\n\treturn _Contract.contract.Transact(opts, \"setDNSRecords\", node, data)\n}", "func (s *Series) SetAlias(a string) {\n\ts.alias = a\n}", "func (log *logging) setBackend(name string) error {\n\tif log.active.Name() == name {\n\t\treturn nil\n\t}\n\n\tcreateFn, ok := log.backend[name]\n\tif !ok {\n\t\treturn loggerError(\"can't activate unknown backend '%s'\", name)\n\t}\n\n\tlog.active.Stop()\n\tlog.active = createFn()\n\tlog.active.SetSourceAlignment(log.maxname)\n\n\treturn nil\n}", "func (s *server) setAllowedHosts(allowedHosts []string) {\n\ts.hosts.Lock()\n\tdefer s.hosts.Unlock()\n\ts.hosts.table = make(map[string]bool, len(allowedHosts))\n\ts.hosts.wildcards = nil\n\tfor _, h := range allowedHosts {\n\t\tif strings.Index(h, \"*\") != -1 {\n\t\t\ts.hosts.wildcards = append(s.hosts.wildcards, strings.ToLower(h))\n\t\t} else {\n\t\t\ts.hosts.table[strings.ToLower(h)] = true\n\t\t}\n\t}\n}", "func (o *PostMultiNodeDeviceParams) SetAliases(aliases *string) {\n\to.Aliases = aliases\n}", "func (q *inMemoryQueue) SwitchQueue(src, dest string) (string, error) {\n\tq.mux.Lock()\n\tdefer q.mux.Unlock()\n\tqueue := q.queues[src]\n\tif len(queue) == 0 {\n\t\treturn \"\", fmt.Errorf(\"queue %s length is 0\", src)\n\t}\n\tvalue := queue[0]\n\tq.queues[src] = queue[1:]\n\tq.queues[dest] = append(q.queues[dest], value)\n\treturn value, nil\n}", "func (b *bar) SetPostfix(postfix string) {\n\tb.mutex.Lock()\n\tdefer b.mutex.Unlock()\n\n\tb.postfix = postfix\n}", "func (m *MockAMQPChan) QueueBind(arg0, arg1, arg2 string, arg3 bool, arg4 amqp.Table) error {\n\tret := m.ctrl.Call(m, \"QueueBind\", arg0, arg1, arg2, arg3, arg4)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func brokerSetAddress(bs *v1alpha1.BrokerStatus, url *apis.URL) {\n\tif url != nil {\n\t\tbs.Address.Hostname = url.Host\n\t\tbs.Address.URL = url\n\t\tbrokerCondSet.Manage(bs).MarkTrue(v1alpha1.BrokerConditionAddressable)\n\t} else {\n\t\tbs.Address.Hostname = 
\"\"\n\t\tbs.Address.URL = nil\n\t\tbrokerCondSet.Manage(bs).MarkFalse(v1alpha1.BrokerConditionAddressable, \"NotAddressable\", \"broker service has .status.addressable.url == nil\")\n\t}\n}", "func (o *Project) SetAlias(v []ProjectAlias) {\n\to.Alias = &v\n}", "func (c *Client) SetDNSNameservers(ctx context.Context, dns []string) error {\n\tconst uriFmt = \"/api/v2/domain/%v/dns/nameservers\"\n\n\treq, err := c.buildRequest(ctx, http.MethodPost, fmt.Sprintf(uriFmt, c.domain), DomainDNSNameservers{\n\t\tDNS: dns,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn c.performRequest(req, nil)\n}", "func (o EndpointsResponsePtrOutput) Queue() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *EndpointsResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Queue\n\t}).(pulumi.StringPtrOutput)\n}", "func (this *AVTransport) SaveQueue(instanceId uint32, title, objectId string) (string, error) {\n\ttype Response struct {\n\t\tXMLName xml.Name\n\t\tAssignedObjectID string\n\t\tErrorResponse\n\t}\n\targs := []Arg{\n\t\t{\"InstanceID\", instanceId},\n\t\t{\"Title\", title},\n\t\t{\"ObjectID\", objectId},\n\t}\n\tresponse := this.Svc.Call(\"SaveQueue\", args)\n\tdoc := Response{}\n\txml.Unmarshal([]byte(response), &doc)\n\treturn doc.AssignedObjectID, doc.Error()\n}", "func (t *Topology) AddQueueUnbind(arg QueueUnbind) *Topology {\n\tt.mutex.Lock()\n\tt.queueUnbind = append(t.queueUnbind, arg)\n\tt.mutex.Unlock()\n\tt.update()\n\treturn t\n}" ]
[ "0.58699656", "0.5864439", "0.5774504", "0.57054865", "0.56945664", "0.5664903", "0.5560596", "0.5487568", "0.54635847", "0.54438704", "0.54421866", "0.54008865", "0.5364136", "0.53472435", "0.53410256", "0.53387004", "0.52517086", "0.5212178", "0.5209478", "0.51821357", "0.51305914", "0.5121906", "0.51203305", "0.5106302", "0.51037186", "0.50736773", "0.50679135", "0.50565827", "0.50504196", "0.5050117", "0.50300944", "0.5021378", "0.5014274", "0.49563086", "0.49056625", "0.48917606", "0.4881463", "0.48224744", "0.48186257", "0.48013216", "0.47851634", "0.47694933", "0.4760049", "0.47597012", "0.47594845", "0.4753708", "0.47520944", "0.47453326", "0.47044632", "0.46949625", "0.46666938", "0.46584886", "0.46521276", "0.46399477", "0.46340173", "0.46282148", "0.46164313", "0.46142542", "0.4607874", "0.45895898", "0.4587896", "0.45801967", "0.456608", "0.45604253", "0.4555825", "0.4540522", "0.4538378", "0.45334113", "0.45292068", "0.45239234", "0.451145", "0.4510785", "0.4500103", "0.44990358", "0.44959533", "0.44947004", "0.44933653", "0.44883806", "0.44837838", "0.4479961", "0.44775924", "0.4476557", "0.44764692", "0.44636077", "0.44628668", "0.44488588", "0.44482812", "0.44380865", "0.44335026", "0.4433337", "0.4426735", "0.44263488", "0.44173378", "0.4414444", "0.44073564", "0.44043568", "0.43997076", "0.43964738", "0.439049", "0.4380159" ]
0.6917118
0
GetRemoteQueues returns the RemoteQueues field value
func (o *QueueManager) GetRemoteQueues() []RemoteQueue { if o == nil { var ret []RemoteQueue return ret } return o.RemoteQueues }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueues, true\n}", "func (o *QueueManager) SetRemoteQueues(v []RemoteQueue) {\n\to.RemoteQueues = v\n}", "func GetQueues(c *gin.Context) {\n\t//TODO: create a while both back and front until value is != nil\n\tsize := len(queue)\n\tlog.Printf(\"squeue: %v\", queue)\n\tif size == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue don't have any item!\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"queues\": queue,\n\t})\n\tlog.Printf(\"equeue: %v\", queue)\n}", "func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}", "func GetRemoteHosts() []string {\r\n\tret := make([]string, 0)\r\n\r\n\tmutex.RLock()\r\n\tdefer mutex.RUnlock()\r\n\r\n\tnodeKey := hex.EncodeToString(GetNodePubKey())\r\n\tfor pubKey, item := range nodes {\r\n\t\tif pubKey != nodeKey && !item.Stopped {\r\n\t\t\tret = append(ret, item.TCPAddress)\r\n\t\t}\r\n\t}\r\n\treturn ret\r\n}", "func (i *Inspector) Queues() ([]string, error) {\n\treturn i.rdb.AllQueues()\n}", "func GetAvailableQueues() ([]string, error) {\n\tclient := &http.Client{}\n\n\tres, err := client.Get(Host + \"/queues\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trespBody := res.Body\n\tdefer respBody.Close()\n\n\tavailableQueues := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{}\n\tif err := json.NewDecoder(respBody).Decode(&availableQueues); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn availableQueues.Queues, nil\n}", "func (p *Project) Queues() (*[]Queue, error) {\n qs := make([]Queue, 0)\n err := Mongo.Get(\"queue\", bson.M{\"project\": p.ID}, MaxQueuesPerProject, &qs)\n return &qs, err\n}", "func (a 
*Client) GetMsgVpnQueues(params *GetMsgVpnQueuesParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnQueuesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnQueuesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnQueues\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/queues\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnQueuesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnQueuesOK), nil\n\n}", "func GetRemoteServers() ([]*remoteServer, error) {\n\ts, err := getStorage()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif s.RemoteServers == nil {\n\t\treturn make([]*remoteServer, 0), nil\n\t}\n\n\treturn s.RemoteServers, nil\n}", "func (storage *SrvStorage) GetVhostQueues(vhost string) []*queue.Queue {\n\tvar queues []*queue.Queue\n\tstorage.db.Iterate(\n\t\tfunc(key []byte, value []byte) {\n\t\t\tif !bytes.HasPrefix(key, []byte(queuePrefix)) || getVhostFromKey(string(key)) != vhost {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tq := &queue.Queue{}\n\t\t\tq.Unmarshal(value, storage.protoVersion)\n\t\t\tqueues = append(queues, q)\n\t\t},\n\t)\n\n\treturn queues\n}", "func (a *Client) GetMsgVpnJndiQueues(params *GetMsgVpnJndiQueuesParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnJndiQueuesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnJndiQueuesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnJndiQueues\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/jndiQueues\",\n\t\tProducesMediaTypes: 
[]string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnJndiQueuesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnJndiQueuesOK), nil\n\n}", "func (bs *BeanstalkdConnectionPool) ListQueues() (queueNames []string, err error) {\n\tqueueNames, err = bs.getGlobalConnect().ListTubes()\n\treturn\n}", "func listQueues(ENV string) []string {\n \t// Using the SDK's default configuration, loading additional config\n\t// and credentials values from the environment variables, shared\n\t// credentials, and shared configuration files\n\n\tsess, err := session.NewSession(&aws.Config{\n\t Region: aws.String(\"us-east-1\")},\n\t)\n\n // Create a SQS service client.\n svc := sqs.New(sess)\n\n\t//have to create a session object first\n\toutput, err := svc.ListQueues(&sqs.ListQueuesInput{\n\t QueueNamePrefix: aws.String(ENV),\n })\n\tif err != nil { panic(err) }\n\n\tqueues := output.QueueUrls\n\tfinal_queues := []string{}\n\n\tfor _, i := range queues {\n\t fmt.Println(string(*i))\n\t final_queues = append(final_queues, *i)\n }\n\treturn final_queues\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}", "func (t *TopicCache) GetQueue(projectName, serviceName string) []string {\n\tt.RLock()\n\tdefer t.RUnlock()\n\n\tif len(t.inQueue[projectName+serviceName]) >= 100 {\n\t\treturn 
t.inQueue[projectName+serviceName][:99]\n\t}\n\n\treturn t.inQueue[projectName+serviceName]\n}", "func (s *API) ListQueues(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"ListQueues\")\n\n\tqueueNamePrefix := req.FormValue(\"QueueNamePrefix\")\n\tvar queues []string\n\tfor k, v := range s.sqs.queues {\n\t\tif strings.HasPrefix(k, queueNamePrefix) {\n\t\t\tqueues = append(queues, v.url)\n\t\t}\n\t}\n\n\tresponse := ListQueuesResponse{\n\t\tResult: ListQueuesResult{queues},\n\t\tMetaData: ResponseMetaData{\"00000000-0000-0000-0000-000000000000\"},\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\tenc := xml.NewEncoder(w)\n\tenc.Indent(\" \", \" \")\n\tif err := enc.Encode(response); err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t}\n}", "func getServerQueue() (err error) {\n\tserverData := make([]byte, 256)\n\tserverConnection, err := net.Dial(\"udp\", ServerAddress)\n\tif err != nil {\n\t\treturn err\n\t} else {\n\t\tdefer serverConnection.Close()\n\t}\n\n\t// UDP voodoo to get server info -- https://github.com/LiquidObsidian/fivereborn-query/blob/master/index.js#L54\n\tfmt.Fprintf(serverConnection, \"\\xFF\\xFF\\xFF\\xFFgetinfo f\")\n\t_, err = bufio.NewReader(serverConnection).Read(serverData)\n\n\tif err == nil {\n\t\tserverData := bytes.Split(serverData, []byte(\"\\n\"))\n\t\tserverDetails := bytes.Split(serverData[1], []byte(\"\\\\\"))\n\t\tserverQueue := bytes.FieldsFunc(serverDetails[12], func(c rune) bool { return c == '[' || c == ']' })\n\n\t\tcurrentPlayerValues, _ := strconv.ParseInt(string(serverDetails[4]), 0, 64)\n\t\tcurrentserverQueueValues, _ := strconv.ParseInt(string(serverQueue[0]), 0, 64)\n\t\tServerDetails.ServerQueue.CurrentPlayers = currentPlayerValues\n\n\t\tif currentserverQueueValues >= 1 {\n\t\t\tServerDetails.ServerQueue.CurrentQueue = currentserverQueueValues\n\t\t}\n\t} else {\n\t\treturn err\n\t}\n\n\treturn\n}", "func (m *AudioRoutingGroup) GetReceivers()([]string) {\n val, err := 
m.GetBackingStore().Get(\"receivers\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]string)\n }\n return nil\n}", "func (svc *AdminBuildService) GetQueue(opt *GetQueueOptions) (*[]library.BuildQueue, *Response, error) {\n\t// set the API endpoint path we send the request to\n\tu := \"/api/v1/admin/builds/queue\"\n\n\t// add optional arguments if supplied\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// BuildQueue type we want to return\n\tv := new([]library.BuildQueue)\n\n\tresp, err := svc.client.Call(\"GET\", u, nil, v)\n\n\treturn v, resp, err\n}", "func (base Base) ListRemote() (result []string, err error) {\n\treturn\n}", "func RemoteBucketList(remoteURL string) ([]couchbase.BucketInfo, error) {\n\tbucketInfosObj, err := simple_utils.ExecWithTimeout2(remoteBucketList, remoteURL, base.DefaultHttpTimeout, logger_utils)\n\tif bucketInfosObj != nil {\n\t\treturn bucketInfosObj.([]couchbase.BucketInfo), err\n\t} else {\n\t\treturn nil, err\n\t}\n}", "func (p *Process) CmdGetQueue(pac teoapi.Packet) (err error) {\n\tdata := pac.RemoveTrailingZero(pac.Data())\n\trequest := cdb.KeyValue{Cmd: pac.Cmd()}\n\tif err = request.UnmarshalText(data); err != nil {\n\t\treturn\n\t}\n\t// Return only Value for text requests and all fields for json\n\tresponce := request\n\tif responce.Value, err = p.tcdb.GetQueue(request.Key); err != nil {\n\t\treturn\n\t} else if !request.RequestInJSON {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), responce.Value)\n\t} else if retdata, err := responce.MarshalText(); err == nil {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), retdata)\n\t}\n\treturn\n}", "func remoteBucketList(remoteURLObj interface{}) (interface{}, error) {\n\tremoteURL := remoteURLObj.(string)\n\treturn couchbase.GetBucketList(remoteURL)\n}", "func (m SQSMonitor) receiveQueueMessages(qURL string) ([]*sqs.Message, error) {\n\tresult, err := 
m.SQS.ReceiveMessage(&sqs.ReceiveMessageInput{\n\t\tAttributeNames: []*string{\n\t\t\taws.String(sqs.MessageSystemAttributeNameSentTimestamp),\n\t\t},\n\t\tMessageAttributeNames: []*string{\n\t\t\taws.String(sqs.QueueAttributeNameAll),\n\t\t},\n\t\tQueueUrl: &qURL,\n\t\tMaxNumberOfMessages: aws.Int64(10),\n\t\tVisibilityTimeout: aws.Int64(20), // 20 seconds\n\t\tWaitTimeSeconds: aws.Int64(20), // Max long polling\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn result.Messages, nil\n}", "func (connector *DbConnector) GetRemoteTriggersToCheck(count int) ([]string, error) {\n\treturn connector.getTriggersToCheck(remoteTriggersToCheckKey, count)\n}", "func (c *restClient) ListQueues(ctx context.Context, req *cloudtaskspb.ListQueuesRequest, opts ...gax.CallOption) *QueueIterator {\n\tit := &QueueIterator{}\n\treq = proto.Clone(req).(*cloudtaskspb.ListQueuesRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*cloudtaskspb.Queue, string, error) {\n\t\tresp := &cloudtaskspb.ListQueuesResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v2beta3/%v/queues\", req.GetParent())\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetFilter() != \"\" {\n\t\t\tparams.Add(\"filter\", fmt.Sprintf(\"%v\", req.GetFilter()))\n\t\t}\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\t\tif req.GetReadMask() != nil {\n\t\t\treadMask, err := 
protojson.Marshal(req.GetReadMask())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t\tparams.Add(\"readMask\", string(readMask[1:len(readMask)-1]))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetQueues(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}", "func (client *Client) GetQueueURL(name string) (string, error) {\n\tvar parsedResponse GetQueueURLResult\n\turl := NewGetQueueURLRequest(client.EndPointURL, name).URL()\n\n\tresp, err := client.Get(url)\n\tif err != 
nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\treturn \"\", errors.New(string(body))\n\t}\n\n\terr = xml.Unmarshal(body, &parsedResponse)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn parsedResponse.QueueURL, nil\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) {\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}", "func (q *Queue) GetQueue() []types.Event {\n\treturn q.Queue\n}", "func (u *Unpackerr) getSonarrQueue() {\n\tfor _, server := range u.Sonarr {\n\t\tif server.APIKey == \"\" {\n\t\t\tu.Debugf(\"Sonarr (%s): skipped, no API key\", server.URL)\n\n\t\t\tcontinue\n\t\t}\n\n\t\tqueue, err := server.GetQueue(DefaultQueuePageSize, 1)\n\t\tif err != nil {\n\t\t\tu.Printf(\"[ERROR] Sonarr (%s): %v\", server.URL, err)\n\n\t\t\treturn\n\t\t}\n\n\t\t// Only update if there was not an error fetching.\n\t\tserver.Queue = queue\n\t\tu.Printf(\"[Sonarr] Updated (%s): %d Items Queued\", server.URL, len(queue.Records))\n\t}\n}", "func (b *Buckets) RemoteBuckets(ctx context.Context, id thread.ID) (list []Info, err error) {\n\tctx = b.Context(ctx)\n\tvar threads []cmd.Thread\n\tif id.Defined() {\n\t\tthreads = []cmd.Thread{{ID: id}}\n\t} else {\n\t\tthreads = b.clients.ListThreads(ctx, true)\n\t}\n\tfor _, t := range threads {\n\t\tctx = common.NewThreadIDContext(ctx, t.ID)\n\t\tres, err := b.clients.Buckets.List(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, root := range res.Roots {\n\t\t\tinfo, err 
:= pbRootToInfo(root)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tlist = append(list, info)\n\t\t}\n\t}\n\treturn list, nil\n}", "func (o *VnicEthAdapterPolicyInventory) GetRxQueueSettings() VnicEthRxQueueSettings {\n\tif o == nil || o.RxQueueSettings.Get() == nil {\n\t\tvar ret VnicEthRxQueueSettings\n\t\treturn ret\n\t}\n\treturn *o.RxQueueSettings.Get()\n}", "func (this *Queue) GetQueue() (val Mensaje, err error) {\n\t// Primero determina si la cola está vacía\n\tif this.rear == this.front {\n\t\treturn Mensaje{0, \"0\", \"0\"}, errors.New(\"Cola de Mensajes Vacia\")\n\t}\n\tthis.front++\n\tval = this.array[this.front]\n\treturn val, err\n}", "func (p *Pool) GetQueue() chan ThreeDPrinter {\n\treturn p.printers\n}", "func getMessages(svc *sqs.SQS, queue string) (*sqs.ReceiveMessageOutput, error) {\n\tparams := &sqs.ReceiveMessageInput{\n\t\tQueueUrl: &queue,\n\t\tMaxNumberOfMessages: aws.Int64(maxNumMessagesToFetch),\n\t\tVisibilityTimeout: aws.Int64(defaultVisibilityTimeout),\n\t\tWaitTimeSeconds: aws.Int64(longPollTimeSeconds),\n\t\tMessageAttributeNames: requiredAttributes,\n\t}\n\tlogging.Debug(\"Polling SQS queue for messages.\", nil)\n\tresp, err := svc.ReceiveMessage(params)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}", "func (a *Client) GetMsgVpnQueue(params *GetMsgVpnQueueParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnQueueOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnQueueParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnQueue\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/queues/{queueName}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnQueueReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: 
params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnQueueOK), nil\n\n}", "func GetDefaultRemoteHosts() []string {\r\n\tret := make([]string, 0)\r\n\r\n\tmutex.RLock()\r\n\tdefer mutex.RUnlock()\r\n\r\n\tnodeKey := hex.EncodeToString(GetNodePubKey())\r\n\tfor pubKey, item := range nodes {\r\n\t\tif pubKey != nodeKey && !item.Stopped {\r\n\t\t\tret = append(ret, item.TCPAddress)\r\n\t\t}\r\n\t}\r\n\tif len(ret) == 0 && len(conf.Config.NodesAddr) > 0 {\r\n\t\tret = append(ret, conf.Config.NodesAddr[0])\r\n\t}\r\n\treturn ret\r\n}", "func (o *NSQProducer) GetRemoteAddress() string {\n\tif o == nil || o.RemoteAddress == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.RemoteAddress\n}", "func GetCachedRemoteRepos(artDetails *jfauth.ServiceDetails) (*[]string, error) {\n\tremoteRepos := []string{}\n\tstorageInfoGB := []RepoStorageUsedSpaceInfo{}\n\tresp, err := getHttpResp(artDetails, \"api/storageinfo\")\n\tif err != nil {\n\t\tjflog.Error(\"Failed to get http resp for api/storageinfo\")\n\t}\n\tStorageInfo := &StorageInfo{}\n\tif err := json.Unmarshal(resp, &StorageInfo); err != nil {\n\t\treturn &remoteRepos, err\n\t}\n\n\t// Gather repoType CACHE that has storage space > 1 GB\n\tfor _, r := range *&StorageInfo.RepoStorage {\n\t\tif r.RepoType == \"CACHE\" && strings.Contains(r.UsedSpace, \"GB\") {\n\t\t\tre := regexp.MustCompile(`[-]?\\d[\\d,]*[\\.]?[\\d{2}]*`)\n\t\t\tusedSpaceGB, err := strconv.ParseFloat(re.FindString(r.UsedSpace), 64)\n\t\t\tif err != nil {\n\t\t\t\tjflog.Error(\"Failed used space to float for repo %s\", r.Key)\n\t\t\t}\n\t\t\tstorageInfoGB = append(storageInfoGB, RepoStorageUsedSpaceInfo{r.Key, r.RepoType, r.FoldersCount, r.FilesCount, usedSpaceGB, r.PackageType})\n\n\t\t}\n\t}\n\n\tsort.Slice(storageInfoGB, func(i, j int) bool { return storageInfoGB[i].UsedSpaceGB > storageInfoGB[j].UsedSpaceGB })\n\n\t//for _, r := range storageInfoGB {\n\t//\tremoteRepos = 
append(remoteRepos, strings.ReplaceAll(r.Key, \"-cache\", \"\"))\n\t//}\n\tremoteRepos = append([]string{\"atlassian\"}, remoteRepos...)\n\tremoteRepos = append([]string{\"docker-bintray-io\"}, remoteRepos...)\n\treturn &remoteRepos, nil\n}", "func (s *SQSServer) pollQueues(pollctx, taskctx context.Context, queues []QueueConf) error {\n\tfor _, qconf := range queues {\n\t\tq, err := s.getQueue(pollctx, qconf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq := &sqs.GetQueueAttributesInput{\n\t\t\tAttributeNames: []types.QueueAttributeName{(\"VisibilityTimeout\")},\n\t\t\tQueueUrl: &q.url,\n\t\t}\n\t\tresp, err := s.sqsSrv(q.QueueConf).GetQueueAttributes(pollctx, req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to get queue attributes for '%s' - %s\", q.Name, err.Error())\n\t\t}\n\t\tto := resp.Attributes[\"VisibilityTimeout\"]\n\t\tif to == \"\" {\n\t\t\treturn fmt.Errorf(\"No visibility timeout returned by SQS for queue '%s'\", q.Name)\n\t\t}\n\t\tvisTimeout, err := strconv.Atoi(to)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to convert visibility timeout from '%s' to int - '%s'\", to, err.Error())\n\t\t}\n\t\t// Each queue runs in a dedicated go routine.\n\t\tgo func(vt int32) {\n\t\t\ts.queuePollers.Add(1)\n\t\t\tdefer s.queuePollers.Done()\n\t\t\ts.run(pollctx, taskctx, q, vt)\n\t\t}(int32(visTimeout))\n\t}\n\n\treturn nil\n}", "func (o *QueueManager) GetAliasQueues() []AliasQueue {\n\tif o == nil {\n\t\tvar ret []AliasQueue\n\t\treturn ret\n\t}\n\n\treturn o.AliasQueues\n}", "func (m *MatchInfo) GetQueue(client *static.Client) (static.Queue, error) {\n\treturn client.GetQueue(m.QueueID)\n}", "func (svc *SQS) XGetQueueURL(ctx context.Context, queueName string) (queueURL string, err error) {\n\tresp, err := svc.GetQueueURL(ctx, GetQueueURLRequest{\n\t\tQueueName: queueName,\n\t})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn resp.QueueURL, nil\n}", "func (cfg Config) GetRemoteHost() (remoteHost string) {\n\treturn 
cfg.RemoteHost\n}", "func (cfg *Config) MQServers() string {\n\treturn os.Getenv(\"MQ_SERVERS\")\n}", "func (qc *queueClient) rejectQueue(msgs []rpccapnp.Message) []rpccapnp.Message {\n\tqc.mu.Lock()\n\tfor {\n\t\tc := qc.pop()\n\t\tif w := c.which(); w == qcallRemoteCall {\n\t\t\tmsgs = c.a.reject(msgs, errQueueCallCancel)\n\t\t} else if w == qcallLocalCall {\n\t\t\tc.f.Reject(errQueueCallCancel)\n\t\t} else if w == qcallDisembargo {\n\t\t\tm := newDisembargoMessage(nil, rpccapnp.Disembargo_context_Which_receiverLoopback, c.embargoID)\n\t\t\td, _ := m.Disembargo()\n\t\t\td.SetTarget(c.embargoTarget)\n\t\t\tmsgs = append(msgs, m)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tqc.mu.Unlock()\n\treturn msgs\n}", "func (s *Filters) SetQueues(v []*string) *Filters {\n\ts.Queues = v\n\treturn s\n}", "func (s QueueSetSpy) Queues() map[DeploymentID]*R11nQueue {\n\tres := s.Called()\n\treturn res.Get(0).(map[DeploymentID]*R11nQueue)\n}", "func PopulateQueues(c *gin.Context) {\n\tif queue == nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue doesn't exist, please create it!!!\",\n\t\t})\n\t\treturn\n\t}\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"roberto\",\n\t\tEMAIL: \"roberto@rr.com\",\n\t\tUUID: \"1\",\n\t\tMSG: \"lindo\",\n\t})\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"alex\",\n\t\tEMAIL: \"alex@rr.com\",\n\t\tUUID: \"2\",\n\t\tMSG: \"lindox\",\n\t})\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"ale\",\n\t\tEMAIL: \"ale@rr.com\",\n\t\tUUID: \"3\",\n\t\tMSG: \"linduxo\",\n\t})\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"msg\": queue,\n\t})\n}", "func GetQueued(sender types.Service) types.QueuedSender {\n\tqs := &queuedSender{\n\t\tsender: sender,\n\t}\n\treturn qs\n}", "func (s *rabbitMQScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) {\n\tmessages, publishRate, err := s.getQueueStatus()\n\tif err != nil {\n\t\treturn 
[]external_metrics.ExternalMetricValue{}, fmt.Errorf(\"error inspecting rabbitMQ: %s\", err)\n\t}\n\n\tvar metricValue resource.Quantity\n\tif s.metadata.mode == rabbitModeQueueLength {\n\t\tmetricValue = *resource.NewQuantity(int64(messages), resource.DecimalSI)\n\t} else {\n\t\tmetricValue = *resource.NewMilliQuantity(int64(publishRate*1000), resource.DecimalSI)\n\t}\n\n\tmetric := external_metrics.ExternalMetricValue{\n\t\tMetricName: metricName,\n\t\tValue: metricValue,\n\t\tTimestamp: metav1.Now(),\n\t}\n\n\treturn append([]external_metrics.ExternalMetricValue{}, metric), nil\n}", "func (a *Client) GetMsgVpnJndiQueue(params *GetMsgVpnJndiQueueParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnJndiQueueOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnJndiQueueParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnJndiQueue\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/jndiQueues/{queueName}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnJndiQueueReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnJndiQueueOK), nil\n\n}", "func (s *UserDataFilters) SetQueues(v []*string) *UserDataFilters {\n\ts.Queues = v\n\treturn s\n}", "func (a *Client) GetMsgVpnQueueSubscriptions(params *GetMsgVpnQueueSubscriptionsParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnQueueSubscriptionsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnQueueSubscriptionsParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnQueueSubscriptions\",\n\t\tMethod: 
\"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/queues/{queueName}/subscriptions\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnQueueSubscriptionsReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnQueueSubscriptionsOK), nil\n\n}", "func (m *VpnConfiguration) GetServers()([]VpnServerable) {\n val, err := m.GetBackingStore().Get(\"servers\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]VpnServerable)\n }\n return nil\n}", "func (player *musicPlayer) getQueueInfo() ([]string, error) {\n\tplayer.Lock()\n\tdefer player.Unlock()\n\tif len(player.state.queue) == 0 {\n\t\treturn nil, errors.New(cannot_get_queue_info_msg)\n\t}\n\t//make a copy to the queue\n\tcopy := make([]string, 0, len(player.state.queue))\n\tfor _, el := range player.state.queue {\n\t\tcopy = append(copy, el)\n\t}\n\treturn copy, nil\n}", "func GetRemoteHost(remoteURL string) *string {\n\tvar remoteHostReference []string\n\tremoteHostReference = []string{\"github\", \"gitlab\", \"bitbucket\", \"azure\", \"codecommit\"}\n\n\tfor _, host := range remoteHostReference {\n\t\tif strings.Contains(remoteURL, host) {\n\t\t\treturn &host\n\t\t}\n\t}\n\treturn nil\n}", "func QueueRemoteWrite(req *gomemcached.MCRequest) {\n\n\tkey := req.Key\n\tnodeList := getVbucketNode(int(findShard(string(key))))\n\tnodes := strings.Split(nodeList, \";\")\n\n\tif len(nodes) < 1 {\n\t\tlog.Fatal(\"Nodelist is empty. 
Cannot proceed\")\n\t}\n\n\tif len(nodes) < 2 {\n\t\t//no replica\n\t\treturn\n\t}\n\n\tvar remoteNode string\n\t// figure out which is the remote host and queue to the write to that node\n\tfor _, node := range nodes {\n\t\tfound := false\n\t\thostname := strings.Split(node, \":\")\n\t\tfor _, ip := range ipList {\n\t\t\tif ip == hostname[0] {\n\t\t\t\tfound = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif found == false {\n\t\t\tremoteNode = node\n\t\t}\n\t}\n\n\tri := &repItem{host: remoteNode, req: req, opcode: OP_REP}\n\trepChan <- ri\n\treturn\n}", "func (cfg Config) GetRmqQueueConfig(queue string) RmqQueue {\n\treturn cfg.rmqQueueMap[queue]\n}", "func getQueueUrl(id string) (queueUrl string, retErr error) {\n\n\t//Creazione client DynamoDB\n\tsvc := dynamodb.New(common.Sess)\n\n\tresult, err := svc.GetItem(&dynamodb.GetItemInput{\n\t\tTableName: aws.String(subTableName),\n\t\tKey: map[string]*dynamodb.AttributeValue{\n\t\t\t\"SubID\": {\n\t\t\t\tS: aws.String(id),\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\tcommon.Warning(\"[BROKER] Errore nel retreive del subscriber con ID: \" + id + \".\\n\" + err.Error())\n\t\treturn \"\", err\n\t}\n\n\titem := common.SubscriberEntry{}\n\n\terr = dynamodbattribute.UnmarshalMap(result.Item, &item)\n\tif err != nil {\n\t\tcommon.Warning(\"[BROKER] Errore nell'unmarshaling del risultato\")\n\t\treturn \"\", err\n\t}\n\tif item.SubID == \"\" {\n\t\tcommon.Warning(\"[BROKER] Nessun subscriber trovato con id \" + id)\n\t\treturn \"\", errors.New(\"no item found\")\n\t}\n\n\tcommon.Info(\"[BROKER] Subscriber trovato: \" + item.SubID + \"\\n\\t\" + item.QueueURL)\n\n\treturn item.QueueURL, nil\n}", "func (n *NetworkInterface) Get() (string, error) {\n\tn.mu.Lock()\n\tdefer n.mu.Unlock()\n\t//fmt.Println(\"qu len: \", len(n.Queue))\n\tif len(n.Queue) > 0 {\n\t\ttoReturn := n.Queue[0]\n\t\tn.Queue = n.Queue[1:]\n\t\treturn toReturn, nil\n\t}\n\treturn \"\", errors.New(\"Empty\")\n}", "func (h *Hospital) ConsumeQueues(ctx 
context.Context, t *testing.T) (events int, messages []string) {\n\tt.Helper()\n\treturn h.ConsumeQueuesWithLimit(ctx, t, -1, true)\n}", "func (multi_queue *MultiQueue) Pop(timeout int) (string, error) {\n\tq, err := multi_queue.SelectHealthyQueue()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tconn := q.pooledConnection.Get()\n\tdefer conn.Close()\n\n\tr, err := redis.Strings(conn.Do(\"BRPOP\", multi_queue.key, timeout))\n\tif err == nil {\n\t\treturn r[1], nil\n\t} else {\n\t\tif err != redis.ErrNil {\n\t\t\tq.QueueError()\n\t\t}\n\t\treturn \"\", err\n\t}\n}", "func (o LookupQueueResultOutput) AppEngineHttpQueue() AppEngineHttpQueueResponseOutput {\n\treturn o.ApplyT(func(v LookupQueueResult) AppEngineHttpQueueResponse { return v.AppEngineHttpQueue }).(AppEngineHttpQueueResponseOutput)\n}", "func (o *Replication) GetRemoteBucketID() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.RemoteBucketID\n}", "func ListMatchmakingQueues(settings *playfab.Settings, postData *ListMatchmakingQueuesRequestModel, entityToken string) (*ListMatchmakingQueuesResultModel, error) {\n if entityToken == \"\" {\n return nil, playfab.NewCustomError(\"entityToken should not be an empty string\", playfab.ErrorGeneric)\n }\n b, errMarshal := json.Marshal(postData)\n if errMarshal != nil {\n return nil, playfab.NewCustomError(errMarshal.Error(), playfab.ErrorMarshal)\n }\n\n sourceMap, err := playfab.Request(settings, b, \"/Match/ListMatchmakingQueues\", \"X-EntityToken\", entityToken)\n if err != nil {\n return nil, err\n }\n \n result := &ListMatchmakingQueuesResultModel{}\n\n config := mapstructure.DecoderConfig{\n DecodeHook: playfab.StringToDateTimeHook,\n Result: result,\n }\n \n decoder, errDecoding := mapstructure.NewDecoder(&config)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n \n errDecoding = decoder.Decode(sourceMap)\n if errDecoding != nil {\n return nil, 
playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n\n return result, nil\n}", "func (ClearTrans) GetQueue() string {\n\treturn \"cy_rubik_clearTrans\"\n}", "func (c *restClient) GetQueue(ctx context.Context, req *cloudtaskspb.GetQueueRequest, opts ...gax.CallOption) (*cloudtaskspb.Queue, error) {\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v2beta3/%v\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tif req.GetReadMask() != nil {\n\t\treadMask, err := protojson.Marshal(req.GetReadMask())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tparams.Add(\"readMask\", string(readMask[1:len(readMask)-1]))\n\t}\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\topts = append((*c.CallOptions).GetQueue[0:len((*c.CallOptions).GetQueue):len((*c.CallOptions).GetQueue)], opts...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &cloudtaskspb.Queue{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn resp, nil\n}", "func (s *API) GetQueueURL(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"GetQueueURL\")\n\n\tqueueName := req.FormValue(\"QueueName\")\n\tqueue, ok := s.sqs.queues[queueName]\n\tif !ok {\n\t\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\terror := ErrorResponse{\n\t\t\tError: ErrorResult{\n\t\t\t\tType: \"Not Found\",\n\t\t\t\tCode: \"AWS.SimpleQueueService.NonExistentQueue\",\n\t\t\t\tMessage: \"The specified queue does not exist for this wsdl version.\",\n\t\t\t},\n\t\t\tRequestId: \"00000000-0000-0000-0000-000000000000\",\n\t\t}\n\t\tenc := xml.NewEncoder(w)\n\t\tenc.Indent(\" \", \" \")\n\t\tif err := enc.Encode(error); err != nil {\n\t\t\tlog.Errorf(\"error: %s\", err)\n\t\t}\n\t\treturn\n\t}\n\n\tresponse := GetQueueURLResponse{\n\t\tResult: GetQueueURLResult{queue.url},\n\t\tMetaData: ResponseMetaData{\"00000000-0000-0000-0000-000000000000\"},\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\tenc := xml.NewEncoder(w)\n\tenc.Indent(\" \", \" \")\n\tif err := enc.Encode(response); err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t}\n\n}", "func groomQueues(queues *Queues) (err kv.Error) {\n\tfor qName, qDetails := range *queues {\n\t\t// If we have enough runners drop the queue as it needs nothing done to it\n\t\tif len(qDetails.NodeGroup) == 0 || qDetails.Running >= qDetails.Ready+qDetails.NotVisible {\n\t\t\tif logger.IsTrace() {\n\t\t\t\tlogger.Trace(\"queue already handled\", \"queue\", qName, \"stack\", stack.Trace().TrimRuntime())\n\t\t\t}\n\t\t\tdelete(*queues, qName)\n\t\t}\n\t}\n\treturn nil\n}", "func GetQueueURL(c context.Context, api SQSReceiveMessageAPI, input *sqs.GetQueueUrlInput) (*sqs.GetQueueUrlOutput, error) {\n\treturn api.GetQueueUrl(c, input)\n}", "func (r *RPC) 
GetQueueClient() queue.Client {\r\n\treturn r.c\r\n}", "func (s *ItemQueue) GetMessages() []int {\n\tvar messages []int\n\ts.lock.Lock()\n\n\tfor i := 0; i < len(s.items); i++ {\n\t\t\tmessages[i] = s.items[i].ID\n\t}\n\n\ts.lock.Unlock()\n\treturn messages\n}", "func (c *Config) GetRemoteHost() string {\n\tif c.sandboxMode {\n\t\treturn fmt.Sprintf(\"sandbox.payfast.co.za\")\n\t}\n\n\treturn fmt.Sprintf(\"www.payfast.co.za\")\n}", "func receiveQueueMessage(receiveQueue string) (messages []*sqs.Message, retErr error) {\n\n\tsvc := sqs.New(common.Sess)\n\tvar messagesList []*sqs.Message\n\n\tresult, err := svc.ReceiveMessage(&sqs.ReceiveMessageInput{\n\t\tAttributeNames: []*string{\n\t\t\taws.String(sqs.MessageSystemAttributeNameSentTimestamp),\n\t\t},\n\t\tMessageAttributeNames: []*string{\n\t\t\taws.String(sqs.QueueAttributeNameAll),\n\t\t},\n\t\tWaitTimeSeconds: aws.Int64(common.Config.PollingTime), \t//Long polling\n\t\tMaxNumberOfMessages: aws.Int64(common.Config.MaxRcvMessage),\n\t\tQueueUrl: &receiveQueue,\n\t})\n\n\n\tif err != nil {\n\t\tcommon.Warning(\"[BROKER] Errore nell'ottenimento del messaggio. \" + err.Error())\n\t\treturn nil, err\n\t}\n\tif len(result.Messages) == 0 {\n\t\tcommon.Info(\"[BROKER] Nessun messaggio ricevuto\")\n\t\treturn\n\t} else {\n\t\tsendLogMessage(\"Messaggi ricevuti: \" + strconv.Itoa(len(result.Messages)))\n\n\t\tfor _, mess := range result.Messages {\n\n\t\t\terr = sendMessage(*mess)\n\n\t\t\tif err != nil {\n\t\t\t\tcommon.Warning(\"[BROKER] Errore nell'invio del messaggio dal broker. \" + err.Error())\n\t\t\t}\n\n\t\t\t//Messaggio eliminato solo dopoche viene mandato\n\t\t\t_, err := svc.DeleteMessage(&sqs.DeleteMessageInput{\n\t\t\t\tQueueUrl: &receiveQueue,\n\t\t\t\tReceiptHandle: mess.ReceiptHandle,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tcommon.Info(\"[BROKER] Errore nell'eliminazione del messaggio. 
\" + err.Error())\n\t\t\t} else {\n\t\t\t\tmessagesList = append(messagesList, mess)\n\t\t\t\tcommon.Info(\"[BROKER] Messaggio eliminato con successo\")\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\treturn messagesList, nil\n\n}", "func GetFromQueue(queue string) ([]byte, error) {\n\treturn cache.Get(queue)\n}", "func (d *Device) GetQueue(qf *QueueFamily) *Queue {\n\n\tvar vkq vk.Queue\n\n\tvk.GetDeviceQueue(d.VKDevice, uint32(qf.Index), 0, &vkq)\n\n\tvar queue Queue\n\tqueue.QueueFamily = qf\n\tqueue.Device = d\n\tqueue.VKQueue = vkq\n\n\treturn &queue\n}", "func (c *QueueClient) Get(ctx context.Context, id int) (*Queue, error) {\n\treturn c.Query().Where(queue.ID(id)).Only(ctx)\n}", "func (c *apiConsumers) TeamsQueue() <-chan *TeamDTO {\n\treturn c.queue\n}", "func (c *connection) getQueueLength(inputs input) (int, error) {\n\n\tif inputs.limit > 0 {\n\t\treturn inputs.limit, nil\n\t}\n\n\tqLength, err := redis.Int(conn.redis.Do(\"LLEN\", inputs.source))\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif qLength < 1 {\n\t\treturn 0, fmt.Errorf(\"Source queue is empty\")\n\t}\n\n\treturn qLength, nil\n}", "func (client DeploymentsClient) GetRemoteDebuggingConfigSender(req *http.Request) (*http.Response, error) {\n\treturn client.Send(req, azure.DoRetryWithRegistration(client.Client))\n}", "func (wr *WorkerRef) GetRemote(ctx context.Context, createIfNeeded bool, compressionType compression.Type, g session.Group) (*solver.Remote, error) {\n\tif w, ok := wr.Worker.(interface {\n\t\tGetRemote(context.Context, cache.ImmutableRef, bool, compression.Type, session.Group) (*solver.Remote, error)\n\t}); ok {\n\t\treturn w.GetRemote(ctx, wr.ImmutableRef, createIfNeeded, compressionType, g)\n\t}\n\treturn wr.ImmutableRef.GetRemote(ctx, createIfNeeded, compressionType, g)\n}", "func printQueue(q *Queue) {\n\tfmt.Println(q.values)\n}", "func (s *server) GetBroadcasts(overhead, limit int) [][]byte {\n\treturn s.queue.GetBroadcasts(overhead, limit)\n}", "func (b *Backend) 
GetLeagueByQueue(league string, queue string) (*riotclient.LeagueListDTO, error) {\n\treturn nil, fmt.Errorf(\"Not implemented\")\n}", "func GetRemoteAddressSet(ctx *gin.Context) (remoteIp, remotePort string) {\n\tremoteIp, remotePort = \"0.0.0.0\", \"0\"\n\n\tif ctx == nil || ctx.Request == nil {\n\t\treturn\n\t}\n\n\tvar err error\n\tif remoteIp, remotePort, err = net.SplitHostPort(ctx.Request.RemoteAddr); err != nil {\n\t\treturn\n\t}\n\n\tforwardedRemoteIp := ctx.GetHeader(\"x-forwarded-for\")\n\n\t// Deal with forwarded remote ip\n\tif len(forwardedRemoteIp) > 0 {\n\t\tif forwardedRemoteIp == \"::1\" {\n\t\t\tforwardedRemoteIp = \"localhost\"\n\t\t}\n\n\t\tremoteIp = forwardedRemoteIp\n\t}\n\n\tif remoteIp == \"::1\" {\n\t\tremoteIp = \"localhost\"\n\t}\n\n\treturn remoteIp, remotePort\n}", "func (h *HTTPClient) Dequeue(ctx context.Context, token, projID, qName string, num int, timeout Timeout, wait Wait, delete bool) ([]DequeuedMessage, error) {\n\tif !timeoutInRange(timeout) {\n\t\treturn nil, ErrTimeoutOutOfRange\n\t}\n\tif !waitInRange(wait) {\n\t\treturn nil, ErrWaitOutOfRange\n\t}\n\n\tbody := &bytes.Buffer{}\n\tif err := json.NewEncoder(body).Encode(dequeueReq{Num: num, Timeout: int(timeout), Wait: int(wait), Delete: delete}); err != nil {\n\t\treturn nil, err\n\t}\n\treq, err := h.newReq(\"POST\", token, projID, fmt.Sprintf(\"queues/%s/reservations\", qName), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tret := new(dequeueResp)\n\tdoFunc := func(resp *http.Response, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tif err := json.NewDecoder(resp.Body).Decode(ret); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\tif err := gorion.HTTPDo(ctx, h.client, h.transport, req, doFunc); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret.Messages, nil\n}", "func radioGetQueueHandler(c echo.Context) error {\n\tlinks := radio.queue\n\tuserID := getUserIDFromContext(c)\n\tvotes := 
service.GetVotesForUser(links, userID)\n\n\tfor i, l := range links {\n\t\tif vote, ok := votes[l.LinkID]; ok {\n\t\t\tlinks[i].MyVote = vote\n\t\t} else {\n\t\t\tlinks[i].MyVote = 0\n\t\t}\n\t}\n\n\treturn c.JSON(http.StatusOK, echo.Map{\n\t\t\"links\": links,\n\t\t\"votes\": votes,\n\t})\n}", "func GetQueue(id string) Queue {\n\tservice := broker.GetService(ServiceName).(*QueueService)\n\treturn service.getQueue(id)\n}", "func (o *KvmPolicyInventory) GetRemotePort() int64 {\n\tif o == nil || o.RemotePort == nil {\n\t\tvar ret int64\n\t\treturn ret\n\t}\n\treturn *o.RemotePort\n}", "func (r *RPC) GetQueueClient() queue.Client {\n\treturn r.c\n}", "func mainAdminBucketRemoteList(ctx *cli.Context) error {\n\tcheckAdminBucketRemoteListSyntax(ctx)\n\n\t// Additional command specific theme customization.\n\tconsole.SetColor(\"RemoteListMessage\", color.New(color.Bold, color.FgHiGreen))\n\tconsole.SetColor(\"RemoteListEmpty\", color.New(color.FgYellow))\n\tconsole.SetColor(\"SourceBucket\", color.New(color.FgYellow))\n\tconsole.SetColor(\"TargetBucket\", color.New(color.FgYellow))\n\tconsole.SetColor(\"TargetURL\", color.New(color.FgHiWhite))\n\tconsole.SetColor(\"ARN\", color.New(color.FgCyan))\n\tconsole.SetColor(\"Arrow\", color.New(color.FgHiWhite))\n\n\t// Get the alias parameter from cli\n\targs := ctx.Args()\n\taliasedURL := args.Get(0)\n\t_, sourceBucket := url2Alias(aliasedURL)\n\n\t// Create a new MinIO Admin Client\n\tclient, err := newAdminClient(aliasedURL)\n\tfatalIf(err, \"Unable to initialize admin connection.\")\n\ttargets, e := client.ListRemoteTargets(globalContext, sourceBucket, ctx.String(\"service\"))\n\tfatalIf(probe.NewError(e).Trace(args...), \"Unable to list remote target\")\n\tprintRemotes(ctx, aliasedURL, targets)\n\treturn nil\n}", "func (o *SmscSession) GetRemoteAddr() string {\n\tif o == nil || o.RemoteAddr == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.RemoteAddr\n}", "func (q *Queue) Get(number int) ([]interface{}, error) 
{\n\tif number < 1 {\n\t\treturn []interface{}{}, nil\n\t}\n\n\tq.lock.Lock()\n\n\tif q.disposed {\n\t\tq.lock.Unlock()\n\t\treturn nil, errors.New(\"Queue has been disposed\")\n\t}\n\n\tvar items []interface{}\n\tif len(q.items) == 0 {\n\t\tsema := newSema()\n\t\tq.waiters.put(sema)\n\t\tsema.wg.Add(1)\n\t\tq.lock.Unlock()\n\n\t\tsema.wg.Wait()\n\t\t// We are now inside put's lock.\n\t\tif q.disposed {\n\t\t\treturn nil, errors.New(\"Queue has been disposed\")\n\t\t}\n\n\t\titems = q.items.get(number)\n\t\tsema.response.Done()\n\t\treturn items, nil\n\t}\n\n\titems = q.items.get(number)\n\tq.lock.Unlock()\n\treturn items, nil\n}", "func GetQueue(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *QueueState, opts ...pulumi.ResourceOption) (*Queue, error) {\n\tvar resource Queue\n\terr := ctx.ReadResource(\"aws-native:connect:Queue\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (bp *Processer) GetRemoteAddr() string {\n\taddr := bp.g.Sock.RemoteAddr()\n\treturn addr.String()\n}", "func (bq *InMemoryBuildQueue) ListPlatformQueues(ctx context.Context, request *emptypb.Empty) (*buildqueuestate.ListPlatformQueuesResponse, error) {\n\tbq.enter(bq.clock.Now())\n\tdefer bq.leave()\n\n\t// Obtain platform queue IDs in sorted order.\n\tplatformQueueList := append(platformQueueList(nil), bq.platformQueues...)\n\tsort.Sort(platformQueueList)\n\n\t// Extract status.\n\tplatformQueues := make([]*buildqueuestate.PlatformQueueState, 0, len(bq.platformQueues))\n\tfor _, pq := range platformQueueList {\n\t\tsizeClassQueues := make([]*buildqueuestate.SizeClassQueueState, 0, len(pq.sizeClassQueues))\n\t\tfor i, scq := range pq.sizeClassQueues {\n\t\t\texecutingWorkersCount := uint32(0)\n\t\t\tfor _, w := range scq.workers {\n\t\t\t\tif w.currentTask != nil {\n\t\t\t\t\texecutingWorkersCount++\n\t\t\t\t}\n\t\t\t}\n\t\t\tactiveInvocationsCount := uint32(0)\n\t\t\tfor _, i := range scq.invocations 
{\n\t\t\t\tif i.isActive() {\n\t\t\t\t\tactiveInvocationsCount++\n\t\t\t\t}\n\t\t\t}\n\t\t\tsizeClassQueues = append(sizeClassQueues, &buildqueuestate.SizeClassQueueState{\n\t\t\t\tSizeClass: pq.sizeClasses[i],\n\t\t\t\tTimeout: bq.cleanupQueue.getTimestamp(scq.cleanupKey),\n\t\t\t\tInvocationsCount: uint32(len(scq.invocations)),\n\t\t\t\tQueuedInvocationsCount: uint32(scq.queuedInvocations.Len()),\n\t\t\t\tActiveInvocationsCount: uint32(activeInvocationsCount),\n\t\t\t\tWorkersCount: uint32(len(scq.workers)),\n\t\t\t\tExecutingWorkersCount: executingWorkersCount,\n\t\t\t\tDrainsCount: uint32(len(scq.drains)),\n\t\t\t})\n\t\t}\n\t\tplatformQueues = append(platformQueues, &buildqueuestate.PlatformQueueState{\n\t\t\tName: pq.platformKey.GetPlatformQueueName(),\n\t\t\tSizeClassQueues: sizeClassQueues,\n\t\t})\n\t}\n\treturn &buildqueuestate.ListPlatformQueuesResponse{\n\t\tPlatformQueues: platformQueues,\n\t}, nil\n}", "func (svc *SQS) XListAllQueueURLs(ctx context.Context, queuePrefix string) (queueURLs []string, err error) {\n\tresp, err := svc.ListQueues(ctx, ListQueuesRequest{\n\t\tQueueNamePrefix: queuePrefix,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresults := resp.QueueURLs\n\tnextToken := resp.NextToken\n\tfor nextToken != \"\" {\n\t\tresp, err := svc.ListQueues(ctx, ListQueuesRequest{\n\t\t\tQueueNamePrefix: queuePrefix,\n\t\t\tNextToken: nextToken,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = append(results, resp.QueueURLs...)\n\t\tnextToken = resp.NextToken\n\t}\n\n\treturn results, nil\n}", "func (s *Store) GetQueueNames() ([]string, error) {\n\tvar names []string\n\treturn names, s.db.View(func(tx *bolt.Tx) error {\n\t\treturn s.queues(tx).ForEach(func(key, value []byte) error {\n\t\t\tnames = append(names, string(key))\n\t\t\treturn nil\n\t\t})\n\t})\n}" ]
[ "0.71619385", "0.61695373", "0.6127869", "0.6031343", "0.5995589", "0.5916818", "0.58918214", "0.5793508", "0.57635283", "0.57260025", "0.57182837", "0.56593096", "0.5565563", "0.5556938", "0.5451114", "0.54059637", "0.5343811", "0.53087986", "0.53000456", "0.52932256", "0.5202478", "0.5202026", "0.51352566", "0.5096544", "0.5083316", "0.50753343", "0.5045381", "0.5038165", "0.50206137", "0.50194395", "0.5001005", "0.49750003", "0.49579534", "0.4944364", "0.49298406", "0.49111122", "0.4910221", "0.4906531", "0.49031442", "0.48946214", "0.4891568", "0.4883973", "0.48648852", "0.48286587", "0.4789293", "0.4785026", "0.47594467", "0.47512728", "0.47358534", "0.47358033", "0.47339717", "0.4733591", "0.4727068", "0.47222948", "0.47170427", "0.47135445", "0.47113577", "0.4709004", "0.46976623", "0.46943414", "0.469314", "0.46926573", "0.46891418", "0.46861315", "0.46844608", "0.46694505", "0.4664064", "0.4657276", "0.4646711", "0.46339905", "0.46229827", "0.4618968", "0.46186715", "0.4612663", "0.46069223", "0.45983294", "0.45978802", "0.45940563", "0.4591997", "0.45916057", "0.4585771", "0.4584739", "0.45590138", "0.45555526", "0.45398208", "0.45337144", "0.45322356", "0.4525946", "0.45216835", "0.4516763", "0.45081013", "0.45048097", "0.45039165", "0.45037025", "0.44986156", "0.44960508", "0.44853693", "0.44829133", "0.44707265", "0.44690043" ]
0.76292956
0
GetRemoteQueuesOk returns a tuple with the RemoteQueues field value and a boolean to check if the value has been set.
func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) { if o == nil { return nil, false } return &o.RemoteQueues, true }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *QueueManager) GetRemoteQueues() []RemoteQueue {\n\tif o == nil {\n\t\tvar ret []RemoteQueue\n\t\treturn ret\n\t}\n\n\treturn o.RemoteQueues\n}", "func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AliasQueues, true\n}", "func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ClusterQueues, true\n}", "func (o *QueueManager) SetRemoteQueues(v []RemoteQueue) {\n\to.RemoteQueues = v\n}", "func (o *VnicEthAdapterPolicyInventory) GetRxQueueSettingsOk() (*VnicEthRxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RxQueueSettings.Get(), o.RxQueueSettings.IsSet()\n}", "func (o *NSQProducer) GetRemoteAddressOk() (*string, bool) {\n\tif o == nil || o.RemoteAddress == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RemoteAddress, true\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues) IsYANGGoStruct() {}", "func (o *VnicEthAdapterPolicyInventory) GetCompletionQueueSettingsOk() (*VnicCompletionQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CompletionQueueSettings.Get(), o.CompletionQueueSettings.IsSet()\n}", "func (o *NotificationConfig) GetReceiversOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Receivers, true\n}", "func (o *VulnUpdateNotification) GetQueueIdOk() (*string, bool) {\n\tif o == nil || o.QueueId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.QueueId, true\n}", "func (o *V0037JobProperties) GetRequeueOk() (*bool, bool) {\n\tif o == nil || o.Requeue == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Requeue, true\n}", "func GetAvailableQueues() ([]string, error) {\n\tclient := &http.Client{}\n\n\tres, err := client.Get(Host + \"/queues\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trespBody := res.Body\n\tdefer respBody.Close()\n\n\tavailableQueues := struct 
{\n\t\tQueues []string `json:\"queues\"`\n\t}{}\n\tif err := json.NewDecoder(respBody).Decode(&availableQueues); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn availableQueues.Queues, nil\n}", "func (i *Inspector) Queues() ([]string, error) {\n\treturn i.rdb.AllQueues()\n}", "func (o *KvmPolicyInventory) GetRemotePortOk() (*int64, bool) {\n\tif o == nil || o.RemotePort == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RemotePort, true\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue) IsYANGGoStruct() {}", "func (o *VnicEthAdapterPolicyInventory) GetTxQueueSettingsOk() (*VnicEthTxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.TxQueueSettings.Get(), o.TxQueueSettings.IsSet()\n}", "func (o *VirtualizationVmwareVirtualMachineAllOf) GetRemoteDisplayInfoOk() (*VirtualizationVmwareRemoteDisplayInfo, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RemoteDisplayInfo.Get(), o.RemoteDisplayInfo.IsSet()\n}", "func (a *Client) GetMsgVpnQueues(params *GetMsgVpnQueuesParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnQueuesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnQueuesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnQueues\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/queues\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnQueuesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnQueuesOK), nil\n\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues) IsYANGGoStruct() {}", "func GetQueues(c *gin.Context) {\n\t//TODO: create a 
while both back and front until value is != nil\n\tsize := len(queue)\n\tlog.Printf(\"squeue: %v\", queue)\n\tif size == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue don't have any item!\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"queues\": queue,\n\t})\n\tlog.Printf(\"equeue: %v\", queue)\n}", "func (o *Replication) GetRemoteBucketIDOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteBucketID, true\n}", "func (o *LocalDatabaseProvider) GetDnsServersOk() ([]string, bool) {\n\tif o == nil || o.DnsServers == nil {\n\t\treturn nil, false\n\t}\n\treturn o.DnsServers, true\n}", "func (o *RemoteEnvironmentConfigListDto) GetValuesOk() (*[]RemoteEnvironmentConfigStub, bool) {\n\tif o == nil || o.Values == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Values, true\n}", "func IsRemotePlan(planContents []byte) bool {\n\t// We add a header to plans generated by the remote backend so we can\n\t// detect that they're remote in the apply phase.\n\tremoteOpsHeaderBytes := []byte(remoteOpsHeader)\n\treturn bytes.Equal(planContents[:len(remoteOpsHeaderBytes)], remoteOpsHeaderBytes)\n}", "func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}", "func (o *SmscSession) GetRemoteAddrOk() (*string, bool) {\n\tif o == nil || o.RemoteAddr == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RemoteAddr, true\n}", "func (o *Replication) GetMaxQueueSizeBytesOk() (*int64, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.MaxQueueSizeBytes, true\n}", "func (o *FiltersVmGroup) GetSubnetIdsOk() (*[]string, bool) {\n\tif o == nil || o.SubnetIds == nil {\n\t\treturn nil, false\n\t}\n\treturn o.SubnetIds, true\n}", "func (b *backend) Queues(ctx context.Context, qq 
*entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}", "func (*OpenconfigQos_Qos_Queues) IsYANGGoStruct() {}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue_State) IsYANGGoStruct() {}", "func (s *API) ListQueues(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"ListQueues\")\n\n\tqueueNamePrefix := req.FormValue(\"QueueNamePrefix\")\n\tvar queues []string\n\tfor k, v := range s.sqs.queues {\n\t\tif strings.HasPrefix(k, queueNamePrefix) {\n\t\t\tqueues = append(queues, v.url)\n\t\t}\n\t}\n\n\tresponse := ListQueuesResponse{\n\t\tResult: ListQueuesResult{queues},\n\t\tMetaData: ResponseMetaData{\"00000000-0000-0000-0000-000000000000\"},\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\tenc := xml.NewEncoder(w)\n\tenc.Indent(\" \", \" \")\n\tif err := enc.Encode(response); err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t}\n}", "func (o *V0037Node) GetBoardsOk() (*int32, bool) {\n\tif o == nil || o.Boards == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Boards, true\n}", "func (o *NodeUpdate) GetDnsServersOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.DnsServers, true\n}", "func (o *NotificationConfig) GetBccReceiversOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.BccReceivers, true\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues) Validate(opts ...ygot.ValidationOption) error {\n\tif err := 
ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (a *Client) GetMsgVpnJndiQueues(params *GetMsgVpnJndiQueuesParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnJndiQueuesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnJndiQueuesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnJndiQueues\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/jndiQueues\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnJndiQueuesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnJndiQueuesOK), nil\n\n}", "func (o *SMSConnectorSettings) GetDecodersOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Decoders, true\n}", "func (s *SQSServer) pollQueues(pollctx, taskctx context.Context, queues []QueueConf) error {\n\tfor _, qconf := range queues {\n\t\tq, err := s.getQueue(pollctx, qconf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq := &sqs.GetQueueAttributesInput{\n\t\t\tAttributeNames: []types.QueueAttributeName{(\"VisibilityTimeout\")},\n\t\t\tQueueUrl: &q.url,\n\t\t}\n\t\tresp, err := s.sqsSrv(q.QueueConf).GetQueueAttributes(pollctx, req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to get queue attributes for '%s' - %s\", q.Name, err.Error())\n\t\t}\n\t\tto := resp.Attributes[\"VisibilityTimeout\"]\n\t\tif to == \"\" {\n\t\t\treturn fmt.Errorf(\"No visibility timeout returned by SQS for queue '%s'\", q.Name)\n\t\t}\n\t\tvisTimeout, err := strconv.Atoi(to)\n\t\tif err != nil 
{\n\t\t\treturn fmt.Errorf(\"Failed to convert visibility timeout from '%s' to int - '%s'\", to, err.Error())\n\t\t}\n\t\t// Each queue runs in a dedicated go routine.\n\t\tgo func(vt int32) {\n\t\t\ts.queuePollers.Add(1)\n\t\t\tdefer s.queuePollers.Done()\n\t\t\ts.run(pollctx, taskctx, q, vt)\n\t\t}(int32(visTimeout))\n\t}\n\n\treturn nil\n}", "func (s *UserDataFilters) SetQueues(v []*string) *UserDataFilters {\n\ts.Queues = v\n\treturn s\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue_Config) IsYANGGoStruct() {}", "func (s *Filters) SetQueues(v []*string) *Filters {\n\ts.Queues = v\n\treturn s\n}", "func RmqQueueStat(jBody []byte) (messageCount int, result int) {\n\ttype QueueFailedCount struct {\n\t\tCount int `json:\"messages\"` // figure out which of these we need\n\t}\n\tresult = 0 // explicitly zeroing\n\tvar queueFailedCount QueueFailedCount\n\tmarshalerr := json.Unmarshal(jBody, &queueFailedCount)\n\tif marshalerr != nil {\n\t\tfmt.Println(marshalerr)\n\t}\n\tmessageCount = queueFailedCount.Count\n\tif queueFailedCount.Count > 0 && queueFailedCount.Count < 20 {\n\t\tresult = 1\n\t} else if queueFailedCount.Count > 19 {\n\t\tresult = 2\n\t}\n\treturn messageCount, result\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues) IsYANGGoStruct() {}", "func (o *SecurityProblem) GetManagementZonesOk() (*[]ManagementZone, bool) {\n\tif o == nil || o.ManagementZones == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ManagementZones, true\n}", "func (connector *DbConnector) GetRemoteTriggersToCheck(count int) ([]string, error) {\n\treturn connector.getTriggersToCheck(remoteTriggersToCheckKey, count)\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue) IsYANGGoStruct() {}", "func (m *ExtractorClientMock) MinimockGetChatHistoryRemoteDone() bool {\n\tfor _, e := range m.GetChatHistoryRemoteMock.expectations {\n\t\tif mm_atomic.LoadUint64(&e.Counter) < 1 {\n\t\t\treturn 
false\n\t\t}\n\t}\n\n\t// if default expectation was set then invocations count should be greater than zero\n\tif m.GetChatHistoryRemoteMock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.afterGetChatHistoryRemoteCounter) < 1 {\n\t\treturn false\n\t}\n\t// if func was set then invocations count should be greater than zero\n\tif m.funcGetChatHistoryRemote != nil && mm_atomic.LoadUint64(&m.afterGetChatHistoryRemoteCounter) < 1 {\n\t\treturn false\n\t}\n\treturn true\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) {\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}", "func (o *NotificationProjectBudgetNotification) GetTeamIdsOk() (*[]int32, bool) {\n\tif o == nil || o.TeamIds == nil {\n\t\treturn nil, false\n\t}\n\treturn o.TeamIds, true\n}", "func groomQueues(queues *Queues) (err kv.Error) {\n\tfor qName, qDetails := range *queues {\n\t\t// If we have enough runners drop the queue as it needs nothing done to it\n\t\tif len(qDetails.NodeGroup) == 0 || qDetails.Running >= qDetails.Ready+qDetails.NotVisible {\n\t\t\tif logger.IsTrace() {\n\t\t\t\tlogger.Trace(\"queue already handled\", \"queue\", qName, \"stack\", stack.Trace().TrimRuntime())\n\t\t\t}\n\t\t\tdelete(*queues, qName)\n\t\t}\n\t}\n\treturn nil\n}", "func (o *ApplianceAllOfNetworkingIpv4Dhcp) GetRoutersOk() (*bool, bool) {\n\tif o == nil || o.Routers == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Routers, true\n}", "func (bq *InMemoryBuildQueue) ListPlatformQueues(ctx context.Context, request *emptypb.Empty) (*buildqueuestate.ListPlatformQueuesResponse, error) 
{\n\tbq.enter(bq.clock.Now())\n\tdefer bq.leave()\n\n\t// Obtain platform queue IDs in sorted order.\n\tplatformQueueList := append(platformQueueList(nil), bq.platformQueues...)\n\tsort.Sort(platformQueueList)\n\n\t// Extract status.\n\tplatformQueues := make([]*buildqueuestate.PlatformQueueState, 0, len(bq.platformQueues))\n\tfor _, pq := range platformQueueList {\n\t\tsizeClassQueues := make([]*buildqueuestate.SizeClassQueueState, 0, len(pq.sizeClassQueues))\n\t\tfor i, scq := range pq.sizeClassQueues {\n\t\t\texecutingWorkersCount := uint32(0)\n\t\t\tfor _, w := range scq.workers {\n\t\t\t\tif w.currentTask != nil {\n\t\t\t\t\texecutingWorkersCount++\n\t\t\t\t}\n\t\t\t}\n\t\t\tactiveInvocationsCount := uint32(0)\n\t\t\tfor _, i := range scq.invocations {\n\t\t\t\tif i.isActive() {\n\t\t\t\t\tactiveInvocationsCount++\n\t\t\t\t}\n\t\t\t}\n\t\t\tsizeClassQueues = append(sizeClassQueues, &buildqueuestate.SizeClassQueueState{\n\t\t\t\tSizeClass: pq.sizeClasses[i],\n\t\t\t\tTimeout: bq.cleanupQueue.getTimestamp(scq.cleanupKey),\n\t\t\t\tInvocationsCount: uint32(len(scq.invocations)),\n\t\t\t\tQueuedInvocationsCount: uint32(scq.queuedInvocations.Len()),\n\t\t\t\tActiveInvocationsCount: uint32(activeInvocationsCount),\n\t\t\t\tWorkersCount: uint32(len(scq.workers)),\n\t\t\t\tExecutingWorkersCount: executingWorkersCount,\n\t\t\t\tDrainsCount: uint32(len(scq.drains)),\n\t\t\t})\n\t\t}\n\t\tplatformQueues = append(platformQueues, &buildqueuestate.PlatformQueueState{\n\t\t\tName: pq.platformKey.GetPlatformQueueName(),\n\t\t\tSizeClassQueues: sizeClassQueues,\n\t\t})\n\t}\n\treturn &buildqueuestate.ListPlatformQueuesResponse{\n\t\tPlatformQueues: platformQueues,\n\t}, nil\n}", "func (o *Replication) GetRemoteIDOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteID, true\n}", "func (_PlasmaFramework *PlasmaFrameworkCaller) HasExitQueue(opts *bind.CallOpts, vaultId *big.Int, token common.Address) (bool, error) {\n\tvar (\n\t\tret0 = 
new(bool)\n\t)\n\tout := ret0\n\terr := _PlasmaFramework.contract.Call(opts, out, \"hasExitQueue\", vaultId, token)\n\treturn *ret0, err\n}", "func (o *HyperflexHxapDvUplink) GetVlansOk() (*string, bool) {\n\tif o == nil || o.Vlans == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Vlans, true\n}", "func (o *SMSConnectorSettings) GetLimitsOk() (*Thresholds, bool) {\n\tif o == nil || o.Limits == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Limits, true\n}", "func (bs *BeanstalkdConnectionPool) ListQueues() (queueNames []string, err error) {\n\tqueueNames, err = bs.getGlobalConnect().ListTubes()\n\treturn\n}", "func (o *Replication) GetCurrentQueueSizeBytesOk() (*int64, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.CurrentQueueSizeBytes, true\n}", "func (o *VnicEthAdapterPolicyInventory) HasRxQueueSettings() bool {\n\tif o != nil && o.RxQueueSettings.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *CouponLimitConfigs) GetLimitsOk() ([]LimitConfig, bool) {\n\tif o == nil || o.Limits == nil {\n\t\tvar ret []LimitConfig\n\t\treturn ret, false\n\t}\n\treturn *o.Limits, true\n}", "func NewGetCallQueueitemsOK() *GetCallQueueitemsOK {\n\treturn &GetCallQueueitemsOK{}\n}", "func (o *NewCoupons) GetLimitsOk() ([]LimitConfig, bool) {\n\tif o == nil || o.Limits == nil {\n\t\tvar ret []LimitConfig\n\t\treturn ret, false\n\t}\n\treturn *o.Limits, true\n}", "func (o *NotificationConfig) GetCcReceiversOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.CcReceivers, true\n}", "func (o *VirtualizationVmwareVirtualMachineAllOf) HasRemoteDisplayInfo() bool {\n\tif o != nil && o.RemoteDisplayInfo.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *Wireless) GetChannelsOk() (string, bool) {\n\tif o == nil || o.Channels == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.Channels, true\n}", "func (o *NetworkDns) GetNameServersOk() ([]string, bool) {\n\tif o == nil || o.NameServers == nil 
{\n\t\treturn nil, false\n\t}\n\treturn o.NameServers, true\n}", "func (o *StorageRemoteKeySetting) GetIsPasswordSetOk() (*bool, bool) {\n\tif o == nil || o.IsPasswordSet == nil {\n\t\treturn nil, false\n\t}\n\treturn o.IsPasswordSet, true\n}", "func (u *Unpackerr) haveSonarrQitem(name string) bool {\n\tfor _, server := range u.Sonarr {\n\t\tfor _, q := range server.Queue.Records {\n\t\t\tif q.Title == name {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (o *NSQProducer) HasRemoteAddress() bool {\n\tif o != nil && o.RemoteAddress != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *StorageRemoteKeySetting) GetPortOk() (*int64, bool) {\n\tif o == nil || o.Port == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Port, true\n}", "func (o *StatusAzureServiceBus) GetRecordsProcessedOk() (*int64, bool) {\n\tif o == nil || IsNil(o.RecordsProcessed) {\n\t\treturn nil, false\n\t}\n\treturn o.RecordsProcessed, true\n}", "func (o *FiltersApiLog) GetQueryApiNamesOk() ([]string, bool) {\n\tif o == nil || o.QueryApiNames == nil {\n\t\tvar ret []string\n\t\treturn ret, false\n\t}\n\treturn *o.QueryApiNames, true\n}", "func QueueStatus_Values() []string {\n\treturn []string{\n\t\tQueueStatusEnabled,\n\t\tQueueStatusDisabled,\n\t}\n}", "func (o *FiltersVirtualGateway) GetVirtualGatewayIdsOk() ([]string, bool) {\n\tif o == nil || o.VirtualGatewayIds == nil {\n\t\tvar ret []string\n\t\treturn ret, false\n\t}\n\treturn *o.VirtualGatewayIds, true\n}", "func (o *FeedSyncResult) GetGroupsOk() ([]GroupSyncResult, bool) {\n\tif o == nil || o.Groups == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Groups, 
true\n}", "func (o *VirtualizationVmwareVirtualMachineAllOf) GetRemoteDisplayVncEnabledOk() (*bool, bool) {\n\tif o == nil || o.RemoteDisplayVncEnabled == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RemoteDisplayVncEnabled, true\n}", "func (o *W2) GetNonqualifiedPlansOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.NonqualifiedPlans.Get(), o.NonqualifiedPlans.IsSet()\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue_State) IsYANGGoStruct() {}", "func (o *V0037Node) GetSocketsOk() (*int32, bool) {\n\tif o == nil || o.Sockets == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Sockets, true\n}", "func (o *User) GetMessagesOk() ([]MicrosoftGraphMessage, bool) {\n\tif o == nil || o.Messages == nil {\n\t\tvar ret []MicrosoftGraphMessage\n\t\treturn ret, false\n\t}\n\treturn *o.Messages, true\n}", "func (m *Makross) HasQueuesMap(key string) bool {\n\tif value, okay := m.QueuesMap.Load(key); okay {\n\t\tif pqueue, okay := value.(*prior.PriorityQueue); okay {\n\t\t\tif pqueue.Length() > 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (*OpenconfigQos_Qos_Queues_Queue_Red) IsYANGGoStruct() {}", "func (o *FiltersSecurityGroup) GetOutboundRuleProtocolsOk() (*[]string, bool) {\n\tif o == nil || o.OutboundRuleProtocols == nil {\n\t\treturn nil, false\n\t}\n\treturn o.OutboundRuleProtocols, true\n}", "func (o *KvmPolicyInventory) HasRemotePort() bool {\n\tif o != nil && o.RemotePort != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *User) GetMailboxSettingsOk() (AnyOfmicrosoftGraphMailboxSettings, bool) {\n\tif o == nil || o.MailboxSettings == nil {\n\t\tvar ret AnyOfmicrosoftGraphMailboxSettings\n\t\treturn ret, false\n\t}\n\treturn *o.MailboxSettings, true\n}", "func (_PlasmaFramework *PlasmaFrameworkCaller) ExitsQueues(opts *bind.CallOpts, arg0 [32]byte) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _PlasmaFramework.contract.Call(opts, 
out, \"exitsQueues\", arg0)\n\treturn *ret0, err\n}", "func (o *UsageSNMPHour) GetSnmpDevicesOk() (*int64, bool) {\n\tif o == nil || o.SnmpDevices == nil {\n\t\treturn nil, false\n\t}\n\treturn o.SnmpDevices, true\n}", "func (_DappboxManager *DappboxManagerCaller) IsRemoteFolder(opts *bind.CallOpts, dappboxAddress common.Address, remoteFolderAddress common.Address) (bool, common.Address, *big.Int, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t\tret1 = new(common.Address)\n\t\tret2 = new(*big.Int)\n\t)\n\tout := &[]interface{}{\n\t\tret0,\n\t\tret1,\n\t\tret2,\n\t}\n\terr := _DappboxManager.contract.Call(opts, out, \"isRemoteFolder\", dappboxAddress, remoteFolderAddress)\n\treturn *ret0, *ret1, *ret2, err\n}", "func (o *VirtualizationVmwareVirtualMachineAllOf) GetNetworksOk() ([]VirtualizationBaseNetworkRelationship, bool) {\n\tif o == nil || o.Networks == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Networks, true\n}", "func (o *FiltersNatService) GetSubnetIdsOk() ([]string, bool) {\n\tif o == nil || o.SubnetIds == nil {\n\t\tvar ret []string\n\t\treturn ret, false\n\t}\n\treturn *o.SubnetIds, true\n}", "func (o *SoftwarerepositoryLocalMachineAllOf) GetUploadUrlsOk() ([]string, bool) {\n\tif o == nil || o.UploadUrls == nil {\n\t\treturn nil, false\n\t}\n\treturn o.UploadUrls, true\n}", "func (o *VisuallyComplete2Settings) GetMutationBlacklistOk() (*string, bool) {\n\tif o == nil || o.MutationBlacklist == nil {\n\t\treturn nil, false\n\t}\n\treturn o.MutationBlacklist, true\n}", "func (o *FiltersSubnet) GetSubnetIdsOk() ([]string, bool) {\n\tif o == nil || o.SubnetIds == nil {\n\t\tvar ret []string\n\t\treturn ret, false\n\t}\n\treturn *o.SubnetIds, true\n}", "func (connector *DbConnector) GetRemoteTriggersToCheckCount() (int64, error) {\n\treturn connector.getTriggersToCheckCount(remoteTriggersToCheckKey)\n}", "func (_PlasmaFramework *PlasmaFrameworkSession) ExitsQueues(arg0 [32]byte) (common.Address, error) {\n\treturn 
_PlasmaFramework.Contract.ExitsQueues(&_PlasmaFramework.CallOpts, arg0)\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues_Queue) IsYANGGoStruct() {}", "func (p *Project) Queues() (*[]Queue, error) {\n qs := make([]Queue, 0)\n err := Mongo.Get(\"queue\", bson.M{\"project\": p.ID}, MaxQueuesPerProject, &qs)\n return &qs, err\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue_Config) IsYANGGoStruct() {}" ]
[ "0.63053876", "0.61726433", "0.5913533", "0.5784514", "0.57285035", "0.533683", "0.53098166", "0.52802896", "0.5174174", "0.5162988", "0.5104646", "0.50820893", "0.5044645", "0.5033634", "0.50302416", "0.5001202", "0.49617392", "0.49253264", "0.49231443", "0.48999754", "0.48885736", "0.4880531", "0.48601952", "0.4841383", "0.4839321", "0.4827003", "0.48217717", "0.47937664", "0.47796562", "0.4776574", "0.47542107", "0.47247502", "0.46949935", "0.46794948", "0.46789235", "0.46750748", "0.46690944", "0.4649491", "0.46421465", "0.46382114", "0.46373102", "0.46326855", "0.46309564", "0.46191058", "0.46060085", "0.46052495", "0.460306", "0.45956945", "0.4593882", "0.45751137", "0.45655227", "0.45647424", "0.45595235", "0.4548738", "0.45453075", "0.45442015", "0.4541691", "0.4540848", "0.45343843", "0.4526562", "0.45254922", "0.45216408", "0.45181453", "0.4512346", "0.4509706", "0.4504498", "0.45039773", "0.45025113", "0.44958487", "0.44787934", "0.44773033", "0.44641584", "0.44640988", "0.44581118", "0.44520712", "0.44426504", "0.44367278", "0.44320062", "0.44301763", "0.44275033", "0.44245714", "0.44134593", "0.4412306", "0.44094756", "0.4408505", "0.44066787", "0.43993542", "0.4390439", "0.4387919", "0.43774816", "0.43516698", "0.43472597", "0.43472567", "0.43453172", "0.43426707", "0.43275246", "0.4323224", "0.43189925", "0.43179637", "0.43171772" ]
0.8458737
0
SetRemoteQueues sets field value
func (o *QueueManager) SetRemoteQueues(v []RemoteQueue) { o.RemoteQueues = v }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *Filters) SetQueues(v []*string) *Filters {\n\ts.Queues = v\n\treturn s\n}", "func (p *Process) CmdSetQueue(pac teoapi.Packet) (err error) {\n\tdata := pac.RemoveTrailingZero(pac.Data())\n\trequest := cdb.KeyValue{Cmd: pac.Cmd()}\n\tif err = request.UnmarshalText(data); err != nil {\n\t\treturn\n\t} else if err = p.tcdb.SetQueue(request.Key, request.Value); err != nil {\n\t\treturn\n\t}\n\t// Return only Value for text requests and all fields for json\n\tresponce := request\n\tresponce.Value = nil\n\tif !request.RequestInJSON {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), responce.Value)\n\t} else if retdata, err := responce.MarshalText(); err == nil {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), retdata)\n\t}\n\treturn\n}", "func (q *Queue) Set(ctx context.Context, ds *linux.MsqidDS) error {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\n\tcreds := auth.CredentialsFromContext(ctx)\n\tif ds.MsgQbytes > maxQueueBytes && !creds.HasCapabilityIn(linux.CAP_SYS_RESOURCE, q.obj.UserNS) {\n\t\t// \"An attempt (IPC_SET) was made to increase msg_qbytes beyond the\n\t\t// system parameter MSGMNB, but the caller is not privileged (Linux:\n\t\t// does not have the CAP_SYS_RESOURCE capability).\"\n\t\treturn linuxerr.EPERM\n\t}\n\n\tif err := q.obj.Set(ctx, &ds.MsgPerm); err != nil {\n\t\treturn err\n\t}\n\n\tq.maxBytes = ds.MsgQbytes\n\tq.changeTime = ktime.NowFromContext(ctx)\n\treturn nil\n}", "func (s *UserDataFilters) SetQueues(v []*string) *UserDataFilters {\n\ts.Queues = v\n\treturn s\n}", "func SetQueueSettings(ctx *context.Context) {\n\tqid := ctx.ParamsInt64(\"qid\")\n\tmq := queue.GetManager().GetManagedQueue(qid)\n\tif mq == nil {\n\t\tctx.Status(http.StatusNotFound)\n\t\treturn\n\t}\n\tif _, ok := mq.Managed.(queue.ManagedPool); !ok {\n\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.pool.none\"))\n\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\treturn\n\t}\n\n\tmaxNumberStr := 
ctx.FormString(\"max-number\")\n\tnumberStr := ctx.FormString(\"number\")\n\ttimeoutStr := ctx.FormString(\"timeout\")\n\n\tvar err error\n\tvar maxNumber, number int\n\tvar timeout time.Duration\n\tif len(maxNumberStr) > 0 {\n\t\tmaxNumber, err = strconv.Atoi(maxNumberStr)\n\t\tif err != nil {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.maxnumberworkers.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t\tif maxNumber < -1 {\n\t\t\tmaxNumber = -1\n\t\t}\n\t} else {\n\t\tmaxNumber = mq.MaxNumberOfWorkers()\n\t}\n\n\tif len(numberStr) > 0 {\n\t\tnumber, err = strconv.Atoi(numberStr)\n\t\tif err != nil || number < 0 {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.numberworkers.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tnumber = mq.BoostWorkers()\n\t}\n\n\tif len(timeoutStr) > 0 {\n\t\ttimeout, err = time.ParseDuration(timeoutStr)\n\t\tif err != nil {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.timeout.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t} else {\n\t\ttimeout = mq.BoostTimeout()\n\t}\n\n\tmq.SetPoolSettings(maxNumber, number, timeout)\n\tctx.Flash.Success(ctx.Tr(\"admin.monitor.queue.settings.changed\"))\n\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n}", "func (r *RPC) SetQueueClient(c queue.Client) {\n\tgapi := NewGRpcServer(c, r.api)\n\tjapi := NewJSONRPCServer(c, r.api)\n\tr.gapi = gapi\n\tr.japi = japi\n\tr.c = c\n\t//注册系统rpc\n\tpluginmgr.AddRPC(r)\n\tr.Listen()\n}", "func (r *RPC) SetQueueClient(c queue.Client) {\r\n\tgapi := NewGRpcServer(c, r.api)\r\n\tjapi := NewJSONRPCServer(c, r.api)\r\n\tr.gapi = gapi\r\n\tr.japi = japi\r\n\tr.c = c\r\n\t//注册系统rpc\r\n\tpluginmgr.AddRPC(r)\r\n\tr.Listen()\r\n}", 
"func (o *QueueManager) GetRemoteQueues() []RemoteQueue {\n\tif o == nil {\n\t\tvar ret []RemoteQueue\n\t\treturn ret\n\t}\n\n\treturn o.RemoteQueues\n}", "func (tcdb *Teocdb) SetQueue(key string, value []byte) (err error) {\n\treturn tcdb.session.Query(`UPDATE queue SET lock = '', data = ? WHERE key = ? AND time = toTimestamp(now()) AND random = UUID()`,\n\t\tvalue, key).Exec()\n}", "func (m *AudioRoutingGroup) SetReceivers(value []string)() {\n err := m.GetBackingStore().Set(\"receivers\", value)\n if err != nil {\n panic(err)\n }\n}", "func (acnl *Channel) setupQueues(cnl *amqp.Channel) error {\n\t/*if _, err := cnl.QueueDeclare(QueueVNFMRegister, true, acnl.cfg.queues.autodelete,\n\t\tacnl.cfg.queues.exclusive, false, nil); err != nil {\n\n\t\treturn err\n\t}\n\n\tif err := cnl.QueueBind(QueueVNFMRegister, QueueVNFMRegister, acnl.cfg.exchange.name, false, nil); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := cnl.QueueDeclare(QueueVNFMUnregister, true, acnl.cfg.queues.autodelete,\n\t\tacnl.cfg.queues.exclusive, false, nil); err != nil {\n\n\t\treturn err\n\t}\n\n\tif err := cnl.QueueBind(QueueVNFMUnregister, QueueVNFMUnregister, acnl.cfg.exchange.name, false, nil); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := cnl.QueueDeclare(QueueVNFMCoreActions, true, acnl.cfg.queues.autodelete,\n\t\tacnl.cfg.queues.exclusive, false, nil); err != nil {\n\n\t\treturn err\n\t}\n\n\tif err := cnl.QueueBind(QueueVNFMCoreActions, QueueVNFMCoreActions, acnl.cfg.exchange.name, false, nil); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := cnl.QueueDeclare(QueueVNFMCoreActionsReply, true, acnl.cfg.queues.autodelete,\n\t\tacnl.cfg.queues.exclusive, false, nil); err != nil {\n\n\t\treturn err\n\t}\n\n\tif err := cnl.QueueBind(QueueVNFMCoreActionsReply, QueueVNFMCoreActionsReply, acnl.cfg.exchange.name, false, nil); err != nil {\n\t\treturn err\n\t}*/\n\n\t// is this needed?\n\tif _, err := cnl.QueueDeclare(acnl.cfg.queues.generic, true, 
acnl.cfg.queues.autodelete,\n\t\tacnl.cfg.queues.exclusive, false, nil); err != nil {\n\n\t\treturn err\n\t}\n\n\tif err := cnl.QueueBind(acnl.cfg.queues.generic, acnl.cfg.queues.generic, acnl.cfg.exchange.name, false, nil); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (m *VpnConfiguration) SetServers(value []VpnServerable)() {\n err := m.GetBackingStore().Set(\"servers\", value)\n if err != nil {\n panic(err)\n }\n}", "func PopulateQueues(c *gin.Context) {\n\tif queue == nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue doesn't exist, please create it!!!\",\n\t\t})\n\t\treturn\n\t}\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"roberto\",\n\t\tEMAIL: \"roberto@rr.com\",\n\t\tUUID: \"1\",\n\t\tMSG: \"lindo\",\n\t})\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"alex\",\n\t\tEMAIL: \"alex@rr.com\",\n\t\tUUID: \"2\",\n\t\tMSG: \"lindox\",\n\t})\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"ale\",\n\t\tEMAIL: \"ale@rr.com\",\n\t\tUUID: \"3\",\n\t\tMSG: \"linduxo\",\n\t})\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"msg\": queue,\n\t})\n}", "func SetQueueReclaimable(ctx *TestContext, queues []string, reclaimable bool) {\n\tBy(\"Setting Queue reclaimable\")\n\n\tfor _, q := range queues {\n\t\tqueue, err := ctx.Vcclient.SchedulingV1beta1().Queues().Get(context.TODO(), q, metav1.GetOptions{})\n\t\tExpect(err).NotTo(HaveOccurred(), \"failed to get queue %s\", q)\n\n\t\tqueue.Spec.Reclaimable = &reclaimable\n\t\t_, err = ctx.Vcclient.SchedulingV1beta1().Queues().Update(context.TODO(), queue, metav1.UpdateOptions{})\n\t\tExpect(err).NotTo(HaveOccurred(), \"failed to update queue %s\", q)\n\t}\n}", "func QueueRemoteWrite(req *gomemcached.MCRequest) {\n\n\tkey := req.Key\n\tnodeList := getVbucketNode(int(findShard(string(key))))\n\tnodes := strings.Split(nodeList, \";\")\n\n\tif len(nodes) < 1 {\n\t\tlog.Fatal(\"Nodelist is empty. 
Cannot proceed\")\n\t}\n\n\tif len(nodes) < 2 {\n\t\t//no replica\n\t\treturn\n\t}\n\n\tvar remoteNode string\n\t// figure out which is the remote host and queue to the write to that node\n\tfor _, node := range nodes {\n\t\tfound := false\n\t\thostname := strings.Split(node, \":\")\n\t\tfor _, ip := range ipList {\n\t\t\tif ip == hostname[0] {\n\t\t\t\tfound = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif found == false {\n\t\t\tremoteNode = node\n\t\t}\n\t}\n\n\tri := &repItem{host: remoteNode, req: req, opcode: OP_REP}\n\trepChan <- ri\n\treturn\n}", "func (c *Consumer) SetQueueBind(bind *QueueBind) *Consumer {\n\tif bind != nil {\n\t\tc.mutex.Lock()\n\t\tc.bind = bind\n\t\tc.mutex.Unlock()\n\t}\n\treturn c\n}", "func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueues, true\n}", "func queueTrackRemote(track string) {\n\n\tm := remoteCommand{\n\t\tCommand: \"play_track\",\n\t\tParam: track,\n\t}\n\n\terr := pushMessage(context.sqs, m)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tlog.Println(\"Track Queued: \", track)\n}", "func (e *LifecycleEvent) SetQueueURL(url string) { e.queueURL = url }", "func (s *Service) SetQueue(q amboy.Queue) error {\n\tif s.closer != nil {\n\t\treturn errors.New(\"cannot set a new queue, Service is already open\")\n\t}\n\n\ts.queue = q\n\treturn nil\n}", "func (s *SearchQueuesOutput) SetQueues(v []*Queue) *SearchQueuesOutput {\n\ts.Queues = v\n\treturn s\n}", "func (oo *OmciCC) SendSetPrioQueueVar(ctx context.Context, timeout int, highPrio bool,\n\trxChan chan Message, params ...me.ParamData) (*me.ManagedEntity, error) {\n\ttid := oo.GetNextTid(highPrio)\n\tlogger.Debugw(ctx, \"send PrioQueue-Set-msg:\", log.Fields{\"device-id\": oo.deviceID,\n\t\t\"SequNo\": strconv.FormatInt(int64(tid), 16),\n\t\t\"InstId\": strconv.FormatInt(int64(params[0].EntityID), 16)})\n\n\tmeInstance, omciErr := me.NewPriorityQueue(params[0])\n\tif omciErr.GetError() == nil 
{\n\t\tomciLayer, msgLayer, err := oframe.EncodeFrame(meInstance, omci.SetRequestType, oframe.TransactionID(tid))\n\t\tif err != nil {\n\t\t\tlogger.Errorw(ctx, \"Cannot encode PrioQueue for set\", log.Fields{\n\t\t\t\t\"Err\": err, \"device-id\": oo.deviceID})\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpkt, err := SerializeOmciLayer(ctx, omciLayer, msgLayer)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(ctx, \"Cannot serialize PrioQueue set\", log.Fields{\n\t\t\t\t\"Err\": err, \"device-id\": oo.deviceID})\n\t\t\treturn nil, err\n\t\t}\n\n\t\tomciRxCallbackPair := CallbackPair{\n\t\t\tCbKey: tid,\n\t\t\tCbEntry: CallbackPairEntry{rxChan, oo.receiveOmciResponse, true},\n\t\t}\n\t\terr = oo.Send(ctx, pkt, timeout, CDefaultRetries, highPrio, omciRxCallbackPair)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(ctx, \"Cannot send PrioQueue set\", log.Fields{\n\t\t\t\t\"Err\": err, \"device-id\": oo.deviceID})\n\t\t\treturn nil, err\n\t\t}\n\t\tlogger.Debug(ctx, \"send PrioQueue-set msg done\")\n\t\treturn meInstance, nil\n\t}\n\tlogger.Errorw(ctx, \"Cannot generate PrioQueue Instance\", log.Fields{\n\t\t\"Err\": omciErr.GetError(), \"device-id\": oo.deviceID})\n\treturn nil, omciErr.GetError()\n}", "func WithQueues(queues []string) Option {\n\treturn func(opts *Options) {\n\t\topts.Queues = queues\n\t}\n}", "func setupQueue(client *redis.Client) error {\n\t// ping the queue\n\terr := pingQueue(client)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}", "func (_Mcapscontroller 
*McapscontrollerTransactor) SetMaxPoolTokens(opts *bind.TransactOpts, poolAddress common.Address, maxPoolTokens *big.Int) (*types.Transaction, error) {\n\treturn _Mcapscontroller.contract.Transact(opts, \"setMaxPoolTokens\", poolAddress, maxPoolTokens)\n}", "func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}", "func (w *Worker) SetQueue(q Queue) {\n\tw.queue = q\n}", "func SetConfig(c QueueConfig) error {\n\t// is name unique?\n\tif _, ok := configList[c.Name]; ok {\n\t\treturn ErrQueueIsExist\n\t}\n\n\t// is contener unique?\n\tfor _, v := range configList {\n\t\tif v.Contener == reflect.ValueOf(c.JobContener).Type() {\n\t\t\treturn ErrContenerIsNotUnique\n\t\t}\n\t}\n\n\treturn setConfig(c)\n}", "func (m *Printer) SetConnectors(value []PrintConnectorable)() {\n err := m.GetBackingStore().Set(\"connectors\", value)\n if err != nil {\n panic(err)\n }\n}", "func (s *segment) setOwner(ep *endpoint, qFlags queueFlags) {\n\tswitch qFlags {\n\tcase recvQ:\n\t\tep.updateReceiveMemUsed(s.segMemSize())\n\tcase sendQ:\n\t\t// no memory account for sendQ yet.\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unexpected queue flag %b\", qFlags))\n\t}\n\ts.ep = ep\n\ts.qFlags = qFlags\n}", "func SetServers(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"INFO\\tSet Servers\")\n\tfmt.Println(\"INFO\\tSet Servers\")\n\t/*\n\t \tvars := mux.Vars(r)\n\t \tip := vars[\"ip\"]\n\t \tips := strings.Split(ip, DELIM)\n\n\t fmt.Println(ip)\n\t \tfor i := range ips {\n\t \t\tdata.servers[ips[i]] = true\n\t \t}\n\n\t \tfmt.Println(\" servers \", data.processType)\n\t \tif data.processType == 3 {\n\t \t\tvar clients map[string]bool = data.servers\n\t \t\tserverListStr := create_server_list_string()\n\t \t\tj := 0\n\t \t\tfor ipaddr := range clients 
{\n\t \t\t\tsend_command_to_process(ipaddr, \"SetServers\", serverListStr)\n\t \t\t\tname := \"server_\" + fmt.Sprintf(\"%d\", j)\n\t \t\t\tfmt.Println(name, ipaddr)\n\t \t\t\tsend_command_to_process(ipaddr, \"SetName\", name)\n\t \t\t\tj = j + 1\n\t \t\t}\n\t \t}\n\t*/\n}", "func (s *API) SetQueueAttributes(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"SetQueueAttributes\")\n\tw.WriteHeader(http.StatusNotImplemented)\n}", "func (policy *ticketPolicy) OnSetQueueClient() {\n\n}", "func (m *ShowMeasurementsMapper) SetRemote(remote Mapper) error {\n\tm.remote = remote\n\treturn nil\n}", "func (m *User) SetMessages(value []Messageable)() {\n m.messages = value\n}", "func (d *domainClient) SetBlockedURLs(ctx context.Context, args *SetBlockedURLsArgs) (err error) {\n\tif args != nil {\n\t\terr = rpcc.Invoke(ctx, \"Network.setBlockedURLs\", args, nil, d.conn)\n\t} else {\n\t\terr = rpcc.Invoke(ctx, \"Network.setBlockedURLs\", nil, nil, d.conn)\n\t}\n\tif err != nil {\n\t\terr = &internal.OpError{Domain: \"Network\", Op: \"SetBlockedURLs\", Err: err}\n\t}\n\treturn\n}", "func (q *priorityLocalQueue) SetRunner(r amboy.Runner) error {\n\tif q.Started() {\n\t\treturn errors.New(\"cannot set runner after queue is started\")\n\t}\n\n\tq.runner = r\n\n\treturn nil\n}", "func SetSocksHost(s string) func(*Manager) error {\n\treturn func(c *Manager) error {\n\t\tc.host = s\n\t\treturn nil\n\t}\n}", "func (i *Inspector) Queues() ([]string, error) {\n\treturn i.rdb.AllQueues()\n}", "func SetMatchmakingQueue(settings *playfab.Settings, postData *SetMatchmakingQueueRequestModel, entityToken string) (*SetMatchmakingQueueResultModel, error) {\n if entityToken == \"\" {\n return nil, playfab.NewCustomError(\"entityToken should not be an empty string\", playfab.ErrorGeneric)\n }\n b, errMarshal := json.Marshal(postData)\n if errMarshal != nil {\n return nil, playfab.NewCustomError(errMarshal.Error(), playfab.ErrorMarshal)\n }\n\n sourceMap, err := playfab.Request(settings, b, 
\"/Match/SetMatchmakingQueue\", \"X-EntityToken\", entityToken)\n if err != nil {\n return nil, err\n }\n \n result := &SetMatchmakingQueueResultModel{}\n\n config := mapstructure.DecoderConfig{\n DecodeHook: playfab.StringToDateTimeHook,\n Result: result,\n }\n \n decoder, errDecoding := mapstructure.NewDecoder(&config)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n \n errDecoding = decoder.Decode(sourceMap)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n\n return result, nil\n}", "func (c *Client) SetSlaves(v []interface{}) {\n\tc.slaves = make([]string,0,len(v))\n\tfor _, vv := range v {\n\t\tc.slaves = append(c.slaves, vv.(string))\n\t}\n}", "func SetRemoteEnvironment(environ map[string]string) {\n\tremoteEnvironment = environ\n}", "func (m *SharedWithChannelTeamInfo) SetAllowedMembers(value []ConversationMemberable)() {\n err := m.GetBackingStore().Set(\"allowedMembers\", value)\n if err != nil {\n panic(err)\n }\n}", "func (gt GtwyMgr) Set(ctx context.Context, appcontext, remoteAddress string) error {\n\tif EnvDebugOn {\n\t\tlblog.LogEvent(\"GtwyMgr\", \"Set\", \"info\", \"start\")\n\t}\n\n\tglst := &Gateway{AppContext: appcontext, RemoteAddress: remoteAddress}\n\n\tky, err := gt.newKey(ctx, gt.bc.GetConfigValue(ctx, \"EnvGtwayDsNamespace\"), gt.bc.GetConfigValue(ctx, \"EnvGtwayDsKind\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttx, err := gt.ds.NewTransaction(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := tx.Put(ky, glst); err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\n\tif _, err = tx.Commit(); err != nil {\n\t\treturn err\n\t}\n\n\tif EnvDebugOn {\n\t\tlblog.LogEvent(\"GtwyMgr\", \"Set\", \"info\", \"end\")\n\t}\n\treturn nil\n}", "func (router *EventRouter) BindQueue(queue string, exchange string) {\n\tif router.lastError == nil {\n\t\trouter.DeclareExchange(exchange)\n\t}\n\tif router.lastError == 
nil {\n\t\trouter.DeclareQueue(queue)\n\t}\n\tif router.lastError == nil {\n\t\trouter.lastError = router.channel.QueueBind(queue, \"\", exchange, false, nil)\n\t}\n}", "func (s *SQSServer) pollQueues(pollctx, taskctx context.Context, queues []QueueConf) error {\n\tfor _, qconf := range queues {\n\t\tq, err := s.getQueue(pollctx, qconf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq := &sqs.GetQueueAttributesInput{\n\t\t\tAttributeNames: []types.QueueAttributeName{(\"VisibilityTimeout\")},\n\t\t\tQueueUrl: &q.url,\n\t\t}\n\t\tresp, err := s.sqsSrv(q.QueueConf).GetQueueAttributes(pollctx, req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to get queue attributes for '%s' - %s\", q.Name, err.Error())\n\t\t}\n\t\tto := resp.Attributes[\"VisibilityTimeout\"]\n\t\tif to == \"\" {\n\t\t\treturn fmt.Errorf(\"No visibility timeout returned by SQS for queue '%s'\", q.Name)\n\t\t}\n\t\tvisTimeout, err := strconv.Atoi(to)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to convert visibility timeout from '%s' to int - '%s'\", to, err.Error())\n\t\t}\n\t\t// Each queue runs in a dedicated go routine.\n\t\tgo func(vt int32) {\n\t\t\ts.queuePollers.Add(1)\n\t\t\tdefer s.queuePollers.Done()\n\t\t\ts.run(pollctx, taskctx, q, vt)\n\t\t}(int32(visTimeout))\n\t}\n\n\treturn nil\n}", "func SetConfigList(c []QueueConfig) error {\n\tfor _, v := range c {\n\t\terr := SetConfig(v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (bs *BeanstalkdConnectionPool) ListQueues() (queueNames []string, err error) {\n\tqueueNames, err = bs.getGlobalConnect().ListTubes()\n\treturn\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (_Lmc *LmcTransactor) 
SetReceiverWhitelisted(opts *bind.TransactOpts, receiver common.Address, whitelisted bool) (*types.Transaction, error) {\n\treturn _Lmc.contract.Transact(opts, \"setReceiverWhitelisted\", receiver, whitelisted)\n}", "func (r *RemoteList) unlockedSetV4(ownerVpnIp iputil.VpnIp, vpnIp iputil.VpnIp, to []*Ip4AndPort, check checkFuncV4) {\n\tr.shouldRebuild = true\n\tc := r.unlockedGetOrMakeV4(ownerVpnIp)\n\n\t// Reset the slice\n\tc.reported = c.reported[:0]\n\n\t// We can't take their array but we can take their pointers\n\tfor _, v := range to[:minInt(len(to), MaxRemotes)] {\n\t\tif check(vpnIp, v) {\n\t\t\tc.reported = append(c.reported, v)\n\t\t}\n\t}\n}", "func (cfg *Config) SetRemoteHost(remoteHost string) {\n\tcfg.RemoteHost = remoteHost\n}", "func (_Mcapscontroller *McapscontrollerTransactorSession) SetMaxPoolTokens(poolAddress common.Address, maxPoolTokens *big.Int) (*types.Transaction, error) {\n\treturn _Mcapscontroller.Contract.SetMaxPoolTokens(&_Mcapscontroller.TransactOpts, poolAddress, maxPoolTokens)\n}", "func (_Mcapscontroller *McapscontrollerSession) SetMaxPoolTokens(poolAddress common.Address, maxPoolTokens *big.Int) (*types.Transaction, error) {\n\treturn _Mcapscontroller.Contract.SetMaxPoolTokens(&_Mcapscontroller.TransactOpts, poolAddress, maxPoolTokens)\n}", "func (h *Homebrew) SendQueue(q []*dmr.Packet, toPeer *Peer) error {\n\tfor _, packet := range q {\n\t\tdata := buildData(packet, h.ID)\n\t\tif err := h.WriteToPeer(data, toPeer); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func QueueBind(config ConfigQueue, ch *amqp.Channel) error {\n\tlog.Println(\"config: %+v\", config.Bind)\n\n\tif err := ch.QueueBind(\n\t\tconfig.Name,\n\t\tconfig.Bind.RoutingKey,\n\t\tconfig.Bind.ExchangeName,\n\t\tconfig.NoWait,\n\t\tnil,\n\t); err != nil {\n\t\treturn errors.New(\"[QueueBind]: unable to queue bind\" + err.Error())\n\t}\n\n\treturn nil\n}", "func (m *Group) SetThreads(value []ConversationThreadable)() {\n m.threads = value\n}", 
"func (as AccountStorage) SetPendingCoinDayQueue(ctx sdk.Context, me types.AccountKey, pendingCoinDayQueue *PendingCoinDayQueue) sdk.Error {\n\tstore := ctx.KVStore(as.key)\n\tpendingCoinDayQueueByte, err := as.cdc.MarshalJSON(*pendingCoinDayQueue)\n\tif err != nil {\n\t\treturn ErrFailedToMarshalPendingCoinDayQueue(err)\n\t}\n\tstore.Set(getPendingCoinDayQueueKey(me), pendingCoinDayQueueByte)\n\treturn nil\n}", "func (ss *RoundRobinServerList) SetServers(servers ...string) error {\n\tnaddr := make([]net.Addr, len(servers))\n\tfor i, server := range servers {\n\t\tif strings.Contains(server, \"/\") {\n\t\t\taddr, err := net.ResolveUnixAddr(\"unix\", server)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tnaddr[i] = newStaticAddr(addr)\n\t\t} else {\n\t\t\ttcpaddr, err := net.ResolveTCPAddr(\"tcp\", server)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tnaddr[i] = newStaticAddr(tcpaddr)\n\t\t}\n\t}\n\n\tss.mu.Lock()\n\tdefer ss.mu.Unlock()\n\tss.addrs = naddr\n\treturn nil\n}", "func (c *Consumer) SetQueueDeclare(declare *QueueDeclare) *Consumer {\n\tif declare != nil {\n\t\tc.mutex.Lock()\n\t\tc.declare = declare\n\t\tc.mutex.Unlock()\n\t}\n\treturn c\n}", "func (m *CloudCommunications) SetCalls(value []Callable)() {\n err := m.GetBackingStore().Set(\"calls\", value)\n if err != nil {\n panic(err)\n }\n}", "func (r *RPC) SetQueueClientNoListen(c queue.Client) {\r\n\tgapi := NewGRpcServer(c, r.api)\r\n\tjapi := NewJSONRPCServer(c, r.api)\r\n\tr.gapi = gapi\r\n\tr.japi = japi\r\n\tr.c = c\r\n}", "func (c *Client) QueueBind(\n\texchange, queue, key string,\n\topts *QueueBindOpts,\n\tconnOpts *ConnectOpts) error {\n\n\tdefaultOpts := DefaultQueueBindOpts()\n\n\tif opts != nil {\n\t\tdefaultOpts = opts\n\t}\n\n\tdefaultConnOpts := DefaultConnectOpts()\n\tif connOpts != nil {\n\t\tdefaultConnOpts = connOpts\n\t}\n\n\tconn, err := c.connect(defaultConnOpts)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\tch, err := 
conn.Channel()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer ch.Close()\n\n\terr = ch.QueueBind(\n\t\tqueue,\n\t\tkey,\n\t\texchange,\n\t\tdefaultOpts.NoWait,\n\t\tdefaultOpts.Args,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (r *RPC) SetAPI(api client.QueueProtocolAPI) {\r\n\tr.api = api\r\n}", "func (m *Group) SetRejectedSenders(value []DirectoryObjectable)() {\n m.rejectedSenders = value\n}", "func (s *SQSServer) ListenAndServeQueues(queues ...QueueConf) error {\n\tif len(queues) == 0 {\n\t\treturn fmt.Errorf(\"Must specify at least one SQS queue to poll\")\n\t}\n\tpollctx, pollcancel := context.WithCancel(context.Background())\n\ttaskctx, taskcancel := context.WithCancel(context.Background())\n\ts.stopPolling = pollcancel\n\ts.stopTasks = taskcancel\n\tfor i := range queues {\n\t\tif queues[i].Name == \"\" {\n\t\t\treturn fmt.Errorf(\"Queue configuration must have a Name\")\n\t\t}\n\t\tif queues[i].Region == \"\" {\n\t\t\tqueues[i].Region = s.defaultRegion\n\t\t}\n\t\tif queues[i].ReadBatch == 0 {\n\t\t\tqueues[i].ReadBatch = defaultReadBatchSize\n\t\t}\n\t\tif queues[i].Metrics == nil {\n\t\t\tqueues[i].Metrics = func(MetricType, float64, int) {}\n\t\t}\n\t}\n\treturn s.pollQueues(pollctx, taskctx, queues)\n}", "func (m *ExternalConnection) SetGroups(value []ExternalGroupable)() {\n m.groups = value\n}", "func (a *Agent) SetServers(urls [][]URL) {\n\ta.Servers = urls\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue_Config) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue_Config\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *API) ListQueues(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"ListQueues\")\n\n\tqueueNamePrefix := req.FormValue(\"QueueNamePrefix\")\n\tvar queues []string\n\tfor k, v := range 
s.sqs.queues {\n\t\tif strings.HasPrefix(k, queueNamePrefix) {\n\t\t\tqueues = append(queues, v.url)\n\t\t}\n\t}\n\n\tresponse := ListQueuesResponse{\n\t\tResult: ListQueuesResult{queues},\n\t\tMetaData: ResponseMetaData{\"00000000-0000-0000-0000-000000000000\"},\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\tenc := xml.NewEncoder(w)\n\tenc.Indent(\" \", \" \")\n\tif err := enc.Encode(response); err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t}\n}", "func sendScheduleSetCommand(ctx context.Context, receiverObjectID string, body contentScheduleType) (rep string, err error) {\n\ti := 0\n\tmapSubElementSubObject, sliceDs := createMapElementSubObject(receiverObjectID, body.OwnerId)\n\tfor subElement, subObject := range mapSubElementSubObject {\n\t\t// set: schedule of subObject to subElement\n\t\tsubBody := body\n\t\tsubBody.OwnerId = subObject\n\n\t\trep, err = sendManagerPutCommandByLabel(ctx, sliceDs[i], subElement, PreficManager, MangerSchedule, ManagerPutMethod, subBody.String())\n\t\tif err != nil {\n\t\t\tLoggingClient.Error(fmt.Sprintf(\"Error: sendSetCommandTo(%s, %s)\", subElement, subBody.String()))\n\t\t}\n\t\ti++\n\t}\n\treturn\n}", "func (mq *MessageQueue) SetQueue(queueName string) error {\n\tif mq.Channel == nil {\n\t\tnewCH, err := mq.NewChannel()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmq.Channel = newCH\n\t}\n\tmq.Channel.Qos(mq.Prefetch, 0, false)\n\tif _, err := mq.Channel.QueueDeclare(\n\t\tqueueName, // name\n\t\ttrue, // durable\n\t\tfalse, // delete when unused\n\t\tfalse, // exclusive\n\t\tfalse, // no-wait\n\t\tnil, // arguments\n\t); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue\"], t, opts...); err != nil {\n\t\treturn 
err\n\t}\n\treturn nil\n}", "func (r *RPC) SetQueueClientNoListen(c queue.Client) {\n\tgapi := NewGRpcServer(c, r.api)\n\tjapi := NewJSONRPCServer(c, r.api)\n\tr.gapi = gapi\n\tr.japi = japi\n\tr.c = c\n}", "func (s *server) setAllowedHosts(allowedHosts []string) {\n\ts.hosts.Lock()\n\tdefer s.hosts.Unlock()\n\ts.hosts.table = make(map[string]bool, len(allowedHosts))\n\ts.hosts.wildcards = nil\n\tfor _, h := range allowedHosts {\n\t\tif strings.Index(h, \"*\") != -1 {\n\t\t\ts.hosts.wildcards = append(s.hosts.wildcards, strings.ToLower(h))\n\t\t} else {\n\t\t\ts.hosts.table[strings.ToLower(h)] = true\n\t\t}\n\t}\n}", "func (j *qProxyListQueuesServer) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (queue *Queue) SetBufferSize(bufferSize int) error {\n\tif bufferSize < 0 {\n\t\treturn fmt.Errorf(\n\t\t\t\"buffer size is less than 0: %d\", bufferSize)\n\t}\n\n\tif bufferSize < len(queue.data) {\n\t\treturn fmt.Errorf(\n\t\t\t\"buffer size is less than the length of the queue: %d\",\n\t\t\tbufferSize)\n\t}\n\n\tdata := make([]interface{}, len(queue.data), bufferSize)\n\n\tcopy(data, queue.data)\n\tqueue.data = data\n\n\treturn nil\n}", "func (p *MockPeer) SetRoles(roles []string) {\r\n\tp.MockRoles = roles\r\n}", "func (storage *SrvStorage) GetVhostQueues(vhost string) []*queue.Queue {\n\tvar queues []*queue.Queue\n\tstorage.db.Iterate(\n\t\tfunc(key []byte, value []byte) {\n\t\t\tif !bytes.HasPrefix(key, []byte(queuePrefix)) || getVhostFromKey(string(key)) != vhost {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tq := &queue.Queue{}\n\t\t\tq.Unmarshal(value, storage.protoVersion)\n\t\t\tqueues = append(queues, q)\n\t\t},\n\t)\n\n\treturn queues\n}", "func QueueBind(ch *amqp.Channel, qName, rKey, exchange string, noWait bool) error {\n\terr := ch.QueueBind(\n\t\tqName, // queue name\n\t\trKey, // routing key\n\t\texchange, // exchange\n\t\tnoWait,\n\t\tnil,\n\t)\n\treturn err\n}", "func (m 
*AudioRoutingGroup) SetSources(value []string)() {\n err := m.GetBackingStore().Set(\"sources\", value)\n if err != nil {\n panic(err)\n }\n}", "func (m *AppVulnerabilityTask) SetManagedDevices(value []AppVulnerabilityManagedDeviceable)() {\n err := m.GetBackingStore().Set(\"managedDevices\", value)\n if err != nil {\n panic(err)\n }\n}", "func (p *localWorkerPool) SetActual(ctx context.Context, lw api.LocalWorker, remoteAddr string) error {\n\tlwPoolMetrics.SetActualTotalCounters.WithLabelValues(lw.GetId()).Inc()\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\n\tid := lw.GetId()\n\tentry, found := p.workers[id]\n\tif !found {\n\t\tentry = &localWorkerEntry{}\n\t\tentry.LocalWorker.Id = id\n\t\tp.workers[id] = entry\n\t}\n\tentry.remoteAddr = remoteAddr\n\tentry.LocalWorker.Actual = lw.GetActual().Clone()\n\tentry.lastUpdatedActualAt = time.Now()\n\tp.actuals.Pub(entry.LocalWorker)\n\treturn nil\n}", "func (mn *MockNetwork) SetMessageHandler(network.MessageHandler) {\n\n}", "func (m *User) SetMailboxSettings(value MailboxSettingsable)() {\n m.mailboxSettings = value\n}", "func (s *ListQueuesOutput) SetQueueUrls(v []*string) *ListQueuesOutput {\n\ts.QueueUrls = v\n\treturn s\n}", "func (m *VirtualEndpoint) SetCloudPCs(value []CloudPCable)() {\n err := m.GetBackingStore().Set(\"cloudPCs\", value)\n if err != nil {\n panic(err)\n }\n}", "func SetAllowedHosts(allowed []string) {\n\tDefaultDialer.SetAllowedHosts(allowed)\n}", "func SetRabbitMQ(bk *RabbitMQBroker) OptionFunc {\n\treturn func(bi *brokerInstance) {\n\t\tbi.rabbitmq = bk\n\t}\n}", "func (_Lmc *LmcSession) SetReceiverWhitelisted(receiver common.Address, whitelisted bool) (*types.Transaction, error) {\n\treturn _Lmc.Contract.SetReceiverWhitelisted(&_Lmc.TransactOpts, receiver, whitelisted)\n}", "func (s *Service) ReleaseRemote(r *RemoteConnection) {\r\n\ts.Mutex.Lock()\r\n\tn := 0\r\n\tfor _, x := range s.Remotes {\r\n\t\tif x != r {\r\n\t\t\ts.Remotes[n] = x\r\n\t\t\tn++\r\n\t\t}\r\n\t}\r\n\ts.Remotes = 
s.Remotes[:n]\r\n\ts.Mutex.Unlock()\r\n}", "func (r *Redis) SetVoiceServer(pk types.VoiceServerUpdate) error {\n\tb, err := json.Marshal(&pk)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn r.c.Set(keys.PrefixVoiceServer.Fmt(pk.GuildID), b, 0).Err()\n}", "func (m *ServiceAnnouncement) SetMessages(value []ServiceUpdateMessageable)() {\n m.messages = value\n}", "func Set(items []utils.Pair, db RedisDBClientInterface, group environment.EnvironmentGroup) error {\n\tfor _, kv := range items {\n\t\tfst := extract(kv.Fst.(string), group)\n\t\tsnd := extract(kv.Snd.(string), group)\n\t\t_, err := db.Set(fst, snd,0).Result()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (r *RPC) SetAPI(api client.QueueProtocolAPI) {\n\tr.api = api\n}", "func (sc *ShamClient) setLocalRegistry(endpoints []string) {\n\tsc.lrMutex.Lock()\n\tdefer sc.lrMutex.Unlock()\n\n\tsc.localRegistry = endpoints\n}", "func SetSocksPort(v string) func(*Manager) error {\n\treturn func(c *Manager) error {\n\t\tport, err := strconv.Atoi(v)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Invalid port; non-number.\")\n\t\t}\n\t\tif port < 65536 && port > -1 {\n\t\t\tc.port = v\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"Invalid port.\")\n\t}\n}", "func (api *HWApi) SetRemoteS3Conf(remoteName string, conf *aws.Config) {\n\tif api.remoteS3 == nil {\n\t\tapi.remoteS3 = map[string]*aws.Config{}\n\t}\n\tapi.remoteS3[remoteName] = conf\n}", "func (s *InMemoryStore) GetOrSetQueueURL(ctx context.Context, queueName string, fn func() (string, error)) (string, error) {\n\treturn s.getOrSet(\"queue:\"+queueName, fn)\n}" ]
[ "0.6111192", "0.59353924", "0.5917593", "0.59074235", "0.584018", "0.5630969", "0.56206805", "0.5559726", "0.547905", "0.53623676", "0.53450924", "0.5335673", "0.5274789", "0.52499294", "0.52425617", "0.51923627", "0.5152956", "0.5142641", "0.50583076", "0.5016923", "0.4999753", "0.49943715", "0.4990845", "0.49540374", "0.4881145", "0.48736754", "0.48334", "0.4813134", "0.48027474", "0.4794664", "0.47792572", "0.47771478", "0.4773323", "0.4753753", "0.47400615", "0.47303507", "0.47265765", "0.4726158", "0.47027966", "0.4698985", "0.46851775", "0.46684587", "0.46677646", "0.46676436", "0.46610665", "0.4654984", "0.4652009", "0.46446836", "0.46446618", "0.46419862", "0.46247813", "0.46132106", "0.45984542", "0.45914304", "0.45904788", "0.45867288", "0.45862207", "0.457997", "0.4577958", "0.4575017", "0.4570709", "0.45636785", "0.45633018", "0.45547938", "0.45508882", "0.45461553", "0.45444688", "0.45419553", "0.4540883", "0.4540239", "0.45229688", "0.45191798", "0.45183614", "0.45123735", "0.44815382", "0.4477171", "0.44682732", "0.4446785", "0.44415224", "0.44391018", "0.44389075", "0.44334117", "0.44317225", "0.4428487", "0.44241983", "0.4421286", "0.44198185", "0.44176102", "0.44159624", "0.4411642", "0.44085184", "0.44064704", "0.44057462", "0.43915167", "0.43896475", "0.43865418", "0.43856913", "0.43848237", "0.43842608", "0.43712318" ]
0.68890077
0
GetClusterQueues returns the ClusterQueues field value
func (o *QueueManager) GetClusterQueues() []ClusterQueue { if o == nil { var ret []ClusterQueue return ret } return o.ClusterQueues }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ClusterQueues, true\n}", "func (o *QueueManager) GetClusters() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\n\treturn o.Clusters\n}", "func (client *Client) GetClusterQueueInfo(request *GetClusterQueueInfoRequest) (response *GetClusterQueueInfoResponse, err error) {\n\tresponse = CreateGetClusterQueueInfoResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}", "func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}", "func (t *TopicCache) GetQueue(projectName, serviceName string) []string {\n\tt.RLock()\n\tdefer t.RUnlock()\n\n\tif len(t.inQueue[projectName+serviceName]) >= 100 {\n\t\treturn t.inQueue[projectName+serviceName][:99]\n\t}\n\n\treturn t.inQueue[projectName+serviceName]\n}", "func GetQueues(c *gin.Context) {\n\t//TODO: create a while both back and front until value is != nil\n\tsize := len(queue)\n\tlog.Printf(\"squeue: %v\", queue)\n\tif size == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue don't have any item!\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"queues\": queue,\n\t})\n\tlog.Printf(\"equeue: %v\", queue)\n}", 
"func GetAvailableQueues() ([]string, error) {\n\tclient := &http.Client{}\n\n\tres, err := client.Get(Host + \"/queues\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trespBody := res.Body\n\tdefer respBody.Close()\n\n\tavailableQueues := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{}\n\tif err := json.NewDecoder(respBody).Decode(&availableQueues); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn availableQueues.Queues, nil\n}", "func (p *Project) Queues() (*[]Queue, error) {\n qs := make([]Queue, 0)\n err := Mongo.Get(\"queue\", bson.M{\"project\": p.ID}, MaxQueuesPerProject, &qs)\n return &qs, err\n}", "func (o *QueueManager) SetClusterQueues(v []ClusterQueue) {\n\to.ClusterQueues = v\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) 
{\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}", "func (i *Inspector) Queues() ([]string, error) {\n\treturn i.rdb.AllQueues()\n}", "func (bs *BeanstalkdConnectionPool) ListQueues() (queueNames []string, err error) {\n\tqueueNames, err = bs.getGlobalConnect().ListTubes()\n\treturn\n}", "func listQueues(ENV string) []string {\n \t// Using the SDK's default configuration, loading additional config\n\t// and credentials values from the environment variables, shared\n\t// credentials, and shared configuration files\n\n\tsess, err := session.NewSession(&aws.Config{\n\t Region: aws.String(\"us-east-1\")},\n\t)\n\n // Create a SQS service client.\n svc := sqs.New(sess)\n\n\t//have to create a session object first\n\toutput, err := svc.ListQueues(&sqs.ListQueuesInput{\n\t QueueNamePrefix: aws.String(ENV),\n })\n\tif err != nil { panic(err) }\n\n\tqueues := output.QueueUrls\n\tfinal_queues := []string{}\n\n\tfor _, i := range queues {\n\t fmt.Println(string(*i))\n\t final_queues = append(final_queues, *i)\n }\n\treturn final_queues\n}", "func (c *restClient) ListQueues(ctx context.Context, req *cloudtaskspb.ListQueuesRequest, opts ...gax.CallOption) *QueueIterator {\n\tit := &QueueIterator{}\n\treq = proto.Clone(req).(*cloudtaskspb.ListQueuesRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*cloudtaskspb.Queue, string, error) {\n\t\tresp := &cloudtaskspb.ListQueuesResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v2beta3/%v/queues\", 
req.GetParent())\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetFilter() != \"\" {\n\t\t\tparams.Add(\"filter\", fmt.Sprintf(\"%v\", req.GetFilter()))\n\t\t}\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\t\tif req.GetReadMask() != nil {\n\t\t\treadMask, err := protojson.Marshal(req.GetReadMask())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t\tparams.Add(\"readMask\", string(readMask[1:len(readMask)-1]))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetQueues(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", 
err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}", "func (client *Client) GetClusterQueueInfoWithCallback(request *GetClusterQueueInfoRequest, callback func(response *GetClusterQueueInfoResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetClusterQueueInfoResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetClusterQueueInfo(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (o *QueueManager) GetAliasQueues() []AliasQueue {\n\tif o == nil {\n\t\tvar ret []AliasQueue\n\t\treturn ret\n\t}\n\n\treturn o.AliasQueues\n}", "func (ClearTrans) GetQueue() string {\n\treturn \"cy_rubik_clearTrans\"\n}", "func (storage *SrvStorage) GetVhostQueues(vhost string) []*queue.Queue {\n\tvar queues []*queue.Queue\n\tstorage.db.Iterate(\n\t\tfunc(key []byte, value []byte) {\n\t\t\tif !bytes.HasPrefix(key, []byte(queuePrefix)) || getVhostFromKey(string(key)) != vhost {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tq := &queue.Queue{}\n\t\t\tq.Unmarshal(value, storage.protoVersion)\n\t\t\tqueues = append(queues, q)\n\t\t},\n\t)\n\n\treturn queues\n}", "func (svc *AdminBuildService) GetQueue(opt *GetQueueOptions) (*[]library.BuildQueue, *Response, error) {\n\t// set the API endpoint path we send the request to\n\tu := \"/api/v1/admin/builds/queue\"\n\n\t// add optional arguments if supplied\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// BuildQueue type we want to return\n\tv := new([]library.BuildQueue)\n\n\tresp, err := svc.client.Call(\"GET\", u, nil, v)\n\n\treturn v, resp, err\n}", "func (s *API) 
ListQueues(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"ListQueues\")\n\n\tqueueNamePrefix := req.FormValue(\"QueueNamePrefix\")\n\tvar queues []string\n\tfor k, v := range s.sqs.queues {\n\t\tif strings.HasPrefix(k, queueNamePrefix) {\n\t\t\tqueues = append(queues, v.url)\n\t\t}\n\t}\n\n\tresponse := ListQueuesResponse{\n\t\tResult: ListQueuesResult{queues},\n\t\tMetaData: ResponseMetaData{\"00000000-0000-0000-0000-000000000000\"},\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\tenc := xml.NewEncoder(w)\n\tenc.Indent(\" \", \" \")\n\tif err := enc.Encode(response); err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t}\n}", "func (r *RPC) GetQueueClient() queue.Client {\r\n\treturn r.c\r\n}", "func (r *RPC) GetQueueClient() queue.Client {\n\treturn r.c\n}", "func (h *Hospital) ConsumeQueues(ctx context.Context, t *testing.T) (events int, messages []string) {\n\tt.Helper()\n\treturn h.ConsumeQueuesWithLimit(ctx, t, -1, true)\n}", "func (this *Queue) GetQueue() (val Mensaje, err error) {\n\t// Primero determina si la cola está vacía\n\tif this.rear == this.front {\n\t\treturn Mensaje{0, \"0\", \"0\"}, errors.New(\"Cola de Mensajes Vacia\")\n\t}\n\tthis.front++\n\tval = this.array[this.front]\n\treturn val, err\n}", "func (p *Process) CmdGetQueue(pac teoapi.Packet) (err error) {\n\tdata := pac.RemoveTrailingZero(pac.Data())\n\trequest := cdb.KeyValue{Cmd: pac.Cmd()}\n\tif err = request.UnmarshalText(data); err != nil {\n\t\treturn\n\t}\n\t// Return only Value for text requests and all fields for json\n\tresponce := request\n\tif responce.Value, err = p.tcdb.GetQueue(request.Key); err != nil {\n\t\treturn\n\t} else if !request.RequestInJSON {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), responce.Value)\n\t} else if retdata, err := responce.MarshalText(); err == nil {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), retdata)\n\t}\n\treturn\n}", "func (s *UserDataFilters) SetQueues(v []*string) *UserDataFilters {\n\ts.Queues = 
v\n\treturn s\n}", "func (q *Queue) GetQueue() []types.Event {\n\treturn q.Queue\n}", "func (s QueueSetSpy) Queues() map[DeploymentID]*R11nQueue {\n\tres := s.Called()\n\treturn res.Get(0).(map[DeploymentID]*R11nQueue)\n}", "func (s *Filters) SetQueues(v []*string) *Filters {\n\ts.Queues = v\n\treturn s\n}", "func (m *RedisProxy) GetCluster() string {\n\tif m != nil {\n\t\treturn m.Cluster\n\t}\n\treturn \"\"\n}", "func (p *Pool) GetQueue() chan ThreeDPrinter {\n\treturn p.printers\n}", "func maximumClique(g graph.Undirected) (k int, maxClique []graph.Node, cliques [][]graph.Node) {\n\tcliques = topo.BronKerbosch(g)\n\tfor _, c := range topo.BronKerbosch(g) {\n\t\tif len(c) > len(maxClique) {\n\t\t\tmaxClique = c\n\t\t}\n\t}\n\treturn len(maxClique), maxClique, cliques\n}", "func (cfg *Config) MQServers() string {\n\treturn os.Getenv(\"MQ_SERVERS\")\n}", "func (a *Client) GetMsgVpnJndiQueues(params *GetMsgVpnJndiQueuesParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnJndiQueuesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnJndiQueuesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnJndiQueues\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/jndiQueues\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnJndiQueuesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnJndiQueuesOK), nil\n\n}", "func (o *QueueManager) GetRemoteQueues() []RemoteQueue {\n\tif o == nil {\n\t\tvar ret []RemoteQueue\n\t\treturn ret\n\t}\n\n\treturn o.RemoteQueues\n}", "func (rqs *R11nQueueSet) Queues() map[DeploymentID]*R11nQueue {\n\trqs.Lock()\n\tdefer 
rqs.Unlock()\n\ts := make(map[DeploymentID]*R11nQueue, len(rqs.set))\n\tfor k, v := range rqs.set {\n\t\ts[k] = v\n\t}\n\treturn s\n}", "func (c *ClientIMPL) GetCluster(ctx context.Context) (resp Cluster, err error) {\n\tvar systemList []Cluster\n\tcluster := Cluster{}\n\tqp := c.APIClient().QueryParamsWithFields(&cluster)\n\n\tmajorMinorVersion, err := c.GetSoftwareMajorMinorVersion(ctx)\n\tif err != nil {\n\t\tlog.Errorf(\"Couldn't find the array version %s\", err.Error())\n\t} else {\n\t\tif majorMinorVersion >= 3.0 {\n\t\t\tqp.Select(\"nvm_subsystem_nqn\")\n\t\t}\n\t}\n\t_, err = c.APIClient().Query(\n\t\tctx,\n\t\tRequestConfig{\n\t\t\tMethod: \"GET\",\n\t\t\tEndpoint: clusterURL,\n\t\t\tQueryParams: qp,\n\t\t},\n\t\t&systemList)\n\terr = WrapErr(err)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\treturn systemList[0], err\n}", "func (o *QueueManager) GetClustersOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Clusters, true\n}", "func (p Peel) AllQueuesConsumerGroups() (map[string][]string, error) {\n\tkk, err := p.c.KeyScan(core.Key{Base: \"*\", Subs: []string{\"*\"}})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tm := map[string]map[string]struct{}{}\n\tfor _, k := range kk {\n\t\tif k, err = queueKeyUnmarshal(k); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif m[k.Base] == nil {\n\t\t\tm[k.Base] = map[string]struct{}{}\n\t\t}\n\t\tif k.Subs[0] == \"available\" {\n\t\t\tcontinue\n\t\t}\n\t\tm[k.Base][k.Subs[0]] = struct{}{}\n\t}\n\n\toutm := map[string][]string{}\n\tfor q, cgm := range m {\n\t\toutm[q] = make([]string, 0, len(cgm))\n\t\tfor cg := range cgm {\n\t\t\toutm[q] = append(outm[q], cg)\n\t\t}\n\t}\n\n\treturn outm, nil\n}", "func (taskBolt *TaskBolt) ReadQueue(n int) []*ga4gh_task_exec.Job {\n\tjobs := make([]*ga4gh_task_exec.Job, 0)\n\ttaskBolt.db.View(func(tx *bolt.Tx) error {\n\n\t\t// Iterate over the JobsQueued bucket, reading the first `n` jobs\n\t\tc := tx.Bucket(JobsQueued).Cursor()\n\t\tfor k, _ 
:= c.First(); k != nil && len(jobs) < n; k, _ = c.Next() {\n\t\t\tid := string(k)\n\t\t\tjob := getJob(tx, id)\n\t\t\tjobs = append(jobs, job)\n\t\t}\n\t\treturn nil\n\t})\n\treturn jobs\n}", "func (obj *miner) Queue() buckets.Buckets {\n\treturn obj.queue\n}", "func (c *jsiiProxy_CfnQueue) ToString() *string {\n\tvar returns *string\n\n\t_jsii_.Invoke(\n\t\tc,\n\t\t\"toString\",\n\t\tnil, // no parameters\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func (c *restClient) GetQueue(ctx context.Context, req *cloudtaskspb.GetQueueRequest, opts ...gax.CallOption) (*cloudtaskspb.Queue, error) {\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v2beta3/%v\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tif req.GetReadMask() != nil {\n\t\treadMask, err := protojson.Marshal(req.GetReadMask())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tparams.Add(\"readMask\", string(readMask[1:len(readMask)-1]))\n\t}\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\topts = append((*c.CallOptions).GetQueue[0:len((*c.CallOptions).GetQueue):len((*c.CallOptions).GetQueue)], opts...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &cloudtaskspb.Queue{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := 
c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn resp, nil\n}", "func (m *MatchInfo) GetQueue(client *static.Client) (static.Queue, error) {\n\treturn client.GetQueue(m.QueueID)\n}", "func (psc *PartitionSchedulingContext) GetQueue(queueName string) *SchedulingQueue {\n psc.lock.RLock()\n defer psc.lock.RUnlock()\n\n return psc.queues[queueName]\n}", "func (d *Device) GetQueue(qf *QueueFamily) *Queue {\n\n\tvar vkq vk.Queue\n\n\tvk.GetDeviceQueue(d.VKDevice, uint32(qf.Index), 0, &vkq)\n\n\tvar queue Queue\n\tqueue.QueueFamily = qf\n\tqueue.Device = d\n\tqueue.VKQueue = vkq\n\n\treturn &queue\n}", "func (c *Cluster) GetKeyspaces(ctx context.Context) ([]*vtadminpb.Keyspace, error) {\n\tspan, ctx := trace.NewSpan(ctx, \"Cluster.GetKeyspaces\")\n\tdefer span.Finish()\n\n\tAnnotateSpan(c, span)\n\n\tif err := c.Vtctld.Dial(ctx); err != nil {\n\t\treturn nil, fmt.Errorf(\"Vtctld.Dial(cluster=%s) failed: %w\", c.ID, err)\n\t}\n\n\tresp, err := c.Vtctld.GetKeyspaces(ctx, &vtctldatapb.GetKeyspacesRequest{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar (\n\t\tm sync.Mutex\n\t\twg sync.WaitGroup\n\t\trec concurrency.AllErrorRecorder\n\t\tkeyspaces = make([]*vtadminpb.Keyspace, len(resp.Keyspaces))\n\t)\n\n\tfor i, ks := range resp.Keyspaces {\n\t\twg.Add(1)\n\t\tgo func(i int, ks *vtctldatapb.Keyspace) {\n\t\t\tdefer wg.Done()\n\n\t\t\tshards, err := c.FindAllShardsInKeyspace(ctx, ks.Name, FindAllShardsInKeyspaceOptions{SkipDial: true})\n\t\t\tif err != nil {\n\t\t\t\trec.RecordError(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tkeyspace := &vtadminpb.Keyspace{\n\t\t\t\tCluster: 
c.ToProto(),\n\t\t\t\tKeyspace: ks,\n\t\t\t\tShards: shards,\n\t\t\t}\n\n\t\t\tm.Lock()\n\t\t\tdefer m.Unlock()\n\t\t\tkeyspaces[i] = keyspace\n\t\t}(i, ks)\n\t}\n\n\twg.Wait()\n\tif rec.HasErrors() {\n\t\treturn nil, rec.Error()\n\t}\n\n\treturn keyspaces, nil\n}", "func (client *Client) GetClusterQueueInfoWithChan(request *GetClusterQueueInfoRequest) (<-chan *GetClusterQueueInfoResponse, <-chan error) {\n\tresponseChan := make(chan *GetClusterQueueInfoResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetClusterQueueInfo(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (a *Client) GetMsgVpnQueues(params *GetMsgVpnQueuesParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnQueuesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnQueuesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnQueues\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/queues\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnQueuesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnQueuesOK), nil\n\n}", "func (c *NetClient) OutQueue() chan<- []byte {\n\treturn c.outQueue\n}", "func (o TopicRuleSqsOutput) QueueUrl() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TopicRuleSqs) string { return v.QueueUrl }).(pulumi.StringOutput)\n}", "func (cb *clientBase) GetCluster() 
string {\n\treturn cb.cluster\n}", "func (mq MetricsQueue) Len() int { return len(mq) }", "func (s *SQSServer) pollQueues(pollctx, taskctx context.Context, queues []QueueConf) error {\n\tfor _, qconf := range queues {\n\t\tq, err := s.getQueue(pollctx, qconf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq := &sqs.GetQueueAttributesInput{\n\t\t\tAttributeNames: []types.QueueAttributeName{(\"VisibilityTimeout\")},\n\t\t\tQueueUrl: &q.url,\n\t\t}\n\t\tresp, err := s.sqsSrv(q.QueueConf).GetQueueAttributes(pollctx, req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to get queue attributes for '%s' - %s\", q.Name, err.Error())\n\t\t}\n\t\tto := resp.Attributes[\"VisibilityTimeout\"]\n\t\tif to == \"\" {\n\t\t\treturn fmt.Errorf(\"No visibility timeout returned by SQS for queue '%s'\", q.Name)\n\t\t}\n\t\tvisTimeout, err := strconv.Atoi(to)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to convert visibility timeout from '%s' to int - '%s'\", to, err.Error())\n\t\t}\n\t\t// Each queue runs in a dedicated go routine.\n\t\tgo func(vt int32) {\n\t\t\ts.queuePollers.Add(1)\n\t\t\tdefer s.queuePollers.Done()\n\t\t\ts.run(pollctx, taskctx, q, vt)\n\t\t}(int32(visTimeout))\n\t}\n\n\treturn nil\n}", "func GetMessageQueue() *MessageQueue {\n\treturn messageQueue\n}", "func clusterList() []string {\n\tif c := envy.String(\"DQLITED_CLUSTER\"); c != \"\" {\n\t\treturn strings.Split(c, \",\")\n\t}\n\treturn defaultCluster\n}", "func (b *backend) QueueStats(ctx context.Context, qq *entroq.QueuesQuery) (map[string]*entroq.QueueStat, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).QueueStats(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get queue stats over gRPC: %w\", err)\n\t}\n\tqs := make(map[string]*entroq.QueueStat)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = &entroq.QueueStat{\n\t\t\tName: 
q.Name,\n\t\t\tSize: int(q.NumTasks),\n\t\t\tClaimed: int(q.NumClaimed),\n\t\t\tAvailable: int(q.NumAvailable),\n\t\t\tMaxClaims: int(q.MaxClaims),\n\t\t}\n\t}\n\treturn qs, nil\n}", "func groomQueues(queues *Queues) (err kv.Error) {\n\tfor qName, qDetails := range *queues {\n\t\t// If we have enough runners drop the queue as it needs nothing done to it\n\t\tif len(qDetails.NodeGroup) == 0 || qDetails.Running >= qDetails.Ready+qDetails.NotVisible {\n\t\t\tif logger.IsTrace() {\n\t\t\t\tlogger.Trace(\"queue already handled\", \"queue\", qName, \"stack\", stack.Trace().TrimRuntime())\n\t\t\t}\n\t\t\tdelete(*queues, qName)\n\t\t}\n\t}\n\treturn nil\n}", "func (o NetworkInterfaceOutput) QueueCount() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v NetworkInterface) *int { return v.QueueCount }).(pulumi.IntPtrOutput)\n}", "func (client *Client) GetQueueURL(name string) (string, error) {\n\tvar parsedResponse GetQueueURLResult\n\turl := NewGetQueueURLRequest(client.EndPointURL, name).URL()\n\n\tresp, err := client.Get(url)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\treturn \"\", errors.New(string(body))\n\t}\n\n\terr = xml.Unmarshal(body, &parsedResponse)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn parsedResponse.QueueURL, nil\n}", "func (c *MQCache) Len() (totalLen int64, queuesLen []int64) {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\tfor i := 0; i < c.queues; i++ {\n\t\tc.queuesLen[i] = c.q[i].len()\n\t\ttotalLen += c.queuesLen[i]\n\t}\n\treturn totalLen, c.queuesLen\n}", "func (m *SetNodePoolSizeRequest) GetClusterId() string {\n\tif m != nil {\n\t\treturn m.ClusterId\n\t}\n\treturn \"\"\n}", "func GetQueue(id string) Queue {\n\tservice := broker.GetService(ServiceName).(*QueueService)\n\treturn service.getQueue(id)\n}", "func GetClusterConfig(req *restful.Request, resp *restful.Response) {\n\tconst 
(\n\t\thandler = \"GetClusterConfig\"\n\t)\n\tspan := v1http.SetHTTPSpanContextInfo(req, handler)\n\tdefer span.Finish()\n\n\tr, err := generateData(req, getCls)\n\tif err != nil {\n\t\tutils.SetSpanLogTagError(span, err)\n\t\tblog.Errorf(\"%s | err: %v\", common.BcsErrStorageGetResourceFailStr, err)\n\t\tlib.ReturnRest(&lib.RestResponse{\n\t\t\tResp: resp,\n\t\t\tErrCode: common.BcsErrStorageGetResourceFail,\n\t\t\tMessage: common.BcsErrStorageGetResourceFailStr})\n\t\treturn\n\t}\n\tlib.ReturnRest(&lib.RestResponse{Resp: resp, Data: r})\n}", "func (c *QueueClient) Get(ctx context.Context, id int) (*Queue, error) {\n\treturn c.Query().Where(queue.ID(id)).Only(ctx)\n}", "func (clusterInfo ClusterInfo) CreateQueues(queues []rh.QueueInfo) error {\n\trmqc, err := rh.NewClient(clusterInfo.AdminURL(), clusterInfo.UserName, clusterInfo.Password)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, queue := range queues {\n\t\tlog.Printf(\"Creating queue %v\", queue.Name)\n\t\t_, err = rmqc.DeclareQueue(clusterInfo.Vhost, queue.Name, rh.QueueSettings{Durable: queue.Durable, AutoDelete: queue.AutoDelete, Arguments: queue.Arguments})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (o *V0037Node) GetThreads() int32 {\n\tif o == nil || o.Threads == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.Threads\n}", "func (m SQSMonitor) receiveQueueMessages(qURL string) ([]*sqs.Message, error) {\n\tresult, err := m.SQS.ReceiveMessage(&sqs.ReceiveMessageInput{\n\t\tAttributeNames: []*string{\n\t\t\taws.String(sqs.MessageSystemAttributeNameSentTimestamp),\n\t\t},\n\t\tMessageAttributeNames: []*string{\n\t\t\taws.String(sqs.QueueAttributeNameAll),\n\t\t},\n\t\tQueueUrl: &qURL,\n\t\tMaxNumberOfMessages: aws.Int64(10),\n\t\tVisibilityTimeout: aws.Int64(20), // 20 seconds\n\t\tWaitTimeSeconds: aws.Int64(20), // Max long polling\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn result.Messages, nil\n}", "func GetQueue(config *Configuration) 
(*Queue, error) {\n\tvar wg sync.WaitGroup\n\tvar wk int\n\n\tq := Queue{&wg, false, nil, nil, nil, nil, &wk}\n\n\tq.Config = config\n\n\tconn, err := amqp.Dial(config.Host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tq.connection = conn\n\tq.Connected = true\n\tch, err := q.connection.Channel()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tq.channel = ch\n\tq.channel.Qos(config.PrefetchCount, config.PrefetchByteSize, true)\n\n\tiq, err := q.channel.QueueDeclare(config.RoutingKey, config.Durable, config.DeleteIfUnused, config.Exclusive, config.NoWait, config.arguments)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif config.Exchange != \"\" {\n\t\terr = q.bind()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tq.internalQueue = &iq\n\n\treturn &q, nil\n}", "func (o LookupQueueResultOutput) AppEngineHttpQueue() AppEngineHttpQueueResponseOutput {\n\treturn o.ApplyT(func(v LookupQueueResult) AppEngineHttpQueueResponse { return v.AppEngineHttpQueue }).(AppEngineHttpQueueResponseOutput)\n}", "func (s consumerNamespaceLister) Get(name string) (*arbv1.Consumer, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(arbv1.Resource(\"queue\"), name)\n\t}\n\treturn obj.(*arbv1.Consumer), nil\n}", "func (a *adapter) queueLookup(queueName string) (*sqs.GetQueueUrlOutput, error) {\n\treturn a.sqsClient.GetQueueUrl(&sqs.GetQueueUrlInput{\n\t\tQueueName: &queueName,\n\t})\n}", "func SetQueueSettings(ctx *context.Context) {\n\tqid := ctx.ParamsInt64(\"qid\")\n\tmq := queue.GetManager().GetManagedQueue(qid)\n\tif mq == nil {\n\t\tctx.Status(http.StatusNotFound)\n\t\treturn\n\t}\n\tif _, ok := mq.Managed.(queue.ManagedPool); !ok {\n\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.pool.none\"))\n\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\treturn\n\t}\n\n\tmaxNumberStr := 
ctx.FormString(\"max-number\")\n\tnumberStr := ctx.FormString(\"number\")\n\ttimeoutStr := ctx.FormString(\"timeout\")\n\n\tvar err error\n\tvar maxNumber, number int\n\tvar timeout time.Duration\n\tif len(maxNumberStr) > 0 {\n\t\tmaxNumber, err = strconv.Atoi(maxNumberStr)\n\t\tif err != nil {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.maxnumberworkers.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t\tif maxNumber < -1 {\n\t\t\tmaxNumber = -1\n\t\t}\n\t} else {\n\t\tmaxNumber = mq.MaxNumberOfWorkers()\n\t}\n\n\tif len(numberStr) > 0 {\n\t\tnumber, err = strconv.Atoi(numberStr)\n\t\tif err != nil || number < 0 {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.numberworkers.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tnumber = mq.BoostWorkers()\n\t}\n\n\tif len(timeoutStr) > 0 {\n\t\ttimeout, err = time.ParseDuration(timeoutStr)\n\t\tif err != nil {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.timeout.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t} else {\n\t\ttimeout = mq.BoostTimeout()\n\t}\n\n\tmq.SetPoolSettings(maxNumber, number, timeout)\n\tctx.Flash.Success(ctx.Tr(\"admin.monitor.queue.settings.changed\"))\n\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n}", "func (o TopicRuleSqsPtrOutput) QueueUrl() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TopicRuleSqs) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.QueueUrl\n\t}).(pulumi.StringPtrOutput)\n}", "func (s databaseClusterNamespaceLister) Get(name string) (*v1alpha1.DatabaseCluster, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn 
nil, errors.NewNotFound(v1alpha1.Resource(\"databasecluster\"), name)\n\t}\n\treturn obj.(*v1alpha1.DatabaseCluster), nil\n}", "func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AliasQueues, true\n}", "func (m *ListNodePoolsRequest) GetClusterId() string {\n\tif m != nil {\n\t\treturn m.ClusterId\n\t}\n\treturn \"\"\n}", "func (m *RedisProxy_PrefixRoutes) GetCatchAllCluster() string {\n\tif m != nil {\n\t\treturn m.CatchAllCluster\n\t}\n\treturn \"\"\n}", "func GetSchedulableClusterZones(ctx context.Context, c clientset.Interface) (sets.Set[string], error) {\n\t// GetReadySchedulableNodes already filters our tainted and unschedulable nodes.\n\tnodes, err := GetReadySchedulableNodes(ctx, c)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting nodes while attempting to list cluster zones: %w\", err)\n\t}\n\n\t// collect values of zone label from all nodes\n\tzones := sets.New[string]()\n\tfor _, node := range nodes.Items {\n\t\tif zone, found := node.Labels[v1.LabelFailureDomainBetaZone]; found {\n\t\t\tzones.Insert(zone)\n\t\t}\n\n\t\tif zone, found := node.Labels[v1.LabelTopologyZone]; found {\n\t\t\tzones.Insert(zone)\n\t\t}\n\t}\n\treturn zones, nil\n}", "func demo_queue() {\n fmt.Print(\"\\n---QUEUE Logic---\\n\\n\")\n q := queue.Queue{}\n\n for i := 0; i <= 5; i++ {\n q.Enqueue(i)\n }\n fmt.Print(\"---Queue Before Dequeue---\\n\")\n q.PrintAll()\n dequeued := q.Dequeue()\n fmt.Printf(\"Dequeued Value: %v\\n\", dequeued)\n fmt.Print(\"---Queue After Dequeue---\\n\")\n q.PrintAll()\n}", "func GetFromQueue(queue string) ([]byte, error) {\n\treturn cache.Get(queue)\n}", "func (c *Consumer) Messages() <-chan Message {\n\treturn c.queue\n}", "func (o *V0037JobProperties) GetClusterConstraints() string {\n\tif o == nil || o.ClusterConstraints == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.ClusterConstraints\n}", "func (s *Store) GetQueueNames() ([]string, error) 
{\n\tvar names []string\n\treturn names, s.db.View(func(tx *bolt.Tx) error {\n\t\treturn s.queues(tx).ForEach(func(key, value []byte) error {\n\t\t\tnames = append(names, string(key))\n\t\t\treturn nil\n\t\t})\n\t})\n}", "func (mb *client) ReadFIFOQueue(address uint16) (results []byte, err error) {\n\trequest := ProtocolDataUnit{\n\t\tFunctionCode: FuncCodeReadFIFOQueue,\n\t\tData: dataBlock(address),\n\t}\n\tresponse, err := mb.send(&request)\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(response.Data) < 4 {\n\t\terr = fmt.Errorf(\"modbus: response data size '%v' is less than expected '%v'\", len(response.Data), 4)\n\t\treturn\n\t}\n\tcount := int(binary.BigEndian.Uint16(response.Data))\n\tif count != (len(response.Data) - 1) {\n\t\terr = fmt.Errorf(\"modbus: response data size '%v' does not match count '%v'\", len(response.Data)-1, count)\n\t\treturn\n\t}\n\tcount = int(binary.BigEndian.Uint16(response.Data[2:]))\n\tif count > 31 {\n\t\terr = fmt.Errorf(\"modbus: fifo count '%v' is greater than expected '%v'\", count, 31)\n\t\treturn\n\t}\n\tresults = response.Data[4:]\n\treturn\n}", "func (q *SimpleQueue) Get(ctx context.Context, user cn.CapUser, idStart int64, cntLimit int) (messages []*MessageWithMeta, err *mft.Error) {\n\tmessages, _, err = q.GetSegment(ctx, user, idStart, cntLimit, nil)\n\n\treturn messages, err\n}", "func (service *ContrailService) GetQosQueue(ctx context.Context, request *models.GetQosQueueRequest) (response *models.GetQosQueueResponse, err error) {\n\tspec := &models.ListSpec{\n\t\tLimit: 1,\n\t\tFilters: []*models.Filter{\n\t\t\t&models.Filter{\n\t\t\t\tKey: \"uuid\",\n\t\t\t\tValues: []string{request.ID},\n\t\t\t},\n\t\t},\n\t}\n\tlistRequest := &models.ListQosQueueRequest{\n\t\tSpec: spec,\n\t}\n\tvar result *models.ListQosQueueResponse\n\tif err := common.DoInTransaction(\n\t\tservice.DB,\n\t\tfunc(tx *sql.Tx) error {\n\t\t\tresult, err = db.ListQosQueue(ctx, tx, listRequest)\n\t\t\treturn err\n\t\t}); err != nil {\n\t\treturn nil, 
common.ErrorInternal\n\t}\n\tif len(result.QosQueues) == 0 {\n\t\treturn nil, common.ErrorNotFound\n\t}\n\tresponse = &models.GetQosQueueResponse{\n\t\tQosQueue: result.QosQueues[0],\n\t}\n\treturn response, nil\n}", "func (o *ClusterRequest) GetMaxRunningNodes() int32 {\n\tif o == nil || o.MaxRunningNodes == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.MaxRunningNodes\n}", "func deleteQueues(ctx *TestContext) {\n\tfor _, q := range ctx.Queues {\n\t\tDeleteQueue(ctx, q)\n\t}\n}", "func (service *ContrailService) ListQosQueue(\n\tctx context.Context,\n\trequest *models.ListQosQueueRequest) (response *models.ListQosQueueResponse, err error) {\n\tif err := common.DoInTransaction(\n\t\tservice.DB,\n\t\tfunc(tx *sql.Tx) error {\n\t\t\tresponse, err = db.ListQosQueue(ctx, tx, request)\n\t\t\treturn err\n\t\t}); err != nil {\n\t\treturn nil, common.ErrorInternal\n\t}\n\treturn response, nil\n}", "func GetClusterZones(ctx context.Context, c clientset.Interface) (sets.String, error) {\n\tnodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error getting nodes while attempting to list cluster zones: %w\", err)\n\t}\n\n\t// collect values of zone label from all nodes\n\tzones := sets.NewString()\n\tfor _, node := range nodes.Items {\n\t\tif zone, found := node.Labels[v1.LabelFailureDomainBetaZone]; found {\n\t\t\tzones.Insert(zone)\n\t\t}\n\n\t\tif zone, found := node.Labels[v1.LabelTopologyZone]; found {\n\t\t\tzones.Insert(zone)\n\t\t}\n\t}\n\treturn zones, nil\n}", "func (s *ItemQueue) GetMessages() []int {\n\tvar messages []int\n\ts.lock.Lock()\n\n\tfor i := 0; i < len(s.items); i++ {\n\t\t\tmessages[i] = s.items[i].ID\n\t}\n\n\ts.lock.Unlock()\n\treturn messages\n}", "func (d Dispatcher) JobQueueCount() int {\n\treturn d.GetJobPQ().Len()\n}", "func (m *ServerContext) OperationQueue() cutter.OperationQueue {\n\treturn m.OpQueue\n}", "func (o *PendingDeleteCluster) GetCluster() (value *Cluster, ok 
bool) {\n\tok = o != nil && o.bitmap_&16 != 0\n\tif ok {\n\t\tvalue = o.cluster\n\t}\n\treturn\n}", "func (c *checkQueueAttributeImpl) CheckQueueAttributeQuery(options CheckQueueAttributeOptions) icinga.Result {\n\tname := \"Queue.Attributes\"\n\n\tstatusCheck, err := icinga.NewStatusCheck(options.ThresholdWarning, options.ThresholdCritical)\n\tif err != nil {\n\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't check status: %v\", err))\n\t}\n\n\tif len(options.OkIfQueueIsMissing) > 0 {\n\t\tproperty := \"broker=\\\"0.0.0.0\\\"\"\n\t\tattribute := \"QueueNames\"\n\t\tqueueSearchResult, err := c.JolokiaClient.GetAttr(options.Domain, []string{property}, attribute)\n\t\tif err != nil {\n\t\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't query QueueNames in Jolokia: %v\", err))\n\t\t}\n\t\tif queueSearchResult == nil {\n\t\t\tif (options.Verbose > 0) {\n\t\t\t\tlog.Printf(\"No queues found: [%v]\", queueSearchResult)\n\t\t\t}\n\t\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't find QueueNames for [%v]\", property))\n\t\t}\n\n\t\tif !queueExists(queueSearchResult.([] interface{}), options.OkIfQueueIsMissing) {\n\t\t\tif (options.Verbose > 0) {\n\t\t\t\tlog.Printf(\"Queue [%v] not in queue list [%v]\", options.OkIfQueueIsMissing, queueSearchResult.([] interface{}))\n\t\t\t}\n\t\t\treturn icinga.NewResult(name, icinga.ServiceStatusOk, fmt.Sprintf(\"queue [%v] does not exist\", options.OkIfQueueIsMissing))\n\t\t}\n\t}\n\n\tsearchResult, err := c.JolokiaClient.GetAttr(options.Domain, []string{options.Queue}, options.Attribute)\n\tif err != nil {\n\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't query Jolokia: %v\", err))\n\t}\n\n\tresult, err := utils.ToFloat(searchResult)\n\tif err != nil {\n\t\tif (options.Verbose > 0) {\n\t\t\tlog.Printf(\"An error occured with result [%v]\", searchResult)\n\t\t}\n\t\treturn icinga.NewResult(name, 
icinga.ServiceStatusUnknown, fmt.Sprintf(\"query result is invalid: %v\", err))\n\t}\n\n\tmessage := fmt.Sprintf(\"Search produced: %v\", searchResult)\n\tstatus := statusCheck.Check(result)\n\n\treturn icinga.NewResult(name, status, message)\n}", "func (o NetworkInterfaceResponseOutput) QueueCount() pulumi.IntOutput {\n\treturn o.ApplyT(func(v NetworkInterfaceResponse) int { return v.QueueCount }).(pulumi.IntOutput)\n}", "func printQueue(q *Queue) {\n\tfmt.Println(q.values)\n}", "func (player *musicPlayer) getQueueInfo() ([]string, error) {\n\tplayer.Lock()\n\tdefer player.Unlock()\n\tif len(player.state.queue) == 0 {\n\t\treturn nil, errors.New(cannot_get_queue_info_msg)\n\t}\n\t//make a copy to the queue\n\tcopy := make([]string, 0, len(player.state.queue))\n\tfor _, el := range player.state.queue {\n\t\tcopy = append(copy, el)\n\t}\n\treturn copy, nil\n}" ]
[ "0.6691833", "0.6425696", "0.6164038", "0.6133957", "0.6117693", "0.6031181", "0.60217136", "0.5825728", "0.57695156", "0.57689", "0.5742274", "0.56948394", "0.55601054", "0.55268496", "0.5474696", "0.5450112", "0.5429199", "0.539163", "0.5356741", "0.52030075", "0.51988304", "0.5182143", "0.51490265", "0.51087147", "0.5103361", "0.5099392", "0.50851905", "0.5066335", "0.50540495", "0.5025029", "0.49590418", "0.4906924", "0.49022672", "0.48896283", "0.48868766", "0.48661202", "0.48596022", "0.48557484", "0.48547757", "0.48427603", "0.48256513", "0.48014542", "0.479396", "0.4790269", "0.47799766", "0.47531062", "0.4700358", "0.46963513", "0.46939135", "0.46890926", "0.46743894", "0.46738762", "0.46722856", "0.466206", "0.46505934", "0.46260458", "0.46253824", "0.46127254", "0.46066052", "0.46048033", "0.45979723", "0.45952168", "0.45886198", "0.4588499", "0.4586456", "0.45849684", "0.45836443", "0.45542175", "0.45519477", "0.45509022", "0.45242378", "0.45202643", "0.4508231", "0.45065653", "0.44599864", "0.44561458", "0.4452559", "0.44497415", "0.4448812", "0.44433922", "0.44373363", "0.44353268", "0.44342008", "0.4428824", "0.44241643", "0.4419383", "0.44171697", "0.4397003", "0.4396048", "0.4394981", "0.43945265", "0.43902647", "0.43878347", "0.43853906", "0.43821993", "0.4378504", "0.43714702", "0.4365387", "0.43605918", "0.435876" ]
0.741813
0
GetClusterQueuesOk returns a tuple with the ClusterQueues field value and a boolean to check if the value has been set.
func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) { if o == nil { return nil, false } return &o.ClusterQueues, true }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AliasQueues, true\n}", "func (o *QueueManager) GetClustersOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Clusters, true\n}", "func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueues, true\n}", "func (o *QueueManager) GetClusterQueues() []ClusterQueue {\n\tif o == nil {\n\t\tvar ret []ClusterQueue\n\t\treturn ret\n\t}\n\n\treturn o.ClusterQueues\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}", "func (o *QueueManager) SetClusterQueues(v []ClusterQueue) {\n\to.ClusterQueues = v\n}", "func (c *Context) HasQueuesMap(key string) bool {\n\treturn c.makross.HasQueuesMap(key)\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) 
{\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}", "func (client *Client) GetClusterQueueInfo(request *GetClusterQueueInfoRequest) (response *GetClusterQueueInfoResponse, err error) {\n\tresponse = CreateGetClusterQueueInfoResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func GetAvailableQueues() ([]string, error) {\n\tclient := &http.Client{}\n\n\tres, err := client.Get(Host + \"/queues\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trespBody := res.Body\n\tdefer respBody.Close()\n\n\tavailableQueues := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{}\n\tif err := json.NewDecoder(respBody).Decode(&availableQueues); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn availableQueues.Queues, nil\n}", "func (o *V0037Node) GetThreadsOk() (*int32, bool) {\n\tif o == nil || o.Threads == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Threads, true\n}", "func (m *Makross) HasQueuesMap(key string) bool {\n\tif value, okay := m.QueuesMap.Load(key); okay {\n\t\tif pqueue, okay := value.(*prior.PriorityQueue); okay {\n\t\t\tif pqueue.Length() > 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}", "func (s *UserDataFilters) SetQueues(v []*string) *UserDataFilters {\n\ts.Queues = v\n\treturn s\n}", "func (o *VnicEthAdapterPolicyInventory) GetTxQueueSettingsOk() (*VnicEthTxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.TxQueueSettings.Get(), o.TxQueueSettings.IsSet()\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Output_Queues) Validate(opts ...ygot.ValidationOption) error 
{\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Output_Queues\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func GetQueues(c *gin.Context) {\n\t//TODO: create a while both back and front until value is != nil\n\tsize := len(queue)\n\tlog.Printf(\"squeue: %v\", queue)\n\tif size == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue don't have any item!\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"queues\": queue,\n\t})\n\tlog.Printf(\"equeue: %v\", queue)\n}", "func (s *Filters) SetQueues(v []*string) *Filters {\n\ts.Queues = v\n\treturn s\n}", "func (o *VnicEthAdapterPolicyInventory) GetCompletionQueueSettingsOk() (*VnicCompletionQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CompletionQueueSettings.Get(), o.CompletionQueueSettings.IsSet()\n}", "func (i *Inspector) Queues() ([]string, error) {\n\treturn i.rdb.AllQueues()\n}", "func (o *VulnUpdateNotification) GetQueueIdOk() (*string, bool) {\n\tif o == nil || o.QueueId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.QueueId, true\n}", "func (h *Hospital) ConsumeQueues(ctx context.Context, t *testing.T) (events int, messages []string) {\n\tt.Helper()\n\treturn h.ConsumeQueuesWithLimit(ctx, t, -1, true)\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (o *QueueManager) GetClusters() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\n\treturn o.Clusters\n}", "func (s *API) ListQueues(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"ListQueues\")\n\n\tqueueNamePrefix := req.FormValue(\"QueueNamePrefix\")\n\tvar queues []string\n\tfor k, v := range s.sqs.queues {\n\t\tif strings.HasPrefix(k, queueNamePrefix) 
{\n\t\t\tqueues = append(queues, v.url)\n\t\t}\n\t}\n\n\tresponse := ListQueuesResponse{\n\t\tResult: ListQueuesResult{queues},\n\t\tMetaData: ResponseMetaData{\"00000000-0000-0000-0000-000000000000\"},\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\tenc := xml.NewEncoder(w)\n\tenc.Indent(\" \", \" \")\n\tif err := enc.Encode(response); err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t}\n}", "func (o *Replication) GetMaxQueueSizeBytesOk() (*int64, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.MaxQueueSizeBytes, true\n}", "func (ss *SqsService) IsQueueEmpty(ctx context.Context) (isEmpty bool) {\n\tisEmpty = false\n\tinput := &sqs.GetQueueAttributesInput{\n\t\tQueueUrl: &ss.queueURL,\n\t\tAttributeNames: []types.QueueAttributeName{\n\t\t\t\"ApproximateNumberOfMessages\",\n\t\t\t\"ApproximateNumberOfMessagesNotVisible\",\n\t\t},\n\t}\n\toutput, err := ss.client.GetQueueAttributes(ctx, input)\n\n\tif err != nil {\n\t\tlog.Printf(\"Faided to get queue attributes from Queue %s, please try again later - %s\", ss.queueName, err.Error())\n\t\treturn\n\t}\n\n\tvisible, _ := strconv.Atoi(output.Attributes[\"ApproximateNumberOfMessages\"])\n\tnotVisible, _ := strconv.Atoi(output.Attributes[\"ApproximateNumberOfMessagesNotVisible\"])\n\n\tlog.Printf(\"Queue %s has %d not visible message(s) and %d visable message(s)\\n\", ss.queueName, notVisible, visible)\n\n\tif visible+notVisible <= 1 {\n\t\tisEmpty = true\n\t}\n\treturn\n}", "func (o *V0037JobProperties) GetClusterConstraintsOk() (*string, bool) {\n\tif o == nil || o.ClusterConstraints == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterConstraints, true\n}", "func (mq MetricsQueue) Len() int { return len(mq) }", "func (c *checkQueueAttributeImpl) CheckQueueAttributeQuery(options CheckQueueAttributeOptions) icinga.Result {\n\tname := \"Queue.Attributes\"\n\n\tstatusCheck, err := icinga.NewStatusCheck(options.ThresholdWarning, options.ThresholdCritical)\n\tif err != nil 
{\n\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't check status: %v\", err))\n\t}\n\n\tif len(options.OkIfQueueIsMissing) > 0 {\n\t\tproperty := \"broker=\\\"0.0.0.0\\\"\"\n\t\tattribute := \"QueueNames\"\n\t\tqueueSearchResult, err := c.JolokiaClient.GetAttr(options.Domain, []string{property}, attribute)\n\t\tif err != nil {\n\t\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't query QueueNames in Jolokia: %v\", err))\n\t\t}\n\t\tif queueSearchResult == nil {\n\t\t\tif (options.Verbose > 0) {\n\t\t\t\tlog.Printf(\"No queues found: [%v]\", queueSearchResult)\n\t\t\t}\n\t\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't find QueueNames for [%v]\", property))\n\t\t}\n\n\t\tif !queueExists(queueSearchResult.([] interface{}), options.OkIfQueueIsMissing) {\n\t\t\tif (options.Verbose > 0) {\n\t\t\t\tlog.Printf(\"Queue [%v] not in queue list [%v]\", options.OkIfQueueIsMissing, queueSearchResult.([] interface{}))\n\t\t\t}\n\t\t\treturn icinga.NewResult(name, icinga.ServiceStatusOk, fmt.Sprintf(\"queue [%v] does not exist\", options.OkIfQueueIsMissing))\n\t\t}\n\t}\n\n\tsearchResult, err := c.JolokiaClient.GetAttr(options.Domain, []string{options.Queue}, options.Attribute)\n\tif err != nil {\n\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't query Jolokia: %v\", err))\n\t}\n\n\tresult, err := utils.ToFloat(searchResult)\n\tif err != nil {\n\t\tif (options.Verbose > 0) {\n\t\t\tlog.Printf(\"An error occured with result [%v]\", searchResult)\n\t\t}\n\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"query result is invalid: %v\", err))\n\t}\n\n\tmessage := fmt.Sprintf(\"Search produced: %v\", searchResult)\n\tstatus := statusCheck.Check(result)\n\n\treturn icinga.NewResult(name, status, message)\n}", "func (t *OpenconfigQos_Qos_Queues) Validate(opts ...ygot.ValidationOption) error {\n\tif err := 
ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Queues\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (clusterInfo ClusterInfo) CreateQueues(queues []rh.QueueInfo) error {\n\trmqc, err := rh.NewClient(clusterInfo.AdminURL(), clusterInfo.UserName, clusterInfo.Password)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, queue := range queues {\n\t\tlog.Printf(\"Creating queue %v\", queue.Name)\n\t\t_, err = rmqc.DeclareQueue(clusterInfo.Vhost, queue.Name, rh.QueueSettings{Durable: queue.Durable, AutoDelete: queue.AutoDelete, Arguments: queue.Arguments})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func groomQueues(queues *Queues) (err kv.Error) {\n\tfor qName, qDetails := range *queues {\n\t\t// If we have enough runners drop the queue as it needs nothing done to it\n\t\tif len(qDetails.NodeGroup) == 0 || qDetails.Running >= qDetails.Ready+qDetails.NotVisible {\n\t\t\tif logger.IsTrace() {\n\t\t\t\tlogger.Trace(\"queue already handled\", \"queue\", qName, \"stack\", stack.Trace().TrimRuntime())\n\t\t\t}\n\t\t\tdelete(*queues, qName)\n\t\t}\n\t}\n\treturn nil\n}", "func (client *Client) GetClusterQueueInfoWithCallback(request *GetClusterQueueInfoRequest, callback func(response *GetClusterQueueInfoResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetClusterQueueInfoResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetClusterQueueInfo(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (_Rootchain *RootchainCaller) ExitsQueues(opts *bind.CallOpts, arg0 common.Address) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _Rootchain.contract.Call(opts, out, \"exitsQueues\", arg0)\n\treturn *ret0, err\n}", "func (t 
*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue_Config) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue_Config\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *restClient) ListQueues(ctx context.Context, req *cloudtaskspb.ListQueuesRequest, opts ...gax.CallOption) *QueueIterator {\n\tit := &QueueIterator{}\n\treq = proto.Clone(req).(*cloudtaskspb.ListQueuesRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*cloudtaskspb.Queue, string, error) {\n\t\tresp := &cloudtaskspb.ListQueuesResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v2beta3/%v/queues\", req.GetParent())\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetFilter() != \"\" {\n\t\t\tparams.Add(\"filter\", fmt.Sprintf(\"%v\", req.GetFilter()))\n\t\t}\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\t\tif req.GetReadMask() != nil {\n\t\t\treadMask, err := protojson.Marshal(req.GetReadMask())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t\tparams.Add(\"readMask\", string(readMask[1:len(readMask)-1]))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := 
gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetQueues(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}", "func (o *Replication) GetCurrentQueueSizeBytesOk() (*int64, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.CurrentQueueSizeBytes, true\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (_PlasmaFramework *PlasmaFrameworkSession) ExitsQueues(arg0 [32]byte) 
(common.Address, error) {\n\treturn _PlasmaFramework.Contract.ExitsQueues(&_PlasmaFramework.CallOpts, arg0)\n}", "func (o *UcsdBackupInfoAllOf) GetConnectorsOk() ([]UcsdConnectorPack, bool) {\n\tif o == nil || o.Connectors == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Connectors, true\n}", "func (_PlasmaFramework *PlasmaFrameworkCaller) ExitsQueues(opts *bind.CallOpts, arg0 [32]byte) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _PlasmaFramework.contract.Call(opts, out, \"exitsQueues\", arg0)\n\treturn *ret0, err\n}", "func (_PlasmaFramework *PlasmaFrameworkCallerSession) ExitsQueues(arg0 [32]byte) (common.Address, error) {\n\treturn _PlasmaFramework.Contract.ExitsQueues(&_PlasmaFramework.CallOpts, arg0)\n}", "func CfnQueue_IsConstruct(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_mediaconvert.CfnQueue\",\n\t\t\"isConstruct\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func (o *Environment) GetQuotasOk() (*EnvironmentQuotas, bool) {\n\tif o == nil || o.Quotas == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Quotas, true\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues) IsYANGGoStruct() {}", "func NewGetCallQueueitemsOK() *GetCallQueueitemsOK {\n\treturn &GetCallQueueitemsOK{}\n}", "func (_Rootchain *RootchainCallerSession) ExitsQueues(arg0 common.Address) (common.Address, error) {\n\treturn _Rootchain.Contract.ExitsQueues(&_Rootchain.CallOpts, arg0)\n}", "func (s QueueSetSpy) Queues() map[DeploymentID]*R11nQueue {\n\tres := s.Called()\n\treturn res.Get(0).(map[DeploymentID]*R11nQueue)\n}", "func (s *SQSServer) pollQueues(pollctx, taskctx context.Context, queues []QueueConf) error {\n\tfor _, qconf := range queues {\n\t\tq, err := s.getQueue(pollctx, qconf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq := &sqs.GetQueueAttributesInput{\n\t\t\tAttributeNames: 
[]types.QueueAttributeName{(\"VisibilityTimeout\")},\n\t\t\tQueueUrl: &q.url,\n\t\t}\n\t\tresp, err := s.sqsSrv(q.QueueConf).GetQueueAttributes(pollctx, req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to get queue attributes for '%s' - %s\", q.Name, err.Error())\n\t\t}\n\t\tto := resp.Attributes[\"VisibilityTimeout\"]\n\t\tif to == \"\" {\n\t\t\treturn fmt.Errorf(\"No visibility timeout returned by SQS for queue '%s'\", q.Name)\n\t\t}\n\t\tvisTimeout, err := strconv.Atoi(to)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to convert visibility timeout from '%s' to int - '%s'\", to, err.Error())\n\t\t}\n\t\t// Each queue runs in a dedicated go routine.\n\t\tgo func(vt int32) {\n\t\t\ts.queuePollers.Add(1)\n\t\t\tdefer s.queuePollers.Done()\n\t\t\ts.run(pollctx, taskctx, q, vt)\n\t\t}(int32(visTimeout))\n\t}\n\n\treturn nil\n}", "func (_Rootchain *RootchainSession) ExitsQueues(arg0 common.Address) (common.Address, error) {\n\treturn _Rootchain.Contract.ExitsQueues(&_Rootchain.CallOpts, arg0)\n}", "func (bs *BeanstalkdConnectionPool) ListQueues() (queueNames []string, err error) {\n\tqueueNames, err = bs.getGlobalConnect().ListTubes()\n\treturn\n}", "func (c *MQCache) Len() (totalLen int64, queuesLen []int64) {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\tfor i := 0; i < c.queues; i++ {\n\t\tc.queuesLen[i] = c.q[i].len()\n\t\ttotalLen += c.queuesLen[i]\n\t}\n\treturn totalLen, c.queuesLen\n}", "func IsQueueExist(name string, ch *amqp.Channel) bool {\n\tvar exist bool\n\t_, err := ch.QueueInspect(name)\n\tif err == nil {\n\t\texist = true\n\t}\n\n\treturn exist\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues) IsYANGGoStruct() {}", "func (bq *InMemoryBuildQueue) ListPlatformQueues(ctx context.Context, request *emptypb.Empty) (*buildqueuestate.ListPlatformQueuesResponse, error) {\n\tbq.enter(bq.clock.Now())\n\tdefer bq.leave()\n\n\t// Obtain platform queue IDs in sorted order.\n\tplatformQueueList := 
append(platformQueueList(nil), bq.platformQueues...)\n\tsort.Sort(platformQueueList)\n\n\t// Extract status.\n\tplatformQueues := make([]*buildqueuestate.PlatformQueueState, 0, len(bq.platformQueues))\n\tfor _, pq := range platformQueueList {\n\t\tsizeClassQueues := make([]*buildqueuestate.SizeClassQueueState, 0, len(pq.sizeClassQueues))\n\t\tfor i, scq := range pq.sizeClassQueues {\n\t\t\texecutingWorkersCount := uint32(0)\n\t\t\tfor _, w := range scq.workers {\n\t\t\t\tif w.currentTask != nil {\n\t\t\t\t\texecutingWorkersCount++\n\t\t\t\t}\n\t\t\t}\n\t\t\tactiveInvocationsCount := uint32(0)\n\t\t\tfor _, i := range scq.invocations {\n\t\t\t\tif i.isActive() {\n\t\t\t\t\tactiveInvocationsCount++\n\t\t\t\t}\n\t\t\t}\n\t\t\tsizeClassQueues = append(sizeClassQueues, &buildqueuestate.SizeClassQueueState{\n\t\t\t\tSizeClass: pq.sizeClasses[i],\n\t\t\t\tTimeout: bq.cleanupQueue.getTimestamp(scq.cleanupKey),\n\t\t\t\tInvocationsCount: uint32(len(scq.invocations)),\n\t\t\t\tQueuedInvocationsCount: uint32(scq.queuedInvocations.Len()),\n\t\t\t\tActiveInvocationsCount: uint32(activeInvocationsCount),\n\t\t\t\tWorkersCount: uint32(len(scq.workers)),\n\t\t\t\tExecutingWorkersCount: executingWorkersCount,\n\t\t\t\tDrainsCount: uint32(len(scq.drains)),\n\t\t\t})\n\t\t}\n\t\tplatformQueues = append(platformQueues, &buildqueuestate.PlatformQueueState{\n\t\t\tName: pq.platformKey.GetPlatformQueueName(),\n\t\t\tSizeClassQueues: sizeClassQueues,\n\t\t})\n\t}\n\treturn &buildqueuestate.ListPlatformQueuesResponse{\n\t\tPlatformQueues: platformQueues,\n\t}, nil\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (o *ClusterRequest) GetMaxRunningNodesOk() (*int32, bool) {\n\tif o 
== nil || o.MaxRunningNodes == nil {\n\t\treturn nil, false\n\t}\n\treturn o.MaxRunningNodes, true\n}", "func (o *Ga4ghSearchCallSetsResponse) GetCallSetsOk() ([]Ga4ghCallSet, bool) {\n\tif o == nil || o.CallSets == nil {\n\t\tvar ret []Ga4ghCallSet\n\t\treturn ret, false\n\t}\n\treturn *o.CallSets, true\n}", "func (o *MoveClustersAccepted) IsSuccess() bool {\n\treturn true\n}", "func (mr *MockSQSAPIMockRecorder) ListQueues(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListQueues\", reflect.TypeOf((*MockSQSAPI)(nil).ListQueues), arg0)\n}", "func ValidateQueues(db *storm.DB, config Settings.FullClientSettings, tclient *torrent.Client) {\n\ttorrentQueues := Storage.FetchQueues(db)\n\tfor len(torrentQueues.ActiveTorrents) > config.MaxActiveTorrents {\n\t\tremoveTorrent := torrentQueues.ActiveTorrents[:1]\n\t\tfor _, singleTorrent := range tclient.Torrents() {\n\t\t\tif singleTorrent.InfoHash().String() == removeTorrent[0] {\n\t\t\t\tsingleTorrentFromStorage := Storage.FetchTorrentFromStorage(db, removeTorrent[0])\n\t\t\t\tRemoveTorrentFromActive(&singleTorrentFromStorage, singleTorrent, db)\n\t\t\t}\n\t\t}\n\t}\n\ttorrentQueues = Storage.FetchQueues(db)\n\tfor _, singleTorrent := range tclient.Torrents() {\n\t\tsingleTorrentFromStorage := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())\n\t\tif singleTorrentFromStorage.TorrentStatus == \"Stopped\" {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, queuedTorrent := range torrentQueues.QueuedTorrents { //If we have a queued torrent that is missing data, and an active torrent that is seeding, then prioritize the missing data one\n\t\t\tif singleTorrent.InfoHash().String() == queuedTorrent {\n\t\t\t\tif singleTorrent.BytesMissing() > 0 {\n\t\t\t\t\tfor _, activeTorrent := range torrentQueues.ActiveTorrents {\n\t\t\t\t\t\tfor _, singleActiveTorrent := range tclient.Torrents() {\n\t\t\t\t\t\t\tif activeTorrent == 
singleActiveTorrent.InfoHash().String() {\n\t\t\t\t\t\t\t\tif singleActiveTorrent.Seeding() == true {\n\t\t\t\t\t\t\t\t\tsingleActiveTFS := Storage.FetchTorrentFromStorage(db, activeTorrent)\n\t\t\t\t\t\t\t\t\tLogger.WithFields(logrus.Fields{\"TorrentName\": singleActiveTFS.TorrentName}).Info(\"Seeding, Removing from active to add queued\")\n\t\t\t\t\t\t\t\t\tRemoveTorrentFromActive(&singleActiveTFS, singleActiveTorrent, db)\n\t\t\t\t\t\t\t\t\tsingleQueuedTFS := Storage.FetchTorrentFromStorage(db, queuedTorrent)\n\t\t\t\t\t\t\t\t\tLogger.WithFields(logrus.Fields{\"TorrentName\": singleQueuedTFS.TorrentName}).Info(\"Adding torrent to the queue, not active\")\n\t\t\t\t\t\t\t\t\tAddTorrentToActive(&singleQueuedTFS, singleTorrent, db)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (o *StorageHyperFlexStorageContainer) GetClusterOk() (*HyperflexClusterRelationship, bool) {\n\tif o == nil || o.Cluster == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Cluster, true\n}", "func (*OpenconfigQos_Qos_Queues) IsYANGGoStruct() {}", "func (o *NiatelemetryNexusDashboardsAllOf) GetClusterUuidOk() (*string, bool) {\n\tif o == nil || o.ClusterUuid == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterUuid, true\n}", "func (q *execQueue) canQueue() bool {\n\tq.mu.Lock()\n\tok := !q.isClosed() && len(q.funcs) < cap(q.funcs)\n\tq.mu.Unlock()\n\treturn ok\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue_Config) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue_Config\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (o *V0037JobProperties) GetQosOk() (*string, bool) {\n\tif o == nil || o.Qos == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Qos, true\n}", "func (p *Project) Queues() (*[]Queue, error) {\n qs := 
make([]Queue, 0)\n err := Mongo.Get(\"queue\", bson.M{\"project\": p.ID}, MaxQueuesPerProject, &qs)\n return &qs, err\n}", "func (o *DnsZoneDataData) GetZoneMastersOk() (*string, bool) {\n\tif o == nil || o.ZoneMasters == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ZoneMasters, true\n}", "func (o *V0037Node) GetCoresOk() (*int32, bool) {\n\tif o == nil || o.Cores == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Cores, true\n}", "func (o *VnicEthAdapterPolicyInventory) GetRxQueueSettingsOk() (*VnicEthRxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RxQueueSettings.Get(), o.RxQueueSettings.IsSet()\n}", "func (t *TopicCache) IsQueueEmpty(projectName, serviceName string) bool {\n\tt.RLock()\n\tdefer t.RUnlock()\n\n\t_, ok := t.inQueue[projectName+serviceName]\n\n\treturn !ok\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue) IsYANGGoStruct() {}", "func (o *SMSConnectorSettings) GetLimitsOk() (*Thresholds, bool) {\n\tif o == nil || o.Limits == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Limits, true\n}", "func hasFreeQueueSlots() bool {\n\tif activeClients > activeClientsMax {\n\t\tatomic.AddInt64(&activeClientsReachedTimes, 1)\n\n\t\treturn false\n\t}\n\n\tif activeClients >= activeClientsReached {\n\t\tactiveClientsReached = activeClients\n\t}\n\n\treturn true\n}", "func (o *V0037Node) GetBoardsOk() (*int32, bool) {\n\tif o == nil || o.Boards == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Boards, true\n}", "func (c *captiveCoreTomlValues) QuorumSetIsConfigured() bool {\n\treturn len(c.QuorumSetEntries) > 0 || len(c.Validators) > 0\n}", "func (gcq *gcQueue) shouldQueue(\n\tctx context.Context, now hlc.ClockTimestamp, repl *Replica, _ *config.SystemConfig,\n) (bool, float64) {\n\n\t// Consult the protected timestamp state to determine whether we can GC and\n\t// the timestamp which can be used to calculate the score.\n\t_, zone := repl.DescAndZone()\n\tcanGC, _, gcTimestamp, 
oldThreshold, newThreshold := repl.checkProtectedTimestampsForGC(ctx, *zone.GC)\n\tif !canGC {\n\t\treturn false, 0\n\t}\n\t// If performing a GC will not advance the GC threshold, there's no reason\n\t// to GC again.\n\tif newThreshold.Equal(oldThreshold) {\n\t\treturn false, 0\n\t}\n\tr := makeGCQueueScore(ctx, repl, gcTimestamp, *zone.GC)\n\treturn r.ShouldQueue, r.FinalScore\n}", "func (o *V1WorkloadSpec) GetContainersOk() (*map[string]V1ContainerSpec, bool) {\n\tif o == nil || o.Containers == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Containers, true\n}", "func (o *V0037JobProperties) GetRequeueOk() (*bool, bool) {\n\tif o == nil || o.Requeue == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Requeue, true\n}", "func (o *ListClustersOnEndpointUsingGETOK) IsSuccess() bool {\n\treturn true\n}", "func (o *ClientConfiguration) GetAvailableBankGroupsOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AvailableBankGroups, true\n}", "func (c *Client) ensureConsumerQueues(topic string) error {\n\taChan, err := c.getChannel()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer aChan.Close()\n\n\tfor i := 0; i != c.numConsumerQueues; i++ {\n\t\tqueue := c.getRk(topic, i)\n\n\t\tif _, err := aChan.QueueDeclare(queue, true, false, false, false, amqp.Table{}); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := aChan.QueueBind(queue, queue, topic, false, amqp.Table{}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues) IsYANGGoStruct() {}", "func (m *MockSQSAPI) ListQueues(arg0 *sqs.ListQueuesInput) (*sqs.ListQueuesOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListQueues\", arg0)\n\tret0, _ := ret[0].(*sqs.ListQueuesOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (cfg *Config) MQServers() string {\n\treturn os.Getenv(\"MQ_SERVERS\")\n}", "func (o *NSQProducer) GetTcpPortOk() (*int32, bool) {\n\tif o == nil || o.TcpPort == nil 
{\n\t\treturn nil, false\n\t}\n\treturn o.TcpPort, true\n}", "func TestSliceQueueSuccess(t *testing.T) {\n\tqueueSuccess(t, &queue.SliceQueue{})\n}", "func (cc *Cluster) IsQuorum(v uint64) bool {\n\n\tfor _, q := range cc.Quorums {\n\t\tif v&q == q {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (o *NetgroupsSettingsCollectionGetOK) IsSuccess() bool {\n\treturn true\n}", "func WithQueues(queues []string) Option {\n\treturn func(opts *Options) {\n\t\topts.Queues = queues\n\t}\n}", "func (o *DnsZoneDataData) GetZoneClassParametersOk() (*[]ApiClassParameterOutputEntry, bool) {\n\tif o == nil || o.ZoneClassParameters == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ZoneClassParameters, true\n}", "func (o *Cause) GetCausesOk() (*[]interface{}, bool) {\n\tif o == nil || o.Causes == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Causes, true\n}", "func (o *StatusAzureServiceBus) GetRecordsProcessedOk() (*int64, bool) {\n\tif o == nil || IsNil(o.RecordsProcessed) {\n\t\treturn nil, false\n\t}\n\treturn o.RecordsProcessed, true\n}", "func (o *KubernetesPodStatus) GetQosClassOk() (*string, bool) {\n\tif o == nil || o.QosClass == nil {\n\t\treturn nil, false\n\t}\n\treturn o.QosClass, true\n}", "func (o *SecurityProblem) GetManagementZonesOk() (*[]ManagementZone, bool) {\n\tif o == nil || o.ManagementZones == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ManagementZones, true\n}", "func (qc *QueueConfig) Exists() bool {\n\treturn qc._exists\n}", "func (c *client) ClusterExists() (bool, error) {\n\tclusterJSON, err := c.runCmd(\"cluster\", \"list\", \"-o\", \"json\")\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tclusterList := &ClusterList{}\n\tif err := clusterList.Unmarshal([]byte(clusterJSON)); err != nil {\n\t\treturn false, err\n\t}\n\n\tfor _, cluster := range clusterList.Clusters {\n\t\tif cluster.Name == c.clusterName {\n\t\t\tif c.verbose {\n\t\t\t\tfmt.Printf(\"k3d cluster '%s' exists\", c.clusterName)\n\t\t\t}\n\t\t\treturn true, 
nil\n\t\t}\n\t}\n\n\tif c.verbose {\n\t\tfmt.Printf(\"k3d cluster '%s' does not exist\", c.clusterName)\n\t}\n\treturn false, nil\n}", "func (o *ResourceLimits) GetK8sClustersProvisionedOk() (*int32, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.K8sClustersProvisioned, true\n}" ]
[ "0.64735645", "0.64523965", "0.6035547", "0.58038056", "0.53999776", "0.53208125", "0.5234418", "0.5219659", "0.5205979", "0.51352835", "0.5088557", "0.5061583", "0.5047421", "0.50225717", "0.50075495", "0.49929222", "0.49726918", "0.49626175", "0.49571374", "0.49505204", "0.4910291", "0.49086472", "0.4870585", "0.48654485", "0.48648486", "0.48150924", "0.4807354", "0.47827765", "0.47716317", "0.4757595", "0.47479898", "0.47287238", "0.47200388", "0.4709999", "0.4708853", "0.4703158", "0.47027186", "0.46969888", "0.4696914", "0.469152", "0.46906188", "0.46903023", "0.46878037", "0.46815172", "0.4672829", "0.46617898", "0.4661367", "0.465014", "0.46401793", "0.46380433", "0.46244723", "0.46241587", "0.46236807", "0.46231955", "0.46199661", "0.46175566", "0.4615826", "0.46140933", "0.4592387", "0.45902264", "0.45870736", "0.45860043", "0.457281", "0.45558897", "0.45551425", "0.45495507", "0.45418593", "0.45395905", "0.45348665", "0.45200172", "0.45021793", "0.4497237", "0.4493511", "0.44841293", "0.44784686", "0.44768226", "0.44730937", "0.44699517", "0.44677082", "0.44638708", "0.44293743", "0.44248316", "0.4424208", "0.4420703", "0.44205612", "0.44089937", "0.44081664", "0.44064337", "0.4405084", "0.44050375", "0.43964806", "0.43905383", "0.43900228", "0.43877444", "0.43779445", "0.4375703", "0.437483", "0.43745577", "0.43738717", "0.4367556" ]
0.832443
0
SetClusterQueues sets field value
func (o *QueueManager) SetClusterQueues(v []ClusterQueue) { o.ClusterQueues = v }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (tcdb *Teocdb) SetQueue(key string, value []byte) (err error) {\n\treturn tcdb.session.Query(`UPDATE queue SET lock = '', data = ? WHERE key = ? AND time = toTimestamp(now()) AND random = UUID()`,\n\t\tvalue, key).Exec()\n}", "func SetQueueSettings(ctx *context.Context) {\n\tqid := ctx.ParamsInt64(\"qid\")\n\tmq := queue.GetManager().GetManagedQueue(qid)\n\tif mq == nil {\n\t\tctx.Status(http.StatusNotFound)\n\t\treturn\n\t}\n\tif _, ok := mq.Managed.(queue.ManagedPool); !ok {\n\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.pool.none\"))\n\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\treturn\n\t}\n\n\tmaxNumberStr := ctx.FormString(\"max-number\")\n\tnumberStr := ctx.FormString(\"number\")\n\ttimeoutStr := ctx.FormString(\"timeout\")\n\n\tvar err error\n\tvar maxNumber, number int\n\tvar timeout time.Duration\n\tif len(maxNumberStr) > 0 {\n\t\tmaxNumber, err = strconv.Atoi(maxNumberStr)\n\t\tif err != nil {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.maxnumberworkers.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t\tif maxNumber < -1 {\n\t\t\tmaxNumber = -1\n\t\t}\n\t} else {\n\t\tmaxNumber = mq.MaxNumberOfWorkers()\n\t}\n\n\tif len(numberStr) > 0 {\n\t\tnumber, err = strconv.Atoi(numberStr)\n\t\tif err != nil || number < 0 {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.numberworkers.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tnumber = mq.BoostWorkers()\n\t}\n\n\tif len(timeoutStr) > 0 {\n\t\ttimeout, err = time.ParseDuration(timeoutStr)\n\t\tif err != nil {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.timeout.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t} else {\n\t\ttimeout = 
mq.BoostTimeout()\n\t}\n\n\tmq.SetPoolSettings(maxNumber, number, timeout)\n\tctx.Flash.Success(ctx.Tr(\"admin.monitor.queue.settings.changed\"))\n\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n}", "func (p *Process) CmdSetQueue(pac teoapi.Packet) (err error) {\n\tdata := pac.RemoveTrailingZero(pac.Data())\n\trequest := cdb.KeyValue{Cmd: pac.Cmd()}\n\tif err = request.UnmarshalText(data); err != nil {\n\t\treturn\n\t} else if err = p.tcdb.SetQueue(request.Key, request.Value); err != nil {\n\t\treturn\n\t}\n\t// Return only Value for text requests and all fields for json\n\tresponce := request\n\tresponce.Value = nil\n\tif !request.RequestInJSON {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), responce.Value)\n\t} else if retdata, err := responce.MarshalText(); err == nil {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), retdata)\n\t}\n\treturn\n}", "func (s *Filters) SetQueues(v []*string) *Filters {\n\ts.Queues = v\n\treturn s\n}", "func (q *Queue) Set(ctx context.Context, ds *linux.MsqidDS) error {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\n\tcreds := auth.CredentialsFromContext(ctx)\n\tif ds.MsgQbytes > maxQueueBytes && !creds.HasCapabilityIn(linux.CAP_SYS_RESOURCE, q.obj.UserNS) {\n\t\t// \"An attempt (IPC_SET) was made to increase msg_qbytes beyond the\n\t\t// system parameter MSGMNB, but the caller is not privileged (Linux:\n\t\t// does not have the CAP_SYS_RESOURCE capability).\"\n\t\treturn linuxerr.EPERM\n\t}\n\n\tif err := q.obj.Set(ctx, &ds.MsgPerm); err != nil {\n\t\treturn err\n\t}\n\n\tq.maxBytes = ds.MsgQbytes\n\tq.changeTime = ktime.NowFromContext(ctx)\n\treturn nil\n}", "func (s *UserDataFilters) SetQueues(v []*string) *UserDataFilters {\n\ts.Queues = v\n\treturn s\n}", "func (r *RPC) SetQueueClient(c queue.Client) {\n\tgapi := NewGRpcServer(c, r.api)\n\tjapi := NewJSONRPCServer(c, r.api)\n\tr.gapi = gapi\n\tr.japi = japi\n\tr.c = c\n\t//注册系统rpc\n\tpluginmgr.AddRPC(r)\n\tr.Listen()\n}", "func (r 
*RPC) SetQueueClient(c queue.Client) {\r\n\tgapi := NewGRpcServer(c, r.api)\r\n\tjapi := NewJSONRPCServer(c, r.api)\r\n\tr.gapi = gapi\r\n\tr.japi = japi\r\n\tr.c = c\r\n\t//注册系统rpc\r\n\tpluginmgr.AddRPC(r)\r\n\tr.Listen()\r\n}", "func SetConfig(c QueueConfig) error {\n\t// is name unique?\n\tif _, ok := configList[c.Name]; ok {\n\t\treturn ErrQueueIsExist\n\t}\n\n\t// is contener unique?\n\tfor _, v := range configList {\n\t\tif v.Contener == reflect.ValueOf(c.JobContener).Type() {\n\t\t\treturn ErrContenerIsNotUnique\n\t\t}\n\t}\n\n\treturn setConfig(c)\n}", "func SetQueueReclaimable(ctx *TestContext, queues []string, reclaimable bool) {\n\tBy(\"Setting Queue reclaimable\")\n\n\tfor _, q := range queues {\n\t\tqueue, err := ctx.Vcclient.SchedulingV1beta1().Queues().Get(context.TODO(), q, metav1.GetOptions{})\n\t\tExpect(err).NotTo(HaveOccurred(), \"failed to get queue %s\", q)\n\n\t\tqueue.Spec.Reclaimable = &reclaimable\n\t\t_, err = ctx.Vcclient.SchedulingV1beta1().Queues().Update(context.TODO(), queue, metav1.UpdateOptions{})\n\t\tExpect(err).NotTo(HaveOccurred(), \"failed to update queue %s\", q)\n\t}\n}", "func (w *Worker) SetQueue(q Queue) {\n\tw.queue = q\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}", "func (m *Group) SetThreads(value []ConversationThreadable)() {\n m.threads = value\n}", "func (acnl *Channel) setupQueues(cnl *amqp.Channel) error {\n\t/*if _, err := cnl.QueueDeclare(QueueVNFMRegister, true, acnl.cfg.queues.autodelete,\n\t\tacnl.cfg.queues.exclusive, false, nil); err != nil {\n\n\t\treturn 
err\n\t}\n\n\tif err := cnl.QueueBind(QueueVNFMRegister, QueueVNFMRegister, acnl.cfg.exchange.name, false, nil); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := cnl.QueueDeclare(QueueVNFMUnregister, true, acnl.cfg.queues.autodelete,\n\t\tacnl.cfg.queues.exclusive, false, nil); err != nil {\n\n\t\treturn err\n\t}\n\n\tif err := cnl.QueueBind(QueueVNFMUnregister, QueueVNFMUnregister, acnl.cfg.exchange.name, false, nil); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := cnl.QueueDeclare(QueueVNFMCoreActions, true, acnl.cfg.queues.autodelete,\n\t\tacnl.cfg.queues.exclusive, false, nil); err != nil {\n\n\t\treturn err\n\t}\n\n\tif err := cnl.QueueBind(QueueVNFMCoreActions, QueueVNFMCoreActions, acnl.cfg.exchange.name, false, nil); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := cnl.QueueDeclare(QueueVNFMCoreActionsReply, true, acnl.cfg.queues.autodelete,\n\t\tacnl.cfg.queues.exclusive, false, nil); err != nil {\n\n\t\treturn err\n\t}\n\n\tif err := cnl.QueueBind(QueueVNFMCoreActionsReply, QueueVNFMCoreActionsReply, acnl.cfg.exchange.name, false, nil); err != nil {\n\t\treturn err\n\t}*/\n\n\t// is this needed?\n\tif _, err := cnl.QueueDeclare(acnl.cfg.queues.generic, true, acnl.cfg.queues.autodelete,\n\t\tacnl.cfg.queues.exclusive, false, nil); err != nil {\n\n\t\treturn err\n\t}\n\n\tif err := cnl.QueueBind(acnl.cfg.queues.generic, acnl.cfg.queues.generic, acnl.cfg.exchange.name, false, nil); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *Service) SetQueue(q amboy.Queue) error {\n\tif s.closer != nil {\n\t\treturn errors.New(\"cannot set a new queue, Service is already open\")\n\t}\n\n\ts.queue = q\n\treturn nil\n}", "func (o *V0037Node) SetThreads(v int32) {\n\to.Threads = &v\n}", "func (c *Consumer) SetQueueDeclare(declare *QueueDeclare) *Consumer {\n\tif declare != nil {\n\t\tc.mutex.Lock()\n\t\tc.declare = declare\n\t\tc.mutex.Unlock()\n\t}\n\treturn c\n}", "func (context *context) SetThreads(v uint) 
{\n\tcontext.params.SetThreads(int(v))\n}", "func (o *QueueManager) GetClusterQueues() []ClusterQueue {\n\tif o == nil {\n\t\tvar ret []ClusterQueue\n\t\treturn ret\n\t}\n\n\treturn o.ClusterQueues\n}", "func (oo *OmciCC) SendSetPrioQueueVar(ctx context.Context, timeout int, highPrio bool,\n\trxChan chan Message, params ...me.ParamData) (*me.ManagedEntity, error) {\n\ttid := oo.GetNextTid(highPrio)\n\tlogger.Debugw(ctx, \"send PrioQueue-Set-msg:\", log.Fields{\"device-id\": oo.deviceID,\n\t\t\"SequNo\": strconv.FormatInt(int64(tid), 16),\n\t\t\"InstId\": strconv.FormatInt(int64(params[0].EntityID), 16)})\n\n\tmeInstance, omciErr := me.NewPriorityQueue(params[0])\n\tif omciErr.GetError() == nil {\n\t\tomciLayer, msgLayer, err := oframe.EncodeFrame(meInstance, omci.SetRequestType, oframe.TransactionID(tid))\n\t\tif err != nil {\n\t\t\tlogger.Errorw(ctx, \"Cannot encode PrioQueue for set\", log.Fields{\n\t\t\t\t\"Err\": err, \"device-id\": oo.deviceID})\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpkt, err := SerializeOmciLayer(ctx, omciLayer, msgLayer)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(ctx, \"Cannot serialize PrioQueue set\", log.Fields{\n\t\t\t\t\"Err\": err, \"device-id\": oo.deviceID})\n\t\t\treturn nil, err\n\t\t}\n\n\t\tomciRxCallbackPair := CallbackPair{\n\t\t\tCbKey: tid,\n\t\t\tCbEntry: CallbackPairEntry{rxChan, oo.receiveOmciResponse, true},\n\t\t}\n\t\terr = oo.Send(ctx, pkt, timeout, CDefaultRetries, highPrio, omciRxCallbackPair)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(ctx, \"Cannot send PrioQueue set\", log.Fields{\n\t\t\t\t\"Err\": err, \"device-id\": oo.deviceID})\n\t\t\treturn nil, err\n\t\t}\n\t\tlogger.Debug(ctx, \"send PrioQueue-set msg done\")\n\t\treturn meInstance, nil\n\t}\n\tlogger.Errorw(ctx, \"Cannot generate PrioQueue Instance\", log.Fields{\n\t\t\"Err\": omciErr.GetError(), \"device-id\": oo.deviceID})\n\treturn nil, omciErr.GetError()\n}", "func SetRabbitMQ(bk *RabbitMQBroker) OptionFunc {\n\treturn func(bi *brokerInstance) 
{\n\t\tbi.rabbitmq = bk\n\t}\n}", "func (jbobject *ConcurrentJMXEnabledThreadPoolExecutorMBean) SetCoreThreads(number int) {\n\terr := jbobject.CallVoid(\"setCoreThreads\", number)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}", "func (gs *GasStation) ChangeMaxQueueColumn(n int, max int) {\r\n\tgs.gasColumnList[n-1].maxQueue = max\r\n}", "func (mq *MessageQueue) SetQueue(queueName string) error {\n\tif mq.Channel == nil {\n\t\tnewCH, err := mq.NewChannel()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmq.Channel = newCH\n\t}\n\tmq.Channel.Qos(mq.Prefetch, 0, false)\n\tif _, err := mq.Channel.QueueDeclare(\n\t\tqueueName, // name\n\t\ttrue, // durable\n\t\tfalse, // delete when unused\n\t\tfalse, // exclusive\n\t\tfalse, // no-wait\n\t\tnil, // arguments\n\t); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func jobQAssign(ctx context.Context, cfg *Config, cluster string, queues *Queues) (err kv.Error) {\n\n\t// Obtain a list of all of the known node groups in the cluster and the machine types they\n\t// are provisioning\n\tgroups, err := getGroups(ctx, cfg, cluster)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinstances := map[string][]string{}\n\t// Create a map from the groups, node group major, for a ec2 instance type major collection\n\tfor aGroup, instTypes := range groups {\n\t\tfor _, instType := range instTypes {\n\t\t\taddCatalog(instType, aGroup, instances)\n\t\t}\n\t}\n\n\tif logger.IsTrace() {\n\t\tlogger.Trace(spew.Sdump(groups), \"stack\", stack.Trace().TrimRuntime())\n\t\tlogger.Trace(spew.Sdump(instances), \"stack\", stack.Trace().TrimRuntime())\n\t}\n\n\t// Assign the known machine types based on the Queues and then match them up\n\tif err = loadNodeGroups(ctx, cfg, cluster, queues, instances); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (jbobject *ConcurrentJMXEnabledThreadPoolExecutorMBean) SetMaximumThreads(number int) {\n\terr := jbobject.CallVoid(\"setMaximumThreads\", number)\n\tif err != nil 
{\n\t\tpanic(err)\n\t}\n\n}", "func (o *GetLolCareerStatsV1ChampionAveragesByChampionIDByPositionByTierByQueueParams) SetQueue(queue string) {\n\to.Queue = queue\n}", "func setupQueue(client *redis.Client) error {\n\t// ping the queue\n\terr := pingQueue(client)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func WithQueues(queues []string) Option {\n\treturn func(opts *Options) {\n\t\topts.Queues = queues\n\t}\n}", "func (c *Consumer) SetQueueBind(bind *QueueBind) *Consumer {\n\tif bind != nil {\n\t\tc.mutex.Lock()\n\t\tc.bind = bind\n\t\tc.mutex.Unlock()\n\t}\n\treturn c\n}", "func (wp *WorkerPool[T]) SetNumShards(numShards int) {\n\tif numShards <= 1 {\n\t\tnumShards = 1\n\t}\n\n\tif numShards > maxShards {\n\t\tnumShards = maxShards\n\t}\n\n\twp.numShards = numShards\n}", "func (e *LifecycleEvent) SetQueueURL(url string) { e.queueURL = url }", "func (gc *GCloudContainer) SetNodePoolSize(name string, size int64) (err error) {\n\n\tapiName := fmt.Sprintf(\"projects/%v/locations/%v/clusters/%v/nodePools/%v\", gc.Client.Project, gc.Client.Location, gc.Client.Cluster, name)\n\n\tnodePoolSizeRequest := &container.SetNodePoolSizeRequest{\n\t\tNodeCount: size,\n\t}\n\n\toperation, err := gc.Service.Projects.Locations.Clusters.NodePools.SetSize(apiName, nodePoolSizeRequest).Context(gc.Client.Context).Do()\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = gc.waitForOperation(operation)\n\n\treturn\n}", "func (o *QueueManager) SetClusters(v []string) {\n\to.Clusters = v\n}", "func (s *SearchQueuesOutput) SetQueues(v []*Queue) *SearchQueuesOutput {\n\ts.Queues = v\n\treturn s\n}", "func (s *API) SetQueueAttributes(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"SetQueueAttributes\")\n\tw.WriteHeader(http.StatusNotImplemented)\n}", "func SetConfigList(c []QueueConfig) error {\n\tfor _, v := range c {\n\t\terr := SetConfig(v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (_AuthContract *AuthContractTransactor) 
SetQuorum(opts *bind.TransactOpts, _quorum *big.Int) (*types.Transaction, error) {\n\treturn _AuthContract.contract.Transact(opts, \"setQuorum\", _quorum)\n}", "func (r *Listener) SetThreads(count uint32) {\n\tr.thCntLock.Lock()\n\tdefer r.thCntLock.Unlock()\n\n\tr.thMax = count\n}", "func PopulateQueues(c *gin.Context) {\n\tif queue == nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue doesn't exist, please create it!!!\",\n\t\t})\n\t\treturn\n\t}\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"roberto\",\n\t\tEMAIL: \"roberto@rr.com\",\n\t\tUUID: \"1\",\n\t\tMSG: \"lindo\",\n\t})\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"alex\",\n\t\tEMAIL: \"alex@rr.com\",\n\t\tUUID: \"2\",\n\t\tMSG: \"lindox\",\n\t})\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"ale\",\n\t\tEMAIL: \"ale@rr.com\",\n\t\tUUID: \"3\",\n\t\tMSG: \"linduxo\",\n\t})\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"msg\": queue,\n\t})\n}", "func (a *Admin) SetSlots(addr, action string, slots []Slot, nodeID string) error {\n\tif len(slots) == 0 {\n\t\treturn nil\n\t}\n\tc, err := a.Connections().Get(addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, slot := range slots {\n\t\tif nodeID == \"\" {\n\t\t\tc.PipeAppend(\"CLUSTER\", \"SETSLOT\", slot, action)\n\t\t} else {\n\t\t\tc.PipeAppend(\"CLUSTER\", \"SETSLOT\", slot, action, nodeID)\n\t\t}\n\t}\n\tif !a.Connections().ValidatePipeResp(c, addr, \"Cannot SETSLOT\") {\n\t\treturn fmt.Errorf(\"Error occured during CLUSTER SETSLOT %s\", action)\n\t}\n\tc.PipeClear()\n\n\treturn nil\n}", "func (s *Dimensions) SetQueue(v *QueueReference) *Dimensions {\n\ts.Queue = v\n\treturn s\n}", "func (c *Consumer) SetQueueName(withPrefix bool, name string) *Consumer {\n\tif name == \"\" {\n\t\tname = c.getExchangeTopic()\n\t}\n\tnewQueueName := GenerateQueueName(withPrefix, name)\n\tc.mutex.Lock()\n\tc.declare.SetName(newQueueName)\n\tc.bind.SetName(newQueueName)\n\tc.mutex.Unlock()\n\treturn c\n}", "func (gores *Gores) Queues() []string 
{\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}", "func (q *SimpleQueue) SetDelete(ctx context.Context, user cn.CapUser, setNeedDelete func(ctx context.Context, i int, len int, q *SimpleQueue, block *SimpleQueueBlock) (needDelete bool, err *mft.Error)) (err *mft.Error) {\n\n\tblocks := make([]*SimpleQueueBlock, 0)\n\n\tif !q.mx.RTryLock(ctx) {\n\t\treturn GenerateError(10025000)\n\t}\n\n\tfor i := 0; i < len(q.Blocks); i++ {\n\t\tblocks = append(blocks, q.Blocks[i])\n\t}\n\n\tq.mx.RUnlock()\n\n\tblocksToDelete := make([]*SimpleQueueBlock, 0)\n\n\tfor i, block := range blocks {\n\t\tneedDelete, err := setNeedDelete(ctx, i, len(blocks), q, block)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif needDelete {\n\t\t\tblocksToDelete = append(blocksToDelete, block)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor _, block := range blocksToDelete {\n\t\terr = block.setNeedDelete(ctx, q)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *Consumer) SetConsumers(t otelconsumer.Traces, m otelconsumer.Metrics, l otelconsumer.Logs) {\n\tc.mut.Lock()\n\tdefer c.mut.Unlock()\n\n\tc.metricsConsumer = m\n\tc.logsConsumer = l\n\tc.tracesConsumer = t\n}", "func (q *SimpleQueue) SetMarks(ctx context.Context, user cn.CapUser, setBlockMark func(ctx context.Context, i int, len int, q *SimpleQueue, block *SimpleQueueBlock) (needSetMark bool, nextMark string, err *mft.Error)) (err *mft.Error) {\n\n\tblocks := make([]*SimpleQueueBlock, 0)\n\n\tif !q.mx.RTryLock(ctx) {\n\t\treturn GenerateError(10024000)\n\t}\n\n\tfor i := 0; i < len(q.Blocks); i++ {\n\t\tblocks = append(blocks, q.Blocks[i])\n\t}\n\n\tq.mx.RUnlock()\n\n\tfor i, block := range blocks {\n\t\tneedSetMark, nextMark, err := setBlockMark(ctx, i, len(blocks), q, block)\n\t\tif err != nil 
{\n\t\t\treturn err\n\t\t}\n\t\tif needSetMark {\n\t\t\terr = block.setNewStorage(ctx, q, nextMark)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *System) SetThreads(count int) {\n\ts.threadCount = count\n\ts.threads = make([]Thread, count)\n\tfor i := range s.threads {\n\t\ts.threads[i].idx = i\n\t\ts.threads[i].System = s\n\t}\n\n\ts.setupSpawner()\n}", "func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ClusterQueues, true\n}", "func (a *NamespacesApiService) SetMaxConsumersPerTopic(ctx _context.Context, tenant string, namespace string) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/namespaces/{tenant}/{namespace}/maxConsumersPerTopic\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"tenant\"+\"}\", _neturl.QueryEscape(fmt.Sprintf(\"%v\", tenant)), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"namespace\"+\"}\", _neturl.QueryEscape(fmt.Sprintf(\"%v\", namespace)), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] 
= localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func (m *Miner) SetThreads(threads int) error {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tif threads == 0 {\n\t\treturn errors.New(\"cannot have a miner with 0 threads.\")\n\t}\n\tm.threads = threads\n\n\treturn nil\n}", "func (policy *ticketPolicy) OnSetQueueClient() {\n\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) 
{\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}", "func (m *Planner) SetBuckets(value []PlannerBucketable)() {\n m.buckets = value\n}", "func (m *Synchronization) SetJobs(value []SynchronizationJobable)() {\n err := m.GetBackingStore().Set(\"jobs\", value)\n if err != nil {\n panic(err)\n }\n}", "func (q *QLearning) SetQ(a Action, qv float64) {\n\tq.qt[q.state.Get()][a] = qv\n}", "func (clusterInfo ClusterInfo) CreateQueues(queues []rh.QueueInfo) error {\n\trmqc, err := rh.NewClient(clusterInfo.AdminURL(), clusterInfo.UserName, clusterInfo.Password)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, queue := range queues {\n\t\tlog.Printf(\"Creating queue %v\", queue.Name)\n\t\t_, err = rmqc.DeclareQueue(clusterInfo.Vhost, queue.Name, rh.QueueSettings{Durable: queue.Durable, AutoDelete: queue.AutoDelete, Arguments: queue.Arguments})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (s *segment) setOwner(ep *endpoint, qFlags queueFlags) {\n\tswitch qFlags {\n\tcase recvQ:\n\t\tep.updateReceiveMemUsed(s.segMemSize())\n\tcase sendQ:\n\t\t// no memory account for sendQ yet.\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unexpected queue flag %b\", qFlags))\n\t}\n\ts.ep = ep\n\ts.qFlags = qFlags\n}", "func (q *Queue) enqueue(value int) {\n\tq.values = append(q.values, value)\n}", "func SetNamespaces(namespaces []string) UpdateSettingsFunc {\n\treturn func(cache *clusterCache) {\n\t\tif !reflect.DeepEqual(cache.namespaces, namespaces) {\n\t\t\tlog.WithField(\"server\", cache.config.Host).Infof(\"Changing cluster namespaces to: %v\", namespaces)\n\t\t\tcache.namespaces = namespaces\n\t\t}\n\t}\n}", "func TestClusterConfigSet(t *testing.T) {\n\t_, etcds, err := CreateCluster(3, &os.ProcAttr{Files: []*os.File{nil, os.Stdout, os.Stderr}}, false)\n\tassert.NoError(t, err)\n\tdefer DestroyCluster(etcds)\n\n\tresp, _ := 
tests.Put(\"http://localhost:7001/v2/admin/config\", \"application/json\", bytes.NewBufferString(`{\"activeSize\":3, \"removeDelay\":60}`))\n\tassert.Equal(t, resp.StatusCode, 200)\n\n\ttime.Sleep(1 * time.Second)\n\n\tresp, _ = tests.Get(\"http://localhost:7002/v2/admin/config\")\n\tbody := tests.ReadBodyJSON(resp)\n\tassert.Equal(t, resp.StatusCode, 200)\n\tassert.Equal(t, resp.Header.Get(\"Content-Type\"), \"application/json\")\n\tassert.Equal(t, body[\"activeSize\"], 3)\n\tassert.Equal(t, body[\"removeDelay\"], 60)\n}", "func (queue *Queue) SetBufferSize(bufferSize int) error {\n\tif bufferSize < 0 {\n\t\treturn fmt.Errorf(\n\t\t\t\"buffer size is less than 0: %d\", bufferSize)\n\t}\n\n\tif bufferSize < len(queue.data) {\n\t\treturn fmt.Errorf(\n\t\t\t\"buffer size is less than the length of the queue: %d\",\n\t\t\tbufferSize)\n\t}\n\n\tdata := make([]interface{}, len(queue.data), bufferSize)\n\n\tcopy(data, queue.data)\n\tqueue.data = data\n\n\treturn nil\n}", "func BatchBatchingQueue(value string) BatchAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"batching_queue\"] = value\n\t}\n}", "func Queue(ctx *context.Context) {\n\tqid := ctx.ParamsInt64(\"qid\")\n\tmq := queue.GetManager().GetManagedQueue(qid)\n\tif mq == nil {\n\t\tctx.Status(http.StatusNotFound)\n\t\treturn\n\t}\n\tctx.Data[\"Title\"] = ctx.Tr(\"admin.monitor.queue\", mq.Name)\n\tctx.Data[\"PageIsAdmin\"] = true\n\tctx.Data[\"PageIsAdminMonitor\"] = true\n\tctx.Data[\"Queue\"] = mq\n\tctx.HTML(http.StatusOK, tplQueue)\n}", "func (q *DashboardQueue) Init(log logrus.FieldLogger, dashboards []*configpb.Dashboard, when time.Time) {\n\tn := len(dashboards)\n\tnames := make([]string, n)\n\tnamedDashboards := make(map[string]*configpb.Dashboard, n)\n\tgroups := make(map[string]*stringset.Set, n)\n\tfor i, d := range dashboards {\n\t\tname := d.Name\n\t\tnames[i] = name\n\t\tfor _, tab := range d.DashboardTab {\n\t\t\tif groups[tab.TestGroupName] == nil {\n\t\t\t\tns := 
stringset.New()\n\t\t\t\tgroups[tab.TestGroupName] = &ns\n\t\t\t}\n\t\t\tgroups[tab.TestGroupName].Add(name)\n\t\t}\n\t}\n\tq.lock.Lock()\n\tq.Queue.Init(log, names, when)\n\tq.dashboards = namedDashboards\n\tq.groups = groups\n\tq.lock.Unlock()\n}", "func PutClusterConfig(req *restful.Request, resp *restful.Response) {\n\tconst (\n\t\thandler = \"PutClusterConfig\"\n\t)\n\tspan := v1http.SetHTTPSpanContextInfo(req, handler)\n\tdefer span.Finish()\n\n\tif err := putClsConfig(req); err != nil {\n\t\tutils.SetSpanLogTagError(span, err)\n\t\tblog.Errorf(\"%s | err: %v\", common.BcsErrStoragePutResourceFailStr, err)\n\t\tlib.ReturnRest(&lib.RestResponse{\n\t\t\tResp: resp,\n\t\t\tErrCode: common.BcsErrStoragePutResourceFail,\n\t\t\tMessage: common.BcsErrStoragePutResourceFailStr})\n\t\treturn\n\t}\n\tr, err := generateData(req, getCls)\n\tif err != nil {\n\t\tutils.SetSpanLogTagError(span, err)\n\t\tblog.Errorf(\"%s | err: %v\", common.BcsErrStorageGetResourceFailStr, err)\n\t\tlib.ReturnRest(&lib.RestResponse{\n\t\t\tResp: resp,\n\t\t\tErrCode: common.BcsErrStorageGetResourceFail,\n\t\t\tMessage: common.BcsErrStorageGetResourceFailStr})\n\t\treturn\n\t}\n\tlib.ReturnRest(&lib.RestResponse{Resp: resp, Data: r})\n}", "func SetNumThreads(numThreads int) {\n\tpoolSize = numThreads\n}", "func (mq *LinuxMessageQueue) SetBlocking(block bool) error {\n\tif block {\n\t\tmq.flags &= ^O_NONBLOCK\n\t} else {\n\t\tmq.flags |= O_NONBLOCK\n\t}\n\treturn nil\n}", "func (c Cube) Set(x, y, z int, val []float64) {\n\tc.Data[x][y][z] = val\n}", "func (o *MarkClaimedTaskDoneParams) SetQueue(queue string) {\n\to.Queue = queue\n}", "func (cc cacheCluster) Set(key string, val any) error {\n\treturn cc.SetCtx(context.Background(), key, val)\n}", "func WithQueue(queue workqueue.RateLimitingInterface) Option {\n\treturn func(config *queueInformerConfig) {\n\t\tconfig.queue = queue\n\t}\n}", "func (s *SQSServer) pollQueues(pollctx, taskctx context.Context, queues []QueueConf) error {\n\tfor _, 
qconf := range queues {\n\t\tq, err := s.getQueue(pollctx, qconf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq := &sqs.GetQueueAttributesInput{\n\t\t\tAttributeNames: []types.QueueAttributeName{(\"VisibilityTimeout\")},\n\t\t\tQueueUrl: &q.url,\n\t\t}\n\t\tresp, err := s.sqsSrv(q.QueueConf).GetQueueAttributes(pollctx, req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to get queue attributes for '%s' - %s\", q.Name, err.Error())\n\t\t}\n\t\tto := resp.Attributes[\"VisibilityTimeout\"]\n\t\tif to == \"\" {\n\t\t\treturn fmt.Errorf(\"No visibility timeout returned by SQS for queue '%s'\", q.Name)\n\t\t}\n\t\tvisTimeout, err := strconv.Atoi(to)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to convert visibility timeout from '%s' to int - '%s'\", to, err.Error())\n\t\t}\n\t\t// Each queue runs in a dedicated go routine.\n\t\tgo func(vt int32) {\n\t\t\ts.queuePollers.Add(1)\n\t\t\tdefer s.queuePollers.Done()\n\t\t\ts.run(pollctx, taskctx, q, vt)\n\t\t}(int32(visTimeout))\n\t}\n\n\treturn nil\n}", "func (m *SynchronizationJob) SetSynchronizationJobSettings(value []KeyValuePairable)() {\n err := m.GetBackingStore().Set(\"synchronizationJobSettings\", value)\n if err != nil {\n panic(err)\n }\n}", "func (in *ActionExportCreateInput) SetThreads(value int64) *ActionExportCreateInput {\n\tin.Threads = value\n\n\tif in._selectedParameters == nil {\n\t\tin._selectedParameters = make(map[string]interface{})\n\t}\n\n\tin._selectedParameters[\"Threads\"] = nil\n\treturn in\n}", "func (o *QueueManager) SetAliasQueues(v []AliasQueue) {\n\to.AliasQueues = v\n}", "func Set(items []utils.Pair, db RedisDBClientInterface, group environment.EnvironmentGroup) error {\n\tfor _, kv := range items {\n\t\tfst := extract(kv.Fst.(string), group)\n\t\tsnd := extract(kv.Snd.(string), group)\n\t\t_, err := db.Set(fst, snd,0).Result()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func ValidateQueues(db *storm.DB, config 
Settings.FullClientSettings, tclient *torrent.Client) {\n\ttorrentQueues := Storage.FetchQueues(db)\n\tfor len(torrentQueues.ActiveTorrents) > config.MaxActiveTorrents {\n\t\tremoveTorrent := torrentQueues.ActiveTorrents[:1]\n\t\tfor _, singleTorrent := range tclient.Torrents() {\n\t\t\tif singleTorrent.InfoHash().String() == removeTorrent[0] {\n\t\t\t\tsingleTorrentFromStorage := Storage.FetchTorrentFromStorage(db, removeTorrent[0])\n\t\t\t\tRemoveTorrentFromActive(&singleTorrentFromStorage, singleTorrent, db)\n\t\t\t}\n\t\t}\n\t}\n\ttorrentQueues = Storage.FetchQueues(db)\n\tfor _, singleTorrent := range tclient.Torrents() {\n\t\tsingleTorrentFromStorage := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())\n\t\tif singleTorrentFromStorage.TorrentStatus == \"Stopped\" {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, queuedTorrent := range torrentQueues.QueuedTorrents { //If we have a queued torrent that is missing data, and an active torrent that is seeding, then prioritize the missing data one\n\t\t\tif singleTorrent.InfoHash().String() == queuedTorrent {\n\t\t\t\tif singleTorrent.BytesMissing() > 0 {\n\t\t\t\t\tfor _, activeTorrent := range torrentQueues.ActiveTorrents {\n\t\t\t\t\t\tfor _, singleActiveTorrent := range tclient.Torrents() {\n\t\t\t\t\t\t\tif activeTorrent == singleActiveTorrent.InfoHash().String() {\n\t\t\t\t\t\t\t\tif singleActiveTorrent.Seeding() == true {\n\t\t\t\t\t\t\t\t\tsingleActiveTFS := Storage.FetchTorrentFromStorage(db, activeTorrent)\n\t\t\t\t\t\t\t\t\tLogger.WithFields(logrus.Fields{\"TorrentName\": singleActiveTFS.TorrentName}).Info(\"Seeding, Removing from active to add queued\")\n\t\t\t\t\t\t\t\t\tRemoveTorrentFromActive(&singleActiveTFS, singleActiveTorrent, db)\n\t\t\t\t\t\t\t\t\tsingleQueuedTFS := Storage.FetchTorrentFromStorage(db, queuedTorrent)\n\t\t\t\t\t\t\t\t\tLogger.WithFields(logrus.Fields{\"TorrentName\": singleQueuedTFS.TorrentName}).Info(\"Adding torrent to the queue, not 
active\")\n\t\t\t\t\t\t\t\t\tAddTorrentToActive(&singleQueuedTFS, singleTorrent, db)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (m NoAllocs) SetAllocQty(value decimal.Decimal, scale int32) {\n\tm.Set(field.NewAllocQty(value, scale))\n}", "func Put(queueName string, value interface{}) error {\n\treturn b.Put(queueName, value)\n}", "func (m *mqService) SetQOSCount(count int) ConfigFunc {\n\treturn func(qos *ConfigQOS) {\n\t\tqos.Count = count\n\t}\n}", "func (_m *PrometheusBackend) SetPipelineRunsQueued(n int) {\n\t_m.Called(n)\n}", "func (self *Swarm) SetChequebook(ctx context.Context) error {\n\terr := self.config.Swap.SetChequebook(ctx, self.backend, self.config.Path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Info(fmt.Sprintf(\"new chequebook set (%v): saving config file, resetting all connections in the hive\", self.config.Swap.Contract.Hex()))\n\tself.hive.DropAll()\n\treturn nil\n}", "func (gs *GasStation) addColumnToQueue(gc GasColumn) {\r\n\tvar cl *ClientQueue\r\n\tgs.queueForEachColumn[gc] = cl\r\n}", "func (m *Mailboxes) Set(n int, val string) error {\n\tif n > len(m.mem)-1 {\n\t\treturn ErrInvalidMemory{n}\n\t}\n\n\tif n < 0 {\n\t\treturn ErrInvalidMemory{n}\n\t}\n\n\tm.mem[n] = val\n\treturn nil\n}", "func (a *SequenceNumber) SetSQN(sQN uint8) {}", "func (b *B) SetParallelism(p int)", "func (m *MockMetrics) SetQueueCount(arg0 int) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"SetQueueCount\", arg0)\n}", "func (_m *Factory) SetClientQPS(_a0 float32) {\n\t_m.Called(_a0)\n}", "func (_Mcapscontroller *McapscontrollerTransactor) SetMaxPoolTokens(opts *bind.TransactOpts, poolAddress common.Address, maxPoolTokens *big.Int) (*types.Transaction, error) {\n\treturn _Mcapscontroller.contract.Transact(opts, \"setMaxPoolTokens\", poolAddress, maxPoolTokens)\n}", "func Queue(opt queue.Queue) Option {\n\treturn func(o *Options) {\n\t\to.Queue = opt\n\t}\n}", "func (q *Queue) EnQueue(val interface{}) 
{\r\n\r\n\ttemp, _ := CreateNew(val)\r\n\r\n\tq.QueueList = append(q.QueueList, temp.QueueList...)\r\n}", "func (s QueueSetSpy) Queues() map[DeploymentID]*R11nQueue {\n\tres := s.Called()\n\treturn res.Get(0).(map[DeploymentID]*R11nQueue)\n}", "func (s *AgentContactReference) SetQueue(v *QueueReference) *AgentContactReference {\n\ts.Queue = v\n\treturn s\n}", "func SetShardsForTesting(ctx context.Context, rs []ReclusteringShard) error {\n\ttestutil.MustApply(ctx,\n\t\tspanner.Delete(\"ReclusteringShards\", spanner.AllKeys()))\n\t// Insert some ReclusteringShards.\n\t_, err := span.ReadWriteTransaction(ctx, func(ctx context.Context) error {\n\t\tfor _, r := range rs {\n\t\t\tms := spanutil.InsertMap(\"ReclusteringShards\", map[string]any{\n\t\t\t\t\"ShardNumber\": r.ShardNumber,\n\t\t\t\t\"Project\": r.Project,\n\t\t\t\t\"AttemptTimestamp\": r.AttemptTimestamp,\n\t\t\t\t\"Progress\": r.Progress,\n\t\t\t})\n\t\t\tspan.BufferWrite(ctx, ms)\n\t\t}\n\t\treturn nil\n\t})\n\treturn err\n}", "func (as AccountStorage) SetPendingCoinDayQueue(ctx sdk.Context, me types.AccountKey, pendingCoinDayQueue *PendingCoinDayQueue) sdk.Error {\n\tstore := ctx.KVStore(as.key)\n\tpendingCoinDayQueueByte, err := as.cdc.MarshalJSON(*pendingCoinDayQueue)\n\tif err != nil {\n\t\treturn ErrFailedToMarshalPendingCoinDayQueue(err)\n\t}\n\tstore.Set(getPendingCoinDayQueueKey(me), pendingCoinDayQueueByte)\n\treturn nil\n}", "func (tr *Cluster) SetParameters(params map[string]interface{}) error {\n\tp, err := json.TFParser.Marshal(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)\n}", "func WithQueueSubscriber(queue string) ConsumerOption {\n\treturn func(c *Consumer) error {\n\t\tif queue == \"\" {\n\t\t\treturn ErrInvalidQueueName\n\t\t}\n\t\tc.Subscriber = &QueueSubscriber{Queue: queue}\n\t\treturn nil\n\t}\n}", "func (ssc *StatefulSetController) enqueueStatefulSet(obj interface{}) {\n\tkey, err := controller.KeyFunc(obj)\n\tif err != nil 
{\n\t\tutilruntime.HandleError(fmt.Errorf(\"Couldn't get key for object %+v: %v\", obj, err))\n\t\treturn\n\t}\n\tssc.queue.Add(key)\n}", "func groomQueues(queues *Queues) (err kv.Error) {\n\tfor qName, qDetails := range *queues {\n\t\t// If we have enough runners drop the queue as it needs nothing done to it\n\t\tif len(qDetails.NodeGroup) == 0 || qDetails.Running >= qDetails.Ready+qDetails.NotVisible {\n\t\t\tif logger.IsTrace() {\n\t\t\t\tlogger.Trace(\"queue already handled\", \"queue\", qName, \"stack\", stack.Trace().TrimRuntime())\n\t\t\t}\n\t\t\tdelete(*queues, qName)\n\t\t}\n\t}\n\treturn nil\n}" ]
[ "0.60655606", "0.60586584", "0.5939442", "0.58560574", "0.57875776", "0.5729049", "0.5456519", "0.54093945", "0.52865255", "0.5281743", "0.5238613", "0.520454", "0.5202154", "0.51686084", "0.51261485", "0.5113021", "0.5069352", "0.5063116", "0.5051894", "0.50452304", "0.5042081", "0.5042068", "0.5041478", "0.503941", "0.50341976", "0.5015566", "0.49399552", "0.48950285", "0.487654", "0.48710188", "0.48641852", "0.4853056", "0.48195937", "0.4818114", "0.4812475", "0.48050088", "0.47967413", "0.47943625", "0.47639203", "0.4753472", "0.47502643", "0.47266856", "0.46795544", "0.46540165", "0.46527028", "0.46498135", "0.46440807", "0.46391225", "0.46313223", "0.46229637", "0.4622938", "0.46208358", "0.46099985", "0.45666462", "0.4557358", "0.45252442", "0.4522156", "0.44985747", "0.44977486", "0.44975874", "0.4496435", "0.44754854", "0.44724527", "0.44688135", "0.44440535", "0.44432253", "0.4435722", "0.44356394", "0.44152778", "0.44033962", "0.4402275", "0.44017527", "0.4395115", "0.43923742", "0.43885067", "0.43594095", "0.43575317", "0.43528938", "0.4347428", "0.43468145", "0.4340868", "0.43241265", "0.43198234", "0.43171063", "0.43161196", "0.43029392", "0.42998987", "0.4295411", "0.42951337", "0.42941594", "0.4290528", "0.42898387", "0.42810768", "0.42797163", "0.4267939", "0.42675316", "0.42650148", "0.4253764", "0.42476857", "0.42441234" ]
0.6035805
2
Is the tiploc one for this station
func (bf *boardFilter) atStation(tpl string) bool { for _, s := range bf.res.Station { if s == tpl { return true } } return false }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (me TxsdType) IsLocator() bool { return me == \"locator\" }", "func (me TxsdType) IsLocator() bool { return me.String() == \"locator\" }", "func (l Location) IsStation() bool {\n\treturn l.Station != nil\n}", "func (t *Tangle) HasTip(h hash.Hash) bool {\n\treturn t.tips[h]\n}", "func (d *LDB) GetStationTiploc(tiploc string) *Station {\n\tcrs, exists := d.tiplocs[tiploc]\n\tif !exists {\n\t\treturn nil\n\t}\n\treturn d.GetStationCrs(crs)\n\t/*\n\t\tvar station *Station\n\n\t\t\t// Try to resolve the crs\n\t\t_ = d.View(func(tx *bbolt.Tx) error {\n\t\t\tstation = d.getStationTiploc(tx, tiploc)\n\t\t\treturn nil\n\t\t})\n\n\t\t\treturn station\n\t*/\n}", "func IsPoint(t Tuplelike) bool {\n\treturn t.At(3) == pointW\n}", "func (l *Location) firstZoneUsed() bool {\n\tfor _, tx := range l.tx {\n\t\tif tx.index == 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (l *Location) firstZoneUsed() bool {\n\tfor _, tx := range l.tx {\n\t\tif tx.index == 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (t *Tuple) IsPoint() bool {\n\treturn t.W == 1\n}", "func (me TxsdMovementStatus) IsT() bool { return me.String() == \"T\" }", "func (t *Tuple) IsPoint() bool {\n\treturn t.W == 1.0\n}", "func isStationInJourney(st string, journey timetableRouteJourney) bool {\n\tfor _, call := range journey.Calls.Call {\n\t\tif call.ScheduledStopPointRef.Ref == st {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (np *vpoint) sameLoc(x, y float64) bool {\n\treturn np.x == x && np.y == y\n}", "func (me TAttlistDescriptorNameType) IsGeographic() bool { return me.String() == \"Geographic\" }", "func (t *Tuple) IsPoint() bool {\n\treturn t.w == 1.0\n}", "func (adf ADF) IsStationary() bool {\n\treturn adf.Statistic < adf.PValueThreshold\n}", "func (me TdtypeType) IsNtpstamp() bool { return me.String() == \"ntpstamp\" }", "func (me TisoLanguageCodes) IsPt() bool { return me.String() == \"PT\" }", "func (me TEventType) IsPing() bool { return me.String() == 
\"Ping\" }", "func isDST(t time.Time) bool {\n\tname, _ := t.In(locNewYork).Zone()\n\treturn name == \"EDT\"\n}", "func (t *traceLocation) isSet() bool {\n\treturn t.line > 0\n}", "func (l Location) IsSystem() bool {\n\treturn l.Station == nil && l.Structure == nil\n}", "func (me TAttlistOtherIDSource) IsNasa() bool { return me.String() == \"NASA\" }", "func (c1 Coordinates) IsIn(l Locatable) bool {\n\tif l == nil {\n\t\treturn false\n\t}\n\tc2 := l.GetCoords()\n\tif c2.Resolution > c1.Resolution {\n\t\treturn false\n\t}\n\n\tif c1.Sector != c2.Sector {\n\t\treturn false\n\t} else if c2.Resolution == coord_SECTOR {\n\t\treturn true\n\t}\n\n\tif c1.SubSector != c2.SubSector {\n\t\treturn false\n\t} else if c2.Resolution == coord_SUBSECTOR {\n\t\treturn true\n\t}\n\n\tif c1.StarCoord != c2.StarCoord {\n\t\treturn false\n\t} else if c2.Resolution == coord_STARSYSTEM {\n\t\treturn true\n\t}\n\n\t//if this point is reached, then we know c1 and c2 are both local points in the same starsystem,\n\t//so we want to see if c1 is orbitting/docking with l\n\tif dist := c1.CalcVector(c2).Distance * METERS_PER_LY; dist <= l.GetVisitDistance() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (info *endpointsInfo) GetIsLocal() bool {\n\treturn info.isLocal\n}", "func (me TxsdTaxAccountingBasis) IsT() bool { return me.String() == \"T\" }", "func (t Tuple) IsPoint() bool {\n\tif float.Equal(t.W, pointW) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (info *BaseEndpointInfo) GetIsLocal() bool {\n\treturn info.IsLocal\n}", "func (me TAttlistLocationLabelType) IsTable() bool { return me.String() == \"table\" }", "func (europ europeDeprecatedTimeZones) Isle_of_Man() string { return \"Europe/London\" }", "func (this *satcluster) getLocation() vectors.Vector2 {\n\tif exestate.OnError(this) {\n\t\treturn vectors.GetEmptyVector2()\n\t}\n\n\t//Interseccion entre dos satelites\n\tpointA, pointB, state := geometry.GetCirclesIntersections(this.getAt(0), this.getAt(1))\n\n\tif 
!state.IsOk() {\n\t\tthis.RegisterState(state)\n\t\treturn vectors.GetEmptyVector2()\n\t}\n\n\tif pointA.IsEmpty() && pointB.IsEmpty() {\n\t\tthis.RegisterState(exestate.ControlledError(\"No se pudo triengular, no hay interseccion (1) (satellities.satcluster.getLocation)\"))\n\t\treturn vectors.GetEmptyVector2()\n\t}\n\n\tvar intersectsA = (pointA.IsEmpty() == false)\n\tvar intersectsB = (pointB.IsEmpty() == false)\n\n\t//\tequals, _ := vectors.Equals(pointA, pointB)\n\n\t//Para los restantes satelites se verifican las distancias a los puntos de la interseccion\n\tif this.count() > 2 {\n\t\tfor i := 2; i < this.count(); i++ {\n\n\t\t\tintersectsA = intersectsA && !(math.Abs(pointA.DistanceTo(this.getAt(i).Pos)-this.getAt(i).Distance) > 0.005)\n\t\t\tintersectsB = intersectsB && !(math.Abs(pointB.DistanceTo(this.getAt(i).Pos)-this.getAt(i).Distance) > 0.005)\n\n\t\t\tif intersectsA == false && intersectsB == false {\n\t\t\t\tthis.RegisterState(exestate.ControlledError(\"No se pudo triangular, no hay interseccion (2) (satellities.satcluster.getLocation)\"))\n\t\t\t\treturn vectors.GetEmptyVector2()\n\t\t\t}\n\t\t}\n\t}\n\n\tif intersectsA && intersectsB {\n\t\tthis.RegisterState(exestate.ControlledError(\"No se pudo triangular, mas de un punto (satellities.satcluster.getLocation)\"))\n\t\treturn vectors.GetEmptyVector2()\n\t}\n\n\tif intersectsA {\n\t\tpointA.Round()\n\t\treturn pointA\n\t} else if intersectsB {\n\t\tpointB.Round()\n\t\treturn pointB\n\t} else {\n\t\tthis.RegisterState(exestate.ControlledError(\"No se pudo triangular\"))\n\t\treturn vectors.GetEmptyVector2()\n\t}\n}", "func (p *Position) IsShort() bool {\n\treturn p.EntranceOrder() != nil && p.EntranceOrder().Side == SELL\n}", "func (ss *SolarSystem) IsOptimumTemperaturePressure() bool {\n\ta := arePointsAligned(ss.Ferengi.Location, ss.Betasoide.Location, ss.Vulcano.Location)\n\tb := arePointsAligned(ss.Ferengi.Location, ss.Betasoide.Location, &ss.Sun)\n\treturn a && !b\n}", "func (d *droid) 
hasVisitedCurrent() bool {\n\tfor _, visitedPoint := range d.visited {\n\t\tif d.location.x == visitedPoint.x && d.location.y == visitedPoint.y {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (me TaltitudeModeEnumType) IsRelativeToGround() bool { return me == \"relativeToGround\" }", "func IsLocation(reply Reply) (bool, error) {\n\tif reply.Coords != [2]float64{0, 0} {\n\t\tb, _ := ioutil.ReadFile(\"./static/service_area.json\")\n\t\tfeature, _ := geojson.UnmarshalFeature(b)\n\t\tpnt := orb.Point(reply.Coords)\n\t\t// NOTE: change to postgis if accuracy needed\n\t\tif planar.PolygonContains(feature.Geometry.Bound().ToPolygon(), pnt) {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, errors.New(\"Outside service area\")\n\t}\n\tlocPostback := strings.Split(reply.Text, \":\")\n\tif len(locPostback) > 1 && locPostback[0] == \"location\" {\n\t\treturn true, nil\n\t}\n\t// check for text if it's match\n\tif IsThisIn(strings.ToLower(reply.Text), TargetPlaces) {\n\t\treturn true, nil\n\t}\n\treturn false, errors.New(\"Not a location\")\n}", "func (z *Zone) IsTLD() bool {\n\treturn !strings.Contains(z.Domain, \".\")\n}", "func (n *Node) IsLocal() bool {\n\treturn n != nil && n.Name == GetName() && n.Cluster == getCluster()\n}", "func isSpotNode(node *apiv1.Node) bool {\n\tsplitLabel := strings.SplitN(SpotNodeLabel, \"=\", 2)\n\n\t// If \"=\" found, check for new label schema. 
If no \"=\" is found, check for\n\t// old label schema\n\tswitch len(splitLabel) {\n\tcase 1:\n\t\t_, found := node.ObjectMeta.Labels[SpotNodeLabel]\n\t\treturn found\n\tcase 2:\n\t\tspotLabelKey := splitLabel[0]\n\t\tspotLabelVal := splitLabel[1]\n\n\t\tval, _ := node.ObjectMeta.Labels[spotLabelKey]\n\t\tif val == spotLabelVal {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (d *LDB) createStation(tx *bbolt.Tx, locations []*darwinref.Location) *Station {\n\n\tif len(locations) == 0 {\n\t\treturn nil\n\t}\n\n\t// Mark Public if we have a CRS & it doesn't start with X or Z\n\t// 2019 June 10 Enable Z for now as Farringdon is known as Farringdon Underground.\n\t// This will expose the underground but better than leave a major station. Hopefully with Crossrail this will revert\n\t// back to the single station.\n\tcrs := locations[0].Crs\n\tpublic := crs != \"\" && crs[0] != 'X' // && crs[0] != 'Z'\n\tif !public {\n\t\treturn nil\n\t}\n\n\t//tb := tx.Bucket([]byte(tiplocBucket))\n\n\ts := d.getStationCrs(tx, crs)\n\tif s == nil {\n\t\ts = &Station{}\n\t\ts.Crs = crs\n\t\ts.Locations = locations\n\t} else {\n\t\t// Remove any tiplocs that have been removed\n\t\ttpl := make(map[string]interface{})\n\t\tfor _, loc := range locations {\n\t\t\ttpl[loc.Tiploc] = true\n\t\t}\n\t\tfor _, loc := range s.Locations {\n\t\t\tif _, exists := tpl[loc.Tiploc]; exists {\n\t\t\t\tdelete(tpl, loc.Tiploc)\n\t\t\t}\n\t\t}\n\t\tfor t, _ := range tpl {\n\t\t\tdelete(d.tiplocs, t)\n\t\t\t//_ = tb.Delete([]byte(t))\n\t\t}\n\t}\n\n\ts.Public = public\n\n\td.stations[crs] = s\n\tb, _ := s.Bytes()\n\t_ = tx.Bucket([]byte(crsBucket)).Put([]byte(crs), b)\n\n\t// Ensure all our tiplocs point to this crs\n\t//cb := []byte(crs)\n\tfor _, l := range s.Locations {\n\t\td.tiplocs[l.Tiploc] = crs\n\t\t//tpl := []byte(l.Tiploc)\n\t\t//b = tb.Get(tpl)\n\t\t//if b == nil || bytes.Compare(cb, b) != 0 {\n\t\t//\t_ = tb.Put(tpl, cb)\n\t\t//}\n\t}\n\n\treturn s\n}", "func (o *NewData) HasLocation() 
bool {\n\tif o != nil && o.Location != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsLat(val float64) bool {\n\tif (val < MaxLat) && (val > MinLat) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (o *Workloadv1Location) HasLatitude() bool {\n\tif o != nil && o.Latitude != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (me TxsdFeTurbulenceTypeStitchTiles) IsStitch() bool { return me.String() == \"stitch\" }", "func (g *Grid) IsAlive(p Point) (bool, bool) {\n\tg.lock.RLock()\n\tdefer g.lock.RUnlock()\n\tif c, ok := g.set[p]; ok {\n\t\treturn c.Alive, ok\n\t}\n\treturn false, false\n}", "func (l Location) IsCitadel() bool {\n\treturn l.Structure != nil\n}", "func (t *Tile) IsTerminal() bool {\n\tif t.GetSuit().GetSuitType() != SuitTypeSimple {\n\t\treturn false\n\t}\n\treturn t.GetOrdinal() == 0 || t.GetOrdinal() == t.GetSuit().GetSize()-1\n}", "func (me TxsdFeTurbulenceTypeStitchTiles) IsNoStitch() bool { return me.String() == \"noStitch\" }", "func (m NoMDEntries) HasLocationID() bool {\n\treturn m.Has(tag.LocationID)\n}", "func (t *treapNode) isSpanInTreap(s *mspan) bool {\n\tif t == nil {\n\t\treturn false\n\t}\n\treturn t.span == s || t.left.isSpanInTreap(s) || t.right.isSpanInTreap(s)\n}", "func isVirtualTip(bs *HashSet, futureSet *HashSet, anticone *HashSet, children *HashSet) bool {\n\tfor k := range children.GetMap() {\n\t\tif bs.Has(&k) {\n\t\t\treturn false\n\t\t}\n\t\tif !futureSet.Has(&k) && !anticone.Has(&k) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (et EntryType) IsInUse() bool {\n\treturn et&128 > 0\n}", "func (f Forest) IsTreeInLocation(location Location) bool {\n\t_, treeInLocation := f.Trees[location]\n\treturn treeInLocation\n}", "func (p IPPrefix) IsSingleIP() bool { return p.Bits != 0 && p.Bits == p.IP.BitLen() }", "func isTravellable(p robo.Node) bool {\n\t// NB since we're using the absolute value here, we could make the exploration more efficient by only finding 1st quadrant results (i.e. 
x >= 0, y >= 0)\n\t// \tand multiplying the resulting area by 4 (as the same area would be reflected in all 4 quadrants)\n\treturn getSumOfDigits(abs(p.X))+getSumOfDigits(abs(p.Y)) <= MaxSum\n}", "func (s *sunlightmap) Now() bool {\n\ts.zeitpunkte = []time.Time{time.Now().Local()}\n\ts.visualization = \"static\"\n\treturn true\n}", "func (me TxsdSystemCategory) IsSensor() bool { return me.String() == \"sensor\" }", "func (t Time) Location() *Location {}", "func (i *Info) GetHasLTOp() bool {\n\tif strings.Contains(i.Selectable, \"LT\") {\n\t\treturn true\n\t}\n\treturn false\n}", "func (atlan atlanticTimeZones) St_Helena() string {return \"Atlantic/St_Helena\" }", "func (me TAttlistLocationLabelType) IsFigure() bool { return me.String() == \"figure\" }", "func (india indianaTimeZones) Tell_City() string {return \"America/Indiana/Tell_City\" }", "func (o *W2) HasAllocatedTips() bool {\n\tif o != nil && o.AllocatedTips.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (*InstSIToFP) isInst() {}", "func CheckMapPointForFood(tile *GopherWorldTile) bool {\n\treturn tile.Food != nil && tile.Gopher == nil\n}", "func (w *worker) isTTDReached(header *model.Header) bool {\n\ttd, ttd := w.chain.GetTd(header.ParentHash, header.Number.Uint64()-1), w.chain.Config().TerminalTotalDifficulty\n\treturn td != nil && ttd != nil && td.Cmp(ttd) >= 0\n}", "func (o *LastBidAsk) HasT() bool {\n\tif o != nil && o.T != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (me TAttlistOtherIDSource) IsPip() bool { return me.String() == \"PIP\" }", "func isTemp(tls *libc.TLS, N int32) uintptr { /* speedtest1.c:111:19: */\n\tif g.eTemp >= N {\n\t\treturn ts + 2146 /* \" TEMP\" */\n\t}\n\treturn ts + 2152 /* \"\" */\n}", "func (tr *trooper) teleport() bool {\n\tif tr.teleportEnergy >= tr.temax {\n\t\ttr.play(teleportSound)\n\t\ttr.teleportEnergy = 0\n\t\ttr.energyChanged()\n\t\treturn true\n\t}\n\treturn false\n}", "func (d *Device) IsThermostat() bool {\n\treturn 
bitMasked{Functionbitmask: d.Functionbitmask}.hasMask(64)\n}", "func (me TSAFTPTMovementTaxType) IsNs() bool { return me.String() == \"NS\" }", "func IsAfternoon() bool {\n localTime := time.Now()\n return localTime.Hour() <= 18\n}", "func (ts *triState) Get() interface{} {\n\treturn *ts == setTrue\n}", "func (o *ReservationModel) GetHasCityTaxOk() (*bool, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.HasCityTax, true\n}", "func (nu UnitNameInfo) IsTemplate() bool {\n\treturn len(nu.Template) > 0 && !nu.IsInstance()\n}", "func (me TxsdPresentationAttributesGraphicsDisplay) IsMarker() bool { return me.String() == \"marker\" }", "func (*OpenconfigPlatform_Components_Component_State_Temperature) IsYANGGoStruct() {}", "func (ggt Globegridtile) ContainsLat(lat float64) bool {\n\treturn (lat >= ggt.min_lat && lat < ggt.max_lat)\n}", "func (pacif pacificTimeZones) Tongatapu() string {return \"Pacific/Tongatapu\" }", "func (me TviewRefreshModeEnumType) IsOnRegion() bool { return me == \"onRegion\" }", "func (jbobject *TaskContext) IsRunningLocally() bool {\n\tjret, err := jbobject.CallMethod(javabind.GetEnv(), \"isRunningLocally\", javabind.Boolean)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn jret.(bool)\n}", "func (me TactionType) IsStatusNewInfo() bool { return me.String() == \"status-new-info\" }", "func (o *StoragePhysicalDisk) HasLocatorLed() bool {\n\tif o != nil && o.LocatorLed != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func isSameNode(a, b *NodeInfo) bool {\n\n\tif a.Addr == b.Addr { //do not check ID now\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (me TAttlistGeneralNoteOwner) IsNasa() bool { return me.String() == \"NASA\" }", "func (xt XSDTime) Location() *time.Location {\n\tif xt.hasTz {\n\t\treturn xt.innerTime.Location()\n\t}\n\treturn nil\n}", "func (o *TagModelStore) HasLatitude() bool {\n\tif o != nil && o.Latitude.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (c *connInfo) is0RTT() (ok bool) 
{\n\tfor _, packet := range c.packets {\n\t\thdr := packet\n\t\tpacketType := logging.PacketTypeFromHeader(&hdr)\n\t\tif packetType == logging.PacketType0RTT {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (me TxsdMovementStatus) IsN() bool { return me.String() == \"N\" }", "func (antar antarcticaTimeZones) Syowa() string {return \"Antarctica/Syowa\" }", "func (r Response) IsLocation() bool {\n\treturn r.isType(TypeLocation)\n}", "func (p Point) Is(p2 Point) bool {\n\treturn p.X == p2.X && p.Y == p2.Y\n}", "func (context *context) IsSOT(t Token) bool {\n\treturn whisper.Token(t.Id) == context.model.ctx.Whisper_token_sot()\n}", "func (lt *LineTask) Loc(prog float64) *Point {\n\tif lt.TaskType == OnDeparture {\n\t\treturn lt.Stay.Pos()\n\t}\n\tif prog < 0.5 && lt.before.TaskType == OnDeparture {\n\t\treturn lt.Moving.Div(2 * prog * prog)\n\t} else if prog > 0.5 && lt.TaskType == OnStopping {\n\t\treturn lt.Moving.Div(-2*prog*prog + 4*prog - 1)\n\t}\n\treturn lt.Moving.Div(prog)\n}", "func (u *RootInfo) IsRootInfo() {}", "func hasPrimaryIP(node *goeapi.Node, name, ip string) (bool, error) {\n\tiface := module.IPInterface(node)\n\tcfg, err := iface.Get(name)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn cfg.Address() == ip, nil\n}", "func (o *Wireless) HasLastTaskId() bool {\n\tif o != nil && o.LastTaskId != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (ameri americaTimeZones) St_Kitts() string {return \"America/St_Kitts\" }", "func (a *_Atom) isTerminal() bool {\n\treturn a.bonds.Count() == 1\n}" ]
[ "0.67006546", "0.66539073", "0.63916194", "0.59491193", "0.56473047", "0.5592531", "0.55731654", "0.55731654", "0.54852706", "0.54088384", "0.54081464", "0.5377661", "0.5375327", "0.5370738", "0.53356165", "0.53268975", "0.5317167", "0.53046083", "0.52147436", "0.51936233", "0.51934934", "0.5154264", "0.5120234", "0.5115398", "0.51015544", "0.50832975", "0.50757897", "0.5040876", "0.5026803", "0.5024056", "0.5009615", "0.49991542", "0.49954978", "0.49775082", "0.4960659", "0.49526188", "0.49489322", "0.4942218", "0.49412292", "0.49356264", "0.49220553", "0.49105734", "0.48904282", "0.4887003", "0.48860922", "0.48851112", "0.48846164", "0.48652443", "0.48628733", "0.48556763", "0.48552498", "0.48131183", "0.4804166", "0.48013696", "0.47805342", "0.47737384", "0.47714496", "0.47714335", "0.47687957", "0.47567722", "0.4749613", "0.4744259", "0.47423133", "0.4735561", "0.47294357", "0.4721036", "0.4715218", "0.47089452", "0.4708297", "0.47060865", "0.47047636", "0.47017363", "0.4697419", "0.4695633", "0.46930647", "0.46911943", "0.469107", "0.46838704", "0.46827748", "0.46770602", "0.4675013", "0.46683416", "0.46679196", "0.46566826", "0.46565783", "0.46510398", "0.4650867", "0.46500838", "0.46457392", "0.46419278", "0.46376863", "0.46342218", "0.46307486", "0.46303523", "0.4625742", "0.4620944", "0.46203682", "0.46180946", "0.4615343", "0.4613991" ]
0.5378452
11
Does the service call at a specific station
func (bf *boardFilter) callsAt(callingPoints []darwind3.CallingPoint, tpls []string) bool { for _, cp := range callingPoints { for _, tpl := range tpls { if tpl == cp.Tiploc { return true } } } return false }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *service) GetByStation(stationID int) (*Status, error) {\n\treturn s.adapter.GetByStation(stationID)\n}", "func isStationInJourney(st string, journey timetableRouteJourney) bool {\n\tfor _, call := range journey.Calls.Call {\n\t\tif call.ScheduledStopPointRef.Ref == st {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (bf *boardFilter) atStation(tpl string) bool {\n\tfor _, s := range bf.res.Station {\n\t\tif s == tpl {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (this *BuoyController) Station() {\n\tbuoyBusiness.Station(&this.BaseController, this.GetString(\":stationId\"))\n}", "func (s *Server) Service() string { return \"station\" }", "func TestStationServiceGetStationByID(t *testing.T) {\n\t// case 1\n\tStationRepo.On(\"GetStationByID\", int64(1)).Once().Return(&model.Station{Code: \"123\"}, nil)\n\ttestutil.Play(t, StationSrv, \"GetStationByID\", int64(1)).Match(&model.Station{Code: \"123\"}, nil)\n\n\t// case 2\n\tStationRepo.On(\"GetStationByID\", int64(2)).Once().Return(nil, gorm.ErrRecordNotFound)\n\ttestutil.Play(t, StationSrv, \"GetStationByID\", int64(2)).Match(nil, gorm.ErrRecordNotFound)\n}", "func (c *Client) StationInfo(addr mac.MAC) (*StationInfo, error) {\n\tfor _, iface := range c.interfaces {\n\t\tattrs := []netlink.Attribute{\n\t\t\t{\n\t\t\t\tType: nl80211.AttrMac,\n\t\t\t\tData: addr[:],\n\t\t\t},\n\t\t\tnetlink.Attribute{\n\t\t\t\tType: nl80211.AttrIfindex,\n\t\t\t\tData: nlenc.Uint32Bytes(uint32(iface.Index)),\n\t\t\t},\n\t\t}\n\n\t\tb, err := netlink.MarshalAttributes(attrs)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Ask nl80211 to retrieve station info for the interface specified\n\t\t// by its attributes\n\t\treq := genetlink.Message{\n\t\t\tHeader: genetlink.Header{\n\t\t\t\t// From nl80211.h:\n\t\t\t\t// * @NL80211_CMD_GET_STATION: Get station attributes for station identified by\n\t\t\t\t// * %NL80211_ATTR_MAC on the interface identified by %NL80211_ATTR_IFINDEX.\n\t\t\t\tCommand: 
nl80211.CmdGetStation,\n\t\t\t\tVersion: c.familyVersion,\n\t\t\t},\n\t\t\tData: b,\n\t\t}\n\n\t\tflags := netlink.Request | netlink.Match\n\t\tmsgs, err := c.c.Execute(req, c.familyID, flags)\n\t\tif err != nil || len(msgs) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(msgs) > 1 {\n\t\t\treturn nil, fmt.Errorf(\"unexpected number (%d) of messages received; expected 1\", len(msgs))\n\t\t}\n\n\t\tif err := c.checkMessages(msgs, nl80211.CmdNewStation); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn c.parseStationInfo(msgs[0].Data)\n\t}\n\n\treturn nil, fmt.Errorf(\"station %q not found\", addr)\n}", "func GetStationMonitor(params JourneyParams) (*Journey,error){\n urlString,err:=GetStationMonitorUrlAsString(params) \n\tvar record Journey\n\tlog.Println(urlString)\n\tlog.Println(err)\n\tif err!= nil {\n\t\treturn &record,err\n\t}\n err=makeRequest(urlString,&record)\n\tlog.Println(err)\n\tlog.Println(record)\n\treturn &record, err\n}", "func (g *Garden) CallService(span opentracing.Span, service, action string, request *Request) (int, string, error) {\n\ts := g.cfg.Routes[service]\n\tif len(s) == 0 {\n\t\treturn 404, NotFound, errors.New(\"service not found\")\n\t}\n\troute := s[action]\n\tif len(route.Path) == 0 {\n\t\treturn 404, NotFound, errors.New(\"service route not found\")\n\t}\n\n\t// just gateway can request out route\n\tif strings.ToLower(route.Type) == \"out\" && g.serviceType == 0 {\n\t\treturn 404, NotFound, errors.New(\"just gateway can request out type route\")\n\t}\n\t// gateway can't call rpc route\n\tif strings.ToLower(route.Type) == \"in\" && g.serviceType == 1 {\n\t\treturn 404, NotFound, errors.New(\"gateway can't call in type route\")\n\t}\n\n\tserviceAddr, nodeIndex, err := g.selectServiceHttpAddr(service)\n\tif err != nil {\n\t\treturn 404, NotFound, err\n\t}\n\tvar result string\n\tvar code int\n\turl := \"http://\" + serviceAddr + route.Path\n\n\t// service limiter\n\tif route.Limiter != \"\" {\n\t\tsecond, quantity, err := 
limiterAnalyze(route.Limiter)\n\t\tif err != nil {\n\t\t\tg.Log(DebugLevel, \"Limiter\", err)\n\t\t} else if !g.limiterInspect(serviceAddr+\"/\"+service+\"/\"+action, second, quantity) {\n\t\t\tspan.SetTag(\"break\", \"service limiter\")\n\t\t\treturn 403, ServerLimiter, errors.New(\"server limiter\")\n\t\t}\n\t}\n\n\t// service fusing\n\tif route.Fusing != \"\" {\n\t\tsecond, quantity, err := g.fusingAnalyze(route.Fusing)\n\t\tif err != nil {\n\t\t\tg.Log(DebugLevel, \"Fusing\", err)\n\t\t} else if !g.fusingInspect(serviceAddr+\"/\"+service+\"/\"+action, second, quantity) {\n\t\t\tspan.SetTag(\"break\", \"service fusing\")\n\t\t\treturn 403, ServerFusing, errors.New(\"server fusing\")\n\t\t}\n\t}\n\n\t// service call retry\n\tretry, err := retryAnalyze(g.cfg.Service.CallRetry)\n\tif err != nil {\n\t\tg.Log(DebugLevel, \"Retry\", err)\n\t\tretry = []int{0}\n\t}\n\n\tfor i, r := range retry {\n\t\tsm := serviceOperate{\n\t\t\toperate: \"incWaiting\",\n\t\t\tserviceName: service,\n\t\t\tnodeIndex: nodeIndex,\n\t\t}\n\t\tg.serviceManager <- sm\n\n\t\tcode, result, err = g.requestService(span, url, request, route.Timeout)\n\n\t\tsm.operate = \"decWaiting\"\n\t\tg.serviceManager <- sm\n\n\t\tif err != nil {\n\t\t\tg.addFusingQuantity(service + \"/\" + action)\n\n\t\t\t// call timeout don't retry\n\t\t\tif strings.Contains(err.Error(), \"Timeout\") {\n\t\t\t\treturn code, Timeout, err\n\t\t\t}\n\n\t\t\t// call 404 don't retry\n\t\t\tif code == 404 {\n\t\t\t\treturn code, NotFound, err\n\t\t\t}\n\n\t\t\tif i == len(retry)-1 {\n\t\t\t\treturn code, ServerError, err\n\t\t\t}\n\t\t\ttime.Sleep(time.Millisecond * time.Duration(r))\n\t\t\tcontinue\n\t\t}\n\n\t\tbreak\n\t}\n\n\treturn code, result, nil\n}", "func callService(from string, wg *sync.WaitGroup) {\n\tresponseData, apiErr := getXeAPIData(from, wg)\n\tif apiErr != nil {\n\t\tlogger.WithField(\"get XE API data error:\", apiErr.Error()).Info(\"Get API data failed\")\n\t\tlog.Panic(apiErr)\n\t}\n\t// prepared query 
values with parametrs\n\tqueryValues := make([]string, 0, len(responseData.To))\n\tqueryParams := make([]interface{}, 0, len(responseData.To)*5)\n\tfor _, toValue := range responseData.To {\n\t\tqueryValues = append(queryValues, \"(?, ?, ?, ?, ?)\")\n\t\tqueryParams = append(queryParams, responseData.Amount, toValue.Mid, responseData.From, toValue.Quotecurrency, responseData.Timestamp)\n\t}\n\t// update responsedata to db\n\tdbErr := db.UpdateResponseData(responseData, queryValues, queryParams)\n\tif dbErr != nil {\n\t\tlogger.WithField(\"update data to database error:\", apiErr.Error()).Info(\"Updating to database failed\")\n\t\tlog.Panic(apiErr)\n\t}\n\twg.Done()\n}", "func MonitoringStation(m Map) (station Coord, visible []Coord) {\n\tvres := make(chan VisibleResponse, len(m))\n\n\tfor _, ast := range m {\n\t\tgo Visible(m, ast, vres)\n\t}\n\n\ti := 0\n\tmaxlen := 0\n\tfor res := range vres {\n\t\tif maxlen < len(res.Visible) {\n\t\t\tmaxlen = len(res.Visible)\n\t\t\tstation = res.Station\n\t\t\tvisible = res.Visible\n\t\t}\n\n\t\tif i++; i == len(m) {\n\t\t\tclose(vres)\n\t\t}\n\t}\n\n\treturn\n}", "func (c WebsiteClient) StationInfo(code string, destination string) (*t.StationDepartures, error) {\n\tresp, err := getResponse(code, destination)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdoc, err := goquery.NewDocumentFromResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstationName := getStation(doc)\n\tdepartures, err := getDepartures(doc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsd := t.StationDepartures{\n\t\tName: *stationName,\n\t\tDepartures: departures,\n\t}\n\n\treturn &sd, nil\n}", "func (controller *BuoyController) RetrieveStation() {\n\tvar params struct {\n\t\tStationID string `form:\"stationId\" error:\"invalid_station_id\"`\n\t}\n\tif controller.ParseAndValidate(&params) == false {\n\t\treturn\n\t}\n\tbuoyStation, err := buoyService.FindStation(&controller.Service, params.StationID)\n\tif err != nil 
{\n\t\tlog.CompletedErrorf(err, controller.UserID, \"BuoyController.RetrieveStation\", \"StationID[%s]\", params.StationID)\n\t\tcontroller.ServeError(err)\n\t\treturn\n\t}\n\tcontroller.Data[\"Station\"] = buoyStation\n\tcontroller.Layout = \"\"\n\tcontroller.TplName = \"buoy/modal/pv_station-detail.html\"\n\tview, _ := controller.RenderString()\n\tcontroller.AjaxResponse(0, \"SUCCESS\", view)\n}", "func (c *Client) StationData(ctx context.Context, station string) (*Station, error) {\n\tresp, err := c.fetch(ctx, stationDataEndpoint, map[string]string{\"station\": station})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := struct {\n\t\tXMLName xml.Name `xml:\"STATION\"`\n\t\tStation2Char string `xml:\"STATION_2CHAR\"`\n\t\tStationName string `xml:\"STATIONNAME\"`\n\t\tItems []struct {\n\t\t\tIndex int `xml:\"ITEM_INDEX\"`\n\t\t\tScheduledDepartureDate string `xml:\"SCHED_DEP_DATE\"`\n\t\t\tDestination string `xml:\"DESTINATION\"`\n\t\t\tTrack string `xml:\"TRACK\"`\n\t\t\tLine string `xml:\"LINE\"`\n\t\t\tTrainID string `xml:\"TRAIN_ID\"`\n\t\t\tConnectingTrainID string `xml:\"CONNECTING_TRAIN_ID\"`\n\t\t\tStatus string `xml:\"STATUS\"`\n\t\t\tSecondsLate int `xml:\"SEC_LATE\"`\n\t\t\tLastModified string `xml:\"LAST_MODIFIED\"`\n\t\t\tBackColor string `xml:\"BACKCOLOR\"`\n\t\t\tForeColor string `xml:\"FORECOLOR\"`\n\t\t\tShadowColor string `xml:\"SHADOWCOLOR\"`\n\t\t\tGPSTime string `xml:\"GPSTIME\"`\n\t\t\tLineAbbreviation string `xml:\"LINEABBREVIATION\"`\n\t\t\tInlineMsg string `xml:\"INLINEMSG\"`\n\t\t\tLongitude string `xml:\"GPSLONGITUDE\"`\n\t\t\tLatitude string `xml:\"GPSLATITUDE\"`\n\t\t\tStops []struct {\n\t\t\t\tName string `xml:\"NAME\"`\n\t\t\t\tTime string `xml:\"TIME\"`\n\t\t\t\tDeparted string `xml:\"DEPARTED\"`\n\t\t\t} `xml:\"STOPS>STOP\"`\n\t\t} `xml:\"ITEMS>ITEM\"`\n\t}{}\n\n\terr = xml.Unmarshal(resp, &data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttrains := []StationTrain{}\n\tfor _, r := range data.Items {\n\t\ttID, err := 
strconv.Atoi(r.TrainID)\n\t\tif err != nil {\n\t\t\t// Skip trains that don't have a numeric ID.\n\t\t\t// These are Amtrak trains with \"A123\" style IDs.\n\t\t\tcontinue\n\t\t}\n\t\ttrain := StationTrain{\n\t\t\tIndex: r.Index,\n\t\t\tDestination: r.Destination,\n\t\t\tTrack: strings.TrimSpace(r.Track),\n\t\t\tLine: r.Line,\n\t\t\tTrainID: tID,\n\t\t\tStatus: strings.TrimSpace(r.Status),\n\t\t\tSecondsLate: time.Duration(r.SecondsLate) * time.Second,\n\t\t\tLineAbbrv: r.LineAbbreviation,\n\t\t\tInlineMsg: strings.TrimSpace(r.InlineMsg),\n\t\t}\n\t\ttrain.ScheduledDepartureDate, _ = parseTime(r.ScheduledDepartureDate)\n\t\ttrain.LatLngTimestamp, _ = parseTime(r.GPSTime)\n\t\ttrain.LatLng, _ = parseLatLng(r.Latitude, r.Longitude)\n\n\t\tstops := make([]StationStop, len(r.Stops))\n\t\tfor j, s := range r.Stops {\n\t\t\tstops[j] = StationStop{Name: strings.TrimSpace(s.Name)}\n\t\t\tstops[j].Time, _ = parseTime(s.Time)\n\t\t\tstops[j].Departed = (s.Departed == \"YES\")\n\t\t}\n\t\ttrain.Stops = stops\n\t\ttrains = append(trains, train)\n\t}\n\n\ts := &Station{ID: data.Station2Char, Name: data.StationName, Departures: trains}\n\treturn s, nil\n}", "func (this *ProxyLite) AvailableServiceAddress(conn *net.TCPConn) (data *modelSDSResponse.SDSResponseInfo) {\n\tif conn == nil {\n\t\treturn nil\n\t}\n\n\tvar (\n\t\terr error\n\t\tr string\n\t)\n\n\tr, err = Json.StructToJsonString(this.request)\n\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\t_, err = conn.Write([]byte(strings.Join([]string{r, \"\\n\"}, \"\")))\n\n\tif err != nil {\n\t\tlog.Println(\"proxy send request failure \\r\\n\", err)\n\t\treturn nil\n\t}\n\n\t//\tlog.Println(\"proxy send request =\\r\\n\", string(r))\n\n\tvar (\n\t\trecv_len int\n\t\tbuff []byte = make([]byte, bufferLimit)\n\t\tresponse []byte\n\t)\n\n\tfor {\n\t\ttime.Sleep(10 * time.Millisecond)\n\n\t\t//\t\tconn.SetReadDeadline(time.Now().Add(this.requestTimeout))\n\t\trecv_len, err = conn.Read(buff)\n\n\t\tif err != nil 
{\n\t\t\tlog.Println(\"proxy recieve response failure \", err)\n\t\t\tresponse = nil\n\t\t\tbreak\n\t\t}\n\n\t\tresponse = append(response, buff[:recv_len]...)\n\n\t\tif recv_len < bufferLimit {\n\t\t\tbreak\n\t\t}\n\t}\n\n\teventData := string(response)\n\n\t//\tlog.Println(\"eventData==>\", eventData)\n\n\tif eventData == \"EOF\" || (response == nil && len(response) <= 0) {\n\t\t//\tif response == nil && len(response) <= 0 {\n\t\treturn nil\n\t}\n\n\terr = Json.JsonbytesToStruct(response, &data)\n\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn\n}", "func GetStationbyId(r *http.Request, id string) (*Station, error) {\n\t// Does this station id exist?\n\tif station, ok := stationsCache[id]; ok {\n\t\treturn &station, nil\n\t} else {\n\t\treturn nil, errStationNotFound\n\t}\n}", "func (c *Client) Getstationsdata(req *StationDataRequest) (*StationData, error) {\n\tif req == nil {\n\t\treturn nil, fmt.Errorf(\"req is nil\")\n\t}\n\n\tu, err := url.Parse(\"https://api.netatmo.com/api/getstationsdata\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tqueries := url.Values{}\n\tqueries.Add(\"access_token\", c.token)\n\tif req.DeviceID != nil {\n\t\tqueries.Add(\"device_id\", *req.DeviceID)\n\t}\n\tqueries.Add(\"get_favorites\", strconv.FormatBool(req.GetFavorites))\n\n\tu.RawQuery = queries.Encode()\n\n\tvar d StationData\n\tif err := c.get(u, &d); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &d, nil\n}", "func (a *SchedulesAPI) RequestStationSchedules(orig, date string) (res StationSchedulesResponse, err error) {\n\tparams := initSchedulesRequest(\"stnsched\")\n\tparams.options[\"orig\"] = []string{orig}\n\n\tif date != \"\" {\n\t\tparams.options[\"date\"] = []string{date}\n\t}\n\n\terr = params.requestAPI(a, &res)\n\treturn\n}", "func main() {\n\n\tparam := new(requestInfo)\n\tparam.Command = \"GetWeather\" //set default command\n\tvar connectAddr string\n\tmyscanner := bufio.NewScanner(os.Stdin)\n\tfmt.Println(\"Enter IP address for connection to or press 
ENTER to connect to server in Localhost\")\n\tmyscanner.Scan()\n\tconnectAddr = myscanner.Text()\n\tconnectAddr += \":7777\"\n\tfmt.Println(\"Trying to connect to\", connectAddr)\n\tconn, err := net.Dial(\"tcp\", connectAddr)\n\tif err != nil {\n\t\tfmt.Println(\"error connection: \", err)\n\t\treturn\n\t} else {\n\t\tfmt.Println(\"Connected success to\", connectAddr)\n\t}\n\tdefer func() {\n\t\tconn.Close()\n\t\tfmt.Println(\"Disconnecting\")\n\t}()\n\n\tfor {\n\n\t\tcontinueRequesting := GetInfoFromServer(conn, *param)\n\t\tif !continueRequesting {\n\t\t\treturn\n\t\t}\n\t}\n}", "func (c *client) StationInfo(ifi *Interface) ([]*StationInfo, error) {\n\tmsgs, err := c.get(\n\t\tunix.NL80211_CMD_GET_STATION,\n\t\tnetlink.Dump,\n\t\tifi,\n\t\tfunc(ae *netlink.AttributeEncoder) {\n\t\t\tif ifi.HardwareAddr != nil {\n\t\t\t\tae.Bytes(unix.NL80211_ATTR_MAC, ifi.HardwareAddr)\n\t\t\t}\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(msgs) == 0 {\n\t\treturn nil, os.ErrNotExist\n\t}\n\n\tstations := make([]*StationInfo, len(msgs))\n\tfor i := range msgs {\n\t\tif stations[i], err = parseStationInfo(msgs[i].Data); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn stations, nil\n}", "func grabLocation(){\n\t//sends out an immediate request of closest people\n\n}", "func (a *Access) CallService(domain, service string, entityID string) error {\n\tserviceData := struct {\n\t\tEntityID string `json:\"entity_id\"`\n\t}{entityID}\n\n\treturn a.httpPost(\"/api/services/\"+domain+\"/\"+service, serviceData)\n}", "func get_route(params map[string]string) ([]Route,error){\n\t//Retrieve stations\n//\tvar fromStationObjs,toStationObjs []Station\n//\tvar fromStationObj,toStationObj []Station\n//\tvar err error\n//\tvar fromId,toId int\n\n\tvalues := make(url.Values)\n\tif fromStation,ok := params[\"fromStation\"] ; ok {\n\t\t// handle fromStation params\n\t\tfromStationObjs, err := search_stations(fromStation)\n\t\tfromStationObj := 
fromStationObjs[0]\n\t\tfromId := fromStationObj.Id\n\t\tvalues.Add(\"fromStation\",strconv.Itoa(fromId))\n\t\tif err != nil {\n\t\t\treturn nil,err\n\t\t}\n\t} else if lat,ok := params[\"fromLatitute\"]; ok {\n\t\tif lon, ok := params[\"fromLongitute\"]; ok {\n\t\t\tvalues.Add(\"fromLatitute\",lat)\n\t\t\tvalues.Add(\"fromLongitute\",lon)\n\t\t}\n\t}\n\t// same thing for destination...\n\tif toStation,ok := params[\"toStation\"] ; ok {\n\t\t// handle toStation params\n\t\ttoStationObjs, err := search_stations(toStation)\n\t\ttoStationObj := toStationObjs[0]\n\t\ttoId := toStationObj.Id\n\t\tvalues.Add(\"toStation\",strconv.Itoa(toId))\n\t\tif err != nil {\n\t\t\treturn nil,err\n\t\t}\n\t} else if lat,ok := params[\"toLatitute\"]; ok {\n\t\tif lon, ok := params[\"toLongitute\"]; ok {\n\t\t\tvalues.Add(\"toLatitute\",lat)\n\t\t\tvalues.Add(\"toLongitute\",lon)\n\t\t}\n\t}\n\n\t// construct URL query string\n\tif maxTravelTimeFootwayToStation, ok := params[\"maxTravelTimeFootwayToStation\"]; ok {\n\t\tvalues.Add(\"maxTravelTimeFootwayToStation\",maxTravelTimeFootwayToStation)\n\t}\n\n\tif maxTravelTimeFootwayToDestination, ok := params[\"maxTravelTimeFootwayToDestination\"]; ok {\n\t\tvalues.Add(\"maxTravelTimeFootwayToDestination\",maxTravelTimeFootwayToDestination)\n\t}\n\n\t// then construct the request URLs\n\turl := fmt.Sprintf(\"https://www.mvg.de/fahrinfo/api/routing/?%s\",values.Encode())\n\t// request and get response\n\tjsonString,err := request_mvg(url)\n\t// parse the result\n\t\n\tvar jsonResponse map[string][]Route\n\tif err = json.Unmarshal(jsonString,&jsonResponse); err != nil {\n\t\treturn nil,err\n\t}\n\treturn jsonResponse[\"connectionList\"],nil\n}", "func CheckPlaces(date string, route uint, interval uint, cityFrom uint, cityTo uint, timeFrom uint, timeTo uint) error {\n\tclient := http.Client{\n\t\tTimeout: time.Second * 5,\n\t}\n\n\turl := fmt.Sprintf(urlFormat, route, date, cityFrom, cityTo)\n\treq, err := http.NewRequest(http.MethodGet, url, 
nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// initialize buffer for sound (notifications)\n\tbeepBuffer, err := getBeepBuffer()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Println(\"Start check places...\")\n\tlog.Println(\"URL: \", url)\n\tlog.Printf(\"Time: %d - %d\", timeFrom, timeTo)\n\n\tfor {\n\t\tlog.Println(time.Now())\n\t\tres, err := client.Do(req)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tbody, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\tdefer res.Body.Close()\n\n\t\tresp := response{}\n\t\tif err := json.Unmarshal(body, &resp); err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := processResponse(resp, timeFrom, timeTo, beepBuffer); err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\ttime.Sleep(time.Duration(interval) * time.Second)\n\t}\n}", "func RunStap(uuid string)(data string, err error){\n if ndb.Db == nil {\n logs.Error(\"RunStap -- Can't acces to database\")\n return \"\", errors.New(\"RunStap -- Can't acces to database\")\n }\n err = ndb.GetTokenByUuid(uuid); if err!=nil{logs.Error(\"Error loading node token: %s\",err); return \"\",err}\n ipnid,portnid,err := ndb.ObtainPortIp(uuid)\n if err != nil {\n logs.Error(\"RunStap ERROR Obtaining Port and IP for Add a new server into STAP: \"+err.Error())\n return \"\",err\n }\n data, err = nodeclient.RunStap(ipnid,portnid,uuid)\n if err != nil {\n logs.Error(\"Stap run ERROR: \"+err.Error())\n return \"\",err\n }\n return data,nil\n}", "func (c *CaltrainClient) isForToday(day string, ref string) bool {\n\tweekdays, ok := c.dayService[ref]\n\tif !ok {\n\t\treturn false\n\t}\n\tfor _, d := range weekdays {\n\t\tif d == day {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (client *Client) ServiceStatus(request *ServiceStatusRequest) (response *ServiceStatusResponse, err error) {\nresponse = CreateServiceStatusResponse()\nerr = client.DoAction(request, 
response)\nreturn\n}", "func ServiceConnect(w http.ResponseWriter, r *http.Request) {\n\t//params := mux.Vars(r)\n\tstate := r.FormValue(\"state\")\n\tif state != \"random\" {\n\t\tfmt.Println(\"someThing went Wrong!!!\")\n\t\thttp.Redirect(w, r, \"/\", http.StatusTemporaryRedirect)\n\t}\n\tcode := r.FormValue(\"code\")\n\ttokenURL, err := oauthConfigs.Exchange(context.Background(), code)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\thttp.Redirect(w, r, \"/\", http.StatusTemporaryRedirect)\n\t}\n\tTokenURL = tokenURL\n\t//fmt.Println(tokenURL, \"****************************************************\")\n\tif TokenURL.AccessToken != \"\" {\n\t\tresponse, err := http.Get(\"https://www.googleapis.com/oauth2/v2/userinfo?access_token=\" + TokenURL.AccessToken)\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\tdefer response.Body.Close()\n\t\tcontent, err := ioutil.ReadAll(response.Body)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\tfmt.Fprintf(w, string(content))\n\t}\n}", "func TestStationConnQuerier(t *testing.T) {\n\tquerier := New(\n\t\t&mockFinder{},\n\t\tranker.CreateRanker(ranker.SimpleRanker, nil),\n\t\t&mockPlaceLocationQuerier{},\n\t\t&topograph.MockConnectivityMap,\n\t\tmockOrigLocation,\n\t\tmockDestLocation,\n\t\t10,\n\t\t30,\n\t)\n\n\t// verify location\n\tlocationCases := []struct {\n\t\tqueryID entity.PlaceID\n\t\texpectLocation *nav.Location\n\t}{\n\t\t{\n\t\t\titeratortype.OrigLocationID,\n\t\t\tmockOrigLocation,\n\t\t},\n\t\t{\n\t\t\titeratortype.DestLocationID,\n\t\t\tmockDestLocation,\n\t\t},\n\t\t{\n\t\t\t1,\n\t\t\tmockStation1Location,\n\t\t},\n\t\t{\n\t\t\t2,\n\t\t\tmockStation2Location,\n\t\t},\n\t\t{\n\t\t\t3,\n\t\t\tmockStation3Location,\n\t\t},\n\t\t{\n\t\t\titeratortype.InvalidPlaceID,\n\t\t\tnil,\n\t\t},\n\t}\n\n\tfor _, c := range locationCases {\n\t\tactualLocation := querier.GetLocation(c.queryID)\n\t\tif !reflect.DeepEqual(actualLocation, c.expectLocation) {\n\t\t\tt.Errorf(\"Incorrect result for 
place.TopoQuerier.GetLocation, expect %+v but got %+v\\n\", c.expectLocation, actualLocation)\n\t\t}\n\t}\n\n\t// verify connectivity\n\tconnectivityCases := []struct {\n\t\tplaceID entity.PlaceID\n\t\texpectQueryResult []*entity.TransferInfo\n\t}{\n\t\t{\n\t\t\titeratortype.OrigLocationID,\n\t\t\t[]*entity.TransferInfo{\n\t\t\t\t{\n\t\t\t\t\tPlaceWithLocation: entity.PlaceWithLocation{\n\t\t\t\t\t\tID: 3,\n\t\t\t\t\t\tLocation: mockStation3Location,\n\t\t\t\t\t},\n\t\t\t\t\tWeight: &entity.Weight{\n\t\t\t\t\t\tDistance: 4622.08948420977,\n\t\t\t\t\t\tDuration: 208.2022290184581,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPlaceWithLocation: entity.PlaceWithLocation{\n\t\t\t\t\t\tID: 2,\n\t\t\t\t\t\tLocation: mockStation2Location,\n\t\t\t\t\t},\n\t\t\t\t\tWeight: &entity.Weight{\n\t\t\t\t\t\tDistance: 4999.134247893073,\n\t\t\t\t\t\tDuration: 225.18622738257085,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPlaceWithLocation: entity.PlaceWithLocation{\n\t\t\t\t\t\tID: 1,\n\t\t\t\t\t\tLocation: mockStation1Location,\n\t\t\t\t\t},\n\t\t\t\t\tWeight: &entity.Weight{\n\t\t\t\t\t\tDistance: 6310.598332634715,\n\t\t\t\t\t\tDuration: 284.2611861547169,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\titeratortype.DestLocationID,\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t1,\n\t\t\t[]*entity.TransferInfo{\n\t\t\t\t{\n\t\t\t\t\tPlaceWithLocation: entity.PlaceWithLocation{\n\t\t\t\t\t\tID: 2,\n\t\t\t\t\t\tLocation: mockStation2Location,\n\t\t\t\t\t},\n\t\t\t\t\tWeight: &entity.Weight{\n\t\t\t\t\t\tDistance: 1, // hard code value from mock MemoryTopoGraph\n\t\t\t\t\t\tDuration: 1, // hard code value from mock MemoryTopoGraph\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPlaceWithLocation: entity.PlaceWithLocation{\n\t\t\t\t\t\tID: iteratortype.DestLocationID,\n\t\t\t\t\t\tLocation: mockDestLocation,\n\t\t\t\t\t},\n\t\t\t\t\tWeight: &entity.Weight{\n\t\t\t\t\t\tDistance: 4873.817197753869,\n\t\t\t\t\t\tDuration: 
219.54131521413822,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t3,\n\t\t\t[]*entity.TransferInfo{\n\t\t\t\t{\n\t\t\t\t\tPlaceWithLocation: entity.PlaceWithLocation{\n\t\t\t\t\t\tID: iteratortype.DestLocationID,\n\t\t\t\t\t\tLocation: mockDestLocation,\n\t\t\t\t\t},\n\t\t\t\t\tWeight: &entity.Weight{\n\t\t\t\t\t\tDistance: 7083.8672907090095,\n\t\t\t\t\t\tDuration: 319.0931212031085,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t2,\n\t\t\t[]*entity.TransferInfo{\n\t\t\t\t{\n\t\t\t\t\tPlaceWithLocation: entity.PlaceWithLocation{\n\t\t\t\t\t\tID: 3,\n\t\t\t\t\t\tLocation: mockStation3Location,\n\t\t\t\t\t},\n\t\t\t\t\tWeight: &entity.Weight{\n\t\t\t\t\t\tDistance: 2, // hard code value from mock MemoryTopoGraph\n\t\t\t\t\t\tDuration: 2, // hard code value from mock MemoryTopoGraph\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPlaceWithLocation: entity.PlaceWithLocation{\n\t\t\t\t\t\tID: iteratortype.DestLocationID,\n\t\t\t\t\t\tLocation: mockDestLocation,\n\t\t\t\t\t},\n\t\t\t\t\tWeight: &entity.Weight{\n\t\t\t\t\t\tDistance: 7277.313067724465,\n\t\t\t\t\t\tDuration: 327.80689494254347,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, c := range connectivityCases {\n\t\tactualQueryResult := querier.GetConnectedPlaces(c.placeID)\n\t\tif !reflect.DeepEqual(actualQueryResult, c.expectQueryResult) {\n\t\t\tfor _, r := range c.expectQueryResult {\n\t\t\t\tfmt.Printf(\"+++ %v, %v, %v, %v, %v\\n\", r.ID, r.Location.Lat, r.Location.Lon, r.Weight.Distance, r.Weight.Duration)\n\t\t\t}\n\n\t\t\tfor _, r := range actualQueryResult {\n\t\t\t\tfmt.Printf(\"+++ %v, %v, %v, %v, %v\\n\", r.ID, r.Location.Lat, r.Location.Lon, r.Weight.Distance, r.Weight.Duration)\n\t\t\t}\n\t\t\tt.Errorf(\"Incorrect result for place.TopoQuerier.GetConnectedPlaces, expect %#v but got %#v\\n\", c.expectQueryResult, actualQueryResult)\n\t\t}\n\t}\n}", "func ServiceUpdate(txn *cheshire.Txn) {\n\tlog.Println(\"Service checkin registered\")\n\trouterTable, ok := 
Servs.RouterTable(txn.Params().MustString(\"service\", \"\"))\n\tif !ok {\n\t\tcheshire.SendError(txn, 406, \"Service param missing or service not found\")\n\t\treturn\n\t}\n\n\tfor _, e := range routerTable.Entries {\n\t\t_, updatedlocal, updatedremote, err := EntryCheckin(routerTable, e)\n\t\tif err != nil {\n\t\t\tServs.Logger.Printf(\"Error contacting %s -- %s\", e.Id(), err)\n\t\t\tcontinue\n\t\t}\n\t\tif updatedremote {\n\t\t\tServs.Logger.Printf(\"Updated router table on %s\", e.Id())\n\t\t} else if updatedlocal {\n\t\t\tServs.Logger.Printf(\"Updated local router table from %s\", e.Id())\n\n\t\t} else {\n\t\t\tServs.Logger.Printf(\"Router table upto date on %s\", e.Id())\n\t\t}\n\t}\n\t//send a router table update, in case anything changed\n\tres := cheshire.NewResponse(txn)\n\trouterTable, _ = Servs.RouterTable(routerTable.Service)\n\tres.Put(\"router_table\", routerTable.ToDynMap())\n\ttxn.Write(res)\n}", "func (self *discovery) callDiscoveryService(action string, successState bool) error {\n\tlog.Infof(\"[Server] Attempting to %s with the discovery service...\", action)\n\n\tazName, _ := util.GetAwsAZName()\n\tregSize := reg.size()\n\tmachineClass := os.Getenv(\"H2O_MACHINE_CLASS\")\n\n\tendpoints := make([]*register.MultiRequest_Endpoint, regSize)\n\ti := 0\n\tfor _, endpoint := range reg.iterate() {\n\t\tendpoints[i] = &register.MultiRequest_Endpoint{\n\t\t\tName: proto.String(endpoint.Name),\n\t\t\tMean: proto.Int32(endpoint.Mean),\n\t\t\tUpper95: proto.Int32(endpoint.Upper95),\n\t\t\tSubscribe: proto.String(endpoint.Subscribe),\n\t\t}\n\n\t\ti++\n\t}\n\n\tservice := &dscShared.Service{\n\t\tName: proto.String(Name),\n\t\tDescription: proto.String(Description),\n\t\tVersion: proto.Uint64(Version),\n\t\tSource: proto.String(Source),\n\t\tOwnerEmail: proto.String(OwnerEmail),\n\t\tOwnerMobile: proto.String(OwnerMobile),\n\t\tOwnerTeam: proto.String(OwnerTeam),\n\t}\n\n\trequest, err := 
ScopedRequest(\n\t\t\"com.HailoOSS.kernel.discovery\",\n\t\taction,\n\t\t&register.MultiRequest{\n\t\t\tInstanceId: proto.String(InstanceID),\n\t\t\tHostname: proto.String(self.hostname),\n\t\t\tMachineClass: proto.String(machineClass),\n\t\t\tAzName: proto.String(azName),\n\t\t\tService: service,\n\t\t\tEndpoints: endpoints,\n\t\t},\n\t)\n\n\tif err != nil {\n\t\tlog.Warnf(\"[Server] Failed to build request when %sing services\", action)\n\t\treturn err\n\t}\n\n\t// explicitly define timeout, since we're happy to wait\n\tclientOptions := client.Options{\"retries\": 0, \"timeout\": 5 * time.Second}\n\n\trsp := &register.Response{}\n\tif err := client.Req(request, rsp, clientOptions); err != nil {\n\t\tlog.Warnf(\"[Server] Failed to %s services: %v\", action, err)\n\t\treturn err\n\t}\n\n\t// ok -- all done!\n\tself.connected = successState\n\tlog.Infof(\"[Server] Successfully %sed with the hive mind!\", action)\n\n\treturn nil\n}", "func ServiceAvailable(ctx *Context, url string, timeout time.Duration) bool {\n\n\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url)\n\n\tclient := &http.Client{Timeout: timeout}\n\tresp, err := client.Get(url)\n\tif err != nil {\n\t\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url, \"error\", err, \"available\", false)\n\t\tLog(ERROR, ctx, \"ServiceAvailable\", \"url\", url, \"error\", err, \"available\", false)\n\t\treturn false\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url, \"code\", resp.StatusCode, \"available\", false)\n\t\treturn false\n\t}\n\n\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url, \"available\", true)\n\treturn true\n}", "func searchService(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tquery := r.URL.Query()\n\t//Check bbox parameter\n\tbboxParam := query.Get(\"bbox\")\n\tbbox, errBBox := tools.GetBbox(bboxParam)\n\tif errBBox != nil 
{\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(fmt.Sprintf(`{\"message\": \"bbox have to be well formatted (%s)\"}`, errBBox.Error())))\n\t\treturn\n\t}\n\t//Check threshold parameter\n\taltThresholdParam := query.Get(\"altThresholdFeet\")\n\taltThreshold, errAltThreshold := strconv.Atoi(altThresholdParam)\n\tif errAltThreshold != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(fmt.Sprintf(`{\"message\": \"need a number (%s)\"}`, errAltThreshold.Error())))\n\t\treturn\n\t}\n\n\t//Check time windows parameters\n\tlayout := \"2006-01-02T15:04:05\"\n\tfromTimeStampParam := query.Get(\"fromTimeStamp\")\n\tfromTimeStamp, errFromTimeStamp := time.Parse(layout, fromTimeStampParam)\n\n\tif errFromTimeStamp != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(fmt.Sprintf(`{\"message\": \"need a time with layout (%s) - error: %s\"}`, layout, errFromTimeStamp.Error())))\n\t\treturn\n\t}\n\ttoTimeStampParam := query.Get(\"toTimeStamp\")\n\ttoTimeStamp, errToTimeStamp := time.Parse(layout, toTimeStampParam)\n\n\tif errToTimeStamp != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(fmt.Sprintf(`{\"message\": \"need a time with layout (%s) - error: %s\"}`, layout, errToTimeStamp.Error())))\n\t\treturn\n\t}\n\n\t//call logical for searching in DB\n\tsearchSvc := service.New(log)\n\t//TODO: remove db connection at the starting of the startHttp service, and then pass to search service\n\tdata, errSearch := searchSvc.Search(ctx, *&conf.Flighttracker.Postgres, bbox, altThreshold, fromTimeStamp, toTimeStamp)\n\n\tif errSearch != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(fmt.Sprintf(`{\"message\": \"internal server error (%s)\"}`, errAltThreshold.Error())))\n\t\treturn\n\t}\n\n\tparameters := parameters{\n\t\tBbox: bbox,\n\t\tAltThreshold: altThreshold,\n\t\tFromTimeStampParam: fromTimeStamp,\n\t\tToTimeStampParam: toTimeStamp,\n\t}\n\n\tresponse := response{\n\t\tParameters: 
parameters,\n\t\tNbFlight: len(data),\n\t\tData: data,\n\t}\n\n\tresult, errJsonMarshal := json.Marshal(response)\n\tif errJsonMarshal != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(fmt.Sprintf(`{\"message\": \"internal server error (%s)\"}`, errJsonMarshal.Error())))\n\t\treturn\n\t}\n\n\tw.Write(result)\n}", "func (l Location) IsStation() bool {\n\treturn l.Station != nil\n}", "func (s *stations) assignWork(conn *Connector, task Task) {\n\trand.Seed(time.Now().UnixNano())\n\tstart := rand.Intn(len(s.stations))\n\tlastIndx := len(s.stations) - 1\n\ti := start\n\n\tdata, err := json.Marshal(task)\n\tif err != nil {\n\t\tlog.Println(\"Error on marshalling task,\", err)\n\t\treturn\n\t}\n\tfor {\n\n\t\tlog.Println(\"Request to \", \"Station.Need.Work.\"+s.stations[i].ID)\n\n\t\tresp, err := conn.nc.Request(\"Station.Need.Work.\"+s.stations[i].ID, data, 500*time.Millisecond)\n\t\tif err == nil {\n\t\t\tlog.Printf(\"Received confirmation request Station.Need.Work task \\n\\t%v\\n\\t Message: %v\", task, string(resp.Data))\n\n\t\t\tif string(resp.Data) == \"accepted\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error on request Station.Need.Work task \\n\\t%v, \\n\\t%v \", task, err)\n\t\t}\n\n\t\ti++\n\n\t\tif i == lastIndx+1 {\n\t\t\ti = 0\n\t\t}\n\n\t\tif i == start {\n\t\t\t// done, went through all stations\n\t\t\tbreak\n\t\t}\n\t}\n}", "func Endpoint(w http.ResponseWriter, r *http.Request) {\n\ttraceID := appdash.NewRootSpanID()\n\tdecoder := json.NewDecoder(r.Body)\n\tvar t []ClientCallInfo\n\terr := decoder.Decode(&t)\n\tif err != nil {\n\t\tlog.Println(\"erooror\", err)\n\t}\n\tstartTime := time.Now()\n\tfor i := 0; i < len(t); i++ {\n\t\te := NewServerEvent()\n\t\te.ServerRecv = startTime\n\t\te.Route = t[i].InitiatorType\n\t\te.User = \"u\"\n\t\te.Response = ResponseInfo{\n\t\t\tStatusCode: 200,\n\t\t\t//Headers: map[string]string{\"Span-Id\": 
\"0000000000000001/0000000000000002/0000000000000003\"},\n\t\t}\n\t\te.Request = RequestInfo{\n\t\t\tMethod: \"GET\",\n\t\t\tProto: \"HTTP/1.1\",\n\t\t\tURI: t[i].Name,\n\t\t\tHost: \"example.com\",\n\t\t\tHeaders: map[string]string{\"X-Req-Header\": \"a\"},\n\t\t}\n\t\tduration := t[i].EndTime\n\t\tc := int64(duration)\n\t\te.ServerSend = time.Unix(0, ((startTime.UnixNano()/1000000)+c)*1000000)\n\t\ttraceIDto := appdash.NewSpanID(traceID)\n\t\trec := appdash.NewRecorder(traceIDto, collector)\n\t\trec.Name(t[i].Name)\n\t\trec.Event(e)\n\t\trec.Finish()\n\t}\n\t//\ttime.Now() + time.Duration(194.15)*time.Millisecond\n\t// log.Println(\"I am inside Endpoint\", startTime)\n\t// log.Println(\"I am inside Endpoint\", endTime)\n}", "func (c *Client) StationList(ctx context.Context) ([]Station, error) {\n\tresp, err := c.fetch(ctx, stationListEndpoint, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := struct {\n\t\tXMLName xml.Name `xml:\"STATIONS\"`\n\t\tStation []struct {\n\t\t\tName string `xml:\"STATIONNAME\"`\n\t\t\tStation2Char string `xml:\"STATION_2CHAR\"`\n\t\t} `xml:\"STATION\"`\n\t}{}\n\n\terr = xml.Unmarshal(resp, &data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstations := []Station{}\n\tfor _, r := range data.Station {\n\t\tstations = append(stations, Station{\n\t\t\tName: strings.TrimSpace(r.Name),\n\t\t\tID: r.Station2Char,\n\t\t\tAliases: extraStations[r.Station2Char],\n\t\t})\n\t}\n\treturn stations, nil\n}", "func (c *CaltrainClient) getTimetableForStation(stationCode string, dir Direction, day time.Weekday) ([]timetableRouteJourney, error) {\n\tallJourneys := []timetableRouteJourney{}\n\n\tweekday := strings.ToLower(day.String())\n\n\tfor lineId, ttArray := range c.timetable {\n\t\tline, err := c.getLine(lineId)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"failed to get line!\")\n\t\t\tline = Line{Id: \"unknown\", Name: \"Unknown\"}\n\t\t} else {\n\t\t\tlogrus.Debugf(\"getting line %s-%s for station code %s...\", line.Id, line.Name, 
stationCode)\n\t\t}\n\t\tfor _, frame := range ttArray {\n\t\t\t// Check the day reference\n\t\t\tif !c.isForToday(weekday, frame.FrameValidityConditions.AvailabilityCondition.DayTypes.DayTypeRef.Ref) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Checkc the direction\n\t\t\tif !isMyDirection(frame.Name, dir) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// loop through all journeys in this frame\n\t\t\tjourneys := frame.VehicleJourneys.TimetableRouteJourney\n\t\t\tfor _, journey := range journeys {\n\t\t\t\tif isStationInJourney(stationCode, journey) {\n\t\t\t\t\tjourney.Line = line.Name\n\t\t\t\t\tallJourneys = append(allJourneys, journey)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn allJourneys, nil\n}", "func (c *Client) Get(ctx context.Context, p *GetPayload) (res *StationFull, err error) {\n\tvar ires interface{}\n\tires, err = c.GetEndpoint(ctx, p)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn ires.(*StationFull), nil\n}", "func (c *Client) parseStationInfo(b []byte) (*StationInfo, error) {\n\tattrs, err := netlink.UnmarshalAttributes(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar info StationInfo\n\tfor _, a := range attrs {\n\t\tswitch a.Type {\n\t\tcase nl80211.AttrMac:\n\t\t\tcopy(info.HardwareAddr[:], a.Data)\n\n\t\tcase nl80211.AttrStaInfo:\n\t\t\tnattrs, err := netlink.UnmarshalAttributes(a.Data)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif err := (&info).parseAttributes(nattrs); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// nl80211.AttrStaInfo is last attribute we are interested in\n\t\t\treturn &info, nil\n\n\t\tcase nl80211.AttrIfindex:\n\t\t\tifaceIndex := int(nlenc.Uint32(a.Data))\n\t\t\tiface, ok := c.interfaces[ifaceIndex]\n\t\t\tif !ok {\n\t\t\t\tiface.Index = ifaceIndex\n\t\t\t}\n\t\t\tinfo.Iface = *iface\n\n\t\tdefault:\n\t\t\t// The other attributes that are returned here: nl80211.AttrGeneration\n\t\t\t// No need to parse them for now.\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t// No station info found\n\treturn nil, 
os.ErrNotExist\n}", "func (c *CaltrainClient) getTrainRoutesBetweenStations(src, dst Station, day time.Weekday) ([]timetableRouteJourney, error) {\n\tsCode, dCode, err := c.getRouteCodes(src, dst)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get station codes: %w\", err)\n\t}\n\n\tweekday := strings.ToLower(day.String())\n\n\troutes := []timetableRouteJourney{}\n\tfor line, ttArray := range c.timetable {\n\t\tfor _, frame := range ttArray {\n\t\t\t// Check the day reference\n\t\t\tif !c.isForToday(weekday, frame.FrameValidityConditions.AvailabilityCondition.DayTypes.DayTypeRef.Ref) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tjourneys := frame.VehicleJourneys.TimetableRouteJourney\n\t\t\tfor _, journey := range journeys {\n\t\t\t\tif areStationsInJourney([]string{sCode, dCode}, journey) {\n\t\t\t\t\tjourney.Line = line\n\t\t\t\t\troutes = append(routes, journey)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn routes, nil\n}", "func getStation(stationID []int64) map[int64]*WaterQualityCompareDatetimeOutputStation {\n\n\tp := []interface{}{}\n\tq := \"SELECT id, waterquality_station_name FROM m_waterquality_station WHERE \"\n\tfor i, v := range stationID {\n\t\tif i > 0 {\n\t\t\tq += \" OR id=$\" + strconv.Itoa(i+1)\n\t\t} else {\n\t\t\tq += \"id=$\" + strconv.Itoa(i+1)\n\t\t}\n\t\tp = append(p, v)\n\t}\n\n\tdb, err := pqx.Open()\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\trows, err := db.Query(q, p...)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tdefer rows.Close()\n\tmapCompare := make(map[int64]*WaterQualityCompareDatetimeOutputStation)\n\n\tfor rows.Next() {\n\t\tvar (\n\t\t\tstationID sql.NullInt64\n\t\t\tstationName pqx.JSONRaw\n\t\t)\n\t\tdd := &WaterQualityCompareDatetimeOutputStation{}\n\t\trows.Scan(&stationID, &stationName)\n\t\tdd.Station = stationName.JSON()\n\t\tmapCompare[stationID.Int64] = dd\n\t}\n\n\treturn mapCompare\n}", "func (s *SmartContract) query(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {\n\tprov, _ := 
APIstub.GetState(args[0])\n\treturn shim.Success(prov)\n}", "func (config StickerConfig) method() string {\n\treturn \"sendSticker\"\n}", "func RunStapServer(uuid string, server string)(data string, err error){\n if ndb.Db == nil {\n logs.Error(\"RunStapServer -- Can't acces to database\")\n return \"\", errors.New(\"RunStapServer -- Can't acces to database\")\n }\n err = ndb.GetTokenByUuid(uuid); if err!=nil{logs.Error(\"Error loading node token: %s\",err); return \"\",err}\n ipnid,portnid,err := ndb.ObtainPortIp(uuid)\n if err != nil {\n logs.Error(\"RunStapServer ERROR Obtaining Port and IP for Add a new server into STAP: \"+err.Error())\n return \"\",err\n }\n data, err = nodeclient.RunStapServer(ipnid,portnid,server)\n if err != nil {\n logs.Error(\"RunStapServer ERROR: \"+err.Error())\n return \"\",err\n }\n return data,nil\n}", "func Run(res http.ResponseWriter, req *http.Request) {\n\tif val, ok := serviceMap[strings.Split(req.Host, \":\")[0]]; ok {\n\t\tif val.runAction(res, req) {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif val, ok := serviceMap[\"*\"]; ok {\n\t\tif val.runAction(res, req) {\n\t\t\treturn\n\t\t}\n\t}\n\n\tres.WriteHeader(http.StatusBadGateway)\n\tfmt.Fprint(res, \"BAD GATEWAY\")\n}", "func (tm *ServiceTracerouteManager) Run() {\n\t//Multiplexing of data between the running ServiceTraceroutes and external process\n\tfor {\n\t\tselect {\n\t\tcase <-tm.StopChan:\n\t\t\treturn\n\n\t\t//For TCP and UDP the logic is the same:\n\t\t//1 - Get a new packet\n\t\t//2 - Check if the packet is associated to a specific running traceroute\n\t\t//2.1 - If yes, send the packet to the traceroute and go to step 1\n\t\t//3 - If not, check if the packet can be associated with a traceroute waiting for the application flow (missing local port)\n\t\t//3.1 - If yes, redirect the packet to the traceroute, change the flow ID of the traceroute and go to step 1\n\t\t//4 - If there are no running or waiting traceroutes, check if the remote IP of the packet is inside an 
application flow to be studied (service)\n\t\t//4.1 - If yes, try to start a new traceroute (it depends on the checks done before starting the traceroute) and then go to step 1\n\t\t//5 - If not, drop the packet and go to step 1\n\t\tcase tcpPacket := <-tm.TCPChan:\n\t\t\tip1, port1, ip2, port2, err := tm.GetFlowIDFromTCPPacket(&tcpPacket)\n\n\t\t\tif err != nil {\n\t\t\t\t//ERROR: skip packet\n\t\t\t\tif tm.Configuration.Verbose {\n\t\t\t\t\ttm.OutChan <- err.Error()\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tst := tm.GetTracerouteFromFlowID(ip1, port1, ip2, port2)\n\n\t\t\t//If no traceroute have the target flow id\n\t\t\t//Assign it to one with same destination and port\n\t\t\tif st == nil {\n\t\t\t\tst = tm.AssignFlowIDToTraceroute(ip1, port1, ip2, port2)\n\t\t\t}\n\n\t\t\t//Redirect packet\n\t\t\tif st != nil {\n\t\t\t\tst.SniffChannel <- &tcpPacket\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !tm.Configuration.DNSResolver {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar services []string\n\t\t\tvar ipresolutions []string\n\t\t\tdstIp := ip1\n\t\t\tdstPort := port1\n\t\t\tsrcPort := port2\n\t\t\t//No traceroute active with this flow ID\n\t\t\t//Try to resolve IP\n\t\t\tif res, err := tm.DNS.ResolveIP(ip1); err == nil {\n\t\t\t\tservices = res.Names\n\t\t\t\tipresolutions = res.IPResolutions\n\t\t\t\tdstIp = ip1\n\t\t\t\tdstPort = port1\n\t\t\t\tsrcPort = port2\n\t\t\t} else if res, err := tm.DNS.ResolveIP(ip2); err == nil {\n\t\t\t\tservices = res.Names\n\t\t\t\tipresolutions = res.IPResolutions\n\t\t\t\tdstIp = ip2\n\t\t\t\tdstPort = port2\n\t\t\t\tsrcPort = port1\n\t\t\t} else {\n\t\t\t\t//no resolver detected\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttm.StartServiceTraceroute(Tcp, services, ipresolutions, dstIp, dstPort, srcPort)\n\n\t\tcase udpPacket := <-tm.UDPChan:\n\t\t\tip1, port1, ip2, port2, err := tm.GetFlowIDFromUDPPacket(&udpPacket)\n\n\t\t\tif err != nil {\n\t\t\t\t//ERROR: skip packet\n\t\t\t\tif tm.Configuration.Verbose {\n\t\t\t\t\ttm.OutChan <- 
err.Error()\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tst := tm.GetTracerouteFromFlowID(ip1, port1, ip2, port2)\n\n\t\t\t//If no traceroute have the target flow id\n\t\t\t//Assign it to one with same destination and port\n\t\t\tif st == nil {\n\t\t\t\tst = tm.AssignFlowIDToTraceroute(ip1, port1, ip2, port2)\n\t\t\t}\n\n\t\t\t//Redirect packet\n\t\t\tif st != nil {\n\t\t\t\tst.SniffChannel <- &udpPacket\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !tm.Configuration.DNSResolver {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar services []string\n\t\t\tvar ipresolutions []string\n\t\t\tdstIp := ip1\n\t\t\tdstPort := port1\n\t\t\tsrcPort := port2\n\t\t\t//No traceroute active with this flow ID\n\t\t\t//Try to resolve IP\n\t\t\tif res, err := tm.DNS.ResolveIP(ip1); err == nil {\n\t\t\t\tservices = res.Names\n\t\t\t\tipresolutions = res.IPResolutions\n\t\t\t\tdstIp = ip1\n\t\t\t\tdstPort = port1\n\t\t\t\tsrcPort = port2\n\t\t\t} else if res, err := tm.DNS.ResolveIP(ip2); err == nil {\n\t\t\t\tservices = res.Names\n\t\t\t\tipresolutions = res.IPResolutions\n\t\t\t\tdstIp = ip2\n\t\t\t\tdstPort = port2\n\t\t\t\tsrcPort = port1\n\t\t\t} else {\n\t\t\t\t//no resolver detected\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttm.StartServiceTraceroute(Udp, services, ipresolutions, dstIp, dstPort, srcPort)\n\n\t\t//For ICMP, obtain the flow id of the dropped packet\n\t\t//If no traceroutes are associated to the flow id, drop the packet\n\t\tcase icmpPacket := <-tm.ICMPChan:\n\t\t\tdstIp, dstPort, srcIp, srcPort, err := tm.GetFlowIDFromICMPPacket(&icmpPacket)\n\n\t\t\tif err != nil {\n\t\t\t\t//ERROR: skip packet\n\t\t\t\tif tm.Configuration.Verbose {\n\t\t\t\t\ttm.OutChan <- err.Error()\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tst := tm.GetTracerouteFromFlowID(dstIp, dstPort, srcIp, srcPort)\n\n\t\t\tif st == nil || st.Configuration.RemoteIP.String() != dstIp.String() {\n\t\t\t\t//ERROR: skip packet\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tst.SniffChannel <- &icmpPacket\n\t\t}\n\t}\n}", "func (c 
*Client) Update(ctx context.Context, p *UpdatePayload) (res *StationFull, err error) {\n\tvar ires interface{}\n\tires, err = c.UpdateEndpoint(ctx, p)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn ires.(*StationFull), nil\n}", "func (qb *HuxQueryBuilder) QueryStation(station string) *HuxQueryBuilder {\n\tqb.queryStation = station\n\treturn qb\n}", "func (s *StreamService) StreamCall(srv pb.Control_StreamCallServer) error {\n var driverId int32\n var seq int32\n if name, err := srv.Recv(); err != nil {\n fmt.Printf(\"Recv From Driver err: %v\", err)\n return err\n } else {\n fmt.Printf(\"Driver driverId[%v] login\", name.DriverId)\n driverId = name.DriverId\n seq = name.Seq\n }\n\n if fmtinStatus[driverId] == true {\n fmt.Printf(\"Driver driverId[%v] AlReady login\", driverId)\n return status.Errorf(codes.AlreadyExists, \"AlReady fmtin!\")\n }\n\n fmtinStatus[driverId] = true\n defer func(){fmtinStatus[driverId] = false}()\n\n for {\n var val string\n select {\n case val = <- chans[driverId].ch:\n fmt.Printf(\"Driver driverId[%v] Get Action [%s]!\", driverId, val)\n case <-time.After(3 * time.Second):\n fmt.Printf(\"Driver driverId[%v] Timeout And Continue!\", driverId)\n err := srv.Send(&pb.Response{\n DriverId: driverId,\n Seq: seq,\n Ping: \"PING\",\n })\n \n if err != nil {\n fmt.Printf(\"Clinet err: %v\", err)\n return err\n }\n\n continue\n }\n\n err := srv.Send(&pb.Response{\n DriverId: driverId,\n Seq: seq,\n Data: \"Driver Do Action: \" + val ,\n })\n\n if err != nil {\n fmt.Printf(\"Clinet err: %v\", err)\n return err\n }\n\n res, err := srv.Recv()\n if err != nil {\n fmt.Printf(\"Recv From Clinet err: %v\", err)\n return err\n }\n\n if seq != res.Seq {\n fmt.Printf(\"Seq %d != %d \", seq, res.Seq)\n }\n\n seq++\n fmt.Printf(\"Recv From Driver: %v\", res)\n }\n}", "func checkrequestStatus(d *schema.ResourceData, config Config, requestID string, timeOut int) error {\n\ttimeout := time.After(time.Duration(timeOut) * time.Second)\n\tfor {\n\t\tselect 
{\n\t\tcase <-time.After(1 * time.Second):\n\t\t\tstatus, state, err := checkServiceRequestStatus(config, requestID)\n\t\t\tif err == nil {\n\t\t\t\tif state == \"finished\" && status == \"Ok\" {\n\t\t\t\t\tlog.Println(\"[DEBUG] Service order added SUCCESSFULLY\")\n\t\t\t\t\td.SetId(requestID)\n\t\t\t\t\treturn nil\n\t\t\t\t} else if status == \"Error\" {\n\t\t\t\t\tlog.Println(\"[ERROR] Failed\")\n\t\t\t\t\treturn fmt.Errorf(\"[Error] Failed execution\")\n\t\t\t\t} else {\n\t\t\t\t\tlog.Println(\"[DEBUG] Request state is :\", state)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase <-timeout:\n\t\t\tlog.Println(\"[DEBUG] Timeout occured\")\n\t\t\treturn fmt.Errorf(\"[ERROR] Timeout\")\n\t\t}\n\t}\n}", "func (s *SmartContract) requestConsultant(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {\n\n\tif len(args) != 15 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 15\")\n\t}\n\n\tvar sow = SOW{\n\t\tDateCreated: args[1],\n\t\tTermStartDate: args[2],\n\t\tTermEndDate: args[3],\n\t\tRequireFullTime: args[4],\n\t\tRatePerHour: args[5],\n\t\tStatus: args[6],\n\t\tClientId: args[7],\n\t\tName: args[8],\n\t\tConsultantId: args[9],\n\t\tSOWId: args[10],\n\t\tDescription: args[11],\n\t\tRequirement1: args[12],\n\t\tRequirement2: args[13],\n\t\tRequirement3: args[14],\n\t}\n\n\tsowAsBytes, _ := json.Marshal(sow)\n\terr := APIstub.PutState(args[0], sowAsBytes)\n\n\tif err != nil {\n\t\treturn shim.Error(fmt.Sprintf(\"Failed to request consultant: %s\", args[0]))\n\t}\n\n\treturn shim.Success(sowAsBytes)\n}", "func (bf *boardFilter) acceptService(service ldb.Service) bool {\n\t// Original requirement, must have an RID\n\tif service.RID == \"\" {\n\t\treturn false\n\t}\n\n\t// remove terminating services\n\tif bf.terminated && bf.atStation(service.Destination) {\n\t\treturn false\n\t}\n\n\tif bf.callAt && !bf.callsAt(service.CallingPoints, bf.callAtTiplocs) {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func StopStap(uuid 
string)(data string, err error){\n if ndb.Db == nil {\n logs.Error(\"StopStap -- Can't acces to database\")\n return \"\", errors.New(\"StopStap -- Can't acces to database\")\n }\n err = ndb.GetTokenByUuid(uuid); if err!=nil{logs.Error(\"Error loading node token: %s\",err); return \"\",err}\n ipnid,portnid,err := ndb.ObtainPortIp(uuid)\n if err != nil {\n logs.Error(\"StopStap ERROR Obtaining Port and IP for Add a new server into STAP: \"+err.Error())\n return \"\",err\n }\n data, err = nodeclient.StopStap(ipnid,portnid,uuid)\n if err != nil {\n logs.Error(\"Stap stop ERROR: \"+err.Error())\n return \"\",err\n }\n return data,nil\n}", "func DoCall(url string) (SPres, error) {\n\tdata := SPres{}\n\trepo, err := sparql.NewRepo(\"http://rwgsparql:9999/blazegraph/namespace/ecrwg/sparql\")\n\tif err != nil {\n\t\tlog.Printf(\"query make repo: %v\\n\", err)\n\t\treturn data, err\n\t}\n\n\tf := bytes.NewBufferString(queries)\n\tbank := sparql.LoadBank(f)\n\n\tq, err := bank.Prepare(\"orgInfo\", struct{ URL string }{url})\n\tif err != nil {\n\t\tlog.Printf(\"query bank prepair: %v\\n\", err)\n\t\treturn data, err\n\t}\n\n\tres, err := repo.Query(q)\n\tif err != nil {\n\t\tlog.Printf(\"query call: %v\\n\", err)\n\t\treturn data, err\n\t}\n\n\tbindingsTest2 := res.Bindings() // map[string][]rdf.Term\n\n\t// This whole aspect seems verbose... 
there has to be a better Go way to do this check?\n\tdata.Description = \"No description provided by facility\"\n\tif len(bindingsTest2) > 0 {\n\t\tdata.Repository = bindingsTest2[\"repository\"][0].String()\n\t\tif len(bindingsTest2[\"description\"]) > 0 {\n\t\t\tdata.Description = bindingsTest2[\"description\"][0].String()\n\t\t}\n\t\tif len(bindingsTest2[\"name\"]) > 0 {\n\t\t\tdata.Name = bindingsTest2[\"name\"][0].String()\n\t\t}\n\t\tif len(bindingsTest2[\"url\"]) > 0 {\n\t\t\tdata.URL = bindingsTest2[\"url\"][0].String()\n\t\t}\n\t\tif len(bindingsTest2[\"logo\"]) > 0 {\n\t\t\tdata.Logo = bindingsTest2[\"logo\"][0].String()\n\t\t}\n\t\tif len(bindingsTest2[\"contact_name\"]) > 0 {\n\t\t\tdata.ContactName = bindingsTest2[\"contact_name\"][0].String()\n\t\t}\n\t\tif len(bindingsTest2[\"contact_email\"]) > 0 {\n\t\t\tdata.ContactEmail = bindingsTest2[\"contact_email\"][0].String()\n\t\t}\n\t\tif len(bindingsTest2[\"contact_url\"]) > 0 {\n\t\t\tdata.ContactURL = bindingsTest2[\"contact_url\"][0].String()\n\t\t}\n\t\tif len(bindingsTest2[\"contact_role\"]) > 0 {\n\t\t\tdata.ContactRole = bindingsTest2[\"contact_role\"][0].String()\n\t\t}\n\t}\n\n\treturn data, err\n}", "func (self *SinglePad) PollStatus() {\n self.Object.Call(\"pollStatus\")\n}", "func (s *svc) Solicit() error {\n\t// list all the routes\n\troutes, err := s.table.List()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// build events to advertise\n\tevents := make([]*router.Event, len(routes))\n\tfor i := range events {\n\t\tevents[i] = &router.Event{\n\t\t\tType: router.Update,\n\t\t\tTimestamp: time.Now(),\n\t\t\tRoute: routes[i],\n\t\t}\n\t}\n\n\tadvert := &router.Advert{\n\t\tId: s.opts.Id,\n\t\tType: router.RouteUpdate,\n\t\tTimestamp: time.Now(),\n\t\tTTL: time.Duration(router.DefaultAdvertTTL),\n\t\tEvents: events,\n\t}\n\n\tselect {\n\tcase s.advertChan <- advert:\n\tcase <-s.exit:\n\t\tclose(s.advertChan)\n\t\treturn nil\n\t}\n\n\treturn nil\n}", "func (f RouterFunc) Run(cfg 
config.ServiceConfig) { f(cfg) }", "func (uem *UyuniEventMapper) scall(function string, args ...interface{}) interface{} {\n\tvar res interface{}\n\n\tif uem._session == \"\" {\n\t\tlog.Println(\"SCALL has no session:\", uem._session)\n\t\tuem.auth()\n\t}\n\n\t_args := []interface{}{uem._session}\n\t_args = append(_args, args...)\n\n\tres, err := uem.call(function, _args...)\n\trecall := err != nil\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t}\n\tif recall {\n\t\tres, err = uem.call(function, _args...)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"XML-RPC crash:\", err.Error())\n\t\t}\n\t}\n\n\treturn res\n}", "func (al *AccessLayer) GetTimesForStations(from, to, now string, numTimes int) ([]*m.Time, error) {\n\tvar times []*m.Time\n\tt := newRTDTime(now)\n\t// NOTE: Day might be something that's passed in\n\tday := al.getServiceIDFromDay(0)\n\trows, err := al.getStationTimes(from, to, t.toStringRTDTime(), day, numTimes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttimes = append(times, parseStationTimeRows(rows)...)\n\n\tif len(times) < numTimes {\n\t\tday = al.getServiceIDFromDay(24 * time.Hour)\n\t\trows, err := al.getStationTimes(from, to, t.toStringNextDay(), day, numTimes-len(times))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttimes = append(times, parseStationTimeRows(rows)...)\n\n\t}\n\n\tif len(times) < numTimes {\n\t\tal.logger.Warningf(fmt.Sprintf(\"%d times requested, only %d provided\", numTimes, len(times)))\n\t}\n\treturn times, nil\n}", "func (s Server) ListStations(req *nb.StationListRequest, stream nb.C1InterfaceService_ListStationsServer) error {\n\tif req.Subscribe {\n\t\treturn fmt.Errorf(\"subscribe not yet implemented\")\n\t}\n\n\tif req.Ecgi == nil {\n\t\tch := make(chan sb.ControlUpdate)\n\t\tif req.Subscribe {\n\t\t\tif err := manager.GetManager().SubscribeControlUpdates(ch); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tif err := manager.GetManager().ListControlUpdates(ch); err != nil {\n\t\t\t\treturn 
err\n\t\t\t}\n\t\t}\n\n\t\tfor update := range ch {\n\t\t\tswitch update.GetMessageType() {\n\t\t\tcase sb.MessageType_CELL_CONFIG_REPORT:\n\t\t\t\tcellConfigReport := update.GetCellConfigReport()\n\t\t\t\tecgi := nb.ECGI{\n\t\t\t\t\tEcid: cellConfigReport.GetEcgi().GetEcid(),\n\t\t\t\t\tPlmnid: cellConfigReport.GetEcgi().GetPlmnId(),\n\t\t\t\t}\n\t\t\t\tbaseStationInfo := nb.StationInfo{\n\t\t\t\t\tEcgi: &ecgi,\n\t\t\t\t}\n\t\t\t\tbaseStationInfo.MaxNumConnectedUes = cellConfigReport.GetMaxNumConnectedUes()\n\t\t\t\tif err := stream.Send(&baseStationInfo); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"list stations for specific ecgi not yet implemented\")\n\t}\n\treturn nil\n}", "func SuriRunning() (running bool) {\n var err error\n cmd, err := utils.GetKeyValueString(\"suriRunning\", \"cmd\")\n if err != nil {\n logs.Error(\"suriRunning Error getting data from main.conf\")\n }\n param, err := utils.GetKeyValueString(\"suriRunning\", \"param\")\n if err != nil {\n logs.Error(\"suriRunning Error getting data from main.conf\")\n }\n command, err := utils.GetKeyValueString(\"suriRunning\", \"command\")\n if err != nil {\n logs.Error(\"suriRunning Error getting data from main.conf\")\n }\n\n out, err := exec.Command(command, param, cmd).Output()\n if err == nil {\n // if strings.Contains(string(out), \"suricata\") {\n spid := regexp.MustCompile(\"[0-9]+\")\n pid := spid.FindAllString(string(out), 1)\n if len(pid) <= 0 || pid == nil {\n return false\n }\n logs.Info(\"Suricata PID -> %s\", pid[0])\n return true\n // }\n }\n logs.Error(\"Suricata isn't running \" + string(out))\n return false\n}", "func main() {\n\n\tfmt.Println(\"Hello\")\n\tworkingmyassoff.Working(\"SId\")\n\n\tconst name string = \"Sid\"\n\n\tworkingmyassoff.IsthisWorking(name == \"Sid\")\n\n\tworkingmyassoff.IsthisWorking(name == \"Owl\")\n}", "func GetStatusSniffer(uuid string) (running bool, status bool) {\n owlh, err := 
ndb.GetStapServerInformation(uuid)\n if err != nil {\n logs.Error(\"Error retrieving stap server information\")\n }\n logs.Info(\"Checking Sniffer status for uuid: \" + uuid)\n\n running, pid, cpu, mem := GetStatusSnifferSSH(uuid)\n cpuStatus := GetStatusCPU(owlh, cpu, uuid)\n memStatus := GetStatusMEM(owlh, mem, uuid)\n storageStatus := GetStatusStorage(owlh, uuid)\n\n logs.Alert(\"Checking \" + owlh[\"name\"] + \" - \" + owlh[\"ip\"] + \" - PID:\" + pid + \" CPU: \" + strconv.FormatBool(cpuStatus) + \" MEM: \" + strconv.FormatBool(memStatus) + \" STORAGE: \" + strconv.FormatBool(storageStatus))\n if cpuStatus && memStatus && storageStatus {\n return running, true\n }\n return running, false\n}", "func (c DeviceManager) Run() {\n\tactive := false\n\tmanual := false\n\tstatus := false\n\tsensorTemp, err := tempsensor.GetTemp()\n\ttempsensor.CheckError(err)\n\tif err == nil {\n\t\tactive, manual = c.evaluateState(sensorTemp, calderadevice.CalderaActive)\n\t}\n\n\terr = calderadevice.SetState(active)\n\tcalderadevice.CheckError(err)\n\tcalderaTemp, err := calderadevice.GetTemp()\n\tcalderadevice.CheckError(err)\n\n\tstatus = !calderadevice.CalderaError && !tempsensor.SensorError\n\n\tif calderadevice.CalderaActive != active {\n\t\tlog.Println(\"devicemanager:: new caldera device state \", active)\n\t\tnotifier.NotifyCalderaState(active, manual)\n\t\tcalderadevice.CalderaActive = active\n\t}\n\n\tif int(time.Now().Unix())-lastExternalWeatherRequest > externalWeatherPeriod {\n\t\tresp, err := http.Get(\"https://api.openweathermap.org/data/2.5/weather?units=metric&q=Fuenlabrada,es&appid=62b6faef972916a25c2420b17af38d40\")\n\t\tif err == nil {\n\t\t\tlastExternalWeatherRequest = int(time.Now().Unix())\n\t\t\tvar info map[string]interface{}\n\t\t\tjson.NewDecoder(resp.Body).Decode(&info)\n\t\t\tmain := info[\"main\"].(map[string]interface{})\n\t\t\tLastExternalWeatherTemp = main[\"temp\"].(float64)\n\t\t} else {\n\t\t\tlog.Println(\"devicemanager:: cannot get external 
temp\", err)\n\t\t}\n\t\tdefer resp.Body.Close()\n\t}\n\te := msgbroker.NewEvent(sensorTemp, calderaTemp, LastExternalWeatherTemp, status, active, manual)\n\tmsgbroker.Publish(e)\n\tled.Update(e)\n}", "func (o *OnCall) WhoIsOnCall() string {\n\tt := time.Now()\n\tyear, month, day := t.Date()\n\tt1 := time.Date(year, month, day, o.OffShiftStart, 0, 0, 0, t.Location())\n\tt2 := time.Date(year, month, day, o.OffShiftStop, 0, 0, 0, t.Location())\n\n\tonDuty := o.DefaultOnDuty\n\n\t// NightShift == DefaultOnDuty\n\tif inTimeSpan(t1, t2, t) {\n\t\treturn onDuty\n\t}\n\n\tres, err := http.Get(o.SheetsURL + o.SheetID + \"/export?&exportFormat=csv&&gid=0\")\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t\treturn onDuty\n\t}\n\tdefer res.Body.Close()\n\n\treader := csv.NewReader(res.Body)\n\tcsvData, err := reader.ReadAll()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t\treturn onDuty\n\t}\n\n\tfor _, row := range csvData {\n\t\tif strings.EqualFold(row[2], \"oncall\") {\n\t\t\tonDuty = row[1]\n\t\t}\n\t}\n\n\treturn onDuty\n}", "func (g *Graph) GetStationByID(id int) *Station {\n\tif st, ok := g.Nodes[id]; ok {\n\t\treturn st\n\t}\n\n\treturn nil\n}", "func TestStationServiceGetNextStationCode(t *testing.T) {\n\t// case 1\n\tStationRepo.On(\"GetNextSeq\").Once().Return(\"1\", nil)\n\tres1, res2 := StationSrv.getNextStationCode()\n\ttestutil.PlayUnexported(t, res1, res2).Match(\"S00001\", nil)\n\n\t// case 2\n\tStationRepo.On(\"GetNextSeq\").Once().Return(\"\", errors.New(\"fuck\"))\n\tres1, res2 = StationSrv.getNextStationCode()\n\ttestutil.PlayUnexported(t, res1, res2).Match(\"\", errors.New(\"fuck\"))\n}", "func onGetVisits(w http.ResponseWriter, r *http.Request) {\n\tvalues := r.URL.Query()\n\tshortURL, ok := values[\"shortURL\"]\n\tkey, ok1 := values[\"key\"]\n\tuserName, ok2 := values[\"userName\"]\n\tif (ok1 == true && ok2 == true) && (len(key) >= 1) && (len(userName) >= 1) { // validating values\n\t\tif ValidateAPIKey(key[0], userName[0]) == false 
{\n\t\t\tresponseToCLient(w, \"Wrong or expired key\")\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tresponseToCLient(w, \"Please check the request parameters\")\n\t\treturn\n\t}\n\tif ok {\n\t\tw.WriteHeader(http.StatusOK)\n\t\tif len(shortURL) >= 1 {\n\t\t\tcorrectURL, err := url.ParseRequestURI(shortURL[0])\n\t\t\tif err != nil {\n\t\t\t\tresponseToCLient(w, \"Please enter the correct and complete url, example - http://google.com\")\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"host \" + correctURL.Host)\n\t\t\t\tif correctURL.Host != \"mydomain.com\" { // checking whether this url is from our domain\n\t\t\t\t\tresponseToCLient(w, \"Not the correct short link provided by mydomain.com\")\n\t\t\t\t} else {\n\t\t\t\t\ta := correctURL.Path[1:] // removing first '/' from rest of the path\n\t\t\t\t\tstr, _ := getCounter(a)\n\t\t\t\t\tresponseToCLient(w, \"Total Visits : \"+str)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tresponseToCLient(w, \"Please check the request parameters\")\n\t\t}\n\t} else {\n\t\tresponseToCLient(w, \"No shortURL found, Please check the request parameters\")\n\t}\n\n}", "func main() {\n\tsrvs := retrievePodSRVs(\"TYPE_YOUR_HEADLESS_SERVICE_NAME_HERE\")\n\tname, err := os.Hostname()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor _, srv := range srvs {\n\t\t// If service is local, just respond directly\n\t\tif strings.Contains(srv.Target, string(name)) {\n\t\t\t// print pod's hostname\n\t\t\tfmt.Println(\"You've hit \" + name)\n\t\t\t// Otherwise, if pod is remote, issue a GET request and print response\n\t\t} else {\n\t\t\trespBody := getPodData(srv.Target)\n\t\t\tfmt.Println(respBody)\n\t\t}\n\t}\n}", "func getStationsForGridpoint(httpClient *http.Client, httpUserAgentString string, apiURLString string, gridpoint Gridpoint) ([]Station, error) {\n\trespBody, err := 
doAPIRequest(\n\t\thttpClient,\n\t\thttpUserAgentString,\n\t\tapiURLString,\n\t\tfmt.Sprintf(\n\t\t\tgetStationsForGridpointEndpointURLStringFmt,\n\t\t\tgridpoint.WFO,\n\t\t\tgridpoint.GridX,\n\t\t\tgridpoint.GridY,\n\t\t),\n\t\tnil,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newStationsFromStationsRespBody(respBody)\n}", "func NewStation(station *wifi.StationInfo) Station {\n\tc := Station{\n\t\tWifi: *station,\n\t}\n\tif tbl, err := arp.Table(); err == nil {\n\t\tfor _, e := range tbl {\n\t\t\tif e.MAC == station.HardwareAddr {\n\t\t\t\tc.IP = e.IP\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn c\n}", "func (s *keyvisualService) run() {\n\t// TODO: make the ticker consistent with heartbeat interval\n\tticker := time.NewTicker(time.Minute)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-s.ctx.Done():\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tcluster := s.svr.GetRaftCluster()\n\t\t\tif cluster == nil || !serverapi.IsServiceAllowed(s.svr, defaultRegisterAPIGroupInfo) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ts.scanRegions(cluster)\n\t\t\t// TODO: implements the stats\n\t\t}\n\t}\n}", "func call(srv string, rpcname string,\n\targs interface{}, reply interface{}) bool {\n\tc, err := rpc.DialHTTP(\"tcp\", srv)\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tfmt.Println(err)\n\treturn false\n}", "func (s *service) Subscribe(stationID int, listener Listener) {\n\ts.fetcher.Subscribe(stationID, listener)\n}", "func getAirPollutionData() {\n\tticker := time.NewTicker(30 * time.Second)\n\tdefer ticker.Stop()\n\n\tfor ; true; <-ticker.C {\n\t\tserRes := newServerResponse(3)\n\t\t// Retry getting data\n\t\tfor serRes.retries > 0 {\n\t\t\tserRes.res, serRes.err = http.Get(\"https://opendata.epa.gov.tw/ws/Data/ATM00625/?$format=json\")\n\t\t\tif serRes.err != nil {\n\t\t\t\tlog.Println(serRes.err)\n\t\t\t\tserRes.retries--\n\t\t\t\ttime.Sleep(3 * 
time.Second)\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif serRes.res != nil {\n\t\t\thelpRead(serRes.res)\n\t\t} else {\n\t\t\tfmt.Println(\"Server response is empty !!\")\n\t\t}\n\t}\n}", "func ForwardPacket(packet *common.Pkt) bool {\n // Print packet contents (disable this before submitting your code)\n //printPkt(packet)\n\n as := \"\" ; ip := \"\"\n for k := 0; k < len(packet.SCION.SrcAS) ; k++ {\n as = as + strconv.Itoa(int(packet.SCION.SrcAS[k]))\n }\n for k := 0; k < len(packet.SCION.SrcHost); k++ {\n ip = ip + strconv.Itoa(int(packet.SCION.SrcHost[k]))\n }\n\n IA := SourceIA{ SrcISD: packet.SCION.SrcISD, SrcAS: as }\n Addr := SourceAddr{ Sip: ip, IA: IA }\n difference := time.Now().Sub(time_interval)\n\n //check if the time interval expired to limit rate\n if difference > TIME_INTERVAL {\n removePendingAddrs()\n removePendingIAs() \n time_interval = time.Now()\n }\n\n //check if Addr and IA is registered\n entryAddr , found := FilteredAddrs[Addr]\n entryIA , foundIA := FilteredIAs[IA]\n\n if found{\n //is the registered address a filtered one?\n if entryAddr.Done{\n if time.Since(entryAddr.TimeStamp) > ADDR_BAD_TIME {\n delete(FilteredAddrs, Addr) //remove the Addr from the ones we registered\n }\n //if its not done\n }else{\n //check if the threshold is reached\n if entryAddr.Amount >= ADDR_THRESHOLD{\n entryAddr.TimeStamp = time.Now()\n entryAddr.Done = true //label as filtered addr\n FilteredAddrs[Addr] = entryAddr\n }else{\n entryAddr.Amount = entryAddr.Amount + 1\n FilteredAddrs[Addr] = entryAddr\n } \n }\n }else{\n //add a big amount of time so that the 113 check doesnt pass\n timeAddr := time.Now().Add(time.Hour * 24 * 10)\n FilteredAddrs[Addr] = FilterSourceAddr{Amount: 1, TimeStamp: timeAddr, Done: false}\n }\n \n //is the IA part registered? 
\n if foundIA{\n if entryIA.Amount >= IA_THRESHOLD{\n entryIA.Done = true\n FilteredIAs[IA] = entryIA\n }else{\n //threshold not reached yet check if we can increase amount\n address := FilteredAddrs[Addr]\n if !address.Done{\n entryIA.Amount = entryIA.Amount + 1\n FilteredIAs[IA] = entryIA\n }\n }\n }else{\n FilteredIAs[IA] = FilterSourceIA{Amount: 1, Done: false}\n }\n \n dangerous_addr := FilteredAddrs[Addr]\n dangerous_IA := FilteredIAs[IA]\n\n if dangerous_addr.Done || dangerous_IA.Done{\n return false\n }else{\n return true\n\n }\n\n // Decision\n // | true -> forward packet\n // | false -> drop packet\n //return true\n}", "func areStationsInJourney(stops []string, journey timetableRouteJourney) bool {\n\tfor _, s := range stops {\n\t\tif !isStationInJourney(s, journey) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func Send_Info(Project, Module, Operation, Status, BlockId, AccountId string ){\n ipserv := \"http://18.223.111.231:5898\"\n url := ipserv+\"/api/add/\"+Project+\"*\"+Module+\"*\"+Operation+\"*\"+Status+\"*\"+BlockId+\"*\"+AccountId\n\tre,errr:= http.NewRequest(\"GET\", url, nil)\n\t\n\tif errr!=nil{\n fmt.Println(errr.Error()) \n return\n\t}\n\n\tres, erd := http.DefaultClient.Do(re)\n\tif erd!=nil{\n fmt.Println(erd.Error()) \n return\n\t}\n\n\tdefer res.Body.Close()\n}", "func doService(serviceRequest string) {\n\tif serviceRequest == \"update-youtube-dl\" {\n\t\tupdateYoutubeDl(GetConfVal(\"youtubeDownloader\"))\n\t} else {\n\t\tlogErr(\"Unknown service request %s\", serviceRequest)\n\t}\n}", "func call(srv string, name string, args interface{}, reply interface{}) bool {\n\tc, err := rpc.Dial(\"unix\", srv)\n\tif err != nil {\n\t\terr1 := err.(*net.OpError)\n\t\tif err1.Err != syscall.ENOENT && err1.Err != syscall.ECONNREFUSED {\n\t\t\tfmt.Printf(\"paxos Dial() failed: %v\\n\", err1)\n\t\t}\n\t\treturn false\n\t}\n\tdefer c.Close()\n\n\t// fmt.Printf(\"Call srv:%s name:%s\\n\", srv, name)\n\terr = c.Call(name, args, reply)\n\t// 
fmt.Printf(\"After Call %s, err:%v, rpl:%v\\n\", srv, err, reply)\n\n\tif err == nil {\n\t\treturn true\n\t}\n\treturn false\n}", "func (c *CamSodaChecker) checkEndpoint(endpoint string) (onlineModels map[string]StatusKind, images map[string]string, err error) {\n\tclient := c.clientsLoop.nextClient()\n\tonlineModels = map[string]StatusKind{}\n\timages = map[string]string{}\n\tresp, buf, err := onlineQuery(endpoint, client, c.Headers)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"cannot send a query, %v\", err)\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn nil, nil, fmt.Errorf(\"query status, %d\", resp.StatusCode)\n\t}\n\tdecoder := json.NewDecoder(ioutil.NopCloser(bytes.NewReader(buf.Bytes())))\n\tvar parsed camSodaOnlineResponse\n\terr = decoder.Decode(&parsed)\n\tif err != nil {\n\t\tif c.Dbg {\n\t\t\tLdbg(\"response: %s\", buf.String())\n\t\t}\n\t\treturn nil, nil, fmt.Errorf(\"cannot parse response, %v\", err)\n\t}\n\tif !parsed.Status {\n\t\treturn nil, nil, fmt.Errorf(\"API error, %s\", parsed.Error)\n\t}\n\tfor _, m := range parsed.Results {\n\t\tmodelID := strings.ToLower(m.Username)\n\t\tonlineModels[modelID] = StatusOnline\n\t\timages[modelID] = m.Thumb\n\t}\n\treturn\n}", "func parseStationInfo(b []byte) (*StationInfo, error) {\n\tattrs, err := netlink.UnmarshalAttributes(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar info StationInfo\n\tfor _, a := range attrs {\n\t\tswitch a.Type {\n\t\tcase unix.NL80211_ATTR_MAC:\n\t\t\tinfo.HardwareAddr = net.HardwareAddr(a.Data)\n\t\tcase unix.NL80211_ATTR_STA_INFO:\n\t\t\tnattrs, err := netlink.UnmarshalAttributes(a.Data)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif err := (&info).parseAttributes(nattrs); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// Parsed the necessary data.\n\t\t\treturn &info, nil\n\t\t}\n\t}\n\n\t// No station info found\n\treturn nil, os.ErrNotExist\n}", "func sendService(uuid string, service *msg.Service) error 
{\n\tlog.Println(log.INFO, fmt.Sprintf(\"adding %s (%s) to skydns\", uuid, service.Name))\n\tif err := skydns.Add(uuid, service); err != nil {\n\t\t// ignore erros for conflicting uuids and start the heartbeat again\n\t\tif err != client.ErrConflictingUUID {\n\t\t\treturn err\n\t\t}\n\t\tlog.Printf(log.INFO, \"service already exists for %s. Resetting params.TTL.\", uuid)\n\t\tupdateService(uuid, params.TTL)\n\t}\n\tlog.Println(log.INFO, fmt.Sprintf(\"added %s (%s) successfully\", uuid, service.Name))\n\tgo heartbeat(uuid)\n\treturn nil\n}", "func (p *DirectBuy) Run() error {\n\tlog := p.log\n\tlog.Info(\"Start direct buy service...\")\n\tdefer func() {\n\t\tlog.Info(\"Closed direct buy service\")\n\t\tp.done <- struct{}{}\n\t}()\n\n\tvar wg sync.WaitGroup\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tp.runUpdateStatus()\n\t}()\n\n\twg.Wait()\n\n\treturn nil\n}", "func (tm *ServiceTracerouteManager) CheckExistanceServiceTracerouteExperiment(protocol string, remoteIp net.IP, remotePort int, localPort int) bool {\n\texists := false\n\n\tkey := tm.GetMapKey(protocol, remoteIp, remotePort, localPort)\n\ttm.runningTracesMutex.Lock()\n\n\tif _, ok := tm.RunningServiceTraceroutes[key]; ok {\n\t\texists = true\n\t}\n\n\ttm.runningTracesMutex.Unlock()\n\n\treturn exists\n}", "func RunServiceExample(nsxManager, nsxUser, nsxPassword string, debug bool) {\n\t//\n\t// Create NSXClient object.\n\t//\n\tnsxclient := gonsx.NewNSXClient(nsxManager, nsxUser, nsxPassword, true, debug)\n\n\t//\n\t// Get All Services.\n\t//\n\t// Create api object.\n\tgetAllAPI := service.NewGetAll(\"globalroot-0\")\n\n\t// make api call.\n\terr := nsxclient.Do(getAllAPI)\n\n\t// check if there were any errors\n\tif err != nil {\n\t\tfmt.Println(\"Error: \", err)\n\t}\n\n\t// check the status code and proceed accordingly.\n\tif getAllAPI.StatusCode() == 200 {\n\t\tAllApplications := getAllAPI.GetResponse().Applications\n\t\tfor _, service := range AllApplications 
{\n\t\t\tfmt.Printf(\"objectId: %-20s name: %-20s\\n\", service.ObjectID, service.Name)\n\t\t}\n\t} else {\n\t\tfmt.Println(\"Status code:\", getAllAPI.StatusCode())\n\t\tfmt.Println(\"Response: \", getAllAPI.ResponseObject())\n\t}\n\n\t//\n\t// Get Single Service\n\t//\n\t// Get All ( we re-utilize the GetAll object from above here )\n\t// check the status code and proceed accordingly.\n\tif getAllAPI.StatusCode() == 200 {\n\t\tservice := getAllAPI.GetResponse().FilterByName(\"OVP_test1\")\n\t\tif service.ObjectID != \"\" {\n\t\t\tfmt.Println(service)\n\t\t} else {\n\t\t\tfmt.Println(\"Not found!\")\n\t\t}\n\t} else {\n\t\tfmt.Println(\"Status code:\", getAllAPI.StatusCode())\n\t\tfmt.Println(\"Response: \", getAllAPI.ResponseObject())\n\t}\n\n\t//\n\t// Create single service.\n\t//\n\tcreateAPI := service.NewCreate(\"globalroot-0\", \"test\", \"desc\", \"TCP\", \"8080\")\n\terr = nsxclient.Do(createAPI)\n\tif err != nil {\n\t\tfmt.Println(\"Error:\", err)\n\t}\n\n\tif createAPI.StatusCode() == 201 {\n\t\tapplicationID := createAPI.ResponseObject()\n\t\tfmt.Println(\"Service created successfully.\")\n\t\tfmt.Println(\"objectId:\", applicationID)\n\t} else {\n\t\tfmt.Println(\"Failed to created the service!\")\n\t\tfmt.Println(createAPI.ResponseObject())\n\t}\n\n\t// UPDATE\n\t//\n\t// Updating a single service.\n\t// Get list of all applications. 
Search through looking for application match.\n\t// Update the attribute/s of the service.\n\tgetAllAPI = service.NewGetAll(\"globalroot-0\")\n\n\t// make api call.\n\terr = nsxclient.Do(getAllAPI)\n\n\t// check if there were any errors\n\tif err != nil {\n\t\tfmt.Println(\"Error: \", err)\n\t}\n\n\t// check the status code and proceed accordingly.\n\tif getAllAPI.StatusCode() != 200 {\n\t\tfmt.Printf(\"Status code: %v, Response: %v\\n\", getAllAPI.StatusCode(), getAllAPI.ResponseObject())\n\t}\n\n\t// Get All ( we re-utilize the GetAll object from above here )\n\t// check the status code and proceed accordingly.\n\tif getAllAPI.StatusCode() == 200 {\n\t\tservice := getAllAPI.GetResponse().FilterByName(\"test\")\n\t\tif service.ObjectID != \"\" {\n\t\t\tfmt.Println(\"Found service: \", service.ObjectID, service.Name)\n\t\t} else {\n\t\t\tfmt.Println(\"Not found!\")\n\t\t}\n\t} else {\n\t\tfmt.Println(\"Status code:\", getAllAPI.StatusCode())\n\t\tfmt.Println(\"Response: \", getAllAPI.ResponseObject())\n\t}\n\n\t// Change the name of the service from test to test_https and change the port to TCP/443.\n\tserviceToModify := getAllAPI.GetResponse().FilterByName(\"test\")\n\tserviceToModify.Name = \"test_https\"\n\tmodifyElement := service.Element{ApplicationProtocol: \"TCP\", Value: \"443\"}\n\tserviceToModify.Element = []service.Element{modifyElement}\n\tupdateAPI := service.NewUpdate(serviceToModify.ObjectID, serviceToModify)\n\n\terr = nsxclient.Do(updateAPI)\n\tif err != nil {\n\t\tfmt.Println(\"Error: \", err)\n\t}\n\n\tif updateAPI.StatusCode() == 200 {\n\t\tnewObject := updateAPI.GetResponse()\n\t\tfmt.Println(\"Service updated successfully.\")\n\t\tfmt.Println(\"objectId:\", newObject.ObjectID)\n\t} else {\n\t\tfmt.Println(\"Failed to update the service!\")\n\t\tfmt.Println(updateAPI.ResponseObject())\n\t}\n\n\t//\n\t// Deleting a single service.\n\t//\n\n\t// Let's refresh the getAllAPI call, so that it has the last created data.\n\terr = 
nsxclient.Do(getAllAPI)\n\tif err != nil {\n\t\tfmt.Println(\"Error: \", err)\n\t}\n\n\tapplicationIDToDelete := getAllAPI.GetResponse().FilterByName(\"test_https\")\n\tdeleteAPI := service.NewDelete(applicationIDToDelete.ObjectID)\n\terr = nsxclient.Do(deleteAPI)\n\tif err != nil {\n\t\tfmt.Println(\"Error:\", err)\n\t}\n\n\tif deleteAPI.StatusCode() == 200 {\n\t\tfmt.Println(\"Service deleted successfully.\")\n\t} else {\n\t\tfmt.Println(\"Failed to delete the service!\")\n\t\tfmt.Println(\"Status code:\", deleteAPI.StatusCode())\n\t\tfmt.Println(\"Response:\", deleteAPI.ResponseObject())\n\t}\n\n}", "func exampleRoutine(chargePointID string, handler *CentralSystemHandler) {\n\t// Wait for some time\n\ttime.Sleep(2 * time.Second)\n\t// Reserve a connector\n\treservationID := 42\n\tclientIdTag := \"l33t\"\n\tconnectorID := 1\n\texpiryDate := types.NewDateTime(time.Now().Add(1 * time.Hour))\n\tcb1 := func(confirmation *reservation.ReserveNowConfirmation, err error) {\n\t\tif err != nil {\n\t\t\tlogDefault(chargePointID, reservation.ReserveNowFeatureName).Errorf(\"error on request: %v\", err)\n\t\t} else if confirmation.Status == reservation.ReservationStatusAccepted {\n\t\t\tlogDefault(chargePointID, confirmation.GetFeatureName()).Infof(\"connector %v reserved for client %v until %v (reservation ID %d)\", connectorID, clientIdTag, expiryDate.FormatTimestamp(), reservationID)\n\t\t} else {\n\t\t\tlogDefault(chargePointID, confirmation.GetFeatureName()).Infof(\"couldn't reserve connector %v: %v\", connectorID, confirmation.Status)\n\t\t}\n\t}\n\te := centralSystem.ReserveNow(chargePointID, cb1, connectorID, expiryDate, clientIdTag, reservationID)\n\tif e != nil {\n\t\tlogDefault(chargePointID, reservation.ReserveNowFeatureName).Errorf(\"couldn't send message: %v\", e)\n\t\treturn\n\t}\n\t// Wait for some time\n\ttime.Sleep(1 * time.Second)\n\t// Cancel the reservation\n\tcb2 := func(confirmation *reservation.CancelReservationConfirmation, err error) {\n\t\tif err != 
nil {\n\t\t\tlogDefault(chargePointID, reservation.CancelReservationFeatureName).Errorf(\"error on request: %v\", err)\n\t\t} else if confirmation.Status == reservation.CancelReservationStatusAccepted {\n\t\t\tlogDefault(chargePointID, confirmation.GetFeatureName()).Infof(\"reservation %v canceled successfully\", reservationID)\n\t\t} else {\n\t\t\tlogDefault(chargePointID, confirmation.GetFeatureName()).Infof(\"couldn't cancel reservation %v\", reservationID)\n\t\t}\n\t}\n\te = centralSystem.CancelReservation(chargePointID, cb2, reservationID)\n\tif e != nil {\n\t\tlogDefault(chargePointID, reservation.ReserveNowFeatureName).Errorf(\"couldn't send message: %v\", e)\n\t\treturn\n\t}\n\t// Wait for some time\n\ttime.Sleep(5 * time.Second)\n\t// Get current local list version\n\tcb3 := func(confirmation *localauth.GetLocalListVersionConfirmation, err error) {\n\t\tif err != nil {\n\t\t\tlogDefault(chargePointID, localauth.GetLocalListVersionFeatureName).Errorf(\"error on request: %v\", err)\n\t\t} else {\n\t\t\tlogDefault(chargePointID, confirmation.GetFeatureName()).Infof(\"current local list version: %v\", confirmation.ListVersion)\n\t\t}\n\t}\n\te = centralSystem.GetLocalListVersion(chargePointID, cb3)\n\tif e != nil {\n\t\tlogDefault(chargePointID, localauth.GetLocalListVersionFeatureName).Errorf(\"couldn't send message: %v\", e)\n\t\treturn\n\t}\n\t// Wait for some time\n\ttime.Sleep(5 * time.Second)\n\tconfigKey := \"MeterValueSampleInterval\"\n\tconfigValue := \"10\"\n\t// Change meter sampling values time\n\tcb4 := func(confirmation *core.ChangeConfigurationConfirmation, err error) {\n\t\tif err != nil {\n\t\t\tlogDefault(chargePointID, core.ChangeConfigurationFeatureName).Errorf(\"error on request: %v\", err)\n\t\t} else if confirmation.Status == core.ConfigurationStatusNotSupported {\n\t\t\tlogDefault(chargePointID, confirmation.GetFeatureName()).Warnf(\"couldn't update configuration for unsupported key: %v\", configKey)\n\t\t} else if confirmation.Status 
== core.ConfigurationStatusRejected {\n\t\t\tlogDefault(chargePointID, confirmation.GetFeatureName()).Warnf(\"couldn't update configuration for readonly key: %v\", configKey)\n\t\t} else {\n\t\t\tlogDefault(chargePointID, confirmation.GetFeatureName()).Infof(\"updated configuration for key %v to: %v\", configKey, configValue)\n\t\t}\n\t}\n\te = centralSystem.ChangeConfiguration(chargePointID, cb4, configKey, configValue)\n\tif e != nil {\n\t\tlogDefault(chargePointID, localauth.GetLocalListVersionFeatureName).Errorf(\"couldn't send message: %v\", e)\n\t\treturn\n\t}\n\n\t// Wait for some time\n\ttime.Sleep(5 * time.Second)\n\t// Trigger a heartbeat message\n\tcb5 := func(confirmation *remotetrigger.TriggerMessageConfirmation, err error) {\n\t\tif err != nil {\n\t\t\tlogDefault(chargePointID, remotetrigger.TriggerMessageFeatureName).Errorf(\"error on request: %v\", err)\n\t\t} else if confirmation.Status == remotetrigger.TriggerMessageStatusAccepted {\n\t\t\tlogDefault(chargePointID, confirmation.GetFeatureName()).Infof(\"%v triggered successfully\", core.HeartbeatFeatureName)\n\t\t} else if confirmation.Status == remotetrigger.TriggerMessageStatusRejected {\n\t\t\tlogDefault(chargePointID, confirmation.GetFeatureName()).Infof(\"%v trigger was rejected\", core.HeartbeatFeatureName)\n\t\t}\n\t}\n\te = centralSystem.TriggerMessage(chargePointID, cb5, core.HeartbeatFeatureName)\n\tif e != nil {\n\t\tlogDefault(chargePointID, remotetrigger.TriggerMessageFeatureName).Errorf(\"couldn't send message: %v\", e)\n\t\treturn\n\t}\n\n\t// Wait for some time\n\ttime.Sleep(5 * time.Second)\n\t// Trigger a diagnostics status notification\n\tcb6 := func(confirmation *remotetrigger.TriggerMessageConfirmation, err error) {\n\t\tif err != nil {\n\t\t\tlogDefault(chargePointID, remotetrigger.TriggerMessageFeatureName).Errorf(\"error on request: %v\", err)\n\t\t} else if confirmation.Status == remotetrigger.TriggerMessageStatusAccepted {\n\t\t\tlogDefault(chargePointID, 
confirmation.GetFeatureName()).Infof(\"%v triggered successfully\", firmware.GetDiagnosticsFeatureName)\n\t\t} else if confirmation.Status == remotetrigger.TriggerMessageStatusRejected {\n\t\t\tlogDefault(chargePointID, confirmation.GetFeatureName()).Infof(\"%v trigger was rejected\", firmware.GetDiagnosticsFeatureName)\n\t\t}\n\t}\n\te = centralSystem.TriggerMessage(chargePointID, cb6, firmware.DiagnosticsStatusNotificationFeatureName)\n\tif e != nil {\n\t\tlogDefault(chargePointID, remotetrigger.TriggerMessageFeatureName).Errorf(\"couldn't send message: %v\", e)\n\t\treturn\n\t}\n}", "func Run(ctx context.Context, lc *client.LogClient, sv *ct.SignatureVerifier, st APICallSTHWriter, l *ctlog.Log, period time.Duration) {\n\tglog.Infof(\"%s: %s: started with period %v\", l.URL, logStr, period)\n\n\tschedule.Every(ctx, period, func(ctx context.Context) {\n\t\tgetCheckStoreSTH(ctx, lc, sv, st, l)\n\t})\n\n\tglog.Infof(\"%s: %s: stopped\", l.URL, logStr)\n}", "func listen(iface string, filter string, fn func(packet gopacket.Packet) []*osc.Message) error {\n\n client := osc.NewClient(ip, port)\n\n handle, err := pcap.OpenLive(iface, 1600, true, pcap.BlockForever)\n if err != nil {\n return err\n }\n\n if filter != \"\" {\n err := handle.SetBPFFilter(filter)\n if err != nil {\n return err\n }\n }\n\n src := gopacket.NewPacketSource(handle, handle.LinkType())\n for packet := range src.Packets() {\n msgs := fn(packet)\n for _, msg := range msgs {\n client.Send(msg)\n }\n }\n\n return nil\n}", "func queryService() (resp *http.Response, err error) {\n\t// Query service via HTTP API for confirmation in console.\n\tendpointQuery = fmt.Sprintf(\"http://localhost:%s/v1/catalog/service/%s\", port, consul.Name)\n\treq, err := http.NewRequest(\"GET\", endpointQuery, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient = NewClient()\n\tresp, err = client.Do(req)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func checkresult(action string, photolng 
float64, citylng float64) bool{\n\treturn (photolng <= citylng && action == \"West\") || (photolng >= citylng && action == \"East\")\n}", "func gatherInfo(c *gin.Context) (msg structs.AccessCampaignNotify) {\n\tsessions.SetSession(c)\n\ttid := sessions.GetTid(c)\n\tlogCtx := log.WithFields(log.Fields{\n\t\t\"tid\": tid,\n\t})\n\tr := c.Request\n\theaders, err := json.Marshal(r.Header)\n\tif err != nil {\n\t\tlogCtx.Error(\"cannot marshal headers\")\n\t\theaders = []byte(\"{}\")\n\t}\n\tmsg = structs.AccessCampaignNotify{\n\t\tTid: tid,\n\t\tIP: r.Header.Get(\"X-Forwarded-For\"),\n\t\tUserAgent: r.UserAgent(),\n\t\tReferer: r.Referer(),\n\t\tUrlPath: r.URL.String(),\n\t\tMethod: r.Method,\n\t\tHeaders: string(headers),\n\t\tSupported: true,\n\t\tCountryCode: cnf.Service.CountryCode,\n\t\tOperatorCode: cnf.Service.OperatorCode,\n\t}\n\n\tlogCtx.WithFields(log.Fields{\n\t\t\"urlpath\": c.Request.URL.Path + \"?\" + c.Request.URL.RawQuery,\n\t\t\"url\": r.URL.String(),\n\t}).Debug(\"log\")\n\n\t// but for now we use get parameter to pass msisdn\n\t// and there not always could be the correct IP adress\n\t// so, if operator code or country code not found\n\t// we can set them via msisdn\n\tvar ok bool\n\tif msg.Msisdn, ok = c.GetQuery(\"msisdn\"); ok && len(msg.Msisdn) >= 5 {\n\t\tlogCtx.WithFields(log.Fields{\n\t\t\t\"msisdn\": msg.Msisdn,\n\t\t}).Debug(\"took from get params\")\n\t} else {\n\t\tmsg.Msisdn = sessions.GetFromSession(\"msisdn\", c)\n\t\tif len(msg.Msisdn) >= 5 {\n\t\t\tlogCtx.WithFields(log.Fields{\n\t\t\t\t\"msisdn\": msg.Msisdn,\n\t\t\t}).Debug(\"took from session\")\n\t\t}\n\t}\n\tif len(msg.Msisdn) < 5 {\n\t\tmsg.Error = \"Msisdn not found\"\n\t}\n\n\tIPs := getIPAdress(c.Request)\n\tmsg.IP = strings.Join(IPs, \", \")\n\n\treturn msg\n}", "func handle_uri_satellites(w http.ResponseWriter, r *http.Request) {\n\n\tif logutil.GPS_DEBUG > 0 {\n\t\t// Log the request before handling it\n\t\tlogutil.LogRestRequest(r.Method, r.URL.Path)\n\t}\n\n\t// Accept 
only GET requests to this URI\n\tif r.Method != \"GET\" {\n\t\trespond_with_error(w, http.StatusMethodNotAllowed, r.Method, r.URL.Path)\n\t\treturn\n\t}\n\n\t// Add the standard JSON header to the response\n\t// add_json_header(w) // respond_with_success does this\n\n\t// Prevent this data from being cached (client must always revalidate)\n\tadd_no_cache_header(w)\n\n\t// Construct the JSON response string\n\tresponse_string := string(cache.GetSatellitesAsJSON())\n\n\t// Send the response (and log it)\n\trespond_with_success(w, http.StatusOK, response_string)\n}", "func send(\n\tclient *http.Client,\n\tappservice config.ApplicationService,\n\ttxnID int,\n\ttransaction []byte,\n) (err error) {\n\t// PUT a transaction to our AS\n\t// https://matrix.org/docs/spec/application_service/r0.1.2#put-matrix-app-v1-transactions-txnid\n\taddress := fmt.Sprintf(\"%s/transactions/%d?access_token=%s\", appservice.URL, txnID, url.QueryEscape(appservice.HSToken))\n\treq, err := http.NewRequest(\"PUT\", address, bytes.NewBuffer(transaction))\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer checkNamedErr(resp.Body.Close, &err)\n\n\t// Check the AS received the events correctly\n\tif resp.StatusCode != http.StatusOK {\n\t\t// TODO: Handle non-200 error codes from application services\n\t\treturn fmt.Errorf(\"non-OK status code %d returned from AS\", resp.StatusCode)\n\t}\n\n\treturn nil\n}", "func (job *JOB) Execute(ctx context.Context) error {\n\t//Host timezone set Asia/Singapore\n\treq, err := httpclient.MakeRequest(\n\t\thttpclient.Method(\"GET\"),\n\t\thttpclient.URL(\n\t\t\thttpclient.Schema(\"https\"),\n\t\t\thttpclient.Host(\"api.data.gov.sg\"),\n\t\t\thttpclient.URI(\"/v1/transport/carpark-availability\"),\n\t\t),\n\t\thttpclient.Query(\"date_time\", time.Now().Format(time.RFC3339)),\n\t)\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"make 
request\")\n\t}\n\treturn job.Client.Execute(ctx, req, job)\n}", "func RunService(ser Server) {\n\tfor {\n\t\tcon := Accept(ser)\n\t\tgo ser.HandleRequest(con)\n\t}\n}", "func (handler WebserviceHandler) ChangeTrainingStatus(res http.ResponseWriter, req *http.Request) {\n\thandler.Logger.Info(\"Received \" + req.Method + \" request at path: \" + req.URL.Path)\n\n\t// Setting headers for CORS\n\tres.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tres.Header().Set(\"Access-Control-Allow-Headers\", \"Authorization\")\n\tif req.Method == http.MethodOptions {\n\t\treturn\n\t}\n\n\t// Retrieving the ID from the url\n\tvar id string\n\tvar toTrain bool\n\tvar err error\n\n\t// Checking request, verifying if ID is in query params\n\thandler.Logger.Debug(\"Starting to check the ID\")\n\tok := checkID(handler, res, req)\n\tif !ok {\n\t\treturn\n\t}\n\tid = req.URL.Query().Get(\"id\")\n\thandler.Logger.Debug(\"Request correct, ID inserted as query params\")\n\n\t// Checking request, verifying if toTrain is in query params\n\thandler.Logger.Debug(\"Starting to check the toTrain\")\n\ttoTrainString, ok := req.URL.Query()[\"toTrain\"]\n\tif !ok || len(toTrainString) != 1 {\n\t\tif !ok {\n\t\t\thandler.Logger.Info(\"Error retrieving param toTrain. Returning BadRequest\")\n\t\t} else if len(toTrainString) == 0 {\n\t\t\thandler.Logger.Info(\"No toTrain param as query params. Returning BadRequest\")\n\t\t} else {\n\t\t\thandler.Logger.Info(\"More than one toTrain param in query params. Returning BadRequest\")\n\t\t}\n\t\tres.WriteHeader(http.StatusBadRequest)\n\t}\n\ttoTrain, err = strconv.ParseBool(req.URL.Query().Get(\"toTrain\"))\n\tif err != nil {\n\t\thandler.Logger.Info(\"Error transforming toTrain from string to bool. 
Returning Internal Server Error\")\n\t\tres.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\thandler.Logger.Debug(\"Starting to check the toTrain\")\n\n\thandler.Logger.Info(\"ID: \" + id + \"\\ttoTrain: \" + strconv.FormatBool(toTrain))\n\n\t// Executing change of training status\n\thandler.Logger.Debug(\"Starting to execute the change of training status\")\n\terr = handler.KnowledgeBaseInteractor.ChangeTrainingStatus(id, toTrain)\n\tif err != nil {\n\t\tres.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\thandler.Logger.Debug(\"Change of training status executed\")\n\n\t// Preparing response\n\tres.WriteHeader(200)\n\thandler.Logger.Info(\"Returning response\")\n\treturn\n}", "func (adf ADF) IsStationary() bool {\n\treturn adf.Statistic < adf.PValueThreshold\n}", "func (this *satcluster) getLocation() vectors.Vector2 {\n\tif exestate.OnError(this) {\n\t\treturn vectors.GetEmptyVector2()\n\t}\n\n\t//Interseccion entre dos satelites\n\tpointA, pointB, state := geometry.GetCirclesIntersections(this.getAt(0), this.getAt(1))\n\n\tif !state.IsOk() {\n\t\tthis.RegisterState(state)\n\t\treturn vectors.GetEmptyVector2()\n\t}\n\n\tif pointA.IsEmpty() && pointB.IsEmpty() {\n\t\tthis.RegisterState(exestate.ControlledError(\"No se pudo triengular, no hay interseccion (1) (satellities.satcluster.getLocation)\"))\n\t\treturn vectors.GetEmptyVector2()\n\t}\n\n\tvar intersectsA = (pointA.IsEmpty() == false)\n\tvar intersectsB = (pointB.IsEmpty() == false)\n\n\t//\tequals, _ := vectors.Equals(pointA, pointB)\n\n\t//Para los restantes satelites se verifican las distancias a los puntos de la interseccion\n\tif this.count() > 2 {\n\t\tfor i := 2; i < this.count(); i++ {\n\n\t\t\tintersectsA = intersectsA && !(math.Abs(pointA.DistanceTo(this.getAt(i).Pos)-this.getAt(i).Distance) > 0.005)\n\t\t\tintersectsB = intersectsB && !(math.Abs(pointB.DistanceTo(this.getAt(i).Pos)-this.getAt(i).Distance) > 0.005)\n\n\t\t\tif intersectsA == false && intersectsB == 
false {\n\t\t\t\tthis.RegisterState(exestate.ControlledError(\"No se pudo triangular, no hay interseccion (2) (satellities.satcluster.getLocation)\"))\n\t\t\t\treturn vectors.GetEmptyVector2()\n\t\t\t}\n\t\t}\n\t}\n\n\tif intersectsA && intersectsB {\n\t\tthis.RegisterState(exestate.ControlledError(\"No se pudo triangular, mas de un punto (satellities.satcluster.getLocation)\"))\n\t\treturn vectors.GetEmptyVector2()\n\t}\n\n\tif intersectsA {\n\t\tpointA.Round()\n\t\treturn pointA\n\t} else if intersectsB {\n\t\tpointB.Round()\n\t\treturn pointB\n\t} else {\n\t\tthis.RegisterState(exestate.ControlledError(\"No se pudo triangular\"))\n\t\treturn vectors.GetEmptyVector2()\n\t}\n}", "func (s *SmartContract) querySOW(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {\n\n\tif len(args) != 1 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 1\")\n\t}\n\n\tsowAsBytes, _ := APIstub.GetState(args[0])\n\tif sowAsBytes == nil {\n\t\treturn shim.Error(\"Could not locate sow\")\n\t}\n\n\treturn shim.Success(sowAsBytes)\n}" ]
[ "0.59751016", "0.59400904", "0.5820535", "0.5803581", "0.57665825", "0.56083244", "0.5489674", "0.5329984", "0.5267891", "0.5153535", "0.5140189", "0.510179", "0.50758654", "0.50346726", "0.5016249", "0.4981579", "0.4963392", "0.4916773", "0.4902284", "0.48805675", "0.4861543", "0.48396632", "0.4836251", "0.48145786", "0.4810532", "0.480305", "0.47518635", "0.47420445", "0.47016454", "0.46977612", "0.4675834", "0.46723077", "0.46463734", "0.4644288", "0.4620322", "0.46182466", "0.46084192", "0.46070564", "0.46033937", "0.45795822", "0.45793802", "0.45665005", "0.45644954", "0.45635924", "0.4553102", "0.4548556", "0.45392185", "0.4535235", "0.45295736", "0.4528249", "0.4520338", "0.45185873", "0.4509602", "0.44494635", "0.44492507", "0.44390717", "0.44384792", "0.44354564", "0.44321066", "0.44319934", "0.4426359", "0.44205374", "0.44130546", "0.44117776", "0.44106126", "0.44081768", "0.438514", "0.4379656", "0.43650484", "0.43598795", "0.4349828", "0.43469647", "0.433875", "0.43385392", "0.43384263", "0.43289196", "0.43148223", "0.42878175", "0.42847034", "0.42841682", "0.42830998", "0.42774668", "0.42746717", "0.42716023", "0.42709416", "0.42663273", "0.42653015", "0.42632386", "0.42628586", "0.42604595", "0.42584145", "0.42563146", "0.42559123", "0.42540127", "0.42531335", "0.42506424", "0.4248065", "0.42444703", "0.42436084", "0.42405245", "0.42302155" ]
0.0
-1
Add a tiploc to the result so that it will be included in the tiploc map
func (bf *boardFilter) addTiploc(tiploc string) { if tiploc != "" { bf.tiplocs[tiploc] = nil } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *LocationMap) Add(t *Location) {\n\tif _, ok := r.m[t.Tiploc]; !ok {\n\t\tr.m[t.Tiploc] = t\n\t}\n}", "func (bd *BlockDAG) updateTips(b *Block) {\n\tif bd.tips == nil {\n\t\tbd.tips = NewHashSet()\n\t\tbd.tips.AddPair(b.GetHash(), b)\n\t\treturn\n\t}\n\tfor k := range bd.tips.GetMap() {\n\t\tblock := bd.getBlock(&k)\n\t\tif block.HasChildren() {\n\t\t\tbd.tips.Remove(&k)\n\t\t}\n\t}\n\tbd.tips.AddPair(b.GetHash(), b)\n}", "func (q LocationTemperatureQueryResult) Add(temp float64, city string, y int, mo int, d int) {\n\tq[city][y][mo][d] = append(q[city][y][mo][d], temp)\n}", "func (f Factory) WithTips(tip, tipper string) Factory {\n\tparsedTips, err := sdk.ParseCoinsNormalized(tip)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tf.tip = &tx.Tip{\n\t\tTipper: tipper,\n\t\tAmount: parsedTips,\n\t}\n\treturn f\n}", "func (t *Tangle) Tips() []*site.Site {\n\tkeys := []*site.Site{}\n\tfor h := range t.tips {\n\t\ts := t.Get(h)\n\t\tif s != nil {\n\t\t\tkeys = append(keys, s.Site)\n\t\t}\n\t}\n\treturn keys\n}", "func (resp Response) AddTags(newTags map[string]string) (*influx.Point, error) {\r\n\r\n\t// Pull off the current tags\r\n\ttags := resp.Point.Tags()\r\n\r\n\t// Add the new tags to the current tags\r\n\tfor tag, tagValue := range newTags {\r\n\t\ttags[tag] = tagValue\r\n\t}\r\n\r\n\t// Make a new point\r\n\tfields, err := resp.Point.Fields()\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\r\n\t}\r\n\tpt, err := influx.NewPoint(resp.Point.Name(), tags, fields, resp.Point.Time())\r\n\r\n\t// panic on error\r\n\tif err != nil {\r\n\t\tlog.Fatalf(\"Error adding tags to response point\\n point: %v\\n tags:%v\\n error: %v\\n\", resp.Point, newTags, err)\r\n\t}\r\n\r\n\treturn pt, nil\r\n}", "func (c *Client) TipLog(names []string, lineCount int) error {\n\tsgs, err := c.getServiceList(names, false)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tc.tipLogServicesOrGroups(sgs, lineCount)\n\n\treturn nil\n}", "func (s Searcher) 
AddLocatable(locatable Locatable) {\n\tlocatable_on_grid := newLocatableOnGrid(locatable, s.lat_tiles, s.lng_tiles)\n\ts.locatable_map.AddLocatableOnGrid(&locatable_on_grid)\n}", "func (pool *TxPool) GetAllTips() map[common.Hash]types.Txi {\n\tpool.mu.RLock()\n\tdefer pool.mu.RUnlock()\n\n\treturn pool.tips.txs\n}", "func addTagsToPoint(point *influxdb.Point, tags map[string]string) {\n\tif point.Tags == nil {\n\t\tpoint.Tags = tags\n\t} else {\n\t\tfor k, v := range tags {\n\t\t\tpoint.Tags[k] = v\n\t\t}\n\t}\n}", "func (v *View) AddTimestamp(t Timestamp) {\n\tv.tMutex.Lock()\n\tdefer v.tMutex.Unlock()\n\n\tif _, ok := v.Timestamps[t.ID]; !ok {\n\t\tv.Timestamps[t.ID] = Entry{\n\t\t\tOpinions: Opinions{t.Opinion},\n\t\t\tTimestamp: clock.SyncedTime(),\n\t\t}\n\t\treturn\n\t}\n\n\tentry := v.Timestamps[t.ID]\n\tentry.Opinions = append(entry.Opinions, t.Opinion)\n\tv.Timestamps[t.ID] = entry\n}", "func (t *TipSetIndexer) TipSet(ctx context.Context, ts *types.TipSet) error {\n\tctx, span := global.Tracer(\"\").Start(ctx, \"Indexer.TipSet\")\n\tif span.IsRecording() {\n\t\tspan.SetAttributes(label.String(\"tipset\", ts.String()), label.Int64(\"height\", int64(ts.Height())))\n\t}\n\tdefer span.End()\n\n\tctx, _ = tag.New(ctx, tag.Upsert(metrics.Name, t.name))\n\n\tvar cancel func()\n\tvar tctx context.Context // cancellable context for the task\n\tif t.window > 0 {\n\t\t// Do as much indexing as possible in the specified time window (usually one epoch when following head of chain)\n\t\t// Anything not completed in that time will be marked as incomplete\n\t\ttctx, cancel = context.WithTimeout(ctx, t.window)\n\t} else {\n\t\t// Ensure all goroutines are stopped when we exit\n\t\ttctx, cancel = context.WithCancel(ctx)\n\t}\n\tdefer cancel()\n\n\tll := log.With(\"height\", int64(ts.Height()))\n\n\tstart := time.Now()\n\n\tinFlight := 0\n\t// TODO should these be allocated to the size of message and message execution processors\n\tresults := make(chan *TaskResult, 
len(t.processors)+len(t.actorProcessors))\n\t// A map to gather the persistable outputs from each task\n\ttaskOutputs := make(map[string]model.PersistableList, len(t.processors)+len(t.actorProcessors))\n\n\t// Run each tipset processing task concurrently\n\tfor name, p := range t.processors {\n\t\tinFlight++\n\t\tgo t.runProcessor(tctx, p, name, ts, results)\n\t}\n\n\t// Run each actor or message processing task concurrently if we have any and we've seen a previous tipset to compare with\n\tif len(t.actorProcessors) > 0 || len(t.messageProcessors) > 0 || len(t.messageExecutionProcessors) > 0 {\n\n\t\t// Actor processors perform a diff between two tipsets so we need to keep track of parent and child\n\t\tvar parent, child *types.TipSet\n\t\tif t.lastTipSet != nil {\n\t\t\tif t.lastTipSet.Height() > ts.Height() {\n\t\t\t\t// last tipset seen was the child\n\t\t\t\tchild = t.lastTipSet\n\t\t\t\tparent = ts\n\t\t\t} else if t.lastTipSet.Height() < ts.Height() {\n\t\t\t\t// last tipset seen was the parent\n\t\t\t\tchild = ts\n\t\t\t\tparent = t.lastTipSet\n\t\t\t} else {\n\t\t\t\tlog.Errorw(\"out of order tipsets\", \"height\", ts.Height(), \"last_height\", t.lastTipSet.Height())\n\t\t\t}\n\t\t}\n\n\t\t// If no parent tipset available then we need to skip processing. It's likely we received the last or first tipset\n\t\t// in a batch. 
No report is generated because a different run of the indexer could cover the parent and child\n\t\t// for this tipset.\n\t\tif parent != nil {\n\t\t\tif t.node == nil {\n\t\t\t\tnode, closer, err := t.opener.Open(ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn xerrors.Errorf(\"unable to open lens: %w\", err)\n\t\t\t\t}\n\t\t\t\tt.node = node\n\t\t\t\tt.closer = closer\n\t\t\t}\n\n\t\t\tif types.CidArrsEqual(child.Parents().Cids(), parent.Cids()) {\n\t\t\t\t// If we have message processors then extract the messages and receipts\n\t\t\t\tif len(t.messageProcessors) > 0 {\n\t\t\t\t\ttsMsgs, err := t.node.GetExecutedAndBlockMessagesForTipset(ctx, child, parent)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\t// Start all the message processors\n\t\t\t\t\t\tfor name, p := range t.messageProcessors {\n\t\t\t\t\t\t\tinFlight++\n\t\t\t\t\t\t\tgo t.runMessageProcessor(tctx, p, name, child, parent, tsMsgs.Executed, tsMsgs.Block, results)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tll.Errorw(\"failed to extract messages\", \"error\", err)\n\t\t\t\t\t\tterr := xerrors.Errorf(\"failed to extract messages: %w\", err)\n\t\t\t\t\t\t// We need to report that all message tasks failed\n\t\t\t\t\t\tfor name := range t.messageProcessors {\n\t\t\t\t\t\t\treport := &visormodel.ProcessingReport{\n\t\t\t\t\t\t\t\tHeight: int64(ts.Height()),\n\t\t\t\t\t\t\t\tStateRoot: ts.ParentState().String(),\n\t\t\t\t\t\t\t\tReporter: t.name,\n\t\t\t\t\t\t\t\tTask: name,\n\t\t\t\t\t\t\t\tStartedAt: start,\n\t\t\t\t\t\t\t\tCompletedAt: time.Now(),\n\t\t\t\t\t\t\t\tStatus: visormodel.ProcessingStatusError,\n\t\t\t\t\t\t\t\tErrorsDetected: terr,\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\ttaskOutputs[name] = model.PersistableList{report}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// If we have actor processors then find actors that have changed state\n\t\t\t\tif len(t.actorProcessors) > 0 {\n\t\t\t\t\tchangesStart := time.Now()\n\t\t\t\t\tvar err error\n\t\t\t\t\tvar changes map[string]types.Actor\n\t\t\t\t\t// 
special case, we want to extract all actor states from the genesis block.\n\t\t\t\t\tif parent.Height() == 0 {\n\t\t\t\t\t\tchanges, err = t.getGenesisActors(ctx)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tchanges, err = t.stateChangedActors(tctx, parent.ParentState(), child.ParentState())\n\t\t\t\t\t}\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tll.Debugw(\"found actor state changes\", \"count\", len(changes), \"time\", time.Since(changesStart))\n\t\t\t\t\t\tif t.addressFilter != nil {\n\t\t\t\t\t\t\tfor addr := range changes {\n\t\t\t\t\t\t\t\tif !t.addressFilter.Allow(addr) {\n\t\t\t\t\t\t\t\t\tdelete(changes, addr)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfor name, p := range t.actorProcessors {\n\t\t\t\t\t\t\tinFlight++\n\t\t\t\t\t\t\tgo t.runActorProcessor(tctx, p, name, child, parent, changes, results)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tll.Errorw(\"failed to extract actor changes\", \"error\", err)\n\t\t\t\t\t\tterr := xerrors.Errorf(\"failed to extract actor changes: %w\", err)\n\t\t\t\t\t\t// We need to report that all actor tasks failed\n\t\t\t\t\t\tfor name := range t.actorProcessors {\n\t\t\t\t\t\t\treport := &visormodel.ProcessingReport{\n\t\t\t\t\t\t\t\tHeight: int64(ts.Height()),\n\t\t\t\t\t\t\t\tStateRoot: ts.ParentState().String(),\n\t\t\t\t\t\t\t\tReporter: t.name,\n\t\t\t\t\t\t\t\tTask: name,\n\t\t\t\t\t\t\t\tStartedAt: start,\n\t\t\t\t\t\t\t\tCompletedAt: time.Now(),\n\t\t\t\t\t\t\t\tStatus: visormodel.ProcessingStatusError,\n\t\t\t\t\t\t\t\tErrorsDetected: terr,\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\ttaskOutputs[name] = model.PersistableList{report}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// If we have messages execution processors then extract internal messages\n\t\t\t\tif len(t.messageExecutionProcessors) > 0 {\n\t\t\t\t\tiMsgs, err := t.node.GetMessageExecutionsForTipSet(ctx, child, parent)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\t// Start all the message processors\n\t\t\t\t\t\tfor name, p := range 
t.messageExecutionProcessors {\n\t\t\t\t\t\t\tinFlight++\n\t\t\t\t\t\t\tgo t.runMessageExecutionProcessor(tctx, p, name, child, parent, iMsgs, results)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tll.Errorw(\"failed to extract messages\", \"error\", err)\n\t\t\t\t\t\tterr := xerrors.Errorf(\"failed to extract messages: %w\", err)\n\t\t\t\t\t\t// We need to report that all message tasks failed\n\t\t\t\t\t\tfor name := range t.messageExecutionProcessors {\n\t\t\t\t\t\t\treport := &visormodel.ProcessingReport{\n\t\t\t\t\t\t\t\tHeight: int64(ts.Height()),\n\t\t\t\t\t\t\t\tStateRoot: ts.ParentState().String(),\n\t\t\t\t\t\t\t\tReporter: t.name,\n\t\t\t\t\t\t\t\tTask: name,\n\t\t\t\t\t\t\t\tStartedAt: start,\n\t\t\t\t\t\t\t\tCompletedAt: time.Now(),\n\t\t\t\t\t\t\t\tStatus: visormodel.ProcessingStatusError,\n\t\t\t\t\t\t\t\tErrorsDetected: terr,\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\ttaskOutputs[name] = model.PersistableList{report}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\t// TODO: we could fetch the parent stateroot and proceed to index this tipset. 
However this will be\n\t\t\t\t// slower and increases the likelihood that we exceed the processing window and cause the next\n\t\t\t\t// tipset to be skipped completely.\n\t\t\t\tlog.Errorw(\"mismatching child and parent tipsets\", \"height\", ts.Height(), \"child\", child.Key(), \"parent\", parent.Key())\n\n\t\t\t\t// We need to report that all message and actor tasks were skipped\n\t\t\t\treason := \"tipset did not have expected parent or child\"\n\t\t\t\tfor name := range t.messageProcessors {\n\t\t\t\t\ttaskOutputs[name] = model.PersistableList{t.buildSkippedTipsetReport(ts, name, start, reason)}\n\t\t\t\t\tll.Infow(\"task skipped\", \"task\", name, \"reason\", reason)\n\t\t\t\t}\n\t\t\t\tfor name := range t.actorProcessors {\n\t\t\t\t\ttaskOutputs[name] = model.PersistableList{t.buildSkippedTipsetReport(ts, name, start, reason)}\n\t\t\t\t\tll.Infow(\"task skipped\", \"task\", name, \"reason\", reason)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Wait for all tasks to complete\n\tfor inFlight > 0 {\n\t\tvar res *TaskResult\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tcase res = <-results:\n\t\t}\n\t\tinFlight--\n\n\t\tllt := ll.With(\"task\", res.Task)\n\n\t\t// Was there a fatal error?\n\t\tif res.Error != nil {\n\t\t\tllt.Errorw(\"task returned with error\", \"error\", res.Error.Error())\n\t\t\t// tell all the processors to close their connections to the lens, they can reopen when needed\n\t\t\tif err := t.closeProcessors(); err != nil {\n\t\t\t\tlog.Errorw(\"error received while closing tipset indexer\", \"error\", err)\n\t\t\t}\n\t\t\treturn res.Error\n\t\t}\n\n\t\tif res.Report == nil {\n\t\t\t// Nothing was done for this tipset\n\t\t\tllt.Debugw(\"task returned with no report\")\n\t\t\tcontinue\n\t\t}\n\n\t\t// Fill in some report metadata\n\t\tres.Report.Reporter = t.name\n\t\tres.Report.Task = res.Task\n\t\tres.Report.StartedAt = res.StartedAt\n\t\tres.Report.CompletedAt = res.CompletedAt\n\n\t\tif res.Report.ErrorsDetected != nil 
{\n\t\t\tres.Report.Status = visormodel.ProcessingStatusError\n\t\t} else if res.Report.StatusInformation != \"\" {\n\t\t\tres.Report.Status = visormodel.ProcessingStatusInfo\n\t\t} else {\n\t\t\tres.Report.Status = visormodel.ProcessingStatusOK\n\t\t}\n\n\t\tllt.Infow(\"task report\", \"status\", res.Report.Status, \"time\", res.Report.CompletedAt.Sub(res.Report.StartedAt))\n\n\t\t// Persist the processing report and the data in a single transaction\n\t\ttaskOutputs[res.Task] = model.PersistableList{res.Report, res.Data}\n\t}\n\n\t// remember the last tipset we observed\n\tt.lastTipSet = ts\n\n\tif len(taskOutputs) == 0 {\n\t\t// Nothing to persist\n\t\tll.Debugw(\"tipset complete, nothing to persist\", \"total_time\", time.Since(start))\n\t\treturn nil\n\t}\n\n\t// wait until there is an empty slot before persisting\n\tll.Debugw(\"waiting to persist data\", \"time\", time.Since(start))\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tcase t.persistSlot <- struct{}{}:\n\t\t// Slot was free so we can continue. 
Slot is now taken.\n\t}\n\n\t// Persist all results\n\tgo func() {\n\t\t// free up the slot when done\n\t\tdefer func() {\n\t\t\t<-t.persistSlot\n\t\t}()\n\n\t\tll.Debugw(\"persisting data\", \"time\", time.Since(start))\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(len(taskOutputs))\n\n\t\t// Persist each processor's data concurrently since they don't overlap\n\t\tfor task, p := range taskOutputs {\n\t\t\tgo func(task string, p model.Persistable) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tstart := time.Now()\n\t\t\t\tctx, _ = tag.New(ctx, tag.Upsert(metrics.TaskType, task))\n\n\t\t\t\tif err := t.storage.PersistBatch(ctx, p); err != nil {\n\t\t\t\t\tstats.Record(ctx, metrics.PersistFailure.M(1))\n\t\t\t\t\tll.Errorw(\"persistence failed\", \"task\", task, \"error\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tll.Debugw(\"task data persisted\", \"task\", task, \"time\", time.Since(start))\n\t\t\t}(task, p)\n\t\t}\n\t\twg.Wait()\n\t\tll.Debugw(\"tipset complete\", \"total_time\", time.Since(start))\n\t}()\n\n\treturn nil\n}", "func (p *Path) Ta(cvs ...Point) *Path {\n\treturn p.addCmd(\"T\", tCmd{pts: cvs})\n}", "func (t *Tangle) HasTip(h hash.Hash) bool {\n\treturn t.tips[h]\n}", "func (j *DSGit) SetTLOC(ctx *Ctx) (err error) {\n\turl := ctx.ESURL + \"/\" + ctx.RichIndex + \"/_update_by_query?conflicts=proceed&refresh=true&timeout=20m\"\n\tmethod := Post\n\tpayload := []byte(fmt.Sprintf(`{\"script\":{\"inline\":\"ctx._source.total_lines_of_code=%d;\"},\"query\":{\"bool\":{\"must\":{\"term\":{\"origin\":\"%s\"}}}}}`, j.Loc, j.URL))\n\tvar resp interface{}\n\tresp, _, _, _, err = Request(\n\t\tctx,\n\t\turl,\n\t\tmethod,\n\t\tmap[string]string{\"Content-Type\": \"application/json\"}, // headers\n\t\tpayload, // payload\n\t\t[]string{}, // cookies\n\t\tmap[[2]int]struct{}{{200, 200}: {}}, // JSON statuses: 200\n\t\tnil, // Error statuses\n\t\tmap[[2]int]struct{}{{200, 200}: {}}, // OK statuses: 200\n\t\tnil, // Cache statuses\n\t\ttrue, // retry\n\t\tnil, // cache for\n\t\ttrue, // 
skip in dry-run mode\n\t)\n\tif err != nil {\n\t\treturn\n\t}\n\tupdated, _ := Dig(resp, []string{\"updated\"}, true, false)\n\tPrintf(\"Set total_lines_of_code %d on %.0f documents\\n\", j.Loc, updated)\n\treturn\n}", "func (bd *BlockDAG) GetTips() *HashSet {\n\tbd.stateLock.Lock()\n\tdefer bd.stateLock.Unlock()\n\n\treturn bd.tips\n}", "func Tipln(v ...interface{}) {\n\tTip(v...)\n\tprintln()\n}", "func (ti *TemplateInfo) Add(ts ...dm.Quest_TemplateSpec) {\n\t*ti = append(*ti, ts...)\n\tsort.Sort(*ti)\n\t*ti = (*ti)[:set.Uniq(*ti)]\n}", "func addLocationAnnotations(m *il.Graph) {\n\tfor _, n := range m.Modules {\n\t\taddLocationAnnotation(n.Location, &n.Comments)\n\t}\n\tfor _, n := range m.Providers {\n\t\taddLocationAnnotation(n.Location, &n.Comments)\n\t}\n\tfor _, n := range m.Resources {\n\t\taddLocationAnnotation(n.Location, &n.Comments)\n\t}\n\tfor _, n := range m.Outputs {\n\t\taddLocationAnnotation(n.Location, &n.Comments)\n\t}\n\tfor _, n := range m.Locals {\n\t\taddLocationAnnotation(n.Location, &n.Comments)\n\t}\n\tfor _, n := range m.Variables {\n\t\taddLocationAnnotation(n.Location, &n.Comments)\n\t}\n}", "func (a *analysis) addLabel(ptr, label nodeid) bool {\n\tb := a.nodes[ptr].solve.pts.add(label)\n\tif b && a.log != nil {\n\t\tfmt.Fprintf(a.log, \"\\t\\tpts(n%d) += n%d\\n\", ptr, label)\n\t}\n\treturn b\n}", "func (locs Locations) PrintNearbyLocations(label string, d float64) {\n\tloc0, exist, i0 := locs.FindBy(label)\n\tif !exist {\n\t\tPrintln(\"No location found with that label code\")\n\t\tos.Exit(1)\n\t}\n\tPrintln(\n\t\t\"Results within a %.1f m by %.1f m square centered on %v:\",\n\t\t2*d, 2*d, loc0)\n\tddeg := RadToDeg(DistToRad(d/1000.0))\n\tc := 0\n\tfor i := 0; i<len(locs); i++ {\n\t\tif i != i0 {\n\t\t\tif math.Abs(locs[i].Lat-loc0.Lat) < ddeg &&\n\t\t\t\tmath.Abs(locs[i].Long-loc0.Long) < ddeg {\n\t\t\t\tPrintln(\" %v\", locs[i])\n\t\t\t\tc++\n\t\t\t}\n\t\t}\n\t}\n\tPrintln(\"Found %d locations\", c)\n\tif c > 1 {\n\t\tPrintln(\"%d 
unique pairs\", maxPairs(0, c-1))\n\t}\n os.Exit(0)\n}", "func (f SettlLocationField) Tag() quickfix.Tag { return tag.SettlLocation }", "func (m *monitor) updateTip(names []string) {\n\tif len(names) > 0 {\n\t\tm.blanks = 0\n\t} else {\n\t\tm.blanks++\n\t}\n\n\tm.tip = append(names, m.tip...)\n\tif len(m.tip) > maxTipSize {\n\t\tm.tip = m.tip[0:maxTipSize]\n\t}\n}", "func (hu *HistorytakingUpdate) AddTemp(f float32) *HistorytakingUpdate {\n\thu.mutation.AddTemp(f)\n\treturn hu\n}", "func (g *testGenerator) setTip(blockName string) {\n\tg.tip = g.blocksByName[blockName]\n\tif g.tip == nil {\n\t\tpanic(fmt.Sprintf(\"tip block name %s does not exist\", blockName))\n\t}\n\tg.tipName = blockName\n}", "func (f *Builder) AppendOn(ctx context.Context, parent *types.TipSet, width int) *types.TipSet {\n\treturn f.Build(ctx, parent, width, nil)\n}", "func (set *lalrSet) addGoto(tkn Token, other *lalrSet) {\n\tset.gotos[tkn] = other\n}", "func (tracer *Tracer) Add(ctx context.Context, start *wire.OutPoint) {\r\n\tnewNode := traceNode{\r\n\t\toutpoint: *start,\r\n\t}\r\n\ttracer.traces = append(tracer.traces, &newNode)\r\n}", "func (hg *HostGroup) SetLocInfo(ctx context.Context, params interface{}) ([]byte, error) {\n\treturn hg.client.PostIn(ctx, \"/api/v1.0/HostGroup.SetLocInfo\", params)\n}", "func TPUReplicateMetadataStepMarkerLocation(value string) TPUReplicateMetadataAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"step_marker_location\"] = value\n\t}\n}", "func (t *Tile) AddPointsToHeatmap(heat *C.struct___0, size float64) *C.struct___0 {\n\tif t == nil {\n\t\treturn nil\n\t}\n\n\tif heat == nil {\n\t\theat, _ = C.heatmap_new(C.uint(size), C.uint(size))\n\t}\n\n\t// Build a stamp with a radius of 10 units.\n\tstamp := C.heatmap_stamp_gen(C.uint(10))\n\n\t// Count the points that have been rendered in case they're used for stats none day.\n\tnumRendered := 0\n\n\tif t.Level == level_depth && len(t.Points) > 0 {\n\t\t// Grab points from this tile.\n\t\tfor _, point := 
range t.Points {\n\t\t\tpixelPoint := t.PixelPos(point, size)\n\t\t\tC.heatmap_add_point_with_stamp(heat, C.uint(pixelPoint.X), C.uint(pixelPoint.Y), stamp)\n\t\t\tnumRendered++\n\t\t}\n\t} else {\n\t\t// Get the points and have them sent to a buffered channel\n\t\tpointChan := make(chan Point, 100)\n\t\t// Fetch them in a goroutine so we can fetch them and add them at the same time.\n\t\tgo func(tile *Tile, output chan<- Point) {\n\t\t\ttile.GetPoints(pointChan)\n\t\t\tclose(pointChan)\n\t\t}(t, pointChan)\n\t\t// Add them.\n\t\tfor point := range pointChan {\n\t\t\tpixelPoint := t.PixelPos(point, size)\n\t\t\tC.heatmap_add_point(heat, C.uint(pixelPoint.X), C.uint(pixelPoint.Y))\n\t\t\tnumRendered++\n\t\t}\n\t}\n\n\t// Deallocate the stamp memory.\n\tC.heatmap_stamp_free(stamp)\n\treturn heat\n}", "func (pacif pacificTimeZones) Tahiti() string {return \"Pacific/Tahiti\" }", "func (f *Flattener) Add(point *FlattenerPoint) error {\n\n\titem, ok := f.pointMap.Load(point.hash)\n\tif ok {\n\t\tentry := item.(*mapEntry)\n\t\tentry.values = append(entry.values, point.value)\n\t\treturn nil\n\t}\n\n\tentry := &mapEntry{\n\t\tvalues: []float64{point.value},\n\t\tflattenerPointData: flattenerPointData{\n\t\t\toperation: point.operation,\n\t\t\ttimestamp: point.timestamp,\n\t\t\tdataChannelItem: point.dataChannelItem,\n\t\t},\n\t}\n\n\tf.pointMap.Store(point.hash, entry)\n\n\treturn nil\n}", "func (huo *HistorytakingUpdateOne) AddTemp(f float32) *HistorytakingUpdateOne {\n\thuo.mutation.AddTemp(f)\n\treturn huo\n}", "func (tpr *PingResults) AddPingResult(pr PingResult) {\n\ttpr.PingResults = append(tpr.PingResults, pr)\n}", "func (tweetMap TweetMap) Add(address string, tweetID int64) {\n\tif len(tweetMap[address]) > 0 {\n\t\ttweetMap[address] = append(tweetMap[address], tweetID)\n\t\treturn\n\t}\n\ttweetMap[address] = []int64{tweetID}\n}", "func (r *LocationMap) AddAll(t []*Location) {\n\tfor _, e := range t {\n\t\tr.Add(e)\n\t}\n}", "func tipsRequest() 
{\n\tfmt.Println(_TIPS_CONTENT)\n}", "func (pacif pacificTimeZones) Tongatapu() string {return \"Pacific/Tongatapu\" }", "func (bd *BlockDAG) GetTipsList() []IBlock {\n\tbd.stateLock.Lock()\n\tdefer bd.stateLock.Unlock()\n\n\tresult := bd.instance.GetTipsList()\n\tif result != nil {\n\t\treturn result\n\t}\n\tresult = []IBlock{}\n\tfor k := range bd.tips.GetMap() {\n\t\tresult = append(result, bd.getBlock(&k))\n\t}\n\treturn result\n}", "func (tr *trooper) loc() (x, y, z float64) { return tr.part.At() }", "func (d *LDB) createStation(tx *bbolt.Tx, locations []*darwinref.Location) *Station {\n\n\tif len(locations) == 0 {\n\t\treturn nil\n\t}\n\n\t// Mark Public if we have a CRS & it doesn't start with X or Z\n\t// 2019 June 10 Enable Z for now as Farringdon is known as Farringdon Underground.\n\t// This will expose the underground but better than leave a major station. Hopefully with Crossrail this will revert\n\t// back to the single station.\n\tcrs := locations[0].Crs\n\tpublic := crs != \"\" && crs[0] != 'X' // && crs[0] != 'Z'\n\tif !public {\n\t\treturn nil\n\t}\n\n\t//tb := tx.Bucket([]byte(tiplocBucket))\n\n\ts := d.getStationCrs(tx, crs)\n\tif s == nil {\n\t\ts = &Station{}\n\t\ts.Crs = crs\n\t\ts.Locations = locations\n\t} else {\n\t\t// Remove any tiplocs that have been removed\n\t\ttpl := make(map[string]interface{})\n\t\tfor _, loc := range locations {\n\t\t\ttpl[loc.Tiploc] = true\n\t\t}\n\t\tfor _, loc := range s.Locations {\n\t\t\tif _, exists := tpl[loc.Tiploc]; exists {\n\t\t\t\tdelete(tpl, loc.Tiploc)\n\t\t\t}\n\t\t}\n\t\tfor t, _ := range tpl {\n\t\t\tdelete(d.tiplocs, t)\n\t\t\t//_ = tb.Delete([]byte(t))\n\t\t}\n\t}\n\n\ts.Public = public\n\n\td.stations[crs] = s\n\tb, _ := s.Bytes()\n\t_ = tx.Bucket([]byte(crsBucket)).Put([]byte(crs), b)\n\n\t// Ensure all our tiplocs point to this crs\n\t//cb := []byte(crs)\n\tfor _, l := range s.Locations {\n\t\td.tiplocs[l.Tiploc] = crs\n\t\t//tpl := []byte(l.Tiploc)\n\t\t//b = tb.Get(tpl)\n\t\t//if b == nil 
|| bytes.Compare(cb, b) != 0 {\n\t\t//\t_ = tb.Put(tpl, cb)\n\t\t//}\n\t}\n\n\treturn s\n}", "func (india indianaTimeZones) Tell_City() string {return \"America/Indiana/Tell_City\" }", "func Tip(v ...interface{}) {\n\tprint(TipFont)\n\tfmt.Print(v...)\n\tterminal.Reset()\n}", "func Location(ip string) (map[string]string, error) {\n\turi := fmt.Sprintf(\"%s%s\", baseURI, ip)\n\tbody, err := performRequest(uri)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdata, err := extractJSON(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm := make(map[string]string)\n\tfor k, v := range data {\n\t\tif k == \"latitude\" || k == \"longitude\" || k == \"metro_code\" {\n\t\t\tm[k] = strconv.FormatFloat(v.(float64), 'f', -1, 64)\n\t\t} else {\n\t\t\tm[k] = v.(string)\n\t\t}\n\t}\n\treturn m, nil\n}", "func (m *ccMetric) ToPoint(metaAsTags map[string]bool) (p *write.Point) {\n\tp = influxdb2.NewPoint(m.name, m.tags, m.fields, m.tm)\n\tfor key, ok1 := range metaAsTags {\n\t\tif val, ok2 := m.GetMeta(key); ok1 && ok2 {\n\t\t\tp.AddTag(key, val)\n\t\t}\n\t}\n\treturn p\n}", "func lattigo_encodeNTTAtLvlNew(paramHandle Handle2, encoderHandle Handle2, realValues *C.constDouble, logLen uint64, level uint64, scale float64) Handle2 {\n\tvar params *ckks.Parameters\n\tparams = getStoredParameters(paramHandle)\n\n\tvar encoder *ckks.Encoder\n\tencoder = getStoredEncoder(encoderHandle)\n\n\tcomplexValues := CDoubleVecToGoComplex(realValues, uint64(math.Pow(2, float64(logLen))))\n\tvar plaintext *ckks.Plaintext\n\tplaintext = ckks.NewPlaintext(*params, int(level), scale)\n\t(*encoder).EncodeNTT(plaintext, complexValues, int(logLen))\n\treturn marshal.CrossLangObjMap.Add(unsafe.Pointer(plaintext))\n}", "func CLUBELOCATIONADDRESSLT(v string) predicate.Location {\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldCLUBELOCATIONADDRESS), v))\n\t})\n}", "func main() {\n\ta := Gps4dLoc{}\n\ta.Lati = 23.333\n\ta.Long = 123.3333\n\n\tugi := 
UserGpsInfo{}\n\tugi.CurrentLoc.Lati = 12.3\n\tugi.CurrentLoc.Long = 123.333\n\ttestloc := Gps2dLoc{12.3, 22.3}\n\n\tif ugi.CameraLoc.Loc == nil {\n\t\tfmt.Println(\"cameraloc is nil\")\n\t\tfmt.Println(ugi.CameraLoc)\n\t\tugi.CameraLoc.Loc = append(ugi.CameraLoc.Loc, testloc)\n\t\tfmt.Println(len(ugi.CameraLoc.Loc))\n\t\tfmt.Println(\"shoelati\", ugi.CameraLoc.Loc[0].Lati)\n\t} else {\n\t\tfmt.Println(\"shoelati\", ugi.CameraLoc.Loc[0].Lati)\n\t}\n\n\tif ugi.CreateLoc.Timestamp == 0 {\n\t\tfmt.Println(\"create loc is nil\")\n\t}\n\n}", "func (o LookupNetworkDataNetworkResultOutput) Location() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupNetworkDataNetworkResult) string { return v.Location }).(pulumi.StringOutput)\n}", "func (o LookupGroupResultOutput) Location() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupGroupResult) string { return v.Location }).(pulumi.StringOutput)\n}", "func (o LookupNetworkPacketCoreControlPlaneResultOutput) Location() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupNetworkPacketCoreControlPlaneResult) string { return v.Location }).(pulumi.StringOutput)\n}", "func (teamID TeamID) pdMarkers(tl *TeamData) error {\n\tmr, err := db.Query(\"SELECT m.ID, m.portalID, m.type, m.comment, Y(p.loc) AS lat, X(p.loc) AS lon, p.name FROM marker=m, portal=p WHERE m.opID IN (SELECT ID FROM operation WHERE teamID = ?) 
AND m.portalID = p.ID AND m.opID = p.opID\", teamID)\n\tif err != nil {\n\t\tLog.Error(err)\n\t\treturn err\n\t}\n\tdefer mr.Close()\n\n\tvar tmpMarker Marker\n\tvar tmpWaypoint waypoint\n\tfor mr.Next() {\n\t\terr := mr.Scan(&tmpMarker.ID, &tmpMarker.PortalID, &tmpMarker.Type, &tmpMarker.Comment, &tmpWaypoint.Lat, &tmpWaypoint.Lon, &tmpWaypoint.Desc)\n\t\tif err != nil {\n\t\t\tLog.Error(err)\n\t\t\tcontinue\n\t\t}\n\t\ttl.Markers = append(tl.Markers, tmpMarker)\n\n\t\ttmpWaypoint.Type = \"waypoint\"\n\t\ttmpWaypoint.MarkerType = tmpMarker.Type.String()\n\t\ttmpWaypoint.TeamID = teamID.String()\n\t\ttmpWaypoint.ID = markerIDwaypointID(tmpMarker.ID)\n\t\ttmpWaypoint.Radius = 150\n\t\ttmpWaypoint.Share = true\n\t\ttl.Waypoints = append(tl.Waypoints, tmpWaypoint)\n\t}\n\treturn nil\n}", "func (o LookupServicePlanResultOutput) Location() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupServicePlanResult) string { return v.Location }).(pulumi.StringOutput)\n}", "func isVirtualTip(bs *HashSet, futureSet *HashSet, anticone *HashSet, children *HashSet) bool {\n\tfor k := range children.GetMap() {\n\t\tif bs.Has(&k) {\n\t\t\treturn false\n\t\t}\n\t\tif !futureSet.Has(&k) && !anticone.Has(&k) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (t *ATrains) ShowPos() {\n\tfmt.Println(\"wszystkie pociagi - pozycje\")\n\n\tfor i := 0; i < len(t.trains); i++ {\n\t\tt.trains[i].ShowPos()\n\t}\n\n\tfmt.Println(\"\")\n}", "func TestPutTipSet(t *testing.T) {\n\ttf.UnitTest(t)\n\n\tctx := context.Background()\n\tbuilder := chain.NewBuilder(t, address.Undef)\n\tgenTS := builder.Genesis()\n\tr := repo.NewInMemoryRepo()\n\tcs := newChainStore(r, genTS)\n\n\tgenTsas := &chain.TipSetMetadata{\n\t\tTipSet: genTS,\n\t\tTipSetStateRoot: genTS.At(0).ParentStateRoot,\n\t\tTipSetReceipts: testhelpers.EmptyReceiptsCID,\n\t}\n\terr := cs.Store.PutTipSetMetadata(ctx, genTsas)\n\tassert.NoError(t, err)\n}", "func (du *DepartmentUpdate) AddTriageResult(t ...*TriageResult) 
*DepartmentUpdate {\n\tids := make([]int, len(t))\n\tfor i := range t {\n\t\tids[i] = t[i].ID\n\t}\n\treturn du.AddTriageResultIDs(ids...)\n}", "func lookupTip(r *git.Repository, refname string) *git.Commit {\n\tref, err := r.References.Lookup(refname)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tcommit, err := lookupCommit(r, ref.Target())\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn commit\n}", "func (x *Data) AddAlias(canonical string, alias string) {\n productItem := x.tree.Get(ProductItem{Key: canonical}).(ProductItem)\n x.tree.ReplaceOrInsert(ProductItem{\n Key: alias,\n Products: productItem.Products,\n Listings: productItem.Listings,\n })\n}", "func (luo *LocationUpdateOne) AddLatitude(f float64) *LocationUpdateOne {\n\tif luo.addlatitude == nil {\n\t\tluo.addlatitude = &f\n\t} else {\n\t\t*luo.addlatitude += f\n\t}\n\treturn luo\n}", "func (tgm TargetGroupMap) Add(t *Target) {\n\tkey := t.Instance.String()\n\ttgm[key] = append(tgm[key], t)\n}", "func (lt *LineTask) Loc(prog float64) *Point {\n\tif lt.TaskType == OnDeparture {\n\t\treturn lt.Stay.Pos()\n\t}\n\tif prog < 0.5 && lt.before.TaskType == OnDeparture {\n\t\treturn lt.Moving.Div(2 * prog * prog)\n\t} else if prog > 0.5 && lt.TaskType == OnStopping {\n\t\treturn lt.Moving.Div(-2*prog*prog + 4*prog - 1)\n\t}\n\treturn lt.Moving.Div(prog)\n}", "func lookupTip(r *git.Repository, refname string) *git.Commit {\n\tref, err := r.LookupReference(refname)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tcommit, err := lookupCommit(r, ref.Target())\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn commit\n}", "func AddAtlasToRewardInfo(ri *RewardInfo) {\n\n\tvar logVGVA = []byte(\"ValidatorGroupVoteActivated(address,address,uint256,uint256)\")\n\tvar logVGVAHash = crypto.Keccak256Hash(logVGVA)\n\t//Filter by Event = ValidatorGroupVoteActivated and Group = ri.GroupHash\n\tvar TopicsFilter = [][]common.Hash{{logVGVAHash}, {}, {ri.GroupHash}}\n\n\tcontractAddress := 
common.HexToAddress(WrapperContractDeploymentAddress[NetActive][Election])\n\n\tquery := ethereum.FilterQuery{\n\t\tFromBlock: big.NewInt(0),\n\t\tToBlock: ri.BlockNumber,\n\t\tTopics: TopicsFilter,\n\t\tAddresses: []common.Address{\n\t\t\tcontractAddress,\n\t\t},\n\t}\n\n\tlogs, err := atlasEthClient.FilterLogs(context.Background(), query)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tri.AddressAtlas = make(map[common.Address]bool)\n\tfor _, vLog := range logs {\n\n\t\tif !(vLog.Topics[2] == ri.GroupHash) {\n\t\t\tlog.Panic(\"TopicFilter didn't work correctly\")\n\t\t}\n\t\taddress := common.HexToAddress(vLog.Topics[1].Hex())\n\t\tri.AddressAtlas[address] = true\n\n\t}\n\n}", "func (m *ParentLabelDetails) SetTooltip(value *string)() {\n err := m.GetBackingStore().Set(\"tooltip\", value)\n if err != nil {\n panic(err)\n }\n}", "func infoTableAddRow(infoboxOrSec block, table element, entry *mapListEntry, classes []string) {\n\n\t// create the row\n\ttr := table.createChild(\"tr\", \"infobox-pair\")\n\n\t// append table row with a key\n\tif entry.keyTitle != \"\" {\n\n\t\t// key\n\t\tth := tr.createChild(\"th\", \"infobox-key\")\n\t\tth.addText(entry.keyTitle)\n\t\tth.addClass(classes...)\n\n\t\t// value\n\t\ttd := tr.createChild(\"td\", \"infobox-value\")\n\t\ttd.add(entry.value)\n\t\ttd.addClass(classes...)\n\n\t\treturn\n\t}\n\n\t// otherwise, append a table row without a key\n\ttd := tr.createChild(\"td\", \"infobox-anon\")\n\ttd.setAttr(\"colspan\", \"2\")\n\ttd.add(entry.value)\n\ttd.addClass(classes...)\n}", "func labelMarkers(m []Marker, x, y int, anchor string, fontSize int, short bool, b *bytes.Buffer) {\n\tb.WriteString(`<g id=\"marker_labels\">`)\n\tfor _, mr := range m {\n\t\tb.WriteString(fmt.Sprintf(\"<text x=\\\"%d\\\" y=\\\"%d\\\" font-size=\\\"%d\\\" visibility=\\\"hidden\\\" text-anchor=\\\"%s\\\">\", x, y, fontSize, anchor))\n\t\tif short {\n\t\t\tb.WriteString(mr.shortLabel)\n\t\t} else 
{\n\t\t\tb.WriteString(mr.label)\n\t\t}\n\t\tb.WriteString(fmt.Sprintf(\"<set attributeName=\\\"visibility\\\" from=\\\"hidden\\\" to=\\\"visible\\\" begin=\\\"%s.mouseover\\\" end=\\\"%s.mouseout\\\" dur=\\\"2s\\\"/>\",\n\t\t\tmr.id, mr.id))\n\t\tb.WriteString(`</text>`)\n\t}\n\tb.WriteString(`</g>`)\n}", "func Tip(branch string) (string, error) {\n\tif branch == \"\" {\n\t\tbranch = \"HEAD\"\n\t}\n\tresult, err := exec.Command(\"git\", \"rev-parse\", \"--short\", branch).CombinedOutput()\n\tif err != nil {\n\t\tif strings.Contains(string(result), \"Needed a single revision\") {\n\t\t\treturn \"\", fmt.Errorf(\"git: Branch %s is unknown, can't get tip\", branch)\n\t\t}\n\t\treturn \"\", err\n\t}\n\treturn strings.TrimSpace(string(result)), nil\n}", "func (p Meta) Location() string { return p.location }", "func (duo *DepartmentUpdateOne) AddTriageResult(t ...*TriageResult) *DepartmentUpdateOne {\n\tids := make([]int, len(t))\n\tfor i := range t {\n\t\tids[i] = t[i].ID\n\t}\n\treturn duo.AddTriageResultIDs(ids...)\n}", "func (this *DtNavMesh) CalcTileLoc(pos []float32, tx, ty *int32) {\n\t*tx = (int32)(math.Floor(float64(pos[0]-this.m_orig[0]) / float64(this.m_tileWidth)))\n\t*ty = (int32)(math.Floor(float64(pos[2]-this.m_orig[2]) / float64(this.m_tileHeight)))\n}", "func (tsResp *TimestampResponse) AddCurrentTimestamp(\n\tpooln, bucketn string, curSeqnos map[uint16]uint64) *TimestampResponse {\n\n\tts := NewTsVbuuid(pooln, bucketn, len(curSeqnos))\n\tfor vbno, seqno := range curSeqnos {\n\t\tts.Append(vbno, seqno, 0 /*vbuuid*/, 0 /*start*/, 0 /*end*/, \"\" /*manifest*/)\n\t}\n\ttsResp.CurrentTimestamps = append(tsResp.CurrentTimestamps, ts)\n\treturn tsResp\n}", "func (pool *TxPool) GetRandomTips(n int) (v []types.Txi) {\n\tpool.mu.RLock()\n\tdefer pool.mu.RUnlock()\n\n\t// select n random hashes\n\tvalues := pool.tips.GetAllValues()\n\tindices := generateRandomIndices(n, len(values))\n\n\tfor _, i := range indices {\n\t\tv = append(v, values[i])\n\t}\n\treturn 
v\n}", "func (d *Dao) TipByID(c context.Context, id int64) (r *model.Tips, err error) {\n\tres := d.db.QueryRow(c, _tipsByIDSQL, id)\n\tr = new(model.Tips)\n\tif err = res.Scan(&r.ID, &r.Platform, &r.Version, &r.Tip, &r.Link, &r.StartTime, &r.EndTime, &r.Level, &r.JudgeType, &r.Operator, &r.Deleted, &r.Position, &r.Ctime, &r.Mtime); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\terr = nil\n\t\t\tr = nil\n\t\t\treturn\n\t\t}\n\t\terr = errors.WithStack(err)\n\t}\n\treturn\n}", "func (t *Tile) AddPoint(p Point) {\n\tscaledRelativePos := t.ScaledRelativePos(p)\n\tif scaledRelativePos.X > 1 || scaledRelativePos.X < 0 {\n\t\tlog.Printf(\"%f, %f is an invalid point for this tile.\\n\",\n\t\t\tscaledRelativePos.X, scaledRelativePos.Y)\n\t\treturn\n\t}\n\n\tif scaledRelativePos.Y > 1 || scaledRelativePos.Y < 0 {\n\t\tlog.Printf(\"%f, %f is an invalid point for this tile.\\n\",\n\t\t\tscaledRelativePos.X, scaledRelativePos.Y)\n\t\treturn\n\t}\n\n\t// Terminate at the level goal.\n\tif t.Level == level_depth {\n\t\tt.Points = append(t.Points, p)\n\t} else {\n\t\tsubTileIndex := 0\n\n\t\t// If it's on the right column, add 1 to the index to make it odd\n\t\tif scaledRelativePos.X >= 0.5 {\n\t\t\tsubTileIndex++\n\t\t}\n\n\t\t// If it's on the bottom, add 2 so it's either 2 or 3\n\t\tif scaledRelativePos.Y >= 0.5 {\n\t\t\tsubTileIndex += 2\n\t\t}\n\n\t\t// If the tile doesn't exist, create it.\n\t\tif t.SubTiles[subTileIndex] == nil {\n\t\t\tnewPosition := t.Position\n\t\t\tif (subTileIndex % 2) == 1 {\n\t\t\t\tnewPosition.X += t.Width / 2.0\n\t\t\t}\n\n\t\t\tif subTileIndex >= 2 {\n\t\t\t\tnewPosition.Y += t.Width / 2.0\n\t\t\t}\n\n\t\t\tt.SubTiles[subTileIndex] = &Tile{\n\t\t\t\tParent: t,\n\t\t\t\tSubTiles: [4]*Tile{},\n\t\t\t\tPoints: []Point{},\n\t\t\t\tLevel: t.Level + 1,\n\t\t\t\tPosition: newPosition,\n\t\t\t\tWidth: t.Width / 2.0,\n\t\t\t}\n\t\t}\n\n\t\t// Add the point.\n\t\tt.SubTiles[subTileIndex].AddPoint(p)\n\t\tt.NumPoints++\n\t}\n}", "func (tm *TreasureMap) 
addObstacle(listCustomObstacle [][2]int) {\n\tfor _, customObstacle := range listCustomObstacle {\n\t\ttm.Mapping[customObstacle] = entity_obstacle\n\t}\n}", "func (bd *BlockDAG) BuildMerkleTreeStoreFromTips() []*hash.Hash {\n\tparents := bd.GetTips().SortList(false)\n\treturn merkle.BuildParentsMerkleTreeStore(parents)\n}", "func (o AccessLevelCustomExprOutput) Location() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v AccessLevelCustomExpr) *string { return v.Location }).(pulumi.StringPtrOutput)\n}", "func (h *NeutrinoDBStore) BlockChainTip() (*wire.BlockHeader, uint32, er.R) {\n\tvar bh *wire.BlockHeader\n\tvar height uint32\n\treturn bh, height, walletdb.View(h.Db, func(tx walletdb.ReadTx) er.R {\n\t\tvar err er.R\n\t\tbh, height, err = h.BlockChainTip1(tx)\n\t\treturn err\n\t})\n}", "func (squ *SurveyQuestionUpdate) AddLatitude(f float64) *SurveyQuestionUpdate {\n\tif squ.addlatitude == nil {\n\t\tsqu.addlatitude = &f\n\t} else {\n\t\t*squ.addlatitude += f\n\t}\n\treturn squ\n}", "func (f *Builder) BuildOnTip(parent types.TipSet, build func(b *BlockBuilder)) *types.Block {\n\tticket := make([]byte, binary.Size(f.seq))\n\tbinary.BigEndian.PutUint64(ticket, f.seq)\n\tf.seq++\n\n\t// Sum weight of parents' parent weight, plus one for each parent.\n\t// Note: as with the state builder, we should probably factor this out.\n\tparentWeight := types.Uint64(0)\n\tfor i := 0; i < parent.Len(); i++ {\n\t\tparentWeight += parent.At(i).ParentWeight + 1\n\t}\n\n\theight := types.Uint64(0)\n\tif parent.Defined() {\n\t\theight = parent.At(0).Height + 1\n\t}\n\n\tb := &types.Block{\n\t\tTicket: ticket,\n\t\tMiner: f.minerAddress,\n\t\tParentWeight: parentWeight,\n\t\tParents: parent.Key(),\n\t\tHeight: height,\n\t\tMessages: []*types.SignedMessage{},\n\t\tMessageReceipts: []*types.MessageReceipt{},\n\t\t// Omitted fields below\n\t\t//StateRoot: stateRoot,\n\t\t//Proof PoStProof\n\t\t//Timestamp Uint64\n\t}\n\t// Nonce intentionally omitted as it will go away.\n\n\tif build 
!= nil {\n\t\tbuild(&BlockBuilder{b})\n\t}\n\n\t// Compute state root from block.\n\tvar err error\n\tb.StateRoot, err = f.stateBuilder.ComputeStateRoot(b)\n\trequire.NoError(f.t, err)\n\n\tf.blocks[b.Cid()] = b\n\treturn b\n\n}", "func (csm *consensusStateManager) getGHOSTDAGLowerTips(stagingArea *model.StagingArea, pendingTip *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {\n\ttips, err := csm.consensusStateStore.Tips(stagingArea, csm.databaseContext)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlowerTips := []*externalapi.DomainHash{pendingTip}\n\tfor _, tip := range tips {\n\t\tif tip.Equal(pendingTip) {\n\t\t\tcontinue\n\t\t}\n\t\tselectedParent, err := csm.ghostdagManager.ChooseSelectedParent(stagingArea, tip, pendingTip)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif selectedParent.Equal(pendingTip) {\n\t\t\tlowerTips = append(lowerTips, tip)\n\t\t}\n\t}\n\treturn lowerTips, nil\n}", "func (lu *LocationUpdate) AddLatitude(f float64) *LocationUpdate {\n\tif lu.addlatitude == nil {\n\t\tlu.addlatitude = &f\n\t} else {\n\t\t*lu.addlatitude += f\n\t}\n\treturn lu\n}", "func (f TargetLocationIDField) Tag() quickfix.Tag { return tag.TargetLocationID }", "func (t MatchTask) addSentenceWithOffset(contextMarker string, words []string, offset int) {\n\tvar sentence = Sentence{offset, words}\n\n\tt.sentenceByContextMarker[contextMarker] = sentence\n}", "func (squo *SurveyQuestionUpdateOne) AddLatitude(f float64) *SurveyQuestionUpdateOne {\n\tif squo.addlatitude == nil {\n\t\tsquo.addlatitude = &f\n\t} else {\n\t\t*squo.addlatitude += f\n\t}\n\treturn squo\n}", "func (f *File) AddLineInfo(offset int, filename string, line int) {\n\tx := index(offset)\n\tf.mutex.Lock()\n\tif i := len(f.infos); i == 0 || index(f.infos[i-1].Offset) < x && x < f.size {\n\t\tf.infos = append(f.infos, lineInfo{offset, filename, line})\n\t}\n\tf.mutex.Unlock()\n}", "func (o DashboardPartsPositionOutput) Metadata() pulumi.MapOutput {\n\treturn o.ApplyT(func(v 
DashboardPartsPosition) map[string]interface{} { return v.Metadata }).(pulumi.MapOutput)\n}", "func (o LookupVirtualNetworkResultOutput) Location() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupVirtualNetworkResult) string { return v.Location }).(pulumi.StringOutput)\n}", "func (d *SRTM) AddElevation(point []float64) ([]float64, error) {\n\tll := LatLng{\n\t\tLatitude: point[1],\n\t\tLongitude: point[0],\n\t}\n\ttile, err := d.loadTile(ll)\n\tif err != nil {\n\t\tlog.Error().Caller().Err(err).Msgf(\"loadTile: latLng = %s -> error %s\", ll.String(), err.Error())\n\t\treturn nil, err\n\t}\n\ttile.setLRU(time.Now())\n\televation, err := tile.GetElevation(ll)\n\tif err != nil {\n\t\tlog.Error().Caller().Err(err).Msgf(\"GetElevation: latLng = %s -> error %s\", ll.String(), err.Error())\n\t\treturn nil, err\n\t}\n\treturn append(point[:2], float64(elevation)), nil\n}", "func (out *elasticsearchOutput) UpdateLocalTopologyMap() {\n\n\t// get all shippers IPs from Elasticsearch\n\tTopologyMapTmp := make(map[string]string)\n\n\tres, err := out.Conn.SearchUri(\".packetbeat-topology\", \"server-ip\", nil)\n\tif err == nil {\n\t\tfor _, obj := range res.Hits.Hits {\n\t\t\tvar result QueryResult\n\t\t\terr = json.Unmarshal(obj, &result)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvar pub PublishedTopology\n\t\t\terr = json.Unmarshal(result.Source, &pub)\n\t\t\tif err != nil {\n\t\t\t\tlogp.Err(\"json.Unmarshal fails with: %s\", err)\n\t\t\t}\n\t\t\t// add mapping\n\t\t\tipaddrs := strings.Split(pub.IPs, \",\")\n\t\t\tfor _, addr := range ipaddrs {\n\t\t\t\tTopologyMapTmp[addr] = pub.Name\n\t\t\t}\n\t\t}\n\t} else {\n\t\tlogp.Err(\"Getting topology map fails with: %s\", err)\n\t}\n\n\t// update topology map\n\tout.TopologyMap = TopologyMapTmp\n\n\tlogp.Debug(\"output_elasticsearch\", \"Topology map %s\", out.TopologyMap)\n}", "func testInfo(line string) string {\n\tindex := strings.LastIndex(line, \":\")\n\tlocation := line[0:index]\n\tinfo := 
line[index+1:]\n\n\treturn fmt.Sprintf(`<div class=\"row\">\n\t<div class=\"col-lg-3 col-lg-pad text-danger item\">%s</div>\n\t<div class=\"col-lg-9 col-lg-pad text-danger item\">%s</div>\n\t</div>`, location, info)\n\n\t// return fmt.Sprintf(`<div class=\"row\"><div class=\"col-lg-10 col-lg-pad text-danger item\">%s</div></div>`,info)\n}", "func (sp *Spectre) updateTipVotes(voter *SpectreBlock, maxParent *SpectreBlock, votedPast *BlockDAG) {\n\tvb := votedPast.getBlock(voter.GetHash())\n\tvoterParents := vb.GetParents()\n\ttipStack := stack.New()\n\ttipSet := NewHashSet()\n\t// take out all other tips and add their votes to child\n\tfor _, h := range voterParents.GetMap() {\n\t\tif !h.(IBlock).GetHash().IsEqual(maxParent.GetHash()) && !h.(IBlock).GetHash().IsEqual(votedPast.getGenesis().GetHash()) {\n\t\t\ttipStack.Push(h)\n\t\t\ttipSet.Add(h.(IBlock).GetHash())\n\t\t}\n\t}\n\tfor tipStack.Len() > 0 {\n\t\ttipHash := tipStack.Pop().(hash.Hash)\n\t\ttb := votedPast.getBlock(&tipHash)\n\t\ttipVoter := votedPast.instance.(*Spectre).sblocks[*tb.GetHash()]\n\t\tif sp.hasVoted(tipHash) {\n\t\t\tv := sp.votes[tipHash]\n\t\t\tif v {\n\t\t\t\tvoter.Votes1 += 1\n\t\t\t} else {\n\t\t\t\tvoter.Votes2 += 1\n\t\t\t}\n\t\t} else {\n\t\t\tif tipVoter.Votes2 > tipVoter.Votes1 {\n\t\t\t\tvoter.Votes2 += 1\n\t\t\t} else if tipVoter.Votes2 < tipVoter.Votes1 {\n\t\t\t\tvoter.Votes1 += 1\n\t\t\t}\n\t\t}\n\n\t\t// find nodes exclusively exist in one tip. We save all the nodes visited in tipSet, for a note's parent p,\n\t\t// if all children of p exist in tipSet, p is exclusively in that node's future set.\n\t\t// e.g. 
in ByteBall2 with 21 as the virtual block, from 10's view, if we want to find 12's exclusive future,\n\t\t// we save 12 into tipSet first, the 14 and 15 are 12's exclusive parents since all their children\n\t\t// (just 12 in this case ) exist in tipSet\n\t\tfor _, ib := range tb.GetParents().GetMap() {\n\t\t\ttp := *ib.(IBlock).GetHash()\n\t\t\tif tipSet.Has(&tp) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tonly := true\n\t\t\ttpChildren := votedPast.getBlock(&tp).GetChildren()\n\t\t\tfor _, tc := range tpChildren.GetMap() {\n\t\t\t\tif !tipSet.Has(tc.(IBlock).GetHash()) {\n\t\t\t\t\tonly = false\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif only {\n\t\t\t\ttipStack.Push(tp)\n\t\t\t\ttipSet.Add(&tp)\n\t\t\t}\n\t\t}\n\t}\n}", "func (scsuo *SurveyCellScanUpdateOne) AddLatitude(f float64) *SurveyCellScanUpdateOne {\n\tif scsuo.addlatitude == nil {\n\t\tscsuo.addlatitude = &f\n\t} else {\n\t\t*scsuo.addlatitude += f\n\t}\n\treturn scsuo\n}", "func QuantizedAddToutput(value tf.DataType) QuantizedAddAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"Toutput\"] = value\n\t}\n}", "func (c *ClusterInfo) AddTask(task Task) {\n\tstoreID := task.TargetStoreID()\n\tif n, ok := c.Nodes[storeID]; ok {\n\t\tn.AddTask(task)\n\t}\n}", "func (p *packer) add(s image.Point) (placement, bool) {\n\tif place, ok := p.tryAdd(s); ok {\n\t\treturn place, true\n\t}\n\tp.newPage()\n\treturn p.tryAdd(s)\n}", "func (o InferenceClusterOutput) Location() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *InferenceCluster) pulumi.StringOutput { return v.Location }).(pulumi.StringOutput)\n}", "func (os *Offsets) Add(o Offset) {\n\tif *os == nil {\n\t\t*os = make(map[string]map[int32]Offset)\n\t}\n\tot := (*os)[o.Topic]\n\tif ot == nil {\n\t\tot = make(map[int32]Offset)\n\t\t(*os)[o.Topic] = ot\n\t}\n\tot[o.Partition] = o\n}" ]
[ "0.6330537", "0.5318772", "0.49911645", "0.49783835", "0.48557892", "0.48480755", "0.48384988", "0.48337668", "0.47259057", "0.47020352", "0.4641511", "0.4637678", "0.46198624", "0.45897534", "0.45555168", "0.45366836", "0.4514625", "0.4468566", "0.44587126", "0.44583687", "0.4456663", "0.44420767", "0.4425706", "0.4401271", "0.43917885", "0.4355223", "0.43452048", "0.4344374", "0.43419752", "0.43397602", "0.43316385", "0.4317755", "0.42769104", "0.42725813", "0.4236691", "0.42228225", "0.4222203", "0.42203644", "0.42154458", "0.42124233", "0.4211204", "0.42018887", "0.42004293", "0.41994143", "0.41926092", "0.41906", "0.41896123", "0.41829783", "0.41775477", "0.41497356", "0.41382343", "0.41321695", "0.41266826", "0.41228044", "0.41181293", "0.41103378", "0.4094445", "0.40884626", "0.40820882", "0.4079158", "0.40761444", "0.40705666", "0.4067122", "0.40664122", "0.40654054", "0.4063452", "0.40603024", "0.40483302", "0.40451664", "0.40401196", "0.4033232", "0.40309376", "0.40296683", "0.40225196", "0.40209386", "0.40191185", "0.40161744", "0.40136316", "0.4012288", "0.4012187", "0.4009358", "0.4008106", "0.40064394", "0.40044796", "0.40018496", "0.39971867", "0.39915672", "0.39756742", "0.39752147", "0.39745265", "0.39740333", "0.39721075", "0.39714912", "0.39549884", "0.39539158", "0.39466748", "0.39445427", "0.39438194", "0.3941695", "0.39407054" ]
0.66976255
0
Add a ViaResolveRequest to the response
func (bf *boardFilter) addVia(rid, dest string) *darwinref.ViaResolveRequest { viaRequest := &darwinref.ViaResolveRequest{ Crs: bf.station.Crs, Destination: dest, } bf.vias[rid] = viaRequest return viaRequest }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (dr *DarwinRefService) ViaResolveHandler(r *rest.Rest) error {\n\n\t// The query\n\tqueries := make(map[string]*darwinref.ViaResolveRequest)\n\n\t// The response\n\tresponse := make(map[string]*darwinref.Via)\n\n\t// Run the queries\n\tif err := r.Body(&queries); err != nil {\n\n\t\t// Fail safe by returning 500 but still a {} object\n\t\tr.Status(500).Value(response)\n\n\t} else {\n\n\t\tfor rid, query := range queries {\n\t\t\tif via := dr.reference.ResolveVia(query.Crs, query.Destination, query.Tiplocs); via != nil {\n\t\t\t\tresponse[rid] = via\n\t\t\t}\n\t\t}\n\n\t\tr.Status(200).\n\t\t\tJSON().\n\t\t\tValue(response)\n\t}\n\n\treturn nil\n}", "func (client BaseClient) ResolveResponder(resp *http.Response) (result SetObject, err error) {\n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusBadRequest,http.StatusForbidden,http.StatusNotFound,http.StatusInternalServerError),\n autorest.ByUnmarshallingJSON(&result.Value),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n }", "func (s *EventsService) Resolve(event *Event) (*EventResponse, error) {\n\treturn s.postEvent(event, EventTypeResolve)\n}", "func DecodeResolveRequest(mux goahttp.Muxer, decoder func(*http.Request) goahttp.Decoder) func(*http.Request) (interface{}, error) {\n\treturn func(r *http.Request) (interface{}, error) {\n\t\tvar (\n\t\t\tv2 string\n\t\t\tauth *string\n\t\t\terr error\n\t\t)\n\t\tv2 = r.URL.Query().Get(\"v\")\n\t\tif v2 == \"\" {\n\t\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"v\", \"query string\"))\n\t\t}\n\t\tauthRaw := r.Header.Get(\"Authorization\")\n\t\tif authRaw != \"\" {\n\t\t\tauth = &authRaw\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpayload := NewResolvePayload(v2, auth)\n\t\tif payload.Auth != nil {\n\t\t\tif strings.Contains(*payload.Auth, \" \") {\n\t\t\t\t// Remove authorization scheme prefix (e.g. 
\"Bearer\")\n\t\t\t\tcred := strings.SplitN(*payload.Auth, \" \", 2)[1]\n\t\t\t\tpayload.Auth = &cred\n\t\t\t}\n\t\t}\n\n\t\treturn payload, nil\n\t}\n}", "func CreateResolveResponse(v interface{}) (resp ResolveResp, err error) {\n\tswitch v := v.(type) {\n\tcase edb.Track:\n\t\tresp.Type = TrackType\n\t\tresp.EIDs = []string{v.EID()}\n\tcase []edb.Track:\n\t\tresp.Type = TrackType\n\t\tresp.EIDs = make([]string, len(v))\n\t\tfor i, track := range v {\n\t\t\tresp.EIDs[i] = track.EID()\n\t\t}\n\tdefault:\n\t\terr = errors.New(\"unknown value type passed\")\n\t}\n\n\treturn\n}", "func (i *Internal) Resolve(v interface{}) {\n\ti.Resolver <- v\n}", "func (a *Arguments) Resolve(s *db.Session, r *http.Request) error {\n\tif a.Employee != nil {\n\t\treturn a.Employee.Resolve(s, r)\n\t}\n\n\treturn nil\n}", "func (sr SimpleResolver) Resolve(w ResponseWriter, r Request) error {\n\tdomains := sr[r.Qtype]\n\tif domains == nil {\n\t\treturn nil\n\t}\n\tfor _, record := range domains[Domain(r.Name)] {\n\t\tif err := w.Add(record); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (client BaseClient) Resolve(ctx context.Context, xMsRequestid *uuid.UUID, xMsCorrelationid *uuid.UUID, xMsMarketplaceToken string) (result SetObject, err error) {\n if tracing.IsEnabled() {\n ctx = tracing.StartSpan(ctx, fqdn + \"/BaseClient.Resolve\")\n defer func() {\n sc := -1\n if result.Response.Response != nil {\n sc = result.Response.Response.StatusCode\n }\n tracing.EndSpan(ctx, sc, err)\n }()\n }\n req, err := client.ResolvePreparer(ctx, xMsRequestid, xMsCorrelationid, xMsMarketplaceToken)\n if err != nil {\n err = autorest.NewErrorWithError(err, \"azuremarketplacesaas.BaseClient\", \"Resolve\", nil , \"Failure preparing request\")\n return\n }\n\n resp, err := client.ResolveSender(req)\n if err != nil {\n result.Response = autorest.Response{Response: resp}\n err = autorest.NewErrorWithError(err, \"azuremarketplacesaas.BaseClient\", \"Resolve\", resp, \"Failure sending 
request\")\n return\n }\n\n result, err = client.ResolveResponder(resp)\n if err != nil {\n err = autorest.NewErrorWithError(err, \"azuremarketplacesaas.BaseClient\", \"Resolve\", resp, \"Failure responding to request\")\n }\n\n return\n }", "func (c FileDescriptor) Resolve(ctx context.Context) error {\n\treturn capnp.Client(c).Resolve(ctx)\n}", "func (r *ResolutionHandler) HandleResolveRequest(idOrDocument string) middleware.Responder {\n\n\tif !strings.HasPrefix(idOrDocument, r.namespace) {\n\t\treturn &BadRequestError{&models.Error{Message: swag.String(\"must start with supported namespace\")}}\n\t}\n\n\tdidDoc, err := r.docResolver.ResolveDocument(idOrDocument)\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"not found\") {\n\t\t\treturn &NotFoundError{&models.Error{Message: swag.String(\"document not found\")}}\n\t\t}\n\n\t\treturn &InternalServerError{&models.Error{Message: swag.String(err.Error())}}\n\t}\n\n\treturn &Response{Body: &models.Response{Body: didDoc}, Status: http.StatusOK}\n}", "func AltResolveRequest(req *libs.Request) {\n\ttarget := req.Target\n\tif len(req.Values) > 0 {\n\t\tfor _, value := range req.Values {\n\t\t\tfor k, v := range value {\n\t\t\t\tif strings.Contains(v, \"{{.\") && strings.Contains(v, \"}}\") {\n\t\t\t\t\tv = ResolveVariable(v, target)\n\t\t\t\t}\n\t\t\t\t// variable as a script\n\t\t\t\tif strings.Contains(v, \"(\") && strings.Contains(v, \")\") {\n\n\t\t\t\t\tnewValue := RunVariables(v)\n\t\t\t\t\tif len(newValue) > 0 {\n\t\t\t\t\t\ttarget[k] = newValue[0]\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\ttarget[k] = v\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// resolve all part again but with secondary template\n\treq.URL = AltResolveVariable(req.URL, target)\n\treq.Body = AltResolveVariable(req.Body, target)\n\treq.Headers = AltResolveHeader(req.Headers, target)\n\treq.Detections = AltResolveDetection(req.Detections, target)\n\treq.Generators = AltResolveDetection(req.Generators, target)\n\treq.Middlewares = 
AltResolveDetection(req.Middlewares, target)\n}", "func (a *EmployeeArguments) Resolve(s *db.Session, r *http.Request) error {\n\tif a.Work != nil {\n\t\treturn a.Work.Resolve(s, r)\n\t}\n\n\treturn nil\n}", "func (client BaseClient) ResolvePreparer(ctx context.Context, xMsRequestid *uuid.UUID, xMsCorrelationid *uuid.UUID, xMsMarketplaceToken string) (*http.Request, error) {\n const APIVersion = \"2.0.0\"\n queryParameters := map[string]interface{} {\n \"api-version\": APIVersion,\n }\n\n preparer := autorest.CreatePreparer(\n autorest.AsPost(),\n autorest.WithBaseURL(client.BaseURI),\n autorest.WithPath(\"/resolve\"),\n autorest.WithQueryParameters(queryParameters),\n autorest.WithHeader(\"Content-Type\", \"application/json\"))\n if xMsRequestid != nil {\n preparer = autorest.DecoratePreparer(preparer,\n autorest.WithHeader(\"x-ms-requestid\",autorest.String(xMsRequestid)))\n }\n if xMsCorrelationid != nil {\n preparer = autorest.DecoratePreparer(preparer,\n autorest.WithHeader(\"x-ms-correlationid\",autorest.String(xMsCorrelationid)))\n }\n if len(xMsMarketplaceToken) > 0 {\n preparer = autorest.DecoratePreparer(preparer,\n autorest.WithHeader(\"x-ms-marketplace-token\",autorest.String(xMsMarketplaceToken)))\n }\n return preparer.Prepare((&http.Request{}).WithContext(ctx))\n }", "func (a *WorkArguments) Resolve(s *db.Session, r *http.Request) error {\n\tg, _ := errgroup.WithContext(r.Context())\n\tg.Go(func() error {\n\t\tdepartment, err := database.ReadDepartment(s, []string{a.DepartmentName})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(department) == 0 {\n\t\t\treturn server.NewHTTPError(http.StatusBadRequest, \"department_name unknown\")\n\t\t}\n\t\ta.departmentID = department[0].ID\n\n\t\treturn nil\n\t})\n\n\tg.Go(func() error {\n\t\tappointment, err := database.ReadAppointment(s, []string{a.AppointmentName})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(appointment) == 0 {\n\t\t\treturn server.NewHTTPError(http.StatusBadRequest, 
\"appointment_name unknown\")\n\t\t}\n\t\ta.appointmentID = appointment[0].ID\n\n\t\treturn nil\n\t})\n\n\treturn g.Wait()\n}", "func EncodeResolveResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*sensorviews.SavedBookmark)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewResolveResponseBody(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}", "func (c DevSession) Resolve(ctx context.Context) error {\n\treturn capnp.Client(c).Resolve(ctx)\n}", "func (o *Options) resolve() {\n\to.ContentSource = filepath.ToURL(o.ContentSource)\n\n\tif o.Address == \"\" {\n\t\to.Address = \":8080\"\n\t}\n\n\tif o.TemplateDataFunc == nil {\n\t\to.TemplateDataFunc = tpl.StdDataFunc\n\t}\n\n\tif o.Endpoints == nil {\n\t\to.Endpoints = make(api.Endpoints, 0)\n\t}\n}", "func (r *Resolver) Resolve(qname string, qtype uint16) []dns.RR {\n\tlog.WithFields(log.Fields{\n\t\t\"qname\": qname,\n\t\t\"qtype\": dns.TypeToString[qtype],\n\t}).Info(\"starting a resolver request\")\n\n\ttmpRRs := r.resolver.Resolve(dns.Fqdn(qname), dns.TypeToString[qtype])\n\n\tlog.WithFields(log.Fields{\n\t\t\"qname\": qname,\n\t\t\"qtype\": dns.TypeToString[qtype],\n\t}).Trace(tmpRRs)\n\n\trrs := r.mapRRFromDnsrIntoRR(tmpRRs)\n\n\tlog.WithFields(log.Fields{\n\t\t\"qname\": qname,\n\t\t\"qtype\": dns.TypeToString[qtype],\n\t\t\"answers\": rrs,\n\t}).Info(\"get the answer for the resolve query\")\n\n\treturn rrs\n}", "func (r *ResponseBlocklistName) Resolve(q *dns.Msg, ci ClientInfo) (*dns.Msg, error) {\n\tanswer, err := r.resolver.Resolve(q, ci)\n\tif err != nil || answer == nil {\n\t\treturn answer, err\n\t}\n\treturn r.blockIfMatch(q, answer, ci)\n}", "func (e Engine) Resolve(publicID string, sm *semaphore.Weighted) error {\n\t_, err := e.launchResolution(publicID, true, sm)\n\treturn err\n}", "func 
(r *ResolverService) Resolve(d *Device) {\n\tr.in <- d\n}", "func (l Lookup) Resolve(request *http.Request) (string, error) {\n\tlogger.Debug(\"processing with existing session\")\n\tpath := request.URL.Path\n\tsessionID := strings.ReplaceAll(path, \"/wd/hub/session/\", \"\")\n\tif len(sessionID) < SessionLength {\n\t\treturn \"\", errors.New(\"invalid session in path \" + path)\n\t}\n\n\tsessionID = sessionID[:SessionLength]\n\tIP, exists := l.pool.IP(sessionID)\n\tif !exists {\n\t\treturn \"\", errors.New(\"pod for session \" + sessionID + \" not found\")\n\t}\n\n\treturn \"http://\" + IP + \":\" + l.config.PodPort, nil\n}", "func (*visibilityExtension) Resolve(c *config.Config, ix *resolve.RuleIndex, rc *repo.RemoteCache, r *rule.Rule, imports interface{}, from label.Label) {\n}", "func (c Controller) Resolve(ctx context.Context) error {\n\treturn capnp.Client(c).Resolve(ctx)\n}", "func (client BaseClient) ResolveSender(req *http.Request) (*http.Response, error) {\n return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n }", "func (c *Client) Resolve(ip net.IP) (net.HardwareAddr, error) {\n\terr := c.Request(ip)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Loop and wait for replies\n\tfor {\n\t\tarp, _, err := c.Read()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif arp.Operation != OperationReply || !arp.SenderIP.Equal(ip) {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn arp.SenderHardwareAddr, nil\n\t}\n}", "func DecodeAddRequest(_ context.Context, r *http.Request) (req interface{}, err error) {\n\tt := da.DA{}\n\terr = json.NewDecoder(r.Body).Decode(&t)\n\treq = endpoints.AddRequest{Req: t}\n\treturn req, err\n}", "func (v *vine) Resolve(ctx *context.T, protocol, address string) (string, []string, error) {\n\tn, a, tag, baseProtocol, err := parseDialingAddress(ctx, address)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tn, resAddresses, err := 
baseProtocol.Resolve(ctx, n, a)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\taddresses := make([]string, 0, len(resAddresses))\n\tfor _, a := range resAddresses {\n\t\taddresses = append(addresses, createDialingAddress(n, a, tag))\n\t}\n\treturn protocol, addresses, nil\n}", "func (cr *ClientResolver) Resolve(request quantum.ResolveRequest) (quantum.ClientConn, error) {\n\t// Get ResolveResults\n\tresults, err := cr.resolveResults(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(results) == 0 {\n\t\treturn nil, quantum.NoAgentsFromRequest(request)\n\t}\n\n\t// Ping each one, return the first one to respond\n\treturn cr.resolveClient(results)\n}", "func (*Privilege) Resolve(ctx sayori.Context) {\n\tif ctx.Err != nil {\n\t\t_, _ = ctx.Session.ChannelMessageSend(ctx.Message.ChannelID, ctx.Err.Error())\n\t}\n}", "func (t Resolver) Resolution(ctx context.Context) (c *Challenge, err error) {\n\tvar (\n\t\tconn *grpc.ClientConn\n\t\tp *agent.Peer\n\t\tresp *ResolutionResponse\n\t\treq = &ResolutionRequest{}\n\t)\n\n\t// here we select a node based on the a disciminator. 
that node is responsible\n\t// for managing the acme account key, registration, etc.\n\tif p, err = agent.NodeToPeer(t.rendezvous.Get([]byte(discriminator))); err != nil {\n\t\treturn c, err\n\t}\n\n\tif p.Name == t.local.Name {\n\t\tif resp, err = t.cache.Resolution(ctx, req); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"disk cache\")\n\t\t}\n\t\treturn resp.Challenge, nil\n\t}\n\n\tif conn, err = dialers.NewDirect(agent.AutocertAddress(p)).DialContext(ctx, t.dialer.Defaults()...); err != nil {\n\t\treturn c, err\n\t}\n\tdefer conn.Close()\n\n\tif resp, err = NewACMEClient(conn).Resolution(ctx, req); err != nil {\n\t\treturn c, err\n\t}\n\n\treturn resp.Challenge, err\n}", "func (*ResolveResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_api_proto_rawDescGZIP(), []int{4}\n}", "func (cr *ClientResolver) resolveResults(rr quantum.ResolveRequest) (results []resolveResult, err error) {\n\tif rr.Agent == \"\" {\n\t\treturn cr.resolveWithDNS(rr)\n\t}\n\n\treturn cr.resolveWithAPI(rr)\n}", "func (resolver) Resolve(c *config.Config, ix *resolve.RuleIndex, rc *repo.RemoteCache, r *rule.Rule, from label.Label) {\n\tfmt.Println(\"Resolve:\", r.Name(), \"from\", from)\n}", "func (p *promise) Resolve(value interface{}) {\n\tp.fulfill(true, value, nil)\n}", "func CreateDescribeDomainResolveResponse() (response *DescribeDomainResolveResponse) {\n\tresponse = &DescribeDomainResolveResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func resolve(ctx context.Context, r resolver, name string, options opts.ResolveOpts) (path.Path, error) {\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\tvar (\n\t\tp path.Path\n\t\tcacheTag *string\n\t\tproof [][]byte\n\t\terr = ErrResolveFailed\n\t)\n\n\tresCh := resolveAsync(ctx, r, name, options)\n\n\tfor res := range resCh {\n\t\tp, cacheTag, proof, err = res.Path, res.CacheTag, res.Proof, res.Err\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif cacheTag != nil {\n\t\tif ct, ok := 
ctx.Value(\"cache-tag\").(*string); ok {\n\t\t\t*ct = *cacheTag\n\t\t}\n\t}\n\tif pw, ok := ctx.Value(\"proxy-preamble\").(coreiface.ProofWriter); ok {\n\t\tfor _, p := range proof {\n\t\t\tpw.WriteChunk(p)\n\t\t}\n\t}\n\n\treturn p, err\n}", "func (i *IpldRawNode) Resolve(p []string) (interface{}, []string, error) {\n\treturn nil, nil, nil\n}", "func (m *MockupAssetProvider) AddOpResponse(op entities.AgentOpResponse) derrors.Error{\n\tm.Lock()\n\tdefer m.Unlock()\n\t\n\tm.pendingResult[op.OperationId] = op\n\treturn nil\n}", "func (*DocumentLinkResolveResponse) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{37}\n}", "func (d *Deferred) Resolve(data interface{}) *js.Object {\n\treturn d.Call(\"resolve\", data)\n}", "func (r *Resolver) Resolve(src *spb.VName) *cpb.MarkedSource {\n\treturn r.ResolveTicket(kytheuri.ToString(src))\n}", "func (*CompletionItemResolveResponse) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{20}\n}", "func (r *AssetResolver) Resolve(path string) Response {\n\tfor _, resolver := range r.chain {\n\t\tif response := resolver(path); response != nil {\n\t\t\treturn response\n\t\t}\n\t}\n\treturn nil\n}", "func (r *Resolver) Resolve(did string, parsed *did.DID, res resolver.Resolver) (*resolver.Document, error) {\n\tif parsed.Method != r.Method() {\n\t\treturn nil, fmt.Errorf(\"unknown did method: '%s'\", parsed.Method)\n\t}\n\tvar c = cid.Undef\n\tvar err error\n\tversion := getVersion(parsed.Query)\n\tif version != \"\" {\n\t\tc, err = cid.Parse(version)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resolve(r.client, parsed.ID, c)\n}", "func WithResolveFunc(f func(string) (resolver.Address, error)) Option {\n\treturn func(r *Resolver) {\n\t\tr.resolveFunc = f\n\t}\n}", "func (*CodeLensResolveRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{33}\n}", "func (*CodeLensResolveResponse) Descriptor() 
([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{34}\n}", "func (*ResolveRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_api_proto_rawDescGZIP(), []int{1}\n}", "func Resolver(r resolve.Resolver) Option {\n\treturn func(p *proxy) { p.resolver = r }\n}", "func (t *targetBuilder) addResolvedDependency(dep string) *targetBuilder {\n\tt.resolvedDeps.Add(dep)\n\treturn t\n}", "func (e ResolveResponseValidationError) Cause() error { return e.cause }", "func (r ProviderRef) Resolve() (Provider, error) {\n\tvar err error\n\terr = ErrorOnInvalid(r)\n\tif err.(ValidationErrors).HasErrors() {\n\t\treturn Provider{}, err\n\t}\n\tprovider := r.env.Providers[r.ref]\n\treturn Provider{\n\t\tName: provider.Name,\n\t\tcRef: provider.cRef,\n\t\tParameters: r.parameters.inherit(provider.Parameters),\n\t\tEnvVars: r.envVars.inherit(provider.EnvVars),\n\t\tProxy: r.proxy.inherit(provider.Proxy),\n\t}, nil\n}", "func (templateEngine *TemplateEngine) Resolve(mainTemplate string, bindingDataList *list.List) (interface{}, error) {\n\ttemplateEngine.stats.init()\n\tmainTemplateJSON, err := templateEngine.templateLoader.Load(mainTemplate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\teffectiveBindingDataList := list.New()\n\teffectiveBindingDataList.PushBackList(bindingDataList)\n\tif templateEngine.env != nil {\n\t\teffectiveBindingDataList.PushBack(templateEngine.env)\n\t}\n\ttemplateEngine.dupParams = checkDuplicatedBindingData(effectiveBindingDataList)\n\tresolvedJSON, err := templateEngine.elementResolver.ResolveElement(mainTemplateJSON, effectiveBindingDataList)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttemplateEngine.templateLoader.Unload(mainTemplate)\n\treturn unescapeJSON(resolvedJSON), nil\n}", "func (c *Client) AcceptRequestWithPublicInvitation(piID string, inv *didexchange.Invitation, to *introduce.To) error {\n\treturn c.service.Continue(piID, WithPublicInvitation(inv, to))\n}", "func registerResourceMitigation(request 
*libcoap.Pdu, typ reflect.Type, controller controllers.ControllerInterface, session *libcoap.Session,\n context *libcoap.Context, is_unknown bool) (interface{}, string, error) {\n\n hex := hex.Dump(request.Data)\n if request.Code == libcoap.RequestPut && !strings.Contains(hex, string(libcoap.IETF_MITIGATION_SCOPE_HEX)) {\n return nil, \"\", errors.New(\"Body data MUST be mitigation request\")\n }\n body, err := messages.UnmarshalCbor(request, reflect.TypeOf(messages.MitigationRequest{}))\n if err != nil {\n return nil, \"\", err\n }\n\n var resourcePath string\n\n // Create sub resource to handle observation on behalf of Unknown resource in case of mitigation PUT\n if is_unknown && request.Code == libcoap.RequestPut {\n p := request.PathString()\n resourcePath = p\n r := libcoap.ResourceInit(&p, 0)\n r.TurnOnResourceObservable()\n r.RegisterHandler(libcoap.RequestGet, toMethodHandler(controller.HandleGet, typ, controller, !is_unknown))\n r.RegisterHandler(libcoap.RequestPut, toMethodHandler(controller.HandlePut, typ, controller, !is_unknown))\n r.RegisterHandler(libcoap.RequestPost, toMethodHandler(controller.HandlePost, typ, controller, !is_unknown))\n r.RegisterHandler(libcoap.RequestDelete, toMethodHandler(controller.HandleDelete, typ, controller, !is_unknown))\n context.AddResource(r)\n log.Debugf(\"Create sub resource to handle observation later : uri-path=%+v\", p)\n // Create sub resource for handle get all with observe option\n pa := strings.Split(p, \"/mid\")\n if len(pa) > 1 {\n resourceAll := context.GetResourceByQuery(&pa[0])\n if resourceAll == nil {\n ra := libcoap.ResourceInit(&pa[0], 0)\n ra.TurnOnResourceObservable()\n ra.RegisterHandler(libcoap.RequestGet, toMethodHandler(controller.HandleGet, typ, controller, !is_unknown))\n context.AddResource(ra)\n log.Debugf(\"Create observer in sub-resource with query: %+v\", pa[0])\n }\n }\n }\n return body, resourcePath, nil\n}", "func (svc *Service) Resolve(ctx context.Context, viewID string, resolves 
[]vcsvcs.SturdyRebaseResolve) (*sync.RebaseStatusResponse, error) {\n\tview, err := svc.viewRepo.Get(viewID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar rebaseStatusResponse *sync.RebaseStatusResponse\n\n\tresolveSyncFunc := func(repo vcsvcs.RepoWriter) error {\n\t\trb, err := repo.OpenRebase()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := rb.ResolveFiles(resolves); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tconflicts, rebasedCommits, err := rb.Continue()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif conflicts {\n\t\t\treturn fmt.Errorf(\"unexpected conflict after conflict resolution\")\n\t\t}\n\t\tif len(rebasedCommits) != 1 {\n\t\t\treturn fmt.Errorf(\"unexpected number of rebased commits\")\n\t\t}\n\n\t\t// No conflicts\n\n\t\tif err := svc.complete(ctx, repo, view.CodebaseID, view.WorkspaceID, view.ID, &rebasedCommits[0].OldCommitID, rebasedCommits); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trebaseStatusResponse = &sync.RebaseStatusResponse{HaveConflicts: false}\n\t\treturn nil\n\t}\n\n\terr = svc.executorProvider.New().\n\t\tAllowRebasingState(). 
// allowed to get the state of existing conflicts\n\t\tWrite(resolveSyncFunc).\n\t\tExecView(view.CodebaseID, view.ID, \"syncResolve2\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif rebaseStatusResponse == nil {\n\t\treturn nil, fmt.Errorf(\"no rebase status found\")\n\t}\n\n\treturn rebaseStatusResponse, nil\n}", "func (*CompletionItemResolveRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{19}\n}", "func (r *Route) ResolveWith(addr tcpip.LinkAddress) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tr.remoteLinkAddress = addr\n}", "func (e Engine) SyncResolve(publicID string, sm *semaphore.Weighted) (*resolution.Resolution, error) {\n\treturn e.launchResolution(publicID, false, sm)\n}", "func (r *Resolver) Resolve(context site.Context, u *url.URL) (Context, error) {\n\tdomainMap := r.urlMapLoader.Load(context)\n\tpageID, ok := domainMap[u.Path]\n\tif !ok {\n\t\treturn Context{}, fmt.Errorf(\"page id not found for request %s with path %s\", u.String(), u.Path)\n\t}\n\treturn Context{PageID: pageID}, nil\n}", "func decodeAddRequest(_ context.Context, r *http1.Request) (interface{}, error) {\n\treq := endpoint.AddRequest{\n\t\tio.Department{\n\t\t\tDepartmentName: r.FormValue(\"DepartmentName\"),\n\t\t},\n\t}\n\treturn req, nil\n}", "func (f *Flow) AddRequest(r *Request) {\n\tgo func() {\n\t\tf.addRequest <- r\n\t}()\n}", "func (*DocumentLinkResolveRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{36}\n}", "func Resolve(q string) (ip net.IP, port uint16, target string, err error) {\n c := new(dns.Client)\n m := new(dns.Msg)\n m.SetQuestion(dns.Fqdn(q), dns.TypeSRV)\n m.RecursionDesired = true\n\n dns_server := \"127.0.0.1:8600\"\n if len(os.Args) > 1 {\n dns_server = os.Args[1]\n }\n fmt.Printf(\"Using dns server: %v\\n\", dns_server)\n\n r, _, err := c.Exchange(m, dns_server)\n if r == nil {\n log.Fatalf(\"error: %s\\n\", err.Error())\n }\n\n if r.Rcode != 
dns.RcodeSuccess {\n log.Fatalf(\"dns lookup failed\\n\")\n }\n\n for _, srv := range r.Answer {\n port = srv.(*dns.SRV).Port\n target = srv.(*dns.SRV).Target\n\n fmt.Printf(\"%v %v\\n\", port, target)\n\n for _, a := range r.Extra {\n if target != a.(*dns.A).Hdr.Name {\n continue\n }\n ip = a.(*dns.A).A\n fmt.Printf(\"%v %v\\n\", target, ip)\n return\n }\n }\n\n log.Fatalf(\"no DNS record found\\n\")\n return\n}", "func (r *ociClient) Resolve(ctx context.Context, repoCtx cdv2.RepositoryContext, name, version string) (*cdv2.ComponentDescriptor, ctf.BlobResolver, error) {\n\t//return cached component descriptor, if availalbe\n\tif cd, blobResolver := r.resolveFromPredefined(repoCtx, name, version); cd != nil {\n\t\treturn cd, blobResolver, nil\n\t}\n\t// resolve remote component descriptor\n\tcd, blobResolver, err := r.resolver.WithRepositoryContext(repoCtx).Resolve(ctx, name, version)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\t// automatically add blueprint resolver\n\taggBlobResolver, err := ctf.AggregateBlobResolvers(blobResolver, &BlueprintResolver{ociClient: r.ociClient})\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"unable to add blueprint resolver\")\n\t}\n\treturn cd, aggBlobResolver, nil\n}", "func ResolveAddress(lookupIPFunc lookup.LookupIPFunc, dataplane *core_mesh.DataplaneResource) (*core_mesh.DataplaneResource, error) {\n\tvar ips, aips []net.IP\n\tvar err error\n\tvar update_ip, update_aip bool = false, false\n\tif ips, err = lookupIPFunc(dataplane.Spec.Networking.Address); err != nil {\n\t\treturn nil, err\n\t}\n\tif len(ips) == 0 {\n\t\treturn nil, errors.Errorf(\"can't resolve address %v\", dataplane.Spec.Networking.Address)\n\t}\n\tif dataplane.Spec.Networking.Address != ips[0].String() {\n\t\tupdate_ip = true\n\t}\n\tif dataplane.Spec.Networking.AdvertisedAddress != \"\" {\n\t\tif aips, err = lookupIPFunc(dataplane.Spec.Networking.AdvertisedAddress); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(aips) == 0 
{\n\t\t\treturn nil, errors.Errorf(\"can't resolve address %v\", dataplane.Spec.Networking.AdvertisedAddress)\n\t\t}\n\t\tif dataplane.Spec.Networking.AdvertisedAddress != aips[0].String() {\n\t\t\tupdate_aip = true\n\t\t}\n\t}\n\n\tif update_ip || update_aip { // only if we resolve any address, in most cases this is IP not a hostname\n\t\tdpSpec := proto.Clone(dataplane.Spec).(*mesh_proto.Dataplane)\n\t\tif update_ip {\n\t\t\tdpSpec.Networking.Address = ips[0].String()\n\t\t}\n\t\tif update_aip {\n\t\t\tdpSpec.Networking.AdvertisedAddress = aips[0].String()\n\t\t}\n\t\treturn &core_mesh.DataplaneResource{\n\t\t\tMeta: dataplane.Meta,\n\t\t\tSpec: dpSpec,\n\t\t}, nil\n\t}\n\treturn dataplane, nil\n}", "func (_m *Backend) ResolveVodAppeal(ctx context.Context, request *models.ResolveVodAppealRequest) error {\n\tret := _m.Called(ctx, request)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *models.ResolveVodAppealRequest) error); ok {\n\t\tr0 = rf(ctx, request)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (r *Resolver) Resolve(name string) (resolver.Address, error) {\n\tif r.resolveFunc != nil {\n\t\treturn r.resolveFunc(name)\n\t}\n\treturn resolver.Address{}, fmt.Errorf(\"resolveFunc: %w\", ens.ErrNotImplemented)\n}", "func (m *podManager) addRequest(request *cniserver.PodRequest) {\n\tm.requests <- request\n}", "func (m *ManagedDeviceItemRequestBuilder) RequestRemoteAssistance()(*if1e16de23b7ea9301d8d5208c98e249a57e7e1e4df4785a249e053121429dd1e.RequestRemoteAssistanceRequestBuilder) {\n return if1e16de23b7ea9301d8d5208c98e249a57e7e1e4df4785a249e053121429dd1e.NewRequestRemoteAssistanceRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (f *File) Resolve(rslv VersionResolver) (*ResolvedFile, error) {\n\treturn f.ResolveWith(rslv, common.TemplateArgs())\n}", "func (r *Cache) Resolve(q *dns.Msg, ci ClientInfo) (*dns.Msg, error) {\n\tif len(q.Question) < 1 {\n\t\treturn nil, errors.New(\"no question in 
query\")\n\t}\n\t// While multiple questions in one DNS message is part of the standard,\n\t// it's not actually supported by servers. If we do get one of those,\n\t// just pass it through and bypass caching.\n\tif len(q.Question) > 1 {\n\t\treturn r.resolver.Resolve(q, ci)\n\t}\n\n\tlog := Log.WithFields(logrus.Fields{\"client\": ci.SourceIP, \"qname\": qName(q)})\n\n\t// Returned an answer from the cache if one exists\n\ta, ok := r.answerFromCache(q)\n\tif ok {\n\t\tlog.Trace(\"cache-hit\")\n\t\treturn a, nil\n\t}\n\n\tlog.WithField(\"resolver\", r.resolver.String()).Trace(\"cache-miss, forwarding\")\n\n\t// Get a response from upstream\n\ta, err := r.resolver.Resolve(q, ci)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Put the upstream response into the cache and return it\n\tr.storeInCache(a)\n\treturn a, nil\n}", "func (v *Resolver) Resolve(tr *trace.Trace, addr string) ([]Recipient, error) {\n\ttr = tr.NewChild(\"Alias.Resolve\", addr)\n\tdefer tr.Finish()\n\treturn v.resolve(0, addr, tr)\n}", "func (m *MockupAssetProvider) AddECOpResponse(op entities.EdgeControllerOpResponse) derrors.Error{\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tm.pendingECResult[op.OperationId] = op\n\treturn nil\n}", "func Resolve(resolution interface{}) *Promise {\n\treturn New(func(resolve func(interface{}), reject func(error)) {\n\t\tresolve(resolution)\n\t})\n}", "func (r AssociateResolverEndpointIpAddressRequest) Send(ctx context.Context) (*AssociateResolverEndpointIpAddressOutput, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r.Request.Data.(*AssociateResolverEndpointIpAddressOutput), nil\n}", "func (service *ResultService) ResolveConflict(in *proto_job.ResultRequest) (*proto_job.ResultReply, error) {\n\tresult, err := service.accessor.GetByID(uint(in.Id))\n\n\tif err != nil {\n\t\treturn nil, err\n\t} else if result.ID == 0 {\n\t\tlog.Fatal(\"Conflict not found in 
SetResultState\")\n\t}\n\n\tresult.State = \"RESOLVED\"\n\tresult.TaxonID = uint(in.TaxonId)\n\terr = service.accessor.Save(result)\n\n\treturn converters.ResultModelToProto(result), err\n}", "func (r *ReportResolvable) Resolve(ctx context.Context) (interface{}, error) {\n\tctx = SetupContext(ctx, r.Path.Capture, r.Config)\n\n\tc, err := capture.ResolveGraphics(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer analytics.SendTiming(\"resolve\", \"report\")(analytics.Size(len(c.Commands)))\n\n\tsd, err := SyncData(ctx, r.Path.Capture)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfilter, err := buildFilter(ctx, r.Path.Capture, r.Path.Filter, sd, r.Config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuilder := service.NewReportBuilder()\n\n\tvar currentCmd uint64\n\titems := []*service.ReportItemRaw{}\n\tstate := c.NewState(ctx)\n\tstate.NewMessage = func(s log.Severity, m *stringtable.Msg) uint32 {\n\t\titems = append(items, r.newReportItem(s, currentCmd, m))\n\t\treturn uint32(len(items) - 1)\n\t}\n\tstate.AddTag = func(i uint32, t *stringtable.Msg) {\n\t\titems[i].Tags = append(items[i].Tags, t)\n\t}\n\n\tissues := map[api.CmdID][]replay.Issue{}\n\n\tif r.Path.Device != nil {\n\t\t// Request is for a replay report too.\n\t\tintent := replay.Intent{\n\t\t\tCapture: r.Path.Capture,\n\t\t\tDevice: r.Path.Device,\n\t\t}\n\n\t\tmgr := replay.GetManager(ctx)\n\n\t\t// Capture can use multiple APIs.\n\t\t// Iterate the APIs in use looking for those that support the\n\t\t// QueryIssues interface. 
Call QueryIssues for each of these APIs.\n\t\thints := &service.UsageHints{Background: true}\n\t\tfor _, a := range c.APIs {\n\t\t\tif qi, ok := a.(replay.QueryIssues); ok {\n\t\t\t\tapiIssues, err := qi.QueryIssues(ctx, intent, mgr, 1, r.Path.DisplayToSurface, hints)\n\t\t\t\tif err != nil {\n\t\t\t\t\tissue := replay.Issue{\n\t\t\t\t\t\tCommand: api.CmdNoID,\n\t\t\t\t\t\tSeverity: service.Severity_ErrorLevel,\n\t\t\t\t\t\tError: err,\n\t\t\t\t\t}\n\t\t\t\t\tissues[api.CmdNoID] = append(issues[api.CmdNoID], issue)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfor _, issue := range apiIssues {\n\t\t\t\t\tissues[issue.Command] = append(issues[issue.Command], issue)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Gather report items from the state mutator, and collect together all the\n\t// APIs in use.\n\tapi.ForeachCmd(ctx, c.Commands, true, func(ctx context.Context, id api.CmdID, cmd api.Cmd) error {\n\t\titems, currentCmd = items[:0], uint64(id)\n\n\t\tif as := cmd.Extras().Aborted(); as != nil && as.IsAssert {\n\t\t\titems = append(items, r.newReportItem(log.Fatal, uint64(id),\n\t\t\t\tmessages.ErrTraceAssert(as.Reason)))\n\t\t}\n\n\t\tif err := cmd.Mutate(ctx, id, state, nil /* builder */, nil /* watcher */); err != nil {\n\t\t\tif !api.IsErrCmdAborted(err) {\n\t\t\t\titems = append(items, r.newReportItem(log.Error, uint64(id),\n\t\t\t\t\tmessages.ErrInternalError(err.Error())))\n\t\t\t}\n\t\t}\n\n\t\tif filter(id, cmd, state) {\n\t\t\tfor _, item := range items {\n\t\t\t\titem.Tags = append(item.Tags, getCommandNameTag(cmd))\n\t\t\t\tbuilder.Add(ctx, item)\n\t\t\t}\n\t\t\tfor _, issue := range issues[id] {\n\t\t\t\titem := r.newReportItem(log.Severity(issue.Severity), uint64(issue.Command),\n\t\t\t\t\tmessages.ErrReplayDriver(issue.Error.Error()))\n\t\t\t\tif int(issue.Command) < len(c.Commands) {\n\t\t\t\t\titem.Tags = append(item.Tags, getCommandNameTag(c.Commands[issue.Command]))\n\t\t\t\t}\n\t\t\t\tbuilder.Add(ctx, item)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn 
builder.Build(), nil\n}", "func (r *Resolver) Resolve(name string) Extension {\n\te, ok := r.Execers[name]\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn e\n}", "func CreateDescribeDomainResolveRequest() (request *DescribeDomainResolveRequest) {\n\trequest = &DescribeDomainResolveRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Cloudfw\", \"2017-12-07\", \"DescribeDomainResolve\", \"cloudfirewall\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (c PersistentIdentity) Resolve(ctx context.Context) error {\n\treturn capnp.Client(c).Resolve(ctx)\n}", "func AssetResolverResponse(a *AssetResolver) ContextHandlerFunc {\n\treturn func(ctx *RequestContext) Response {\n\t\tpath := ctx.Request.URL.Path\n\t\tif !strings.HasPrefix(path, \"/\") {\n\t\t\tpath = \"/\" + path\n\t\t}\n\n\t\tif response := a.Resolve(path); response != nil {\n\t\t\treturn response\n\t\t}\n\n\t\treturn BlankResponse(http.StatusNotFound)\n\t}\n}", "func (c *CreateOptions) resolve(ctx context.Context) error {\n\tif err := c.resolveFromFiles(); err != nil {\n\t\treturn err\n\t}\n\n\tif c.Interactive {\n\t\treturn c.resolveWithSurvey()\n\t}\n\n\tc.setDefaults()\n\n\tif c.Validate {\n\t\treturn c.validate(ctx)\n\t}\n\n\treturn nil\n}", "func (r *resolver) TradeStageAddReq() gql.TradeStageAddReqResolver {\n\treturn r.tradeStageAddReqRes\n}", "func NewResolverResolveInvocation(name string, ident1 []string, ident2 merry.Error) *ResolverResolveInvocation {\n\tinvocation := new(ResolverResolveInvocation)\n\n\tinvocation.Parameters.Name = name\n\n\tinvocation.Results.Ident1 = ident1\n\tinvocation.Results.Ident2 = ident2\n\n\treturn invocation\n}", "func (r *Resolver) Register(f Factory) {\n\tr.fs = append(r.fs, f)\n}", "func (r *RoundRobin) Resolve(q *dns.Msg, ci ClientInfo) (*dns.Msg, error) {\n\tr.mu.Lock()\n\tresolver := r.resolvers[r.current]\n\tr.current = (r.current + 1) % len(r.resolvers)\n\tr.mu.Unlock()\n\tLog.WithFields(logrus.Fields{\n\t\t\"client\": 
ci.SourceIP,\n\t\t\"qname\": qName(q),\n\t\t\"resolver\": resolver.String(),\n\t}).Trace(\"forwarding query to resolver\")\n\treturn resolver.Resolve(q, ci)\n}", "func Resolve(rc RectificationClient, state State) error {\n\treturn ResolveFilteredDeployments(rc, state, nil)\n}", "func (c *Client) ResolveReference(u *url.URL) *url.URL {\n\tabsurl := c.Endpoint.ResolveReference(u)\n\tif len(c.Query) > 0 {\n\t\tabsurl.RawQuery = mergeQueries(c.Query, absurl.Query())\n\t}\n\treturn absurl\n}", "func (c *Client) Resolver(_ context.Context, client *http.Client, plainHTTP bool) (remotes.Resolver, error) {\n\treturn docker.NewResolver(docker.ResolverOptions{\n\t\tCredentials: c.Credential,\n\t\tClient: client,\n\t\tPlainHTTP: plainHTTP,\n\t}), nil\n}", "func respondToRequest(proxy *envoy.Proxy, discoveryRequest *xds_discovery.DiscoveryRequest) bool {\n\tvar err error\n\tvar requestVersion uint64\n\tvar requestNonce string\n\tvar lastVersion uint64\n\tvar lastNonce string\n\n\tlog.Debug().Msgf(\"Proxy SerialNumber=%s PodUID=%s: Request %s [nonce=%s; version=%s; resources=%v] last sent [nonce=%s; version=%d]\",\n\t\tproxy.GetCertificateSerialNumber(), proxy.GetPodUID(), discoveryRequest.TypeUrl,\n\t\tdiscoveryRequest.ResponseNonce, discoveryRequest.VersionInfo, discoveryRequest.ResourceNames,\n\t\tproxy.GetLastSentNonce(envoy.TypeURI(discoveryRequest.TypeUrl)), proxy.GetLastSentVersion(envoy.TypeURI(discoveryRequest.TypeUrl)))\n\n\tif discoveryRequest.ErrorDetail != nil {\n\t\tlog.Error().Msgf(\"Proxy SerialNumber=%s PodUID=%s: [NACK] err: \\\"%s\\\" for nonce %s, last version applied on request %s\",\n\t\t\tproxy.GetCertificateSerialNumber(), proxy.GetPodUID(), discoveryRequest.ErrorDetail, discoveryRequest.ResponseNonce, discoveryRequest.VersionInfo)\n\t\treturn false\n\t}\n\n\ttypeURL, ok := envoy.ValidURI[discoveryRequest.TypeUrl]\n\tif !ok {\n\t\tlog.Error().Msgf(\"Proxy SerialNumber=%s PodUID=%s: Unknown/Unsupported URI: %s\",\n\t\t\tproxy.GetCertificateSerialNumber(), 
proxy.GetPodUID(), discoveryRequest.TypeUrl)\n\t\treturn false\n\t}\n\n\t// It is possible for Envoy to return an empty VersionInfo.\n\t// When that's the case - start with 0\n\tif discoveryRequest.VersionInfo != \"\" {\n\t\tif requestVersion, err = strconv.ParseUint(discoveryRequest.VersionInfo, 10, 64); err != nil {\n\t\t\t// It is probable that Envoy responded with a VersionInfo we did not understand\n\t\t\tlog.Error().Err(err).Msgf(\"Proxy SerialNumber=%s PodUID=%s: Error parsing DiscoveryRequest with TypeURL=%s VersionInfo=%s (%v)\",\n\t\t\t\tproxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), discoveryRequest.VersionInfo, err)\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// Set last version applied\n\tproxy.SetLastAppliedVersion(typeURL, requestVersion)\n\n\trequestNonce = discoveryRequest.ResponseNonce\n\t// Handle first request on stream, should always reply to empty nonce\n\tif requestNonce == \"\" {\n\t\tlog.Debug().Msgf(\"Proxy SerialNumber=%s PodUID=%s: Empty nonce for %s, should be first message on stream (req resources: %v)\",\n\t\t\tproxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), discoveryRequest.ResourceNames)\n\t\treturn true\n\t}\n\n\t// The version of the config received along with the DiscoveryRequest (ackVersion)\n\t// is what the Envoy proxy may be acknowledging. It is acknowledging\n\t// and not requesting when the ackVersion is <= what we last sent.\n\t// It is possible however for a proxy to have a version that is higher\n\t// than what we last sent. (Perhaps the control plane restarted.)\n\t// In that case we want to make sure that we send new responses with\n\t// VersionInfo incremented starting with the version which the proxy last had.\n\tlastVersion = proxy.GetLastSentVersion(typeURL)\n\tif requestVersion > lastVersion {\n\t\tlog.Debug().Msgf(\"Proxy SerialNumber=%s PodUID=%s: Higher version on request %s, req ver: %d - last ver: %d. 
Updating to match latest.\",\n\t\t\tproxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), requestVersion, lastVersion)\n\t\tproxy.SetLastSentVersion(typeURL, requestVersion)\n\t\treturn true\n\t}\n\n\t// Compare Nonces\n\t// As per protocol, we can ignore any request on the TypeURL stream that has not caught up with last sent nonce, if the\n\t// nonce is non-empty.\n\tlastNonce = proxy.GetLastSentNonce(typeURL)\n\tif requestNonce != lastNonce {\n\t\tlog.Debug().Msgf(\"Proxy SerialNumber=%s PodUID=%s: Ignoring request for %s non-latest nonce (request: %s, current: %s)\",\n\t\t\tproxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), requestNonce, lastNonce)\n\t\treturn false\n\t}\n\n\t// ----\n\t// At this point, there is no error and nonces match, it is guaranteed an ACK with last version.\n\t// What's left is to check if the resources listed are the same. If they are not, we must respond\n\t// with the new resources requested.\n\t//\n\t// In case of LDS and CDS, \"Envoy will always use wildcard mode for Listener and Cluster resources\".\n\t// The following logic is not needed (though correct) for LDS and CDS as request resources are also empty in ACK case.\n\t//\n\t// This part of the code was inspired by Istio's `shouldRespond` handling of request resource difference\n\t// https://github.com/istio/istio/blob/da6178604559bdf2c707a57f452d16bee0de90c8/pilot/pkg/xds/ads.go#L347\n\t// ----\n\tresourcesLastSent := proxy.GetLastResourcesSent(typeURL)\n\tresourcesRequested := getRequestedResourceNamesSet(discoveryRequest)\n\n\t// If what we last sent is a superset of what the\n\t// requests resources subscribes to, it's ACK and nothing needs to be done.\n\t// Otherwise, envoy might be asking us for additional resources that have to be sent along last time.\n\t// Difference returns elemenets of <requested> that are not part of elements of <last sent>\n\n\trequestedResourcesDifference := 
resourcesRequested.Difference(resourcesLastSent)\n\tif requestedResourcesDifference.Cardinality() != 0 {\n\t\tlog.Debug().Msgf(\"Proxy SerialNumber=%s PodUID=%s: request difference in v:%d - requested: %v lastSent: %v (diff: %v), triggering update\",\n\t\t\tproxy.GetCertificateSerialNumber(), proxy.GetPodUID(), requestVersion, resourcesRequested, resourcesLastSent, requestedResourcesDifference)\n\t\treturn true\n\t}\n\n\tlog.Debug().Msgf(\"Proxy SerialNumber=%s PodUID=%s: ACK received for %s, version: %d nonce: %s resources ACKd: %v\",\n\t\tproxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), requestVersion, requestNonce, resourcesRequested)\n\treturn false\n}", "func (s *Script) resolve(L *lua.LState) int {\n\tctx, err := extractContext(L.CheckUserData(1))\n\tname := L.CheckString(2)\n\tqtype := convertType(L.CheckString(3))\n\tif err != nil || name == \"\" || qtype == 0 {\n\t\tL.Push(lua.LNil)\n\t\tL.Push(lua.LString(\"proper parameters were not provided\"))\n\t\treturn 2\n\t}\n\n\tresp, err := s.fwdQuery(ctx, name, qtype)\n\tif err != nil || resp.Rcode != dns.RcodeSuccess || len(resp.Answer) == 0 {\n\t\tL.Push(lua.LNil)\n\t\tL.Push(lua.LString(\"the query was unsuccessful for \" + name))\n\t\treturn 2\n\t}\n\n\tdetection := true\n\tif L.GetTop() == 4 {\n\t\tdetection = L.CheckBool(4)\n\t}\n\n\tif detection {\n\t\tdomain, err := publicsuffix.EffectiveTLDPlusOne(name)\n\n\t\tif err != nil || s.sys.TrustedResolvers().WildcardDetected(ctx, resp, domain) {\n\t\t\tL.Push(lua.LNil)\n\t\t\tL.Push(lua.LString(\"DNS wildcard detection made a positive match for \" + name))\n\t\t\treturn 2\n\t\t}\n\t}\n\n\ttb := L.NewTable()\n\tif ans := resolve.ExtractAnswers(resp); len(ans) > 0 {\n\t\tif records := resolve.AnswersByType(ans, qtype); len(records) > 0 {\n\t\t\tfor _, rr := range records {\n\t\t\t\tentry := L.NewTable()\n\t\t\t\tentry.RawSetString(\"rrname\", lua.LString(rr.Name))\n\t\t\t\tentry.RawSetString(\"rrtype\", 
lua.LNumber(rr.Type))\n\t\t\t\tentry.RawSetString(\"rrdata\", lua.LString(rr.Data))\n\t\t\t\ttb.Append(entry)\n\t\t\t}\n\t\t}\n\t}\n\tL.Push(tb)\n\tL.Push(lua.LNil)\n\treturn 2\n}", "func (term *Terminology) Resolve(ctx context.Context, id *apiv1.Identifier) (proto.Message, error) {\n\tsctID, err := snomed.ParseAndValidate(id.GetValue())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not resolve SNOMED CT: %w\", err)\n\t}\n\theader := metadata.New(map[string]string{\"accept-language\": \"en-GB\"})\n\tctx = metadata.NewOutgoingContext(ctx, header)\n\tif sctID.IsConcept() {\n\t\tec, err := term.client.GetExtendedConcept(ctx, &snomed.SctID{Identifier: sctID.Integer()})\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not resolve SNOMED CT concept '%d': %w\", sctID, err)\n\t\t}\n\t\treturn ec, nil\n\t}\n\tif sctID.IsDescription() {\n\t\td, err := term.client.GetDescription(ctx, &snomed.SctID{Identifier: sctID.Integer()})\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not resolve SNOMED CT description '%d': %w\", sctID, err)\n\t\t}\n\t\treturn d, nil\n\t}\n\treturn nil, fmt.Errorf(\"could not resolve SNOMED CT entity '%d': only concepts and descriptions supported\", sctID)\n}", "func (*GetNearbyAgenciesResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_agency_web_proto_rawDescGZIP(), []int{21}\n}", "func (r *Resolver) Resolve(did string, parsed *did.DID, res resolver.Resolver) (*resolver.Document, error) {\n\tif parsed.Method != r.Method() {\n\t\treturn nil, fmt.Errorf(\"unknown did method: '%s'\", parsed.Method)\n\t}\n\t_, bytes, err := mbase.Decode(parsed.ID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkeyType, n, err := varint.FromUvarint(bytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n != 2 {\n\t\treturn nil, fmt.Errorf(\"error parsing varint\")\n\t}\n\tswitch keyType {\n\tcase uint64(codec.Ed25519Pub):\n\t\treturn ExpandEd25519Key(bytes[n:], parsed.ID)\n\tcase uint64(codec.Secp256k1Pub):\n\t\treturn 
ExpandSecp256k1Key(bytes[n:], parsed.ID)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown key type: '%s'\", codec.Code(keyType).String())\n\t}\n}", "func (_f6 *FakeResolver) ResolveCalledWith(name string) (found bool) {\n\tfor _, call := range _f6.ResolveCalls {\n\t\tif reflect.DeepEqual(call.Parameters.Name, name) {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn\n}", "func ResolveVanityURL(vanityURL string, apiKey string) (*ResolveVanityURLResponse, error) {\n\tvar resolveVanityURL = NewSteamMethod(\"ISteamUser\", \"ResolveVanityURL\", 1)\n\tdata := url.Values{}\n\tdata.Add(\"key\", apiKey)\n\tdata.Add(\"vanityURL\", vanityURL)\n\n\tvar resp ResolveVanityURLResponse\n\terr := resolveVanityURL.Request(data, &resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}", "func (c *ClientWithResponses) AppStoreVersionsIdfaDeclarationGetToOneRelatedWithResponse(ctx context.Context, id string, params *AppStoreVersionsIdfaDeclarationGetToOneRelatedParams, reqEditors ...RequestEditorFn) (*AppStoreVersionsIdfaDeclarationGetToOneRelatedResponse, error) {\n\trsp, err := c.AppStoreVersionsIdfaDeclarationGetToOneRelated(ctx, id, params, reqEditors...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseAppStoreVersionsIdfaDeclarationGetToOneRelatedResponse(rsp)\n}" ]
[ "0.60127324", "0.57031727", "0.5662719", "0.55577856", "0.54912895", "0.54861164", "0.5339197", "0.5312581", "0.5171849", "0.51083773", "0.5107675", "0.505698", "0.5027165", "0.49780568", "0.4963554", "0.49580607", "0.4939172", "0.4843758", "0.4825311", "0.48133907", "0.47647476", "0.47558436", "0.47286028", "0.47160804", "0.47011194", "0.46715537", "0.46552238", "0.46424428", "0.46417978", "0.463897", "0.46122265", "0.45738724", "0.4544329", "0.45413372", "0.45369565", "0.45317933", "0.45221254", "0.45190528", "0.451477", "0.45041105", "0.44906554", "0.44832546", "0.44798538", "0.44675356", "0.44411334", "0.4433015", "0.4426694", "0.44262862", "0.44154593", "0.44003272", "0.4397498", "0.43938056", "0.43937176", "0.43885326", "0.43730423", "0.43636364", "0.43578616", "0.43558118", "0.4354204", "0.43523157", "0.43439785", "0.43387508", "0.43372092", "0.43317485", "0.4329816", "0.43167877", "0.43158993", "0.43081096", "0.43063694", "0.43025175", "0.4294131", "0.4287302", "0.42710242", "0.42518672", "0.42510536", "0.42500275", "0.42430416", "0.42400998", "0.42350525", "0.4229073", "0.42268708", "0.42253086", "0.422408", "0.4214163", "0.42104506", "0.420732", "0.41993892", "0.41900027", "0.41848794", "0.4176256", "0.417114", "0.41656223", "0.41635185", "0.41609842", "0.41602376", "0.41593516", "0.41516733", "0.41457886", "0.4145592", "0.41422746" ]
0.48631617
17
Process calling points so that we generate the appropriate via and include their tiplocs
func (bf *boardFilter) processCallingPoints(s ldb.Service) { if len(s.CallingPoints) > 0 { viaRequest := bf.addVia(s.RID, s.CallingPoints[len(s.CallingPoints)-1].Tiploc) for _, cp := range s.CallingPoints { bf.addTiploc(cp.Tiploc) viaRequest.AppendTiploc(cp.Tiploc) } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (bf *boardFilter) callsAt(callingPoints []darwind3.CallingPoint, tpls []string) bool {\n\tfor _, cp := range callingPoints {\n\t\tfor _, tpl := range tpls {\n\t\t\tif tpl == cp.Tiploc {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func CheckpointCaller(handler interface{}, params ...interface{}) {\n\thandler.(func(checkpointIndex int, tipIndex int, tipsTotal int, messageID hornet.MessageID))(params[0].(int), params[1].(int), params[2].(int), params[3].(hornet.MessageID))\n}", "func CheckpointCaller(handler interface{}, params ...interface{}) {\n\thandler.(func(checkpointIndex int, tipIndex int, tipsTotal int, txHash aingle.Hash))(params[0].(int), params[1].(int), params[2].(int), params[3].(aingle.Hash))\n}", "func CheckpointCaller(handler interface{}, params ...interface{}) {\n\thandler.(func(checkpointIndex int, tipIndex int, tipsTotal int, txHash hornet.Hash))(params[0].(int), params[1].(int), params[2].(int), params[3].(hornet.Hash))\n}", "func parsePointInfo(p Point, chargerType []string) PointInfoJS {\r\n\tpJS := PointInfoJS{}\r\n\r\n\tpJS.Provider = p.Provider\r\n\tpJS.Address = p.Address\r\n\tpJS.Operator = p.Operator\r\n\tpJS.Requirement = p.Requirement\r\n\tpJS.Charger = p.Charger\r\n\tpJS.Parking = p.Parking\r\n\tpJS.Hour = p.Hour\r\n\tpJS.Facility = p.Facility\r\n\tpJS.Website = p.Website\r\n\tpJS.Location = append(pJS.Location, p.Location.Coordinates[1])\r\n\tpJS.Location = append(pJS.Location, p.Location.Coordinates[0])\r\n\r\n\tfor _, v := range chargerType {\r\n\t\tfor k, n := range pJS.Charger {\r\n\t\t\tif v == n.Type {\r\n\t\t\t\tpJS.Charger[k].Match = true\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn pJS\r\n}", "func (cb *CanBusClient) Points(nodeID string, points []data.Point) {\n\tcb.newPoints <- NewPoints{nodeID, \"\", points}\n}", "func (g *F) Call(p ...float64) []float64 {\n\tcoords := make([]float64, len(p))\n\tfor i := 0; i < len(p); i++ {\n\t\tcoords[i] = g.f[i](p...)\n\t}\n\treturn coords\n}", "func (src 
*prometheusMetricsSource) buildPoint(name string, m *dto.Metric, now int64, tags map[string]string) []*MetricPoint {\n\tvar result []*MetricPoint\n\tif m.Gauge != nil {\n\t\tif !math.IsNaN(m.GetGauge().GetValue()) {\n\t\t\tpoint := src.metricPoint(name+\".gauge\", float64(m.GetGauge().GetValue()), now, src.source, tags)\n\t\t\tresult = src.filterAppend(result, point)\n\t\t}\n\t} else if m.Counter != nil {\n\t\tif !math.IsNaN(m.GetCounter().GetValue()) {\n\t\t\tpoint := src.metricPoint(name+\".counter\", float64(m.GetCounter().GetValue()), now, src.source, tags)\n\t\t\tresult = src.filterAppend(result, point)\n\t\t}\n\t} else if m.Untyped != nil {\n\t\tif !math.IsNaN(m.GetUntyped().GetValue()) {\n\t\t\tpoint := src.metricPoint(name+\".value\", float64(m.GetUntyped().GetValue()), now, src.source, tags)\n\t\t\tresult = src.filterAppend(result, point)\n\t\t}\n\t}\n\treturn result\n}", "func linePointsGen(p1, p2 Point, speed float64) (gen func() (x, y float64, e error)) {\n\t// Set up math\n\tslopeT, slope, _ := getLineParams(p1, p2)\n\n\tx := p1.X\n\txPrev := x\n\ty := p1.Y\n\tyPrev := y\n\te := fmt.Errorf(\"End of path reached\")\n\ttheta := math.Atan(slope)\n\n\t// Every slope type has a different iterator, since they change the\n\t// x and y values in different combinations, as well as do different\n\t// comparisons on the values.\n\tswitch slopeT {\n\tcase ZERORIGHT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif x > p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\txPrev = x\n\t\t\tx += speed\n\n\t\t\treturn xPrev, y, nil\n\t\t}\n\tcase ZEROLEFT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif x < p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\txPrev = x\n\t\t\tx -= speed\n\n\t\t\treturn xPrev, y, nil\n\t\t}\n\tcase POSRIGHT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y > p2.Y || x > p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev = y\n\t\t\txPrev = x\n\n\t\t\ty += speed * math.Sin(theta)\n\t\t\tx += speed * 
math.Cos(theta)\n\n\t\t\treturn xPrev, yPrev, nil\n\t\t}\n\tcase NEGRIGHT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y < p2.Y || x > p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev = y\n\t\t\txPrev = x\n\n\t\t\ty += speed * math.Sin(theta)\n\t\t\tx += speed * math.Cos(theta)\n\n\t\t\treturn xPrev, yPrev, nil\n\t\t}\n\tcase POSLEFT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y < p2.Y || x < p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev = y\n\t\t\txPrev = x\n\n\t\t\ty -= speed * math.Sin(theta)\n\t\t\tx -= speed * math.Cos(theta)\n\n\t\t\treturn xPrev, yPrev, nil\n\t\t}\n\tcase NEGLEFT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y > p2.Y || x < p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev = y\n\t\t\txPrev = x\n\n\t\t\ty -= speed * math.Sin(theta)\n\t\t\tx -= speed * math.Cos(theta)\n\n\t\t\treturn xPrev, yPrev, nil\n\t\t}\n\tcase INFUP:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y > p2.Y {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev := y\n\t\t\ty += speed\n\n\t\t\treturn x, yPrev, nil\n\t\t}\n\tcase INFDOWN:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y < p2.Y {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev := y\n\t\t\ty -= speed\n\n\t\t\treturn x, yPrev, nil\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *BaseAspidaListener) EnterPoints(ctx *PointsContext) {}", "func (b *block) Plan(pointIds ...string) ([]spi.PointSPI, error) {\n\tpoints := []spi.PointSPI{}\n\n\tif len(pointIds) == 0 {\n\t\t// if there are no specified points, include all points\n\n\t\tfor _, p := range b.points {\n\t\t\tpoints = append(points, p)\n\t\t}\n\t} else {\n\t\tincluded := map[string]bool{}\n\t\tincluded_sf := map[string]bool{}\n\n\t\t// include all specified points\n\t\tfor _, id := range pointIds {\n\t\t\tif p, ok := b.points[id]; !ok {\n\t\t\t\treturn nil, sunspec.ErrNoSuchPoint\n\t\t\t} else {\n\t\t\t\tif !included[id] {\n\t\t\t\t\tpoints = append(points, p)\n\t\t\t\t\tincluded[id] = 
true\n\t\t\t\t}\n\t\t\t\tif p.Type() == typelabel.ScaleFactor {\n\t\t\t\t\tincluded_sf[id] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// include their scale factors too...\n\t\t//\n\t\t// we do this for several reasons:\n\t\t// - to interpret a point that uses a scale factor, we need the scale factor too\n\t\t// - if we don't there we may read a value point after its scale factor point has changed\n\t\t// By forcing contemporaneous reads of a scale factor and its related points we help to ensure\n\t\t// that the two values are consistent.\n\t\t// - we want to avoid app programmers having to encode knowedlege in their programs\n\t\t// about these depednencies - the knowledge is in the SMDX documents, so lets use it\n\t\tfor _, p := range points {\n\t\t\tsfp := p.(*point).scaleFactor\n\t\t\tif sfp != nil {\n\t\t\t\tif !included[sfp.Id()] {\n\t\t\t\t\tpoints = append(points, sfp.(spi.PointSPI))\n\t\t\t\t\tincluded[sfp.Id()] = true\n\t\t\t\t\tincluded_sf[sfp.Id()] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// We also include all the currently valid points that reference any scale\n\t\t// factor points we are going to read since we don't want such points to\n\t\t// unexpectedly enter an error state when they are invalidated by the\n\t\t// read of the scale factor point. 
This allows twp separate reads each\n\t\t// of which have a point that reference a shared scale factor point to\n\t\t// be equivalent to a single read of all points or to two reads in which\n\t\t// all points related to a single scale factor are read in the same read\n\t\t// as the scale factor itself.\n\t\t//\n\t\t// One consequence of this behaviour is that any local changes (via a\n\t\t// setter) to a point dependent on a scale factor point may be lost by a\n\t\t// read of any point that is dependent on the same scale factor which\n\t\t// itself means that local changes to points should be written to the\n\t\t// physical device with Block.Write before the next Block.Read or else\n\t\t// they may be lost under some circumstances even if the point concerned\n\t\t// is not directly referened by the Read call.\n\t\t//\n\t\t// Part of the reason we do this is to maximise the consistency of data\n\t\t// exposed by the API while minimising both the effort for the programmer\n\t\t// to maintain the consistency and also surprising behaviour.\n\t\tfor _, p := range b.points {\n\t\t\tif sfp := p.scaleFactor; sfp == nil || p.Error() != nil || !included_sf[sfp.Id()] {\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tif !included[p.Id()] {\n\t\t\t\t\tpoints = append(points, p)\n\t\t\t\t\tincluded[p.Id()] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// sort so scale factors come first, then other points in offset order\n\tsort.Sort(scaleFactorFirstOrder(points))\n\treturn points, nil\n}", "func PointSlope() {\n\tfmt.Print(c.CL)\n\tstrx1, err := i.Prompt(c.G + \"Enter x1 point\\n\" + c.B + \">\" + c.M)\n\tu.QuitAtError(err)\n\tstry1, err := i.Prompt(c.G + \"Enter y1 point\\n\" + c.B + \">\" + c.M)\n\tu.QuitAtError(err)\n\tstrm, err := i.Prompt(c.G + \"Enter slope\\n\" + c.B + \">\" + c.M)\n\tu.QuitAtError(err)\n\t//all inputs now provided, to be converted.\n\tx1, err := strconv.ParseFloat(strx1, 64)\n\tu.QuitAtError(err)\n\ty1, err := strconv.ParseFloat(stry1, 
64)\n\tu.QuitAtError(err)\n\tm, err := strconv.ParseFloat(strm, 64)\n\tu.QuitAtError(err)\n\t//all data points provided and converted, now to math-a-tise.\n\tfmt.Println(c.CL, c.G+\"The formula is\")\n\tfmt.Println(\"y = m(x) + b\")\n\tu.Spacer(3)\n\t//to find \"b\"\n\tfmt.Println(y1, \" = \", m, \"(\", x1, \") + b\")\n\tfiller := x1 * m\n\t//multiplies x1 and m to filler.\n\tu.Spacer(1)\n\tfmt.Println(y1, \" = \", filler, \" + b\")\n\tfmt.Println(\"-\", filler, \"---------------|\")\n\t//Shows subtraction\n\tu.Spacer(1)\n\tb := y1 - filler\n\tfmt.Println(c.B2+\"b = \", b)\n\tu.Spacer(3)\n\tfmt.Println(c.B3+\"y = \", m, \"(x) + \", b)\n\tu.Go(1)\n\t//prints out completed statment, ends function\n}", "func applyToPoints(points []Point, fn func(*Point)) {\n\tfor j := range points {\n\t\tfn(&points[j])\n\t}\n}", "func (orderbook *Orderbook) backfillPoints(topbook []*Point, pointDistance uint64, leftMultiple uint64, rightMultiple uint64) []*Point {\n\tfor currentMultiple := leftMultiple; currentMultiple < rightMultiple; currentMultiple++ {\n\t\tpoint := CreatePoint(orderbook, (currentMultiple+1)*pointDistance)\n\t\ttopbook = append(topbook, &point)\n\t}\n\treturn topbook\n}", "func (a axes) drawPoint(p *vg.Painter, xy xyer, cs vg.CoordinateSystem, l Line, pointNumber int) {\n\tx, y, isEnvelope := xy.XY(l)\n\n\t// add number of NaNs leading pointNumber to pointNumber.\n\ttargetNumber := pointNumber\n\tfor i, v := range x {\n\t\tif i > targetNumber {\n\t\t\tbreak\n\t\t}\n\t\tif math.IsNaN(v) {\n\t\t\tpointNumber++\n\t\t}\n\t}\n\n\tif len(x) <= pointNumber || len(y) <= pointNumber || pointNumber < 0 {\n\t\treturn\n\t}\n\tp.SetFont(font1)\n\tlabels := make([]vg.FloatText, 2)\n\tif isEnvelope {\n\t\tif n := len(x); n != len(y) || pointNumber+2 > n {\n\t\t\treturn\n\t\t} else {\n\t\t\txp, yp := x[pointNumber], y[pointNumber]\n\t\t\txp2, yp2 := x[n-pointNumber-2], y[n-pointNumber-2]\n\t\t\tx = []float64{xp, xp2}\n\t\t\ty = []float64{yp, yp2}\n\t\t\tlabels[0] = vg.FloatText{X: 
xp, Y: yp, S: fmt.Sprintf(\"(%.4g, %.4g)\", xp, yp), Align: 5}\n\t\t\tlabels[1] = vg.FloatText{X: xp2, Y: yp2, S: fmt.Sprintf(\"(%.4g, %.4g)\", xp2, yp2), Align: 1}\n\t\t}\n\t} else {\n\t\txp, yp := x[pointNumber], y[pointNumber]\n\t\tx = []float64{xp}\n\t\ty = []float64{yp}\n\t\tvar s string\n\t\tif xyp, ok := xy.(xyPolar); ok {\n\t\t\txstr := \"\"\n\t\t\tif xyp.rmin == 0 && xyp.rmax == 0 { // polar\n\t\t\t\tif len(l.X) > pointNumber && pointNumber >= 0 {\n\t\t\t\t\txstr = fmt.Sprintf(\"%.4g, \", l.X[pointNumber])\n\t\t\t\t}\n\t\t\t\ts = xstr + xmath.Absang(complex(yp, xp), \"%.4g@%.0f\")\n\t\t\t} else { // ring\n\t\t\t\ts = fmt.Sprintf(\"%.4g@%.1f\", l.X[pointNumber], 180.0*l.Y[pointNumber]/math.Pi)\n\t\t\t}\n\t\t} else {\n\t\t\ts = fmt.Sprintf(\"(%.4g, %.4g)\", xp, yp)\n\t\t}\n\t\tlabels[0] = vg.FloatText{X: xp, Y: yp, S: s, Align: 1}\n\t\tlabels = labels[:1]\n\t}\n\n\tsize := l.Style.Marker.Size\n\tif size == 0 {\n\t\tsize = l.Style.Line.Width\n\t}\n\tif size == 0 {\n\t\tsize = 9\n\t} else {\n\t\tsize *= 3\n\t}\n\tc := a.plot.Style.Order.Get(l.Style.Marker.Color, l.Id+1).Color()\n\tp.SetColor(c)\n\tp.Add(vg.FloatCircles{X: x, Y: y, CoordinateSystem: cs, Radius: size, Fill: true})\n\trect := a.inside.Bounds()\n\tfor _, l := range labels {\n\t\tl.CoordinateSystem = cs\n\t\tl.Rect = rect\n\n\t\t// Change the alignment, if the label would be placed at a picture boundary.\n\t\tx0, y0 := cs.Pixel(l.X, l.Y, rect)\n\t\tif l.Align == 1 && y0 < 30 {\n\t\t\tl.Align = 5\n\t\t} else if l.Align == 5 && y0 > rect.Max.Y-30 {\n\t\t\tl.Align = 1\n\t\t}\n\t\tif x0 < 50 {\n\t\t\tif l.Align == 1 {\n\t\t\t\tl.Align = 0\n\t\t\t} else if l.Align == 5 {\n\t\t\t\tl.Align = 6\n\t\t\t}\n\t\t} else if x0 > rect.Max.X-50 {\n\t\t\tif l.Align == 1 {\n\t\t\t\tl.Align = 2\n\t\t\t} else if l.Align == 5 {\n\t\t\t\tl.Align = 4\n\t\t\t}\n\t\t}\n\n\t\t// Place the label above or below with the offset of the marker's radius.\n\t\tif l.Align <= 2 { // Label is above point.\n\t\t\tl.Yoff = -size\n\t\t} 
else if l.Align >= 4 { // Label is below point\n\t\t\tl.Yoff = size\n\t\t}\n\n\t\t// Fill background rectangle of the label.\n\t\tx, y, w, h := l.Extent(p)\n\t\tsaveColor := p.GetColor()\n\t\tp.SetColor(a.bg)\n\t\tp.Add(vg.Rectangle{X: x, Y: y, W: w, H: h, Fill: true})\n\t\tp.SetColor(saveColor)\n\t\tp.Add(l)\n\t}\n}", "func (b *BccLatticePointGenerator) forEachPoint(\n\tboundingBox *BoundingBox3D,\n\tspacing float64,\n\tpoints *[]*Vector3D.Vector3D,\n\tcallback func(*([]*Vector3D.Vector3D), *Vector3D.Vector3D) bool,\n) {\n\n\thalfSpacing := spacing / 2\n\tboxWidth := boundingBox.width()\n\tboxHeight := boundingBox.height()\n\tboxDepth := boundingBox.depth()\n\n\tposition := Vector3D.NewVector(0, 0, 0)\n\thasOffset := false\n\tshouldQuit := false\n\n\tfor k := float64(0); k*halfSpacing <= boxDepth && !shouldQuit; k++ {\n\n\t\tposition.Z = k*halfSpacing + boundingBox.lowerCorner.Z\n\t\tvar offset float64\n\t\tif hasOffset {\n\n\t\t\toffset = halfSpacing\n\t\t} else {\n\t\t\toffset = 0\n\t\t}\n\n\t\tfor j := float64(0); j*spacing+offset <= boxHeight && !shouldQuit; j++ {\n\t\t\tposition.Y = j*spacing + offset + boundingBox.lowerCorner.Y\n\n\t\t\tfor i := float64(0); i*spacing+offset <= boxWidth; i++ {\n\t\t\t\tposition.X = i*spacing + offset + boundingBox.lowerCorner.X\n\n\t\t\t\tif !callback(points, position) {\n\t\t\t\t\tshouldQuit = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\thasOffset = !hasOffset\n\t}\n}", "func (d *droid) backtraceToPoint(currLoc, rootLoc *point) []*point {\n\tmovement := d.findPointInPath(currLoc)\n\tbackPath := []*point{movement.location}\n\n\tisEqual := pointsEquals(movement.location, rootLoc)\n\n\tfor !isEqual {\n\t\tbackPath = append(backPath, movement.parent.location)\n\t\tmovement = movement.parent\n\t\tisEqual = pointsEquals(movement.location, rootLoc)\n\t}\n\n\treturn backPath\n}", "func OfGeomPoints(points ...geom.Point) Winding { return Order{}.OfGeomPoints(points...) 
}", "func CallInfo(lv int) string {\n\tpc, file, line, ok := runtime.Caller(lv)\n\tif !ok {\n\t\treturn \"\"\n\t}\n\tfile = callerShortfile(file)\n\tfuncName := runtime.FuncForPC(pc).Name()\n\tfuncName = callerShortfile(funcName)\n\tfn := callerShortfile(funcName, ')')\n\tif len(fn) < len(funcName) {\n\t\tif len(fn) > 1 && fn[0] == '.' {\n\t\t\tfn = fn[1:]\n\t\t}\n\t\tfuncName = fn\n\t} else {\n\t\tfuncName = callerShortfile(funcName, '.')\n\t}\n\ts := fmt.Sprintf(\"%s:%d(%s)\", file, line, funcName)\n\treturn s\n}", "func (me *messageEvents) checkNewCalls(ctx context.Context, from, to *types.TipSet) map[triggerID][]eventData {\n\tme.lk.RLock()\n\tdefer me.lk.RUnlock()\n\n\t// For each message in the tipset\n\tres := make(map[triggerID][]eventData)\n\tme.messagesForTs(from, func(msg *types.Message) {\n\t\t// TODO: provide receipts\n\n\t\t// Run each trigger's matcher against the message\n\t\tfor tid, matchFn := range me.matchers {\n\t\t\tmatched, err := matchFn(msg)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"event matcher failed: %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// If there was a match, include the message in the results for the\n\t\t\t// trigger\n\t\t\tif matched {\n\t\t\t\tres[tid] = append(res[tid], msg)\n\t\t\t}\n\t\t}\n\t})\n\n\treturn res\n}", "func callerInfo(skip int) string {\n\t_, file, line, _ := runtime.Caller(skip)\n\treturn fmt.Sprintf(\"%v:%v\", file, line)\n}", "func (s *BaseAspidaListener) EnterTPoints(ctx *TPointsContext) {}", "func getCallerInfo(skip int) *callerInfo {\n\tvar (\n\t\tpkg string\n\t\tfile string\n\t\tline int\n\t\tfunctions []string\n\t)\n\n\t// maximum depth of 20\n\tpcs := make([]uintptr, 20)\n\tn := runtime.Callers(skip+2, pcs)\n\tpcs = pcs[:n-1]\n\n\tframes := runtime.CallersFrames(pcs)\n\tfirstCaller := true\n\tfor {\n\t\tframe, more := frames.Next()\n\t\tif !more {\n\t\t\tbreak\n\t\t}\n\n\t\tfn := frame.Function\n\t\tfnStart := strings.LastIndexByte(fn, '/')\n\t\tif fnStart == -1 {\n\t\t\tfnStart = 0\n\t\t} 
else {\n\t\t\tfnStart++\n\t\t}\n\n\t\tfn = fn[fnStart:]\n\t\tpkgEnd := strings.IndexByte(fn, '.')\n\t\tif pkgEnd == -1 {\n\t\t\tfnStart = 0\n\t\t} else {\n\t\t\tfnStart = pkgEnd + 1\n\t\t}\n\t\tfunctions = append(functions, fn[fnStart:])\n\n\t\tif firstCaller {\n\t\t\tline = frame.Line\n\t\t\tfile = frame.File\n\t\t\t// set file as relative path\n\t\t\tpat := \"tracee/\"\n\t\t\ttraceeIndex := strings.Index(file, pat)\n\t\t\tif traceeIndex != -1 {\n\t\t\t\tfile = file[traceeIndex+len(pat):]\n\t\t\t}\n\t\t\tpkg = fn[:pkgEnd]\n\n\t\t\tfirstCaller = false\n\t\t}\n\t}\n\n\treturn &callerInfo{\n\t\tpkg: pkg,\n\t\tfile: file,\n\t\tline: line,\n\t\tfunctions: functions,\n\t}\n}", "func transformCall(n *ir.CallExpr) {\n\t// Set base.Pos, since transformArgs below may need it, but transformCall\n\t// is called in some passes that don't set base.Pos.\n\tir.SetPos(n)\n\t// n.Type() can be nil for calls with no return value\n\tassert(n.Typecheck() == 1)\n\ttransformArgs(n)\n\tl := n.X\n\tt := l.Type()\n\n\tswitch l.Op() {\n\tcase ir.ODOTINTER:\n\t\tn.SetOp(ir.OCALLINTER)\n\n\tcase ir.ODOTMETH:\n\t\tl := l.(*ir.SelectorExpr)\n\t\tn.SetOp(ir.OCALLMETH)\n\n\t\ttp := t.Recv().Type\n\n\t\tif l.X == nil || !types.Identical(l.X.Type(), tp) {\n\t\t\tbase.Fatalf(\"method receiver\")\n\t\t}\n\n\tdefault:\n\t\tn.SetOp(ir.OCALLFUNC)\n\t}\n\n\ttypecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args)\n\tif l.Op() == ir.ODOTMETH && len(deref(n.X.Type().Recv().Type).RParams()) == 0 {\n\t\ttypecheck.FixMethodCall(n)\n\t}\n\tif t.NumResults() == 1 {\n\t\tif n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME {\n\t\t\tif sym := n.X.(*ir.Name).Sym(); types.IsRuntimePkg(sym.Pkg) && sym.Name == \"getg\" {\n\t\t\t\t// Emit code for runtime.getg() directly instead of calling function.\n\t\t\t\t// Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,\n\t\t\t\t// so that the ordering pass can make sure to preserve the semantics of the original code\n\t\t\t\t// (in 
particular, the exact time of the function call) by introducing temporaries.\n\t\t\t\t// In this case, we know getg() always returns the same result within a given function\n\t\t\t\t// and we want to avoid the temporaries, so we do the rewrite earlier than is typical.\n\t\t\t\tn.SetOp(ir.OGETG)\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n}", "func checkPoints(fabric *elfFabric, takenFabric map[point]int) int {\n\tfor addX := 1; addX <= fabric.size[0]; addX++ {\n\t\tfor addY := 1; addY <= fabric.size[1]; addY++ {\n\t\t\tp := point{x: fabric.position[0] + addX, y: fabric.position[1] + addY}\n\t\t\tnumOfOverlaps := takenFabric[p]\n\t\t\tif numOfOverlaps != 1 {\n\t\t\t\treturn -1\n\t\t\t}\n\t\t}\n\t}\n\treturn fabric.ID\n}", "func (w *wire) interceptPoints(o wire) []point {\n\tvar interceptPoints []point\n\tfor i := 1; i < len(w.points); i++ {\n\t\tv1 := segment{\n\t\t\tfrom: w.points[i-1],\n\t\t\tto: w.points[i],\n\t\t}\n\t\tfor u := 1; u < len(o.points); u++ {\n\t\t\tv2 := segment{\n\t\t\t\tfrom: o.points[u-1],\n\t\t\t\tto: o.points[u],\n\t\t\t}\n\t\t\tintercept := v1.intercepts(v2)\n\t\t\tif intercept.x != 0 && intercept.y != 0 {\n\t\t\t\t// Calculate total wire length (both wires combined)\n\t\t\t\tintercept.wireLen = v1.from.wireLen + intercept.distanceToPoint(v1.from) +\n\t\t\t\t\tv2.from.wireLen + intercept.distanceToPoint(v2.from)\n\t\t\t\tinterceptPoints = append(interceptPoints, intercept)\n\t\t\t}\n\t\t}\n\t}\n\treturn interceptPoints\n}", "func (a axes) click(x, y int, xy xyer, snapToPoint bool) (PointInfo, bool) {\n\t// x, y := a.toFloats(xClick, yClick)\n\tlim := a.limits\n\tcs := vg.CoordinateSystem{lim.Xmin, lim.Ymax, lim.Xmax, lim.Ymin}\n\tbounds := image.Rect(a.x, a.y, a.x+a.width, a.y+a.height)\n\n\tif snapToPoint == false {\n\t\tpx, py := cs.Point(x, y, bounds)\n\t\treturn PointInfo{\n\t\t\tLineID: -1,\n\t\t\tPointNumber: -1,\n\t\t\tNumPoints: 0,\n\t\t\tX: px,\n\t\t\tY: py,\n\t\t}, true\n\t}\n\n\tdist := math.Inf(1)\n\tpIdx := -1\n\tlIdx := -1\n\tnumPoints := 
0\n\tisEnvelope := false\n\tmaxSegment := 0\n\tisSegment := false\n\tfor i, l := range a.plot.Lines {\n\t\tX, Y, isEnv := xy.XY(l)\n\t\tnNotNaN := -1\n\t\tsegmentIdx := 0\n\t\tfor n := range X {\n\t\t\txi, yi := cs.Pixel(X[n], Y[n], bounds)\n\t\t\t// We only increase the index, if the data point is valid.\n\t\t\tnNotNaN++\n\t\t\tif math.IsNaN(X[n]) || math.IsNaN(Y[n]) {\n\t\t\t\tsegmentIdx++\n\t\t\t\tif segmentIdx > maxSegment {\n\t\t\t\t\tmaxSegment = segmentIdx\n\t\t\t\t}\n\t\t\t\tnNotNaN--\n\t\t\t}\n\t\t\tif d := float64((xi-x)*(xi-x) + (yi-y)*(yi-y)); d < dist {\n\t\t\t\tlIdx = i\n\t\t\t\tpIdx = nNotNaN\n\t\t\t\tisEnvelope = isEnv\n\t\t\t\tif isEnvelope {\n\t\t\t\t\tif n > len(X)/2 {\n\t\t\t\t\t\tpIdx = len(X) - n - 2\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tdist = d\n\n\t\t\t\tnumPoints = len(X)\n\t\t\t\tif l.Segments {\n\t\t\t\t\tpIdx = segmentIdx\n\t\t\t\t\tisSegment = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif lIdx < 0 || pIdx < 0 {\n\t\treturn PointInfo{}, false\n\t}\n\tvar px, py float64\n\tvar pc complex128\n\tl := a.plot.Lines[lIdx]\n\tif len(l.X) > pIdx {\n\t\tpx = l.X[pIdx]\n\t}\n\tif len(l.Y) > pIdx {\n\t\tpy = l.Y[pIdx]\n\t}\n\tif len(l.C) > pIdx {\n\t\tpc = l.C[pIdx]\n\t}\n\tif isSegment {\n\t\tpx = 0\n\t\tpy = 0\n\t\tpc = complex(0, 0)\n\t\tnumPoints = maxSegment + 1\n\t}\n\treturn PointInfo{\n\t\tLineID: l.Id,\n\t\tPointNumber: pIdx,\n\t\tNumPoints: numPoints,\n\t\tIsEnvelope: isEnvelope,\n\t\tX: px,\n\t\tY: py,\n\t\tC: pc,\n\t}, true\n}", "func (mock *IKeptnNatsMessageHandlerMock) ProcessCalls() []struct {\n\tEvent apimodels.KeptnContextExtendedCE\n\tSync bool\n} {\n\tvar calls []struct {\n\t\tEvent apimodels.KeptnContextExtendedCE\n\t\tSync bool\n\t}\n\tmock.lockProcess.RLock()\n\tcalls = mock.calls.Process\n\tmock.lockProcess.RUnlock()\n\treturn calls\n}", "func prepTrackpoints(trackpointPrepper *TrackpointPrepper, streamer *Streamer, db *sql.DB, conf base.Configuration) {\n\tfmt.Println(\"TrackpointPrepper:\", trackpointPrepper.WindowStart, \"-\", 
trackpointPrepper.WindowEnd)\n\twindowSize := conf.TrackpointPrepWindowSize\n\ttimeWarp := conf.TimeWarp\n\ttargetSpeed := conf.TargetSpeedPerSecond\n\n\t// Get all currently active routes.\n\tids := make([]int64, 0)\n\tfor _, r := range trackpointPrepper.Routes {\n\t\tids = append(ids, r.Id)\n\t}\n\troutes := getRoutes(trackpointPrepper.WindowStart, trackpointPrepper.WindowEnd, ids, db)\n\n\t// Get new set of active routes.\n\ttrackpointPrepper.Routes = append(trackpointPrepper.Routes, routes...)\n\tnewRoutes := make([]Route, 0)\n\tfor _, r := range trackpointPrepper.Routes {\n\t\tif !r.DoTime.Before(trackpointPrepper.WindowStart) {\n\t\t\tnewRoutes = append(newRoutes, r)\n\t\t}\n\t}\n\n\t// Update everything to contain the final set of routes and make ready for next iteration.\n\ttrackpointPrepper.Routes = newRoutes\n\tfmt.Println(\"TrackpointPrepper.Routes.len:\", len(trackpointPrepper.Routes))\n\n\tif len(trackpointPrepper.Routes) > int(conf.NumTaxis / 10) {\n\t\t// Create updates for all taxis. First, compute how many updates we need to reach the target speed.\n\t\tnumUpdates := windowSize * targetSpeed\n\t\tnumTimeSlices := numUpdates / float64(len(trackpointPrepper.Routes))\n\t\ttimeInc := time.Duration(1000000000.0*windowSize*timeWarp/numTimeSlices) * time.Nanosecond\n\n\t\ttimeSlice := trackpointPrepper.WindowStart\n\t\tupdates := make([][]byte, 0)\n\t\tfor timeSlice.Before(trackpointPrepper.WindowEnd) {\n\t\t\tsliceEnd := timeSlice.Add(timeInc)\n\n\t\t\tfor _, r := range trackpointPrepper.Routes {\n\t\t\t\t// Check if this route just started now. 
If so, we have to create an occupancy message.\n\t\t\t\t// If it's a route with passengers, a destination message has to be added too.\n\t\t\t\tif r.PuTime.After(timeSlice) && r.PuTime.Before(sliceEnd) {\n\t\t\t\t\t// This is a new route, we have to generate an occupancy message.\n\t\t\t\t\t// Since we include all messages in both streams, here we kinda redundantly send both messages.\n\t\t\t\t\to, _ := json.Marshal(TaxiOccupancyUpdate{r.TaxiId, r.PassengerCount,\n\t\t\t\t\t\tr.EndLon, r.EndLat})\n\t\t\t\t\tupdates = append(updates, o)\n\n\t\t\t\t\tb, _ := json.Marshal(TaxiDestinationUpdate{r.TaxiId, r.PassengerCount,\n\t\t\t\t\t\tr.EndLon, r.EndLat})\n\t\t\t\t\tupdates = append(updates, b)\n\t\t\t\t}\n\n\t\t\t\t// Check if this route is just stopping now. If so, we have to send the journey (esp. price) information.\n\t\t\t\tif r.DoTime.After(timeSlice) && r.DoTime.Before(sliceEnd) {\n\t\t\t\t\tb, _ := json.Marshal(TaxiRouteCompletedUpdate{r.TaxiId, r.PassengerCount,\n\t\t\t\t\t\tr.Distance, r.Duration, r.FareAmount, r.Extra,\n\t\t\t\t\t\tr.MTATax, r.TipAmount, r.TollsAmount, r.EHailFee,\n\t\t\t\t\t\tr.ImprovementSurcharge, r.TotalAmount, r.PaymentType,\n\t\t\t\t\t\tr.TripType})\n\t\t\t\t\tupdates = append(updates, b)\n\t\t\t\t\tdelete(trackpointPrepper.ReservedTaxis, r.TaxiId)\n\t\t\t\t}\n\n\t\t\t\t// In some rare cases, the taxi gets ordered to the pickup location (let's say by a reservation call).\n\t\t\t\t// Optimally, the simulator would already generate these events...\n\t\t\t\t// For now, we do this approx. 
for one taxi every 10 seconds.\n\t\t\t\tif r.PassengerCount == 0 && rand.Float64() < 1.0 /\n\t\t\t\t\t(10000000000.0/float64(timeInc.Nanoseconds())*float64(len(trackpointPrepper.Routes))) {\n\t\t\t\t\ttrackpointPrepper.ReservedTaxis[r.TaxiId] = true\n\t\t\t\t\tb, _ := json.Marshal(TaxiReservationUpdate{r.TaxiId, r.EndLon, r.EndLat})\n\t\t\t\t\tupdates = append(updates, b)\n\t\t\t\t}\n\n\t\t\t\t// In any case, we want to generate some location updates.\n\t\t\t\t// TODO Auf UNIX / Mac scheint es anders kodiert zu sein, d.h. das strings Replace ist nicht nötig.\n\t\t\t\t// TODO Auf Ubuntu geht es so (gleich wie Windows).\n\t\t\t\tcoords, _, err := polyline.DecodeCoords([]byte(r.Geometry))\n\t\t\t\t// coords, _, err := polyline.DecodeCoords([]byte(strings.Replace(r.Geometry, \"\\\\\\\\\", \"\\\\\", -1)))\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tperc := timeSlice.Sub(r.PuTime).Seconds() / r.DoTime.Sub(r.PuTime).Seconds()\n\t\t\t\tif perc > 0 && perc < 1 {\n\t\t\t\t\tlon, lat := taxisim.AlongPolyline(taxisim.PolylineLength(coords)*perc, coords)\n\t\t\t\t\tif streamer.TaxiupdateChannel != nil {\n\t\t\t\t\t\tvar resLon *float64\n\t\t\t\t\t\tvar resLat *float64\n\t\t\t\t\t\tif trackpointPrepper.ReservedTaxis[r.TaxiId] {\n\t\t\t\t\t\t\tresLon = &r.EndLon\n\t\t\t\t\t\t\tresLat = &r.EndLat\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif r.PassengerCount > 0 {\n\t\t\t\t\t\t\tb, _ := json.Marshal(TaxiUpdate{r.TaxiId, lon, lat,\n\t\t\t\t\t\t\t\tr.PassengerCount, &r.EndLon, &r.EndLat, resLon, resLat})\n\t\t\t\t\t\t\tupdates = append(updates, b)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tb, _ := json.Marshal(TaxiUpdate{r.TaxiId, lon, lat,\n\t\t\t\t\t\t\t\tr.PassengerCount, nil, nil, resLon, resLat})\n\t\t\t\t\t\t\tupdates = append(updates, b)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\ttimeSlice = timeSlice.Add(timeInc)\n\t\t}\n\t\t// Because some routes are not within the time slices, there are not enough updates. 
We fill in the missing ones\n\t\t// by repeating some.\n\t\tmissingUpdates := int(numUpdates) - len(updates)\n\t\tupdateCount := float64(len(updates)) / float64(missingUpdates)\n\t\tcnt := 0.0\n\t\ttotCnt := 0\n\t\tfor _, r := range updates {\n\t\t\t*streamer.TaxiupdateChannel <- r\n\t\t\ttotCnt += 1\n\t\t\tif updateCount > 0 && cnt > updateCount {\n\t\t\t\t*streamer.TaxiupdateChannel <- r\n\t\t\t\ttotCnt += 1\n\t\t\t\tcnt -= updateCount\n\t\t\t}\n\n\t\t\tcnt += 1\n\t\t}\n\t\tfmt.Println(\"Added messages\", totCnt)\n\n\t\ttrackpointPrepper.WindowStart = trackpointPrepper.WindowStart.Add(time.Second * time.Duration(windowSize*timeWarp))\n\t\ttrackpointPrepper.WindowEnd = trackpointPrepper.WindowEnd.Add(time.Second * time.Duration(windowSize*timeWarp))\n\t} else {\n\t\ttrackpointPrepper.WindowStart = time.Date(2016, time.January, 1, 0, 29, 20, 0, time.UTC)\n\t\ttrackpointPrepper.WindowEnd = time.Date(2016, time.January, 1, 0, 29, int(20+windowSize*conf.TimeWarp), 0, time.UTC)\n\t}\n}", "func (pc *ParticleClient) Points(nodeID string, points []data.Point) {\n\tpc.newPoints <- NewPoints{nodeID, \"\", points}\n}", "func (_Posminer *PosminerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error {\n\treturn _Posminer.Contract.PosminerCaller.contract.Call(opts, result, method, params...)\n}", "func CallGraph(skip int) CallGraphInfo {\n\tpc, f, line, _ := runtime.Caller(skip)\n\n\tsegs := strings.Split(runtime.FuncForPC(pc).Name(), \"/\")\n\tlastSegs := strings.Split(segs[len(segs)-1], \".\")\n\n\tpackageName := strings.Join(append(segs[:len(segs)-1], lastSegs[0]), \"/\")\n\n\treturn CallGraphInfo{\n\t\tPackageName: packageName,\n\t\tFileName: f,\n\t\tLine: line,\n\t}\n}", "func parsePoint(p Point, chargerType []string) PointJS {\r\n\tpJS := PointJS{}\r\n\r\n\tpJS.Provider = p.Provider\r\n\tpJS.Address = p.Address\r\n\tpJS.Postal = p.Postal\r\n\tpJS.Location = append(pJS.Location, p.Location.Coordinates[1])\r\n\tpJS.Location = 
append(pJS.Location, p.Location.Coordinates[0])\r\n\tpJS.Charger = append(p.Charger)\r\n\r\n\tfor _, v := range chargerType {\r\n\t\tfor k, n := range pJS.Charger {\r\n\t\t\tif v == n.Type {\r\n\t\t\t\tpJS.Charger[k].Match = true\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\treturn pJS\r\n}", "func OfPoints(pts ...[2]float64) Winding { return Order{}.OfPoints(pts...) }", "func here(skip ...int) loc {\n\tsk := 1\n\tif len(skip) > 0 && skip[0] > 1 {\n\t\tsk = skip[0]\n\t}\n\tpc, fileName, fileLine, ok := runtime.Caller(sk)\n\tfn := runtime.FuncForPC(pc)\n\tvar res loc\n\tdefer func() {\n\t\tif res.long != \"\" {\n\t\t\treturn\n\t\t}\n\t\tres.long = res.FuncName\n\t}()\n\tif !ok {\n\t\tres.FuncName = \"N/A\"\n\t\treturn res\n\t}\n\tres.FileName = fileName\n\tres.FileLine = fileLine\n\tres.FuncName = fn.Name()\n\tfileName = filepath.Join(filepath.Base(filepath.Dir(fileName)), filepath.Base(fileName))\n\tres.long = fmt.Sprintf(\"%s@%d:%s()\", fileName, res.FileLine, res.FuncName)\n\tres.short = fmt.Sprintf(\"%s@%d:%s()\", fileName, res.FileLine, strings.TrimLeft(filepath.Ext(res.FuncName), \".\"))\n\treturn res\n}", "func Points(r io.Reader, cfg Config) (client.BatchPoints, error) {\n\tif err := cfg.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tbenchset, err := parse.ParseMultipleBenchmarks(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbp, err := client.NewBatchPoints(client.BatchPointsConfig{\n\t\tPrecision: \"s\",\n\t\tDatabase: cfg.Database,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor pkg, bs := range benchset {\n\t\tfor _, b := range bs {\n\t\t\ttags := map[string]string{\n\t\t\t\t\"goversion\": cfg.GoVersion,\n\t\t\t\t\"hwid\": cfg.HardwareID,\n\t\t\t\t\"pkg\": pkg,\n\t\t\t\t\"procs\": strconv.Itoa(b.Procs),\n\t\t\t\t\"name\": b.Name,\n\t\t\t}\n\t\t\tif cfg.Branch != \"\" {\n\t\t\t\ttags[\"branch\"] = cfg.Branch\n\t\t\t}\n\t\t\tp, err := client.NewPoint(\n\t\t\t\tcfg.Measurement,\n\t\t\t\ttags,\n\t\t\t\tmakeFields(b, 
cfg.Revision),\n\t\t\t\tcfg.Timestamp,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tbp.AddPoint(p)\n\t\t}\n\t}\n\n\treturn bp, nil\n}", "func (cb *CanBusClient) EdgePoints(nodeID, parentID string, points []data.Point) {\n\tcb.newEdgePoints <- NewPoints{nodeID, parentID, points}\n}", "func (s *QuasiSampler) collectPoints(filterBounds bool) []f64.Vec2 {\n\tvar pointlist []f64.Vec2\n\n\tit := NewTileLeafIterator(s.root)\n\tfor {\n\t\tpt := it.GetShape().GetP1()\n\t\t// Only \"pentagonal\" tiles generate sampling points.\n\t\tif it.GetShape().IsSamplingType() {\n\t\t\timportance := s.GetImportanceAt_bounded(pt)\n\n\t\t\t// Threshold the function against the F-Code value.\n\t\t\tif importance >= calcFCodeValue(it.GetShape().GetFCode(), it.GetShape().GetLevel()) {\n\t\t\t\t// Get the displaced point using the lookup table.\n\t\t\t\tpt_displaced := it.GetShape().GetDisplacedSamplingPoint(importance)\n\n\t\t\t\tif !filterBounds ||\n\t\t\t\t\t(pt_displaced.X >= 0 && pt_displaced.X < s.width &&\n\t\t\t\t\t\tpt_displaced.Y >= 0 && pt_displaced.Y < s.height) {\n\t\t\t\t\tpointlist = append(pointlist, pt_displaced)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif !it.Next() {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn pointlist\n}", "func (eng *Engine) fanIn(fn func(shipper Shipper) chan error) chan error {\n\tvar wg sync.WaitGroup\n\taggregator := make(chan error)\n\n\tfor target, shipper := range eng.Shippers {\n\t\twg.Add(1)\n\t\tgo func(target string, shipper Shipper) {\n\t\t\tdefer wg.Done()\n\n\t\t\tfmt.Printf(\"%v: Running target\\n\", target)\n\t\t\tfor err := range fn(shipper) {\n\t\t\t\taggregator <- err\n\t\t\t}\n\t\t\tfmt.Printf(\"%v: Completed target\\n\", target)\n\t\t}(target, shipper)\n\t}\n\n\t// Wait for all sub processes to finish and send a signal to the parent\n\t// when they do.\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(aggregator)\n\t}()\n\n\treturn aggregator\n}", "func (f *timeShiftByMetric) extractCallParams(ctx context.Context, e parser.Expr, 
from, until int64, values map[parser.MetricRequest][]*types.MetricData) (*callParams, error) {\n\tmetrics, err := helper.GetSeriesArg(ctx, e.Arg(0), from, until, values)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmarks, err := helper.GetSeriesArg(ctx, e.Arg(1), from, until, values)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tversionRank, err := e.GetIntArg(2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// validating data sets: both metrics and marks must have at least 2 series each\n\t// also, all IsAbsent and Values lengths must be equal to each other\n\tpointsQty := -1\n\tstepTime := int64(-1)\n\tvar dataSets map[string][]*types.MetricData = map[string][]*types.MetricData{\n\t\t\"marks\": marks,\n\t\t\"metrics\": metrics,\n\t}\n\tfor name, dataSet := range dataSets {\n\t\tif len(dataSet) < 2 {\n\t\t\treturn nil, merry.WithMessagef(errTooFewDatasets, \"bad data: need at least 2 %s data sets to process, got %d\", name, len(dataSet))\n\t\t}\n\n\t\tfor _, series := range dataSet {\n\t\t\tif pointsQty == -1 {\n\t\t\t\tpointsQty = len(series.Values)\n\t\t\t\tif pointsQty == 0 {\n\t\t\t\t\treturn nil, merry.WithMessagef(errEmptySeries, \"bad data: empty series %s\", series.Name)\n\t\t\t\t}\n\t\t\t} else if pointsQty != len(series.Values) {\n\t\t\t\treturn nil, merry.WithMessagef(errSeriesLengthMismatch, \"bad data: length of Values for series %s differs from others\", series.Name)\n\t\t\t}\n\n\t\t\tif stepTime == -1 {\n\t\t\t\tstepTime = series.StepTime\n\t\t\t}\n\t\t}\n\t}\n\n\tresult := &callParams{\n\t\tmetrics: metrics,\n\t\tmarks: marks,\n\t\tversionRank: versionRank,\n\t\tpointsQty: pointsQty,\n\t\tstepTime: stepTime,\n\t}\n\treturn result, nil\n}", "func (_Bep20 *Bep20CallerSession) Checkpoints(arg0 common.Address, arg1 uint32) (struct {\n\tFromBlock uint32\n\tVotes *big.Int\n}, error) {\n\treturn _Bep20.Contract.Checkpoints(&_Bep20.CallOpts, arg0, arg1)\n}", "func bezier(t float64, p0, p1, p2, p3 Point) (res Point) {\n\ts := 1 - t\n\tres.X = 
s*s*s*p0.X + 3*(s*s*t)*p1.X + 3*(t*t*s)*p2.X + t*t*t*p3.X\n\tres.Y = s*s*s*p0.Y + 3*(s*s*t)*p1.Y + 3*(t*t*s)*p2.Y + t*t*t*p3.Y\n\treturn\n}", "func (breaker *ServiceBreaker) Call(exec func() (interface{}, error)) (interface{}, error) {\n\tlog.Printf(\"start call, %v state is %v\\n\", breaker.name, breaker.state)\n\t//before call\n\terr := breaker.beforeCall()\n\tif err != nil {\n\t\tlog.Printf(\"end call,%v batch:%v,metrics:(%v,%v,%v,%v,%v),window time start:%v\\n\\n\",\n\t\t\tbreaker.name,\n\t\t\tbreaker.metrics.WindowBatch,\n\t\t\tbreaker.metrics.CountAll,\n\t\t\tbreaker.metrics.CountSuccess,\n\t\t\tbreaker.metrics.CountFail,\n\t\t\tbreaker.metrics.ConsecutiveSuccess,\n\t\t\tbreaker.metrics.ConsecutiveFail,\n\t\t\tbreaker.metrics.WindowTimeStart.Format(\"2006/01/02 15:04:05\"))\n\t\treturn nil, err\n\t}\n\n\t//if panic occur\n\tdefer func() {\n\t\terr := recover()\n\t\tif err != nil {\n\t\t\tbreaker.afterCall(false)\n\t\t\tpanic(err) //todo?\n\t\t}\n\t}()\n\n\t//call\n\tbreaker.metrics.OnCall()\n\tresult, err := exec()\n\n\t//after call\n\tbreaker.afterCall(err == nil)\n\tlog.Printf(\"end call,%v batch:%v,metrics:(%v,%v,%v,%v,%v),window time start:%v\\n\\n\",\n\t\tbreaker.name,\n\t\tbreaker.metrics.WindowBatch,\n\t\tbreaker.metrics.CountAll,\n\t\tbreaker.metrics.CountSuccess,\n\t\tbreaker.metrics.CountFail,\n\t\tbreaker.metrics.ConsecutiveSuccess,\n\t\tbreaker.metrics.ConsecutiveFail,\n\t\tbreaker.metrics.WindowTimeStart.Format(\"2006/1/2 15:04:05\"))\n\n\treturn result, err\n}", "func (pw *PointsParser) ParsePoints(ctx context.Context, orgID, bucketID influxdb.ID, rc io.ReadCloser) (*ParsedPoints, error) {\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, \"write points\")\n\tdefer span.Finish()\n\treturn pw.parsePoints(ctx, orgID, bucketID, rc)\n}", "func testPoints(nodes ...*runtime.Node) (points []*client.Point) {\n\t// Create dummy client\n\tinfluxClient, err := client.NewHTTPClient(client.HTTPConfig{Addr: \"http://127.0.0.1\"})\n\tif err != nil 
{\n\t\tpanic(err)\n\t}\n\n\tnodesList := runtime.NewNodes(&runtime.Config{})\n\n\t// Create dummy connection\n\tconn := &Connection{\n\t\tpoints: make(chan *client.Point),\n\t\tclient: influxClient,\n\t}\n\n\tfor _, node := range nodes {\n\t\tnodesList.AddNode(node)\n\t}\n\n\t// Process data\n\tgo func() {\n\t\tfor _, node := range nodes {\n\t\t\tconn.InsertNode(node)\n\t\t\tif node.Neighbours != nil {\n\t\t\t\tfor _, link := range nodesList.NodeLinks(node) {\n\t\t\t\t\tconn.InsertLink(&link, node.Lastseen.GetTime())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tconn.Close()\n\t}()\n\n\t// Read points\n\tfor point := range conn.points {\n\t\tpoints = append(points, point)\n\t}\n\n\treturn\n}", "func (v *coveredPathsVisitor) VisitCall(i *Instruction) interface{} {\n\tif i.Protobuf != nil {\n\t\tif err := v.addPaths(i.Protobuf); err != nil {\n\t\t\treturn fmt.Errorf(\"instruction ID: %d: %v\", i.ID, err)\n\t\t}\n\t}\n\tif i.Response != nil {\n\t\tif err := v.addPaths(i.Response); err != nil {\n\t\t\treturn fmt.Errorf(\"instruction ID: %d: %v\", i.ID, err)\n\t\t}\n\t}\n\treturn i.VisitChildren(v)\n}", "func (_Bep20 *Bep20Caller) Checkpoints(opts *bind.CallOpts, arg0 common.Address, arg1 uint32) (struct {\n\tFromBlock uint32\n\tVotes *big.Int\n}, error) {\n\tvar out []interface{}\n\terr := _Bep20.contract.Call(opts, &out, \"checkpoints\", arg0, arg1)\n\n\toutstruct := new(struct {\n\t\tFromBlock uint32\n\t\tVotes *big.Int\n\t})\n\tif err != nil {\n\t\treturn *outstruct, err\n\t}\n\n\toutstruct.FromBlock = *abi.ConvertType(out[0], new(uint32)).(*uint32)\n\toutstruct.Votes = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int)\n\n\treturn *outstruct, err\n\n}", "func GeneratePoint(conf CurveConfig) error {\n\n\tbavardOpts := []func(*bavard.Bavard) error{\n\t\tbavard.Apache2(\"ConsenSys AG\", 2020),\n\t\tbavard.Package(conf.CurveName),\n\t\tbavard.GeneratedBy(\"gurvy\"),\n\t}\n\n\t// point code\n\tsrc := []string{\n\t\tpoint.Point,\n\t}\n\n\tpathSrc := filepath.Join(conf.OutputDir, 
conf.PointName+\".go\")\n\tif err := bavard.Generate(pathSrc, src, conf, bavardOpts...); err != nil {\n\t\treturn err\n\t}\n\tif err := bavard.Generate(pathSrc, src, conf, bavardOpts...); err != nil {\n\t\treturn err\n\t}\n\n\t// point test\n\tsrc = []string{\n\t\tpoint.PointTests,\n\t}\n\n\tpathSrc = filepath.Join(conf.OutputDir, conf.PointName+\"_test.go\")\n\tif err := bavard.Generate(pathSrc, src, conf, bavardOpts...); err != nil {\n\t\treturn err\n\t}\n\tif err := bavard.Generate(pathSrc, src, conf, bavardOpts...); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (m *ItemItemsItemWorkbookWorksheetsItemChartsItemSeriesWorkbookChartSeriesItemRequestBuilder) PointsById(id string)(*ItemItemsItemWorkbookWorksheetsItemChartsItemSeriesItemPointsWorkbookChartPointItemRequestBuilder) {\n urlTplParams := make(map[string]string)\n for idx, item := range m.pathParameters {\n urlTplParams[idx] = item\n }\n if id != \"\" {\n urlTplParams[\"workbookChartPoint%2Did\"] = id\n }\n return NewItemItemsItemWorkbookWorksheetsItemChartsItemSeriesItemPointsWorkbookChartPointItemRequestBuilderInternal(urlTplParams, m.requestAdapter)\n}", "func (mp *MetricTranslator) TranslateDataPoints(logger *zap.Logger, sfxDataPoints []*sfxpb.DataPoint) []*sfxpb.DataPoint {\n\tprocessedDataPoints := sfxDataPoints\n\n\tfor _, tr := range mp.rules {\n\t\tswitch tr.Action {\n\t\tcase ActionRenameDimensionKeys:\n\t\t\tfor _, dp := range processedDataPoints {\n\t\t\t\tfor _, d := range dp.Dimensions {\n\t\t\t\t\tif newKey, ok := tr.Mapping[d.Key]; ok {\n\t\t\t\t\t\td.Key = newKey\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase ActionRenameMetrics:\n\t\t\tfor _, dp := range processedDataPoints {\n\t\t\t\tif newKey, ok := tr.Mapping[dp.Metric]; ok {\n\t\t\t\t\tdp.Metric = newKey\n\t\t\t\t}\n\t\t\t}\n\t\tcase ActionMultiplyInt:\n\t\t\tfor _, dp := range processedDataPoints {\n\t\t\t\tif multiplier, ok := tr.ScaleFactorsInt[dp.Metric]; ok {\n\t\t\t\t\tv := dp.GetValue().IntValue\n\t\t\t\t\tif v != nil 
{\n\t\t\t\t\t\t*v = *v * multiplier\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase ActionDivideInt:\n\t\t\tfor _, dp := range processedDataPoints {\n\t\t\t\tif divisor, ok := tr.ScaleFactorsInt[dp.Metric]; ok {\n\t\t\t\t\tv := dp.GetValue().IntValue\n\t\t\t\t\tif v != nil {\n\t\t\t\t\t\t*v = *v / divisor\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase ActionMultiplyFloat:\n\t\t\tfor _, dp := range processedDataPoints {\n\t\t\t\tif multiplier, ok := tr.ScaleFactorsFloat[dp.Metric]; ok {\n\t\t\t\t\tv := dp.GetValue().DoubleValue\n\t\t\t\t\tif v != nil {\n\t\t\t\t\t\t*v = *v * multiplier\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase ActionCopyMetrics:\n\t\t\tfor _, dp := range processedDataPoints {\n\t\t\t\tif newMetric, ok := tr.Mapping[dp.Metric]; ok {\n\t\t\t\t\tnewDataPoint := copyMetric(tr, dp, newMetric)\n\t\t\t\t\tif newDataPoint != nil {\n\t\t\t\t\t\tprocessedDataPoints = append(processedDataPoints, newDataPoint)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase ActionSplitMetric:\n\t\t\tfor _, dp := range processedDataPoints {\n\t\t\t\tif tr.MetricName == dp.Metric {\n\t\t\t\t\tsplitMetric(dp, tr.DimensionKey, tr.Mapping)\n\t\t\t\t}\n\t\t\t}\n\t\tcase ActionConvertValues:\n\t\t\tfor _, dp := range processedDataPoints {\n\t\t\t\tif newType, ok := tr.TypesMapping[dp.Metric]; ok {\n\t\t\t\t\tconvertMetricValue(logger, dp, newType)\n\t\t\t\t}\n\t\t\t}\n\t\tcase ActionCalculateNewMetric:\n\t\t\tvar operand1, operand2 *sfxpb.DataPoint\n\t\t\tfor _, dp := range processedDataPoints {\n\t\t\t\tif dp.Metric == tr.Operand1Metric {\n\t\t\t\t\toperand1 = dp\n\t\t\t\t} else if dp.Metric == tr.Operand2Metric {\n\t\t\t\t\toperand2 = dp\n\t\t\t\t}\n\t\t\t}\n\t\t\tnewPt := calculateNewMetric(logger, operand1, operand2, tr)\n\t\t\tif newPt == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tprocessedDataPoints = append(processedDataPoints, newPt)\n\n\t\tcase ActionAggregateMetric:\n\t\t\t// NOTE: Based on the usage of TranslateDataPoints we can assume that the datapoints batch []*sfxpb.DataPoint\n\t\t\t// 
represents only one metric and all the datapoints can be aggregated together.\n\t\t\tvar dpsToAggregate []*sfxpb.DataPoint\n\t\t\tvar otherDps []*sfxpb.DataPoint\n\t\t\tfor i, dp := range processedDataPoints {\n\t\t\t\tif dp.Metric == tr.MetricName {\n\t\t\t\t\tif dpsToAggregate == nil {\n\t\t\t\t\t\tdpsToAggregate = make([]*sfxpb.DataPoint, 0, len(processedDataPoints)-i)\n\t\t\t\t\t}\n\t\t\t\t\tdpsToAggregate = append(dpsToAggregate, dp)\n\t\t\t\t} else {\n\t\t\t\t\tif otherDps == nil {\n\t\t\t\t\t\totherDps = make([]*sfxpb.DataPoint, 0, len(processedDataPoints)-i)\n\t\t\t\t\t}\n\t\t\t\t\t// This slice can contain additional datapoints from a different metric\n\t\t\t\t\t// for example copied in a translation step before\n\t\t\t\t\totherDps = append(otherDps, dp)\n\t\t\t\t}\n\t\t\t}\n\t\t\taggregatedDps := aggregateDatapoints(logger, dpsToAggregate, tr.Dimensions, tr.AggregationMethod)\n\t\t\tprocessedDataPoints = append(otherDps, aggregatedDps...)\n\t\t}\n\t}\n\n\treturn processedDataPoints\n}", "func SplitPoints(points PointArray, numStrips int) (splitValues []uint64, splitPoses []int) {\n\tif numStrips <= 1 {\n\t\treturn\n\t}\n\tsplitPos := points.Len() / 2\n\tnth.Element(points, splitPos)\n\tsplitValue := points.GetValue(splitPos)\n\n\tnumStrips1 := (numStrips + 1) / 2\n\tnumStrips2 := numStrips - numStrips1\n\tsplitValues1, splitPoses1 := SplitPoints(points.SubArray(0, splitPos), numStrips1)\n\tsplitValues = append(splitValues, splitValues1...)\n\tsplitPoses = append(splitPoses, splitPoses1...)\n\tsplitValues = append(splitValues, splitValue)\n\tsplitPoses = append(splitPoses, splitPos)\n\tsplitValues2, splitPoses2 := SplitPoints(points.SubArray(splitPos, points.Len()), numStrips2)\n\tsplitValues = append(splitValues, splitValues2...)\n\tfor i := 0; i < len(splitPoses2); i++ {\n\t\tsplitPoses = append(splitPoses, splitPos+splitPoses2[i])\n\t}\n\treturn\n}", "func (info *Info) BuildCallGraph(algo string, tests bool) (*CallGraph, error) {\n\tvar cg 
*callgraph.Graph\n\tswitch algo {\n\tcase \"static\":\n\t\tcg = static.CallGraph(info.Prog)\n\n\tcase \"cha\":\n\t\tcg = cha.CallGraph(info.Prog)\n\n\tcase \"pta\":\n\t\tptrCfg, err := info.PtrAnlysCfg(tests)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tptrCfg.BuildCallGraph = true\n\t\tptares, err := info.RunPtrAnlys(ptrCfg)\n\t\tif err != nil {\n\t\t\treturn nil, err // internal error in pointer analysis\n\t\t}\n\t\tcg = ptares.CallGraph\n\n\tcase \"rta\":\n\t\tmains, err := MainPkgs(info.Prog, tests)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvar roots []*ssa.Function\n\t\tfor _, main := range mains {\n\t\t\troots = append(roots, main.Func(\"init\"), main.Func(\"main\"))\n\t\t}\n\t\trtares := rta.Analyze(roots, true)\n\t\tcg = rtares.CallGraph\n\t}\n\n\tcg.DeleteSyntheticNodes()\n\n\treturn &CallGraph{cg: cg, prog: info.Prog}, nil\n}", "func (p *ProcessCalls) pendingTraces(ctx context.Context, msgChan <-chan *parser.LogEntry, notifyNewTrace chan<- string ) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase msg := <-msgChan:\n\t\t\tp.m.Lock()\n {\n if _, ok := p.logs[msg.Trace]; ! 
ok {\n notifyNewTrace <- msg.Trace\n }\n p.logs[msg.Trace] = append(p.logs[msg.Trace], msg)\n\n\n // if tr, ok := p.logs[msg.Trace]; ok {\n // tr = append(tr, msg)\n // // p.logs[msg.Trace] = append(p.logs[msg.Trace], msg)\n // }else {\n // // p.logs[msg.Trace] = []parser.Logs{msg}\n // p.logs[msg.Trace] = append(p.logs[msg.Trace], msg)\n // notifyNewTrace <- msg.Trace\n // }\n // fmt.Fprintln(os.Stderr,\"New Line\", msg)\n\t\t\t}\n\t\t\tp.m.Unlock()\n\t\t}\n\t}\n}", "func pPoints(q []plotValues) map[plotKeys]plotter.XYs {\n\tpts := make(map[plotKeys]plotter.XYs)\n\tfor i := range q {\n\t\tfmt.Println(i)\n\t}\n\treturn pts\n}", "func SendPoints(nc *nats.Conn, subject string, points data.Points, ack bool) error {\n\tfor i := range points {\n\t\tif points[i].Time.IsZero() {\n\t\t\tpoints[i].Time = time.Now()\n\t\t}\n\t}\n\tdata, err := points.ToPb()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif ack {\n\t\tmsg, err := nc.Request(subject, data, time.Second)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(msg.Data) > 0 {\n\t\t\treturn errors.New(string(msg.Data))\n\t\t}\n\n\t} else {\n\t\tif err := nc.Publish(subject, data); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn err\n}", "func generateCall(generator *Generator, node parser.Node) string {\n\tvar identifier string\n\n\t// Check if it is a built-in function or not\n\tif strings.Contains(node.Value, \"|\") {\n\t\t// Get the function identifier by spliting the value by the pipe\n\t\tidentifier = strings.Split(node.Value, \"|\")[1]\n\n\t\tcheckCall(generator, node)\n\n\t\t// Add import to the generator\n\t\taddCallImport(\n\t\t\tgenerator,\n\t\t\tnode.Value,\n\t\t)\n\t} else {\n\t\tidentifier = node.Value\n\t}\n\n\t// Translate the params\n\tparams := generateParams(generator, node.Params)\n\n\t// Link all the translations together\n\treturn fmt.Sprintf(\n\t\tcCall,\n\t\tidentifier,\n\t\tstrings.Join(params, \",\"),\n\t)\n}", "func (fi *funcInfo) emitCall(line, a, nArgs, nRet int) 
{\r\n\tfi.emitABC(line, OP_CALL, a, nArgs+1, nRet+1)\r\n}", "func (node *CallProc) formatFast(buf *TrackedBuffer) {\n\tbuf.WriteString(\"call \")\n\tnode.Name.formatFast(buf)\n\tbuf.WriteByte('(')\n\tnode.Params.formatFast(buf)\n\tbuf.WriteByte(')')\n}", "func (_Pairing *PairingRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {\n\treturn _Pairing.Contract.PairingCaller.contract.Call(opts, result, method, params...)\n}", "func (g *Game) CalculatePoints(from, to int) []models.PointsEntrySimple {\n\tg.mu.RLock()\n\tdefer g.mu.RUnlock()\n\ttotalPoints := make(map[string]int)\n\tfor i := from; i <= to; i++ {\n\t\tqp := getPointsForQuestion(g.players, i)\n\t\tfor _, entry := range qp {\n\t\t\ttotalPoints[entry.Player] += entry.Points\n\t\t}\n\t}\n\tvar pes []models.PointsEntrySimple\n\tfor player, points := range totalPoints {\n\t\tpes = append(pes, models.PointsEntrySimple{\n\t\t\tPlayer: player,\n\t\t\tPoints: points,\n\t\t})\n\t}\n\treturn pes\n}", "func callers(s selection, args []string) {\n\tfmt.Println(runWithStdin(s.archive(), \"guru\", \"-scope\", scope(args), \"-modified\", \"callers\", s.pos()))\n}", "func walk(steps, face int, start point, xwards bool) {\n\tfor i := 0; i < steps; i++ {\n\t\tif xwards {\n\t\t\tstart.x += face * 1\n\t\t} else {\n\t\t\tstart.y += face * 1\n\t\t}\n\t\tp := point{x: start.x, y: start.y}\n\t\tvisited[p]++\n\t\tif visited[p] > 1 && (twice == origo) { // only set twice if we haven't before\n\t\t\ttwice = p\n\t\t}\n\t}\n}", "func (se *StateEngine) trajectory(points []string, partial bool) error {\n\n\tsubs, nosubs := se.diffSubs()\n\n\t//are we out of points to walk through? 
if so then fire acive and tell others to be inactive\n\tif len(points) < 1 {\n\t\t//deactivate all the non-subroot states\n\t\tfor _, ko := range nosubs {\n\t\t\tko.Deactivate()\n\t\t}\n\n\t\t//if the engine has a root state activate it since in doing so,it will activate its own children else manually activate the children\n\t\tif se.owner != nil {\n\t\t\tse.owner.Activate()\n\t\t} else {\n\t\t\t//activate all the subroot states first so they can\n\t\t\t//do be ready for the root. We call this here incase the StateEngine has no root state\n\t\t\tfor _, ko := range subs {\n\t\t\t\tko.Activate()\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\t//cache the first point so we dont loose it\n\tpoint := points[0]\n\n\tvar state = se.get(point)\n\n\tif state == nil {\n\t\t// for _, ko := range nosubs {\n\t\t// \tif sko.acceptable(se.getAddr(ko), point, so) {\n\t\t// \t\tstate = ko\n\t\t// \t\tbreak\n\t\t// \t}\n\t\t// }\n\t\t//\n\t\t// if state == nil {\n\t\treturn ErrStateNotFound\n\t\t// }\n\t}\n\n\t//set this state as the current active state\n\tse.curr = state\n\n\t//shift the list one more bit for the points\n\tpoints = points[1:]\n\n\t//we pass down the points since that will handle the loadup downwards\n\terr := state.Engine().trajectory(points, partial)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !partial {\n\t\t// //activate all the subroot states first so they can\n\t\t// //do any population they want\n\t\t// for _, ko := range subs {\n\t\t// \tko.Activate(so)\n\t\t// }\n\n\t\tif se.owner != nil {\n\t\t\tse.owner.Activate()\n\t\t}\n\t}\n\n\treturn nil\n}", "func (t *Tile) GetPoints(output chan<- Point) {\n\tif t.Level == level_depth {\n\t\tfor _, point := range t.Points {\n\t\t\toutput <- point\n\t\t}\n\t} else {\n\t\tfor _, tile := range t.SubTiles {\n\t\t\tif tile != nil {\n\t\t\t\ttile.GetPoints(output)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}", "func (t *Twemproxy) processStat(\n\tacc telegraf.Accumulator,\n\ttags map[string]string,\n\tdata 
map[string]interface{},\n) {\n\tif source, ok := data[\"source\"]; ok {\n\t\tif val, ok := source.(string); ok {\n\t\t\ttags[\"source\"] = val\n\t\t}\n\t}\n\n\tfields := make(map[string]interface{})\n\tmetrics := []string{\"total_connections\", \"curr_connections\", \"timestamp\"}\n\tfor _, m := range metrics {\n\t\tif value, ok := data[m]; ok {\n\t\t\tif val, ok := value.(float64); ok {\n\t\t\t\tfields[m] = val\n\t\t\t}\n\t\t}\n\t}\n\tacc.AddFields(\"twemproxy\", fields, tags)\n\n\tfor _, pool := range t.Pools {\n\t\tif poolStat, ok := data[pool]; ok {\n\t\t\tif data, ok := poolStat.(map[string]interface{}); ok {\n\t\t\t\tpoolTags := copyTags(tags)\n\t\t\t\tpoolTags[\"pool\"] = pool\n\t\t\t\tt.processPool(acc, poolTags, data)\n\t\t\t}\n\t\t}\n\t}\n}", "func main() {\n\tdefer testTime(\"main\", time.Now())\n\n\tif len(os.Args) != 2 {\n\t\tpanicIfError(fmt.Errorf(\"required URL does not exists\"))\n\t}\n\n\tshops := makeSliceFromShops(os.Args[1])\n\tfmt.Println(len(shops))\n\ttestLibGeoIndex(shops)\n\ttestLibRTree(shops)\n\n\t//for _, v := range points {\n\t//\tfmt.Println(v.Id())\n\t//}\n\n\tvar p1, p2 *xPoint\n\tp1 = &xPoint{50.425365, 30.459593}\n\tp2 = &xPoint{50.4214319750507, 30.458242893219}\n\tfmt.Println(GreatCircleDistance(p1, p2))\n\tp1 = &xPoint{50.425365, 30.459593}\n\tp2 = &xPoint{50.422747, 30.464512}\n\tfmt.Println(GreatCircleDistance(p1, p2))\n}", "func GetUserWithTempPoints(c *gin.Context) {\n\tvar input models.EditBookingInput\n\tif err := c.ShouldBindQuery(&input); err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error(), \"message\": \"Check input Booking ID on URL parameter\"})\n\t\tfmt.Println(\"Error in getting Booking ID. 
\" + err.Error() + \"\\n\")\n\t\treturn\n\t}\n\n\toldBooking, exists, err := RetrieveBooking(DB, models.URLBooking{BookingID: input.OldBookingID})\n\tif !exists {\n\t\terrorMessage := fmt.Sprintf(\"Booking with ID %s does not exist.\", input.OldBookingID)\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"success\": false, \"message\": errorMessage})\n\t\tfmt.Println(errorMessage)\n\t\treturn\n\t}\n\tif err != nil {\n\t\terrorMessage := fmt.Sprintf(\"Error in retrieving Booking with ID %s.\"+err.Error(), input.OldBookingID)\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error(), \"message\": errorMessage})\n\t\tfmt.Println(errorMessage)\n\t\treturn\n\t}\n\n\tstatusCode, err := GetBookingStatusCode(DB, \"In the midst of booking\")\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error(), \"message\": \"Error in querying for status code.\"})\n\t\tfmt.Println(\"Check statusQuery. \" + err.Error() + \"\\n\")\n\t}\n\n\tpendingBookings, err := GetPendingBookings(DB, models.User{Nusnetid: input.NUSNET_ID}, statusCode)\n\tif err != nil {\n\t\terrorMessage := fmt.Sprintf(\"Error in retrieving pending bookings with for user with NUSNET ID %s.\"+err.Error(), input.NUSNET_ID)\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error(), \"message\": errorMessage})\n\t\tfmt.Println(errorMessage)\n\t\treturn\n\t}\n\n\teditCart, err := EditCartDetails(oldBooking, pendingBookings, models.User{Nusnetid: input.NUSNET_ID})\n\tif err != nil {\n\t\terrorMessage := fmt.Sprint(\"Error in making bookng cart.\" + err.Error())\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error(), \"message\": errorMessage})\n\t\tfmt.Println(errorMessage)\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\"data\": editCart})\n\tfmt.Println(\"Return successful!\")\n}", "func prepareData(ctx context.Context, targets int, fetcher func() *point.Points) *data {\n\tdata := &data{\n\t\tData: &Data{Points: point.NewPoints()},\n\t\tb: make(chan io.ReadCloser, 1),\n\t\te: 
make(chan error, targets),\n\t\tmut: sync.RWMutex{},\n\t\twg: sync.WaitGroup{},\n\t}\n\tdata.wg.Add(1)\n\n\textraPoints := make(chan *point.Points, 1)\n\n\tgo func() {\n\t\t// add extraPoints. With NameToID\n\t\tdefer func() {\n\t\t\tdata.wg.Done()\n\t\t\tclose(extraPoints)\n\t\t}()\n\n\t\t// First check is context is already done\n\t\tif err := contextIsValid(ctx); err != nil {\n\t\t\tdata.e <- fmt.Errorf(\"prepareData failed: %w\", err)\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase extraPoints <- fetcher():\n\t\t\tp := <-extraPoints\n\t\t\tif p != nil {\n\t\t\t\tdata.mut.Lock()\n\t\t\t\tdefer data.mut.Unlock()\n\n\t\t\t\textraList := p.List()\n\t\t\t\tfor i := 0; i < len(extraList); i++ {\n\t\t\t\t\tdata.Points.AppendPoint(\n\t\t\t\t\t\tdata.Points.MetricID(p.MetricName(extraList[i].MetricID)),\n\t\t\t\t\t\textraList[i].Value,\n\t\t\t\t\t\textraList[i].Time,\n\t\t\t\t\t\textraList[i].Timestamp,\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn\n\t\tcase <-ctx.Done():\n\t\t\tdata.e <- fmt.Errorf(\"prepareData failed: %w\", ctx.Err())\n\t\t\treturn\n\t\t}\n\t}()\n\treturn data\n}", "func readCoverPoints(file *elf.File, tracePC uint64, traceCmp map[uint64]bool) ([2][]uint64, error) {\n\tvar pcs [2][]uint64\n\ttext := file.Section(\".text\")\n\tif text == nil {\n\t\treturn pcs, fmt.Errorf(\"no .text section in the object file\")\n\t}\n\tdata, err := text.Data()\n\tif err != nil {\n\t\treturn pcs, fmt.Errorf(\"failed to read .text: %v\", err)\n\t}\n\tconst callLen = 5\n\tend := len(data) - callLen + 1\n\tfor i := 0; i < end; i++ {\n\t\tpos := bytes.IndexByte(data[i:end], 0xe8)\n\t\tif pos == -1 {\n\t\t\tbreak\n\t\t}\n\t\tpos += i\n\t\ti = pos\n\t\toff := uint64(int64(int32(binary.LittleEndian.Uint32(data[pos+1:]))))\n\t\tpc := text.Addr + uint64(pos)\n\t\ttarget := pc + off + callLen\n\t\tif target == tracePC {\n\t\t\tpcs[0] = append(pcs[0], pc)\n\t\t} else if traceCmp[target] {\n\t\t\tpcs[1] = append(pcs[1], pc)\n\t\t}\n\t}\n\treturn pcs, nil\n}", "func (conn 
*ConnWithParameters) bucketPoints(rawPt Point) {\n\n\t// Truncate each item in batch\n\t// Split float by decimal\n\tlatSlice := strings.SplitAfter(rawPt.Lat, \".\")\n\tlngSlice := strings.SplitAfter(rawPt.Lng, \".\")\n\n\t// Truncate second half of slices\n\tlatSlice[1] = conn.truncate(latSlice[1])\n\tlngSlice[1] = conn.truncate(lngSlice[1])\n\n\t//check for truncating edge case\n\tif strings.Contains(latSlice[0], \"-0.\") {\n\t\tlatSlice = conn.checkZero(latSlice)\n\t}\n\tif strings.Contains(lngSlice[0], \"-0.\") {\n\t\tlngSlice = conn.checkZero(lngSlice)\n\t}\n\n\t// Combine the split strings together\n\tlat := strings.Join(latSlice, \"\")\n\tlng := strings.Join(lngSlice, \"\")\n\n\t//create bucket hash\n\tbucket := lat + \":\" + lng\n\n\t//create point\n\tpt := Latlng{\n\t\tCoords: Point{\n\t\t\tLat: lat,\n\t\t\tLng: lng,\n\t\t},\n\t\tCount: 1,\n\t}\n\n\t// Bucketing\n\t// check if bucket exists\n\t// if it does exists, increase the count\n\t_, contains := conn.batchMap[bucket]\n\tif contains {\n\t\tvalue := conn.batchMap[bucket] //get the value of the bucket\n\n\t\tvalue.Count++ //increase the count\n\n\t\tconn.batchMap[bucket] = value //add the new count to the point\n\n\t} else { //otherwise, add the point with the count\n\t\tconn.batchMap[bucket] = pt\n\t}\n}", "func buildTrace(messages []*parser.Message, callTrace *call, spanTo string) {\n\tvar matches []*parser.Message\n\tfor _, msg := range messages {\n\t\tif msg.SpanFrom == spanTo {\n\t\t\tmatches = append(matches, msg)\n\t\t}\n\t}\n\tsort.Sort(parser.Messages(matches))\n\n\tfor _, m := range matches {\n\t\tc := call{\n\t\t\tStart: m.Start,\n\t\t\tEnd: m.End,\n\t\t\tService: m.Service,\n\t\t\tSpan: m.SpanTo,\n\t\t\tCalls: make([]*call, 0),\n\t\t}\n\t\tbuildTrace(messages, &c, m.SpanTo)\n\t\tcallTrace.Calls = append(callTrace.Calls, &c)\n\t}\n}", "func NewPreemptableCallerInfo(\n\tcallerName string,\n) CallerInfo {\n\treturn CallerInfo{\n\t\tCallerName: callerName,\n\t\tCallerType: 
CallerTypePreemptable,\n\t}\n}", "func ShipperCalled(orderID uint) error {\n\tctx := context.Background()\n\t_, err := zbClient.NewPublishMessageCommand().MessageName(\"ShipperCalled\").CorrelationKey(fmt.Sprint(orderID)).TimeToLive(1 * time.Minute).Send(ctx)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn nil\n}", "func pointsToLines(points []Point) (lines []Line) {\n\tfor i := 0; i < len(points); i++ {\n\t\tfor j := i + 1; j < len(points); j++ {\n\t\t\tif points[i].nextTo(points[j]) {\n\t\t\t\tlines = append(lines, Line{P1: points[i], P2: points[j]})\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (mmGetUserLocation *mStorageMockGetUserLocation) Calls() []*StorageMockGetUserLocationParams {\n\tmmGetUserLocation.mutex.RLock()\n\n\targCopy := make([]*StorageMockGetUserLocationParams, len(mmGetUserLocation.callArgs))\n\tcopy(argCopy, mmGetUserLocation.callArgs)\n\n\tmmGetUserLocation.mutex.RUnlock()\n\n\treturn argCopy\n}", "func (t *Tables) Callers(ctx context.Context, req *epb.CallersRequest) (*epb.CallersReply, error) {\n\ttickets := req.Tickets\n\tif len(tickets) == 0 {\n\t\treturn nil, fmt.Errorf(\"missing input tickets: %v\", req)\n\t}\n\n\t// succMap maps nodes onto sets of successor nodes\n\tsuccMap := make(map[string]stringset.Set)\n\n\t// At the moment, this is our policy for missing data: if an input ticket has\n\t// no record in the table, we don't include data for that ticket in the response.\n\t// Other table access errors result in returning an error.\n\tfor _, ticket := range tickets {\n\t\tvar callgraph srvpb.Callgraph\n\t\tif err := t.FunctionToCallers.Lookup(ctx, []byte(ticket), &callgraph); err == table.ErrNoSuchKey {\n\t\t\tcontinue // skip tickets with no mappings\n\t\t} else if err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error looking up callers with ticket %q: %v\", ticket, err)\n\t\t}\n\n\t\t// This can only happen in the context of a postprocessor bug.\n\t\tif callgraph.Type != srvpb.Callgraph_CALLER {\n\t\t\treturn nil, fmt.Errorf(\"type of 
callgraph is not 'CALLER': %v\", callgraph)\n\t\t}\n\n\t\t// TODO(jrtom): consider logging a warning if len(callgraph.Tickets) == 0\n\t\t// (postprocessing should disallow this)\n\t\tfor _, predTicket := range callgraph.Tickets {\n\t\t\tif _, ok := succMap[predTicket]; !ok {\n\t\t\t\tsuccMap[predTicket] = stringset.New()\n\t\t\t}\n\t\t\tset := succMap[predTicket]\n\t\t\tset.Add(ticket)\n\t\t}\n\t}\n\n\treturn &epb.CallersReply{Graph: convertSuccMapToGraph(succMap)}, nil\n}", "func (gen *Db) nodePoints(id string, points data.Points) error {\n\tfor _, p := range points {\n\t\tif p.Time.IsZero() {\n\t\t\tp.Time = time.Now()\n\t\t}\n\t}\n\n\treturn gen.store.Update(func(tx *genji.Tx) error {\n\t\tnec := newNodeEdgeCache(tx)\n\n\t\tne, err := nec.getNodeAndEdges(id)\n\n\t\tif err != nil {\n\t\t\tif err == genjierrors.ErrDocumentNotFound {\n\t\t\t\tif gen.meta.RootID == \"\" {\n\t\t\t\t\tgen.lock.Lock()\n\t\t\t\t\tdefer gen.lock.Unlock()\n\t\t\t\t\tgen.meta.RootID = id\n\t\t\t\t\terr := tx.Exec(`update meta set rootid = ?`, id)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"Error setting rootid in meta: %w\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tne = &nodeAndEdges{\n\t\t\t\t\tnode: &data.Node{\n\t\t\t\t\t\tID: id,\n\t\t\t\t\t\tType: data.NodeTypeDevice,\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tfor _, point := range points {\n\t\t\tif point.Type == data.PointTypeNodeType {\n\t\t\t\tne.node.Type = point.Text\n\t\t\t\t// we don't encode type in points as this has its own field\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tne.node.Points.ProcessPoint(point)\n\t\t}\n\n\t\t/*\n\t\t\t * FIXME: need to clean up offline processing\n\t\t\tstate := node.State()\n\t\t\tif state != data.PointValueSysStateOnline {\n\t\t\t\tnode.Points.ProcessPoint(\n\t\t\t\t\tdata.Point{\n\t\t\t\t\t\tTime: time.Now(),\n\t\t\t\t\t\tType: data.PointTypeSysState,\n\t\t\t\t\t\tText: 
data.PointValueSysStateOnline,\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t}\n\t\t*/\n\n\t\tsort.Sort(ne.node.Points)\n\n\t\terr = nec.processNode(ne, false)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"processNode error: %w\", err)\n\t\t}\n\n\t\terr = nec.writeEdges()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = tx.Exec(`insert into nodes values ? on conflict do replace`, ne.node)\n\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error inserting/updating node: %w\", err)\n\t\t}\n\n\t\treturn nil\n\t})\n}", "func NewSendPoints(portalURL, deviceID, authToken string, timeout time.Duration, debug bool) func(data.Points) error {\n\tvar netClient = &http.Client{\n\t\tTimeout: timeout,\n\t}\n\n\treturn func(points data.Points) error {\n\t\tpointURL := portalURL + \"/v1/devices/\" + deviceID + \"/points\"\n\n\t\ttempJSON, err := json.Marshal(NewPoints(points))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif debug {\n\t\t\tlog.Println(\"Sending points: \", string(tempJSON))\n\t\t}\n\n\t\treq, err := http.NewRequest(\"POST\", pointURL, bytes.NewBuffer(tempJSON))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treq.Header.Set(\"Content-Type\", \"application/json\")\n\t\treq.Header.Set(\"Authorization\", authToken)\n\t\tresp, err := netClient.Do(req)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdefer resp.Body.Close()\n\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\terrstring := \"Server error: \" + resp.Status + \" \" + pointURL\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\terrstring += \" \" + string(body)\n\t\t\treturn errors.New(errstring)\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func pointsto(s selection, args []string) {\n\tfmt.Println(runWithStdin(s.archive(), \"guru\", \"-modified\", \"-scope\", scope(args), \"pointsto\", s.sel()))\n}", "func (d *DXF) Points(s v2.VecSet, r float64) {\n\td.drawing.ChangeLayer(\"Points\")\n\tfor _, p := range s {\n\t\td.drawing.Circle(p.X, p.Y, 0, r)\n\t}\n}", "func (ff *fftag) resetLocations() {\n\tfor _, 
chaser := range ff.chasers {\n\t\tspot := ff.spots[rand.Intn(len(ff.spots))] // get open location.\n\t\tx, y := ff.at(spot) // get map location.\n\t\tchaser.SetLocation(float64(x), float64(y), 0)\n\t}\n\tspot := ff.spots[rand.Intn(len(ff.spots))]\n\tgoalx, goaly := ff.at(spot)\n\tff.goal.SetLocation(float64(goalx), float64(goaly), 0)\n\n\t// create the flow field based on the given goal.\n\tff.flow.Create(goalx, goaly)\n}", "func (_Posminer *PosminerCallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error {\n\treturn _Posminer.Contract.contract.Call(opts, result, method, params...)\n}", "func grabLocation(){\n\t//sends out an immediate request of closest people\n\n}", "func getCallerSourceLocation() string {\n\t_, file, line, ok := runtime.Caller(2)\n\tresult := \"unknown:unknown\"\n\tif ok {\n\t\tresult = fmt.Sprintf(\"%s:%d\", file, line)\n\t}\n\treturn result\n}", "func (p *ProcessCalls) processAfterContextExpiration(ctx context.Context, traceId string) {\n p.pendingTracesWG.Add(1)\n defer p.pendingTracesWG.Done()\n <-ctx.Done()\n\n\n if tr, ok := p.logs[traceId]; ok {\n if !tr.HasRootCallerSpan() {\n // fmt.Fprintln(os.Stderr,\"HasRootCallerSpan is false\",tr)\n return\n }\n }\n\n // ll := p.logs[traceId]\n // fmt.Fprintln(os.Stderr,traceId, \"ll.String\",ll.String())\n // if ll.HasRootCallerSpan() == false {\n // fmt.Fprintln(os.Stderr,ll.String())\n // fmt.Fprintln(os.Stderr,\"HasRootCallerSpan is false\")\n // return\n // }\n // } else {\n // fmt.Fprintln(os.Stderr,ll.String())\n //\n // }\n // fmt.Fprintln(os.Stderr,\"==> buildTraceTree \",p.logs[traceId], traceId, p.info)\n var result traceTree\n tr :=append(p.logs[traceId][:0:0], p.logs[traceId]...)\n // if tr, ok := p.logs[traceId]; ok {\n result = buildTraceTree(&tr, traceId, p.info)\n // fmt.Fprintln(os.Stderr,\"==> result.Root \",result.Root)\n if result.Root == nil {\n return\n }\n\n // }else {\n // return\n // }\n // result := 
buildTraceTree(p.logs[traceId], traceId, p.info)\n // fmt.Fprintln(os.Stderr,\"result %v\",result)\n res, err := json.Marshal(result)\n if err != nil {\n // TODO: Log an error and update the statistics.\n fmt.Fprintln(os.Stderr, err)\n }\n // fmt.Fprintln(os.Stderr,ll.String())\n p.m.Lock()\n defer p.m.Unlock()\n // delete(p.logs, traceId)\n\n // TODO: output this result to a proper source destination.\n fmt.Fprintln(os.Stdout, string(res))\n}", "func Caller(skip string) (string, int, string) {\n\tpc := make([]uintptr, 7)\n\tn := runtime.Callers(2, pc)\n\tframes := runtime.CallersFrames(pc[:n])\n\tfor {\n\t\tframe, more := frames.Next()\n\t\t// frame.File = /tmp/sandbox469341579/prog.go\n\t\t// frame.Line = 28\n\t\t// frame.Function = main.Announcer.Info\n\n\t\t// file = prog.go\n\t\tfile := path.Base(frame.File)\n\t\t// function = Info\n\t\tfunction := path.Base(strings.Replace(frame.Function, \".\", \"/\", -1))\n\n\t\tif file != skip {\n\t\t\treturn file, frame.Line, function\n\t\t}\n\n\t\tif !more {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn \"\", 0, \"\"\n}", "func (mock *MailgunMock) ParseAddressesCalls() []struct {\n\tAddresses []string\n} {\n\tvar calls []struct {\n\t\tAddresses []string\n\t}\n\tlockMailgunMockParseAddresses.RLock()\n\tcalls = mock.calls.ParseAddresses\n\tlockMailgunMockParseAddresses.RUnlock()\n\treturn calls\n}", "func (t *Translator) translateCallStatement(stmt *vm_ast.CallStatement) []string {\n\tkeyString := generateUuidForIdent()\n\treturnAddressLabel := \"return_to_\" + keyString\n\n\t// push return address\n\tresult := []string{\n\t\t\"@\" + returnAddressLabel,\n\t\t\"D=A;\",\n\t}\n\tresult = append(result, t.pushDregStatements()...)\n\n\t// push current LCL\n\tresult = append(result, []string{\n\t\t\"@LCL\",\n\t\t\"D=M;\",\n\t}...)\n\tresult = append(result, t.pushDregStatements()...)\n\n\t// push current ARG\n\tresult = append(result, []string{\n\t\t\"@ARG\",\n\t\t\"D=M;\",\n\t}...)\n\tresult = append(result, 
t.pushDregStatements()...)\n\n\t// push current THIS\n\tresult = append(result, []string{\n\t\t\"@THIS\",\n\t\t\"D=M;\",\n\t}...)\n\tresult = append(result, t.pushDregStatements()...)\n\n\t// push current THAT\n\tresult = append(result, []string{\n\t\t\"@THAT\",\n\t\t\"D=M;\",\n\t}...)\n\tresult = append(result, t.pushDregStatements()...)\n\n\t// ARG = SP - stmt.LocalNum.Literal - 5\n\tresult = append(result, []string{\n\t\t// save current M[SP] in M[R5]\n\t\t\"@SP\",\n\t\t\"D=M;\",\n\t\t\"@R5\",\n\t\t\"M=D;\",\n\t}...)\n\tcalcM := []string{}\n\tswitch stmt.ArgNum.Type {\n\tcase vm_tokenizer.IDENT:\n\t\tcalcM = []string{\n\t\t\t\"@\" + stmt.ArgNum.Literal,\n\t\t\t\"D=M;\",\n\t\t\t\"@R5\",\n\t\t\t\"M=M-D;\",\n\t\t}\n\tcase vm_tokenizer.INT:\n\t\tcalcM = []string{\n\t\t\t\"@\" + stmt.ArgNum.Literal,\n\t\t\t\"D=A;\",\n\t\t\t\"@R5\",\n\t\t\t\"M=M-D;\",\n\t\t}\n\tdefault:\n\t\tlog.Fatalf(\"unexpected token type\")\n\t}\n\tresult = append(result, calcM...)\n\tresult = append(result, []string{\n\t\t\"@5\",\n\t\t\"D=A;\",\n\t\t\"@R5\",\n\t\t\"M=M-D;\",\n\t\t\"D=M;\",\n\t\t\"@ARG\",\n\t\t\"M=D;\",\n\t}...)\n\n\t// LCL = SP\n\tresult = append(result, []string{\n\t\t\"@SP\",\n\t\t\"D=M;\",\n\t\t\"@LCL\",\n\t\t\"M=D;\",\n\t}...)\n\n\tresult = append(result, []string{\n\t\t\"@\" + getFunctionLabel(stmt.Name.Literal),\n\t\t\"0;JMP\",\n\t\t\"(return_to_\" + keyString + \")\",\n\t}...)\n\n\treturn result\n}", "func checkCoord(outputArray []string, stringarray []string, shape string, crtPosition int)string{\n\tvar i int = 0\n\t\n\t//Validates if the Shape is a square\n\t//if it is we should have two values so increment by two to proceed with the Right most derivation\n\tif shape == \"SQR\"{\n\tcrtPosition=crtPosition+2\n\n\tfor ; i != 3; i++{\n\t\tvar frtCoord string=stringarray[crtPosition]\n\t\tx:=frtCoord[0:1]\n\t\ty:=frtCoord[1:2]\n\t// Validates the Y to the give range\n\tif y==\"0\" ||y==\"1\" || y==\"2\" || y==\"3\" || y==\"4\" || y==\"5\" || y==\"6\"|| y==\"7\"|| y==\"8\"|| 
y==\"9\"{\n\t\t\tif i == 1 {\t//If the loop was incremented if so proceed with this new instructions\t\t\t\t\t\n\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <x>\"+y+endString)\n\t\t\t\t\tendString=(\"\"+y+endString)\t\t\t\n\t\t\t\t\t}else{ if endString != \"\" { //If the endString is not empty if so proceed with this new instructions\n\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <coord>,<coord>\"+endString)\n\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <coord>,<x><y>\"+ endString)\t\t\n\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <coord>,<x>\"+y+endString)\t\t\t\t\t\t\n\t\t\t\t\t\t\tendString=(\"\"+y+endString)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t}else{ //The First time that we are entering the loop so access the default format\n\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <coord>,<x><y>\"+ \"finish\")\t\t\n\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <coord>,<x>\"+y+\" finish\")}\n\t\t\t\t\t}\n\n\t\t\t\t\tif x==\"A\" || x==\"B\" || x==\"C\" || x==\"D\" || x==\"E\" || x==\"F\" || x==\"G\"|| x==\"H\"|| x==\"I\"|| x==\"J\"{\n\t\t\t\t\t\tif i == 1 {\t//If the loop was incremented if so proceed with this new instructions\t\n\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" \"+x+endString)\n\t\t\t\t\t\t\tendString=(\" SQR \"+x+endString)\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t}else{\tif endString !=\"\"{//If the endString is empty if so proceed with this new instructions\n\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <coord>,\"+x+endString)\n\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <x><y>, \"+x+endString)\n\t\t\t\t\t\t\t\tendString=(\",\"+x+endString)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t}else{\t\t//The First time that we are entering the loop for the x so access the default 
format\n\t\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <coord>,\"+x+y+\" finish\")\n\t\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <x><y>,\"+x+y+\" finish\")\n\t\t\t\t\t\t\t\t\tendString=(\",\"+x+\"\"+y+\" finish\")\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\t\t\t\t\t\t\t\n\t\t\t\t\t}else{\n\t\t\t\t\t\tfmt.Println(\"Error: \"+x+ \" is invalid\")\n\t\t\t\t\t}\n\tcrtPosition--\n\tif stringarray[crtPosition] == shape{\n\tbreak\n\t}\n\t\n\t\n\t}else{\n\t\tfmt.Println(\"Error: \"+y+ \" is invalid\")\n\t\tbreak\n\t}\n}\n\n}\n\n/*************************************************************************************/\n\t//Validates if the Shape is a Triangle\n\t//if it is we should have three values so increment by two to proceed with the Right most derivation\n\t\n\tif shape == \"TRI\"{\n\tcrtPosition=crtPosition+3\n\n\tfor ; i != 3; i++{\n\t\tvar frtCoord string=stringarray[crtPosition]\n\t\tx:=frtCoord[0:1]\n\t\ty:=frtCoord[1:2]\n\t\t\t// Validates the Y to the give range\n\t\tif y==\"0\" ||y==\"1\" || y==\"2\" || y==\"3\" || y==\"4\" || y==\"5\" || y==\"6\"{\n\t\t\tif i == 1 {\t//If the loop was incremented if so proceed with this new instructions\t\t\n\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\"<coord>,<x>\"+y+endString)\n\t\t\t\tendString=(\"\"+y+endString)\t\t\t\n\t\t\t\t\t}else {\n\t\t\t\t\t\tif i == 2 {//If the loop was incremented the third time if so proceed with this new instructions\n\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\"<x><y>,\"+endString)\n\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\"<x>\"+y+\",\"+endString)\t\n\t\t\t\t\t\t\tendString = (y+\",\"+endString)\t\t\n\t\t\t\t\t\t\t}else{ \n\t\t\t\t\t\t\t\tif endString != \"\" {//If the endString is not empty if so proceed with this new instructions \n\t\t\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] 
\")+\"<coord>,<coord>,<coord>\"+endString)\n\t\t\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\"<coord>,<coord>,<x><y>\"+ endString)\t\t\n\t\t\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\"<coord>,<coord>,<x>\"+y+endString)\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\tendString=(\"\"+y+endString)\t\n\t\t\t\t\t\t\t\t}else{ //The First time that we are entering the loop so access the default format\n\t\t\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <coord>,<coord>,<x><y>\"+ \"finish1\")\t\t\n\t\t\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <coord>,<coord>,<x>\"+y+\" finish1\")\n\t\t\t\t\t\t\t\t\t}\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t}\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t}\n\n\t\tif x==\"A\" || x==\"B\" || x==\"C\" || x==\"D\" || x==\"E\" || x==\"G\" || x==\"F\"{\n\t\t\t\tif i == 1 { //If the loop was incremented if so proceed with this new instructions\t\n\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\"<coord>,\"+x+endString)\n\t\t\t\t\t\t\tendString=(x+endString)\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t}else{ \t\t\t\t\t\t\n\t\t\t\t\t\t\tif endString !=\"\"{\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tif i == 2{//If the loop was incremented the third time if so proceed with this new instructions\n\t\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" \"+x+endString)\n\t\t\t\t\t\t\t\t\tendString = (\" TRI \"+x+endString)\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t}else{//If the endString is not empty if so proceed with this new instructions \n\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\"<coord>,<coord>,\"+x+endString)\n\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\"<coord>,<x><y>, \"+x+endString)\n\t\t\t\t\t\t\t\tendString=(\",\"+x+endString)\n\t\t\t\t\t\t\t\t}\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t}else{\t\t//The First time that we are entering the loop 
so access the default format\n\t\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\"<coord>,<coord>,\"+x+y+\" finish2\")\n\t\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\"<coord>,<x><y>,\"+x+y+\" finish2\")\n\t\t\t\t\t\t\t\t\tendString=(\",\"+x+\"\"+y+\" finish\")\n\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t}\t\t\t\t\t\t\t\n\t\t}\n\tcrtPosition--\n\t\n\t}\n\n}\n}\n\n/**************************************************************************************************/\n//Validates if the Shape is a Triangle\n//if it is we should have one value so increment by one to proceed with the Right most derivation\n//Either of these values can we FILL GRID or CIR\n\nif shape == \"FILL\" || shape == \"GRID\" || shape == \"CIR\"{\n\tcrtPosition=crtPosition+1\n\tfor ; i != 1; i++{\n\t\tvar frtCoord string=stringarray[crtPosition]\n\t\tx:=frtCoord[0:1]\n\t\ty:=frtCoord[1:2]\n\t\t// Validates the Y to the give range\n\t\tif y==\"0\" ||y==\"1\" || y==\"2\" || y==\"3\" || y==\"4\" || y==\"5\" || y==\"6\"{\n\t\t\t\tif shape == \"CIR\"{ // Circle Carry a different format so the printing is as follows\n\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <coord>\"+ \"finish1\")\n\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <x><y>,<x>\"+ \"finish1\")\n\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <x>\"+y+\" finish1\")\n\t\t\t\t\t}else{ // Its just the regular format\n\t\t\t\t\t\n\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <x><y>\"+ \"finish1\")\t\t\n\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <x>\"+y+\" finish1\")\n\t\t\t\t\t}\n\t\t\tif x==\"A\" || x==\"B\" || x==\"C\" || x==\"D\" || x==\"E\" || x==\"G\" || x==\"F\"{\n\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" \"+x+y+\" finish2\")\n\t\t\t\t\tendString=(\",\"+x+\"\"+y+\" 
finish\")\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t}\t\t\t\t\t\t\t\t\t\t\t\t\n\t\tcrtPosition--\t\n\t}\n}\n}\n\nreturn endString\n}", "func getPoint(x, y []byte) plotter.XYZs {\n\tpts := make(plotter.XYZs, len(x))\n\tfor i := range x {\n\t\tpts[i].X = float64(x[i])\n\t\tpts[i].Y = float64(y[i])\n\t\tpts[i].Z = 0.1\n\n\t}\n\treturn pts\n}", "func CallEvalArgAt(vm *VM, target, locals Interface, msg *Message) Interface {\n\tc := target.(*Call)\n\tv, stop := msg.NumberArgAt(vm, locals, 0)\n\tif stop != nil {\n\t\treturn stop\n\t}\n\treturn c.Msg.EvalArgAt(vm, c.Sender, int(v.Value))\n}", "func (l *Logger) PrintCaller(skip int) {\n\tl.Log(Info, SPrintCaller(skip+2))\n}", "func (_Address *AddressRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {\n\treturn _Address.Contract.AddressCaller.contract.Call(opts, result, method, params...)\n}", "func (_Address *AddressRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {\n\treturn _Address.Contract.AddressCaller.contract.Call(opts, result, method, params...)\n}", "func (_Address *AddressRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {\n\treturn _Address.Contract.AddressCaller.contract.Call(opts, result, method, params...)\n}", "func (_Address *AddressRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {\n\treturn _Address.Contract.AddressCaller.contract.Call(opts, result, method, params...)\n}", "func (pc *programCode) createCall(name string) {\n\tcode := \"\"\n\tcode += \"\\n\\tcall \" + name + \"\\t; call label \" + name + \"\\n\"\n\tpc.appendCode(code)\n}", "func (pc *ParticleClient) EdgePoints(nodeID, parentID string, points []data.Point) {\n\tpc.newEdgePoints <- NewPoints{nodeID, parentID, points}\n}", "func (s *BaseAspidaListener) ExitPoints(ctx *PointsContext) {}", "func (s *Service) processBatches(batcher *tsdb.PointBatcher) {\n\tfor 
{\n\t\tselect {\n\t\tcase <-s.done:\n\t\t\treturn\n\t\tcase batch := <-batcher.Out():\n\t\t\t// Will attempt to create database if not yet created.\n\t\t\tif err := s.createInternalStorage(); err != nil {\n\t\t\t\ts.Logger.Info(\"Required database does not yet exist\", logger.Database(s.Database), zap.Error(err))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err := s.PointsWriter.WritePointsPrivileged(s.Database, s.RetentionPolicy, coordinator.ConsistencyLevelAny, batch); err == nil {\n\t\t\t\tatomic.AddInt64(&s.stats.BatchesTransmitted, 1)\n\t\t\t\tatomic.AddInt64(&s.stats.PointsTransmitted, int64(len(batch)))\n\t\t\t} else {\n\t\t\t\ts.Logger.Info(\"Failed to write point batch to database\",\n\t\t\t\t\tlogger.Database(s.Database), zap.Error(err))\n\t\t\t\tatomic.AddInt64(&s.stats.BatchesTransmitFail, 1)\n\t\t\t}\n\t\t}\n\t}\n}" ]
[ "0.5804814", "0.55561084", "0.54828817", "0.54530644", "0.5207425", "0.51024735", "0.5098401", "0.50678176", "0.5043976", "0.50209445", "0.49973452", "0.49861223", "0.49611437", "0.4915316", "0.4851949", "0.48359668", "0.48310632", "0.48305863", "0.4799254", "0.4785857", "0.47404245", "0.47260666", "0.471229", "0.4692623", "0.4668686", "0.46684283", "0.4668256", "0.46433884", "0.4631132", "0.46265152", "0.46122575", "0.46096632", "0.45997483", "0.45756313", "0.45704678", "0.4565594", "0.45535722", "0.45529148", "0.45525354", "0.4542994", "0.45405114", "0.45199203", "0.45143414", "0.45112288", "0.45055437", "0.4503116", "0.45026866", "0.44925693", "0.44793507", "0.44611636", "0.44490153", "0.4445905", "0.44390187", "0.4438711", "0.44382766", "0.4436614", "0.44322366", "0.44248334", "0.44220203", "0.44158792", "0.4413424", "0.44094837", "0.44091365", "0.4407834", "0.44056374", "0.43943876", "0.43943852", "0.43928373", "0.43847877", "0.4381054", "0.43742898", "0.43694118", "0.43687925", "0.43525147", "0.43520886", "0.43508846", "0.43455824", "0.4343373", "0.43370873", "0.43367916", "0.4334879", "0.43278193", "0.43245715", "0.430518", "0.43004552", "0.42907017", "0.42865756", "0.4285323", "0.427855", "0.42713702", "0.42708027", "0.4267776", "0.42665762", "0.42665762", "0.42665762", "0.42665762", "0.4266447", "0.42616004", "0.42595842", "0.4259145" ]
0.72903216
0
Process any associations, pulling in their schedules
func (bf *boardFilter) processAssociations(s ldb.Service) { for _, assoc := range s.Associations { assoc.AddTiplocs(bf.tiplocs) //if assoc.IsJoin() || assoc.IsSplit() { ar := assoc.Main.RID ai := assoc.Main.LocInd if ar == s.RID { ar = assoc.Assoc.RID ai = assoc.Assoc.LocInd } // Resolve the schedule if a split, join or if NP only if previous service & we are not yet running //if ar != s.RID { if assoc.Category != "NP" || (s.LastReport.Tiploc == "" && assoc.Assoc.RID == s.RID) { as := bf.d.ldb.GetSchedule(ar) if as != nil { assoc.Schedule = as as.AddTiplocs(bf.tiplocs) as.LastReport = as.GetLastReport() bf.processToc(as.Toc) if ai < (len(as.Locations) - 1) { if as.Origin != nil { bf.addTiploc(as.Destination.Tiploc) } destination := as.Locations[len(as.Locations)-1].Tiploc if as.Destination != nil { destination = as.Destination.Tiploc } viaRequest := bf.addVia(ar, destination) for _, l := range as.Locations[ai:] { bf.addTiploc(l.Tiploc) viaRequest.AppendTiploc(l.Tiploc) } } bf.processReason(as.CancelReason, true) bf.processReason(as.LateReason, false) } } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *candidate) Schedule() (constructedSchedule, error) {\n\tsch := constructedSchedule{\n\t\tearliest: s.earliest,\n\t\teventsByAttendee: make(map[AttendeeID]*attendeeEvents),\n\t}\n\tfor _, event := range s.order {\n\t\tif err := sch.Add(s.reqs[event]); err != nil {\n\t\t\treturn sch, err\n\t\t}\n\t}\n\treturn sch, nil\n}", "func fetchClassSchedules(roomname, term string) ([]ClassSchedule, *nerr.E) {\n\n\t//we figure out the building\n\tbr := strings.Split(roomname, \"-\")\n\tvar toReturn []ClassSchedule\n\n\tvar resp ClassResponse\n\n\terr := wso2requests.MakeWSO2Request(\"GET\", fmt.Sprintf(\"https://api.byu.edu/byuapi/classes/v1?year_term=%v&building=%v&context=class_schedule\", term, br[0]), []byte{}, &resp)\n\n\tif err != nil {\n\t\treturn toReturn, err.Addf(\"Couldn't fetch class scheudle\")\n\t}\n\n\tfor i := range resp.Values {\n\t\ttoReturn = append(toReturn, resp.Values[i])\n\t}\n\n\tfor resp.Metadata.PageEnd < resp.Metadata.CollectionSize {\n\n\t\terr := wso2requests.MakeWSO2Request(\"GET\", fmt.Sprintf(\"https://api.byu.edu/byuapi/classes/v1?year_term=%v&building=%v&context=class_schedule&page_start=%v\", term, br[0], resp.Metadata.PageEnd+1), []byte{}, &resp)\n\n\t\tif err != nil {\n\t\t\treturn toReturn, err.Addf(\"Couldn't fetch class scheudle\")\n\t\t}\n\n\t\tfor i := range resp.Values {\n\t\t\ttoReturn = append(toReturn, resp.Values[i])\n\t\t}\n\t\tlog.L.Debugf(\"Have %v classes\", len(toReturn))\n\t}\n\n\treturn toReturn, nil\n}", "func (records Records) LoadDoctorSchedule(fetcher DoctorScheduleFetcher) {\n\tvar lastID, lastSpec, lastName string\n\n\tdoctorRecords := make(Records, 0)\n\n\tfor _, r := range records {\n\t\tif lastID == \"\" {\n\t\t\tlastID = r.ID()\n\t\t\tlastSpec = r.Spec\n\t\t\tlastName = r.Name\n\t\t}\n\n\t\tif r.ID() != lastID {\n\t\t\tdoctorRecords = doctorRecords.Cleaned()\n\n\t\t\tschedule := &DoctorSchedule{\n\t\t\t\tSpec: lastSpec,\n\t\t\t\tName: lastName,\n\t\t\t\tCells: make(TimeCells, 
len(doctorRecords)),\n\t\t\t}\n\n\t\t\tfor i, rr := range doctorRecords {\n\t\t\t\tschedule.Cells[i] = &TimeCell{\n\t\t\t\t\tStartTime: rr.StartTime,\n\t\t\t\t\tDuration: rr.Duration,\n\t\t\t\t\tFree: rr.Free,\n\t\t\t\t\tRoom: rr.Room,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfetcher(schedule)\n\n\t\t\tlastID = r.ID()\n\t\t\tlastSpec = r.Spec\n\t\t\tlastName = r.Name\n\t\t\tdoctorRecords = make(Records, 0)\n\t\t}\n\n\t\tdoctorRecords = append(doctorRecords, r)\n\t}\n}", "func notifyScheduleAssociates(s models.Schedule, action string) error {\n\t// Get the associated schedule events\n\tvar events []models.ScheduleEvent\n\tif err := dbClient.GetScheduleEventsByScheduleName(&events, s.Name); err != nil {\n\t\treturn err\n\t}\n\n\t// Get the device services for the schedule events\n\tvar services []models.DeviceService\n\tfor _, se := range events {\n\t\tvar ds models.DeviceService\n\t\tif err := dbClient.GetDeviceServiceByName(&ds, se.Service); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tservices = append(services, ds)\n\t}\n\n\t// Notify the associated device services\n\tif err := notifyAssociates(services, s.Id.Hex(), action, models.SCHEDULE); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (db *Database) GetSchedule(startLocationName, destinationName, date string) ([]Trip, map[int][]TripOffering, error) {\n trips := []Trip{}\n offerings := make(map[int][]TripOffering)\n row, err := db.Query(fmt.Sprintf(\"SELECT * FROM Trip WHERE StartLocationName=%s\", startLocationName))\n if err != nil {\n return trips, offerings, err\n }\n // Get the trips with the given start location name\n trips = RowToTrips(row)\n row.Close()\n // Get the trip offerings for each trip\n for _, t := range trips {\n row, err := db.Query(fmt.Sprintf(\"SELECT * FROM TripOffering WHERE TripNumber=%d\", t.TripNumber))\n if err != nil {\n return trips, offerings, err\n }\n for row.Next() {\n var tripNumber int\n var date string\n var scheduledStartTime string\n var scheduledArrivalTime string\n var 
driverName string\n var busID int\n row.Scan(&tripNumber, &date, &scheduledStartTime, &scheduledArrivalTime, &driverName, &busID)\n if _, ok := offerings[tripNumber]; !ok {\n offerings[tripNumber] = []TripOffering{}\n }\n offerings[tripNumber] = append(offerings[tripNumber], TripOffering{\n TripNumber: tripNumber,\n Date: date,\n ScheduledStartTime: scheduledStartTime,\n ScheduledArrivalTime: scheduledArrivalTime,\n DriverName: driverName,\n BusID: busID,\n })\n }\n row.Close()\n }\n return trips, offerings, nil\n}", "func soundersScheduleCollector() {\n\n\tfetchSoundersSchedule()\n\n\tc := time.Tick(24 * time.Hour)\n\tfor _ = range c {\n\t\tfetchSoundersSchedule()\n\t}\n}", "func ScheduleUnmarshalJSON(b []byte) (schedule Schedule, err error) {\n\tvar mixed interface{}\n\tjson.Unmarshal(b, &mixed)\n\n\tfor key, value := range mixed.(map[string]interface{}) {\n\t\trawValue, _ := json.Marshal(value)\n\t\tswitch key {\n\t\tcase \"date\":\n\t\t\tvar date Date\n\t\t\terr = json.Unmarshal(rawValue, &date)\n\t\t\tschedule = date\n\t\tcase \"day\":\n\t\t\tvar day Day\n\t\t\terr = json.Unmarshal(rawValue, &day)\n\t\t\tschedule = day\n\t\tcase \"intersection\":\n\t\t\tvar intersection Intersection\n\t\t\terr = json.Unmarshal(rawValue, &intersection)\n\t\t\tschedule = intersection\n\t\tcase \"month\":\n\t\t\tvar month Month\n\t\t\terr = json.Unmarshal(rawValue, &month)\n\t\t\tschedule = month\n\t\tcase \"union\":\n\t\t\tvar union Union\n\t\t\terr = json.Unmarshal(rawValue, &union)\n\t\t\tschedule = union\n\t\tcase \"week\":\n\t\t\tvar week Week\n\t\t\terr = json.Unmarshal(rawValue, &week)\n\t\t\tschedule = week\n\t\tcase \"weekday\":\n\t\t\tvar weekday Weekday\n\t\t\terr = json.Unmarshal(rawValue, &weekday)\n\t\t\tschedule = weekday\n\t\tcase \"year\":\n\t\t\tvar year Year\n\t\t\terr = json.Unmarshal(rawValue, &year)\n\t\t\tschedule = year\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"%s is not a recognized schedule\", key)\n\t\t}\n\t}\n\treturn\n}", "func (e *Server) Association() 
[]interface{} {\n\tret := []interface{}{}\n\tfor _, x := range e.Processors {\n\t\tret = append(ret, &x)\n\t}\n\tfor _, x := range e.Memory {\n\t\tret = append(ret, &x)\n\t}\n\tfor _, x := range e.EthernetInterfaces {\n\t\tfor _, y := range x.IPv4Addresses {\n\t\t\tret = append(ret, &y)\n\t\t}\n\t\tfor _, y := range x.IPv6Addresses {\n\t\t\tret = append(ret, &y)\n\t\t}\n\t\tfor _, y := range x.VLANs {\n\t\t\tret = append(ret, &y)\n\t\t}\n\t\tret = append(ret, &x)\n\t}\n\tfor _, x := range e.NetworkInterfaces {\n\t\tret = append(ret, &x)\n\t}\n\tfor _, x := range e.Storages {\n\t\tfor _, y := range x.StorageControllers {\n\t\t\tret = append(ret, &y)\n\t\t}\n\t\tret = append(ret, &x)\n\t}\n\tfor _, x := range e.Power.PowerControl {\n\t\tret = append(ret, &x)\n\t}\n\tfor _, x := range e.Power.PowerSupplies {\n\t\tret = append(ret, &x)\n\t}\n\tfor _, x := range e.Power.Redundancy {\n\t\tret = append(ret, &x)\n\t}\n\tret = append(ret, &e.Power)\n\tfor _, x := range e.Thermal.Fans {\n\t\tret = append(ret, &x)\n\t}\n\tret = append(ret, &e.Thermal)\n\tfor _, x := range e.Boards {\n\t\tret = append(ret, &x)\n\t}\n\tfor _, x := range e.NetworkAdapters {\n\t\tfor _, y := range x.Controllers {\n\t\t\tfor _, z := range y.NetworkPorts {\n\t\t\t\tret = append(ret, &z)\n\t\t\t}\n\t\t\tret = append(ret, &y)\n\t\t}\n\t\tret = append(ret, &x)\n\t}\n\tfor _, x := range e.Drives {\n\t\tfor _, y := range x.Location {\n\t\t\tret = append(ret, y.PostalAddress)\n\t\t\tret = append(ret, y.Placement)\n\t\t\tret = append(ret, &y)\n\t\t}\n\t\tret = append(ret, &x)\n\t}\n\tfor _, x := range e.PCIeDevices {\n\t\tfor _, y := range x.PCIeFunctions {\n\t\t\tret = append(ret, &y)\n\t\t}\n\t\tret = append(ret, &x)\n\t}\n\treturn ret\n}", "func (jf JobFactory) Process(schedules []Schedule) {\n\tfor _, item := range schedules {\n\t\tif item.Api.Url != \"\" {\n\t\t\tlocalItem := item\n\t\t\tAddJob(item.Schedule, func() {\n\t\t\t\tlog.Printf(\"executing %s at %s\", localItem.Name, 
localItem.Api.Url)\n\t\t\t\toptions := restful.Options{}\n\t\t\t\toptions.Method = localItem.Api.Method\n\t\t\t\toptions.Headers = make(map[string]string)\n\t\t\t\toptions.Headers[\"Content-Type\"] = \"application/json\"\n\t\t\t\tif localItem.Api.Authorization != \"\" {\n\t\t\t\t\toptions.Headers[\"Authorization\"] = localItem.Api.Authorization\n\t\t\t\t}\n\t\t\t\toptions.Transformer = localItem.Api.Transform\n\t\t\t\toptions.Payload = localItem.Api.Body\n\t\t\t\tmessage, _ := restful.Call(localItem.Api.Url, &options)\n\t\t\t\tevent := EventData{}\n\t\t\t\tjson.Unmarshal([]byte(message), &event)\n\t\t\t\tGetEventsManager().Notify(event)\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\t\tvalue, ok := advertisedJobs[item.Name]\n\t\tif ok {\n\t\t\tlog.Printf(\"%s, %s\", item.Schedule, item.Name)\n\t\t\tAddJob(item.Schedule, value)\n\t\t}\n\t}\n\tInitJobs()\n}", "func (s *Schedule) GetAll(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\tconn, err := db.Connect()\n\tif err != nil {\n\t\treturn common.APIError(http.StatusInternalServerError, err)\n\t}\n\n\tsession := conn.NewSession(nil)\n\tdefer session.Close()\n\tdefer conn.Close()\n\n\tif request.QueryStringParameters == nil {\n\t\trequest.QueryStringParameters = map[string]string{\n\t\t\t\"event_id\": request.PathParameters[\"id\"],\n\t\t}\n\t} else {\n\t\trequest.QueryStringParameters[\"event_id\"] = request.PathParameters[\"id\"]\n\t}\n\n\tresult, err := db.Select(session, db.TableEventSchedule, request.QueryStringParameters, Schedule{})\n\tif err != nil {\n\t\treturn common.APIError(http.StatusInternalServerError, err)\n\t}\n\n\treturn common.APIResponse(result, http.StatusOK)\n}", "func extract_schedules(hull []fpoint) []vrp.Schedule {\n\tschedules := make([]vrp.Schedule, len(hull))\n\tfor i, h := range hull {\n\t\tschedules[i] = h.schedule\n\t}\n\treturn schedules\n}", "func (s *Scheduler) ScheduleTasks() {\n\t/*\n\t\tif events exist unattended, make tasks based on set up times\n\t*/\n\n}", 
"func (a *Airport) processArrivals() {\n\tfor {\n\t\tarrival, ok := <-a.arrivalChan\n\t\tif !ok {\n\t\t\ta.log.Errorf(\"arrival channel closed\")\n\t\t\treturn\n\t\t}\n\t\tswitch arrival.GetChangeType() {\n\t\tcase datasync.Put:\n\t\t\tfl := flight.Info{}\n\t\t\tif err := arrival.GetValue(&fl); err != nil {\n\t\t\t\ta.log.Errorf(\"failed to get value for arrival flight: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfl.Status = flight.Status_arrival\n\t\t\ta.runwayChan <- fl\n\t\tcase datasync.Delete:\n\t\t\ta.log.Debugf(\"arrival %s deleted\\n\", arrival.GetKey())\n\t\t}\n\t}\n}", "func GetADVSchedules(id string, addr string, localIP string) error {\r\n\tlocalAddr, err := net.ResolveIPAddr(\"ip\", localIP)\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\tLocalBindAddr := &net.TCPAddr{IP: localAddr.IP}\r\n\ttransport := &http.Transport{\r\n\t\tDial: (&net.Dialer{\r\n\t\t\tLocalAddr: LocalBindAddr,\r\n\t\t\tTimeout: 5 * time.Second,\r\n\t\t\tKeepAlive: 30 * time.Second,\r\n\t\t}).Dial,\r\n\t}\r\n\tclient := &http.Client{\r\n\t\tTransport: transport,\r\n\t}\r\n\r\n\turl := \"http://\" + addr + \"/adm/adv-schedules/\" + id + \"?format=cic\"\r\n\r\n\treq, err := http.NewRequest(\"GET\", url, nil)\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\tresp, err := client.Do(req)\r\n\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\tif resp.StatusCode != 200 {\r\n\t\treturn fmt.Errorf(\"ADM Receved %v\", resp.Status)\r\n\t}\r\n\r\n\tfor {\r\n\t\tbuf := make([]byte, 32*1024)\r\n\t\t_, err := resp.Body.Read(buf)\r\n\r\n\t\tif err != nil && err != io.EOF {\r\n\t\t\treturn err\r\n\t\t}\r\n\r\n\t\tif err == io.EOF {\r\n\t\t\tbreak\r\n\t\t}\r\n\t}\r\n\tresp.Body.Close()\r\n\ttransport.CloseIdleConnections()\r\n\r\n\treturn nil\r\n}", "func notifyScheduleEventAssociates(se models.ScheduleEvent, action string) error {\n\t// Get the associated device service\n\tvar ds models.DeviceService\n\tif err := dbClient.GetDeviceServiceByName(&ds, se.Service); err != nil {\n\t\treturn 
err\n\t}\n\n\tvar services []models.DeviceService\n\tservices = append(services, ds)\n\n\t// Notify the associated device service\n\tif err := notifyAssociates(services, se.Id.Hex(), action, models.SCHEDULEEVENT); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func doEvents() error {\n\tif len(accounts) == 0 {\n\t\twf.NewItem(\"No Accounts Configured\").\n\t\t\tSubtitle(\"Action this item to add a Google account\").\n\t\t\tAutocomplete(\"workflow:login\").\n\t\t\tIcon(aw.IconWarning)\n\n\t\twf.SendFeedback()\n\t\treturn nil\n\t}\n\n\tvar (\n\t\tcals []*Calendar\n\t\terr error\n\t)\n\n\tif cals, err = activeCalendars(); err != nil {\n\t\tif err == errNoActive {\n\t\t\twf.NewItem(\"No Active Calendars\").\n\t\t\t\tSubtitle(\"Action this item to choose calendars\").\n\t\t\t\tAutocomplete(\"workflow:calendars\").\n\t\t\t\tIcon(aw.IconWarning)\n\n\t\t\twf.SendFeedback()\n\n\t\t\treturn nil\n\t\t}\n\n\t\tif err == errNoCalendars {\n\t\t\tif !wf.IsRunning(\"update-calendars\") {\n\t\t\t\tcmd := exec.Command(os.Args[0], \"update\", \"calendars\")\n\t\t\t\tif err := wf.RunInBackground(\"update-calendars\", cmd); err != nil {\n\t\t\t\t\treturn errors.Wrap(err, \"run calendar update\")\n\t\t\t\t}\n\t\t\t}\n\n\t\t\twf.NewItem(\"Fetching List of Calendars…\").\n\t\t\t\tSubtitle(\"List will reload shortly\").\n\t\t\t\tValid(false).\n\t\t\t\tIcon(ReloadIcon())\n\n\t\t\twf.Rerun(0.1)\n\t\t\twf.SendFeedback()\n\n\t\t\treturn nil\n\t\t}\n\n\t\treturn err\n\t}\n\n\tlog.Printf(\"%d active calendar(s)\", len(cals))\n\n\tvar (\n\t\tall []*Event\n\t\tevents []*Event\n\t\tparsed time.Time\n\t)\n\n\tif all, err = loadEvents(opts.StartTime, cals...); err != nil {\n\t\treturn errors.Wrap(err, \"load events\")\n\t}\n\n\t// Filter out events after cutoff\n\tfor _, e := range all {\n\t\tif !opts.ScheduleMode && e.Start.After(opts.EndTime) {\n\t\t\tbreak\n\t\t}\n\t\tevents = append(events, e)\n\t\tlog.Printf(\"%s\", e.Title)\n\t}\n\n\tif len(all) == 0 && wf.IsRunning(\"update-events\") 
{\n\t\twf.NewItem(\"Fetching Events…\").\n\t\t\tSubtitle(\"Results will refresh shortly\").\n\t\t\tIcon(ReloadIcon()).\n\t\t\tValid(false)\n\n\t\twf.Rerun(0.1)\n\t}\n\n\tlog.Printf(\"%d event(s) for %s\", len(events), opts.StartTime.Format(timeFormat))\n\n\tif t, ok := parseDate(opts.Query); ok {\n\t\tparsed = t\n\t}\n\n\tif len(events) == 0 && opts.Query == \"\" {\n\t\twf.NewItem(fmt.Sprintf(\"No Events on %s\", opts.StartTime.Format(timeFormatLong))).\n\t\t\tIcon(ColouredIcon(iconCalendar, yellow))\n\t}\n\n\tvar day time.Time\n\n\tfor _, e := range events {\n\t\t// Show day indicator if this is the first event of a given day\n\t\tif opts.ScheduleMode && midnight(e.Start).After(day) {\n\t\t\tday = midnight(e.Start)\n\n\t\t\twf.NewItem(day.Format(timeFormatLong)).\n\t\t\t\tArg(day.Format(timeFormat)).\n\t\t\t\tValid(true).\n\t\t\t\tIcon(iconDay)\n\t\t}\n\n\t\ticon := ColouredIcon(iconCalendar, e.Colour)\n\n\t\tsub := fmt.Sprintf(\"%s – %s / %s\",\n\t\t\te.Start.Local().Format(hourFormat),\n\t\t\te.End.Local().Format(hourFormat),\n\t\t\te.CalendarTitle)\n\n\t\tif e.Location != \"\" {\n\t\t\tsub = sub + \" / \" + e.Location\n\t\t}\n\n\t\tit := wf.NewItem(e.Title).\n\t\t\tSubtitle(sub).\n\t\t\tIcon(icon).\n\t\t\tArg(e.URL).\n\t\t\tQuicklook(previewURL(opts.StartTime, e.ID)).\n\t\t\tValid(true).\n\t\t\tVar(\"action\", \"open\")\n\n\t\tif e.Location != \"\" {\n\t\t\tapp := \"Google Maps\"\n\t\t\tif opts.UseAppleMaps {\n\t\t\t\tapp = \"Apple Maps\"\n\t\t\t}\n\n\t\t\ticon := ColouredIcon(iconMap, e.Colour)\n\t\t\tit.NewModifier(\"cmd\").\n\t\t\t\tSubtitle(\"Open in \"+app).\n\t\t\t\tArg(mapURL(e.Location)).\n\t\t\t\tValid(true).\n\t\t\t\tIcon(icon).\n\t\t\t\tVar(\"CALENDAR_APP\", \"\") // Don't open Maps URLs in CALENDAR_APP\n\t\t}\n\t}\n\n\tif !opts.ScheduleMode {\n\t\t// Navigation items\n\t\tprev := opts.StartTime.AddDate(0, 0, -1)\n\t\twf.NewItem(\"Previous: 
\"+relativeDate(prev)).\n\t\t\tIcon(iconPrevious).\n\t\t\tArg(prev.Format(timeFormat)).\n\t\t\tValid(true).\n\t\t\tVar(\"action\", \"date\")\n\n\t\tnext := opts.StartTime.AddDate(0, 0, 1)\n\t\twf.NewItem(\"Next: \"+relativeDate(next)).\n\t\t\tIcon(iconNext).\n\t\t\tArg(next.Format(timeFormat)).\n\t\t\tValid(true).\n\t\t\tVar(\"action\", \"date\")\n\t}\n\n\tif opts.Query != \"\" {\n\t\twf.Filter(opts.Query)\n\t}\n\n\tif !parsed.IsZero() {\n\t\ts := parsed.Format(timeFormat)\n\n\t\twf.NewItem(parsed.Format(timeFormatLong)).\n\t\t\tSubtitle(relativeDays(parsed, false)).\n\t\t\tArg(s).\n\t\t\tAutocomplete(s).\n\t\t\tValid(true).\n\t\t\tIcon(iconDefault)\n\t}\n\n\twf.WarnEmpty(\"No Matching Events\", \"Try a different query?\")\n\twf.SendFeedback()\n\treturn nil\n}", "func updateScheduleFields(from models.Schedule, to *models.Schedule, w http.ResponseWriter) error {\n\tif from.Cron != \"\" {\n\t\tif _, err := cron.Parse(from.Cron); err != nil {\n\t\t\terr = errors.New(\"Invalid cron format\")\n\t\t\thttp.Error(w, err.Error(), http.StatusConflict)\n\t\t\treturn err\n\t\t}\n\t\tto.Cron = from.Cron\n\t}\n\tif from.End != \"\" {\n\t\tif _, err := msToTime(from.End); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusConflict)\n\t\t\treturn err\n\t\t}\n\t\tto.End = from.End\n\t}\n\tif from.Frequency != \"\" {\n\t\tif !isIntervalValid(from.Frequency) {\n\t\t\terr := errors.New(\"Frequency format is incorrect: \" + from.Frequency)\n\t\t\thttp.Error(w, err.Error(), http.StatusConflict)\n\t\t\treturn err\n\t\t}\n\t\tto.Frequency = from.Frequency\n\t}\n\tif from.Start != \"\" {\n\t\tif _, err := msToTime(from.Start); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusConflict)\n\t\t\treturn err\n\t\t}\n\t\tto.Start = from.Start\n\t}\n\tif from.Origin != 0 {\n\t\tto.Origin = from.Origin\n\t}\n\tif from.Name != \"\" && from.Name != to.Name {\n\t\t// Check if new name is unique\n\t\tvar checkS models.Schedule\n\t\tif err := dbClient.GetScheduleByName(&checkS, 
from.Name); err != nil {\n\t\t\tif err != db.ErrNotFound {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusServiceUnavailable)\n\t\t\t}\n\t\t} else {\n\t\t\tif checkS.Id != to.Id {\n\t\t\t\terr := errors.New(\"Duplicate name for the schedule\")\n\t\t\t\thttp.Error(w, err.Error(), http.StatusConflict)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// Check if the schedule still has attached schedule events\n\t\tstillInUse, err := isScheduleStillInUse(*to)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusServiceUnavailable)\n\t\t\treturn err\n\t\t}\n\t\tif stillInUse {\n\t\t\terr = errors.New(\"Schedule is still in use, can't change the name\")\n\t\t\thttp.Error(w, err.Error(), http.StatusConflict)\n\t\t\treturn err\n\t\t}\n\n\t\tto.Name = from.Name\n\t}\n\n\treturn nil\n}", "func (scheduleSubjectL) LoadScheduleSubject(ctx context.Context, e boil.ContextExecutor, singular bool, maybeScheduleSubject interface{}, mods queries.Applicator) error {\n\tvar slice []*ScheduleSubject\n\tvar object *ScheduleSubject\n\n\tif singular {\n\t\tobject = maybeScheduleSubject.(*ScheduleSubject)\n\t} else {\n\t\tslice = *maybeScheduleSubject.(*[]*ScheduleSubject)\n\t}\n\n\targs := make([]interface{}, 0, 1)\n\tif singular {\n\t\tif object.R == nil {\n\t\t\tobject.R = &scheduleSubjectR{}\n\t\t}\n\t\targs = append(args, object.ScheduleSubjectID)\n\n\t} else {\n\tOuter:\n\t\tfor _, obj := range slice {\n\t\t\tif obj.R == nil {\n\t\t\t\tobj.R = &scheduleSubjectR{}\n\t\t\t}\n\n\t\t\tfor _, a := range args {\n\t\t\t\tif a == obj.ScheduleSubjectID {\n\t\t\t\t\tcontinue Outer\n\t\t\t\t}\n\t\t\t}\n\n\t\t\targs = append(args, obj.ScheduleSubjectID)\n\n\t\t}\n\t}\n\n\tif len(args) == 0 {\n\t\treturn nil\n\t}\n\n\tquery := NewQuery(qm.From(`schedule_content`), qm.WhereIn(`schedule_content.id in ?`, args...))\n\tif mods != nil {\n\t\tmods.Apply(query)\n\t}\n\n\tresults, err := query.QueryContext(ctx, e)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to eager load 
ScheduleContent\")\n\t}\n\n\tvar resultSlice []*ScheduleContent\n\tif err = queries.Bind(results, &resultSlice); err != nil {\n\t\treturn errors.Wrap(err, \"failed to bind eager loaded slice ScheduleContent\")\n\t}\n\n\tif err = results.Close(); err != nil {\n\t\treturn errors.Wrap(err, \"failed to close results of eager load for schedule_content\")\n\t}\n\tif err = results.Err(); err != nil {\n\t\treturn errors.Wrap(err, \"error occurred during iteration of eager loaded relations for schedule_content\")\n\t}\n\n\tif len(scheduleSubjectAfterSelectHooks) != 0 {\n\t\tfor _, obj := range resultSlice {\n\t\t\tif err := obj.doAfterSelectHooks(ctx, e); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(resultSlice) == 0 {\n\t\treturn nil\n\t}\n\n\tif singular {\n\t\tforeign := resultSlice[0]\n\t\tobject.R.ScheduleSubject = foreign\n\t\tif foreign.R == nil {\n\t\t\tforeign.R = &scheduleContentR{}\n\t\t}\n\t\tforeign.R.ScheduleSubjectScheduleSubjects = append(foreign.R.ScheduleSubjectScheduleSubjects, object)\n\t\treturn nil\n\t}\n\n\tfor _, local := range slice {\n\t\tfor _, foreign := range resultSlice {\n\t\t\tif local.ScheduleSubjectID == foreign.ID {\n\t\t\t\tlocal.R.ScheduleSubject = foreign\n\t\t\t\tif foreign.R == nil {\n\t\t\t\t\tforeign.R = &scheduleContentR{}\n\t\t\t\t}\n\t\t\t\tforeign.R.ScheduleSubjectScheduleSubjects = append(foreign.R.ScheduleSubjectScheduleSubjects, local)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func PreloadAssociations(preloadAssociations []string) QueryProcessor {\n\treturn func(db *gorm.DB, out interface{}) (*gorm.DB, microappError.DatabaseError) {\n\t\tif preloadAssociations != nil {\n\t\t\tfor _, association := range preloadAssociations {\n\t\t\t\tdb = db.Preload(association)\n\t\t\t}\n\t\t}\n\t\treturn db, nil\n\t}\n}", "func (tbl AssociationTable) Fetch(req require.Requirement, query string, args ...interface{}) ([]*Association, error) {\n\treturn doAssociationTableQueryAndScan(tbl, req, false, query, 
args...)\n}", "func (h *TileServiceSubscriber) SubscriberChangeSchedule(ctx context.Context, schedule *SchedularService.CloudwalkerScheduler) error{\n\tlog.Info(\"SubscriberChangeSchedule 1\")\n\tfor _,i := range schedule.Shedule {\n\t\tlog.Info(i.TimeZone,\" \", getTimeZone())\n\t\tif i.TimeZone == getTimeZone() {\n\t\t\tlog.Info(\"Refreshing schedule for \", schedule.Vendor, schedule.Brand, i.TimeZone)\n\t\t\tredisScheduleKey := MakeRedisKey(schedule.Vendor+\":\"+schedule.Brand+\":\"+i.TimeZone)\n\t\t\th.RedisConnection.Del(redisScheduleKey)\n\t\t\tfor _,j := range i.Pages {\n\t\t\t\tredisPageKey := MakeRedisKey(redisScheduleKey+\":\"+String(j.PageIndex)+\":\"+j.PageName)\n\t\t\t\th.RedisConnection.Del(redisPageKey)\n\t\t\t\th.RedisConnection.SAdd(redisScheduleKey,redisPageKey)\n\t\t\t\t// making carousel\n\t\t\t\tredisCarouselKey := MakeRedisKey(redisPageKey+\":carousel\")\n\t\t\t\th.RedisConnection.Del(redisCarouselKey)\n\t\t\t\tfor _, k := range j.Carousel{\n\n\t\t\t\t\tresultByte, err := proto.Marshal(k)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\th.RedisConnection.SAdd(redisCarouselKey, resultByte)\n\t\t\t\t}\n\t\t\t\th.RedisConnection.Expire(redisCarouselKey, 3*time.Hour)\n\n\t\t\t\tredisRowsKey := MakeRedisKey(redisPageKey+\":rows\")\n\t\t\t\th.RedisConnection.Del(redisRowsKey)\n\t\t\t\t// making rows\n\t\t\t\tfor _,l := range j.Rows {\n\t\t\t\t\trowKey := MakeRedisKey(redisRowsKey+\":\"+String(l.RowIndex)+\":\"+l.Rowname)\n\t\t\t\t\th.RedisConnection.Del(rowKey)\n\t\t\t\t\th.RedisConnection.SAdd(redisRowsKey, rowKey)\n\t\t\t\t\tvar myPipes []bson.D\n\t\t\t\t\t// pipe1\n\t\t\t\t\tif l.Categorylist != nil && len(l.Categorylist) > 0 {\n\t\t\t\t\t\tmyPipes = append(myPipes, bson.D{{\"$match\", bson.D{{\"metadata.categories\", bson.D{{\"$in\", l.Categorylist}}}}}},)\n\t\t\t\t\t}\n\t\t\t\t\t//pipe2\n\t\t\t\t\tif l.Languagelist != nil && len(l.Languagelist) > 0 {\n\t\t\t\t\t\tmyPipes = append(myPipes, bson.D{{\"$match\", 
bson.D{{\"metadata.languages\", bson.D{{\"$in\", l.Languagelist}}}}}},)\n\t\t\t\t\t}\n\t\t\t\t\t//pipe3\n\t\t\t\t\tif l.GenreList != nil && len(l.GenreList) > 0 {\n\t\t\t\t\t\tmyPipes = append(myPipes, bson.D{{\"$match\", bson.D{{\"metadata.genre\", bson.D{{\"$in\", l.GenreList}}}}}},)\n\t\t\t\t\t}\n\t\t\t\t\t//pipe4\n\t\t\t\t\tif l.SourceList != nil && len(l.SourceList) > 0 {\n\t\t\t\t\t\tmyPipes = append(myPipes, bson.D{{\"$match\", bson.D{{\"content.source\", bson.D{{\"$in\", l.SourceList}}}}}},)\n\t\t\t\t\t}\n\t\t\t\t\t//pipe5\n\t\t\t\t\tmyPipes = append(myPipes,bson.D{{\"$sort\", bson.D{{\"metadata.year\", -1}}}}, )\n\t\t\t\t\t//pipe6\n\t\t\t\t\tmyPipes = append(myPipes, bson.D{{\"$project\", bson.D{{\"_id\", 0}, {\"ref_id\", 1}}}},)\n\t\t\t\t\tcur, err := h.MongoCollection.Aggregate(context.Background(), myPipes, options.Aggregate().SetMaxTime(2000*time.Millisecond))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tfor cur.Next(context.TODO()) {\n\t\t\t\t\t\th.RedisConnection.SAdd(rowKey, cur.Current.Lookup(\"ref_id\").StringValue())\n\t\t\t\t\t}\n\t\t\t\t\tcur.Close(context.TODO())\n\t\t\t\t\th.RedisConnection.Expire(rowKey, 3*time.Hour)\n\t\t\t\t}\n\t\t\t\th.RedisConnection.Expire(redisPageKey, 3*time.Hour)\n\t\t\t}\n\t\t\th.RedisConnection.Expire(redisScheduleKey, 3*time.Hour)\n\t\t\tbreak\n\t\t}\n\t}\n\n\tmessageToPublish := NotificationService.MessageToPublish{\n\t\tExchangeName: \"amq.topic\",\n\t\tRoutingKeyName: schedule.Vendor+\".\"+schedule.Brand,\n\t\tMessageTosend: []byte(\"refreshSchedule\"),\n\t}\n\tlog.Info(\"Sub tile \")\n\treturn h.NotificationEventPublisher.Publish(ctx,&messageToPublish)\n}", "func updateScheduleEventFields(from models.ScheduleEvent, to *models.ScheduleEvent, w http.ResponseWriter) error {\n\t// Boolean used to notify the proper associates based on if the service changed\n\tvar serviceChanged bool\n\toldService := \"\" // Hold the old service name in case it changes\n\n\t// Use .String() method to 
compare empty structs (not ideal, but there's no .equals() method)\n\tif (from.Addressable.String() != models.Addressable{}.String()) {\n\t\t// Check if the new addressable exists\n\t\t// Try by ID\n\t\tif err := dbClient.GetAddressableById(&from.Addressable, from.Addressable.Id.Hex()); err != nil {\n\t\t\t// Try by name\n\t\t\tif err = dbClient.GetAddressableByName(&from.Addressable, from.Addressable.Name); err != nil {\n\t\t\t\tif err == db.ErrNotFound {\n\t\t\t\t\thttp.Error(w, \"Addressable not found for schedule event\", http.StatusNotFound)\n\t\t\t\t} else {\n\t\t\t\t\thttp.Error(w, err.Error(), http.StatusServiceUnavailable)\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tto.Addressable = from.Addressable\n\t}\n\tif from.Service != \"\" {\n\t\tif from.Service != to.Service {\n\t\t\tserviceChanged = true\n\t\t\t// Verify that the new service exists\n\t\t\tvar checkDS models.DeviceService\n\t\t\tif err := dbClient.GetDeviceServiceByName(&checkDS, from.Service); err != nil {\n\t\t\t\tif err == db.ErrNotFound {\n\t\t\t\t\thttp.Error(w, \"Device Service not found for schedule event\", http.StatusNotFound)\n\t\t\t\t} else {\n\t\t\t\t\thttp.Error(w, err.Error(), http.StatusServiceUnavailable)\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\toldService = to.Service\n\t\t\tto.Service = from.Service\n\t\t}\n\t}\n\tif from.Schedule != \"\" {\n\t\tif from.Schedule != to.Schedule {\n\t\t\t// Verify that the new schedule exists\n\t\t\tvar checkS models.Schedule\n\t\t\tif err := dbClient.GetScheduleByName(&checkS, from.Schedule); err != nil {\n\t\t\t\tif err == db.ErrNotFound {\n\t\t\t\t\thttp.Error(w, \"Schedule not found for schedule event\", http.StatusNotFound)\n\t\t\t\t} else {\n\t\t\t\t\thttp.Error(w, err.Error(), http.StatusServiceUnavailable)\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tto.Schedule = from.Schedule\n\t}\n\tif from.Name != \"\" {\n\t\tif from.Name != to.Name {\n\t\t\t// Verify data integrity\n\t\t\tvar reports 
[]models.DeviceReport\n\t\t\tif err := dbClient.GetDeviceReportsByScheduleEventName(&reports, to.Name); err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusServiceUnavailable)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif len(reports) > 0 {\n\t\t\t\terr := errors.New(\"Data integrity issue. Schedule is still referenced by device reports, can't change the name\")\n\t\t\t\thttp.Error(w, err.Error(), http.StatusConflict)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tto.Name = from.Name\n\t}\n\tif from.Origin != 0 {\n\t\tto.Origin = from.Origin\n\t}\n\n\t// Notify associates (based on if the service changed)\n\tif serviceChanged {\n\t\t// Delete from old\n\t\tif err := notifyScheduleEventAssociates(models.ScheduleEvent{Name: oldService}, http.MethodDelete); err != nil {\n\t\t\tLoggingClient.Error(\"Problem notifying associated device services for the schedule event: \"+err.Error(), \"\")\n\t\t}\n\t\t// Add to new\n\t\tif err := notifyScheduleEventAssociates(*to, http.MethodPost); err != nil {\n\t\t\tLoggingClient.Error(\"Problem notifying associated device services for the schedule event: \"+err.Error(), \"\")\n\t\t}\n\t} else {\n\t\t// Changed schedule event\n\t\tif err := notifyScheduleEventAssociates(*to, http.MethodPut); err != nil {\n\t\t\tLoggingClient.Error(\"Problem notifying associated device services for the schedule event: \"+err.Error(), \"\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func HasScheduleWith(preds ...predicate.Schedule) predicate.Patient {\n\treturn predicate.Patient(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(ScheduleInverseTable, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.O2O, false, ScheduleTable, ScheduleColumn),\n\t\t)\n\t\tsqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {\n\t\t\tfor _, p := range preds {\n\t\t\t\tp(s)\n\t\t\t}\n\t\t})\n\t})\n}", "func restGetScheduleEventByAddressableId(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tvar aid string = 
vars[ADDRESSABLEID]\n\tvar res []models.ScheduleEvent = make([]models.ScheduleEvent, 0)\n\n\t// Check if the addressable exists\n\tvar a models.Addressable\n\tif err := dbClient.GetAddressableById(&a, aid); err != nil {\n\t\tif err == db.ErrNotFound {\n\t\t\thttp.Error(w, \"Addressable not found for schedule event\", http.StatusNotFound)\n\t\t\tLoggingClient.Error(\"Addressable not found for schedule event: \"+err.Error(), \"\")\n\t\t} else {\n\t\t\tLoggingClient.Error(err.Error(), \"\")\n\t\t\thttp.Error(w, \"Problem getting addressable for schedule event: \"+err.Error(), http.StatusServiceUnavailable)\n\t\t}\n\t\treturn\n\t}\n\n\t// Get the schedule events\n\tif err := dbClient.GetScheduleEventsByAddressableId(&res, aid); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusServiceUnavailable)\n\t\tLoggingClient.Error(\"Problem getting schedule events: \"+err.Error(), \"\")\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(res)\n}", "func Schedules(config *Config) error {\n\n\tlogrus.Infof(\"running schedule sync...\")\n\ts, err := newSlackClient(config.SlackToken)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp := newPagerDutyClient(config.PagerDutyToken)\n\n\tupdateSchedule := func(emails []string, groupName string) error {\n\t\tslackIDs, err := s.getSlackIDsFromEmails(emails)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tuserGroup, err := s.createOrGetUserGroup(groupName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmembers, err := s.Client.GetUserGroupMembers(userGroup.ID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif !compare.Array(slackIDs, members) {\n\t\t\tlogrus.Infof(\"member list %s needs updating...\", groupName)\n\t\t\t_, err = s.Client.UpdateUserGroupMembers(userGroup.ID, strings.Join(slackIDs, \",\"))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\tfor _, schedule := range config.Schedules {\n\t\tlogrus.Infof(\"checking slack group: 
%s\", schedule.CurrentOnCallGroupName)\n\t\temails, err := p.getEmailsOfCurrentOnCallForSchedule(schedule.ScheduleID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = updateSchedule(emails, schedule.CurrentOnCallGroupName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlogrus.Infof(\"checking slack group: %s\", schedule.AllOnCallGroupName)\n\t\temails, err = p.getEmailsOfAllOnCallForSchedule(schedule.ScheduleID, config.PagerdutyScheduleLookahead)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = updateSchedule(emails, schedule.AllOnCallGroupName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\treturn nil\n}", "func (o SynchronizationJobOutput) Schedules() SynchronizationJobScheduleArrayOutput {\n\treturn o.ApplyT(func(v *SynchronizationJob) SynchronizationJobScheduleArrayOutput { return v.Schedules }).(SynchronizationJobScheduleArrayOutput)\n}", "func (sc *ScheduleController) reconcileSchedule(sID, subID primitive.ObjectID) (err error) {\n\tctx := context.Background()\n\tsCol := sc.subscriptionScheduleService.Collection\n\tsubCol := sc.courseSubscriptionService.Collection\n\n\t// update schedule\n\tupdate := bson.M{\"$set\": bson.M{\"completed\": true}}\n\t_, err = sCol.UpdateOne(ctx, bson.M{\"_id\": sID}, update)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// update subscription\n\tupdate = bson.M{\"$inc\": bson.M{\"modulesCompleted\": 1}}\n\t_, err = subCol.UpdateOne(ctx, bson.M{\"_id\": subID}, update)\n\n\treturn\n}", "func ScanAssociations(query string, rows sqlapi.SqlRows, firstOnly bool) (vv []*Association, n int64, err error) {\n\tfor rows.Next() {\n\t\tn++\n\n\t\tvar v0 int64\n\t\tvar v1 sql.NullString\n\t\tvar v2 sql.NullString\n\t\tvar v3 sql.NullInt64\n\t\tvar v4 sql.NullInt64\n\t\tvar v5 sql.NullInt64\n\n\t\terr = rows.Scan(\n\t\t\t&v0,\n\t\t\t&v1,\n\t\t\t&v2,\n\t\t\t&v3,\n\t\t\t&v4,\n\t\t\t&v5,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn vv, n, errors.Wrap(err, query)\n\t\t}\n\n\t\tv := &Association{}\n\t\tv.Id = 
v0\n\t\tif v1.Valid {\n\t\t\ta := v1.String\n\t\t\tv.Name = &a\n\t\t}\n\t\tif v2.Valid {\n\t\t\ta := QualName(v2.String)\n\t\t\tv.Quality = &a\n\t\t}\n\t\tif v3.Valid {\n\t\t\ta := v3.Int64\n\t\t\tv.Ref1 = &a\n\t\t}\n\t\tif v4.Valid {\n\t\t\ta := v4.Int64\n\t\t\tv.Ref2 = &a\n\t\t}\n\t\tif v5.Valid {\n\t\t\ta := Category(v5.Int64)\n\t\t\tv.Category = &a\n\t\t}\n\n\t\tvar iv interface{} = v\n\t\tif hook, ok := iv.(sqlapi.CanPostGet); ok {\n\t\t\terr = hook.PostGet()\n\t\t\tif err != nil {\n\t\t\t\treturn vv, n, errors.Wrap(err, query)\n\t\t\t}\n\t\t}\n\n\t\tvv = append(vv, v)\n\n\t\tif firstOnly {\n\t\t\tif rows.Next() {\n\t\t\t\tn++\n\t\t\t}\n\t\t\treturn vv, n, errors.Wrap(rows.Err(), query)\n\t\t}\n\t}\n\n\treturn vv, n, errors.Wrap(rows.Err(), query)\n}", "func (m *modelLoader) finalize(itemMap map[reflect.Type][]interface{}) error {\n\t//fill all relationships we can on our items\n\tfor _, f := range m.relationships {\n\t\titems, ok := itemMap[baseType(f.Struct.Type)]\n\t\tif !ok {\n\t\t\t//this relationship isn't in our item map\n\t\t\tcontinue\n\t\t}\n\n\t\tlookup := make(map[string][]reflect.Value)\n\n\t\t//construct a map with possibilities of this relationship\n\t\tfor _, n := range items {\n\t\t\titemVal := reflect.ValueOf(n).Elem()\n\n\t\t\t//build a key for the attributes of this relationship\n\t\t\tvar sb strings.Builder\n\t\t\tfor i, name := range f.Relationship.ForeignFieldNames {\n\t\t\t\tval := itemVal.FieldByName(name).Interface()\n\n\t\t\t\tif valuer, ok := val.(driver.Valuer); ok {\n\t\t\t\t\tvar err error\n\t\t\t\t\tval, err = valuer.Value()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tsb.WriteString(fmt.Sprintf(\"[%d:%v]\", i, val))\n\t\t\t}\n\n\t\t\tkey := sb.String()\n\t\t\tlookup[key] = append(lookup[key], itemVal.Addr())\n\t\t}\n\n\t\t//go through all models were tracking and fill in this relationship\n\t\tfor _, item := range m.items {\n\t\t\titemVal := reflect.ValueOf(item).Elem()\n\t\t\trelVal := 
itemVal.FieldByName(f.Name)\n\n\t\t\t//build a key for the attributes of this relationship\n\t\t\tvar sb strings.Builder\n\t\t\tfor i, name := range f.Relationship.AssociationForeignFieldNames {\n\t\t\t\tval := itemVal.FieldByName(name)\n\t\t\t\tif val.Kind() == reflect.Ptr && !val.IsNil() {\n\t\t\t\t\tval = val.Elem()\n\t\t\t\t}\n\n\t\t\t\tkeyValue := val.Interface()\n\t\t\t\tif valuer, ok := keyValue.(driver.Valuer); ok {\n\t\t\t\t\tkeyValue, _ = valuer.Value()\n\t\t\t\t}\n\t\t\t\tsb.WriteString(fmt.Sprintf(\"[%d:%v]\", i, keyValue))\n\t\t\t}\n\n\t\t\tkey := sb.String()\n\t\t\t//find items corresponding to this item for this relationship\n\t\t\tfor _, newVal := range lookup[key] {\n\t\t\t\t//we have items to fill this relationship, fill it based on the struct\n\t\t\t\tif relVal.Kind() == reflect.Slice {\n\t\t\t\t\t//add the result to our slice\n\t\t\t\t\tif relVal.Type().Elem().Kind() != reflect.Ptr {\n\t\t\t\t\t\t//we have a slice of structs so add the struct we're pointing to\n\t\t\t\t\t\tnewVal = newVal.Elem()\n\t\t\t\t\t}\n\n\t\t\t\t\trelVal.Set(reflect.Append(relVal, newVal))\n\t\t\t\t} else {\n\t\t\t\t\t//we don't have a slice so set the item to the first one we have and move on\n\t\t\t\t\tif relVal.Type().Kind() != reflect.Ptr {\n\t\t\t\t\t\tnewVal = newVal.Elem()\n\t\t\t\t\t}\n\n\t\t\t\t\trelVal.Set(newVal)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func New(apiSchedule guestApi.Schedule) (schedule HumanReadableSchedule){\n\n for _, period := range apiSchedule.Periods {\n\n // specify format for parsing time strings\n form := \"15:04:00\"\n\n startDay := period.Open.Day\n startTime, _ := time.Parse(form, period.Open.Time)\n\n endDay := period.Close.Day\n endTime, _ := time.Parse(form, period.Close.Time)\n\n // Determine if the period spans multiple days\n // Handle wrap arround from sat to sunday\n var daySpan int\n if (endDay >= startDay){\n daySpan = (endDay - startDay)\n } else {\n daySpan = (7 - startDay + endDay)\n }\n\n 
// Determine if we need to split this period into multiple daily hours\n // We allow the end times to go past midnight for a given day (i.e M:10pm - 1am)\n // but if we go past 4am we will consider that the start of a new day\n startOfNewDay, _ := time.Parse(form, \"04:00:00\")\n\n // split periods if necesary until we are no longer spaning multiple days\n var openHours OpenHours\n for daySpan >=0 {\n if ( daySpan < 1 || (daySpan == 1 && endTime.Before(startOfNewDay))){\n // this is the normal case where the start and end times are on the same day\n // or the end time is \"late night\" hours on the next day\n openHours = OpenHours{startTime, endTime}\n daySpan = 0\n } else {\n // since the period spans past the start of a new day we will split it up\n // and carry the remander of the period over to a new day\n openHours = OpenHours{startTime, startOfNewDay}\n }\n\n schedule[startDay] = append(schedule[startDay], openHours)\n\n startTime = startOfNewDay\n startDay = (startDay + 1) % 7 // wrap sat to sun\n daySpan -= 1\n }\n }\n\n // sort the open hours for each day by start time\n for _, dailyOpenHours := range schedule {\n sort.Sort(dailyOpenHours)\n }\n\n return\n}", "func restGetScheduleEventByAddressableName(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tan, err := url.QueryUnescape(vars[ADDRESSABLENAME])\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\tLoggingClient.Error(err.Error(), \"\")\n\t\treturn\n\t}\n\tvar res []models.ScheduleEvent = make([]models.ScheduleEvent, 0)\n\n\t// Check if the addressable exists\n\tvar a models.Addressable\n\tif err = dbClient.GetAddressableByName(&a, an); err != nil {\n\t\tif err == db.ErrNotFound {\n\t\t\tLoggingClient.Error(\"Addressable not found for schedule event: \"+err.Error(), \"\")\n\t\t\thttp.Error(w, \"Addressable not found for schedule event\", http.StatusNotFound)\n\t\t} else {\n\t\t\tLoggingClient.Error(\"Problem getting addressable for schedule event: 
\"+err.Error(), \"\")\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t}\n\t\treturn\n\t}\n\n\t// Get the schedule events\n\tif err = dbClient.GetScheduleEventsByAddressableId(&res, a.Id.Hex()); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\tLoggingClient.Error(\"Problem getting schedule events: \"+err.Error(), \"\")\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(res)\n}", "func mapToTask(yaml map[string]interface{}, task *Task, today time.Time) {\n (*task).created = today\n (*task).modified = today\n (*task).completed = dayZero\n for key, val := range yaml {\n switch key {\n case \"id\":\n // parse the id\n if err := fillId(val, task); err != nil {\n log.Printf(\"Invalid ID found: %q\\n\", val)\n }\n case \"name\":\n // parse the name\n if err := fillName(val, task); err != nil {\n log.Printf(\"Task name must be text (%q found)\\n\", val)\n }\n case \"created\":\n // parse the \"created\" date\n if thedate, err := interfaceDate(val, today, today); err != nil {\n log.Println(err.Error())\n }\n (*task).created = thedate\n case \"modified\":\n // parse the modified date\n if thedate, err := interfaceDate(val, today, today); err != nil {\n log.Println(err.Error())\n }\n (*task).modified = thedate\n case \"completed\":\n // parse the completed date, if any. 
Default is nil\n thedate, err := interfaceDate(val, dayZero, today)\n if err {\n log.Println(err.Error())\n }\n (*task).completed = thedate\n case \"priority\":\n // parse the priority string\n if err := fillPriority(val, task); err != nil {\n log.Printf(\"Invalid priority found %q\\n\", val)\n }\n case \"items\":\n // parse the items\n if err := fillTasks(val, &(*task).items); err != nil {\n log.Printf(\"Unable to parse sub items %q\\n\", val)\n }\n default:\n log.Printf(\"Unrecognized key found: %q\\n\", key)\n }\n }\n}", "func (c *Client) FetchSchedule(fetchMoreInfo bool) ([]Course, error) {\n\t// TODO: GET page, then check if a <form> exists, then extract the name of the radio buttons?\n\tpostData := url.Values{}\n\tpostData.Add(\"SSR_DUMMY_RECV1$sels$0\", \"0\")\n\n\tif resp, err := c.RequestPagePost(scheduleListViewPath, postData); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tdefer resp.Body.Close()\n\n\t\tcontents, err := ioutil.ReadAll(resp.Body)\n\t\tfmt.Println(string(contents))\n\n\t\tnodes, err := html.ParseFragment(resp.Body, nil)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(nodes) != 1 {\n\t\t\treturn nil, errors.New(\"invalid number of root elements\")\n\t\t}\n\n\t\tcourses, err := parseSchedule(nodes[0])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif fetchMoreInfo {\n\t\t\tc.authLock.RLock()\n\t\t\tdefer c.authLock.RUnlock()\n\t\t\tif err := fetchExtraScheduleInfo(&c.client, courses, nodes[0]); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\treturn courses, nil\n\t}\n}", "func (a Put) Schedule() *destination.Schedule {\n\treturn nil\n}", "func (c *constructedSchedule) Add(req *ScheduleRequest) error {\n\tcandidate := ScheduledEvent{\n\t\tTimeInterval: TimeInterval{\n\t\t\tc.earliest,\n\t\t\tc.earliest.Add(req.Length),\n\t\t},\n\t\tAttendees: req.Attendees,\n\t\tRequest: req,\n\t}\n\n\titerations := 0\n\tfor {\n\t\t// TODO: Attendee already has meeting better name?\n\t\toverlap, overlaps, err := 
c.findAttendeeOverlap(candidate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif overlaps {\n\t\t\tcandidate.Start = overlap.End\n\t\t\tcandidate.End = candidate.Start.Add(req.Length)\n\t\t\tcontinue\n\t\t}\n\n\t\t// TODO: Investigate if we can do better room allocation. For example,\n\t\t// cost for switching room or cost for using a large room with few\n\t\t// people.\n\t\tbusyRooms, nextTimeToTry := c.findAlreadyScheduledRooms(candidate.TimeInterval)\n\t\troom, found, err := c.findAvailableRoom(candidate, busyRooms)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif found {\n\t\t\tcandidate.Room = *room\n\t\t\tbreak\n\t\t} else {\n\t\t\tcandidate.Start = *nextTimeToTry\n\t\t\tcandidate.End = candidate.Start.Add(req.Length)\n\t\t}\n\n\t\titerations++\n\t\tif iterations > MaxIterations {\n\t\t\treturn errors.New(\"too many iterations\")\n\t\t}\n\t}\n\n\t// We have found a time that works.\n\n\tc.Events = append(c.Events, candidate)\n\tfor _, a := range req.Attendees {\n\t\te, exists := c.eventsByAttendee[a.ID]\n\t\tif !exists {\n\t\t\te = &attendeeEvents{\n\t\t\t\tAttendee: a,\n\t\t\t}\n\t\t\tc.eventsByAttendee[a.ID] = e\n\t\t}\n\t\te.Scheduled = append(e.Scheduled, candidate)\n\t}\n\n\treturn nil\n}", "func (w *Work) Calendar(entries []*orgodb.OrgEntry) {\n\n\tt, err := w.db.GetToken(\"google\", entries[0].UserID)\n\tif err != nil {\n\t\tw.ErrChan <- err\n\t}\n\n\tclient := w.GoogleOauth.Client(oauth2.NoContext, &oauth2.Token{\n\t\tAccessToken: t.AccessToken,\n\t\tRefreshToken: t.RefreshToken,\n\t\tExpiry: t.Expiry,\n\t})\n\tservice, err := tasks.New(client)\n\tif err != nil {\n\t\tw.ErrChan <- err\n\t\treturn\n\t}\n\n\ttaskService, err := getTasklist(service)\n\tif err != nil {\n\t\tw.ErrChan <- err\n\t\treturn\n\t}\n\n\tfor _, entry := range entries {\n\t\terr := w.db.SaveOrUpdate(entry)\n\t\tif err != nil {\n\t\t\tw.ErrChan <- err\n\t\t}\n\n\t\tvar completed *string\n\t\tclosed := entry.Closed.Format(time.RFC3339)\n\t\tcompleted = &closed\n\t\ttask := 
&tasks.Task{\n\t\t\tTitle: entry.Title,\n\t\t\tDue: entry.Scheduled.Format(time.RFC3339),\n\t\t\tCompleted: completed,\n\t\t\tNotes: entry.Body,\n\t\t}\n\n\t\tif completed != nil {\n\t\t\ttask.Status = \"completed\"\n\t\t}\n\n\t\terr = addTask(service, taskService.Id, task)\n\t\tif err != nil {\n\t\t\tw.ErrChan <- err\n\t\t\treturn\n\t\t}\n\t}\n\n\tdeleteTasks(service, taskService.Id, entries)\n}", "func getStructRelationships(relationer MarshalLinkedRelations, information ServerInformation) *map[string]Relationship {\n\treferencedIDs := relationer.GetReferencedIDs()\n\tsortedResults := map[string][]ReferenceID{}\n\trelationships := map[string]Relationship{}\n\n\tfor _, referenceID := range referencedIDs {\n\t\tsortedResults[referenceID.Name] = append(sortedResults[referenceID.Name], referenceID)\n\t}\n\n\treferences := relationer.GetReferences()\n\n\t// helper mad to check if all references are included to also include mepty ones\n\tnotIncludedReferences := map[string]Reference{}\n\tfor _, reference := range references {\n\t\tnotIncludedReferences[reference.Name] = reference\n\t}\n\n\tfor name, referenceIDs := range sortedResults {\n\t\trelationships[name] = Relationship{}\n\t\t// if referenceType is plural, we need to use an array for data, otherwise it's just an object\n\t\tcontainer := RelationshipDataContainer{}\n\t\tif Pluralize(name) == name {\n\t\t\t// multiple elements in links\n\t\t\tcontainer.DataArray = []RelationshipData{}\n\t\t\tfor _, referenceID := range referenceIDs {\n\t\t\t\tcontainer.DataArray = append(container.DataArray, RelationshipData{\n\t\t\t\t\tType: referenceID.Type,\n\t\t\t\t\tID: referenceID.ID,\n\t\t\t\t})\n\t\t\t}\n\t\t} else {\n\t\t\tcontainer.DataObject = &RelationshipData{\n\t\t\t\tType: referenceIDs[0].Type,\n\t\t\t\tID: referenceIDs[0].ID,\n\t\t\t}\n\t\t}\n\n\t\t// set URLs if necessary\n\t\tlinks := getLinksForServerInformation(relationer, name, information)\n\n\t\trelationship := Relationship{\n\t\t\tData: 
&container,\n\t\t\tLinks: links,\n\t\t}\n\n\t\trelationships[name] = relationship\n\n\t\t// this marks the reference as already included\n\t\tdelete(notIncludedReferences, referenceIDs[0].Name)\n\t}\n\n\t// check for empty references\n\tfor name, reference := range notIncludedReferences {\n\t\tcontainer := RelationshipDataContainer{}\n\t\t// Plural empty relationships need an empty array and empty to-one need a null in the json\n\t\tif !reference.IsNotLoaded && Pluralize(name) == name {\n\t\t\tcontainer.DataArray = []RelationshipData{}\n\t\t}\n\n\t\tlinks := getLinksForServerInformation(relationer, name, information)\n\t\trelationship := Relationship{\n\t\t\tLinks: links,\n\t\t}\n\n\t\t// skip relationship data completely if IsNotLoaded is set\n\t\tif !reference.IsNotLoaded {\n\t\t\trelationship.Data = &container\n\t\t}\n\n\t\trelationships[name] = relationship\n\t}\n\n\treturn &relationships\n}", "func HasSchedule() predicate.Patient {\n\treturn predicate.Patient(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(ScheduleTable, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.O2O, false, ScheduleTable, ScheduleColumn),\n\t\t)\n\t\tsqlgraph.HasNeighbors(s, step)\n\t})\n}", "func (m *MaintenanceScheduler) Schedule(id string,\n\tschedules map[string]Schedule) error {\n\tbytes, err := json.Marshal(schedules)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"schedules marshall error\")\n\t}\n\tmbytes, err := json.Marshal(EventMessage{ID: id})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"schedules marshall error\")\n\t}\n\tc := m.rclt.gconn()\n\tdefer c.Close()\n\t_, err = c.Do(\"HSET\", fmt.Sprintf(\"%s_flags\", m.hkey), id, false)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"fail to set new flag data\")\n\t}\n\t_, err = c.Do(\"HSET\", fmt.Sprintf(\"%s_schedules\", m.hkey), id, string(bytes))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"fail to set new schedule data\")\n\t}\n\tkey := 
fmt.Sprintf(\"%s_channel\", m.hkey)\n\t_, err = c.Do(\"PUBLISH\", key, string(mbytes))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"fail to set new schedule data\")\n\t}\n\treturn nil\n}", "func (m *GetSchedulePostRequestBody) GetSchedules()([]string) {\n return m.schedules\n}", "func (db *BotDB) GetSchedule(guild uint64) []ScheduleEvent {\n\tq, err := db.sqlGetSchedule.Query(guild)\n\tif db.CheckError(\"GetSchedule\", err) != nil {\n\t\treturn []ScheduleEvent{}\n\t}\n\tdefer q.Close()\n\tr := make([]ScheduleEvent, 0, 2)\n\tfor q.Next() {\n\t\tp := ScheduleEvent{}\n\t\tif err := q.Scan(&p.ID, &p.Date, &p.Type, &p.Data); err == nil {\n\t\t\tr = append(r, p)\n\t\t}\n\t}\n\treturn r\n}", "func (delQ *DeleteQueue) Schedule() map[string][]*model.ShardToDelete {\n\tquotas := make(map[string][]*model.ShardToDelete)\n\n\t// for every files to delete\n\tfor _, file := range delQ.Files {\n\t\t// for every shards of the file\n\t\tfor _, shard := range file.Shards {\n\t\t\tshard := shard\n\t\t\tquotas[shard.MachineID] = append(\n\t\t\t\tquotas[shard.MachineID],\n\t\t\t\t&model.ShardToDelete{Name: shard.Name},\n\t\t\t)\n\n\t\t\t// delete shard record\n\t\t\tdatabase.Conn().Delete(&shard)\n\t\t}\n\n\t\t// delete file record\n\t\tdatabase.Conn().Delete(file)\n\t}\n\n\treturn quotas\n}", "func (e UserEdges) CardSchedulesOrErr() ([]*CardSchedule, error) {\n\tif e.loadedTypes[1] {\n\t\treturn e.CardSchedules, nil\n\t}\n\treturn nil, &NotLoadedError{edge: \"CardSchedules\"}\n}", "func executeSchedule(resp http.ResponseWriter, request *http.Request) {\n\tcors := handleCors(resp, request)\n\tif cors {\n\t\treturn\n\t}\n\n\tlocation := strings.Split(request.URL.String(), \"/\")\n\tvar workflowId string\n\n\tif location[1] == \"api\" {\n\t\tif len(location) <= 4 {\n\t\t\tresp.WriteHeader(401)\n\t\t\tresp.Write([]byte(`{\"success\": false}`))\n\t\t\treturn\n\t\t}\n\n\t\tworkflowId = location[4]\n\t}\n\n\tif len(workflowId) != 32 
{\n\t\tresp.WriteHeader(401)\n\t\tresp.Write([]byte(`{\"success\": false, \"message\": \"ID not valid\"}`))\n\t\treturn\n\t}\n\n\tctx := context.Background()\n\tlog.Printf(\"EXECUTING %s!\", workflowId)\n\tidConfig, err := getSchedule(ctx, workflowId)\n\tif err != nil {\n\t\tlog.Printf(\"Error getting schedule: %s\", err)\n\t\tresp.WriteHeader(401)\n\t\tresp.Write([]byte(fmt.Sprintf(`{\"success\": false, \"reason\": \"%s\"}`, err)))\n\t\treturn\n\t}\n\n\t// Basically the src app\n\tinputStrings := map[string]string{}\n\tfor _, item := range idConfig.Translator {\n\t\tif item.Dst.Required == \"false\" {\n\t\t\tlog.Println(\"Skipping not required\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif item.Src.Name == \"\" {\n\t\t\terrorMsg := fmt.Sprintf(\"Required field %s has no source\", item.Dst.Name)\n\t\t\tlog.Println(errorMsg)\n\t\t\tresp.WriteHeader(401)\n\t\t\tresp.Write([]byte(fmt.Sprintf(`{\"success\": false, \"reason\": \"%s\"}`, errorMsg)))\n\t\t\treturn\n\t\t}\n\n\t\tinputStrings[item.Dst.Name] = item.Src.Name\n\t}\n\n\tconfigmap := map[string]string{}\n\tfor _, config := range idConfig.AppInfo.SourceApp.Config {\n\t\tconfigmap[config.Key] = config.Value\n\t}\n\n\t// FIXME - this wont work for everything lmao\n\tfunctionName := strings.ToLower(idConfig.AppInfo.SourceApp.Action)\n\tfunctionName = strings.Replace(functionName, \" \", \"_\", 10)\n\n\tcmdArgs := []string{\n\t\tfmt.Sprintf(\"%s/%s/app.py\", baseAppPath, \"thehive\"),\n\t\tfmt.Sprintf(\"--referenceid=%s\", workflowId),\n\t\tfmt.Sprintf(\"--function=%s\", functionName),\n\t}\n\n\tfor key, value := range configmap {\n\t\tcmdArgs = append(cmdArgs, fmt.Sprintf(\"--%s=%s\", key, value))\n\t}\n\n\t// FIXME - processname\n\tbaseProcess := \"python3\"\n\tlog.Printf(\"Executing: %s %s\", baseProcess, strings.Join(cmdArgs, \" \"))\n\texecSubprocess(baseProcess, cmdArgs)\n\n\tresp.WriteHeader(200)\n\tresp.Write([]byte(`{\"success\": true}`))\n}", "func (s *ScheduleProperties) UnmarshalJSON(data []byte) error {\n\tvar 
rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn err\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"createdDate\":\n\t\t\terr = unpopulateTimeRFC3339(val, &s.CreatedDate)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"dailyRecurrence\":\n\t\t\terr = unpopulate(val, &s.DailyRecurrence)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"hourlyRecurrence\":\n\t\t\terr = unpopulate(val, &s.HourlyRecurrence)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"notificationSettings\":\n\t\t\terr = unpopulate(val, &s.NotificationSettings)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"provisioningState\":\n\t\t\terr = unpopulate(val, &s.ProvisioningState)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"status\":\n\t\t\terr = unpopulate(val, &s.Status)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"targetResourceId\":\n\t\t\terr = unpopulate(val, &s.TargetResourceID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"taskType\":\n\t\t\terr = unpopulate(val, &s.TaskType)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"timeZoneId\":\n\t\t\terr = unpopulate(val, &s.TimeZoneID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"uniqueIdentifier\":\n\t\t\terr = unpopulate(val, &s.UniqueIdentifier)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"weeklyRecurrence\":\n\t\t\terr = unpopulate(val, &s.WeeklyRecurrence)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func createEventsFromAssessment(a *scraper.Assessment) (error, []*goics.Component) {\n\tevents := make([]*goics.Component, 0)\n\tvar err error = nil\n\tswitch a.DueDate.DateType {\n\tcase date.RangeDateType:\n\t\terr = addEventFromRangeDate(a, &events)\n\tcase date.SingleDateType:\n\t\terr = addEventFromSingleDate(a, &events)\n\tcase date.MultiDateType:\n\t\tfor _, d := range a.DueDate.ChildDates {\n\t\t\tnewA := a\n\t\t\tnewA.DueDate = d\n\n\t\t\tvar subEvents []*goics.Component\n\t\t\terr, subEvents = createEventsFromAssessment(newA)\n\t\t\tevents = append(events, 
subEvents...)\n\t\t}\n\t}\n\n\tif err != nil {\n\t\treturn err, nil\n\t}\n\n\treturn nil, events\n}", "func (e *Enclosure) Association() []interface{} {\n\tret := []interface{}{}\n\tfor _, v := range e.PowerSlots {\n\t\tret = append(ret, v)\n\t}\n\tfor _, v := range e.FanSlots {\n\t\tret = append(ret, v)\n\t}\n\tfor _, v := range e.ApplianceSlots {\n\t\tret = append(ret, v)\n\t}\n\tfor _, v := range e.ManagerSlots {\n\t\tret = append(ret, v)\n\t}\n\tfor _, v := range e.SwitchSlots {\n\t\tret = append(ret, v)\n\t}\n\tfor _, v := range e.ServerSlots {\n\t\tret = append(ret, v)\n\t}\n\treturn ret\n}", "func (a *Airport) processDepartures() {\n\tfor {\n\t\tdeparture, ok := <-a.departureChan\n\t\tif !ok {\n\t\t\ta.log.Errorf(\"departure channel closed\")\n\t\t\treturn\n\t\t}\n\t\tswitch departure.GetChangeType() {\n\t\tcase datasync.Put:\n\t\t\tfl := flight.Info{}\n\t\t\tif err := departure.GetValue(&fl); err != nil {\n\t\t\t\ta.log.Errorf(\"failed to get value for departure flight: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfl.Status = flight.Status_departure\n\t\t\ta.runwayChan <- fl\n\t\tcase datasync.Delete:\n\t\t\ta.log.Debugf(\"departure %s deleted\\n\", departure.GetKey())\n\t\t}\n\t}\n}", "func (s *Scheduler) Run() ([]ScheduledEvent, error) {\n\t// Instantiate a GA with a GAConfig\n\tga, err := eaopt.NewDefaultGAConfig().NewGA()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Set the number of generations to run for\n\tga.NGenerations = s.ngenerations\n\n\t// Add a custom print function to track progress\n\t// TODO: Make this callback(ish) be definable as an Config option.\n\t/*ga.Callback = func(ga *eaopt.GA) {\n\t\tfmt.Printf(\"Best fitness at generation %d: %f\\n\", ga.Generations, ga.HallOfFame[0].Fitness)\n\t}*/\n\n\t// TODO: Stop early if no progress is being made.\n\n\t// Find the minimum\n\terr = ga.Minimize(s.scheduleFactory)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Assuming the first individual is the best -\n\t// 
https://godoc.org/github.com/MaxHalford/eaopt#GA isn't too well\n\t// documented.\n\tschedule, err := ga.HallOfFame[0].Genome.(*candidate).Schedule()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn schedule.Events, nil\n}", "func CreateSchedules(schedules ...Schedule) map[string]Schedule {\n\tsm := make(map[string]Schedule, 0)\n\tfor _, s := range schedules {\n\t\tsm[s.Day] = s\n\t}\n\treturn sm\n}", "func (automodRuleDatumL) LoadTriggerAutomodTriggeredRules(ctx context.Context, e boil.ContextExecutor, singular bool, maybeAutomodRuleDatum interface{}, mods queries.Applicator) error {\n\tvar slice []*AutomodRuleDatum\n\tvar object *AutomodRuleDatum\n\n\tif singular {\n\t\tobject = maybeAutomodRuleDatum.(*AutomodRuleDatum)\n\t} else {\n\t\tslice = *maybeAutomodRuleDatum.(*[]*AutomodRuleDatum)\n\t}\n\n\targs := make([]interface{}, 0, 1)\n\tif singular {\n\t\tif object.R == nil {\n\t\t\tobject.R = &automodRuleDatumR{}\n\t\t}\n\t\targs = append(args, object.ID)\n\t} else {\n\tOuter:\n\t\tfor _, obj := range slice {\n\t\t\tif obj.R == nil {\n\t\t\t\tobj.R = &automodRuleDatumR{}\n\t\t\t}\n\n\t\t\tfor _, a := range args {\n\t\t\t\tif queries.Equal(a, obj.ID) {\n\t\t\t\t\tcontinue Outer\n\t\t\t\t}\n\t\t\t}\n\n\t\t\targs = append(args, obj.ID)\n\t\t}\n\t}\n\n\tif len(args) == 0 {\n\t\treturn nil\n\t}\n\n\tquery := NewQuery(\n\t\tqm.From(`automod_triggered_rules`),\n\t\tqm.WhereIn(`automod_triggered_rules.trigger_id in ?`, args...),\n\t)\n\tif mods != nil {\n\t\tmods.Apply(query)\n\t}\n\n\tresults, err := query.QueryContext(ctx, e)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to eager load automod_triggered_rules\")\n\t}\n\n\tvar resultSlice []*AutomodTriggeredRule\n\tif err = queries.Bind(results, &resultSlice); err != nil {\n\t\treturn errors.Wrap(err, \"failed to bind eager loaded slice automod_triggered_rules\")\n\t}\n\n\tif err = results.Close(); err != nil {\n\t\treturn errors.Wrap(err, \"failed to close results in eager load on 
automod_triggered_rules\")\n\t}\n\tif err = results.Err(); err != nil {\n\t\treturn errors.Wrap(err, \"error occurred during iteration of eager loaded relations for automod_triggered_rules\")\n\t}\n\n\tif singular {\n\t\tobject.R.TriggerAutomodTriggeredRules = resultSlice\n\t\tfor _, foreign := range resultSlice {\n\t\t\tif foreign.R == nil {\n\t\t\t\tforeign.R = &automodTriggeredRuleR{}\n\t\t\t}\n\t\t\tforeign.R.Trigger = object\n\t\t}\n\t\treturn nil\n\t}\n\n\tfor _, foreign := range resultSlice {\n\t\tfor _, local := range slice {\n\t\t\tif queries.Equal(local.ID, foreign.TriggerID) {\n\t\t\t\tlocal.R.TriggerAutomodTriggeredRules = append(local.R.TriggerAutomodTriggeredRules, foreign)\n\t\t\t\tif foreign.R == nil {\n\t\t\t\t\tforeign.R = &automodTriggeredRuleR{}\n\t\t\t\t}\n\t\t\t\tforeign.R.Trigger = local\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (e *Event) CalculateTimes(until time.Time) ([]time.Time, error) {\n\teventStartTime := time.Unix(int64(e.Start), 0).UTC()\n\n\tif e.StartTimezone != \"\" {\n\t\tlocation, err := time.LoadLocation(e.StartTimezone)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\teventStartTime = eventStartTime.In(location)\n\t}\n\n\teventTimes := []time.Time{}\n\n\t// obviously it has to happen at least once\n\teventTimes = append(eventTimes, eventStartTime)\n\n\tif e.RecurRule != nil {\n\t\tcurrentTime := eventStartTime\n\n\t\tvar ruleUntilTime time.Time\n\t\thaveRuleUntilTime := false\n\t\tif e.RecurRule.Until != \"\" {\n\t\t\thaveRuleUntilTime = true\n\n\t\t\tlocation, err := time.LoadLocation(e.EndTimezone)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\truleUntilTime, err = time.ParseInLocation(\"2006-01-02\", e.RecurRule.Until, location)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\tfor currentTime.Before(until) {\n\t\t\tyears := 0\n\t\t\tmonths := 0\n\t\t\tdays := 0\n\n\t\t\tif e.RecurRule.Frequency == RecurFrequencyDaily {\n\t\t\t\tdays = 1\n\t\t\t} else if 
e.RecurRule.Frequency == RecurFrequencyWeekly {\n\t\t\t\tdays = 7\n\t\t\t} else if e.RecurRule.Frequency == RecurFrequencyMonthly {\n\t\t\t\tmonths = 1\n\t\t\t} else { // if e.RecurRule.Frequency == RecurFrequencyYearly {\n\t\t\t\tyears = 1\n\t\t\t}\n\n\t\t\tyears *= e.RecurRule.Interval\n\t\t\tmonths *= e.RecurRule.Interval\n\t\t\tdays *= e.RecurRule.Interval\n\n\t\t\tpreviousTime := currentTime\n\t\t\tcurrentTime = currentTime.AddDate(years, months, days)\n\n\t\t\tif previousTime == currentTime {\n\t\t\t\t// we're not making progress, escape\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif haveRuleUntilTime {\n\t\t\t\tif ruleUntilTime.Sub(currentTime) < -24*time.Hour {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\teventTimes = append(eventTimes, currentTime)\n\t\t}\n\t}\n\n\treturn eventTimes, nil\n}", "func (db *Database) GetDriverWeeklySchedule(driverName string, date string) ([]TripOffering, error) {\n result := []TripOffering{}\n sameWeek := func(t1, t2 *time.Time) bool {\n year1, week1 := t1.ISOWeek()\n year2, week2 := t2.ISOWeek()\n return year1 == year2 && week1 == week2\n }\n row, err := db.Query(\"SELECT * FROM TripOffering WHERE DriverName=%q\", driverName)\n if err != nil {\n return result, err\n }\n defer row.Close()\n date1, err := time.Parse(DATE_FORMAT, date)\n if err != nil {\n return result, err\n }\n if err != nil {\n return result, err\n }\n for row.Next() {\n var tripNumber int\n var date string\n var scheduledStartTime string\n var scheduledArrivalTime string\n var driverName string\n var busID int\n row.Scan(&tripNumber, &date, &scheduledStartTime, &scheduledArrivalTime, &driverName, &busID)\n date2, err := time.Parse(DATE_FORMAT, date)\n if err != nil {\n log.Fatal(err)\n }\n if sameWeek(&date1, &date2) {\n result = append(result, TripOffering{\n TripNumber: tripNumber,\n Date: date,\n ScheduledStartTime: scheduledStartTime,\n ScheduledArrivalTime: scheduledArrivalTime,\n DriverName: driverName,\n BusID: busID,\n })\n }\n }\n return result, nil\n}", 
"func (s *Service) GetAssociations(p *Price, fmtName string) ([]uuid.UUID, error) {\n\tif p == nil || *p == (Price{}) {\n\t\treturn nil, errors.New(errTickerPriceIsNil)\n\t}\n\tvar ids []uuid.UUID\n\texchangeID, ok := s.Exchange[fmtName]\n\tif !ok {\n\t\tvar err error\n\t\texchangeID, err = s.mux.GetID()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ts.Exchange[fmtName] = exchangeID\n\t}\n\n\tids = append(ids, exchangeID)\n\treturn ids, nil\n}", "func (c Client) GetSchedules(stationIds []string, dates []string) ([]Schedule, error) {\n\turl := fmt.Sprint(DefaultBaseURL, APIVersion, \"/schedules\")\n\tfmt.Println(\"URL:>\", url)\n\n\t//buffer to store the json request\n\tvar buffer bytes.Buffer\n\n\t//creating the request\n\tbuffer.WriteString(\"[\")\n\tfor index, station := range stationIds {\n\t\t//fmt.Println(station)\n\t\tbuffer.WriteString(`{\"stationID\":\"`+ station + `\",\"date\":[`)\n \tfor index2, date := range dates {\n\t\t buffer.WriteString(`\"`+date+`\"`)\n\t\t if index2 != len(dates)-1 {\n\t\t\t buffer.WriteString(\",\")\n\t\t } else {\n buffer.WriteString(\"]\")\n }\n }\n\t\tif index != len(stationIds)-1 {\n\t\t\tbuffer.WriteString(\"},\")\n\t\t} else {\n buffer.WriteString(\"}\")\n }\n\t}\n\tbuffer.WriteString(\"]\")\n\n\t//setup the request\n\treq, err := http.NewRequest(\"POST\", url, &buffer)\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"Accept-Encoding\", \"deflate,gzip\")\n\treq.Header.Set(\"token\", c.Token)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\tlog.Fatal(resp.Status)\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close() //resp.Body.Close() will run when we're finished.\n \n // decode the response\n\tvar h []Schedule\n \n // debug code\t\n //body, _ := ioutil.ReadAll(resp.Body)\n\t//fmt.Println(string(body))\n \n\t// decode the body\n\terr = 
json.NewDecoder(resp.Body).Decode(&h)\n\tif err != nil {\n\t\tfmt.Println(\"Error parsing schedules response\")\n\t\tlog.Fatal(err)\n\t\treturn nil, err\n\t}\n\n\treturn h, nil\n}", "func (q *Queue) RunSchedule() error {\n\n\tfor {\n\t\t// Read DB and current to run a this time\n\t\ttasks, err := TaskFind(q.DB)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[ERROR] \", err)\n\t\t\tbreak\n\t\t}\n\t\tif tasks == nil || len(*tasks) == 0 {\n\t\t\t// Wait...\n\t\t\tfmt.Println(\"=========================================================================\")\n\t\t\tfmt.Println(\"[INFO] Nothing to do. You can CTRL-C. Unless you have tasks in the future.\")\n\t\t\ttime.Sleep(time.Duration(1) * time.Minute)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Update records as \"hidden\"\n\t\terr = TaskBulkUpdateState(q.DB, tasks, HiddenState)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[ERROR] update hidden state error:\", err)\n\t\t\tbreak\n\t\t}\n\n\t\t// Split the tasks into bulk. Hard-code to 4 for now\n\t\tbulkTasks := [][]Task{}\n\t\tbIndex := -1\n\t\tfor i, t := range *tasks {\n\t\t\tif i%4 == 0 {\n\t\t\t\tbulkTasks = append(bulkTasks, []Task{})\n\t\t\t\tbIndex++\n\t\t\t}\n\t\t\tbulkTasks[bIndex] = append(bulkTasks[bIndex], t)\n\t\t\tfmt.Println(\"[INFO] Hidden:\", i, t.ID)\n\t\t}\n\n\t\t// Channels - convince memory to safely pass data\n\t\t// through goroutines.\n\t\t// It is buffer so the goroutinue can quickly free\n\t\t// itself and avoid too much context switching.\n\t\tsuccessChan := make(chan Task, 4)\n\t\tfailureChan := make(chan Task, 4)\n\t\tfor _, bulk := range bulkTasks {\n\t\t\tgo q.BulkRunner(bulk, successChan, failureChan)\n\t\t}\n\n\t\t// Wait for the GoRoutines to finish and organize\n\t\t// the tasks between failure and success\n\t\tsuccessTasks := []Task{}\n\t\tfailureTasks := []Task{}\n\t\tfor _ = range *tasks {\n\t\t\tselect {\n\t\t\tcase s := <-successChan:\n\t\t\t\tfmt.Println(\"[INFO] Success received\", s.ID)\n\t\t\t\tsuccessTasks = append(successTasks, s)\n\t\t\tcase e 
:= <-failureChan:\n\t\t\t\tfmt.Println(\"[INFO] Fake Failure received\", e.ID)\n\t\t\t\tfailureTasks = append(failureTasks, e)\n\t\t\t}\n\t\t}\n\n\t\t// Update records as \"success\"\n\t\terr = TaskBulkUpdateState(q.DB, &successTasks, SuccessState)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[ERROR] update success state error:\", err)\n\t\t\tbreak\n\t\t}\n\t\t// Update records as \"failures\"\n\t\terr = TaskBulkUpdateState(q.DB, &failureTasks, FailureState)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[ERROR] update failure state error:\", err)\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o LookupInstanceResultOutput) Scheduling() SchedulingResponseOutput {\n\treturn o.ApplyT(func(v LookupInstanceResult) SchedulingResponse { return v.Scheduling }).(SchedulingResponseOutput)\n}", "func (o *ModelsBackupSchedule) HasSchedule() bool {\n\tif o != nil && o.Schedule != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func RunSchedule() {\n\tgo func() {\n\t\tcount := 0\n\t\tfor {\n\t\t\ttime.Sleep(time.Hour * 4)\n\t\t\tcount++\n\t\t\tBuzzy = true\n\t\t\tif count == 12 {\n\t\t\t\tcount = 0\n\t\t\t\terr := SearchForClanIds(other.Flags, false)\n\t\t\t\tif err != nil {\n\t\t\t\t\tapiErr(\"RunSchedule\", err, \"error check SearchForClanIds\")\n\t\t\t\t\tother.DevPrint(\"ERROR: [SearchForClanIds]:\", err.Error())\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terr := GetClanData()\n\t\t\t\tif err != nil {\n\t\t\t\t\tapiErr(\"RunSchedule\", err, \"error check GetClanData\")\n\t\t\t\t\tother.DevPrint(\"ERROR: [GetClanData]:\", err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tGetIcons()\n\t\t\tBuzzy = false\n\t\t}\n\t}()\n}", "func cmdGetPolicySchedules(ccmd *cobra.Command, args []string) {\n\taplSvc := apl.NewClient()\n\n\toutput := runGetCommand(args, aplSvc.PolicySchedules.Get)\n\n\tif output != nil {\n\t\tfields := []string{\"ID\", \"Name\", \"ResourceType\", \"Status\", \"CreatedTime\"}\n\t\tprintTableResultsCustom(output.(apl.PolicySchedule), fields)\n\t}\n}", "func (a *SchedulesAPI) 
RequestHolidaySchedules() (res HolidaySchedulesResponse, err error) {\n\tparams := initSchedulesRequest(\"holiday\")\n\terr = params.requestAPI(a, &res)\n\treturn\n}", "func (o SourceInstancePropertiesResponseOutput) Scheduling() SchedulingResponseOutput {\n\treturn o.ApplyT(func(v SourceInstancePropertiesResponse) SchedulingResponse { return v.Scheduling }).(SchedulingResponseOutput)\n}", "func (o IntegrationOutput) Schedules() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *Integration) pulumi.StringArrayOutput { return v.Schedules }).(pulumi.StringArrayOutput)\n}", "func (e *Element) Finalize() {\n\tfor _, rel := range e.Relationships {\n\t\trel.Finalize()\n\t}\n}", "func (s *Service) AttestAndScheduleAggregate(ctx context.Context, data interface{}) {\n\tstarted := time.Now()\n\tduty, ok := data.(*attester.Duty)\n\tif !ok {\n\t\tlog.Error().Msg(\"Passed invalid data\")\n\t\treturn\n\t}\n\tlog := log.With().Uint64(\"slot\", uint64(duty.Slot())).Logger()\n\n\tattestations, err := s.attester.Attest(ctx, duty)\n\tif err != nil {\n\t\tlog.Warn().Err(err).Msg(\"Failed to attest\")\n\t\treturn\n\t}\n\tlog.Trace().Dur(\"elapsed\", time.Since(started)).Msg(\"Attested\")\n\n\tif len(attestations) == 0 || attestations[0].Data == nil {\n\t\tlog.Debug().Msg(\"No attestations; nothing to aggregate\")\n\t\treturn\n\t}\n\n\tepoch := s.chainTimeService.SlotToEpoch(duty.Slot())\n\ts.subscriptionInfosMutex.Lock()\n\tsubscriptionInfoMap, exists := s.subscriptionInfos[epoch]\n\ts.subscriptionInfosMutex.Unlock()\n\tif !exists {\n\t\tlog.Debug().\n\t\t\tUint64(\"epoch\", uint64(epoch)).\n\t\t\tMsg(\"No subscription info for this epoch; not aggregating\")\n\t\treturn\n\t}\n\n\tfor _, attestation := range attestations {\n\t\tlog := log.With().Uint64(\"attestation_slot\", uint64(attestation.Data.Slot)).Uint64(\"committee_index\", uint64(attestation.Data.Index)).Logger()\n\t\tslotInfoMap, exists := subscriptionInfoMap[attestation.Data.Slot]\n\t\tif !exists 
{\n\t\t\tlog.Debug().Msg(\"No slot info; not aggregating\")\n\t\t\tcontinue\n\t\t}\n\t\t// Do not schedule aggregations for past slots.\n\t\tif attestation.Data.Slot < s.chainTimeService.CurrentSlot() {\n\t\t\tlog.Debug().Uint64(\"current_slot\", uint64(s.chainTimeService.CurrentSlot())).Msg(\"Aggregation in the past; not scheduling\")\n\t\t\tcontinue\n\t\t}\n\t\tinfo, exists := slotInfoMap[attestation.Data.Index]\n\t\tif !exists {\n\t\t\tlog.Debug().Uint64(\"committee_index\", uint64(attestation.Data.Index)).Msg(\"No committee info; not aggregating\")\n\t\t\tcontinue\n\t\t}\n\t\tif info.IsAggregator {\n\t\t\taccounts, err := s.validatingAccountsProvider.ValidatingAccountsForEpochByIndex(ctx, epoch, []phase0.ValidatorIndex{info.Duty.ValidatorIndex})\n\t\t\tif err != nil {\n\t\t\t\t// Don't return here; we want to try to set up as many aggregator jobs as possible.\n\t\t\t\tlog.Error().Err(err).Msg(\"Failed to obtain accounts\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(accounts) == 0 {\n\t\t\t\t// Don't return here; we want to try to set up as many aggregator jobs as possible.\n\t\t\t\tlog.Error().Msg(\"Failed to obtain account of attester\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tattestationDataRoot, err := attestation.Data.HashTreeRoot()\n\t\t\tif err != nil {\n\t\t\t\t// Don't return here; we want to try to set up as many aggregator jobs as possible.\n\t\t\t\tlog.Error().Err(err).Msg(\"Failed to obtain hash tree root of attestation\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\taggregatorDuty := &attestationaggregator.Duty{\n\t\t\t\tSlot: info.Duty.Slot,\n\t\t\t\tAttestationDataRoot: attestationDataRoot,\n\t\t\t\tValidatorIndex: info.Duty.ValidatorIndex,\n\t\t\t\tSlotSignature: info.Signature,\n\t\t\t}\n\t\t\tif err := s.scheduler.ScheduleJob(ctx,\n\t\t\t\tfmt.Sprintf(\"Beacon block attestation aggregation for slot %d committee %d\", attestation.Data.Slot, 
attestation.Data.Index),\n\t\t\t\ts.chainTimeService.StartOfSlot(attestation.Data.Slot).Add(s.slotDuration*2/3),\n\t\t\t\ts.attestationAggregator.Aggregate,\n\t\t\t\taggregatorDuty,\n\t\t\t); err != nil {\n\t\t\t\t// Don't return here; we want to try to set up as many aggregator jobs as possible.\n\t\t\t\tlog.Error().Err(err).Msg(\"Failed to schedule beacon block attestation aggregation job\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// We are set up as an aggregator for this slot and committee. It is possible that another validator has also been\n\t\t\t// assigned as an aggregator, but we're already carrying out the task so do not need to go any further.\n\t\t\treturn\n\t\t}\n\t}\n}", "func (s *Scheduler) schedule() {\n\t// Do we have space left in our buffer?\n\tif s.CountScheduledRuns() >= schedulerBufferLimit {\n\t\t// No space left. Exit.\n\t\treturn\n\t}\n\n\t// Get scheduled pipelines but limit the returning number of elements.\n\tscheduled, err := s.storeService.PipelineGetScheduled(schedulerBufferLimit)\n\tif err != nil {\n\t\tgaia.Cfg.Logger.Debug(\"cannot get scheduled pipelines\", \"error\", err.Error())\n\t\treturn\n\t}\n\n\t// Iterate scheduled runs\n\tfor id := range scheduled {\n\t\t// If we are a server instance, we will by default give the worker the advantage.\n\t\t// Only in case all workers are busy we will schedule work on the server.\n\t\tworkers := s.memDBService.GetAllWorker()\n\t\tif gaia.Cfg.Mode == gaia.ModeServer && len(workers) > 0 {\n\t\t\t// Check if all workers are busy / inactive\n\t\t\tinvalidWorkers := 0\n\t\t\tfor _, w := range workers {\n\t\t\t\tif w.Slots == 0 || w.Status != gaia.WorkerActive {\n\t\t\t\t\tinvalidWorkers++\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Insert pipeline run into memdb where all workers get their work from\n\t\t\tif len(workers) > invalidWorkers {\n\t\t\t\t// Mark them as scheduled\n\t\t\t\tscheduled[id].Status = gaia.RunScheduled\n\n\t\t\t\t// Update entry in store\n\t\t\t\terr = 
s.storeService.PipelinePutRun(scheduled[id])\n\t\t\t\tif err != nil {\n\t\t\t\t\tgaia.Cfg.Logger.Debug(\"could not put pipeline run into store\", \"error\", err.Error())\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif err := s.memDBService.InsertPipelineRun(scheduled[id]); err != nil {\n\t\t\t\t\tgaia.Cfg.Logger.Error(\"failed to insert pipeline run into memdb via schedule\", \"error\", err.Error())\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// Check if this primary is not allowed to run work\n\t\tif gaia.Cfg.PreventPrimaryWork {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Mark them as scheduled\n\t\tscheduled[id].Status = gaia.RunScheduled\n\n\t\t// Update entry in store\n\t\terr = s.storeService.PipelinePutRun(scheduled[id])\n\t\tif err != nil {\n\t\t\tgaia.Cfg.Logger.Debug(\"could not put pipeline run into store\", \"error\", err.Error())\n\t\t\tcontinue\n\t\t}\n\n\t\t// push scheduled run into our channel\n\t\ts.scheduledRuns <- *scheduled[id]\n\t}\n}", "func processPodcastEpisodes(feed *gofeed.Feed, id string, hashes []string) {\n\tfor _, episode := range feed.Items {\n\t\tprocessPodcastEpisode(episode, id, hashes)\n\t}\n}", "func GetAllScheduledTrips(db DatabaseInterface) ([]*TripSchedule, error) {\n\treturn db.GetAllScheduledTrips()\n}", "func loadEvents(t time.Time, cal ...*Calendar) ([]*Event, error) {\n\tvar (\n\t\tevents = []*Event{}\n\t\tdateStr = t.Format(timeFormat)\n\t\tname = fmt.Sprintf(\"events-%s.json\", dateStr)\n\t\tjobName = \"update-events\"\n\t)\n\n\tif wf.Cache.Expired(name, opts.MaxAgeEvents()) {\n\t\twf.Rerun(0.1)\n\t\tif !wf.IsRunning(jobName) {\n\t\t\tcmd := exec.Command(os.Args[0], \"update\", \"events\", dateStr)\n\t\t\tif err := wf.RunInBackground(jobName, cmd); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tif wf.Cache.Exists(name) {\n\t\tif err := wf.Cache.LoadJSON(name, &events); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Set map URL\n\tfor _, e := range events {\n\t\te.MapURL = 
mapURL(e.Location)\n\t}\n\treturn events, nil\n}", "func doFetch(cfg config.View, be pb.BackendClient, matches chan *pb.Match) {\n\tstartTime := time.Now()\n\tmprofiles := profiles.Generate(cfg)\n\tfor {\n\t\tvar wg sync.WaitGroup\n\t\tfor _, p := range mprofiles {\n\t\t\twg.Add(1)\n\t\t\tp := p\n\t\t\tgo func(wg *sync.WaitGroup) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tfetch(be, p, matches)\n\t\t\t}(&wg)\n\t\t}\n\n\t\t// Wait for all FetchMatches calls to complete before proceeding.\n\t\twg.Wait()\n\t\tlogger.Infof(\"FetchedMatches:%v, AssignedTickets:%v, DeletedTickets:%v in time %v\", atomic.LoadUint64(&matchCount), atomic.LoadUint64(&assigned), atomic.LoadUint64(&deleted), time.Since(startTime))\n\t}\n}", "func ScheduleJobs() {\n\tcount := max\n\ti := 0\n\tdeadline := -1\n\tfor count != 0 {\n\t\tif i == len(jobs)-1 { // a complete scan of all jobs is completed\n\t\t\tbreak\n\t\t}\n\t\tif deadline == -1 { // if deadline gets to -1 it can't be scheduled\n\t\t\tdeadline = int(jobs[i].deadline)\n\t\t\tif deadline == 0 && result[deadline-1] == 0 { // if a job's deadline is 0\n\t\t\t\tresult[deadline] = i + 1\n\t\t\t\tdeadline = -1\n\t\t\t\tcount--\n\t\t\t\ti++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif deadline-1 != -1 && result[deadline-1] == 0 { // check if the job can scheduled\n\t\t\tresult[deadline-1] = i + 1\n\t\t\tdeadline = -1\n\t\t\tcount--\n\t\t\ti++\n\t\t} else if deadline-1 == -1 { // check if the deadline is 0\n\t\t\tdeadline = -1\n\t\t\ti++\n\t\t} else if result[deadline-1] != 0 {\n\t\t\tdeadline--\n\t\t}\n\t}\n}", "func (wq *WordQuery) QueryCardSchedules() *CardScheduleQuery {\n\tquery := &CardScheduleQuery{config: wq.config}\n\tquery.path = func(ctx context.Context) (fromU *sql.Selector, err error) {\n\t\tif err := wq.prepareQuery(ctx); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tselector := wq.sqlQuery()\n\t\tif err := selector.Err(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(word.Table, 
word.FieldID, selector),\n\t\t\tsqlgraph.To(cardschedule.Table, cardschedule.FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.O2M, true, word.CardSchedulesTable, word.CardSchedulesColumn),\n\t\t)\n\t\tfromU = sqlgraph.SetNeighbors(wq.driver.Dialect(), step)\n\t\treturn fromU, nil\n\t}\n\treturn query\n}", "func (o CrawlerOutput) Schedule() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Crawler) pulumi.StringPtrOutput { return v.Schedule }).(pulumi.StringPtrOutput)\n}", "func (o InstancePropertiesResponseOutput) Scheduling() SchedulingResponseOutput {\n\treturn o.ApplyT(func(v InstancePropertiesResponse) SchedulingResponse { return v.Scheduling }).(SchedulingResponseOutput)\n}", "func (hs *HuddleScheduler) ScheduleHuddles() ([]*Huddle, error) {\n\t// First populate the structures we need to do the scheduling\n\tif err := hs.populatePatientInfosWithRiskScores(); err != nil {\n\t\treturn nil, err\n\t}\n\n\terr := hs.populatePatientInfosWithHuddleInfo()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Then create the populated huddles\n\terr = hs.createHuddles()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Store the huddles in the database\n\tvar lastErr error\n\tfor i := range hs.Huddles {\n\t\tif _, err := server.Database.C(\"groups\").UpsertId(hs.Huddles[i].Id, hs.Huddles[i]); err != nil {\n\t\t\tlastErr = err\n\t\t\tlog.Printf(\"Error storing huddle: %s\\n\", err)\n\t\t}\n\t}\n\n\ths.printInfo()\n\n\treturn hs.Huddles, lastErr\n}", "func (db *SQLDB) fetchDesiredLRPSchedulingInfoAndMore(logger lager.Logger, scanner helpers.RowScanner, dest ...interface{}) (*models.DesiredLRPSchedulingInfo, error) {\n\tschedulingInfo := &models.DesiredLRPSchedulingInfo{}\n\tvar routeData, volumePlacementData, placementTagData []byte\n\tvalues := 
[]interface{}{\n\t\t&schedulingInfo.ProcessGuid,\n\t\t&schedulingInfo.Domain,\n\t\t&schedulingInfo.LogGuid,\n\t\t&schedulingInfo.Annotation,\n\t\t&schedulingInfo.Instances,\n\t\t&schedulingInfo.MemoryMb,\n\t\t&schedulingInfo.DiskMb,\n\t\t&schedulingInfo.MaxPids,\n\t\t&schedulingInfo.RootFs,\n\t\t&routeData,\n\t\t&volumePlacementData,\n\t\t&schedulingInfo.ModificationTag.Epoch,\n\t\t&schedulingInfo.ModificationTag.Index,\n\t\t&placementTagData,\n\t}\n\tvalues = append(values, dest...)\n\n\terr := scanner.Scan(values...)\n\tif err == sql.ErrNoRows {\n\t\treturn nil, err\n\t}\n\n\tif err != nil {\n\t\tlogger.Error(\"failed-scanning\", err)\n\t\treturn nil, err\n\t}\n\n\tvar routes models.Routes\n\tencodedData, err := db.encoder.Decode(routeData)\n\tif err != nil {\n\t\tlogger.Error(\"failed-decrypting-routes\", err)\n\t\treturn nil, err\n\t}\n\terr = json.Unmarshal(encodedData, &routes)\n\tif err != nil {\n\t\tlogger.Error(\"failed-parsing-routes\", err)\n\t\treturn nil, err\n\t}\n\tschedulingInfo.Routes = routes\n\n\tvar volumePlacement models.VolumePlacement\n\terr = db.deserializeModel(logger, volumePlacementData, &volumePlacement)\n\tif err != nil {\n\t\tlogger.Error(\"failed-parsing-volume-placement\", err)\n\t\treturn nil, err\n\t}\n\tschedulingInfo.VolumePlacement = &volumePlacement\n\tif placementTagData != nil {\n\t\terr = json.Unmarshal(placementTagData, &schedulingInfo.PlacementTags)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"failed-parsing-placement-tags\", err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn schedulingInfo, nil\n}", "func (table *Timetable) UnmarshalJSON(b []byte) error {\n\tif table.schedule == nil {\n\t\ttable.schedule = make(map[string]*Task)\n\t}\n\tdata := make(map[string]interface{})\n\tjson.Unmarshal(b, &data)\n\ttable.Key = data[\"_key\"].(string)\n\tfor _, task := range data[\"schedule\"].([]interface{}) {\n\t\tv, _ := task.(map[string]interface{})\n\t\ttask := &Task{\n\t\t\tId: v[\"_key\"].(string),\n\t\t\tRunAt: 
v[\"runAt\"].(string),\n\t\t}\n\t\ttable.schedule[task.RunAt] = task\n\t}\n\treturn nil\n}", "func (a *SchedulesApiService) OrganizationsOrganizationIdAccountsAccountIdSchedulesGet(ctx context.Context, accountId string, organizationId string) (Schedule, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Get\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tsuccessPayload Schedule\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/organizations/{organization_id}/accounts/{account_id}/schedules\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"account_id\"+\"}\", fmt.Sprintf(\"%v\", accountId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"organization_id\"+\"}\", fmt.Sprintf(\"%v\", organizationId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t}\n\t}\n\tif ctx != nil {\n\t\t// API Key 
Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"x-api-key\"] = key\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn successPayload, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn successPayload, localVarHttpResponse, err\n\t}\n\tdefer localVarHttpResponse.Body.Close()\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tbodyBytes, _ := ioutil.ReadAll(localVarHttpResponse.Body)\n\t\treturn successPayload, localVarHttpResponse, reportError(\"Status: %v, Body: %s\", localVarHttpResponse.Status, bodyBytes)\n\t}\n\n\tif err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil {\n\t\treturn successPayload, localVarHttpResponse, err\n\t}\n\n\treturn successPayload, localVarHttpResponse, err\n}", "func UnmarshalSchedule(b []byte) (Schedule, error) {\n\tvar schedule Schedule\n\tif err := json.Unmarshal(b, &schedule); err != nil {\n\t\treturn schedule, err\n\t}\n\treturn schedule, nil\n}", "func (s *Scheduler) handleEvents(resp *http.Response) {\n\tr := misc.NewReader(resp.Body)\n\tdec := json.NewDecoder(r)\n\tfor {\n\t\tevent := new(sched.Event)\n\t\tif err := dec.Decode(event); err != nil {\n\t\t\tblog.Error(\"Decode mesos event failed: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tswitch event.GetType() {\n\t\tcase sched.Event_SUBSCRIBED:\n\t\t\tsub := event.GetSubscribed()\n\t\t\tblog.Info(\"subscribe mesos successful with frameworkId %s\", sub.FrameworkId.GetValue())\n\t\t\tif registered, _ := s.store.HasFrameworkID(); !registered {\n\t\t\t\tif err := 
s.store.SaveFrameworkID(sub.FrameworkId.GetValue()); err != nil {\n\t\t\t\t\tblog.Error(\"save frameworkId to DB failed: %s\", sub.FrameworkId.GetValue(), err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tblog.Info(\"save frameworkId %s to DB succeed\", sub.FrameworkId.GetValue())\n\t\t\t}\n\n\t\t\tif s.framework.Id == nil {\n\t\t\t\ts.framework.Id = sub.FrameworkId\n\t\t\t}\n\t\t\ts.lockService()\n\t\t\ts.mesosHeartBeatTime = time.Now().Unix()\n\t\t\ts.unlockService()\n\n\t\tcase sched.Event_OFFERS:\n\t\t\tfor _, offer := range event.Offers.Offers {\n\t\t\t\tby, _ := json.Marshal(offer)\n\t\t\t\tblog.V(3).Infof(\"mesos report offer %s\", string(by))\n\n\t\t\t\tcpus, mem, disk := s.OfferedResources(offer)\n\t\t\t\tblog.Infof(\"mesos report offer %s||%s: cpu(%f) mem(%f) disk(%f)\",\n\t\t\t\t\toffer.GetHostname(), *(offer.Id.Value), cpus, mem, disk)\n\t\t\t}\n\t\t\ts.offerPool.AddOffers(event.Offers.Offers)\n\n\t\tcase sched.Event_RESCIND:\n\t\t\tblog.Info(\"mesos report rescind offers event\")\n\n\t\tcase sched.Event_UPDATE:\n\t\t\tblog.V(3).Infof(\"mesos report update event\")\n\t\t\tstatus := event.GetUpdate().GetStatus()\n\t\t\tgo func() {\n\t\t\t\ts.StatusReport(status)\n\t\t\t}()\n\n\t\tcase sched.Event_MESSAGE:\n\t\t\tmessage := event.GetMessage()\n\t\t\tblog.V(3).Infof(\"receive message(%s)\", message.String())\n\t\t\tdata := message.GetData()\n\t\t\tvar bcsMsg *types.BcsMessage\n\t\t\terr := json.Unmarshal(data, &bcsMsg)\n\t\t\tif err != nil {\n\t\t\t\tblog.Error(\"unmarshal bcsmessage(%s) err:%s\", data, err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch *bcsMsg.Type {\n\t\t\tcase types.Msg_Res_COMMAND_TASK:\n\t\t\t\tgo s.ProcessCommandMessage(bcsMsg)\n\t\t\tcase types.Msg_TASK_STATUS_UPDATE:\n\t\t\t\tgo s.UpdateTaskStatus(message.GetAgentId().GetValue(), message.GetExecutorId().GetValue(), bcsMsg)\n\t\t\tdefault:\n\t\t\t\tblog.Error(\"unknown message type(%s)\", *bcsMsg.Type)\n\t\t\t}\n\n\t\tcase sched.Event_FAILURE:\n\t\t\t//blog.Warn(\"Received failure 
event\")\n\t\t\tfail := event.GetFailure()\n\t\t\tif fail.ExecutorId != nil {\n\t\t\t\tblog.Info(\"Executor(%s) terminated with status(%d) on agent(%s)\",\n\t\t\t\t\tfail.ExecutorId.GetValue(), fail.GetStatus(), fail.GetAgentId().GetValue())\n\t\t\t} else {\n\t\t\t\tif fail.GetAgentId() != nil {\n\t\t\t\t\tblog.Info(\"Agent \" + fail.GetAgentId().GetValue() + \" failed \")\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase sched.Event_ERROR:\n\t\t\terr := event.GetError().GetMessage()\n\t\t\tblog.Error(\"mesos report error event. err:%s\", err)\n\n\t\tcase sched.Event_HEARTBEAT:\n\t\t\tblog.V(3).Infof(\"mesos report heartbeat event\")\n\t\t\ts.lockService()\n\t\t\ts.mesosHeartBeatTime = time.Now().Unix()\n\t\t\ts.unlockService()\n\t\tdefault:\n\t\t\tblog.Warn(\"unkown mesos event type(%d)\", event.GetType())\n\t\t}\n\n\t}\n}", "func (m *subscriptionMigrator) populateTriggers(namespaces []string) error {\n\tm.triggersByNamespace = make(triggersByNamespaces)\n\n\tfor _, ns := range namespaces {\n\t\ttriggers, err := m.knativeClient.EventingV1alpha1().Triggers(ns).List(metav1.ListOptions{})\n\t\tswitch {\n\t\tcase apierrors.IsNotFound(err):\n\t\t\treturn NewTypeNotFoundError(err.(*apierrors.StatusError).ErrStatus.Details.Kind)\n\t\tcase err != nil:\n\t\t\treturn errors.Wrapf(err, \"listing Triggers in namespace %s\", ns)\n\t\t}\n\n\t\tm.triggersByNamespace[ns] = triggers.Items\n\t}\n\n\treturn nil\n}", "func Schedules(duration string) (string, error) {\n\tclient, err := Client()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tsrv, err := newService(client)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tnow := time.Now()\n\tvar end time.Time\n\tswitch duration {\n\tcase \"day\":\n\t\tend = now.AddDate(0, 0, 1)\n\tcase \"week\":\n\t\tend = now.AddDate(0, 0, 7)\n\t}\n\tif end.IsZero() {\n\t\tend = time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.UTC)\n\t}\n\tevents, err := srv.Event(now, end)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar messages 
[]string\n\tif len(events.Items) == 0 {\n\t\tfmt.Println(\"No upcoming events found.\")\n\t} else {\n\t\tfor _, item := range events.Items {\n\t\t\tdate := item.Start.DateTime\n\t\t\tif date == \"\" {\n\t\t\t\tdate = item.Start.Date\n\t\t\t}\n\t\t\tmessages = append(messages, fmt.Sprintf(\"%v: %v\\n\", date, item.Summary))\n\t\t}\n\t}\n\treturn strings.Join(messages, \"\"), nil\n}", "func (e *CronJob) parseSchedule() (*schedule.Schedule, error) {\n\tif e.cachedSchedule == nil && e.cachedScheduleErr == nil {\n\t\thash := fnv.New64()\n\t\thash.Write([]byte(e.JobID))\n\t\tseed := hash.Sum64()\n\t\te.cachedSchedule, e.cachedScheduleErr = schedule.Parse(e.effectiveSchedule(), seed)\n\t\tif e.cachedSchedule == nil && e.cachedScheduleErr == nil {\n\t\t\tpanic(\"no schedule and no error\")\n\t\t}\n\t}\n\treturn e.cachedSchedule, e.cachedScheduleErr\n}", "func (ras *RoleAssignmentSchedule) UnmarshalJSON(body []byte) error {\n\tvar m map[string]*json.RawMessage\n\terr := json.Unmarshal(body, &m)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor k, v := range m {\n\t\tswitch k {\n\t\tcase \"id\":\n\t\t\tif v != nil {\n\t\t\t\tvar ID string\n\t\t\t\terr = json.Unmarshal(*v, &ID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tras.ID = &ID\n\t\t\t}\n\t\tcase \"name\":\n\t\t\tif v != nil {\n\t\t\t\tvar name string\n\t\t\t\terr = json.Unmarshal(*v, &name)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tras.Name = &name\n\t\t\t}\n\t\tcase \"type\":\n\t\t\tif v != nil {\n\t\t\t\tvar typeVar string\n\t\t\t\terr = json.Unmarshal(*v, &typeVar)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tras.Type = &typeVar\n\t\t\t}\n\t\tcase \"properties\":\n\t\t\tif v != nil {\n\t\t\t\tvar roleAssignmentScheduleProperties RoleAssignmentScheduleProperties\n\t\t\t\terr = json.Unmarshal(*v, &roleAssignmentScheduleProperties)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tras.RoleAssignmentScheduleProperties = 
&roleAssignmentScheduleProperties\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (tbl AssociationTable) Query(req require.Requirement, query string, args ...interface{}) ([]*Association, error) {\n\treturn doAssociationTableQueryAndScan(tbl, req, false, query, args)\n}", "func HandleSchedule(conf *Conf, svc *Services, ev json.RawMessage) (res interface{}, err error) {\n\n\tdoneCh := make(chan struct{})\n\tif err = svc.DB.ScanPages(&dynamodb.ScanInput{\n\t\tTableName: aws.String(conf.PoolsTableName),\n\t},\n\t\tfunc(page *dynamodb.ScanOutput, lastPage bool) bool {\n\t\t\tfor _, item := range page.Items {\n\t\t\t\tpool := &Pool{}\n\t\t\t\terr := dynamodbattribute.UnmarshalMap(item, pool)\n\t\t\t\tif err != nil {\n\t\t\t\t\tsvc.Logs.Error(\"failed to unmarshal replica item\", zap.Error(err))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif pool.TTL > 0 {\n\t\t\t\t\tcontinue //pool is marked for deletion, no evaluations allowed\n\t\t\t\t}\n\n\t\t\t\tsvc.Logs.Info(\"pool\", zap.String(\"pool\", fmt.Sprintf(\"%+v\", pool)))\n\t\t\t\tgo ReceiveEvals(conf, svc, pool)\n\t\t\t}\n\t\t\treturn true\n\t\t}); err != nil {\n\t\treturn\n\t}\n\n\t//this will block forever while messages are being received for pools concurrently\n\t<-doneCh\n\n\treturn ev, nil\n}", "func (p *timeWheel) cascade() {\n\ttv1_index := p.timeVectorIndex(0)\n\tif tv1_index != 0 {\n\t\treturn\n\t}\n\tfor i := 1; i < tvCount; i++ {\n\t\tidx := p.timeVectorIndex(i)\n\t\tif idx == 0 {\n\t\t\treturn\n\t\t}\n\t\tcascadelist := p.tvs[i].vector[idx]\n\t\tfor e := cascadelist.Front(); e != nil; e = e.Next() {\n\t\t\tp.add(e.Value.(*job))\n\t\t}\n\t\tp.deleteJobList(uint32(i), idx)\n\t}\n}", "func (q scheduleSubjectQuery) All(ctx context.Context, exec boil.ContextExecutor) (ScheduleSubjectSlice, error) {\n\tvar o []*ScheduleSubject\n\n\terr := q.Bind(ctx, exec, &o)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"models: failed to assign all query results to ScheduleSubject slice\")\n\t}\n\n\tif 
len(scheduleSubjectAfterSelectHooks) != 0 {\n\t\tfor _, obj := range o {\n\t\t\tif err := obj.doAfterSelectHooks(ctx, exec); err != nil {\n\t\t\t\treturn o, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn o, nil\n}", "func (s *supervisor) processSchedule(r *processorRequestSchedule) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tn := s.nodeByDN(r.dn)\n\tgo func() {\n\t\tif !s.propagatePanic {\n\t\t\tdefer func() {\n\t\t\t\tif rec := recover(); rec != nil {\n\t\t\t\t\ts.pReq <- &processorRequest{\n\t\t\t\t\t\tdied: &processorRequestDied{\n\t\t\t\t\t\t\tdn: r.dn,\n\t\t\t\t\t\t\terr: fmt.Errorf(\"panic: %v, stacktrace: %s\", rec, string(debug.Stack())),\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\n\t\tres := n.runnable(n.ctx)\n\n\t\ts.pReq <- &processorRequest{\n\t\t\tdied: &processorRequestDied{\n\t\t\t\tdn: r.dn,\n\t\t\t\terr: res,\n\t\t\t},\n\t\t}\n\t}()\n}", "func (c *CIFImporter) cleanup() error {\n log.Println(\"Removing historic associations\")\n res, err := c.db.Exec(\"DELETE FROM timetable.assoc WHERE enddate < NOW()::DATE\")\n if err != nil {\n return err\n }\n rc, err := res.RowsAffected()\n if err != nil {\n return err\n }\n if rc > 0 {\n log.Printf(\"Removed %d associations\", rc)\n }\n\n log.Println(\"Removing historic schedules\")\n res, err = c.db.Exec(\"DELETE FROM timetable.schedule WHERE enddate < NOW()::DATE\")\n if err != nil {\n return err\n }\n rc, err = res.RowsAffected()\n if err != nil {\n return err\n }\n if rc > 0 {\n log.Printf(\"Removed %d schedules\", rc)\n }\n\n log.Println(\"Fixing tiploc crs codes\")\n _, err = c.db.Exec(\"SELECT timetable.fixtiploccrs()\")\n if err != nil {\n return err\n }\n\n return nil\n}", "func AddRelatedPropertyGeneratorsForScheduledEventsProfile(gens map[string]gopter.Gen) {\n\tgens[\"TerminateNotificationProfile\"] = gen.PtrOf(TerminateNotificationProfileGenerator())\n}", "func (m *DirectoryRequestBuilder) RoleAssignmentSchedulesById(id 
string)(*ifcfeaaa38c74c27248ac242dc390a943df9fda837089f362cf5a0b616515e16e.UnifiedRoleAssignmentScheduleItemRequestBuilder) {\n urlTplParams := make(map[string]string)\n for idx, item := range m.pathParameters {\n urlTplParams[idx] = item\n }\n if id != \"\" {\n urlTplParams[\"unifiedRoleAssignmentSchedule%2Did\"] = id\n }\n return ifcfeaaa38c74c27248ac242dc390a943df9fda837089f362cf5a0b616515e16e.NewUnifiedRoleAssignmentScheduleItemRequestBuilderInternal(urlTplParams, m.requestAdapter);\n}", "func doUpdateEvents() error {\n\twf.Configure(aw.TextErrors(true))\n\n\tvar (\n\t\tname = fmt.Sprintf(\"events-%s.json\", opts.StartTime.Format(timeFormat))\n\t\tcals []*Calendar\n\t\tevents []*Event\n\t\terr error\n\t)\n\n\tlog.Printf(\"[update] fetching events for %s ...\", opts.StartTime.Format(timeFormat))\n\n\tif err := clearOldFiles(); err != nil {\n\t\tlog.Printf(\"[update] ERR: delete old cache files: %v\", err)\n\t}\n\n\tif cals, err = activeCalendars(); err != nil {\n\t\treturn err\n\t}\n\n\tif len(accounts) == 0 {\n\t\tlog.Print(\"[update] no Google accounts configured\")\n\t\treturn nil\n\t}\n\n\tif len(cals) == 0 {\n\t\tlog.Print(\"[update] no active calendars\")\n\t\treturn nil\n\t}\n\n\tlog.Printf(\"[update] %d active calendar(s)\", len(cals))\n\n\t// Fetch events in parallel\n\tvar (\n\t\tch = make(chan *Event)\n\t\twg sync.WaitGroup\n\t\twanted = make(map[string]bool, len(cals)) // IDs of calendars to update\n\t)\n\n\tfor _, c := range cals {\n\t\twanted[c.ID] = true\n\t}\n\n\twg.Add(len(cals))\n\n\tfor _, acc := range accounts {\n\t\tfor _, c := range acc.Calendars {\n\t\t\tif _, ok := wanted[c.ID]; !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tgo func(c *Calendar, acc *Account) {\n\t\t\t\tdefer wg.Done()\n\n\t\t\t\tevs, err := acc.FetchEvents(c, opts.StartTime)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"[update] ERR: fetching events for calendar %q: %v\", c.Title, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tlog.Printf(\"[update] %d event(s) in calendar 
%q\", len(evs), c.Title)\n\n\t\t\t\tfor _, e := range evs {\n\t\t\t\t\tch <- e\n\t\t\t\t}\n\t\t\t}(c, acc)\n\t\t}\n\t}\n\n\t// Close channel when all goroutines are done\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(ch)\n\t}()\n\n\tcolours := map[string]bool{}\n\tfor e := range ch {\n\t\tlog.Printf(\"[update] %s\", e)\n\t\tevents = append(events, e)\n\t\tcolours[e.Colour] = true\n\t}\n\n\tsort.Sort(EventsByStart(events))\n\n\tif err := wf.Cache.StoreJSON(name, events); err != nil {\n\t\treturn err\n\t}\n\n\t// Ensure icons exist in all colours\n\tfor clr := range colours {\n\t\t_ = ColouredIcon(iconCalendar, clr)\n\t\t_ = ColouredIcon(iconMap, clr)\n\t}\n\n\treturn nil\n}", "func ScheduleScrapes(mtID int, attempt int, app *Application) func(time.Time) {\n\n\treturn func(now time.Time) {\n\n\t\tfail := func(err error) {\n\t\t\tlog.Print(log.Error, err)\n\t\t\tat := now.Add(time.Duration(app.Config.Scheduling.WaitTime) * time.Minute)\n\n\t\t\t// schedule another attempt unless max attempts have been done.\n\t\t\t// if max attempts exceeded, schedule the next day's task\n\t\t\tif attempt < app.Config.Scheduling.MaxAttempts {\n\t\t\t\tlog.Printf(log.Warning, \"attempt %d to schedule scrapes for mtID=%d will retry at %s\",\n\t\t\t\t\tattempt+2, mtID, at.Format(time.UnixDate))\n\t\t\t\tapp.Scheduler.Add(scheduler.NewTask(\n\t\t\t\t\tat,\n\t\t\t\t\tScheduleScrapes(mtID, attempt+1, app)))\n\t\t\t} else {\n\t\t\t\tlog.Printf(log.Warning, \"exceeded max attempts (%d) to schedule scrapes for mtID=%d).\", attempt, mtID)\n\t\t\t\tapp.Scheduler.Add(scheduler.NewTask(\n\t\t\t\t\tstartOfNextDay(at),\n\t\t\t\t\tScheduleScrapes(mtID, 0, app)))\n\t\t\t}\n\t\t}\n\n\t\t// read mt and cams\n\t\tmt, err := db.Mountain(mtID)\n\t\tcams, err := db.CamerasOnMountain(mtID)\n\t\tif err != nil {\n\t\t\tfail(err)\n\t\t\treturn // can't continue if can't read DB\n\t\t}\n\n\t\t// get tz info for mt\n\t\ttz, err := time.LoadLocation(mt.TzLocation)\n\t\tif err != nil {\n\t\t\tfail(err)\n\t\t\treturn // 
can't continue if can't get tz\n\t\t}\n\t\tnow = now.In(tz) // convert time to correct tz\n\t\tlog.Printf(log.Debug, \"processing mountain %s(id=%d)\", mt.Name, mt.ID)\n\n\t\t// get astro data for mt\n\t\t// const maxTries = 3\n\t\t// var tries int\n\t\tvar sun astro.Data\n\t\t// for ; tries < maxTries; tries++ {\n\t\t// \tsun, err = astro.Get(mt.Latitude, mt.Longitude, now)\n\t\t// \tif err == nil {\n\t\t// \t\tbreak\n\t\t// \t}\n\t\t// \ttime.Sleep(3 * time.Second)\n\t\t// }\n\t\t// if tries >= maxTries {\n\t\t// \tlog.Printf(log.Error, \"too many tries to get astro data for %s(id=%d). falling back to local calculation\", mt.Name, mt.ID)\n\t\tsun, err = astro.GetLocal(mt.Latitude, mt.Longitude, now)\n\t\tif err != nil {\n\t\t\terr = errors.Wrap(err, \"using local calculation\")\n\t\t\tfail(err)\n\t\t\treturn\n\t\t}\n\t\t// } else {\n\t\t// \tlog.Printf(log.Debug, \"took %d/%d tries to get astro data for %s(id=%d)\", tries+1, maxTries, mt.Name, mt.ID)\n\t\t// }\n\n\t\t// for each cam\n\t\tfor _, cam := range cams {\n\t\t\t// skip inactive cams\n\t\t\tif !cam.IsActive {\n\t\t\t\tlog.Printf(log.Debug, \"skipping inactive cam %s(id=%d)\", cam.Name, cam.ID)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// round current time to nearest cam interval\n\t\t\tinterval := time.Duration(cam.Interval) * time.Minute\n\t\t\tstart := roundup(now, interval)\n\t\t\tstop := startOfNextDay(now)\n\t\t\tcount := 0\n\t\t\tbegin, end := start, stop\n\t\t\t// for each time+interval until end-of-day...\n\t\t\tfor t := start; t.Before(stop); t = t.Add(interval) {\n\t\t\t\t// determine if the cam should be scraped at time t\n\t\t\t\tdata := RulesData{\n\t\t\t\t\tAstro: sun,\n\t\t\t\t\tMountain: mt,\n\t\t\t\t\tCamera: cam,\n\t\t\t\t\tNow: t}\n\t\t\t\tdo, err := cam.ExecuteRules(data)\n\t\t\t\tif do {\n\t\t\t\t\t// schedule a scrape\n\t\t\t\t\tapp.Scheduler.Add(scheduler.NewTask(\n\t\t\t\t\t\tt,\n\t\t\t\t\t\tScrape(mt.ID, cam.ID, app.Config)))\n\t\t\t\t\t// record actual number of scrapes 
scheduled\n\t\t\t\t\t// and the true first and last times\n\t\t\t\t\tcount++\n\t\t\t\t\tif begin.IsZero() {\n\t\t\t\t\t\tbegin = t\n\t\t\t\t\t}\n\t\t\t\t\tend = t\n\t\t\t\t} else if err != nil {\n\t\t\t\t\tfail(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tlog.Printf(log.Debug, \"%d scrapes scheduled for %s(id=%d) from %s to %s every %s\",\n\t\t\t\tcount, cam.Name, cam.ID,\n\t\t\t\tbegin.Format(time.UnixDate), end.Format(time.UnixDate),\n\t\t\t\tinterval)\n\t\t}\n\n\t\t// schedule ScheduleScrapes() for next day\n\t\tnext := startOfNextDay(now)\n\t\tapp.Scheduler.Add(scheduler.NewTask(\n\t\t\tnext,\n\t\t\tScheduleScrapes(mtID, 0, app)))\n\t\tlog.Printf(log.Debug, \"next ScheduleScrapes(%s) at %s\", mt.Name, next.Format(time.UnixDate))\n\t}\n}", "func collectMatchingEvents(ctx context.Context, kubeClient *knativetest.KubeClient, namespace string, kinds map[string][]string) ([]*corev1.Event, error) {\n\tvar events []*corev1.Event\n\n\twatchEvents, err := kubeClient.CoreV1().Events(namespace).Watch(ctx, metav1.ListOptions{})\n\t// close watchEvents channel\n\tdefer watchEvents.Stop()\n\tif err != nil {\n\t\treturn events, err\n\t}\n\n\t// create timer to not wait for events longer than 5 seconds\n\ttimer := time.NewTimer(5 * time.Second)\n\n\tfor {\n\t\tselect {\n\t\tcase wevent := <-watchEvents.ResultChan():\n\t\t\tevent := wevent.Object.(*corev1.Event)\n\t\t\tif val, ok := kinds[event.InvolvedObject.Kind]; ok {\n\t\t\t\tfor _, expectedName := range val {\n\t\t\t\t\tif event.InvolvedObject.Name == expectedName {\n\t\t\t\t\t\tevents = append(events, event)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-timer.C:\n\t\t\treturn events, nil\n\t\t}\n\t}\n}", "func (o AssociationOutput) ScheduleExpression() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Association) pulumi.StringPtrOutput { return v.ScheduleExpression }).(pulumi.StringPtrOutput)\n}", "func doFetch(cfg config.View, be pb.BackendClient) {\n\tstartTime := time.Now()\n\tmprofiles := 
profiles.Generate(cfg)\n\n\tfor {\n\t\tvar wg sync.WaitGroup\n\t\tfor _, p := range mprofiles {\n\t\t\twg.Add(1)\n\t\t\tgo func(wg *sync.WaitGroup, p *pb.MatchProfile) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tfetch(be, p)\n\t\t\t}(&wg, p)\n\t\t}\n\n\t\t// Wait for all FetchMatches calls to complete before proceeding.\n\t\twg.Wait()\n\t\terrMap.Range(func(k interface{}, v interface{}) bool {\n\t\t\tlogger.Infof(\"Got error %s: %#v\", k, v)\n\t\t\treturn true\n\t\t})\n\t\tlogger.Infof(\n\t\t\t\"FetchedMatches:%v, AssignedTickets:%v, DeletedTickets:%v in time %v, Total profiles: %v\",\n\t\t\tatomic.LoadUint64(&matchCount),\n\t\t\tatomic.LoadUint64(&assigned),\n\t\t\tatomic.LoadUint64(&deleted),\n\t\t\ttime.Since(startTime).Seconds(),\n\t\t\tlen(mprofiles),\n\t\t)\n\t}\n}", "func (ctx *Context) ScheduleHandler(w http.ResponseWriter, r *http.Request) {\n\t// Only support GET POST method\n\tif r.Method != \"GET\" && r.Method != \"POST\" && r.Method != \"DELETE\" {\n\t\thttp.Error(w, errUnsuportMethod, http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\t// parse meeting id\n\turlID := path.Base(path.Dir(r.URL.Path))\n\tmid := getIDfromURL(w, r, urlID)\n\tif mid < 0 {\n\t\treturn\n\t}\n\n\tif r.Method == \"POST\" {\n\t\t// Only support JSON body\n\t\tif !isContentTypeJSON(w, r) {\n\t\t\treturn\n\t\t}\n\n\t\t// Read response body\n\t\tbody := getRequestBody(w, r)\n\t\tif body == nil {\n\t\t\treturn\n\t\t}\n\n\t\t// Unmarshal body to group object\n\t\tsch := &model.Schedule{}\n\t\tif !unmarshalBody(w, body, sch) {\n\t\t\treturn\n\t\t}\n\t\tsch.MeetingID = mid\n\n\t\t// Add to database\n\t\tid, err := ctx.Store.CreateSchedule(sch)\n\t\tif !dbErrorHandle(w, \"Insert group\", err) {\n\t\t\treturn\n\t\t}\n\n\t\tschedule, err := ctx.Store.GetScheduleByID(id)\n\t\tif !dbErrorHandle(w, \"Get schedule\", err) {\n\t\t\treturn\n\t\t}\n\n\t\t// marshal into bytes\n\t\tresponse := marshalRep(w, schedule)\n\t\tif response == nil {\n\t\t\treturn\n\t\t}\n\n\t\t// 
Response\n\t\trespondWithHeader(w, typeJSON, response, http.StatusCreated)\n\t}\n\n\tif r.Method == \"GET\" {\n\t\tschedules, err := ctx.Store.GetAllSchedule(mid)\n\t\tif !dbErrorHandle(w, \"Get schedule\", err) {\n\t\t\treturn\n\t\t}\n\n\t\tresponse := marshalRep(w, schedules)\n\t\trespondWithHeader(w, typeJSON, response, http.StatusOK)\n\t}\n\n}", "func (n *Neis) GetCalendar(year int, month time.Month) ([]Calendar, error) {\n\tstart := time.Date(year, month, 1, 0, 0, 0, 0, time.Now().Location())\n\tend := time.Date(year, month+1, 0, 0, 0, 0, 0, time.Now().Location())\n\tduration := int(end.Sub(start).Hours()/24) + 1\n\n\tq := url.Values{\n\t\t\"KEY\": []string{n.apiKey},\n\t\t\"Type\": []string{\"json\"},\n\t\t\"ATPT_OFCDC_SC_CODE\": []string{n.region},\n\t\t\"SD_SCHUL_CODE\": []string{n.code},\n\t\t\"AA_FROM_YMD\": []string{start.Format(\"20060102\")},\n\t\t\"AA_TO_YMD\": []string{end.Format(\"20060102\")},\n\t}\n\n\turl := fmt.Sprintf(\"https://open.neis.go.kr/hub/SchoolSchedule?%s\", q.Encode())\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tb, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data calendarSchema\n\tvar calendars = make([]Calendar, duration)\n\n\tif err := json.Unmarshal(b, &data); err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, row := range data.Schoolschedule[1].Row {\n\t\td, err := time.Parse(\"20060102\", row.AaYmd)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tindex := end.Day() - d.Day()\n\n\t\tcalendars[index] = Calendar{\n\t\t\tDate: d,\n\t\t\tName: row.EventNm,\n\t\t\tContent: row.EventCntnt,\n\t\t\tClassTime: row.DghtCrseScNm,\n\t\t\tDeduction: row.SbtrDdScNm,\n\t\t\tTarget: [6]bool{\n\t\t\t\trow.OneGradeEventYn == \"Y\",\n\t\t\t\trow.TwGradeEventYn == \"Y\",\n\t\t\t\trow.ThreeGradeEventYn == \"Y\",\n\t\t\t\trow.FrGradeEventYn == \"Y\",\n\t\t\t\trow.FivGradeEventYn == \"Y\",\n\t\t\t\trow.SixGradeEventYn == 
\"Y\",\n\t\t\t},\n\t\t}\n\n\t}\n\n\treturn calendars, nil\n}", "func (t *DarwinTimetable) PruneSchedules() (int, error) {\n\tcount := 0\n\n\tif err := t.Update(func(tx *bolt.Tx) error {\n\t\tlim := time.Now().Truncate(24 * time.Hour)\n\n\t\tlog.Println(\"PruneSchedules:\", lim.Format(\"2006-01-02\"))\n\n\t\tbucket := tx.Bucket([]byte(\"DarwinJourney\"))\n\n\t\tif err := bucket.ForEach(func(k, v []byte) error {\n\t\t\tj := &Journey{}\n\t\t\terr := json.Unmarshal(v, j)\n\t\t\tif err != nil || j.SSD.Before(lim) {\n\t\t\t\tcount++\n\t\t\t\treturn bucket.Delete(k)\n\t\t\t}\n\t\t\treturn nil\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlog.Println(\"PruneSchedules:\", count)\n\t\treturn nil\n\t}); err != nil {\n\t\treturn 0, err\n\t}\n\treturn count, nil\n}" ]
[ "0.56238943", "0.52930427", "0.523047", "0.5199952", "0.5147988", "0.5058819", "0.50440824", "0.5031958", "0.50148183", "0.49584836", "0.492599", "0.48424038", "0.47939503", "0.47636837", "0.474132", "0.47204733", "0.47124", "0.4708795", "0.46959302", "0.46930283", "0.46441042", "0.46397638", "0.4638275", "0.46077445", "0.45739353", "0.45188418", "0.45097196", "0.4507946", "0.44982883", "0.44772503", "0.44640192", "0.44490108", "0.44418752", "0.44415745", "0.44383585", "0.44332182", "0.4425612", "0.44127637", "0.4393232", "0.43922463", "0.4390169", "0.43839315", "0.4367697", "0.43561953", "0.4342043", "0.43346956", "0.43321508", "0.43301642", "0.43014818", "0.42988703", "0.42926562", "0.4282989", "0.42762962", "0.42686203", "0.426351", "0.42580327", "0.42491552", "0.42444476", "0.42309502", "0.4218744", "0.421691", "0.42020217", "0.41887048", "0.4188213", "0.41832337", "0.4179968", "0.41789207", "0.41722327", "0.41671228", "0.4164589", "0.41575515", "0.41546735", "0.41439897", "0.41430378", "0.41397104", "0.41395226", "0.41326007", "0.41290554", "0.41276476", "0.41243166", "0.41198403", "0.4112329", "0.41071886", "0.4098504", "0.40981942", "0.40974608", "0.4092768", "0.40871918", "0.40851253", "0.40789855", "0.40783492", "0.4076762", "0.4072604", "0.40723535", "0.40713456", "0.40709916", "0.4070439", "0.4070276", "0.4069553", "0.4067768" ]
0.6897732
0
acceptService returns true if the service is to be accepted, false if it's to be ignored
func (bf *boardFilter) acceptService(service ldb.Service) bool { // Original requirement, must have an RID if service.RID == "" { return false } // remove terminating services if bf.terminated && bf.atStation(service.Destination) { return false } if bf.callAt && !bf.callsAt(service.CallingPoints, bf.callAtTiplocs) { return false } return true }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (f *aclFilter) allowService(service string) bool {\n\tif service == \"\" {\n\t\treturn true\n\t}\n\n\tif !f.enforceVersion8 && service == structs.ConsulServiceID {\n\t\treturn true\n\t}\n\treturn f.authorizer.ServiceRead(service)\n}", "func (m *MockMessageSvc) Accept(msgType string, purpose []string) bool {\n\tif m.AcceptFunc != nil {\n\t\treturn m.AcceptFunc(msgType, purpose)\n\t}\n\n\treturn true\n}", "func (s *Service) Accept(conn net.Conn, ipport string) error {\n\tswitch s.Role {\n\tcase ROLE_MANAGE:\n\t\treturn TcpAcceptor(conn, s, ipport)\n\tcase ROLE_PROXY, ROLE_WEBSERVER:\n\t\treturn HttpAcceptor(conn, s, ipport)\n\tdefault:\n\t\tlog.Fatal(\"unknown role in accept\")\n\t}\n\treturn errors.New(\"Accept fell through!\")\n}", "func (s *ss) accept(ok string) bool {\n\treturn s.consume(ok, true)\n}", "func (s *Suite) Accept(t string) bool {\n\treturn t == signatureType\n}", "func (f *MSPFilter) Accept(peer fab.Peer) bool {\n\treturn peer.MSPID() == f.mspID\n}", "func IsValidService(s string) bool {\n\tswitch s {\n\tcase\n\t\t\"all\",\n\t\t\"proxy\",\n\t\t\"authorize\",\n\t\t\"authenticate\":\n\t\treturn true\n\t}\n\treturn false\n}", "func (v *VDRI) Accept(method string) bool {\n\treturn v.accept(method)\n}", "func (s ServiceSpecs) SupportService(serviceUrl string, serviceOrg string) bool {\n\tif serviceUrl == \"\" {\n\t\treturn true\n\t} else {\n\t\tif len(s) == 0 {\n\t\t\treturn true\n\t\t} else {\n\t\t\tfor _, sp := range s {\n\t\t\t\tif sp.Url == serviceUrl && (sp.Org == \"\" || sp.Org == serviceOrg) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func isRelevantExposedService(service *v1alpha1.ServiceExpositionPolicy_ExposedService, toClusterID string) bool {\n\t// If there is no clusters list, we treat this policy as exposed to all trusted clusters\n\tif len(service.Clusters) == 0 {\n\t\treturn true\n\t}\n\n\t// Go through the list of allowed clusters and see if it is listed\n\tfor _, cluster := range 
service.Clusters {\n\t\tif cluster == toClusterID {\n\t\t\treturn true\n\t\t}\n\t}\n\n\t// Service is not exposed to the specified cluster\n\treturn false\n}", "func containsService(name string, services []servicescm.Service) bool {\n\tfor _, svc := range services {\n\t\tif svc.Name == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func IsExposedService(svc *corev1.Service) bool {\n\tlabels := svc.Labels\n\tif labels == nil {\n\t\tlabels = map[string]string{}\n\t}\n\tfor _, l := range ExposeLabelKeys {\n\t\tif labels[l] == \"true\" {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (p awsPeeringServiceOp) Accept(ctx context.Context, input *models.AcceptAwsPeeringInput) (*models.Result, *Response, error) {\n\tvar peeringResult models.Result\n\tgraphqlRequest := models.GraphqlRequest{\n\t\tName: \"acceptAwsPeering\",\n\t\tOperation: models.Mutation,\n\t\tInput: *input,\n\t\tArgs: nil,\n\t\tResponse: peeringResult,\n\t}\n\treq, err := p.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := p.client.Do(ctx, req, &peeringResult)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn &peeringResult, resp, err\n}", "func (px *Paxos) send_accept(seq int, p Proposal) bool {\n\tok_count := 0\n\n\tfor idx, peer := range px.peers {\n\t\targs := &AcceptArgs{}\n\t\treply := &AcceptReply{}\n\n\t\targs.Seq = seq\n\t\targs.Proposal = p\n\n\t\tok := false\n\n\t\tif idx == px.me {\n\t\t\tpx.Accept(args, reply)\n\t\t\tok = true\n\t\t} else {\n\t\t\tok = call(peer, \"Paxos.Accept\", args, reply)\n\t\t}\n\n\t\tif ok && reply.Err == OK {\n\t\t\tok_count++\n\t\t}\n\t}\n\n\tpx.clog(DBG_PREPARE, \"send_accept\", \"seq=%d p=%v ok_count=%d/%d\", seq, p, ok_count, px.majority)\n\n\treturn (ok_count >= px.majority)\n}", "func (q *Query) ServiceMatches(s ServiceInfo) bool {\n\tif q.Service != \"\" && s.Config.Name != q.Service {\n\t\treturn false\n\t}\n\n\tif q.Version != \"\" && s.Config.Version != q.Version 
{\n\t\treturn false\n\t}\n\n\tif q.Region != \"\" && s.Config.Region != q.Region {\n\t\treturn false\n\t}\n\n\tif q.Host != \"\" && s.Config.ServiceAddr.IPAddress != q.Host {\n\t\treturn false\n\t}\n\n\tif q.Port != \"\" && fmt.Sprintf(\"%d\", s.Config.ServiceAddr.Port) != q.Port {\n\t\treturn false\n\t}\n\n\tif q.Registered != nil && s.Registered != *q.Registered {\n\t\treturn false\n\t}\n\n\tif q.UUID != \"\" && s.Config.UUID != q.UUID {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (c *comp) Accepting() bool {\n\treturn !c.r.Accepting()\n}", "func (o *ReservationStayOfferServiceModel) GetServiceOk() (*EmbeddedServiceModel, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Service, true\n}", "func (p *Reader) Accept(services protocol.ServiceFlag) error {\n\tif err := p.Handshake(services); err != nil {\n\t\t_ = p.Conn.Close()\n\t\treturn err\n\t}\n\n\tif config.Get().API.Enabled {\n\t\tgo func() {\n\t\t\tstore := capi.GetStormDBInstance()\n\t\t\taddr := p.Addr()\n\t\t\tpeerJSON := capi.PeerJSON{\n\t\t\t\tAddress: addr,\n\t\t\t\tType: \"Reader\",\n\t\t\t\tMethod: \"Accept\",\n\t\t\t\tLastSeen: time.Now(),\n\t\t\t}\n\n\t\t\terr := store.Save(&peerJSON)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"failed to save peer into StormDB\")\n\t\t\t}\n\n\t\t\t// save count\n\t\t\tpeerCount := capi.PeerCount{\n\t\t\t\tID: addr,\n\t\t\t\tLastSeen: time.Now(),\n\t\t\t}\n\n\t\t\terr = store.Save(&peerCount)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"failed to save peerCount into StormDB\")\n\t\t\t}\n\t\t}()\n\t}\n\n\treturn nil\n}", "func (m *OverDIDComm) Accept(msgType string, purpose []string) bool {\n\tif msgType != OverDIDCommMsgRequestType {\n\t\treturn false\n\t}\n\n\t// if purpose not set, then match only message type.\n\tif len(m.purpose) == 0 {\n\t\treturn true\n\t}\n\n\t// match purpose if provided\n\tfor _, msgPurpose := range purpose {\n\t\tfor _, svcPurpose := range m.purpose {\n\t\t\tif msgPurpose == svcPurpose {\n\t\t\t\treturn 
true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func (c *Client) AcceptTOS(ctx context.Context, id tg.DataJSON) error {\n\t_, err := c.api.HelpAcceptTermsOfService(ctx, id)\n\treturn err\n}", "func (input *Input) AcceptsXML() bool {\n\treturn acceptsXMLRegex.MatchString(input.Header(\"Accept\"))\n}", "func (pf *File) HasService(name string) bool {\n\treturn pf.GetService(name) != nil\n}", "func MatchService(inputName string) ServiceConfig {\n\tinputName = strings.ToLower(inputName)\n\timgName, ok := keywords[inputName]\n\tif !ok {\n\t\timgName = keywords[\"default\"]\n\t}\n\treturn ServiceConfig{\n\t\tServiceName: sanitizeServiceName(inputName),\n\t\tServiceImage: imgName,\n\t}\n}", "func (o GetVpcEndpointServicesServiceOutput) AutoAcceptConnection() pulumi.BoolOutput {\n\treturn o.ApplyT(func(v GetVpcEndpointServicesService) bool { return v.AutoAcceptConnection }).(pulumi.BoolOutput)\n}", "func (f *urlTargetFilter) Accept(peer fab.Peer) bool {\n\treturn peer.URL() == f.url\n}", "func (*endpoint) Accept() (tcpip.Endpoint, *waiter.Queue, *tcpip.Error) {\n\treturn nil, nil, tcpip.ErrNotSupported\n}", "func (na *cnmNetworkAllocator) IsServiceAllocated(s *api.Service, flags ...func(*networkallocator.ServiceAllocationOpts)) bool {\n\tvar options networkallocator.ServiceAllocationOpts\n\tfor _, flag := range flags {\n\t\tflag(&options)\n\t}\n\n\tspecNetworks := serviceNetworks(s)\n\n\t// If endpoint mode is VIP and allocator does not have the\n\t// service in VIP allocated set then it needs to be allocated.\n\tif len(specNetworks) != 0 &&\n\t\t(s.Spec.Endpoint == nil ||\n\t\t\ts.Spec.Endpoint.Mode == api.ResolutionModeVirtualIP) {\n\n\t\tif _, ok := na.services[s.ID]; !ok {\n\t\t\treturn false\n\t\t}\n\n\t\tif s.Endpoint == nil || len(s.Endpoint.VirtualIPs) == 0 {\n\t\t\treturn false\n\t\t}\n\n\t\t// If the spec has networks which don't have a corresponding VIP,\n\t\t// the service needs to be allocated.\n\tnetworkLoop:\n\t\tfor _, net := range specNetworks 
{\n\t\t\tfor _, vip := range s.Endpoint.VirtualIPs {\n\t\t\t\tif vip.NetworkID == net.Target {\n\t\t\t\t\tcontinue networkLoop\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// If the spec no longer has networks attached and has a vip allocated\n\t// from previous spec the service needs to allocated.\n\tif s.Endpoint != nil {\n\tvipLoop:\n\t\tfor _, vip := range s.Endpoint.VirtualIPs {\n\t\t\tif na.IsVIPOnIngressNetwork(vip) && networkallocator.IsIngressNetworkNeeded(s) {\n\t\t\t\t// This checks the condition when ingress network is needed\n\t\t\t\t// but allocation has not been done.\n\t\t\t\tif _, ok := na.services[s.ID]; !ok {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tcontinue vipLoop\n\t\t\t}\n\t\t\tfor _, net := range specNetworks {\n\t\t\t\tif vip.NetworkID == net.Target {\n\t\t\t\t\tcontinue vipLoop\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// If the endpoint mode is DNSRR and allocator has the service\n\t// in VIP allocated set then we return to be allocated to make\n\t// sure the allocator triggers networkallocator to free up the\n\t// resources if any.\n\tif s.Spec.Endpoint != nil && s.Spec.Endpoint.Mode == api.ResolutionModeDNSRoundRobin {\n\t\tif _, ok := na.services[s.ID]; ok {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif (s.Spec.Endpoint != nil && len(s.Spec.Endpoint.Ports) != 0) ||\n\t\t(s.Endpoint != nil && len(s.Endpoint.Ports) != 0) {\n\t\treturn na.portAllocator.isPortsAllocatedOnInit(s, options.OnInit)\n\t}\n\treturn true\n}", "func acceptsOffer(spec, offer string) bool {\n\tif len(spec) >= 1 && spec[len(spec)-1] == '*' {\n\t\treturn true\n\t} else if strings.HasPrefix(spec, offer) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (o *EventAttributes) GetServiceOk() (*string, bool) {\n\tif o == nil || o.Service == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Service, true\n}", "func (s *Service) Matches(check string) bool {\n\tif serviceMatches(check, s.Name()) {\n\t\treturn true\n\t}\n\tfor _, p := range s.Provides() 
{\n\t\tif serviceMatches(check, p) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (ic IgnoredService) IsServiceIgnored(srv Service) bool {\n\tfor _, ignoredCheck := range ic.ignoredChecks {\n\t\tif ignoredCheck.Name == srv.Name {\n\t\t\tinstances := strings.Split(ignoredCheck.Instance, \" \")\n\t\t\tif len(instances) == 1 && instances[0] == \"\" {\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tfor _, instance := range instances {\n\t\t\t\thasMatched := matchInstance(instance, srv.ContainerName)\n\t\t\t\tif hasMatched {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func (m *BACnetServiceAckAtomicWriteFile) ServiceChoice() uint8 {\n\treturn 0x07\n}", "func isValidServiceName(name string) bool {\n\tswitch name {\n\tcase\n\t\t\"register\",\n\t\t\"ping\",\n\t\t\"conv_creation\",\n\t\t\"conv_manag\",\n\t\t\"msg_sender\",\n\t\t\"conv-sub\",\n\t\t\"user-manag\",\n\t\t\"login\":\n\t\treturn true\n\t}\n\treturn false\n}", "func IsTypeAService(t string) bool {\n\tfor _, serviceType := range ServiceTypes() {\n\t\tif t == serviceType {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (t *AuroraTask) IsService(isService bool) *AuroraTask {\n\tt.task.IsService = isService\n\treturn t\n}", "func (o *OfferServiceModel) GetServiceOk() (*EmbeddedServiceModel, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Service, true\n}", "func (input *BeegoInput) AcceptsXML() bool {\n\treturn acceptsXMLRegex.MatchString(input.Header(\"Accept\"))\n}", "func (ctx *Context) AcceptXML() bool {\r\n\treturn acceptsXMLRegex.MatchString(ctx.HeaderParam(HeaderAccept))\r\n}", "func (r *Automaton) IsAccept(state int) bool {\n\treturn r.isAccept.Test(uint(state))\n}", "func ServiceAvailable(ctx *Context, url string, timeout time.Duration) bool {\n\n\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url)\n\n\tclient := &http.Client{Timeout: timeout}\n\tresp, err := client.Get(url)\n\tif err != nil {\n\t\tLog(INFO, ctx, \"ServiceAvailable\", 
\"url\", url, \"error\", err, \"available\", false)\n\t\tLog(ERROR, ctx, \"ServiceAvailable\", \"url\", url, \"error\", err, \"available\", false)\n\t\treturn false\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url, \"code\", resp.StatusCode, \"available\", false)\n\t\treturn false\n\t}\n\n\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url, \"available\", true)\n\treturn true\n}", "func handleService(req typhon.Request) typhon.Response {\n\tparts := reService.FindStringSubmatch(req.URL.Path)\n\tif len(parts) != 3 {\n\t\treturn typhon.Response{Error: terrors.NotFound(\"bad_endpoint\", \"Unable to determine service endpoint.\", nil)}\n\t}\n\n\treturn handle(req, \"s-\"+parts[1], parts[2])\n}", "func (m *Message) IsService() bool {\n\tfact := false\n\n\tfact = fact || m.UserJoined != nil\n\tfact = fact || len(m.UsersJoined) > 0\n\tfact = fact || m.UserLeft != nil\n\tfact = fact || m.NewGroupTitle != \"\"\n\tfact = fact || m.NewGroupPhoto != nil\n\tfact = fact || m.GroupPhotoDeleted\n\tfact = fact || m.GroupCreated || m.SuperGroupCreated\n\tfact = fact || (m.MigrateTo != m.MigrateFrom)\n\n\treturn fact\n}", "func (r *ReconcileTFAnalytics) handleCollectorService() (bool, error) {\n\t// Define a new Collector service object\n\tcollectorService := newServicesForCollector(r.instance)\n\t// Set TFAnalytics instance as the owner and controller\n\tif err := controllerutil.SetControllerReference(r.instance, collectorService, r.scheme); err != nil {\n\t\treturn false, err\n\t}\n\t// Check if this Collector Service already exists\n\tfoundCollectorService := &corev1.Service{}\n\terr := r.client.Get(context.TODO(), types.NamespacedName{Name: collectorService.Name, Namespace: collectorService.Namespace}, foundCollectorService)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tr.reqLogger.Info(\"Creating a new Collector Service\", \"Service.Name\", collectorService.Name)\n\t\terr = r.client.Create(context.TODO(), collectorService)\n\t\tif 
err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\t// Service has been created successfully - don't requeue\n\t\treturn false, nil\n\t} else if err != nil {\n\t\treturn false, err\n\t}\n\t// Service already exists - don't requeue\n\tr.reqLogger.Info(\"Skip reconcile: Collector Service already exists\", \"Service.Name\", foundCollectorService.Name)\n\treturn false, nil\n}", "func (s *Server) Accept() chan rtmputils.ConnPair {\n\treturn s.accept\n}", "func (oc *Controller) handlePeerService(\n\tpolicy *knet.NetworkPolicy, gp *gressPolicy, np *networkPolicy) {\n\n\th := oc.watchFactory.AddFilteredServiceHandler(policy.Namespace,\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\t// Service is matched so add VIP to addressSet\n\t\t\t\toc.handlePeerServiceAdd(gp, obj)\n\t\t\t},\n\t\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t\t// If Service that has matched pods are deleted remove VIP\n\t\t\t\toc.handlePeerServiceDelete(gp, obj)\n\t\t\t},\n\t\t\tUpdateFunc: func(oldObj, newObj interface{}) {\n\t\t\t\t// If Service Is updated make sure same pods are still matched\n\t\t\t\toldSvc := oldObj.(*kapi.Service)\n\t\t\t\tnewSvc := newObj.(*kapi.Service)\n\t\t\t\tif reflect.DeepEqual(newSvc.Spec.ExternalIPs, oldSvc.Spec.ExternalIPs) &&\n\t\t\t\t\treflect.DeepEqual(newSvc.Spec.ClusterIP, oldSvc.Spec.ClusterIP) &&\n\t\t\t\t\treflect.DeepEqual(newSvc.Spec.Type, oldSvc.Spec.Type) &&\n\t\t\t\t\treflect.DeepEqual(newSvc.Status.LoadBalancer.Ingress, oldSvc.Status.LoadBalancer.Ingress) {\n\n\t\t\t\t\tklog.V(5).Infof(\"Skipping service update for: %s as change does not apply to any of .Spec.Ports, \"+\n\t\t\t\t\t\t\".Spec.ExternalIP, .Spec.ClusterIP, .Spec.Type, .Status.LoadBalancer.Ingress\", newSvc.Name)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\toc.handlePeerServiceDelete(gp, oldObj)\n\t\t\t\toc.handlePeerServiceAdd(gp, newObj)\n\t\t\t},\n\t\t}, nil)\n\tnp.svcHandlerList = append(np.svcHandlerList, h)\n}", "func IsServiceCondition(t apis.ConditionType) 
bool {\n\tswitch t {\n\tcase\n\t\tServiceConditionReady,\n\t\tServiceConditionRoutesReady,\n\t\tServiceConditionConfigurationsReady:\n\t\treturn true\n\t}\n\treturn false\n}", "func is_accepted(w http.ResponseWriter, r *http.Request) {\r\n\tfmt.Println(\"\\n Api Hit====>isAccepted\")\r\n\tvar vars = mux.Vars(r)\r\n\tvar id = vars[\"id\"]\r\n\tproc := cache[id]\r\n\tflag := isAccepted(proc)\r\n\tif flag {\r\n\t\tjson.NewEncoder(w).Encode(\"Input tokens successfully Accepted\")\r\n\t} else {\r\n\t\tjson.NewEncoder(w).Encode(\"Input tokens Rejected by the PDA\")\r\n\t}\r\n}", "func (fsm *DeployFSMContext) checkServiceReady() (bool, error) {\n\truntime := fsm.Runtime\n\t// do not check if nil for compatibility\n\tif fsm.Deployment.Extra.ServicePhaseStartAt != nil {\n\t\tstartCheckPoint := fsm.Deployment.Extra.ServicePhaseStartAt.Add(30 * time.Second)\n\t\tif time.Now().Before(startCheckPoint) {\n\t\t\tfsm.pushLog(fmt.Sprintf(\"checking too early, delay to: %s\", startCheckPoint.String()))\n\t\t\t// too early to check\n\t\t\treturn false, nil\n\t\t}\n\t}\n\tisReplicasZero := false\n\tfor _, s := range fsm.Spec.Services {\n\t\tif s.Deployments.Replicas == 0 {\n\t\t\tisReplicasZero = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif isReplicasZero {\n\t\tfsm.pushLog(\"checking status by inspect\")\n\t\t// we do double check to prevent `fake Healthy`\n\t\t// runtime.ScheduleName must have\n\t\tsg, err := fsm.getServiceGroup()\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn sg.Status == \"Ready\" || sg.Status == \"Healthy\", nil\n\t}\n\n\t// 获取addon状态\n\tserviceGroup, err := fsm.getServiceGroup()\n\tif err != nil {\n\t\tfsm.pushLog(fmt.Sprintf(\"获取service状态失败,%s\", err.Error()))\n\t\treturn false, nil\n\t}\n\tfsm.pushLog(fmt.Sprintf(\"checking status: %s, servicegroup: %v\", serviceGroup.Status, runtime.ScheduleName))\n\t// 如果状态是failed,说明服务或者job运行失败\n\tif serviceGroup.Status == apistructs.StatusFailed {\n\t\treturn false, errors.New(serviceGroup.LastMessage)\n\t}\n\t// 
如果状态是ready或者healthy,说明服务已经发起来了\n\truntimeStatus := apistructs.RuntimeStatusUnHealthy\n\tif serviceGroup.Status == apistructs.StatusReady || serviceGroup.Status == apistructs.StatusHealthy {\n\t\truntimeStatus = apistructs.RuntimeStatusHealthy\n\t}\n\truntimeItem := fsm.Runtime\n\tif runtimeItem.Status != runtimeStatus {\n\t\truntimeItem.Status = runtimeStatus\n\t\tif err := fsm.db.UpdateRuntime(runtime); err != nil {\n\t\t\tlogrus.Errorf(\"failed to update runtime status changed, runtime: %v, err: %v\", runtime.ID, err.Error())\n\t\t\treturn false, nil\n\t\t}\n\t}\n\tif runtimeStatus == apistructs.RuntimeStatusHealthy {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}", "func WaitForService(address string, logger *log.Logger) bool {\n\n\tfor i := 0; i < 12; i++ {\n\t\tconn, err := net.Dial(\"tcp\", address)\n\t\tif err != nil {\n\t\t\tlogger.Println(\"Connection error:\", err)\n\t\t} else {\n\t\t\tconn.Close()\n\t\t\tlogger.Println(fmt.Sprintf(\"Connected to %s\", address))\n\t\t\treturn true\n\t\t}\n\t\ttime.Sleep(5 * time.Second)\n\t}\n\n\treturn false\n}", "func (h *Handler) Accept() {\n}", "func RunService(ser Server) {\n\tfor {\n\t\tcon := Accept(ser)\n\t\tgo ser.HandleRequest(con)\n\t}\n}", "func Service(b bool) Option {\n\treturn func(o *Options) {\n\t\to.Discovery = b\n\t}\n}", "func (p *PaxosNode) ReceiveAcceptOK(OKMsg *paxos.PaxosMessage, slot *paxos.Slot) bool {\n\tif slot.Nhighest != OKMsg.ProposalNumber {\n\t\treturn false\n\t}\n\n\tif _, ok := slot.AcceptOKIDs[OKMsg.ProposerID]; ok {\n\t\treturn false\n\t}\n\n\tslot.AcceptOKIDs[OKMsg.ProposerID] = true\n\tslot.NumberOfAcceptOK++\n\tif slot.NumberOfAcceptOK >= ((len(slot.ClusterMembers) + 1) / 2) { // yay!!! 
majority\n\t\treturn true\n\t}\n\treturn false\n}", "func Accepts(request *http.Request, mimetype string) (bool, error) {\n\tmediaRange := request.Header.Get(\"Accept\")\n\tfor _, v := range strings.Split(mediaRange, \",\") {\n\t\tt, _, err := mime.ParseMediaType(v)\n\t\tif err != nil {\n\t\t\treturn false, ErrInvalid\n\t\t}\n\n\t\tif IsAccepted(mimetype, t) {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}", "func (cs *OutboundHTTPClient) AcceptRecipient([]string) bool {\n\treturn false\n}", "func (op *EnableServiceOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func selectService(context context.T, tracer trace.Tracer, input *ConfigurePackagePluginInput, localrepo localpackages.Repository, appCfg *appconfig.SsmagentConfig, birdwatcherFacade facade.BirdwatcherFacade, isDocumentArchive *bool) (packageservice.PackageService, error) {\n\tregion, _ := context.Identity().Region()\n\tserviceEndpoint := input.Repository\n\tresponse := &ssm.GetManifestOutput{}\n\tvar err error\n\tvar s3Endpoint string\n\tif s3Endpoint, err = s3util.GetS3Endpoint(context, region); err != nil {\n\t\ttracer.CurrentTrace().AppendErrorf(\"Failed to generate s3 endpoint - %v.\", err.Error())\n\t\treturn nil, err\n\t}\n\n\tif (appCfg != nil && appCfg.Birdwatcher.ForceEnable) || !ssms3.UseSSMS3Service(context, tracer, s3Endpoint, serviceEndpoint, region) {\n\t\t// This indicates that it would be the birdwatcher service.\n\t\t// Before creating an object of type birdwatcher here, check if the name is of document arn. If it is, return with a Document type service\n\t\tif regexp.MustCompile(documentArnPattern).MatchString(input.Name) {\n\t\t\t*isDocumentArchive = true\n\t\t\t// return a new object of type document\n\t\t\treturn birdwatcherservice.NewDocumentArchive(context, birdwatcherFacade, localrepo), nil\n\t\t}\n\t\tif input.Version != \"\" {\n\t\t\t// Birdwatcher version pattern and document version name pattern is different. 
If the pattern doesn't match Birdwatcher,\n\t\t\t// we assume document and continue, since birdwatcher will error out with ValidationException.\n\t\t\t// This could also happen if there is a typo in the birdwatcher version, but we assume Document and continue.\n\t\t\tif !regexp.MustCompile(birdwatcherVersionPattern).MatchString(input.Version) {\n\t\t\t\t*isDocumentArchive = true\n\t\t\t\t// return a new object of type document\n\t\t\t\treturn birdwatcherservice.NewDocumentArchive(context, birdwatcherFacade, localrepo), nil\n\t\t\t}\n\t\t}\n\n\t\t// If not, make a call to GetManifest and try to figure out if it is birdwatcher or document archive.\n\t\tversion := input.Version\n\t\tif packageservice.IsLatest(version) {\n\t\t\tversion = packageservice.Latest\n\t\t}\n\t\tresponse, err = birdwatcherFacade.GetManifest(\n\t\t\t&ssm.GetManifestInput{\n\t\t\t\tPackageName: &input.Name,\n\t\t\t\tPackageVersion: &version,\n\t\t\t},\n\t\t)\n\n\t\t// If the error returned is the \"ResourceNotFoundException\", create a service with document archive\n\t\t// if any other response, create a service of birdwatcher type\n\t\tif err != nil {\n\t\t\tif strings.Contains(err.Error(), resourceNotFoundException) {\n\t\t\t\t*isDocumentArchive = true\n\t\t\t\t// return a new object of type document\n\t\t\t\treturn birdwatcherservice.NewDocumentArchive(context, birdwatcherFacade, localrepo), nil\n\t\t\t} else {\n\t\t\t\ttracer.CurrentTrace().AppendErrorf(\"Error returned for GetManifest - %v.\", err.Error())\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\t*isDocumentArchive = false\n\n\t\t// return a new object of type birdwatcher\n\t\tbirdWatcherArchiveContext := make(map[string]string)\n\t\tbirdWatcherArchiveContext[\"packageName\"] = input.Name\n\t\tbirdWatcherArchiveContext[\"packageVersion\"] = input.Version\n\t\tbirdWatcherArchiveContext[\"manifest\"] = *response.Manifest\n\t\treturn birdwatcherservice.NewBirdwatcherArchive(context, birdwatcherFacade, localrepo, 
birdWatcherArchiveContext), nil\n\t}\n\n\ttracer.CurrentTrace().AppendInfof(\"S3 repository is marked active\")\n\treturn ssms3.New(context, s3Endpoint, serviceEndpoint, region), nil\n}", "func (s *State) acceptable(addr string, point string) bool {\n\tif s.optionalValidator == nil {\n\t\tif addr == point {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\ts.vo.Lock()\n\tstate := s.optionalValidator(addr, point)\n\ts.vo.Unlock()\n\treturn state\n}", "func (q *Quotas) TryAccept(issuerKey utils.IssuerKey) (bool, int) {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\tif quotas, ok := q.issuerToQuotas[issuerKey]; ok {\n\t\treturn quotas.rateLimiter.TryAccept(), quotas.requestsPerDay\n\t}\n\treturn false, 0\n}", "func DoAcceptDfsServer(ss chan<- interface{}, conn *grpc.ClientConn, clientId string) {\n\tdiscoveryClient := discovery.NewDiscoveryServiceClient(conn)\n\tstream, err := discoveryClient.GetDfsServers(context.Background(),\n\t\t&discovery.GetDfsServersReq{\n\t\t\tClient: &discovery.DfsClient{\n\t\t\t\tId: clientId,\n\t\t\t},\n\t\t})\n\n\tfor err == nil {\n\t\tvar rep *discovery.GetDfsServersRep\n\t\trep, err = stream.Recv()\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"Failed to recv from stream %v\", err)\n\t\t\tbreak // break the whole loop.\n\t\t}\n\n\t\tswitch union := rep.GetDfsServerUnion.(type) {\n\t\tdefault:\n\t\t\tglog.Warningf(\"Failed to receive DfsServer list: unexpected type %T\", union)\n\t\tcase *discovery.GetDfsServersRep_Sl: // server list\n\t\t\tss <- union.Sl.GetServer()\n\t\tcase *discovery.GetDfsServersRep_Hb: // heartbeat\n\t\t\tss <- union.Hb.Timestamp\n\t\t}\n\t}\n\n\tclose(ss)\n}", "func (s *Seller) acceptBid(offer int) bool {\r\n\tif !s.Object.getSold() && offer >= s.bidAccept {\r\n\t\ts.Object.setSold(true)\r\n\t\treturn true\r\n\t} else {\r\n\t\treturn false\r\n\t}\r\n}", "func (s *Seller) acceptBid(offer int) bool {\r\n\tif !s.Object.getSold() && offer >= s.bidAccept {\r\n\t\ts.Object.setSold(true)\r\n\t\treturn true\r\n\t} else 
{\r\n\t\treturn false\r\n\t}\r\n}", "func (s *JSONHTTPServer) StartService(\n\tctx context.Context,\n\tservices ...ServiceAPI,\n) <-chan struct{} {\n\tstarted := make(chan struct{})\n\n\t// This will block, so run it in a goroutine\n\tgo s.startInternal(\n\t\tctx,\n\t\tstarted,\n\t\tservices...)\n\n\treturn started\n}", "func (s *Server) Accept() chan *gortsplib.ServerConn {\n\treturn s.accept\n}", "func AlwaysService(proxyInfo *ProxyInfo) bool {\n\treturn true\n}", "func ProbablyInstallAsService(opts *ServiceInstallerOptions) {\r\n\targs := os.Args\r\n\tisServiceInstaller := false\r\n\tpos := 0\r\n\tfor i, v := range args {\r\n\t\tif strings.HasPrefix(v, \"__installservice\") {\r\n\t\t\tisServiceInstaller = true\r\n\t\t\tpos = i\r\n\t\t}\r\n\t}\r\n\tif !isServiceInstaller {\r\n\t\treturn\r\n\t}\r\n\targs = append(args[:pos], args[pos+1:]...)\r\n\r\n\tif runtime.GOOS == \"windows\" {\r\n\t\tlog.Fatalln(\"installation as service under windows is not yet supported :(\")\r\n\t}\r\n\r\n\tlog.SetFlags(0)\r\n\tlog.Println(\"...will modify /etc/init.d\")\r\n\tbeServiceInstaller(args, opts)\r\n\r\n\t// do not run the app!\r\n\tos.Exit(0)\r\n}", "func AcceptArithServiceClient(lis net.Listener, x ArithService) {\n\tsrv := rpc.NewServer()\n\tif err := srv.RegisterName(\"ArithService\", x); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor {\n\t\tconn, err := lis.Accept()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"lis.Accept(): %v\\n\", err)\n\t\t}\n\t\tgo srv.ServeCodec(protorpc.NewServerCodec(conn))\n\t}\n}", "func CheckVirtualService(virtualService IstioObject, namespace string, serviceName string, subsets []string) bool {\n\tif virtualService == nil || virtualService.GetSpec() == nil || subsets == nil {\n\t\treturn false\n\t}\n\tif len(subsets) > 0 && FilterByHost(virtualService.GetSpec(), serviceName) {\n\t\tif http, ok := virtualService.GetSpec()[\"http\"]; ok && checkSubsetRoute(http, serviceName, subsets) {\n\t\t\treturn true\n\t\t}\n\t\tif tcp, ok := 
virtualService.GetSpec()[\"tcp\"]; ok && checkSubsetRoute(tcp, serviceName, subsets) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (input *Input) AcceptsJSON() bool {\n\treturn acceptsJSONRegex.MatchString(input.Header(\"Accept\"))\n}", "func ServiceIsReady(resource common.ComponentResource) (bool, error) {\n\tvar service corev1.Service\n\tif err := getObject(resource, &service, true); err != nil {\n\t\treturn false, err\n\t}\n\n\t// if we have a name that is empty, we know we did not find the object\n\tif service.Name == \"\" {\n\t\treturn false, nil\n\t}\n\n\t// return if we have an external service type\n\tif service.Spec.Type == corev1.ServiceTypeExternalName {\n\t\treturn true, nil\n\t}\n\n\t// ensure a cluster ip address exists for cluster ip types\n\tif service.Spec.ClusterIP != corev1.ClusterIPNone && len(service.Spec.ClusterIP) == 0 {\n\t\treturn false, nil\n\t}\n\n\t// ensure a load balancer ip or hostname is present\n\tif service.Spec.Type == corev1.ServiceTypeLoadBalancer {\n\t\tif len(service.Status.LoadBalancer.Ingress) == 0 {\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\treturn true, nil\n}", "func ServiceIsReady(resource common.ComponentResource) (bool, error) {\n\tvar service corev1.Service\n\tif err := getObject(resource, &service, true); err != nil {\n\t\treturn false, err\n\t}\n\n\t// if we have a name that is empty, we know we did not find the object\n\tif service.Name == \"\" {\n\t\treturn false, nil\n\t}\n\n\t// return if we have an external service type\n\tif service.Spec.Type == corev1.ServiceTypeExternalName {\n\t\treturn true, nil\n\t}\n\n\t// ensure a cluster ip address exists for cluster ip types\n\tif service.Spec.ClusterIP != corev1.ClusterIPNone && len(service.Spec.ClusterIP) == 0 {\n\t\treturn false, nil\n\t}\n\n\t// ensure a load balancer ip or hostname is present\n\tif service.Spec.Type == corev1.ServiceTypeLoadBalancer {\n\t\tif len(service.Status.LoadBalancer.Ingress) == 0 {\n\t\t\treturn false, 
nil\n\t\t}\n\t}\n\n\treturn true, nil\n}", "func (o *CatalogEntry) GetServiceOk() (*string, bool) {\n\tif o == nil || o.Service == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Service, true\n}", "func (c *Contract) Accept(ctx TransactionContextInterface, jeweler string, paperNumber string, acceptDate string) (*InventoryFinancingPaper, error) {\r\n\tpaper, err := ctx.GetPaperList().GetPaper(jeweler, paperNumber)\r\n\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\tif paper.IsReadyREPO() {\r\n\t\tpaper.SetAccepted()\r\n\t}\r\n\r\n\tif !paper.IsAccepted() {\r\n\t\treturn nil, fmt.Errorf(\"inventory paper %s:%s is not accepted by bank. Current state = %s\", jeweler, paperNumber, paper.GetState())\r\n\t}\r\n\r\n\terr = ctx.GetPaperList().UpdatePaper(paper)\r\n\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\tfmt.Printf(\"The bank %q has accepted the inventory financing paper %q:%q ,The accept date is %q.\\nCurrent state is %q\", paper.GetBank(), paper.GetEvaluator(), paperNumber, acceptDate, paper.GetState())\r\n\treturn paper, nil\r\n}", "func (fpp *FoPoPattern) Match(serviceName string) bool {\n\tvar pattern string\n\tvar matched bool\n\tfor _, pattern = range fpp.ServicePatterns {\n\t\tmatched, _ = filepath.Match(pattern, serviceName)\n\t\tif matched {\n\t\t\treturn matched\n\t\t}\n\t}\n\treturn false\n}", "func (l *SensorListener) Accept() (connectorAddress string, state bool, err error) {\n // Keep looping until we get a notification message\n for {\n resp, err := l.conn.ReadResponse()\n if err != nil {\n return \"\", false, err\n }\n\n if strings.HasPrefix(resp, \"sensornotify,\") {\n resp = strings.TrimPrefix(resp, \"sensornotify,\")\n split := strings.Split(resp, \",\")\n\n if len(split) >= 2 {\n state, err = strconv.ParseBool(split[1])\n if err == nil {\n return split[0], state, nil\n }\n }\n }\n }\n}", "func (ctx *Context) AcceptJSON() bool {\r\n\treturn acceptsJSONRegex.MatchString(ctx.HeaderParam(HeaderAccept))\r\n}", "func (e 
*EdgeRequestContext) Service() (service Service, ok bool) {\n\ttoken := e.AuthToken()\n\tif token == nil {\n\t\treturn\n\t}\n\treturn Service(*token), true\n}", "func (t *raftLayer) Accept() (c net.Conn, err error) {\n\treturn t.listener.Accept()\n}", "func (in *ApplicationMapping) IsServiceEnabled(id string) bool {\n\tif in.IsAllApplicationServicesEnabled() {\n\t\treturn true\n\t}\n\tfor _, svc := range in.Spec.Services {\n\t\tif svc.ID == id {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (streamLayer *StreamLayer) Accept() (net.Conn, error) {\n\tconn, err := streamLayer.listener.Accept()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb := make([]byte, 1)\n\t_, err = conn.Read(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif bytes.Compare([]byte{byte(RaftRPC)}, b) != 0 {\n\t\treturn nil, fmt.Errorf(\"not a raft rpc\")\n\t}\n\tif streamLayer.serverTLSConfig != nil {\n\t\treturn tls.Server(conn, streamLayer.serverTLSConfig), nil\n\t}\n\treturn conn, nil\n}", "func (r *ReconcileTFAnalytics) handleAlarmGenService() (bool, error) {\n\t// Define a new AlarmGen service object\n\tsvcmService := newServicesForAlarmGen(r.instance)\n\t// Set TFAnalytics instance as the owner and controller\n\tif err := controllerutil.SetControllerReference(r.instance, svcmService, r.scheme); err != nil {\n\t\treturn false, err\n\t}\n\t// Check if this AlarmGen Service already exists\n\tfoundSvcmService := &corev1.Service{}\n\terr := r.client.Get(context.TODO(), types.NamespacedName{Name: svcmService.Name, Namespace: svcmService.Namespace}, foundSvcmService)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tr.reqLogger.Info(\"Creating a new AlarmGen Service\", \"Service.Name\", svcmService.Name)\n\t\terr = r.client.Create(context.TODO(), svcmService)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\t// Service has been created successfully - don't requeue\n\t\treturn false, nil\n\t} else if err != nil {\n\t\treturn false, err\n\t}\n\t// Service already exists - 
don't requeue\n\tr.reqLogger.Info(\"Skip reconcile: AlarmGen Service already exists\", \"Service.Name\", foundSvcmService.Name)\n\treturn false, nil\n}", "func (_Dospayment *DospaymentCaller) HasServiceFee(opts *bind.CallOpts, payer common.Address, serviceType *big.Int) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Dospayment.contract.Call(opts, out, \"hasServiceFee\", payer, serviceType)\n\treturn *ret0, err\n}", "func (r *RPCIngressGateway) Accept(lisID *uint16, resp *AcceptResp) (err error) {\n\tdefer rpcutil.LogCall(r.log, \"Accept\", lisID)(resp, &err)\n\n\tlog := r.log.WithField(\"func\", \"Accept\")\n\n\tlog.Debug(\"Getting listener...\")\n\tlis, err := r.getListener(*lisID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Debug(\"Reserving next ID...\")\n\tconnID, free, err := r.cm.ReserveNextID()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Debug(\"Accepting conn...\")\n\tconn, err := lis.Accept()\n\tif err != nil {\n\t\tfree()\n\t\treturn err\n\t}\n\n\tlog.Debug(\"Wrapping conn...\")\n\twrappedConn, err := appnet.WrapConn(conn)\n\tif err != nil {\n\t\tfree()\n\t\treturn err\n\t}\n\n\tif err := r.cm.Set(*connID, wrappedConn); err != nil {\n\t\tif cErr := wrappedConn.Close(); cErr != nil {\n\t\t\tr.log.WithError(cErr).Error(\"Failed to close wrappedConn.\")\n\t\t}\n\t\tfree()\n\t\treturn err\n\t}\n\n\tremote := wrappedConn.RemoteAddr().(appnet.Addr)\n\n\tresp.Remote = remote\n\tresp.ConnID = *connID\n\n\treturn nil\n}", "func isServiceStubType(t reflect.Type) bool {\n\tif isStructPtr(t) == false {\n\t\treturn false\n\t} else if t.Implements(stubType) == false {\n\t\treturn false\n\t}\n\t// Return success\n\treturn true\n}", "func (m Model) acceptRequest() (Model, tea.Cmd) { // nolint: unparam\n\tm.lh.response <- true\n\treturn m, nil\n}", "func (customer *Customer) acceptPrice(check bool) (err error) {\n\n\tvar currentTrigger ssm.Trigger\n\n\tcurrentTrigger = TriggerCustomerCommandAcceptPrice\n\n\tswitch check {\n\n\tcase 
true:\n\t\t// Do a check if state machine is in correct state for triggering event\n\t\tif customer.CustomerStateMachine.CanFire(currentTrigger.Key) == true {\n\t\t\terr = nil\n\n\t\t} else {\n\n\t\t\terr = customer.CustomerStateMachine.Fire(currentTrigger.Key, nil)\n\t\t}\n\n\tcase false:\n\t\t// Execute Trigger\n\n\t\tresp, err := customerClient.AcceptPrice(context.Background(), useEnvironment)\n\t\tif err != nil {\n\t\t\tlogMessagesWithError(4, \"Could not send 'AcceptPrice' to address: \"+taxi_address_to_dial+\". Error Message:\", err)\n\t\t\tbreak\n\n\t\t} else {\n\n\t\t\t//Save last PriceAccept respons\n\t\t\tcustomer.lastRecievedPriceAccept = resp\n\n\t\t\tif resp.GetAcknack() == true {\n\t\t\t\tlogMessagesWithOutError(4, \"'AcceptPrice' on address \"+taxi_address_to_dial+\" successfully processed\")\n\t\t\t\tlogMessagesWithOutError(4, \"Response Message (Comments): \"+resp.Comments)\n\n\t\t\t\t// Moved to state machine ---go receiveTaxiInvoices(customerClient, useEnvironment)\n\n\t\t\t} else {\n\t\t\t\tlogMessagesWithOutError(4, \"'AcceptPrice' on address \"+taxi_address_to_dial+\" NOT successfully processed\")\n\t\t\t\tlogMessagesWithOutError(4, \"Response Message (Comments): \"+resp.Comments)\n\n\t\t\t\terr = errors.New(\"'AcceptPrice' on address \" + taxi_address_to_dial + \" NOT successfully processed\")\n\t\t\t}\n\t\t}\n\n\t\tif err == nil && resp.GetAcknack() == true {\n\t\t\terr = customer.CustomerStateMachine.Fire(currentTrigger.Key, nil)\n\t\t\tif err != nil {\n\t\t\t\tlogTriggerStateError(4, customer.CustomerStateMachine.State(), currentTrigger, err)\n\n\t\t\t}\n\t\t}\n\t}\n\n\treturn err\n\n}", "func (t *Task) Accept() (interface{}, error) {\n\tpar := map[string]interface{}{\n\t\t\"taskid\": t.taskId,\n\t\t\"result\": nil,\n\t}\n\treturn t.nc.Exec(\"task.result\", par)\n}", "func (s *Server) Accept() error {\n\tvar tempDelay time.Duration // how long to sleep on accept failure\n\tfor {\n\t\tc, e := s.Listener.Accept()\n\t\tif e != nil {\n\t\t\tif 
ne, ok := e.(net.Error); ok && ne.Temporary() {\n\t\t\t\tif tempDelay == 0 {\n\t\t\t\t\ttempDelay = 5 * time.Millisecond\n\t\t\t\t} else {\n\t\t\t\t\ttempDelay *= 2\n\t\t\t\t}\n\t\t\t\tif max := 1 * time.Second; tempDelay > max {\n\t\t\t\t\ttempDelay = max\n\t\t\t\t}\n\t\t\t\ttime.Sleep(tempDelay)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn e\n\t\t}\n\t\tgo s.accept(c)\n\t}\n}", "func (_this *StoppableListener) Accept() (conn net.Conn, err error) {\n\tconnc := make(chan *net.TCPConn, 1)\n\terrc := make(chan error, 1)\n\n\tgo func() {\n\t\ttc, err := _this.AcceptTCP()\n\t\tif err != nil {\n\t\t\terrc <- err\n\t\t\treturn\n\t\t}\n\t\tconnc <- tc\n\t}()\n\n\tselect {\n\tcase <-_this.stopc:\n\t\treturn nil, errors.New(\"Server stopped.\")\n\tcase err := <-errc:\n\t\treturn nil, err\n\tcase tc := <-connc:\n\t\ttc.SetKeepAlive(true)\n\t\ttc.SetKeepAlivePeriod(3 * time.Minute)\n\t\treturn tc, nil\n\t}\n}", "func (input *BeegoInput) AcceptsJSON() bool {\n\treturn acceptsJSONRegex.MatchString(input.Header(\"Accept\"))\n}", "func (r *ReconcileTFAnalytics) handleTopologyService() (bool, error) {\n\t// Define a new Topology service object\n\ttopologyService := newServicesForTopology(r.instance)\n\t// Set TFAnalytics instance as the owner and controller\n\tif err := controllerutil.SetControllerReference(r.instance, topologyService, r.scheme); err != nil {\n\t\treturn false, err\n\t}\n\t// Check if this Topology Service already exists\n\tfoundTopologyService := &corev1.Service{}\n\terr := r.client.Get(context.TODO(), types.NamespacedName{Name: topologyService.Name, Namespace: topologyService.Namespace}, foundTopologyService)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tr.reqLogger.Info(\"Creating a new Topology Service\", \"Service.Name\", topologyService.Name)\n\t\terr = r.client.Create(context.TODO(), topologyService)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\t// Service has been created successfully - don't requeue\n\t\treturn false, nil\n\t} else if err != 
nil {\n\t\treturn false, err\n\t}\n\t// Service already exists - don't requeue\n\tr.reqLogger.Info(\"Skip reconcile: Topology Service already exists\", \"Service.Name\", foundTopologyService.Name)\n\treturn false, nil\n}", "func WaitForService(org string, waitService string, waitTimeout int, pattern string) {\n\n\tconst UpdateThreshold = 5 // How many service check iterations before updating the user with a msg on the console.\n\tconst ServiceUpThreshold = 5 // How many service check iterations before deciding that the service is up.\n\n\t// get message printer\n\tmsgPrinter := i18n.GetMessagePrinter()\n\n\t// Verify that the input makes sense.\n\tif waitTimeout < 0 {\n\t\tcliutils.Fatal(cliutils.CLI_INPUT_ERROR, msgPrinter.Sprintf(\"--timeout must be a positive integer.\"))\n\t}\n\n\t// 1. Wait for the /service API to return a service with url that matches the input\n\t// 2. While waiting, report when at least 1 agreement is formed\n\n\tmsgPrinter.Printf(\"Waiting for up to %v seconds for service %v/%v to start...\", waitTimeout, org, waitService)\n\tmsgPrinter.Println()\n\n\t// Save the most recent set of services here.\n\tservices := api.AllServices{}\n\n\t// Start monitoring the agent's /service API, looking for the presence of the input waitService.\n\tupdateCounter := UpdateThreshold\n\tserviceUp := 0\n\tserviceFailed := false\n\tnow := uint64(time.Now().Unix())\n\tfor (uint64(time.Now().Unix())-now < uint64(waitTimeout) || serviceUp > 0) && !serviceFailed {\n\t\ttime.Sleep(time.Duration(3) * time.Second)\n\t\tif _, err := cliutils.HorizonGet(\"service\", []int{200}, &services, true); err != nil {\n\t\t\tcliutils.Fatal(cliutils.CLI_GENERAL_ERROR, err.Error())\n\t\t}\n\n\t\t// Active services are services that have at least been started. When the execution time becomes non-zero\n\t\t// it means the service container is started. 
The container could still fail quickly after it is started.\n\t\tinstances := services.Instances[\"active\"]\n\t\tfor _, serviceInstance := range instances {\n\n\t\t\tif !(serviceInstance.SpecRef == waitService && serviceInstance.Org == org) {\n\t\t\t\t// Skip elements for other services\n\t\t\t\tcontinue\n\n\t\t\t} else if serviceInstance.ExecutionStartTime != 0 {\n\t\t\t\t// The target service is started. If stays up then declare victory and return.\n\t\t\t\tif serviceUp >= ServiceUpThreshold {\n\t\t\t\t\tmsgPrinter.Printf(\"Service %v/%v is started.\", org, waitService)\n\t\t\t\t\tmsgPrinter.Println()\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// The service could fail quickly if we happened to catch it just as it was starting, so make sure\n\t\t\t\t// the service stays up.\n\t\t\t\tserviceUp += 1\n\n\t\t\t} else if serviceUp > 0 {\n\t\t\t\t// The service has been up for at least 1 iteration, so it's absence means that it failed.\n\t\t\t\tserviceUp = 0\n\t\t\t\tmsgPrinter.Printf(\"The service %v/%v has failed.\", org, waitService)\n\t\t\t\tmsgPrinter.Println()\n\t\t\t\tserviceFailed = true\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\n\t\t// Service is not there yet. Update the user on progress, and wait for a bit.\n\t\tupdateCounter = updateCounter - 1\n\t\tif updateCounter <= 0 && !serviceFailed {\n\t\t\tupdateCounter = UpdateThreshold\n\t\t\tmsgPrinter.Printf(\"Waiting for service %v/%v to start executing.\", org, waitService)\n\t\t\tmsgPrinter.Println()\n\t\t}\n\t}\n\n\t// If we got to this point, then there is a problem.\n\tmsgPrinter.Printf(\"Timeout waiting for service %v/%v to successfully start. Analyzing possible reasons for the timeout...\", org, waitService)\n\tmsgPrinter.Println()\n\n\t// Let's see if we can provide the user with some help figuring out what's going on.\n\tfound := false\n\tfor _, serviceInstance := range services.Instances[\"active\"] {\n\n\t\t// 1. 
Maybe the service is there but just hasnt started yet.\n\t\tif serviceInstance.SpecRef == waitService && serviceInstance.Org == org {\n\t\t\tmsgPrinter.Printf(\"Service %v/%v is deployed to the node, but not executing yet.\", org, waitService)\n\t\t\tmsgPrinter.Println()\n\t\t\tfound = true\n\n\t\t\t// 2. Maybe the service has encountered an error.\n\t\t\tif serviceInstance.ExecutionStartTime == 0 && serviceInstance.ExecutionFailureCode != 0 {\n\t\t\t\tmsgPrinter.Printf(\"Service %v/%v execution failed: %v.\", org, waitService, serviceInstance.ExecutionFailureDesc)\n\t\t\t\tmsgPrinter.Println()\n\t\t\t\tserviceFailed = true\n\t\t\t} else {\n\t\t\t\tmsgPrinter.Printf(\"Service %v/%v might need more time to start executing, continuing analysis.\", org, waitService)\n\t\t\t\tmsgPrinter.Println()\n\t\t\t}\n\t\t\tbreak\n\n\t\t}\n\t}\n\n\t// 3. The service might not even be there at all.\n\tif !found {\n\t\tmsgPrinter.Printf(\"Service %v/%v is not deployed to the node, continuing analysis.\", org, waitService)\n\t\tmsgPrinter.Println()\n\t}\n\n\t// 4. Are there any agreements being made? Check for only non-archived agreements. Skip this if we know the service failed\n\t// because we know there are agreements.\n\tif !serviceFailed {\n\t\tmsgPrinter.Println()\n\t\tags := agreement.GetAgreements(false)\n\t\tif len(ags) != 0 {\n\t\t\tmsgPrinter.Printf(\"Currently, there are %v active agreements on this node. Use `hzn agreement list' to see the agreements that have been formed so far.\", len(ags))\n\t\t\tmsgPrinter.Println()\n\t\t} else {\n\t\t\tmsgPrinter.Printf(\"Currently, there are no active agreements on this node.\")\n\t\t\tmsgPrinter.Println()\n\t\t}\n\t}\n\n\t// 5. Scan the event log for errors related to this service. 
This should always be done if the service did not come up\n\t// successfully.\n\teLogs := make([]persistence.EventLogRaw, 0)\n\tcliutils.HorizonGet(\"eventlog?severity=error\", []int{200}, &eLogs, true)\n\tmsgPrinter.Println()\n\tif len(eLogs) == 0 {\n\t\tmsgPrinter.Printf(\"Currently, there are no errors recorded in the node's event log.\")\n\t\tmsgPrinter.Println()\n\t\tif pattern == \"\" {\n\t\t\tmsgPrinter.Printf(\"Use the 'hzn deploycheck all -b' or 'hzn deploycheck all -B' command to verify that node, service configuration and deployment policy is compatible.\")\n\t\t} else {\n\t\t\tmsgPrinter.Printf(\"Use the 'hzn deploycheck all -p' command to verify that node, service configuration and pattern is compatible.\")\n\t\t}\n\t\tmsgPrinter.Println()\n\t} else {\n\t\tmsgPrinter.Printf(\"The following errors were found in the node's event log and are related to %v/%v. Use 'hzn eventlog list -s severity=error -l' to see the full detail of the errors.\", org, waitService)\n\t\tmsgPrinter.Println()\n\n\t\t// Scan the log for events related to the service we're waiting for.\n\t\tsel := persistence.Selector{\n\t\t\tOp: \"=\",\n\t\t\tMatchValue: waitService,\n\t\t}\n\t\tmatch := make(map[string][]persistence.Selector)\n\t\tmatch[\"service_url\"] = []persistence.Selector{sel}\n\n\t\tfor _, el := range eLogs {\n\t\t\tt := time.Unix(int64(el.Timestamp), 0)\n\t\t\tprintLog := false\n\t\t\tif strings.Contains(el.Message, waitService) {\n\t\t\t\tprintLog = true\n\t\t\t} else if es, err := persistence.GetRealEventSource(el.SourceType, el.Source); err != nil {\n\t\t\t\tcliutils.Fatal(cliutils.CLI_GENERAL_ERROR, \"unable to convert eventlog source, error: %v\", err)\n\t\t\t} else if (*es).Matches(match) {\n\t\t\t\tprintLog = true\n\t\t\t}\n\n\t\t\t// Put relevant events on the console.\n\t\t\tif printLog {\n\t\t\t\tmsgPrinter.Printf(\"%v: %v\", t.Format(\"2006-01-02 15:04:05\"), el.Message)\n\t\t\t\tmsgPrinter.Println()\n\t\t\t}\n\t\t}\n\t}\n\n\t// Done 
analyzing\n\tmsgPrinter.Printf(\"Analysis complete.\")\n\tmsgPrinter.Println()\n\n\treturn\n}", "func (input *Input) AcceptsHTML() bool {\n\treturn acceptsHTMLRegex.MatchString(input.Header(\"Accept\"))\n}", "func (ssd StatelessServiceDescription) AsServiceDescription() (*ServiceDescription, bool) {\n\treturn nil, false\n}", "func (e *connectionedEndpoint) Accept(ctx context.Context, peerAddr *Address) (Endpoint, *syserr.Error) {\n\te.Lock()\n\n\tif !e.ListeningLocked() {\n\t\te.Unlock()\n\t\treturn nil, syserr.ErrInvalidEndpointState\n\t}\n\n\tne, err := e.getAcceptedEndpointLocked(ctx)\n\te.Unlock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif peerAddr != nil {\n\t\tne.Lock()\n\t\tc := ne.connected\n\t\tne.Unlock()\n\t\tif c != nil {\n\t\t\taddr, err := c.GetLocalAddress()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, syserr.TranslateNetstackError(err)\n\t\t\t}\n\t\t\t*peerAddr = addr\n\t\t}\n\t}\n\treturn ne, nil\n}", "func (o *EventAttributes) HasService() bool {\n\treturn o != nil && o.Service != nil\n}", "func WaitForService(ctx context.Context, conn *dbus.Conn, svc string) error {\n\t// If the name is already owned, we're done.\n\tif ServiceOwned(ctx, conn, svc) {\n\t\treturn nil\n\t}\n\n\tsw, err := NewSignalWatcher(ctx, conn, MatchSpec{\n\t\tType: \"signal\",\n\t\tPath: busPath,\n\t\tInterface: busInterface,\n\t\tSender: busName,\n\t\tMember: \"NameOwnerChanged\",\n\t\tArg0: svc,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer sw.Close(ctx)\n\n\t// Make sure the name wasn't taken while we were creating the watcher.\n\tif ServiceOwned(ctx, conn, svc) {\n\t\treturn nil\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase sig := <-sw.Signals:\n\t\t\tif len(sig.Body) < 3 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Skip signals about this service if the \"new owner\" arg is empty.\n\t\t\tif v, ok := sig.Body[2].(string); !ok || v == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Otherwise, we're done.\n\t\t\treturn nil\n\t\tcase <-ctx.Done():\n\t\t\treturn 
ctx.Err()\n\t\t}\n\t}\n}", "func ApplyServiceMonitor(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) {\n\tnamespace := required.GetNamespace()\n\texisting, err := client.Resource(serviceMonitorGVR).Namespace(namespace).Get(ctx, required.GetName(), metav1.GetOptions{})\n\tif errors.IsNotFound(err) {\n\t\tnewObj, createErr := client.Resource(serviceMonitorGVR).Namespace(namespace).Create(ctx, required, metav1.CreateOptions{})\n\t\tif createErr != nil {\n\t\t\trecorder.Warningf(\"ServiceMonitorCreateFailed\", \"Failed to create ServiceMonitor.monitoring.coreos.com/v1: %v\", createErr)\n\t\t\treturn nil, true, createErr\n\t\t}\n\t\trecorder.Eventf(\"ServiceMonitorCreated\", \"Created ServiceMonitor.monitoring.coreos.com/v1 because it was missing\")\n\t\treturn newObj, true, nil\n\t}\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\texistingCopy := existing.DeepCopy()\n\n\ttoUpdate, modified, err := ensureGenericSpec(required, existingCopy, noDefaulting, equality.Semantic)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tif !modified {\n\t\treturn nil, false, nil\n\t}\n\n\tif klog.V(4).Enabled() {\n\t\tklog.Infof(\"ServiceMonitor %q changes: %v\", namespace+\"/\"+required.GetName(), JSONPatchNoError(existing, toUpdate))\n\t}\n\n\tnewObj, err := client.Resource(serviceMonitorGVR).Namespace(namespace).Update(ctx, toUpdate, metav1.UpdateOptions{})\n\tif err != nil {\n\t\trecorder.Warningf(\"ServiceMonitorUpdateFailed\", \"Failed to update ServiceMonitor.monitoring.coreos.com/v1: %v\", err)\n\t\treturn nil, true, err\n\t}\n\n\trecorder.Eventf(\"ServiceMonitorUpdated\", \"Updated ServiceMonitor.monitoring.coreos.com/v1 because it changed\")\n\treturn newObj, true, err\n}", "func (j *AuroraJob) IsService(isService bool) Job {\n\tj.jobConfig.TaskConfig.IsService = isService\n\treturn j\n}", "func (m *OutboundMock) CanAccept(p Inbound) (r bool) {\n\tcounter := 
atomic.AddUint64(&m.CanAcceptPreCounter, 1)\n\tdefer atomic.AddUint64(&m.CanAcceptCounter, 1)\n\n\tif len(m.CanAcceptMock.expectationSeries) > 0 {\n\t\tif counter > uint64(len(m.CanAcceptMock.expectationSeries)) {\n\t\t\tm.t.Fatalf(\"Unexpected call to OutboundMock.CanAccept. %v\", p)\n\t\t\treturn\n\t\t}\n\n\t\tinput := m.CanAcceptMock.expectationSeries[counter-1].input\n\t\ttestify_assert.Equal(m.t, *input, OutboundMockCanAcceptInput{p}, \"Outbound.CanAccept got unexpected parameters\")\n\n\t\tresult := m.CanAcceptMock.expectationSeries[counter-1].result\n\t\tif result == nil {\n\t\t\tm.t.Fatal(\"No results are set for the OutboundMock.CanAccept\")\n\t\t\treturn\n\t\t}\n\n\t\tr = result.r\n\n\t\treturn\n\t}\n\n\tif m.CanAcceptMock.mainExpectation != nil {\n\n\t\tinput := m.CanAcceptMock.mainExpectation.input\n\t\tif input != nil {\n\t\t\ttestify_assert.Equal(m.t, *input, OutboundMockCanAcceptInput{p}, \"Outbound.CanAccept got unexpected parameters\")\n\t\t}\n\n\t\tresult := m.CanAcceptMock.mainExpectation.result\n\t\tif result == nil {\n\t\t\tm.t.Fatal(\"No results are set for the OutboundMock.CanAccept\")\n\t\t}\n\n\t\tr = result.r\n\n\t\treturn\n\t}\n\n\tif m.CanAcceptFunc == nil {\n\t\tm.t.Fatalf(\"Unexpected call to OutboundMock.CanAccept. %v\", p)\n\t\treturn\n\t}\n\n\treturn m.CanAcceptFunc(p)\n}" ]
[ "0.63734", "0.6060541", "0.59677523", "0.58842057", "0.5720938", "0.5569348", "0.5523225", "0.54433316", "0.5340439", "0.53300637", "0.5299485", "0.5284296", "0.52749", "0.52703655", "0.52669793", "0.5251331", "0.52413315", "0.52043164", "0.51288015", "0.5126478", "0.511291", "0.5094832", "0.5026353", "0.50225866", "0.5018173", "0.5017211", "0.50089806", "0.4993258", "0.49862543", "0.49840206", "0.49737436", "0.49688202", "0.4953105", "0.49435169", "0.49395347", "0.49356285", "0.4920474", "0.49174678", "0.49092406", "0.4884988", "0.48676953", "0.4848693", "0.4845149", "0.48440522", "0.48013315", "0.47910658", "0.47899097", "0.4770117", "0.4741985", "0.47419038", "0.4728911", "0.4724682", "0.4718462", "0.4707024", "0.47063035", "0.47051919", "0.46987534", "0.46986803", "0.46951863", "0.46838206", "0.467274", "0.467274", "0.46685025", "0.46675754", "0.46612936", "0.46571434", "0.4656498", "0.46508643", "0.46449792", "0.4643559", "0.4641994", "0.463333", "0.46308696", "0.46287844", "0.46286452", "0.4622316", "0.4615369", "0.46113005", "0.46047142", "0.46007034", "0.45865437", "0.45782655", "0.45769903", "0.45709112", "0.45650518", "0.45605838", "0.45587662", "0.4556341", "0.45479268", "0.4545016", "0.45435578", "0.4525287", "0.45244485", "0.45235834", "0.45201638", "0.45161125", "0.4510356", "0.4508397", "0.45000607", "0.44962978" ]
0.75590676
0
NewSettingsDialog(parent widgets.QWidget, flags) is automatically generated
func (sd *SettingsDialog) OpenSettingsDialog() { if !sd.isDisplayInited { sd.__init_display() } sd.populateFields() sd.Open() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *User) SettingsUI(title string, editors []string) {\n\tapp := tview.NewApplication()\n\n\tform := tview.NewForm().\n\t\tAddCheckbox(\"Update on starting katbox\", s.AutoUpdate, nil).\n\t\tAddDropDown(\"Editor\", editors, 0, nil).\n\t\tAddInputField(\"(optional) Custom editor Path\", s.Editor, 30, nil, nil).\n\t\tAddInputField(\"Git clone path\", s.GitPath, 30, nil, nil).\n\t\tAddCheckbox(\"Open URLs in Browser\", s.OpenURL, nil).\n\t\tAddButton(\"Save Settings\", func() { app.Stop() })\n\n\tform.SetBorder(true).SetTitle(title).SetTitleAlign(tview.AlignLeft)\n\tif err := app.SetRoot(form, true).Run(); err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Retrieve values and update settings\n\n\t_, s.Editor = form.GetFormItemByLabel(\"Editor\").(*tview.DropDown).GetCurrentOption()\n\t// If a custom editor has been selected then set the value from the custom Editor field\n\tif s.Editor == \"Custom\" {\n\t\ts.CustomEditor = form.GetFormItemByLabel(\"Editor Path\").(*tview.InputField).GetText()\n\t}\n\n\t// TODO - do a OS/Editor lookup and set the path accordingly\n\n\ts.OpenURL = form.GetFormItemByLabel(\"Open URLs in Browser\").(*tview.Checkbox).IsChecked()\n}", "func newSettingsForm() *tview.Form {\n\tform := tview.NewForm()\n\tform.SetFieldBackgroundColor(tcell.ColorBlack)\n\tform.SetFieldTextColor(tcell.ColorWhite)\n\tform.SetLabelColor(tcell.ColorWhite)\n\tform.SetButtonTextColor(tcell.ColorBlack)\n\tform.SetButtonBackgroundColor(tcell.ColorWhiteSmoke)\n\treturn form\n}", "func newSettings() *settings {\n\ts := new(settings)\n\ts.windowSettings = &graphics.WindowSettings{}\n\ts.windowSettings.SetResolution(800, 600)\n\treturn s\n}", "func newSettings(settings *C.WebKitSettings) *Settings {\n\treturn &Settings{&glib.Object{glib.ToGObject(unsafe.Pointer(settings))}, settings}\n}", "func windowsShowSettingsUI(_ *cagent.Cagent, _ bool) {\n\n}", "func newOptions(parent *options) *options {\n\topts := &options{\n\t\tinherited: inherited{\n\t\t\tfocusedColor: 
cell.ColorYellow,\n\t\t},\n\t\thAlign: align.HorizontalCenter,\n\t\tvAlign: align.VerticalMiddle,\n\t\tsplitPercent: DefaultSplitPercent,\n\t}\n\tif parent != nil {\n\t\topts.inherited = parent.inherited\n\t}\n\treturn opts\n}", "func ShowPreferencesDialog(parent gtk.IWindow, onMpdReconnect, onQueueColumnsChanged, onPlayerSettingChanged func()) {\n\t// Create the dialog\n\td := &PrefsDialog{\n\t\tonQueueColumnsChanged: onQueueColumnsChanged,\n\t\tonPlayerSettingChanged: onPlayerSettingChanged,\n\t}\n\n\t// Load the dialog layout and map the widgets\n\tbuilder, err := NewBuilder(prefsGlade)\n\tif err == nil {\n\t\terr = builder.BindWidgets(d)\n\t}\n\n\t// Check for errors\n\tif errCheck(err, \"ShowPreferencesDialog(): failed to initialise dialog\") {\n\t\tutil.ErrorDialog(parent, fmt.Sprint(glib.Local(\"Failed to load UI widgets\"), err))\n\t\treturn\n\t}\n\tdefer d.PreferencesDialog.Destroy()\n\n\t// Set the dialog up\n\td.PreferencesDialog.SetTransientFor(parent)\n\n\t// Remove the 2-pixel \"aura\" around the notebook\n\tif box, err := d.PreferencesDialog.GetContentArea(); err == nil {\n\t\tbox.SetBorderWidth(0)\n\t}\n\n\t// Map the handlers to callback functions\n\tbuilder.ConnectSignals(map[string]interface{}{\n\t\t\"on_PreferencesDialog_map\": d.onMap,\n\t\t\"on_Setting_change\": d.onSettingChange,\n\t\t\"on_MpdReconnect\": onMpdReconnect,\n\t\t\"on_ColumnMoveUpToolButton_clicked\": d.onColumnMoveUp,\n\t\t\"on_ColumnMoveDownToolButton_clicked\": d.onColumnMoveDown,\n\t})\n\n\t// Run the dialog\n\td.PreferencesDialog.Run()\n}", "func (p *PopupModalWidget) Flags(flags WindowFlags) *PopupModalWidget {\n\tp.flags = flags\n\treturn p\n}", "func NewUISettings() *UISettings {\n\treturn &UISettings{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: KindUISettings,\n\t\t\tAPIVersion: GroupVersionCurrent,\n\t\t},\n\t}\n}", "func (p *PopupWidget) Flags(flags WindowFlags) *PopupWidget {\n\tp.flags = flags\n\treturn p\n}", "func NewDialog(text string, fontSize float32) 
{\n\trl.DrawRectangleRec(\n\t\trl.NewRectangle(0, 0, float32(rl.GetScreenWidth()), float32(rl.GetScreenHeight()/5)),\n\t\trl.Black,\n\t)\n\trl.DrawRectangleLinesEx(\n\t\trl.NewRectangle(0, 0, float32(rl.GetScreenWidth()), float32(rl.GetScreenHeight()/5)),\n\t\t4,\n\t\trl.White,\n\t)\n\trl.DrawTextRecEx(\n\t\trl.GetFontDefault(),\n\t\ttext,\n\t\trl.NewRectangle(20, 20, float32(rl.GetScreenWidth()), float32(rl.GetScreenHeight()/5)),\n\t\tfontSize,\n\t\t1,\n\t\ttrue,\n\t\trl.RayWhite,\n\t\t0,\n\t\tint32(rl.GetScreenWidth()),\n\t\trl.White,\n\t\trl.Black,\n\t)\n}", "func (d *PrefsDialog) onSettingChange() {\n\t// Ignore if the dialog is not initialised yet\n\tif !d.initialised {\n\t\treturn\n\t}\n\tlog.Debug(\"onSettingChange()\")\n\n\t// Collect settings\n\tcfg := config.GetConfig()\n\t// General page\n\tcfg.MpdNetwork = d.MpdNetworkComboBox.GetActiveID()\n\tcfg.MpdSocketPath = util.EntryText(d.MpdPathEntry, \"\")\n\tcfg.MpdHost = util.EntryText(d.MpdHostEntry, \"\")\n\tcfg.MpdPort = int(d.MpdPortAdjustment.GetValue())\n\tif s, err := d.MpdPasswordEntry.GetText(); !errCheck(err, \"MpdPasswordEntry.GetText() failed\") {\n\t\tcfg.MpdPassword = s\n\t}\n\tcfg.MpdAutoConnect = d.MpdAutoConnectCheckButton.GetActive()\n\tcfg.MpdAutoReconnect = d.MpdAutoReconnectCheckButton.GetActive()\n\td.updateGeneralWidgets()\n\n\t// Interface page\n\tif b := d.QueueToolbarCheckButton.GetActive(); b != cfg.QueueToolbar {\n\t\tcfg.QueueToolbar = b\n\t\td.schedulePlayerSettingChange()\n\t}\n\tcfg.TrackDefaultReplace = d.LibraryDefaultReplaceRadioButton.GetActive()\n\tcfg.PlaylistDefaultReplace = d.PlaylistsDefaultReplaceRadioButton.GetActive()\n\tcfg.StreamDefaultReplace = d.StreamsDefaultReplaceRadioButton.GetActive()\n\n\t// Automation page\n\tcfg.SwitchToOnQueueReplace = d.AutomationQueueReplaceSwitchToCheckButton.GetActive()\n\tcfg.PlayOnQueueReplace = d.AutomationQueueReplacePlayCheckButton.GetActive()\n\n\t// Player page\n\tif b := d.PlayerShowAlbumArtTracksCheckButton.GetActive(); b 
!= cfg.PlayerAlbumArtTracks {\n\t\tcfg.PlayerAlbumArtTracks = b\n\t\td.schedulePlayerSettingChange()\n\t}\n\tif b := d.PlayerShowAlbumArtStreamsCheckButton.GetActive(); b != cfg.PlayerAlbumArtStreams {\n\t\tcfg.PlayerAlbumArtStreams = b\n\t\td.schedulePlayerSettingChange()\n\t}\n\tif i := int(d.PlayerAlbumArtSizeAdjustment.GetValue()); i != cfg.PlayerAlbumArtSize {\n\t\tcfg.PlayerAlbumArtSize = i\n\t\td.schedulePlayerSettingChange()\n\t}\n\tif s, err := util.GetTextBufferText(d.PlayerTitleTemplateTextBuffer); !errCheck(err, \"util.GetTextBufferText() failed\") {\n\t\tif s != cfg.PlayerTitleTemplate {\n\t\t\tcfg.PlayerTitleTemplate = s\n\t\t\td.schedulePlayerSettingChange()\n\t\t}\n\t}\n}", "func NewSettings(typeVal config.Type) *Settings {\n\treturn &Settings{TypeVal: typeVal, NameVal: string(typeVal)}\n}", "func sonicForm(tui *TUI) *tview.Form {\n\thost, username, password := strDefaultHostStr, strBlank, strBlank\n\tcredBuf, err := tui.db.GetCredentials(subsonic.CredentialKey)\n\tif err == nil {\n\t\tvar creds subsonic.Credentials\n\t\terr := json.Unmarshal(credBuf, &creds)\n\t\tif err == nil {\n\t\t\thost, username = creds.Host, creds.Username\n\t\t}\n\t}\n\tform := newSettingsForm()\n\tform.AddInputField(strHost, host, fieldWidth, nil, nil).\n\t\tAddInputField(strUsername, username, fieldWidth, nil, nil).\n\t\tAddPasswordField(strPassword, password, fieldWidth, passwordMask, nil).\n\t\tAddButton(strSave, func() {\n\t\t\th := form.GetFormItemByLabel(strHost).(*tview.InputField).GetText()\n\t\t\tu := form.GetFormItemByLabel(strUsername).(*tview.InputField).GetText()\n\t\t\tp := form.GetFormItemByLabel(strPassword).(*tview.InputField).GetText()\n\t\t\tc, err := subsonic.Login(u, p, h)\n\t\t\tif err != nil {\n\t\t\t\ttui.player.Error <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbuf, err := json.Marshal(&c.Credentials)\n\t\t\tif err != nil {\n\t\t\t\ttui.player.Error <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr = tui.db.SaveCredentials(subsonic.CredentialKey, buf)\n\t\t\tif 
err != nil {\n\t\t\t\ttui.player.Error <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttui.player.UpdateProvider(c)\n\t\t\ttui.app.SetFocus(tui.settingsList)\n\t\t}).\n\t\tAddButton(strCancel, func() {\n\t\t\ttui.app.SetFocus(tui.settingsList)\n\t\t})\n\treturn form\n}", "func (w *WindowWidget) Flags(flags WindowFlags) *WindowWidget {\n\tw.flags = flags\n\treturn w\n}", "func newDialogFromNative(obj unsafe.Pointer) interface{} {\n\td := &Dialog{}\n\td.object = C.to_GtkDialog(obj)\n\n\tif gobject.IsObjectFloating(d) {\n\t\tgobject.RefSink(d)\n\t} else {\n\t\tgobject.Ref(d)\n\t}\n\td.Window = newWindowFromNative(obj).(*Window)\n\tdialogFinalizer(d)\n\n\treturn d\n}", "func (m *Application) SetParentalControlSettings(value ParentalControlSettingsable)() {\n m.parentalControlSettings = value\n}", "func (gn *Gen) ConfigGui() *gi.Window {\n\twidth := 1600\n\theight := 1200\n\n\tgi.SetAppName(\"Gen\")\n\tgi.SetAppAbout(`Gen concatenated strings of syllables`)\n\n\twin := gi.NewMainWindow(\"one\", \"Gen ...\", width, height)\n\n\tvp := win.WinViewport2D()\n\tupdt := vp.UpdateStart()\n\n\tmfr := win.SetMainFrame()\n\n\ttbar := gi.AddNewToolBar(mfr, \"tbar\")\n\ttbar.SetStretchMaxWidth()\n\t// vi.ToolBar = tbar\n\n\tsplit := gi.AddNewSplitView(mfr, \"split\")\n\tsplit.Dim = gi.X\n\tsplit.SetStretchMaxWidth()\n\tsplit.SetStretchMaxHeight()\n\n\tsv := giv.AddNewStructView(split, \"sv\")\n\tsv.SetStruct(gn)\n\tgn.StructView = sv\n\n\t// tv := gi.AddNewTabView(split, \"tv\")\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Gen cat string\", Icon: \"new\", Tooltip: \"Generate a new initial random seed to get different results. 
By default, Init re-establishes the same initial seed every time.\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.CatNoRepeat(gn.syls1)\n\t\t})\n\n\tvp.UpdateEndNoSig(updt)\n\n\t// main menu\n\tappnm := gi.AppName()\n\tmmen := win.MainMenu\n\tmmen.ConfigMenus([]string{appnm, \"File\", \"Edit\", \"Window\"})\n\n\tamen := win.MainMenu.ChildByName(appnm, 0).(*gi.Action)\n\tamen.Menu.AddAppMenu(win)\n\n\temen := win.MainMenu.ChildByName(\"Edit\", 1).(*gi.Action)\n\temen.Menu.AddCopyCutPaste(win)\n\n\tvp.UpdateEndNoSig(updt)\n\n\twin.MainMenuUpdated()\n\treturn win\n}", "func (gn *Gen) ConfigGui() *gi.Window {\n\twidth := 1600\n\theight := 1200\n\n\tgi.SetAppName(\"Gen\")\n\tgi.SetAppAbout(`Gen concatenated strings of syllables`)\n\n\twin := gi.NewMainWindow(\"one\", \"Gen ...\", width, height)\n\n\tvp := win.WinViewport2D()\n\tupdt := vp.UpdateStart()\n\n\tmfr := win.SetMainFrame()\n\n\ttbar := gi.AddNewToolBar(mfr, \"tbar\")\n\ttbar.SetStretchMaxWidth()\n\t// vi.ToolBar = tbar\n\n\tsplit := gi.AddNewSplitView(mfr, \"split\")\n\tsplit.Dim = gi.X\n\tsplit.SetStretchMaxWidth()\n\tsplit.SetStretchMaxHeight()\n\n\tsv := giv.AddNewStructView(split, \"sv\")\n\tsv.SetStruct(gn)\n\tgn.StructView = sv\n\n\t// tv := gi.AddNewTabView(split, \"tv\")\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Reset\", Icon: \"new\", Tooltip: \"\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.Reset()\n\t\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Load Params\", Icon: \"new\", Tooltip: \"\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.LoadParams()\n\t\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Gen Wavs\", Icon: \"new\", Tooltip: \"Generate the .wav files\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.GenWavs()\n\t\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Split Wavs\", Icon: \"new\", Tooltip: \"\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, 
data interface{}) {\n\t\t\tgn.SplitWavs()\n\t\t})\n\n\tvp.UpdateEndNoSig(updt)\n\n\t// main menu\n\tappnm := gi.AppName()\n\tmmen := win.MainMenu\n\tmmen.ConfigMenus([]string{appnm, \"File\", \"Edit\", \"Window\"})\n\n\tamen := win.MainMenu.ChildByName(appnm, 0).(*gi.Action)\n\tamen.Menu.AddAppMenu(win)\n\n\temen := win.MainMenu.ChildByName(\"Edit\", 1).(*gi.Action)\n\temen.Menu.AddCopyCutPaste(win)\n\n\tvp.UpdateEndNoSig(updt)\n\n\twin.MainMenuUpdated()\n\treturn win\n}", "func NewTeamMemberSettings()(*TeamMemberSettings) {\n m := &TeamMemberSettings{\n }\n m.SetAdditionalData(make(map[string]interface{}));\n return m\n}", "func NewWindow() *Window {\n\tfile := ui.NewFileWithName(\":/widget.ui\")\n\tloader := ui.NewUiLoader()\n\twidget := loader.Load(file)\n\n\t// Init main window\n\twindow := ui.NewMainWindow()\n\twindow.SetCentralWidget(widget)\n\twindow.SetWindowTitle(\"DFSS Demonstrator v\" + dfss.Version)\n\n\tw := &Window{\n\t\tQMainWindow: window,\n\t\tscene: &Scene{},\n\t}\n\tw.InstallEventFilter(w)\n\n\t// Load dynamic elements from driver\n\tw.logField = ui.NewTextEditFromDriver(widget.FindChild(\"logField\"))\n\tw.graphics = ui.NewGraphicsViewFromDriver(widget.FindChild(\"graphicsView\"))\n\tw.progress = ui.NewLabelFromDriver(widget.FindChild(\"progressLabel\"))\n\n\tw.playButton = ui.NewPushButtonFromDriver(widget.FindChild(\"playButton\"))\n\tw.stopButton = ui.NewPushButtonFromDriver(widget.FindChild(\"stopButton\"))\n\tw.replayButton = ui.NewPushButtonFromDriver(widget.FindChild(\"replayButton\"))\n\n\tw.quantumField = ui.NewSpinBoxFromDriver(widget.FindChild(\"quantumField\"))\n\tw.speedSlider = ui.NewSliderFromDriver(widget.FindChild(\"speedSlider\"))\n\n\t// Load pixmaps\n\tw.pixmaps = map[string]*ui.QPixmap{\n\t\t\"ttp\": ui.NewPixmapWithFilenameFormatFlags(\":/images/server_key.png\", \"\", ui.Qt_AutoColor),\n\t\t\"platform\": ui.NewPixmapWithFilenameFormatFlags(\":/images/server_connect.png\", \"\", ui.Qt_AutoColor),\n\t}\n\n\t// Load 
icons\n\tw.addIcons()\n\n\t// Add actions\n\tw.addActions()\n\tw.initScene()\n\tw.initTimer()\n\n\tw.StatusBar().ShowMessage(\"Ready\")\n\tw.PrintQuantumInformation()\n\treturn w\n}", "func NewDialog(text string) *Dialog {\n\treturn &Dialog{\n\t\tStyleName: \"Default\",\n\t\tStart: \"0:00:00.00\", End: \"0:00:05.00\",\n\t\tText: text}\n}", "func UI(redraw func()) *tview.Form {\n\tredrawParent = redraw\n\tcommand = settings.Get(settings.SetConfig, settings.KeyOmxplayerCommand).(string)\n\n\tuiForm = tview.NewForm().\n\t\tAddInputField(\"omxmplayer command\", command, 40, nil, handleCommandChange).\n\t\tAddButton(\"Save\", handlePressSave).\n\t\tAddButton(\"Cancel\", handlePressCancel)\n\n\tuiForm.SetFieldBackgroundColor(tcell.ColorGold).SetFieldTextColor(tcell.ColorBlack)\n\tuiForm.SetBorder(true).SetTitle(\"Settings\")\n\n\treturn uiForm\n}", "func newOptions(streams genericclioptions.IOStreams) *options {\n\to := &options{\n\t\tIOStreams: streams,\n\t}\n\to.SetConfigFlags()\n\n\treturn o\n}", "func (d *PrefsDialog) updateGeneralWidgets() {\n\tnetwork := d.MpdNetworkComboBox.GetActiveID()\n\tunix, tcp := network == \"unix\", network == \"tcp\"\n\td.MpdPathEntry.SetVisible(unix)\n\td.MpdPathLabel.SetVisible(unix)\n\td.MpdHostEntry.SetVisible(tcp)\n\td.MpdHostLabel.SetVisible(tcp)\n\td.MpdHostLabelRemark.SetVisible(tcp)\n\td.MpdPortSpinButton.SetVisible(tcp)\n\td.MpdPortLabel.SetVisible(tcp)\n}", "func NewUserSettings()(*UserSettings) {\n m := &UserSettings{\n Entity: *NewEntity(),\n }\n return m\n}", "func NewUi(w *app.Window) *Ui {\n\tu := Ui{\n\t\tw: w,\n\t\tth: material.NewTheme(gofont.Collection()),\n\t\tga: engine.NewGame(),\n\t}\n\tu.th.TextSize = unit.Dp(topMenuPx / 5)\n\tu.ga.ScaleOffset(WidthPx)\n\tu.nameEditor = &widget.Editor{\n\t\tSingleLine: true,\n\t\tSubmit: true,\n\t}\n\tu.menuBtn.pressed = true\n\tu.titleScreen = true\n\treturn &u\n}", "func mainStartGtk(winTitle string, width, height int, center bool) {\n\tobj = 
new(MainControlsObj)\n\tgtk.Init(nil)\n\tif newBuilder(mainGlade) == nil {\n\t\t// Init tempDir and Remove it on quit if requested.\n\t\tif doTempDir {\n\t\t\ttempDir = tempMake(Name)\n\t\t\tdefer os.RemoveAll(tempDir)\n\t\t}\n\t\t// Parse Gtk objects\n\t\tgladeObjParser()\n\t\t// Objects Signals initialisations\n\t\tsignalsPropHandler()\n\t\t/* Fill control with images */\n\t\tassignImages()\n\t\t// Set Window Properties\n\t\tif center {\n\t\t\tobj.MainWindow.SetPosition(gtk.WIN_POS_CENTER)\n\t\t}\n\t\tobj.MainWindow.SetTitle(winTitle)\n\t\tobj.MainWindow.SetDefaultSize(width, height)\n\t\tobj.MainWindow.Connect(\"delete-event\", windowDestroy)\n\t\t// Start main application ...\n\t\tmainApplication()\n\t\t//\tStart Gui loop\n\t\tobj.MainWindow.ShowAll()\n\t\tgtk.Main()\n\t} else {\n\t\tlog.Fatal(\"Builder initialisation error.\")\n\t}\n}", "func InputDialog(opt ...interface{}) string {\n b, _ := gtk.BuilderNewFromFile(\"glade/input-dialog.glade\")\n d := GetDialog(b, \"input_dialog\")\n entry := GetEntry(b, \"input_entry\")\n\n for i, v := range(opt) {\n if i % 2 == 0 {\n key := v.(string)\n switch key {\n case \"title\":\n d.SetTitle(opt[i+1].(string))\n case \"label\":\n l := GetLabel(b,\"input_label\")\n l.SetText(opt[i+1].(string))\n case \"password-mask\":\n entry.SetInvisibleChar(opt[i+1].(rune))\n entry.SetVisibility(false)\n case \"default\":\n entry.SetText(opt[i+1].(string))\n }\n }\n }\n\n output := \"\"\n entry.Connect(\"activate\", func (o *gtk.Entry) { d.Response(gtk.RESPONSE_OK) } )\n btok := GetButton(b, \"bt_ok\")\n btok.Connect(\"clicked\", func (b *gtk.Button) { d.Response(gtk.RESPONSE_OK) } )\n\n btcancel := GetButton(b, \"bt_cancel\")\n btcancel.Connect(\"clicked\", func (b *gtk.Button) { d.Response(gtk.RESPONSE_CANCEL) } )\n\n code := d.Run()\n if code == gtk.RESPONSE_OK {\n output, _ = entry.GetText()\n }\n\n d.Destroy()\n return output\n}", "func NewMailboxSettings()(*MailboxSettings) {\n m := &MailboxSettings{\n }\n m.backingStore = 
ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func NewDialog(text string) *writer.Dialog {\n\treturn writer.NewDialog(text)\n}", "func (u *Projects) Settings(c echo.Context) error {\n\tpc, ok := c.(*middlewares.ProjectContext)\n\tif !ok {\n\t\terr := errors.New(\"Can not cast context\")\n\t\tlogging.SharedInstance().ControllerWithStacktrace(err, c).Error(err)\n\t\treturn err\n\t}\n\tp := pc.Project\n\n\tsettingsProjectForm := new(SettingsProjectForm)\n\terr := c.Bind(settingsProjectForm)\n\tif err != nil {\n\t\terr := errors.Wrap(err, \"wrong parameter\")\n\t\tlogging.SharedInstance().ControllerWithStacktrace(err, c).Error(err)\n\t\treturn err\n\t}\n\tlogging.SharedInstance().Controller(c).Debugf(\"post edit project parameter: %+v\", settingsProjectForm)\n\tif err := board.UpdateProject(\n\t\tp,\n\t\tp.Title,\n\t\tp.Description,\n\t\tsettingsProjectForm.ShowIssues,\n\t\tsettingsProjectForm.ShowPullRequests,\n\t); err != nil {\n\t\tlogging.SharedInstance().ControllerWithStacktrace(err, c).Error(err)\n\t\treturn err\n\t}\n\tlogging.SharedInstance().Controller(c).Info(\"success to update project\")\n\n\tjsonProject, err := views.ParseProjectJSON(p)\n\tif err != nil {\n\t\tlogging.SharedInstance().Controller(c).Error(err)\n\t\treturn err\n\t}\n\treturn c.JSON(http.StatusOK, jsonProject)\n}", "func newSetupOptions() *SetupOptions {\n\treturn &SetupOptions{}\n}", "func NewModalDialog(idd uintptr, parent win.HWND, dialogConfig *DialogConfig, cb ModalDialogCallBack) int {\n\tif dialogConfig == nil {\n\t\tdialogConfig = &DialogConfig{}\n\t}\n\tdlg := &Dialog{\n\t\titems: make(map[win.HWND]Widget),\n\t\tiddMap: make(map[uintptr]Widget),\n\t\tconfig: dialogConfig,\n\t\tcb: cb,\n\t}\n\tdlg.idd = idd\n\treturn win.DialogBoxParam(hInstance, win.MAKEINTRESOURCE(idd), parent, syscall.NewCallback(dlg.dialogWndProc), 0)\n}", "func NewSetting()(*Setting) {\n m := 
&Setting{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func (d *DSP) ShowConfigDialog(hwnd *interface{}, show C.FMOD_BOOL) error {\n\t//FMOD_RESULT F_API FMOD_DSP_ShowConfigDialog (FMOD_DSP *dsp, void *hwnd, FMOD_BOOL show);\n\treturn ErrNoImpl\n}", "func setupWindow(title string) *gtk.Window {\n\twin, err := gtk.WindowNew(gtk.WINDOW_TOPLEVEL)\n\tif err != nil {\n\t\tlog.Fatal(\"Unable to create window:\", err)\n\t}\n\n\twin.SetTitle(title)\n\twin.Connect(\"destroy\", func() {\n\t\tgtk.MainQuit()\n\t})\n\twin.SetPosition(gtk.WIN_POS_CENTER)\n\twidth, height := 600, 300\n\twin.SetDefaultSize(width, height)\n\n\tbox, _ := gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 0)\n\tbtn, _ := gtk.ButtonNew()\n\tbtn.Connect(\"clicked\", ButtonClicked)\n\tbtn.SetLabel(\"Stop timeout\")\n\n\tbox.Add(btn)\n\twin.Add(box)\n\n\treturn win\n}", "func NewDeviceManagementSettings()(*DeviceManagementSettings) {\n m := &DeviceManagementSettings{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func newOptions() *options {\n\treturn &options{}\n}", "func CreateMainWindow() {\n\n\tvBox := tui.NewVBox()\n\tvBox.SetSizePolicy(tui.Minimum, tui.Minimum)\n\tSidebar := tui.NewVBox()\n\tSidebar.SetSizePolicy(tui.Minimum, tui.Minimum)\n\n\tfor _, cmd := range strings.Split(libs.Cmds, \",\") {\n\t\tSidebar.Append(tui.NewLabel(wordwrap.WrapString(cmd, 50)))\n\t}\n\n\tSidebar.SetBorder(true)\n\tSidebar.Prepend(tui.NewLabel(\"***COMMANDS***\"))\n\n\tInput.SetFocused(true)\n\tInput.SetSizePolicy(tui.Expanding, tui.Maximum)\n\n\tinputBox := tui.NewHBox(Input)\n\tinputBox.SetBorder(true)\n\tinputBox.SetSizePolicy(tui.Expanding, tui.Maximum)\n\n\thistoryScroll := tui.NewScrollArea(History)\n\thistoryScroll.SetAutoscrollToBottom(true)\n\thistoryBox := 
tui.NewVBox(historyScroll)\n\thistoryBox.SetBorder(true)\n\n\tchat := tui.NewVBox(historyBox, inputBox)\n\tchat.SetSizePolicy(tui.Expanding, tui.Expanding)\n\n\t// create root window and add all windows\n\troot := tui.NewHBox(Sidebar, chat)\n\tui, err := tui.New(root)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tui.SetKeybinding(\"Esc\", func() { ui.Quit() })\n\n\tInput.OnSubmit(func(e *tui.Entry) {\n\t\t// this is just to see what command given\n\t\tuserCommand := e.Text()\n\t\tif userCommand == \"\" {\n\t\t\tHistory.Append(tui.NewLabel(\"that is not acceptable command\"))\n\t\t\tHistory.Append(tui.NewLabel(libs.PrintHelp()))\n\t\t} else {\n\t\t\tHistory.Append(tui.NewHBox(\n\t\t\t\ttui.NewLabel(\"Your Command: \" + userCommand),\n\t\t\t))\n\t\t\tHistory.Append(tui.NewHBox(tui.NewLabel(\"\")))\n\n\t\t\tif strings.HasPrefix(userCommand, \"\\\\\") {\n\t\t\t\t// then this is command ..\n\t\t\t\tswitch userCommand {\n\t\t\t\tcase \"\\\\help\":\n\t\t\t\t\tHistory.Append(tui.NewLabel(libs.PrintHelp()))\n\t\t\t\tcase \"\\\\monitor\":\n\t\t\t\t\tHistory.Append(tui.NewLabel(\"Switching to MONITOR mode for device \" + DeviceName))\n\t\t\t\t\tChangeToMonitorMode()\n\t\t\t\tcase \"\\\\managed\":\n\t\t\t\t\tHistory.Append(tui.NewLabel(\"Switching to MANAGED mode for device \" + DeviceName))\n\t\t\t\t\tChangeToManagedMode()\n\t\t\t\tcase \"\\\\exit\":\n\t\t\t\t\tHistory.Append(tui.NewHBox(tui.NewLabel(\"quitting...\")))\n\t\t\t\t\ttime.Sleep(1000 * time.Millisecond)\n\t\t\t\t\t// os.Exit(0)\n\n\t\t\t\t}\n\t\t\t} else if strings.Contains(userCommand, \":\") {\n\t\t\t\t// then this is declaration\n\t\t\t\tcmdSplit := strings.Split(userCommand, \":\")\n\t\t\t\tif cmdSplit[1] == \"\" {\n\t\t\t\t\tHistory.Append(tui.NewLabel(\"that is not acceptable command\"))\n\t\t\t\t\tHistory.Append(tui.NewLabel(libs.PrintHelp()))\n\t\t\t\t} else {\n\t\t\t\t\tswitch cmdSplit[0] {\n\t\t\t\t\tcase 
\"device\":\n\t\t\t\t\t\tSetDeviceName(cmdSplit[1])\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tHistory.Append(tui.NewLabel(\"there is no such declaration or command\"))\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\tHistory.Append(tui.NewHBox(tui.NewLabel(userCommand + \" is not command or a declaration\")))\n\t\t\t}\n\t\t}\n\t\tInput.SetText(\"\")\n\t})\n\n\tif err := ui.Run(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (dialog *Dialog) NewSubdialog() *Dialog {\n\treturn &Dialog{\n\t\tdepth: dialog.depth,\n\t\tisSub: true,\n\t}\n}", "func newUpdateConnectionSettingsFrame(settings connectionSettings) frame {\n\t// Prepare frame.\n\tpayload := encoding.Marshal(settings)\n\tf := frame{\n\t\tframeHeader: frameHeader{\n\t\t\tid: frameIDUpdateSettings,\n\t\t\tlength: uint32(len(payload)),\n\t\t\tflags: 0,\n\t\t},\n\t\tpayload: payload,\n\t}\n\treturn f\n}", "func (*Settings) Descriptor() ([]byte, []int) {\n\treturn file_google_actions_sdk_v2_settings_proto_rawDescGZIP(), []int{0}\n}", "func NewSettings() *Settings {\n\treturn &Settings{\n\t\tclient: http.DefaultClient,\n\t}\n}", "func NewAdminReportSettings()(*AdminReportSettings) {\n m := &AdminReportSettings{\n Entity: *NewEntity(),\n }\n return m\n}", "func NewUI() flags.Commander {\n\treturn &ui{}\n}", "func DialogBox(\n\thInstance HINSTANCE,\n\tTemplateName string,\n\thWndParent HWND,\n\tlpDialogFunc DLGPROC,\n) INT_PTR {\n\tvar ret, _, _ = userDialogBoxParamW.Call(\n\t\tuintptr(hInstance),\n\t\tUintptrFromString(&TemplateName),\n\t\tuintptr(hWndParent),\n\t\tuintptr(lpDialogFunc),\n\t\t0,\n\t)\n\treturn INT_PTR(ret)\n}", "func NewSettingsController() *SettingsController {\n\tpath, err := homedir.Dir()\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tsettingPath := filepath.Join(path, \".notorious\", \"settings.json\")\n\tvar settings *models.Settings\n\tif _, err := os.Stat(settingPath); os.IsNotExist(err) {\n\t\tsettings = &models.Settings{\n\t\t\tLastOpenFile: \"\",\n\t\t\tLastOpenWorkspace: 
\"default\",\n\t\t\tJiraSettings: &models.Jira{\n\t\t\t\tBaseURL: \"\",\n\t\t\t\tUserName: \"\",\n\t\t\t\tAccessToken: \"\",\n\t\t\t},\n\t\t}\n\t\twriteFile(settings)\n\t} else {\n\t\tdata, _ := ioutil.ReadFile(settingPath)\n\t\tjson.Unmarshal(data, &settings)\n\t}\n\treturn &SettingsController{\n\t\tSettings: settings,\n\t}\n}", "func (client *WebAppsClient) getAuthSettingsSlotCreateRequest(ctx context.Context, resourceGroupName string, name string, slot string, options *WebAppsGetAuthSettingsSlotOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/authsettings/list\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif slot == \"\" {\n\t\treturn nil, errors.New(\"parameter slot cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{slot}\", url.PathEscape(slot))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (d Client) CreateDialog(name string, filename string, data io.Reader) (string, error) {\n\treturn d.createOrUpdateDialog(\"\", name, filename, data)\n}", "func (s settings) 
WindowSettings() interfaces.WindowSettings {\n\treturn s.windowSettings\n}", "func configureFlags(api *operations.LolchestWinAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func InstallPluginSettingsHandler(ctx *macaron.Context, f *session.Flash) {\n\tid := ctx.Params(\"id\")\n\trepoDesc := fmt.Sprintf(\"%s/%s-%s/%s.toml\",\n\t\tsettings.Config.Get(\"Marketplace.RepositoryURL\"),\n\t\truntime.GOOS,\n\t\truntime.GOARCH,\n\t\tid)\n\tdesc, err := getPluginDesc(repoDesc)\n\tif err != nil {\n\t\tf.Error(\"Cannot find plugin.\")\n\t\tctx.Redirect(\"/settings/plugins\")\n\t\treturn\n\t}\n\tctx.Data[\"Plugin\"] = desc\n\n\tctx.HTML(http.StatusOK, \"settings/plugin_install_confirm\")\n}", "func (sd *SimpleDialog) Custom(owner walk.Form, widget Widget) (accepted bool, err error) {\n\tvar (\n\t\tdlg *walk.Dialog\n\t)\n\n\tif _, err := (Dialog{\n\t\tAssignTo: &dlg,\n\t\tLayout: VBox{Margins: Margins{}},\n\t\tChildren: []Widget{\n\t\t\twidget,\n\t\t\tComposite{\n\t\t\t\tLayout: HBox{Margins: Margins{}},\n\t\t\t\tChildren: []Widget{\n\t\t\t\t\tPushButton{\n\t\t\t\t\t\tText: i18n.Tr(\"widget.button.ok\"),\n\t\t\t\t\t\tOnClicked: func() {\n\t\t\t\t\t\t\t// some stuff here...\n\t\t\t\t\t\t\tdlg.Close(0)\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tPushButton{\n\t\t\t\t\t\tText: i18n.Tr(\"widget.button.cancel\"),\n\t\t\t\t\t\tOnClicked: func() {\n\t\t\t\t\t\t\tdlg.Close(0)\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tTitle: sd.Title,\n\t\tSize: sd.Size,\n\t\tFixedSize: sd.FixedSize,\n\t}).Run(owner); err != nil {\n\t\treturn false, err\n\t}\n\n\treturn\n}", "func newOptions() (*Options, error) {\n\to := &Options{\n\t\tconfig: new(componentconfig.CoordinatorConfiguration),\n\t}\n\treturn o, nil\n}", "func NewForm1(owner vcl.IComponent) (root *TForm1) {\n vcl.CreateResForm(owner, &root)\n return\n}", "func (s *Settings) DisableToggleableSettings() {\n\tC.webkit_settings_set_auto_load_images(s.settings, 
gboolean(false))\n\tC.webkit_settings_set_enable_frame_flattening(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_html5_database(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_html5_local_storage(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_hyperlink_auditing(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_java(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_javascript(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_offline_web_application_cache(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_plugins(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_xss_auditor(s.settings, gboolean(false))\n\tC.webkit_settings_set_javascript_can_open_windows_automatically(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_private_browsing(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_developer_extras(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_resizable_text_areas(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_tabs_to_links(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_dns_prefetching(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_caret_browsing(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_fullscreen(s.settings, gboolean(false))\n\tC.webkit_settings_set_print_backgrounds(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_webaudio(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_webgl(s.settings, gboolean(false))\n\tC.webkit_settings_set_allow_modal_dialogs(s.settings, gboolean(false))\n\tC.webkit_settings_set_javascript_can_access_clipboard(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_page_cache(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_smooth_scrolling(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_accelerated_2d_canvas(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_media_stream(s.settings, 
gboolean(false))\n\tC.webkit_settings_set_enable_spatial_navigation(s.settings, gboolean(false))\n\tC.webkit_settings_set_enable_mediasource(s.settings, gboolean(false))\n}", "func (opt *MainOpt) UpdateOptions() {\n\n\topt.MainWinWidth, opt.MainWinHeight = mainObjects.MainWindow.GetSize()\n\topt.MainWinPosX, opt.MainWinPosY = mainObjects.MainWindow.GetPosition()\n\n\topt.Reminder = mainObjects.CheckbuttonAddReminder.GetActive()\n\topt.Md4 = mainObjects.CheckbuttonMd4.GetActive()\n\topt.Md5 = mainObjects.CheckbuttonMd5.GetActive()\n\topt.Sha1 = mainObjects.CheckbuttonSha1.GetActive()\n\topt.Sha256 = mainObjects.CheckbuttonSha256.GetActive()\n\topt.Sha384 = mainObjects.CheckbuttonSha384.GetActive()\n\topt.Sha512 = mainObjects.CheckbuttonSha512.GetActive()\n\topt.Sha3_256 = mainObjects.CheckbuttonSha3_256.GetActive()\n\topt.Sha3_384 = mainObjects.CheckbuttonSha3_384.GetActive()\n\topt.Sha3_512 = mainObjects.CheckbuttonSha3_512.GetActive()\n\topt.Blake2b256 = mainObjects.CheckbuttonBlake2b256.GetActive()\n\topt.Blake2b384 = mainObjects.CheckbuttonBlake2b384.GetActive()\n\topt.Blake2b512 = mainObjects.CheckbuttonBlake2b512.GetActive()\n\topt.ShowFilename = mainObjects.CheckbuttonShowFilename.GetActive()\n\topt.AppendDroppedFiles = mainObjects.CheckbuttonAppendFiles.GetActive()\n\topt.UseDecimal = mainObjects.CheckbuttonUseDecimal.GetActive()\n\topt.ConcurrentOp = mainObjects.CheckbuttonConcurrentOp.GetActive()\n\topt.RecursiveScan = mainObjects.CheckbuttonRecursiveScan.GetActive()\n\topt.MakeOutputFile = mainObjects.CheckbuttonCreateFile.GetActive()\n\n\topt.CurrentStackPage = mainObjects.Stack.GetVisibleChildName()\n\topt.SwitchStackPage = mainObjects.SwitchTreeView.GetActive()\n\topt.SwitchExpandState = mainObjects.SwitchExpand.GetActive()\n\n\topt.ShowSplash = mainObjects.CheckbuttonShowSplash.GetActive()\n}", "func (v Vehicle) GuiSettings() (*GuiSettings, error) {\n\tstateRequest, err := fetchState(\"/gui_settings\", v.ID)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\treturn stateRequest.Response.GuiSettings, nil\n}", "func (syn *Synth) ConfigGui() *gi.Window {\n\twidth := 1600\n\theight := 1200\n\n\tgi.SetAppName(\"Synth\")\n\tgi.SetAppAbout(`This demonstrates synthesizing a sound (phone or word)`)\n\n\twin := gi.NewMainWindow(\"one\", \"Auditory ...\", width, height)\n\n\tvp := win.WinViewport2D()\n\tupdt := vp.UpdateStart()\n\n\tmfr := win.SetMainFrame()\n\n\ttbar := gi.AddNewToolBar(mfr, \"tbar\")\n\ttbar.SetStretchMaxWidth()\n\tsyn.ToolBar = tbar\n\n\tsplit := gi.AddNewSplitView(mfr, \"split\")\n\tsplit.Dim = gi.X\n\tsplit.SetStretchMax()\n\n\tsv := giv.AddNewStructView(split, \"sv\")\n\tsv.SetStruct(syn)\n\n\ttview := gi.AddNewTabView(split, \"tv\")\n\n\tplt := tview.AddNewTab(eplot.KiT_Plot2D, \"wave\").(*eplot.Plot2D)\n\tsyn.WavePlot = syn.ConfigWavePlot(plt, syn.SignalData)\n\n\t// tbar.AddAction(gi.ActOpts{Label: \"Update Wave\", Icon: \"new\"}, win.This(),\n\t// \tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t// \t\tsyn.GetWaveData()\n\t// \t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Synthesize\", Icon: \"new\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tsyn.Synthesize()\n\t\t})\n\n\tsplit.SetSplitsList([]float32{.3, .7})\n\n\t// main menu\n\tappnm := gi.AppName()\n\tmmen := win.MainMenu\n\tmmen.ConfigMenus([]string{appnm, \"File\", \"Edit\", \"Window\"})\n\n\tamen := win.MainMenu.ChildByName(appnm, 0).(*gi.Action)\n\tamen.Menu.AddAppMenu(win)\n\n\temen := win.MainMenu.ChildByName(\"Edit\", 1).(*gi.Action)\n\temen.Menu.AddCopyCutPaste(win)\n\n\tvp.UpdateEndNoSig(updt)\n\n\twin.MainMenuUpdated()\n\treturn win\n}", "func (as *AdminServer) Settings(w http.ResponseWriter, r *http.Request) {\n\tswitch {\n\tcase r.Method == \"GET\":\n\t\tparams := newTemplateParams(r)\n\t\tparams.Title = \"Settings\"\n\t\tsession := ctx.Get(r, \"session\").(*sessions.Session)\n\t\tsession.Save(r, w)\n\t\tgetTemplate(w, \"settings\").ExecuteTemplate(w, \"base\", params)\n\tcase 
r.Method == \"POST\":\n\t\tu := ctx.Get(r, \"user\").(models.User)\n\t\tcurrentPw := r.FormValue(\"current_password\")\n\t\tnewPassword := r.FormValue(\"new_password\")\n\t\tconfirmPassword := r.FormValue(\"confirm_new_password\")\n\t\t// Check the current password\n\t\terr := auth.ValidatePassword(currentPw, u.Hash)\n\t\tmsg := models.Response{Success: true, Message: \"Settings Updated Successfully\"}\n\t\tif err != nil {\n\t\t\tmsg.Message = err.Error()\n\t\t\tmsg.Success = false\n\t\t\tapi.JSONResponse(w, msg, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tnewHash, err := auth.ValidatePasswordChange(u.Hash, newPassword, confirmPassword)\n\t\tif err != nil {\n\t\t\tmsg.Message = err.Error()\n\t\t\tmsg.Success = false\n\t\t\tapi.JSONResponse(w, msg, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tu.Hash = string(newHash)\n\t\tif err = models.PutUser(&u); err != nil {\n\t\t\tmsg.Message = err.Error()\n\t\t\tmsg.Success = false\n\t\t\tapi.JSONResponse(w, msg, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tapi.JSONResponse(w, msg, http.StatusOK)\n\t}\n}", "func (c *FakeUISettings) Create(ctx context.Context, uISettings *v3.UISettings, opts v1.CreateOptions) (result *v3.UISettings, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewRootCreateAction(uisettingsResource, uISettings), &v3.UISettings{})\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v3.UISettings), err\n}", "func (m *GraphBaseServiceClient) GroupSettings()(*i4794c103c0d044c27a3ca3af0a0e498e93a9863420c1a4e7a29ef37590053c7b.GroupSettingsRequestBuilder) {\n return i4794c103c0d044c27a3ca3af0a0e498e93a9863420c1a4e7a29ef37590053c7b.NewGroupSettingsRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (m *GraphBaseServiceClient) GroupSettings()(*i4794c103c0d044c27a3ca3af0a0e498e93a9863420c1a4e7a29ef37590053c7b.GroupSettingsRequestBuilder) {\n return 
i4794c103c0d044c27a3ca3af0a0e498e93a9863420c1a4e7a29ef37590053c7b.NewGroupSettingsRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (f *Factory) SetFlags() {\n}", "func (t *qmlfrontend) onNew(v *backend.View) {\n\tfv := &frontendView{bv: v}\n\tv.AddObserver(fv)\n\tv.Settings().AddOnChange(\"blah\", fv.onChange)\n\n\tfv.Title.Text = v.FileName()\n\tif len(fv.Title.Text) == 0 {\n\t\tfv.Title.Text = \"untitled\"\n\t}\n\n\tw2 := t.windows[v.Window()]\n\tw2.views = append(w2.views, fv)\n\n\tif w2.window == nil {\n\t\treturn\n\t}\n\n\tw2.window.Call(\"addTab\", \"\", fv)\n}", "func (gn *Gen) ConfigGui() *gi.Window {\n\twidth := 1600\n\theight := 1200\n\n\tgi.SetAppName(\"Gen\")\n\tgi.SetAppAbout(`Gen concatenated strings of syllables`)\n\n\twin := gi.NewMainWindow(\"one\", \"Gen ...\", width, height)\n\n\tvp := win.WinViewport2D()\n\tupdt := vp.UpdateStart()\n\n\tmfr := win.SetMainFrame()\n\n\ttbar := gi.AddNewToolBar(mfr, \"tbar\")\n\ttbar.SetStretchMaxWidth()\n\t// vi.ToolBar = tbar\n\n\tsplit := gi.AddNewSplitView(mfr, \"split\")\n\tsplit.Dim = gi.X\n\tsplit.SetStretchMaxWidth()\n\tsplit.SetStretchMaxHeight()\n\n\tsv := giv.AddNewStructView(split, \"sv\")\n\tsv.SetStruct(gn)\n\tgn.StructView = sv\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Gen TriSyllable Strings\", Icon: \"new\", Tooltip: \"Generate all combinations of tri syllabic strings from the sets of syllables for each position\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.GenTriSyllables()\n\t\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Write TriSyls Strings\", Icon: \"new\", Tooltip: \"\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.WriteTriSyllables()\n\t\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Shuffle CVs\", Icon: \"new\", Tooltip: \"Shuffle the syllables and add this shuffle to the list of shuffles\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) 
{\n\t\t\tgn.ShuffleCVs()\n\t\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Write Shuffled CVs\", Icon: \"new\", Tooltip: \"WriteShuffles writes an individual file for each of the shuffled CV lists generated\\n// and also writes a file called \\\"ls\\\" that is a list of the files written!\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.WriteShuffles()\n\t\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Gen Speech\", Icon: \"new\", Tooltip: \"Calls GnuSpeech on content of files\\n// and also writes a file called \\\"ls\\\" that is a list of the files written!\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.GenSpeech(gn.ShufflesIn, gn.ShufflesOut)\n\t\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Rename Individual CVs\", Icon: \"new\", Tooltip: \"Must run this after splitting shuffle files into individual CVs before concatenating!\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.Rename()\n\t\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Gen Whole Word Wavs\", Icon: \"new\", Tooltip: \"Generates wav files of 3 CVs where the second and third are fully predictable based on first CV\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.GenWholeWordWavs()\n\t\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Gen Part Word Wavs\", Icon: \"new\", Tooltip: \"Generates wav files of 3 CVs, the second CV is of a set (so partially predictable), the third CV is predictable based on second\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.GenPartWordWavs()\n\t\t})\n\n\ttbar.AddAction(gi.ActOpts{Label: \"Wav sequence from tri wavs\", Icon: \"new\", Tooltip: \"Write wav file that is the concatenation of wav files of tri CVs\"}, win.This(),\n\t\tfunc(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tgn.SequenceFromTriCVs()\n\t\t})\n\n\tvp.UpdateEndNoSig(updt)\n\n\t// main menu\n\tappnm := gi.AppName()\n\tmmen := 
win.MainMenu\n\tmmen.ConfigMenus([]string{appnm, \"File\", \"Edit\", \"Window\"})\n\n\tamen := win.MainMenu.ChildByName(appnm, 0).(*gi.Action)\n\tamen.Menu.AddAppMenu(win)\n\n\temen := win.MainMenu.ChildByName(\"Edit\", 1).(*gi.Action)\n\temen.Menu.AddCopyCutPaste(win)\n\n\tvp.UpdateEndNoSig(updt)\n\n\twin.MainMenuUpdated()\n\treturn win\n}", "func (c Help) Settings() *multiplexer.CommandSettings {\n\treturn &multiplexer.CommandSettings{\n\t\tCommand: c.Command,\n\t\tHelpText: c.HelpText,\n\t}\n}", "func QReplaceDialog(avp *gi.Viewport2D, find string, opts gi.DlgOpts, recv ki.Ki, fun ki.RecvFunc) *gi.Dialog {\n\tdlg := gi.NewStdDialog(opts, gi.AddOk, gi.AddCancel)\n\tdlg.Modal = true\n\n\tframe := dlg.Frame()\n\t_, prIdx := dlg.PromptWidget(frame)\n\ttff := frame.InsertNewChild(gi.KiT_ComboBox, prIdx+1, \"find\").(*gi.ComboBox)\n\ttff.Editable = true\n\ttff.SetStretchMaxWidth()\n\ttff.SetMinPrefWidth(units.NewCh(60))\n\ttff.ConfigParts()\n\ttff.ItemsFromStringList(PrevQReplaceFinds, true, 0)\n\tif find != \"\" {\n\t\ttff.SetCurVal(find)\n\t}\n\n\ttfr := frame.InsertNewChild(gi.KiT_ComboBox, prIdx+2, \"repl\").(*gi.ComboBox)\n\ttfr.Editable = true\n\ttfr.SetStretchMaxWidth()\n\ttfr.SetMinPrefWidth(units.NewCh(60))\n\ttfr.ConfigParts()\n\ttfr.ItemsFromStringList(PrevQReplaceRepls, true, 0)\n\n\tif recv != nil && fun != nil {\n\t\tdlg.DialogSig.Connect(recv, fun)\n\t}\n\tdlg.UpdateEndNoSig(true)\n\tdlg.Open(0, 0, avp, nil)\n\treturn dlg\n}", "func NewUISettingsList() *UISettingsList {\n\treturn &UISettingsList{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: KindUISettingsList,\n\t\t\tAPIVersion: GroupVersionCurrent,\n\t\t},\n\t}\n}", "func (me TxsdShow) IsNew() bool { return me == \"new\" }", "func newConfigParser(conf *dlcConfig, options flags.Options) *flags.Parser {\n\tparser := flags.NewParser(conf, options)\n\treturn parser\n}", "func createSetting(name string, defValue interface{}, validationFn []validationFnType) *setting {\n\ts := setting{Name: name, 
DefaultValue: defValue, ValidationFns: validationFn}\n\tSettingsList[name] = &s\n\treturn &s\n}", "func configureFlags(api *operations.OpenPitrixAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func newConfigParser(conf *config, options flags.Options) *flags.Parser {\n\tparser := flags.NewParser(conf, options)\n\treturn parser\n}", "func configureUI() {\n\tterminal.Prompt = \"› \"\n\tterminal.TitleColorTag = \"{s}\"\n\n\tif options.GetB(OPT_NO_COLOR) {\n\t\tfmtc.DisableColors = true\n\t}\n\n\tswitch {\n\tcase fmtc.IsTrueColorSupported():\n\t\tcolorTagApp, colorTagVer = \"{#CC1E2C}\", \"{#CC1E2C}\"\n\tcase fmtc.Is256ColorsSupported():\n\t\tcolorTagApp, colorTagVer = \"{#160}\", \"{#160}\"\n\tdefault:\n\t\tcolorTagApp, colorTagVer = \"{r}\", \"{r}\"\n\t}\n}", "func (p *Plugin) InitEmptySettings() {\n\tp.PluginObj.Settings.Mounts = make([]types.PluginMount, len(p.PluginObj.Config.Mounts))\n\tcopy(p.PluginObj.Settings.Mounts, p.PluginObj.Config.Mounts)\n\tp.PluginObj.Settings.Devices = make([]types.PluginDevice, len(p.PluginObj.Config.Linux.Devices))\n\tcopy(p.PluginObj.Settings.Devices, p.PluginObj.Config.Linux.Devices)\n\tp.PluginObj.Settings.Env = make([]string, 0, len(p.PluginObj.Config.Env))\n\tfor _, env := range p.PluginObj.Config.Env {\n\t\tif env.Value != nil {\n\t\t\tp.PluginObj.Settings.Env = append(p.PluginObj.Settings.Env, fmt.Sprintf(\"%s=%s\", env.Name, *env.Value))\n\t\t}\n\t}\n\tp.PluginObj.Settings.Args = make([]string, len(p.PluginObj.Config.Args.Value))\n\tcopy(p.PluginObj.Settings.Args, p.PluginObj.Config.Args.Value)\n}", "func (d MessagesDialogsNotModified) construct() MessagesDialogsClass { return &d }", "func (m *_MonitoringParameters) InitializeParent(parent ExtensionObjectDefinition) {}", "func (w *Window) createLayout(parent widgets.QWidget_ITF) {\n\t// Create the window layout, which will act as the layout for the underlying QMainWindow's\n\t// central widget's layout.\n\tw.windowLayout = 
widgets.NewQHBoxLayout2(parent)\n\tw.windowLayout.SetContentsMargins(7, 7, 7, 7)\n\n\tw.leftLayout = widgets.NewQHBoxLayout2(w.windowLayout.Widget())\n\tw.leftLayout.SetAlign(core.Qt__AlignLeft)\n\n\tw.rightLayout = widgets.NewQHBoxLayout2(w.windowLayout.Widget())\n\tw.rightLayout.SetAlign(core.Qt__AlignRight)\n\n\t// Add the left and right layout widgets, providing them equal, but positive stretch so they\n\t// meet in the middle of the window by default.\n\tw.windowLayout.AddLayout(w.leftLayout, 1)\n\tw.windowLayout.AddLayout(w.rightLayout, 1)\n}", "func SettingsHandler(w http.ResponseWriter, r *http.Request) {\n\tvar data Settings\n\n\tctx := appengine.NewContext(r)\n\tkey := datastore.NewKey(ctx, \"Settings\", \"main\", 0, nil)\n\n\tif r.Method == \"POST\" {\n\t\tfitbitClientID := r.FormValue(\"fitbit_client_id\")\n\t\tfitbitClientSecret := r.FormValue(\"fitbit_client_secret\")\n\n\t\tdata = Settings{\n\t\t\tFitbitClientID: fitbitClientID,\n\t\t\tFitbitClientSecret: fitbitClientSecret,\n\t\t}\n\n\t\t_, err := datastore.Put(ctx, key, &data)\n\n\t\tif err != nil {\n\t\t\tlog.Errorf(ctx, \"%v\", err)\n\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\terr := GetSettings(ctx, &data)\n\n\t\tif err != nil {\n\n\t\t\tif err != datastore.ErrNoSuchEntity {\n\t\t\t\tlog.Errorf(ctx, \"%v\", err)\n\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tsettingsFormTemplate.Execute(w, data)\n}", "func WindowNew(t WindowType) *Window {\n\tid := Candy().Guify(\"gtk_window_new\", t).String()\n\treturn NewWindow(Candy(), id)\n}", "func (m *IntentsDeviceManagementIntentItemRequestBuilder) Settings()(*IntentsItemSettingsRequestBuilder) {\n return NewIntentsItemSettingsRequestBuilderInternal(m.BaseRequestBuilder.PathParameters, m.BaseRequestBuilder.RequestAdapter)\n}", "func configureFlags(api *operations.EsiAPI) {\n\t// api.CommandLineOptionsGroups = 
[]swag.CommandLineOptionsGroup{ ... }\n}", "func (this *Window) GetSettings() (settings ContextSettings) {\n\tsettings.fromC(C.sfWindow_getSettings(this.cptr))\n\treturn\n}", "func newConfigParser(conf *litConfig, options flags.Options) *flags.Parser {\n\tparser := flags.NewParser(conf, options)\n\treturn parser\n}", "func (ap *App) ConfigGui() *gi.Window {\n\tgi.SetAppName(\"Gabor View\")\n\tgi.SetAppAbout(\"Application/Utility to allow viewing of gabor convolution with sound\")\n\n\tap.GUI.Win = gi.NewMainWindow(\"gb\", \"Gabor View\", 1600, 1200)\n\tap.GUI.ViewPort = ap.GUI.Win.Viewport\n\tap.GUI.ViewPort.UpdateStart()\n\n\tmfr := ap.GUI.Win.SetMainFrame()\n\n\tap.GUI.ToolBar = gi.AddNewToolBar(mfr, \"tbar\")\n\tap.GUI.ToolBar.SetStretchMaxWidth()\n\n\tap.GUI.AddToolbarItem(egui.ToolbarItem{Label: \"Init\", Icon: \"update\",\n\t\tTooltip: \"Initialize everything including network weights, and start over. Also applies current params.\",\n\t\tActive: egui.ActiveAlways,\n\t\tFunc: func() {\n\t\t\tap.Init()\n\t\t\tap.GUI.UpdateWindow()\n\t\t},\n\t})\n\n\tap.GUI.AddToolbarItem(egui.ToolbarItem{Label: \"Open Sound Files\",\n\t\tIcon: \"file-open\",\n\t\tTooltip: \"Opens a file dialog for selecting a single sound file or a directory of sound files (only .wav files work at this time)\",\n\t\tActive: egui.ActiveAlways,\n\t\tFunc: func() {\n\t\t\texts := \".wav\"\n\t\t\tgiv.FileViewDialog(ap.GUI.ViewPort, ap.OpenPath, exts, giv.DlgOpts{Title: \"Open .wav Sound File\", Prompt: \"Open a .wav file, or directory of .wav files, for sound processing.\"}, nil,\n\t\t\t\tap.GUI.Win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\t\t\tif sig == int64(gi.DialogAccepted) {\n\t\t\t\t\t\tdlg, _ := send.Embed(gi.KiT_Dialog).(*gi.Dialog)\n\t\t\t\t\t\tfn := giv.FileViewDialogValue(dlg)\n\t\t\t\t\t\tinfo, err := os.Stat(fn)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tfmt.Println(\"error stating %s\", fn)\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif info.IsDir() 
{\n\t\t\t\t\t\t\t// Could do fully recursive by passing path var to LoadTranscription but I\n\t\t\t\t\t\t\t// tried it and it didn't return from TIMIT/TRAIN/DR1 even after 10 minutes\n\t\t\t\t\t\t\t// This way it does one level directory only and is fast\n\t\t\t\t\t\t\tfilepath.Walk(fn, func(path string, info os.FileInfo, err error) error {\n\t\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\tlog.Fatalf(err.Error())\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif info.IsDir() == false {\n\t\t\t\t\t\t\t\t\t//fmt.Printf(\"File Name: %s\\n\", info.Name())\n\t\t\t\t\t\t\t\t\tfp := filepath.Join(fn, info.Name())\n\t\t\t\t\t\t\t\t\tap.LoadTranscription(fp)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tap.LoadTranscription(fn)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tap.ConfigTableView(ap.SndsTable.View)\n\t\t\t\t\t\tap.GUI.IsRunning = true\n\t\t\t\t\t\tap.GUI.ToolBar.UpdateActions()\n\t\t\t\t\t\tap.GUI.Win.UpdateSig()\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t},\n\t})\n\n\tap.GUI.AddToolbarItem(egui.ToolbarItem{Label: \"Unload Sounds\",\n\t\tIcon: \"file-close\",\n\t\tTooltip: \"Clears the table of sounds and closes the open sound files\",\n\t\tActive: egui.ActiveRunning,\n\t\tFunc: func() {\n\t\t\tap.SndsTable.Table.SetNumRows(0)\n\t\t\tap.SndsTable.View.UpdateTable()\n\t\t\tap.GUI.IsRunning = false\n\t\t\tap.GUI.ToolBar.UpdateActions()\n\t\t},\n\t})\n\n\tap.GUI.AddToolbarItem(egui.ToolbarItem{Label: \"Process 1\", Icon: \"play\",\n\t\tTooltip: \"Process the segment of audio from SegmentStart to SegmentEnd applying the gabor filters to the Mel tensor\",\n\t\tActive: egui.ActiveRunning,\n\t\tFunc: func() {\n\t\t\terr := ap.ProcessSetup(&ap.WParams1, &ap.CurSnd1)\n\t\t\tif err == nil {\n\t\t\t\terr = ap.Process(&ap.WParams1, &ap.PParams1, &ap.GParams1)\n\t\t\t\tif err == nil {\n\t\t\t\t\tap.ApplyGabor(&ap.PParams1, &ap.GParams1)\n\t\t\t\t\tap.GUI.UpdateWindow()\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t})\n\n\tap.GUI.AddToolbarItem(egui.ToolbarItem{Label: 
\"Process 2\", Icon: \"play\",\n\t\tTooltip: \"Process the segment of audio from SegmentStart to SegmentEnd applying the gabor filters to the Mel tensor\",\n\t\tActive: egui.ActiveRunning,\n\t\tFunc: func() {\n\t\t\terr := ap.ProcessSetup(&ap.WParams2, &ap.CurSnd2)\n\t\t\tif err == nil {\n\t\t\t\terr = ap.Process(&ap.WParams2, &ap.PParams2, &ap.GParams2)\n\t\t\t\tif err == nil {\n\t\t\t\t\tap.ApplyGabor(&ap.PParams2, &ap.GParams2)\n\t\t\t\t\tap.GUI.UpdateWindow()\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t})\n\n\tap.GUI.AddToolbarItem(egui.ToolbarItem{Label: \"Next 1\", Icon: \"fast-fwd\",\n\t\tTooltip: \"Process the next segment of audio\",\n\t\tActive: egui.ActiveRunning,\n\t\tFunc: func() {\n\t\t\t// setup the next segment of sound\n\t\t\tif ap.WParams1.TimeMode == false { // default\n\t\t\t\tap.SndsTable.View.ResetSelectedIdxs()\n\t\t\t\tif ap.Row == ap.SndsTable.View.DispRows-1 {\n\t\t\t\t\tap.Row = 0\n\t\t\t\t} else {\n\t\t\t\t\tap.Row += 1\n\t\t\t\t}\n\t\t\t\tap.SndsTable.View.SelectedIdx = ap.Row\n\t\t\t\tap.SndsTable.View.SelectIdx(ap.Row)\n\t\t\t} else {\n\t\t\t\td := ap.WParams1.SegmentEnd - ap.WParams1.SegmentStart\n\t\t\t\tap.WParams1.SegmentStart += d\n\t\t\t\tap.WParams1.SegmentEnd += d\n\t\t\t}\n\t\t\terr := ap.ProcessSetup(&ap.WParams1, &ap.CurSnd1)\n\t\t\tif err == nil {\n\t\t\t\terr = ap.Process(&ap.WParams1, &ap.PParams1, &ap.GParams1)\n\t\t\t\tif err == nil {\n\t\t\t\t\tap.ApplyGabor(&ap.PParams1, &ap.GParams1)\n\t\t\t\t\tap.GUI.UpdateWindow()\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t})\n\n\tap.GUI.AddToolbarItem(egui.ToolbarItem{Label: \"Next 2\", Icon: \"fast-fwd\",\n\t\tTooltip: \"Process the next segment of audio\",\n\t\tActive: egui.ActiveRunning,\n\t\tFunc: func() {\n\t\t\t// setup the next segment of sound\n\t\t\tif ap.WParams2.TimeMode == false { // default\n\t\t\t\tap.SndsTable.View.ResetSelectedIdxs()\n\t\t\t\tif ap.Row == ap.SndsTable.View.DispRows-1 {\n\t\t\t\t\tap.Row = 0\n\t\t\t\t} else {\n\t\t\t\t\tap.Row += 
1\n\t\t\t\t}\n\t\t\t\tap.SndsTable.View.SelectedIdx = ap.Row\n\t\t\t\tap.SndsTable.View.SelectIdx(ap.Row)\n\t\t\t} else {\n\t\t\t\td := ap.WParams2.SegmentEnd - ap.WParams2.SegmentStart\n\t\t\t\tap.WParams2.SegmentStart += d\n\t\t\t\tap.WParams2.SegmentEnd += d\n\t\t\t}\n\t\t\terr := ap.ProcessSetup(&ap.WParams2, &ap.CurSnd2)\n\t\t\tif err == nil {\n\t\t\t\terr = ap.Process(&ap.WParams2, &ap.PParams2, &ap.GParams2)\n\t\t\t\tif err == nil {\n\t\t\t\t\tap.ApplyGabor(&ap.PParams2, &ap.GParams2)\n\t\t\t\t\tap.GUI.UpdateWindow()\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t})\n\n\tap.GUI.AddToolbarItem(egui.ToolbarItem{Label: \"Update Gabors\", Icon: \"update\",\n\t\tTooltip: \"Call this to see the result of changing the Gabor specifications\",\n\t\tActive: egui.ActiveAlways,\n\t\tFunc: func() {\n\t\t\tap.UpdateGabors(&ap.GParams1)\n\t\t\tap.UpdateGabors(&ap.GParams2)\n\t\t\tap.GUI.UpdateWindow()\n\t\t},\n\t})\n\n\tap.GUI.AddToolbarItem(egui.ToolbarItem{Label: \"Save 1\", Icon: \"fast-fwd\",\n\t\tTooltip: \"Save the mel and result grids\",\n\t\tActive: egui.ActiveRunning,\n\t\tFunc: func() {\n\t\t\tap.SnapShot1()\n\t\t},\n\t})\n\n\t//ap.GUI.AddToolbarItem(egui.ToolbarItem{Label: \"Copy 1 -> 2\", Icon: \"copy\",\n\t//\tTooltip: \"Copy all set 1 params (window, process, gabor) to set 2\",\n\t//\tActive: egui.ActiveAlways,\n\t//\tFunc: func() {\n\t//\t\tap.CopyOne()\n\t//\t\tap.GUI.UpdateWindow()\n\t//\t},\n\t//})\n\t//\n\t//ap.GUI.AddToolbarItem(egui.ToolbarItem{Label: \"Copy 2 -> 1\", Icon: \"copy\",\n\t//\tTooltip: \"Copy all set 2 params (window, process, gabor) to set 1\",\n\t//\tActive: egui.ActiveAlways,\n\t//\tFunc: func() {\n\t//\t\tap.CopyTwo()\n\t//\t\tap.GUI.UpdateWindow()\n\t//\t},\n\t//})\n\n\tap.GUI.ToolBar.AddSeparator(\"filt\")\n\n\tap.GUI.AddToolbarItem(egui.ToolbarItem{Label: \"Filter sounds...\", Icon: \"search\",\n\t\tTooltip: \"filter the table of sounds for sounds containing string...\",\n\t\tActive: egui.ActiveRunning,\n\t\tFunc: func() 
{\n\t\t\tgiv.CallMethod(ap, \"FilterSounds\", ap.GUI.ViewPort)\n\t\t},\n\t})\n\n\tap.GUI.AddToolbarItem(egui.ToolbarItem{Label: \"Unilter sounds...\", Icon: \"reset\",\n\t\tTooltip: \"clear sounds table filter\",\n\t\tActive: egui.ActiveRunning,\n\t\tFunc: func() {\n\t\t\tap.UnfilterSounds()\n\t\t\tap.GUI.UpdateWindow()\n\t\t},\n\t})\n\n\tap.GUI.AddToolbarItem(egui.ToolbarItem{Label: \"View\", Icon: \"file-open\",\n\t\tTooltip: \"opens spectrogram view of selected sound in external application 'Audacity' - edit code to use a different application\",\n\t\tActive: egui.ActiveRunning,\n\t\tFunc: func() {\n\t\t\tap.View()\n\t\t\t//giv.CallMethod(ap, \"ViewSpectrogram\", ap.GUI.ViewPort)\n\t\t},\n\t})\n\n\tsplit1 := gi.AddNewSplitView(mfr, \"split1\")\n\tsplit1.Dim = 0\n\tsplit1.SetStretchMax()\n\n\tsplit := gi.AddNewSplitView(split1, \"split\")\n\tsplit.Dim = 1\n\tsplit.SetStretchMax()\n\n\ttv1 := gi.AddNewTabView(split1, \"tv1\")\n\tap.SndsTable.View = tv1.AddNewTab(etview.KiT_TableView, \"Sounds\").(*etview.TableView)\n\tap.ConfigTableView(ap.SndsTable.View)\n\tap.SndsTable.View.SetTable(ap.SndsTable.Table, nil)\n\n\tsplit1.SetSplits(.75, .25)\n\n\tap.GUI.StructView = giv.AddNewStructView(split, \"app\")\n\tap.GUI.StructView.SetStruct(ap)\n\n\tspecs := giv.AddNewTableView(split, \"specs1\")\n\tspecs.Viewport = ap.GUI.ViewPort\n\tspecs.SetSlice(&ap.GParams1.GaborSpecs)\n\n\tspecs = giv.AddNewTableView(split, \"specs2\")\n\tspecs.Viewport = ap.GUI.ViewPort\n\tspecs.SetSlice(&ap.GParams2.GaborSpecs)\n\n\ttv := gi.AddNewTabView(split, \"tv\")\n\n\ttg := tv.AddNewTab(etview.KiT_TensorGrid, \"Gabors\").(*etview.TensorGrid)\n\ttg.SetStretchMax()\n\tap.PParams1.LogPowerSegment.SetMetaData(\"grid-min\", \"10\")\n\ttg.SetTensor(&ap.GParams1.GaborSet.Filters)\n\t// set Display after setting tensor\n\ttg.Disp.Range.FixMin = false\n\ttg.Disp.Range.FixMax = false\n\n\ttg = tv.AddNewTab(etview.KiT_TensorGrid, 
\"Power\").(*etview.TensorGrid)\n\ttg.SetStretchMax()\n\tap.PParams1.LogPowerSegment.SetMetaData(\"grid-min\", \"10\")\n\ttg.SetTensor(&ap.PParams1.LogPowerSegment)\n\ttg.Disp.Range.FixMin = false\n\ttg.Disp.Range.FixMax = false\n\n\ttg = tv.AddNewTab(etview.KiT_TensorGrid, \"Mel\").(*etview.TensorGrid)\n\ttg.SetStretchMax()\n\ttg.SetTensor(&ap.PParams1.MelFBankSegment)\n\t// set Display after setting tensor\n\ttg.Disp.ColorMap = \"ColdHot\"\n\ttg.Disp.Range.FixMin = false\n\ttg.Disp.Range.FixMax = false\n\n\ttg = tv.AddNewTab(etview.KiT_TensorGrid, \"Result\").(*etview.TensorGrid)\n\ttg.SetStretchMax()\n\ttg.SetTensor(&ap.GParams1.GborOutput)\n\ttg.Disp.Range.FixMin = false\n\ttg.Disp.Range.FixMax = false\n\n\ttg = tv.AddNewTab(etview.KiT_TensorGrid, \"MFCC\").(*etview.TensorGrid)\n\ttg.SetStretchMax()\n\ttg.SetTensor(&ap.PParams1.MFCCSegment)\n\t// set Display after setting tensor\n\ttg.Disp.ColorMap = \"ColdHot\"\n\ttg.Disp.Range.FixMin = false\n\ttg.Disp.Range.FixMax = false\n\n\ttg = tv.AddNewTab(etview.KiT_TensorGrid, \"Deltas\").(*etview.TensorGrid)\n\ttg.SetStretchMax()\n\ttg.SetTensor(&ap.PParams1.MFCCDeltas)\n\ttg.Disp.ColorMap = \"ColdHot\"\n\ttg.Disp.Range.FixMin = false\n\ttg.Disp.Range.FixMax = false\n\n\ttg = tv.AddNewTab(etview.KiT_TensorGrid, \"DeltaDeltas\").(*etview.TensorGrid)\n\ttg.SetStretchMax()\n\ttg.SetTensor(&ap.PParams1.MFCCDeltaDeltas)\n\ttg.Disp.ColorMap = \"ColdHot\"\n\ttg.Disp.Range.FixMin = false\n\ttg.Disp.Range.FixMax = false\n\n\ttv2 := gi.AddNewTabView(split, \"tv2\")\n\tsplit.SetSplits(.3, .15, .15, .2, .2)\n\n\ttg = tv2.AddNewTab(etview.KiT_TensorGrid, \"Gabors\").(*etview.TensorGrid)\n\ttg.SetStretchMax()\n\ttg.SetTensor(&ap.GParams2.GaborSet.Filters)\n\t// set Display after setting tensor\n\ttg.Disp.Range.FixMin = false\n\ttg.Disp.Range.FixMax = false\n\n\ttg = tv2.AddNewTab(etview.KiT_TensorGrid, \"Power\").(*etview.TensorGrid)\n\ttg.SetStretchMax()\n\tap.PParams2.LogPowerSegment.SetMetaData(\"grid-min\", 
\"10\")\n\ttg.SetTensor(&ap.PParams2.LogPowerSegment)\n\ttg.Disp.Range.FixMin = false\n\ttg.Disp.Range.FixMax = false\n\n\ttg = tv2.AddNewTab(etview.KiT_TensorGrid, \"Mel\").(*etview.TensorGrid)\n\ttg.SetStretchMax()\n\ttg.SetTensor(&ap.PParams2.MelFBankSegment)\n\ttg.Disp.ColorMap = \"ColdHot\"\n\ttg.Disp.Range.FixMin = false\n\ttg.Disp.Range.FixMax = false\n\n\ttg = tv2.AddNewTab(etview.KiT_TensorGrid, \"Result\").(*etview.TensorGrid)\n\ttg.SetStretchMax()\n\ttg.SetTensor(&ap.GParams2.GborOutput)\n\t// set Display after setting tensor\n\ttg.Disp.Range.FixMin = false\n\ttg.Disp.Range.FixMax = false\n\n\ttg = tv2.AddNewTab(etview.KiT_TensorGrid, \"MFCC\").(*etview.TensorGrid)\n\ttg.SetStretchMax()\n\ttg.SetTensor(&ap.PParams2.MFCCSegment)\n\ttg.Disp.ColorMap = \"ColdHot\"\n\ttg.Disp.Range.FixMin = false\n\ttg.Disp.Range.FixMax = false\n\n\ttg = tv2.AddNewTab(etview.KiT_TensorGrid, \"Deltas\").(*etview.TensorGrid)\n\ttg.SetStretchMax()\n\ttg.SetTensor(&ap.PParams2.MFCCDeltas)\n\ttg.Disp.ColorMap = \"ColdHot\"\n\ttg.Disp.Range.FixMin = false\n\ttg.Disp.Range.FixMax = false\n\n\ttg = tv2.AddNewTab(etview.KiT_TensorGrid, \"DeltaDeltas\").(*etview.TensorGrid)\n\ttg.SetStretchMax()\n\ttg.SetTensor(&ap.PParams2.MFCCDeltaDeltas)\n\t// set Display after setting tensor\n\ttg.Disp.ColorMap = \"ColdHot\"\n\ttg.Disp.Range.FixMin = false\n\ttg.Disp.Range.FixMax = false\n\n\tap.StatLabel = gi.AddNewLabel(mfr, \"status\", \"Status...\")\n\tap.StatLabel.SetStretchMaxWidth()\n\tap.StatLabel.Redrawable = true\n\n\tap.GUI.FinalizeGUI(false)\n\treturn ap.GUI.Win\n}", "func GetCreationOptionEnumStringValues() []string {\n\treturn []string{\n\t\t\"TEMPLATE\",\n\t\t\"CLONE\",\n\t}\n}", "func NewSettings(\n\ttaskName string,\n\tsoftwareName string,\n\tsoftwareVersion string,\n\tcaBundlePath string,\n\tprobeASN string,\n\tprobeCC string,\n\tprobeIP string,\n\tprobeNetworkName string,\n\tlogLevel string,\n) Settings {\n\treturn Settings{\n\t\tLogLevel: makeLogLevel(logLevel),\n\t\tName: 
taskName,\n\t\tOptions: Options{\n\t\t\tCaBundlePath: caBundlePath,\n\t\t\tNoBouncer: true,\n\t\t\tNoCollector: true,\n\t\t\tNoFileReport: true,\n\t\t\tNoGeoIP: true,\n\t\t\tNoResolverLookup: true,\n\t\t\tProbeASN: probeASN,\n\t\t\tProbeCC: probeCC,\n\t\t\tProbeIP: probeIP,\n\t\t\tProbeNetworkName: probeNetworkName,\n\t\t\tSaveRealProbeIP: true,\n\t\t\tSoftwareName: softwareName,\n\t\t\tSoftwareVersion: softwareVersion,\n\t\t},\n\t}\n}", "func newGLWindow(opts *oswin.NewWindowOptions, sc *oswin.Screen) (*glfw.Window, error) {\n\t_, _, tool, fullscreen := oswin.WindowFlagsToBool(opts.Flags)\n\tglfw.DefaultWindowHints()\n\tglfw.WindowHint(glfw.Resizable, glfw.True)\n\tglfw.WindowHint(glfw.Visible, glfw.False) // needed to position\n\tglfw.WindowHint(glfw.Focused, glfw.True)\n\t// glfw.WindowHint(glfw.ScaleToMonitor, glfw.True)\n\tglfw.WindowHint(glfw.ContextVersionMajor, glosGlMajor)\n\tglfw.WindowHint(glfw.ContextVersionMinor, glosGlMinor)\n\tglfw.WindowHint(glfw.OpenGLProfile, glfw.OpenGLCoreProfile)\n\tglfw.WindowHint(glfw.OpenGLForwardCompatible, glfw.True)\n\tglfw.WindowHint(glfw.Samples, 0) // don't do multisampling for main window -- only in sub-render\n\tif glosDebug {\n\t\tglfw.WindowHint(glfw.OpenGLDebugContext, glfw.True)\n\t}\n\n\t// todo: glfw.Samples -- multisampling\n\tif fullscreen {\n\t\tglfw.WindowHint(glfw.Maximized, glfw.True)\n\t}\n\tif tool {\n\t\tglfw.WindowHint(glfw.Decorated, glfw.False)\n\t} else {\n\t\tglfw.WindowHint(glfw.Decorated, glfw.True)\n\t}\n\t// todo: glfw.Floating for always-on-top -- could set for modal\n\tsz := opts.Size // note: this is already in standard window size units!\n\twin, err := glfw.CreateWindow(sz.X, sz.Y, opts.GetTitle(), nil, theApp.shareWin)\n\tif err != nil {\n\t\treturn win, err\n\t}\n\twin.SetPos(opts.Pos.X, opts.Pos.Y)\n\treturn win, err\n}", "func configureFlags(api *operations.KubernikusAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... 
}\n}", "func (client *WebAppsClient) listApplicationSettingsSlotCreateRequest(ctx context.Context, resourceGroupName string, name string, slot string, options *WebAppsListApplicationSettingsSlotOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/appsettings/list\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif slot == \"\" {\n\t\treturn nil, errors.New(\"parameter slot cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{slot}\", url.PathEscape(slot))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (obj *Device) SetDialogBoxMode(enableDialogs bool) Error {\n\tret, _, _ := syscall.Syscall(\n\t\tobj.vtbl.SetDialogBoxMode,\n\t\t2,\n\t\tuintptr(unsafe.Pointer(obj)),\n\t\tuintptrBool(enableDialogs),\n\t\t0,\n\t)\n\treturn toErr(ret)\n}", "func (c *Configuration) InitializeFlags() {\n\t// Flags\n\tflag.BoolVar(&c.Flags.Daemon, \"daemon\", false, \"Start kpmenu directly as daemon\")\n\tflag.BoolVarP(&c.Flags.Version, \"version\", \"v\", false, \"Show kpmenu version\")\n\n\t// 
General\n\tflag.StringVarP(&c.General.Menu, \"menu\", \"m\", c.General.Menu, \"Choose which menu to use\")\n\tflag.StringVar(&c.General.ClipboardTool, \"clipboardTool\", c.General.ClipboardTool, \"Choose which clipboard tool to use\")\n\tflag.IntVarP(&c.General.ClipboardTimeout, \"clipboardTime\", \"c\", c.General.ClipboardTimeout, \"Timeout of clipboard in seconds (0 = no timeout)\")\n\tflag.BoolVarP(&c.General.NoCache, \"nocache\", \"n\", c.General.NoCache, \"Disable caching of database\")\n\tflag.BoolVar(&c.General.CacheOneTime, \"cacheOneTime\", c.General.CacheOneTime, \"Cache the database only the first time\")\n\tflag.IntVar(&c.General.CacheTimeout, \"cacheTimeout\", c.General.CacheTimeout, \"Timeout of cache in seconds\")\n\tflag.BoolVar(&c.General.NoOTP, \"nootp\", c.General.NoOTP, \"Disable OTP handling\")\n\n\t// Executable\n\tflag.StringVar(&c.Executable.CustomPromptPassword, \"customPromptPassword\", c.Executable.CustomPromptPassword, \"Custom executable for prompt password\")\n\tflag.StringVar(&c.Executable.CustomPromptMenu, \"customPromptMenu\", c.Executable.CustomPromptMenu, \"Custom executable for prompt menu\")\n\tflag.StringVar(&c.Executable.CustomPromptEntries, \"customPromptEntries\", c.Executable.CustomPromptEntries, \"Custom executable for prompt entries\")\n\tflag.StringVar(&c.Executable.CustomPromptFields, \"customPromptFields\", c.Executable.CustomPromptFields, \"Custom executable for prompt fields\")\n\tflag.StringVar(&c.Executable.CustomClipboardCopy, \"customClipboardCopy\", c.Executable.CustomClipboardCopy, \"Custom executable for clipboard copy\")\n\tflag.StringVar(&c.Executable.CustomClipboardPaste, \"customClipboardPaste\", c.Executable.CustomClipboardPaste, \"Custom executable for clipboard paste\")\n\tflag.StringVar(&c.Executable.CustomClipboardClean, \"customClipboardClean\", c.Executable.CustomClipboardClean, \"Custom executable for clipboard clean\")\n\n\t// Style\n\tflag.StringVar(&c.Style.PasswordBackground, 
\"passwordBackground\", c.Style.PasswordBackground, \"Color of dmenu background and text for password selection, used to hide password typing\")\n\tflag.StringVar(&c.Style.TextPassword, \"textPassword\", c.Style.TextPassword, \"Label for password selection\")\n\tflag.StringVar(&c.Style.TextMenu, \"textMenu\", c.Style.TextMenu, \"Label for menu selection\")\n\tflag.StringVar(&c.Style.TextEntry, \"textEntry\", c.Style.TextEntry, \"Label for entry selection\")\n\tflag.StringVar(&c.Style.TextField, \"textField\", c.Style.TextField, \"Label for field selection\")\n\tflag.StringVar(&c.Style.ArgsPassword, \"argsPassword\", c.Style.ArgsPassword, \"Additional arguments for dmenu at password selection, separated by a space\")\n\tflag.StringVar(&c.Style.ArgsMenu, \"argsMenu\", c.Style.ArgsMenu, \"Additional arguments for dmenu at menu selection, separated by a space\")\n\tflag.StringVar(&c.Style.ArgsEntry, \"argsEntry\", c.Style.ArgsEntry, \"Additional arguments for dmenu at entry selection, separated by a space\")\n\tflag.StringVar(&c.Style.ArgsField, \"argsField\", c.Style.ArgsField, \"Additional arguments for dmenu at field selection, separated by a space\")\n\n\t// Database\n\tflag.StringVarP(&c.Database.Database, \"database\", \"d\", c.Database.Database, \"Path to the KeePass database\")\n\tflag.StringVarP(&c.Database.KeyFile, \"keyfile\", \"k\", c.Database.KeyFile, \"Path to the database keyfile\")\n\tflag.StringVarP(&c.Database.Password, \"password\", \"p\", c.Database.Password, \"Password of the database\")\n\tflag.StringVar(&c.Database.FieldOrder, \"fieldOrder\", c.Database.FieldOrder, \"String order of fields to show on field selection\")\n\tflag.BoolVar(&c.Database.FillOtherFields, \"fillOtherFields\", c.Database.FillOtherFields, \"Enable fill of remaining fields\")\n\tflag.StringVar(&c.Database.FillBlacklist, \"fillBlacklist\", c.Database.FillBlacklist, \"String of blacklisted fields that won't be shown\")\n}", "func (client *MonitoringSettingsClient) 
getCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, options *MonitoringSettingsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (o ClientLibrarySettingsOutput) CppSettings() CppSettingsPtrOutput {\n\treturn o.ApplyT(func(v ClientLibrarySettings) *CppSettings { return v.CppSettings }).(CppSettingsPtrOutput)\n}", "func changeSettings(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"trying to change settings\")\n\tif err := r.ParseForm(); err != nil {\n\t\tfmt.Println(err)\n\t}\n}", "func NewDialog() *Dialog {\n\treturn &Dialog{}\n}", "func (p *usermdPlugin) Settings() []backend.PluginSetting {\n\tlog.Tracef(\"usermd Settings\")\n\n\treturn nil\n}", "func (client *WebAppsClient) updateAuthSettingsSlotCreateRequest(ctx context.Context, resourceGroupName string, name 
string, slot string, siteAuthSettings SiteAuthSettings, options *WebAppsUpdateAuthSettingsSlotOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/authsettings\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif slot == \"\" {\n\t\treturn nil, errors.New(\"parameter slot cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{slot}\", url.PathEscape(slot))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, siteAuthSettings)\n}" ]
[ "0.6050621", "0.6031621", "0.5928538", "0.58055913", "0.57716674", "0.51237786", "0.5116521", "0.51071554", "0.5070288", "0.49982792", "0.4967534", "0.4916568", "0.4900102", "0.4772437", "0.47615722", "0.47517538", "0.47404546", "0.47228342", "0.4722345", "0.47178954", "0.4714612", "0.4671497", "0.46566063", "0.4643672", "0.46254584", "0.46064693", "0.45973453", "0.45405102", "0.4539083", "0.45352766", "0.45138037", "0.45032182", "0.44447088", "0.4419712", "0.43982285", "0.43785712", "0.43725222", "0.4367618", "0.43482503", "0.43253937", "0.42940202", "0.42855734", "0.42537224", "0.42530507", "0.42421675", "0.4238771", "0.4222798", "0.42194876", "0.41938794", "0.4183948", "0.41833", "0.41805124", "0.4179322", "0.41777715", "0.41681623", "0.41481194", "0.41413903", "0.4133233", "0.4124212", "0.411528", "0.41123948", "0.410972", "0.41094977", "0.41094977", "0.41030425", "0.41030392", "0.41018003", "0.4095263", "0.40914276", "0.40901923", "0.40880325", "0.40750194", "0.4067039", "0.40558425", "0.40547976", "0.40512052", "0.4036083", "0.4035653", "0.40342674", "0.4028252", "0.40220752", "0.4020864", "0.40118834", "0.39930433", "0.3992072", "0.39863697", "0.39784285", "0.39758325", "0.39748013", "0.39738488", "0.39655146", "0.39589265", "0.39545175", "0.3948671", "0.3946265", "0.39449638", "0.39442587", "0.39398366", "0.39397794", "0.3939165" ]
0.56048965
5
/ Send file to server in app dir, used for making dockercompose and nginx configuration available.
func Upload( src, dest string ) ( err error ) { cfg, err := config.Read() if err != nil { return } connString := "leo-deploy@" + cfg.ServerName dest = connString + ":apps/" + cfg.AppName + "/" + dest fmt.Printf( "Copying %s into %s\n", src, dest ) out, err := exec.Command( "scp", src, dest ).CombinedOutput() fmt.Println( string( out ) ) if err != nil { return fmt.Errorf( "Can't upload to " + connString + ". Did you install your public key there?" ) } return }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func sendFile(w http.ResponseWriter, r *http.Request, file string) {\n\tfilepath := \"./assets/\" + file\n\tlog.Printf(\"Serving file %s\", filepath)\n\thttp.ServeFile(w, r, filepath)\n}", "func FileServer(c Config) Server {\n abs, err := filepath.Abs(c.Root)\n if err != nil { panic(err) }\n c.Root = abs\n\n if c.TempDir == \"\" {\n c.TempDir = filepath.Join(c.Root, \"tmp\")\n } else {\n abs, err = filepath.Abs(c.TempDir)\n if err != nil { panic(err) }\n c.TempDir = abs\n }\n\n return &fileServer{ assets: make(map[string]*assetMeta), config: c }\n}", "func main() {\n\thttp.Handle(\"/public/\", http.StripPrefix(\"/public/\", http.FileServer(statik.FS)))\n\tlog.Println(\"visit http://localhost:8080/public/hello.txt\")\n\thttp.ListenAndServe(\":8080\", nil)\n}", "func startService() {\n\n box := packr.NewBox(\"./templates\")\n\n http.Handle(\"/\", http.FileServer(box))\n http.HandleFunc(\"/upload/\", ReceiveFile)\n\n log.Println(\"starting http service...\")\n if err := http.ListenAndServe(\":80\", nil); err != nil {\n log.Fatal(err)\n }\n}", "func handler(w http.ResponseWriter, r *http.Request) {\n\tfilePath := getFileToServe(r.URL, os.Getenv(\"PATH_PREFIX\"))\n\thttp.ServeFile(w, r, filePath)\n}", "func fileServer(r chi.Router, public string, static string) {\n\n\tif strings.ContainsAny(public, \"{}*\") {\n\t\tpanic(\"FileServer does not permit URL parameters.\")\n\t}\n\n\troot, _ := filepath.Abs(static)\n\tif _, err := os.Stat(root); os.IsNotExist(err) {\n\t\tpanic(\"Website bundle not found. 
Please run: `cd ../myapp-frontend-angular && npm run build`\")\n\t}\n\n\tfs := http.StripPrefix(public, http.FileServer(http.Dir(root)))\n\n\tif public != \"/\" && public[len(public)-1] != '/' {\n\t\tr.Get(public, http.RedirectHandler(public+\"/\", 301).ServeHTTP)\n\t\tpublic += \"/\"\n\t}\n\n\tr.Get(public+\"*\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfile := strings.Replace(r.RequestURI, public, \"/\", 1)\n\t\tif _, err := os.Stat(root + file); os.IsNotExist(err) {\n\t\t\thttp.ServeFile(w, r, path.Join(root, \"index.html\"))\n\t\t\treturn\n\t\t}\n\t\tfs.ServeHTTP(w, r)\n\t}))\n}", "func ServeWithFileServer() {\n\thttp.Handle(\"/fs/\", http.StripPrefix(\"/fs\", http.FileServer(http.Dir(\".\"))))\n\thttp.ListenAndServe(\":6081\", nil)\n}", "func serveFile(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\terr := serveAssets(w, r, r.URL.Path)\n\tcheckError(err)\n}", "func serveFile(context router.Context) error {\n\t// Assuming we're running from the root of the website\n\tlocalPath := \"./public\" + path.Clean(context.Path())\n\n\tif _, err := os.Stat(localPath); err != nil {\n\t\t// If file not found return error\n\t\tif os.IsNotExist(err) {\n\t\t\treturn router.NotFoundError(err)\n\t\t}\n\n\t\t// For other errors return not authorised\n\t\treturn router.NotAuthorizedError(err)\n\t}\n\n\t// If the file exists and we can access it, serve it\n\thttp.ServeFile(context, context.Request(), localPath)\n\treturn nil\n}", "func serveDirectory() {\n\n\tlog.Println(config.Directory)\n\tfsys := http.Dir(config.Directory)\n\tfs := http.FileServer(fsys)\n\taddress := \":\" + config.Port\n\tprefix := config.Path[0 : len(config.Path)-1]\n\tlog.Println(prefix)\n\n\thttp.Handle(config.Path, http.StripPrefix(prefix, fs))\n\tlog.Fatalln(http.ListenAndServe(address, nil))\n}", "func (c *Controller) SendFile(path string) Result {\n\tvar file io.ReadSeeker\n\tpath = filepath.FromSlash(path)\n\tif rc, ok := includedResources[path]; ok 
{\n\t\tfile = rc.Open()\n\t} else {\n\t\tif !filepath.IsAbs(path) {\n\t\t\tpath = filepath.Join(appConfig.AppPath, StaticDir, path)\n\t\t}\n\t\tif _, err := os.Stat(path); err != nil {\n\t\t\treturn c.RenderError(http.StatusNotFound)\n\t\t}\n\t\tvar err error\n\t\tif file, err = os.Open(path); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tc.Response.ContentType = util.DetectContentTypeByExt(path)\n\tif c.Response.ContentType == \"\" {\n\t\tc.Response.ContentType = util.DetectContentTypeByBody(file)\n\t}\n\treturn &resultContent{\n\t\tBody: file,\n\t}\n}", "func (c *Context) File(filepath string) {\n\tc.SendFile(filepath)\n}", "func StaticFileServer() {\n\thttp.Handle(\"/static/\", http.StripPrefix(\"/static\", http.FileServer(http.Dir(\"./file/\"))))\n\thttp.ListenAndServe(\":6082\", nil)\n}", "func (r *Router) ServeFile(base, path string) {\n\tif strings.ContainsAny(base, \":*\") {\n\t\tpanic(\"Lion: ServeFile cannot have url parameters\")\n\t}\n\n\thandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(w, r, path)\n\t})\n\n\tr.Get(base, handler)\n\tr.Head(base, handler)\n}", "func Embed(rootMux *mux.Router, listenPort int, serverName string) {\n\turl := fmt.Sprintf(\"http://localhost:%d\", listenPort)\n\tembeddedFS := _escFS(false)\n\tembeddedServ := http.FileServer(embeddedFS)\n\n\trootMux.Handle(\"/\", embeddedServ)\n\trootMux.HandleFunc(\"/config.json\", func(w http.ResponseWriter, _ *http.Request) {\n\t\tconfigFile, err := embeddedFS.Open(\"/config.sample.json\")\n\t\tif err != nil {\n\t\t\tw.WriteHeader(500)\n\t\t\tio.WriteString(w, \"Couldn't open the file: \"+err.Error())\n\t\t\treturn\n\t\t}\n\t\tconfigFileInfo, err := configFile.Stat()\n\t\tif err != nil {\n\t\t\tw.WriteHeader(500)\n\t\t\tio.WriteString(w, \"Couldn't stat the file: \"+err.Error())\n\t\t\treturn\n\t\t}\n\t\tbuf := make([]byte, configFileInfo.Size())\n\t\tn, err := configFile.Read(buf)\n\t\tif err != nil 
{\n\t\t\tw.WriteHeader(500)\n\t\t\tio.WriteString(w, \"Couldn't read the file: \"+err.Error())\n\t\t\treturn\n\t\t}\n\t\tif int64(n) != configFileInfo.Size() {\n\t\t\tw.WriteHeader(500)\n\t\t\tio.WriteString(w, \"The returned file size didn't match what we expected\")\n\t\t\treturn\n\t\t}\n\t\tjs, _ := sjson.SetBytes(buf, \"default_server_config.m\\\\.homeserver.base_url\", url)\n\t\tjs, _ = sjson.SetBytes(js, \"default_server_config.m\\\\.homeserver.server_name\", serverName)\n\t\tjs, _ = sjson.SetBytes(js, \"brand\", fmt.Sprintf(\"Riot %s\", serverName))\n\t\tjs, _ = sjson.SetBytes(js, \"disable_guests\", true)\n\t\tjs, _ = sjson.SetBytes(js, \"disable_3pid_login\", true)\n\t\tjs, _ = sjson.DeleteBytes(js, \"welcomeUserId\")\n\t\t_, _ = w.Write(js)\n\t})\n\n\tfmt.Println(\"*-------------------------------*\")\n\tfmt.Println(\"| This build includes Riot Web! |\")\n\tfmt.Println(\"*-------------------------------*\")\n\tfmt.Println(\"Point your browser to:\", url)\n\tfmt.Println()\n}", "func main() {\n\treader := bufio.NewReader(os.Stdin)\n\targs := os.Args[1:]\n\n\tvar uploadingFile string\n\tif len(args) == 1 {\n\t\tif args[0] == \"server\" {\n\t\t\tsignalingServer()\n\t\t\treturn\n\t\t} else {\n\t\t\tuploadingFile = args[0]\n\t\t}\n\t}\n\n\tvar f *os.File\n\tif uploadingFile != \"\" {\n\t\tfmt.Printf(\"Uploading %s\\n\", uploadingFile)\n\n\t\tvar err error\n\t\tf, err = os.Open(uploadingFile)\n\t\tcheckError(err)\n\t} else {\n\t\tfor {\n\t\t\tfmt.Printf(\"Save to: \")\n\n\t\t\tfilePath, err := reader.ReadString('\\n')\n\t\t\tcheckError(err)\n\t\t\tf, err = os.Create(strings.Trim(filePath, \"\\n\\r\"))\n\n\t\t\tif err == nil {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tfmt.Printf(err.Error())\n\t\t\t}\n\t\t}\n\t}\n\tdefer f.Close()\n\n\tvar connIO io.ReadWriteCloser\n\tif transport == \"tcp\" {\n\t\tuploaderAddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:8888\")\n\t\tif uploadingFile != \"\" {\n\t\t\ttime.Sleep(2 * time.Second)\n\t\t\tconnIO, err = 
net.Dial(\"tcp\", uploaderAddr.String())\n\t\t\tcheckError(err)\n\t\t} else {\n\t\t\tlistener, err := net.ListenTCP(\"tcp\", uploaderAddr)\n\t\t\tcheckError(err)\n\t\t\tfmt.Printf(\"Listening tcp....\\n\")\n\t\t\tconn, err := listener.Accept()\n\t\t\tcheckError(err)\n\t\t\tconnIO = conn\n\t\t}\n\t} else if transport == \"udp\" {\n\t\tuploaderAddr, err := net.ResolveUDPAddr(\"udp\", \"localhost:8888\")\n\t\tif uploadingFile != \"\" {\n\t\t\ttime.Sleep(2 * time.Second)\n\t\t\tconnIO, err = net.Dial(\"udp\", uploaderAddr.String())\n\t\t\tcheckError(err)\n\t\t} else {\n\t\t\tlistener, err := net.ListenUDP(\"udp\", uploaderAddr)\n\t\t\tcheckError(err)\n\t\t\tfmt.Printf(\"Listening udp....\\n\")\n\t\t\tcheckError(err)\n\t\t\tconnIO = listener\n\t\t}\n\t} else {\n\t\tstunUrl, err := ice.ParseURL(stun)\n\t\tcandidateSelectionTimeout := 30 * time.Second\n\t\tconnectionTimeout := 5 * time.Second\n\t\tconfig := &ice.AgentConfig{\n\t\t\tUrls: []*ice.URL{stunUrl},\n\t\t\tNetworkTypes: []ice.NetworkType{\n\t\t\t\tice.NetworkTypeUDP4,\n\t\t\t\tice.NetworkTypeTCP4,\n\t\t\t},\n\t\t\tCandidateTypes: []ice.CandidateType{\n\t\t\t\tice.CandidateTypeHost,\n\t\t\t\tice.CandidateTypeServerReflexive,\n\t\t\t\tice.CandidateTypePeerReflexive,\n\t\t\t\tice.CandidateTypeRelay,\n\t\t\t},\n\t\t\tCandidateSelectionTimeout: &candidateSelectionTimeout,\n\t\t\tConnectionTimeout: &connectionTimeout,\n\t\t}\n\n\t\tagent, err := ice.NewAgent(config)\n\t\tcheckError(err)\n\n\t\tdefer agent.Close()\n\n\t\terr = agent.OnConnectionStateChange(func(state ice.ConnectionState) {\n\t\t\tfmt.Printf(\"State Change: %s\\n\", state.String())\n\t\t\tif state == ice.ConnectionStateDisconnected {\n\t\t\t\tif connIO != nil {\n\t\t\t\t\terr := connIO.Close()\n\t\t\t\t\tcheckError(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\n\t\tmyCandidates, err := agent.GetLocalCandidates()\n\t\tmyIceCandidates, err := newICECandidatesFromICE(myCandidates)\n\t\tcheckError(err)\n\t\tuflag, pass := 
agent.GetLocalUserCredentials()\n\n\t\tpartnerData := exchange(ExchangeData{\n\t\t\tCandidates: myIceCandidates,\n\t\t\tUflag: uflag,\n\t\t\tPass: pass,\n\t\t})\n\n\t\tfor _, c := range partnerData.Candidates {\n\t\t\ti, err := c.toICE()\n\t\t\tcheckError(err)\n\n\t\t\terr = agent.AddRemoteCandidate(i)\n\t\t\tcheckError(err)\n\t\t}\n\n\t\tvar conn *ice.Conn\n\t\tif uploadingFile != \"\" {\n\t\t\tconn, err = agent.Accept(context.Background(), partnerData.Uflag, partnerData.Pass)\n\t\t} else {\n\t\t\tconn, err = agent.Dial(context.Background(), partnerData.Uflag, partnerData.Pass)\n\t\t}\n\t\tcheckError(err)\n\t\tdefer conn.Close()\n\n\t\tgo func() {\n\t\t\tif uploadingFile != \"\" {\n\t\t\t\tconn.Write([]byte(\"hello\"))\n\t\t\t} else {\n\t\t\t\tconn.Write([]byte(\"world\"))\n\t\t\t}\n\t\t}()\n\n\t\tbuffer := make([]byte, 32 * 1000)\n\t\tconn.Read(buffer)\n\t\tfmt.Printf(\"Receive msg: %s\\n\", string(buffer))\n\n\n\t\tif transport == \"sctp\" {\n\t\t\tvar association *sctp.Association\n\t\t\tconfig := sctp.Config{\n\t\t\t\tNetConn: conn,\n\t\t\t\tLoggerFactory: logging.NewDefaultLoggerFactory(),\n\t\t\t\tMaxReceiveBufferSize: 10 * 1024 * 1024,\n\t\t\t}\n\t\t\tif uploadingFile != \"\" {\n\t\t\t\tassociation, err = sctp.Client(config)\n\t\t\t} else {\n\t\t\t\tassociation, err = sctp.Server(config)\n\t\t\t}\n\t\t\tcheckError(err)\n\t\t\tdefer association.Close()\n\n\t\t\tvar stream *sctp.Stream\n\t\t\tif uploadingFile != \"\" {\n\t\t\t\tstream, err = association.OpenStream(777, sctp.PayloadTypeWebRTCBinary)\n\t\t\t} else {\n\t\t\t\tstream, err = association.AcceptStream()\n\t\t\t}\n\t\t\tcheckError(err)\n\n\t\t\tdefer stream.Close()\n\n\t\t\tconnIO = stream\n\t\t} else {\n\t\t\tconnIO = conn\n\t\t}\n\t}\n\n\tif uploadingFile != \"\" {\n\t\ttime.Sleep(2 * time.Second)\n\t\tfmt.Printf(\"Uploading....\\n\")\n\t} else {\n\t\tfmt.Printf(\"Downloading....\\n\")\n\t}\n\n\tif uploadingFile != \"\" {\n\t\tvar n int64\n\t\tvar err error\n\t\tif transport == \"ice\" || transport 
== \"sctp\" {\n\t\t\tn, err = io.CopyBuffer(connIO, f, make([]byte, 5 * 1200))\n\t\t\t//n = copy(connIO, f)\n\t\t} else {\n\t\t\tn, err = io.Copy(connIO, f)\n\t\t\t//n = copy(connIO, f)\n\t\t}\n\t\tcheckError(err)\n\n\t\tfmt.Printf(\"Success %v bytes sent!\\n\", n)\n\t\tconnIO.Close()\n\t} else {\n\t\tn, err := io.Copy(f, connIO)\n\t\tcheckError(err)\n\n\t\t//n := copy(f, connIO)\n\n\t\tfmt.Printf(\"Saved %v bytes!\\n\", n)\n\t\tconnIO.Close()\n\t}\n}", "func (s *Service) serveFile(req ServiceRequest) {\n\tfilepath, err := CleanPath(s.DocRoot, req.request.RequestURI)\n\tif err != nil {\n\t\treq.rchan <- HttpErrorResponse(req.request, err)\n\t\treturn\n\t}\n\n\tfi, err := os.Stat(filepath)\n\tif err != nil {\n\t\treq.rchan <- HttpErrorResponse(req.request, err)\n\t\treturn\n\t}\n\n\t// If it's a directory, try appending index.html\n\tif fi.IsDir() {\n\t\tfilepath = path.Join(filepath, \"index.html\")\n\t}\n\n\tf, err := os.Open(filepath)\n\tif err != nil {\n\t\treq.rchan <- HttpErrorResponse(req.request, err)\n\t\treturn\n\t}\n\n\t// TODO: Don't read the file into main memory. 
Splice it, send this\n\t// reading filehandle to the user's writing filehandle...\n\trd, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\treq.rchan <- HttpErrorResponse(req.request, err)\n\t\treturn\n\t}\n\n\treq.rchan <- HttpSimpleResponse(req.request, 200, string(rd))\n}", "func (d *BackingImageHandler) Send(filePath string, address string) error {\n\tif d.isProcessingInitialized() {\n\t\treturn fmt.Errorf(\"handler cannot send files when the pulling or receiving is still in progress\")\n\t}\n\n\treturn d.HandlerEngine.SenderLaunch(filePath, address, types.FileSyncTimeout, false)\n}", "func sendFile(src, dest string, client *sftp.Client) error {\n\tsrcBytes, err := ioutil.ReadFile(src)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to read source file: %v\", err)\n\t}\n\n\t// Create file on remote server.\n\tcreatedFile, err := client.Create(dest)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to create file %v on remote server: %v\", dest, err)\n\t}\n\n\t// Write source file contents to remote file.\n\tif _, err := createdFile.Write(srcBytes); err != nil {\n\t\treturn fmt.Errorf(\"Failed to write to remote file: %v\", err)\n\t}\n\n\treturn nil\n}", "func applepic(res http.ResponseWriter, req *http.Request) {\n\t// serves from current directory http.ServeFile(res, req, \"img/pic.jpg\") for another directory\n\thttp.ServeFile(res, req, \"pic.jpg\")\n\t// this wants\n}", "func rootFileHandler(w http.ResponseWriter, r *http.Request) {\n\tfile, err := ioutil.ReadFile(\"root.txt\")\n\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"unable to open root.txt: %s\", err)\n\t}\n\n\tfmt.Fprintf(w, \"%s\\n\", string(file))\n}", "func main() {\n\tlog.Printf(\"listening on %s and serving files from %s\\n\", port, dir)\n\thttp.ListenAndServe(port, server.Handler(dir))\n}", "func (bot *luaBot) sendFile(config Fileable) (Message, error) {\n\tif config.useExistingFile() {\n\t\treturn bot.sendExisting(config.method(), config)\n\t}\n\n\treturn bot.uploadAndSend(config.method(), 
config)\n}", "func (req *Request) ServeFile(path string) {\n\twww.ServeFile(req.Res(), req.R().(*www.Request), path)\n}", "func staticFile(mux *http.ServeMux, name string) {\n\tabs := filepath.Join(service.Static.DiskPath, name)\n\tmux.HandleFunc(name, func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(w, r, abs)\n\t})\n}", "func (server *GorillaServer) ServeFile(pattern, path string) {\n\tserver.HandleFunc(pattern, ServeFile(path))\n}", "func addFileServer(r chi.Router, path string, root http.FileSystem) {\n\tlog.Printf(\"[INFO] run file server for %s, path %s\", root, path)\n\torigPath := path\n\tfs := http.StripPrefix(path, http.FileServer(root))\n\tif path != \"/\" && path[len(path)-1] != '/' {\n\t\tr.Get(path, http.RedirectHandler(path+\"/\", 301).ServeHTTP)\n\t\tpath += \"/\"\n\t}\n\tpath += \"*\"\n\n\tr.With(tollbooth_chi.LimitHandler(tollbooth.NewLimiter(20, nil))).\n\t\tGet(path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t// don't show dirs, just serve files\n\t\t\tif strings.HasSuffix(r.URL.Path, \"/\") && len(r.URL.Path) > 1 && r.URL.Path != (origPath+\"/\") {\n\t\t\t\thttp.NotFound(w, r)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfs.ServeHTTP(w, r)\n\t\t}))\n}", "func SendFile(n *net_node.Node, connection net.Conn) {\n\t// Get the server_index\n\tserver_index_buff := make([]byte, 32)\n\tconnection.Read(server_index_buff)\n\tserver_index_str := strings.Trim(string(server_index_buff), \" \")\n\ti, _ := strconv.ParseInt(server_index_str, 10, 32)\n\tserver_index := int32(i)\n\n\t// Get the local file path\n\tfile_path_buff := make([]byte, 100)\n\tconnection.Read(file_path_buff)\n\tlocal_filepath := strings.Trim(string(file_path_buff), \" \")\n\n\t// Now, get the file name\n\tfile_name_buff := make([]byte, 100)\n\tconnection.Read(file_name_buff)\n\tfilename := strings.Trim(string(file_name_buff), \" \")\n\n\t// Determine if the file we are putting actually exists\n\tf, err := os.Stat(local_filepath)\n\tif os.IsNotExist(err) 
{\n\t\tfmt.Println(local_filepath, \"does not exist, cant send this file\")\n\t\treturn\n\t}\n\tfile_size := f.Size()\n\n\tSend_file_tcp(n, server_index, local_filepath, filename, file_size, \"\", false)\n}", "func (c *Context) File(filepath string) {\n\thttp.ServeFile(c.Response, c.Request, filepath)\n}", "func fileServer(r chi.Router, path string, root http.FileSystem) {\n\tif strings.ContainsAny(path, \"{}*\") {\n\t\tpanic(\"fileServer does not permit URL parameters.\")\n\t}\n\n\tfs := http.StripPrefix(path, http.FileServer(root))\n\n\tif path != \"/\" && path[len(path)-1] != '/' {\n\t\tr.Get(path, http.RedirectHandler(path+\"/\", 301).ServeHTTP)\n\t\tpath += \"/\"\n\t}\n\tpath += \"*\"\n\n\tr.Get(path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfs.ServeHTTP(w, r)\n\t}))\n}", "func serveTestFile(w http.ResponseWriter, r *http.Request) {\n\t// Open our test file\n\ttestSource, err := os.Open(testFile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Copy the contents to the http response\n\t_, err = io.Copy(w, testSource)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func FileServer(directory string, opts ...DirOptions) context.Handler {\n\tif directory == \"\" {\n\t\tpanic(\"FileServer: directory is empty. 
The directory parameter should point to a physical system directory or to an embedded one\")\n\t}\n\n\toptions := getDirOptions(opts...)\n\n\t// `embeddedFileSystem` (if AssetInfo, Asset and AssetNames are defined) or `http.Dir`.\n\tvar fs http.FileSystem = http.Dir(directory)\n\n\tif options.Asset != nil && options.AssetInfo != nil && options.AssetNames != nil {\n\t\t// Depends on the command the user gave to the go-bindata\n\t\t// the assset path (names) may be or may not be prepended with a slash.\n\t\t// What we do: we remove the ./ from the vdir which should be\n\t\t// the same with the asset path (names).\n\t\t// we don't pathclean, because that will prepend a slash\n\t\t//\t\t\t\t\t go-bindata should give a correct path format.\n\t\t// On serve time we check the \"paramName\" (which is the path after the \"requestPath\")\n\t\t// so it has the first directory part missing, we use the \"vdir\" to complete it\n\t\t// and match with the asset path (names).\n\t\tvdir := directory\n\n\t\tif vdir[0] == '.' 
{\n\t\t\tvdir = vdir[1:]\n\t\t}\n\n\t\t// second check for /something, (or ./something if we had dot on 0 it will be removed)\n\t\tif vdir[0] == '/' || vdir[0] == os.PathSeparator {\n\t\t\tvdir = vdir[1:]\n\t\t}\n\n\t\t// check for trailing slashes because new users may be do that by mistake\n\t\t// although all examples are showing the correct way but you never know\n\t\t// i.e \"./assets/\" is not correct, if was inside \"./assets\".\n\t\t// remove last \"/\".\n\t\tif trailingSlashIdx := len(vdir) - 1; vdir[trailingSlashIdx] == '/' {\n\t\t\tvdir = vdir[0:trailingSlashIdx]\n\t\t}\n\n\t\t// select only the paths that we care;\n\t\t// that have prefix of the directory and\n\t\t// skip any unnecessary the end-dev or the 3rd party tool may set.\n\t\tvar names []string\n\t\tfor _, name := range options.AssetNames() {\n\t\t\t// i.e: name = static/css/main.css (including the directory, see `embeddedFileSystem.vdir`)\n\n\t\t\tif !strings.HasPrefix(name, vdir) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tnames = append(names, strings.TrimPrefix(name, vdir))\n\t\t}\n\n\t\tif len(names) == 0 {\n\t\t\tpanic(\"FileServer: zero embedded files\")\n\t\t}\n\n\t\tasset := func(name string) ([]byte, error) {\n\t\t\treturn options.Asset(vdir + name)\n\t\t}\n\n\t\tassetInfo := func(name string) (os.FileInfo, error) {\n\t\t\treturn options.AssetInfo(vdir + name)\n\t\t}\n\n\t\tdirNames := make(map[string]*embeddedDir)\n\n\t\t// sort filenames by smaller path.\n\t\tsort.Slice(names, func(i, j int) bool {\n\t\t\treturn strings.Count(names[j], \"/\") > strings.Count(names[i], \"/\")\n\t\t})\n\n\t\tfor _, name := range names {\n\t\t\tdirName := path.Dir(name)\n\t\t\td, ok := dirNames[dirName]\n\n\t\t\tif !ok {\n\t\t\t\td = &embeddedDir{\n\t\t\t\t\tname: dirName,\n\t\t\t\t\tmodTimeUnix: time.Now().Unix(),\n\t\t\t\t}\n\t\t\t\tdirNames[dirName] = d\n\t\t\t}\n\n\t\t\tinfo, err := assetInfo(name)\n\t\t\tif err != nil {\n\t\t\t\tpanic(fmt.Sprintf(\"FileServer: report as bug: file info: %s not found in: 
%s\", name, dirName))\n\t\t\t}\n\t\t\td.list = append(d.list, &embeddedBaseFileInfo{path.Base(name), info})\n\t\t}\n\n\t\tfs = &embeddedFileSystem{\n\t\t\tvdir: vdir,\n\t\t\tdirNames: dirNames,\n\n\t\t\tasset: asset,\n\t\t\tassetInfo: assetInfo,\n\t\t}\n\t}\n\t// Let it for now.\n\t// else if !DirectoryExists(directory) {\n\t// \tpanic(\"FileServer: system directory: \" + directory + \" does not exist\")\n\t// }\n\n\tplainStatusCode := func(ctx context.Context, statusCode int) {\n\t\tif writer, ok := ctx.ResponseWriter().(*context.GzipResponseWriter); ok && writer != nil {\n\t\t\twriter.ResetBody()\n\t\t\twriter.Disable()\n\t\t}\n\t\tctx.StatusCode(statusCode)\n\t}\n\n\thtmlReplacer := strings.NewReplacer(\n\t\t\"&\", \"&amp;\",\n\t\t\"<\", \"&lt;\",\n\t\t\">\", \"&gt;\",\n\t\t// \"&#34;\" is shorter than \"&quot;\".\n\t\t`\"`, \"&#34;\",\n\t\t// \"&#39;\" is shorter than \"&apos;\" and apos was not in HTML until HTML5.\n\t\t\"'\", \"&#39;\",\n\t)\n\n\tdirList := options.DirList\n\tif dirList == nil {\n\t\tdirList = func(ctx context.Context, dirName string, dir http.File) error {\n\t\t\tdirs, err := dir.Readdir(-1)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// dst, _ := dir.Stat()\n\t\t\t// dirName := dst.Name()\n\n\t\t\tsort.Slice(dirs, func(i, j int) bool { return dirs[i].Name() < dirs[j].Name() })\n\n\t\t\tctx.ContentType(context.ContentHTMLHeaderValue)\n\t\t\t_, err = ctx.WriteString(\"<pre>\\n\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfor _, d := range dirs {\n\t\t\t\tname := d.Name()\n\t\t\t\tif d.IsDir() {\n\t\t\t\t\tname += \"/\"\n\t\t\t\t}\n\t\t\t\t// name may contain '?' 
or '#', which must be escaped to remain\n\t\t\t\t// part of the URL path, and not indicate the start of a query\n\t\t\t\t// string or fragment.\n\t\t\t\turl := url.URL{Path: joinPath(\"./\"+dirName, name)} // edit here to redirect correctly, standard library misses that.\n\t\t\t\t_, err = ctx.Writef(\"<a href=\\\"%s\\\">%s</a>\\n\", url.String(), htmlReplacer.Replace(name))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\t_, err = ctx.WriteString(\"</pre>\\n\")\n\t\t\treturn err\n\t\t}\n\t}\n\n\th := func(ctx context.Context) {\n\t\tname := prefix(ctx.Request().URL.Path, \"/\")\n\t\tctx.Request().URL.Path = name\n\n\t\tgzip := options.Gzip\n\t\tif !gzip {\n\t\t\t// if false then check if the dev did something like `ctx.Gzip(true)`.\n\t\t\t_, gzip = ctx.ResponseWriter().(*context.GzipResponseWriter)\n\t\t}\n\n\t\tf, err := fs.Open(name)\n\t\tif err != nil {\n\t\t\tplainStatusCode(ctx, http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t\tdefer f.Close()\n\n\t\tinfo, err := f.Stat()\n\t\tif err != nil {\n\t\t\tplainStatusCode(ctx, http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\n\t\t// use contents of index.html for directory, if present\n\t\tif info.IsDir() && options.IndexName != \"\" {\n\t\t\t// Note that, in contrast of the default net/http mechanism;\n\t\t\t// here different handlers may serve the indexes\n\t\t\t// if manually then this will block will never fire,\n\t\t\t// if index handler are automatically registered by the framework\n\t\t\t// then this block will be fired on indexes because the static site routes are registered using the static route's handler.\n\t\t\t//\n\t\t\t// End-developers must have the chance to register different logic and middlewares\n\t\t\t// to an index file, useful on Single Page Applications.\n\n\t\t\tindex := strings.TrimSuffix(name, \"/\") + options.IndexName\n\t\t\tfIndex, err := fs.Open(index)\n\t\t\tif err == nil {\n\t\t\t\tdefer fIndex.Close()\n\t\t\t\tinfoIndex, err := fIndex.Stat()\n\t\t\t\tif err == nil 
{\n\t\t\t\t\tinfo = infoIndex\n\t\t\t\t\tf = fIndex\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Still a directory? (we didn't find an index.html file)\n\t\tif info.IsDir() {\n\t\t\tif !options.ShowList {\n\t\t\t\tplainStatusCode(ctx, http.StatusNotFound)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif modified, err := ctx.CheckIfModifiedSince(info.ModTime()); !modified && err == nil {\n\t\t\t\tctx.WriteNotModified()\n\t\t\t\tctx.StatusCode(http.StatusNotModified)\n\t\t\t\tctx.Next()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.SetLastModified(info.ModTime())\n\t\t\terr = dirList(ctx, info.Name(), f)\n\t\t\tif err != nil {\n\t\t\t\tplainStatusCode(ctx, http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tctx.Next()\n\t\t\treturn\n\t\t}\n\n\t\t// index requested, send a moved permanently status\n\t\t// and navigate back to the route without the index suffix.\n\t\tif strings.HasSuffix(name, options.IndexName) {\n\t\t\tlocalRedirect(ctx, \"./\")\n\t\t\treturn\n\t\t}\n\n\t\tif options.AssetValidator != nil {\n\t\t\tif !options.AssetValidator(ctx, info.Name()) {\n\t\t\t\terrCode := ctx.GetStatusCode()\n\t\t\t\tif ctx.ResponseWriter().Written() <= context.StatusCodeWritten {\n\t\t\t\t\t// if nothing written as body from the AssetValidator but 200 status code(which is the default),\n\t\t\t\t\t// then we assume that the end-developer just returned false expecting this to be not found.\n\t\t\t\t\tif errCode == http.StatusOK {\n\t\t\t\t\t\terrCode = http.StatusNotFound\n\t\t\t\t\t}\n\t\t\t\t\tplainStatusCode(ctx, errCode)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// try to find and send the correct content type based on the filename\n\t\t// and the binary data inside \"f\".\n\t\tdetectOrWriteContentType(ctx, info.Name(), f)\n\n\t\tif gzip {\n\t\t\t// set the last modified as \"serveContent\" does.\n\t\t\tctx.SetLastModified(info.ModTime())\n\n\t\t\t// write the file to the response writer.\n\t\t\tcontents, err := ioutil.ReadAll(f)\n\t\t\tif err != nil 
{\n\t\t\t\tctx.Application().Logger().Debugf(\"err reading file: %v\", err)\n\t\t\t\tplainStatusCode(ctx, http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Use `WriteNow` instead of `Write`\n\t\t\t// because we need to know the compressed written size before\n\t\t\t// the `FlushResponse`.\n\t\t\t_, err = ctx.GzipResponseWriter().Write(contents)\n\t\t\tif err != nil {\n\t\t\t\tctx.Application().Logger().Debugf(\"short write: %v\", err)\n\t\t\t\tplainStatusCode(ctx, http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\thttp.ServeContent(ctx.ResponseWriter(), ctx.Request(), info.Name(), info.ModTime(), f)\n\t\tif serveCode := ctx.GetStatusCode(); context.StatusCodeNotSuccessful(serveCode) {\n\t\t\tplainStatusCode(ctx, serveCode)\n\t\t\treturn\n\t\t}\n\n\t\tctx.Next() // fire any middleware, if any.\n\t}\n\n\treturn h\n}", "func writeConfigFile() {\n f, err := os.Create(\"../src/config/config.toml\")\n if err != nil {\n log.Panic(\"Could not open config.toml\")\n }\n defer f.Close()\n\n var api = \"http://localhost:3000\" // Placeholder\n var rpc = \"http://localhost:8545\" // Placeholder\n dir, _ := filepath.Abs(filepath.Dir(os.Args[0]))\n\n var s = fmt.Sprintf(`[development]\ngridplus_api = \"%s\"\nrpc_provider = \"%s\"\nserial_no = \"ABCD0101\"\n[wallet]\nkey_path = \"%s/../src/config\"`, api, rpc, dir)\n\n _, err2 := f.WriteString(s)\n if err2 != nil {\n log.Panic(\"Could not write config file\")\n }\n return\n}", "func FileServer(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tfile := vars[\"filename\"]\n\tw.Header().Set(\"Content-Type\", mime.TypeByExtension(filepath.Ext(file)))\n\thttp.ServeFile(w, r, \"./static/\"+file)\n}", "func Callfile(w http.ResponseWriter, r *http.Request) {\n\thttp.ServeFile(w, r, \"/tmp/exampleTest.call\")\n\treturn\n}", "func (server HTTPServer) serveAPISpecFile(writer http.ResponseWriter, request *http.Request) {\n\tabsPath, err := 
filepath.Abs(server.Config.APISpecFile)\n\tif err != nil {\n\t\tconst message = \"Error creating absolute path of OpenAPI spec file\"\n\t\tlog.Error().Err(err).Msg(message)\n\t\thandleServerError(writer, err)\n\t\treturn\n\t}\n\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\thttp.ServeFile(writer, request, absPath)\n}", "func File(path, mime string) (f *FileServer) {\n\tf = &FileServer{\n\t\tmime: mime,\n\t}\n\tgo func() {\n\t\tfor {\n\t\t\tdata, err := ioutil.ReadFile(path)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Error:\", err)\n\t\t\t} else {\n\t\t\t\tf.data = data\n\t\t\t}\n\t\t\tsleep()\n\t\t}\n\t}()\n\treturn f\n}", "func File(filePath string) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(w, r, filePath)\n\t})\n}", "func mainHandler(w http.ResponseWriter, r *http.Request) {\n\tfmt.Printf(\"presentation request: %s\\n\", r.URL)\n\tfilename := filepath.Join(presentDir, r.URL.Path)\n\thttp.ServeFile(w, r, filename)\n}", "func serveFile(w http.ResponseWriter, r *http.Request, fs http.Dir, name string) {\n\n\tif r.Method == http.MethodGet && !config.AllowGet {\n\t\thttp.Error(w, \"400 Bad Request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif _, found := r.Header[\"X-Codemirror\"]; found {\n\t\tw.Header().Set(\"Cache-Control\", \"no-store\")\n\t}\n\n\tvar cookie string\n\tvar err error\n\tif config.Auth.UseAuth {\n\t\tcookie, err = checkCookie(w, r)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\tf, err := fs.Open(name)\n\tif err != nil {\n\t\tmsg, code := toHTTPError(err)\n\t\thttp.Error(w, msg, code)\n\t\treturn\n\t}\n\tdefer f.Close()\n\n\td, err := f.Stat()\n\tif err != nil {\n\t\tmsg, code := toHTTPError(err)\n\t\thttp.Error(w, msg, code)\n\t\treturn\n\t}\n\n\t// redirect to canonical path: / at end of directory url\n\t// r.URL.Path always begins with /\n\turl := r.URL.Path\n\tif d.IsDir() {\n\t\tif url[len(url)-1] != '/' {\n\t\t\tlocalRedirect(w, r, 
path.Base(url)+\"/\")\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif url[len(url)-1] == '/' {\n\t\t\tprinter.Note(\"Путь к файлу заканчивается на /\")\n\t\t\tlocalRedirect(w, r, \"../\"+path.Base(url))\n\t\t\treturn\n\t\t}\n\t}\n\n\tif d.IsDir() {\n\t\tif r.Method == \"POST\" {\n\t\t\tif !config.AllowPost {\n\t\t\t\tprinter.Error(\"no POST allowed\")\n\t\t\t\thttp.Error(w, \"no POST allowed\", 404)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpostStarted := time.Now()\n\t\t\terr := r.ParseMultipartForm(10 * mb)\n\t\t\tif err != nil {\n\t\t\t\tprinter.Error(err)\n\t\t\t\tmsg, code := toHTTPError(err)\n\t\t\t\thttp.Error(w, msg, code)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor v := range r.MultipartForm.File[fileField] {\n\t\t\t\tfileHeader := r.MultipartForm.File[fileField][v]\n\t\t\t\tfn := fileHeader.Filename\n\n\t\t\t\tdir := filepath.Join(string(fs), name)\n\t\t\t\tduration := time.Since(postStarted)\n\n\t\t\t\tfn = saveAs(fn, dir)\n\t\t\t\tfilePath := filepath.Join(dir, fn)\n\n\t\t\t\tspeed := int64(float64(fileHeader.Size) / duration.Seconds())\n\t\t\t\tprinter.Debug(\"\", \"File Upload\", map[string]string{\n\t\t\t\t\t\"Filename\": fn,\n\t\t\t\t\t\"Absolute path\": filePath,\n\t\t\t\t\t\"File size\": hrSize(fileHeader.Size),\n\t\t\t\t\t\"Duration\": fmt.Sprintf(\"%v\", duration),\n\t\t\t\t\t\"Speed\": hrSize(speed) + \"/с\",\n\t\t\t\t})\n\n\t\t\t\tcopyTo, err := os.OpenFile(filePath, MODE_WRITE, PERM_ALL)\n\t\t\t\tif err != nil {\n\t\t\t\t\tprinter.Fatal(err, \"post request\")\n\t\t\t\t}\n\t\t\t\tcopyFrom, err := fileHeader.Open()\n\t\t\t\tif err != nil {\n\t\t\t\t\tprinter.Fatal(err, \"post request\")\n\t\t\t\t}\n\t\t\t\tio.Copy(copyTo, copyFrom)\n\t\t\t}\n\t\t\terr = r.MultipartForm.RemoveAll()\n\t\t\tif err != nil {\n\t\t\t\tprinter.Error(err)\n\t\t\t}\n\t\t\tlocalRedirect(w, r, \"./\")\n\t\t\treturn\n\t\t} else if config.AllowListing {\n\t\t\tbuf := new(bytes.Buffer)\n\t\t\tmaxModtime, err := dirList(buf, f, name, cookie)\n\t\t\tif err != nil 
{\n\t\t\t\tprinter.Error(err)\n\t\t\t\thttp.Error(w, \"Error reading directory\", http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\t\t\thttp.ServeContent(w, r, d.Name(), maxModtime, bytes.NewReader(buf.Bytes()))\n\t\t\treturn\n\t\t}\n\n\t\tmsg, code := \"empty\", http.StatusOK\n\t\thttp.Error(w, msg, code)\n\n\t} else {\n\t\tif r.Method == \"DELETE\" {\n\t\t\terr := os.Remove(path.Clean(string(fs) + name))\n\t\t\tif err != nil {\n\t\t\t\tprinter.Error(err)\n\t\t\t\tmsg, code := toHTTPError(err)\n\t\t\t\thttp.Error(w, msg, code)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\treturn\n\t\t}\n\t\thttp.ServeContent(w, r, d.Name(), d.ModTime(), f)\n\t}\n}", "func createServerFile() error {\n\tcmdPath := \"./\" + Name + \"/cmd\"\n\tutil.CreateFolder(cmdPath)\n\n\tserverPath := cmdPath + \"/server\"\n\tutil.CreateFolder(serverPath)\n\n\tpath := serverPath + \"/server.go\"\n\n\t// Create the service.go file content\n\tserver := fmt.Sprintf(`package server\nimport (\n\t\"log\"\n\t\"time\"\n\n\t\"github.com/ezegrosfeld/yoda\"\n)\n\ntype server struct{}\n\nfunc NewServer() *server {\n\treturn &server{}\n}\n\nfunc (s *server) StartServer() {\n\tsrv := yoda.NewServer(yoda.Config{\n\t\tAddr: \":8080\",\n\t\tName: \"%s\",\n\t\tIdleTimeout: 30 * time.Second,\n\t\tWriteTimeout: 30 * time.Second,\n\t\tReadTimeout: 30 * time.Second,\n\t})\n\n\t// Routers\n\tr := srv.Group(\"/ping\")\n\tr.Get(\"\", func(c *yoda.Context) error {\n\t\treturn c.JSON(200, \"pong\")\n\t})\n\n\t// Initialize server\n\tif err := srv.Start(); err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n}\n\n`, Name)\n\n\t// Create the service.go file\n\treturn util.CreateFile(path, server)\n}", "func (r *Router) FileServer(path string) {\n\tif path[0] != '/' {\n\t\tpanic(\"Path has to start with a /.\")\n\t}\n\tr.tree.addFileServer(path)\n}", "func (opt *option) Open(filePath string) (http.File, error) {\n\tif 
go_restful_routes.Verbose {\n\t\tgo_restful_routes.Log(fmt.Sprintf(\"server for static file: %v\", filePath))\n\t}\n\n\tif name, err := opt.parse(filePath); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\treturn http.Dir(opt.Dir).Open(name)\n\t}\n}", "func serve(w http.ResponseWriter, r *http.Request) {\n\tfile := r.URL.Path\n\tprintln(ldir + file)\n\tw.Header().Set(\"Cach-Control\", \"no-cache, no-store, must-revalidate\")\n\tw.Header().Set(\"Pragma\", \"no-cache\")\n\tw.Header().Set(\"Expires\", \"0\")\n\thttp.ServeFile(w, r, ldir+file)\n\tlog.Print(r.Method, \" \", file)\n\n}", "func FileServer(r chi.Router, public string, static string) {\r\n\tif strings.ContainsAny(public, \"{}*\") {\r\n\t\tpanic(\"FileServer does not permit URL parameters.\")\r\n\t}\r\n\r\n\t// get absolute path to the specified static directory, returning an error if not found\r\n\troot, _ := filepath.Abs(static)\r\n\tif _, err := os.Stat(root); os.IsNotExist(err) {\r\n\t\tpanic(\"Static Documents Directory Not Found\")\r\n\t}\r\n\r\n\tfs := http.StripPrefix(public, http.FileServer(http.Dir(root)))\r\n\r\n\tif public != \"/\" && public[len(public)-1] != '/' {\r\n\t\tr.Get(public, http.RedirectHandler(public+\"/\", 301).ServeHTTP)\r\n\t\tpublic += \"/\"\r\n\t}\r\n\r\n\tr.Get(public+\"*\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\r\n\t\tfile := strings.Replace(r.RequestURI, public, \"/\", 1)\r\n\t\tif _, err := os.Stat(root + file); os.IsNotExist(err) {\r\n\t\t\thttp.ServeFile(w, r, path.Join(root, \"index.html\"))\r\n\t\t\treturn\r\n\t\t}\r\n\t\tfs.ServeHTTP(w, r)\r\n\t}))\r\n}", "func fileServer(r chi.Router, path string, root http.FileSystem) {\n\tif strings.ContainsAny(path, \"{}*\") {\n\t\tpanic(\"FileServer does not permit any URL parameters.\")\n\t}\n\n\tif path != \"/\" && path[len(path)-1] != '/' {\n\t\tr.Get(path, http.RedirectHandler(path+\"/\", 301).ServeHTTP)\n\t\tpath += \"/\"\n\t}\n\tpath += \"*\"\n\n\tr.Get(path, func(w http.ResponseWriter, r 
*http.Request) {\n\t\trctx := chi.RouteContext(r.Context())\n\t\tpathPrefix := strings.TrimSuffix(rctx.RoutePattern(), \"/*\")\n\t\tfs := http.StripPrefix(pathPrefix, http.FileServer(root))\n\t\tfs.ServeHTTP(w, r)\n\t})\n}", "func filePath() []byte {\n\tconfigFileName := \"config.dev.json\"\n\tif isProd() {\n\t\tconfigFileName = \"config.prod.json\"\n\t}\n\treturn []byte(fmt.Sprintf(\"%s/%s\", directoryPath, configFileName))\n}", "func (c *Client) SendFileToIndex(s *string) error {\n\ttosend := &gossiper.Message{File: s}\n\tmsg, err := protobuf.Encode(tosend)\n\tif err != nil {\n\t\tlog.Error(\"Error on encoding : \", err)\n\t}\n\terr = c.SendBytes(msg)\n\tif err != nil {\n\t\tlog.Error(\"Could not write to connection : \", err)\n\n\t}\n\treturn err\n\n}", "func Setup(conf *config.Config) (router *gin.Engine, err error) {\n\tgin.SetMode(gin.ReleaseMode)\n\n\trouter = gin.Default()\n\tif conf.AllowCors {\n\t\trouter.Use(cors.Default())\n\t}\n\n\tsubContent, err := fs.Sub(content, \"dist/go-file-server\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trouter.Use(func(c *gin.Context) {\n\t\tif c.Request.Method != http.MethodGet && c.Request.Method != http.MethodHead {\n\t\t\tc.Next()\n\t\t\treturn\n\t\t}\n\n\t\treqPath := strings.TrimPrefix(c.Request.URL.Path, \"/\")\n\t\tif reqPath == \"\" {\n\t\t\tfile, err := subContent.Open(\"index.html\")\n\t\t\tif err != nil {\n\t\t\t\tc.Next()\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tpage, err := io.ReadAll(file)\n\t\t\tif err != nil {\n\t\t\t\tc.Next()\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tc.Status(http.StatusOK)\n\t\t\t_, err = c.Writer.Write(page)\n\t\t\tif err != nil {\n\t\t\t\tif e := c.Error(err); e.Err == nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\n\t\tif _, err = subContent.Open(reqPath); err != nil {\n\t\t\tc.Next()\n\t\t\treturn\n\t\t}\n\n\t\thttp.FileServer(http.FS(subContent)).ServeHTTP(c.Writer, c.Request)\n\t\tc.Abort()\n\t})\n\n\tstatic := 
router.Group(\"/static\")\n\tstatic.Static(\"\", conf.BaseDir)\n\n\tzipDir := router.Group(\"/dir_zip\")\n\tzipDir.GET(\"*dir\", handlers.ZipDir(conf.BaseDir))\n\n\tapi := router.Group(\"/api\")\n\tapi.POST(\"list_dir_files\", handlers.ListDirFiles(conf.BaseDir))\n\n\trouter.NoRoute(func(c *gin.Context) {\n\t\tc.Redirect(http.StatusTemporaryRedirect, \"/\")\n\t})\n\treturn\n}", "func frameworkHandler(w http.ResponseWriter, r *http.Request) {\n\tfmt.Printf(\"framework request: %s\\n\", r.URL)\n\tfilename := filepath.Join(shellserverDir, r.URL.Path)\n\thttp.ServeFile(w, r, filename)\n}", "func pathHandler(w http.ResponseWriter, r *http.Request) {\n\tpath := \"/var/www/ear7h-net/\" + r.URL.Path[1:]\n\t//open file and send\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\tfourOhFour(w, r)\n\t} else {\n\t\thttp.ServeContent(w, r, r.URL.Path, time.Now(), f)\n\t}\n}", "func FileServerHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tfile := vars[\"filename\"]\n\tfType := vars[\"type\"]\n\tfilepath := \"/home/jordan/convo/static/\" + fType + \"/\" + file\n\t//the file path here may need some refactoring on different environments\n\thttp.ServeFile(w, r, filepath)\n\n}", "func fileOnHost(path string) (*os.File, error) {\n\tif err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {\n\t\treturn nil, err\n\t}\n\treturn os.Create(path) //nolint:gosec // No security issue: path is safe.\n}", "func uploadFile(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"File Upload Endpoint Hit\")\n\n\t// Parse our multipart form, 10 << 20 specifies a maximum\n\t// upload of 10 MB files.\n\tr.ParseMultipartForm(10 << 20)\n\t// FormFile returns the first file for the given key `myFile`\n\tfile, _, err := r.FormFile(\"myFile\")\n\tif err != nil {\n\t\tfmt.Println(\"Error Retrieving the File\\n\" + err.Error())\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\t// read all of the contents of our uploaded file into a\n\t// byte 
array\n\tfileBytes, err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\t// write this byte array to our temporary file\n\tpath := saveFile(fileBytes)\n\t// return that we have successfully uploaded our file!\n\tfmt.Fprintf(w, \"Successfully Uploaded File\\n\")\n\n\t//Send the filepath to rabbitMQ\n\tinitRabbit(path)\n\n}", "func (s *Server) upload(info string) {\n\t// rsync -av -e 'ssh -o \"ProxyCommand ssh -p port bastion-dev@proxy exec nc %h %p 2>/dev/null\"' test.txt app@target:~/\n\t// rsync -avrP -e 'ssh -o ProxyCommand=\"ssh -W %h:%p bastion-dev@proxy -p port\"' test.txt app@target:~/\n\tssh := fmt.Sprintf(`ssh -o StrictHostKeyChecking=no -l %s -p %s`, s.User, strings.TrimLeft(s.Port, \":\"))\n\tif s.Proxy != nil {\n\t\tssh = fmt.Sprintf(`ssh -o StrictHostKeyChecking=no -o ProxyCommand=\"ssh -W %%h:%%p %s@%s -p %s\"`, s.Proxy.User, s.Proxy.Host, strings.TrimLeft(s.Proxy.Port, \":\"))\n\t}\n\n\tappName := cfg.App.Name\n\tdst := fmt.Sprintf(\"%s@%s:%s/harp/%s/\", s.User, s.Host, s.Home, appName)\n\t// if option.debug {\n\t// \tfmt.Println(\"rsync\", \"-az\", \"--delete\", \"-e\", ssh, filepath.Join(tmpDir, appName), filepath.Join(tmpDir, \"files\"), dst)\n\t// }\n\targs := []string{\"-az\", \"--delete\", \"-e\", ssh}\n\tif option.debug {\n\t\targs = append(args, \"-P\")\n\t}\n\tif !option.noBuild {\n\t\targs = append(args, filepath.Join(tmpDir, appName))\n\t}\n\tif !option.noFiles {\n\t\targs = append(args, filepath.Join(tmpDir, \"files\"))\n\t}\n\tif option.debug {\n\t\tfmt.Println(\"upload cmd:\", strings.Join(append([]string{\"rsync\"}, append(args, dst)...), \" \"))\n\t}\n\tcmd := exec.Command(\"rsync\", append(args, dst)...)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\ts.exitf(\"failed to sync binary %s: %s\", appName, err)\n\t}\n\n\tsession := s.getSession()\n\toutput, err := session.CombinedOutput(fmt.Sprintf(\"cat <<EOF > %s/harp/%s/harp-build.info\\n%s\\nEOF\", s.Home, appName, 
info))\n\tif err != nil {\n\t\ts.exitf(\"failed to save build info: %s: %s\", err, string(output))\n\t}\n\tsession.Close()\n}", "func main() {\n\t// Spin off the hub\n\thub := newHub()\n\tgo hub.run()\n\n\thttp.Handle(\"/frontend/dist/\", http.StripPrefix(\"/frontend/dist/\", http.FileServer(http.Dir(\"./frontend/dist/\"))))\n\thttp.Handle(\"/assets/\", http.StripPrefix(\"/assets/\", http.FileServer(http.Dir(\"./assets/\"))))\n\t// Serve index.html specifically\n\thttp.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(w, r, \"index.html\")\n\t})\n\thttp.HandleFunc(\"/api/socket\", func(w http.ResponseWriter, r *http.Request) {\n\t\tserveWs(hub, w, r)\n\t})\n\n\tport := \":4567\"\n\tlog.Println(\"Server listening at localhost\" + port)\n\thttp.ListenAndServe(port, nil)\n\n}", "func StaticFileServer(r chi.Router, public string, static string) {\n\n\t// everything up to the r.Get call is executed the first time the function is called\n\tif strings.ContainsAny(public, \"{}*\") {\n\t\tpanic(\"FileServer does not permit URL parameters.\")\n\t}\n\n\troot, _ := filepath.Abs(static)\n\tif _, err := os.Stat(root); os.IsNotExist(err) {\n\t\tpanic(\"Static Documents Directory Not Found\")\n\t}\n\n\tfs := http.StripPrefix(public, http.FileServer(http.Dir(root)))\n\n\tif public != \"/\" && public[len(public)-1] != '/' {\n\t\tr.Get(public, http.RedirectHandler(public+\"/\", 301).ServeHTTP)\n\t\tpublic += \"/\"\n\t}\n\n\tlog.Printf(\"Serving spa index.html from: %s\", http.Dir(root))\n\n\t// Register the Get request for the specified path, most likely /*\n\tr.Get(public+\"*\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfile := strings.Replace(r.RequestURI, public, \"/\", 1)\n\t\t// if the requested resource was not found, pass the request to the client\n\t\tif _, err := os.Stat(root + file); os.IsNotExist(err) {\n\t\t\thttp.ServeFile(w, r, path.Join(root, \"index.html\"))\n\t\t\treturn\n\t\t}\n\t\t// if the requested resource was found, 
serve it\n\t\tfs.ServeHTTP(w, r)\n\t})\n}", "func main() {\n\tfs := http.FileServer(http.Dir(\"public\"))\n\tfmt.Printf(\"fs type: %T\\n\", fs)\n\thttp.Handle(\"/pics/\", fs)\n\thttp.HandleFunc(\"/\", dogs)\n\thttp.ListenAndServe(\":8080\", nil)\n}", "func ServeHtmlFile(filePath string) error {\n\t_, fileErr := os.Stat(filePath)\n\tif os.IsNotExist(fileErr) {\n\t\treturn errors.New(\"File do not exist.\")\n\t}\n\n\thandler := ServeFileHandler{filePath}\n\terr := http.ListenAndServe(\":8080\", &handler)\n\treturn err\n}", "func ServeFile(filePath string) (address string, stop func(), err error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\n\tf, err := os.Open(filePath)\n\tif err != nil {\n\t\treturn \"\", cancel, err\n\t}\n\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\n\t\tpath.Join(\"/\", path.Base(filePath)),\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\thttp.ServeContent(w, r, path.Base(filePath), time.Now(), f)\n\t\t},\n\t)\n\n\tfor tries := 5; tries > 0; tries-- {\n\t\t// get a random port in the IANA dynamic/ephemeral range\n\t\tport := rand.Intn(65535-49151) + 49151\n\n\t\t// start an HTTP server on that port\n\t\tserver := rpc.NewContextServer(ctx, mux)\n\t\terrors := make(chan error)\n\n\t\tgo func(errors chan error) {\n\t\t\terrors <- server.ListenAndServe(fmt.Sprintf(\"localhost:%d\", port))\n\t\t}(errors)\n\n\t\t// if it hasn't terminated in .1s, assume it's listening\n\t\tdur, _ := time.ParseDuration(\".1s\")\n\t\tselect {\n\t\tcase err := <-errors:\n\t\t\tlog.WithError(err).Error(\"error in ServeFile\")\n\t\tcase <-time.After(dur):\n\t\t\treturn fmt.Sprintf(\"http://localhost:%d/%s\", port, path.Base(filePath)), cancel, nil\n\t\t}\n\t}\n\n\treturn \"\", cancel, errors.New(\"Couldn't find port to listen on\")\n}", "func FileServerMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Server\", globalAppName) // do not add version 
information\n\t\tswitch {\n\t\tcase strings.HasPrefix(r.URL.Path, \"/ws\"):\n\t\t\tserveWS(w, r)\n\t\tcase strings.HasPrefix(r.URL.Path, \"/api\"):\n\t\t\tnext.ServeHTTP(w, r)\n\t\tdefault:\n\t\t\tbuildFs, err := fs.Sub(portal_ui.GetStaticAssets(), \"build\")\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\twrapHandlerSinglePageApplication(http.FileServer(http.FS(buildFs))).ServeHTTP(w, r)\n\t\t}\n\t})\n}", "func (s *Server) Service() string { return \"file\" }", "func (t *Socket) StoreFile(args *File, reply *string) error {\n\n\tlog.Println(\"Storing File......\")\n\t//fmt.Println(\"File:\", args)\n\n\tfileName := args.FileName\n\tfileData := args.Data\n\tputRequestTime := args.PutRequestTime\n\n\tmyIp := GetOutboundIP()\n\tlog.Println(\"In machine:\" + myIp)\n\tlog.Println(\"fileName:\", fileName)\n\tlog.Println(\"putRequestTime:\", putRequestTime)\n\n\tpath := \"sdfs\"\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\tos.Mkdir(path, 0700)\n\t}\n\t// f, err := os.OpenFile(path+\"/\"+fileName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)\n\tf, err := os.Create(path + \"/\" + fileName)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\t_, err = f.Write(fileData)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tf.Close()\n\n\t// Todo: notifyWriteSucceed()\n\tlocalSDFSData[fileName] = putRequestTime\n\tlogStoringFile(fileName)\n\n\treturn nil\n}", "func Run(configPath, devURL, addr string) error {\n\tcfg := new(Config)\n\tf, err := os.Open(configPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to open config file: %s\", err)\n\t}\n\n\tif err := json.NewDecoder(f).Decode(cfg); err != nil {\n\t\treturn fmt.Errorf(\"failed to decode config file: %s\", err)\n\t}\n\n\tsrv, err := setupServer(cfg)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to initialise server: %s\", err)\n\t}\n\n\tif err := setupAssets(devURL, srv); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Println(\"Serving on\", addr)\n\n\tif err := http.ListenAndServe(addr, srv); err != 
nil {\n\t\treturn fmt.Errorf(\"failed to start server: %s\", err)\n\t}\n\n\treturn nil\n}", "func MountApplication(mountpoints map[string]http.Handler, host *Host, app *Application) {\n listing := &DirList{Host: host}\n\n\t// Serve the static build files from the mountpoint path.\n\turl := app.PublishUrl()\n\tlog.Printf(\"Serving app %s from %s\", url, app.PublicDirectory())\n fileserver := http.FileServer(http.Dir(app.PublicDirectory()))\n mountpoints[url] = http.StripPrefix(url, PublicHandler{Listing: listing, App: app, FileServer: fileserver})\n}", "func (r *Router) ServeFile(ctx context.Context, w http.ResponseWriter, req *http.Request, f string) {\n\treqID := r.GetRequestID(ctx)\n\n\tr.logger.Debugw(\"response.image\",\n\t\t\"request_id\", reqID,\n\t\t\"file_name\", f,\n\t)\n\n\thttp.ServeFile(w, req, f)\n}", "func init() {\n //fmt.Println(\"init\")\n app_data.file_name = \"/tmp/roll_test/test.txt\"\n}", "func (m *Macross) File(path, file string) {\n\tm.Get(path, func(c *Context) error {\n\t\treturn c.ServeFile(file)\n\t})\n}", "func ServeFile(relativePath string) (*httptest.Server, error) {\n\tbody, err := ReadFile(relativePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Add(\"X-Plan-Qps-Allotted\", \"0\")\n\t\tw.Header().Add(\"X-Plan-Qps-Current\", \"0\")\n\t\tw.Header().Add(\"X-Plan-Quota-Allotted\", \"0\")\n\t\tw.Header().Add(\"X-Plan-Quota-Current\", \"0\")\n\t\tfmt.Fprintln(w, string(body))\n\t}))\n\n\treturn ts, nil\n}", "func (s *server) File(_ context.Context, request *pb.FileRequest) (*pb.FileResponse, error) {\n\tfmt.Printf(\"Patching file %s\\n\", path.Join(s.localReplicaPath, request.FullPath))\n\terr := ioutil.WriteFile(path.Join(s.localReplicaPath, request.FullPath), []byte(request.FullContents), defaultRights)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to file %s: %w\", request.FullPath, err)\n\t}\n\treturn 
&pb.FileResponse{}, nil\n}", "func (s *scpSession) sendFile(mode string, length int64, remoteFile string, content io.ReadCloser) error {\n\tfilename := filepath.Base(remoteFile)\n\n\t_, err := fmt.Fprintf(s.in, \"%s%s %d %s\\n\", msgCopyFile, mode, length, filename)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create a new file: err=%s\", err)\n\t}\n\n\terr = s.readReply()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = io.Copy(s.in, content)\n\t//defer content.Close()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error while writing content file: err=%s\", err)\n\t}\n\n\t_, err = s.in.Write([]byte{msgOK})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error while ending transfer: err=%s\", err)\n\t}\n\n\terr = s.readReply()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func FileServer(r chi.Router, path string, root http.FileSystem) {\n\tif strings.ContainsAny(path, \"{}*\") {\n\t\tpanic(\"FileServer does not permit URL parameters.\")\n\t}\n\tfs := http.StripPrefix(path, http.FileServer(root))\n\n\tif path != \"/\" && path[len(path)-1] != '/' {\n\t\tr.Get(path, http.RedirectHandler(path+\"/\", http.StatusMovedPermanently).ServeHTTP)\n\t\tpath += \"/\"\n\t}\n\tpath += \"*\"\n\n\tr.Get(path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfs.ServeHTTP(w, r)\n\t}))\n}", "func generateAndRunFile(projectDir, fileName string, tmpl *template.Template) {\n\tprojectPack, err := build.ImportDir(path.Join(projectDir, \"config\"), 0)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"Error while importing project path: %s\", err))\n\t}\n\n\ttmplData := struct {\n\t\tImports []string\n\t\tConfig string\n\t}{\n\t\tImports: projectPack.Imports,\n\t\tConfig: fmt.Sprintf(\"%#v\", viper.AllSettings()),\n\t}\n\tstartFileName := path.Join(projectDir, fileName)\n\tgenerate.CreateFileFromTemplate(startFileName, tmpl, tmplData)\n\tcmd := exec.Command(\"go\", \"run\", startFileName)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = 
os.Stderr\n\tcmd.Run()\n}", "func RootHandler(w http.ResponseWriter, r *http.Request) {\n\tfilePath := r.URL.Path[len(\"/\"):]\n\n\tsource, err := ioutil.ReadFile(root + filePath)\n\tif err != nil {\n\t\tsource, err = ioutil.ReadFile(root + filePath + \"/index.html\")\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tw.Write([]byte(err.Error()))\n\t\t\treturn\n\t\t}\n\t\tfilePath += \"index.html\"\n\t}\n\n\t// Set response headers\n\theaders.SetDefaultHeaders(w)\n\theaders.SetContentTypeHeader(w, filePath)\n\n\t// Send the response\n\tw.Write(source)\n\n\t//TODO: Log more detailed information.\n\tlog.Println(\"(rootHandler) The requested file has been sent: \", root+filePath)\n}", "func SendFileServer(from *os.File, stream TagIOService_PullServer) error {\n\tfinfo, err := from.Stat()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error getting file info: %s\", err)\n\t}\n\tfsize := finfo.Size()\n\n\tvar counter int\n\tvar totread uint64\n\tfor {\n\t\tcontent := make([]byte, 1024)\n\t\tread, err := from.Read(content)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"error reading file: %w\", err)\n\t\t}\n\t\ttotread += uint64(read)\n\n\t\tif counter%50 == 0 {\n\t\t\terr := SendProgressMessage(totread, fsize, stream)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"error sending progress: %w\", err)\n\t\t\t}\n\t\t}\n\n\t\tif err := stream.Send(\n\t\t\t&PullResult{\n\t\t\t\tTestOneof: &PullResult_Chunk{\n\t\t\t\t\tChunk: &Chunk{\n\t\t\t\t\t\tContent: content,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t); err != nil {\n\t\t\treturn fmt.Errorf(\"error sending chunk: %w\", err)\n\t\t}\n\t\tcounter++\n\t}\n\treturn nil\n}", "func (ctx *Context) ServeFile(filepath string) (err error) {\n\thttp.ServeFile(ctx.ResponseWriter, ctx.Request, filepath)\n\treturn\n}", "func (s *VarlinkInterface) SendFile(ctx context.Context, c VarlinkCall, type_ string, length_ int64) error {\n\treturn 
c.ReplyMethodNotImplemented(ctx, \"io.podman.SendFile\")\n}", "func Serve(path string, options ServeOptions) error {\n\t// First check if the passed path is a verless project (valid verless cfg).\n\tcfg, err := config.FromFile(path, config.Filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttargetFiles := outputDir(path, &options.BuildOptions)\n\n\t// If yes, build it if requested to do so.\n\toptions.BuildOptions.RecompileTemplates = options.Watch\n\toptions.Overwrite = true\n\n\tmemMapFs := afero.NewMemMapFs()\n\n\tdone := make(chan bool)\n\trebuildCh := make(chan string)\n\n\tif options.Watch {\n\t\tif err := watch(watchContext{\n\t\t\tIgnorePaths: []string{\n\t\t\t\ttargetFiles,\n\t\t\t\tfilepath.Join(path, config.StaticDir, config.GeneratedDir),\n\t\t\t\ttheme.GeneratedPath(path, cfg.Theme),\n\t\t\t},\n\t\t\tPath: path,\n\t\t\tChangedCh: rebuildCh,\n\t\t\tStopCh: done,\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tout.T(style.Sparkles, \"building project ...\")\n\n\tbuild, err := NewBuild(memMapFs, path, options.BuildOptions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := build.Run(); err != nil {\n\t\treturn err\n\t}\n\n\tout.T(style.HeavyCheckMark, \"project built successfully\")\n\n\t// If --watch is enabled, launch a goroutine that handles rebuilds.\n\tif options.Watch {\n\t\tfactory := func() (*Build, error) {\n\t\t\treturn NewBuild(memMapFs, path, options.BuildOptions)\n\t\t}\n\t\tgo watchAndRebuild(factory, rebuildCh, done)\n\t}\n\n\t// If the target folder doesn't exist, return an error.\n\tif _, err := memMapFs.Stat(targetFiles); err != nil {\n\t\treturn err\n\t}\n\n\terr = listenAndServe(memMapFs, targetFiles, options.IP, options.Port)\n\tclose(done)\n\n\treturn err\n}", "func (p *process) startSaveFile(path string, body string, opt *Options) error {\n\n\terr := ioutil.WriteFile(path, []byte(body), 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.run = nil\n\treturn nil\n}", "func staticFile(name string) string {\n\treturn 
filepath.Join(Config.StaticPath, name)\n}", "func Test_Ctx_SendFile_404(t *testing.T) {\n\tt.Parallel()\n\tapp := New()\n\tapp.Get(\"/\", func(ctx *Ctx) {\n\t\terr := ctx.SendFile(\"./john_dow.go/\")\n\t\tutils.AssertEqual(t, false, err == nil)\n\t})\n\n\tresp, err := app.Test(httptest.NewRequest(\"GET\", \"/\", nil))\n\tutils.AssertEqual(t, nil, err)\n\tutils.AssertEqual(t, StatusNotFound, resp.StatusCode)\n}", "func FileServer(r chi.Router, path string, root http.FileSystem) {\n\tif strings.ContainsAny(path, \"{}*\") {\n\t\tpanic(\"FileServer does not permit URL parameters.\")\n\t}\n\n\tfs := http.StripPrefix(path, http.FileServer(root))\n\n\tif path != \"/\" && path[len(path)-1] != '/' {\n\t\tr.Get(path, http.RedirectHandler(path+\"/\", 301).ServeHTTP)\n\t\tpath += \"/\"\n\t}\n\tpath += \"*\"\n\n\tr.Get(path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfs.ServeHTTP(w, r)\n\t}))\n}", "func FileServer(r chi.Router, path string, root http.FileSystem) {\n\tif strings.ContainsAny(path, \"{}*\") {\n\t\tpanic(\"FileServer does not permit URL parameters.\")\n\t}\n\n\tfs := http.StripPrefix(path, http.FileServer(root))\n\n\tif path != \"/\" && path[len(path)-1] != '/' {\n\t\tr.Get(path, http.RedirectHandler(path+\"/\", 301).ServeHTTP)\n\t\tpath += \"/\"\n\t}\n\tpath += \"*\"\n\n\tr.Get(path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfs.ServeHTTP(w, r)\n\t}))\n}", "func send_mp3_file(song_file string, client int) {\n\tdefer syscall.Close(client)\n\tbytes, err := ioutil.ReadFile(\"songs/\" + song_file)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tsyscall.Write(client, bytes)\n}", "func main() {\n\trouter := mux.NewRouter()\n\n\t//read files\n\trouter.HandleFunc(`/api/open/{path:.*}`, getDir).Methods(\"GET\")\n\n\t//close folders\n\trouter.HandleFunc(`/api/close/{path:.*}`, closeDir).Methods(\"GET\")\n\n\t//notify api\n\t// Create a new glue server.\n\tserver = glue.NewServer(glue.Options{\n\t\tHTTPListenAddress: 
\":8080\",\n\t\tHTTPSocketType: glue.HTTPSocketTypeNone,\n\t\tHTTPHandleURL: \"/channel/\",\n\t})\n\n\t// Release the glue server on defer.\n\t// This will block new incoming connections\n\t// and close all current active sockets.\n\tdefer server.Release()\n\n\t// Set the glue event function to handle new incoming socket connections.\n\tserver.OnNewSocket(onNewSocket)\n\n\t// Run the glue server.\n\tgo server.Run()\n\n\trouter.PathPrefix(\"/channel/\").Handler(server)\n\n\t//serve static\n\tstaticFileDirectory := http.Dir(\"./millertoy_html/\")\n\tstaticFileHandler := http.StripPrefix(\"/\", http.FileServer(staticFileDirectory))\n\trouter.PathPrefix(\"/\").Handler(staticFileHandler).Methods(\"GET\")\n\t//check static updates\n\tgo refreshFolder(\"./millertoy_html\")\n\n\tlog.Fatal(http.ListenAndServe(\":8080\", router))\n}", "func main() {\n\tfile, err := os.OpenFile(logFilePath, os.O_CREATE|os.O_WRONLY, 0666)\n\tdefer file.Close()\n\tif err == nil {\n\t\tlog.SetOutput(file)\n\t} else {\n\t\tlog.Println(\"Failed to log to file, using default stderr\")\n\t}\n\thttp.Handle(\"/panic\", panicHandler{http.HandlerFunc(panicPathHandler)})\n\thttp.HandleFunc(\"/\", myHandleFunc)\n\tlog.Fatalln(http.ListenAndServe(\"10.253.98.20:8080\", nil))\n\t//http.ListenAndServeTLS(\"ap-pun-lp1408.internal.sungard.corp:10443\", \"cert.pem\", \"key.pem\", nil)\n\n}", "func ServeFilesInManyWays() {\n\tdefHandler := func(w http.ResponseWriter, r *http.Request) {\n\t\tt := template.Must(template.ParseFiles(\"file/helloImage.gohtml\"))\n\t\te := t.ExecuteTemplate(w, \"helloImage.gohtml\", nil)\n\t\tif e != nil {\n\t\t\tlog.Fatal(e)\n\t\t}\n\t}\n\n\timgHandlerIOCopy := func(w http.ResponseWriter, r *http.Request) {\n\t\tf, e := os.Open(\"file/golang.png\")\n\t\tif e != nil {\n\t\t\tlog.Fatal(e)\n\t\t}\n\t\tdefer f.Close()\n\n\t\tio.Copy(w, f)\n\t}\n\n\timgHandlerServeContent := func(w http.ResponseWriter, r *http.Request) {\n\t\tf, e := os.Open(\"file/gogogo.jpeg\")\n\t\tif e != nil 
{\n\t\t\tlog.Fatal(e)\n\t\t}\n\t\tdefer f.Close()\n\n\t\tfi, e := os.Stat(\"file/gogogo.jpeg\")\n\t\tif e != nil {\n\t\t\tlog.Fatal(e)\n\t\t}\n\n\t\thttp.ServeContent(w, r, fi.Name(), fi.ModTime(), f)\n\t}\n\n\timgHandlerServeFile := func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(w, r, \"file/golangticket.png\")\n\t}\n\n\thttp.HandleFunc(\"/\", defHandler)\n\thttp.HandleFunc(\"/golang.png\", imgHandlerIOCopy)\n\thttp.HandleFunc(\"/gogogo.jpeg\", imgHandlerServeContent)\n\thttp.HandleFunc(\"/golangticket.png\", imgHandlerServeFile)\n\thttp.ListenAndServe(\":6080\", nil)\n}", "func uploadHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method == \"GET\" {\n\t\tfmt.Fprintf(w, `<html>\n<head>\n <title>GoLang HTTP Fileserver</title>\n</head>\n\n<body>\n\n<h2>Upload a file</h2>\n\n<form action=\"/receive\" method=\"post\" enctype=\"multipart/form-data\">\n <label for=\"file\">Filename:</label>\n <input type=\"file\" name=\"file\" id=\"file\">\n <br>\n <input type=\"submit\" name=\"submit\" value=\"Submit\">\n</form>\n\n</body>\n</html>`)\n\t}\n}", "func FileServer(r chi.Router, path string, root http.FileSystem) {\n\n\tif strings.ContainsAny(path, \"{}*\") {\n\t\tpanic(\"FileServer does not permit URL parameters.\")\n\t}\n\n\tfs := http.FileServer(root)\n\n\tif path != \"/\" && path[len(path)-1] != '/' {\n\t\tr.Get(path, http.RedirectHandler(path+\"/\", 301).ServeHTTP)\n\t\tpath += \"/\"\n\t}\n\tpath += \"*\"\n\n\tr.Get(path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfs.ServeHTTP(w, r)\n\t}))\n}", "func routePostFs(rdr render.Render, w http.ResponseWriter, r *http.Request, params martini.Params) {\n\tpath := FSRoot + strings.TrimPrefix(r.URL.Path, FSPathPrefix)\n\n\tdir := filepath.Dir(path)\n\tlog.Println(dir)\n\terr := os.MkdirAll(dir, FSDirPermission)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tfile, err := os.Create(path)\n\tif err != nil 
{\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\twritten, err := io.Copy(file, r.Body)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tlog.Println(written)\n\n\treturn\n}", "func getFileserverHandler(folder string) http.Handler {\n\treturn http.StripPrefix(\"/\"+folder+\"/\", http.FileServer(http.Dir(\"./\"+folder)))\n}", "func imageHandler(res http.ResponseWriter, req *http.Request) {\n\tfmt.Println(req.URL.Path[12:])\n\tf, err := ioutil.ReadFile(\"../\" + req.URL.Path)\n\tif err != nil {\n\t\thttp.Error(res, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tfmt.Fprintf(res, \"%s\", f)\n}", "func SendFile(addr, from, subj, text, name string, file io.ReadCloser, to ...string) error {\n\tmgn, err := newMailgun(addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmsg := mgn.NewMessage(from, subj, text, to...)\n\tif file != nil {\n\t\tmsg.AddReaderAttachment(name, file)\n\t}\n\n\t_, _, err = mgn.Send(msg)\n\n\treturn err\n}", "func SendFiles(sshClient *ssh.Client, opts *Options, files map[string]io.Reader) error {\n\tfor fileName, reader := range files {\n\t\tdeploymentName, err := opts.GetDeploymentName()\n\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error getting full app name: %s\", err)\n\t\t}\n\n\t\tremotePath := fmt.Sprintf(\"%s/%s/%s\", opts.RootDir, deploymentName, fileName)\n\n\t\tpermissions := \"0644\"\n\t\terr = copyFile(fileName, reader, remotePath, permissions, sshClient)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *Server) Run(ctx context.Context) error {\n\tfs := customFileSystem{http.Dir(s.Dir)}\n\n\thandler := http.NewServeMux()\n\thandler.Handle(\"/\", http.FileServer(fs))\n\n\tfileServer := &http.Server{\n\t\tAddr: s.ListenAddress,\n\t\tHandler: handler,\n\t\tReadTimeout: time.Second * 15,\n\t\tReadHeaderTimeout: time.Second * 15,\n\t\tWriteTimeout: time.Second * 
15,\n\t\tIdleTimeout: time.Second * 30,\n\t\tMaxHeaderBytes: 4096,\n\t}\n\n\terrs := make(chan error)\n\tgo func() {\n\t\terrs <- fileServer.ListenAndServe()\n\t}()\n\n\tselect {\n\tcase err := <-errs:\n\t\treturn err\n\tcase <-ctx.Done():\n\t\treturn fileServer.Shutdown(ctx)\n\t}\n}", "func ServeFile(fname string) func(*Request, *Response) {\n\t//variables\n\tvar mimetype string\n\tvar lastmodified string\n\n\t// collect file stats\n\tfdata := statFile(fname, false)\n\t// pre-calculate function varaibles\n\text := filepath.Ext(fname)\n\tmimetype = mime.TypeByExtension(ext)\n\tif n := strings.Index(mimetype, \";\"); n > 0 {\n\t\tmimetype = mimetype[:n] //ignore charset returned\n\t}\n\tlastmodified = fdata.ModTime().Format(timeFormat)\n\n\t// generate function\n\treturn func(req *Request, resp *Response) {\n\t\tcontent, err := ioutil.ReadFile(fname)\n\t\tif err == nil {\n\t\t\tresp.SetHeader(\"Server\", \"nginx\")\n\t\t\tresp.SetHeader(\"Date\", GetDate())\n\t\t\tresp.SetHeader(\"Content-Type\", mimetype)\n\t\t\tresp.SetHeader(\"Content-Length\", strconv.Itoa(len(content)))\n\t\t\tresp.SetHeader(\"Last-Modified\", lastmodified)\n\t\t\tresp.SetHeader(\"Connection\", \"close\")\n\t\t\tresp.SetBodyBytes(content)\n\t\t} else {\n\t\t\tresp.StatusCode(500)\n\t\t}\n\t}\n}", "func FileServer(router *chi.Mux, path string, root string) {\n\tif strings.ContainsAny(path, \"{}*\") {\n\t\tpanic(\"FileServer does not permit URL parameters.\")\n\t}\n\n\tfs := http.StripPrefix(path, http.FileServer(\n\t\t&CustomFilesystem{\n\t\t\tFileSystem: http.Dir(root),\n\t\t\treadDirBatchSize: 2,\n\t\t},\n\t))\n\n\t// redirect to / terminated urls\n\tif path != \"/\" && path[len(path)-1] != '/' {\n\t\trouter.Get(path, http.RedirectHandler(path+\"/\", 301).ServeHTTP)\n\t\tpath += \"/\"\n\t}\n\tpath += \"*\"\n\n\trouter.Get(path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// check if url has GET parameters\n\t\tif strings.Contains(r.RequestURI, \"?\") {\n\t\t\t// trim 
parameters as server is not gonna parse them\n\t\t\tr.RequestURI = r.RequestURI[:strings.LastIndex(r.RequestURI, \"?\")]\n\t\t\tfmt.Println(r.RequestURI)\n\t\t}\n\n\t\tinfo, err := os.Stat(fmt.Sprintf(\"%s%s\", root, r.RequestURI))\n\t\tif err == nil && info.IsDir() {\n\t\t\t_, err = os.Stat(fmt.Sprintf(\"%s%s/index.html\", root, r.RequestURI))\n\t\t}\n\n\t\tif os.IsNotExist(err) {\n\t\t\trouter.NotFoundHandler().ServeHTTP(w, r)\n\t\t} else {\n\t w.Header().Set(\"Cache-Control\", \"max-age=3600\")\n\t\t\tfs.ServeHTTP(w, r)\n\t\t}\n\t}))\n}", "func getFileToServe(url *url.URL, pathPrefix string) string {\n\tvar file string\n\n\tif pathPrefix == \"/\" {\n\t\tpathPrefix = \"\"\n\t}\n\n\tswitch filepath.Ext(url.Path) {\n\tcase \".html\", \".htm\", \"\":\n\t\tfile = IndexHTML\n\tdefault:\n\t\tpath := strings.TrimPrefix(url.Path, pathPrefix)\n\t\tfile = PublicPath + path\n\t}\n\n\treturn file\n}", "func (a *api) FileHandler() http.Handler {\n\treturn http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {\n\t\tlog.Print(\"HTTP\", req.Method, req.URL, GetIP(req))\n\n\t\tswitch req.Method {\n\t\tcase \"GET\":\n\t\t\tvars := mux.Vars(req)\n\t\t\tid, err := uuid.FromString(vars[\"fileID\"])\n\t\t\tif err != nil {\n\t\t\t\tres.WriteHeader(500)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tfile := File{}\n\t\t\ta.db.db.Find(&file, \"id = ?\", id.String())\n\n\t\t\tif file.ID == uuid.Nil.String() {\n\t\t\t\t// file doesn't exist\n\t\t\t\tres.WriteHeader(500)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tfileB, err := ioutil.ReadFile(fileFolder + \"/\" + id.String())\n\t\t\tif err != nil {\n\t\t\t\t// file doesn't exist\n\t\t\t\tres.WriteHeader(500)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tres.WriteHeader(200)\n\t\t\tres.Write(fileB)\n\t\tcase \"POST\":\n\t\t\tfile, handler, err := req.FormFile(\"file\")\n\t\t\tif err != nil {\n\t\t\t\t// file doesn't exist\n\t\t\t\tres.WriteHeader(500)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tdefer file.Close()\n\n\t\t\tfileID := uuid.NewV4()\n\n\t\t\tfilePath := fileFolder + 
\"/\" + fileID.String()\n\n\t\t\tf, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE, 0666)\n\t\t\tif err != nil {\n\t\t\t\t// file doesn't exist\n\t\t\t\tres.WriteHeader(500)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tdefer f.Close()\n\n\t\t\tio.Copy(f, file)\n\n\t\t\tfileBytes, err := ioutil.ReadFile(filePath)\n\t\t\tif err != nil {\n\t\t\t\t// file doesn't exist\n\t\t\t\tres.WriteHeader(500)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tnewFile := File{\n\t\t\t\tID: fileID.String(),\n\t\t\t\tFileName: handler.Filename,\n\t\t\t\tData: fileBytes,\n\t\t\t}\n\n\t\t\tfileMessage := Message{\n\t\t\t\tUsername: \"Anonymous\",\n\t\t\t\tType: \"message\",\n\t\t\t\tText: \"\",\n\t\t\t\tID: uuid.NewV4().String(),\n\t\t\t\tTime: time.Now(),\n\t\t\t\tFile: newFile,\n\t\t\t}\n\n\t\t\tbroadcastB, err := json.Marshal(fileMessage)\n\t\t\ta.db.db.Create(&newFile)\n\t\t\tres.WriteHeader(200)\n\t\t\ta.p2p.Broadcast(broadcastB)\n\t\t}\n\n\t})\n}", "func mainHandler(w http.ResponseWriter, r *http.Request) {\n\tpath := r.URL.Path\n\n\t// Serve from embedded files in executable if not web client directory was specified\n\tif WebClient() == \"\" {\n\t\tif len(path) > 0 && path[0:1] == \"/\" {\n\t\t\tpath = path[1:]\n\t\t}\n\t\tdvid.Debugf(\"[%s] Serving from embedded files: %s\\n\", r.Method, path)\n\n\t\tresource := nrsc.Get(path)\n\t\tif resource == nil {\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn\n\t\t}\n\t\trsrc, err := resource.Open()\n\t\tif err != nil {\n\t\t\tBadRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t\tdata, err := ioutil.ReadAll(rsrc)\n\t\tif err != nil {\n\t\t\tBadRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t\tdvid.SendHTTP(w, r, path, data)\n\t} else {\n\t\tfilename := filepath.Join(WebClient(), path)\n\t\tredirectURL := WebRedirectPath()\n\t\tif len(redirectURL) > 0 || WebDefaultFile() != \"\" {\n\t\t\t_, err := os.Stat(filename)\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\tif len(redirectURL) > 0 {\n\t\t\t\t\tif redirectURL[0] != '/' {\n\t\t\t\t\t\tredirectURL = \"/\" + 
redirectURL\n\t\t\t\t\t}\n\t\t\t\t\tdvid.Debugf(\"[%s] Redirecting bad file (%s) to default path: %s\\n\", r.Method, filename, redirectURL)\n\t\t\t\t\thttp.Redirect(w, r, redirectURL, http.StatusPermanentRedirect)\n\t\t\t\t} else {\n\t\t\t\t\tfilename = filepath.Join(WebClient(), WebDefaultFile())\n\t\t\t\t\tdvid.Debugf(\"[%s] Serving default file from webclient directory: %s\\n\", r.Method, filename)\n\t\t\t\t\thttp.ServeFile(w, r, filename)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tdvid.Debugf(\"[%s] Serving from webclient directory: %s\\n\", r.Method, filename)\n\t\thttp.ServeFile(w, r, filename)\n\t}\n}", "func (r *Router) ServeFile(route, file string) *Router {\n\tr.Handle(MethodGET, route, func(ctx *Context) {\n\t\tctx.SendFile(file)\n\t})\n\treturn r\n}" ]
[ "0.68540484", "0.64961594", "0.6205975", "0.6052243", "0.5979095", "0.5941416", "0.5835912", "0.58248794", "0.5780296", "0.5768345", "0.5687944", "0.5671151", "0.56280226", "0.56184274", "0.5585889", "0.55772424", "0.55736756", "0.55638856", "0.5534361", "0.5530158", "0.5529184", "0.551577", "0.55080026", "0.5493092", "0.54924893", "0.54898185", "0.5488722", "0.5485594", "0.5481958", "0.54713035", "0.54669756", "0.5460677", "0.5448781", "0.543237", "0.54253685", "0.54241836", "0.5392457", "0.53915685", "0.5375261", "0.5370696", "0.53658074", "0.5359873", "0.5359227", "0.535233", "0.53452843", "0.5333411", "0.5311142", "0.53054214", "0.53042", "0.5303502", "0.52926177", "0.5289943", "0.5287011", "0.5284112", "0.52697545", "0.52678496", "0.5260297", "0.5257583", "0.5252347", "0.5250755", "0.52470887", "0.52358055", "0.5224383", "0.52218026", "0.5221246", "0.5211253", "0.52026176", "0.51860106", "0.5181437", "0.51750916", "0.5162948", "0.5162931", "0.5161897", "0.51613915", "0.5156797", "0.5156119", "0.5149653", "0.5147825", "0.51400405", "0.51367795", "0.5124706", "0.51191586", "0.51191586", "0.51160794", "0.51004374", "0.50995344", "0.50948864", "0.50797135", "0.5078449", "0.5078145", "0.5076699", "0.5076606", "0.5068568", "0.50672346", "0.50657594", "0.50601697", "0.5058422", "0.5052837", "0.50508606", "0.5047242", "0.50441456" ]
0.0
-1
List all the known records
func (s *sqlStore) List(opts ...store.ListOption) ([]string, error) { options := store.ListOptions{ Order: store.OrderAsc, } for _, o := range opts { o(&options) } db, queries, err := s.db(options.Database, options.Table) if err != nil { return nil, err } pattern := "%" if options.Prefix != "" { pattern = options.Prefix + pattern } if options.Suffix != "" { pattern = pattern + options.Suffix } var rows pgx.Rows if options.Limit > 0 { if options.Order == store.OrderAsc { rows, err = db.Query(s.options.Context, queries.ListAscLimit, pattern, options.Limit, options.Offset) } else { rows, err = db.Query(s.options.Context, queries.ListDescLimit, pattern, options.Limit, options.Offset) } } else { if options.Order == store.OrderAsc { rows, err = db.Query(s.options.Context, queries.ListAsc, pattern) } else { rows, err = db.Query(s.options.Context, queries.ListDesc, pattern) } } if err != nil { if err == pgx.ErrNoRows { return nil, nil } return nil, err } defer rows.Close() keys := make([]string, 0, 10) for rows.Next() { var key string err = rows.Scan(&key) if err != nil { return nil, err } keys = append(keys, key) } return keys, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (sr *StoredRecording) List(filter *ari.Key) (sx []*ari.Key, err error) {\n\tvar recs []struct {\n\t\tName string `json:\"name\"`\n\t}\n\n\tif filter == nil {\n\t\tfilter = sr.client.stamp(ari.NewKey(ari.StoredRecordingKey, \"\"))\n\t}\n\n\terr = sr.client.get(\"/recordings/stored\", &recs)\n\n\tfor _, rec := range recs {\n\t\tk := sr.client.stamp(ari.NewKey(ari.StoredRecordingKey, rec.Name))\n\t\tif filter.Match(k) {\n\t\t\tsx = append(sx, k)\n\t\t}\n\t}\n\n\treturn\n}", "func displayAllRecords(records []Record) {\n\tfmt.Printf(\"\\nDisplaying all records...\\n\\n\")\n\n\tfor i := 0; i < len(records); i++ {\n\t\tfmt.Printf(\"Record ID: %d: %+v\\n\", i, records[i])\n\t\ttime.Sleep(5 * time.Millisecond) // 5ms between records\n\t}\n}", "func (r Record) List(bs []byte) *RecordListResult {\n\tdata := new(RecordListResult)\n\terr := json.Unmarshal(bs, data)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn data\n}", "func (p *RecordAPI) List(domain string) (list []Record, err error) {\n\t// Get DNS record list (POST https://dnsapi.cn/Record.List)\n\tparams := url.Values{}\n\tparams.Set(\"format\", \"json\")\n\tparams.Set(\"login_token\", p.loginToken)\n\tparams.Set(\"domain\", domain)\n\tres, err := simpleHTTP(p.client, \"POST\", \"https://dnsapi.cn/Record.List\", params)\n\tif err != nil {\n\t\treturn\n\t}\n\tvar jsonRes struct {\n\t\tStatus Status `json:\"status\"`\n\t\tRecords []Record `json:\"records\"`\n\t}\n\t// fmt.Println(string(res))\n\tif err = json.Unmarshal(res, &jsonRes); err != nil {\n\t\treturn\n\t}\n\tif jsonRes.Status.Code != 1 {\n\t\treturn list, errors.New(jsonRes.Status.Message)\n\t}\n\treturn jsonRes.Records, nil\n}", "func (k Keeper) RecordsAll(c context.Context, req *types.RecordsAllRequest) (*types.RecordsAllResponse, error) {\n\tdefer telemetry.MeasureSince(time.Now(), types.ModuleName, \"query\", \"RecordsAll\")\n\tretval := types.RecordsAllResponse{Request: req}\n\n\tpageRequest := getPageRequest(req)\n\n\tctx := 
sdk.UnwrapSDKContext(c)\n\tkvStore := ctx.KVStore(k.storeKey)\n\tprefixStore := prefix.NewStore(kvStore, types.RecordKeyPrefix)\n\n\tpageRes, err := query.Paginate(prefixStore, pageRequest, func(key, value []byte) error {\n\t\tvar record types.Record\n\t\tvErr := record.Unmarshal(value)\n\t\tif vErr == nil {\n\t\t\tretval.Records = append(retval.Records, types.WrapRecord(&record))\n\t\t\treturn nil\n\t\t}\n\t\t// Something's wrong. Let's do what we can to give indications of it.\n\t\tvar addr types.MetadataAddress\n\t\tkErr := addr.Unmarshal(key)\n\t\tif kErr == nil {\n\t\t\tk.Logger(ctx).Error(\"failed to unmarshal record\", \"address\", addr, \"error\", vErr)\n\t\t\tretval.Records = append(retval.Records, types.WrapRecordNotFound(addr))\n\t\t} else {\n\t\t\tk64 := b64.StdEncoding.EncodeToString(key)\n\t\t\tk.Logger(ctx).Error(\"failed to unmarshal record key and value\",\n\t\t\t\t\"key error\", kErr, \"value error\", vErr, \"key (base64)\", k64)\n\t\t\tretval.Records = append(retval.Records, &types.RecordWrapper{})\n\t\t}\n\t\treturn nil // Still want to move on to the next.\n\t})\n\tif err != nil {\n\t\treturn &retval, status.Error(codes.Unavailable, err.Error())\n\t}\n\tretval.Pagination = pageRes\n\treturn &retval, nil\n}", "func outputRecordsAll(cmd *cobra.Command) error {\n\tclientCtx, err := client.GetClientQueryContext(cmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpageReq, e := client.ReadPageRequest(withPageKeyDecoded(cmd.Flags()))\n\tif e != nil {\n\t\treturn e\n\t}\n\tqueryClient := types.NewQueryClient(clientCtx)\n\tres, err := queryClient.RecordsAll(\n\t\tcontext.Background(),\n\t\t&types.RecordsAllRequest{Pagination: pageReq},\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !includeRequest {\n\t\tres.Request = nil\n\t}\n\n\treturn clientCtx.PrintProto(res)\n}", "func getAllRecordsList(stub shim.ChaincodeStubInterface) ([]string, error) {\n\tvar recordList []string\n\trecBytes, _ := stub.GetState(ALL_ELEMENENTS)\n\n\terr := 
json.Unmarshal(recBytes, &recordList)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to unmarshal getAllRecordsList \")\n\t}\n\n\treturn recordList, nil\n}", "func getAllRecordsList(stub shim.ChaincodeStubInterface) ([]string, error) {\r\n\tvar recordList []string\r\n\trecBytes, _ := stub.GetState(ALL_ELEMENENTS)\r\n\r\n\terr := json.Unmarshal(recBytes, &recordList)\r\n\tif err != nil {\r\n\t\treturn nil, errors.New(\"Failed to unmarshal getAllRecordsList \")\r\n\t}\r\n\r\n\treturn recordList, nil\r\n}", "func GetAllRecords(client *mongo.Collection) *[]Record {\n\tcursor, err := client.Find(context.TODO(), bson.D{{}})\n\tif err != nil {\n\t\tlog.Fatalln(\"Unable to find any records: %s\", err)\n\t}\n\n\tvar result []Record\n\tfor cursor.Next(context.TODO()) {\n\t\tvar elem Record\n\t\terr := cursor.Decode(&elem)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\t\tresult = append(result, elem)\n\t}\n\tcursor.Close(context.TODO())\n\treturn &result\n}", "func List(d Recorder, limit, offset uint64) ([]Recorder, error) {\n\tfn := func(desc Describer, query squirrel.SelectBuilder) (squirrel.SelectBuilder, error) {\n\t\treturn query.Limit(limit).Offset(offset), nil\n\t}\n\n\treturn ListWhere(d, fn)\n}", "func (db *DB) GetAll(ctx context.Context) ([]*databroker.Record, error) {\n\treturn db.getAll(ctx, func(record *databroker.Record) bool { return true })\n}", "func (s *SqliteServer) ReadAllRecords() ([]gdp.Record, error) {\n\tqueryString := \"SELECT hash, recno, timestamp, accuracy, prevhash, value, sig FROM log_entry\"\n\n\trows, err := s.db.Query(queryString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trecords, err := parseRecordRows(rows)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn records, nil\n}", "func (d *DigitalOcean) List(ctx context.Context, domain string) ([]Record, error) {\n\treq, err := d.prepareRequest(http.MethodGet, fmt.Sprintf(\"/domains/%s/records?per_page=200\", domain), nil)\n\tif err != nil {\n\t\treturn nil, 
fmt.Errorf(\"failed to prepare a request: %w\", err)\n\t}\n\n\tctx, cancel := context.WithTimeout(ctx, d.timeout)\n\tdefer cancel()\n\n\treq = req.WithContext(ctx)\n\n\tres, err := d.c.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to do a request: %w\", err)\n\t}\n\n\tdefer res.Body.Close()\n\n\tif !misc.Success(res.StatusCode) {\n\t\treturn nil, fmt.Errorf(\"unexpected response with status code %d\", res.StatusCode)\n\t}\n\n\tvar records domainRecords\n\tif err := json.NewDecoder(res.Body).Decode(&records); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to decode the response: %w\", err)\n\t}\n\n\tif records.Links.Pages.Next != \"\" {\n\t\tlog.Debugf(\"there are more than 200 dns record for %s domain, are you sure that's correct? if yes, please raise an issue here: https://github.com/skibish/ddns/issues/new\", domain)\n\t}\n\n\treturn records.Records, nil\n}", "func listRecords(client *dnsimple.Client, accountID, domain string,\n\toptions *dnsimple.ZoneRecordListOptions) (records zoneRecords, err error) {\n\tif options == nil {\n\t\toptions = &dnsimple.ZoneRecordListOptions{}\n\t}\n\tfor p := 1; ; p++ {\n\t\tlistZoneRecordsResponse, err := client.Zones.ListRecords(accountID, domain, options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor i := range listZoneRecordsResponse.Data {\n\t\t\trecords = append(records, listZoneRecordsResponse.Data[i])\n\t\t}\n\t\tif options.Page == 0 {\n\t\t\toptions.Page = 2\n\t\t} else {\n\t\t\toptions.Page++\n\t\t}\n\t\tif p >= listZoneRecordsResponse.Pagination.TotalPages {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}", "func GetAllRecords(db string, name string) ([]string, error) {\n\tsession := ConnectDB(db)\n\tids, err := r.Table(name).Field(\"id\").Run(session)\n\tif err != nil {\n\t\tuniledgerlog.Error(err.Error())\n\t}\n\tvar idlist []string\n\terr = ids.All(&idlist)\n\tif err != nil {\n\t\tuniledgerlog.Error(err.Error())\n\t\treturn nil, errors.New(err.Error())\n\t}\n\treturn idlist, nil\n}", "func (c 
*Context) All(records interface{}) error {\n\td, lock := driver.Get()\n\tdefer lock.Unlock()\n\n\tmeta, err := getMetadata(records)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfieldss, err := d.All(meta.tablename, meta.fields, c)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to fetch all records - %s\", err)\n\t}\n\n\tif err := populateRecordsFromFieldss(records, fieldss); err != nil {\n\t\treturn fmt.Errorf(\"Unable to fetch all records - %s\", err)\n\t}\n\n\treturn nil\n}", "func (a *Users) ListAll(w http.ResponseWriter, r *http.Request) {\n\ta.l.Println(\"[DEBUG] get all records\")\n\n\taccs := models.GetUsers()\n\n\t//err := models.ToJSON(accs, w)\n\terr := utils.Respond(w, accs)\n\tif err != nil {\n\t\t// we should never be here but log the error just incase\n\t\ta.l.Println(\"[ERROR] serializing user\", err)\n\t}\n}", "func (db *DB) GetAll(ip string) (*Record, error) { return db.query(ip, ModeDB24) }", "func (t *BPTree) All() (records Records, err error) {\n\treturn getRecordWrapper(t.getAll())\n}", "func getAll(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-type\", \"application/json\")\n\n\t// sending query over db object and storing respose in var result\n\tresult, err := db.Query(\"SELECT fname, lname, email, pword, id FROM person\")\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tdefer result.Close()\n\n\t// to fetch one record at a time from result\n\tfor result.Next() {\n\n\t\t// creating a variable person to store the and then show it\n\t\tvar person Person\n\t\terr := result.Scan(&person.Fname, &person.Lname, &person.Email, &person.Pword, &person.Id)\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\tpeople = append(people, person)\n\t}\n\t// Encode json to be sent to client machine\n\tjson.NewEncoder(w).Encode(people)\n}", "func (d *DomainRecordsServiceHandler) List(ctx context.Context, domain string, options *ListOptions) ([]DomainRecord, *Meta, error) {\n\treq, err := d.client.NewRequest(ctx, http.MethodGet, 
fmt.Sprintf(\"%s/%s/records\", domainPath, domain), nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tnewValues, err := query.Values(options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treq.URL.RawQuery = newValues.Encode()\n\n\trecords := new(domainRecordsBase)\n\tif err = d.client.DoWithContext(ctx, req, records); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn records.Records, records.Meta, nil\n}", "func (o *dnsOp) listRecords(zone dnsprovider.Zone) ([]dnsprovider.ResourceRecordSet, error) {\n\tkey := zone.Name() + \"::\" + zone.ID()\n\n\trrs := o.recordsCache[key]\n\tif rrs == nil {\n\t\trrsProvider, ok := zone.ResourceRecordSets()\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"zone does not support resource records %q\", zone.Name())\n\t\t}\n\n\t\tklog.V(2).Infof(\"Querying all dnsprovider records for zone %q\", zone.Name())\n\t\tvar err error\n\t\trrs, err = rrsProvider.List()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error querying resource records for zone %q: %v\", zone.Name(), err)\n\t\t}\n\n\t\to.recordsCache[key] = rrs\n\t}\n\n\treturn rrs, nil\n}", "func (c *Client) ListRecords(ctx context.Context, domain string) ([]Record, error) {\n\tdata := APIRequest{\n\t\tMethod: \"list-records\",\n\t\tParams: Record{\n\t\t\tDomain: domain,\n\t\t},\n\t}\n\n\treq, err := newJSONRequest(ctx, http.MethodPost, c.apiEndpoint, data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar result APIResponse[Records]\n\terr = c.do(req, &result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn result.Result.Records, nil\n}", "func (db *DB) List(ctx context.Context, sinceVersion string) ([]*databroker.Record, error) {\n\tc := db.pool.Get()\n\tdefer c.Close()\n\n\tv, err := strconv.ParseUint(sinceVersion, 16, 64)\n\tif err != nil {\n\t\tv = 0\n\t}\n\n\tids, err := redis.Strings(c.Do(\"ZRANGEBYSCORE\", db.versionSet, fmt.Sprintf(\"(%d\", v), \"+inf\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpbRecords := 
make([]*databroker.Record, 0, len(ids))\n\tfor _, id := range ids {\n\t\tb, err := redis.Bytes(c.Do(\"HGET\", db.recordType, id))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbRecord, err := db.toPbRecord(b)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbRecords = append(pbRecords, pbRecord)\n\t}\n\treturn pbRecords, nil\n}", "func (n *NameCom) ListRecords(request *ListRecordsRequest) (*ListRecordsResponse, error) {\n\tendpoint := fmt.Sprintf(\"/v4/domains/%s/records\", request.DomainName)\n\n\tvalues := url.Values{}\n\tif request.PerPage != 0 {\n\t\tvalues.Set(\"perPage\", fmt.Sprintf(\"%d\", request.PerPage))\n\t}\n\tif request.Page != 0 {\n\t\tvalues.Set(\"page\", fmt.Sprintf(\"%d\", request.Page))\n\t}\n\n\tbody, err := n.get(endpoint, values)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &ListRecordsResponse{}\n\n\terr = json.NewDecoder(body).Decode(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}", "func All(w http.ResponseWriter, r *http.Request) {\n\tvar result []Location\n\terr := store.Find(&result, bolthold.Where(\"Serial\").Eq(\"ce011711bd1668d80c\").Index(\"Serial\"))\n\tif err != nil {\n\t\tfmt.Println(\"Err\")\n\t\tfmt.Println(err)\n\t}\n\n\tjson.NewEncoder(w).Encode(result)\n\n}", "func (h *Hostsfile) Records() []*Record {\n\treturn h.records\n}", "func GetAllRecords(db *sql.DB, id int) ([]Record, error) {\n\trows, err := db.Query(`SELECT * FROM user_records WHERE user_id = $1`, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar records []Record\n\tfor rows.Next() {\n\t\tvar record Record\n\t\terr := rows.Scan(\n\t\t\t&record.ID,\n\t\t\t&record.Weight,\n\t\t\t&record.Reps,\n\t\t\t&record.RPE,\n\t\t\t&record.DatePerformed,\n\t\t\t&record.ExerciseID,\n\t\t\t&record.UserID,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trecords = append(records, record)\n\t}\n\n\treturn records, nil\n}", "func (g *Game) Records(filter *LeaderboardFilter, embeds string) 
(*LeaderboardCollection, *Error) {\n\treturn fetchLeaderboardsLink(firstLink(g, \"records\"), filter, nil, embeds)\n}", "func (i *API) List(kind string) ([][]byte, error) {\n\treturn i.primaryStore.RawList(kind)\n}", "func (f fileRecordRepository) All() (files []fileRecord) {\n\t// Open database connection\n\tdb, err := dbConnect()\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn\n\t}\n\n\t// Retrieve all files\n\tif files, err = db.GetAllFileRecords(); err != nil {\n\t\tlog.Println(err.Error())\n\t}\n\n\tif err := db.Close(); err != nil {\n\t\tlog.Println(err.Error())\n\t}\n\n\treturn files\n}", "func (ix *IndexedBucket) List(offset, size int) ([]Record, int, error) {\n\ttotal := 0\n\tvar ret []Record = nil\n\tviewFn := func(tx *bolt.Tx) error {\n\t\tbucket := tx.Bucket(ix.mainBucket)\n\t\tnEntries := bucket.Stats().KeyN\n\n\t\t// If size <= 0 then we want all entries.\n\t\tif size <= 0 {\n\t\t\tsize = nEntries\n\t\t}\n\t\tresultSize := util.MaxInt(0, util.MinInt(nEntries-offset, size))\n\t\tif resultSize == 0 {\n\t\t\tret = []Record{}\n\t\t\treturn nil\n\t\t}\n\n\t\tcursor := bucket.Cursor()\n\t\t_, val := cursor.First()\n\n\t\t// TODO(stephana): This is definitely inefficient, but there is no\n\t\t// option in boltdb to address a specific element by index. 
If this is\n\t\t// too slow we can add an additional pseudo index for issue ids.\n\n\t\t// Skip the first entries if offset > 0.\n\t\tfor i := 0; i < offset; i++ {\n\t\t\t_, val = cursor.Next()\n\t\t}\n\n\t\tallEntries := make([]Record, resultSize, resultSize)\n\t\tvar err error\n\t\tvar iRec interface{}\n\t\t// Note: We are guaranteed to have resultSize records in the database.\n\t\tfor i := 0; i < resultSize; i++ {\n\t\t\tif iRec, err = ix.codec.Decode(val); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tallEntries[i] = iRec.(Record)\n\t\t\t_, val = cursor.Next()\n\t\t}\n\n\t\t// Success assign the results now.\n\t\tret = allEntries\n\t\ttotal = nEntries\n\t\treturn nil\n\t}\n\n\terr := ix.DB.View(viewFn)\n\treturn ret, total, err\n}", "func (c *client) ListPatient(filter map[string]interface{}, limit int64, offset int64) ([]*models.Patient, error) {\n\tvar pat []*models.Patient\n\n\tif err := c.db.Select(&pat, fmt.Sprintf(\"SELECT * FROM %s %s LIMIT %b, %b\", patientTableName, mkFilter(filter), offset, limit)); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn pat, domainErr.NewAPIError(domainErr.NotFound, err)\n\t\t}\n\t\treturn pat, err\n\t}\n\n\treturn pat, nil\n}", "func (sc *SmartContract) RetriveRecords(stub shim.ChaincodeStubInterface, criteria string) []map[string]interface{} {\n\trecords := make([]map[string]interface{}, 0)\n\tselectorString := fmt.Sprintf(\"{\\\"selector\\\":%s }\", criteria)\n\t_SC_LOGGER.Info(\"Query Selector :\" + selectorString)\n\tresultsIterator, _ := stub.GetQueryResult(selectorString)\n\tfor resultsIterator.HasNext() {\n\t\trecord := make(map[string]interface{})\n\t\trecordBytes, _ := resultsIterator.Next()\n\t\terr := json.Unmarshal(recordBytes.Value, &record)\n\t\tif err != nil {\n\t\t\t_SC_LOGGER.Infof(\"Unable to unmarshal data retived:: %v\", err)\n\t\t}\n\t\trecords = append(records, record)\n\t}\n\treturn records\n}", "func ReadAllRecords(rr RecordReader) ([]Record, error) {\n\tvar files []Record\n\terr := 
ForEachRecord(rr, func(r Record) error {\n\t\tfiles = append(files, r)\n\t\treturn nil\n\t})\n\treturn files, err\n}", "func ReadAllRecords(rr RecordReader) ([]Record, error) {\n\tvar files []Record\n\terr := ForEachRecord(rr, func(r Record) error {\n\t\tfiles = append(files, r)\n\t\treturn nil\n\t})\n\treturn files, err\n}", "func (db *InMemDatabase) List(typename string) []contrail.IObject {\n\tnilList := []contrail.IObject{}\n\ttypeMap, ok := db.typeDB[typename]\n\tif !ok {\n\t\treturn nilList\n\t}\n\tvalues := make([]contrail.IObject, len(typeMap))\n\ti := 0\n\tfor _, v := range typeMap {\n\t\tvalues[i] = v\n\t\ti++\n\t}\n\treturn values\n}", "func (ms *mysqlstore) GetAll() (records []store.Record, err error) {\n\tdefer func(t0 time.Time) {\n\t\terrStr := \"\"\n\t\tif err != nil {\n\t\t\terrStr = err.Error()\n\t\t}\n\t\tms.logger.Printf(\"%+v\",\n\t\t\tlogrec{\n\t\t\t\tService: \"mysql\",\n\t\t\t\tOperation: \"get-all\",\n\t\t\t\tError: errStr,\n\t\t\t\tDuration: fmt.Sprintf(\"%v\", time.Since(t0)),\n\t\t\t},\n\t\t)\n\t}(time.Now())\n\n\treturn\n}", "func RunRecordList(ns string, config doit.Config, out io.Writer, args []string) error {\n\tif len(args) != 1 {\n\t\treturn doit.NewMissingArgsErr(ns)\n\t}\n\tname := args[0]\n\n\tclient := config.GetGodoClient()\n\n\tif len(name) < 1 {\n\t\treturn errors.New(\"domain name is missing\")\n\t}\n\n\tf := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) {\n\t\tlist, resp, err := client.Domains.Records(name, opt)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tsi := make([]interface{}, len(list))\n\t\tfor i := range list {\n\t\t\tsi[i] = list[i]\n\t\t}\n\n\t\treturn si, resp, err\n\t}\n\n\tsi, err := doit.PaginateResp(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlist := make([]godo.DomainRecord, len(si))\n\tfor i := range si {\n\t\tlist[i] = si[i].(godo.DomainRecord)\n\t}\n\n\treturn displayOutput(&domainRecord{domainRecords: list}, out)\n}", "func (zone *Zone) Records() []dns.RR 
{\n\treturn zone.records\n}", "func ListRecord(domain string, domainID string) ([]RecordEntry, error) {\n\tdata := P()\n\tif domain != \"\" {\n\t\tdata.Add(\"domain\", domain)\n\t}\n\tif domainID != \"\" {\n\t\tdata.Add(\"domain_id\", domainID)\n\t}\n\tres := recordReflectFunc(\"list\", data)\n\tif res.Err != nil {\n\t\treturn nil, res.Err\n\t}\n\tif ret, ok := res.Data.(*RecordListResult); ok {\n\t\tif ret != nil {\n\t\t\tif ret.Status.Code == \"1\" {\n\t\t\t\treturn ret.Records, nil\n\t\t\t}\n\t\t\treturn nil, Err(ErrInvalidStatus, \"Record.List\", ret.Status.Code,\n\t\t\t\tret.Status.Message)\n\t\t}\n\t}\n\treturn nil, Err(ErrInvalidTypeAssertion, \"RecordListResult\")\n}", "func (c *cockroachdb) getRecords(tokens []string, fetchFiles bool) ([]Record, error) {\n\t// Lookup the latest version of each record specified by\n\t// the provided tokens.\n\tquery := `SELECT a.*\n FROM records a\n LEFT OUTER JOIN records b\n ON a.token = b.token\n AND a.version < b.version\n WHERE b.token IS NULL\n AND a.token IN (?)`\n\trows, err := c.recordsdb.Raw(query, tokens).Rows()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\trecords := make([]Record, 0, len(tokens))\n\tfor rows.Next() {\n\t\tvar r Record\n\t\terr := c.recordsdb.ScanRows(rows, &r)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trecords = append(records, r)\n\t}\n\tif err = rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Compile a list of record primary keys\n\tkeys := make([]string, 0, len(records))\n\tfor _, v := range records {\n\t\tkeys = append(keys, v.Key)\n\t}\n\n\tif fetchFiles {\n\t\t// Lookup files and metadata streams for each of the\n\t\t// previously queried records.\n\t\terr = c.recordsdb.\n\t\t\tPreload(\"Metadata\").\n\t\t\tPreload(\"Files\").\n\t\t\tWhere(keys).\n\t\t\tFind(&records).\n\t\t\tError\n\t} else {\n\t\t// Lookup just the metadata streams for each of the\n\t\t// previously queried records.\n\t\terr = 
c.recordsdb.\n\t\t\tPreload(\"Metadata\").\n\t\t\tWhere(keys).\n\t\t\tFind(&records).\n\t\t\tError\n\t}\n\n\treturn records, err\n}", "func (k Keeper) GetAllRecords(ctx sdk.Context) ([]types.RecordCompositeKey, []types.Record) {\n\tstore := prefix.NewStore(ctx.KVStore(k.storeKey), types.RecordKeyPrefix)\n\titerator := sdk.KVStorePrefixIterator(store, []byte{})\n\tdefer iterator.Close()\n\n\tkeys := make([]types.RecordCompositeKey, 0)\n\tvalues := make([]types.Record, 0)\n\n\tfor ; iterator.Valid(); iterator.Next() {\n\t\tvar key types.RecordCompositeKey\n\t\tcompkey.MustDecode(iterator.Key(), &key)\n\t\tkeys = append(keys, key)\n\n\t\tvar value types.Record\n\t\tk.cdc.MustUnmarshal(iterator.Value(), &value)\n\t\tvalues = append(values, value)\n\t}\n\n\treturn keys, values\n}", "func ListAll() []ModelUser {\n\tcommon.Logger(\"info\", \"Initialize Get Database in PostgreSQL\", \"Modul User : ListAll\")\n\tdb := common.GetPostgreSQLDB()\n\n\ttx := db.Begin()\n\n\tcommon.Logger(\"info\", \"Prepare Query Select Table in Database PostgreSQL\", \"Modul User : ListAll\")\n\tvar models []ModelUser\n\n\tcommon.Logger(\"info\", \"Prepare Read Data from PostgreSQL\", \"Modul User : ListAll\")\n\ttx.Find(&models)\n\n\ttx.Commit()\n\tcommon.Logger(\"info\", \"Finnished Read Data from PostgreSQL\", \"Modul User : ListAll\")\n\n\treturn models\n}", "func listAccountsRecord(db *sqlite.Driver, filter AccountsFilter) ([]Account, error) {\n\tvar err error\n\tvar rows *sql.Rows\n\n\t// if true, prepend \"where\" statement on the query string\n\tvar whereFlag = false\n\tvar conditionBlocks = []string{}\n\tvar valueBlocks = []interface{}{}\n\n\tif filter.nameLike != nil {\n\t\twhereFlag = true\n\t\tconditionBlocks = append(conditionBlocks, \"name like ?\")\n\t\tvalueBlocks = append(valueBlocks, *(filter.nameLike))\n\t}\n\t// limit\n\tif filter.limit != nil {\n\t\tif *(filter.limit) < 0 {\n\t\t\treturn nil, ValidationError(\"limit < 0\")\n\t\t}\n\n\t\tconditionBlocks = 
append(conditionBlocks, \"limit ?\")\n\t\tvalueBlocks = append(valueBlocks, *(filter.limit))\n\t}\n\t// offset\n\tif filter.limit != nil && filter.offset != nil {\n\t\tif *(filter.offset) < 0 {\n\t\t\treturn nil, ValidationError(\"offset < 0\")\n\t\t}\n\n\t\tconditionBlocks = append(conditionBlocks, \"offset ?\")\n\t\tvalueBlocks = append(valueBlocks, *(filter.offset))\n\t}\n\t// add \"where\" statement if necessary\n\tif whereFlag {\n\t\tconditionBlocks = append([]string{\"where\"}, conditionBlocks...)\n\t}\n\tvar accounts []Account\n\n\t// query statement\n\tvar stmt = fmt.Sprintf(\"select %s from %s %s\", listColumns, tableName, strings.Join(conditionBlocks, \" \"))\n\tif rows, err = db.Query(stmt, valueBlocks...); err != nil {\n\t\treturn nil, SQLExecutionError(err)\n\t}\n\n\tfor rows.Next() {\n\t\tvar newAccount Account\n\t\tif err = rows.Scan(&newAccount.ID, &newAccount.Name, &newAccount.PermLevel); err != nil {\n\t\t\treturn nil, SQLExecutionError(err)\n\t\t}\n\n\t\taccounts = append(accounts, newAccount)\n\t}\n\n\treturn accounts, nil\n}", "func (db *SQLStore) List(includeExpired bool) ([]*CertRecord, error) {\n\tif err := db.conn.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\trecs := []*CertRecord{}\n\tif includeExpired {\n\t\tif err := db.listAll.Select(&recs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tif err := db.listCurrent.Select(&recs, time.Now()); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn recs, nil\n}", "func read(res http.ResponseWriter, req *http.Request) {\n\trows, err := db.Query(`SELECT * FROM customer;`)\n\tcheck(err)\n\tdefer rows.Close()\n\tvar name string\n\tfor rows.Next() {\n\t\terr = rows.Scan(&name)\n\t\tcheck(err)\n\t\tfmt.Fprintln(res, \"The records are\", name)\n\t}\n}", "func (s *Service) List(ctx context.Context, req *api.ListRequest) (*api.ListResults, error) {\n\tvar resp = api.ListResults{Type: req.Type, Request: req}\n\tvar searchRequest *bleve.SearchRequest\n\n\tif _, ok := Index[req.Type]; 
!ok {\n\t\tresp.Err = api.ErrorInvalidContentType.Error()\n\t\treturn &resp, nil\n\t}\n\n\tquery := bleve.NewMatchAllQuery()\n\tsearchRequest = bleve.NewSearchRequest(query)\n\n\tif req.SortBy == \"\" {\n\t\treq.SortBy = \"id\"\n\t}\n\tsearchRequest.SortBy([]string{req.SortBy})\n\tsearchRequest.Fields = []string{\"*\"}\n\tsearchRequest.Size = req.Size\n\tif searchRequest.Size <= 0 {\n\t\tsearchRequest.Size = 10\n\t}\n\tsearchRequest.From = req.Skip\n\n\tindex, err := getIndex(req.Type, req.Language)\n\tif err != nil {\n\t\tresp.Err = api.ErrorNotFound.Error()\n\t\treturn &resp, nil\n\t}\n\tsearchResult, err := index.Search(searchRequest)\n\tif err != nil {\n\t\tresp.Err = api.ErrorNotFound.Error()\n\t\treturn &resp, nil\n\t}\n\n\tresp.Total = searchResult.Total\n\n\tfor _, hit := range searchResult.Hits {\n\t\tresp.List = append(resp.List, hit.Fields)\n\t}\n\n\treturn &resp, nil\n}", "func (list *APTAuditList) printAll() {\n\tfor _, result := range list.results {\n\t\tfmt.Print(result)\n\t}\n}", "func (s *Service) Records(c context.Context, types []int64, mid, stime, etime int64, order, sort string, pn, ps int32) (res []*model.Record, total int32, err error) {\n\tvar midAts []int64\n\tif res, total, err = s.search.RecordPaginate(c, types, mid, stime, etime, order, sort, pn, ps); err != nil {\n\t\tlog.Error(\"s.search.RecordPaginate(%d,%d,%d,%d,%s,%s) error(%v)\", mid, sort, pn, ps, stime, etime, err)\n\t\treturn\n\t}\n\tif res == nil {\n\t\tres = _emptyRecords\n\t\treturn\n\t}\n\tfor _, r := range res {\n\t\tr.Message = template.HTMLEscapeString(r.Message)\n\t\tif len(r.Ats) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tvar ats []int64\n\t\tif ats, err = xstr.SplitInts(r.Ats); err != nil {\n\t\t\tlog.Error(\"xstr.SplitInts(%s) error(%v)\", r.Ats, err)\n\t\t\terr = nil\n\t\t}\n\t\tmidAts = append(midAts, ats...)\n\t}\n\tif len(midAts) == 0 {\n\t\treturn\n\t}\n\taccMap, _ := s.getAccInfo(c, midAts)\n\tfor _, r := range res {\n\t\tr.FillAts(accMap)\n\t}\n\treturn\n}", "func (p 
*Projects) ListAll(rw http.ResponseWriter, r *http.Request) {\n\tp.l.Println(\"[DEBUG] get all records\")\n\n\tvars := mux.Vars(r)\n\tuserID, ok := vars[\"userID\"]\n\n\tif !ok {\n\t\tio.WriteString(rw, `{{\"error\": \"id not found\"}}`)\n\t\treturn\n\t}\n\tprojects := data.GetAllUserProjects(userID)\n\n\terr := data.ToJSON(projects, rw)\n\tif err != nil {\n\t\t// we should never be here but log the error just incase\n\t\tp.l.Println(\"[ERROR] serializing project\", err)\n\t}\n}", "func (m MariaDB) All(ctx context.Context) ([]entity.PersonalData, error) {\n\tsqlQuery := fmt.Sprintf(\"SELECT * FROM person\")\n\tvar p personalData\n\tvar persons []entity.PersonalData\n\trows, err := m.Person.QueryContext(ctx, sqlQuery)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not make query\")\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\terr = rows.Scan(&p.ID, &p.Name, &p.LastName, &p.Phone, &p.Email, &p.YearOfBirth)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"could not scan rows\")\n\t\t}\n\t\tpersons = append(persons, p.transmit())\n\t}\n\tif err = rows.Err(); err != nil {\n\t\treturn nil, errors.Wrap(err, \"rows error\")\n\t}\n\treturn persons, nil\n}", "func outputRecordSpecsAll(cmd *cobra.Command) error {\n\tclientCtx, err := client.GetClientQueryContext(cmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpageReq, e := client.ReadPageRequest(withPageKeyDecoded(cmd.Flags()))\n\tif e != nil {\n\t\treturn e\n\t}\n\tqueryClient := types.NewQueryClient(clientCtx)\n\tres, err := queryClient.RecordSpecificationsAll(\n\t\tcontext.Background(),\n\t\t&types.RecordSpecificationsAllRequest{Pagination: pageReq},\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !includeRequest {\n\t\tres.Request = nil\n\t}\n\n\treturn clientCtx.PrintProto(res)\n}", "func (p *PDNSProvider) Records(ctx context.Context) (endpoints []*endpoint.Endpoint, _ error) {\n\tzones, _, err := p.client.ListZones()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfilteredZones, _ := 
p.client.PartitionZones(zones)\n\n\tfor _, zone := range filteredZones {\n\t\tz, _, err := p.client.ListZone(zone.Id)\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"Unable to fetch Records\")\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, rr := range z.Rrsets {\n\t\t\te, err := p.convertRRSetToEndpoints(rr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tendpoints = append(endpoints, e...)\n\t\t}\n\t}\n\n\tlog.Debugf(\"Records fetched:\\n%+v\", endpoints)\n\treturn endpoints, nil\n}", "func (db *Redis) GetAllRecords(table string) (map[string]string, int, error) {\n\telems := map[string]string{}\n\titer := db.Client.HScan(table, 0, \"*\", maxScan).Iterator()\n\n\tcounter := 0\n\tkey := \"\"\n\tfor iter.Next() {\n\t\t// stop iterating\n\t\tif counter == maxEntries+1 {\n\t\t\treturn elems, -1, nil\n\t\t}\n\t\tif iter.Err() != nil {\n\t\t\tlog.Error().Err(iter.Err()).Msg(\"REDIS could not get records\")\n\t\t\tcontinue\n\t\t}\n\t\tval := iter.Val()\n\t\tif counter%2 == 0 {\n\t\t\tkey = val\n\t\t} else {\n\t\t\telems[key] = val\n\t\t}\n\t\tcounter++\n\t}\n\n\t// retrieve number of elements\n\tcount, err := db.Client.HLen(table).Result()\n\tif err == redis.Nil || err != nil {\n\t\tlog.Error().Err(err).Msg(\"REDIS could not get count\")\n\t}\n\n\treturn elems, int(count), nil\n}", "func (api *API) ListDNSRecords(ctx context.Context, rc *ResourceContainer, params ListDNSRecordsParams) ([]DNSRecord, *ResultInfo, error) {\n\tif rc.Identifier == \"\" {\n\t\treturn nil, nil, ErrMissingZoneID\n\t}\n\n\tparams.Name = toUTS46ASCII(params.Name)\n\n\tautoPaginate := true\n\tif params.PerPage >= 1 || params.Page >= 1 {\n\t\tautoPaginate = false\n\t}\n\n\tif params.PerPage < 1 {\n\t\tparams.PerPage = listDNSRecordsDefaultPageSize\n\t}\n\n\tif params.Page < 1 {\n\t\tparams.Page = 1\n\t}\n\n\tvar records []DNSRecord\n\tvar lastResultInfo ResultInfo\n\n\tfor {\n\t\turi := buildURI(fmt.Sprintf(\"/zones/%s/dns_records\", rc.Identifier), params)\n\t\tres, err := 
api.makeRequestContext(ctx, http.MethodGet, uri, nil)\n\t\tif err != nil {\n\t\t\treturn []DNSRecord{}, &ResultInfo{}, err\n\t\t}\n\t\tvar listResponse DNSListResponse\n\t\terr = json.Unmarshal(res, &listResponse)\n\t\tif err != nil {\n\t\t\treturn []DNSRecord{}, &ResultInfo{}, fmt.Errorf(\"%s: %w\", errUnmarshalError, err)\n\t\t}\n\t\trecords = append(records, listResponse.Result...)\n\t\tlastResultInfo = listResponse.ResultInfo\n\t\tparams.ResultInfo = listResponse.ResultInfo.Next()\n\t\tif params.ResultInfo.Done() || !autoPaginate {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn records, &lastResultInfo, nil\n}", "func (db *MongoDBAccess) GetAll() ([]Record, error) {\n\tvar records []Record\n\tcursor, err := db.client.Database(db.database).Collection(\"days\").Find(context.Background(), bson.D{})\n\tif err != nil {\n\t\treturn []Record{}, err\n\t}\n\tdefer cursor.Close(context.Background())\n\tfor cursor.Next(context.Background()) {\n\t\tvar record Record\n\t\tif err = cursor.Decode(&record); err != nil {\n\t\t\treturn []Record{}, err\n\t\t}\n\t\trecords = append(records, record)\n\t}\n\treturn records, nil\n}", "func getRecords(res *RecordsResp, qntString string) {\n\t//Setting the default value of the query status to false.\n\t//If the query succeeds, at the end, we cange this status to true.\n\tres.Status = false\n\n\tqnt, err := strconv.Atoi(qntString)\n\tif err != nil {\n\t\tlog.Printf(\"Function getRecords: Something went wrong when converting the quantity of records from string to int.\\n %v\\n\", err)\n\t\treturn\n\t}\n\t\n\t// Connecting to the database\n session, err := mgo.Dial(\"localhost\");\n if err != nil {\n \tlog.Printf(\"Function getRecords: Error when opening connection to database.\\n %v\\n\", err)\n \treturn\n }\n defer session.Close()\n \n // Querying the database\n conn := session.DB(DATABASE_NAME).C(RECORDS_COLLECTION)\n if err := conn.Find(nil).Limit(qnt).All(&res.Records); err != nil {\n \tlog.Printf(\"Function getRecords: Error when querying 
database.\\n %v\\n\", err)\n \treturn\n }\n \n // Getting the User Data\n conn = session.DB(DATABASE_NAME).C(USERS_COLLECTION)\n for i, _ := range res.Records {\n \tif err := conn.FindId(res.Records[i].UserId).One(&res.Records[i].UserData); err != nil {\n \t\tlog.Printf(\"Function getRecords: Error when getting user data\\n %v\\n\", err)\n \t\treturn\n \t}\n }\n \n //Query succeeded\n res.Status = true\n}", "func listDNSRecords(cfg *Config, c *CfVars, zoneID string, recordName string) ([]cloudflare.DNSRecord, error) {\n\tsubDomainRecord := cloudflare.DNSRecord{Name: recordName}\n\trec, err := c.API.DNSRecords(c.context, zoneID, subDomainRecord)\n\tif err != nil {\n\t\treturn nil, err\n\n\t}\n\n\treturn rec, nil\n}", "func getAll(w http.ResponseWriter, r *http.Request) {\n\tenableCors(&w)\n\trows, err := mainDB.Query(\"SELECT * FROM testTable\")\n\tcheckErr(err)\n\tvar logs Logs\n\tfor rows.Next() {\n\t\tvar log Log\n\t\terr = rows.Scan(&log.ID, &log.Time, &log.Level, &log.Msg, &log.Category, &log.DebugId, &log.Ip, &log.RequestId, &log.Type, &log.Uri, &log.UserId)\n\t\tcheckErr(err)\n\t\tlogs = append(logs, log)\n\t}\n\tjsonB, errMarshal := json.Marshal(logs)\n\tcheckErr(errMarshal)\n\tfmt.Fprintf(w, \"%s\", string(jsonB))\n}", "func (c *ApiService) ListRecording(params *ListRecordingParams) ([]ApiV2010Recording, error) {\n\tresponse, errors := c.StreamRecording(params)\n\n\trecords := make([]ApiV2010Recording, 0)\n\tfor record := range response {\n\t\trecords = append(records, record)\n\t}\n\n\tif err := <-errors; err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn records, nil\n}", "func (c *applicationUsecaseImpl) ListRecords(ctx context.Context, appName string) (*apisv1.ListWorkflowRecordsResponse, error) {\n\tvar record = model.WorkflowRecord{\n\t\tAppPrimaryKey: appName,\n\t\tStatus: model.RevisionStatusRunning,\n\t}\n\trecords, err := c.ds.List(ctx, &record, &datastore.ListOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(records) == 0 
{\n\t\trecord.Status = model.RevisionStatusComplete\n\t\trecords, err = c.ds.List(ctx, &record, &datastore.ListOptions{\n\t\t\tPage: 1,\n\t\t\tPageSize: 1,\n\t\t\tSortBy: []datastore.SortOption{{Key: \"model.createTime\", Order: datastore.SortOrderDescending}},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tresp := &apisv1.ListWorkflowRecordsResponse{\n\t\tRecords: []apisv1.WorkflowRecord{},\n\t}\n\tfor _, raw := range records {\n\t\trecord, ok := raw.(*model.WorkflowRecord)\n\t\tif ok {\n\t\t\tresp.Records = append(resp.Records, *convertFromRecordModel(record))\n\t\t}\n\t}\n\tresp.Total = int64(len(records))\n\n\treturn resp, nil\n}", "func (db database) list(w http.ResponseWriter, req *http.Request) {\n\n\tif err := itemList.Execute(w, db); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func list(db *sql.DB) ([]Todo, error) {\n\treturn read(db, -1)\n}", "func FetchList() (*List, error) {\n db := OpenDb()\n list := new(List) \n err := db.View(func(tx *bolt.Tx) error {\n b := tx.Bucket([]byte(\"Fika\"))\n err := b.ForEach(func(k, v []byte) error {\n var fetchedPerson Person\n err := json.Unmarshal(v, &fetchedPerson)\n \n if err != nil {\n return err\n }\n list.AddPerson(&fetchedPerson)\n return nil\n })\n \n if err != nil {\n return err\n }\n \n return nil\n })\n \n if err != nil {\n return nil, err\n }\n CloseDb(db)\n return list, nil\n}", "func (s *Service) ListPatient(filter map[string]interface{}, limit int64, offset int64) ([]*models.Patient, error) {\n\treturn s.db.ListPatient(filter, offset, limit)\n}", "func (db *DB) List(items interface{}) error {\n\tif db.tablename.length() <= 0 {\n\t\treturn errors.New(MongoDBErrTableName)\n\t}\n\tif err := db.Collection[db.tablename].Find(nil).All(items); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func GetList(tx *sql.Tx) (list []Info, err error) {\n\tmapper := rlt.NewAccountMapper(tx)\n\trows, err := mapper.FindAccountAll()\n\tfor _, row := range rows {\n\t\tinfo := Info{}\n\t\tinfo.ID = 
row.ID\n\t\tinfo.Domain = row.Domain.String\n\t\tinfo.UserName = row.UserName\n\t\tinfo.DisplayName = row.DisplayName\n\t\tinfo.Email = row.Email\n\t\tlist = append(list, info) //数据写入\n\t}\n\treturn list, err\n}", "func outputRecords(cmd *cobra.Command, recordAddr string, scopeID string, sessionID string, name string) error {\n\tclientCtx, err := client.GetClientQueryContext(cmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq := types.RecordsRequest{\n\t\tRecordAddr: recordAddr,\n\t\tScopeId: scopeID,\n\t\tSessionId: sessionID,\n\t\tName: name,\n\t\tIncludeScope: includeScope,\n\t\tIncludeSessions: includeSessions,\n\t}\n\n\tqueryClient := types.NewQueryClient(clientCtx)\n\tres, err := queryClient.Records(context.Background(), &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !includeRequest {\n\t\tres.Request = nil\n\t}\n\n\treturn clientCtx.PrintProto(res)\n}", "func (c *cockroachdb) inventory() ([]Record, error) {\n\t// Lookup the latest version of all records\n\tquery := `SELECT a.*\n FROM records a\n LEFT OUTER JOIN records b\n ON a.token = b.token\n AND a.version < b.version\n WHERE b.token IS NULL`\n\trows, err := c.recordsdb.Raw(query).Rows()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\trecords := make([]Record, 0, 1024) // PNOOMA\n\tfor rows.Next() {\n\t\tvar r Record\n\t\terr := c.recordsdb.ScanRows(rows, &r)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trecords = append(records, r)\n\t}\n\tif err = rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Compile a list of record primary keys\n\tkeys := make([]string, 0, len(records))\n\tfor _, v := range records {\n\t\tkeys = append(keys, v.Key)\n\t}\n\n\t// Lookup the files and metadata streams for each of the\n\t// previously queried records.\n\terr = c.recordsdb.\n\t\tPreload(\"Metadata\").\n\t\tPreload(\"Files\").\n\t\tWhere(keys).\n\t\tFind(&records).\n\t\tError\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn records, nil\n}", "func 
listAllRecordSets(r53 *route53.Route53, id string) (rrsets []*route53.ResourceRecordSet, err error) {\n\treq := route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: &id,\n\t}\n\n\tfor {\n\t\tvar resp *route53.ListResourceRecordSetsOutput\n\t\tresp, err = r53.ListResourceRecordSets(&req)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\trrsets = append(rrsets, resp.ResourceRecordSets...)\n\t\tif *resp.IsTruncated {\n\t\t\treq.StartRecordName = resp.NextRecordName\n\t\t\treq.StartRecordType = resp.NextRecordType\n\t\t\treq.StartRecordIdentifier = resp.NextRecordIdentifier\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// unescape wildcards\n\t//for _, rrset := range rrsets {\n\t//\trrset.Name = aws.String(unescaper.Replace(*rrset.Name))\n\t//}\n\n\treturn\n}", "func (s *store) ListAllCurrent() (int64, []types.Record, error) {\n\treturn s.listAllCurrent(0)\n}", "func (c *controller) FindAll() []string {\n\treturn c.service.FindAll()\n}", "func (s *dnsRecordSetLister) List(selector labels.Selector) (ret []*v1alpha1.DnsRecordSet, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.DnsRecordSet))\n\t})\n\treturn ret, err\n}", "func (d database) list(w http.ResponseWriter, r *http.Request) {\n\tfor item, price := range d {\n\t\tfmt.Fprintf(w, \"item: %s, price: %s\\n\", item, price)\n\t}\n}", "func (db database) list(w http.ResponseWriter, req *http.Request) {\n\tfor item, price := range db {\n\t\tfmt.Fprintf(w, \"%s: %s\\n\", item, price)\n\t}\n}", "func (db database) list(w http.ResponseWriter, req *http.Request) {\n\tfor item, price := range db {\n\t\tfmt.Fprintf(w, \"%s: %s\\n\", item, price)\n\t}\n}", "func SelectRecordings(recording []Recording) {\n\tDB.Select(&recording, \"SELECT * FROM Recording\")\n}", "func (_class PIFClass) GetAllRecords(sessionID SessionRef) (_retval map[PIFRef]PIFRecord, _err error) {\n\t_method := \"PIF.get_all_records\"\n\t_sessionIDArg, _err := 
convertSessionRefToXen(fmt.Sprintf(\"%s(%s)\", _method, \"session_id\"), sessionID)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_result, _err := _class.client.APICall(_method, _sessionIDArg)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_retval, _err = convertPIFRefToPIFRecordMapToGo(_method + \" -> \", _result.Value)\n\treturn\n}", "func findAll(findAllStruct *FindAll) ([]interface{}, error) {\n\tvar records []interface{}\n\terr := findAllStruct.Collection.Find(nil).All(&records)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn records, err\n}", "func List(customerID int) ([]Employee, error) {\n\n\tmyData := make([]Employee, 0)\n\trows, err := database.DBCon.Query(\"SELECT id, first_name, last_name, email, customer_id FROM incidents.employees where customer_id=$1\", customerID)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, err\n\t}\n\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\te := New()\n\t\terr = rows.Scan(&e.ID, &e.FirstName, &e.LastName, &e.Email, &e.CustomerID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmyData = append(myData, *e)\n\t}\n\n\terr = rows.Err()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trows.Close()\n\n\treturn myData, nil\n}", "func (l Leftovers) List(filter string, regex bool) {\n\tl.logger.NoConfirm()\n\tvar deletables []common.Deletable\n\n\tfor _, r := range l.resources {\n\t\tlist, err := r.List(filter, regex)\n\t\tif err != nil {\n\t\t\tl.logger.Println(color.YellowString(err.Error()))\n\t\t}\n\n\t\tdeletables = append(deletables, list...)\n\t}\n\n\tfor _, d := range deletables {\n\t\tl.logger.Println(fmt.Sprintf(\"[%s: %s]\", d.Type(), d.Name()))\n\t}\n}", "func (l Leftovers) List(filter string, regex bool) {\n\tl.logger.NoConfirm()\n\n\tvar deletables []common.Deletable\n\n\tfor _, r := range l.resources {\n\t\tlist, err := r.List(filter, regex)\n\t\tif err != nil {\n\t\t\tl.logger.Println(color.YellowString(err.Error()))\n\t\t}\n\n\t\tdeletables = append(deletables, list...)\n\t}\n\n\tfor _, d := range 
deletables {\n\t\tl.logger.Println(fmt.Sprintf(\"[%s: %s]\", d.Type(), d.Name()))\n\t}\n}", "func (db *DB) GetAll(ctx context.Context) (recs []*databroker.Record, err error) {\n\t_, span := trace.StartSpan(ctx, \"databroker.redis.GetAll\")\n\tdefer span.End()\n\tdefer recordOperation(ctx, time.Now(), \"get_all\", err)\n\treturn db.getAll(ctx, func(record *databroker.Record) bool { return true })\n}", "func (s *Store) List() []interface{} {\n\tmetaSlice := make([]interface{}, 0)\n\trangeFunc := func(key, value interface{}) bool {\n\t\tmetaSlice = append(metaSlice, value)\n\t\treturn true\n\t}\n\ts.Range(rangeFunc)\n\n\treturn metaSlice\n}", "func (s *business) List(where repository.Example, order string, limit int32, page int32) (*repository.ExampleList, *response.Error) {\n\ts.logger = s.loggerClone\n\ts.logger.SugaredLogger = s.logger.With(\"method\", \"List\")\n\n\t// Pagination\n\tif limit == 0 {\n\t\tlimit = viper.GetInt32(\"PAGE_LIMIT\")\n\t}\n\tif page <= 0 {\n\t\tpage = 1\n\t}\n\toffset := limit * (page - 1)\n\n\t// Get data list\n\texamples, count, err := s.repository.ListWhere(where, order, limit, offset)\n\tif err != nil {\n\t\ts.logger.Errorw(\"list data error\", \"error\", err)\n\t\treturn nil, response.NewErrorFromCode(errorcode.GetDataError)\n\t}\n\texampleList := repository.NewExampleList(examples, count)\n\treturn exampleList, nil\n}", "func (c *Command) GetAll(ctx *gin.Context) {\n\ttoken := strings.ToLower(html.EscapeString(ctx.Param(\"token\")))\n\tfilter := map[string]interface{}{\"token\": token}\n\tfromDB, err := c.Conn.GetByFilter(c.Table, filter, 0)\n\tif err != nil {\n\t\tutil.NiceError(ctx, err, http.StatusBadRequest)\n\t\treturn\n\t}\n\tif fromDB == nil {\n\t\tctx.JSON(http.StatusNotFound, make([]struct{}, 0))\n\t\treturn\n\t}\n\n\tvar respDecode ResponseSchema\n\tvar decoded = make([]map[string]interface{}, len(fromDB))\n\tfor pos, record := range fromDB {\n\t\t// If there's an issue decoding it, just log it and move on to the next 
record\n\t\tif err := mapstruct.Decode(record, &respDecode); err != nil {\n\t\t\tlog.Error(err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tmarshalled := util.MarshalResponse(respDecode)\n\t\tdecoded[pos] = map[string]interface{}{\n\t\t\t\"id\": marshalled[\"data\"].(map[string]interface{})[\"id\"],\n\t\t\t\"attributes\": marshalled[\"data\"].(map[string]interface{})[\"attributes\"],\n\t\t\t\"meta\": marshalled[\"meta\"],\n\t\t}\n\t}\n\tvar response = make(map[string]interface{})\n\n\tresponse[\"data\"] = decoded\n\n\tctx.Header(\"x-total-count\", fmt.Sprint(len(decoded)))\n\tctx.JSON(http.StatusOK, response)\n}", "func (us UserService) ListAll() string {\n\t// Read data from file\n\tallUsers := us.Store.Read()\n\t// JSON message to Struct\n\tusers := jsontoStruct(allUsers)\n\t// Formatting output\n\treturn formatting(users)\n}", "func (c *DeviceController) GetRecords(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tres := r53.GetRecordSets(vars[\"id\"])\n\tc.SendJSON(\n\t\tw,\n\t\tr,\n\t\tres,\n\t\thttp.StatusOK,\n\t)\n}", "func (s Store) List() ([]string, error) {\n\treturn s.backingStore.List(ItemType, \"\")\n}", "func GetRecords(db *sql.DB, table string, iplist map[string]*string) {\n\tvar query string\n\tif table == \"short_ban\" {\n\t\tquery = shortBanIPs\n\t} else if table == \"long_ban\" {\n\t\tquery = longBanIPs\n\t} else {\n\t\tlog.Error().\n\t\t\tStr(\"Table\", table).\n\t\t\tMsg(\"GetRecords: Invalid table name\")\n\t\treturn\n\t}\n\trows, err := db.Query(query)\n\tif err != nil {\n\t\tlog.Printf(\"Error getting IP from row on table %s: %s\", table, err)\n\t\treturn\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tvar ip string\n\t\terr = rows.Scan(&ip)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error getting IP from row in table %s: %s\", table, err)\n\t\t\tcontinue\n\t\t}\n\t\t// modify the IP to have a CIDR value\n\t\t// TODO: handle this more gracefully in the future\n\t\tip = ip + \"/32\"\n\t\tiplist[ip] = &ip\n\t\t//*iplist = 
append(*iplist, &ip)\n\t}\n}", "func (m *Manager) GetRecords(name string, from, to int) (record.Records, error) {\n\tm.mutex.RLock()\n\tdefer m.mutex.RUnlock()\n\tt, err := m.recent.GetThread(name)\n\tif log.If(err) {\n\t\treturn nil, err\n\t}\n\treturn t.Slice(from, to, m.spams)\n}", "func (a *resource) List() []interface{} {\n\treturn nil\n}", "func (p *F5DNSLBProvider) Records(ctx context.Context) ([]*endpoint.Endpoint, error) {\n\t// If not present return empty\n\t// else find all A pool-members from the pool\n\tlog.Println(\"Records invoked\")\n\tsubs, err := p.GetFilteredDNSConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trecords := p.TransformToRecords(subs)\n\treturn records, nil\n}", "func (a *App) retrieveAll(c *echo.Context) error {\n\tvar tasks []*model.Task\n\ta.GetDB().Find(&tasks, struct{}{})\n\tc.JSON(http.StatusOK, tasks)\n\treturn nil\n}", "func List(c *gin.Context){\n\tlimitStr := c.Query(\"limit\")\n\tlimit, err := strconv.Atoi(limitStr)\n\tif err != nil {\n\t\tlimit = 0\n\t}\n\tres, err := list(limit)\n\tif err != nil {\n\t\tresponese.Error(c, err, nil)\n\t\treturn\n\t}\n\tresponese.Success(c, \"successed\", res)\n}", "func (db *DB) List(ctx context.Context, sinceVersion string) (rec []*databroker.Record, err error) {\n\tc := db.pool.Get()\n\t_, span := trace.StartSpan(ctx, \"databroker.redis.List\")\n\tdefer span.End()\n\tdefer recordOperation(ctx, time.Now(), \"list\", err)\n\tdefer c.Close()\n\n\tv, err := strconv.ParseUint(sinceVersion, 16, 64)\n\tif err != nil {\n\t\tv = 0\n\t}\n\n\tids, err := redis.Strings(c.Do(\"ZRANGEBYSCORE\", db.versionSet, fmt.Sprintf(\"(%d\", v), \"+inf\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpbRecords := make([]*databroker.Record, 0, len(ids))\n\tfor _, id := range ids {\n\t\tb, err := redis.Bytes(c.Do(\"HGET\", db.recordType, id))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbRecord, err := db.toPbRecord(b)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbRecords = 
append(pbRecords, pbRecord)\n\t}\n\treturn pbRecords, nil\n}", "func (p *Personal) All(ctx context.Context) (*[]PersonalData, error) {\n\tusrs, err := p.DB.All(ctx)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not select all personal data\")\n\t}\n\treturn usrs, nil\n}", "func (s Store) List() ([]string, error) {\n\treturn s.backingStore.List(ItemType)\n}", "func (crud *CrudStoreClient) List(result interface{}) error {\n\t_, err := crud.ListWithPagination(result, \"\", 0)\n\treturn err\n}" ]
[ "0.6895555", "0.6680016", "0.66310436", "0.6533258", "0.6475071", "0.64534694", "0.64447963", "0.6441886", "0.64385545", "0.63736784", "0.6371333", "0.6340122", "0.6304793", "0.62884146", "0.62821186", "0.6224998", "0.62233186", "0.6219472", "0.6186004", "0.61814195", "0.6165832", "0.61091405", "0.6075851", "0.607081", "0.6024685", "0.6003757", "0.6001669", "0.5997162", "0.5976752", "0.5953439", "0.5950955", "0.59114546", "0.5901275", "0.5888797", "0.5870085", "0.5870085", "0.5861473", "0.58607155", "0.5838653", "0.58376485", "0.5830457", "0.58296585", "0.58254814", "0.58136094", "0.5801877", "0.58014023", "0.5800967", "0.5793345", "0.5777997", "0.5774204", "0.5764423", "0.57626235", "0.5759224", "0.5758457", "0.5755529", "0.57336473", "0.57325965", "0.57108974", "0.5698027", "0.5690204", "0.56870925", "0.56827337", "0.56714594", "0.5664154", "0.5645706", "0.56453323", "0.5643355", "0.5639842", "0.5633449", "0.5621453", "0.561942", "0.5600517", "0.55981266", "0.55867904", "0.5583394", "0.5580487", "0.5580487", "0.5579095", "0.55755913", "0.5573176", "0.5572805", "0.55722505", "0.55667007", "0.55609924", "0.5560046", "0.55542415", "0.5551877", "0.55474514", "0.5540425", "0.5534875", "0.55326", "0.55324966", "0.553117", "0.55227935", "0.55170995", "0.55150783", "0.55111", "0.5497217", "0.54900366", "0.54863256" ]
0.58201265
43
rowToRecord converts from pgx.Row to a store.Record
func (s *sqlStore) rowToRecord(row pgx.Row) (*store.Record, error) { var expiry *time.Time record := &store.Record{} metadata := make(Metadata) if err := row.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil { if err == sql.ErrNoRows { return record, store.ErrNotFound } return nil, err } // set the metadata record.Metadata = toMetadata(&metadata) if expiry != nil { record.Expiry = time.Until(*expiry) } return record, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *sqlStore) rowsToRecords(rows pgx.Rows) ([]*store.Record, error) {\n\tvar records []*store.Record\n\n\tfor rows.Next() {\n\t\tvar expiry *time.Time\n\t\trecord := &store.Record{}\n\t\tmetadata := make(Metadata)\n\n\t\tif err := rows.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil {\n\t\t\treturn records, err\n\t\t}\n\n\t\t// set the metadata\n\t\trecord.Metadata = toMetadata(&metadata)\n\t\tif expiry != nil {\n\t\t\trecord.Expiry = time.Until(*expiry)\n\t\t}\n\t\trecords = append(records, record)\n\t}\n\treturn records, nil\n}", "func recordToRecord(\n\ttopic string,\n\tpartition int32,\n\tbatch *kmsg.RecordBatch,\n\trecord *kmsg.Record,\n) *Record {\n\th := make([]RecordHeader, 0, len(record.Headers))\n\tfor _, kv := range record.Headers {\n\t\th = append(h, RecordHeader{\n\t\t\tKey: kv.Key,\n\t\t\tValue: kv.Value,\n\t\t})\n\t}\n\n\treturn &Record{\n\t\tKey: record.Key,\n\t\tValue: record.Value,\n\t\tHeaders: h,\n\t\tTimestamp: timeFromMillis(batch.FirstTimestamp + int64(record.TimestampDelta)),\n\t\tTopic: topic,\n\t\tPartition: partition,\n\t\tAttrs: RecordAttrs{uint8(batch.Attributes)},\n\t\tProducerID: batch.ProducerID,\n\t\tProducerEpoch: batch.ProducerEpoch,\n\t\tLeaderEpoch: batch.PartitionLeaderEpoch,\n\t\tOffset: batch.FirstOffset + int64(record.OffsetDelta),\n\t}\n}", "func toRow(pl any) []byte {\n\trt := reflect.TypeOf(pl)\n\n\tenc, err := coder.RowEncoderForStruct(rt)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"unable to get row encoder\"))\n\t}\n\tvar buf bytes.Buffer\n\tif err := enc(pl, &buf); err != nil {\n\t\tpanic(fmt.Errorf(\"unable to do row encoding\"))\n\t}\n\treturn buf.Bytes()\n}", "func (r RecordV1) toRecord() Record {\n\treturn Record{\n\t\tType: r.Type,\n\t\tName: r.Name,\n\t\tAppliedAt: r.AppliedAt,\n\t}\n}", "func RowToRawData(rows *sql.Rows) (r RawData) {\n\trecord, _ := RowToArr(rows)\n\tr.Header = record[0]\n\tr.Rows = append(r.Rows, record[1:])\n\treturn\n}", "func convertRow(\n\trow *Row,\n\twantsNode 
bool,\n\twantsTimestamp bool,\n\tdesiredValues []string,\n) *stats.Row {\n\tvar (\n\t\tnode string\n\t\ttimestamp time.Time\n\t)\n\n\tvar resultValues map[string]interface{}\n\tif len(desiredValues) > 0 {\n\t\tresultValues = make(map[string]interface{})\n\t}\n\n\tfor _, v := range desiredValues {\n\t\tresultValues[v] = row.value(v)\n\t}\n\n\tif wantsNode {\n\t\tnode = row.Node\n\t}\n\tif wantsTimestamp {\n\t\ttimestamp = row.Timestamp.UTC()\n\t}\n\n\treturn &stats.Row{\n\t\tNode: node,\n\t\tTimestamp: timestamp,\n\t\tValues: resultValues,\n\t}\n}", "func toRecord(cache airtabledb.DB, src Feature, dst interface{}) {\n\tdV := reflect.ValueOf(dst).Elem().FieldByName(\"Fields\")\n\tsV := reflect.ValueOf(src)\n\tcopyFields(cache, sV, dV)\n}", "func (e *commonFormatEncoder) Row(tp int, row *[]interface{}, seqno uint64) ([]byte, error) {\n\tcf := convertRowToCommonFormat(tp, row, e.inSchema, seqno, e.filter)\n\treturn CommonFormatEncode(cf)\n}", "func MarshalRecord(record *rangedb.Record) ([]byte, error) {\n\tvar buf bytes.Buffer\n\n\tnewRecord := *record\n\tnewRecord.Data = nil\n\n\tencoder := msgpack.NewEncoder(&buf)\n\tencoder.UseJSONTag(true)\n\n\terr := encoder.Encode(newRecord)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed encoding record: %v\", err)\n\t}\n\n\terr = encoder.Encode(record.Data)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed encoding record data: %v\", err)\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func RowTo[T any](row CollectableRow) (T, error) {\n\tvar value T\n\terr := row.Scan(&value)\n\treturn value, err\n}", "func rowToRSDocument(row string) (document *redisearch.Document) {\n\tif debug > 0 {\n\t\tfmt.Fprintln(os.Stderr, \"converting row to rediSearch Document \"+row)\n\t}\n\tfieldSizesStr := strings.Split(row, \",\")\n\t// we need at least the id and score\n\tif len(fieldSizesStr) >= 2 {\n\t\tdocumentId := index + \"-\" + fieldSizesStr[0]\n\t\tdocumentScore, _ := strconv.ParseFloat(fieldSizesStr[1], 64)\n\t\tdoc := 
redisearch.NewDocument(documentId, float32(documentScore))\n\n\t\tfor _, keyValuePair := range fieldSizesStr[2:] {\n\t\t\tpair := strings.Split(keyValuePair, \"=\")\n\t\t\tif len(pair) == 2 {\n\t\t\t\tif debug > 0 {\n\t\t\t\t\tfmt.Fprintln(os.Stderr, \"On doc \"+documentId+\" adding field with NAME \"+pair[0]+\" and VALUE \"+pair[1])\n\t\t\t\t}\n\t\t\t\tdoc.Set(pair[0], pair[1])\n\t\t\t} else {\n\t\t\t\tif debug > 0 {\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \"On doc \"+documentId+\" len(pair)=%d\", len(pair))\n\t\t\t\t}\n\t\t\t\tlog.Fatalf(\"keyValuePair pair size != 2 . Got \" + keyValuePair)\n\t\t\t}\n\t\t}\n\t\tif debug > 0 {\n\t\t\tfmt.Fprintln(os.Stderr, \"Doc \"+documentId)\n\t\t}\n\t\treturn &doc\n\t}\n\treturn document\n}", "func (dao PathProfileDAOPsql) rowToPathProfile(row *sql.Row, o *models.PathProfile) error {\n\treturn row.Scan(&o.ID, &o.ProfileID, &o.Path.ID, &o.Path.Path, &o.Path.PathName, &o.Path.Description, &o.Post, &o.Put, &o.Del, &o.Get, &o.CreatedAt, &o.UpdatedAt)\n}", "func (mcs *MemoryCellStore) MakeRow(sheet *Sheet) *Row {\n\treturn makeMemoryRow(sheet).row\n}", "func (r *Rows) row(a ...interface{}) error {\n\tdefer r.Close()\n\n\tfor _, dp := range a {\n\t\tif _, ok := dp.(*sql.RawBytes); ok {\n\t\t\treturn VarTypeError(\"RawBytes isn't allowed on Row()\")\n\t\t}\n\t}\n\n\tif !r.Next() {\n\t\tif err := r.Err(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn sql.ErrNoRows\n\t}\n\tif err := r.Scan(a...); err != nil {\n\t\treturn err\n\t}\n\n\treturn r.Close()\n}", "func ConvertRecord(s string) (r record) {\n // Drop the last char pf the string (it's a ' ')\n s = s[:len(s) - 1]\n\n // Split the string in the various fields\n var fields []string = strings.Split(s, \" \")\n\n // Update the fields of the record based on the various fields\n for _, f := range fields {\n switch f[:3] {\n case \"byr\": r.byr = f[4:]\n case \"iyr\": r.iyr = f[4:]\n case \"eyr\": r.eyr = f[4:]\n case \"hgt\": r.hgt = f[4:]\n case \"hcl\": r.hcl = f[4:]\n case \"ecl\": 
r.ecl = f[4:]\n case \"pid\": r.pid = f[4:]\n }\n }\n\n return\n}", "func RowToArr(rows *sql.Rows) (records [][]string, err error) {\n\tfmt.Printf(\"RowToArr start at %s\", time.Now())\n\tcolumns, err := rows.Columns()\n\tif err != nil {\n\t\treturn\n\t}\n\tcount := len(columns)\n\treadCols := make([]interface{}, count)\n\trawCols := make([]interface{}, count)\n\t//records = make([]interface{}, 0)\n\trecords = append(records, columns) //append row header as 1st row\n\n\t// var resultCols []string\n\tfor rows.Next() {\n\t\t// resultCols = make([]string, count)\n\t\tfor i := range columns {\n\t\t\treadCols[i] = &rawCols[i]\n\t\t}\n\t\terr = rows.Scan(readCols...)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tresultCols := assertTypeArray(columns, rawCols)\n\t\trecords = append(records, resultCols)\n\t}\n\n\tfmt.Printf(\"RowToArr end at %s\", time.Now())\n\treturn records, nil\n}", "func doltRowToSqlRow(doltRow row.Row, sch schema.Schema) (sql.Row, error) {\n\tcolVals := make(sql.Row, sch.GetAllCols().Size())\n\n\ti := 0\n\terr := sch.GetAllCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {\n\t\tvar innerErr error\n\t\tvalue, _ := doltRow.GetColVal(tag)\n\t\tcolVals[i], innerErr = col.TypeInfo.ConvertNomsValueToValue(value)\n\t\tif innerErr != nil {\n\t\t\treturn true, innerErr\n\t\t}\n\t\ti++\n\t\treturn false, nil\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn sql.NewRow(colVals...), nil\n}", "func (mcs *MemoryCellStore) WriteRow(r *Row) error {\n\tif r != nil {\n\t\tkey := r.key()\n\t\tmcs.rows[key] = r\n\t}\n\treturn nil\n}", "func convertToUser(row *sql.Row) (*User, error) {\n\tuser := User{}\n\terr := row.Scan(&user.UserID, &user.Mail, &user.Password)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"model.convertToUser: %w\", err)\n\t}\n\treturn &user, nil\n}", "func (p *partitionImpl) getRow(row *rowImpl, rowNum int) sif.Row {\n\trow.rowNum = rowNum\n\trow.partition = p\n\treturn row\n}", "func SqlRowToDoltRow(nbf 
*types.NomsBinFormat, r sql.Row, doltSchema schema.Schema) (row.Row, error) {\n\ttaggedVals := make(row.TaggedValues)\n\tallCols := doltSchema.GetAllCols()\n\tfor i, val := range r {\n\t\ttag := allCols.Tags[i]\n\t\tschCol := allCols.TagToCol[tag]\n\t\tif val != nil {\n\t\t\tvar err error\n\t\t\ttaggedVals[tag], err = schCol.TypeInfo.ConvertValueToNomsValue(val)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else if !schCol.IsNullable() {\n\t\t\treturn nil, fmt.Errorf(\"column <%v> received nil but is non-nullable\", schCol.Name)\n\t\t}\n\t}\n\treturn row.New(nbf, doltSchema, taggedVals)\n}", "func NewRecord(f sql.SelectObjectFormat) *Record {\n\treturn &Record{\n\t\tKVS: jstream.KVS{},\n\t\tSelectFormat: f,\n\t}\n}", "func NewRecord(record map[string]interface{}, schema *schma.Schema, schemaText string) *Record {\n\treturn &Record{\n\t\tschema: schema,\n\t\tschemaText: schemaText,\n\t\tData: record,\n\t}\n}", "func createRecord(\n\ts *database.Store,\n\tdoc *document.Document,\n) (*database.Record, error) {\n\tnow := ptypes.TimestampNow()\n\tnew := &database.Record{\n\t\tRevision: 1,\n\t\tKeys: marshalKeys(doc.Keys),\n\t\tCreatedAt: now,\n\t\tUpdatedAt: now,\n\t}\n\n\tif err := s.PutRecord(doc.ID, new); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := s.UpdateKeys(doc.ID, nil, new.Keys); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn new, nil\n}", "func updateRecord(\n\ts *database.Store,\n\tdoc *document.Document,\n\trec *database.Record,\n) (*database.Record, error) {\n\tnew := proto.Clone(rec).(*database.Record)\n\tnew.Revision++\n\tnew.Keys = marshalKeys(doc.Keys)\n\tnew.UpdatedAt = ptypes.TimestampNow()\n\n\tif err := s.PutRecord(doc.ID, new); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := s.UpdateKeys(doc.ID, rec.Keys, new.Keys); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn new, nil\n}", "func (kvcodec *tableKVEncoder) AddRecord(\n\trow []types.Datum,\n\trowID int64,\n\tcolumnPermutation []int,\n) (Row, int, error) 
{\n\tcols := kvcodec.tbl.Cols()\n\n\tvar value types.Datum\n\tvar err error\n\n\trecord := kvcodec.recordCache\n\tif record == nil {\n\t\trecord = make([]types.Datum, 0, len(cols)+1)\n\t}\n\n\tisAutoRandom := false\n\tif kvcodec.tbl.Meta().PKIsHandle && kvcodec.tbl.Meta().ContainsAutoRandomBits() {\n\t\tisAutoRandom = true\n\t}\n\n\tfor i, col := range cols {\n\t\tj := columnPermutation[i]\n\t\tisAutoIncCol := mysql.HasAutoIncrementFlag(col.Flag)\n\t\tisPk := mysql.HasPriKeyFlag(col.Flag)\n\t\tswitch {\n\t\tcase j >= 0 && j < len(row):\n\t\t\tvalue, err = table.CastValue(kvcodec.se, row[j], col.ToInfo(), false, false)\n\t\t\tif err == nil {\n\t\t\t\terr = col.HandleBadNull(&value, kvcodec.se.vars.StmtCtx)\n\t\t\t}\n\t\tcase isAutoIncCol:\n\t\t\t// we still need a conversion, e.g. to catch overflow with a TINYINT column.\n\t\t\tvalue, err = table.CastValue(kvcodec.se, types.NewIntDatum(rowID), col.ToInfo(), false, false)\n\t\tdefault:\n\t\t\tvalue, err = table.GetColDefaultValue(kvcodec.se, col.ToInfo())\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, 0, errors.Trace(err)\n\t\t}\n\n\t\trecord = append(record, value)\n\n\t\tif isAutoRandom && isPk {\n\t\t\ttypeBitsLength := uint64(mysql.DefaultLengthOfMysqlTypes[col.Tp] * 8)\n\t\t\tincrementalBits := typeBitsLength - kvcodec.tbl.Meta().AutoRandomBits\n\t\t\thasSignBit := !mysql.HasUnsignedFlag(col.Flag)\n\t\t\tif hasSignBit {\n\t\t\t\tincrementalBits--\n\t\t\t}\n\t\t\t_ = kvcodec.tbl.RebaseAutoID(kvcodec.se, value.GetInt64()&((1<<incrementalBits)-1), false, autoid.AutoRandomType)\n\t\t}\n\t\tif isAutoIncCol {\n\t\t\t// TODO use auto incremental type\n\t\t\t_ = kvcodec.tbl.RebaseAutoID(kvcodec.se, getAutoRecordID(value, &col.FieldType), false, autoid.RowIDAllocType)\n\t\t}\n\t}\n\n\tif TableHasAutoRowID(kvcodec.tbl.Meta()) {\n\t\tj := columnPermutation[len(cols)]\n\t\tif j >= 0 && j < len(row) {\n\t\t\tvalue, err = table.CastValue(kvcodec.se, row[j], extraHandleColumnInfo, false, false)\n\t\t} else {\n\t\t\tvalue, err 
= types.NewIntDatum(rowID), nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, 0, errors.Trace(err)\n\t\t}\n\t\trecord = append(record, value)\n\t\t_ = kvcodec.tbl.RebaseAutoID(kvcodec.se, value.GetInt64(), false, autoid.RowIDAllocType)\n\t}\n\t_, err = kvcodec.tbl.AddRecord(kvcodec.se, record)\n\tif err != nil {\n\t\tlog.Error(\"kv add Record failed\",\n\t\t\tzapRow(\"originalRow\", row),\n\t\t\tzapRow(\"convertedRow\", record),\n\t\t\tzap.Error(err),\n\t\t)\n\t\treturn nil, 0, errors.Trace(err)\n\t}\n\n\tpairs, size := kvcodec.se.takeKvPairs()\n\tkvcodec.recordCache = record[:0]\n\treturn Pairs(pairs), size, nil\n}", "func RowToDrivers(row *sql.Rows) []Driver {\n result := []Driver{}\n for row.Next() {\n var driverName string\n var driverTelephoneNumber string\n row.Scan(&driverName, &driverTelephoneNumber)\n result = append(result, Driver{\n DriverName: driverName,\n DriverTelephoneNumber: driverTelephoneNumber,\n })\n }\n return result\n}", "func RowToTrips(row *sql.Rows) []Trip {\n trips := []Trip{}\n for row.Next() {\n var tripNumber int\n var startLocationName string\n var destinationName string\n row.Scan(&tripNumber, &startLocationName, &destinationName)\n trips = append(trips, Trip{\n TripNumber: tripNumber,\n StartLocationName: startLocationName,\n DestinationName: destinationName,\n })\n }\n return trips\n}", "func (f *fragment) row(rowID uint64) *Row {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\treturn f.unprotectedRow(rowID)\n}", "func DoltRowToSqlRow(doltRow row.Row, sch schema.Schema) (sql.Row, error) {\n\tif doltRow == nil {\n\t\treturn nil, nil\n\t}\n\n\tcolVals := make(sql.Row, sch.GetAllCols().Size())\n\ti := 0\n\n\t_, err := doltRow.IterSchema(sch, func(tag uint64, val types.Value) (stop bool, err error) {\n\t\tcol, _ := sch.GetAllCols().GetByTag(tag)\n\t\tcolVals[i], err = col.TypeInfo.ConvertNomsValueToValue(val)\n\t\ti++\n\n\t\tstop = err != nil\n\t\treturn\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn sql.NewRow(colVals...), 
nil\n}", "func (r *Result) Recordx() *Record {\n\treturn &Record{r.Record()}\n}", "func (d *DB) readfloat_row(row []byte, pos uint32) float32 {\n\tvar retval float32\n\tdata := row[pos : pos+4]\n\tbits := binary.LittleEndian.Uint32(data)\n\tretval = math.Float32frombits(bits)\n\treturn retval\n}", "func (env *Environment) ConvertRowToTable(row bt.Row) *lua.LTable {\n\ttable := env.state.NewTable()\n\tfor cfName, cf := range row.ColumnFamilies {\n\t\tcfTable := env.state.NewTable()\n\t\tfor column, value := range cf {\n\t\t\tcfTable.RawSetString(column, lua.LString(value))\n\t\t}\n\n\t\ttable.RawSet(lua.LString(cfName), cfTable)\n\t}\n\n\t// set key last to ensure it doesn't get overwritten\n\ttable.RawSetString(\"key\", lua.LString(row.Key))\n\treturn table\n}", "func (t *Table) GetRow(ctx context.Context, pk types.Tuple, sch schema.Schema) (row.Row, bool, error) {\n\trowMap, err := t.GetRowData(ctx)\n\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tfieldsVal, _, err := rowMap.MaybeGet(ctx, pk)\n\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tif fieldsVal == nil {\n\t\treturn nil, false, nil\n\t}\n\n\tr, err := row.FromNoms(sch, pk, fieldsVal.(types.Tuple))\n\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\treturn r, true, nil\n}", "func (f *fragment) rowFromStorage(rowID uint64) *Row {\n\t// Only use a subset of the containers.\n\t// NOTE: The start & end ranges must be divisible by container width.\n\t//\n\t// Note that OffsetRange now returns a new bitmap which uses frozen\n\t// containers which will use copy-on-write semantics. 
The actual bitmap\n\t// and Containers object are new and not shared, but the containers are\n\t// shared.\n\tdata := f.storage.OffsetRange(f.shard*ShardWidth, rowID*ShardWidth, (rowID+1)*ShardWidth)\n\n\trow := &Row{\n\t\tsegments: []rowSegment{{\n\t\t\tdata: data,\n\t\t\tshard: f.shard,\n\t\t\twritable: true,\n\t\t}},\n\t}\n\trow.invalidateCount()\n\n\treturn row\n}", "func getNewRecordFunc(rowMeta sqlz.RecordMeta) driver.NewRecordFunc {\n\treturn func(row []interface{}) (sqlz.Record, error) {\n\t\trec, skipped := driver.NewRecordFromScanRow(rowMeta, row, nil)\n\t\t// We iterate over each element of val, checking for certain\n\t\t// conditions. A more efficient approach might be to (in\n\t\t// the outside func) iterate over the column metadata, and\n\t\t// build a list of val elements to visit.\n\t\tfor _, i := range skipped {\n\t\t\tif nullTime, ok := rec[i].(*mysql.NullTime); ok {\n\t\t\t\tif nullTime.Valid {\n\t\t\t\t\t// Make a copy of the value\n\t\t\t\t\tt := nullTime.Time\n\t\t\t\t\trec[i] = &t\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Else\n\t\t\t\trec[i] = nil\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif rowMeta[i].DatabaseTypeName() == \"TIME\" && rec[i] != nil {\n\t\t\t\t// MySQL may return TIME as RawBytes... 
convert to a string.\n\t\t\t\t// https://github.com/go-sql-driver/mysql#timetime-support\n\t\t\t\tif rb, ok := rec[i].(*sql.RawBytes); ok {\n\t\t\t\t\tif len(*rb) == 0 {\n\t\t\t\t\t\t// shouldn't happen\n\t\t\t\t\t\tzero := \"00:00\"\n\t\t\t\t\t\trec[i] = &zero\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\t// Else\n\t\t\t\t\ttext := string(*rb)\n\t\t\t\t\trec[i] = &text\n\t\t\t\t}\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// else, we don't know what to do with this col\n\t\t\treturn nil, errz.Errorf(\"column %d %s: unknown type db(%T) with kind(%s), val(%v)\", i, rowMeta[i].Name(), rec[i], rowMeta[i].Kind(), rec[i])\n\t\t}\n\t\treturn rec, nil\n\t}\n}", "func RowToQueryResult(row *sql.Row, colDefines []database.Column) (QueryResult, error) {\n\tcols := database.Columns(colDefines).Names()\n\tcolumns := make([]interface{}, len(cols))\n\tcolumnPointers := make([]interface{}, len(cols))\n\tfor i := range columns {\n\t\tcolumnPointers[i] = &columns[i]\n\t}\n\t// Scan the result into the column pointers...\n\tif err := row.Scan(columnPointers...); err != nil {\n\t\treturn nil, err\n\t}\n\n\trowData := makeRowDataSet(colDefines)\n\tfor i, colName := range cols {\n\t\tval := columnPointers[i].(*interface{})\n\t\trowData[colName] = ColData{Data: val, DataType: rowData[colName].DataType}\n\t}\n\n\treturn QueryResult(rowData), nil\n}", "func CreateRecord(db *sql.DB, e Record) (Record, error) {\n\tvar record Record\n\terr := db.QueryRow(`\n\t\tINSERT INTO user_records(weight, reps, rpe, date_performed, exercise_id, user_id)\n\t\tVALUES\n\t\t($1, $2, $3, $4, $5, $6)\n\t\tRETURNING *`,\n\t\te.Weight, e.Reps, e.RPE, e.DatePerformed, e.ExerciseID, e.UserID,\n\t).Scan(\n\t\t&record.ID,\n\t\t&record.Weight,\n\t\t&record.Reps,\n\t\t&record.RPE,\n\t\t&record.DatePerformed,\n\t\t&record.ExerciseID,\n\t\t&record.UserID,\n\t)\n\n\tif err != nil {\n\t\treturn record, err\n\t}\n\n\treturn record, nil\n}", "func (r *distSQLReceiver) PushRow(row sqlbase.EncDatumRow) bool {\n\tif r.err != nil 
{\n\t\treturn false\n\t}\n\tif r.rows == nil {\n\t\tr.numRows++\n\t\treturn true\n\t}\n\tif r.row == nil {\n\t\tr.row = make(parser.DTuple, len(r.resultToStreamColMap))\n\t}\n\tfor i, resIdx := range r.resultToStreamColMap {\n\t\terr := row[resIdx].EnsureDecoded(&r.alloc)\n\t\tif err != nil {\n\t\t\tr.err = err\n\t\t\treturn false\n\t\t}\n\t\tr.row[i] = row[resIdx].Datum\n\t}\n\t// Note that AddRow accounts for the memory used by the Datums.\n\tif _, err := r.rows.AddRow(r.row); err != nil {\n\t\tr.err = err\n\t\treturn false\n\t}\n\treturn true\n}", "func recordToSlice(record Record) []string {\n\tvar recordSlice []string\n\n\trecordSlice = []string{\n\t\tfmt.Sprintf(\"%d\",record.CheeseId), record.CheeseName, record.ManufacturerName, record.ManufacturerProvCode,\n\t\trecord.ManufacturingType, record.WebSite, fmt.Sprintf(\"%.2f\", record.FatContentPercent), \n\t\tfmt.Sprintf(\"%.2f\", record.MoisturePercent), record.Particularities, record.Flavour, \n\t\trecord.Characteristics, record.Ripening, fmt.Sprintf(\"%t\", record.Organic),\n\t\trecord.CategoryType, record.MilkType, record.MilkTreatmentType, record.RindType, record.LastUpdateDate,\n\t}\n\n\treturn recordSlice\n}", "func parseRecord(table string, r *Record) error {\n\t// it's ok if some records don't return a value\n\tif len(r.Value) == 0 {\n\t\treturn nil\n\t}\n\tif r.Table == \"\" {\n\t\tr.Table = table\n\t} else {\n\t\t// TODO: probably never happens\n\t\tpanicIf(r.Table != table)\n\t}\n\n\t// set Block/Space etc. 
based on TableView type\n\tvar pRawJSON *map[string]interface{}\n\tvar obj interface{}\n\tswitch table {\n\tcase TableActivity:\n\t\tr.Activity = &Activity{}\n\t\tobj = r.Activity\n\t\tpRawJSON = &r.Activity.RawJSON\n\tcase TableBlock:\n\t\tr.Block = &Block{}\n\t\tobj = r.Block\n\t\tpRawJSON = &r.Block.RawJSON\n\tcase TableUser:\n\t\tr.User = &User{}\n\t\tobj = r.User\n\t\tpRawJSON = &r.User.RawJSON\n\tcase TableSpace:\n\t\tr.Space = &Space{}\n\t\tobj = r.Space\n\t\tpRawJSON = &r.Space.RawJSON\n\tcase TableCollection:\n\t\tr.Collection = &Collection{}\n\t\tobj = r.Collection\n\t\tpRawJSON = &r.Collection.RawJSON\n\tcase TableCollectionView:\n\t\tr.CollectionView = &CollectionView{}\n\t\tobj = r.CollectionView\n\t\tpRawJSON = &r.CollectionView.RawJSON\n\tcase TableDiscussion:\n\t\tr.Discussion = &Discussion{}\n\t\tobj = r.Discussion\n\t\tpRawJSON = &r.Discussion.RawJSON\n\tcase TableComment:\n\t\tr.Comment = &Comment{}\n\t\tobj = r.Comment\n\t\tpRawJSON = &r.Comment.RawJSON\n\t}\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"unsupported table '%s'\", r.Table)\n\t}\n\tif err := jsonit.Unmarshal(r.Value, pRawJSON); err != nil {\n\t\treturn err\n\t}\n\tid := (*pRawJSON)[\"id\"]\n\tif id != nil {\n\t\tr.ID = id.(string)\n\t}\n\tif err := jsonit.Unmarshal(r.Value, &obj); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func NewRecord(value interface{}) Record {\n\n\tswitch obj := value.(type) {\n\tcase nil:\n\t\treturn Record{\n\t\t\tdropped: true,\n\t\t}\n\tcase map[string]interface{}:\n\t\treturn Record{\n\t\t\tvalues: GuessType(obj).(map[string]interface{}),\n\t\t}\n\t}\n\n\treturn Record{\n\t\tvalues: map[string]interface{}{\n\t\t\t\"value\": GuessType(value),\n\t\t},\n\t}\n}", "func (kvcodec *tableKVEncoder) RemoveRecord(\n\trow []types.Datum,\n\trowID int64,\n\tcolumnPermutation []int,\n) (Row, int, error) {\n\tcols := kvcodec.tbl.Cols()\n\n\tvar value types.Datum\n\tvar err error\n\n\trecord := kvcodec.recordCache\n\tif record == nil {\n\t\trecord = 
make([]types.Datum, 0, len(cols)+1)\n\t}\n\n\tfor i, col := range cols {\n\t\tj := columnPermutation[i]\n\t\tisAutoIncCol := mysql.HasAutoIncrementFlag(col.Flag)\n\t\tswitch {\n\t\tcase j >= 0 && j < len(row):\n\t\t\tvalue, err = table.CastValue(kvcodec.se, row[j], col.ToInfo(), false, false)\n\t\t\tif err == nil {\n\t\t\t\terr = col.HandleBadNull(&value, kvcodec.se.vars.StmtCtx)\n\t\t\t}\n\t\tcase isAutoIncCol:\n\t\t\t// we still need a conversion, e.g. to catch overflow with a TINYINT column.\n\t\t\tvalue, err = table.CastValue(kvcodec.se, types.NewIntDatum(rowID), col.ToInfo(), false, false)\n\t\tdefault:\n\t\t\tvalue, err = table.GetColDefaultValue(kvcodec.se, col.ToInfo())\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, 0, errors.Trace(err)\n\t\t}\n\t\trecord = append(record, value)\n\t}\n\terr = kvcodec.tbl.RemoveRecord(kvcodec.se, kv.IntHandle(rowID), record)\n\tif err != nil {\n\t\tlog.Error(\"kv remove record failed\",\n\t\t\tzapRow(\"originalRow\", row),\n\t\t\tzapRow(\"convertedRow\", record),\n\t\t\tzap.Error(err),\n\t\t)\n\t\treturn nil, 0, errors.Trace(err)\n\t}\n\n\tpairs, size := kvcodec.se.takeKvPairs()\n\tkvcodec.recordCache = record[:0]\n\treturn Pairs(pairs), size, nil\n}", "func convert(ent *entry.Entry) pdata.LogRecord {\n\tdest := pdata.NewLogRecord()\n\tconvertInto(ent, dest)\n\treturn dest\n}", "func NewBrokerRowProtoConverter(\n\tnamespace []byte,\n\tenrichedTags tag.Tags,\n\tlimits *models.Limits,\n) (\n\tcvt *BrokerRowProtoConverter,\n\treleaseFunc func(cvt *BrokerRowProtoConverter),\n) {\n\treleaseFunc = func(cvt *BrokerRowProtoConverter) { rowConverterPool.Put(cvt) }\n\titem := rowConverterPool.Get()\n\tif item == nil {\n\t\tcvt = NewProtoConverter(limits)\n\t} else {\n\t\tcvt = item.(*BrokerRowProtoConverter)\n\t}\n\tcvt.Reset()\n\tcvt.namespace = namespace\n\tcvt.enrichedTags = enrichedTags\n\tcvt.limits = limits\n\treturn cvt, releaseFunc\n}", "func SqlRowToDoltRow(ctx context.Context, vrw types.ValueReadWriter, r sql.Row, doltSchema 
schema.Schema) (row.Row, error) {\n\tif schema.IsKeyless(doltSchema) {\n\t\treturn keylessDoltRowFromSqlRow(ctx, vrw, r, doltSchema)\n\t}\n\treturn pkDoltRowFromSqlRow(ctx, vrw, r, doltSchema)\n}", "func (r *Reader) Row() []interface{} {\n\treturn r.row\n}", "func InsertRow(db *sql.DB, tab DBTable, row interface{}) (err error) {\n\tnumRows := reflect.ValueOf(row).Len()\n\t// log.Println(fmt.Sprintf(\"Inserting %s record ...\", tab.name))\n\tinsertSQL := fmt.Sprintf(\"INSERT INTO %s(%s) VALUES\", tab.name, tab.columns)\n\tvaluesSQL := fmt.Sprintf(\" (%s)\", tab.questions)\n\tfor j := 0; j < numRows-1; j++ {\n\t\tvaluesSQL = fmt.Sprintf(\"%s, (%s)\", valuesSQL, tab.questions)\n\t}\n\n\tinsertSQL = fmt.Sprint(insertSQL, valuesSQL)\n\tstatement, err := db.Prepare(insertSQL) // Prepare statement. This is good to avoid SQL injections\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar args []interface{}\n\tfor j := 0; j < numRows; j++ {\n\t\trv := reflect.ValueOf(row).Index(j)\n\t\tfor i := 0; i < rv.NumField(); i++ {\n\t\t\targs = append(args, rv.Field(i).Interface())\n\t\t}\n\t}\n\t_, err = statement.Exec(args...)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func RowToTripOfferings(row *sql.Rows) []TripOffering {\n tripOffering := []TripOffering{}\n for row.Next() {\n var tripNumber int\n var date string\n var scheduledStartTime string\n var scheduledArrivalTime string\n var driverName string\n var busID int\n row.Scan(&tripNumber, &date, &scheduledStartTime, &scheduledArrivalTime, &driverName, &busID)\n tripOffering = append(tripOffering, TripOffering{\n TripNumber: tripNumber,\n Date: date,\n ScheduledStartTime: scheduledStartTime,\n ScheduledArrivalTime: scheduledArrivalTime,\n DriverName: driverName,\n BusID: busID,\n })\n }\n return tripOffering\n}", "func RowToMap(rows *sql.Rows) []map[string]string {\n\tcolumns, _ := rows.Columns()\n\tcount := len(columns)\n\treadCols := make([]interface{}, count)\n\trawCols := make([]interface{}, count)\n\tvar 
records []map[string]string\n\tfor rows.Next() {\n\t\t// resultCols := make(map[string]string, count)\n\t\tfor i := range columns {\n\t\t\treadCols[i] = &rawCols[i]\n\t\t}\n\t\trows.Scan(readCols...)\n\n\t\t// all conver to string\n\t\tresultCols := assertTypeMap(columns, rawCols)\n\n\t\trecords = append(records, resultCols)\n\t}\n\treturn records\n}", "func PrintRow(fields []string, row map[string]interface{}) {\n\ttable := New(fields)\n\t// add row\n\ttable.AddRow(row)\n\t// And display table\n\ttable.Print()\n}", "func RecordToProto(ctx context.Context, dag format.DAGService, rec net.Record) (*pb.Log_Record, error) {\n\tblock, err := rec.GetBlock(ctx, dag)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tevent, ok := block.(*Event)\n\tif !ok {\n\t\tevent, err = EventFromNode(block)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\theader, err := event.GetHeader(ctx, dag, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbody, err := event.GetBody(ctx, dag, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &pb.Log_Record{\n\t\tRecordNode: rec.RawData(),\n\t\tEventNode: block.RawData(),\n\t\tHeaderNode: header.RawData(),\n\t\tBodyNode: body.RawData(),\n\t}, nil\n}", "func GetRecord(db *sql.DB, id int) (Record, error) {\n\tvar record Record\n\terr := db.QueryRow(`SELECT * FROM user_records WHERE id = ($1)`, id).Scan(\n\t\t&record.ID,\n\t\t&record.Weight,\n\t\t&record.Reps,\n\t\t&record.RPE,\n\t\t&record.DatePerformed,\n\t\t&record.ExerciseID,\n\t\t&record.UserID,\n\t)\n\n\tif err != nil {\n\t\treturn record, err\n\t}\n\n\treturn record, nil\n}", "func (writer *Writer) WriteRow(row Row, recordMd *[]Metadata) (e error) {\n\tvar md *Metadata\n\tvar inMd *Metadata\n\tvar rIdx int\n\tvar nRecord = len(row)\n\tvar recV []byte\n\tv := []byte{}\n\n\tfor i := range writer.OutputMetadata {\n\t\tmd = &writer.OutputMetadata[i]\n\n\t\t// find the input index based on name on record metadata.\n\t\trIdx = 0\n\t\tfor y := range (*recordMd) {\n\t\t\tinMd = 
&(*recordMd)[y]\n\n\t\t\tif inMd.Name == md.Name {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif ! (*recordMd)[y].Skip {\n\t\t\t\trIdx++\n\t\t\t}\n\t\t}\n\n\t\t// If input column is ignored, continue to next record.\n\t\tif inMd.Skip {\n\t\t\tcontinue\n\t\t}\n\n\t\t// No input metadata matched? skip it too.\n\t\tif rIdx >= nRecord {\n\t\t\tcontinue\n\t\t}\n\n\t\trecV = row[rIdx].ToByte()\n\n\t\tif \"\" != md.LeftQuote {\n\t\t\tv = append (v, []byte (md.LeftQuote)...)\n\t\t}\n\n\t\tv = append (v, recV...)\n\n\t\tif \"\" != md.RightQuote {\n\t\t\tv = append (v, []byte (md.RightQuote)...)\n\t\t}\n\n\t\tif \"\" != md.Separator {\n\t\t\tv = append (v, []byte (md.Separator)...)\n\t\t}\n\t}\n\n\tv = append (v, '\\n')\n\n\t_, e = writer.BufWriter.Write (v)\n\n\tif nil != e {\n\t\treturn e\n\t}\n\n\treturn nil\n}", "func NewRecord() *Record {\n\treturn new(Record)\n}", "func sensorValueFromRow(rows *sql.Rows) (SensorValue, error) {\n\tvar sensorId,\n\t\tip,\n\t\ttyp string\n\tvar t time.Time\n\tvar value float64\n\n\terr := rows.Scan(\n\t\t&sensorId,\n\t\t&typ,\n\t\t&t,\n\t\t&value,\n\t\t&ip)\n\n\treturn SensorValue{\n\t\tSensorId: sensorId,\n\t\tType: typ,\n\t\tTime: t,\n\t\tValue: value,\n\t\tIp: ip,\n\t}, err\n}", "func lineToRecord(line []string) Record {\n\n\t// parse some values from strings\n\tcheeseId, err := strconv.ParseInt(line[0], 10, 64)\n\tif err != nil { cheeseId = 0 }\n\tfatContentPercent, err := strconv.ParseFloat(line[10], 32)\n\tif err != nil { fatContentPercent = 0.0 }\n\tmoisturePercent, err := strconv.ParseFloat(line[11], 32)\n\tif err != nil { moisturePercent = 0.0 }\n\torganic, err := strconv.ParseBool(line[20])\n\tif err != nil { organic = false }\n\n\treturn Record {\n\t\tCheeseId: int(cheeseId),\n\t\tCheeseName: getFirstNonEmptyStringOrNA(line[1], line[2]),\n\t\tManufacturerName: getFirstNonEmptyStringOrNA(line[3], line[4]),\n\t\tManufacturerProvCode: getFirstNonEmptyStringOrNA(line[5], \"??\"),\n\t\tManufacturingType: getFirstNonEmptyStringOrNA(line[6], 
line[7]),\n\t\tWebSite: getFirstNonEmptyStringOrNA(line[8], line[9]),\n\t\tFatContentPercent: float32(fatContentPercent),\n\t\tMoisturePercent: float32(moisturePercent),\n\t\tParticularities: getFirstNonEmptyStringOrNA(line[12], line[13]),\n\t\tFlavour: getFirstNonEmptyStringOrNA(line[14], line[15]),\n\t\tCharacteristics: getFirstNonEmptyStringOrNA(line[16], line[17]),\n\t\tRipening: getFirstNonEmptyStringOrNA(line[18], line[19]),\n\t\tOrganic: organic,\n\t\tCategoryType: getFirstNonEmptyStringOrNA(line[21], line[22]),\n\t\tMilkType: getFirstNonEmptyStringOrNA(line[23], line[24]),\n\t\tMilkTreatmentType: getFirstNonEmptyStringOrNA(line[25], line[26]),\n\t\tRindType: getFirstNonEmptyStringOrNA(line[27], line[28]),\n\t\tLastUpdateDate: line[29],\n\t}\n}", "func (fr *FakeResult) Row(ptr interface{}) error {\n\tif fr.Force == \"true\" {\n\t\treturn errors.New(\"Function Row forced error\")\n\t}\n\tif reflect.TypeOf(ptr).String() == \"**schema.Stat\" {\n\t\tvar stat *schema.Stat\n\t\tvar data string\n\n\t\tswitch count {\n\t\tcase 0:\n\t\t\tdata = `{\n\t\t\t\t\"ProcessOutcome\": \"No Action\",\n\t\t\t\t\"UserClassification\": \"Cancel Subscription\",\n\t\t\t\t\"count\": 25\n\t\t\t}`\n\t\t\tbreak\n\t\tcase 1:\n\t\t\tdata = `{\n\t\t\t\t\"ProcessOutcome\": \"No Action\",\n\t\t\t\t\"UserClassification\": \"Cancel Autorenewal\",\n\t\t\t\t\"count\": 13\n\t\t\t}`\n\t\t\tbreak\n\t\tcase 2:\n\t\t\tdata = `{\n\t\t\t\t\"ProcessOutcome\": \"No Action\",\n\t\t\t\t\"UserClassification\": \"\",\n\t\t\t\t\"count\": 34\n\t\t\t}`\n\t\t\tbreak\n\t\tcase 3:\n\t\t\tdata = `{\n\t\t\t\t\"ProcessOutcome\": \"No Action\",\n\t\t\t\t\"UserClassification\": \"\",\n\t\t\t\t\"count\": 95\n\t\t\t}`\n\t\t\tbreak\n\t\t}\n\t\tjson.Unmarshal([]byte(data), &stat)\n\t\t*ptr.(**schema.Stat) = stat\n\t} else {\n\t\tvar rl *schema.ReportList\n\t\tdata := `{ \"id\": \"096esbpfrk8b3nhdlfhditsmk10gj03g06i3c201.json\",\n \"servisbotstats\": {\n \"EmailClassification\": \"Cancel\",\n \"ProcessOutcome\": \"No 
Action\",\n \"UserClassification\": \"\",\n \"success\": false\n }}`\n\t\tjson.Unmarshal([]byte(data), &rl)\n\t\t*ptr.(**schema.ReportList) = rl\n\t}\n\treturn nil\n}", "func (rc *BrokerRowProtoConverter) ConvertTo(m *protoMetricsV1.Metric, row *BrokerRow) error {\n\tblock, err := rc.MarshalProtoMetricV1(m)\n\tif err != nil {\n\t\treturn err\n\t}\n\trow.FromBlock(block)\n\treturn nil\n}", "func (f *FakeTable) ReadRow(ovs *libovsdb.OvsdbClient, readRowArgs ovsdb.ReadRowArgs) (map[string]interface{}, error) {\n\tm := make(map[string]interface{})\n\treturn m, nil\n}", "func fetchObject(row scannableRow) (*remember.DataObject, error) {\n\tobject := &remember.DataObject{}\n\n\tvar created int64\n\tvar updated int64\n\n\terr := row.Scan(\n\t\t&object.ID,\n\t\t&object.Title,\n\t\t&object.GroupId,\n\t\t&object.Payload,\n\t\tcreated,\n\t\tupdated,\n\t)\n\n\tobject.CreatedAt = time.Unix(created, 0)\n\tobject.UpdatedAt = time.Unix(updated, 0)\n\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\n\treturn object, nil\n}", "func (vt *perfSchemaTable) Row(ctx sessionctx.Context, h int64) ([]types.Datum, error) {\n\treturn nil, table.ErrUnsupportedOp\n}", "func marshalRecord(r common.Record, buff []byte) {\n\tcopy(buff, r.ID)\n\n\tbinary.LittleEndian.PutUint64(buff[16:24], r.Start)\n\tbinary.LittleEndian.PutUint32(buff[24:], r.Length)\n}", "func (c *ConnCtx) InsertRecordsRowByRow(records []*SongRecord) error {\n\ttemplate := c.getInsertQueryTempalte()\n\tstmt, err := c.Conn.PrepareNamed(template)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stmt.Close()\n\n\tfor _, item := range records {\n\t\tresult, err := stmt.Exec(&item)\n\t\tif err != nil {\n\t\t\tErrorF(fmt.Sprintf(\"execute sql error: %s\", err.Error()))\n\t\t\tcontinue\n\t\t}\n\t\trowsAffected, err := result.RowsAffected()\n\t\tif err != nil {\n\t\t\tErrorF(fmt.Sprintf(\"pg server executes error: %s\", err.Error()))\n\t\t\tcontinue\n\t\t}\n\t\tif rowsAffected != 1 {\n\t\t\tWarningF(fmt.Sprintf(\"insert record affected row 
error: %d\", rowsAffected))\n\t\t}\n\t\tDebugF(\"insert done: %#v\", item)\n\t}\n\n\treturn nil\n}", "func (s SQLite) QueryRow(query string, args ...interface{}) (*sql.Row, error) {\n\tif s.DB == nil {\n\t\treturn nil, fmt.Errorf(\"db is not created\")\n\t}\n\treturn s.DB.QueryRow(query, args...), nil\n}", "func GetRecord(m interface{}) (ret int, ts interface{}, rec map[string]string) {\n\tslice := reflect.ValueOf(m)\n\tt := slice.Index(0).Interface()\n\tdata := slice.Index(1)\n\n\tmapInterfaceData := data.Interface().(map[interface{}]interface{})\n\n\tmapData := make(map[string]string)\n\n\tfor kData, vData := range mapInterfaceData {\n\t\tmapData[kData.(string)] = string(vData.([]uint8))\n\t}\n\n\tmapData[\"id\"] = uuid.NewV4().String()\n\n\treturn 0, t, mapData\n}", "func (m *PgSQL) QueryRow(query string, args ...interface{}) *sql.Row {\n\treturn m.Connection.QueryRow(query, args...)\n}", "func (dht *FullRT) getRecordFromDatastore(ctx context.Context, dskey ds.Key) (*recpb.Record, error) {\n\tbuf, err := dht.datastore.Get(ctx, dskey)\n\tif err == ds.ErrNotFound {\n\t\treturn nil, nil\n\t}\n\tif err != nil {\n\t\tlogger.Errorw(\"error retrieving record from datastore\", \"key\", dskey, \"error\", err)\n\t\treturn nil, err\n\t}\n\trec := new(recpb.Record)\n\terr = proto.Unmarshal(buf, rec)\n\tif err != nil {\n\t\t// Bad data in datastore, log it but don't return an error, we'll just overwrite it\n\t\tlogger.Errorw(\"failed to unmarshal record from datastore\", \"key\", dskey, \"error\", err)\n\t\treturn nil, nil\n\t}\n\n\terr = dht.Validator.Validate(string(rec.GetKey()), rec.GetValue())\n\tif err != nil {\n\t\t// Invalid record in datastore, probably expired but don't return an error,\n\t\t// we'll just overwrite it\n\t\tlogger.Debugw(\"local record verify failed\", \"key\", rec.GetKey(), \"error\", err)\n\t\treturn nil, nil\n\t}\n\n\treturn rec, nil\n}", "func NewSmsLogRow()(*SmsLogRow) {\n m := &SmsLogRow{\n }\n m.backingStore = 
ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func (r *RowCache) Row(uuid string) Model {\n\tr.mutex.RLock()\n\tdefer r.mutex.RUnlock()\n\tif row, ok := r.cache[uuid]; ok {\n\t\treturn row.(Model)\n\t}\n\treturn nil\n}", "func (db *DB) QueryRowx(query string, args ...interface{}) *Row {\n rows, err := db.DB.Query(query, args...)\n return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper}\n}", "func (mr MutRow) ToRow() Row {\n\treturn Row(mr)\n}", "func WriteRow(writer io.Writer, ts int64, anyObject ...interface{}) error {\n\tencoded, err := EncodeRow(ts, anyObject...)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"WriteRow encoding error: %v\", err)\n\t}\n\treturn WriteMessage(writer, encoded)\n}", "func RowToStructByNameLax[T any](row CollectableRow) (T, error) {\n\tvar value T\n\terr := row.Scan(&namedStructRowScanner{ptrToStruct: &value, lax: true})\n\treturn value, err\n}", "func RowToStructByName[T any](row CollectableRow) (T, error) {\n\tvar value T\n\terr := row.Scan(&namedStructRowScanner{ptrToStruct: &value})\n\treturn value, err\n}", "func SerializeRecord(data []string) Record {\n\ttimestamp, err := time.Parse(\"2006-01-02 15:04:05\", data[1])\n\tif err != nil {\n\t\tlog.Fatalf(\"could not parse time from %s: %s\", data[1], err)\n\t}\n\treturn Record{\n\t\tID: data[0],\n\t\tTimestamp: timestamp,\n\t\tEmail: data[2],\n\t\tIP: data[3],\n\t\tMac: data[4],\n\t\tCountryCode: data[5],\n\t\tUserAgent: data[6],\n\t}\n}", "func insertRecordToDB(db *sql.DB, r *gosince.APIRecord) error {\n\tctx, cancel := context.WithTimeout(context.Background(), gosince.DBTimeout)\n\tdefer cancel()\n\tresult, err := db.ExecContext(ctx,\n\t\t`INSERT INTO goapis(name, category, version, package_name, description, golang_url)\n\t\tvalues(?, ?, ?, ?, ?, ?)`,\n\t\tr.Name, r.Category, r.Version, r.PackageName, r.Description, r.GolangURL)\n\n\t// Ignore duplicate API 
records. For example.\n\t// pkg log/syslog (darwin-386), const LOG_ALERT = 1\n\t// pkg log/syslog (openbsd-amd64-cgo), const LOG_ALERT = 1\n\tif err != nil {\n\t\tif e, ok := err.(gosqlite3.Error); ok && e.Code == 19 {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\trows, err := result.RowsAffected()\n\tif err != nil || rows != 1 {\n\t\treturn err\n\t}\n\treturn nil\n}", "func RowToTripStopInfos(row *sql.Rows) []TripStopInfo {\n result := []TripStopInfo{}\n for row.Next() {\n var tripNumber int\n var stopNumber int\n var sequenceNumber int\n var drivingTime float32\n result = append(result, TripStopInfo{\n TripNumber: tripNumber,\n StopNumber: stopNumber,\n SequenceNumber: sequenceNumber,\n DrivingTime: drivingTime,\n })\n }\n return result\n}", "func (p *partitionImpl) GetRow(rowNum int) sif.Row {\n\treturn &rowImpl{rowNum, p}\n}", "func (self *RowsBuffer) ReadRow() (*Row, error) {\n\tself.Lock()\n\tdefer self.Unlock()\n\n\tfor self.Index >= self.RowsNumber {\n\t\tself.ClearValues()\n\t\tif err := self.readRows(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\trow := RowPool.Get().(*Row)\n\trow.Clear()\n\trow.Vals = make([]interface{}, len(self.ValueBuffers))\n\tfor i, col := range self.ValueBuffers {\n\t\trow.Vals[i] = col[self.Index]\n\t}\n\n\trow.Keys = make([]interface{}, len(self.KeyBuffers))\n\tfor i, col := range self.KeyBuffers {\n\t\trow.Keys[i] = col[self.Index]\n\t}\n\tself.Index++\n\treturn row, nil\n}", "func (d *Db) GetNextRecord(r *Record) (*Record, error) {\n\tnr := &Record{rec: C.wg_get_next_record(d.db, r.rec)}\n\tif nr.rec == nil {\n\t\treturn nil, WDBError(\"Done With DB\")\n\t}\n\treturn nr, nil\n}", "func NewRow(schema typeof.Schema, capacity int) Row {\n\tif schema == nil {\n\t\tschema = make(typeof.Schema, capacity)\n\t}\n\n\treturn Row{\n\t\tValues: make(map[string]interface{}, capacity),\n\t\tSchema: schema,\n\t}\n}", "func (mcs *MemoryCellStore) ReadRow(key string, s *Sheet) (*Row, error) {\n\tr, ok := mcs.rows[key]\n\tif !ok 
{\n\t\treturn nil, NewRowNotFoundError(key, \"No such row\")\n\t}\n\treturn r, nil\n}", "func (r *Report) ReportRow(rowID int) *db.ReportRow {\n\treturn &db.ReportRow{\n\t\tReportID: r.FullID,\n\t\tTitle: r.Title,\n\t\tRowID: rowID,\n\t\tPostDateTime: r.DateTime,\n\t\tRawText: r.rawString,\n\t}\n}", "func rowsToThings(rows *sql.Rows) Things {\n\tvar (\n\t\tt Thing\n\t\tresult Things\n\t\terr error\n\t)\n\n\tcheckRows(\"Things\", rows)\n\n\tfor i := 0; rows.Next(); i++ {\n\t\terr := rows.Scan(&t.ckey, &t.cval, &t.url, &t.data, &t.clockid, &t.tsn)\n\t\tcheckErr(\"scan things\", err)\n\n\t\tresult = append(result, t)\n\t}\n\terr = rows.Err()\n\tcheckErr(\"end reading things loop\", err)\n\n\tfmt.Printf(\"returning things: %d rows\\n\", len(result))\n\treturn result\n}", "func rowToSample(row map[string]bigquery.Value) (prompb.Sample, model.Metric, []*prompb.Label, error) {\n\tvar v interface{}\n\tlabelsJSON := row[\"tags\"].(string)\n\terr := json.Unmarshal([]byte(labelsJSON), &v)\n\tif err != nil {\n\t\treturn prompb.Sample{}, nil, nil, err\n\t}\n\tlabels := v.(map[string]interface{})\n\tlabelPairs := make([]*prompb.Label, 0, len(labels))\n\tmetric := model.Metric{}\n\tfor name, value := range labels {\n\t\tlabelPairs = append(labelPairs, &prompb.Label{\n\t\t\tName: name,\n\t\t\tValue: value.(string),\n\t\t})\n\t\tmetric[model.LabelName(name)] = model.LabelValue(value.(string))\n\t}\n\tlabelPairs = append(labelPairs, &prompb.Label{\n\t\tName: model.MetricNameLabel,\n\t\tValue: row[\"metricname\"].(string),\n\t})\n\t// Make sure we sort the labels, so the test cases won't blow up\n\tsort.Slice(labelPairs, func(i, j int) bool { return labelPairs[i].Name < labelPairs[j].Name })\n\tmetric[model.LabelName(model.MetricNameLabel)] = model.LabelValue(row[\"metricname\"].(string))\n\treturn prompb.Sample{Timestamp: row[\"timestamp\"].(int64), Value: row[\"value\"].(float64)}, metric, labelPairs, nil\n}", "func TestRecord(t *testing.T) {\n\terr := testDbf.GoTo(1)\n\tif err != 
nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// test if the record is deleted\n\tdeleted, err := testDbf.Deleted()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !deleted {\n\t\tt.Fatal(\"Record should be deleted\")\n\t}\n\n\t// read the same record using Record() and RecordAt()\n\trecs := [2]*Record{}\n\trecs[0], err = testDbf.Record()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trecs[1], err = testDbf.RecordAt(1)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor irec, rec := range recs {\n\t\tfor _, want := range wantValues {\n\t\t\tval, err := rec.Field(want.pos)\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t\tstrval := strings.TrimSpace(fmt.Sprintf(\"%v\", val))\n\t\t\tstrtype := fmt.Sprintf(\"%T\", val)\n\n\t\t\tif want.strval != strval || want.strtype != strtype {\n\t\t\t\tt.Errorf(\"Record %d: Wanted value %s with type %s, have value %s with type %s\", irec, want.strval, want.strtype, strval, strtype)\n\t\t\t}\n\t\t}\n\t}\n}", "func (f *FieldValues) Row(idx int, dest []driver.Value) {\n\tcopy(dest, f.values[idx*f.cols:(idx+1)*f.cols])\n\n\tif f.lobCols == 0 {\n\t\treturn\n\t}\n\n\tfor i, descr := range f.descrs {\n\t\tcol := descr.col\n\t\twriter := dest[col].(lobWriter)\n\t\tf.writers[i] = writer\n\t\tdescr.w = writer\n\t\tdest[col] = lobReadDescrToPointer(descr)\n\t}\n\n\t// last descriptor triggers lob read\n\tf.descrs[f.lobCols-1].fn = func() error {\n\t\treturn f.s.readLobStream(f.writers)\n\t}\n}", "func returnSingleRecord(w http.ResponseWriter, r *http.Request, ps httprouter.Params){\n\n\t//msg := <- requestChannel\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tid, ok := getID(w, ps)\n\tfmt.Print(\"val\",id,ok)\n\tif !ok {\n\t\trec, ires := mytable.searchByKey(ps.ByName(\"id\"))\n\t\tfmt.Println(rec,ires)\n\t\tif ires == -1{\n\t\t\tjson.NewEncoder(w).Encode(\"No record ith that key\")\n\t\t} else {\n\t\t\tjson.NewEncoder(w).Encode(rec)\n\t\t}\n\t} else {\n\t\trec, ires := 
mytable.searchById(id)\n\t\tfmt.Println(rec,ires)\n\t\tif ires == -1 {\n\t\t\tjson.NewEncoder(w).Encode(\"No value with that id\")\n\n\t\t} else {\n\t\t\tjson.NewEncoder(w).Encode(rec)\n\t\t}\n\t}\n}", "func (w *Wrapper) queryRow(query string, args ...interface{}) *sql.Row {\n\tw.connLock.RLock()\n\tdefer w.connLock.RUnlock()\n\n\treturn w.connection.QueryRow(w.prepare(query), args...)\n}", "func (v *recordingTable) NewRecord() reform.Record {\n\treturn new(Recording)\n}", "func (f *recordingSource) parseRecord(recordNum int) *record {\n\tr, ok := f.recordDecls[recordNum]\n\tif !ok {\n\t\tpanicf(\"record with number %d must exist\", recordNum)\n\t}\n\n\t// Record fields are separated by tabs, with the first field being the name\n\t// of the driver method.\n\tfields := splitString(r, \"\\t\")\n\trecType, ok := strToRecType[fields[0]]\n\tif !ok {\n\t\tpanicf(\"record type %v is not recognized\", fields[0])\n\t}\n\n\t// Remaining fields are record arguments in \"<dataType>:<formattedValue>\"\n\t// format.\n\trec := &record{Typ: recType}\n\tfor i := 1; i < len(fields); i++ {\n\t\tval, err := parseValueWithType(fields[i])\n\t\tif err != nil {\n\t\t\tpanicf(\"error parsing %s: %v\", fields[i], err)\n\t\t}\n\t\trec.Args = append(rec.Args, val)\n\t}\n\treturn rec\n}", "func MarshalRecord(subRecord Record) ([]byte, error) {\n\tbase := X_Record{}\n\n\tswitch subRecord.(type) {\n\tcase *GenesisRecord:\n\t\tbase.Union = &X_Record_Genesis{(*X_GenesisRecord)(subRecord.(*GenesisRecord))}\n\tcase *ChildRecord:\n\t\tbase.Union = &X_Record_Child{(*X_ChildRecord)(subRecord.(*ChildRecord))}\n\tcase *JetRecord:\n\t\tbase.Union = &X_Record_Jet{(*X_JetRecord)(subRecord.(*JetRecord))}\n\tcase *RequestRecord:\n\t\tbase.Union = &X_Record_Request{(*X_RequestRecord)(subRecord.(*RequestRecord))}\n\tcase *ResultRecord:\n\t\tbase.Union = &X_Record_Result{(*X_ResultRecord)(subRecord.(*ResultRecord))}\n\tcase *TypeRecord:\n\t\tbase.Union = 
&X_Record_Type{(*X_TypeRecord)(subRecord.(*TypeRecord))}\n\tcase *CodeRecord:\n\t\tbase.Union = &X_Record_Code{(*X_CodeRecord)(subRecord.(*CodeRecord))}\n\tcase *ObjectActivateRecord:\n\t\tbase.Union = &X_Record_ObjectActivate{(*X_ObjectActivateRecord)(subRecord.(*ObjectActivateRecord))}\n\tcase *ObjectAmendRecord:\n\t\tbase.Union = &X_Record_ObjectAmend{(*X_ObjectAmendRecord)(subRecord.(*ObjectAmendRecord))}\n\tcase *ObjectDeactivateRecord:\n\t\tbase.Union = &X_Record_ObjectDeactivate{(*X_ObjectDeactivateRecord)(subRecord.(*ObjectDeactivateRecord))}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"__Record.union has unexpected type %T\", subRecord)\n\t}\n\treturn base.Marshal()\n}", "func scanRow(scanner db.Scanner, dest *core.Connection) error {\n\treturn scanner.Scan(\n\t\t&dest.ID,\n\t\t&dest.Name,\n\t\t&dest.PID,\n\t\t&dest.DataBase,\n\t\t&dest.Host,\n\t\t&dest.Port,\n\t\t&dest.User,\n\t\t&dest.Password,\n\t\t&dest.Description,\n\t\t&dest.Created,\n\t\t&dest.Updated,\n\t)\n}", "func (empHandler *EmployeeHandler) storeRecord() []Employee {\n\tvar emp []Employee\n\tdis, err := empHandler.DB.Query(\"select id, name, age, gender, role from employee\")\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tfor dis.Next() {\n\t\tvar row Employee\n\t\terr = dis.Scan(&row.Id, &row.Name, &row.Age, &row.Gender, &row.Role)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\temp = append(emp, row)\n\t}\n\treturn emp\n}", "func (c *Cache) saveRecord(r quandl.Record) {\n\terr := c.DB.Save(&r).Error\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (d *Database) QueryRow(db DB, dst interface{}, query string, args ...interface{}) error {\n\treturn d.QueryRowContext(context.Background(), db, dst, query, args...)\n}", "func NewRowToRow() AStarConfig {\n\tr2r := &rowToRow{}\n\treturn r2r\n}", "func RowToStructByPos[T any](row CollectableRow) (T, error) {\n\tvar value T\n\terr := row.Scan(&positionalStructRowScanner{ptrToStruct: &value})\n\treturn value, err\n}", "func (db TestDB) 
QueryRow(query string, args ...interface{}) *sql.Row {\n\treturn db.testTx.QueryRow(query, args...)\n}" ]
[ "0.6539114", "0.64575875", "0.61635786", "0.59165096", "0.5815466", "0.576414", "0.5693766", "0.5690296", "0.5623456", "0.56130004", "0.5544808", "0.5532619", "0.5377269", "0.5357239", "0.5338372", "0.5308314", "0.5301193", "0.5299179", "0.52847064", "0.52828956", "0.52663213", "0.5221548", "0.52182883", "0.5197463", "0.5188103", "0.5166546", "0.516225", "0.51385635", "0.5130053", "0.50951684", "0.50754106", "0.5067426", "0.50646496", "0.5041237", "0.5012035", "0.49921605", "0.49876735", "0.49822807", "0.49818763", "0.496961", "0.49623063", "0.49587193", "0.49577093", "0.49284422", "0.49174178", "0.49067694", "0.49048546", "0.48960373", "0.48814198", "0.48755592", "0.48547187", "0.48513025", "0.4851149", "0.4835554", "0.48347515", "0.48285773", "0.48144278", "0.4814123", "0.48133892", "0.47810516", "0.47697294", "0.47667855", "0.47648776", "0.47247112", "0.47129786", "0.46996424", "0.4699345", "0.46989405", "0.4698544", "0.46979606", "0.46959728", "0.4693592", "0.46931618", "0.4658993", "0.46518588", "0.46499443", "0.46494862", "0.46467692", "0.4642963", "0.46422005", "0.4630433", "0.46166307", "0.4610473", "0.4604428", "0.46040887", "0.45966443", "0.45946082", "0.45845747", "0.45771766", "0.45755392", "0.45755172", "0.45690742", "0.4564756", "0.45634598", "0.45623347", "0.45606554", "0.45550045", "0.45541856", "0.4552429", "0.4549263" ]
0.8461538
0
rowsToRecords converts from pgx.Rows to []store.Record
func (s *sqlStore) rowsToRecords(rows pgx.Rows) ([]*store.Record, error) { var records []*store.Record for rows.Next() { var expiry *time.Time record := &store.Record{} metadata := make(Metadata) if err := rows.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil { return records, err } // set the metadata record.Metadata = toMetadata(&metadata) if expiry != nil { record.Expiry = time.Until(*expiry) } records = append(records, record) } return records, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *sqlStore) rowToRecord(row pgx.Row) (*store.Record, error) {\n\tvar expiry *time.Time\n\trecord := &store.Record{}\n\tmetadata := make(Metadata)\n\n\tif err := row.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn record, store.ErrNotFound\n\t\t}\n\t\treturn nil, err\n\t}\n\n\t// set the metadata\n\trecord.Metadata = toMetadata(&metadata)\n\tif expiry != nil {\n\t\trecord.Expiry = time.Until(*expiry)\n\t}\n\n\treturn record, nil\n}", "func RowToArr(rows *sql.Rows) (records [][]string, err error) {\n\tfmt.Printf(\"RowToArr start at %s\", time.Now())\n\tcolumns, err := rows.Columns()\n\tif err != nil {\n\t\treturn\n\t}\n\tcount := len(columns)\n\treadCols := make([]interface{}, count)\n\trawCols := make([]interface{}, count)\n\t//records = make([]interface{}, 0)\n\trecords = append(records, columns) //append row header as 1st row\n\n\t// var resultCols []string\n\tfor rows.Next() {\n\t\t// resultCols = make([]string, count)\n\t\tfor i := range columns {\n\t\t\treadCols[i] = &rawCols[i]\n\t\t}\n\t\terr = rows.Scan(readCols...)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tresultCols := assertTypeArray(columns, rawCols)\n\t\trecords = append(records, resultCols)\n\t}\n\n\tfmt.Printf(\"RowToArr end at %s\", time.Now())\n\treturn records, nil\n}", "func RowToRawData(rows *sql.Rows) (r RawData) {\n\trecord, _ := RowToArr(rows)\n\tr.Header = record[0]\n\tr.Rows = append(r.Rows, record[1:])\n\treturn\n}", "func toRow(pl any) []byte {\n\trt := reflect.TypeOf(pl)\n\n\tenc, err := coder.RowEncoderForStruct(rt)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"unable to get row encoder\"))\n\t}\n\tvar buf bytes.Buffer\n\tif err := enc(pl, &buf); err != nil {\n\t\tpanic(fmt.Errorf(\"unable to do row encoding\"))\n\t}\n\treturn buf.Bytes()\n}", "func (a *kinesisFirehoseWriter) toRecords(msg message.Batch) ([]*firehose.Record, error) {\n\tentries := make([]*firehose.Record, msg.Len())\n\n\terr := msg.Iter(func(i 
int, p *message.Part) error {\n\t\tentry := firehose.Record{\n\t\t\tData: p.AsBytes(),\n\t\t}\n\n\t\tif len(entry.Data) > mebibyte {\n\t\t\ta.log.Errorf(\"part %d exceeds the maximum Kinesis Firehose payload limit of 1 MiB\\n\", i)\n\t\t\treturn component.ErrMessageTooLarge\n\t\t}\n\n\t\tentries[i] = &entry\n\t\treturn nil\n\t})\n\n\treturn entries, err\n}", "func rowsToThings(rows *sql.Rows) Things {\n\tvar (\n\t\tt Thing\n\t\tresult Things\n\t\terr error\n\t)\n\n\tcheckRows(\"Things\", rows)\n\n\tfor i := 0; rows.Next(); i++ {\n\t\terr := rows.Scan(&t.ckey, &t.cval, &t.url, &t.data, &t.clockid, &t.tsn)\n\t\tcheckErr(\"scan things\", err)\n\n\t\tresult = append(result, t)\n\t}\n\terr = rows.Err()\n\tcheckErr(\"end reading things loop\", err)\n\n\tfmt.Printf(\"returning things: %d rows\\n\", len(result))\n\treturn result\n}", "func recordToSlice(record Record) []string {\n\tvar recordSlice []string\n\n\trecordSlice = []string{\n\t\tfmt.Sprintf(\"%d\",record.CheeseId), record.CheeseName, record.ManufacturerName, record.ManufacturerProvCode,\n\t\trecord.ManufacturingType, record.WebSite, fmt.Sprintf(\"%.2f\", record.FatContentPercent), \n\t\tfmt.Sprintf(\"%.2f\", record.MoisturePercent), record.Particularities, record.Flavour, \n\t\trecord.Characteristics, record.Ripening, fmt.Sprintf(\"%t\", record.Organic),\n\t\trecord.CategoryType, record.MilkType, record.MilkTreatmentType, record.RindType, record.LastUpdateDate,\n\t}\n\n\treturn recordSlice\n}", "func RowToMap(rows *sql.Rows) []map[string]string {\n\tcolumns, _ := rows.Columns()\n\tcount := len(columns)\n\treadCols := make([]interface{}, count)\n\trawCols := make([]interface{}, count)\n\tvar records []map[string]string\n\tfor rows.Next() {\n\t\t// resultCols := make(map[string]string, count)\n\t\tfor i := range columns {\n\t\t\treadCols[i] = &rawCols[i]\n\t\t}\n\t\trows.Scan(readCols...)\n\n\t\t// all conver to string\n\t\tresultCols := assertTypeMap(columns, rawCols)\n\n\t\trecords = append(records, 
resultCols)\n\t}\n\treturn records\n}", "func databaseRowsToPaginationDataList(rows *sql.Rows, dtFields []dtColumn) ([]map[string]string, error) {\n\tvar dataList []map[string]string\n\n\tcolumns, err := rows.Columns()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not get row.Columns %w\", err)\n\t}\n\n\tvalues := make([]sql.RawBytes, len(columns))\n\t// rows.Scan wants '[]interface{}' as an argument, so we must copy the\n\t// references into such a slice\n\t// See http://code.google.com/p/go-wiki/wiki/InterfaceSlice for details\n\tscanArgs := make([]interface{}, len(values))\n\tfor i := range values {\n\t\tscanArgs[i] = &values[i]\n\t}\n\n\tfor rows.Next() {\n\t\t// get RawBytes from data\n\t\terr = rows.Scan(scanArgs...)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not scan rows to 'scanArgs...' %w\", err)\n\t\t}\n\n\t\tvar value string\n\n\t\tfor i, col := range values {\n\t\t\t// Here we can check if the value is nil (NULL value)\n\t\t\tif col == nil {\n\t\t\t\tvalue = \"NULL\"\n\t\t\t} else {\n\t\t\t\tvalue = string(col)\n\t\t\t}\n\n\t\t\tfor _, dtField := range dtFields {\n\t\t\t\tif dtField.dbColumnName == columns[i] {\n\t\t\t\t\tdtObject := map[string]string{dtField.dtColumnName: value}\n\t\t\t\t\tdataList = append(dataList, dtObject)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn dataList, nil\n}", "func RowsToMaps(rows *sql.Rows, geomColumn string) ([]map[string]interface{}, error) {\n\tvar maps []map[string]interface{}\n\n\tcols, err := rows.Columns()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor rows.Next() {\n\t\trow := make([]interface{}, len(cols))\n\t\tfor idx, col := range cols {\n\t\t\tif col == geomColumn {\n\t\t\t\trow[idx] = new(wkb.GeometryScanner)\n\t\t\t} else {\n\t\t\t\trow[idx] = new(DumbScanner)\n\t\t\t}\n\t\t}\n\t\terr := rows.Scan(row...)\n\t\tif err != nil {\n\t\t\treturn maps, err\n\t\t}\n\t\tm := make(map[string]interface{})\n\t\tfor idx, col := range cols {\n\t\t\tif geom, isGeomScanner := 
row[idx].(*wkb.GeometryScanner); isGeomScanner {\n\t\t\t\tif geom.Valid {\n\t\t\t\t\tm[col] = geom.Geometry\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, InvalidGeometryErr\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tds := row[idx].(*DumbScanner)\n\t\t\t\tm[col] = ds.Value\n\t\t\t}\n\t\t}\n\t\tmaps = append(maps, m)\n\t}\n\n\treturn maps, nil\n}", "func CollectRows[T any](rows Rows, fn RowToFunc[T]) ([]T, error) {\n\tdefer rows.Close()\n\n\tslice := []T{}\n\n\tfor rows.Next() {\n\t\tvalue, err := fn(rows)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tslice = append(slice, value)\n\t}\n\n\tif err := rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slice, nil\n}", "func RowsToMap(rows *sql.Rows, typeString string) ([]map[string]interface{}, error) {\n\tarr := make([]map[string]interface{}, 0)\n\n\tcols, err := rows.Columns()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//Set up valuePointers slice using types from typeString\n\ttypes := strings.Split(typeString, \",\")\n\tvaluePointers := make([]interface{}, len(types))\n\tfor i, t := range types {\n\t\tif t == \"int\" {\n\t\t\tvaluePointers[i] = new(int)\n\t\t} else if t == \"string\" {\n\t\t\tvaluePointers[i] = new(string)\n\t\t} else {\n\t\t\treturn nil, errors.New(\"Unknown type in typeString\")\n\t\t}\n\t}\n\n\tfor rows.Next() {\n\t\t// Scan the result into the value pointers...\n\t\tif err := rows.Scan(valuePointers...); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tm := make(map[string]interface{})\n\t\tfor i, colName := range cols {\n\t\t\tm[colName] = valuePointers[i]\n\t\t}\n\n\t\tarr = append(arr, m)\n\t}\n\n\treturn arr, nil\n}", "func RowToDrivers(row *sql.Rows) []Driver {\n result := []Driver{}\n for row.Next() {\n var driverName string\n var driverTelephoneNumber string\n row.Scan(&driverName, &driverTelephoneNumber)\n result = append(result, Driver{\n DriverName: driverName,\n DriverTelephoneNumber: driverTelephoneNumber,\n })\n }\n return result\n}", "func (a *kinesisWriter) 
toRecords(msg message.Batch) ([]*kinesis.PutRecordsRequestEntry, error) {\n\tentries := make([]*kinesis.PutRecordsRequestEntry, msg.Len())\n\n\terr := msg.Iter(func(i int, p *message.Part) error {\n\t\tpartKey, err := a.partitionKey.String(i, msg)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"partition key interpolation error: %w\", err)\n\t\t}\n\t\tentry := kinesis.PutRecordsRequestEntry{\n\t\t\tData: p.AsBytes(),\n\t\t\tPartitionKey: aws.String(partKey),\n\t\t}\n\n\t\tif len(entry.Data) > mebibyte {\n\t\t\ta.log.Errorf(\"part %d exceeds the maximum Kinesis payload limit of 1 MiB\\n\", i)\n\t\t\treturn component.ErrMessageTooLarge\n\t\t}\n\n\t\thashKey, err := a.hashKey.String(i, msg)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"hash key interpolation error: %w\", err)\n\t\t}\n\t\tif hashKey != \"\" {\n\t\t\tentry.ExplicitHashKey = aws.String(hashKey)\n\t\t}\n\n\t\tentries[i] = &entry\n\t\treturn nil\n\t})\n\n\treturn entries, err\n}", "func RowToTrips(row *sql.Rows) []Trip {\n trips := []Trip{}\n for row.Next() {\n var tripNumber int\n var startLocationName string\n var destinationName string\n row.Scan(&tripNumber, &startLocationName, &destinationName)\n trips = append(trips, Trip{\n TripNumber: tripNumber,\n StartLocationName: startLocationName,\n DestinationName: destinationName,\n })\n }\n return trips\n}", "func NewRows(rs *sql.Rows) (*Rows, error) {\n\tif nil == rs {\n\t\trs = new(sql.Rows)\n\t}\n\tdefer rs.Close()\n\n\tvar err error\n\tvar tmp map[string]string\n\n\tret := &Rows{}\n\tret.currentData = make(map[string]string)\n\tret.data = make([]map[string]string, 0)\n\tret.colnames, err = rs.Columns()\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\n\tfor rs.Next() {\n\t\ttmp, err = fetchMap(rs)\n\t\tif nil != err {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tret.data = append(ret.data, tmp)\n\t\tret.dataLen++\n\t}\n\treturn ret, nil\n}", "func convertFromTsRows(tsRows [][]TsCell) []*riak_ts.TsRow {\n\tvar rows []*riak_ts.TsRow\n\tvar cells 
[]*riak_ts.TsCell\n\tfor _, tsRow := range tsRows {\n\t\tcells = make([]*riak_ts.TsCell, 0)\n\n\t\tfor _, tsCell := range tsRow {\n\t\t\tcells = append(cells, tsCell.cell)\n\t\t}\n\n\t\tif len(rows) < 1 {\n\t\t\trows = make([]*riak_ts.TsRow, 0)\n\t\t}\n\n\t\trows = append(rows, &riak_ts.TsRow{Cells: cells})\n\t}\n\n\treturn rows\n}", "func (r *Rows) row(a ...interface{}) error {\n\tdefer r.Close()\n\n\tfor _, dp := range a {\n\t\tif _, ok := dp.(*sql.RawBytes); ok {\n\t\t\treturn VarTypeError(\"RawBytes isn't allowed on Row()\")\n\t\t}\n\t}\n\n\tif !r.Next() {\n\t\tif err := r.Err(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn sql.ErrNoRows\n\t}\n\tif err := r.Scan(a...); err != nil {\n\t\treturn err\n\t}\n\n\treturn r.Close()\n}", "func (res *Result) Rows() [][]interface{} {\n\tifacesSlice := make([][]interface{}, len(res.rows))\n\tfor i := range res.rows {\n\t\tifaces := make([]interface{}, len(res.rows[i]))\n\t\tfor j := range res.rows[i] {\n\t\t\tifaces[j] = res.rows[i][j]\n\t\t}\n\t\tifacesSlice[i] = ifaces\n\t}\n\treturn ifacesSlice\n}", "func (r *Reader) Row() []interface{} {\n\treturn r.row\n}", "func RowsToJSONArray(rows *sql.Rows) (string, error) {\n\tvar ret string\n\tvar err error\n\tret = \"[]\"\n\n\tcolumns, err := rows.Columns()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t//Scan requires pointers and []*interface does not work for rows.Scan so this is a workaround\n\t//Since interface can be anything, we create a pointer to another interface in another slice to pass type-check\n\t//https://stackoverflow.com/questions/29102725/go-sql-driver-get-interface-column-values\n\tcolPointers := make([]interface{}, len(columns))\n\tcols := make([]interface{}, len(columns))\n\tfor i := range colPointers {\n\t\tcolPointers[i] = &cols[i]\n\t}\n\n\tcounter := 0\n\tfor rows.Next() {\n\t\terr := rows.Scan(colPointers...)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tfor i, v := range cols {\n\t\t\tpath := fmt.Sprintf(\"%d.%s\", counter, 
columns[i])\n\t\t\tret, err = sjson.Set(ret, path, v)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t}\n\t\tcounter++\n\t}\n\treturn ret, nil\n}", "func RowsToStrings(qr *sqltypes.Result) [][]string {\n\tvar result [][]string\n\tfor _, row := range qr.Rows {\n\t\tvar srow []string\n\t\tfor _, cell := range row {\n\t\t\tsrow = append(srow, cell.ToString())\n\t\t}\n\t\tresult = append(result, srow)\n\t}\n\treturn result\n}", "func RowToStops(row *sql.Rows) []Stop {\n result := []Stop{}\n for row.Next() {\n var stopNumber int\n var stopAddress string\n result = append(result, Stop{\n StopNumber: stopNumber,\n StopAddress: stopAddress,\n })\n }\n return result\n}", "func recordToRecord(\n\ttopic string,\n\tpartition int32,\n\tbatch *kmsg.RecordBatch,\n\trecord *kmsg.Record,\n) *Record {\n\th := make([]RecordHeader, 0, len(record.Headers))\n\tfor _, kv := range record.Headers {\n\t\th = append(h, RecordHeader{\n\t\t\tKey: kv.Key,\n\t\t\tValue: kv.Value,\n\t\t})\n\t}\n\n\treturn &Record{\n\t\tKey: record.Key,\n\t\tValue: record.Value,\n\t\tHeaders: h,\n\t\tTimestamp: timeFromMillis(batch.FirstTimestamp + int64(record.TimestampDelta)),\n\t\tTopic: topic,\n\t\tPartition: partition,\n\t\tAttrs: RecordAttrs{uint8(batch.Attributes)},\n\t\tProducerID: batch.ProducerID,\n\t\tProducerEpoch: batch.ProducerEpoch,\n\t\tLeaderEpoch: batch.PartitionLeaderEpoch,\n\t\tOffset: batch.FirstOffset + int64(record.OffsetDelta),\n\t}\n}", "func RowsToQueryResults(rows *sql.Rows, coldefs []database.Column) (QueryResults, error) {\n\tcols := database.Columns(coldefs).Names()\n\tres := []RowData{}\n\tfor rows.Next() {\n\t\tcolumns := make([]interface{}, len(cols))\n\t\tcolumnPointers := make([]interface{}, len(cols))\n\t\tfor i := range columns {\n\t\t\tcolumnPointers[i] = &columns[i]\n\t\t}\n\t\t// Scan the result into the column pointers...\n\t\tif err := rows.Scan(columnPointers...); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\trowData := makeRowDataSet(coldefs)\n\t\tfor 
i, colName := range cols {\n\t\t\tval := columnPointers[i].(*interface{})\n\t\t\trowData[colName] = ColData{Data: val, DataType: rowData[colName].DataType}\n\t\t}\n\n\t\tres = append(res, rowData)\n\t}\n\n\treturn res, nil\n}", "func RowTo[T any](row CollectableRow) (T, error) {\n\tvar value T\n\terr := row.Scan(&value)\n\treturn value, err\n}", "func convertRow(\n\trow *Row,\n\twantsNode bool,\n\twantsTimestamp bool,\n\tdesiredValues []string,\n) *stats.Row {\n\tvar (\n\t\tnode string\n\t\ttimestamp time.Time\n\t)\n\n\tvar resultValues map[string]interface{}\n\tif len(desiredValues) > 0 {\n\t\tresultValues = make(map[string]interface{})\n\t}\n\n\tfor _, v := range desiredValues {\n\t\tresultValues[v] = row.value(v)\n\t}\n\n\tif wantsNode {\n\t\tnode = row.Node\n\t}\n\tif wantsTimestamp {\n\t\ttimestamp = row.Timestamp.UTC()\n\t}\n\n\treturn &stats.Row{\n\t\tNode: node,\n\t\tTimestamp: timestamp,\n\t\tValues: resultValues,\n\t}\n}", "func sqlReceiveRows(rows *sql.Rows,\n\tcolumnTypes []query.GoColumnType,\n\tcolumnNames []string,\n\tbuilder *sqlBuilder,\n\t) []map[string]interface{} {\n\n\tvar values []map[string]interface{}\n\n\tcursor := NewSqlCursor(rows, columnTypes, columnNames, nil)\n\tdefer cursor.Close()\n\tfor v := cursor.Next();v != nil;v = cursor.Next() {\n\t\tvalues = append(values, v)\n\t}\n\tif builder != nil {\n\t\tvalues = builder.unpackResult(values)\n\t}\n\n\treturn values\n}", "func (s Series) Records(force bool) ([]string, error) {\n\tret := make([]string, s.Len())\n\tfor i := 0; i < s.Len(); i++ {\n\t\te := s.elements.Elem(i)\n\t\tval, err := e.String()\n\t\tif err != nil && !force {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err != nil {\n\t\t\tret[i] = \"\"\n\t\t} else {\n\t\t\tret[i] = val\n\t\t}\n\t}\n\treturn ret, nil\n}", "func RowsScan(rows *sql.Rows) (result []map[string]string, err error) {\n\tcolumns, err := rows.Columns()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvalues := make([]sql.RawBytes, len(columns))\n\tscanArgs := 
make([]interface{}, len(values))\n\t// ret := make(map[string]string, len(scanArgs))\n\n\tfor i := range values {\n\t\tscanArgs[i] = &values[i]\n\t}\n\n\tfor rows.Next() {\n\t\terr = rows.Scan(scanArgs...)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tvar value string\n\t\tret := make(map[string]string, len(scanArgs))\n\n\t\tfor i, col := range values {\n\t\t\tif col == nil {\n\t\t\t\tvalue = \"NULL\"\n\t\t\t} else {\n\t\t\t\tvalue = string(col)\n\t\t\t}\n\t\t\tret[columns[i]] = value\n\t\t}\n\n\t\tresult = append(result, ret)\n\n\t\t// break //get the first row only\n\t}\n\n\treturn\n}", "func (handler *SQLLiteTableHandler) ParseRows(rows *sql.Rows) per.IQueryResult {\n\thandler.Parent.LogDebug(\"ParseRows\", \"Returing empty results - was this function replaced\")\n\treturn NewDataQueryResult(false, []per.IDataItem{})\n}", "func convertFromPbTsRows(tsRows []*riak_ts.TsRow, tsCols []*riak_ts.TsColumnDescription) [][]TsCell {\n\tvar rows [][]TsCell\n\tvar row []TsCell\n\tvar cell TsCell\n\n\tfor _, tsRow := range tsRows {\n\t\trow = make([]TsCell, 0)\n\n\t\tfor i, tsCell := range tsRow.Cells {\n\t\t\ttsColumnType := riak_ts.TsColumnType_VARCHAR\n\t\t\tif tsCols != nil {\n\t\t\t\ttsColumnType = tsCols[i].GetType()\n\t\t\t}\n\t\t\tcell.setCell(tsCell, tsColumnType)\n\t\t\trow = append(row, cell)\n\t\t}\n\n\t\tif len(rows) < 1 {\n\t\t\trows = make([][]TsCell, 0)\n\t\t}\n\n\t\trows = append(rows, row)\n\t}\n\n\treturn rows\n}", "func (rows *Rows) ToMap() ([]map[string]interface{}, error) {\n\n\tcolumns, err := rows.Rows.Columns()\n\tif err != nil {\n\t\treturn nil,err\n\t}\n\n\tvalues := make([]interface{}, len(columns))\n\tfor i := range values {\n\t\tvalues[i] = new(interface{})\n\t}\n\n\trowMaps := make([]map[string]interface{}, 0)\n\n\tfor rows.Rows.Next() {\n\t\terr = rows.Rows.Scan(values...)\n\t\tif err != nil {\n\t\t\treturn nil,err\n\t\t}\n\n\t\tcurrRow := make(map[string]interface{})\n\t\tfor i, name := range columns {\n\t\t\tcurrRow[name] = 
*(values[i].(*interface{}))\n\t\t}\n\t\t// accumulating rowMaps is the easy way out\n\t\trowMaps = append(rowMaps, currRow)\n\t}\n\n\treturn rowMaps,nil\n}", "func getMapFromRows(rows *sql.Rows) (map[string]interface{}, error) {\n\tcols, _ := rows.Columns()\n\tm := make(map[string]interface{})\n\tfor rows.Next() {\n\t\t// Create a slice of interface{}'s to represent each column,\n\t\t// and a second slice to contain pointers to each item in the columns slice.\n\t\tcolumns := make([]interface{}, len(cols))\n\t\tcolumnPointers := make([]interface{}, len(cols))\n\t\tfor i, _ := range columns {\n\t\t\tcolumnPointers[i] = &columns[i]\n\t\t}\n\n\t\t// Scan the result into the column pointers...\n\t\tif err := rows.Scan(columnPointers...); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Create our map, and retrieve the value for each column from the pointers slice,\n\t\t// storing it in the map with the name of the column as the key.\n\t\tfor i, colName := range cols {\n\t\t\tval := columnPointers[i].(*interface{})\n\t\t\tm[colName] = *val\n\t\t}\n\t}\n\treturn m, nil\n}", "func (g *GroupByAggregator) recordsForTable(table map[string]*GroupByRow) []*zng.Record {\n\tvar keys []string\n\tfor k := range table {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\tvar recs []*zng.Record\n\tfor _, k := range keys {\n\t\trow := table[k]\n\t\tvar zv zcode.Bytes\n\t\tif g.TimeBinDuration > 0 {\n\t\t\tzv = zcode.AppendPrimitive(zv, zng.EncodeTime(row.ts))\n\t\t}\n\t\tzv = append(zv, row.keyvals...)\n\t\tfor _, red := range row.reducers.Reducers {\n\t\t\t// a reducer value is never a container\n\t\t\tv := reducer.Result(red)\n\t\t\tif v.IsContainer() {\n\t\t\t\tpanic(\"internal bug: reducer result cannot be a container!\")\n\t\t\t}\n\t\t\tzv = v.Encode(zv)\n\t\t}\n\t\ttyp := g.lookupRowType(row)\n\t\tr := zng.NewRecordTs(typ, row.ts, zv)\n\t\trecs = append(recs, r)\n\t}\n\treturn recs\n}", "func RowToTripOfferings(row *sql.Rows) []TripOffering {\n tripOffering := 
[]TripOffering{}\n for row.Next() {\n var tripNumber int\n var date string\n var scheduledStartTime string\n var scheduledArrivalTime string\n var driverName string\n var busID int\n row.Scan(&tripNumber, &date, &scheduledStartTime, &scheduledArrivalTime, &driverName, &busID)\n tripOffering = append(tripOffering, TripOffering{\n TripNumber: tripNumber,\n Date: date,\n ScheduledStartTime: scheduledStartTime,\n ScheduledArrivalTime: scheduledArrivalTime,\n DriverName: driverName,\n BusID: busID,\n })\n }\n return tripOffering\n}", "func GetAllRecords(client *mongo.Collection) *[]Record {\n\tcursor, err := client.Find(context.TODO(), bson.D{{}})\n\tif err != nil {\n\t\tlog.Fatalln(\"Unable to find any records: %s\", err)\n\t}\n\n\tvar result []Record\n\tfor cursor.Next(context.TODO()) {\n\t\tvar elem Record\n\t\terr := cursor.Decode(&elem)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\t\tresult = append(result, elem)\n\t}\n\tcursor.Close(context.TODO())\n\treturn &result\n}", "func RowToActualStopInfos(row *sql.Rows) []ActualTripStopInfo {\n result := []ActualTripStopInfo{}\n for row.Next() {\n var tripNumber int\n var date string\n var scheduledStartTime string\n var stopNumber int\n var scheduledArrivalTime string\n var actualStartTime string\n var actualArrivalTime string\n var numberOfPassengerIn int\n var numberOfPassengerOut int\n result = append(result, ActualTripStopInfo{\n TripNumber: tripNumber,\n Date: date,\n ScheduledStartTime: scheduledStartTime,\n StopNumber: stopNumber,\n ScheduledArrivalTime: scheduledArrivalTime,\n ActualStartTime: actualStartTime,\n ActualArrivalTime: actualArrivalTime,\n NumberOfPassengerIn: numberOfPassengerIn,\n NumberOfPassengerOut: numberOfPassengerOut,\n })\n }\n return result\n}", "func (m *sparse) Rows() func() *sparseRow {\n\ti := 0\n\tr := &sparseRow{}\n\n\treturn func() *sparseRow {\n\t\tif i == (len(m.ptr) - 1) {\n\t\t\treturn nil\n\t\t}\n\n\t\tstart := m.ptr[i]\n\t\tend := m.ptr[i+1]\n\n\t\tr.index = 
i\n\t\tr.ind = m.ind[start:end]\n\t\tr.val = m.val[start:end]\n\t\ti++\n\n\t\treturn r\n\t}\n}", "func (r *Runner) Rows() (*sql.Rows, error) {\n\tq, err := r.query.Construct()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn r.db.QueryContext(r.ctx, q.Query(), q.Args()...)\n}", "func rowsToResultStrings(ctx *sql.Context, iter sql.RowIter) ([]string, error) {\n\tvar results []string\n\tif iter == nil {\n\t\treturn results, nil\n\t}\n\n\tfor {\n\t\trow, err := iter.Next(ctx)\n\t\tif err == io.EOF {\n\t\t\treturn results, nil\n\t\t} else if err != nil {\n\t\t\tdrainIteratorIgnoreErrors(ctx, iter)\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tfor _, col := range row {\n\t\t\t\tresults = append(results, toSqlString(col))\n\t\t\t}\n\t\t}\n\t}\n}", "func (s *Statement) Row() (values []interface{}) {\n\tfor i := 0; i < s.Columns(); i++ {\n\t\tvalues = append(values, s.Column(i))\n\t}\n\treturn\n}", "func (m *Message) getRows() Rows {\n\t// Read the column count and column names.\n\tcolumns := make([]string, m.getUint64())\n\n\tfor i := range columns {\n\t\tcolumns[i] = m.getString()\n\t}\n\n\trows := Rows{\n\t\tColumns: columns,\n\t\tmessage: m,\n\t}\n\treturn rows\n}", "func partitionRecords(size int, records []types.Record) [][]types.Record {\n\tnumberOfPartitions := len(records) / size\n\tif len(records)%size != 0 {\n\t\tnumberOfPartitions++\n\t}\n\n\tpartitions := make([][]types.Record, 0, numberOfPartitions)\n\tfor i := 0; i < numberOfPartitions; i++ {\n\t\tstart := size * i\n\t\tend := size * (i + 1)\n\t\tif end > len(records) {\n\t\t\tend = len(records)\n\t\t}\n\n\t\tpartitions = append(partitions, records[start:end])\n\t}\n\n\treturn partitions\n}", "func getRecordWrapper(numFound int, keys [][]byte, pointers []interface{}) (records Records, err error) {\n\tif numFound == 0 {\n\t\treturn nil, ErrScansNoResult\n\t}\n\n\trecords = Records{}\n\tfor i := 0; i < numFound; i++ {\n\t\trecords = append(records, pointers[i].(*Record))\n\t}\n\n\treturn records, nil\n}", 
"func (r *Representer) RepresentationFromRows(rows *sql.Rows) *Table {\n\treturn nil\n}", "func RowToTripStopInfos(row *sql.Rows) []TripStopInfo {\n result := []TripStopInfo{}\n for row.Next() {\n var tripNumber int\n var stopNumber int\n var sequenceNumber int\n var drivingTime float32\n result = append(result, TripStopInfo{\n TripNumber: tripNumber,\n StopNumber: stopNumber,\n SequenceNumber: sequenceNumber,\n DrivingTime: drivingTime,\n })\n }\n return result\n}", "func (c *ConnCtx) InsertRecordsRowByRow(records []*SongRecord) error {\n\ttemplate := c.getInsertQueryTempalte()\n\tstmt, err := c.Conn.PrepareNamed(template)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stmt.Close()\n\n\tfor _, item := range records {\n\t\tresult, err := stmt.Exec(&item)\n\t\tif err != nil {\n\t\t\tErrorF(fmt.Sprintf(\"execute sql error: %s\", err.Error()))\n\t\t\tcontinue\n\t\t}\n\t\trowsAffected, err := result.RowsAffected()\n\t\tif err != nil {\n\t\t\tErrorF(fmt.Sprintf(\"pg server executes error: %s\", err.Error()))\n\t\t\tcontinue\n\t\t}\n\t\tif rowsAffected != 1 {\n\t\t\tWarningF(fmt.Sprintf(\"insert record affected row error: %d\", rowsAffected))\n\t\t}\n\t\tDebugF(\"insert done: %#v\", item)\n\t}\n\n\treturn nil\n}", "func (f *FakeTable) ReadRows(ovs *libovsdb.OvsdbClient, readRowArgs ovsdb.ReadRowArgs) ([]map[string]interface{}, error) {\n\tif f.ReadRowsFunc != nil {\n\t\treturn f.ReadRowsFunc(ovs, readRowArgs)\n\t}\n\tm := make([]map[string]interface{}, 10)\n\treturn m, nil\n}", "func readUsersFromRows(rows *sql.Rows) ([]*User, error) {\n\tvar users []*User\n\n\tfor rows.Next() {\n\t\tu := User{}\n\t\terr := rows.Scan(\n\t\t\t&u.ID,\n\t\t\t&u.Username,\n\t\t\t&u.Email,\n\t\t\t&u.Bio,\n\t\t\t&u.Password,\n\t\t\t&u.Clicks,\n\t\t\t&u.LastClick,\n\t\t\t&u.IsAdmin,\n\t\t)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tusers = append(users, &u)\n\t}\n\n\treturn users, nil\n}", "func (e *commonFormatEncoder) Row(tp int, row *[]interface{}, seqno uint64) ([]byte, 
error) {\n\tcf := convertRowToCommonFormat(tp, row, e.inSchema, seqno, e.filter)\n\treturn CommonFormatEncode(cf)\n}", "func (t *Table) Rows(fi, li int) []map[string]interface{} {\r\n\r\n\tif fi < 0 || fi >= len(t.header.cols) {\r\n\t\tpanic(tableErrInvRow)\r\n\t}\r\n\tif li < 0 {\r\n\t\tli = len(t.rows) - 1\r\n\t} else if li < 0 || li >= len(t.rows) {\r\n\t\tpanic(tableErrInvRow)\r\n\t}\r\n\tif li < fi {\r\n\t\tpanic(\"Last index less than first index\")\r\n\t}\r\n\tres := make([]map[string]interface{}, li-li+1)\r\n\tfor ri := fi; ri <= li; ri++ {\r\n\t\ttrow := t.rows[ri]\r\n\t\trmap := make(map[string]interface{})\r\n\t\tfor ci := 0; ci < len(t.header.cols); ci++ {\r\n\t\t\tc := t.header.cols[ci]\r\n\t\t\trmap[c.id] = trow.cells[c.order].value\r\n\t\t}\r\n\t\tres = append(res, rmap)\r\n\t}\r\n\treturn res\r\n}", "func RowToBuses(row *sql.Rows) []Bus {\n result := []Bus{}\n for row.Next() {\n var busID int\n var model string\n var year int\n row.Scan(&busID, &model, &year)\n result = append(result, Bus{\n BusID: busID,\n Model: model,\n Year: year,\n })\n }\n return result\n}", "func (serializer *batchSerializer) parseBatchRecord(records []IRecord) (*batchRecord, error) {\n batch := &batchRecord{\n records: make([]*binaryRecord, 0, len(records)),\n }\n\n for _, record := range records {\n bRecord, err := serializer.bSerializer.dhRecord2BinaryRecord(record)\n if err != nil {\n return nil, err\n }\n batch.records = append(batch.records, bRecord)\n }\n return batch, nil\n}", "func MakeRowTrusted(fields []*querypb.Field, row *querypb.Row) []Value {\n\tsqlRow := make([]Value, len(row.Lengths))\n\tvar offset int64\n\tfor i, length := range row.Lengths {\n\t\tif length < 0 {\n\t\t\tcontinue\n\t\t}\n\t\tsqlRow[i] = MakeTrusted(fields[i].Type, row.Values[offset:offset+length])\n\t\toffset += length\n\t}\n\treturn sqlRow\n}", "func (empHandler *EmployeeHandler) storeRecord() []Employee {\n\tvar emp []Employee\n\tdis, err := empHandler.DB.Query(\"select id, name, age, 
gender, role from employee\")\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tfor dis.Next() {\n\t\tvar row Employee\n\t\terr = dis.Scan(&row.Id, &row.Name, &row.Age, &row.Gender, &row.Role)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\temp = append(emp, row)\n\t}\n\treturn emp\n}", "func insertRows(rows []vmparser.Row) error {\n\t// ctx := GetInsertCtx()\n\t// defer PutInsertCtx(ctx)\n\n\tctx := &InsertCtx{mrs: model.MetricRows{}}\n\t// ctx.Reset(len(rows))\n\tfor i := range rows {\n\t\tr := &rows[i]\n\t\tctx.Labels = ctx.Labels[:0]\n\t\tctx.AddLabel(\"\", r.Metric)\n\t\tfor j := range r.Tags {\n\t\t\ttag := &r.Tags[j]\n\t\t\tctx.AddLabel(tag.Key, tag.Value)\n\t\t}\n\t\tctx.WriteDataPoint(nil, ctx.Labels, r.Timestamp, r.Value)\n\t}\n\trowsInserted.Add(len(rows))\n\trowsPerInsert.Update(float64(len(rows)))\n\treturn ctx.FlushBufs()\n}", "func SaveReturningPackageRows(ctx context.Context, db SQLHandle, inputs ...*PackageRow) (err error) {\n\trows := PackageRows(inputs)\n\t_, err = queryWithJSONArgs(ctx, db, rows.ReceiveRows, SQLSaveReturningPackageRows, rows)\n\tif err != nil {\n\t\treturn formatError(\"SaveReturningPackageRows\", err)\n\t}\n\treturn nil\n}", "func FetchRows(rows *sql.Rows, dst interface{}) error {\n\tvar columns []string\n\tvar err error\n\n\t// Destination.\n\tdstv := reflect.ValueOf(dst)\n\n\tif dstv.IsNil() || dstv.Kind() != reflect.Ptr {\n\t\treturn db.ErrExpectingPointer\n\t}\n\n\tif dstv.Elem().Kind() != reflect.Slice {\n\t\treturn db.ErrExpectingSlicePointer\n\t}\n\n\tif dstv.Kind() != reflect.Ptr || dstv.Elem().Kind() != reflect.Slice || dstv.IsNil() {\n\t\treturn db.ErrExpectingSliceMapStruct\n\t}\n\n\tif columns, err = rows.Columns(); err != nil {\n\t\treturn err\n\t}\n\n\tslicev := dstv.Elem()\n\titem_t := slicev.Type().Elem()\n\n\treset(dst)\n\n\tfor rows.Next() {\n\n\t\titem, err := fetchResult(item_t, rows, columns)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tslicev = reflect.Append(slicev, 
reflect.Indirect(item))\n\t}\n\n\trows.Close()\n\n\tdstv.Elem().Set(slicev)\n\n\treturn nil\n}", "func ToTransmissionRecordModels(trs []TransmissionRecord) []models.TransmissionRecord {\n\tmodels := make([]models.TransmissionRecord, len(trs))\n\tfor i, tr := range trs {\n\t\tmodels[i] = ToTransmissionRecordModel(tr)\n\t}\n\treturn models\n}", "func (s *Service) Records(c context.Context, types []int64, mid, stime, etime int64, order, sort string, pn, ps int32) (res []*model.Record, total int32, err error) {\n\tvar midAts []int64\n\tif res, total, err = s.search.RecordPaginate(c, types, mid, stime, etime, order, sort, pn, ps); err != nil {\n\t\tlog.Error(\"s.search.RecordPaginate(%d,%d,%d,%d,%s,%s) error(%v)\", mid, sort, pn, ps, stime, etime, err)\n\t\treturn\n\t}\n\tif res == nil {\n\t\tres = _emptyRecords\n\t\treturn\n\t}\n\tfor _, r := range res {\n\t\tr.Message = template.HTMLEscapeString(r.Message)\n\t\tif len(r.Ats) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tvar ats []int64\n\t\tif ats, err = xstr.SplitInts(r.Ats); err != nil {\n\t\t\tlog.Error(\"xstr.SplitInts(%s) error(%v)\", r.Ats, err)\n\t\t\terr = nil\n\t\t}\n\t\tmidAts = append(midAts, ats...)\n\t}\n\tif len(midAts) == 0 {\n\t\treturn\n\t}\n\taccMap, _ := s.getAccInfo(c, midAts)\n\tfor _, r := range res {\n\t\tr.FillAts(accMap)\n\t}\n\treturn\n}", "func (sq *testQueryService) addGeneratedRows(from, to int) {\n\tvar rows [][]sqltypes.Value\n\t// ksids has keyspace ids which are covered by the shard key ranges -40 and 40-80.\n\tksids := []uint64{0x2000000000000000, 0x6000000000000000}\n\n\tfor id := from; id < to; id++ {\n\t\t// Only return the rows which are covered by this shard.\n\t\tshardIndex := id % 2\n\t\tif sq.shardCount == 1 || shardIndex == sq.shardIndex {\n\t\t\tidValue := sqltypes.NewInt64(int64(id))\n\n\t\t\trow := []sqltypes.Value{\n\t\t\t\tidValue,\n\t\t\t\tsqltypes.NewVarBinary(fmt.Sprintf(\"Text for %v\", id)),\n\t\t\t}\n\t\t\tif !sq.omitKeyspaceID {\n\t\t\t\trow = append(row, 
sqltypes.NewVarBinary(fmt.Sprintf(\"%v\", ksids[shardIndex])))\n\t\t\t}\n\t\t\trows = append(rows, row)\n\t\t}\n\t}\n\n\tif sq.rows == nil {\n\t\tsq.rows = rows\n\t} else {\n\t\tsq.rows = append(sq.rows, rows...)\n\t}\n}", "func (board Board)Records()(interface{}) {\n if board.Id < 1 {\n return []bool{}\n }\n board_head := BoardHead{}\n Db.Where(\"id = ?\", board.BoardHeadId).First(&board_head)\n if board_head.BoardType == \"sample\" {\n records := []Sample{}\n Db.Where(\"board_id = ?\", board.Id).Find(&records)\n return records\n }\n if board_head.BoardType == \"primer\" {\n records := []Primer{}\n Db.Where(\"board_id = ?\", board.Id).Find(&records)\n return records\n } else {\n rows, _ := Db.Table(\"reactions\").Select(\"reactions.id, reactions.hole, samples.name, primers.name\").Joins(\"INNER JOIN samples ON samples.id = reactions.sample_id INNER JOIN primers ON primers.id = reactions.primer_id\").Where(\"reactions.board_id = ?\", board.Id).Rows()\n result := []map[string]interface{}{}\n for rows.Next() {\n var id int\n var hole, sample, primer string\n rows.Scan(&id, &hole, &sample, &primer)\n d := map[string]interface{}{\n \"id\": id,\n \"sample\": sample,\n \"primer\": primer,\n \"hole\": hole,\n }\n result = append(result, d)\n }\n return result\n }\n}", "func (s *SqliteServer) ReadRecords(hashes []gdp.Hash) ([]gdp.Record, error) {\n\tif len(hashes) == 0 {\n\t\treturn nil, nil\n\t}\n\n\thexHashes := make([]string, 0, len(hashes))\n\tfor _, hash := range hashes {\n\t\thexHashes = append(hexHashes, fmt.Sprintf(\"\\\"%X\\\"\", hash))\n\t}\n\n\tqueryString := fmt.Sprintf(\n\t\t\"SELECT hash, recno, timestamp, accuracy, prevhash, value, sig FROM log_entry WHERE hex(hash) IN (%s)\",\n\t\tstrings.Join(hexHashes, \",\"),\n\t)\n\trows, err := s.db.Query(queryString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trecords, err := parseRecordRows(rows)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn records, nil\n}", "func (b *RecordBuffer) Records() 
[]interface{} {\n\treturn b.recordsInBuffer\n}", "func GetRows(currency string) (*sql.Rows, error) {\n\tif !common.ValidateCurrency(currency) {\n\t\treturn nil, errors.New(\"invalid currency\")\n\t}\n\t// TODO: implement date range windowing\n\treturn db.Queryx(fmt.Sprintf(\"SELECT * FROM %s\", currency))\n}", "func (f *fragment) rows(start uint64, filters ...rowFilter) []uint64 {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\treturn f.unprotectedRows(start, filters...)\n}", "func RowBatchToVizierRowBatch(rb *schemapb.RowBatchData, tableID string) (*vizierpb.RowBatchData, error) {\n\tcols := make([]*vizierpb.Column, len(rb.Cols))\n\tfor i, col := range rb.Cols {\n\t\tc, err := colToVizierCol(col)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcols[i] = c\n\t}\n\n\treturn &vizierpb.RowBatchData{\n\t\tTableID: tableID,\n\t\tNumRows: rb.NumRows,\n\t\tEow: rb.Eow,\n\t\tEos: rb.Eos,\n\t\tCols: cols,\n\t}, nil\n}", "func selectRows(db *gorm.DB) ([]uint, error) {\n\n\tids := []uint{}\n\n\trows, err := db.Select(\"DISTINCT products_product.id,products_product.created_at\").Rows()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar (\n\t\tid uint\n\t\tcreatedAt time.Time\n\t)\n\n\tfor rows.Next() {\n\t\terr = rows.Scan(&id, &createdAt)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tids = append(ids, id)\n\t}\n\n\treturn ids, nil\n\n}", "func readRows(db *sql.DB, query string, dataChan chan []sql.RawBytes, quitChan chan bool, goChan chan bool, csvHeader bool) {\n\trows, err := db.Query(query)\n\tdefer rows.Close()\n\tif err != nil {\n\t\tlog.Print(err)\n\t\tos.Exit(1)\n\t}\n\n\tcols, err := rows.Columns()\n\tcheckErr(err)\n\n\t// Write columns as a header line\n\tif csvHeader {\n\t\theaders := make([]sql.RawBytes, len(cols))\n\t\tfor i, col := range cols {\n\t\t\theaders[i] = []byte(col)\n\t\t}\n\t\tdataChan <- headers\n\t\t<-goChan\n\t}\n\n\t// Need to scan into empty interface since we don't know how many columns a query might return\n\tscanVals := 
make([]interface{}, len(cols))\n\tvals := make([]sql.RawBytes, len(cols))\n\tfor i := range vals {\n\t\tscanVals[i] = &vals[i]\n\t}\n\n\tfor rows.Next() {\n\t\terr := rows.Scan(scanVals...)\n\t\tcheckErr(err)\n\n\t\tdataChan <- vals\n\n\t\t// Block and wait for writeRows() to signal back it has consumed the data\n\t\t// This is necessary because sql.RawBytes is a memory pointer and when rows.Next()\n\t\t// loops and change the memory address before writeRows can properly process the values\n\t\t<-goChan\n\t}\n\n\terr = rows.Err()\n\tcheckErr(err)\n\n\tclose(dataChan)\n\tquitChan <- true\n}", "func toRecord(cache airtabledb.DB, src Feature, dst interface{}) {\n\tdV := reflect.ValueOf(dst).Elem().FieldByName(\"Fields\")\n\tsV := reflect.ValueOf(src)\n\tcopyFields(cache, sV, dV)\n}", "func ExtractRecords() (records []record) {\n // Open the input file and create a scanner to parse it\n file, _ := os.Open(\"Input.txt\")\n\tscanner := bufio.NewScanner(file)\n\n // Var to store the various fields which represents a record\n var fields string\n\t\n\tfor scanner.Scan() {\n // If a blank row isn't scan, add the row to the record representation\n\t\tif scanner.Text() != \"\" {\n\t\t\tfields += scanner.Text() + \" \"\n\t\t} else {\n // If a blank row is scan, convert the record representation to an actual record and add it to the records slice\n\t\t\trecords = append(records, ConvertRecord(fields))\n\n // Then reset the representation\n\t\t\tfields = \"\"\n\t\t}\n\t}\n // Add the last record, which is lost due to the end of file\n\trecords = append(records, ConvertRecord(fields))\n\tfile.Close()\n\n return\n}", "func (record) MarshalRecordsToBuffer(records []common.Record, buffer []byte) error {\n\tif len(records)*recordLength > len(buffer) {\n\t\treturn fmt.Errorf(\"buffer %d is not big enough for records %d\", len(buffer), len(records)*recordLength)\n\t}\n\n\tfor i, r := range records {\n\t\tbuff := buffer[i*recordLength : (i+1)*recordLength]\n\n\t\tif 
!validation.ValidTraceID(r.ID) { // todo: remove this check. maybe have a max id size of 128 bits?\n\t\t\treturn errors.New(\"ids must be 128 bit\")\n\t\t}\n\n\t\tmarshalRecord(r, buff)\n\t}\n\n\treturn nil\n}", "func NewRecordset(rows [][]interface{}, fields []string, offset int) *Recordset {\n\treturn &Recordset{\n\t\trows: rows,\n\t\tfields: fields,\n\t\toffset: offset,\n\t}\n}", "func (t *JSONTable) PartitionRows(ctx *sql.Context, partition sql.Partition) (sql.RowIter, error) {\n\treturn t.b.Build(ctx, t, nil)\n}", "func (f *File) FromRows(rows *sql.Rows) error {\n\tvar scanf struct {\n\t\tID int\n\t\tCreateTime sql.NullTime\n\t\tUpdateTime sql.NullTime\n\t\tType sql.NullString\n\t\tName sql.NullString\n\t\tSize sql.NullInt64\n\t\tModifiedAt sql.NullTime\n\t\tUploadedAt sql.NullTime\n\t\tContentType sql.NullString\n\t\tStoreKey sql.NullString\n\t\tCategory sql.NullString\n\t}\n\t// the order here should be the same as in the `file.Columns`.\n\tif err := rows.Scan(\n\t\t&scanf.ID,\n\t\t&scanf.CreateTime,\n\t\t&scanf.UpdateTime,\n\t\t&scanf.Type,\n\t\t&scanf.Name,\n\t\t&scanf.Size,\n\t\t&scanf.ModifiedAt,\n\t\t&scanf.UploadedAt,\n\t\t&scanf.ContentType,\n\t\t&scanf.StoreKey,\n\t\t&scanf.Category,\n\t); err != nil {\n\t\treturn err\n\t}\n\tf.ID = strconv.Itoa(scanf.ID)\n\tf.CreateTime = scanf.CreateTime.Time\n\tf.UpdateTime = scanf.UpdateTime.Time\n\tf.Type = scanf.Type.String\n\tf.Name = scanf.Name.String\n\tf.Size = int(scanf.Size.Int64)\n\tf.ModifiedAt = scanf.ModifiedAt.Time\n\tf.UploadedAt = scanf.UploadedAt.Time\n\tf.ContentType = scanf.ContentType.String\n\tf.StoreKey = scanf.StoreKey.String\n\tf.Category = scanf.Category.String\n\treturn nil\n}", "func ReadRows(r Rows) Stream {\n c, _ := r.(io.Closer)\n return &rowStream{rows: r, maybeCloser: maybeCloser{c: c}}\n}", "func RowsFormater(rows *sql.Rows) {\n\tcols, err := rows.Columns()\n\tif err != nil {\n\t\tglog.Errorln(err)\n\t}\n\ttable := tablewriter.NewWriter(os.Stdout)\n\ttable.SetHeader(cols)\n\tdata 
:= make([][]string, 1)\n\tcount := 0\n\tfor rows.Next() {\n\t\tcolumns := make([]interface{}, len(cols))\n\t\tcolumnPointers := make([]interface{}, len(cols))\n\t\tfor i, _ := range columns {\n\t\t\tcolumnPointers[i] = &columns[i]\n\t\t}\n\n\t\t// Scan the result into the column pointers...\n\t\tif err := rows.Scan(columnPointers...); err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\n\t\t// Create our map, and retrieve the value for each column from the pointers slice,\n\t\t// storing it in the map with the name of the column as the key.\n\t\trow := make([]string, 0)\n\t\tfor i, _ := range cols {\n\t\t\tval := columnPointers[i].(*interface{})\n\t\t\trow = append(row, interface2String(*val))\n\t\t}\n\n\t\tdata = append(data, row)\n\t\tcount = count + 1\n\t}\n\tfor _, v := range data {\n\t\ttable.Append(v)\n\t}\n\ttable.Render()\n\tif count > 0 {\n\t\tfmt.Printf(\"(%d rows of records)\\n\", count)\n\t}\n}", "func (r record) MarshalRecords(records []common.Record) ([]byte, error) {\n\trecordBytes := make([]byte, len(records)*recordLength)\n\n\terr := r.MarshalRecordsToBuffer(records, recordBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn recordBytes, nil\n}", "func (r RecordV1) toRecord() Record {\n\treturn Record{\n\t\tType: r.Type,\n\t\tName: r.Name,\n\t\tAppliedAt: r.AppliedAt,\n\t}\n}", "func GetAllRecords(db *sql.DB, id int) ([]Record, error) {\n\trows, err := db.Query(`SELECT * FROM user_records WHERE user_id = $1`, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar records []Record\n\tfor rows.Next() {\n\t\tvar record Record\n\t\terr := rows.Scan(\n\t\t\t&record.ID,\n\t\t\t&record.Weight,\n\t\t\t&record.Reps,\n\t\t\t&record.RPE,\n\t\t\t&record.DatePerformed,\n\t\t\t&record.ExerciseID,\n\t\t\t&record.UserID,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trecords = append(records, record)\n\t}\n\n\treturn records, nil\n}", "func (s *Store) SortedRecords() []Record {\n\trecords := make([]Record, 0, s.sorted.Len())\n\tfor e := 
s.sorted.Front(); e != nil; e = e.Next() {\n\t\trecord := e.Value.(txRecord)\n\t\trecords = append(records, record.record(s))\n\t}\n\treturn records\n}", "func (sink *influxdbSink) parseRawQueryRow(rawRow influx_models.Row) ([]core.TimestampedMetricValue, error) {\n\tvals := make([]core.TimestampedMetricValue, len(rawRow.Values))\n\twasInt := make(map[string]bool, 1)\n\tfor i, rawVal := range rawRow.Values {\n\t\tval := core.TimestampedMetricValue{}\n\n\t\tif ts, err := time.Parse(time.RFC3339, rawVal[0].(string)); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Unable to parse timestamp %q in series %q\", rawVal[0].(string), rawRow.Name)\n\t\t} else {\n\t\t\tval.Timestamp = ts\n\t\t}\n\n\t\tif err := tryParseMetricValue(\"value\", rawVal, &val.MetricValue, 1, wasInt); err != nil {\n\t\t\tglog.Errorf(\"Unable to parse field \\\"value\\\" in series %q: %v\", rawRow.Name, err)\n\t\t\treturn nil, fmt.Errorf(\"Unable to parse values in series %q\", rawRow.Name)\n\t\t}\n\n\t\tvals[i] = val\n\t}\n\n\tif wasInt[\"value\"] {\n\t\tfor i := range vals {\n\t\t\tvals[i].MetricValue.ValueType = core.ValueInt64\n\t\t}\n\t} else {\n\t\tfor i := range vals {\n\t\t\tvals[i].MetricValue.ValueType = core.ValueFloat\n\t\t}\n\t}\n\n\treturn vals, nil\n}", "func QueryReturnRows(query string, db *sql.DB, arg ...interface{}) (bool, []string) {\n\trows, err := db.Query(query, arg...)\n\tCheck(err)\n\tdefer rows.Close()\n\n\tvar items []string\n\tfor rows.Next() {\n\t\tvar currentItem string\n\t\terr := rows.Scan(&currentItem)\n\t\tCheck(err)\n\n\t\titems = append(items, currentItem)\n\t}\n\n\tif len(items) < 1 {\n\t\treturn false, []string{}\n\t}\n\n\treturn true, items\n}", "func ToTestRecords(searchResp *frontend.SearchResponse, imgBaseURL string) []*TestRecord {\n\t// Group the results by test.\n\tretMap := map[types.TestName]*TestRecord{}\n\tfor _, oneDigest := range searchResp.Digests {\n\t\ttestNameVal := oneDigest.ParamSet[types.PRIMARY_KEY_FIELD]\n\t\tif len(testNameVal) == 0 
{\n\t\t\tsklog.Errorf(\"Error: Digest '%s' has no primaryKey in paramset\", oneDigest.Digest)\n\t\t\tcontinue\n\t\t}\n\n\t\tdigestInfo := &DigestInfo{\n\t\t\tSRDigest: oneDigest,\n\t\t\tURL: DigestUrl(imgBaseURL, oneDigest.Digest),\n\t\t}\n\n\t\ttestName := types.TestName(oneDigest.ParamSet[types.PRIMARY_KEY_FIELD][0])\n\t\tif found, ok := retMap[testName]; ok {\n\t\t\tfound.Digests = append(found.Digests, digestInfo)\n\t\t} else {\n\t\t\tretMap[testName] = &TestRecord{\n\t\t\t\tTestName: testName,\n\t\t\t\tDigests: []*DigestInfo{digestInfo},\n\t\t\t}\n\t\t}\n\t}\n\n\t// Put the records into an array and return them.\n\tret := make([]*TestRecord, 0, len(retMap))\n\tfor _, oneTestRec := range retMap {\n\t\tret = append(ret, oneTestRec)\n\t}\n\n\treturn ret\n}", "func (f *fragment) rowFromStorage(rowID uint64) *Row {\n\t// Only use a subset of the containers.\n\t// NOTE: The start & end ranges must be divisible by container width.\n\t//\n\t// Note that OffsetRange now returns a new bitmap which uses frozen\n\t// containers which will use copy-on-write semantics. 
The actual bitmap\n\t// and Containers object are new and not shared, but the containers are\n\t// shared.\n\tdata := f.storage.OffsetRange(f.shard*ShardWidth, rowID*ShardWidth, (rowID+1)*ShardWidth)\n\n\trow := &Row{\n\t\tsegments: []rowSegment{{\n\t\t\tdata: data,\n\t\t\tshard: f.shard,\n\t\t\twritable: true,\n\t\t}},\n\t}\n\trow.invalidateCount()\n\n\treturn row\n}", "func (o GetSrvRecordResultOutput) Records() GetSrvRecordRecordArrayOutput {\n\treturn o.ApplyT(func(v GetSrvRecordResult) []GetSrvRecordRecord { return v.Records }).(GetSrvRecordRecordArrayOutput)\n}", "func NewIterFromRows(rows []KTV) Iter {\n\treturn &memIter{rows: rows, index: -1}\n}", "func (mp *inmemoryPart) InitFromRows(rows []rawRow) {\n\tif len(rows) == 0 {\n\t\tlogger.Panicf(\"BUG: Inmemory.InitFromRows must accept at least one row\")\n\t}\n\n\tmp.Reset()\n\trrm := getRawRowsMarshaler()\n\trrm.marshalToInmemoryPart(mp, rows)\n\tputRawRowsMarshaler(rrm)\n\tmp.creationTime = fasttime.UnixTimestamp()\n}", "func (swfs *SurveyWiFiScans) FromRows(rows *sql.Rows) error {\n\tfor rows.Next() {\n\t\tscanswfs := &SurveyWiFiScan{}\n\t\tif err := scanswfs.FromRows(rows); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*swfs = append(*swfs, scanswfs)\n\t}\n\treturn nil\n}", "func makeRecords(v interface{}, header Header) [][]string {\n\tval := reflect.ValueOf(v)\n\n\tsize := val.Len()\n\tout := make([][]string, size)\n\tfor i := 0; i < size; i++ {\n\t\trecord := makeRecord(val.Index(i).Interface(), header)\n\t\tout[i] = record\n\t}\n\treturn out\n}", "func (l *ImmutableTimestampedLog) LoadRecordsRaw(fromIdx, toIdx uint32, descending bool) ([][]byte, error) {\n\tif fromIdx > toIdx {\n\t\treturn nil, nil\n\t}\n\tret := make([][]byte, 0, toIdx-fromIdx+1)\n\tfromIdxInt := int(fromIdx)\n\ttoIdxInt := int(toIdx)\n\tif !descending {\n\t\tfor i := fromIdxInt; i <= toIdxInt; i++ {\n\t\t\tr, err := l.getRawRecordAtIndex(uint32(i))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tret = append(ret, 
r)\n\t\t}\n\t} else {\n\t\tfor i := toIdxInt; i >= fromIdxInt; i-- {\n\t\t\tr, err := l.getRawRecordAtIndex(uint32(i))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tret = append(ret, r)\n\t\t}\n\t}\n\treturn ret, nil\n}", "func RowToQueryResult(row *sql.Row, colDefines []database.Column) (QueryResult, error) {\n\tcols := database.Columns(colDefines).Names()\n\tcolumns := make([]interface{}, len(cols))\n\tcolumnPointers := make([]interface{}, len(cols))\n\tfor i := range columns {\n\t\tcolumnPointers[i] = &columns[i]\n\t}\n\t// Scan the result into the column pointers...\n\tif err := row.Scan(columnPointers...); err != nil {\n\t\treturn nil, err\n\t}\n\n\trowData := makeRowDataSet(colDefines)\n\tfor i, colName := range cols {\n\t\tval := columnPointers[i].(*interface{})\n\t\trowData[colName] = ColData{Data: val, DataType: rowData[colName].DataType}\n\t}\n\n\treturn QueryResult(rowData), nil\n}", "func (r rowsRes) Next(dest []driver.Value) error {\n\terr := r.my.ScanRow(r.row)\n\tif err != nil {\n\t\treturn errFilter(err)\n\t}\n\tfor i, col := range r.row {\n\t\tif col == nil {\n\t\t\tdest[i] = nil\n\t\t\tcontinue\n\t\t}\n\t\tswitch c := col.(type) {\n\t\tcase time.Time:\n\t\t\tdest[i] = c\n\t\t\tcontinue\n\t\tcase mysql.Timestamp:\n\t\t\tdest[i] = c.Time\n\t\t\tcontinue\n\t\tcase mysql.Date:\n\t\t\tdest[i] = c.Localtime()\n\t\t\tcontinue\n\t\t}\n\t\tv := reflect.ValueOf(col)\n\t\tswitch v.Kind() {\n\t\tcase reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\t// this contains time.Duration to\n\t\t\tdest[i] = v.Int()\n\t\tcase reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\tu := v.Uint()\n\t\t\tif u > math.MaxInt64 {\n\t\t\t\tpanic(\"Value to large for int64 type\")\n\t\t\t}\n\t\t\tdest[i] = int64(u)\n\t\tcase reflect.Float32, reflect.Float64:\n\t\t\tdest[i] = v.Float()\n\t\tcase reflect.Slice:\n\t\t\tif v.Type().Elem().Kind() == reflect.Uint8 {\n\t\t\t\tdest[i] = 
v.Interface().([]byte)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfallthrough\n\t\tdefault:\n\t\t\tpanic(fmt.Sprint(\"Unknown type of column: \", v.Type()))\n\t\t}\n\t}\n\treturn nil\n}", "func queryRows(db *sql.DB) {\n\t// Set the command to execute\n\tvar sql = `\n\t\tselect id, first_name, last_name\n\t\tfrom ` + dbname + `.DemoTable;\n\t`\n\n\t// Get row results\n\tvar rows, rowErr = db.Query(sql)\n\tif rowErr != nil {\n\t\tfmt.Println(rowErr)\n\t\treturn\n\t}\n\tdefer rows.Close()\n\n\t// Iterate over the rowset and output the demo table columns\n\tfor rows.Next() {\n\t\tvar id int\n\t\tvar fname string\n\t\tvar lname string\n\t\tif err := rows.Scan(&id, &fname, &lname); err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\tfmt.Printf(\"id: %d, fname: %s, lname: %s\\n\", id, fname, lname)\n\t}\n}", "func (r *Relay) convertMessagesToKafkaSinkRecords(messages []interface{}) ([]*records.KafkaSinkRecord, error) {\n\tsinkRecords := make([]*records.KafkaSinkRecord, 0)\n\n\tfor i, v := range messages {\n\t\trelayMessage, ok := v.(*types.RelayMessage)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"unable to type assert incoming message as RelayMessage (index: %d)\", i)\n\t\t}\n\n\t\tif err := r.validateKafkaRelayMessage(relayMessage); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to validate kafka relay message (index: %d): %s\", i, err)\n\t\t}\n\n\t\tsinkRecords = append(sinkRecords, &records.KafkaSinkRecord{\n\t\t\tTopic: relayMessage.Value.Topic,\n\t\t\tKey: relayMessage.Value.Key,\n\t\t\tValue: relayMessage.Value.Value,\n\t\t\tTimestamp: time.Now().UTC().UnixNano(),\n\t\t\tOffset: relayMessage.Value.Offset,\n\t\t\tPartition: int32(relayMessage.Value.Partition),\n\t\t\tHeaders: convertKafkaHeaders(relayMessage.Value.Headers),\n\t\t})\n\t}\n\n\treturn sinkRecords, nil\n}", "func (e *LoadDataWorker) GetRows() [][]types.Datum {\n\treturn e.rows\n}", "func toArray(row []interface{}) ([]string, []bool) {\n\tstrRow := make([]string, len(row))\n\n\tquotes := make([]bool, 
len(row))\n\n\tfor idx, colVal := range row {\n\t\tif colVal == nil {\n\t\t\tlog.Warn(\"column[\", idx, \"] is null\")\n\t\t\tcontinue\n\t\t}\n\t\tswitch reflect.TypeOf(colVal).Kind() {\n\t\tcase reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\tstrRow[idx] = fmt.Sprintf(\"%d\", colVal)\n\t\t\tquotes[idx] = false\n\n\t\tcase reflect.Float32, reflect.Float64:\n\t\t\tstrRow[idx] = fmt.Sprintf(\"%f\", colVal)\n\t\t\tquotes[idx] = false\n\n\t\tcase reflect.Slice:\n\t\t\tv := colVal.([]uint8)\n\t\t\tbuffer := bytes.NewBuffer([]byte(\"0x\"))\n\t\t\tfor i := 0; i < len(v); i++ {\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"%.2x\", v[i]))\n\t\t\t}\n\t\t\tstrRow[idx] = buffer.String()\n\n\t\tcase reflect.String:\n\t\t\tv := mysql.Escape(colVal.(string))\n\t\t\tstrRow[idx] = v\n\t\t\tquotes[idx] = true\n\t\t}\n\t}\n\n\treturn strRow, quotes\n}", "func getRecords(res *RecordsResp, qntString string) {\n\t//Setting the default value of the query status to false.\n\t//If the query succeeds, at the end, we cange this status to true.\n\tres.Status = false\n\n\tqnt, err := strconv.Atoi(qntString)\n\tif err != nil {\n\t\tlog.Printf(\"Function getRecords: Something went wrong when converting the quantity of records from string to int.\\n %v\\n\", err)\n\t\treturn\n\t}\n\t\n\t// Connecting to the database\n session, err := mgo.Dial(\"localhost\");\n if err != nil {\n \tlog.Printf(\"Function getRecords: Error when opening connection to database.\\n %v\\n\", err)\n \treturn\n }\n defer session.Close()\n \n // Querying the database\n conn := session.DB(DATABASE_NAME).C(RECORDS_COLLECTION)\n if err := conn.Find(nil).Limit(qnt).All(&res.Records); err != nil {\n \tlog.Printf(\"Function getRecords: Error when querying database.\\n %v\\n\", err)\n \treturn\n }\n \n // Getting the User Data\n conn = session.DB(DATABASE_NAME).C(USERS_COLLECTION)\n for i, _ := range res.Records {\n \tif err := conn.FindId(res.Records[i].UserId).One(&res.Records[i].UserData); err != nil {\n 
\t\tlog.Printf(\"Function getRecords: Error when getting user data\\n %v\\n\", err)\n \t\treturn\n \t}\n }\n \n //Query succeeded\n res.Status = true\n}", "func TransactionArrayMarshalling(rows *sqlx.Rows) []byte {\n\tvar t types.TransactionPayload\n\tvar txs []byte\n\n\tfor rows.Next() {\n\t\ttx := types.Transaction{}\n\t\terr := rows.StructScan(&tx)\n\t\tif err != nil {\n\t\t\tlogger.Warn(\"Unable to retrieve rows: \" + err.Error())\n\t\t\treturn nil\n\t\t}\n\t\tt.Payload = append(t.Payload, tx)\n\t\tserializedPayload, err := json.Marshal(t.Payload)\n\t\tif err != nil {\n\t\t\tlogger.Warn(\"Unable to serialize payload: \" + err.Error())\n\t\t}\n\t\ttxs = serializedPayload\n\t}\n\tif err := rows.Close(); err != nil {\n\t\tlogger.Warn(\"Unable to close row connection: \" + err.Error())\n\t}\n\n\treturn txs\n}" ]
[ "0.6726777", "0.67042124", "0.62163883", "0.6109386", "0.6026409", "0.59923404", "0.58899695", "0.58857733", "0.5779613", "0.5778647", "0.5747893", "0.56930035", "0.56913865", "0.56230724", "0.5609854", "0.55732936", "0.5567313", "0.5530029", "0.551342", "0.5497782", "0.545771", "0.5378807", "0.5364863", "0.53523624", "0.53421175", "0.53201264", "0.5316915", "0.53010964", "0.53002036", "0.52639854", "0.52572703", "0.52417034", "0.51732534", "0.5162114", "0.5157685", "0.5157179", "0.51047903", "0.51037806", "0.51002896", "0.5094491", "0.5087671", "0.50853723", "0.5080812", "0.50781035", "0.5074195", "0.5074055", "0.5068736", "0.50538856", "0.5050229", "0.5048929", "0.50475025", "0.5039514", "0.5023499", "0.50179094", "0.500229", "0.4999025", "0.49928015", "0.49907756", "0.498282", "0.49794254", "0.49781203", "0.49693242", "0.49626142", "0.49580267", "0.49336803", "0.49315596", "0.4931276", "0.49109823", "0.49061212", "0.49049345", "0.49030268", "0.48838064", "0.48771086", "0.48680255", "0.4863948", "0.48585615", "0.48582536", "0.48531842", "0.48521975", "0.48473343", "0.4846733", "0.48464224", "0.4841058", "0.48286074", "0.48098356", "0.48066762", "0.4783066", "0.47803703", "0.47787476", "0.4776244", "0.47708806", "0.47680292", "0.47670737", "0.47660163", "0.47640884", "0.47627366", "0.4759317", "0.4747613", "0.47426212", "0.47393155" ]
0.8269437
0
Read a single key
func (s *sqlStore) Read(key string, opts ...store.ReadOption) ([]*store.Record, error) { options := store.ReadOptions{ Order: store.OrderAsc, } for _, o := range opts { o(&options) } db, queries, err := s.db(options.Database, options.Table) if err != nil { return nil, err } // read one record if !options.Prefix && !options.Suffix { row := db.QueryRow(s.options.Context, queries.ReadOne, key) record, err := s.rowToRecord(row) if err != nil { return nil, err } return []*store.Record{record}, nil } // read by pattern pattern := "%" if options.Prefix { pattern = key + pattern } if options.Suffix { pattern = pattern + key } var rows pgx.Rows if options.Limit > 0 { if options.Order == store.OrderAsc { rows, err = db.Query(s.options.Context, queries.ListAscLimit, pattern, options.Limit, options.Offset) } else { rows, err = db.Query(s.options.Context, queries.ListDescLimit, pattern, options.Limit, options.Offset) } } else { if options.Order == store.OrderAsc { rows, err = db.Query(s.options.Context, queries.ListAsc, pattern) } else { rows, err = db.Query(s.options.Context, queries.ListDesc, pattern) } } if err != nil { if err == pgx.ErrNoRows { return nil, nil } return nil, err } defer rows.Close() return s.rowsToRecords(rows) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func readKey(key string, path string) (string, error) {\n\tdata, err := ioutil.ReadFile(filepath.Join(path, key))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(data), nil\n}", "func ReadKey(keypath string) string {\n\tkey, err := ioutil.ReadFile(keypath)\n\tif err != nil {\n\t\tLog(fmt.Sprintf(\"create: Could not read %s:\", keypath), \"info\")\n\t\tos.Exit(1)\n\t}\n\tkeyString := string(key)\n\treturn keyString\n}", "func ReadKey(r io.Reader) ([]byte, error) {\n\tbr := bufio.NewReader(io.LimitReader(r, 100))\n\tline, err := br.ReadString('\\n')\n\tif err == io.EOF {\n\t\terr = nil\n\t}\n\tif err == nil {\n\t\t// Check that we're at EOF.\n\t\t_, err = br.ReadByte()\n\t\tif err == io.EOF {\n\t\t\terr = nil\n\t\t} else if err == nil {\n\t\t\terr = fmt.Errorf(\"file contains more than one line\")\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tline = strings.TrimSuffix(line, \"\\n\")\n\treturn DecodeKey(line)\n}", "func ReadKey(key string) (string, error) {\n\tdata, err := ioutil.ReadFile(filepath.Join(MountPath, key))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(data), nil\n}", "func (p *EtcdClientV3) Read(key string) (string, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), config.PersistentStoreTimeout)\n\tresp, err := p.clientV3.Get(ctx, key)\n\tcancel()\n\tif err != nil {\n\t\t//TODO: Change for the later versions of etcd\n\t\tif err == ErrKeyNotFound {\n\t\t\treturn \"\", NewPersistentStoreError(KeyNotFoundErr, key)\n\t\t}\n\t\treturn \"\", err\n\t}\n\tif len(resp.Kvs) > 1 {\n\t\treturn \"\", fmt.Errorf(\"too many keys were returned\")\n\t}\n\t// TODO: Validate for the later versions of etcd\n\t// This version of etcd doesn't return any error for nonexistent keys\n\tif len(resp.Kvs) == 0 {\n\t\treturn \"\", NewPersistentStoreError(KeyNotFoundErr, key)\n\t}\n\treturn string(resp.Kvs[0].Value[:]), nil\n}", "func readKey(key string) (string, error) {\n\tvar env Config\n\tif err := 
envconfig.Process(\"\", &env); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdata, err := ioutil.ReadFile(filepath.Join(env.NSXSecretPath, key))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(data), nil\n}", "func (k *Keyring) Read(ctx context.Context) (*string, error) {\n\tk.mu.Lock()\n\tdefer k.mu.Unlock()\n\n\tval, err := keyring.Get(service, k.key)\n\t// Make this more idiomatic\n\tif err == keyring.ErrNotFound {\n\t\treturn nil, nil\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not read from keyring\")\n\t}\n\treturn &val, nil\n}", "func (i *API) Read(kind, key string) ([]byte, error) {\n\treturn i.primaryStore.RawRead(kind, key)\n}", "func readKey(db *bolt.DB, name string) ([]byte, error) {\n\tkey := make([]byte, 32)\n\terr := db.Update(func(tx *bolt.Tx) error {\n\t\tb, err := tx.CreateBucketIfNotExists([]byte(\"settings\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// return key if exists\n\t\tk := b.Get([]byte(name))\n\t\tif k != nil {\n\t\t\tcopy(key, k)\n\t\t\treturn nil\n\t\t}\n\t\t// if key not found, generate one\n\t\t_, err = rand.Read(key)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn b.Put([]byte(name), key)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn key, nil\n}", "func (k *Key) Read() error {\n\tpath, err := k.path()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get ssh key path: %v\", err)\n\t}\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to load ssh key from %q: %v\", path, err)\n\t}\n\tdefer f.Close()\n\n\tbs, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to read ssh key: %v\", err)\n\t}\n\tsigner, err := ssh.ParsePrivateKey(bs)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to parse ssh key: %v\", err)\n\t}\n\tk.Signer = signer\n\treturn nil\n}", "func (fs *FileStore) Get(key string, r io.ReaderFrom) error {\n\tkey = fs.mangleKey(key, false)\n\tf, err := os.Open(filepath.Join(fs.baseDir, 
key))\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn ErrUnknownKey\n\t\t}\n\t\treturn fmt.Errorf(\"error opening key file: %w\", err)\n\t}\n\t_, err = r.ReadFrom(f)\n\tf.Close()\n\treturn err\n}", "func (ks *keystore) ReadKey(key string) (string, error) {\n\treturn ks.getSecret(\n\t\tks.createSecretInput(key),\n\t)\n}", "func (s *Mem) Read(key interface{}) ([]byte, error) {\n\ts.lock.RLock()\n\tdefer s.lock.RUnlock()\n\n\tif exists, err := s.Exists(key); err != nil {\n\t\treturn nil, err\n\t} else if !exists {\n\t\treturn nil, fmt.Errorf(\"not found %v\", key)\n\t}\n\tv := s.store[s.Key(key)]\n\treturn v, nil\n}", "func (r *Client) Get(key string) (string, error) { return r.Getd(key, \"\") }", "func (f *FileStore) Read(key string) ([]byte, error) {\n\tif err := ValidateKey(key); err != nil {\n\t\treturn nil, err\n\t}\n\tbytes, err := f.filesystem.ReadFile(f.getPathByKey(key))\n\tif os.IsNotExist(err) {\n\t\treturn bytes, ErrKeyNotFound\n\t}\n\treturn bytes, err\n}", "func (c *CacheTable) Read(key interface{}) {\n\n}", "func (c *ConsulStore) Read(root string, key Key, tag string) ([]byte, error) {\n\n\t//Convert to string as Consul only supports string based keys\n\tk := key.String()\n\tif k == \"\" {\n\t\treturn nil, pkgerrors.New(\"Key.String() returned an empty string\")\n\t}\n\n\tk = root + \"/\" + k + \"/\" + tag\n\tpair, _, err := c.client.Get(k, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif pair == nil {\n\t\treturn nil, nil\n\t}\n\treturn pair.Value, nil\n}", "func (r *Reader) Get(key []byte) ([]byte, error) {\n\tval, err := r.db.ReadTransact(func(tr fdb.ReadTransaction) (interface{}, error) {\n\t\treturn tr.Get(r.store.formatKey(key)).Get()\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn val.([]byte), nil\n}", "func (k *svKexMap) read(kexRequest string) *auth.Kex {\n\tk.rlock()\n\tdefer k.runlock()\n\tif kex, ok := k.KMap[kexRequest]; ok {\n\t\treturn &kex\n\t}\n\treturn nil\n}", "func (s *KeyStore) Get(name string) 
([]byte, error) {\n\tif !s.keyExists(name) {\n\t\treturn nil, errors.Errorf(\"key store: get: unknown key '%s'\", name)\n\t}\n\treturn ioutil.ReadFile(s.keyPath(name))\n}", "func (f FileDAO) Read(name, password string) (KeyInfo, error) {\n\tfilename, err := f.filename(name)\n\tif err != nil {\n\t\treturn KeyInfo{}, err\n\t}\n\n\tif len(password) == 0 {\n\t\treturn KeyInfo{}, fmt.Errorf(\"no password\")\n\t}\n\n\tbytes, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn KeyInfo{}, errors.Wrap(err, \"not found\")\n\t}\n\n\tpayload, _, err := jose.Decode(string(bytes), password)\n\tif err != nil {\n\t\treturn KeyInfo{}, err\n\t}\n\n\tvar decoded keyring.Item\n\terr = json.Unmarshal([]byte(payload), &decoded)\n\tif err != nil {\n\t\treturn KeyInfo{}, err\n\t}\n\n\tinfo, err := unmarshalInfo(decoded.Data)\n\tif err != nil {\n\t\treturn KeyInfo{}, err\n\t}\n\n\ti, ok := info.(localInfo)\n\tif !ok {\n\t\treturn KeyInfo{}, fmt.Errorf(\"only support type KeyInfo\")\n\t}\n\n\treturn KeyInfo{\n\t\tName: i.Name,\n\t\tPubKey: cryptoamino.MarshalPubkey(i.PubKey),\n\t\tPrivKeyArmor: i.PrivKeyArmor,\n\t\tAlgo: string(i.Algo),\n\t}, nil\n}", "func read(arg string) int {\n\t// we do not consume the key, but in real life the key will be consumed to get the value\n\t// from DB or a filesystem etc.\n\t// We simply return a random number between 0 and 100 (excluded 100).\n\treturn rand.Intn(100)\n}", "func (c *Configr) Read(key string) (interface{}, error) {\n\treturn c.Get(key)\n}", "func (s HTTPStore) GetKey(role data.RoleName) ([]byte, error) {\n\turl, err := s.buildKeyURL(role)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq, err := http.NewRequest(\"GET\", url.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := s.roundTrip.RoundTrip(req)\n\tif err != nil {\n\t\treturn nil, NetworkError{Wrapped: err}\n\t}\n\tdefer resp.Body.Close()\n\tif err := translateStatusToError(resp, role.String()+\" key\"); err != nil {\n\t\treturn nil, err\n\t}\n\tb 
:= io.LimitReader(resp.Body, MaxKeySize)\n\tbody, err := ioutil.ReadAll(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn body, nil\n}", "func (esc *ExtendedSimpleContract) Read(ctx utils.CustomTransactionContextInterface, key string) (string, error) {\n\texisting := ctx.GetCallData()\n\n\tif existing == nil {\n\t\treturn \"\", fmt.Errorf(\"Cannot read world state pair with key %s. Does not exist\", key)\n\t}\n\n\treturn string(existing), nil\n}", "func (kvs *FS) Get(key string) ([]byte, error) {\n\tdata, err := lockedfile.Read(kvs.filename(key))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%w: %s\", ErrNoSuchKey, err.Error())\n\t}\n\treturn data, nil\n}", "func (c *ConsulDB) ReadEntry(key string) (string, bool, error) {\n\n\tkv := c.consulClient.KV()\n\n\tpair, _, err := kv.Get(key, nil)\n\n\tif pair == nil {\n\t\treturn string(\"No value found for ID: \" + key), false, err\n\t}\n\treturn string(pair.Value), true, err\n}", "func (c *Context) Read(key string) (interface{}, bool) {\n\tvalue, ok := c.Data[key]\n\treturn value, ok\n}", "func (r *Reader) Key() []byte {\n\treturn r.key\n}", "func (s *Arena) getKey(offset uint32, size uint16) []byte {\n\treturn s.data[offset : offset+uint32(size)]\n}", "func readKey(uiEvents <-chan ui.Event) string {\n\tfor {\n\t\te := <-uiEvents\n\t\tif e.Type == ui.KeyboardEvent || e.Type == ui.MouseEvent {\n\t\t\treturn e.ID\n\t\t}\n\t}\n}", "func (c *cache) read(key string) interface{} {\n\treturn c.Fields[key]\n}", "func (c *Client) Get(key paxi.Key) (paxi.Value, error) {\n\tc.HTTPClient.CID++\n\tif *readLeader {\n\t\treturn c.readLeader(key)\n\t} else if *readQuorum {\n\t\treturn c.readQuorum(key)\n\t} else {\n\t\treturn c.HTTPClient.Get(key)\n\t}\n}", "func (ms *MemStore) Get(key string, r io.ReaderFrom) error {\n\td := ms.get(key)\n\tif d == nil {\n\t\treturn ErrUnknownKey\n\t}\n\t_, err := r.ReadFrom(&d)\n\treturn err\n}", "func (db *FlatDatabase) Get(key []byte) ([]byte, error) { panic(\"not supported\") }", "func 
getKey() (string, error) {\n\tvar b [KeyLen]byte\n\t_, err := rand.Read(b[:])\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn hex.EncodeToString(b[:]), nil\n}", "func (c *Client) Get(key string) (string, error) {\n\treturn c.rdc.Get(c.prefix + key).Result()\n}", "func (kvStore *KvStore) Read(key string) (value []byte, flag int, err error) {\n\t// We need something more elegant than this\n\tif len(kvStore.files) <= 0 {\n\t\tlog.Fatal(\"No files\")\n\t}\n\t// Need some locking around here when we introduce file purging\n\tfor i := len(kvStore.files) - 1; i >= 0; i-- {\n\t\tvalue, flag, err = kvStore.files[i].Read(key)\n\t\tif flag == gklogfile.KeyDeleted || flag == gklogfile.KeyWritten {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}", "func Read(stub *shim.ChaincodeStub, args []string) ([]byte, error) {\r\n\tvar key, jsonResp string\r\n\tvar err error\r\n\t\r\n\tif len(args) != 1 {\r\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting name of the key to query\")\r\n\t}\r\n\r\n\tkey = args[0]\r\n\tvalAsbytes, err := stub.GetState(key)\r\n\t\r\n\tif err != nil {\r\n\t\tjsonResp = \"{\\\"Error\\\":\\\"Failed to get state for \" + key + \"\\\"}\"\r\n\t\treturn []byte(jsonResp), err\r\n\t}\r\n\t\r\n\tif valAsbytes == nil {\r\n\t\treturn []byte(\"Did not find entry for key: \" + key), nil\r\n\t}\r\n\treturn valAsbytes, nil\r\n}", "func (c *Client) Get(key string) (string, error) {\n\treturn c.GetKV(strings.ToLower(key))\n}", "func (gosc *gcpObjectStorageCache) Read(ctx context.Context, bucketName string, key string) (interface{}, error) {\n\tif ctx != nil {\n\t\tgosc.client = getObjectStorageClient(ctx)\n\t} else {\n\t\tctx = context.Background()\n\t}\n\n\tbkt := gosc.client.Bucket(bucketName)\n\tobj := bkt.Object(key)\n\n\t//TODO: should list the bucket first, see:\n\t// https://godoc.org/cloud.google.com/go/storage\n\tr, err := obj.NewReader(ctx)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"getting object error %v\", err)\n\t}\n\n\tdefer 
r.Close()\n\n\tbuf := new(strings.Builder)\n\n\tif _, copyErr := io.Copy(buf, r); copyErr != nil {\n\t\treturn \"\", fmt.Errorf(\"error copying to stdout %v\", copyErr)\n\t}\n\n\tkeyString := buf.String()\n\tfmt.Printf(\"Found value %s\\n\", keyString)\n\treturn keyString, nil\n}", "func (c *Client) Read(k uint64) (float64, error) {\n\tc.m.RLock()\n\tdefer c.m.RUnlock()\n\n\tv, ok := c.d[k]\n\tif !ok {\n\t\treturn 0, ErrMissing\n\t}\n\n\treturn v, nil\n}", "func (t *SimpleChaincode) read(stub *shim.ChaincodeStub, args []string) ([]byte, error) {\n\tvar key, jsonResp string\n\tvar err error\n\n\tif len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting name of the key to query\")\n\t}\n\n\tkey = args[0]\n\tvalAsbytes, err := stub.GetState(key)\n\tif err != nil {\n\t\tjsonResp = \"{\\\"Error\\\":\\\"Failed to get state for \" + key + \"\\\"}\"\n\t\treturn nil, errors.New(jsonResp)\n\t}\n\n\treturn valAsbytes, nil\n}", "func getHKDFKey(hkdf io.Reader, length int) ([]byte, error) {\n\tkey := make([]byte, length)\n\tn, err := io.ReadFull(hkdf, key)\n\tif n != len(key) || err != nil {\n\t\treturn key, err\n\t}\n\n\treturn key, nil\n}", "func (c Repository) GetKey(key string) string {\n\tval, err := c.Client.Get(key).Result()\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\treturn val\n}", "func (s *Storage) Get(key []byte) ([]byte, error) {\n\topts := gorocksdb.NewDefaultReadOptions()\n\tdefer opts.Destroy()\n\n\treturn s.db.GetBytes(opts, key)\n}", "func (t *SimpleChaincode) read(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n\tvar key, jsonResp string\n\tvar err error\n\n\tif len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. 
Expecting name of the key to query\")\n\t}\n\n\tkey = args[0]\n\tvalAsbytes, err := stub.GetState(key)\n\tif err != nil {\n\t\tjsonResp = \"{\\\"Error\\\":\\\"Failed to get state for \" + key + \"\\\"}\"\n\n\n\t\treturn nil, errors.New(jsonResp)\n\t}\n\n\treturn valAsbytes, nil\n}", "func (t *SimpleChaincode) read(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n\tvar key, jsonResp string\n\tvar err error\n\n\tif len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting name of the key to query\")\n\t}\n\n\tkey = args[0]\n\tvalAsbytes, err := stub.GetState(key)\n\tif err != nil {\n\t\tjsonResp = \"{\\\"Error\\\":\\\"Failed to get state for \" + key + \"\\\"}\"\n\t\treturn nil, errors.New(jsonResp)\n\t}\n\n\treturn valAsbytes, nil\n}", "func (t *SimpleChaincode) read(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n\tvar key, jsonResp string\n\tvar err error\n\n\tif len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting name of the key to query\")\n\t}\n\n\tkey = args[0]\n\tvalAsbytes, err := stub.GetState(key)\n\tif err != nil {\n\t\tjsonResp = \"{\\\"Error\\\":\\\"Failed to get state for \" + key + \"\\\"}\"\n\t\treturn nil, errors.New(jsonResp)\n\t}\n\n\treturn valAsbytes, nil\n}", "func (t *SimpleChaincode) read(stub *shim.ChaincodeStub, args []string) ([]byte, error) {\n\tvar jsonResp string\n\tvar err error\n\n\tif len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. 
Expecting name of the key to query\")\n\t}\n\n\tkey = args[0]\n\t\n\tfmt.Println(\"retrieving state for key: \" + key);\n\t\n\tvalAsbytes, err := stub.GetState(key)\n\tif err != nil {\n\t\tjsonResp = \"{\\\"Error\\\":\\\"Failed to get state for \" + key + \"\\\"}\"\n\t\treturn nil, errors.New(jsonResp)\n\t}\n\n //valAsbytes = []byte(valAsbytes);\n\treturn valAsbytes, nil\n}", "func readSecret(key string) (string, error) {\n\tbasePath := \"/var/openfaas/secrets/\"\n\tif len(os.Getenv(\"secret_mount_path\")) > 0 {\n\t\tbasePath = os.Getenv(\"secret_mount_path\")\n\t}\n\n\treadPath := path.Join(basePath, key)\n\tsecretBytes, readErr := ioutil.ReadFile(readPath)\n\tif readErr != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to read secret: %s, error: %s\", readPath, readErr)\n\t}\n\tval := strings.TrimSpace(string(secretBytes))\n\treturn val, nil\n}", "func (v *Vault) Read(path, key string) ([]byte, error) {\n\tvar secret *vaultapi.Secret\n\tvar err error\n\n\tv.logger.WithFields(log.Fields{\"path\": path, \"key\": key}).\n\t\tInfof(\"Reading data from Vault path\")\n\n\tif secret, err = v.client.Logical().Read(path); err != nil {\n\t\treturn nil, err\n\t}\n\tif secret == nil || secret.Data == nil || len(secret.Data) == 0 {\n\t\treturn nil, fmt.Errorf(\"no data found on path '%s'\", path)\n\t}\n\n\treturn v.extractKey(secret.Data, key)\n}", "func (sto *RocksdbStorage) Get(key string) (val []byte, err error) {\n\tdb := sto.db\n\tro := sto.ro\n\tval, err = db.GetBytes(ro, []byte(key))\n\treturn val, err\n}", "func readPrivateKey() ([]byte, error) {\n\tprivateKey, e := ioutil.ReadFile(\"keys/sample-key\")\n\treturn privateKey, e\n}", "func (d *EtcdStateDriver) Read(key string) ([]byte, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), ctxTimeout)\n\tdefer cancel()\n\n\tvar err error\n\tvar resp *client.GetResponse\n\tfor i := 0; i < maxEtcdRetries; i++ {\n\t\t// etcd3 uses quorum for reads by default\n\t\tresp, err = d.Client.KV.Get(ctx, 
key)\n\t\tlog.Infof(\"EtcdStateDriver_Read_Err:%+v\", err)\n\t\tif err != nil {\n\t\t\tif err.Error() == client.ErrNoAvailableEndpoints.Error() {\n\t\t\t\t// Retry after a delay\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif resp != nil && len(resp.Kvs) != 0 {\n\t\t\t\treturn []byte(resp.Kvs[0].Value), nil\n\t\t\t}\n\n\t\t\treturn []byte{}, fmt.Errorf(\"error reading from etcd\")\n\t\t}\n\n\t\tif resp.Count == 0 {\n\t\t\treturn []byte{}, core.Errorf(\"key not found\")\n\t\t}\n\n\n\n\t\treturn resp.Kvs[0].Value, err\n\t}\n\n\treturn []byte{}, err\n}", "func (r *RedisStorage) Get(key string) string {\n\tresult, _ := r.Client.Get(key).Result()\n\treturn result\n}", "func Read(key string, storagename string) ([]byte) {\n\tmu.Lock()\n\tstorage, _ :=gocask.NewGocask(\"images/\" + storagename)\n\tbuf, err := storage.Get(key)\n\tstorage.Close()\n\tmu.Unlock()\n\terrors.Check(err)\n\treturn buf\n}", "func (s *SlicingDice) getKey(keys map[string]string, endpointKeyLevel int) (string, error) {\n\tcurrentKeyLevel := s.getKeyLevel(keys)\n\tif currentKeyLevel == 2 {\n\t\tif len(keys[\"masterKey\"]) != 0 {\n\t\t\treturn keys[\"masterKey\"], nil\n\t\t} else if len(keys[\"customKey\"]) != 0 {\n\t\t\treturn keys[\"customKey\"], nil\n\t\t}\n\t} else if currentKeyLevel != endpointKeyLevel {\n\t\treturn \"\", errors.New(\"API key: This key don't have permission to peform this operation\")\n\t} else {\n\t\tif len(keys[\"writeKey\"]) != 0 {\n\t\t\treturn keys[\"writeKey\"], nil\n\t\t}\n\t\tif len(keys[\"readKey\"]) != 0 {\n\t\t\treturn keys[\"readKey\"], nil\n\t\t}\n\t}\n\treturn \"\", nil\n}", "func (k *K8s) Read(b []byte) (int, error) {\n\tif !k.ready {\n\t\tctx, cancel := context.WithTimeout(context.Background(), DefaultTimeout)\n\t\tdefer cancel()\n\n\t\tsecret, err := k.client.CoreV1().Secrets(k.ns).Get(ctx, k.secret, metav1.GetOptions{})\n\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn 0, fmt.Errorf(\"failed to find secret %s in namespace %s: %v\", 
k.secret, k.ns, err)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"failed to read secret %s in namespace %s: %v\", k.secret, k.ns, err)\n\t\t}\n\n\t\tk.reader = bytes.NewBuffer(secret.Data[k.key])\n\t\tk.ready = true\n\t}\n\n\tn, err := k.reader.Read(b)\n\tif err != nil {\n\t\tk.ready = false\n\t}\n\n\treturn n, err\n}", "func readAPIKey() {\n\tapiKeyByte, err := ioutil.ReadFile(\"apikey.txt\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tapiKey = string(apiKeyByte)\n}", "func (c *Client) GetKey(uuid string) (k Key, err error) {\n\topts := make(map[string]string)\n\n\treq := c.prepareRequest(\"GET\", fmt.Sprintf(\"keys/%s\", uuid), opts)\n\n\t//log.Printf(\"req: %#v\", req)\n\tresp, err := c.call(req)\n\tif err != nil {\n\t\terr = c.handleAPIResponsese(resp)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\tk = Key{}\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\tdefer resp.Body.Close()\n\n\terr = json.Unmarshal(body, k)\n\t//log.Printf(\"json: %#v\\n\", p)\n\treturn\n}", "func (s *Session) read(key *Key, offset uint64, size uint64) (data unsafe.Pointer, dataSize uint64, err error) {\n\tatomic.AddUint64(&cReads, 1)\n\n\tio_attr := C.struct_dnet_io_attr{\n\t\tparent: key.id.id, id: key.id.id, _type: key.id._type,\n\t\toffset: C.uint64_t(offset), size: C.uint64_t(size),\n\t}\n\n\tvar cflags C.uint64_t\n\tvar errp C.int\n\tdata = C.dnet_read_data_wait(s.session, &key.id, &io_attr, cflags, &errp)\n\tdataSize = uint64(io_attr.size)\n\tif errp != 0 {\n\t\terr = Error(errp)\n\t}\n\treturn\n}", "func getKey() (string, error) {\n\tretries := 100\n\ti := 0\n\tb := make([]byte, 10)\n\trand.Read(b)\n\tfor i < retries {\n\t\tkey := fmt.Sprintf(\"%x\", md5.Sum(b))[:10]\n\t\tif exists := client.Exists(key).Val(); !exists {\n\t\t\treturn key, nil\n\t\t}\n\t}\n\treturn \"\", errors.New(\"max retry limit reached\")\n}", "func ReadApiKey(input string) (string, error) {\n\tif (input == \"\") {\n\t\treturn \"\", errors.New(\"no api configuration file name, harvesting Internet 
Archive records only\")\n\t}\n\tdat, err := ioutil.ReadFile(input)\n\tif (err != nil) {\n\t\treturn \"\", errors.New(fmt.Sprintf(\"unable to open api key file %s, harvesting Internet Archive records \" +\n\t\t\t\"only\", input))\n\t}\n\tkey := types.ApiKey{}\n\t_ = json.Unmarshal([]byte(dat), &key)\n\treturn key.Key, nil\n}", "func (service *service) getKey(key string) (string, error) {\n\tval, err := service.client.Get(key).Result()\n\tif err == redis.Nil {\n\t\treturn \"\", nil\n\t} else if err != nil {\n\t\treturn \"\", err\n\t}\n\treturn val, nil\n}", "func (c *Keystoreclient) Read(ctx context.Context, key string, typ proto.Message) (proto.Message, *pbd.ReadResponse, error) {\n\tif c.Fail {\n\t\treturn nil, nil, fmt.Errorf(\"Directed to fail\")\n\t}\n\tres, err := c.linker.Read(ctx, &pbd.ReadRequest{Key: key})\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tproto.Unmarshal(res.GetPayload().GetValue(), typ)\n\treturn typ, res, nil\n}", "func getKey(config *viper.Viper, keyType string) ([]byte, error) {\n\tkey := config.GetString(keyType + \"Key\")\n\tif key != \"\" {\n\t\treturn []byte(key), nil\n\t}\n\tif config.GetString(keyType+\"KeyFile\") == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing %s key in the token config (%sKey or %sKeyFile)\", keyType, keyType, keyType)\n\t}\n\treturn ioutil.ReadFile(prepareFileName(config.GetString(keyType + \"KeyFile\")))\n}", "func (w *StandardClientWrapper) ReadSecret(path, key string) (interface{}, error) {\n\tsecret, err := w.Client.Logical().Read(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read secret: %v\", err)\n\t}\n\tif secret == nil || secret.Data == nil {\n\t\treturn nil, nil\n\t}\n\n\t// Determine if this KV secret is version 1 or 2\n\t//\n\t// In version 1, the secret is stored directly under\n\t// secret[key].\n\t//\n\t// In version 2, the secret is stored\n\t// as secret[\"data\"][key]. 
There are also values\n\t// under secret[\"metadata\"] that have information\n\t// we can use to confirm the secret type, such as\n\t// secret[\"metadata\"][\"version\"]\n\t//\n\t// TODO(donald): Is there a better way to differentiate\n\t// between v1 and v2 secrets?\n\tif secret.Data[\"metadata\"] != nil && secret.Data[\"data\"] != nil {\n\t\tmd, mdok := secret.Data[\"metadata\"].(map[string]interface{})\n\t\tkv, kvok := secret.Data[\"data\"].(map[string]interface{})\n\t\tif !mdok || !kvok || md[\"version\"] == nil {\n\t\t\t// treat this as a v1 secret\n\t\t\treturn secret.Data[key], nil\n\t\t}\n\t\t// treat this as a v2 secret\n\t\treturn kv[key], nil\n\t}\n\n\treturn secret.Data[key], nil\n}", "func (m *RegistryKeyState) GetKey()(*string) {\n return m.key\n}", "func (ps *Store) Read(ctx context.Context, key datastore.Key, entity datastore.Entity) error {\n\tc := GetCon(ctx)\n\temd := entity.GetEntityMetadata()\n\titer := c.Query(getJSONSelect(emd.GetName(), emd.GetIDColumnName()), key).Iter()\n\tvar json string\n\tvalid := iter.Scan(&json)\n\tif !valid {\n\t\treturn common.NewError(datastore.EntityNotFound, fmt.Sprintf(\"%v not found with id = %v\", emd.GetName(), key))\n\t}\n\tdatastore.FromJSON(json, entity)\n\tif err := iter.Close(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func Get(key string) (string, error) {\n\treturn Cli.Get(Ctx, key).Result()\n}", "func (c *FakeClient) Read(secretName string) (map[string]interface{}, error) {\n\treturn c.data[secretName], nil\n}", "func GetKey(key string) (interface{}, error) {\n\tconnect := Connect()\n\treply, err := connect.Do(\"GET\", key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn reply, err\n}", "func (r *Client) Get(key string) (string, error) {\n\tvalue, err := r.client.Get(key).Result()\n\treturn value, err\n}", "func (p *PropertiesService) GetKey(file string, stanza string, key string) (*string, *http.Response, error) {\n\tapiError := &APIError{}\n\toutput := &Entry{}\n\tresp, err := 
p.client.New().Get(\n\t\tgetPropertiesUri(file, stanza, key)).ResponseDecoder(stringResponseDecoder{}).Receive(output, apiError)\n\tif err != nil || !apiError.Empty() {\n\t\treturn nil, resp, relevantError(err, apiError)\n\t}\n\treturn &output.Value, resp, relevantError(err, apiError)\n}", "func (tb *tableManager) read(keyIn uint64) (*Block, error) {\n\tentry, err := tb.getEntry(keyIn)\n\tif err != nil {\n\t\tlog.Println(\"Could not obtain entry.\")\n\t\treturn nil, errors.New(\"Could not obtain entry.\")\n\t}\n\tif entry.flags&flagRemove != 0 {\n\t\t// dataBase should be able to tell if a dirtyKey is marked\n\t\t// for removal so it can write it as removed in log.\n\t\treturn nil, nil\n\t}\n\ttb.updateLRUCacheHead(entry)\n\treturn entry.block, nil\n}", "func (c *Client) Get(key string) (string, error) {\n\titem, err := c.client.Get(key)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(item.Value), nil\n}", "func (s *Store) Get(_ context.Context, key string) ([]byte, error) {\n\tbits, err := os.ReadFile(s.keyPath(key))\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\terr = blob.KeyNotFound(key)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"key %q: %w\", key, err)\n\t}\n\treturn bits, nil\n}", "func (c *CycleState) Read(key StateKey) (StateData, error) {\n\tif v, ok := c.storage[key]; ok {\n\t\tif v != nil {\n\t\t\treturn v, nil\n\t\t}\n\n\t\treturn nil, nil\n\t}\n\treturn nil, errors.New(NotFound)\n}", "func (s *Session) Read(key *Key, offset uint64, size uint64) (b []byte, err error) {\n\t// TODO use reflect.SliceHeader and manage data ourselves?\n\tdata, dataSize, err := s.read(key, offset, size)\n\tif data == nil {\n\t\treturn\n\t}\n\tdefer C.free(data)\n\n\tb = C.GoBytes(unsafe.Pointer(uintptr(data)+readOffset), C.int(dataSize)-C.int(readOffset))\n\treturn\n}", "func (l *localLinker) Read(ctx context.Context, req *pbd.ReadRequest) (*pbd.ReadResponse, error) {\n\tif val, ok := l.store[req.Key]; ok {\n\t\treturn &pbd.ReadResponse{Payload: val}, 
nil\n\t}\n\treturn nil, status.Error(codes.InvalidArgument, fmt.Sprintf(\"Unable to locate %v\", req.Key))\n}", "func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) {\n\tch, err := t.open(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer ch.Release()\n\treturn ch.Value().(*table.Reader).FindKey(key, true, ro)\n}", "func (db *MemoryStorage) Get(key []byte) ([]byte, error) {\n\tif entry, ok := db.data.Load(common.BytesToHex(key)); ok {\n\t\treturn entry.([]byte), nil\n\t}\n\treturn nil, ErrKeyNotFound\n}", "func (lsm *lsm) Read(key string, ts uint64) (*Entry, error) {\n\tfor _, level := range lsm.levels {\n\t\tentry, err := level.Find(key, ts)\n\t\tif err != nil {\n\t\t\tswitch err.(type) {\n\t\t\tcase *ErrKeyNotFound:\n\t\t\t\tcontinue\n\t\t\tdefault:\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else {\n\t\t\treturn entry, nil\n\t\t}\n\t}\n\treturn nil, newErrKeyNotFound()\n}", "func ReadPGPKey(path string) openpgp.EntityList {\n\t//Read path into file\n\tkeypath, err := os.Open(path)\n\tif err != nil {\n\t\tfmt.Println(\"Reading file failed.\")\n\t\tlog.Fatal(err)\n\t}\n\n\t//Read armored public key into Entity\n\tentityList, err := openpgp.ReadArmoredKeyRing(keypath)\n\tif err != nil {\n\t\tfmt.Println(\"Reading armored key failed.\")\n\t\tlog.Fatal(err)\n\t}\n\treturn entityList\n}", "func (db *Database) Get(key []byte) ([]byte, error) {\n\tdb.lock.RLock()\n\tdefer db.lock.RUnlock()\n\n\tswitch {\n\tcase db.db == nil:\n\t\treturn nil, database.ErrClosed\n\tcase db.corrupted():\n\t\treturn nil, database.ErrAvoidCorruption\n\t}\n\n\tvalue, err := db.db.GetBytes(db.readOptions, key)\n\tif err != nil {\n\t\tatomic.StoreUint64(&db.errored, 1)\n\t\treturn nil, err\n\t}\n\tif value != nil {\n\t\treturn value, nil\n\t}\n\treturn nil, database.ErrNotFound\n}", "func (db *Database) Get(key string) ([]byte, error) {\n\tvar data []byte\n\n\tif db == nil || db.conn == nil {\n\t\treturn data, hord.ErrNoDial\n\t}\n\n\tif err := 
hord.ValidKey(key); err != nil {\n\t\treturn data, err\n\t}\n\n\terr := db.conn.Query(`SELECT data FROM hord WHERE key = ?;`, key).Scan(&data)\n\tif err != nil && err != gocql.ErrNotFound {\n\t\treturn data, err\n\t}\n\tif err == gocql.ErrNotFound {\n\t\treturn data, hord.ErrNil\n\t}\n\n\treturn data, nil\n}", "func (ck *Clerk) Get(key string) string {\n\treturn ck.RunCall(\"RaftKV.Get\",\n\t\tfunc(callerId CallerId) interface{} {\n\t\t\targs := new(GetArgs)\n\t\t\targs.CID = callerId\n\t\t\targs.Key = key\n\t\t\treturn args\n\t\t},\n\t\tfunc() interface{} {\n\t\t\treturn new(GetReply)\n\t\t},\n\t\tfunc(reply interface{}) (string, Err) {\n\t\t\treturn reply.(*GetReply).Value, reply.(*GetReply).Err\n\t\t})\n}", "func (db *TriasDB) Get(key []byte) []byte {\n\tdb.mtx.Lock()\n\tdefer db.mtx.Unlock()\n\n\t// TODO: unimplement\n\n\tvalue, err := file.Get(key)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn value\n}", "func GetString(key string) string { return c.GetString(key) }", "func (s *RedisStore) Get(key interface{}) (interface{}, error) {\n\treturn s.client.Get(key.(string)).Result()\n}", "func (g *Generator) GetKey(K string) interface{} {\n\treturn g.data[K]\n}", "func (i *InMemory) Get(key string) (string, bool) {\n\ti.lock.RLock()\n\tdefer i.lock.RUnlock()\n\n\tr, ok := i.data[key]\n\treturn r, ok\n}", "func (d *Store) Read(key string, bind interface{}) error {\n\tif v, ok := d.records[key]; ok {\n\t\treturn marshalRecords(v, &bind)\n\t}\n\n\treturn NoRecordError{key, d.StoreName}\n}", "func (skv *SkvDB) QueryByKey(key *Key) ([]byte, error) {\n\tif key == nil {\n\t\treturn nil, errorEmptyKey\n\t}\n\tfilename := skv.key2Filename(key)\n\tfd, err := os.OpenFile(filename, os.O_RDONLY, 0600)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, fmt.Errorf(\"key[%s] file not found\", key)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"can not open key file, cause by %v\", err)\n\t}\n\tdefer fd.Close()\n\tfi, err := fd.Stat()\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\n\tendOffset := fi.Size()\n\tstart := int64(0)\n\tend := endOffset\n\tstack := list.New()\n\tstack.PushBack([]int64{start, end})\n\tfor stack.Len() > 0 {\n\t\tstartEnd := stack.Remove(stack.Back()).([]int64)\n\t\tstart = startEnd[0]\n\t\tend = startEnd[1]\n\t\tif start <= end {\n\t\t\tmid := (end + start) / 2\n\t\t\trecord, err := skv.readNextRecord(mid, fd, endOffset)\n\t\t\tif err != nil {\n\t\t\t\tif err != errorNoNextRecord {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err == errorNoNextRecord {\n\t\t\t\tstack.PushBack([]int64{start, mid - 1})\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch key.compare(record.key) {\n\t\t\tcase 0:\n\t\t\t\treturn record.val, nil\n\t\t\tcase 1:\n\t\t\t\tstack.PushBack([]int64{mid + int64(record.byteSize()), end})\n\t\t\t\tbreak\n\t\t\tcase -1:\n\t\t\t\tstack.PushBack([]int64{start, mid - 1})\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, errorNoNextRecord\n}", "func (r *RedisStore) Get(key string) (string, error) {\n\treturn redis.String(r.conn.Do(\"GET\", key))\n}", "func (s *DefaultReadWriter) Read(key string) (bool, io.ReadCloser, *string, error) {\n\ts3api, err := s.open()\n\tif err != nil {\n\t\treturn false, nil, nil, err\n\t}\n\n\tlog.WithField(\"key\", key).Info(\"Reading object from S3.\")\n\tparams := &s3.GetObjectInput{\n\t\tBucket: aws.String(s.bucketName),\n\t\tKey: aws.String(key),\n\t}\n\n\tresp, err := s3api.GetObject(params)\n\n\tif err != nil {\n\t\te, ok := err.(awserr.Error)\n\t\tif ok && e.Code() == \"NoSuchKey\" {\n\t\t\treturn false, nil, nil, nil\n\t\t}\n\t\treturn false, nil, nil, err\n\t}\n\n\treturn true, resp.Body, resp.ContentType, err\n}", "func Get(key string) interface{} { return c.Get(key) }", "func ReadValue(key []byte) ([]byte, error) {\n\tif db == nil {\n\t\treturn nil, errors.New(\"database not initialized\")\n\t}\n\n\tif len(key) == 0 {\n\t\treturn nil, errors.New(\"empty key provided\")\n\t}\n\n\treturn db.Get(key, nil)\n}", "func (gc *GokuyamaClient) GetValue(key 
string) (string, error) {\n\n\tfmt.Fprintf(gc.conn, fmt.Sprintf(\"2,%s\\n\", base64.StdEncoding.EncodeToString([]byte(key))))\n\n\tret, err := bufio.NewReader(gc.conn).ReadString('\\n')\n\n\tif ret == \"\" {\n\t\tfmt.Println(err)\n\t}\n\n\trets := strings.Split(ret, \",\")\n\n\tif rets[1] == \"true\" {\n\t\tdata, err := base64.StdEncoding.DecodeString(rets[2])\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\treturn string(data), err\n\t} else {\n\t\treturn \"\", nil\n\t}\n}", "func (s *Storage) Get(key []byte) ([]byte, error) {\n\tv := s.kv.Get(key)\n\treturn v, nil\n}" ]
[ "0.7725113", "0.7333868", "0.7230678", "0.7222155", "0.7193259", "0.71848077", "0.71133333", "0.7095759", "0.7083336", "0.6862008", "0.68467087", "0.68404", "0.67486924", "0.6740755", "0.6702578", "0.6700314", "0.66522616", "0.6648966", "0.663196", "0.6580056", "0.6573138", "0.6550748", "0.6480801", "0.6464019", "0.6460768", "0.64427435", "0.6434366", "0.6423149", "0.6394818", "0.63917744", "0.63768137", "0.63556063", "0.6351012", "0.6342908", "0.63219625", "0.6320103", "0.629141", "0.6275892", "0.6265411", "0.6251076", "0.6245909", "0.623287", "0.6218467", "0.6197803", "0.619038", "0.61779284", "0.61719745", "0.61693203", "0.61693203", "0.6163539", "0.6163425", "0.6119301", "0.61036575", "0.6079105", "0.6055123", "0.60344195", "0.6034358", "0.60305226", "0.60294205", "0.60193574", "0.60148287", "0.6012912", "0.599724", "0.59896857", "0.59760964", "0.5969582", "0.5969198", "0.5964479", "0.5964461", "0.59644353", "0.59620017", "0.59608227", "0.595952", "0.5950419", "0.5940639", "0.5924713", "0.5923023", "0.5917448", "0.5917427", "0.59167385", "0.59140295", "0.59122103", "0.59071654", "0.5905967", "0.59035873", "0.58987784", "0.5898044", "0.5893823", "0.5892332", "0.5891837", "0.58730817", "0.5868272", "0.58655673", "0.5862237", "0.5852334", "0.5851868", "0.58433676", "0.5843185", "0.58268714", "0.5819305", "0.5817551" ]
0.0
-1
Delete records with keys
func (s *sqlStore) Delete(key string, opts ...store.DeleteOption) error { var options store.DeleteOptions for _, o := range opts { o(&options) } db, queries, err := s.db(options.Database, options.Table) if err != nil { return err } _, err = db.Exec(s.options.Context, queries.Delete, key) return err }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (db *FlatDatabase) Delete(key []byte) error { panic(\"not supported\") }", "func (fb *FlatBatch) Delete(key []byte) error { panic(\"not supported\") }", "func (t *tableCommon) deleteBatchKeys(ctx context.Context, lvs []*tspb.ListValue, delOpts *deleteOptions) error {\n\tfor _, lv := range lvs {\n\t\tpkeys, err := t.getPrimaryKeyData(ctx, lv)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif delOpts.RowDelete {\n\t\t\tfor _, cf := range delOpts.cfmap {\n\t\t\t\tif err := t.removeRecordWithIndex(ctx, pkeys, cf); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\tif err := t.removeFlexibleSparseColumn(ctx, pkeys, delOpts); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func Delete(keys []string, db RedisDBClientInterface, group environment.EnvironmentGroup) error {\n\tkvals := make([]string, len(keys))\n\tfor i, k := range keys {\n\t\tkvals[i] = extract(k, group)\n\t}\n\n\t_, err := db.Del(kvals...).Result()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func deleteTaintsByKey(taints []corev1.Taint, taintKey string) ([]corev1.Taint, bool) {\n\tnewTaints := []corev1.Taint{}\n\tdeleted := false\n\tfor i := range taints {\n\t\tif taintKey == taints[i].Key {\n\t\t\tdeleted = true\n\t\t\tcontinue\n\t\t}\n\t\tnewTaints = append(newTaints, taints[i])\n\t}\n\treturn newTaints, deleted\n}", "func deleteKeys(c context.Context, k []*datastore.Key) error {\n\tif len(k) == 0 {\n\t\treturn nil\n\t}\n\n\tkeys := make([]string, 0, len(k))\n\tfor _, key := range k {\n\t\tkeys = append(keys, key.Encode())\n\t}\n\n\tpayload, err := json.Marshal(struct {\n\t\tKeys []string `json:\"keys\"`\n\t}{\n\t\tkeys,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\th := make(http.Header)\n\th.Set(\"Content-Type\", \"application/json\")\n\n\tlogging.Fields{\n\t\t\"keys\": keys,\n\t}.Infof(c, \"deleteKeys: enqueing\")\n\n\treturn taskqueue.Add(c, deleteKeysQueueName, &taskqueue.Task{\n\t\tPath: deleteKeysPath,\n\t\tPayload: 
payload,\n\t\tHeader: h,\n\t\tMethod: \"POST\",\n\t\tDelay: time.Duration(30) * time.Minute,\n\t})\n}", "func (db *BoltDB) Del(keys []string) error {\n\treturn db.bolt.Update(func(txn *bbolt.Tx) error {\n\t\tb := txn.Bucket([]byte(\"default\"))\n\t\tfor _, key := range keys {\n\t\t\tb.Delete([]byte(key))\n\t\t}\n\n\t\treturn nil\n\t})\n}", "func (t *Testzzz) Delete(ctx context.Context, key ...interface{}) error {\n\tvar err error\n\tvar dbConn *sql.DB\n\n\t// if deleted, bail\n\tif t._deleted {\n\t\treturn nil\n\t}\n\n\ttx, err := components.M.GetConnFromCtx(ctx)\n\tif err != nil {\n\t\tdbConn, err = components.M.GetMasterConn()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttableName, err := GetTestzzzTableName(key...)\n\tif err != nil {\n\t\treturn err\n\t}\n\t//1\n\n\t// sql query with composite primary key\n\tsqlstr := `UPDATE ` + tableName + ` SET is_del = 1 WHERE id = ?`\n\n\t// run query\n\tutils.GetTraceLog(ctx).Debug(\"DB\", zap.String(\"SQL\", fmt.Sprint(sqlstr, t.ID)))\n\tif tx != nil {\n\t\t_, err = tx.Exec(sqlstr, t.ID)\n\t} else {\n\t\t_, err = dbConn.Exec(sqlstr, t.ID)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// set deleted\n\tt._deleted = true\n\n\treturn nil\n}", "func deleteKeys(ctx context.Context, backend driver.StorageDriver, prefix string, forced bool) error {\n\tlist, err := backend.List(ctx, prefix)\n\tif err != nil {\n\t\treturn ErrorContentNotFound.Format(prefix)\n\t}\n\tif len(list) > 0 && !forced {\n\t\treturn ErrorNeedForcedDelete.Format(prefix)\n\t}\n\terr = backend.Delete(ctx, prefix)\n\tif err != nil {\n\t\treturn ErrorInternalUnknown.Format(err)\n\t}\n\treturn nil\n}", "func (i *Index) Delete(tr fdb.Transaction, primaryTuple tuple.Tuple, key tuple.Tuple) {\n\tif key == nil {\n\t\tfmt.Println(\"index key is NIL strange behavior\")\n\t\t// no need to clean, this field wasn't indexed\n\t\treturn\n\t}\n\tsub := i.dir.Sub(key...)\n\tif i.Unique {\n\t\tfmt.Println(\"+++ delete the index\", sub)\n\t\ttr.Clear(sub)\n\t} 
else {\n\t\t// Add primary here\n\t\tsub = sub.Sub(primaryTuple...)\n\t\ttr.Clear(sub) // removing old keys\n\t}\n}", "func (opts *ListOpts) Delete(key string) {\n for i, k := range *opts.values {\n if k == key {\n (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)\n return\n }\n }\n}", "func (be *s3) removeKeys(t backend.Type) error {\n\tdone := make(chan struct{})\n\tdefer close(done)\n\tfor key := range be.List(backend.Data, done) {\n\t\terr := be.Remove(backend.Data, key)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (b *batchChecker) deleteDupKeys(ctx context.Context, sctx sessionctx.Context, t table.Table, rows [][]types.Datum) error {\n\tcleanupRows, err := b.getKeysNeedCheck(ctx, sctx, t, rows)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, row := range cleanupRows {\n\t\tif row.handleKey != nil {\n\t\t\tdelete(b.dupKVs, string(row.handleKey.newKV.key))\n\t\t}\n\t\tfor _, uk := range row.uniqueKeys {\n\t\t\tdelete(b.dupKVs, string(uk.newKV.key))\n\t\t}\n\t}\n\treturn nil\n}", "func (k *keeper) DeleteByKeys(keys []string) error {\n\tif k.disableCaching {\n\t\treturn nil\n\t}\n\n\tclient := k.connPool.Get()\n\tdefer client.Close()\n\n\tredisKeys := []interface{}{}\n\tfor _, key := range keys {\n\t\tredisKeys = append(redisKeys, key)\n\t}\n\n\t_, err := client.Do(\"DEL\", redisKeys...)\n\treturn err\n}", "func (m *MonkeyWrench) DeleteMulti(table string, keys []spanner.Key) error {\n\t// Create a mutation for each value set we have.\n\tmutations := make([]*spanner.Mutation, 0, len(keys))\n\tfor _, key := range keys {\n\t\tmutations = append(mutations, spanner.Delete(table, key))\n\t}\n\n\t// Apply the mutations.\n\terr := m.applyMutations(mutations)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (ix *IndexedBucket) deleteRecs(keys []string, tx *bolt.Tx) error {\n\tupdateFn := func(tx *bolt.Tx) error {\n\t\tentries, err := ix.readRecs(keys, tx)\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\n\t\t// TODO(stephana): Combine the above readRecs step with the delete\n\t\t// step to avoid having to traverse the keyspace twice. Use a cursor.\n\n\t\t// Get the main bucket and delete the records from it.\n\t\tbucket := tx.Bucket(ix.mainBucket)\n\t\tfor _, key := range keys {\n\t\t\tif err := bucket.Delete([]byte(key)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif err := ix.deleteIndices(tx, entries); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tif tx != nil {\n\t\treturn updateFn(tx)\n\t}\n\n\treturn ix.DB.Update(updateFn)\n}", "func (el *RRSet) DeleteRecordInKey(k string, r Record) {\n\tel.l.Lock()\n\n\tvar old = make(map[Type][]Record)\n\tfor k, v := range el.m[k] {\n\t\told[k] = v\n\t}\n\n\tNew := el.m[k]\n\trecordData := r.Get()\n\n\tdefer el.deferOnDeleteKeyInRecord(k, old)\n\tdefer el.deferOnChange(KEventDeleteKeyInRecord, k, old)\n\tdefer el.l.Unlock()\n\n\tfor rListType, rListValue := range old {\n\t\tif rListType == r.Type() {\n\t\t\tswitch rListType {\n\t\t\tcase TypeA:\n\n\t\t\t\tfor i, v := range rListValue {\n\t\t\t\t\tlistRecordData := v.Get()\n\n\t\t\t\t\tif recordData.(*A).A.Equal(listRecordData.(*A).A) {\n\t\t\t\t\t\tNew[TypeA] = append(New[TypeA][:i], New[TypeA][i+1:]...)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tcase TypeNS:\n\n\t\t\t\tfor i, v := range rListValue {\n\t\t\t\t\tlistRecordData := v.Get()\n\n\t\t\t\t\tif recordData.(*NS).NS == listRecordData.(*NS).NS {\n\t\t\t\t\t\tNew[TypeNS] = append(New[TypeNS][:i], New[TypeNS][i+1:]...)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tcase TypeCNAME:\n\n\t\t\t\tfor i, v := range rListValue {\n\t\t\t\t\tlistRecordData := v.Get()\n\n\t\t\t\t\tif recordData.(*CNAME).CNAME == listRecordData.(*CNAME).CNAME {\n\t\t\t\t\t\tNew[TypeCNAME] = append(New[TypeCNAME][:i], New[TypeCNAME][i+1:]...)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tcase TypeSOA:\n\n\t\t\t\tfor i, v := range rListValue {\n\t\t\t\t\tlistRecordData := 
v.Get()\n\n\t\t\t\t\tif recordData.(*SOA).Serial == listRecordData.(*SOA).Serial {\n\t\t\t\t\t\tNew[TypeSOA] = append(New[TypeSOA][:i], New[TypeSOA][i+1:]...)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tcase TypePTR:\n\n\t\t\t\tfor i, v := range rListValue {\n\t\t\t\t\tlistRecordData := v.Get()\n\n\t\t\t\t\tif recordData.(*PTR).PTR == listRecordData.(*PTR).PTR {\n\t\t\t\t\t\tNew[TypePTR] = append(New[TypePTR][:i], New[TypePTR][i+1:]...)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tcase TypeMX:\n\n\t\t\t\tfor i, v := range rListValue {\n\t\t\t\t\tlistRecordData := v.Get()\n\n\t\t\t\t\tif recordData.(*MX).MX == listRecordData.(*MX).MX {\n\t\t\t\t\t\tNew[TypeMX] = append(New[TypeMX][:i], New[TypeMX][i+1:]...)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tcase TypeTXT:\n\n\t\t\t\tfor i, v := range rListValue {\n\t\t\t\t\tlistRecordData := v.Get()\n\n\t\t\t\t\tif reflect.DeepEqual(recordData.(*TXT).TXT, listRecordData.(*TXT).TXT) {\n\t\t\t\t\t\tNew[TypeTXT] = append(New[TypeTXT][:i], New[TypeTXT][i+1:]...)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tcase TypeAAAA:\n\n\t\t\t\tfor i, v := range rListValue {\n\t\t\t\t\tlistRecordData := v.Get()\n\n\t\t\t\t\tif recordData.(*AAAA).AAAA.Equal(listRecordData.(*AAAA).AAAA) {\n\t\t\t\t\t\tNew[TypeAAAA] = append(New[TypeAAAA][:i], New[TypeAAAA][i+1:]...)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tcase TypeSRV:\n\n\t\t\t\tfor i, v := range rListValue {\n\t\t\t\t\tlistRecordData := v.Get()\n\n\t\t\t\t\tif recordData.(*SRV).Target == listRecordData.(*SRV).Target {\n\t\t\t\t\t\tNew[TypeSRV] = append(New[TypeSRV][:i], New[TypeSRV][i+1:]...)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tcase TypeDNAME:\n\n\t\t\t\tfor i, v := range rListValue {\n\t\t\t\t\tlistRecordData := v.Get()\n\n\t\t\t\t\tif recordData.(*DNAME).DNAME == listRecordData.(*DNAME).DNAME {\n\t\t\t\t\t\tNew[TypeDNAME] = append(New[TypeDNAME][:i], 
New[TypeDNAME][i+1:]...)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tcase TypeOPT:\n\n\t\t\t\tfor i, v := range rListValue {\n\t\t\t\t\tlistRecordData := v.Get()\n\n\t\t\t\t\tif reflect.DeepEqual(recordData.(*OPT).Options, listRecordData.(*OPT).Options) {\n\t\t\t\t\t\tNew[TypeOPT] = append(New[TypeOPT][:i], New[TypeOPT][i+1:]...)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tcase TypeCAA:\n\n\t\t\t\tfor i, v := range rListValue {\n\t\t\t\t\tlistRecordData := v.Get()\n\n\t\t\t\t\tif recordData.(*CAA).Value == listRecordData.(*CAA).Value && recordData.(*CAA).Tag == listRecordData.(*CAA).Tag {\n\t\t\t\t\t\tNew[TypeCAA] = append(New[TypeCAA][:i], New[TypeCAA][i+1:]...)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tpass := false\n\tfor k := range New {\n\t\tif len(New[k]) != 0 {\n\t\t\tpass = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif pass == false {\n\t\tel.DeleteKey(k)\n\t}\n\n\tif el.beforeOnDeleteKeyInRecord != nil {\n\t\tel.beforeOnDeleteKeyInRecord(k, old, New)\n\t}\n\n\tif el.beforeOnChange != nil {\n\t\tel.beforeOnChange(KEventClear, k, old, New)\n\t}\n\n\tel.m[k] = New\n}", "func (ms Memorystore) Delete(key string) error {\n\treturn ms.DeleteMulti([]string{key})\n}", "func Del(key string) error {\n\treturn db.Update(func(txn *badger.Txn) error {\n\t\ttxn.Delete([]byte(key))\n\t\treturn nil\n\t})\n}", "func (s *mysql) Del(keys []string) error {\n\tlog.Tracef(\"Del: %v\", keys)\n\n\tif s.isShutdown() {\n\t\treturn store.ErrShutdown\n\t}\n\n\tctx, cancel := ctxWithTimeout()\n\tdefer cancel()\n\n\t// Start transaction\n\topts := &sql.TxOptions{\n\t\tIsolation: sql.LevelDefault,\n\t}\n\ttx, err := s.db.BeginTx(ctx, opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Delete blobs\n\tfor _, v := range keys {\n\t\t_, err = tx.ExecContext(ctx, \"DELETE FROM kv WHERE k IN (?);\", v)\n\t\tif err != nil {\n\t\t\t// Attempt to roll back the transaction\n\t\t\tif err2 := tx.Rollback(); err2 != nil {\n\t\t\t\t// We're in trouble!\n\t\t\t\te 
:= fmt.Sprintf(\"del: %v, unable to rollback: %v\", err, err2)\n\t\t\t\tpanic(e)\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Commit transaction\n\terr = tx.Commit()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"commit: %v\", err)\n\t}\n\n\tlog.Debugf(\"Deleted blobs (%v) from store\", len(keys))\n\n\treturn nil\n}", "func Delete(key string){\n n := keyValue[key]\n n.val = \"\"\n n.hash = \"\"\n keyValue[key] = n\n}", "func DeleteMulti(c appengine.Context, key []*Key) os.Error {\n\tif len(key) == 0 {\n\t\treturn nil\n\t}\n\tif err := multiValid(key); err != nil {\n\t\treturn err\n\t}\n\treq := &pb.DeleteRequest{\n\t\tKey: multiKeyToProto(c.FullyQualifiedAppID(), key),\n\t}\n\tres := &pb.DeleteResponse{}\n\treturn c.Call(\"datastore_v3\", \"Delete\", req, res)\n}", "func DeleteMulti(ctx context.Context, ekx []string) error {\n\tvar kx []*datastore.Key\n\tvar kpcx []*datastore.Key\n\tfor _, v := range ekx {\n\t\tk, err := datastore.DecodeKey(v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tkx = append(kx, k)\n\t\tkpcx2, err := pageContext.GetKeys(ctx, k)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, v2 := range kpcx2 {\n\t\t\tkpcx = append(kpcx, v2)\n\t\t}\n\t}\n\topts := new(datastore.TransactionOptions)\n\topts.XG = true\n\treturn datastore.RunInTransaction(ctx, func(ctx context.Context) (err1 error) {\n\t\terr1 = datastore.DeleteMulti(ctx, kpcx)\n\t\tif err1 != nil {\n\t\t\treturn\n\t\t}\n\t\terr1 = datastore.DeleteMulti(ctx, kx)\n\t\treturn\n\t}, opts)\n}", "func (tx *Tx) Delete(key string) error {\n\te := newRecord([]byte(key), nil, StringRecord, StringRem)\n\ttx.addRecord(e)\n\n\treturn nil\n}", "func (DummyStore) DeleteMap(key string, fields ...string) error { return nil }", "func (c *Client) DeleteMulti(ctx context.Context, ks []*Key) error {\n\tpbks, err := keys(ks).proto()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp, err := pb.NewStoreClient(c.ClientConn).Delete(c.newContext(ctx), &pb.Keys{\n\t\tKeys: pbks,\n\t})\n\tif err != nil 
{\n\t\treturn err\n\t}\n\treturn multiErrorFromRecordStatusProto(resp.Status)\n}", "func (u *UTXOSet) DeleteByPrefix(prefix []byte) {\n\n\t// create function that deletes keys\n\tdeleteKeys := func(keysForDelete [][]byte) error {\n\t\tif err := u.BlockChain.Database.Update(func(txn *badger.Txn) error {\n\t\t\t// iterate through keys and delete them (each key is an array of bytes)\n\t\t\tfor _, key := range keysForDelete {\n\t\t\t\tif err := txn.Delete(key); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\tcollectSize := 100000 // batch size to delete per iteration\n\t// read only operation\n\tu.BlockChain.Database.View(func(txn *badger.Txn) error {\n\t\topts := badger.DefaultIteratorOptions\n\t\t// makes it so we can read the keys but dont need to fetch the values\n\t\t// since we dont need to read the values when deleting\n\t\topts.PrefetchValues = false\n\t\tit := txn.NewIterator(opts) // create iterator with the options above\n\t\tdefer it.Close()\n\n\t\t// create array of bytes with batch size\n\t\tkeysForDelete := make([][]byte, 0, collectSize)\n\t\tkeysCollected := 0\n\t\t// seek all keys with prefix. 
ValidForPrefix() returns false when done iterating prefixes\n\t\tfor it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {\n\t\t\tkey := it.Item().KeyCopy(nil) // copy key\n\t\t\tkeysForDelete = append(keysForDelete, key) // add to array\n\t\t\tkeysCollected++\n\t\t\t// once reaching batch size, delete keys in array and reset counter + array\n\t\t\tif keysCollected == collectSize {\n\t\t\t\tif err := deleteKeys(keysForDelete); err != nil {\n\t\t\t\t\tlog.Panic(err)\n\t\t\t\t}\n\t\t\t\tkeysForDelete = make([][]byte, 0, collectSize)\n\t\t\t\tkeysCollected = 0\n\t\t\t}\n\t\t}\n\t\t// delete any remaining keys in last batch\n\t\tif keysCollected > 0 {\n\t\t\tif err := deleteKeys(keysForDelete); err != nil {\n\t\t\t\tlog.Panic(err)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n}", "func (sq *SQ3Driver) Delete(key string) error {\n\t_, err := sq.DB.Exec(fmt.Sprintf(\"DELETE FROM %v WHERE key=$1\", dbTable), key)\n\treturn err\n}", "func (m *MonkeyWrench) Delete(table string, key spanner.Key) error {\n\treturn m.DeleteMulti(table, []spanner.Key{key})\n}", "func (o APIKeySlice) DeleteAll(exec boil.Executor) error {\n\tif o == nil {\n\t\treturn errors.New(\"models: no APIKey slice provided for delete all\")\n\t}\n\n\tif len(o) == 0 {\n\t\treturn nil\n\t}\n\n\tif len(apiKeyBeforeDeleteHooks) != 0 {\n\t\tfor _, obj := range o {\n\t\t\tif err := obj.doBeforeDeleteHooks(exec); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tvar args []interface{}\n\tfor _, obj := range o {\n\t\tpkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), apiKeyPrimaryKeyMapping)\n\t\targs = append(args, pkeyArgs...)\n\t}\n\n\tsql := \"DELETE FROM \\\"api_keys\\\" WHERE \" +\n\t\tstrmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, apiKeyPrimaryKeyColumns, len(o))\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, sql)\n\t\tfmt.Fprintln(boil.DebugWriter, args)\n\t}\n\n\t_, err := exec.Exec(sql, args...)\n\tif err != nil {\n\t\treturn 
errors.Wrap(err, \"models: unable to delete all from apiKey slice\")\n\t}\n\n\tif len(apiKeyAfterDeleteHooks) != 0 {\n\t\tfor _, obj := range o {\n\t\t\tif err := obj.doAfterDeleteHooks(exec); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (u *UTXOSet) DeleteByPrefix(prefix []byte) {\n\t// create closure that has all of the deleted keys\n\tdeleteKeys := func(keysForDelete [][]byte) error {\n\t\t// access db via the blockchain connection and expose the badger transaction\n\t\tif err := u.Blockchain.Database.Update(func(txn *badger.Txn) error {\n\t\t\t// iterate through the 2d slice of bytes\n\t\t\tfor _, k := range keysForDelete {\n\t\t\t\t// delete each key\n\t\t\t\tif err := txn.Delete(k); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\t// this is the optimal amount of keys to delete at a time\n\t// deletes in 100,000 increments\n\tcollectSize := 100000\n\n\t// open read only transaction\n\tu.Blockchain.Database.View(func(txn *badger.Txn) error {\n\t\topts := badger.DefaultIteratorOptions\n\t\t// allows us to read the keys but without the values for optimization\n\t\topts.PrefetchValues = false\n\t\tit := txn.NewIterator(opts)\n\t\tdefer it.Close()\n\n\t\tkeysForDelete := make([][]byte, 0, collectSize)\n\t\tkeysCollected := 0\n\n\t\tfor it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {\n\t\t\t// create a copy of the key\n\t\t\tkey := it.Item().KeyCopy(nil)\n\t\t\t// assign the key for deletion\n\t\t\tkeysForDelete = append(keysForDelete, key)\n\t\t\tkeysCollected++\n\n\t\t\t// if we hit the limit, then delete the first 100,000 keys set for deletion\n\t\t\tif keysCollected == collectSize {\n\t\t\t\tif err := deleteKeys(keysForDelete); err != nil {\n\t\t\t\t\tlog.Panic(err)\n\t\t\t\t}\n\n\t\t\t\t// reset the array and the counter\n\t\t\t\tkeysForDelete = make([][]byte, 0, collectSize)\n\t\t\t\tkeysCollected = 
0\n\t\t\t}\n\t\t}\n\n\t\t// if the keys are above 0 but below 100,000 at the end of the loop, delete the rest\n\t\tif keysCollected > 0 {\n\t\t\tif err := deleteKeys(keysForDelete); err != nil {\n\t\t\t\tlog.Panic(err)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n}", "func delPatricia(ptr patricia, bucket string, cb db.CachedBatch) {\n\tkey := ptr.hash()\n\tlogger.Debug().Hex(\"key\", key[:8]).Msg(\"del\")\n\tcb.Delete(bucket, key[:], \"failed to delete key = %x\", key)\n}", "func delete(nums int) {\n\tt := util.Microsecond()\n\tfor i := 0; i < nums; i++ {\n\t\tkey := []byte(\"key_\" + strconv.Itoa(i))\n\t\tbDB.Update(func(tx *bolt.Tx) error {\n\t\t\tb := tx.Bucket([]byte(\"copernicus\"))\n\t\t\tif err := b.Delete(key); err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\tfmt.Println(\"delete time is :\", util.Microsecond()-t)\n}", "func BatchDeleteToken(some map[string]interface{}) (result *gorm.DB) {\n\tfor k, v := range some {\n\t\tquery := fmt.Sprintf(\"%v like '%%%v%%'\", k, v)\n\t\tprintln(query)\n\t\tresult = DB.Where(query).Delete(&Token{})\n\t}\n\treturn result\n}", "func (v *OrderedValues) Del(key []byte) {\n\tvar (\n\t\ti int\n\t\tok bool\n\t\tj [][]byte\n\t)\n\tfor i, j = range *v {\n\t\tif len(j) > 0 && bytes.Equal(j[0], key) {\n\t\t\tok = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !ok {\n\t\treturn\n\t}\n\tcopy((*v)[i:], (*v)[i+1:])\n\t(*v)[len(*v)-1] = nil\n\t*v = (*v)[:len(*v)-1]\n}", "func (s *RethinkDB) BulkDelete(ctx context.Context, req []state.DeleteRequest, _ state.BulkStoreOpts) error {\n\tlist := make([]string, len(req))\n\tfor i, d := range req {\n\t\tlist[i] = d.Key\n\t}\n\n\tc, err := r.Table(s.config.Table).GetAll(r.Args(list)).Delete().Run(s.session, r.RunOpts{Context: ctx})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error deleting record from the database: %w\", err)\n\t}\n\tdefer c.Close()\n\n\treturn nil\n}", "func (t *Transaction) DeleteMulti(keys []*datastore.Key) (err error) {\n\tt.deleteKeys(keys...)\n\treturn 
t.txn.DeleteMulti(keys)\n}", "func (t *openAddressing) Delete(key string) {\n\tround := 0\n\tfor round != len(t.values) {\n\t\thash := t.hash(key, round)\n\t\tslot := t.values[hash]\n\t\tif slot != nil && slot.key == key {\n\t\t\tt.values[hash].deleted = true\n\t\t\tt.len--\n\t\t\treturn\n\t\t}\n\t\tround++\n\t}\n}", "func (ust *UsersShopTrace) Delete(ctx context.Context, key ...interface{}) error {\n\tvar err error\n\tvar dbConn *sql.DB\n\n\t// if deleted, bail\n\tif ust._deleted {\n\t\treturn nil\n\t}\n\n\ttx, err := components.M.GetConnFromCtx(ctx)\n\tif err != nil {\n\t\tdbConn, err = components.M.GetMasterConn()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttableName, err := GetUsersShopTraceTableName(key...)\n\tif err != nil {\n\t\treturn err\n\t}\n\t//1\n\n\t// sql query with composite primary key\n\tsqlstr := `UPDATE ` + tableName + ` SET is_del = 1 WHERE id = ?`\n\n\t// run query\n\tutils.GetTraceLog(ctx).Debug(\"DB\", zap.String(\"SQL\", fmt.Sprint(sqlstr, ust.ID)))\n\tif tx != nil {\n\t\t_, err = tx.Exec(sqlstr, ust.ID)\n\t} else {\n\t\t_, err = dbConn.Exec(sqlstr, ust.ID)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// set deleted\n\tust._deleted = true\n\n\treturn nil\n}", "func (mdl *Model) Delete(key interface{}) error {\n\tmdl.mux.Lock()\n\tdefer mdl.mux.Unlock()\n\tif std.ModelTypeList == mdl.GetType() {\n\t\tk := key.(int)\n\t\tif k > len(mdl.data) {\n\t\t\treturn errors.New(InvalidIndex, \"index '%d' out of range\", k)\n\t\t}\n\t\tmdl.data = append(mdl.data[:key.(int)-1], mdl.data[key.(int):]...)\n\t\treturn nil\n\t}\n\n\tk := key.(string)\n\tif idx, ok := mdl.hashIdx[k]; ok {\n\t\tmdl.data = append(mdl.data[:idx-1], mdl.data[idx:]...)\n\t\tdelete(mdl.hashIdx, k)\n\t\tdelete(mdl.idxHash, idx)\n\t\treturn nil\n\t}\n\treturn errors.New(InvalidIndex, \"index '%s' out of range\", k)\n}", "func (t *IPDCChaincode) invoke_delete_all_records(stub shim.ChaincodeStubInterface, args []string, map_specification map[string]interface{}) 
pb.Response {\r\n\r\n\tfmt.Println(\"***********Entering invoke_delete_all_records***********\")\r\n\r\n\tvar arguments []string\r\n\r\n\tvar ok bool\r\n\r\n\tvar additional_json interface{}\r\n\r\n\tvar record_specification = make(map[string]interface{})\r\n\r\n\tadditional_json, ok = map_specification[\"additional_json\"]\r\n\r\n\tif ok {\r\n\r\n\t\tadditional_json_data, ok1 := additional_json.(map[string]interface{})\r\n\r\n\t\tif ok1 {\r\n\r\n\t\t\tfor spec, _ := range additional_json_data {\r\n\r\n\t\t\t\trecord_specification[spec] = additional_json_data[spec]\r\n\t\t\t}\r\n\t\t} else {\r\n\t\t\tfmt.Println(\"Error: Invalid additional JSON fields in specification\")\r\n\r\n\t\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\t\treturn shim.Error(\"Error: Invalid additional JSON fields in specification\")\r\n\t\t}\r\n\t}\r\n\r\n\tvar keys_map interface{}\r\n\r\n\tvar specs map[string]interface{}\r\n\r\n\tkeys_map, error_keys_map := t.get_keys_map(stub, record_specification)\r\n\r\n\tif error_keys_map != nil {\r\n\r\n\t\tfmt.Println(error_keys_map.Error())\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\treturn shim.Error(error_keys_map.Error())\r\n\t}\r\n\r\n\tspecs, ok = keys_map.(map[string]interface{})\r\n\r\n\tif !ok {\r\n\r\n\t\tfmt.Println(\"Error: Invalid keys_map specification.\")\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\treturn shim.Error(\"Error: Invalid keys_map specification.\")\r\n\t}\r\n\r\n\tvar composite_key = make(map[string]interface{})\r\n\r\n\t//for spec, _ := range record_specification {\r\n\t//\r\n\t//\tcomposite_key[spec] = specs[spec]\r\n\t//}\r\n\r\n\tcomposite_key[\"stagingdb-update-status\"], ok = specs[\"stagingdb-update-status\"]\r\n\r\n\tif !ok {\r\n\r\n\t\tfmt.Println(\"Error: Composite key specification missing for deletion.\")\r\n\r\n\t\tfmt.Println(\"***********Exiting 
invoke_delete_all_records***********\")\r\n\r\n\t\treturn shim.Error(\"Error: Composite key specification missing for deletion.\")\r\n\t}\r\n\r\n\tcompositekeyJsonString, err_marshal := json.Marshal(composite_key)\r\n\r\n\tif err_marshal != nil {\r\n\r\n\t\tfmt.Println(\"Error in marshaling composite key\")\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\treturn shim.Error(\"Error in marshaling composite key\")\r\n\t}\r\n\r\n\trecord_specification[\"stagingdb-update-status\"] = \"True\"\r\n\r\n\tvar concatenated_record_json []byte\r\n\r\n\tconcatenated_record_json, err_marshal = json.Marshal(record_specification)\r\n\r\n\tif err_marshal != nil {\r\n\r\n\t\tfmt.Println(\"Error: Unable to Marshal Concatenated Record to JSON\")\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\treturn shim.Error(\"Error: Unable to Marshal Concatenated Record to JSON\")\r\n\t}\r\n\r\n\targuments = append(arguments, string(concatenated_record_json))\r\n\r\n\targuments = append(arguments, string(compositekeyJsonString))\r\n\r\n\terr_delete, processed_records, records_remaining := t.delete_by_composite_key(stub, arguments, specs, PROCESSING_LIMIT)\r\n\r\n\tif err_delete != nil {\r\n\r\n\t\tfmt.Println(err_delete.Error())\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\treturn shim.Error(err_delete.Error())\r\n\t}\r\n\r\n\tif records_remaining {\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\treturn shim.Success([]byte(\"1\"))\r\n\t}\r\n\r\n\trecord_specification[\"stagingdb-update-status\"] = \"False\"\r\n\r\n\tconcatenated_record_json, err_marshal = json.Marshal(record_specification)\r\n\r\n\tif err_marshal != nil {\r\n\r\n\t\tfmt.Println(\"Error: Unable to Marshal Concatenated Record to JSON\")\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\treturn 
shim.Error(\"Error: Unable to Marshal Concatenated Record to JSON\")\r\n\t}\r\n\r\n\targuments = make([]string, 0)\r\n\r\n\targuments = append(arguments, string(concatenated_record_json))\r\n\r\n\targuments = append(arguments, string(compositekeyJsonString))\r\n\r\n\tPROCESSING_LIMIT_TEMP := PROCESSING_LIMIT - processed_records\r\n\r\n\terr_delete, _, records_remaining = t.delete_by_composite_key(stub, arguments, specs, PROCESSING_LIMIT_TEMP)\r\n\r\n\tif err_delete != nil {\r\n\r\n\t\tfmt.Println(err_delete.Error())\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\treturn shim.Error(err_delete.Error())\r\n\t}\r\n\r\n\tif !records_remaining {\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\treturn shim.Success([]byte(\"0\"))\r\n\r\n\t} else {\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\treturn shim.Success([]byte(\"1\"))\r\n\t}\r\n\r\n}", "func (u *UdMap) Del(key string) { delete(u.Data, key) }", "func (kv *KV) Delete(key []byte) error {\n\t_, err := kv.db.Exec(\n\t\tfmt.Sprintf(\"DELETE FROM %s WHERE id=?\", string(kv.table)),\n\t\tkv.id(key),\n\t)\n\treturn err\n}", "func (wb *WriteBatch) Delete(key []byte) {\n\tcKey := bytesToChar(key)\n\tC.rocksdb_writebatch_delete(wb.c, cKey, C.size_t(len(key)))\n}", "func (p *Postgres) DeleteProjectKey(projectName string, keys ...string) error {\n\tkeysFormat := strings.Join(keys, \",\")\n\t_, err := p.db.Exec(fmt.Sprintf(\"UPDATE trees SET data=data #- '{%s}' where project=$1\", keysFormat), projectName)\n\treturn err\n}", "func (tcdb *Teocdb) Delete(key string) (err error) {\n\t// Does not return err of tcdb.session.Query function\n\tif err = tcdb.session.Query(`DELETE data FROM map WHERE key = ?`,\n\t\tkey).Exec(); err != nil {\n\t}\n\treturn\n}", "func (tidis *Tidis) DeleteWithTxn(txn interface{}, keys [][]byte) (deleted int64, err error) {\n\tvar (\n\t\tok bool\n\t\ttikv_txn 
kv.Transaction\n\t\trawData []byte\n\t\tdataType byte\n\t)\n\tif txn == nil {\n\t\terr = qkverror.ErrorServerInternal\n\t\treturn\n\t}\n\ttikv_txn, ok = txn.(kv.Transaction)\n\tif !ok {\n\t\terr = qkverror.ErrorServerInternal\n\t\treturn\n\t}\n\tfor _, k := range keys {\n\t\trawData, _ = tidis.db.Get(txn, k)\n\t\tif rawData != nil {\n\t\t\tdataType, _, err = utils.DecodeData(rawData)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tswitch dataType {\n\t\t\t//delete set member\n\t\t\tcase utils.SET_TYPE:\n\t\t\t\t_, err = tidis.ClearSetMembers(txn, k)\n\t\t\t//delete zset member\n\t\t\tcase utils.ZSET_TYPE:\n\t\t\t\t_, err = tidis.ZRemRangeByScore(txn, k, utils.SCORE_MIN, utils.SCORE_MAX)\n\t\t\t//delete hash field\n\t\t\tcase utils.HASH_TYPE:\n\t\t\t\t_, err = tidis.ClearHash(txn, k)\n\t\t\t//delete list member\n\t\t\tcase utils.LIST_TYPE:\n\t\t\t\t_, err = tidis.ClearListMembers(txn, k)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdeleted++\n\t\t}\n\n\t\terr = tikv_txn.Delete(k)\n\t\tif err != nil {\n\t\t\tdeleted = 0\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}", "func (g *Godis) DEL(keys ...string) (int, error) {\n\tcount, _ := g.EXISTS(keys...)\n\tfor _, key := range keys {\n\t\tdelete(g.db, key)\n\t}\n\treturn count, nil\n}", "func (g *GCache) DelMulti(keys []string) error {\n\tfor _, key := range keys {\n\t\tg.db.Remove(key)\n\t}\n\treturn nil\n}", "func removeKeys(keys ...string) func([]string, slog.Attr) slog.Attr {\n\treturn func(_ []string, a slog.Attr) slog.Attr {\n\t\tfor _, k := range keys {\n\t\t\tif a.Key == k {\n\t\t\t\treturn slog.Attr{}\n\t\t\t}\n\t\t}\n\t\treturn a\n\t}\n}", "func (p *PgStore) DeleteByUserKey(ctx context.Context, key string, expID ...string) error {\n\tif len(expID) > 0 {\n\t\tq := fmt.Sprintf(\"DELETE FROM %s WHERE user_key = $1 AND id != ALL ($2);\", p.tName)\n\t\t_, err := p.db.ExecContext(ctx, q, append([]interface{}{key}, pq.Array(expID))...)\n\t\treturn err\n\t}\n\n\tq := fmt.Sprintf(\"DELETE FROM %s 
WHERE user_key = $1;\", p.tName)\n\t_, err := p.db.ExecContext(ctx, q, key)\n\treturn err\n}", "func DelKeysonBolt(dbFileName string, bucketName string, keys []string) error {\n\tdb, err := bolt.Open(dbFileName, 0666, nil)\n\tdefer db.Close()\n\n\tif err != nil {\n\t\tlog.ErrLog(err)\n\t}\n\n\tif err = db.Update(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(bucketName))\n\t\tfor i := 0; i < len(keys); i++ {\n\t\t\tkey := keys[i]\n\t\t\terr := b.Delete([]byte(key))\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}); err != nil {\n\t\tlog.ErrLog(err)\n\t}\n\n\treturn err\n}", "func DeleteMulti(c context.Context, key []*datastore.Key) error {\n\tl := len(key)\n\n\t// only split into batches if needed\n\tif l <= SizeDelete {\n\t\treturn datastore.DeleteMulti(c, key)\n\t}\n\n\tvar errs []error\n\tvar batch []*datastore.Key\n\n\tfor s, e := 0, 0; s < l; s += SizeDelete {\n\t\te = s + SizeDelete\n\t\tif e >= l {\n\t\t\te = l\n\t\t}\n\n\t\tbatch = key[s:e]\n\n\t\tif err := datastore.DeleteMulti(c, batch); err != nil {\n\t\t\tif me, ok := err.(appengine.MultiError); ok {\n\t\t\t\tif len(errs) == 0 { // lazy init\n\t\t\t\t\terrs = make([]error, s, l) // add nils for previous batches\n\t\t\t\t}\n\n\t\t\t\tfor i := range me {\n\t\t\t\t\terrs = append(errs, me[i])\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if len(errs) > 0 { // no errors, but another batch had errors, so add nils\n\t\t\tfor _ = range batch {\n\t\t\t\terrs = append(errs, nil)\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(errs) > 0 {\n\t\treturn appengine.MultiError(errs) // combined multi-error for the whole set\n\t}\n\treturn nil\n}", "func (t *chaining) Delete(key string) {\n\thash := t.hash(key)\n\tlist := t.values[hash]\n\tif list == nil {\n\t\treturn\n\t}\n\tfirst := list.Start().Prev\n\tfor first != list.End() {\n\t\tfirst = first.Next\n\t\tpair := first.Value.(*pair)\n\t\tif pair.key == key {\n\t\t\tlist.Delete(first)\n\t\t\tt.len--\n\t\t\treturn\n\t\t}\n\t}\n}", "func (tx *transaction) 
deleteKey(key []byte, notifyIterators bool) {\n\t// Remove the key from the list of pendings keys to be written on\n\t// transaction commit if needed.\n\ttx.pendingKeys.Delete(key)\n\n\t// Add the key to the list to be deleted on transaction\tcommit.\n\ttx.pendingRemove.Put(key, nil)\n\n\t// Notify the active iterators about the change if the flag is set.\n\tif notifyIterators {\n\t\ttx.notifyActiveIters()\n\t}\n}", "func (db *memorydb) Del(key []byte) error {\n\n\tif db.enableBatch {\n\t\tdb.batch.Del(key)\n\t} else {\n\t\tdb.writeLock <- struct{}{}\n\t\tdefer func() {\n\t\t\t<-db.writeLock\n\t\t}()\n\n\t\tdb.sm.Lock()\n\t\tdefer db.sm.Unlock()\n\n\t\tdelete(db.db, string(key))\n\t}\n\n\treturn nil\n}", "func deleteRecord(w http.ResponseWriter, r *http.Request, ps httprouter.Params){\n\tfmt.Println(\"dalete\")\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tid, ok := getID(w, ps)\n\tfmt.Println(\"val\",id,ok)\n\n\tvar rec Field\n\tvar ires int\n\tif !ok {\n\t\trec, ires = mytable.searchByKey(ps.ByName(\"id\"))\n\t} else {\n\t\trec, ires = mytable.searchById(id)\n\t}\n\n\tfmt.Println(rec,ires)\n\tif ires == -1 {\n\t\tif !ok {\n\t\t\tjson.NewEncoder(w).Encode(\"No record with that key\")\n\t\t} else{\n\t\t\tjson.NewEncoder(w).Encode(\"No record with that id\")\n\t\t}\n\t} else {\n\t\tjson.NewEncoder(w).Encode(\"Record found\")\n\t\tfmt.Println(\"DELETE\")\n\t\tmytable.Print()\n\t\tok, mytable = mytable.delete(ires)\n\t\tif ok {\n\t\t\tremoveline(ires)\n\t\t}\n\n\t\t//if !changeLine(ires, ps.ByName(\"val\")){\n\t\t//\tjson.NewEncoder(w).Encode(\"Error\")\n\t\t//}\n\t}\n\n}", "func (db *DB) lDelete(t *batch, key []byte) int64 {\n\tmk := db.lEncodeMetaKey(key)\n\n\tvar headSeq int32\n\tvar tailSeq int32\n\tvar err error\n\n\tit := db.bucket.NewIterator()\n\tdefer it.Close()\n\n\theadSeq, tailSeq, _, err = db.lGetMeta(it, mk)\n\tif err != nil {\n\t\treturn 0\n\t}\n\n\tvar num int64 = 0\n\tstartKey := db.lEncodeListKey(key, headSeq)\n\tstopKey 
:= db.lEncodeListKey(key, tailSeq)\n\n\trit := store.NewRangeIterator(it, &store.Range{startKey, stopKey, store.RangeClose})\n\tfor ; rit.Valid(); rit.Next() {\n\t\tt.Delete(rit.RawKey())\n\t\tnum++\n\t}\n\n\tt.Delete(mk)\n\n\treturn num\n}", "func deleteRow(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n\tvar numberOfArgs int = 2\n\tif len(args) != numberOfArgs {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting: \" + strconv.Itoa(numberOfArgs))\n\t}\n\n\ttableName, keyValue := args[0], args[1]\n\tvar cols []shim.Column\n\tcol := shim.Column{Value: &shim.Column_String_{String_: keyValue}}\n\tcols = append(cols, col)\n\n\terr := stub.DeleteRow(tableName, cols)\n\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to delete row with key '\" + keyValue + \"' from '\" + tableName + \"' table: \" + err.Error())\n\t}\n\n\tfmt.Println(\"Successfuly deleted row with key '\" + keyValue + \"' from '\" + tableName + \"' table if any exists\")\n\treturn nil, nil\n}", "func delKeys(c *redis.Client, ch <-chan string, n int64, b bool) {\n\n\tpipe := c.Pipeline()\n\tvar i int64 = 0\n\tfor {\n\t\t\tif b {\n\t\t\t\tpipe.Del(<-ch)\n\t\t\t}\n\t\t\tif i >= n {\n\t\t\t\t_, err := pipe.Exec()\n\t\t\t\tcheck(err)\n\t\t\t\tfmt.Println(\"Deleted \", i, \" keys\")\n\t\t\t\ti = 0\n\t\t\t}\n\t\ti++\n\t}\n}", "func (r *rds) Del(ctx context.Context, key ...string) error {\n\t_, err := r.db.Del(ctx, key...).Result()\n\tif err != nil {\n\t\tzapLogger.Prepare(logger).\n\t\t\tDevelopment().\n\t\t\tLevel(zap.ErrorLevel).\n\t\t\tCommit(err.Error())\n\t\treturn err\n\t}\n\treturn nil\n}", "func (h *Header) Del(key string) {\n\tfor i, ok := h.index(key); ok; i, ok = h.index(key) {\n\t\th.slice = append(h.slice[:i], h.slice[i+2:]...)\n\t}\n}", "func (batch *UpdateBatch) Delete(key []byte) {\n\tbatch.KVs[string(key)] = nil\n}", "func (t *Table) ForceDeleteAll() error {\n\thashkey := t.design.GetHashKeyName()\n\trangekey := 
t.design.GetRangeKeyName()\n\n\tresult, err := t.Scan()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terrData := newErrors()\n\tfor _, item := range result.ToSliceMap() {\n\t\tvar e error\n\t\tswitch rangekey {\n\t\tcase \"\":\n\t\t\te = t.Delete(item[hashkey])\n\t\tdefault:\n\t\t\te = t.Delete(item[hashkey], item[rangekey])\n\t\t}\n\n\t\tif e != nil {\n\t\t\terrData.Add(e)\n\t\t}\n\t}\n\n\tif errData.HasError() {\n\t\treturn errData\n\t}\n\treturn nil\n}", "func (t *Map) DeleteAll(keys []interface{}) *Map {\n\tfor _, k := range keys {\n\t\tt.Delete(k)\n\t}\n\treturn t\n}", "func (i *OrderedItems) Del(key interface{}) bool {\n\ti.mu.Lock()\n\tdefer i.mu.Unlock()\n\tdelete(i.OrderedItems, key)\n\tfor id, val := range i.KeySlice {\n\t\tif val == key {\n\t\t\ti.KeySlice = append(i.KeySlice[:id], i.KeySlice[id+1:]...)\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (c *Cache) DeleteMulti(keys []string) map[string]error {\n\t_, _, errs := c.GetMulti(keys)\n\tc.client.Do(\"DEL\", keyArgs(keys)...)\n\n\t// DEL will only return false if the key is not present. 
To get a map of bools\n\t// to return, we can go over the items that are in the store (before we've\n\t// deleted them) and see which of the specified keys to delete are present in\n\t// the list of items.\n\tresults := make(map[string]error)\n\tfor _, key := range keys {\n\t\tresults[key], _ = errs[key]\n\t}\n\n\treturn results\n}", "func (lkp *lookup) Delete(vcursor VCursor, ids []interface{}, ksid []byte) error {\n\tvar val interface{}\n\tvar err error\n\tif lkp.isHashedIndex {\n\t\tval, err = vunhash(ksid)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"lookup.Delete: %v\", err)\n\t\t}\n\t} else {\n\t\tval = ksid\n\t}\n\tbindvars := map[string]interface{}{\n\t\tlkp.To: val,\n\t}\n\tfor _, id := range ids {\n\t\tbindvars[lkp.From] = id\n\t\tif _, err := vcursor.Execute(lkp.del, bindvars); err != nil {\n\t\t\treturn fmt.Errorf(\"lookup.Delete: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}", "func DeleteKeyValueViaCollection(iCollection string) (err error) {\n\tvar has bool\n\tvar _KeyValue = &KeyValue{Collection: iCollection}\n\tif has, err = Engine.Get(_KeyValue); (has == true) && (err == nil) {\n\t\tif row, err := Engine.Where(\"collection = ?\", iCollection).Delete(new(KeyValue)); (err != nil) || (row <= 0) {\n\t\t\treturn err\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn\n}", "func (h *HBaseClient) Del(key int64) (err error) {\n\tvar (\n\t\ti int\n\t\tk = make([]byte, 8)\n\t\tks = [Size]byte{}\n\t\tc interface{}\n\t)\n\tbinary.BigEndian.PutUint64(k, uint64(key))\n\tks = sha1.Sum(k)\n\tif c, err = hbasePool.Get(); err != nil {\n\t\tlog.Errorf(\"hbasePool.Get() error(%v)\", err)\n\t\treturn\n\t}\n\tdefer hbasePool.Put(c, false)\n\tfor i = 0; i < retryCount; i++ {\n\t\tif err = c.(hbasethrift.THBaseService).DeleteSingle(filemeta.HbaseTable, &hbasethrift.TDelete{\n\t\t\tRow: ks[:],\n\t\t\tColumns: []*hbasethrift.TColumn{\n\t\t\t\t&hbasethrift.TColumn{\n\t\t\t\t\tFamily: filemeta.HbaseFamilyBasic,\n\t\t\t\t\tQualifier: 
filemeta.HbaseColumnVid,\n\t\t\t\t},\n\t\t\t\t&hbasethrift.TColumn{\n\t\t\t\t\tFamily: filemeta.HbaseFamilyBasic,\n\t\t\t\t\tQualifier: filemeta.HbaseColumnCookie,\n\t\t\t\t},\n\t\t\t},\n\t\t}); err == nil {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(retrySleep)\n\t}\n\tif err != nil {\n\t\tlog.Errorf(\"client.DeleteSingle error(%v)\", err)\n\t}\n\treturn\n}", "func (b *batch) Delete(key []byte) error {\n\tb.writes = append(b.writes, keyvalue{common.CopyBytes(key), nil, true})\n\tb.size += len(key)\n\treturn nil\n}", "func Delete(c appengine.Context, key *Key) os.Error {\n\terr := DeleteMulti(c, []*Key{key})\n\tif errMulti, ok := err.(ErrMulti); ok {\n\t\treturn errMulti[0]\n\t}\n\treturn err\n}", "func (am AttributeMap) Delete(key string) bool {\n\tfor i, a := range *am.orig {\n\t\tif a.Key == key {\n\t\t\t(*am.orig)[i] = (*am.orig)[len(*am.orig)-1]\n\t\t\t*am.orig = (*am.orig)[:len(*am.orig)-1]\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (s *Syslog) Delete(ctx context.Context, key ...interface{}) error {\n\tvar err error\n\tvar dbConn *sql.DB\n\n\t// if deleted, bail\n\tif s._deleted {\n\t\treturn nil\n\t}\n\n\ttx, err := components.M.GetConnFromCtx(ctx)\n\tif err != nil {\n\t\tdbConn, err = components.M.GetMasterConn()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttableName, err := GetSyslogTableName(key...)\n\tif err != nil {\n\t\treturn err\n\t}\n\t//1\n\n\t// sql query with composite primary key\n\tsqlstr := `UPDATE ` + tableName + ` SET is_del = 1 WHERE id = ?`\n\n\t// run query\n\tutils.GetTraceLog(ctx).Debug(\"DB\", zap.String(\"SQL\", fmt.Sprint(sqlstr, s.ID)))\n\tif tx != nil {\n\t\t_, err = tx.Exec(sqlstr, s.ID)\n\t} else {\n\t\t_, err = dbConn.Exec(sqlstr, s.ID)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// set deleted\n\ts._deleted = true\n\n\treturn nil\n}", "func (am *ClientSetAtomicMap) Delete(key string) {\n\tam.mu.Lock()\n\tdefer am.mu.Unlock()\n\n\tm1 := am.val.Load().(_ClientSetMap)\n\t_, ok := m1[key]\n\tif !ok 
{\n\t\treturn\n\t}\n\n\tm2 := make(_ClientSetMap, len(m1)-1)\n\tfor k, v := range m1 {\n\t\tif k != key {\n\t\t\tm2[k] = v\n\t\t}\n\t}\n\n\tam.val.Store(m2)\n\treturn\n}", "func Delete(conn redis.Conn, key string) error {\n\t_, err := conn.Do(\"DEL\", key)\n\treturn err\n}", "func (am AttributeMap) Delete(key string) bool {\n\tfor i := range *am.orig {\n\t\takv := &(*am.orig)[i]\n\t\tif akv.Key == key {\n\t\t\t*akv = (*am.orig)[len(*am.orig)-1]\n\t\t\t*am.orig = (*am.orig)[:len(*am.orig)-1]\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (r *redisHandler) DeleteKeyEventPublisher(keys[] string, successStatus int) gin.HandlerFunc {\n\tfor _, key := range keys {\n\t\tif key == \"\" {\n\t\t\tsystemlog.Fatalln(\"parameter of DeleteKeyEventPublisher to delete redis key must not be blank string\")\n\t\t}\n\t}\n\tctx := context.Background()\n\n\treturn func(c *gin.Context) {\n\t\t// run business logic handler\n\t\tc.Next()\n\n\t\treqID := c.GetHeader(\"X-Request-Id\")\n\n\t\tinAdvanceTopSpan, _ := c.Get(\"TopSpan\")\n\t\ttopSpan, _ := inAdvanceTopSpan.(opentracing.Span)\n\n\t\tinAdvanceClaims, _ := c.Get(\"Claims\")\n\t\tuuidClaims, _ := inAdvanceClaims.(jwtutil.UUIDClaims)\n\n\t\tinAdvanceReq, _ := c.Get(\"Request\")\n\n\t\tredisSpan := r.tracer.StartSpan(\"PublishDeleteEvent\", opentracing.ChildOf(topSpan.Context())).SetTag(\"X-Request-Id\", reqID)\n\t\tvar status int\n\t\tswitch w := c.Writer.(type) {\n\t\tcase *ginHResponseWriter:\n\t\t\tstatus = w.status\n\t\tdefault:\n\t\t\terr := errors.New(\"unable to get response status code from default response writer\")\n\t\t\tredisSpan.SetTag(\"success\", false).LogFields(log.Object(\"keys\", keys), log.Error(err))\n\t\t\tredisSpan.Finish()\n\t\t\treturn\n\t\t}\n\n\t\tif status != successStatus {\n\t\t\terr := errors.New(\"response status code is not success status code to delete key in redis\")\n\t\t\tredisSpan.SetTag(\"success\", false).LogFields(log.Object(\"keys\", keys), 
log.Error(err))\n\t\t\tredisSpan.Finish()\n\t\t\treturn\n\t\t}\n\n\t\tredisKeys := make([]string, len(keys))\n\t\tfor i, key := range keys {\n\t\t\tredisKey, err := r.formatKeyWithRequest(key, c, inAdvanceReq, uuidClaims)\n\t\t\tif err != nil {\n\t\t\t\tredisSpan.SetTag(\"success\", false).LogFields(log.String(\"key\", redisKey), log.Error(err))\n\t\t\t\tredisSpan.Finish()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tredisKeys[i] = redisKey\n\n\t\t\t_, err = r.client.Publish(ctx, r.delTopic, redisKey).Result()\n\t\t\tif err != nil {\n\t\t\t\tredisSpan.SetTag(\"success\", false).LogFields(log.String(\"topic\", r.delTopic),\n\t\t\t\t\tlog.String(\"key\", redisKey), log.Error(err))\n\t\t\t\tredisSpan.Finish()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tredisSpan.LogFields(log.String(\"topic\", r.delTopic), log.Object(\"keys\", redisKeys))\n\t\tredisSpan.Finish()\n\t\treturn\n\t}\n}", "func (o *APIKey) Delete(exec boil.Executor) error {\n\tif o == nil {\n\t\treturn errors.New(\"models: no APIKey provided for delete\")\n\t}\n\n\tif err := o.doBeforeDeleteHooks(exec); err != nil {\n\t\treturn err\n\t}\n\n\targs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), apiKeyPrimaryKeyMapping)\n\tsql := \"DELETE FROM \\\"api_keys\\\" WHERE \\\"id\\\"=$1\"\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, sql)\n\t\tfmt.Fprintln(boil.DebugWriter, args...)\n\t}\n\n\t_, err := exec.Exec(sql, args...)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"models: unable to delete from api_keys\")\n\t}\n\n\tif err := o.doAfterDeleteHooks(exec); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (db *DB) Delete(key []byte) (err error) {\n\treturn db.LevigoDB.Delete(db.wo, key)\n}", "func (s *store) DeleteExpiredKeys() error {\n\tdbBatch := s.db.NewUpdateBatch()\n\titr, err := s.db.GetIterator(nil, []byte(fmt.Sprintf(\"%d%s\", time.Now().UTC().UnixNano(), compositeKeySep)))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor itr.Next() {\n\t\tkey := 
string(itr.Key())\n\t\tdbBatch.Delete([]byte(key))\n\t\tdbBatch.Delete([]byte(key[strings.Index(key, compositeKeySep)+1:]))\n\t}\n\n\tif dbBatch.Len() > 0 {\n\t\terr := s.db.WriteBatch(dbBatch, true)\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"failed to delete keys in db %s\", err.Error())\n\t\t}\n\t}\n\n\treturn nil\n}", "func (hd *HelpDoc) Delete(ctx context.Context, key ...interface{}) error {\n\tvar err error\n\tvar dbConn *sql.DB\n\n\t// if deleted, bail\n\tif hd._deleted {\n\t\treturn nil\n\t}\n\n\ttx, err := components.M.GetConnFromCtx(ctx)\n\tif err != nil {\n\t\tdbConn, err = components.M.GetMasterConn()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttableName, err := GetHelpDocTableName(key...)\n\tif err != nil {\n\t\treturn err\n\t}\n\t//1\n\n\t// sql query with composite primary key\n\tsqlstr := `UPDATE ` + tableName + ` SET is_del = 1 WHERE id = ?`\n\n\t// run query\n\tutils.GetTraceLog(ctx).Debug(\"DB\", zap.String(\"SQL\", fmt.Sprint(sqlstr, hd.ID)))\n\tif tx != nil {\n\t\t_, err = tx.Exec(sqlstr, hd.ID)\n\t} else {\n\t\t_, err = dbConn.Exec(sqlstr, hd.ID)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// set deleted\n\thd._deleted = true\n\n\treturn nil\n}", "func tcDelete(n *ir.CallExpr) ir.Node {\n\ttypecheckargs(n)\n\targs := n.Args\n\tif len(args) == 0 {\n\t\tbase.Errorf(\"missing arguments to delete\")\n\t\tn.SetType(nil)\n\t\treturn n\n\t}\n\n\tif len(args) == 1 {\n\t\tbase.Errorf(\"missing second (key) argument to delete\")\n\t\tn.SetType(nil)\n\t\treturn n\n\t}\n\n\tif len(args) != 2 {\n\t\tbase.Errorf(\"too many arguments to delete\")\n\t\tn.SetType(nil)\n\t\treturn n\n\t}\n\n\tl := args[0]\n\tr := args[1]\n\tif l.Type() != nil && !l.Type().IsMap() {\n\t\tbase.Errorf(\"first argument to delete must be map; have %L\", l.Type())\n\t\tn.SetType(nil)\n\t\treturn n\n\t}\n\n\targs[1] = AssignConv(r, l.Type().Key(), \"delete\")\n\treturn n\n}", "func (sm safeMap) Delete(key string) {\n\tsm <- commandData{action: REMOVE, key: 
key}\n}", "func (r *RedisSession) KeyDelete(args ...interface{}) (int, error) {\n\tprefixed := make([]interface{}, 0)\n\tfor _, arg := range args {\n\t\tprefixed = append(prefixed, r.AddPrefix(arg.(string)))\n\t}\n\n\treturn redis.Int(r.Do(\"DEL\", prefixed...))\n}", "func (r *dsState) DeleteMulti(keys []*ds.Key, cb ds.DeleteMultiCB) error {\n\tif len(keys) == 0 {\n\t\treturn nil\n\t}\n\treturn r.run(r.c, func() error {\n\t\treturn r.rds.DeleteMulti(keys, cb)\n\t})\n}", "func (rd *Deleter) DeleteRow(\n\tctx context.Context, b *kv.Batch, values []tree.Datum, pm PartialIndexUpdateHelper, traceKV bool,\n) error {\n\n\t// Delete the row from any secondary indices.\n\tfor i := range rd.Helper.Indexes {\n\t\t// If the index ID exists in the set of indexes to ignore, do not\n\t\t// attempt to delete from the index.\n\t\tif pm.IgnoreForDel.Contains(int(rd.Helper.Indexes[i].ID)) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// We want to include empty k/v pairs because we want to delete all k/v's for this row.\n\t\tentries, err := rowenc.EncodeSecondaryIndex(\n\t\t\trd.Helper.Codec,\n\t\t\trd.Helper.TableDesc,\n\t\t\t&rd.Helper.Indexes[i],\n\t\t\trd.FetchColIDtoRowIndex,\n\t\t\tvalues,\n\t\t\ttrue, /* includeEmpty */\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, e := range entries {\n\t\t\tif traceKV {\n\t\t\t\tlog.VEventf(ctx, 2, \"Del %s\", keys.PrettyPrint(rd.Helper.secIndexValDirs[i], e.Key))\n\t\t\t}\n\t\t\tb.Del(&e.Key)\n\t\t}\n\t}\n\n\tprimaryIndexKey, err := rd.Helper.encodePrimaryIndex(rd.FetchColIDtoRowIndex, values)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Delete the row.\n\tvar called bool\n\treturn rd.Helper.TableDesc.ForeachFamily(func(family *descpb.ColumnFamilyDescriptor) error {\n\t\tif called {\n\t\t\t// HACK: MakeFamilyKey appends to its argument, so on every loop iteration\n\t\t\t// after the first, trim primaryIndexKey so nothing gets overwritten.\n\t\t\t// TODO(dan): Instead of this, use something like 
engine.ChunkAllocator.\n\t\t\tprimaryIndexKey = primaryIndexKey[:len(primaryIndexKey):len(primaryIndexKey)]\n\t\t} else {\n\t\t\tcalled = true\n\t\t}\n\t\tfamilyID := family.ID\n\t\trd.key = keys.MakeFamilyKey(primaryIndexKey, uint32(familyID))\n\t\tif traceKV {\n\t\t\tlog.VEventf(ctx, 2, \"Del %s\", keys.PrettyPrint(rd.Helper.primIndexValDirs, rd.key))\n\t\t}\n\t\tb.Del(&rd.key)\n\t\trd.key = nil\n\t\treturn nil\n\t})\n}", "func deleteAllKeys() error {\n\tetcd, err := newEtcdClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer etcd.Cli.Close()\n\n\tetcd.Kv.Delete(etcd.Ctx, \"\", clientv3.WithPrefix())\n\treturn nil\n}", "func (fs *FactorySale) Delete(ctx context.Context, key ...interface{}) error {\n\tvar err error\n\tvar dbConn *sql.DB\n\n\t// if deleted, bail\n\tif fs._deleted {\n\t\treturn nil\n\t}\n\n\ttx, err := components.M.GetConnFromCtx(ctx)\n\tif err != nil {\n\t\tdbConn, err = components.M.GetMasterConn()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttableName, err := GetFactorySaleTableName(key...)\n\tif err != nil {\n\t\treturn err\n\t}\n\t//1\n\n\t// sql query with composite primary key\n\tsqlstr := `UPDATE ` + tableName + ` SET is_del = 1 WHERE fsid = ?`\n\n\t// run query\n\tutils.GetTraceLog(ctx).Debug(\"DB\", zap.String(\"SQL\", fmt.Sprint(sqlstr, fs.Fsid)))\n\tif tx != nil {\n\t\t_, err = tx.Exec(sqlstr, fs.Fsid)\n\t} else {\n\t\t_, err = dbConn.Exec(sqlstr, fs.Fsid)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// set deleted\n\tfs._deleted = true\n\n\treturn nil\n}", "func TestHashMapDel(t *T) {\n\n\tkvs := []*KV{\n\t\tKeyVal(1, \"one\"),\n\t\tKeyVal(2, \"two\"),\n\t\tKeyVal(3, \"three\"),\n\t}\n\tkvs1 := []*KV{\n\t\tKeyVal(2, \"two\"),\n\t\tKeyVal(3, \"three\"),\n\t}\n\n\t// Degenerate case\n\tm := NewHashMap()\n\tm1, ok := m.Del(1)\n\tassert.Equal(t, 0, Size(m))\n\tassert.Equal(t, 0, Size(m1))\n\tassert.Equal(t, false, ok)\n\n\t// Delete actual key\n\tm = NewHashMap(kvs...)\n\tm1, ok = 
m.Del(1)\n\tassertSeqContentsHashMap(t, kvs, m)\n\tassertSeqContentsHashMap(t, kvs1, m1)\n\tassert.Equal(t, true, ok)\n\n\t// Delete it again!\n\tm2, ok := m1.Del(1)\n\tassertSeqContentsHashMap(t, kvs1, m1)\n\tassertSeqContentsHashMap(t, kvs1, m2)\n\tassert.Equal(t, false, ok)\n\n}", "func (p *MyPipeline) Del(keys ...interface{}) client.Result {\n\treturn p.Pipeline.Del(keys)\n}", "func Remove(ctx context.Context, db *sql.DB, key []byte) error {\n\tctx, cancel := context.WithDeadline(ctx, time.Now().Add(3*time.Second))\n\tdefer cancel()\n\tquery := \"DELETE FROM keys WHERE key=?\"\n\t_, err := db.ExecContext(ctx, query, string(key))\n\tif err != nil {\n\t\treturn errors.Errorf(\"could not delete key=%q: %w\", string(key), err).WithField(\"query\", query)\n\t}\n\n\treturn nil\n}", "func (t *Text) Delete(k ...string) {\n\tfor _, x := range k {\n\t\tif err := ValidateTextKey(x); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tdelete(t.m, x)\n\t}\n}", "func Delete(keys ...string) error {\n\tif c == nil {\n\t\treturn nil\n\t}\n\n\treturn c.Del(keys...).Err()\n}", "func (md *ImpMySQLDB) Delete(_ context.Context, schema, table string, keys map[string]interface{}) error {\n\targs := make([]interface{}, 0, len(keys))\n\twhere := genWhere(keys, &args)\n\tstmt := fmt.Sprintf(\"DELETE FROM `%s`.`%s` WHERE %s;\", schema, table, where)\n\t_, err := md.db.Exec(stmt, args...)\n\n\tif md.verbose {\n\t\tstmt = md.genPlainSQL(stmt, args)\n\t\tfmt.Println(stmt)\n\t}\n\n\treturn errors.Trace(err)\n}", "func TestDelete(t *testing.T) {\n\tdb, err := Open(db_filename, \"c\")\n\tdefer db.Close()\n\tdefer os.Remove(db_filename)\n\n\tdb.Insert(\"foo\", \"bar\")\n\terr = db.Delete(\"foo\")\n\texists := db.Exists(\"foo\")\n\tif err != nil || exists {\n\t\tt.Error(\"Delete()ed key not removed\")\n\t}\n}", "func (md Metadata) Del(key string) {\n\t// fast path\n\tif _, ok := md[key]; ok {\n\t\tdelete(md, key)\n\t} else {\n\t\t// slow path\n\t\tdelete(md, 
textproto.CanonicalMIMEHeaderKey(key))\n\t}\n}", "func (db *DB) Delete(wo *WriteOptions, key []byte) error {\n\tif db.closed {\n\t\tpanic(ErrDBClosed)\n\t}\n\n\tvar errStr *C.char\n\tvar k *C.char\n\tif len(key) != 0 {\n\t\tk = (*C.char)(unsafe.Pointer(&key[0]))\n\t}\n\n\tC.leveldb_delete(\n\t\tdb.Ldb, wo.Opt, k, C.size_t(len(key)), &errStr)\n\n\tif errStr != nil {\n\t\tgs := C.GoString(errStr)\n\t\tC.leveldb_free(unsafe.Pointer(errStr))\n\t\treturn DatabaseError(gs)\n\t}\n\treturn nil\n}", "func (m *ValuesResultArrayHash) Delete(k field.Values) {\n\thash := m.hash(k)\n\tfor entry, ok := m.lookup[hash]; ok; entry, ok = m.lookup[hash] {\n\t\tif m.equals(entry.key.key, k) {\n\t\t\tm.removevaluesResultArrayHashKey(hash, entry.key)\n\t\t\treturn\n\t\t}\n\t\t// Linear probe to \"next\" to this entry (really a rehash)\n\t\thash++\n\t}\n}", "func (ls *KvPairs) Del(key string) {\n\n\tkvp_mu.Lock()\n\tdefer kvp_mu.Unlock()\n\n\tfor i, prev := range *ls {\n\n\t\tif prev.Key == key {\n\t\t\t*ls = append((*ls)[:i], (*ls)[i+1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n}" ]
[ "0.6760398", "0.66280895", "0.65578467", "0.65088505", "0.6401223", "0.63470906", "0.63015383", "0.6252232", "0.6213148", "0.6193571", "0.6180504", "0.61795783", "0.6149718", "0.61181146", "0.61107606", "0.6109994", "0.6107761", "0.6069126", "0.60688424", "0.60659015", "0.60581213", "0.6028888", "0.6016618", "0.6004492", "0.6003155", "0.5999186", "0.5993487", "0.5993342", "0.5991323", "0.59899384", "0.59744316", "0.59607744", "0.5934302", "0.5908262", "0.58934027", "0.5883594", "0.58826685", "0.58696705", "0.5862265", "0.5855342", "0.58542794", "0.5848248", "0.5845864", "0.58446234", "0.58238596", "0.5812947", "0.5812737", "0.5807206", "0.5807056", "0.57839465", "0.5781932", "0.57811755", "0.5758008", "0.5748887", "0.57406867", "0.5733948", "0.57315654", "0.5728245", "0.5727046", "0.5716953", "0.57150507", "0.5708997", "0.57081705", "0.5699908", "0.5686998", "0.56821674", "0.5668977", "0.56676644", "0.56644934", "0.5660752", "0.5656925", "0.5649827", "0.5648816", "0.564793", "0.5647619", "0.5647512", "0.56433046", "0.5635078", "0.56307316", "0.5630222", "0.56207734", "0.56161726", "0.5608942", "0.5596951", "0.55836606", "0.55831695", "0.55494815", "0.55474454", "0.554737", "0.5546094", "0.5542355", "0.55417484", "0.5537693", "0.55368644", "0.55358535", "0.5534589", "0.5533105", "0.5527692", "0.55275613", "0.55274326" ]
0.56412256
77
NewStore returns a new micro Store backed by sql
func NewStore(opts ...store.Option) store.Store { options := store.Options{ Database: defaultDatabase, Table: defaultTable, } for _, o := range opts { o(&options) } // new store s := new(sqlStore) s.options = options s.databases = make(map[string]DB) s.re = regexp.MustCompile("[^a-zA-Z0-9]+") go s.expiryLoop() // return store return s }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewStore()(*Store) {\n m := &Store{\n Entity: *iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.NewEntity(),\n }\n return m\n}", "func newStore(ts service.Service, config *Config) (*Store, error) {\n\tif config.Datastore == nil {\n\t\tdatastore, err := newDefaultDatastore(config.RepoPath, config.LowMem)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tconfig.Datastore = datastore\n\t}\n\tif config.EventCodec == nil {\n\t\tconfig.EventCodec = newDefaultEventCodec(config.JsonMode)\n\t}\n\tif !managedDatastore(config.Datastore) {\n\t\tif config.Debug {\n\t\t\tif err := util.SetLogLevels(map[string]logging.LogLevel{\"store\": logging.LevelDebug}); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\ts := &Store{\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tdatastore: config.Datastore,\n\t\tdispatcher: newDispatcher(config.Datastore),\n\t\teventcodec: config.EventCodec,\n\t\tmodelNames: make(map[string]*Model),\n\t\tjsonMode: config.JsonMode,\n\t\tlocalEventsBus: &localEventsBus{bus: broadcast.NewBroadcaster(0)},\n\t\tstateChangedNotifee: &stateChangedNotifee{},\n\t\tservice: ts,\n\t}\n\n\tif s.jsonMode {\n\t\tif err := s.reregisterSchemas(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\ts.dispatcher.Register(s)\n\treturn s, nil\n}", "func NewStore(db *sql.DB) *Store {\n\n\treturn &Store{\n\t\tdb: db,\n\t\tQueries: New(db), //defined in db.go by sqlc\n\t}\n}", "func NewStore() (StoreType, error) {\n\tconnection, err := config.DBConnectionString()\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn nil, err\n\t}\n\n\tdb, err := sqlx.Connect(driver, connection)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn nil, err\n\t}\n\ts := &Store{\n\t\tdb: db,\n\t}\n\n\treturn s, nil\n}", "func NewStore(l log.Logger, dsn string) (hydrocarbon.PrimitiveStore, error) {\n\tdb, err := sqlx.Connect(\"postgres\", dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb.Mapper = 
reflectx.NewMapperFunc(\"json\", strings.ToLower)\n\n\terr = Migrate(l, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Store{\n\t\tdb: db,\n\t}, nil\n}", "func newStore(c *Config, httpAddr, raftAddr string) *store {\n\tinternalData := meta.Data{\n\t\tIndex: 1,\n\t}\n\ts := store{\n\t\tdata: &Data{\n\t\t\tData: internalData,\n\t\t},\n\t\tclosing: make(chan struct{}),\n\t\tdataChanged: make(chan struct{}),\n\t\tpath: c.Dir,\n\t\tconfig: c,\n\t\thttpAddr: httpAddr,\n\t\traftAddr: raftAddr,\n\t\tlogger: zap.New(zap.NullEncoder()),\n\t}\n\n\treturn &s\n}", "func newStore() storage.Store {\n\tdata := `{\n \"management_chain\": {\n \"bob\": [\n \"ken\",\n \"janet\"\n ],\n \"alice\": [\n \"janet\"\n ]\n }\n}`\n\n\tvar json map[string]interface{}\n\n\terr := util.UnmarshalJSON([]byte(data), &json)\n\tif err != nil {\n\t\t// Handle error.\n\t}\n\n\t// Manually create the storage layer. inmem.NewFromObject returns an\n\t// in-memory store containing the supplied data.\n\tstore := inmem.NewFromObject(json)\n\treturn store\n}", "func NewStore(dbName string) *Store {\n\tdb, err := bolt.Open(dbName, 0600, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn &Store{db: db}\n}", "func New(db *pgxpool.Pool, pools *pools.Pools) (*Store, error) {\n\t// Confirm the database has the right schema.\n\texpectedSchema, err := expectedschema.Load()\n\tif err != nil {\n\t\treturn nil, skerr.Wrap(err)\n\t}\n\n\tactual, err := schema.GetDescription(db, Tables{})\n\tif err != nil {\n\t\treturn nil, skerr.Wrap(err)\n\t}\n\tif diff := assertdeep.Diff(expectedSchema, *actual); diff != \"\" {\n\t\treturn nil, skerr.Fmt(\"Schema needs to be updated: %s.\", diff)\n\t}\n\n\treturn &Store{\n\t\tdb: db,\n\t\tpools: pools,\n\t}, nil\n}", "func NewStore(db *sql.DB) *Store {\n\treturn &Store{\n\t\tdb: db,\n\t\tQueries: New(db), // New creates and returns a queries object\n\t}\n}", "func New(options Options) *Store {\n\tif options.Codec == nil {\n\t\toptions.Codec = 
DefaultOptions.Codec\n\t}\n\n\tif options.Interval == 0 {\n\t\toptions.Interval = DefaultOptions.Interval\n\t}\n\n\tif options.TableName == \"\" {\n\t\toptions.TableName = DefaultOptions.TableName\n\t}\n\n\tsql := newSqlSvr(options.User, options.Pwd, options.Host, options.Db, options.TableName, options.Split)\n\tif sql == nil {\n\t\treturn nil\n\t}\n\n\ts := &Store{\n\t\tSql: sql,\n\t\tCodec: options.Codec,\n\t}\n\n\t//go s.autoGC(options.Interval)\n\n\treturn s\n}", "func newDBStore(db *leveldbhelper.DBHandle, dbName string) *store {\n\treturn &store{db, dbName}\n}", "func NewStore(db *sql.DB) Store {\n\treturn &SQLStore{\n\t\tdb:\t\tdb, \n\t\tQueries: New(db),\n\t}\n}", "func NewStore(name string) Store {\n\tnewFunc, ok := stores[name]\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn newFunc()\n}", "func New(opts ...StoreOption) (*Store, error) {\n\tso := storeOptions{}\n\n\tfor _, opt := range opts {\n\t\topt.apply(&so)\n\t}\n\n\tdbOptions := url.Values{}\n\tdbOptions.Set(\"_foreign_keys\", \"true\")\n\n\tdbURL := url.URL{\n\t\tScheme: \"file\",\n\t\tPath: so.path,\n\t\tRawQuery: dbOptions.Encode(),\n\t}\n\n\tdb, err := sqlx.Connect(\"sqlite3\", dbURL.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb.Mapper = reflectx.NewMapperFunc(\"json\", strings.ToLower)\n\n\treturn &Store{\n\t\tdb: db,\n\t}, nil\n}", "func New(provider storage.Provider) (*Store, error) {\n\tstore, err := provider.OpenStore(nameSpace)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to open unpublished operation store: %w\", err)\n\t}\n\n\treturn &Store{\n\t\tstore: store,\n\t}, nil\n}", "func NewStore(c *Config) *Store {\n\t// create a new store\n\tstore := Store{}\n\tstore.config = c\n\tstore.DBname = c.DBname\n\t// check if the file exists\n\tvar build bool\n\t_, err := os.Stat(c.DBname)\n\tif err != nil {\n\t\tlogger.Critical(\"error on stat , %s\", err)\n\t\tbuild = true\n\t}\n\t// if it is a new file build some tables\n\tif build {\n\t\tstore.Build(c)\n\t}\n\tstore.leases 
= Load(c.DBname)\n\treturn &store\n}", "func New() *Store {\n\treturn &Store{}\n}", "func New() *Store {\n\treturn &Store{}\n}", "func New(c *Config) *sqlstore {\n\tdsn := c.DSN()\n\tdbx, err := sqlx.Connect(c.Driver, dsn)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error connecting to database: %s\", err)\n\t}\n\tdb := &sqlstore{dbx, c.Driver, c}\n\n\tif c.Host == \"\" || c.Driver == SQLITE {\n\t\tlog.Printf(\"Connected to %s database: %s\", c.Driver, c.Dbname())\n\t} else {\n\t\tlog.Printf(\"Connected to %s database: %s on %s\", c.Driver, c.Dbname(), c.Host)\n\t}\n\n\t// apply database migrations (if any)\n\tdb.Migrate()\n\n\treturn db\n}", "func New() (Store, error) {\n dbUsername := os.Getenv(\"DB_USERNAME\")\n dbPassword := os.Getenv(\"DB_PASSWORD\")\n dbHost := os.Getenv(\"DB_HOST\")\n dbTable := os.Getenv(\"DB_TABLE\")\n dbPort := os.Getenv(\"DB_PORT\")\n dbSSLMode := os.Getenv(\"DB_SSL_MODE\")\n\n connectionString := fmt.Sprintf(\n \"host=%s port=%s user=%s dbname=%s password=%s sslmode=%s\",\n dbHost, dbPort, dbUsername, dbTable, dbPassword, dbSSLMode,\n )\n\n db, err := sqlx.Connect(\"postgres\", connectionString)\n if err != nil {\n return Store{}, err\n }\n\n return Store{\n db: db,\n }, nil\n}", "func NewStore() *Store {\n\tvar st Store\n\tst.Records = make(map[string]HostSet)\n\tst.Netviews = make(map[string]string)\n\tst.Cidrs = make(map[string]string)\n\treturn &st\n}", "func New(uri string, name string) (*Store, error) {\r\n\tclientOptions := options.Client().ApplyURI(uri)\r\n\r\n\tclient, err := mongo.Connect(context.TODO(), clientOptions)\r\n\tif err != nil {\r\n\t\tlog.Println(\"Error connecting to the database\")\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\tdb := client.Database(name)\r\n\treturn &Store{db, client}, nil\r\n}", "func NewStore(db *sql.DB) (*Store, error) {\n\tunprepared := map[string]string{\n\t\tQueryCreateItem: `\n\t\t\tINSERT INTO todo.items (title, description)\n\t\t\tVALUES(?, ?);\n\t\t`,\n\t\tQueryDeleteItem: `\n\t\t\tDELETE FROM 
todo.items\n\t\t\tWHERE id = ?;\n\t\t`,\n\t\tQueryFindItemByID: `\n\t\t\tSELECT i.id, i.title, i.description, i.completed, i.created_at, i.updated_at\n\t\t\tFROM todo.items i\n\t\t\tWHERE id = ?;\n\t\t`,\n\t\tQueryUpdateItemByID: `\n\t\t\tUPDATE todo.items i\n\t\t\tSET \n\t\t\t\ti.title = ?,\n\t\t\t\ti.description = ?,\n\t\t\t\ti.completed = ?\n\t\t\tWHERE i.id = ?;\n\t\t`,\n\t}\n\n\t// prepare all statements to verify syntax\n\tstmts, err := prepareStmts(db, unprepared)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := Store{\n\t\tdb: db,\n\t\tstmts: stmts,\n\t}\n\n\treturn &s, nil\n}", "func New(ctx context.Context, db *sql.DB, m map[string]string) (*Store, error) {\n\tstore := &Store{db: db}\n\terr := store.InitTable(ctx, m)\n\treturn store, err\n}", "func NewStore(db *database.DB, collection string) Store {\n\treturn &store{db, collection}\n}", "func NewStore(p string) (Store, error) {\n\tp = path.Join(p, SQLiteDBName)\n\tdb, err := sql.Open(\"sqlite3\", fmt.Sprintf(\"file:%s?mode=ro\", p))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstmt, err := db.Prepare(\"select value from entries where key = ?\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcache, err := lru.New(DirCacheSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\taclCache, err := lru.New(AccessCacheSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &sqlStore{\n\t\tdb: db,\n\t\tstmt: stmt,\n\t\tcache: cache,\n\t\tacl: aclCache,\n\n\t\tusers: make(map[string]int),\n\t\tgroups: make(map[string]int),\n\t}, nil\n}", "func New(storeConfig config.Store) (*Store, error) {\n\tdb, err := bolt.Open(storeConfig.DBPath, 0644, &bolt.Options{Timeout: 1 * time.Second})\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not open bolt DB database\")\n\t}\n\tbucketName := []byte(\"shorted\")\n\terr = db.Update(func(tx *bolt.Tx) error {\n\t\t_, err := tx.CreateBucketIfNotExists(bucketName)\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn 
&Store{\n\t\tdb: db,\n\t\tidLength: storeConfig.ShortedIDLength,\n\t\tbucketName: bucketName,\n\t}, nil\n}", "func NewStore(database string) (ClientStore, error) {\n\tdb, err := newClientDatabase(database)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn db, err\n}", "func NewStore() *Store {\n\treturn &Store{\n\t\topaStore: inmem.New(),\n\t}\n}", "func New(provider storage.Provider) (*Store, error) {\n\tstore, err := provider.OpenStore(nameSpace)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to open did anchor store: %w\", err)\n\t}\n\n\treturn &Store{\n\t\tstore: store,\n\t}, nil\n}", "func New(c *sqlstore.Config) Datastore {\n\treturn sqlstore.New(c)\n}", "func NewStore(ctx context.Context, l log.Logger, db *sqlx.DB, beaconName string) (*Store, error) {\n\tp := Store{\n\t\tlog: l,\n\t\tdb: db,\n\n\t\trequiresPrevious: chain.PreviousRequiredFromContext(ctx),\n\t}\n\n\tid, err := p.AddBeaconID(ctx, beaconName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.beaconID = id\n\n\treturn &p, nil\n}", "func New(storeType string) (*Store, error) {\n\tswitch storeType {\n\tcase \"memory\":\n\t\treturn &Store{storeType: storeType, engine: makeMemoryStore()}, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported store type: %s\", storeType)\n\t}\n}", "func NewStore() *Store {\n\treturn &Store{\n\t\tstore: make(map[workloadmeta.Kind]map[string]workloadmeta.Entity),\n\t}\n}", "func NewStore(schema Schema, options ...CeousOption) *BaseStore {\n\tstore := &BaseStore{\n\t\tschema: schema,\n\t}\n\tfor _, option := range options {\n\t\toption(store)\n\t}\n\tif !store.disableCache {\n\t\tstore.runner = store._runner\n\t} else {\n\t\tstore.runner = sq.NewStmtCacher(store._runner)\n\t}\n\treturn store\n}", "func New() *Store {\n\treturn &Store{\n\t\tingredient.NewStore(),\n\t\trecipe.NewStore(),\n\t}\n}", "func New(tableName string, accessor Accessor) (*Store, error) {\n\tstore := &Store{\n\t\ttableName: tableName,\n\t\taccessor: accessor,\n\t}\n\n\treturn 
store, nil\n}", "func NewStore() (*gorm.DB, sqlmock.Sqlmock, error) {\n\tsqldb, mock, err := sqlmock.New()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdb, err := gorm.Open(postgres.New(postgres.Config{\n\t\tConn: sqldb,\n\t}), &gorm.Config{})\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn db, mock, nil\n}", "func NewStore() *Store {\n\tdata := `{\n\t\t\"contacts\": [{\n\t\t\t\"name\": \"郭仲杰\",\n\t\t\t\"department\": \"公司其他组织/TME商业广告部\",\n\t\t\t\"title\": \"员工\",\n\t\t\t\"phoneNumber\": \"0755-86013388-75789\",\n\t\t\t\"email\": \"authurguo@tencent.com\"\n\t\t}]\n\t}`\n\ts := &Store{}\n\terr := json.Unmarshal([]byte(data), s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn s\n}", "func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *store {\n\ts := &store{\n\t\tb: b,\n\t\tig: ig,\n\t\tkvindex: newTreeIndex(),\n\n\t\tle: le,\n\n\t\tcurrentRev: revision{main: 1},\n\t\tcompactMainRev: -1,\n\n\t\tbytesBuf8: make([]byte, 8, 8),\n\t\tfifoSched: schedule.NewFIFOScheduler(),\n\n\t\tstopc: make(chan struct{}),\n\t}\n\n\tif s.le != nil {\n\t\ts.le.SetRangeDeleter(s)\n\t}\n\n\ttx := s.b.BatchTx()\n\ttx.Lock()\n\ttx.UnsafeCreateBucket(keyBucketName)\n\ttx.UnsafeCreateBucket(metaBucketName)\n\ttx.Unlock()\n\ts.b.ForceCommit()\n\n\tif err := s.restore(); err != nil {\n\t\t// TODO: return the error instead of panic here?\n\t\tpanic(\"failed to recover store from backend\")\n\t}\n\n\treturn s\n}", "func newDBStore(db *couchdb.CouchDatabase, dbName string) *dbstore {\n\treturn &dbstore{dbName, db}\n}", "func NewStore(init StoreInit) (s *Store, err error) {\r\n\tvar initialCapacity int\r\n\tif utils.IsSet(init.InitCapacity) {\r\n\t\tinitialCapacity = init.InitCapacity\r\n\t} else {\r\n\t\tinitialCapacity = len(init.Args)\r\n\t}\r\n\r\n\tlocalStore := Store{\r\n\t\tstore: make(map[string]string, initialCapacity),\r\n\t\tparent: init.Parent,\r\n\t}\r\n\r\n\tfor _, arg := range init.Args {\r\n\t\tkey, value, err := 
splitArgument(arg)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\r\n\t\tif value, err = encode.ConvertStringToUtf8(value); err != nil {\r\n\t\t\treturn nil, fmt.Errorf(\"Error converting value for key '%v' to UTF-8: %v\", key, err)\r\n\t\t}\r\n\r\n\t\tif !localStore.hasKey(key) {\r\n\t\t\tlocalStore.Set(key, value)\r\n\t\t} else {\r\n\t\t\treturn nil, fmt.Errorf(\"Duplicate key '%v' found\", key)\r\n\t\t}\r\n\t}\r\n\r\n\treturn &localStore, nil\r\n}", "func New(ctx context.Context, log logger.Logger, db *db.Store, cache *cache.Cache) (*Store, error) {\n\ts := &Store{\n\t\tlog: log,\n\t\tcache: cache,\n\t}\n\n\t// Set configuration\n\ts.setConfig()\n\n\tvar err error\n\n\tswitch s.typeStore {\n\tcase \"postgres\":\n\t\tfallthrough\n\tdefault:\n\t\ts.store, err = postgres.New(ctx, db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tlog.Info(\"init queryStore\", field.Fields{\n\t\t\"db\": s.typeStore,\n\t})\n\n\treturn s, nil\n}", "func newStore(c *Config) (*Store, error) {\n\tif c == nil {\n\t\tc = defaultConfig()\n\t}\n\tmutex := &sync.RWMutex{}\n\tstore := new(Store)\n\tstartTime := time.Now().UTC()\n\tfileWatcher, err := newWatcher(\".\")\n\tif err != nil {\n\t\tlog.Info(fmt.Sprintf(\"unable to init file watcher: %v\", err))\n\t}\n\tif c.Monitoring {\n\t\tmonitoring.Init()\n\t}\n\tstore.fileWatcher = fileWatcher\n\tstore.store = makeStorage(\"\")\n\tstore.keys = []string{}\n\tstore.compression = c.Compression\n\tstore.dbs = make(map[string]*DB)\n\tstore.lock = mutex\n\tstore.stat = new(stats.Statistics)\n\tstore.stat.Start = startTime\n\tstore.indexes = make(map[string]*index)\n\tc.setMissedValues()\n\tstore.config = c\n\tif c.LoadPath != \"\" {\n\t\terrLoad := loadData(store, c.LoadPath)\n\t\tif errLoad != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to load data: %v\", errLoad)\n\t\t}\n\t}\n\tstore.writer, err = newWriter(c.LoadPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create writer: %v\", err)\n\t}\n\treturn 
store, nil\n}", "func NewStore(storeType string) Store {\n\tns := Store{}\n\tns.Name = \"Shop with no Sign\"\n\towner := character.NewCharacter(\"\", \"\", \"\")\n\tns.Owner = owner.Name\n\tns.Location = \"Heldheim\"\n\tns.StoreType = storeType\n\t//ns.Inventory = generateInventoryForStore()\n\tcp := 0\n\tss := 0\n\tgc := 0\n\tns.Money = NewMoney(cp, ss, gc)\n\n\treturn ns\n}", "func NewStore(d *db.DB) *Store {\n\treturn &Store{\n\t\tdb: d,\n\t}\n}", "func New(ctx context.Context, log logger.Logger, db *db.Store, cache *cache.Cache) (*Store, error) { // nolint:gocognit\n\ts := &Store{\n\t\tlog: log,\n\t\tcache: cache,\n\t}\n\n\t// Set configuration\n\ts.setConfig()\n\n\tvar err error\n\n\tswitch s.typeStore {\n\tcase \"postgres\":\n\t\ts.store, err = postgres.New(ctx, db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase \"mongo\":\n\t\ts.store, err = mongo.New(ctx, db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase \"redis\":\n\t\ts.store, err = redis.New(ctx, db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase \"dgraph\":\n\t\ts.store, err = dgraph.New(ctx, db, log)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase \"leveldb\":\n\t\ts.store, err = leveldb.New(ctx, db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase \"badger\":\n\t\ts.store, err = badger.New(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase \"ram\":\n\t\tfallthrough\n\tdefault:\n\t\ts.store, err = ram.New(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tlog.Info(\"init linkStore\", field.Fields{\n\t\t\"db\": s.typeStore,\n\t})\n\n\treturn s, nil\n}", "func NewStore() (*Store, error) {\n\tpgDB, err := db.Dial()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"store.Dial\")\n\t}\n\n\tlog.Println(\"Running PostgreSQL migrations...\")\n\tif err := db.RunPgMigrations(); err != nil {\n\t\treturn nil, errors.Wrap(err, \"store.runPgMigrations\")\n\t}\n\n\tstore := &Store{\n\t\tDB: pgDB,\n\t\tArticles: 
repositories.NewArticlesRepository(pgDB),\n\t\tFeeds: repositories.NewFeedsRepository(pgDB),\n\t\tCategories: repositories.NewCategoryRepository(pgDB),\n\t\tSubcategories: repositories.NewSubcategoryRepository(pgDB),\n\t}\n\n\treturn store, nil\n}", "func New(cfg Config) (*SQLStore, error) {\n\n\tdb, err := sqlx.Open(string(cfg.Driver), cfg.DataSource)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to open db connection\")\n\t}\n\n\tbuilder := sq.StatementBuilder.PlaceholderFormat(sq.Question)\n\n\tswitch cfg.Driver {\n\tcase DBDriverMySQL: // mysql\n\t\tdb.MapperFunc(func(s string) string { return s })\n\n\t\t//case DBDriverPostgres: // postgres\n\t\t//\tbuilder = builder.PlaceholderFormat(sq.Dollar)\n\n\t}\n\n\treturn &SQLStore{\n\t\tcfg: cfg,\n\t\tdb: db,\n\t\tbuilder: builder,\n\t}, nil\n}", "func NewStore() *Store {\n\treturn &Store{\n\t\tRaftDir: \"\",\n\t\tRaftBindAddr: \"\",\n\t\tdata: make(map[string]string),\n\t}\n}", "func (r *StoreRepository) CreateStore(store *model.Store) error {\n\t_id := bson.NewObjectId()\n\tstore.Id = _id\n\terr := r.C.Insert(&store)\n\n\treturn err\n\n}", "func newLocalStore(address net.Address) (Store, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\tsession, err := primitive.NewSession(ctx, primitive.Partition{ID: 1, Address: address})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tupdatesName := primitive.Name{\n\t\tNamespace: \"local\",\n\t\tName: primitiveName,\n\t}\n\tupdates, err := _map.New(context.Background(), updatesName, []*primitive.Session{session})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &atomixStore{\n\t\tupdates: updates,\n\t}, nil\n}", "func NewStore(filename string) (*Store, error) {\n\tf, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, newStoreDefaultPerms)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewStoreWithBackend(f)\n}", "func NewStore(yogo *sql.DB) Store {\n\treturn &SQLStore{\n\t\tyogo: 
yogo,\n\t\tQueries: New(yogo),\n\t}\n}", "func NewStore(host, notice, dbPath string) *Store {\n\ts := &Store{\n\t\thost: host,\n\t\tdbPath: dbPath,\n\t\tuser: newUserDB(dbPath),\n\t\tmessage: newMessageDB(dbPath),\n\t\tgroup: newGroupDB(dbPath),\n\t}\n\n\ts.friend = newFriendDB(s.user)\n\ts.postman = newPostman(notice, s.user, s.friend, s.group)\n\n\treturn s\n}", "func NewStore() (s Store) {\n\ts = make(Store, 0)\n\treturn s\n}", "func NewStore() *Store {\n\treturn &Store{commands: make(map[string]*Config, 0)}\n}", "func NewStore() *Store {\n\treturn &Store{}\n}", "func NewStore() *Store {\n\treturn &Store{}\n}", "func NewStore() *Store {\n\treturn &Store{}\n}", "func NewStore(client dbClient, validSec int) *Store {\n\treturn &Store{\n\t\tclient: client,\n\t\tvalid: time.Duration(validSec) * time.Second,\n\t}\n}", "func createStore(storeType, storeDir string) (core.Store, error) {\n\tswitch storeType {\n\tcase \"InMemory\":\n\t\tstore := new(core.InMemoryStore)\n\t\treturn store, core.InitializeStore(store, nil)\n\tcase \"BadgerDB\":\n\t\topts := badger.DefaultOptions\n\t\topts.Dir = storeDir\n\t\topts.ValueDir = storeDir\n\t\tstore := new(core.BadgerStore)\n\t\treturn store, core.InitializeStore(store, opts)\n\tcase \"BoltDB\":\n\t\tos.Mkdir(storeDir, os.ModePerm)\n\t\tstorePath := filepath.Join(storeDir, \"boltdbstore\")\n\t\topts := &core.BoltStoreConfig{Path: storePath, Mode: 600, Options: nil}\n\t\tstore := new(core.BoltStore)\n\t\treturn store, core.InitializeStore(store, opts)\n\t}\n\tstore := new(core.InMemoryStore)\n\treturn store, core.InitializeStore(store, nil)\n}", "func New(db *db.DB) core.StageStore {\n\treturn &stageStore{db}\n}", "func NewStore(c *cli.Context) Store {\n\treturn &datastore{\n\t\tDB: open(c.String(\"database-config\")),\n\t}\n}", "func New(dburl string) *Store {\n\treturn &Store{\n\t\tDatabaseURL: dburl,\n\t}\n}", "func New(opt StoreOptions) *Store {\n\tstore := &Store{}\n\n\tfmt.Println(opt.toString())\n\tsession, err := 
mgo.Dial(opt.toString())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tstore.cli = session\n\tstore.database = opt.Database\n\treturn store\n}", "func NewStore() *Store {\n\treturn &Store{\n\t\tls: make(map[string]InitFunc),\n\t}\n}", "func NewStore(db *cockroach.DB, logger *zap.Logger) *Store {\n\treturn &Store{\n\t\tdb: db,\n\t\tlogger: logger,\n\t\tclock: DefaultClock,\n\t}\n}", "func NewStore() *Store {\n\treturn &Store{\n\t\tES: MustOpenConnection(),\n\t}\n}", "func NewStore(db database.Client) Store {\n\treturn &store{\n\t\tdb: db,\n\t}\n}", "func (s *service) NewStore(ctx context.Context, req *pb.NewStoreRequest) (*pb.NewStoreReply, error) {\n\tlog.Debugf(\"received new store request\")\n\n\tid, _, err := s.manager.NewStore()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &pb.NewStoreReply{\n\t\tID: id.String(),\n\t}, nil\n}", "func NewStore() Store {\n\treturn Store{cmap.New()}\n}", "func New(connectionString string, log *log.Logger) (*Store, error) {\n\tdb, err := gorm.Open(postgres.Open(connectionString), &gorm.Config{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := db.AutoMigrate(&gh.Repository{}, &gh.Commit{}); err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Println(\"db init successful\")\n\treturn &Store{\n\t\tdb: db,\n\t\tlog: log,\n\t}, nil\n}", "func New(ds datastore.Datastore) *Store {\n\treturn &Store{\n\t\tds: ds,\n\t}\n}", "func NewStore(robot *Robot) (Store, error) {\n\tname := Config.StoreName\n\tif _, ok := Stores[name]; !ok {\n\t\treturn nil, fmt.Errorf(\"%s is not a registered store\", Config.StoreName)\n\t}\n\n\tstore, err := Stores[name].newFunc(robot)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn store, nil\n}", "func New(db *db.DB) core.StepStore {\n\treturn &stepStore{db}\n}", "func New(client *ifirestore.Client, crsID string) *StoreImpl {\n\treturn &StoreImpl{\n\t\tclient: client,\n\t\tcrsID: crsID,\n\t}\n}", "func (e Engine) NewStore(config dvid.StoreConfig) (dvid.Store, bool, error) {\n\treturn 
e.newLogs(config)\n}", "func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) *store {\n\tif lg == nil {\n\t\tlg = zap.NewNop()\n\t}\n\tif cfg.CompactionBatchLimit == 0 {\n\t\tcfg.CompactionBatchLimit = defaultCompactBatchLimit\n\t}\n\ts := &store{\n\t\tcfg: cfg,\n\t\tb: b,\n\t\tkvindex: newTreeIndex(lg),\n\n\t\tle: le,\n\n\t\tcurrentRev: 1,\n\t\tcompactMainRev: -1,\n\n\t\tfifoSched: schedule.NewFIFOScheduler(),\n\n\t\tstopc: make(chan struct{}),\n\n\t\tlg: lg,\n\t}\n\ts.hashes = newHashStorage(lg, s)\n\ts.ReadView = &readView{s}\n\ts.WriteView = &writeView{s}\n\tif s.le != nil {\n\t\ts.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write(traceutil.TODO()) })\n\t}\n\n\ttx := s.b.BatchTx()\n\ttx.LockOutsideApply()\n\ttx.UnsafeCreateBucket(buckets.Key)\n\ttx.UnsafeCreateBucket(buckets.Meta)\n\ttx.Unlock()\n\ts.b.ForceCommit()\n\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tif err := s.restore(); err != nil {\n\t\t// TODO: return the error instead of panic here?\n\t\tpanic(\"failed to recover store from backend\")\n\t}\n\n\treturn s\n}", "func New(ctx context.Context, alias, path string) (*Store, error) {\n\tdebug.Log(\"Instantiating %q at %q\", alias, path)\n\n\ts := &Store{\n\t\talias: alias,\n\t\tpath: path,\n\t}\n\n\t// init storage and rcs backend\n\tif err := s.initStorageBackend(ctx); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to init storage backend: %w\", err)\n\t}\n\n\tdebug.Log(\"Storage for %s => %s initialized as %v\", alias, path, s.storage)\n\n\t// init crypto backend\n\tif err := s.initCryptoBackend(ctx); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to init crypto backend: %w\", err)\n\t}\n\n\tdebug.Log(\"Crypto for %s => %s initialized as %v\", alias, path, s.crypto)\n\n\treturn s, nil\n}", "func New() *Store {\n\treturn &Store{\n\t\tmu: sync.Mutex{},\n\t\tsess: make(map[string]*entities.Session),\n\t}\n}", "func New(ctx context.Context, cfg *config.Config, logger *logrus.Logger) (*Store, error) {\n\tvar store 
Store\n\n\tstore.config = cfg\n\tstore.logger = logger\n\n\t// connect to postgres\n\tpgConn, err := postgres.NewConnect(cfg.Postgres)\n\tif err != nil {\n\t\treturn &store, errpath.Err(err)\n\t}\n\tstore.Pg = pgConn\n\n\tif pgConn != nil {\n\t\tgo store.keepAlivePg()\n\t}\n\n\treturn &store, nil\n}", "func newStore() (keypair.Keystore, error) {\n\tlog := logrus.StandardLogger().WithField(\"type\", \"keypair/dynamodb\")\n\n\tawsConfig, err := external.LoadDefaultAWSConfig()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to load default aws config\")\n\t}\n\n\tif awsConfig.Region == \"\" {\n\t\tregion, err := ec2metadata.New(awsConfig).Region()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to get region from ec2 metadata\")\n\t\t}\n\t\tawsConfig.Region = region\n\t}\n\n\tdynamoDBClient := dynamodb.New(awsConfig)\n\ttableName := os.Getenv(tableEnvVarKey)\n\tif len(tableName) == 0 {\n\t\tlog.Infof(\"table name not configured, using default (%s)\", defaultTableName)\n\t\ttableName = defaultTableName\n\t}\n\n\treturn &store{\n\t\tlog: log,\n\t\tclient: dynamoDBClient,\n\t\ttableName: aws.String(tableName),\n\t}, nil\n}", "func New(config *Config) Store {\n\treturn newStore(config)\n}", "func NewStore() *Store {\n\n\t// Create store wrapper.\n\ts := &Store{\n\t\tStoreService: influxdb.NewStoreService(),\n\t}\n\treturn s\n}", "func New(dir string) *Store {\n\treturn NewWithDB(\"\", badger.New(dir))\n}", "func NewStore(pageRadius uint) Store {\n\tfield := cmodels.NewField(pageRadius)\n\tvar components cmodels.Components\n\treturn Store{\n\t\tfield: &field,\n\t\tcomponents: &components,\n\t\tplayers: make(map[uuid.UUID]*models.PlayerInfo, 0),\n\t\tsubscribtions: make([]func(), 0),\n\t\teventQueue: make(chan *models.Event),\n\t\tisLocked: false,\n\t}\n}", "func New(client *ifirestore.Client) *Store {\n\treturn &Store{\n\t\tclient: client,\n\t}\n}", "func NewStore(cfgFilepath string) (*Store, error) {\n\tcfg, err := 
getConfig(cfgFilepath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb, err := CreateDB(*cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Store{db}, nil\n}", "func New(_ string) (s *Store, err error) {\n\treturn &Store{xz.NewMap()}, nil\n}", "func New(c Config) (*Store, error) {\n\tstore := &Store{config: c}\n\tdb, err := sqlx.Connect(\"postgres\", fmt.Sprintf(\"user=%s password=%s dbname=%s host=%s port=%d sslmode=disable\", c.User, c.Password, c.Database, c.Host, c.Port))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdb.DB.SetMaxOpenConns(c.MaxConnections)\n\tstore.DB = db\n\treturn store, nil\n}", "func New(ctx context.Context, alias, path string, cfgdir string) (*Store, error) {\n\tpath = fsutil.CleanPath(path)\n\ts := &Store{\n\t\talias: alias,\n\t\tpath: path,\n\t\tsync: gitmock.New(),\n\t}\n\n\t// init store backend\n\tswitch backend.GetStoreBackend(ctx) {\n\tcase backend.FS:\n\t\ts.store = fs.New(path)\n\t\tout.Debug(ctx, \"Using Store Backend: fs\")\n\tcase backend.KVMock:\n\t\ts.store = kvmock.New()\n\t\tout.Debug(ctx, \"Using Store Backend: kvmock\")\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Unknown store backend\")\n\t}\n\n\t// init sync backend\n\tswitch backend.GetSyncBackend(ctx) {\n\tcase backend.GoGit:\n\t\tout.Cyan(ctx, \"WARNING: Using experimental sync backend 'go-git'\")\n\t\tgit, err := gogit.Open(path)\n\t\tif err != nil {\n\t\t\tout.Debug(ctx, \"Failed to initialize sync backend 'gogit': %s\", err)\n\t\t} else {\n\t\t\ts.sync = git\n\t\t\tout.Debug(ctx, \"Using Sync Backend: go-git\")\n\t\t}\n\tcase backend.GitCLI:\n\t\tgpgBin, _ := gpgcli.Binary(ctx, \"\")\n\t\tgit, err := gitcli.Open(path, gpgBin)\n\t\tif err != nil {\n\t\t\tout.Debug(ctx, \"Failed to initialize sync backend 'git': %s\", err)\n\t\t} else {\n\t\t\ts.sync = git\n\t\t\tout.Debug(ctx, \"Using Sync Backend: git-cli\")\n\t\t}\n\tcase backend.GitMock:\n\t\t// no-op\n\t\tout.Debug(ctx, \"Using Sync Backend: git-mock\")\n\tdefault:\n\t\treturn nil, 
fmt.Errorf(\"Unknown Sync Backend\")\n\t}\n\n\t// init crypto backend\n\tswitch backend.GetCryptoBackend(ctx) {\n\tcase backend.GPGCLI:\n\t\tgpg, err := gpgcli.New(ctx, gpgcli.Config{\n\t\t\tUmask: fsutil.Umask(),\n\t\t\tArgs: gpgcli.GPGOpts(),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ts.crypto = gpg\n\t\tout.Debug(ctx, \"Using Crypto Backend: gpg-cli\")\n\tcase backend.XC:\n\t\t//out.Red(ctx, \"WARNING: Using highly experimental crypto backend!\")\n\t\tcrypto, err := xc.New(cfgdir, client.New(cfgdir))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ts.crypto = crypto\n\t\tout.Debug(ctx, \"Using Crypto Backend: xc\")\n\tcase backend.GPGMock:\n\t\t//out.Red(ctx, \"WARNING: Using no-op crypto backend (NO ENCRYPTION)!\")\n\t\ts.crypto = gpgmock.New()\n\t\tout.Debug(ctx, \"Using Crypto Backend: gpg-mock\")\n\tcase backend.OpenPGP:\n\t\tcrypto, err := openpgp.New(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ts.crypto = crypto\n\t\tout.Debug(ctx, \"Using Crypto Backend: openpgp\")\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"no valid crypto backend selected\")\n\t}\n\n\treturn s, nil\n}", "func New(cfg *types.Store, sub []byte) queue.Module {\r\n\tbs := drivers.NewBaseStore(cfg)\r\n\tvar subcfg subConfig\r\n\tif sub != nil {\r\n\t\ttypes.MustDecode(sub, &subcfg)\r\n\t}\r\n\tmavls := &Store{bs, &sync.Map{}, subcfg.EnableMavlPrefix, subcfg.EnableMVCC,\r\n\t\tsubcfg.EnableMavlPrune, subcfg.PruneHeight, subcfg.EnableMemTree, subcfg.EnableMemVal}\r\n\tmavls.enableMavlPrefix = subcfg.EnableMavlPrefix\r\n\tmavls.enableMVCC = subcfg.EnableMVCC\r\n\tmavls.enableMavlPrune = subcfg.EnableMavlPrune\r\n\tmavls.pruneHeight = subcfg.PruneHeight\r\n\tmavls.enableMemTree = subcfg.EnableMemTree\r\n\tmavls.enableMemVal = 
subcfg.EnableMemVal\r\n\tmavl.EnableMavlPrefix(mavls.enableMavlPrefix)\r\n\tmavl.EnableMVCC(mavls.enableMVCC)\r\n\tmavl.EnablePrune(mavls.enableMavlPrune)\r\n\tmavl.SetPruneHeight(int(mavls.pruneHeight))\r\n\tmavl.EnableMemTree(mavls.enableMemTree)\r\n\tmavl.EnableMemVal(mavls.enableMemVal)\r\n\tbs.SetChild(mavls)\r\n\treturn mavls\r\n}", "func newMemoryStore() Store {\n\treturn &memoryStore{\n\t\tprefix2base: make(map[string]*NameSpace),\n\t\tbase2prefix: make(map[string]*NameSpace),\n\t\tnamespaces: make(map[string]*NameSpace),\n\t}\n}", "func New(addr, password string) *Store {\n\treturn &Store{\n\t\tpool: newPool(addr, password),\n\t}\n}", "func New(observationCtx *observation.Context, db database.DB) Store {\n\treturn &store{\n\t\tlogger: logger.Scoped(\"uploads.store\", \"\"),\n\t\tdb: basestore.NewWithHandle(db.Handle()),\n\t\toperations: newOperations(observationCtx),\n\t}\n}", "func NewStore(db *sql.DB) *Store {\n\ts := stores.Store{DB: db}\n\n\treturn &Store{\n\t\tDB: s.DB,\n\t\tAchievements: &stores.PqAchievementsStore{Store: s},\n\t\tCourses: &stores.PqCoursesStore{Store: s},\n\t\tUsers: &stores.PqUsersStore{Store: s},\n\n\t\tSessions: auth.NewSessionStore(),\n\t}\n}", "func CreateNewStore(storePath string) (*Store, error) {\n\tstore := &Store{}\n\n\tjsonData, err := json.Marshal(store)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = ioutil.WriteFile(storePath, jsonData, os.FileMode(0600))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstore.Path, err = filepath.Abs(storePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn store, nil\n}", "func newStore(config *Config) *store {\n\treturn &store{\n\t\thashMap: hash.NewUnsafeHash(config.Capacity),\n\t\texpireHeap: newTimeHeap(config.Capacity),\n\t\texpireTimer: new(refreshTimer),\n\t}\n}" ]
[ "0.7291816", "0.6905972", "0.6899488", "0.67929655", "0.67790884", "0.67777205", "0.6675343", "0.665811", "0.6637835", "0.66238797", "0.6618824", "0.66062766", "0.6601314", "0.6590567", "0.6570274", "0.656122", "0.65415144", "0.6537513", "0.6537513", "0.6536951", "0.65154004", "0.6496847", "0.64712757", "0.6457483", "0.6444514", "0.6423096", "0.64202553", "0.6401602", "0.63987017", "0.6387678", "0.63862336", "0.63802284", "0.6375148", "0.63683206", "0.6364516", "0.63351536", "0.63052523", "0.62918", "0.628442", "0.6262459", "0.62508464", "0.6248807", "0.6248714", "0.6247723", "0.62461585", "0.624255", "0.6221852", "0.62150234", "0.62063664", "0.62034136", "0.62029487", "0.6199701", "0.61951315", "0.61912096", "0.61879057", "0.6184061", "0.61813116", "0.617842", "0.6177819", "0.6177819", "0.6177819", "0.6168068", "0.6167812", "0.6166304", "0.6158796", "0.61560833", "0.6155689", "0.61556613", "0.6139186", "0.61384135", "0.6111823", "0.6110252", "0.610899", "0.6102989", "0.6101651", "0.609118", "0.6081767", "0.6078331", "0.60597295", "0.60545254", "0.6049186", "0.6048597", "0.60444355", "0.6025412", "0.6019858", "0.59998155", "0.5997142", "0.5982416", "0.5981549", "0.5980135", "0.5971398", "0.5966803", "0.594961", "0.59495544", "0.59453183", "0.592319", "0.5920884", "0.5908183", "0.5901296", "0.58868223" ]
0.6265493
39
Easier getters to check whether those commands should be started or not
func (d *DB) startHourly(key string) bool { // Try to load this key, if not present then it can be started lastCall, err := d.loadTime(key) if err != nil { Log.Println("Command: ", key, " was never called") return true } return int(time.Since(lastCall).Hours()) > 0 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func IsCommand(cmd string) bool {\n for val := range DaemonizedCommands() {\n if val == cmd {\n return true\n }\n }\n for val := range InfoCommands() {\n if val == cmd {\n return true\n }\n }\n\n return false\n}", "func isCommand(name string) bool {\n\tfor _, cmd := range []string{\"_hooks\", \"_forward\"} {\n\t\tif strings.Compare(name, cmd) == 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func isCommand(c *yaml.Container) bool {\n\treturn len(c.Commands) != 0\n}", "func (c *Command) IsStarted() bool {\n\treturn c.Started\n}", "func isCommandStillInUse(id string) (bool, error) {\n\tdp, err := dbClient.GetDeviceProfilesByCommandId(id)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif len(dp) == 0 {\n\t\treturn false, err\n\t}\n\n\treturn true, err\n}", "func (o *WorkflowCliCommandAllOf) HasCommand() bool {\n\tif o != nil && o.Command != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (pkg *goPackage) isCommand() bool {\n\treturn pkg.name == \"main\" && pkg.hasMainFunction\n}", "func CheckCommands(command string) {\n\tprintCmdSelected(command)\n\tif command == \"help\" {\n\t\tcommands.Help()\n\t} else if command == \"sum\" {\n\t\tcommands.Sum()\n\t} else if command == \"create\" {\n\t\tcommands.CreateFile()\n\t} else if command == \"rename\" {\n\t\tcommands.RenameFile()\n\t} else if command == \"delete\" {\n\t\tcommands.RemoveFile()\n\t} else {\n\t\tcolors.Error(\"✘ Choose an available command!\", false)\n\t}\n}", "func checkCommand(cmd string) bool {\n\tr := false\n\tfor _, c := range CMDS {\n\t\tif c == cmd {\n\t\t\tr = true\n\t\t}\n\t}\n\treturn r\n}", "func (r Response) IsCommand() bool {\n\treturn r.Act == ActCommand\n}", "func run_command(command string)bool{\n\treturn true\n\n}", "func (o *WorkflowSshCmdAllOf) HasCommand() bool {\n\tif o != nil && o.Command != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (b *Bot) getAlive() bool {\n\tif len(b.getCommands()) > 0 {\n\t\treturn true\n\t}\n\treturn false\n}", "func (tok 
Token) IsCommand() bool {\n\treturn commandBegin < tok && tok < commandEnd\n}", "func (b *Bot) getCommands() []BotCommandType {\n\tbcts := make([]BotCommandType, 0)\n\tfor _, bct := range b.Commands {\n\t\tif bct.isRunning() {\n\t\t\tbcts = append(bcts, bct)\n\t\t}\n\t}\n\treturn bcts\n}", "func (connManager *ConnectionManager) canRunCommand() bool {\n\tif !connManager.acc.isValidAccount() {\n\t\tconnManager.sendStr(\"530 Need Auth\\r\\n\")\n\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (p Typed) IsCommand() bool {\n\treturn p.Kind() == TypeIDKindCommand\n}", "func (strokeClient *StrokeClient) IsCommand(message string) bool {\n\treturn strings.HasPrefix(message, strokeCommand)\n}", "func (self *AdminCmdHandler) IsAdminCmd(cmd *proxy.RedisCmd) bool {\n\tif cmd.Cmd == \"yundisctl\" {\n\t\treturn true\n\t}\n\treturn false\n}", "func validateCommandSemanticsAndGenerateOutput(args ...string) (bool, string) {\n\tnewArgs := args\n\tif newArgs[0] == \"sudo\" && newArgs[1] == \"--preserve-env\" {\n\t\tnewArgs = newArgs[2:]\n\t}\n\n\terrMsg := fmt.Sprintf(\"Can't execute command '%v' when the guestfish status is %s\", strings.Join(args, \" \"), currentGuestfishStatus)\n\n\tif newArgs[1] == \"--listen\" {\n\t\tif Stopped == currentGuestfishStatus {\n\t\t\tcurrentGuestfishStatus = Added\n\t\t\treturn true, \"GUESTFISH_PID=4513; export GUESTFISH_PID\"\n\t\t}\n\t\treturn false, errMsg\n\t}\n\n\tif newArgs[3] == \"run\" {\n\t\tif Added == currentGuestfishStatus {\n\t\t\tcurrentGuestfishStatus = Started\n\t\t\treturn true, \"\"\n\t\t}\n\t\treturn false, errMsg\n\t}\n\n\tif newArgs[3] == \"findfs-label\" {\n\t\tif Started == currentGuestfishStatus {\n\t\t\treturn true, \"/dev/sda1\"\n\t\t}\n\t\treturn false, errMsg\n\t}\n\n\tif newArgs[3] == \"mount\" {\n\t\tif Started == currentGuestfishStatus {\n\t\t\tcurrentGuestfishStatus = Mounted\n\t\t\treturn true, \"\"\n\t\t}\n\t\treturn false, errMsg\n\t}\n\n\tif newArgs[3] == \"mkdir-p\" {\n\t\tif Mounted == currentGuestfishStatus 
{\n\t\t\treturn true, \"\"\n\t\t}\n\t\treturn false, errMsg\n\t}\n\n\tif newArgs[3] == \"upload\" {\n\t\tif Mounted == currentGuestfishStatus {\n\t\t\treturn true, \"\"\n\t\t}\n\t\treturn false, errMsg\n\t}\n\n\tif newArgs[3] == \"umount-all\" {\n\t\tif Mounted == currentGuestfishStatus {\n\t\t\tcurrentGuestfishStatus = Started\n\t\t\treturn true, \"\"\n\t\t}\n\t\treturn false, errMsg\n\t}\n\n\tif newArgs[3] == \"exit\" {\n\t\tif Stopped < currentGuestfishStatus {\n\t\t\treturn true, \"\"\n\t\t}\n\n\t\treturn false, errMsg\n\t}\n\treturn true, \"\"\n}", "func (p pkgInfo) IsCommand() bool { return p.Name == \"main\" }", "func IsOnCommand(msgText string, cmdList []string) (string, bool) {\n\t// lets find delimiter position\n\tcmdSymbIdx := strings.IndexAny(msgText, \"/\"+cmdSymbol)\n\tif cmdSymbIdx == -1 {\n\t\treturn \"\", false\n\t}\n\n\tfound := false\n\tmsg := msgText[cmdSymbIdx+len(cmdSymbol) : len(msgText)] //remove cmd string from query\n\tvar strToFind string\n\tfor _, cmd := range cmdList {\n\t\tif strings.HasPrefix(msg, cmd) {\n\t\t\tstrToFind = strings.TrimSpace(strings.TrimPrefix(msg, cmd))\n\t\t\tfound = true\n\t\t}\n\t}\n\n\treturn strToFind, found\n}", "func (bc *BotCommand) isRunning() bool {\n\tbc.Lock()\n\tdefer bc.Unlock()\n\treturn bc.running\n}", "func IsCommandSet() PredicateFunc {\n\treturn func(v *VolumeGetProperty) bool {\n\t\treturn len(v.Command) != 0\n\t}\n}", "func (p *Project) IsCommand(name string) bool {\n\tif t, exists := p.Targets[name]; exists {\n\t\treturn t.Command\n\t}\n\treturn false\n}", "func (o *WorkflowSshCmd) HasCommand() bool {\n\tif o != nil && o.Command != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (authClient *AuthClient) IsCommand(message string) bool {\n\treturn strings.HasPrefix(message, authCommand)\n}", "func getCommandFlows(m *api.Message, s *sessionManager) (Flow, bool) {\n\tcommand := m.GetCommand()\n\tif command == \"\" {\n\t\treturn Flow{}, false\n\t}\n\tvalue, ok := 
s.telego.commandFlows[command]\n\treturn value, ok\n}", "func checkCommands() {\n\tswitch command {\n\tcase \"find\":\n\t\toutput = jq.Find(find)\n\tcase \"first\":\n\t\toutput = jq.First()\n\tcase \"last\":\n\t\toutput = jq.Last()\n\tcase \"count\":\n\t\toutput = jq.Count()\n\tcase \"pluck\":\n\t\toutput = jq.Pluck(aggregateColumn)\n\tcase \"avg\":\n\t\tif aggregateColumn != \"\" {\n\t\t\toutput = jq.Avg(aggregateColumn)\n\t\t} else {\n\t\t\toutput = jq.Avg()\n\t\t}\n\tcase \"sum\":\n\t\tif aggregateColumn != \"\" {\n\t\t\toutput = jq.Sum(aggregateColumn)\n\t\t} else {\n\t\t\toutput = jq.Sum()\n\t\t}\n\tcase \"min\":\n\t\tif aggregateColumn != \"\" {\n\t\t\toutput = jq.Min(aggregateColumn)\n\t\t} else {\n\t\t\toutput = jq.Min()\n\t\t}\n\tcase \"max\":\n\t\tif aggregateColumn != \"\" {\n\t\t\toutput = jq.Max(aggregateColumn)\n\t\t} else {\n\t\t\toutput = jq.Max()\n\t\t}\n\tdefault:\n\t\toutput = jq.Get()\n\t}\n}", "func (self *Controller) HasCommand(notificationName string) bool {\n\tself.commandMapMutex.RLock()\n\tdefer self.commandMapMutex.RUnlock()\n\n\treturn self.commandMap[notificationName] != nil\n}", "func hasSeeAlso(cmd *cobra.Command) bool {\n\tif cmd.HasParent() {\n\t\treturn true\n\t}\n\tfor _, c := range cmd.Commands() {\n\t\tif !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {\n\t\t\tcontinue\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}", "func (c *Client) IsCommandSupported(command string) bool {\n\t_, ok := c.funcMap[command]\n\treturn ok\n}", "func isRunning(on_node, operation, rcCode string) bool {\n\treturn on_node != \"\" && (operation == \"start\" || (operation == \"monitor\" && rcCode == \"0\"))\n}", "func startCmd(name string) []string {\n\tswitch initSystem {\n\tcase \"systemd\":\n\t\treturn []string{\"systemctl\", \"start\", fmt.Sprintf(\"%s.service\", name)}\n\tcase \"upstart\":\n\t\treturn []string{\"initctl\", \"start\", name}\n\t}\n\n\treturn nil\n}", "func (c *CmdHandle) CmdHealthMonitor(){\n// 
log.Println(\"CmdHealthMonitor: Start\")\n\n //Check if any of the commands have been running for ever avg timout time 20s\n// log.Println(\"Monitored Cmd Processing Health\")\n\n // Check the system load if less send more commands for processing\n c.CmdProcessPendingCommands()\n\n// log.Println(\"CmdHealthMonitor: Complete\")\n}", "func containsCommand(components []string) bool {\n\tfor _, comp := range components {\n\t\tif isCommand(comp) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (r *MessageExecuteCommand) HasCommand() bool {\n\treturn r.hasCommand\n}", "func (c *Command) Start() error {\n\tif utils.StringInSlice(command, configuration.Config.Commands) {\n\t\tc.Started = true\n\t}\n\treturn nil\n}", "func (o *WorkflowCliCommandAllOf) HasTerminalStart() bool {\n\tif o != nil && o.TerminalStart != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsCmd() bool {\n\tproc, _ := ps.FindProcess(os.Getppid())\n\tif proc != nil && !strings.Contains(proc.Executable(), \"cmd.exe\") {\n\t\treturn false\n\t}\n\treturn true\n}", "func IsCommandFeasibleInState(currentState GameState, command string) bool {\n\tswitch currentState {\n\tcase InitState:\n\t\tif command == \"aloita\" {\n\t\t\treturn true\n\t\t}\n\tcase WaitingForPlayers:\n\t\tswitch command {\n\t\tcase \"aloita\", \"jatka\", \"lopeta\":\n\t\t\treturn true\n\t\t}\n\tcase WaitingForSongs:\n\t\tswitch command {\n\t\tcase \"esitys\", \"esitä\", \"jatka\", \"lopeta\":\n\t\t\treturn true\n\t\t}\n\tcase WaitingForReviews:\n\t\tswitch command {\n\t\tcase \"arvio\", \"arvioi\", \"arvostele\", \"jatka\", \"lopeta\":\n\t\t\treturn true\n\t\t}\n\tcase PublishingSong, StopGame:\n\t\treturn false\n\t}\n\treturn false\n}", "func status(cmd *cobra.Command, _ []string) error {\n\tif err := daemonStatus(cmd); err != nil {\n\t\treturn err\n\t}\n\n\tif err := connectorStatus(cmd); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func checkCommand(m discord.Message) {\n\t// Checks if the message starts 
with the command prefix\n\tif !strings.HasPrefix(m.Content, discord.Prefix) {\n\n\t\t// If not, return prematurely finishing the function\n\t\treturn\n\t}\n\t// Remove the prefix to be left with the message content\n\tcontent := strings.TrimPrefix(m.Content, discord.Prefix)\n\t// Split the message by whitespace, allows us to access the command\n\t// and subsequent inputs\n\tcommand := strings.Split(content, \" \")\n\n\t// Check if the command array contains at least one value, this would be\n\t// the command name\n\tif len(command) < 1 {\n\t\t// If not, return prematurely finishing the function\n\t\treturn\n\t}\n\n\t// Run the command in a non-blocking function, passing the message and\n\t// the command parameters, containing the command name\n\tgo runCommand(m, command)\n}", "func hasSeeAlso(c *cobra.Command) bool {\n\tif c.HasParent() {\n\t\treturn true\n\t}\n\tfor _, c := range c.Commands() {\n\t\tif !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {\n\t\t\tcontinue\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}", "func GetCommands(app *App) []cli.Command {\n\t// if app.IsService {\n\t// \treturn []cli.Command{\n\t// \t\t*vmgo.MakeRequireMongo(serviceStartCmd()),\n\t// \t\t*vmgo.MakeRequireMongo(createUserCmd()),\n\t// \t\t*vmgo.MakeRequireMongo(setupCmd()),\n\t// \t\t*vmgo.MakeRequireMongo(resetCmd()),\n\t// \t\t*vmgo.MakeRequireMongo(overridePasswordCmd()),\n\t// \t\t*testEMail(),\n\t// \t}\n\t// }\n\treturn []cli.Command{\n\t\t*vmgo.MakeRequireMongo(createUserCmd()),\n\t\t*vmgo.MakeRequireMongo(setupCmd()),\n\t\t*vmgo.MakeRequireMongo(resetCmd()),\n\t\t*vmgo.MakeRequireMongo(overridePasswordCmd()),\n\t\t*testEMail(),\n\t}\n}", "func IsCmd(cmd string)(b bool) {\n\n\tvar i uint32\n\n\tprogPath := \"/programs/\"\n\n\tfiles, status := altEthos.SubFiles(progPath)\n\tif status != syscall.StatusOk {\n\n\t\tshellStatus := String(\"Subfiles failed\\n\")\n\t\taltEthos.WriteStream(syscall.Stdout, &shellStatus)\n\n\t}\n\n\tb = false\n\n\tfor i=0; 
i<uint32(len(files)); i++ {\n\n\t\tif files[i] == cmd {\n\t\t\tb = true\n\t\t}\n\n\t}\n\n\treturn\n}", "func statusCmd(name string) []string {\n\tswitch initSystem {\n\tcase \"systemd\":\n\t\treturn []string{\"systemctl\", \"--no-pager\", \"status\", fmt.Sprintf(\"%s.service\", name)}\n\tcase \"upstart\":\n\t\treturn []string{\"initctl\", \"status\", name}\n\t}\n\n\treturn nil\n}", "func (r *Runner) Started() bool {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\treturn r.Cmd.Process != nil\n}", "func (c *Compatibility) GetValidCommands() []string {\n\treturn []string{\n\t\t\"apply\",\n\t\t\"destroy\",\n\t\t\"env\",\n\t\t\"get\",\n\t\t\"graph\",\n\t\t\"import\",\n\t\t\"init\",\n\t\t\"output\",\n\t\t\"plan\",\n\t\t\"providers\",\n\t\t\"refresh\",\n\t\t\"show\",\n\t\t\"taint\",\n\t\t\"untaint\",\n\t\t\"workspace\",\n\t\t\"force-unlock\",\n\t\t\"state\",\n\t}\n}", "func DaemonizedCommands() map[string]Executor {\n return available.daemonized\n}", "func disCmd(cmd string) bool {\n\tswitch cmd {\n\tcase \"exit\":\n\t\tfmt.Println(\"Caesar exit! 
Bye!\")\n\t\tlogOff()\n\t\tos.Exit(0)\n\t\treturn true\n\tcase \"info\":\n\t\tif DefautStatus.Login == true {\n\t\t\tlog.Log(\"info\", \"You are online.\", log.Fields{\"Login Name\": DefautStatus.LoginName})\n\t\t} else {\n\t\t\tlog.Log(\"info\", \"You are offline.\", nil)\n\t\t}\n\t\treturn true\n\tcase \"test\":\n\t\tvar s string\n\t\tuser := &command.User{\"vvvvvvdfdf\", \"1111111\"}\n\t\trpcClient.Call(\"Test.Login\", user, &s)\n\t\tfmt.Println(s)\n\t\treturn true\n\tcase \"login\":\n\t\tif DefautStatus.Login == true {\n\t\t\tlog.Log(\"info\", \"You are already online.\", log.Fields{\"Login Name\": DefautStatus.LoginName})\n\t\t} else {\n\t\t\t//input name and password\n\t\t\tfmt.Println(\"Please input name:\")\n\t\t\tline, _, err := r.ReadLine()\n\t\t\thandleError(err)\n\t\t\tusername := string(line)\n\t\t\tfmt.Println(\"Please input password:\")\n\t\t\tline, _, err = r.ReadLine()\n\t\t\thandleError(err)\n\t\t\tpassword := string(line)\n\n\t\t\tvar res string\n\t\t\t//user := &object.User{0, username, password, \"regular\", \"unknow\",genKey()}\n\t\t\tMe.Name = username\n\t\t\tMe.Password = password\n\t\t\t\n\t\t\t//fmt.Println(Me)\n\t\t\trpcClient.Call(\"Users.Login\", Me, &res)\n\t\t\tif res == \"Login success.\" {StatusLine = 1}\n\t\t\t//fmt.Println(Me.Id)\n\t\t\t//fmt.Println(Me.Key)\n\t\t\tlog.Log(\"info\", res, nil)\n\t\t}\n\t\treturn true\n\tcase \"myqueue\":\n\t\tvar simRes object.SimResult\n\t\trpcClient.Call(\"Users.MyMQ\", Me, &simRes)\n\t\tif simRes.LogInfo != \"\" {\n\t\t\tfmt.Print(simRes.LogInfo)\n\t\t}else {\n\t\t\tfmt.Printf(simRes.Res)\n\t\t}\n\t\treturn true\t\n\tcase \"users\":\n\t\tif Me.Name != \"admin\" {\n\t\t\tlog.Log(\"err\", \"You are not administrator, no right!\", nil)\n\t\t\treturn true\n\t\t} \n\t\tvar simRes object.SimResult\n\t\trpcClient.Call(\"Users.Users\", Me, &simRes)\n\t\tif simRes.LogInfo != \"\" {\n\t\t\tfmt.Print(simRes.LogInfo)\n\t\t}else {\n\t\t\tfmt.Printf(simRes.Res)\n\t\t}\n\t\treturn true\t\n\tcase 
\"newqueue\":\n\t\tfmt.Println(\"Please input name:\")\n\t\treturn true\n\n\tdefault:\n\t\tfmt.Printf(\"Command \\\"%s\\\" not found!\\n\", cmd)\n\t\treturn false\n\t}\n}", "func startSubcommand(c *subcommand) bool {\n\tc.flags.Parse(os.Args[2:])\n\tif !c.flags.Parsed() {\n\t\tc.flags.Usage()\n\t\treturn false\n\t}\n\n\tif !quiet {\n\t\tprintBanner()\n\t}\n\treturn true\n}", "func (c *CurrentTrackCommand) IsAdminCommand() bool {\n\treturn viper.GetBool(\"commands.currenttrack.is_admin\")\n}", "func (m *Image) ValidCommands() {\n\t//normarize command\n\tfor cmd := range m.Commands {\n\t\tif !CheckCmdName(cmd) {\n\t\t\tfmt.Println(\"Invalid command. Removed:\", cmd)\n\t\t\tdelete(m.Commands, cmd)\n\t\t}\n\t}\n\treturn\n}", "func commandExists(name string) bool {\n\t_, err := exec.LookPath(name)\n\treturn err == nil\n}", "func isCommandAvailable(name string) bool {\n\tcmd := exec.Command(\"command\", name, \"-V\")\n\tif err := cmd.Run(); err != nil {\n\t\tFatalf(\"%s executable is not installed on this box, please run 'yum install -y %[1]s to install it'\", name, name)\n\t}\n\treturn true\n}", "func (handler commandHandler) get(name string) (*command, bool) {\n\tcmd, found := handler.Cmds[name]\n\treturn &cmd, found\n}", "func (handler commandHandler) get(name string) (*command, bool) {\n\tcmd, found := handler.Cmds[name]\n\treturn &cmd, found\n}", "func init() {\n\tAddCommand(\"mds\", \"Check metadata service process\", cmdMds, nil)\n}", "func getCommands() []cli.Command {\n\n\treturn []cli.Command{\n\t\t{\n\t\t\tName: \"create_room\",\n\t\t\tAliases: []string{\"cr\"},\n\t\t\tUsage: \"amity-golang create_room <room_name> <office|Livingspace>\",\n\t\t\tAction: createRoom,\n\t\t},\n\t\t{\n\t\t\tName: \"add_person\",\n\t\t\tAliases: []string{\"ap\"},\n\t\t\tUsage: \"amity-golang add_person <First_Name> <Last_Name> <fellow|staff> <office_Name> <livingSpace_Name>\",\n\t\t\tAction: addPerson,\n\t\t},\n\t\t{\n\t\t\tName: \"print_allocations\",\n\t\t\tAliases: 
[]string{\"pa\"},\n\t\t\tUsage: \"amity-golang print_allocations\",\n\t\t\tAction: printAllocations,\n\t\t},\n\t\t{\n\t\t\tName: \"print_room\",\n\t\t\tAliases: []string{\"pr\"},\n\t\t\tUsage: \"amity-golang print_room <room_name>\",\n\t\t\tAction: printRoom,\n\t\t},\n\t\t{\n\t\t\tName: \"print_unallocated_people\",\n\t\t\tAliases: []string{\"pu\"},\n\t\t\tUsage: \"amity-golang print_unallocated_people\",\n\t\t\tAction: printUnallocatedPeople,\n\t\t},\n\t\t{\n\t\t\tName: \"reallocate_person\",\n\t\t\tAliases: []string{\"rp\"},\n\t\t\tUsage: \"amity-golang reallocate_person <First_Name> <Last_Name> <New_Room_Name>\",\n\t\t\tAction: reallocatePerson,\n\t\t},\n\t}\n}", "func (o *WindowsMobileMsi) HasCommandLine() bool {\n\tif o != nil && o.CommandLine != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (e *Engine) startable() bool {\n\tif e.Configuration.InputDeviceNamesUnset() && e.Configuration.InputValueDescriptorNamesUnset() {\n\t\treturn false\n\t}\n\n\tif e.booted {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func CmdAvailability(cmd *TermDependant) string {\n\tswitch GetTerminal() {\n\tcase TermBash:\n\t\tif cmd.Bash == \"\" {\n\t\t\tif GetTerminalHasWSL() && cmd.Powershell != \"\" {\n\t\t\t\treturn orange + \" (windows wsl)\" + reset\n\t\t\t}\n\t\t\treturn red + \" (windows only)\" + reset\n\t\t}\n\tcase TermCmd, TermPowershell:\n\t\tif cmd.Powershell == \"\" {\n\t\t\tif GetTerminalHasWSL() && cmd.Bash != \"\" {\n\t\t\t\treturn orange + \" (bash wsl)\" + reset\n\t\t\t}\n\t\t\treturn red + \" (bash only)\" + reset\n\t\t}\n\t}\n\treturn \"\"\n}", "func isPlugin(c *yaml.Container) bool {\n\treturn len(c.Commands) == 0 || len(c.Vargs) != 0\n}", "func (i *invocation) isActive() bool {\n\treturn i.queuedOperations.Len() > 0 || i.executingWorkersCount > 0\n}", "func (m Modifiers) CommandDown() bool {\n\treturn m&CommandModifier == CommandModifier\n}", "func (c *Completed) Commands() []string {\n\treturn c.cs.s\n}", "func CheckCommands(commands 
[]structs.Command, commandName string) (bool, structs.Command) {\n\tfor _, c := range commands {\n\t\tif strings.EqualFold(c.ID, commandName) {\n\t\t\treturn true, c\n\t\t}\n\t}\n\treturn false, structs.Command{}\n}", "func (cl *commandList) GetDebugCommands(mode string) [][]string {\n\tvar allCommands [][]string\n\tfor _, def := range cl.definitions {\n\t\t// TODO: incorporate query commands into e2e testing once proxy access is implemented\n\t\tif def.commandGroup == query {\n\t\t\tcontinue\n\t\t}\n\t\tif mode == runtime.ModeController && def.use == \"log-level\" {\n\t\t\t// log-level command does not support remote execution.\n\t\t\tcontinue\n\t\t}\n\t\tif mode == runtime.ModeAgent && def.agentEndpoint != nil ||\n\t\t\tmode == runtime.ModeController && def.controllerEndpoint != nil ||\n\t\t\tmode == runtime.ModeFlowAggregator && def.flowAggregatorEndpoint != nil {\n\t\t\tvar currentCommand []string\n\t\t\tif group, ok := groupCommands[def.commandGroup]; ok {\n\t\t\t\tcurrentCommand = append(currentCommand, group.Use)\n\t\t\t}\n\t\t\tcurrentCommand = append(currentCommand, def.use)\n\t\t\tallCommands = append(allCommands, currentCommand)\n\t\t}\n\t}\n\tfor _, cmd := range cl.rawCommands {\n\t\tif cmd.cobraCommand.Use == \"proxy\" {\n\t\t\t// proxy will keep running until interrupted so it\n\t\t\t// cannot be used as is in e2e tests.\n\t\t\tcontinue\n\t\t}\n\t\tif mode == runtime.ModeController && cmd.supportController ||\n\t\t\tmode == runtime.ModeAgent && cmd.supportAgent {\n\t\t\tvar currentCommand []string\n\t\t\tif group, ok := groupCommands[cmd.commandGroup]; ok {\n\t\t\t\tcurrentCommand = append(currentCommand, group.Use)\n\t\t\t}\n\t\t\tcurrentCommand = append(currentCommand, strings.Split(cmd.cobraCommand.Use, \" \")[0])\n\t\t\tallCommands = append(allCommands, currentCommand)\n\t\t}\n\t}\n\treturn allCommands\n}", "func (o *InputEventWithModifiers) GetCommand() gdnative.Bool {\n\t//log.Println(\"Calling InputEventWithModifiers.GetCommand()\")\n\n\t// 
Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"InputEventWithModifiers\", \"get_command\")\n\n\t// Call the parent method.\n\t// bool\n\tretPtr := gdnative.NewEmptyBool()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewBoolFromPointer(retPtr)\n\treturn ret\n}", "func (r *subprocess) isContext(n ast.Node, ctx *gosec.Context) bool {\n\tselector, indent, err := gosec.GetCallInfo(n, ctx)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif selector == \"exec\" && indent == \"CommandContext\" {\n\t\treturn true\n\t}\n\treturn false\n}", "func (cli *CLI) CmdExist(cmds ...string) bool {\n\tcmdExist := false\n\tfor _, cmd := range cmds {\n\t\t_, exist := cli.cmdMap[cmd]\n\t\tif exist {\n\t\t\tcmdExist = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !cmdExist {\n\t\tfor _, cm := range cli.cmdMap {\n\t\t\t//KV: string->string\n\t\t\tif cmStr, isStr := cm.(string); isStr && util.ListIndex(cmds, cmStr) > -1 {\n\t\t\t\tcmdExist = true\n\t\t\t\tbreak\n\t\t\t} else if cmStrQue, isStrArray := cm.([]string); isStrArray {\n\t\t\t\tfor _, cStr := range cmds {\n\t\t\t\t\tif util.ListIndex(cmStrQue, cStr) > -1 {\n\t\t\t\t\t\tcmdExist = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn cmdExist\n}", "func (s SystemdInitSystem) isEnabled() (bool, error) {\n\tservice := s.unitName()\n\targs := []string{\"is-enabled\", service}\n\tif err := exec.Command(\"systemctl\", args...).Run(); err != nil {\n\t\tswitch v := err.(type) {\n\t\tcase *exec.Error:\n\t\t\treturn false, fmt.Errorf(\"failed to run command %q: %s\", v.Name, v.Err)\n\t\tcase *exec.ExitError:\n\t\t\treturn false, nil\n\t\tdefault:\n\t\t\treturn false, err\n\t\t}\n\t}\n\treturn true, nil\n}", "func (this *PathParser) ReadCommand() bool {\n\n\tif this.tokenIndex >= 
len(this.tokens) {\n\t\treturn false\n\t}\n\n\tcommandString := this.tokens[this.tokenIndex]\n\tthis.tokenIndex++\n\tthis.currentCommand = ParseCommand(commandString)\n\tif this.currentCommand == NotAValidCommand {\n\t\tpanic(fmt.Sprint(\"Unexpected command, saw \", commandString))\n\t}\n\n\treturn true\n}", "func init() {\n\tnewCommand(\"ping\", 0, false, false, ping).setHelp(\"\\\"Pong!\\\"\").add()\n\tnewCommand(\"pong\", 0, false, false, ping).setHelp(\"\\\"Ping!\\\"\").add()\n\tnewCommand(\"help\", 0, false, false, msgHelp).add()\n\tnewCommand(\"git\", 0, false, false, gitHubLink).setHelp(\n\t\t\"Displays the github link where I'm being developed.\",\n\t).add()\n\t/*newCommand(\"request\", 0, false, false, featureRequest).setHelp(\n\t\"Requests a feature.\").add()*/\n\tnewCommand(\"report\", 0, false, false, bugReport).setHelp(\n\t\t\"Report a bug.\").add()\n\tnewCommand(\"woot\", 0, false, false, celebration).setHelp(\n\t\t\"Starts a celebration!\").add()\n}", "func CmdExists(name string) bool {\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", \"command -v \"+name)\n\tif err := cmd.Run(); err != nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func (c *Cmd) Start() error", "func getHealthCheckCommand(command []string) []string {\n\tif len(command) == 1 {\n\t\t// command/test was specified as a single string which wraps it in /bin/sh (CMD-SHELL)\n\t\tcommand = append([]string{\"CMD-SHELL\"}, command...)\n\t}\n\treturn command\n}", "func getCommand(args []string) (*command, []string, error) {\n\tif len(args) < 2 {\n\t\treturn nil, nil, fmt.Errorf(\"Too few arguments: %q\", args)\n\t}\n\n\tfor _, c := range commands {\n\t\tif c.flag == args[1] {\n\t\t\treturn &c, args[2:], nil\n\t\t}\n\t}\n\n\t// command not found\n\treturn nil, nil, fmt.Errorf(\"Command not found: %q\", args)\n}", "func (e Event) IsSlashCommand() bool {\n\treturn e.Is(SlashCommand)\n}", "func (p *adapter) Running() bool {\n\tif p.cmd == nil || p.cmd.Process == nil || p.cmd.Process.Pid == 0 || 
p.state() != nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func validateCommandSyntax(args ...string) bool {\n\tif len(args) < 4 {\n\t\treturn false\n\t}\n\n\tnewArgs := args\n\tif newArgs[0] == \"sudo\" && newArgs[1] == \"--preserve-env\" {\n\t\tnewArgs = newArgs[2:]\n\t}\n\n\tcmdTree := supportedCommandTree\n\t// check each argument one by one\n\tfor index, arg := range newArgs {\n\t\tif v, ok := cmdTree[arg]; ok {\n\t\t\tif index+1 == len(newArgs) {\n\t\t\t\treturn v == nil\n\t\t\t}\n\t\t\tcmdTree = v.(map[string]interface{})\n\t\t} else {\n\t\t\tmatched := false\n\t\t\tfor k, v := range cmdTree {\n\t\t\t\tif matched, _ = regexp.MatchString(k, arg); matched {\n\t\t\t\t\tif index+1 == len(newArgs) {\n\t\t\t\t\t\treturn v == nil\n\t\t\t\t\t}\n\t\t\t\t\tcmdTree = v.(map[string]interface{})\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !matched {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\n\treturn true\n}", "func IsPluginCommand(cmd *cobra.Command) bool {\n\treturn cmd.Annotations[CommandAnnotationPlugin] == \"true\"\n}", "func ContainsCmd(aa []api.Command, c api.Command) bool {\n\tfor _, v := range aa {\n\t\tif c.Parent == v.Parent && c.Usage == v.Usage {\n\t\t\treturn true\n\t\t}\n\n\t\tcoreCmd := fmt.Sprintf(\"%s_%s\", v.Parent, v.Usage)\n\t\tif c.Parent == coreCmd { // Ensures that no core commands will be added\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func getAvailableCommands() (map[string]string, error) {\n\n\t// Check if run.yaml exists\n\tfile := \"run.yaml\"\n\tif _, err := os.Stat(file); os.IsNotExist(err) {\n\t\t// If run.yaml does not exit, check for run.yml\n\t\tfile = \"run.yml\"\n\t\tif _, err := os.Stat(file); os.IsNotExist(err) {\n\t\t\treturn nil, errors.New(\"unable to find \\\"run.yaml\\\" in the current directory\")\n\t\t}\n\t}\n\n\t// Read run.yaml\n\tdata, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\treturn nil, errors.New(\"unable to read \\\"run.yaml\\\"\")\n\t}\n\n\t// Parse run.yaml\n\tcommands := 
make(map[string]string)\n\tif err := yaml.Unmarshal(data, &commands); err != nil {\n\t\treturn nil, errors.New(\"unable to parse \\\"run.yaml\\\"\")\n\t}\n\n\t// Returned parsed commands\n\treturn commands, nil\n}", "func InfoCommands() map[string]Executor {\n return available.info\n}", "func init() {\n\tping := Command{\n\t\tName: \"ping\",\n\t\tUsage: \"ping\",\n\t\tDescription: \"see how long the bot takes to respond.\",\n\t\tCategory: \"General\",\n\t\tNeedArgs: false,\n\t\tArgs: map[string]bool{},\n\t\tOwnerOnly: false,\n\t\tEnabled: true,\n\t\tRun: pingCommand,\n\t}\n\n\tping.Register()\n}", "func (rf *Raft) Start(command interface{}) (int, int, bool) {\n rf.mu.Lock()\n defer rf.mu.Unlock()\n\n if rf.state != StateLeader {\n return nilIndex, nilIndex, false\n }\n\n // Your code here (2B).\n\n logLen := len(rf.log)\n index := logLen\n term := rf.currentTerm\n isLeader := true\n\n thisEntry := LogEntry{rf.currentTerm, command}\n rf.log = append(rf.log, thisEntry)\n rf.matchIndex[rf.me] = len(rf.log)\n\n rf.persist()\n\n // rf.print(\"Client start command %v\", command)\n\n return index, term, isLeader\n}", "func IsCmd(c *config.Config, message string) (bool, string) {\n\tcmdcs := c.GetArray(\"CommandChar\", []string{\"!\"})\n\tbotnick := strings.ToLower(c.Get(\"Nick\", \"bot\"))\n\tif botnick == \"\" {\n\t\tlog.Fatal().\n\t\t\tMsgf(`You must run catbase -set nick -val <your bot nick>`)\n\t}\n\tiscmd := false\n\tlowerMessage := strings.ToLower(message)\n\n\tif strings.HasPrefix(lowerMessage, botnick) &&\n\t\tlen(lowerMessage) > len(botnick) &&\n\t\t(lowerMessage[len(botnick)] == ',' || lowerMessage[len(botnick)] == ':') {\n\n\t\tiscmd = true\n\t\tmessage = message[len(botnick):]\n\n\t\t// trim off the customary addressing punctuation\n\t\tif message[0] == ':' || message[0] == ',' {\n\t\t\tmessage = message[1:]\n\t\t}\n\t} else {\n\t\tfor _, cmdc := range cmdcs {\n\t\t\tif strings.HasPrefix(lowerMessage, cmdc) && len(cmdc) > 0 {\n\t\t\t\tiscmd = 
true\n\t\t\t\tmessage = message[len(cmdc):]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t// trim off any whitespace left on the message\n\tmessage = strings.TrimSpace(message)\n\n\treturn iscmd, message\n}", "func (o *WorkflowCliCommandAllOf) GetCommandOk() (*string, bool) {\n\tif o == nil || o.Command == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Command, true\n}", "func isRootCommand(values []string, registry Registry) bool {\n\n\t// FALSE: if the root command is not registered\n\tif _, ok := registry[\"\"]; !ok {\n\t\treturn false\n\t}\n\n\t// TRUE: if all `values` are empty or the first `value` is a flag\n\tif len(values) == 0 || isFlag(values[0]) {\n\t\treturn true\n\t}\n\n\t// get root `CommandConfig` value from the registry\n\trootCommandConfig := registry[\"\"]\n\n\t// TRUE: if the first value is not a registered command\n\t// and some arguments are registered for the root command\n\tif _, ok := registry[values[0]]; len(rootCommandConfig.Args) > 0 && !ok {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *WorkflowSshCmdAllOf) HasCommandType() bool {\n\tif o != nil && o.CommandType != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (c *Command) Runnable() bool {\n\treturn c.Run != nil\n}", "func (c *Command) Runnable() bool {\n\treturn c.Run != nil\n}", "func (c *client) command(msg string) bool {\n\tswitch {\n\tcase msg == \"/list\":\n\t\tc.Conn.Write([]byte(\"-------------------\\n\"))\n\t\tfor k := range roomList {\n\t\t\tcount := 0\n\t\t\tfor range roomList[k].members {\n\t\t\t\tcount++\n\t\t\t}\n\t\t\tc.Conn.Write([]byte(k + \" : online members(\" + strconv.Itoa(count) + \")\\n\"))\n\t\t}\n\t\tc.Conn.Write([]byte(\"-------------------\\n\"))\n\t\treturn false\n\tcase msg == \"/join\":\n\t\tc.join()\n\t\treturn false\n\tcase msg == \"/help\":\n\t\twriteFormattedMsg(c.Conn, help)\n\t\treturn false\n\tcase msg == \"/create\":\n\t\tc.create()\n\t\treturn false\n\tcase msg == \"/leave\":\n\t\tc.leave()\n\t\treturn false\n\t}\n\treturn true\n}", 
"func TestHiddenCommandIsHidden(t *testing.T) {\n\tc := &Command{Use: \"c\", Hidden: true, Run: emptyRun}\n\tif c.IsAvailableCommand() {\n\t\tt.Errorf(\"Hidden command should be unavailable\")\n\t}\n}", "func (p *procBase) Running() bool {\n\treturn p.cmd != nil\n}", "func (handler commandHandler) getCmds() cmdMap {\n\treturn handler.Cmds\n}", "func (handler commandHandler) getCmds() cmdMap {\n\treturn handler.Cmds\n}", "func isVaildCmd(c string) bool {\n\tif len(c) == 0 || c[0:1] == \"-\" {\n\t\treturn false\n\t}\n\treturn true\n}", "func (o *WorkflowSshCmd) HasCommandType() bool {\n\tif o != nil && o.CommandType != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}" ]
[ "0.6994663", "0.6508269", "0.6502186", "0.63603437", "0.6129595", "0.6112796", "0.6082147", "0.60248846", "0.6018903", "0.5977348", "0.5970306", "0.5944507", "0.59397984", "0.593732", "0.5927465", "0.59095526", "0.58668464", "0.5851266", "0.5814823", "0.5798538", "0.5794736", "0.57912266", "0.5787956", "0.57588416", "0.57392424", "0.573836", "0.5738252", "0.57267857", "0.5700379", "0.569883", "0.56975913", "0.5693839", "0.56890047", "0.5681753", "0.5678374", "0.56220496", "0.5616871", "0.55800045", "0.5574369", "0.5574249", "0.5573573", "0.5572828", "0.5560734", "0.5557919", "0.55488425", "0.5539682", "0.5535733", "0.5511275", "0.5506459", "0.54953796", "0.5484882", "0.54810226", "0.5479189", "0.5475807", "0.5474763", "0.54684", "0.5464376", "0.5464376", "0.54470634", "0.5445099", "0.5435254", "0.5430047", "0.5428912", "0.542732", "0.5424266", "0.5419231", "0.53957874", "0.53695613", "0.5362757", "0.5340816", "0.53399426", "0.5338233", "0.53354067", "0.5334913", "0.5328363", "0.5324983", "0.5317225", "0.5310388", "0.5308967", "0.53002053", "0.52980775", "0.52946365", "0.5293449", "0.5293015", "0.5291705", "0.5288549", "0.5276917", "0.5272387", "0.5272219", "0.5270468", "0.52691156", "0.52639353", "0.5256065", "0.5256065", "0.52422416", "0.5238799", "0.52341855", "0.52321887", "0.52321887", "0.5230901", "0.5225387" ]
0.0
-1
findConflict finds the index of the conflict. It returns the first pair of conflicting entries between the existing entries and the given entries, if there are any. If there is no conflicting entries, and the existing entries contains all the given entries, zero will be returned. If there is no conflicting entries, but the given entries contains new entries, the index of the first new entry will be returned. An entry is considered to be conflicting if it has the same index but a different term. The first entry MUST have an index equal to the argument 'from'. The index of the given entries MUST be continuously increasing.
func (l *LogStore) findConflict(entries []*pb.Entry) uint64 { // TODO: 会有第0个冲突么? for _, ne := range entries { if !l.matchTerm(ne.Index, ne.Term) { if ne.Index <= l.lastIndex() { l.logger.Info("log found conflict", zap.Uint64("conflictIndex", ne.Index), zap.Uint64("conflictTerm", ne.Term), zap.Uint64("existTerm", l.termOrPanic(l.term(ne.Index)))) } return ne.Index } } return 0 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func FindConflictsByUser(entries []*RenderedScheduleEntry) map[string][]*Conflict {\n\tentriesByUser := RenderedScheduleEntries(entries).GroupBy(func(entry *RenderedScheduleEntry) string {\n\t\treturn entry.User.ID\n\t})\n\n\tvar (\n\t\tm sync.Mutex\n\t\twg sync.WaitGroup\n\t\tresults = make(map[string][]*Conflict, len(entriesByUser))\n\t)\n\n\tfor userID, entries := range entriesByUser {\n\t\twg.Add(1)\n\n\t\tgo func(userID string, entries []*RenderedScheduleEntry) {\n\t\t\tdefer wg.Done()\n\n\t\t\tconflicts := []*Conflict{}\n\n\t\t\tsort.Slice(entries, func(i, j int) bool {\n\t\t\t\treturn entries[i].Start.Before(entries[j].Start)\n\t\t\t})\n\n\t\t\tfor i, left := range entries {\n\t\t\t\tfor j := i + 1; j < len(entries); j++ {\n\t\t\t\t\tright := entries[j]\n\n\t\t\t\t\tif !right.Start.Before(left.End) { // if left.End <= right.Start\n\t\t\t\t\t\t// All good, RHS doesn't start until at least after LHS\n\t\t\t\t\t\t// ends. Stop scanning for conflicts related to LHS.\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\n\t\t\t\t\tlog.Printf(\"CONFLICT: %s is in both %q and %q from %s to %s\\n\", left.User.Summary, left.Schedule, right.Schedule, right.Start, left.End)\n\n\t\t\t\t\tconflicts = append(conflicts, &Conflict{Left: left, Right: right})\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tm.Lock()\n\t\t\tdefer m.Unlock()\n\n\t\t\tresults[userID] = conflicts\n\t\t}(userID, entries)\n\t}\n\n\twg.Wait()\n\n\treturn results\n}", "func NewCreateMailerEntryConflict() *CreateMailerEntryConflict {\n\n\treturn &CreateMailerEntryConflict{}\n}", "func (tr *TransactionRepository) FindBetween(start int64, end int64) ([]*types.Transaction, *rTypes.Error) {\n\tif start > end {\n\t\treturn nil, errors.Errors[errors.StartMustNotBeAfterEnd]\n\t}\n\tvar transactions []transaction\n\ttr.dbClient.Where(whereClauseBetweenConsensus, start, end).Find(&transactions)\n\n\tsameHashMap := make(map[string][]transaction)\n\tfor _, t := range transactions {\n\t\th := t.getHashString()\n\t\tsameHashMap[h] = 
append(sameHashMap[h], t)\n\t}\n\tres := make([]*types.Transaction, 0, len(sameHashMap))\n\tfor _, sameHashTransactions := range sameHashMap {\n\t\ttransaction, err := tr.constructTransaction(sameHashTransactions)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tres = append(res, transaction)\n\t}\n\treturn res, nil\n}", "func solveProblem(from, to int, pos int, c chan bool) {\n\tfromPos := getCeilPos(from)\n\ttoPos := getCeilPos(to)\n\tif almostPrime[fromPos] < from {\n\t\tfromPos += 1\n\t}\n\tif toPos+1 < len(almostPrime) && almostPrime[toPos+1] == to {\n\t\ttoPos += 1\n\t}\n\n\tif fromPos == toPos && almostPrime[fromPos] != from {\n\t\taddSolution(pos, \"0\", c)\n\t\treturn\n\t}\n\t//fmt.Println(\"Search\", from, to, fromPos, toPos, almostPrime[fromPos:toPos+1])\n\n\taddSolution(pos, fmt.Sprintf(\"%d\", len(almostPrime[fromPos:toPos+1])), c)\n}", "func search(graph []*node, from int) []int {\n\t// fmt.Printf(\"starting at node %d in a graph of size %d\\n\", from, len(graph))\n\tvar queue []*node\n\tqueue = append(queue, graph[from])\n\tfor len(queue) > 0 {\n\t\tcurrent := queue[0]\n\t\tqueue = queue[1:]\n\t\tfor _, neigh := range current.neighbors {\n\t\t\tif neigh.dist == 0 {\n\t\t\t\tneigh.dist = 6 + current.dist\n\t\t\t\tqueue = append(queue, neigh)\n\t\t\t}\n\n\t\t}\n\t}\n\tvar result []int\n\tfor _, n := range graph {\n\t\tif n.id == from {\n\t\t\tcontinue\n\t\t}\n\t\tresult = append(result, n.dist)\n\t}\n\treturn result\n}", "func (s *schedule) getConflicts(timestamp uint32, length uint32) (conflicts uint) {\n\ts.RLock()\n\tdefer s.RUnlock()\n\tfor _, item := range s.items {\n\t\tscheduledFrom := uint64(item.timestamp) % uintmax\n\t\tscheduledTo := scheduledFrom + uint64(item.length)\n\t\tfrom := uint64(timestamp)\n\t\tto := from + uint64(length)\n\n\t\tif scheduledTo > uintmax || to > uintmax {\n\t\t\tif scheduledTo-uintmax <= from || scheduledFrom >= to-uintmax {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else if scheduledTo <= from || scheduledFrom >= to 
{\n\t\t\tcontinue\n\t\t}\n\n\t\tif item.payload == nil {\n\t\t\tconflicts++\n\t\t} else {\n\t\t\tconflicts += 100\n\t\t}\n\t}\n\treturn\n}", "func (ml *messageLog) FromIndex(index int, exclusive bool) defs.MessageFindFunc {\r\n\tif index < 0 {\r\n\t\tindex = len(ml.log.entries) + index\r\n\t\tif index < 0 {\r\n\t\t\tindex = 0\r\n\t\t}\r\n\t}\r\n\tif exclusive {\r\n\t\tindex += 1\r\n\t}\r\n\treturn func() (int, bool) {\r\n\t\tif index < len(ml.log.entries) {\r\n\t\t\treturn index, true\r\n\t\t}\r\n\t\treturn 0, false\r\n\t}\r\n}", "func (re *raftEngine) entriesToApply(ents []raftpb.Entry) (nents []raftpb.Entry) {\r\n\tif len(ents) == 0 {\r\n\t\treturn\r\n\t}\r\n\tfirstIndex := ents[0].Index\r\n\tif firstIndex > re.appliedIndex+1 {\r\n\t\tlog.ZAPSugaredLogger().Errorf(\"Error raised when processing entries to apply, first index of committed entry [%d] should <= appliedIndex [%d].\", firstIndex, re.appliedIndex)\r\n\t\treturn\r\n\t}\r\n\tif re.appliedIndex-firstIndex+1 < uint64(len(ents)) {\r\n\t\tnents = ents[re.appliedIndex-firstIndex+1:]\r\n\t}\r\n\treturn\r\n}", "func ConflictFromMarshalUtil(marshalUtil *marshalutil.MarshalUtil) (conflict Conflict, err error) {\n\treadStartOffset := marshalUtil.ReadOffset()\n\n\tconflict = Conflict{}\n\tbytesID, err := marshalUtil.ReadBytes(int(ledgerstate.TransactionIDLength))\n\tif err != nil {\n\t\terr = errors.Errorf(\"failed to parse ID from conflict: %w\", err)\n\t\treturn\n\t}\n\tconflict.ID, _, err = ledgerstate.TransactionIDFromBytes(bytesID)\n\tif err != nil {\n\t\terr = errors.Errorf(\"failed to parse ID from bytes: %w\", err)\n\t\treturn\n\t}\n\n\tconflict.Opinion, err = OpinionFromMarshalUtil(marshalUtil)\n\tif err != nil {\n\t\terr = errors.Errorf(\"failed to parse opinion from conflict: %w\", err)\n\t\treturn\n\t}\n\n\t// return the number of bytes we processed\n\tparsedBytes := marshalUtil.ReadOffset() - readStartOffset\n\tif parsedBytes != ConflictLength {\n\t\terr = errors.Errorf(\"parsed bytes (%d) did not match 
expected size (%d): %w\", parsedBytes, ConflictLength, cerrors.ErrParseBytesFailed)\n\t\treturn\n\t}\n\n\treturn\n}", "func ConflictsFromMarshalUtil(marshalUtil *marshalutil.MarshalUtil, n uint32) (conflicts Conflicts, err error) {\n\treadStartOffset := marshalUtil.ReadOffset()\n\n\tconflicts = Conflicts{}\n\tfor i := 0; i < int(n); i++ {\n\t\tconflict, e := ConflictFromMarshalUtil(marshalUtil)\n\t\tif e != nil {\n\t\t\terr = fmt.Errorf(\"failed to parse conflict from marshalutil: %w\", e)\n\t\t\treturn\n\t\t}\n\t\tconflicts = append(conflicts, conflict)\n\t}\n\n\t// return the number of bytes we processed\n\tparsedBytes := marshalUtil.ReadOffset() - readStartOffset\n\tif parsedBytes != int(ConflictLength*n) {\n\t\terr = errors.Errorf(\"parsed bytes (%d) did not match expected size (%d): %w\", parsedBytes, ConflictLength*n, cerrors.ErrParseBytesFailed)\n\t\treturn\n\t}\n\n\treturn\n}", "func NewGetWaitlistEntryConflict(body *GetWaitlistEntryConflictResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}", "func sol2(nums []int, target int) []int {\n\tmemo := make(map[int]int)\n\tfor i, n := range nums {\n\t\tmemo[n] = i\n\t}\n\tfor i, n := range nums {\n\t\tif _, ok := memo[target-n]; ok && i != memo[target-n] {\n\t\t\treturn []int{i, memo[target-n]}\n\t\t}\n\t}\n\treturn nil\n}", "func searchInIndex(r io.ReadSeeker, from, to int, searchKey []byte) (int, bool, error) {\n\tif _, err := r.Seek(int64(from), io.SeekStart); err != nil {\n\t\treturn 0, false, fmt.Errorf(\"failed to seek: %w\", err)\n\t}\n\n\tfor {\n\t\tkey, value, err := decode(r)\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn 0, false, fmt.Errorf(\"failed to read: %w\", err)\n\t\t}\n\t\tif err == io.EOF {\n\t\t\treturn 0, false, nil\n\t\t}\n\t\toffset := decodeInt(value)\n\n\t\tif bytes.Equal(key, searchKey) 
{\n\t\t\treturn offset, true, nil\n\t\t}\n\n\t\tif to > from {\n\t\t\tcurrent, err := r.Seek(0, io.SeekCurrent)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, false, fmt.Errorf(\"failed to seek: %w\", err)\n\t\t\t}\n\n\t\t\tif current > int64(to) {\n\t\t\t\treturn 0, false, nil\n\t\t\t}\n\t\t}\n\t}\n}", "func (rf *Raft) buildAppendEntriesReplyWhenNotSuccess(reply *AppendEntriesReply, PrevLogIndex int, PrevLogTerm int) {\n\tif PrevLogIndex > rf.getLastIndex() {\n\t\t// this raft do not know about the PrevLogIndex\n\t\treply.SuggestPrevLogIndex = rf.getLastIndex()\n\t\treply.SuggestPrevLogTerm = rf.getLastTerm()\n\t} else {\n\t\t// there is conflict!\n\t\tConflictTerm := rf.getTermForIndex(PrevLogIndex)\n\t\tAssertF(ConflictTerm != PrevLogTerm, \"\")\n\t\tAssertF(PrevLogIndex > rf.commitIndex, \"\")\n\n\t\t// TODO: change to (ConflictTerm, FirstIndex)\n\t\tif ConflictTerm > PrevLogTerm {\n\t\t\t// T1 -- PrevLogTerm, T2 -- ConflictTerm, T1<T2\n\t\t\t// any (i1,t1) in leaders log, if i1<=PrevLogIndex, then t1<=PrevLogTerm\n\t\t\t// Then we find SuggestPrevLogIndex, in tuple (SuggestPrevLogIndex, t2),\n\t\t\t// that satisfies t2<=T1, and SuggestPrevLogIndex is the large one\n\t\t\t// suggestTerm = the max index ( <= PrevLogTerm )\n\t\t\treply.SuggestPrevLogIndex = PrevLogIndex\n\t\t\tfor ; reply.SuggestPrevLogIndex > rf.commitIndex && rf.getTermForIndex(reply.SuggestPrevLogIndex) > PrevLogTerm; reply.SuggestPrevLogIndex-- {\n\t\t\t}\n\t\t\treply.SuggestPrevLogTerm = rf.getTermForIndex(reply.SuggestPrevLogIndex) // term 0 if index 0\n\t\t} else {\n\t\t\treply.SuggestPrevLogIndex = PrevLogIndex - 1\n\t\t\treply.SuggestPrevLogTerm = rf.getTermForIndex(reply.SuggestPrevLogIndex) // term 0 if index 0\n\t\t}\n\n\t\tAssertF(reply.SuggestPrevLogIndex >= rf.commitIndex,\n\t\t\t\"reply.SuggestPrevLogIndex {%d} >= rf.commitIndex {%d}\",\n\t\t\treply.SuggestPrevLogIndex, rf.commitIndex)\n\t}\n\tAssertF(reply.SuggestPrevLogIndex < PrevLogIndex,\n\t\t\"reply.SuggestPrevLogIndex {%d} < 
PrevLogIndex {%d}\",\n\t\treply.SuggestPrevLogIndex, PrevLogIndex)\n}", "func FindNumberRange(nums []int, key int) [2]int {\n\tresult := [2]int{-1, -1}\n\n\tresult[0] = getFirstInstance(nums, key)\n\tif result[0] != -1 {\n\t\tresult[1] = getLastInstance(nums, key)\n\t}\n\n\treturn result\n}", "func (c *causality) detectConflict(keys [][]byte) (bool, int) {\n\tif len(keys) == 0 {\n\t\treturn false, 0\n\t}\n\n\tfirstIdx := -1\n\tfor _, key := range keys {\n\t\tif idx, ok := c.relations[string(key)]; ok {\n\t\t\tif firstIdx == -1 {\n\t\t\t\tfirstIdx = idx\n\t\t\t} else if firstIdx != idx {\n\t\t\t\treturn true, -1\n\t\t\t}\n\t\t}\n\t}\n\n\treturn firstIdx != -1, firstIdx\n}", "func searchRange(nums []int, target int) []int {\n\tresult := []int{-1, -1}\n\tif len(nums) <= 0 {\n\t\treturn result\n\t}\n\n\tfor i := 0; i < len(nums); i++ {\n\t\tif nums[i] == target {\n\t\t\tresult[0] = i\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor j := len(nums) - 1; j >= 0; j-- {\n\t\tif nums[j] == target {\n\t\t\tresult[1] = j\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn result\n}", "func searchRange(nums []int, target int) []int {\n\tres := []int{-1, -1}\n\tif len(nums) == 0 {\n\t\treturn res\n\t}\n\n\treturn search(nums, 0, len(nums)-1, target)\n}", "func NewConflict(parameters ...wparams.ParamStorer) Error {\n\treturn newGenericError(nil, DefaultConflict, wparams.NewParamStorer(parameters...))\n}", "func (gui *Gui) findNewSelectedIdx(prevNodes []*filetree.FileNode, currNodes []*filetree.FileNode) int {\n\tgetPaths := func(node *filetree.FileNode) []string {\n\t\tif node == nil {\n\t\t\treturn nil\n\t\t}\n\t\tif node.File != nil && node.File.IsRename() {\n\t\t\treturn node.File.Names()\n\t\t} else {\n\t\t\treturn []string{node.Path}\n\t\t}\n\t}\n\n\tfor _, prevNode := range prevNodes {\n\t\tselectedPaths := getPaths(prevNode)\n\n\t\tfor idx, node := range currNodes {\n\t\t\tpaths := getPaths(node)\n\n\t\t\t// If you started off with a rename selected, and now it's broken in two, we want you to jump to 
the new file, not the old file.\n\t\t\t// This is because the new should be in the same position as the rename was meaning less cursor jumping\n\t\t\tfoundOldFileInRename := prevNode.File != nil && prevNode.File.IsRename() && node.Path == prevNode.File.PreviousName\n\t\t\tfoundNode := utils.StringArraysOverlap(paths, selectedPaths) && !foundOldFileInRename\n\t\t\tif foundNode {\n\t\t\t\treturn idx\n\t\t\t}\n\t\t}\n\t}\n\n\treturn -1\n}", "func (ta TableAliases) findConflicts(other TableAliases) (conflicts []string, nonConflicted []string) {\n\tconflicts = []string{}\n\tnonConflicted = []string{}\n\n\tfor alias := range other {\n\t\tif _, ok := ta[alias]; ok {\n\t\t\tconflicts = append(conflicts, alias)\n\t\t} else {\n\t\t\tnonConflicted = append(nonConflicted, alias)\n\t\t}\n\t}\n\n\treturn\n}", "func (d *dirInode) lookUpConflicting(\n\tctx context.Context,\n\tname string) (result LookUpResult, err error) {\n\tstrippedName := strings.TrimSuffix(name, ConflictingFileNameSuffix)\n\n\t// In order to a marked name to be accepted, we require the conflicting\n\t// directory to exist.\n\tvar dirResult LookUpResult\n\tdirResult, err = d.lookUpChildDir(ctx, strippedName)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"lookUpChildDir for stripped name: %v\", err)\n\t\treturn\n\t}\n\n\tif !dirResult.Exists() {\n\t\treturn\n\t}\n\n\t// The directory name exists. 
Find the conflicting file.\n\tresult, err = d.lookUpChildFile(ctx, strippedName)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"lookUpChildFile for stripped name: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}", "func (rs Ranges) Find(r Range) (curr, next Range, present bool) {\n\tif r.IsEmpty() {\n\t\treturn r, next, false\n\t}\n\tvar intersection Range\n\ti := rs.search(r)\n\tif i > 0 {\n\t\tprev := rs[i-1]\n\t\t// we know prev.Pos < r.Pos so intersection.Pos == r.Pos\n\t\tintersection = prev.Intersection(r)\n\t\tif !intersection.IsEmpty() {\n\t\t\tr.Pos = intersection.End()\n\t\t\tr.Size -= intersection.Size\n\t\t\treturn intersection, r, true\n\t\t}\n\t}\n\tif i >= len(rs) {\n\t\treturn r, Range{}, false\n\t}\n\tfound := rs[i]\n\tintersection = found.Intersection(r)\n\tif intersection.IsEmpty() {\n\t\treturn r, Range{}, false\n\t}\n\tif r.Pos < intersection.Pos {\n\t\tcurr = Range{\n\t\t\tPos: r.Pos,\n\t\t\tSize: intersection.Pos - r.Pos,\n\t\t}\n\t\tr.Pos = curr.End()\n\t\tr.Size -= curr.Size\n\t\treturn curr, r, false\n\t}\n\tr.Pos = intersection.End()\n\tr.Size -= intersection.Size\n\treturn intersection, r, true\n}", "func NewConflictR(field string, message string, args ...interface{}) *AppError {\n\treturn NewError(AlreadyExists, field, message, args...)\n}", "func search(ints []uint32, needle uint32) int {\n\t// Define f(-1) == false and f(n) == true.\n\t// Invariant: f(i-1) == false, f(j) == true.\n\ti, j := 0, len(ints)\n\tfor i < j {\n\t\th := int(uint(i+j) >> 1) // avoid overflow when computing h\n\t\t// i ≤ h < j\n\t\tif !(ints[h] >= needle) {\n\t\t\ti = h + 1 // preserves f(i-1) == false\n\t\t} else {\n\t\t\tj = h // preserves f(j) == true\n\t\t}\n\t}\n\t// i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.\n\treturn i\n}", "func (f Formatter) FindBeginIndex(content []byte) int {\n\tbeginIndex := bytes.IndexByte(content, beginSymbol[0])\n\ttempIndex := bytes.IndexByte(content, beginSymbol[1])\n\tif beginIndex > tempIndex && tempIndex != -1 
{\n\t\tbeginIndex = tempIndex\n\t}\n\treturn beginIndex\n}", "func NewConflictResolver(\n\tconfig Config, fbo *folderBranchOps) *ConflictResolver {\n\t// make a logger with an appropriate module name\n\tbranchSuffix := \"\"\n\tif fbo.branch() != data.MasterBranch {\n\t\tbranchSuffix = \" \" + string(fbo.branch())\n\t}\n\ttlfStringFull := fbo.id().String()\n\tlog := config.MakeLogger(\n\t\tfmt.Sprintf(\"CR %s%s\", tlfStringFull[:8], branchSuffix))\n\n\tcr := &ConflictResolver{\n\t\tconfig: config,\n\t\tfbo: fbo,\n\t\tprepper: folderUpdatePrepper{\n\t\t\tconfig: config,\n\t\t\tfolderBranch: fbo.folderBranch,\n\t\t\tblocks: &fbo.blocks,\n\t\t\tlog: log,\n\t\t\tvlog: config.MakeVLogger(log),\n\t\t},\n\t\tlog: traceLogger{log},\n\t\tdeferLog: traceLogger{log.CloneWithAddedDepth(1)},\n\t\tmaxRevsThreshold: crMaxRevsThresholdDefault,\n\t\tcurrInput: conflictInput{\n\t\t\tunmerged: kbfsmd.RevisionUninitialized,\n\t\t\tmerged: kbfsmd.RevisionUninitialized,\n\t\t},\n\t}\n\n\tif fbo.bType == standard && config.Mode().ConflictResolutionEnabled() {\n\t\tcr.startProcessing(libcontext.BackgroundContextWithCancellationDelayer())\n\t}\n\treturn cr\n}", "func (w *Writer) linesToKeepRange(partialMatchIndexes map[int]bool) int {\n\tfirst := -1\n\n\tfor lineIdx := range partialMatchIndexes {\n\t\tif first == -1 {\n\t\t\tfirst = lineIdx\n\t\t\tcontinue\n\t\t}\n\n\t\tif first > lineIdx {\n\t\t\tfirst = lineIdx\n\t\t}\n\t}\n\n\treturn first\n}", "func findOverlappingHunkSet(oldFileDiff, newFileDiff *diff.FileDiff, i, j *int) (oldHunks, newHunks []*diff.Hunk) {\n\t// Collecting overlapped hunks into two arrays\n\n\toldHunks = append(oldHunks, oldFileDiff.Hunks[*i])\n\tnewHunks = append(newHunks, newFileDiff.Hunks[*j])\n\t*i++\n\t*j++\n\nLoop:\n\tfor {\n\t\tswitch {\n\t\t// Starting line of oldHunk is in previous newHunk body (between start and last lines)\n\t\tcase *i < len(oldFileDiff.Hunks) && oldFileDiff.Hunks[*i].OrigStartLine >= newFileDiff.Hunks[*j-1].OrigStartLine 
&&\n\t\t\toldFileDiff.Hunks[*i].OrigStartLine < newFileDiff.Hunks[*j-1].OrigStartLine+newFileDiff.Hunks[*j-1].OrigLines:\n\t\t\toldHunks = append(oldHunks, oldFileDiff.Hunks[*i])\n\t\t\t*i++\n\t\t// Starting line of newHunk is in previous oldHunk body (between start and last lines)\n\t\tcase *j < len(newFileDiff.Hunks) && newFileDiff.Hunks[*j].OrigStartLine >= oldFileDiff.Hunks[*i-1].OrigStartLine &&\n\t\t\tnewFileDiff.Hunks[*j].OrigStartLine < oldFileDiff.Hunks[*i-1].OrigStartLine+oldFileDiff.Hunks[*i-1].OrigLines:\n\t\t\tnewHunks = append(newHunks, newFileDiff.Hunks[*j])\n\t\t\t*j++\n\t\tdefault:\n\t\t\t// No overlapping hunks left\n\t\t\tbreak Loop\n\t\t}\n\t}\n\n\treturn oldHunks, newHunks\n}", "func FindReplications(items []string) []int {\n\t// hash method\n\thashMap := make(map[string]*[2]int)\n\t// int8[0]: 0=none, 1=first_encounter, 2=duplicated,\n\t// int8[1]: first_index\n\tresult := make([]int, 0, len(items))\n\tfor i, item := range items {\n\t\tkey, ok := hashMap[item]\n\t\tif !ok {\n\t\t\thashMap[item] = &[2]int{1, i}\n\t\t\tcontinue\n\t\t}\n\t\tswitch key[0] {\n\t\tcase 1:\n\t\t\thashMap[item][0] = 2\n\t\t\tresult = append(result, hashMap[item][1], i)\n\t\tcase 2:\n\t\t\tresult = append(result, i)\n\t\tdefault:\n\t\t\tpanic(\"???how???\")\n\t\t}\n\t}\n\tsort.Ints(result)\n\treturn result\n}", "func (logSeeker *LogSeeker) BSearchBegin(begin int64, end int64, startValue string, fieldSep rune, fieldIndex int, jsonField string) (offset int64, err error) {\n\n\tif begin > end {\n\t\t//not found\n\t\treturn -1, nil\n\t}\n\n\toffset, err = logSeeker.SeekLinePosition(begin)\n\n\tfield, err := logSeeker.readLineField(offset, fieldSep, fieldIndex, jsonField)\n\n\tif startValue < field {\n\t\t//found\n\t\treturn 0, nil\n\t}\n\n\toffset, err = logSeeker.SeekLinePosition(end - 2)\n\n\tfield, err = logSeeker.readLineField(offset, fieldSep, fieldIndex, jsonField)\n\n\t// fmt.Printf(\"scan end %d-%d ,%s %d\\n\", end, offset, field, fieldIndex)\n\n\tif startValue > 
field {\n\t\t//not found\n\t\treturn -1, nil\n\t}\n\n\tmid := (begin + end) / 2\n\n\tvar lastOffset int64 = -1\n\n\tfor end > begin {\n\n\t\toffset, err = logSeeker.SeekLinePosition(mid)\n\t\t// fmt.Printf(\"offset:lastOffset %d %d \\n\", offset, lastOffset)\n\t\tif lastOffset >= 0 && lastOffset == offset {\n\t\t\t// repeat find the same row\n\t\t\tbreak\n\t\t}\n\n\t\tfield, err = logSeeker.readLineField(offset, fieldSep, fieldIndex, jsonField)\n\t\t// fmt.Printf(\"scan-b %s, begin %d, %d mid:%d\\n\", field, begin, end, mid)\n\n\t\tif field < startValue && offset == begin {\n\t\t\treturn\n\t\t}\n\n\t\tif offset == begin {\n\t\t\toffset = lastOffset\n\t\t\treturn\n\t\t}\n\n\t\tif field >= startValue {\n\t\t\tlastOffset = offset\n\t\t\tend = mid\n\t\t} else {\n\t\t\tbegin = mid + 1\n\t\t}\n\n\t\tmid = (begin + end) / 2\n\n\t}\n\treturn lastOffset, nil\n}", "func findConflicts(leaves []*Transaction) (map[string]*SyncBool, error) {\n\tvar conflicts = make(map[string]*SyncBool)\n\tfor i := 1; i < len(leaves); i++ {\n\t\tx, y := leaves[i-1].ID, leaves[i].ID\n\t\tk := len(x)\n\t\t// This was originally len(leaves)...\n\t\tfor idx := 0; idx < len(x); idx++ {\n\t\t\tvar a, b byte = x[idx], y[idx]\n\t\t\tif a != b {\n\t\t\t\tk = idx\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tconflicts[x[0:k]] = &SyncBool{lock: &sync.Mutex{}, visited: false}\n\t}\n\treturn conflicts, nil\n}", "func (q *QQwry) searchIndex(ip uint32) uint32 {\n\theader := q.readData(8, 0)\n\n\tstart := binary.LittleEndian.Uint32(header[:4])\n\tend := binary.LittleEndian.Uint32(header[4:])\n\n\tbuf := make([]byte, INDEX_LEN)\n\tmid := uint32(0)\n\t_ip := uint32(0)\n\n\tfor {\n\t\tmid = q.getMiddleOffset(start, end)\n\t\tbuf = q.readData(INDEX_LEN, int64(mid))\n\t\t_ip = binary.LittleEndian.Uint32(buf[:4])\n\n\t\tif end-start == INDEX_LEN {\n\t\t\toffset := byte3ToUInt32(buf[4:])\n\t\t\tbuf = q.readData(INDEX_LEN)\n\t\t\tif ip < binary.LittleEndian.Uint32(buf[:4]) {\n\t\t\t\treturn offset\n\t\t\t}\n\t\t\treturn 
0\n\t\t}\n\n\t\t// greater than ip, so move before\n\t\tif _ip > ip {\n\t\t\tend = mid\n\t\t} else if _ip < ip { // less than ip, so move after\n\t\t\tstart = mid\n\t\t} else if _ip == ip {\n\t\t\treturn byte3ToUInt32(buf[4:])\n\t\t}\n\t}\n}", "func (t *BPTree) findRange(start, end []byte) (numFound int, keys [][]byte, pointers []interface{}) {\n\tvar (\n\t\tn *Node\n\t\ti, j int\n\t\tscanFlag bool\n\t)\n\n\tif n = t.FindLeaf(start); n == nil {\n\t\treturn 0, nil, nil\n\t}\n\n\tfor j = 0; j < n.KeysNum && compare(n.Keys[j], start) < 0; {\n\t\tj++\n\t}\n\n\tscanFlag = true\n\tfor n != nil && scanFlag {\n\t\tfor i = j; i < n.KeysNum; i++ {\n\t\t\tif compare(n.Keys[i], end) > 0 {\n\t\t\t\tscanFlag = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tkeys = append(keys, n.Keys[i])\n\t\t\tpointers = append(pointers, n.pointers[i])\n\t\t\tnumFound++\n\t\t}\n\n\t\tn, _ = n.pointers[order-1].(*Node)\n\n\t\tj = 0\n\t}\n\n\treturn\n}", "func findHistory(toRevision int64, allHistory []*appsv1.ControllerRevision) *appsv1.ControllerRevision {\n\tif toRevision == 0 && len(allHistory) <= 1 {\n\t\treturn nil\n\t}\n\n\t// Find the history to rollback to\n\tvar toHistory *appsv1.ControllerRevision\n\tif toRevision == 0 {\n\t\t// If toRevision == 0, find the latest revision (2nd max)\n\t\tsort.Sort(historiesByRevision(allHistory))\n\t\ttoHistory = allHistory[len(allHistory)-2]\n\t} else {\n\t\tfor _, h := range allHistory {\n\t\t\tif h.Revision == toRevision {\n\t\t\t\t// If toRevision != 0, find the history with matching revision\n\t\t\t\treturn h\n\t\t\t}\n\t\t}\n\t}\n\n\treturn toHistory\n}", "func pathsFrom(idx int) int {\n\t// are we already at the end? there's only one path then.\n\tif idx == len(numbers)-1 {\n\t\treturn 1\n\t}\n\n\t// did we already calculate the paths from idx to the end? 
not calculate\n\t// again.\n\tpaths, alreadyCalculated := pathsFromIndex[idx]\n\tif alreadyCalculated {\n\t\treturn paths\n\t}\n\n\tfor nxt := idx + 1; nxt < len(numbers); nxt++ {\n\t\t// paths from idx to the end are the sum of the paths from all my\n\t\t// connections to the end.\n\t\tif numbers[nxt]-numbers[idx] <= 3 {\n\t\t\tpaths += pathsFrom(nxt)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\t// store so we don't calculate again for this idx.\n\tpathsFromIndex[idx] = paths\n\treturn paths\n}", "func find(s [][]int, e []int) int {\n\tfor i, n := range s {\n\t\tif equals(n, e) {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}", "func Find(needle *types.Entry, haystack []*types.Entry) (*types.Entry, bool) {\n\tvar found bool\n\tvar target *types.Entry\n\tfor _, e := range haystack {\n\t\tif types.EntriesAreEqual(e, needle) {\n\t\t\tfound = true\n\t\t\ttarget = e\n\t\t\tbreak\n\t\t}\n\t}\n\treturn target, found\n}", "func IndexFrom(s, substr []byte, from int) int {\n\tif from >= len(s) {\n\t\treturn -1\n\t}\n\tif from <= 0 {\n\t\treturn bytes.Index(s, substr)\n\t}\n\ti := bytes.Index(s[from:], substr)\n\tif i == -1 {\n\t\treturn -1\n\t}\n\treturn from + i\n}", "func NewUpdateRequestsRequestIDConflict() *UpdateRequestsRequestIDConflict {\n\treturn &UpdateRequestsRequestIDConflict{}\n}", "func searchSocketId(idx int, socketedItems []models.Item) int {\n\tfor socketedIndex, v := range socketedItems {\n\t\tif idx == v.Socket {\n\t\t\treturn socketedIndex\n\t\t}\n\t}\n\treturn -1\n}", "func (s *LogStore) prevIndexMatchingManifests(index int, mns model.ManifestNameSet) int {\n\tif len(mns) == 0 || index == 0 {\n\t\treturn index - 1\n\t}\n\n\tfor i := index - 1; i >= 0; i-- {\n\t\tspan, ok := s.spans[s.segments[i].SpanID]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tmn := span.ManifestName\n\t\tif mns[mn] {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}", "func search(s []int, e int) int {\n\tstart := 0\n\tend := len(s)\n\tfor start < end {\n\t\ti := start + (end - start)/2\n\t\tif e 
> s[i] {\n\t\t\tstart = i+1\n\t\t} else if e < s[i] {\n\t\t\tend = i\n\t\t} else {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}", "func (h *Handler) syncFrom(to []*key.Identity, lastBeacon *Beacon) (*Beacon, error) {\n\th.Lock()\n\tpub := h.pub\n\th.Unlock()\n\tcurrentRound := lastBeacon.Round\n\tcurrentSig := lastBeacon.Signature\n\tvar currentBeacon = lastBeacon\n\tinitRound := currentRound\n\n\tfor _, id := range to {\n\t\tif h.addr == id.Addr {\n\t\t\tcontinue\n\t\t}\n\t\t//fmt.Println(\" TRYING TO SYNC TO \", id.Address())\n\t\t// if node doesn't answer quickly, we move on\n\t\t//h.client.SetTimeout(1 * time.Second)\n\t\th.l.Debug(\"sync_from\", \"try_sync\", \"to\", id.Addr, \"from_round\", currentRound)\n\t\t//ctx, cancel := context.WithCancel(context.Background())\n\t\tctx, cancel := context.Background(), func() {}\n\t\trequest := &proto.SyncRequest{\n\t\t\t// we ask rounds from at least one round more than what we already\n\t\t\t// have\n\t\t\tFromRound: currentRound + 1,\n\t\t}\n\t\trespCh, err := h.client.SyncChain(ctx, id, request)\n\t\tif err != nil {\n\t\t\th.l.Error(\"sync_from\", currentRound, \"error\", err, \"from\", id.Address())\n\t\t\tfmt.Println(\" CAN NOT SYNC TO \", id.Address())\n\t\t\tcontinue\n\t\t}\n\n\t\tfmt.Println(\" LISTENING TO SYNC CHANNEL FROM \", id.Address())\n\t\tfor syncReply := range respCh {\n\t\t\t// we only sync for increasing round numbers\n\t\t\t// there might be gaps so we dont check for sequentiality but our\n\t\t\t// chain from the round we have should be valid\n\t\t\tif syncReply.Round <= currentRound {\n\t\t\t\th.l.Debug(\"sync_round\", currentRound, \"from\", id.Address(), \"invalid-reply\")\n\t\t\t\tcancel()\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// we want answers consistent from our round that we have\n\t\t\tprevSig := syncReply.GetPreviousSig()\n\t\t\tprevRound := syncReply.GetPreviousRound()\n\t\t\tif currentRound != prevRound || !bytes.Equal(prevSig, currentSig) {\n\t\t\t\th.l.Error(\"sync_round\", currentRound, 
\"from\", id.Address(), \"want_prevRound\", currentRound, \"got_prevRound\", prevRound, \"want_prevSig\", shortSigStr(currentSig), \"got_prevSig\", shortSigStr(prevSig), \"got_sig\", shortSigStr(syncReply.GetSignature()), \"round\", syncReply.GetRound())\n\t\t\t\tcancel()\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tmsg := Message(prevSig, prevRound, syncReply.GetRound())\n\t\t\tif err := h.conf.Scheme.VerifyRecovered(pub.Commit(), msg, syncReply.GetSignature()); err != nil {\n\t\t\t\th.l.Error(\"sync_round\", currentRound, \"invalid_sig\", err, \"from\", id.Address())\n\t\t\t\tcancel()\n\t\t\t\tbreak\n\t\t\t}\n\t\t\th.l.Debug(\"sync_round\", syncReply.GetRound(), \"valid_sync\", id.Address())\n\t\t\tbeacon := &Beacon{\n\t\t\t\tPreviousSig: syncReply.GetPreviousSig(),\n\t\t\t\tPreviousRound: syncReply.GetPreviousRound(),\n\t\t\t\tRound: syncReply.GetRound(),\n\t\t\t\tSignature: syncReply.GetSignature(),\n\t\t\t}\n\t\t\th.store.Put(beacon)\n\n\t\t\tcurrentBeacon = beacon\n\t\t\tcurrentRound = syncReply.GetRound()\n\t\t\tcurrentSig = syncReply.GetSignature()\n\t\t\t// we check each time that we haven't advanced a round in the\n\t\t\t// syncing process\n\t\t\tnextRound, _ := NextRound(h.conf.Clock.Now().Unix(), h.conf.Group.Period, h.conf.Group.GenesisTime)\n\t\t\t// if it gave us the round just before the next one, then we are\n\t\t\t// synced!\n\t\t\tif currentRound+1 == nextRound {\n\t\t\t\th.l.Debug(\"sync\", \"to_head\", \"round\", currentRound, \"sig\", shortSigStr(currentSig))\n\t\t\t\tcancel()\n\t\t\t\treturn currentBeacon, nil\n\t\t\t}\n\t\t}\n\t}\n\n\tnextRound, _ := NextRound(h.conf.Clock.Now().Unix(), h.conf.Group.Period, h.conf.Group.GenesisTime)\n\treturn currentBeacon, fmt.Errorf(\"syncing went from %d to %d whereas current round is %d: network is down\", initRound, currentRound, nextRound-1)\n}", "func (tv *TextView) MatchFromPos(matches []FileSearchMatch, cpos TextPos) (int, bool) {\n\tfor i, m := range matches {\n\t\treg := tv.Buf.AdjustReg(m.Reg)\n\t\tif 
reg.Start == cpos || cpos.IsLess(reg.Start) {\n\t\t\treturn i, true\n\t\t}\n\t}\n\treturn 0, false\n}", "func ConflictsFromBytes(bytes []byte, n uint32) (conflicts Conflicts, consumedBytes int, err error) {\n\tif len(bytes)/ConflictLength < int(n) {\n\t\terr = errors.Errorf(\"not enough bytes to parse %d conflicts\", n)\n\t\treturn\n\t}\n\n\tmarshalUtil := marshalutil.New(bytes)\n\tif conflicts, err = ConflictsFromMarshalUtil(marshalUtil, n); err != nil {\n\t\terr = errors.Errorf(\"failed to parse Conflicts from MarshalUtil: %w\", err)\n\t\treturn\n\t}\n\tconsumedBytes = marshalUtil.ReadOffset()\n\n\treturn\n}", "func (f Formatter) FindEndIndex(content []byte) int {\n\tsignIndex := bytes.IndexByte(beginSymbol, content[0])\n\tif signIndex != -1 {\n\t\tbeginsCount := 0\n\t\tfor i, symbol := range content {\n\t\t\tswitch symbol {\n\t\t\tcase beginSymbol[signIndex]:\n\t\t\t\tbeginsCount++\n\t\t\tcase endSymbol[signIndex]:\n\t\t\t\tbeginsCount--\n\t\t\t}\n\t\t\tif beginsCount == 0 {\n\t\t\t\treturn i\n\t\t\t} else if beginsCount < 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn -1\n}", "func (d *dataUpdateTracker) filterFrom(ctx context.Context, oldest, newest uint64) *bloomFilterResponse {\n\tbf := d.newBloomFilter()\n\tbfr := bloomFilterResponse{\n\t\tOldestIdx: oldest,\n\t\tCurrentIdx: d.Current.idx,\n\t\tComplete: true,\n\t}\n\t// Loop through each index requested.\n\tfor idx := oldest; idx <= newest; idx++ {\n\t\tv := d.History.find(idx)\n\t\tif v == nil {\n\t\t\tif d.Current.idx == idx {\n\t\t\t\t// Merge current.\n\t\t\t\terr := bf.Merge(d.Current.bf.BloomFilter)\n\t\t\t\tlogger.LogIf(ctx, err)\n\t\t\t\tif err != nil {\n\t\t\t\t\tbfr.Complete = false\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbfr.Complete = false\n\t\t\tbfr.OldestIdx = idx + 1\n\t\t\tcontinue\n\t\t}\n\n\t\terr := bf.Merge(v.bf.BloomFilter)\n\t\tif err != nil {\n\t\t\tbfr.Complete = false\n\t\t\tlogger.LogIf(ctx, err)\n\t\t\tcontinue\n\t\t}\n\t\tbfr.NewestIdx = idx\n\t}\n\tvar dst 
bytes.Buffer\n\t_, err := bf.WriteTo(&dst)\n\tif err != nil {\n\t\tlogger.LogIf(ctx, err)\n\t\treturn nil\n\t}\n\tbfr.Filter = dst.Bytes()\n\n\treturn &bfr\n}", "func (rl RangeList) findPosition(number int) int {\n\tif len(rl.list) <= 0 {\n\t\treturn -1\n\t}\n\n\tleft := 0\n\tright := len(rl.list) - 1\n\tfor left <= right {\n\t\tmid := (left + right) / 2\n\t\tif number < rl.list[mid][leftIdx] {\n\t\t\tright = mid - 1\n\t\t}\n\t\tif number > rl.list[mid][leftIdx] {\n\t\t\tleft = mid + 1\n\t\t}\n\t\tif number == rl.list[mid][leftIdx] {\n\t\t\treturn mid\n\t\t}\n\t}\n\n\tif left < right {\n\t\treturn left\n\t} else {\n\t\treturn right\n\t}\n}", "func (v *VersionHistory) FindLCAItem(\n\tremote *VersionHistory,\n) (*VersionHistoryItem, error) {\n\n\tlocalIndex := len(v.Items) - 1\n\tremoteIndex := len(remote.Items) - 1\n\n\tfor localIndex >= 0 && remoteIndex >= 0 {\n\t\tlocalVersionItem := v.Items[localIndex]\n\t\tremoteVersionItem := remote.Items[remoteIndex]\n\n\t\tif localVersionItem.Version == remoteVersionItem.Version {\n\t\t\tif localVersionItem.EventID > remoteVersionItem.EventID {\n\t\t\t\treturn remoteVersionItem.Duplicate(), nil\n\t\t\t}\n\t\t\treturn localVersionItem.Duplicate(), nil\n\t\t} else if localVersionItem.Version > remoteVersionItem.Version {\n\t\t\tlocalIndex--\n\t\t} else {\n\t\t\t// localVersionItem.Version < remoteVersionItem.Version\n\t\t\tremoteIndex--\n\t\t}\n\t}\n\n\treturn nil, &shared.BadRequestError{\n\t\tMessage: \"version history is malformed. 
No joint point found.\",\n\t}\n}", "func (o *ordering) find(str string) *entry {\n\te := o.entryMap[str]\n\tif e == nil {\n\t\tr := []rune(str)\n\t\tif len(r) == 1 {\n\t\t\tconst (\n\t\t\t\tfirstHangul = 0xAC00\n\t\t\t\tlastHangul = 0xD7A3\n\t\t\t)\n\t\t\tif r[0] >= firstHangul && r[0] <= lastHangul {\n\t\t\t\tce := []rawCE{}\n\t\t\t\tnfd := norm.NFD.String(str)\n\t\t\t\tfor _, r := range nfd {\n\t\t\t\t\tce = append(ce, o.find(string(r)).elems...)\n\t\t\t\t}\n\t\t\t\te = o.newEntry(nfd, ce)\n\t\t\t} else {\n\t\t\t\te = o.newEntry(string(r[0]), []rawCE{\n\t\t\t\t\t{w: []int{\n\t\t\t\t\t\timplicitPrimary(r[0]),\n\t\t\t\t\t\tdefaultSecondary,\n\t\t\t\t\t\tdefaultTertiary,\n\t\t\t\t\t\tint(r[0]),\n\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t\te.modified = true\n\t\t\t}\n\t\t\te.exclude = true // do not index implicits\n\t\t}\n\t}\n\treturn e\n}", "func (fr *FileRing) Search(start, end int64) []File {\n\tfr.lock.RLock()\n\tdefer fr.lock.RUnlock()\n\n\tr := make([]File, 0)\n\tfor k, v := range fr.hashMap {\n\t\tif InRange(k, start, end, M) {\n\t\t\tr = append(r, v.SortedFiles()...)\n\t\t}\n\t}\n\treturn CopyAll(r)\n}", "func NewCreateInputPortConflict() *CreateInputPortConflict {\n\treturn &CreateInputPortConflict{}\n}", "func (service *ResultService) ResolveConflict(in *proto_job.ResultRequest) (*proto_job.ResultReply, error) {\n\tresult, err := service.accessor.GetByID(uint(in.Id))\n\n\tif err != nil {\n\t\treturn nil, err\n\t} else if result.ID == 0 {\n\t\tlog.Fatal(\"Conflict not found in SetResultState\")\n\t}\n\n\tresult.State = \"RESOLVED\"\n\tresult.TaxonID = uint(in.TaxonId)\n\terr = service.accessor.Save(result)\n\n\treturn converters.ResultModelToProto(result), err\n}", "func NewUpdateAppConflict() *UpdateAppConflict {\n\treturn &UpdateAppConflict{}\n}", "func findAndIndex(abc *st.Art) (index1 int, index2 int) {\n\tsourse := abc.Flag.Color.ByIndex.Range\n\tlSourse := len(sourse)\n\n\tindex1 = sourse[0]\n\tindex2 = sourse[lSourse-1]\n\t// if wrong range, fix 
it\n\tif index1 > index2 {\n\t\tindex1, index2 = index2, index1\n\t}\n\treturn index1, index2\n}", "func (f intFinder) find(rid int) (int, bool) {\n\tline, ok := f.m[rid]\n\treturn line, ok\n}", "func NewConflict(field string) *AppError {\n\treturn NewError(AlreadyExists, field, \"already exists\")\n}", "func (finder InMemoryFinder) FindFixedPaths(\n\tctx context.Context,\n\tsourceAsset xdr.Asset,\n\tamountToSpend xdr.Int64,\n\tdestinationAssets []xdr.Asset,\n\tmaxLength uint,\n) ([]paths.Path, uint32, error) {\n\tif finder.graph.IsEmpty() {\n\t\treturn nil, 0, ErrEmptyInMemoryOrderBook\n\t}\n\n\tif maxLength == 0 {\n\t\tmaxLength = MaxInMemoryPathLength\n\t}\n\tif maxLength > MaxInMemoryPathLength {\n\t\treturn nil, 0, errors.New(\"invalid value of maxLength\")\n\t}\n\n\torderbookPaths, lastLedger, err := finder.graph.FindFixedPaths(\n\t\tctx,\n\t\tint(maxLength),\n\t\tsourceAsset,\n\t\tamountToSpend,\n\t\tdestinationAssets,\n\t\tmaxAssetsPerPath,\n\t\tfinder.includePools,\n\t)\n\tresults := make([]paths.Path, len(orderbookPaths))\n\tfor i, path := range orderbookPaths {\n\t\tresults[i] = paths.Path{\n\t\t\tPath: path.InteriorNodes,\n\t\t\tSource: path.SourceAsset,\n\t\t\tSourceAmount: path.SourceAmount,\n\t\t\tDestination: path.DestinationAsset,\n\t\t\tDestinationAmount: path.DestinationAmount,\n\t\t}\n\t}\n\treturn results, lastLedger, err\n}", "func (f *asyncFinder) At(ctx context.Context, at int64, after uint64) (ch swarm.Chunk, cur, next feeds.Index, err error) {\n\t// first lookup update at the 0 index\n\t// TODO: consider receive after as uint\n\tch, err = f.get(ctx, at, after)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tif ch == nil {\n\t\treturn nil, nil, &index{after}, nil\n\t}\n\t// if chunk exists construct an initial interval with base=0\n\tc := make(chan *result)\n\ti := newInterval(0)\n\ti.found = &result{ch, nil, 0, 0}\n\n\tquit := make(chan struct{})\n\tdefer close(quit)\n\n\t// launch concurrent request at doubling intervals\n\tgo 
f.at(ctx, at, 0, i, c, quit)\n\tfor r := range c {\n\t\t// collect the results into the interval\n\t\ti = r.interval\n\t\tif r.chunk == nil {\n\t\t\tif i.notFound < r.level {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ti.notFound = r.level - 1\n\t\t} else {\n\t\t\tif i.found.level > r.level {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// if a chunk is found on the max level, and this is already a subinterval\n\t\t\t// then found.index+1 is already known to be not found\n\t\t\tif i.level == r.level && r.level < DefaultLevels {\n\t\t\t\treturn r.chunk, &index{r.index}, &index{r.index + 1}, nil\n\t\t\t}\n\t\t\ti.found = r\n\t\t}\n\t\t// below applies even if i.latest==ceilingLevel in which case we just continue with\n\t\t// DefaultLevel lookaheads\n\t\tif i.found.level == i.notFound {\n\t\t\tif i.found.level == 0 {\n\t\t\t\treturn i.found.chunk, &index{i.found.index}, &index{i.found.index + 1}, nil\n\t\t\t}\n\t\t\tgo f.at(ctx, at, 0, i.next(), c, quit)\n\t\t}\n\t\t// inconsistent feed, retry\n\t\tif i.notFound < i.found.level {\n\t\t\tgo f.at(ctx, at, i.found.level, i.retry(), c, quit)\n\t\t}\n\t}\n\treturn nil, nil, nil, nil\n}", "func (idx uniqIndex) findCollisions(ctx context.Context, key, value val.Tuple, cb collisionFn) error {\n\tindexKey := idx.secondaryBld.SecondaryKeyFromRow(key, value)\n\tif idx.prefixDesc.HasNulls(indexKey) {\n\t\treturn nil // NULLs cannot cause unique violations\n\t}\n\n\t// This code uses the secondary index to iterate over all rows (key/value pairs) that have the same prefix.\n\t// The prefix here is all the value columns this index is set up to track\n\tcollisions := make([]val.Tuple, 0)\n\terr := idx.secondary.GetPrefix(ctx, indexKey, idx.prefixDesc, func(k, _ val.Tuple) (err error) {\n\t\tif k != nil {\n\t\t\tcollisions = append(collisions, k)\n\t\t}\n\t\treturn\n\t})\n\tif err != nil || len(collisions) == 0 {\n\t\treturn err\n\t}\n\n\tcollisionDetected := false\n\tfor _, collision := range collisions {\n\t\t// Next find the key in the primary (aka 
clustered) index\n\t\tclusteredKey := idx.clusteredBld.ClusteredKeyFromIndexKey(collision)\n\t\tif bytes.Equal(key, clusteredKey) {\n\t\t\tcontinue // collided with ourselves\n\t\t}\n\n\t\t// |prefix| was non-unique, find the clustered index row that\n\t\t// collided with row(|key|, |value|) and pass both to |cb|\n\t\terr = idx.clustered.Get(ctx, clusteredKey, func(k val.Tuple, v val.Tuple) error {\n\t\t\tif k == nil {\n\t\t\t\ts := idx.clusteredKeyDesc.Format(clusteredKey)\n\t\t\t\treturn errors.New(\"failed to find key: \" + s)\n\t\t\t}\n\t\t\tcollisionDetected = true\n\t\t\treturn cb(k, v)\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif collisionDetected {\n\t\treturn cb(key, value)\n\t} else {\n\t\treturn nil\n\t}\n}", "func NewUpdateMTOServiceItemStatusConflict() *UpdateMTOServiceItemStatusConflict {\n\treturn &UpdateMTOServiceItemStatusConflict{}\n}", "func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles {\n\tdst = dst[:0]\n\tfor i := 0; i < len(tf); {\n\t\tt := tf[i]\n\t\tif t.overlaps(icmp, umin, umax) {\n\t\t\tif umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 {\n\t\t\t\tumin = t.imin.ukey()\n\t\t\t\tdst = dst[:0]\n\t\t\t\ti = 0\n\t\t\t\tcontinue\n\t\t\t} else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 {\n\t\t\t\tumax = t.imax.ukey()\n\t\t\t\t// Restart search if it is overlapped.\n\t\t\t\tif overlapped {\n\t\t\t\t\tdst = dst[:0]\n\t\t\t\t\ti = 0\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tdst = append(dst, t)\n\t\t}\n\t\ti++\n\t}\n\n\treturn dst\n}", "func NewGetFullTextIndexStatusConfigurationConflict() *GetFullTextIndexStatusConfigurationConflict {\n\treturn &GetFullTextIndexStatusConfigurationConflict{}\n}", "func (a *IntegerArray) FindRange(min, max int64) (int, int) {\n\tif a.Len() == 0 || min > max {\n\t\treturn -1, -1\n\t}\n\n\tminVal := a.MinTime()\n\tmaxVal := a.MaxTime()\n\n\tif maxVal < min || minVal > max {\n\t\treturn -1, -1\n\t}\n\n\treturn 
a.search(min), a.search(max)\n}", "func searchRange(nums []int, target int) []int {\n\tans := []int{-1, -1}\n\tif len(nums) == 0 {\n\t\treturn ans\n\t}\n\tfirstHit := -1\n\tlo, hi, mid := 0, len(nums)-1, 0\n\tfor lo <= hi {\n\t\tmid = (lo + hi) >> 1\n\t\tif nums[mid] == target {\n\t\t\tfirstHit = mid\n\t\t\tbreak\n\t\t} else if nums[mid] < target {\n\t\t\tlo = mid + 1\n\t\t} else {\n\t\t\thi = mid - 1\n\t\t}\n\t}\n\n\tif firstHit == -1 {\n\t\treturn ans\n\t}\n\n\tsearchBoundary := func(l, h, t int) int {\n\t\tmid := 0\n\t\tfor l < h {\n\t\t\tmid = (l + h) >> 1\n\t\t\tif nums[mid] < t {\n\t\t\t\tl = mid + 1\n\t\t\t} else {\n\t\t\t\th = mid\n\t\t\t}\n\t\t}\n\t\treturn l\n\t}\n\n\tans[0] = searchBoundary(lo, firstHit, target)\n\tans[1] = searchBoundary(firstHit, hi+1, target+1) - 1\n\treturn ans\n}", "func searchLinear(nums []uint, n uint) int {\n\tfor i := 0; i < len(nums); i++ {\n\t\tif nums[i] == n {\n\t\t\treturn int(n)\n\t\t}\n\t}\n\treturn -1\n}", "func NewCreateWaitlistEntryConflict(body *CreateWaitlistEntryConflictResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}", "func IndexLookup(x *suffixarray.Index, s []byte, n int) []int", "func NewCreateRuleSetConflict() *CreateRuleSetConflict {\n\treturn &CreateRuleSetConflict{}\n}", "func findPosition(value int, data []int) int {\n\tif len(data) == 0 {\n\t\treturn -1\n\t}\n\tfor index := 0; index < len(data); index++ {\n\t\tif data[index] == value {\n\t\t\treturn index\n\t\t}\n\t}\n\treturn -1\n}", "func (idx *deltaIndex) findMatch(src, tgt []byte, tgtOffset int) (srcOffset, l int) {\n\tif len(tgt) < tgtOffset+s {\n\t\treturn 0, len(tgt) - tgtOffset\n\t}\n\n\tif len(src) < blksz {\n\t\treturn 0, -1\n\t}\n\n\tif len(tgt) >= tgtOffset+s && len(src) >= blksz {\n\t\th := hashBlock(tgt, tgtOffset)\n\t\ttIdx := h & idx.mask\n\t\teIdx := 
idx.table[tIdx]\n\t\tif eIdx != 0 {\n\t\t\tsrcOffset = idx.entries[eIdx]\n\t\t} else {\n\t\t\treturn\n\t\t}\n\n\t\tl = matchLength(src, tgt, tgtOffset, srcOffset)\n\t}\n\n\treturn\n}", "func (a *FloatArray) FindRange(min, max int64) (int, int) {\n\tif a.Len() == 0 || min > max {\n\t\treturn -1, -1\n\t}\n\n\tminVal := a.MinTime()\n\tmaxVal := a.MaxTime()\n\n\tif maxVal < min || minVal > max {\n\t\treturn -1, -1\n\t}\n\n\treturn a.search(min), a.search(max)\n}", "func indexOf(fd *os.File, phoneNumber string) (string, int64, error) {\n\tfd.Seek(0, 0)\n\treader := bufio.NewReader(fd)\n\tvar line string\n\tvar cursorPos int64 = 0\n\tvar err error\n\tfor {\n\t\tline, err = reader.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif isDuplicate(line, phoneNumber) {\n\t\t\treturn line, cursorPos, nil\n\t\t}\n\t\tcursorPos += int64(len(line))\n\t}\n\treturn \"\", -1, nil\n}", "func Overlaps(trees map[string]*interval.IntTree, chrom string, start, end int, result *[]irange) {\n\tvar tree = trees[chrom]\n\tif len(*result) != 0 {\n\t\t*result = (*result)[:0]\n\t}\n\tif tree == nil {\n\t\tif strings.HasPrefix(chrom, \"chr\") {\n\t\t\ttree = trees[chrom[3:len(chrom)]]\n\t\t}\n\t\tif tree == nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\tq := irange{Start: start, End: end, UID: uintptr(tree.Len() + 1)}\n\n\ttree.DoMatching(func(iv interval.IntInterface) bool {\n\t\t*result = append(*result, iv.(irange))\n\t\treturn false\n\t}, q)\n\n}", "func (n *node) parentEntriesIdx() (int, error) {\n\tp := n.parent\n\tif p != nil {\n\t\tfor idx, e := range p.entries {\n\t\t\tif e.child == n {\n\t\t\t\treturn idx, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn -1, errors.New(\"This node is not found in parent's entries\")\n}", "func getOccurrencePosition(arr []int, key, firstPos, lastPos, res int, findFirstOccr bool) int {\n\n\tif firstPos <= lastPos {\n\t\tmidIdx := (firstPos+lastPos)/2\n\t\tif arr[midIdx] == key {\n\t\t\tres = midIdx\n\t\t\tif findFirstOccr {\n\t\t\t\treturn 
getOccurrencePosition(arr, key, firstPos, midIdx-1, res, findFirstOccr)\n\t\t\t} else {\n\t\t\t\treturn getOccurrencePosition(arr, key, midIdx+1, lastPos, res, findFirstOccr)\n\t\t\t}\n\t\t} else if arr[midIdx] < key {\n\t\t\t// search in right hand side of array in case array is sorted in ascending order\n\t\t\treturn getOccurrencePosition(arr, key, midIdx+1, lastPos, res, findFirstOccr)\n\t\t} else {\n\t\t\t// arr[midIdx] > key, i.e. search in first half of the array\n\t\t\treturn getOccurrencePosition(arr, key, firstPos, midIdx-1, res, findFirstOccr)\n\t\t}\n\t}\n\treturn res\n}", "func findstart(a []int) int {\n\tfmt.Println(a)\n\tif len(a) == 0 {\n\t\treturn -1\n\t}\n\tif len(a) == 1 {\n\t\treturn a[0]\n\t}\n\n\tmid := len(a) / 2 //7/2 = mid = 3, a[3] = 7\n\tstart := 0\n\tend := len(a) - 1\n\n\tif a[mid-1] > a[mid] && a[mid] < a[(mid+1)%len(a)] {\n\t\treturn a[mid]\n\t} else if a[mid] > a[start] && a[mid] > a[end] {\n\t\treturn findstart(a[mid:])\n\t} else if a[mid] < a[start] && a[mid] < a[end] {\n\t\treturn findstart(a[start:mid])\n\t}\n\n\tif (a[mid] > a[start]) && mid > start {\n\t\treturn findstart(a[start:mid])\n\t}\n\n\tif a[mid] < a[start] && mid < end {\n\t\treturn findstart(a[mid:])\n\t}\n\treturn -1\n\n}", "func (c *seriesStore) calculateIndexEntries(ctx context.Context, from, through model.Time, chunk Chunk) (WriteBatch, []string, error) {\n\tseenIndexEntries := map[string]struct{}{}\n\tentries := []IndexEntry{}\n\n\tmetricName := chunk.Metric.Get(labels.MetricName)\n\tif metricName == \"\" {\n\t\treturn nil, nil, fmt.Errorf(\"no MetricNameLabel for chunk\")\n\t}\n\n\tkeys, labelEntries, err := c.schema.GetCacheKeysAndLabelWriteEntries(from, through, chunk.UserID, metricName, chunk.Metric, chunk.ExternalKey())\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\t_, _, missing := c.writeDedupeCache.Fetch(ctx, keys)\n\t// keys and labelEntries are matched in order, but Fetch() may\n\t// return missing keys in any order so check against all of 
them.\n\tfor _, missingKey := range missing {\n\t\tfor i, key := range keys {\n\t\t\tif key == missingKey {\n\t\t\t\tentries = append(entries, labelEntries[i]...)\n\t\t\t}\n\t\t}\n\t}\n\n\tchunkEntries, err := c.schema.GetChunkWriteEntries(from, through, chunk.UserID, metricName, chunk.Metric, chunk.ExternalKey())\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tentries = append(entries, chunkEntries...)\n\n\tindexEntriesPerChunk.Observe(float64(len(entries)))\n\n\t// Remove duplicate entries based on tableName:hashValue:rangeValue\n\tresult := c.index.NewWriteBatch()\n\tfor _, entry := range entries {\n\t\tkey := fmt.Sprintf(\"%s:%s:%x\", entry.TableName, entry.HashValue, entry.RangeValue)\n\t\tif _, ok := seenIndexEntries[key]; !ok {\n\t\t\tseenIndexEntries[key] = struct{}{}\n\t\t\tresult.Add(entry.TableName, entry.HashValue, entry.RangeValue, entry.Value)\n\t\t}\n\t}\n\n\treturn result, missing, nil\n}", "func code2index(code uint32) int {\n\tranges := []codeRange{\n\t\tcodeRange{0x4e00, 0x9fff},\n\t\tcodeRange{0x2e80, 0x2eff},\n\t\tcodeRange{0x3000, 0x303f},\n\t\tcodeRange{0x31c0, 0x31ff},\n\t\tcodeRange{0x3200, 0x32ff},\n\t\tcodeRange{0x3300, 0x33ff},\n\t\tcodeRange{0x3400, 0x4dff},\n\t\tcodeRange{0xf900, 0xfaff},\n\t\tcodeRange{0xfe30, 0xfe4f},\n\t\tcodeRange{0x20000, 0x2a6df},\n\t\tcodeRange{0x2a700, 0x2b73f},\n\t\tcodeRange{0x2b740, 0x2b81f},\n\t\tcodeRange{0x2b820, 0x2ceaf},\n\t\tcodeRange{0x2ceb0, 0x2ebef},\n\t\tcodeRange{0x2f800, 0x2fa1f},\n\t\tcodeRange{endOfIndex, endOfIndex},\n\t}\n\n\toffset := 0\n\tfor _, r := range ranges {\n\t\tif code >= r.From && code <= r.To {\n\t\t\treturn offset + int(code) - int(r.From)\n\t\t}\n\t\toffset += 1 + int(r.To) - int(r.From)\n\t}\n\treturn 0\n}", "func searchRange(nums []int, target int) []int {\n\trst := make([]int, 2)\n\trst[0] = -1\n\trst[1] = -1\n\n\tif len(nums) == 0 {\n\t\treturn rst\n\t}\n\n\t// search for left bound\n\tl, r := 0, len(nums)-1\n\tmid := 0\n\tfor l < r {\n\t\tmid = l + (r-l)/2\n\t\tif 
nums[mid] < target {\n\t\t\tl = mid + 1\n\t\t} else {\n\t\t\t// target <= nums[mid]\n\t\t\tr = mid\n\t\t}\n\t}\n\tif nums[l] != target {\n\t\treturn rst\n\t} else {\n\t\trst[0] = l\n\t}\n\n\t// search for right bound\n\tl, r = 0, len(nums)-1\n\tfor l < r {\n\t\tmid = l + (r-l)/2 + 1 // trick to make mid bias to right bound\n\t\tif target < nums[mid] {\n\t\t\tr = mid - 1\n\t\t} else {\n\t\t\t// nums[mid] <= target\n\t\t\tl = mid\n\t\t}\n\t}\n\trst[1] = l\n\n\treturn rst\n}", "func (logSeeker *LogSeeker) BSearchEnd(begin int64, end int64, endValue string, fieldSep rune, fieldIndex int, jsonField string) (offset int64, err error) {\n\n\tif begin > end {\n\t\t//not found\n\t\treturn -1, nil\n\t}\n\n\toffset, err = logSeeker.SeekLinePosition(end - 2)\n\n\tfield, err := logSeeker.readLineField(offset, fieldSep, fieldIndex, jsonField)\n\n\t// fmt.Printf(\"scan end %d-%d ,%s %d\\n\", end, offset, field, fieldIndex)\n\n\tif endValue > field {\n\t\t//found\n\t\treturn end, nil\n\t}\n\n\tmid := (begin + end) / 2\n\n\tvar lastOffset int64 = -1\n\n\tfor end > begin {\n\n\t\toffset, err = logSeeker.SeekLinePosition(mid)\n\t\t// fmt.Printf(\"offset:lastOffset %d %d \\n\", offset, lastOffset)\n\t\tif lastOffset >= 0 && lastOffset == offset {\n\t\t\t// repeat find the same row\n\t\t\tbreak\n\t\t}\n\n\t\tfield, err = logSeeker.readLineField(offset, fieldSep, fieldIndex, jsonField)\n\t\t// fmt.Printf(\"scan %s begin %d offset %d,end:%d mid:%d\\n\", field, begin, offset, end, mid)\n\n\t\tif field <= endValue && offset == begin {\n\t\t\treturn\n\t\t}\n\n\t\tif offset == begin {\n\t\t\treturn\n\t\t}\n\n\t\tif field >= endValue {\n\t\t\tlastOffset = offset\n\t\t\tend = mid\n\t\t} else {\n\t\t\tbegin = mid + 1\n\t\t}\n\n\t\tmid = (begin + end) / 2\n\n\t}\n\treturn lastOffset, nil\n}", "func (t *Table) ResolveConflicts(ctx context.Context, pkTuples []types.Value) (invalid, notFound []types.Value, tbl *Table, err error) {\n\tremoved := 0\n\tconflictSchema, confIdx, err := 
t.GetConflicts(ctx)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tif confIdx.Format() == types.Format_DOLT {\n\t\tpanic(\"resolve conflicts not implemented for new storage format\")\n\t}\n\n\tconfData := durable.NomsMapFromConflictIndex(confIdx)\n\n\tconfEdit := confData.Edit()\n\tfor _, pkTupleVal := range pkTuples {\n\t\tif has, err := confData.Has(ctx, pkTupleVal); err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t} else if has {\n\t\t\tremoved++\n\t\t\tconfEdit.Remove(pkTupleVal)\n\t\t} else {\n\t\t\tnotFound = append(notFound, pkTupleVal)\n\t\t}\n\t}\n\n\tif removed == 0 {\n\t\treturn invalid, notFound, tbl, ErrNoConflictsResolved\n\t}\n\n\tconflicts, err := confEdit.Map(ctx)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tif conflicts.Len() == 0 {\n\t\ttable, err := t.table.ClearConflicts(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\treturn invalid, notFound, &Table{table: table}, nil\n\t}\n\n\ttable, err := t.table.SetConflicts(ctx, conflictSchema, durable.ConflictIndexFromNomsMap(conflicts, t.ValueReadWriter()))\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\treturn invalid, notFound, &Table{table: table}, nil\n}", "func NewGetLimitsConflict() *GetLimitsConflict {\n\treturn &GetLimitsConflict{}\n}", "func (self *OCSPResponder) getIndexEntry(s *big.Int) (*IndexEntry, error) {\n\tlog.Println(fmt.Sprintf(\"Looking for serial 0x%x\", s))\n\tif err := self.parseIndex(); err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, ent := range self.IndexEntries {\n\t\tif ent.Serial.Cmp(s) == 0 {\n\t\t\treturn &ent, nil\n\t\t}\n\t}\n\treturn nil, errors.New(fmt.Sprintf(\"Serial 0x%x not found\", s))\n}", "func (g *Git) GetCommitsBetween(from *plumbing.Reference, to *plumbing.Reference) ([]*ChangelogItem, error) {\n\tvar history []*ChangelogItem\n\tvar exists bool\n\n\tcommits, err := g.repo.Log(&git.LogOptions{From: from.Hash()})\n\tif err != nil {\n\t\treturn history, err\n\t}\n\n\t// Iterate over all 
commits\n\t// Break when `to` has been found\n\terr = commits.ForEach(func(commit *object.Commit) error {\n\t\tif commit.Hash == to.Hash() {\n\t\t\texists = true\n\t\t\treturn errors.New(\"ErrStop\")\n\t\t}\n\n\t\t// Check if commit message contains issue in form `(#0..9)`\n\t\t// and add commit as a changelog item\n\t\tif hasIssue(commit.Message) {\n\t\t\thistory = append(history, &ChangelogItem{\n\t\t\t\tHash: commit.Hash.String(),\n\t\t\t\tText: commit.Message,\n\t\t\t\tIssueID: getIssueFrom(commit.Message),\n\t\t\t\tAuthor: commit.Author.Name,\n\t\t\t})\n\t\t}\n\t\treturn nil\n\t})\n\n\tif exists {\n\t\treturn history, nil\n\t}\n\n\treturn history, errors.Errorf(\"Unable to compare references, %v not found in history of %v\", to.Name().Short(), from.Name().Short())\n}", "func (rs Ranges) search(r Range) int {\n\treturn sort.Search(len(rs), func(i int) bool {\n\t\treturn rs[i].Pos >= r.Pos\n\t})\n}", "func (a *UnsignedArray) FindRange(min, max int64) (int, int) {\n\tif a.Len() == 0 || min > max {\n\t\treturn -1, -1\n\t}\n\n\tminVal := a.MinTime()\n\tmaxVal := a.MaxTime()\n\n\tif maxVal < min || minVal > max {\n\t\treturn -1, -1\n\t}\n\n\treturn a.search(min), a.search(max)\n}", "func NewCreatePeerConflict() *CreatePeerConflict {\n\n\treturn &CreatePeerConflict{}\n}", "func (a *StringArray) FindRange(min, max int64) (int, int) {\n\tif a.Len() == 0 || min > max {\n\t\treturn -1, -1\n\t}\n\n\tminVal := a.MinTime()\n\tmaxVal := a.MaxTime()\n\n\tif maxVal < min || minVal > max {\n\t\treturn -1, -1\n\t}\n\n\treturn a.search(min), a.search(max)\n}", "func (f *asyncFinder) At(ctx context.Context, at, after int64) (ch swarm.Chunk, cur, next feeds.Index, err error) {\n\tch, diff, err := f.get(ctx, at, 0)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tif ch == nil {\n\t\treturn nil, nil, nil, nil\n\t}\n\tif diff == 0 {\n\t\treturn ch, &index{0}, &index{1}, nil\n\t}\n\tc := make(chan result)\n\tp := newPath(0)\n\tp.latest.chunk = ch\n\tfor p.level = 1; 
diff>>p.level > 0; p.level++ {\n\t}\n\tquit := make(chan struct{})\n\tdefer close(quit)\n\tgo f.at(ctx, at, p, c, quit)\n\tfor r := range c {\n\t\tp = r.path\n\t\tif r.chunk == nil {\n\t\t\tif r.level == 0 {\n\t\t\t\treturn p.latest.chunk, &index{p.latest.seq}, &index{p.latest.seq + 1}, nil\n\t\t\t}\n\t\t\tif p.level < r.level {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tp.level = r.level - 1\n\t\t} else {\n\t\t\tif r.diff == 0 {\n\t\t\t\treturn r.chunk, &index{r.seq}, &index{r.seq + 1}, nil\n\t\t\t}\n\t\t\tif p.latest.level > r.level {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tp.close()\n\t\t\tp.latest = r\n\t\t}\n\t\t// below applies even if p.latest==maxLevel\n\t\tif p.latest.level == p.level {\n\t\t\tif p.level == 0 {\n\t\t\t\treturn p.latest.chunk, &index{p.latest.seq}, &index{p.latest.seq + 1}, nil\n\t\t\t}\n\t\t\tp.close()\n\t\t\tnp := newPath(p.latest.seq)\n\t\t\tnp.level = p.level\n\t\t\tnp.latest.chunk = p.latest.chunk\n\t\t\tgo f.at(ctx, at, np, c, quit)\n\t\t}\n\t}\n\treturn nil, nil, nil, nil\n}", "func NewUpdateUserIssueSearchOptionsConflict() *UpdateUserIssueSearchOptionsConflict {\n\treturn &UpdateUserIssueSearchOptionsConflict{}\n}", "func indexOf(list []int, valueToBeFound int) int {\n\tfor i, value := range list {\n\t\tif value == valueToBeFound {\n\t\t\treturn i\n\t\t}\n\t}\n\n\treturn -1\n}", "func Tianyun_searchRange(nums []int, target int) []int {\n\n\t//O(n)\n//\tstartIndex:=-1\n//\tcount:=0\n//\n//\tfor i:=0;i<len(nums);i++{\n//\t\tif count==0&&nums[i]==target{\n//\t\t\tstartIndex = i\n//\t\t\tcount++\n//\n//\t\t}else if nums[i]==target{\n//\t\t\tcount++\n//\t\t}\n//}\n//\n// if count==0{\n// \treturn []int{-1,-1}\n// }else{\n// \treturn []int{startIndex,startIndex+count}\n// }\n\n\n\n//O(logn)\nif len(nums)==0{\n\treturn []int{-1,-1}\n}\n startIndex:=-1\n left:=0\n right:=len(nums)-1\n\n for left +1 <right{\n \tmid:=(left+right)/2\n \tif nums[mid]>=target{\n right = mid\n\t\t}else{\n\t\t\tleft =mid+1\n\t\t}\n\t }\n\t if nums[left]==target{\n\t \tstartIndex 
=left\n\t }else if nums[right]==target{\n\t \tstartIndex = right\n\t }else{\n\t \treturn []int{-1,-1}\n\t }\n\n\n\tleft=0\n\tright=len(nums)-1\n\n\tfor left +1 <right{\n\t\tmid:=(left+right)/2\n\t\tif nums[mid]<=target{\n\t\t\tleft = mid\n\t\t}else{\n\t\t\tright = mid\n\t\t}\n\t}\n\n\tif nums[right]==target{\n\t\treturn []int{startIndex,right}\n\t}else if nums[left]==target{\n\t\treturn []int{startIndex,left}\n\t}else{\n\t\treturn []int{-1,-1}\n\t}\n}", "func findInsertionIndex(list []int, item int) int {\n\tif len(list) == 0 {\n\t\treturn 0\n\t}\n\n\thigh := len(list) - 1\n\tlow := 0\n\tmid := len(list) / 2\n\n\tfor {\n\t\tif item < list[mid] {\n\t\t\tif mid == 0 || item >= list[mid-1] {\n\t\t\t\treturn mid\n\t\t\t} else if mid-low == 1 {\n\t\t\t\tmid = low\n\t\t\t} else {\n\t\t\t\thigh, mid = mid, mid-((mid-low)/2)\n\t\t\t}\n\t\t} else if item > list[mid] {\n\t\t\tif mid == len(list)-1 || item <= list[mid+1] {\n\t\t\t\treturn mid + 1\n\t\t\t} else if high-mid == 1 {\n\t\t\t\tmid = high\n\t\t\t} else {\n\t\t\t\tlow, mid = mid, mid+((high-mid)/2)\n\t\t\t}\n\t\t} else {\n\t\t\treturn mid\n\t\t}\n\t}\n}", "func Conflict(msg string) Error {\n\te := err{msg: msg, code: conflictCode, group: generic, kind: conflict}\n\treturn &e\n}", "func (t *Trie) Find(dst []int, prefix string) []int {\n\treturn t.root.find(dst, prefix)\n}", "func Search(n int, f func(int) int) int {\n\tlow, high := 0, n\n\tfor low < high {\n\t\tmid := int(uint(low+high) >> 1)\n\t\td := f(mid)\n\t\tif d < 0 {\n\t\t\tlow = mid + 1\n\t\t} else if d > 0 {\n\t\t\thigh = mid\n\t\t} else {\n\t\t\treturn mid\n\t\t}\n\t}\n\treturn -1\n}", "func (s strings) Find(in []string, what string) int {\n\tfor i, entry := range in {\n\t\tif entry == what {\n\t\t\treturn i\n\t\t}\n\t}\n\n\treturn -1\n}", "func (ml *messageLog) FromID(id uuid.UUID, exclusive bool) defs.MessageFindFunc {\r\n\treturn func() (int, bool) {\r\n\t\tif index, entry := ml.log.findIndexByID(id); entry != nil {\r\n\t\t\tif exclusive 
{\r\n\t\t\t\tindex += 1\r\n\t\t\t\tif index >= len(ml.log.entries) {\r\n\t\t\t\t\treturn 0, false\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t\treturn index, true\r\n\t\t}\r\n\t\treturn 0, false\r\n\t}\r\n}" ]
[ "0.46936142", "0.42734542", "0.42118675", "0.41973543", "0.41771796", "0.41696936", "0.41489935", "0.41426665", "0.41424075", "0.41103593", "0.4097609", "0.40779626", "0.40646788", "0.406086", "0.40478206", "0.40368852", "0.4034429", "0.40295273", "0.40270522", "0.40177962", "0.4014723", "0.400891", "0.40077984", "0.39957234", "0.39880976", "0.39875504", "0.39840326", "0.39836645", "0.39775836", "0.3970373", "0.3958581", "0.39452016", "0.39302468", "0.39176843", "0.3912693", "0.39066282", "0.3899207", "0.38960433", "0.38936397", "0.38910073", "0.3889739", "0.38621652", "0.38613197", "0.38610592", "0.38511506", "0.3849866", "0.38484377", "0.3848054", "0.38435373", "0.38435218", "0.38384375", "0.38374206", "0.38340276", "0.38304955", "0.38299587", "0.38288993", "0.38237986", "0.38209683", "0.38188818", "0.38106334", "0.38063627", "0.3800119", "0.37921163", "0.3786155", "0.3781607", "0.37782553", "0.3775914", "0.3767737", "0.3763683", "0.37630466", "0.3760482", "0.37603334", "0.3753161", "0.37523213", "0.37515745", "0.37456572", "0.3737113", "0.3731396", "0.3729292", "0.3725772", "0.3722277", "0.3718618", "0.37115166", "0.37072295", "0.37071875", "0.37057492", "0.3705472", "0.37021065", "0.3701769", "0.36945492", "0.36924988", "0.36894208", "0.36852986", "0.36766687", "0.36759597", "0.36673248", "0.36666456", "0.3664239", "0.36588308", "0.36541858" ]
0.6421718
0
New creates a new crawler
func New(ctx context.Context, logger yolo.Logger, client *http.Client, filter FilterFunc, mapper Mapper) *Crawler { return &Crawler{ ctx: ctx, logger: logger, client: client, filter: filter, mapper: mapper, visitChan: make(chan visit, visitChanBuffer), toVisit: make(map[string]struct{}), } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func New() (*SingleCrawler, error) {\n\n defer glog.Flush()\n\n var crawler SingleCrawler\n startURL := *UrlPtr\n maxp := *MaxpPtr\n maxc := *MaxcPtr\n maxt := *MaxtPtr\n Filename := *OutfilePtr\n NumWorkers := *NumwPtr\n\n // validate the user input URL and decide if it's okay to use\n if govalidator.IsURL(startURL) == false {\n glog.Error(\"The starting URL is invalid. Please enter a valid URL.\")\n return nil, errors.New(\"Bad starting URL.\")\n }\n if maxp < 0 || maxc < 0 || maxt < 0 {\n glog.Error(\"Please pass in values > = 0 for max constraints (max print, max pages, max time). Please pass > 0 for the number of workers.\")\n return nil, errors.New(\"Bad values for maxprint, maxpages, maxtime, or NumWorkers\")\n }\n if NumWorkers <= 0 || NumWorkers > MAX_WORKERS {\n glog.Error(\"Number of workes is invalid. Must be > 0, and less that MAX_WORKERS.\")\n return nil, errors.New(\"Bad value for NumWorkers\")\n }\n if len(Filename) >= 255 {\n glog.Error(\"Filename can't be larger than 255 characters. 
Trimming Filename.\")\n Filename = Filename[0:100]\n }\n\n\n crawler.MAX_PAGES = maxc\n crawler.PRINT_LIMIT = maxp\n crawler.NumPages = 0\n crawler.NumWorkers = NumWorkers\n crawler.MAX_TIME = time.Duration(maxt) * time.Second\n crawler.Sitemap = make( [] Page, crawler.MAX_PAGES)\n \n\n // Parse the URL - make sure it's ok to use\n domain, err := url.Parse(startURL)\n if err != nil {\n glog.Error(\"Error parsing domain of starting URL\")\n return nil, errors.New(\"Unable to parse domain of start URL.\")\n }\n err = DomainCheck( domain )\n if err != nil {\n glog.Error(\"Error parsing domain of starting URL\")\n return nil, err\n }\n crawler.Site = domain\n \n if Filename != \"\" {\n crawler.Filename = Filename\n } else {\n crawler.Filename = crawler.Site.Host + \".txt\"\n if len( crawler.Filename ) >= 255 {\n crawler.Filename = crawler.Filename[0:100]\n }\n }\n\n if err = IsOk( &crawler ); err!=nil{\n return nil, err\n }\n\n return &crawler, nil\n\n}", "func New(startURL string, host string) *Crawler {\n\treturn &Crawler{\n\t\tRequester: request.HTTPRequest{},\n\t\tStartURL: startURL,\n\t\tLinks: make(PageLinks),\n\t\thost: host,\n\t\tmaxGoRoutines: 20,\n\t}\n}", "func New() (crawl *Crawl) {\n\tc := &http.Client{\n\t\tTransport: http.DefaultTransport,\n\t}\n\tc.Jar, _ = cookiejar.New(nil)\n\n\tcrawl = &Crawl{\n\t\tClient: c,\n\t\tmutex: new(sync.RWMutex),\n\t\thandlers: make(map[interface{}]Handler),\n\t\tcloseCh: make(chan bool, 1),\n\t\tdoneCh: make(chan bool, 1),\n\t}\n\tcrawl.SetOptions(DefaultOptions)\n\treturn\n}", "func (_ *_Crawler) New(_ *cli.Context, client http.Client, logger glog.Log) (crawler.Crawler, error) {\n\tc := _Crawler{\n\t\thttpClient: client,\n\t\t// this regular used to match category page url path\n\t\tcategoryPathMatcher: regexp.MustCompile(`^/collections(/[a-zA-Z0-9_-]+){1,6}$`),\n\t\t// this regular used to match product page url path\n\t\tproductPathMatcher: 
regexp.MustCompile(`^(/[/a-zA-Z0-9_-]+)?/products(/[a-zA-Z0-9_-]+){1,3}$`),\n\t\tlogger: logger.New(\"_Crawler\"),\n\t}\n\treturn &c, nil\n}", "func (_ *_Crawler) New(_ *cli.Context, client http.Client, logger glog.Log) (crawler.Crawler, error) {\n\tc := _Crawler{\n\t\thttpClient: client,\n\t\t// this regular used to match category page url path\n\t\tcategoryPathMatcher: regexp.MustCompile(`^/en-us/shop(.*)`),\n\t\t// this regular used to match product page url path\n\t\tproductPathMatcher: regexp.MustCompile(`^(/en-us/p(.*)(&lvrid=_p)(.*)) | (/en-us/p(.*))$`),\n\t\tlogger: logger.New(\"_Crawler\"),\n\t}\n\treturn &c, nil\n}", "func New(u url.URL, ignoreRobotsTxt bool, maxWorkers int, userAgent string) *Crawler {\n\tu.Path = \"/\"\n\treturn &Crawler{\n\t\turl: u,\n\t\tignoreRobotsTxt: ignoreRobotsTxt,\n\t\tqueued: sync.Map{},\n\t\tpool: worker.NewPool(maxWorkers),\n\t\tpagesWithErr: make(map[string]bool),\n\t\tSiteMap: make(map[string]*Page),\n\t\tuserAgent: userAgent,\n\t}\n}", "func New(h, o string, s *State) (*Crawler, error) {\n\tm, err := url.Parse(h)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif m.Host == \"\" {\n\t\treturn nil, errors.New(\"empty main host\")\n\t}\n\treturn &Crawler{\n\t\tendpoint: h,\n\t\tmainURL: m,\n\t\toutput: o,\n\t\tuploadPageCh: make(chan string, 1024),\n\t\tuploadAssetCh: make(chan string, 1024),\n\t\tsaveCh: make(chan File, 128),\n\t\tUploadWorkers: DefaultWorkersCount,\n\t\tSaveWorkers: DefaultWorkersCount,\n\t\tIncludeSubDomains: false,\n\t\tEnableGzip: true,\n\t\tstate: s,\n\t\thttpClient: &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tDial: (&net.Dialer{\n\t\t\t\t\tTimeout: 15 * time.Second,\n\t\t\t\t\tKeepAlive: 180 * time.Second,\n\t\t\t\t}).Dial,\n\t\t\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\t\t\tResponseHeaderTimeout: 10 * time.Second,\n\t\t\t\tExpectContinueTimeout: 1 * time.Second,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func NewCrawler(siteURL string) *Crawler {\n\n\tsiteURL = strings.Trim(siteURL, \" 
\")\n\tsiteURL = strings.TrimRight(siteURL, \"/\")\n\n\tvar siteURLWWW string\n\n\tvar baseURL string\n\n\tif strings.Index(siteURL, \"http://\") == 0 {\n\t\tsiteURL = strings.Replace(siteURL, \"https://\", \"\", 1)\n\t} else if strings.Index(siteURL, \"https://\") == 0 {\n\t\tsiteURL = strings.Replace(siteURL, \"https://\", \"\", 1)\n\t}\n\n\tfmt.Printf(\"URL %s \\n\", siteURL)\n\n\tif strings.Contains(siteURL, \"www.\") {\n\t\tsiteURLWWW = siteURL\n\t\tsiteURL = strings.Replace(siteURL, \"www.\", \"\", 1)\n\t} else {\n\t\tsiteURLWWW = \"www.\" + siteURL\n\t}\n\n\tbaseURL = \"https://\" + siteURL + \"/\"\n\n\tfmt.Printf(\"Crawling %s\\n\", siteURL)\n\n\treturn &Crawler{\n\t\tBaseURL: baseURL,\n\t\tSiteURL: siteURL,\n\t\tSiteURLWWW: siteURLWWW,\n\t\tCrawlQueue: list.New(),\n\t\tURLResponseList: make(map[string]int),\n\t}\n}", "func NewCrawler() (crawler *Crawler) {\n\tcrawler = new(Crawler)\n\tcrawler.Timeout = 60 * time.Second\n\tcrawler.MaxDepth = -1\n\tcrawler.Auth = nil\n\tcrawler.Workers = 10\n\tcrawler.HashLoopCheck = false\n\tcrawler.FollowExternal = false\n\tcrawler.UserAgent = nil\n\tcrawler.Retries = 0\n\tcrawler.SleepBetweenRetries = 0\n\treturn\n}", "func New(inputConfigs []*common.Config, beatVersion string, beatDone chan struct{}, once bool, outSQS chan *pipeline.SQS, outS3List chan *pipeline.S3List, allowedTypes []string) (*Crawler, error) {\n\treturn &Crawler{\n\t\tinputs: map[uint64]*input.Runner{},\n\t\tinputConfigs: inputConfigs,\n\t\tonce: once,\n\t\tbeatVersion: beatVersion,\n\t\tbeatDone: beatDone,\n\t\toutSQS: outSQS,\n\t\toutS3List: outS3List,\n\t\tallowedTypes: allowedTypes,\n\t}, nil\n}", "func NewCrawler(config *Config, client *s3.S3, sqsClient *sqs.SQS) *Crawler {\n\treturn &Crawler{config, client, sqsClient}\n}", "func NewCrawler(ip string, port int16, \n\tcallback func(string, net.IP, int)) *Crawler {\n\n\ttransaction := newCrawlTransaction(tools.RandomString(2), callback)\n\tdht := newDHTCore()\n\t\n\tdht.IP\t = ip\n\tdht.Port = 
port\n\n\tdht.AddTransaction(transaction)\n\tdht.RequestHandler = transaction.OnRequest\n\t\n\treturn &Crawler{dht}\n}", "func NewCrawler(ctx *pulumi.Context,\n\tname string, args *CrawlerArgs, opts ...pulumi.ResourceOption) (*Crawler, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.DatabaseName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'DatabaseName'\")\n\t}\n\tif args.Role == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Role'\")\n\t}\n\tvar resource Crawler\n\terr := ctx.RegisterResource(\"aws:glue/crawler:Crawler\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func New() *Scraper {\n\tseedURL, _ := env.GetCrawlerVars(env.SeedURL)\n\treturn &Scraper{\n\t\tlock: &sync.RWMutex{},\n\t\tvisitsCount: 0,\n\t\tseedURL: seedURL.(string),\n\t\trequests: make(scrapingRequests, 0),\n\t\tacquiredProducts: make(item.Items, 0),\n\t}\n}", "func newScraper(u *url.URL, timeout int) (*scraper, error) {\n\tvar title string\n\tvar language string\n\tvar author string\n\tvar description string\n\tvar generator string\n\tvar feed string\n\tcharset := \"utf-8\"\n\tlinks := make([]string, 0)\n\timages := make([]string, 0)\n\tkeywords := make([]string, 0)\n\tcompatibility := make(map[string]string)\n\n\tscrpr := func(n *html.Node) {\n\t\tswitch n.Data {\n\t\tcase \"html\":\n\t\t\tlanguage = findAttribute(n, \"lang\")\n\t\tcase \"title\":\n\t\t\ttitle = n.FirstChild.Data\n\t\tcase \"a\":\n\t\t\tlinks = addElement(links, u, n, \"href\")\n\t\tcase \"img\":\n\t\t\timages = addElement(images, u, n, \"src\")\n\t\tcase \"link\":\n\t\t\ttyp := findAttribute(n, \"type\")\n\t\t\tswitch typ {\n\t\t\tcase \"application/rss+xml\":\n\t\t\t\tfeed = findAttribute(n, \"href\")\n\t\t\t}\n\t\tcase \"meta\":\n\t\t\tname := findAttribute(n, \"name\")\n\t\t\tswitch name {\n\t\t\tcase \"author\":\n\t\t\t\tauthor = 
findAttribute(n, \"content\")\n\t\t\tcase \"keywords\":\n\t\t\t\tkeywords = strings.Split(findAttribute(n, \"content\"), \", \")\n\t\t\tcase \"description\":\n\t\t\t\tdescription = findAttribute(n, \"content\")\n\t\t\tcase \"generator\":\n\t\t\t\tgenerator = findAttribute(n, \"content\")\n\t\t\t}\n\n\t\t\thttpEquiv := findAttribute(n, \"http-equiv\")\n\t\t\tswitch httpEquiv {\n\t\t\tcase \"Content-Type\":\n\t\t\t\tcharset = findCharset(findAttribute(n, \"content\"))\n\t\t\tcase \"X-UA-Compatible\":\n\t\t\t\tcompatibility = mapifyStr(findAttribute(n, \"content\"))\n\t\t\t}\n\t\t}\n\t}\n\n\tcl := http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDial: timeoutDialer(timeout),\n\t\t},\n\t}\n\n\tresp, err := cl.Get(u.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\ttree, err := h5.New(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttree.Walk(scrpr)\n\n\treturn &scraper{title,\n\t\tlanguage,\n\t\tauthor,\n\t\tdescription,\n\t\tgenerator,\n\t\tfeed,\n\t\tcharset,\n\t\tlinks,\n\t\timages,\n\t\tkeywords,\n\t\tcompatibility}, nil\n}", "func NewCrawler(config rpcclient.ConnConfig, startHeight uint64, prevBlockHash chainhash.Hash) (*Crawler, error) {\n\n\tblockQueue := queue.New()\n\tblockQueue.PushBack(prevBlockHash)\n\n\n\tcraw := &Crawler{\n\t\tfetcherStop: nil,\n\t\tfetcherBlocks: nil,\n\t\trpcConfig: config,\n\t\theight: startHeight,\n\t\tsubscribers: make(map [UpdateChan]bool),\n\t\tblockQueue: blockQueue,\n\n\t\t//\n\t\tsubscribeChan: make(chan UpdateChan),\n\t\tunsubscribeChan: make(chan UpdateChan),\n\t\tstartChan: make(chan chan bool),\n\t\tstopChan: make(chan chan bool),\n\t}\n\n\tgo craw.crawlerRoutine()\n\n\treturn craw, nil\n}", "func (s WashingtonPostScraper) CreateNewWashingtonPostScraper() *WashingtonPostScraper {\n\tc := colly.NewCollector()\n\t// c := colly.NewCollector(colly.Debugger(&debug.LogDebugger{}))\n\tc.UserAgent = s.UserAgent()\n\tc.IgnoreRobotsTxt = false\n\n\t// Adding this wait so AJAX can 
load, might need to look at https://github.com/chromedp/chromedp in the future\n\tc.Limit(&colly.LimitRule{\n\t\tDelay: 5 * time.Second,\n\t})\n\n\tscraper := WashingtonPostScraper{\n\t\tcollector: c,\n\t}\n\treturn &scraper\n}", "func New() *Scraper {\n\treturn &Scraper{\n\t\tclient: &http.Client{Timeout: 10 * time.Second},\n\t}\n}", "func New() (Scraper, error) {\n\treturn &scraper{}, nil\n}", "func (c *Crawler) newFetcher(height uint64) {\n\t\n\t// Stop previous fetcher\n\tif c.fetcherStop != nil {\n\t\tc.fetcherStop <- true\n\t}\n\t\n\t// Both channels to be closed by fetcher task\n\tc.fetcherStop = make(chan bool)\n\tc.fetcherBlocks = make(chan blockRecord, FetcherBlockBufferSize)\n\n\t//\n\tgo fetcher(c.rpcConfig, height, c.fetcherBlocks, c.fetcherStop)\n}", "func newSchedulesCrawler(schedules *map[ScheduleID]ScheduleTimes) *gocrawl.Crawler {\n\tschedulesCrawler := &schedulesCrawler{\n\t\tschedules: schedules}\n\topts := gocrawl.NewOptions(schedulesCrawler)\n\topts.UserAgent = userAgent\n\topts.CrawlDelay = 0\n\topts.LogFlags = gocrawl.LogError\n\topts.SameHostOnly = true\n\treturn gocrawl.NewCrawlerWithOptions(opts)\n}", "func New(logger *log.Logger, cfg Config) (*Scraper, error) {\n\tvar errs []error\n\n\tu, err := url.Parse(cfg.URL)\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t}\n\n\tincludes, err := compileRegexps(cfg.Includes)\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t}\n\n\texcludes, err := compileRegexps(cfg.Excludes)\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t}\n\n\tproxyURL, err := url.Parse(cfg.Proxy)\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t}\n\n\tif errs != nil {\n\t\treturn nil, errors.Join(errs...)\n\t}\n\n\tif u.Scheme == \"\" {\n\t\tu.Scheme = \"http\" // if no URL scheme was given default to http\n\t}\n\n\tif cfg.UserAgent == \"\" {\n\t\tcfg.UserAgent = agent.GoogleBot()\n\t}\n\n\tb := surf.NewBrowser()\n\tb.SetUserAgent(cfg.UserAgent)\n\tb.SetTimeout(time.Duration(cfg.Timeout) * time.Second)\n\n\tif 
cfg.Proxy != \"\" {\n\t\tdialer, err := proxy.FromURL(proxyURL, proxy.Direct)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tb.SetTransport(&http.Transport{\n\t\t\tDial: dialer.Dial,\n\t\t})\n\t}\n\n\ts := &Scraper{\n\t\tconfig: cfg,\n\n\t\tbrowser: b,\n\t\tlogger: logger,\n\t\tprocessed: make(map[string]struct{}),\n\t\tURL: u,\n\t\tcssURLRe: regexp.MustCompile(`^url\\(['\"]?(.*?)['\"]?\\)$`),\n\t\tincludes: includes,\n\t\texcludes: excludes,\n\t}\n\treturn s, nil\n}", "func (f Feeds) New(url string) error {\n\tfeed, err := gofeed.NewParser().ParseURL(url)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"gofeed parse %v\", err)\n\t}\n\tf[url] = NewFeed(feed.Title)\n\treturn nil\n}", "func NewCrawlerState() *CrawlerState {\n\treturn &CrawlerState{\n\t\turlMap: make(map[string]struct{}),\n\t}\n}", "func New(url string, client *http.Client) *Rietveld {\n\turl = strings.TrimRight(url, \"/\")\n\tif client == nil {\n\t\tclient = http.DefaultClient\n\t}\n\treturn &Rietveld{\n\t\turl: url,\n\t\tclient: client,\n\t}\n}", "func TestNew(t *testing.T) {\n\tcrawler := NewYaml([]byte(sampleYml))\n\n\t// inspect Crawler options\n\texpect, err := json.Marshal(expectedCrawl)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tgot, err := json.Marshal(crawler)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif !reflect.DeepEqual(expect, got) {\n\t\tt.Errorf(\"expecting %s, got %s\",\n\t\t\tstring(expect), string(got))\n\t}\n}", "func New(r io.Reader, useragent string) *Robots {\n\tgroup := NewGroups(r).Find(useragent)\n\tif group == nil {\n\t\treturn &Robots{}\n\t}\n\tresult := &Robots{}\n\tfor _, value := range group.Allow {\n\t\tresult.add(NewRule(TypeAllow, value))\n\t}\n\tfor _, value := range group.Disallow {\n\t\tresult.add(NewRule(TypeDisallow, value))\n\t}\n\tif group.CrawlDelay != \"\" {\n\t\tif secs, err := strconv.Atoi(group.CrawlDelay); err == nil {\n\t\t\tresult.CrawlDelay = time.Duration(secs) * time.Second\n\t\t}\n\t}\n\treturn result\n}", "func main() 
{\n\n\tlog.SetOutput(os.Stdout)\n\n\ttoCrawl, _ := url.Parse(\"http://www.monzo.com\")\n\tvar filter crawler.Restriction = func(url *url.URL) bool {\n\t\treturn url.Host == toCrawl.Host\n\t}\n\tvar op1 crawler.Operation = func(in *url.URL) *url.URL {\n\t\tif in != nil {\n\t\t\thashIndex := strings.Index(in.String(), \"#\")\n\t\t\tif hashIndex > 0 {\n\t\t\t\tout, err := url.Parse(in.String()[:hashIndex])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn in\n\t\t\t\t}\n\t\t\t\treturn out\n\t\t\t}\n\t\t}\n\t\treturn in\n\t}\n\n\twg := sync.WaitGroup{}\n\twg.Add(1)\n\tout := make(chan model.CrawlerOutput, 100)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor each := range out {\n\t\t\tconsumer.CreateSiteMap(path, each.URL, each.PageLinks, each.ResponseBody)\n\t\t}\n\t}()\n\tdone := make(chan struct{})\n\n\tc := crawler.NewCrawler(nil, crawler.Setting{\n\t\tRestrictions: []crawler.Restriction{filter},\n\t\tOperation: op1,\n\t\tWaitTimes: 100 * time.Millisecond,\n\t\tWorkers: 10,\n\t\tGetResponseBody: true,\n\t})\n\tgo c.Crawl(toCrawl, out, done)\n\n\tselect {\n\tcase <-time.After(10 * time.Second):\n\t\tdone <- struct{}{}\n\t}\n\twg.Wait()\n}", "func NewCfnCrawler(scope constructs.Construct, id *string, props *CfnCrawlerProps) CfnCrawler {\n\t_init_.Initialize()\n\n\tj := jsiiProxy_CfnCrawler{}\n\n\t_jsii_.Create(\n\t\t\"aws-cdk-lib.aws_glue.CfnCrawler\",\n\t\t[]interface{}{scope, id, props},\n\t\t&j,\n\t)\n\n\treturn &j\n}", "func NewBrowserSiteList()(*BrowserSiteList) {\n m := &BrowserSiteList{\n Entity: *NewEntity(),\n }\n return m\n}", "func New(url string) *Client {\n\treturn &Client{&http.Client{}, url, func(r *http.Request) *http.Request { return r }}\n}", "func newMux(c *Crawl, stop chan bool) *fetchbot.Mux {\n\t// Create the muxer\n\tmux := fetchbot.NewMux()\n\n\t// Handle all errors the same\n\tmux.HandleErrors(fetchbot.HandlerFunc(func(ctx *fetchbot.Context, res *http.Response, err error) {\n\t\tif !strings.Contains(err.Error(), errAlreadyFetched.Error()) 
{\n\t\t\tlog.Infof(\"[ERR] %s %s - %s\", ctx.Cmd.Method(), ctx.Cmd.URL(), err.Error())\n\t\t\tc.urlLock.Lock()\n\t\t\tc.urls[ctx.Cmd.URL().String()] = &Url{Error: err.Error()}\n\t\t\tc.urlLock.Unlock()\n\t\t}\n\n\t\tc.unqueURLs(ctx.Cmd.URL().String())\n\t\tgo c.queNextURL(ctx.Q)\n\t}))\n\n\t// Handle GET requests for html responses, to parse the body and enqueue all links as HEAD requests.\n\tmux.Response().Method(\"GET\").Handler(fetchbot.HandlerFunc(\n\t\tfunc(ctx *fetchbot.Context, res *http.Response, err error) {\n\n\t\t\tu := &Url{Url: ctx.Cmd.URL().String()}\n\n\t\t\tif c.cfg.RecordRedirects {\n\t\t\t\tu = &Url{Url: NormalizeURLString(res.Request.URL)}\n\t\t\t}\n\n\t\t\tlog.Infof(\"[%d] %s %s\", res.StatusCode, ctx.Cmd.Method(), u.Url)\n\n\t\t\tvar st time.Time\n\t\t\tif timedCmd, ok := ctx.Cmd.(*TimedCmd); ok {\n\t\t\t\tst = timedCmd.Started\n\t\t\t}\n\n\t\t\tif err := u.HandleGetResponse(st, res, c.cfg.RecordResponseHeaders); err != nil {\n\t\t\t\tlog.Debugf(\"error handling get response: %s - %s\", ctx.Cmd.URL().String(), err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlinks := CandidateLinks(u.Links, c.urlStringIsCandidate)\n\t\t\tunwritten := make([]string, len(links))\n\n\t\t\tc.urlLock.Lock()\n\t\t\tc.urls[u.Url] = u\n\t\t\tc.finished++\n\t\t\tc.urlsWritten++\n\n\t\t\ti := 0\n\t\t\tfor _, l := range links {\n\t\t\t\tif c.urls[l] == nil {\n\t\t\t\t\tunwritten[i] = l\n\t\t\t\t\ti++\n\t\t\t\t}\n\t\t\t}\n\t\t\tunwritten = unwritten[:i]\n\n\t\t\tc.urlLock.Unlock()\n\n\t\t\tfor _, resc := range c.cfg.BackoffResponseCodes {\n\t\t\t\tif res.StatusCode == resc {\n\t\t\t\t\tlog.Infof(\"encountered %d response. 
backing off\", resc)\n\t\t\t\t\tc.setCrawlDelay(c.crawlDelay + ((time.Duration(c.cfg.CrawlDelayMilliseconds) * time.Millisecond) / 2))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !c.stopping && len(c.next) < queueBufferSize {\n\t\t\t\tgo func() {\n\t\t\t\t\tfor _, l := range unwritten {\n\t\t\t\t\t\tc.next <- l\n\t\t\t\t\t}\n\t\t\t\t\tlog.Infof(\"seeded %d/%d links for source: %s\", len(unwritten), len(u.Links), u.Url)\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\tif c.finished == c.cfg.StopAfterEntries {\n\t\t\t\tstop <- true\n\t\t\t}\n\n\t\t\tif c.cfg.BackupWriteInterval > 0 && (c.urlsWritten%c.cfg.BackupWriteInterval == 0) {\n\t\t\t\tgo func() {\n\t\t\t\t\tpath := fmt.Sprintf(\"%s.backup\", c.cfg.DestPath)\n\t\t\t\t\tlog.Infof(\"writing backup sitemap: %s\", path)\n\t\t\t\t\tif err := c.WriteJSON(path); err != nil {\n\t\t\t\t\t\tlog.Errorf(\"error writing backup sitemap: %s\", err.Error())\n\t\t\t\t\t}\n\t\t\t\t\tc.batchCount++\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\tc.unqueURLs(u.Url)\n\t\t\tgo c.queNextURL(ctx.Q)\n\n\t\t}))\n\n\treturn mux\n}", "func (w *Hostworker) NewReference(foundUrl *url.URL, sourceItem *CrawlItem, internal bool) (*CrawlItem, error) {\n\t// Create copy\n\tcc := *foundUrl\n\tfoundUrl = &cc\n\n\tif w.IsInFailTimeout() {\n\t\t// failStreak detecteren en referenties gewoon\n\t\t// wegsmijten als we in timeout interval zitten\n\t\treturn nil, nil\n\t}\n\n\tif !w.InMemory {\n\t\tcount := w.NewItems.stack(foundUrl)\n\t\tif count > 50 {\n\t\t\tw.cachedWantsToGetUp = true\n\t\t}\n\t\treturn nil, nil\n\t}\n\n\tif !foundUrl.IsAbs() {\n\t\treturn nil, nil\n\t}\n\n\tsubdomain, subdomainFound := w.Subdomains[foundUrl.Host]\n\tvar item *CrawlItem\n\tvar found bool\n\n\tif !subdomainFound {\n\t\tsubdomainUrl := splitUrlRelative(foundUrl)\n\t\tsubdomain = &Subdomain{Url: subdomainUrl, AlreadyFound: make(map[string]*CrawlItem)}\n\t\tw.Subdomains[subdomainUrl.Host] = subdomain\n\t} else {\n\t\tmakeRelative(foundUrl)\n\t}\n\n\t// Vanaf nu mag foundUrl.Host niet meer gebruikt worden! 
Deze bestaat niet meer\n\turi := cleanURLPath(foundUrl)\n\n\tif subdomainFound {\n\t\titem, found = subdomain.AlreadyFound[uri]\n\t}\n\n\tif !found {\n\t\titem = NewCrawlItem(foundUrl)\n\t\titem.Subdomain = subdomain\n\n\t\tif internal {\n\t\t\titem.Cycle = sourceItem.Cycle\n\t\t} else {\n\t\t\t// New introduction point\n\t\t\titem.Cycle = w.LatestCycle\n\n\t\t\t// Schema meteen juist zetten\n\t\t\tif !subdomainFound {\n\t\t\t\tsubdomain.Url.Scheme = w.Scheme\n\t\t\t}\n\t\t}\n\n\t\tsubdomain.AlreadyFound[uri] = item\n\t} else {\n\t\tif item.IsUnavailable() {\n\t\t\t// Deze url is onbereikbaar, ofwel geen HTML bestand\n\t\t\t// dat weten we omdat we deze al eerder hebben gecrawled\n\t\t\treturn item, nil\n\t\t}\n\n\t\tif item.FailCount > 0 && !item.NeedsRetry() {\n\t\t\treturn item, nil\n\t\t}\n\t}\n\n\t// Depth aanpassen\n\tif !internal {\n\t\t// Referentie vanaf een ander domein\n\t\titem.Depth = 0\n\n\t} else {\n\t\tif !found || item.Depth > sourceItem.Depth+1 {\n\t\t\titem.Depth = sourceItem.Depth + 1\n\t\t}\n\t}\n\n\tif internal && item.Cycle < sourceItem.Cycle {\n\t\t// Als een nieuwere cycle refereert naar deze pagina, dan kan\n\t\t// die de depth verhogen. 
Dit kan slechts één keer gebeuren,\n\t\t// aangezien hierna de cycle terug wordt gelijk gesteld\n\t\t// Daarna kan de depth enkel nog verlagen tot de volgende cycle\n\t\t// Op die manier houdt het systeem rekening met verloren / gewijzigde referenties\n\n\t\titem.Depth = sourceItem.Depth + 1\n\t}\n\n\tif item.Depth < maxRecrawlDepth && (item.Queue == w.Queue || item.Queue == w.LowPriorityQueue) {\n\t\t// Dit item staat nog in de gewone queue, maar heeft nu wel prioriteit\n\t\t// we verplaatsen het\n\t\titem.Remove()\n\t\tw.PriorityQueue.Push(item)\n\n\t} else if item.Queue == nil && (!found || (internal && item.Cycle < sourceItem.Cycle)) {\n\t\t// Recrawl enkel toelaten als we dit item nog niet gevonden hebben\n\t\t// of we hebben het wel al gevonden en het is een interne link afkomstig van een\n\t\t// hogere cycle (recrawl). Externe links die we al gecrawled hebben\n\t\t// negeren we, die staan in de introduction queue\n\n\t\tif item.Depth < maxRecrawlDepth {\n\t\t\tw.PriorityQueue.Push(item)\n\t\t} else {\n\t\t\tif !found {\n\t\t\t\tw.Queue.Push(item)\n\t\t\t} else {\n\t\t\t\tw.LowPriorityQueue.Push(item)\n\t\t\t}\n\t\t}\n\t} else if item.Queue != nil && item.Queue.Name == \"failqueue\" {\n\t\t// Uit huidige wachtrij verwijderen\n\t\titem.Remove()\n\n\t\tif item.Depth < maxRecrawlDepth {\n\t\t\tw.PriorityQueue.Push(item)\n\t\t} else {\n\t\t\tw.LowPriorityQueue.Push(item)\n\t\t}\n\t}\n\n\t// Cycle aanpassen\n\tif internal && item.Cycle < sourceItem.Cycle {\n\t\titem.Cycle = sourceItem.Cycle\n\t}\n\n\treturn item, nil\n}", "func NewCrawlHandler(queue *bokchoy.Queue, parser parser.Parser, timeout time.Duration) *CrawlHandler {\n\treturn &CrawlHandler{\n\t\tclt: &http.Client{\n\t\t\tTimeout: time.Second * timeout,\n\t\t\tTransport: &http.Transport{\n\t\t\t\tDial: (&net.Dialer{\n\t\t\t\t\tTimeout: timeout * time.Second,\n\t\t\t\t}).Dial,\n\t\t\t\tTLSHandshakeTimeout: timeout * time.Second,\n\t\t\t},\n\t\t},\n\t\tcrawls: map[string]int{},\n\t\tqueue: 
queue,\n\t\tparser: parser,\n\t}\n}", "func New(w http.ResponseWriter, r *http.Request) {\r\n\ttmpl.ExecuteTemplate(w, \"New\", nil)\r\n}", "func New(w http.ResponseWriter, r *http.Request) {\r\n\ttmpl.ExecuteTemplate(w, \"New\", nil)\r\n}", "func New(t *testing.T, baseURL string) *T {\n\tclient := &http.Client{\n\t\tTimeout: time.Second * 5,\n\t}\n\n\tu, err := url.Parse(baseURL)\n\trequire.NoError(t, err)\n\n\treturn &T{\n\t\tT: t,\n\t\tURL: u,\n\t\tClient: client,\n\t}\n}", "func NewCrawlerWorker(c *Crawler) *CrawlerWorker {\n\treturn &CrawlerWorker{\n\t\t1,\n\t\tc,\n\t\tmake([]*CrawlerJob, 0),\n\t\tmake(map[int]bool),\n\t\tmake(map[string]bool),\n\t}\n}", "func ( crawler *SingleCrawler ) Start()(error) {\n\n defer glog.Flush()\n\n if err1 := IsOk( crawler ); err1!=nil{\n return err1\n }\n\n // Stats for termination conditions \n t0 := time.Now() //Terminate after a given time\n noIncrease := 0 //Make sure we are finding unique sites, and not in an inf loop \n last_pagecount := 0 //Keep track of the last # of pages\n var wg sync.WaitGroup //For termination, to wait on workers\n\n // Channels for communication to workers\n pages := make( chan Page, crawler.NumWorkers*10 )\n rurls := make( chan string, crawler.NumWorkers*10 )\n surls := make( chan string, crawler.NumWorkers*10 )\n shutdown := make( chan bool, crawler.NumWorkers )\n\n // Map for making pages and urls unique\n assets := make( map[string][]string )\n vList := make( map[string]int )\n \n // Start the crawling, by providing the inital site URL\n surls <- crawler.Site.String()\n vList[crawler.Site.String()]++\n \n\n // Spawn the requested number of workers for the program\n for i:= 0; i< crawler.NumWorkers; i++ {\n wg.Add(1)\n go Worker( i, surls, rurls, crawler.Site, pages, shutdown, &wg )\n }\n\n\n for {\n\n select { \n\n case link := <- rurls:\n // Receive a link to crawl, make sure it's unvisited, then send back\n if _, ok := vList[link]; ok == false {\n glog.Info( fmt.Sprintf(\"starting crawler for 
%s\\n\", link))\n surls <- link\n vList[link]++\n } \n\n case p := <- pages:\n //receive a page in the page channel, append it to the crawler's sitemap, if it's unique.\n ind := strings.Join(p.Assets, \" \")\n if crawler.NumPages < len(crawler.Sitemap){\n if _, ok := assets[ind]; ok == false {\n assets[ind] = p.BabyUrls\n crawler.Sitemap[crawler.NumPages] = p\n crawler.NumPages += 1\n }\n }\n default:\n // Print status update. \n // Check termination conditions: time, space, nonincreasing, no urls left\n if time.Since(t0) % 1000000 == 0 {\n if crawler.NumPages == last_pagecount {\n noIncrease +=1\n }\n last_pagecount = crawler.NumPages\n }\n\n if noIncrease > 7 || time.Since(t0) >= crawler.MAX_TIME || crawler.NumPages >= crawler.MAX_PAGES {\n\n glog.Info(\"Terminating crawler on a specified condition (time/no URLs left to crawl/ reached max)\")\n glog.Info(\"Total time spent crawling is \", time.Since(t0))\n fmt.Printf(\"Status Update. Pages collected %d. Visited %d.\\n\", crawler.NumPages, len(vList))\n fmt.Println(\"Total time: \", time.Since(t0))\n\n // Tell workers to quit\n for i:= 0; i< crawler.NumWorkers; i++ {\n shutdown <- true\n }\n\n // Wait for workers to quit\n wg.Wait()\n\n // Close all channels\n close(rurls)\n close(surls)\n close(shutdown)\n close(pages)\n fmt.Println(\"Done\\n\\n\")\n return nil\n }\n }\n }\n}", "func NewMockCrawler(ctrl *gomock.Controller) *MockCrawler {\n\tmock := &MockCrawler{ctrl: ctrl}\n\tmock.recorder = &MockCrawlerMockRecorder{mock}\n\treturn mock\n}", "func New(url string) *Client {\n\treturn &Client{url: url, httpC: http.DefaultClient}\n}", "func New(client *http.Client, req *http.Request, check RespCheck, urls []*url.URL) *FastestURL {\n\tif client == nil {\n\t\tclient = http.DefaultClient\n\t}\n\tif req == nil {\n\t\treq = &http.Request{}\n\t}\n\tif check == nil {\n\t\tcheck = func(resp *http.Response) bool {\n\t\t\treturn resp.StatusCode == http.StatusOK\n\t\t}\n\t}\n\treturn &FastestURL{\n\t\tClient: 
client,\n\t\tURLs: urls,\n\t\tRequest: req,\n\t\tRespCheck: check,\n\t}\n}", "func New(w http.ResponseWriter, r *http.Request) {\n\tgetTemplates().ExecuteTemplate(w, \"New\", nil)\n}", "func (api *API) launchCrawler(done chan bool) error {\n\t// location of the crawler script wrt localgoogoo root dir\n\tvar path = \"/php/start_crawler.php\"\n\n\tvar formData = url.Values{\n\t\t\"web_name\": {api.Payload.siteName},\n\t\t\"web_url\": {api.Payload.siteURL},\n\t}\n\n\tvar urlString = fmt.Sprintf(\"%s%s\", api.BaseURL, path)\n\n\tfmt.Print(\"\\nCrawling website...\\n\\n\")\n\n\t_, err := api.Client.PostForm(urlString, formData)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdone <- true\n\treturn nil\n}", "func (this *MatterController) Crawl(writer http.ResponseWriter, request *http.Request) *result.WebResult {\n\n\turl := request.FormValue(\"url\")\n\tdestPath := request.FormValue(\"destPath\")\n\tfilename := request.FormValue(\"filename\")\n\n\tuser := this.checkUser(request)\n\n\tdirMatter := this.matterService.CreateDirectories(request, user, destPath)\n\n\tif url == \"\" || (!strings.HasPrefix(url, \"http://\") && !strings.HasPrefix(url, \"https://\")) {\n\t\tpanic(\" url must start with http:// or https://\")\n\t}\n\n\tif filename == \"\" {\n\t\tpanic(\"filename cannot be null\")\n\t}\n\n\tmatter := this.matterService.AtomicCrawl(request, url, filename, user, dirMatter, true)\n\n\treturn this.Success(matter)\n}", "func New(url string) (data.Feed, error) {\n\tlog.Info().Str(\"url\", url).Msg(\"using rss feed\")\n\n\thttpClient, err := httpclient.New(httpclient.RSSPolicy)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &feed{\n\t\tURL: url,\n\t\tHTTPClient: httpClient,\n\t}, nil\n}", "func New(r io.Reader) (*Tree, error) {\n\tn, err := html.Parse(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n == nil {\n\t\treturn nil, fmt.Errorf(\"Error parsing html from reader\")\n\t}\n\treturn &Tree{n}, nil\n}", "func newMemoryScraper(_ context.Context, settings 
receiver.CreateSettings, cfg *Config) *scraper {\n\treturn &scraper{settings: settings, config: cfg, bootTime: host.BootTimeWithContext, virtualMemory: mem.VirtualMemoryWithContext}\n}", "func New(sources, exclusions []string) *Agent {\n\t// Create the agent, insert the sources and remove the excluded sources\n\tagent := &Agent{sources: make(map[string]subscraping.Source)}\n\n\tagent.addSources(sources)\n\tagent.removeSources(exclusions)\n\n\treturn agent\n}", "func NewPage(url string) *Page {\n\tp := Page{\n\t\tUrl: url,\n\t\tArticles: make([]*Article, 0),\n\t}\n\n\turl = YC_ROOT + url\n\n\thead, _ := http.NewRequest(\"HEAD\", url, nil)\n\n\tif resp, err := client.Do(head); err == nil && len(resp.Cookies()) > 0 {\n\t\tc := resp.Cookies()\n\t\tcfduid = c[0].Raw\n\t} /*else {\n\t\tgoncurses.End()\n\t\tlog.Println(resp)\n\t\tlog.Println(err)\n\t}*/\n\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdoc := doReq(req)\n\n\t//Get all the trs with subtext for children then go back one (for the first row)\n\trows := doc.Find(\".subtext\").ParentsFilteredUntil(\"tr\", \"tbody\").Prev()\n\n\tvar a bool\n\n\tp.NextUrl, a = doc.Find(\"td.title\").Last().Find(\"a\").Attr(\"href\")\n\n\tif !a {\n\t\tgoncurses.End()\n\t\tlog.Println(\"Could not retreive next hackernews page. 
Time to go outside?\")\n\t}\n\n\tfor len(p.NextUrl) > 0 && p.NextUrl[0] == '/' {\n\t\tp.NextUrl = p.NextUrl[1:]\n\t}\n\n\trows.Each(func(i int, row *goquery.Selection) {\n\t\tar := Article{\n\t\t\tRank: len(p.Articles) + i,\n\t\t}\n\n\t\ttitle := row.Find(\".title\").Eq(1)\n\t\tlink := title.Find(\"a\").First()\n\n\t\tar.Title = link.Text()\n\n\t\tif url, exists := link.Attr(\"href\"); exists {\n\t\t\tar.Url = url\n\t\t}\n\n\t\trow = row.Next()\n\n\t\trow.Find(\"span.score\").Each(func(i int, s *goquery.Selection) {\n\t\t\tif karma, err := strconv.Atoi(strings.Split(s.Text(), \" \")[0]); err == nil {\n\t\t\t\tar.Karma = karma\n\t\t\t} else {\n\t\t\t\tlog.Println(\"Error getting karma count:\", err)\n\t\t\t}\n\n\t\t\tif idSt, exists := s.Attr(\"id\"); exists {\n\t\t\t\tif id, err := strconv.Atoi(strings.Split(idSt, \"_\")[1]); err == nil {\n\t\t\t\t\tar.Id = id\n\t\t\t\t} else {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\n\t\tsub := row.Find(\"td.subtext\")\n\t\tt := sub.Text()\n\n\t\tar.Created = parseCreated(t)\n\n\t\tar.User = sub.Find(\"a\").First().Text()\n\n\t\tcomStr := strings.Split(sub.Find(\"a\").Last().Text(), \" \")[0]\n\n\t\tif comNum, err := strconv.Atoi(comStr); err == nil {\n\t\t\tar.NumComments = comNum\n\t\t}\n\n\t\tp.Articles = append(p.Articles, &ar)\n\n\t})\n\n\treturn &p\n}", "func New(url string) (*Handler, error) {\n\tc, err := golf.NewClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = c.Dial(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tl, err := c.NewLogger()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Handler{\n\t\tlogger: l,\n\t\tclient: c,\n\t}, nil\n}", "func New(url string) (*Handler, error) {\n\tc, err := golf.NewClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = c.Dial(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tl, err := c.NewLogger()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Handler{\n\t\tlogger: l,\n\t\tclient: c,\n\t}, nil\n}", "func (s 
*server) Crawl(ctx context.Context, in *pb.LinkRequest) (*pb.CrawlerResponse, error) {\n\tmapLock.RLock()\n\t// given a URL checks to see if its currently being crawled\n\t_, exists := s.spiderPtr.siteURLIndex[in.Url]\n\tmapLock.RUnlock()\n\tif exists {\n\t\tmsg := fmt.Sprintf(\"Site %s is already being crawled\", in.Url)\n\t\treturn &pb.CrawlerResponse{Message: msg}, nil\n\t}\n\t// put new site on channel\n\tnewsites <- in.Url\n\treturn &pb.CrawlerResponse{Message: \"Crawler started crawling\"}, nil\n}", "func NewPage(url *Url) *Page {\n\tlogger := logrus.WithField(\"page\", url.String())\n\treturn &Page{Url: url, Logger: logger}\n}", "func NewPage(title, content, burl, url string) *Page {\n\treturn &Page{title, content, burl, url}\n}", "func New(b builder.Builder, tracker, hosted string) *Builder {\n\t//create our new builder\n\tn := &Builder{\n\t\tb: b,\n\t\tbase: hosted,\n\t\trpc: gorpc.NewServer(),\n\t\ttcl: client.New(tracker, http.DefaultClient, client.JsonCodec),\n\t\tbq: rpc.NewBuilderQueue(),\n\t\tmux: http.NewServeMux(),\n\t\tdler: newDownloader(),\n\t}\n\n\t//register the build service in the rpc\n\tif err := n.rpc.RegisterService(n.bq, \"\"); err != nil {\n\t\tpanic(err)\n\t}\n\n\t//make sure we respond to pings\n\tif err := n.rpc.RegisterService(pinger.Pinger{}, \"\"); err != nil {\n\t\tpanic(err)\n\t}\n\n\t//register the codec\n\tn.rpc.RegisterCodec(json.NewCodec(), \"application/json\")\n\n\t//add the handlers to our mux\n\tn.mux.Handle(\"/\", n.rpc)\n\tn.mux.Handle(\"/download/\", http.StripPrefix(\"/download/\", n.dler))\n\n\t//start processing tasks\n\tgo n.run()\n\n\treturn n\n}", "func New(crawl crawl.Crawl, maxWorkers int, logger log.Logger) Service {\n\tvar svc Service\n\t{\n\t\tsvc = NewBasicService(crawl, maxWorkers)\n\t\tsvc = LoggingMiddleware(logger)(svc)\n\t}\n\treturn svc\n}", "func NewScrape(cfg *domain.Config) *Scrape {\n\treturn &Scrape{\n\t\tcfg: cfg,\n\t}\n}", "func NewScraper(opts ...Option) *Scraper {\n\tm := &Scraper{\n\t\turl: 
\"\",\n\t\texpectedStatusCode: http.StatusOK,\n\t\ttargetPrice: DefaultTargetPrice,\n\t\tselector: DefaultSelector,\n\t\tfindText: DefaultFindText,\n\t\tmaxRetries: DefaultMaxRetries,\n\t\tretrySeconds: DefaultRetrySeconds,\n\t\tLogger: &utils.DefaultLogger{},\n\t\tclient: new(http.Client),\n\t}\n\tfor _, opt := range opts {\n\t\topt(m)\n\t}\n\treturn m\n}", "func Crawler(ticker string) {\n\t// Instantiate default collector\n\tc := colly.NewCollector(\n\t\tcolly.Async(true),\n\t)\n\t// On every a element which has href attribute call callback\n\tc.OnHTML(\"a[href]\", func(e *colly.HTMLElement) {\n\t\tlink := e.Attr(\"href\")\n\t\t// Print link\n\t\tfmt.Printf(\"Link found: %q -> %s\\n\", e.Text, link)\n\t})\n\t// Before making a request print \"Visiting ...\"\n\tc.OnRequest(func(r *colly.Request) {\n\t\tfmt.Println(\"Visiting\", r.URL.String())\n\t})\n\t// Start scraping\n\tc.Visit(wsjURL + ticker)\n\tc.Wait()\n}", "func New(context *contexter.Context) (*Client) {\n return &Client {\n urlBaseIndex: 0,\n\t\tcontext: context,\n }\n}", "func RunNew(args []string) {\n\n\t// Remove fragmenta backup from args list\n\targs = args[2:]\n\n\t// We expect two args left:\n\tif len(args) < 2 {\n\t\tlog.Printf(\"Both a project path and a project type or URL are required to create a new site\\n\")\n\t\treturn\n\t}\n\n\trepo := args[0]\n\tprojectPath, err := filepath.Abs(args[1])\n\tif err != nil {\n\t\tlog.Printf(\"Error expanding file path\\n\")\n\t\treturn\n\t}\n\n\tif !strings.HasPrefix(projectPath, filepath.Join(os.Getenv(\"GOPATH\"), \"src\")) {\n\t\tlog.Printf(\"WARNING: You should create your project in $GOPATH/src\\n\")\n\t}\n\n\tif fileExists(projectPath) {\n\t\tlog.Printf(\"A folder already exists at path %s\\n\", projectPath)\n\t\treturn\n\t}\n\n\tswitch repo {\n\tcase \"app\":\n\t\trepo = \"github.com/fragmenta/fragmenta-app\"\n\tcase \"cms\":\n\t\trepo = \"github.com/fragmenta/fragmenta-cms\"\n\t\t// TODO: Blog example does not exist yet\n\t\t//\tcase 
\"blog\":\n\t\t//\t\trepo = \"github.com/fragmenta/fragmenta-blog\"\n\tdefault:\n\t\t// TODO clean repo if it contains https or .git...\n\t}\n\n\t// Log fetching our files\n\tlog.Printf(\"Fetching from url: %s\\n\", repo)\n\n\t// Go get the project url, to make sure it is up to date, should use -u\n\t_, err = runCommand(\"go\", \"get\", repo)\n\tif err != nil {\n\t\tlog.Printf(\"Error calling go get %s\", err)\n\t\treturn\n\t}\n\n\t// Copy the pristine new site over\n\tgoProjectPath := filepath.Join(os.Getenv(\"GOPATH\"), \"src\", repo)\n\terr = copyNewSite(goProjectPath, projectPath)\n\tif err != nil {\n\t\tlog.Printf(\"Error copying project %s\", err)\n\t\treturn\n\t}\n\n\t// Generate config files\n\terr = generateConfig(projectPath)\n\tif err != nil {\n\t\tlog.Printf(\"Error generating config %s\", err)\n\t\treturn\n\t}\n\n\t// Generate a migration AND run it\n\terr = generateCreateSQL(projectPath)\n\tif err != nil {\n\t\tlog.Printf(\"Error generating migrations %s\", err)\n\t\treturn\n\t}\n\n\t// Output instructions to let them change setup first if they wish\n\tshowNewSiteHelp(projectPath)\n\n}", "func crawl(\n\tfetcher Fetcher, initUrl *url.URL, out chan<- Page, follower Follower,\n) {\n\tlogger.Info(\"Starting crawl\", \"url\", initUrl)\n\n\tunexplored := sync.WaitGroup{}\n\tunexplored.Add(1)\n\n\t// Seed the work queue.\n\tpending := make(chan Task, 100)\n\tpending <- Task{initUrl, 0}\n\n\t// Request pending, and requeue discovered pages.\n\tgo func() {\n\t\tfor task := range pending {\n\t\t\tgo func(task Task) {\n\t\t\t\tlogger.Debug(\"Starting\", \"url\", task.URL)\n\t\t\t\tpage := fetcher.Fetch(&task)\n\t\t\t\tout <- page\n\n\t\t\t\tfor _, link := range page.Links {\n\t\t\t\t\tif err := follower.Follow(link); err != nil {\n\t\t\t\t\t\tlogger.Debug(\"Not following link\", \"link\", link, \"reason\", err)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tunexplored.Add(1)\n\t\t\t\t\t\tpending <- 
LinkTask(link)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tunexplored.Done()\n\t\t\t}(task)\n\t\t}\n\t}()\n\n\t// Tie eveything off so that we exit clearly.\n\tunexplored.Wait()\n\tclose(pending)\n}", "func NewTarget(url string) (t *Target) {\n t = &Target{Url:url, method:defaultMethod, header:http.Header{}}\n return t\n}", "func (r *Relwarc) NewBrowser() *Browser {\n\tctx, cancel := chromedp.NewContext(r.ctx)\n\n\t// make sure a browser and its first tab are created.\n\tif err := chromedp.Run(ctx); err != nil {\n\t\tpanic(err)\n\t}\n\n\t// enable network by default.\n\tif err := chromedp.Run(ctx, network.Enable()); err != nil {\n\t\tpanic(err)\n\t}\n\n\ttgt := chromedp.FromContext(ctx).Target\n\n\ttab := Tab{\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\ttarget: tgt,\n\t\trequestMap: map[network.RequestID]*Request{},\n\t}\n\n\tchromedp.ListenTarget(ctx, tab.onTargetEvent)\n\n\tbrowser := Browser{\n\t\tctx: ctx,\n\t\tfirst: &tab,\n\t\ttabs: map[target.ID]*Tab{},\n\t}\n\n\treturn &browser\n}", "func New(url string) Client {\n\treturn &client{\n\t\tbaseURL: url,\n\t}\n}", "func New(url string) error {\n\tvar err error\n\tlog.Println(\"connect to db\")\n\tdb, err = sql.Open(\"mysql\", url)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = GetNases()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func New(scrapeManager *scrape.Manager, log logrus.FieldLogger) *Explore {\n\treturn &Explore{\n\t\tlogger: log,\n\t\tscrapeManager: scrapeManager,\n\t\tneedExplore: make(chan *exploringTarget, 10000),\n\t\tretryInterval: time.Second * 5,\n\t\ttargets: map[string]map[uint64]*exploringTarget{},\n\t\texplore: explore,\n\t}\n}", "func New(p *Page) (Poller, error) {\n\tid := \"\"\n\tl := log.With().Str(\"func\", \"poller.New\").Logger()\n\n\tif p.ID != nil && len(*p.ID) > 0 {\n\t\tid = *p.ID\n\t} else {\n\t\tid = randomdata.SillyName()\n\t\tl.Info().Msg(\"generating random name...\")\n\t}\n\tl = l.With().Str(\"id\", id).Logger()\n\n\t// -- Validation\n\tmethod, err := 
parseHTTPMethod(p.Method)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tparsedURL, err := parseURL(p.URL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// -- Set ups\n\trandomFrequency, ticks, offset := parsePollOptions(id, p.PollOptions)\n\tif randomFrequency {\n\t\tticks = nextRandomTick(ticks-offset, ticks+offset)\n\t}\n\n\trandUA, userAgents := parseUserAgentOptions(id, p.UserAgentOptions)\n\n\theaders := http.Header{}\n\tif p.Headers == nil {\n\t\tl.Warn().Msg(\"no headers provided\")\n\t} else {\n\t\tfor headerKey, headerVal := range p.Headers {\n\t\t\theaders[headerKey] = []string{headerVal}\n\t\t}\n\t\tswitch hlen := len(p.Headers); {\n\t\tcase hlen == 0:\n\t\t\tl.Warn().Msg(\"no headers provided\")\n\t\tcase hlen < 3:\n\t\t\tl.Warn().Msg(\"few headers provided\")\n\t\t}\n\t}\n\n\thttpClient := &http.Client{\n\t\tTimeout: time.Duration(defaultHTTPClientTimeout) * time.Second,\n\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t},\n\t}\n\tif p.FollowRedirect {\n\t\thttpClient.CheckRedirect = nil\n\t}\n\n\trequest, err := http.NewRequestWithContext(context.Background(), method, parsedURL.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trequest.Header = headers\n\n\t// -- Complete and return\n\treturn &pagePoller{\n\t\tid: id,\n\t\thttpClient: httpClient,\n\t\trequest: request,\n\t\tuserAgents: userAgents,\n\t\tticks: ticks,\n\t\trandTick: randomFrequency,\n\t\toffsetRange: offset,\n\t\tlastUAIndex: -1,\n\t\trandUa: randUA,\n\t}, nil\n}", "func (p *Postgres) CreateCrawlRequest(urlString string, levels int) (int, error) {\n\tvar id int\n\t// clean url and make sure there's a scheme attached\n\tpageURL, err := url.Parse(urlString)\n\tif err != nil {\n\t\treturn id, fmt.Errorf(\"Unable to parse url %s: %v\", urlString, err)\n\t}\n\tpageURL.Fragment = \"\"\n\tif pageURL.Scheme == \"\" {\n\t\tpageURL.Scheme = \"http\"\n\t}\n\n\t// create new crawl request\n\tresult := 
p.db.QueryRow(\n\t\t`INSERT INTO crawl_requests\n\t\t(id, url, levels)\n\t\tVALUES (DEFAULT, $1, $2)\n\t\tRETURNING id`, pageURL.String(), levels)\n\terr = result.Scan(&id)\n\tif err != nil {\n\t\treturn id, fmt.Errorf(\"Unable to create crawl request with url %s and level %d: %v\", urlString, levels, err)\n\t}\n\t// create first task for crawl request\n\terr = p.CreateTask(id, pageURL.String(), 0, false)\n\tif err != nil {\n\t\treturn id, fmt.Errorf(\"Unable to create task for crawl request %d: %v\", id, err)\n\t}\n\treturn id, err\n}", "func NewFromURL(url string) (r *Recipe, err error) {\n\tclient := http.Client{\n\t\tTimeout: 10 * time.Second,\n\t}\n\tresp, err := client.Get(url)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\thtml, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn NewFromHTML(url, string(html))\n}", "func NewCfnCrawler_Override(c CfnCrawler, scope constructs.Construct, id *string, props *CfnCrawlerProps) {\n\t_init_.Initialize()\n\n\t_jsii_.Create(\n\t\t\"aws-cdk-lib.aws_glue.CfnCrawler\",\n\t\t[]interface{}{scope, id, props},\n\t\tc,\n\t)\n}", "func newNodes(c *Client) *nodes {\n\treturn &nodes{c}\n}", "func newTestFetcher() *FetcherTest {\n\treturn &FetcherTest{}\n}", "func (h *MovieHandler) new(w http.ResponseWriter, r *http.Request) {\n\t// Render a HTML response and set status code.\n\trender.HTML(w, http.StatusOK, \"movie/new.html\", nil)\n}", "func New(ctx context.Context, s *Session, health healthcheck.Handler) Controller {\n\ttemp := controller{\n\t\tcmd: make(chan int),\n\t\tsong: make(chan string),\n\t\tplaylist: make(chan string),\n\t\tready: make(chan struct{}),\n\t\tcurrentToken: make(chan oauth2.Token),\n\t\thealth: health,\n\t}\n\n\tgo run(ctx, s, temp)\n\treturn temp\n}", "func New(url, token string, mock bool, l *logrus.Logger) Nest {\n\n\tinitLog(l)\n\n\tlogDebug(funcName(), \"New nest structure\", url)\n\n\t// Read mock file\n\tif mock {\n\t\tlogWarn(funcName(), \"Mock 
activated !!!\")\n\t\tmockFileByte = readFile(mockFile)\n\t}\n\n\trest = http.New(log)\n\n\treturn &nest{url: url, token: token, mock: mock}\n\n}", "func CNew(\n\tfetcher CFetcher,\n\tss ...Setting,\n) CFetchCloser {\n\tsetting := &fetcherSetting{\n\t\tbucketNum: 10,\n\t\tttl: 1 * time.Minute,\n\t\tinterval: 1 * time.Second,\n\t}\n\n\tfor _, set := range ss {\n\t\tset(setting)\n\t}\n\n\tfs := make([]CFetcher, setting.bucketNum)\n\tfor i := range fs {\n\t\tfs[i] = NewCachedCFetcher(fetcher, setting.ttl, setting.interval)\n\t}\n\n\treturn NewBucketedCFetcher(fs)\n}", "func New(options ...func(*Loader)) *Loader {\n\tweb := http.New()\n\tloader := &Loader{\n\t\tclients: map[string]Downloader{\n\t\t\t\"file\": file.New(),\n\t\t\t\"http\": web,\n\t\t\t\"https\": web,\n\t\t},\n\t}\n\n\tfor _, option := range options {\n\t\toption(loader)\n\t}\n\n\treturn loader\n}", "func Crawl(id int, userAgent string, waiting <-chan *url.URL, processed chan<- *url.URL, content chan<- string) {\n\n\tvar u *url.URL\n\tvar open bool\n\tvar httpClient = utils.NewHttpClient(userAgent)\n\n\tfor {\n\t\tselect {\n\t\tcase u, open = <-waiting:\n\n\t\t\tif open {\n\n\t\t\t\tc, err := httpClient.RetrieveContent(u.String())\n\n\t\t\t\tlog.Printf(\"Crawl Worker-[%v] parsed content for [%v]\", id, u.String())\n\n\t\t\t\t//TODO: deal with failed crawls, e.g. 
log with special value in key-store\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t} else {\n\n\t\t\t\t\tcontent <- c\n\t\t\t\t\tlog.Printf(\"Crawl Worker-[%v] added html from [%v] to content\", id, u.String())\n\n\t\t\t\t\tprocessed <- u\n\t\t\t\t\tlog.Printf(\"Crawl Worker-[%v] added [%v] to crawled pages\", id, u.String())\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Crawl Worker-[%v] is exiting\", id)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\ttime.Sleep(500 * time.Millisecond)\n\t\t}\n\t}\n}", "func (this *DefaultNode) StartCrawl() {\n\tgo this.Crawler.Start()\n}", "func New(r *url.URL, p parser.ServiceParse, l *log.Logger,\n\tc *CondConfig) *Service {\n\treturn &Service{\n\t\troot: r,\n\t\tparser: p,\n\t\tlog: l,\n\t\tsm: sitemap.New(),\n\t\tc: c,\n\t}\n}", "func (c Client) New(params *stripe.TreasuryCreditReversalParams) (*stripe.TreasuryCreditReversal, error) {\n\tcreditreversal := &stripe.TreasuryCreditReversal{}\n\terr := c.B.Call(\n\t\thttp.MethodPost,\n\t\t\"/v1/treasury/credit_reversals\",\n\t\tc.Key,\n\t\tparams,\n\t\tcreditreversal,\n\t)\n\treturn creditreversal, err\n}", "func newBacon(c Client) *Bacon {\n\treturn &Bacon{\n\t\tconn: c.Conn,\n\t\turl: c.url,\n\t\toldlistquery: \"MATCH (r:Restaurant)-->(b:Bacon) where b.last_points is not null return r.id as Rid, r.name as Restaurant ORDER BY b.last_points DESC\",\n\t\tnewlistquery: \"MATCH (r:Restaurant)-->(b:Bacon) where b.points is not null return r.id as Rid, r.name as Restaurant ORDER BY b.points DESC\",\n\t}\n}", "func New(conf config.Config) Parser {\n\tmethods := map[string]bool{\n\t\thttp.MethodGet: true,\n\t\thttp.MethodHead: true,\n\t\thttp.MethodPost: true,\n\t\thttp.MethodPut: true,\n\t\thttp.MethodPatch: true,\n\t\thttp.MethodDelete: true,\n\t\thttp.MethodConnect: true,\n\t\thttp.MethodOptions: true,\n\t\thttp.MethodTrace: true,\n\t}\n\n\treturn Parser{\n\t\tconf: conf,\n\t\tmethods: methods,\n\t}\n}", "func New(it *msvc.ProjectIterator) *Graph {\n\tgr := &Graph{\n\t\tg: 
simple.NewDirectedGraph(),\n\t\tallNodes: rbtree.New(),\n\t\tnextID: 1,\n\t}\n\n\tit.Foreach(gr.newNode)\n\tait := rbtree.NewWalkInorder(gr.allNodes)\n\tait.Foreach(gr.newEdges)\n\n\treturn gr\n}", "func New(uri string) (*Client, error) {\n\tu, e := url.Parse(uri)\n\tif e != nil {\n\t\treturn nil, fmt.Errorf(\"url.Parse: %w\", e)\n\t}\n\n\tc := &Client{\n\t\turi: u.String(),\n\t}\n\treturn c, nil\n}", "func NewVisit(c *fiber.Ctx) {\n\tv := new(Visit)\n\tv.IDSite = 25\n\tv.Rec = 1\n\tv.ActionName = \"\"\n\tv.URL = string(c.Request().URI().FullURI())\n\tv.Rand = rand.Uint64()\n\tv.Version = 1\n\tv.Ref = string(c.Context().Referer())\n\tv.Agent = c.Get(\"User-Agent\")\n\tv.Lang = c.Get(\"Accept-Language\")\n\n\tgo v.send()\n}", "func New(url string) *Client {\n\treturn &Client{\n\t\tclient: http2.NewClient(nil),\n\t\turl: url,\n\t}\n}", "func New(\n\tmid module.MID,\n\tclient *http.Client,\n\tscoreCalculator module.CalculateScore,\n\tmaxThread int) (downloader module.Downloader, yierr *constant.YiError) {\n\tmoduleBase, yierr := stub.NewModuleInternal(mid, scoreCalculator)\n\t//check whether the args are vaild\n\tif yierr != nil {\n\t\treturn\n\t}\n\tif client == nil {\n\t\tyierr = constant.NewYiErrorf(constant.ERR_NEW_DOWNLOADER_FAIL, \"Client is nil.\")\n\t\treturn\n\t}\n\n\treturn &myDownloader{\n\t\tModuleInternal: moduleBase,\n\t\thttpClient: client,\n\t\tPool: *pool.NewPool(maxThread),\n\t}, nil\n}", "func New(text string) error {\n\t// 没有取地址\n\treturn errorString(text)\n}", "func newPost(body string, timestamp int64, next *post) *post {\n\treturn &post{body, timestamp, next}\n}", "func (c *Crawler) Run() error {\n\tif !c.ignoreRobotsTxt {\n\t\terr := c.robotsInit()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tc.queue = make(chan url.URL)\n\tdefer close(c.queue)\n\n\tc.results = make(chan Page)\n\tdefer close(c.results)\n\n\tc.pool.Start()\n\tc.addJob(c.url)\n\n\tc.waitForResults()\n\tc.cleanUpResults()\n\n\tc.generateLinksFrom()\n\n\treturn nil\n}", 
"func (s *MongoService) New(link *Link) (*Link, error) {\n\tif link == nil {\n\t\treturn nil, errors.New(\"empty link\")\n\t}\n\n\t_, err := s.Get(link.Slug)\n\tswitch err {\n\tcase ErrNotFound:\n\t\t// continue\n\tcase nil:\n\t\treturn nil, ErrAlreadyExists\n\tdefault:\n\t\treturn nil, err\n\t}\n\n\tlinkCollection := s.client.Database(\"shortinho\").Collection(\"link\")\n\tlink.Active = true\n\tlink.CreatedAt = int(time.Now().UnixNano() / 1e6)\n\tlink.UpdatedAt = int(time.Now().UnixNano() / 1e6)\n\n\t_, err = linkCollection.InsertOne(context.Background(), link)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn link, nil\n}", "func cloneSite(args []string) {\n\turl := args[0]\n\n\tif Serve == true {\n\t\t// grab the url from the\n\t\tif !parser.ValidateURL(url) && !parser.ValidateDomain(url) {\n\t\t\tfmt.Println(\"goclone <url>\")\n\t\t} else if parser.ValidateDomain(url) {\n\t\t\t// use the domain as the project name\n\t\t\tname := url\n\t\t\t// CreateProject\n\t\t\tprojectPath := file.CreateProject(name)\n\t\t\t// create the url\n\t\t\tvalidURL := parser.CreateURL(name)\n\t\t\t// Crawler\n\t\t\tcrawler.Crawl(validURL, projectPath)\n\t\t\t// Restructure html\n\t\t\thtml.LinkRestructure(projectPath)\n\t\t\terr := exec.Command(\"open\", \"http://localhost:5000\").Start()\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tserver.Serve(projectPath)\n\n\t\t} else if parser.ValidateURL(url) {\n\t\t\t// get the hostname\n\t\t\tname := parser.GetDomain(url)\n\t\t\t// create project\n\t\t\tprojectPath := file.CreateProject(name)\n\t\t\t// Crawler\n\t\t\tcrawler.Crawl(url, projectPath)\n\t\t\t// Restructure html\n\t\t\thtml.LinkRestructure(projectPath)\n\t\t\terr := exec.Command(\"open\", \"http://localhost:5000\").Start()\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tserver.Serve(projectPath)\n\t\t} else {\n\t\t\tfmt.Print(url)\n\t\t}\n\t} else {\n\t\t// grab the url from the\n\t\tif !parser.ValidateURL(url) && !parser.ValidateDomain(url) 
{\n\t\t\tfmt.Println(\"goclone <url>\")\n\t\t} else if parser.ValidateDomain(url) {\n\t\t\t// use the domain as the project name\n\t\t\tname := url\n\t\t\t// CreateProject\n\t\t\tprojectPath := file.CreateProject(name)\n\t\t\t// create the url\n\t\t\tvalidURL := parser.CreateURL(name)\n\t\t\t// Crawler\n\t\t\tcrawler.Crawl(validURL, projectPath)\n\t\t\t// Restructure html\n\t\t\thtml.LinkRestructure(projectPath)\n\t\t\tif Open {\n\t\t\t\t// automatically open project\n\t\t\t\terr := exec.Command(\"open\", projectPath+\"/index.html\").Start()\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t} else if parser.ValidateURL(url) {\n\t\t\t// get the hostname\n\t\t\tname := parser.GetDomain(url)\n\t\t\t// create project\n\t\t\tprojectPath := file.CreateProject(name)\n\t\t\t// Crawler\n\t\t\tcrawler.Crawl(url, projectPath)\n\t\t\t// Restructure html\n\t\t\thtml.LinkRestructure(projectPath)\n\t\t\tif Open {\n\t\t\t\terr := exec.Command(\"open\", projectPath+\"/index.html\").Start()\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Print(url)\n\t\t}\n\t}\n}", "func (c *crawling) crawl(s site) {\n\turls := c.crawlSite(s) // the core crawl process\n\tc.Feed(urls, s.URL, s.Depth-1) // new urls enter crawling - circular feedback\n\ttime.Sleep(c.Delay) // have a gentle nap\n}", "func RunWebCrawler() {\n\tCrawl(\"https://golang.org/\", 4, fetcher)\n}", "func newConn(w http.ResponseWriter, r *http.Request, p url.Values) Conn {\n\treturn Conn{Writer: w, Request: r, Params: p, halted: false}\n}", "func newcomputer(brand string) *computer {\n\treturn &computer{brand: brand}\n}", "func (c MethodsCollection) New() pNew {\n\treturn pNew{\n\t\tMethod: c.MustGet(\"New\"),\n\t}\n}" ]
[ "0.73082036", "0.72865236", "0.7228419", "0.7207095", "0.719269", "0.70568573", "0.7003078", "0.6945463", "0.68530333", "0.6658365", "0.6632175", "0.6534232", "0.6392795", "0.63671196", "0.63658637", "0.6364868", "0.6352427", "0.6246249", "0.6245264", "0.5961489", "0.5958432", "0.5874036", "0.577267", "0.5770553", "0.57469785", "0.56928307", "0.56090206", "0.56037426", "0.5582896", "0.5575072", "0.556397", "0.55502975", "0.5549005", "0.55316657", "0.5528617", "0.5528617", "0.550921", "0.54747665", "0.54254425", "0.5398783", "0.53973824", "0.5393022", "0.53818", "0.5366917", "0.5362299", "0.53611785", "0.534836", "0.5347361", "0.5337845", "0.53352666", "0.53316605", "0.53316605", "0.53214043", "0.5296184", "0.52797025", "0.5276059", "0.5261625", "0.52609444", "0.5251947", "0.52501726", "0.5247892", "0.5242703", "0.5228151", "0.5224492", "0.5224216", "0.52117604", "0.51987785", "0.5196047", "0.5193043", "0.51923305", "0.5189008", "0.5180587", "0.5177233", "0.5166956", "0.51598793", "0.51579297", "0.5150805", "0.5150164", "0.51500416", "0.5134349", "0.5133581", "0.5132094", "0.5119971", "0.5115305", "0.511466", "0.5112857", "0.51104385", "0.5106893", "0.5103186", "0.5100483", "0.50973636", "0.5083503", "0.5082964", "0.507955", "0.5076229", "0.50759596", "0.50707895", "0.50696456", "0.5068251", "0.5067724" ]
0.69358087
8
Add adds one or more previously unadded urls to crawler to visit. source can be nil to indicate root. Returns a list of errors if any occured.
func (c *Crawler) Add(source *url.URL, uri ...*url.URL) []error { var errs []error for _, u := range uri { var err error u := u u.Fragment = "" // reset fragment, we don't want it messing our visited list if source != nil { u = source.ResolveReference(u) } if u.Scheme != "http" && u.Scheme != "https" { err = ErrUnsupportedScheme } else if err == nil && c.filter != nil && !c.filter(u) { err = ErrFilteredOut } us := u.String() // For the already-visited test we need to clean up each URL a bit vkey := strings.TrimRight(us[strings.Index(us, ":")+1:], "/") // Remove scheme and trailing slash if err == nil { c.toVisitMu.RLock() if _, ok := c.toVisit[vkey]; ok { err = ErrAlreadyInList } c.toVisitMu.RUnlock() } if err == nil { c.logger.Debugf("Add(%v %v): OK", source, us) atomic.AddUint64(&c.numQueued, 1) } else if err != nil { //c.logger.Warnf("Add(%v %v): %v", source, us, err) atomic.AddUint64(&c.numEncountered, 1) errs = append(errs, errors.Wrapf(err, "Invalid URL %v", u)) continue } c.toVisitMu.Lock() c.toVisit[vkey] = struct{}{} c.toVisitMu.Unlock() { uu := *u uu.Scheme = "" if source != nil && source.Host == uu.Host { uu.Host = "" } if source == nil { c.mapper.Add("<root>", uu.String()) } else { c.mapper.Add(source.String(), uu.String()) } } v := visit{ source: source, target: u, } select { case c.visitChan <- v: case <-c.ctx.Done(): return append(errs, c.ctx.Err()) } } return errs }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (os *OriginChecker) AddRawURLs(urls []string) {\n\tos.Lock()\n\tdefer os.Unlock()\n\n\tfor _, u := range urls {\n\t\tclean, err := cleanOrigin(u)\n\t\tif err == nil {\n\t\t\tos.origins[clean] = true\n\t\t}\n\t}\n}", "func (r *RssFeedEmitter) Add(url string) {\n\tfor _, feed := range r.feeds {\n\t\tif feed.Link == url {\n\t\t\treturn\n\t\t}\n\t}\n\tnewFeed, err := r.parser.ParseURL(url)\n\tif err != nil {\n\t\treturn\n\t}\n\tr.feeds = append(r.feeds, *newFeed)\n}", "func (me *Crawler) AddUrl(URL string) (err error) {\n\tfor range only.Once {\n\t\tvar u *url.URL\n\t\tu, err = url.Parse(URL)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif u.Path == \"\" {\n\t\t\tu.Path = \"/\"\n\t\t}\n\t\tr := &colly.Request{\n\t\t\tURL: u,\n\t\t\tMethod: \"GET\",\n\t\t}\n\t\tvar b []byte\n\t\tb, err = r.Marshal()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\terr = me.Storage.AddRequest(b)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn err\n}", "func (h *CrawlHandler) AddCrawl(url string, statusCode int) {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\n\th.crawls[url] = statusCode\n}", "func (f *frontier) Add(uri ...string) {\n\tfor _, i := range uri {\n\t\tu, err := f.filter(f, i)\n\t\tif err != nil {\n\t\t\tcontinue // do nothing\n\t\t}\n\t\tf.lk.Lock()\n\t\tf.nbs = append(f.nbs, &visitable{uri: u})\n\t\tf.lk.Unlock()\n\t}\n}", "func (r *result) Add(url, body string) {\n\tr.mux.Lock()\n\tr.Sites[url] = body\n\tr.give(url)\n\tr.mux.Unlock()\n}", "func (r *Repository) AddImages(urls []string) []error {\n\tvar errors []error\n\terrChan := make(chan error, len(urls))\n\tvar wg sync.WaitGroup\n\tfor _, url := range urls {\n\t\turl := url\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := r.addImage(url)\n\t\t\terrChan <- err\n\t\t}()\n\t}\n\n\twg.Wait()\n\tclose(errChan)\n\n\tfor err := range errChan {\n\t\tif err != nil {\n\t\t\terrors = append(errors, err)\n\t\t}\n\t}\n\n\treturn errors\n}", "func (self *errorList) Add(err error) {\n\tif err != nil 
{\n\t\tself.list = append(self.list, err.Error())\n\t}\n\t//return err\n}", "func (s *FeedService) AddFeed(url string) error {\n\texists, err := s.dbClient.CheckWhetherSourceExist(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !exists {\n\t\treturn fmt.Errorf(\"api: feed %s already exists\", url)\n\t}\n\tfp := gofeed.NewParser()\n\tfp.Client = s.httpClient\n\tf, err := fp.ParseURL(url)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"add: cannot parse URL: %v\", err)\n\t}\n\tfavIcon, err := getFavIcon(f.Link)\n\tif err != nil {\n\t\treturn err\n\t}\n\titem := &db.FeedSource{\n\t\tTitle: f.Title,\n\t\tUrlSource: url,\n\t\tDescription: f.Description,\n\t\tLastUpdated: f.UpdatedParsed,\n\t\tActive: true,\n\t\tLastChecked: time.Now(),\n\t\tFavIcon: favIcon,\n\t}\n\tfeedID, err := s.dbClient.AddFeed(item)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, i := range f.Items {\n\t\tfixFeedItem(i)\n\t\terr := s.dbClient.AddNews(feedID, db.ToFeedItem(item.Id, i))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tlog.Printf(\"Added %d items\", len(f.Items))\n\treturn err\n}", "func (a *Agent) addSources(sources []string) {\n\tfor _, source := range sources {\n\t\tswitch source {\n\t\tcase \"webscan\":\n\t\t\ta.sources[source] = &webscan.Source{}\n\t\tcase \"hackertarget\":\n\t\t\ta.sources[source] = &hackertarget.Source{}\n\t\tcase \"dnsgrep\":\n\t\t\ta.sources[source] = &dnsgrep.Source{}\n\t\tcase \"rapiddns\":\n\t\t\ta.sources[source] = &rapiddns.Source{}\n\t\tcase \"c99\":\n\t\t\ta.sources[source] = &c99.Source{}\n\t\tcase \"ip138\":\n\t\t\ta.sources[source] = &ip138.Source{}\n\t\tcase \"aizhan\":\n\t\t\ta.sources[source] = &aizhan.Source{}\n\t\tcase \"omnisint\":\n\t\t\ta.sources[source] = &omnisint.Source{}\n\t\tcase \"viewdns\":\n\t\t\ta.sources[source] = &viewdns.Source{}\n\t\tcase \"bugscaner\":\n\t\t\ta.sources[source] = &bugscaner.Source{}\n\t\tcase \"dnslytics\":\n\t\t\ta.sources[source] = &dnslytics.Source{}\n\t\tcase \"domaintools\":\n\t\t\ta.sources[source] 
= &domaintools.Source{}\n\t\tcase \"yougetsignal\":\n\t\t\ta.sources[source] = &yougetsignal.Source{}\n\t\tcase \"chinaz\":\n\t\t\ta.sources[source] = &chinaz.Source{}\n\t\tcase \"securitytrails\":\n\t\t\ta.sources[source] = &securitytrails.Source{}\n\t\t}\n\t}\n}", "func (c *Cache) Add(cr CrawlResult) {\n\tc.mutex.Lock()\n\tc.c[cr.url] = cr\n\tc.mutex.Unlock()\n}", "func (e *Errors) Add(errs ...error) {\n\t*e = append(*e, errs...)\n}", "func (errs Errors) Add(newErrors ...error) Errors {\n\tfor _, err := range newErrors {\n\t\tif err == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif errors, ok := err.(Errors); ok {\n\t\t\terrs = errs.Add(errors...)\n\t\t} else {\n\t\t\tok = true\n\t\t\tfor _, e := range errs {\n\t\t\t\tif err == e {\n\t\t\t\t\tok = false\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ok {\n\t\t\t\terrs = append(errs, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn errs\n}", "func (errs Errors) Add(newErrors ...error) Errors {\n\tfor _, err := range newErrors {\n\t\tif err == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif errors, ok := err.(Errors); ok {\n\t\t\terrs = errs.Add(errors...)\n\t\t} else {\n\t\t\tok = true\n\t\t\tfor _, e := range errs {\n\t\t\t\tif err == e {\n\t\t\t\t\tok = false\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ok {\n\t\t\t\terrs = append(errs, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn errs\n}", "func (s *Spider) addSkippedURL(u *url.URL) {\n\ts.Lock()\n\ts.skippedURLs[u.Path] = struct{}{}\n\ts.Unlock()\n}", "func (v *ValidationErrors) Add(err ...string) {\n\t*v = append(*v, err...)\n}", "func (results *Results) Add(result *Result) {\n\tif !result.Passed || result.Error != nil {\n\t\tresults.Passed = false\n\t}\n\tresults.List = append(results.List, result)\n}", "func (target *LinkStatistics) Add(source LinkStatistics) {\n\tif target.Name == \"\" {\n\t\ttarget.Name = source.Name\n\t}\n\tif target.URI == \"\" {\n\t\ttarget.URI = source.URI\n\t}\n\ttarget.AnnotatedValuesWaiting += source.AnnotatedValuesWaiting\n\ttarget.AnnotatedValuesInProgress += 
source.AnnotatedValuesInProgress\n\ttarget.AnnotatedValuesAcknowledged += source.AnnotatedValuesAcknowledged\n}", "func (m refCountedUrlSet) addUrl(urlStr string) bool {\n\tm[urlStr]++\n\treturn m[urlStr] == 1\n}", "func (f *FFS) Addrs(ctx context.Context) ([]api.AddrInfo, error) {\n\tresp, err := f.client.Addrs(ctx, &rpc.AddrsRequest{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\taddrs := make([]api.AddrInfo, len(resp.Addrs))\n\tfor i, addr := range resp.Addrs {\n\t\taddrs[i] = api.AddrInfo{\n\t\t\tName: addr.Name,\n\t\t\tAddr: addr.Addr,\n\t\t\tType: addr.Type,\n\t\t}\n\t}\n\treturn addrs, nil\n}", "func SetTotalURLs(total int) (err error) {\n\tclient := getClient()\n\terr = client.Set(\"TotalUrls\", total, 0).Err()\n\treturn\n}", "func (list *ValidationErrors) Add(err *FieldError) {\n\t*list = append(*list, err)\n}", "func updateSource(source *Source, newSource *Source) {\n\tfor _, newEntry := range newSource.Entries {\n\t\tvar exists = false\n\t\tfor _, entry := range source.Entries {\n\t\t\tif entry.Url == newEntry.Url {\n\t\t\t\texists = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !exists {\n\t\t\tsource.Entries = append(source.Entries, newEntry)\n\t\t}\n\t}\n}", "func (m *MultiError) Add(err error) {\n\tif err != nil {\n\t\tm.errors = append(m.errors, err)\n\t}\n}", "func (qi *Items) Add(r *pageloader.Request) {\n\n\tif qi.haveSeen(r.URL) {\n\t\treturn\n\t}\n\n\tqi.Lock()\n\tlog.Println(\"adding request to the queue\")\n\tqi.Stack = append(qi.Stack, r)\n\tqi.Seen[r.URL] = true\n\tqi.Length++\n\tlog.Printf(\"queue length now: %d\\n\", qi.Length)\n\tqi.Unlock()\n}", "func addUrl(writer http.ResponseWriter, request *http.Request) {\n\tvars := mux.Vars(request)\n\thostname := vars[\"hostname\"]\n\tquerypath := vars[\"querypath\"]\n\n\tresponse := APIResponse{}\n\terr := utils.ValidateUrl(hostname)\n\tif err != nil {\n\t\tresponse.BadRequest(err)\n\t\thttp_respond(response, writer)\n\t\treturn\n\t}\n\n\tdecodedPath, err := utils.URLDecode(querypath)\n\tif err 
!= nil {\n\t\tresponse.BadRequest(err)\n\t\thttp_respond(response, writer)\n\t\treturn\n\t}\n\n\t// Generate URL service for querying the URL\n\turlService, err := services.NewUrlService(hostname, decodedPath, config.DBType, config.CacheType)\n\tif err != nil {\n\t\tutils.LogError(utils.LogFields{\"hostname\": hostname, \"path\": decodedPath}, err, \"Error getting URL\")\n\t\tresponse.InternalError(errors.New(\"An error occurred\"))\n\t\thttp_respond(response, writer)\n\t\treturn\n\t}\n\n\terr = urlService.AddUrl()\n\tif err != nil {\n\t\tresponse.BadRequest(err)\n\t} else {\n\t\tresponse.Success(StringData{Message: \"Successfully added URL\"})\n\t}\n\n\thttp_respond(response, writer)\n}", "func (prCtx *ParseResultContext) AddResults(source string, parsedResList []*ParseResult) {\n\t// If there is no source, the configMap is probably a platform scan map, in that case\n\t// treat all the results as consistent.\n\tif source == \"\" {\n\t\tprCtx.addConsistentResults(source, parsedResList)\n\t\treturn\n\t}\n\n\t// Treat the first batch of results as consistent\n\tif len(prCtx.inconsistent) == 0 && len(prCtx.consistent) == 0 {\n\t\tprCtx.addConsistentResults(source, parsedResList)\n\t} else {\n\t\tprCtx.addParsedResults(source, parsedResList)\n\t}\n}", "func ParseSyndicationSource(ctx context.Context, repos *repository.Repositories, r *http.Result, s *syndication.Source) ([]*url.URL, error) {\n\tvar urls []*url.URL\n\n\tif err := handleFeedHTTPErrors(ctx, repos, r, s); err != nil {\n\t\treturn urls, err\n\t}\n\n\t// We only want successful requests at this point\n\tif !r.RequestWasSuccessful() {\n\t\treturn urls, fmt.Errorf(\"%s\", r.GetFailureReason())\n\t}\n\n\tif r.RequestWasRedirected() {\n\t\tvar err error\n\t\ts, err = handleDuplicateFeed(ctx, repos, r.FinalURI, s)\n\t\tif err != nil {\n\t\t\treturn urls, err\n\t\t}\n\t}\n\n\tpr, err := repos.Botlogs.FindPreviousByURL(ctx, s.URL, r)\n\tif err != nil && err != sql.ErrNoRows {\n\t\treturn urls, err\n\t}\n\n\tif 
r.IsContentDifferent(pr) {\n\t\tvar c *gofeed.Feed\n\t\tc, err = gofeed.NewParser().Parse(r.Content)\n\t\tif err != nil {\n\t\t\treturn urls, fmt.Errorf(\"Parsing error: %s - URL %s\", err, s.URL)\n\t\t}\n\n\t\tif c.Title != \"\" {\n\t\t\tif s.Title == \"\" || s.Title == syndication.DefaultWPFeedTitle {\n\t\t\t\ts.Title = c.Title\n\t\t\t}\n\t\t}\n\n\t\tif c.Link != \"\" {\n\t\t\tvar l *url.URL\n\t\t\tl, err = url.FromRawURL(c.Link)\n\t\t\tif err == nil {\n\t\t\t\ts.Domain = l\n\t\t\t}\n\t\t}\n\n\t\tif s.Type == \"\" {\n\t\t\tvar feedType syndication.Type\n\t\t\tfeedType, err = syndication.FromGoFeedType(c.FeedType)\n\t\t\tif err == nil {\n\t\t\t\ts.Type = feedType\n\t\t\t} else {\n\t\t\t\tlogger.Error(err)\n\t\t\t}\n\t\t}\n\n\t\tfor _, item := range c.Items {\n\t\t\tvar u *url.URL\n\t\t\tu, err = url.FromRawURL(item.Link)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(err)\n\t\t\t\tcontinue // Just skip invalid URLs\n\t\t\t}\n\n\t\t\t// @TODO Add a list of Source proxy and resolve source's URLs before pushing to the queue\n\t\t\tvar b bool\n\t\t\tb, err = repos.Documents.ExistWithURL(ctx, u)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !b {\n\t\t\t\tlogger.Info(fmt.Sprintf(\"Adding URL [%s]\", u))\n\t\t\t\turls = append(urls, u)\n\t\t\t} else {\n\t\t\t\tlogger.Info(fmt.Sprintf(\"URL [%s] already exists\", u))\n\t\t\t}\n\t\t}\n\t} else {\n\t\tlogger.Info(\"Feed content has not changed\")\n\t}\n\n\t// Reverse results\n\tfor l, r := 0, len(urls)-1; l < r; l, r = l+1, r-1 {\n\t\turls[l], urls[r] = urls[r], urls[l]\n\t}\n\n\tvar results []*http.Result\n\tresults, err = repos.Botlogs.FindByURL(ctx, s.URL)\n\tif err != nil {\n\t\treturn urls, err\n\t}\n\n\tf := http.CalculateFrequency(results)\n\tlogger.Warn(fmt.Sprintf(\"Source frequency: [%s], previous: [%s]\", f, s.Frequency))\n\n\ts.Frequency = f\n\ts.ParsedAt = time.Now()\n\n\tif err := repos.Syndication.Update(ctx, s); err != nil {\n\t\treturn urls, err\n\t}\n\n\treturn urls, 
nil\n}", "func addNewFeed(ctx *web.Context) string {\n\turl := ctx.Params[\"url\"]\n\tsource, err := loadFeed(url)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\tlock.Lock()\n\tfolders[\"uncategorized\"] = append(folders[\"uncategorized\"], source)\n\tlock.Unlock()\n\n\tctx.Redirect(303, \"/\")\n\treturn \"\"\n}", "func (c *CrawlerState) AddURL(url string) bool {\n\tc.Lock()\n\tif _, ok := c.urlMap[url]; ok {\n\t\t// URL already present. Return false indicating the new url was already present\n\t\tc.Unlock()\n\t\treturn false\n\t}\n\tc.urlMap[url] = struct{}{}\n\tc.seenURLCount++\n\tc.urls = append(c.urls, url)\n\tc.Unlock()\n\treturn true\n}", "func (i *Index) AddSrc(src Src) bool {\n\tfor _, s := range i.Srcs {\n\t\tif s.SrcID == src.SrcID {\n\t\t\treturn false\n\t\t}\n\t}\n\ti.Srcs = append(i.Srcs, src)\n\treturn true\n}", "func (tr *TestRunner) Add(addrs ...*net.UDPAddr) {\n\ttr.mutex.Lock()\n\tdefer tr.mutex.Unlock()\n\ttr.targets = append(tr.targets, addrs...)\n}", "func (e *Errors) Add(err error) bool {\n\tif err != nil {\n\t\te.lock.Lock()\n\t\tdefer e.lock.Unlock()\n\t\te.errors = append(e.errors, err)\n\t\treturn true\n\t}\n\treturn false\n}", "func (m *Metadata) Addrs() []string {\n\taddrs := make([]string, 0, len(m.addrs))\n\tfor addr := range m.addrs {\n\t\taddrs = append(addrs, addr)\n\t}\n\treturn addrs\n}", "func (e *ValidationError) Add(s string) {\n\te.ErrorList = append(e.ErrorList, s)\n}", "func (t *targetBuilder) addSrcs(srcs *treeset.Set) *targetBuilder {\n\tit := srcs.Iterator()\n\tfor it.Next() {\n\t\tt.srcs.Add(it.Value().(string))\n\t}\n\treturn t\n}", "func (t *AuroraTask) AddURIs(extract bool, cache bool, values ...string) *AuroraTask {\n\tfor _, value := range values {\n\t\tt.task.MesosFetcherUris = append(\n\t\t\tt.task.MesosFetcherUris,\n\t\t\t&aurora.MesosFetcherURI{Value: value, Extract: &extract, Cache: &cache})\n\t}\n\treturn t\n}", "func (ps *PeerStore) Addrs() []string {\n\tps.lock.RLock()\n\tdefer 
ps.lock.RUnlock()\n\taddrs := make([]string, 0)\n\tfor addr := range ps.peers {\n\t\taddrs = append(addrs, addr)\n\t}\n\treturn addrs\n}", "func newURLs(isRaw bool, versionID string, isAllVersions bool, sources ...string) ([]*url.URL, error) {\n\tvar urls []*url.URL\n\tfor _, src := range sources {\n\t\tsrcurl, err := url.New(src, url.WithRaw(isRaw), url.WithVersion(versionID),\n\t\t\turl.WithAllVersions(isAllVersions))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := checkVersinoningURLRemote(srcurl); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\turls = append(urls, srcurl)\n\t}\n\treturn urls, nil\n}", "func (r *Paths) Add(url string, handler interface{}) error {\n\tif handler == nil {\n\t\treturn errors.New(\"nil handler\")\n\t}\n\tparts := splitter(url) // нормализуем путь и разбиваем его на части\n\t// проверяем, что количество получившихся частей не превышает максимально\n\t// поддерживаемое количество\n\tlevel := uint16(len(parts)) // всего элементов пути\n\tif level > (1<<15 - 1) {\n\t\treturn fmt.Errorf(\"path parts overflow: %d\", len(parts))\n\t}\n\t// считаем количество параметров в определении пути\n\tvar params uint16\n\tfor i, value := range parts {\n\t\tif strings.HasPrefix(value, NamedParamFlag) {\n\t\t\tparams++ // увеличиваем счетчик параметров\n\t\t} else if strings.HasPrefix(value, CatchAllParamFlag) {\n\t\t\t// такой параметр должен быть самым последним в определении путей\n\t\t\tif uint16(i) != level-1 {\n\t\t\t\treturn errors.New(\"catch-all parameter must be last\")\n\t\t\t}\n\t\t\tparams |= 1 << 15 // взводим флаг динамического параметра\n\t\t\t// запоминаем позицию самого раннего встреченного динамического\n\t\t\t// параметра во всех добавленных путях\n\t\t\tif r.catchAll == 0 || r.catchAll > level {\n\t\t\t\tr.catchAll = level\n\t\t\t}\n\t\t}\n\t}\n\t// если в пути нет параметров, то добавляем в статические обработчики\n\tif params == 0 {\n\t\tif r.static == nil {\n\t\t\tr.static = 
make(map[string]interface{})\n\t\t}\n\t\tr.static[strings.Join(parts, PathDelimeter)] = handler\n\t\treturn nil\n\t}\n\t// запоминаем максимальное количество элементов пути во всех определениях\n\tif r.maxParts < level {\n\t\tr.maxParts = level\n\t}\n\t// инициализируем динамические пути, если не сделали этого раньше\n\tif r.fields == nil {\n\t\tr.fields = make(map[uint16]records)\n\t}\n\t// добавляем в массив обработчиков с таким же количеством параметров\n\tr.fields[level] = append(r.fields[level], &record{params, parts, handler})\n\tsort.Stable(r.fields[level]) // сортируем по количеству параметров\n\treturn nil\n}", "func (errors *Errors) Add(u uint64) uint64 {\n\terrCount := errors.counter.Add(u)\n\terrors.checkMaxError(errCount)\n\treturn errCount\n}", "func (l *errList) Push(pc uintptr, err error, file string, line int) {\n\titem := &Node{\n\t\tPC: pc,\n\t\tErr: err,\n\t\tFile: file,\n\t\tLine: line,\n\t}\n\n\tif l.head == nil {\n\t\tl.head = item\n\t}\n\n\tif l.tail != nil {\n\t\tl.tail.next = item\n\t}\n\n\tl.tail = item\n\tl.len++\n}", "func (sdr *CSdRegistry) AddSources(vt *vtree.Vtree) {\n\tC.sd_registry_add_sources(sdr.sdr, node(vt))\n}", "func (g *Graph) Add(addr *url.URL) {\n\ta := *addr\n\tsum := urlsum(a)\n\tg.Lock()\n\tdefer g.Unlock()\n\tif _, ok := g.nodes[sum]; !ok {\n\t\tg.nodes[sum] = &Page{\n\t\t\tweight: 0,\n\t\t\toutbound: 0,\n\t\t\taddr: a,\n\t\t}\n\t}\n}", "func (s *Scraper) addRequest(rt *requestTracker) {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\ts.requests = append(s.requests, rt)\n}", "func (r *Robots) add(rule *Rule) {\n\tif rule.Length > 0 {\n\t\tr.Rules = append(r.Rules, rule)\n\t}\n}", "func (e *ErrorList) Push(err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\n\tswitch v := err.(type) {\n\tcase *ErrorList:\n\t\te.errs = append(e.errs, v.errs...)\n\tdefault:\n\t\te.errs = append(e.errs, err)\n\t}\n}", "func (s *Service) AddURL(url string) error {\n\tindex := s.indexOfURL(url)\n\n\tif index != -1 {\n\t\treturn fmt.Errorf(\"URL 
'%s' is already registered\", url)\n\t}\n\n\ts.URLs = append(s.URLs, url)\n\treturn nil\n}", "func (collection *RemoteRepoCollection) Add(repo *RemoteRepo) error {\n\tfor _, r := range collection.list {\n\t\tif r.Name == repo.Name {\n\t\t\treturn fmt.Errorf(\"mirror with name %s already exists\", repo.Name)\n\t\t}\n\t}\n\n\terr := collection.Update(repo)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcollection.list = append(collection.list, repo)\n\treturn nil\n}", "func (be *Batch) Add(errs ...error) {\n\tfor _, err := range errs {\n\t\tif err == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar batch Batch\n\t\tif errors.As(err, &batch) {\n\t\t\tbe.addBatch(batch)\n\t\t} else {\n\t\t\tbe.errors = append(be.errors, err)\n\t\t}\n\t}\n}", "func (r *URef) AddSrc(src SrcItem) bool {\n\tfor i, s := range r.Srcs {\n\t\tif s.EqualKey(src) {\n\t\t\tif s.Equal(src) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t// Update mutable attributes.\n\t\t\tr.Srcs[i] = src\n\t\t\treturn true\n\t\t}\n\t}\n\tr.Srcs = append(r.Srcs, src)\n\treturn true\n}", "func (s *basicService) PostURLs(urls []string) ([]crawl.Result, error) {\n\tif len(urls) == 0 {\n\t\treturn nil, ErrEmptyURLs\n\t}\n\tcrawlResult := []crawl.Result{}\n\n\turlLen := len(urls)\n\n\tjobs := make(chan string, urlLen)\n\tjobResults := make(chan jobResult, urlLen)\n\n\tworkers := urlLen\n\tif workers > s.maxWorkers {\n\t\tworkers = s.maxWorkers\n\t}\n\n\tfor w := 1; w <= workers; w++ {\n\t\tgo s.worker(jobs, jobResults)\n\t}\n\n\tfor _, url := range urls {\n\t\tjobs <- url\n\t}\n\tclose(jobs)\n\n\tfor i := 1; i <= urlLen; i++ {\n\t\tjobResult := <-jobResults\n\t\tif jobResult.err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tcrawlResult = append(crawlResult, jobResult.result)\n\t}\n\n\treturn crawlResult, nil\n}", "func ParseURLs(content string) []string { return parseURLsMax(content, -1) }", "func (list *ErrorList) Collect(args ...interface{}) {\n\tfor _, a := range args {\n\t\tif err, _ := a.(error); err != nil {\n\t\t\t*list = append(*list, 
err)\n\t\t}\n\t}\n}", "func (m *Mux) Add(ds *discordgo.Session, dm *discordgo.Message, ctx *Context) {\n\tif !authorized(dm) {\n\t\tds.ChannelMessageSend(dm.ChannelID, \"Only the bot owner can do that.\")\n\t\treturn\n\t}\n\n\t//http://code.9front.org/hg/plan9front/rss-log\n\tresp := \"```\\n\"\n\t// URL to feed should be last item\n\turl := ctx.Fields[len(ctx.Fields) -1]\n\tfmt.Println(\"Proposed addition for: \", url)\n\t\n\tfor _, v := range Config.Feeds {\n\t\t// this is bad matching, can't have two bitbucket url's?\n\t\tif strings.Contains(url, v.Feed.UpdateURL) {\n\t\t\t//fmt.Println(url)\n\t\t\t//fmt.Println(v.Link)\n\t\t\tresp += \"Denied! Feed already subscribed to.\"\n\t\t\tresp += \"```\\n\"\n\t\t\tds.ChannelMessageSend(dm.ChannelID, resp)\n\t\t\treturn\n\t\t}\n\t}\n\t\n\tfeed, err := rss.Fetch(url)\n\n\tif err != nil {\n\t\tfmt.Println(\"Error in reading RSS feed, see: x/mux/commits.go\")\n\t\tfmt.Printf(\"%s\\n\\n\", err)\n\t\tresp += \"Denied! Could not parse feed.\"\n\t\tresp += \"```\\n\"\n\t\tds.ChannelMessageSend(dm.ChannelID, resp)\n\t\treturn\n\t}\n\t\n\t// Might not be thread safe\n\tvar tmpFeed Feed\n\ttmpFeed.Feed = *feed\n\t// Maybe make the size here a Config variable\n\ttmpFeed.Recent = make([]string, 3)\n\tConfig.Feeds = append(Config.Feeds, tmpFeed)\n\tresp += \"Added.\"\n\t\n\tresp += \"```\\n\"\n\tds.ChannelMessageSend(dm.ChannelID, resp)\n\n\treturn\n}", "func (c *cache) add(stories []item) {\n\tsize := c.curSize\n\tfor _, story := range stories {\n\t\tfound := false\n\t\tfor i := 0; i < size; i++ {\n\t\t\tif c.stories[i].id == story.id {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tif c.curSize < c.maxSize {\n\t\t\t\tc.mutx.Lock()\n\t\t\t\tc.stories[c.curSize] = story\n\t\t\t\tc.curSize++\n\t\t\t\tc.mutx.Unlock()\n\t\t\t}\n\t\t\tif c.curSize >= c.maxSize {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (p *Manager) AddFromURL(purl string) {\n\tproxy := p.parseURL(purl)\n\tp.Add(proxy)\n}", "func 
CrawlFromSource(w http.ResponseWriter, r *http.Request) {\n\tsubBuf := new(bytes.Buffer)\n\tappConfig := config.ReadConfig()\n\tpubSubConfig := appConfig.PubSubConfig\n\n\terrGettingSrc := subscriber.PullCrawlFromSourceMsgs(subBuf, pubSubConfig.ProjectID, pubSubConfig.Topics.UpsertLink)\n\tif errGettingSrc != nil {\n\t\tfmt.Println(\"Epic failure, you should probably look into it: \", errGettingSrc)\n\t}\n\n\tfmt.Println(\"LINKS PUBLISHED IN GOOGLE CLOUD PLATFORM'S PUB/SUB `upsert_link` TASK\")\n}", "func (ucc *UpdateClientConfig) Add(url string) error {\n\tparts := repoRegexp.FindStringSubmatch(url)\n\tif len(parts) != 6 {\n\t\treturn ErrorsUCInvalidURL\n\t}\n\n\tvar err error\n\tif !ucc.exist() {\n\t\terr = ucc.Init()\n\t} else {\n\t\terr = ucc.Load()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, repo := range ucc.Repos {\n\t\tif repo == url {\n\t\t\treturn ErrorsUCRepoExist\n\t\t}\n\t}\n\tucc.Repos = append(ucc.Repos, url)\n\n\treturn ucc.save()\n}", "func Addrs(addrs []string) ([]*net.TCPAddr, error) {\n\tnetAddrs := make([]*net.TCPAddr, 0, len(addrs))\n\tnErrs := 0\n\tfor _, a := range addrs {\n\t\tnetAddr, err := net.ResolveTCPAddr(\"tcp4\", a)\n\t\tif err != nil {\n\t\t\tnErrs++\n\t\t\tif nErrs == len(addrs) {\n\t\t\t\t// bail if none of the addrs could be parsed\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tnetAddrs = append(netAddrs, netAddr)\n\t}\n\treturn netAddrs, nil\n}", "func (p *Pipeline) Add(nodes ...Node) {\n\tfor _, node := range nodes {\n\t\tif l := len(p.nodes); (l == 0) || !p.nodes[l-1].TryMerge(node) {\n\t\t\tp.nodes = append(p.nodes, node)\n\t\t}\n\t}\n}", "func (p *Pipeline) Add(nodes ...Node) {\n\tfor _, node := range nodes {\n\t\tif l := len(p.nodes); (l == 0) || !p.nodes[l-1].TryMerge(node) {\n\t\t\tp.nodes = append(p.nodes, node)\n\t\t}\n\t}\n}", "func (e *RateLimitErrorLogging) AddEvents(ctx context.Context, points []*event.Event, next Sink) error {\n\terr := next.AddEvents(ctx, points)\n\tif err != nil 
{\n\t\tnow := time.Now()\n\t\tlastLogTimeNs := atomic.LoadInt64(&e.lastLogTimeNs)\n\t\tsinceLastLogNs := now.UnixNano() - lastLogTimeNs\n\t\tif sinceLastLogNs > e.LogThrottle.Nanoseconds() {\n\t\t\tnowUnixNs := now.UnixNano()\n\t\t\tif atomic.CompareAndSwapInt64(&e.lastLogTimeNs, lastLogTimeNs, nowUnixNs) {\n\t\t\t\te.Callback(err)\n\t\t\t}\n\t\t}\n\t}\n\treturn err\n}", "func (j *AuroraJob) AddURIs(extract bool, cache bool, values ...string) Job {\n\tfor _, value := range values {\n\t\tj.jobConfig.TaskConfig.MesosFetcherUris = append(j.jobConfig.TaskConfig.MesosFetcherUris,\n\t\t\t&aurora.MesosFetcherURI{Value: value, Extract: &extract, Cache: &cache})\n\t}\n\treturn j\n}", "func (s *MemStateStore) Add(state, url string) error {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\tif state == \"\" {\n\t\treturn fmt.Errorf(\"State argument not provided\")\n\t}\n\n\ts.states[state] = url\n\n\treturn nil\n}", "func (ucc *UpdateClientConfig) Add(url string) error {\n\tif url == \"\" {\n\t\treturn ErrorsUCEmptyURL\n\t}\n\n\tvar err error\n\tif !ucc.exist() {\n\t\terr = ucc.Init()\n\t} else {\n\t\terr = ucc.Load()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, repo := range ucc.Repos {\n\t\tif repo == url {\n\t\t\treturn ErrorsUCRepoExist\n\t\t}\n\t}\n\tucc.Repos = append(ucc.Repos, url)\n\n\treturn ucc.save()\n}", "func (c *concurrentStorage) add(u url.URL) (bool) {\n\tc.Lock()\n\tdefer c.Unlock()\n\tif _, ok := c.urls[u]; ok{\n\t\treturn false\n\t}\n\tc.urls[u] = true\n\tc.urlsSize++\n\treturn true\n}", "func visit(url string) ([]string, error) {\n\tvar links []string\n\n\tif !urlRegex.MatchString(url) {\n\t\treturn links, fmt.Errorf(\"Not a valid url to visit : %s\", url)\n\t}\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn links, err\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tresp.Body.Close()\n\t\treturn links, fmt.Errorf(\"Received status code %s when fetching url %s\", resp.Status, url)\n\t}\n\n\tdoc, err := 
html.Parse(resp.Body)\n\tresp.Body.Close()\n\tif err != nil {\n\t\treturn links, err\n\t}\n\n\tlinks = traverse(doc, nil)\n\treturn links, nil\n}", "func ParseHTMLFromSource(r io.Reader) ([]HTMLhrefEntries, error) {\n\thtmlReader := html.NewTokenizer(r)\n\tvar found = []HTMLhrefEntries{}\n\tfound, err := findHrefs(found, \"\", htmlReader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn found, nil\n}", "func (h *Handler) Add(from, to string, code Code) *Handler {\n\th.Redirects[normalise(from)] = redirect{\n\t\tURL: to,\n\t\tCode: code,\n\t}\n\treturn h\n}", "func Addrs(addrs ...string) Option {\n\treturn func(o *Options) {\n\t\to.Addrs = addrs\n\t}\n}", "func Addrs(addrs ...string) Option {\n\treturn func(o *Options) {\n\t\to.Addrs = addrs\n\t}\n}", "func Addrs(addrs ...string) Option {\n\treturn func(o *Options) {\n\t\to.Addrs = addrs\n\t}\n}", "func Addrs(addrs ...string) Option {\n\treturn func(o *Options) {\n\t\to.Addrs = addrs\n\t}\n}", "func Addrs(addrs ...string) Option {\n\treturn func(o *Options) {\n\t\to.Addrs = addrs\n\t}\n}", "func appendSources(srcs [][]byte) YAMLOption {\n\treturn optionFunc(func(c *config) {\n\t\tfor _, src := range srcs {\n\t\t\tc.sources = append(c.sources, source{bytes: src})\n\t\t}\n\t})\n}", "func AddLastErr(source, msg string) {\n\tlastErrorVec.WithLabelValues(source, msg).Set(float64(time.Now().Unix()))\n\tlastErrorCountVec.WithLabelValues(source).Inc()\n\n\tif atomic.AddInt64(&lastErrorCount, int64(1))%MaxLastErrorCount == 0 { // clean\n\t\tResetLastErrors()\n\t}\n}", "func (d *Sources) Add(ctx context.Context, source chronograf.Source) (chronograf.Source, error) {\n\tgenID, err := d.IDs.Generate()\n\tif err != nil {\n\t\td.Logger.\n\t\t\tWithField(\"component\", \"source\").\n\t\t\tError(\"Unable to generate ID\")\n\t\treturn chronograf.Source{}, err\n\t}\n\n\tid, err := strconv.Atoi(genID)\n\tif err != nil {\n\t\td.Logger.\n\t\t\tWithField(\"component\", \"source\").\n\t\t\tError(\"Unable to convert ID\")\n\t\treturn 
chronograf.Source{}, err\n\t}\n\n\tsource.ID = id\n\n\tfile := sourceFile(d.Dir, source)\n\tif err = d.Create(file, source); err != nil {\n\t\tif err == chronograf.ErrSourceInvalid {\n\t\t\td.Logger.\n\t\t\t\tWithField(\"component\", \"source\").\n\t\t\t\tWithField(\"name\", file).\n\t\t\t\tError(\"Invalid Source: \", err)\n\t\t} else {\n\t\t\td.Logger.\n\t\t\t\tWithField(\"component\", \"source\").\n\t\t\t\tWithField(\"name\", file).\n\t\t\t\tError(\"Unable to write source:\", err)\n\t\t}\n\t\treturn chronograf.Source{}, err\n\t}\n\treturn source, nil\n}", "func (c *Controller) AddNewURLEntry(urlEntry *URLEntry) {\n\tselect {\n\tcase <-c.ctx.Done():\n\t\treturn\n\tcase c.subTree <- urlEntry:\n\t}\n}", "func (sr *ScraperRegistry) Add(s *types.Scraper) {\n\t// Initialize the Prometheus metric pointer\n\ts.InitializeMetrics()\n\t// Append initialized scraper to the slice of all scrapers\n\tsr.Scrapers = append(sr.Scrapers, s)\n}", "func (y *YggdrasilAdminAPI) AddPeer(uri string) ([]string, error) {\n\treq := fmt.Sprintf(`{\"keepalive\":true, \"request\":\"addpeer\", \"uri\":\"%s\"}`, uri)\n\tresp, err := y.execReq(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfmt.Println(string(resp.Response))\n\tadded := struct {\n\t\tAdded []string `json:\"added\"`\n\t}{}\n\tif err := json.Unmarshal(resp.Response, &added); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn added.Added, nil\n}", "func (s internalSelector) Add(reqs ...Requirement) Selector {\n\tret := make(internalSelector, 0, len(s)+len(reqs))\n\tret = append(ret, s...)\n\tret = append(ret, reqs...)\n\tsort.Sort(ByKey(ret))\n\treturn ret\n}", "func main() {\n\tsource := make(chan interface{}, 0)\n\n\t// flag.Parse()\n\tfmt.Fprintln(os.Stderr, \"starting up crawler\")\n\n\tresults := output.NewStorage()\n\n\tsteps := []pipeline.Handler{\n\t\tprocess.FetchUrl,\n\t\tprocess.ParseHTML,\n\t\tprocess.CompileNodeInfo,\n\t\tprocess.FilterLinks,\n\t}\n\n\tunique := pipeline.NewPipeline(source, 1, results.IsUnique, 
process.MaxDepth(5))\n\trest := pipeline.NewPipeline(unique.Output(), 4, steps...)\n\n\tpending := 0\n\tfor _, arg := range os.Args[1:] {\n\t\tURL, err := url.Parse(arg)\n\t\tif err == nil && (URL.Scheme == \"http\" || URL.Scheme == \"https\") {\n\t\t\tpending++\n\t\t\tif URL.Path == \"\" {\n\t\t\t\tURL.Path = \"/\"\n\t\t\t}\n\t\t\tsource <- URL.String()\n\t\t\tbreak\n\t\t}\n\t}\n\tif pending == 0 {\n\t\tfmt.Fprintln(os.Stderr, \"a valid http url was not provided\")\n\t\treturn\n\t}\n\n\thalt := make(chan os.Signal, 0)\n\tsignal.Notify(halt, os.Interrupt)\n\tfinish := make(chan interface{}, 0)\n\tgo func() {\n\t\t<-halt\n\t\tfmt.Fprintln(os.Stderr, \"waiting for current jobs to finish...\")\n\t\tclose(finish)\n\t}()\n\n\tfor pending > 0 {\n\t\tselect {\n\t\tcase <-unique.Err():\n\t\t\t// if we already have visited the link, we don't care about the error\n\t\tcase err := <-rest.Err():\n\t\t\t// other errors cause the program to exit, these could be closed connections etc.\n\t\t\tfmt.Fprintln(os.Stderr, \"unable to continue: \", err)\n\t\t\treturn\n\t\tcase out, open := <-rest.Output():\n\t\t\tif !open {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tnewLinks := out.([]string)\n\t\t\tpending += len(newLinks)\n\t\t\t// we don't want to block the pipeline so we do this in a goroutine\n\t\t\tgo func() {\n\t\t\t\tfor _, link := range newLinks {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-finish:\n\t\t\t\t\t\tpending--\n\t\t\t\t\tcase source <- link:\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t\tpending--\n\t}\n\tclose(source)\n\terr := results.Dump(\"dot\", os.Stdout)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Unable to write:\", err)\n\t}\n}", "func (w *Watcher) Add(sr *ldap.SearchRequest, c Checker) (Watch, error) {\n\twatch := Watch{\n\t\twatcher: w,\n\t\tsearchRequest: sr,\n\t\tchecker: c,\n\t\tdone: make(chan struct{}),\n\t}\n\tw.watches = append(w.watches, &watch)\n\treturn watch, nil\n}", "func (m *EdiscoverySearch) SetAdditionalSources(value []DataSourceable)() {\n 
m.additionalSources = value\n}", "func (h *History) Add(action []*Action) {\n\th.actions = append(h.actions, action)\n\th.head++\n}", "func (r *SyncParseQueue) AddSyncUrl(url string, dest string) {\n\n\tr.syncUrlsMutex.Lock()\n\tdefer r.syncUrlsMutex.Unlock()\n\tdefer func() {\n\t\tbelogs.Debug(\"AddSyncUrl():defer rpQueue.SyncingAndParsingCount:\", atomic.LoadInt64(&r.SyncingAndParsingCount))\n\t\tif atomic.LoadInt64(&r.SyncingAndParsingCount) == 0 {\n\t\t\tr.SyncAndParseEndChan <- SyncAndParseEndChan{}\n\t\t}\n\t}()\n\tbelogs.Debug(\"AddSyncUrl():url:\", url, \" dest:\", dest)\n\tif len(url) == 0 || len(dest) == 0 {\n\t\tbelogs.Error(\"AddSyncUrl():len(url) == 0 || len(dest) == 0, before SyncingAndParsingCount-1:\", atomic.LoadInt64(&r.SyncingAndParsingCount))\n\t\tatomic.AddInt64(&r.SyncingAndParsingCount, -1)\n\t\tbelogs.Debug(\"AddSyncUrl():len(url) == 0 || len(dest) == 0, after SyncingAndParsingCount-1:\", atomic.LoadInt64(&r.SyncingAndParsingCount))\n\t\treturn\n\t}\n\n\te := r.syncUrls.Front()\n\tfor e != nil {\n\t\tif strings.Contains(url, e.Value.(SyncChan).Url) {\n\t\t\tbelogs.Debug(\"AddSyncUrl():have existed:\", url, \" in \", e.Value.(SyncChan).Url,\n\t\t\t\t\" len(r.SyncChan):\", len(r.SyncChan))\n\t\t\tbelogs.Debug(\"AddSyncUrl():have existed, before SyncingAndParsingCount-1:\", atomic.LoadInt64(&r.SyncingAndParsingCount))\n\t\t\tatomic.AddInt64(&r.SyncingAndParsingCount, -1)\n\t\t\tbelogs.Debug(\"AddSyncUrl():have existed, after SyncingAndParsingCount-1:\", atomic.LoadInt64(&r.SyncingAndParsingCount))\n\t\t\treturn\n\t\t}\n\t\te = e.Next()\n\t}\n\n\tsyncChan := SyncChan{Url: url, Dest: dest}\n\te = r.syncUrls.PushBack(syncChan)\n\tbelogs.Info(\"AddSyncUrl():will send to syncChan:\", syncChan,\n\t\t\" len(syncUrls):\", r.syncUrls.Len())\n\tr.SyncChan <- syncChan\n\tbelogs.Debug(\"AddSyncUrl():after send to syncChan:\", syncChan,\n\t\t\" syncUrls:\", r.syncUrls)\n\treturn\n}", "func CrawlEachURLFound(url string, fetcher Fetcher, ch chan []string) 
{\n\tbody, urls, err := fetcher.Fetch(url)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t} else {\n\t\tfmt.Printf(\"Found: %s %q\\n\", url, body)\n\t}\n\tch <- urls\n}", "func (m *MockDiscovery) Addrs(id peer.ID) []ma.Multiaddr {\n\treturn m.Peers[id]\n}", "func GetTotalURLs() (total int, err error) {\n\tclient := getClient()\n\tv, err := client.Get(\"TotalUrls\").Result()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\ttotal, err = strconv.Atoi(v)\n\n\treturn\n}", "func AddHTTP(src string) string {\n\tre := regexp.MustCompile(\"^https?://\")\n\tif re.MatchString(src) {\n\t\treturn src\n\t}\n\n\treturn fmt.Sprintf(\"http://%s\", src)\n}", "func (r *HelpResolver) Add(target string) error {\n\tr.Lock()\n\tdefer r.Unlock()\n\n\tfor _, addr := range r.addrs {\n\t\tif addr == target {\n\t\t\treturn errors.New(\"target is existed\")\n\t\t}\n\t}\n\n\tupdates := []*naming.Update{&naming.Update{Op: naming.Add, Addr: target}}\n\tr.watcher.updatesChan <- updates\n\treturn nil\n}", "func (mr *MockHostMockRecorder) Addrs() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Addrs\", reflect.TypeOf((*MockHost)(nil).Addrs))\n}", "func (sc *SourceCreate) AddChildren(s ...*Source) *SourceCreate {\n\tids := make([]int, len(s))\n\tfor i := range s {\n\t\tids[i] = s[i].ID\n\t}\n\treturn sc.AddChildIDs(ids...)\n}", "func (prCtx *ParseResultContext) addParsedResults(source string, newResults []*ParseResult) {\n\tfor _, consistentResult := range prCtx.consistent {\n\t\tconsistentResult.processed = false\n\t}\n\n\tfor _, pr := range newResults {\n\t\tconsistentPr, ok := prCtx.consistent[pr.Id]\n\t\tif !ok {\n\t\t\t// This either already inconsistent result or an extra\n\t\t\t// this batch has an extra item, save it as a diff with (only so far) this source\n\t\t\tprCtx.addInconsistentResult(pr.Id, pr, source)\n\t\t\tcontinue\n\t\t}\n\t\tconsistentPr.processed = true\n\n\t\tok = diffChecks(consistentPr.CheckResult, pr.CheckResult) && 
diffRemediations(consistentPr.Remediations, pr.Remediations)\n\t\tif !ok {\n\t\t\t// remove the check from consistent, add it to diff, but TWICE\n\t\t\t// once for the sources from the consistent list and once for the new source\n\t\t\tprCtx.addInconsistentResult(pr.Id, &consistentPr.ParseResult, consistentPr.sources...)\n\t\t\tdelete(prCtx.consistent, pr.Id)\n\t\t\tprCtx.addInconsistentResult(pr.Id, pr, source)\n\t\t\tcontinue\n\t\t}\n\n\t\t// OK, same as a previous result in consistent, just append the source\n\t\tconsistentPr.sources = append(consistentPr.sources, source)\n\t}\n\n\t// Make sure all previously consistent items were touched, IOW we didn't receive\n\t// fewer items by moving all previously untouched items to the inconsistent list\n\tfor _, consistentResult := range prCtx.consistent {\n\t\tif consistentResult.processed == true {\n\t\t\tcontinue\n\t\t}\n\t\t// Deleting an item from a map while iterating over it is safe, see https://golang.org/doc/effective_go.html#for\n\t\tprCtx.addInconsistentResult(consistentResult.Id, &consistentResult.ParseResult, consistentResult.sources...)\n\t\tdelete(prCtx.consistent, consistentResult.Id)\n\t}\n}", "func (c *Closer) Add(closers ...io.Closer) {\n\tc.closers = append(c.closers, closers...)\n}", "func (h CRConfigHistoryThreadsafe) Add(i *CRConfigStat) {\n\th.m.Lock()\n\tdefer h.m.Unlock()\n\n\tif *h.length != 0 {\n\t\tlast := (*h.hist)[(*h.pos-1)%*h.limit]\n\t\tdatesEqual := (i.Stats.DateUnixSeconds == nil && last.Stats.DateUnixSeconds == nil) || (i.Stats.DateUnixSeconds != nil && last.Stats.DateUnixSeconds != nil && *i.Stats.DateUnixSeconds == *last.Stats.DateUnixSeconds)\n\t\tcdnsEqual := (i.Stats.CDNName == nil && last.Stats.CDNName == nil) || (i.Stats.CDNName != nil && last.Stats.CDNName != nil && *i.Stats.CDNName == *last.Stats.CDNName)\n\t\treqAddrsEqual := i.ReqAddr == last.ReqAddr\n\t\tif reqAddrsEqual && datesEqual && cdnsEqual {\n\t\t\treturn\n\t\t}\n\t}\n\n\t(*h.hist)[*h.pos] = *i\n\t*h.pos = (*h.pos 
+ 1) % *h.limit\n\tif *h.length < *h.limit {\n\t\t*h.length++\n\t}\n}", "func (ec *ErrorCollection) addError(err error) {\n\t\n\tif err == nil {\n\t\treturn\n\t}\n\t\n\tif ec.DuplicatationOptions != AllowDuplicates {\n\t\t//Don't append if err is a duplicate\n\t\tfor i, containedErr := range ec.Errors {\n\n\t\t\tvar je1 *JE\n\t\t\tvar je2 *JE\n\n\t\t\ts, ok := err.(JE)\n\t\t\tif ok {\n\t\t\t\tje1 = &s\n\t\t\t} else {\n\t\t\t\ts, ok := err.(*JE)\n\t\t\t\tif ok {\n\t\t\t\t\tje1 = s\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t_, ok = containedErr.(JE)\n\t\t\tif ok {\n\t\t\t\tt := (ec.Errors[i]).(JE)\n\t\t\t\tje2 = &t\n\t\t\t} else {\n\t\t\t\t_, ok := containedErr.(*JE)\n\t\t\t\tif ok {\n\t\t\t\t\tje2 = (ec.Errors[i]).(*JE)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif je1 != nil && je2 != nil {\n\t\t\t\t//Don't use Reflection since both are JE structs\n\t\t\t\tif (*je1).Code == (*je2).Code && (*je1).Domain == (*je2).Domain && (*je1).error == (*je2).error && (*je1).message == (*je2).message {\n\t\t\t\t\tif ec.DuplicatationOptions == RejectDuplicates {\n\t\t\t\t\t\tif (*je1).time.Equal((*je2).time) {\n\t\t\t\t\t\t\t//Both JE structs are 100% identical including timestamp\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\t//We don't care about timestamps\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t//Use Reflection\n\t\t\t\tif reflect.DeepEqual(containedErr, err) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tec.Errors = append(ec.Errors, err)\n}", "func (fs *FileSystemWatch) Add(ms ...string) error {\n\tif fs.notifier == nil {\n\t\tfs.files = append(fs.files, ms...)\n\t\treturn nil\n\t}\n\n\tfs.files = append(fs.files, ms...)\n\n\tfor _, file := range ms {\n\t\tif err := fs.notifier.Add(file); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (pm *PoolManager) Add(ScrapeHost, ScrapePath, cacheScriptPath string) Pool {\n\tp := Pool{\n\t\tScrapeHost: ScrapeHost,\n\t\tScrapePath: ScrapePath,\n\t\tCacheScriptPath: 
cacheScriptPath,\n\t}\n\tpm.Pools = append(pm.Pools, p)\n\treturn p\n}" ]
[ "0.5309802", "0.52734095", "0.52264124", "0.5196098", "0.51324165", "0.5123398", "0.5056944", "0.496427", "0.49161023", "0.48829758", "0.48795125", "0.4875261", "0.48127764", "0.48127764", "0.48080775", "0.47849753", "0.4765869", "0.47221217", "0.47214398", "0.47201884", "0.47096452", "0.46895826", "0.4675403", "0.46564314", "0.4654779", "0.4653746", "0.4650115", "0.4638114", "0.4633953", "0.46239084", "0.46201015", "0.4606775", "0.46022898", "0.45853668", "0.45792556", "0.45748436", "0.45697913", "0.45678234", "0.4567757", "0.45554778", "0.4555303", "0.4541003", "0.45368978", "0.45353812", "0.45289403", "0.45092964", "0.448991", "0.4480876", "0.44708228", "0.44641632", "0.4453543", "0.44437236", "0.44415545", "0.44315085", "0.44297802", "0.4421511", "0.44206128", "0.44184056", "0.44171345", "0.44154152", "0.44140923", "0.44140923", "0.4410008", "0.44088596", "0.44068736", "0.44067925", "0.4398198", "0.4389686", "0.43884915", "0.43754396", "0.43638223", "0.43638223", "0.43638223", "0.43638223", "0.43638223", "0.43544486", "0.43536243", "0.43451083", "0.43285382", "0.4322499", "0.43134764", "0.43131253", "0.4308513", "0.4306928", "0.42859128", "0.42849472", "0.4284435", "0.42826417", "0.4271841", "0.4271731", "0.42658633", "0.42613795", "0.42565775", "0.42537677", "0.42500865", "0.42478463", "0.4225554", "0.4220542", "0.42124784", "0.42116183" ]
0.7516235
0
Run launches the worker pool and blocks until they all finish.
func (c *Crawler) Run(numWorkers int) { // Here we create a new cancellable context to control goroutines and requests. // We can't just rely on the parent context because we want to shutdown goroutines if there's no activity for some time. // Closing the channel would introduce "send on closed chan" errors since channel consumers also produce new messages. ctx, cancel := context.WithCancel(c.ctx) c.wg.Add(numWorkers) for i := 0; i < numWorkers; i++ { go c.worker(ctx) } // Check every second if we're still actively crawling pages limit := 1.5 * c.client.Timeout.Seconds() for { if len(c.visitChan) == 0 && time.Since(c.lastActivity()).Seconds() > limit { break } select { case <-c.ctx.Done(): goto endfor case <-time.After(time.Second): } } endfor: cancel() // cancel goroutines and in-flight requests (if any) c.wg.Wait() // wait for shutdown close(c.visitChan) // close after we're done (not before) to prevent send on closed channel errors }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *Pool) Run() {\n\t// starting n number of workers\n\tfor i := 0; i < p.maxWorkers; i++ {\n\t\tworker := NewWorker(i+1, p.workerPool, true)\n\t\tworker.Start()\n\t\tp.workers = append(p.workers, worker)\n\t}\n\n\tgo p.dispatch()\n\n\tp.status = Started\n}", "func (p *Pool) Run(w Worker) {\n p.job <- w\n}", "func (p *Pool) Run() {\n\tstart := time.Now()\n\tfor i := 0; i < p.Concurrency; i++ {\n\t\tworker := NewWorker(p.Collector, i)\n\t\tworker.Start(&p.WaitGroup)\n\t}\n\n\tfor _, task := range p.Tasks {\n\t\tp.Collector <-task\n\t}\n\tclose(p.Collector)\n\n\tp.WaitGroup.Wait()\n\telapsed := time.Since(start)\n\tlog.Printf(\"took %s\\n\", elapsed)\n}", "func (w *Worker) run(){\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase f := <- w.task:\n\t\t\t\tif f == nil {\n\t\t\t\t\tw.pool.decRunning()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tf()\n\t\t\t\tw.pool.putWorker(w)\n\t\t\tcase args := <- w.args:\n\t\t\t\tif args == nil {\n\t\t\t\t\tw.pool.decRunning()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tw.pool.poolFunc(args)\n\t\t\t\tw.pool.putWorker(w)\n\t\t\t}\n\t\t}\n\t}()\n}", "func (p *Pool) Run() ([]string, map[string]error, error) {\n\tvar (\n\t\tok = []string{}\n\t\tfailures = map[string]error{}\n\t)\n\n\tfor w := 0; w < p.size; w++ {\n\t\tw := w\n\t\tgo p.newWorker(w)\n\t}\n\tclose(p.jobs)\n\tp.wg.Wait()\n\n\tclose(p.results)\n\tfor r := range p.results {\n\t\tif r.Err == nil {\n\t\t\tok = append(ok, r.Job.ID)\n\t\t} else {\n\t\t\tfailures[r.Job.ID] = r.Err\n\t\t}\n\t}\n\n\tif logrus.GetLevel() == logrus.DebugLevel {\n\t\tfor i, f := range failures {\n\t\t\tlogrus.Debugf(\"Pool[%s, %s: %s]\", p.id, i, f.Error())\n\t\t}\n\t}\n\n\treturn ok, failures, nil\n}", "func (m *Manager) run() {\n\tfor i := 0; i < m.workerPool.MaxWorker; i++ {\n\t\twID := i + 1\n\t\t//log.Printf(\"[workerPool] worker %d spawned\", wID)\n\t\tgo func(workerID int) {\n\t\t\tfor task := range m.workerPool.queuedTaskC {\n\t\t\t\tlog.Printf(\"[workerPool] worker %d is processing task\", 
wID)\n\t\t\t\ttask()\n\t\t\t\tlog.Printf(\"[workerPool] worker %d has finished processing task\", wID)\n\t\t\t}\n\t\t}(wID)\n\t}\n}", "func (d *Dispatcher) Run(){\n\t// Starting n number of workers\n\tfor i := 0; i < d.MaxWorkers; i++ {\n\t\tworker := NewWorker(d.WorkerPool)\n\t\tworker.ID = \"worker-\" + strconv.Itoa(i + 1)\n\t\td.Workers = append(d.Workers, worker)\n\t\tworker.Start()\n\t}\n\n\t// Start select job from JobQueue push to WorkerPool\n\tgo d.dispatch()\n}", "func (p *dynamicWorkerPool) Run(ctx context.Context, params StageParams) {\nstop:\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tbreak stop\n\t\tcase payloadIn, ok := <-params.Input():\n\t\t\tif !ok {\n\t\t\t\tbreak stop\n\t\t\t}\n\t\t\tvar token struct{}\n\t\t\tselect {\n\t\t\tcase token = <-p.tokenPool:\n\t\t\tcase <-ctx.Done():\n\t\t\t}\n\t\t\tgo func(payloadIn Payload, token struct{}) {\n\t\t\t\tdefer func() { p.tokenPool <- token }()\n\t\t\t\tpayloadOut, err := p.proc.Process(ctx, payloadIn)\n\t\t\t\tif err != nil {\n\t\t\t\t\twrappedErr := xerrors.Errorf(\"pipeline stage: %d : %w \", params.StageIndex(), err)\n\t\t\t\t\tmaybeEmitError(wrappedErr, params.Error())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif payloadOut == nil {\n\t\t\t\t\tpayloadIn.MarkAsProcessed()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tselect {\n\t\t\t\tcase params.Output() <- payloadOut:\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t}\n\t\t\t}(payloadIn, token)\n\t\t}\n\t}\n\tfor i := 0; i < cap(p.tokenPool); i++ {\n\t\t<-p.tokenPool\n\t}\n}", "func (m *Manager) Run(ctx context.Context) error {\n\t// start log broadcaster\n\t_ = utils.Pool.Submit(func() { m.logBroadcaster.run(ctx) })\n\n\t// initWorkloadStatus container\n\tif err := m.initWorkloadStatus(ctx); err != nil {\n\t\treturn err\n\t}\n\n\t// start status watcher\n\t_ = utils.Pool.Submit(func() { m.monitor(ctx) })\n\n\t// start health check\n\t_ = utils.Pool.Submit(func() { m.healthCheck(ctx) })\n\n\t// wait for signal\n\t<-ctx.Done()\n\tlog.WithFunc(\"Run\").Info(ctx, 
\"exiting\")\n\treturn nil\n}", "func (wq *queuedWorkerPool[T, U]) Run(input T, onComplete chan<- U) error {\n\tif wq.isShutdown.Load() {\n\t\treturn fmt.Errorf(\"WorkerPoolShutdown\")\n\t}\n\n\twq.queue.Enqueue(entry[T, U]{\n\t\tpayload: input,\n\t\tonComplete: onComplete,\n\t\tclose: false,\n\t})\n\n\treturn nil\n}", "func RunPool(number int, paths <-chan string) <-chan string {\n\tresults := make(chan string, 10)\n\n\tgo func() {\n\t\twg := new(sync.WaitGroup)\n\n\t\tfor i := 0; i < number; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo runWorker(paths, results, wg)\n\t\t}\n\n\t\twg.Wait()\n\n\t\tclose(results)\n\t}()\n\n\treturn results\n}", "func (pm *PipelineManager) Run(threadiness int, stopCh <-chan struct{}) error {\n\t// Start the informer factories to begin populating the informer caches\n\tlog.Info(\"[PipelineManager.Run] Starting...\")\n\n\t// Wait for the caches to be synced before starting workers\n\tlog.Info(\"[PipelineManager.Run] Waiting for informer caches to sync\")\n\n\tif ok := cache.WaitForCacheSync(stopCh, pm.deploymentSynced, pm.podSynced); !ok {\n\t\treturn fmt.Errorf(\"[PipelineManager.Run] failed to wait for caches to sync\")\n\t}\n\n\tlog.Info(\"[PipelineManager.Run] Starting workers\")\n\tgo func() {\n\t\t<-stopCh\n\t\tlog.Info(\"[PipelineManager] shutdown work queue\")\n\t\tpm.workqueue.ShutDown()\n\t}()\n\n\t// Launch two workers to process Pipeline resources\n\tfor i := 0; i < threadiness; i++ {\n\t\tgo wait.Until(pm.runWorker, time.Second, stopCh)\n\t}\n\n\tlog.Info(\"[PipelineManager.Run] Started workers\")\n\treturn nil\n}", "func (p *Pool) Run(t *tomb.Tomb) error {\n\treturn p.inner.run(t)\n}", "func (wp *WorkerPool) Start(){\n\tDebug(fmt.Sprintf(\"[%s] Starting pool with %d workers & que = %d\", wp.Name, len(wp.workerList), wp.buffSize))\n\n\t// start the worker channels\n\tfor i := 0; i < len(wp.workerList); i++ {\n\t\tgo wp.startWorker(wp.workerChan)\n\t}\n\n\t// send work to the channels\n\tgo func() {\n\t\tfor _, w := range wp.workerList 
{\n\t\t\twp.workerChan <- w\n\t\t}\n\t}()\n\n\tfor _, w := range wp.workerList {\n\t\t<- w.doneChan\n\t}\n\n\tDebug(fmt.Sprintf(\"[%s] workers working: %d\", wp.Name, len(wp.workerChan)))\n\tDebug(fmt.Sprintf(\"[%s] workers requested: %d\", wp.Name, len(wp.workerList)))\n}", "func (w *ThreadPoolWorker) run() {\n\n\tdefer func() {\n\t\t// Remove worker from workerMap\n\n\t\tw.pool.workerMapLock.Lock()\n\t\tdelete(w.pool.workerMap, w.id)\n\t\tw.pool.workerMapLock.Unlock()\n\t}()\n\n\tfor true {\n\n\t\t// Try to get the next task\n\n\t\ttask := w.pool.getTask()\n\n\t\t// Exit if there is not new task\n\n\t\tif task == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t_, isIdleTask := task.(*idleTask)\n\n\t\tif isIdleTask {\n\n\t\t\t// Register this worker as idle\n\n\t\t\tw.pool.workerMapLock.Lock()\n\t\t\tw.pool.workerIdleMap[w.id] = w\n\t\t\tw.pool.workerMapLock.Unlock()\n\t\t}\n\n\t\t// Run the task\n\n\t\tif err := task.Run(w.id); err != nil {\n\t\t\ttask.HandleError(err)\n\t\t}\n\n\t\tif isIdleTask {\n\t\t\tw.pool.workerMapLock.Lock()\n\t\t\tdelete(w.pool.workerIdleMap, w.id)\n\t\t\tw.pool.workerMapLock.Unlock()\n\t\t}\n\t}\n}", "func (d *Dispatcher) Run() {\n\tfor i := 0; i < d.maxWorkers; i++ {\n\t\tworker := NewWorker(d.workerPool)\n\t\tworker.Start()\n\t}\n\n\tgo d.dispatch()\n}", "func runPool(wg sync.WaitGroup, name string) {\n\tvar err error\n\n\t//set up queue of workers\n\tcluster, err := helpers.ReadConfig(name) //TODO - ugh, naming\n\tif err != nil {\n\t\t//log the bad, do the bad, this is bad\n\t}\n\n\tStandbyQueue = cluster.Nodes\n\n\t//this is how we do graceful shutdown, waiting to close channels until all workers goroutines have stopped\n\ttotalWorkers := len(StandbyQueue)\n\tstoppedWorkers := 0\n\n\tmessages := make(chan MessageData, len(StandbyQueue))\n\tstopped := make(chan struct{}, len(StandbyQueue))\n\tstopWorkers := make(chan struct{}, len(StandbyQueue))\n\n\tgo func() {\n\t\tdefer close(stopWorkers)\n\n\t\tdefer func() 
{\n\t\t\twg.Done()\n\t\t}()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase msg := <-messages:\n\t\t\t\tif msg.RawText == \"eagle has flopped\" {\n\t\t\t\t\t//do the next worker\n\t\t\t\t\tif len(StandbyQueue) > 0 {\n\t\t\t\t\t\tnext := CreateWorker(StandbyQueue[0], messages, stopped, stopWorkers)\n\t\t\t\t\t\tStandbyQueue = StandbyQueue[1:]\n\n\t\t\t\t\t\tnext.WorkerUp()\n\t\t\t\t\t} else {\n\t\t\t\t\t\t//error, something is wrong we have spun up too many\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Printf(\"%s: %s\\n\", msg.CommandName, msg.RawText)\n\t\t\t\t}\n\t\t\tcase <-stopped:\n\t\t\t\tfmt.Println(\"stopped message received\")\n\t\t\t\tstoppedWorkers++\n\t\t\t\tif stoppedWorkers == totalWorkers {\n\t\t\t\t\tfmt.Println(\"all workers have closed, ready for restart\")\n\t\t\t\t\t//close everything down\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t}()\n\n\t//run the first worker, wait for a response to run the rest\n\tfmt.Printf(\"starting worker: %s\\n\", StandbyQueue[0].Name)\n\tnworker := CreateWorker(StandbyQueue[0], messages, stopped, stopWorkers)\n\tStandbyQueue = StandbyQueue[1:]\n\tnworker.WorkerUp()\n\n\twg.Wait()\n\n\t// return \"restart\"\n\n}", "func (d *Data) Run() {\n\tgo func() {\n\t\tfor channelData := range d.channels {\n\t\t\tif err := d.pools.Invoke(channelData); err != nil {\n\t\t\t\tlog.Error(fmt.Sprintf(\"update data error(%s)\", err.Error()))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tfor {\n\t\t\tlog.Error(fmt.Sprintf(\"update run count(%d)\", d.pools.Running()))\n\t\t\tif d.pools.Running() <= 0 {\n\t\t\t\td.pools.Release()\n\t\t\t\td.done <- true\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\ttime.Sleep(3 * time.Second)\n\t\t}\n\t}()\n}", "func (w *ImageWorker) Run(ctx context.Context) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase img := <-w.in:\n\t\t\tif err := w.limit.Wait(ctx); err != nil {\n\t\t\t\tif !errors.Is(err, context.Canceled) {\n\t\t\t\t\tlog.Err(err).Send()\n\t\t\t\t}\n\t\t\t}\n\t\t\terr := 
w.imageDownload(img)\n\t\t\tif err != nil {\n\t\t\t\tlog.Err(err).Send()\n\t\t\t}\n\t\t\tw.done <- struct{}{}\n\t\t}\n\t}\n}", "func (g *Group) Run() {\n\tg.mu.Lock()\n\tg.runned = true\n\tg.ctx, g.stop = context.WithCancel(context.Background())\n\tfor _, w := range g.workers {\n\t\tg.wg.AddWithContext(g.ctx, w.Run)\n\t}\n\tg.mu.Unlock()\n}", "func (p *WorkerPool[T, R]) Start() {\n\tif p.taskChan == nil {\n\t\tp.taskChan = make(chan T)\n\t}\n\n\tif p.resChan == nil {\n\t\tvar zero R\n\t\tvar r interface{} = zero\n\t\tif _, ok := r.(None); !ok {\n\t\t\tp.resChan = make(chan R)\n\t\t}\n\t}\n\n\tfor i := 0; i < int(p.numWorkers); i++ {\n\t\tp.runAWorker()\n\t}\n}", "func (p *fixedWorkerPool) Run(ctx context.Context, params StageParams) {\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < len(p.fifos); i++ {\n\t\twg.Add(1)\n\t\tgo func(fifoIndex int) {\n\t\t\tp.fifos[fifoIndex].Run(ctx, params)\n\t\t\twg.Done()\n\t\t}(i)\n\t}\n\twg.Wait()\n}", "func (w *CrawlerWorker) Run() {\n\tfor _, j := range w.jobs {\n\t\tgo w.crawler.ProcessJob(*j)\n\t}\n\tw.ClearJobs()\n}", "func (d *Dispatcher) Run() {\n\t// starting n number of workers\n\tfor i := 0; i < d.maxWorkers; i++ {\n\t\t//RunningWorkers.WithLabelValues(\"Emails\").Inc()\n\t\tworker := NewWorker(d.WorkerPool)\n\t\tworker.Start()\n\t\td.Workers = append(d.Workers, worker)\n\t}\n\n\tgo d.dispatch()\n}", "func (q *ChannelQueue) Run(atShutdown, atTerminate func(func())) {\n\tpprof.SetGoroutineLabels(q.baseCtx)\n\tatShutdown(q.Shutdown)\n\tatTerminate(q.Terminate)\n\tlog.Debug(\"ChannelQueue: %s Starting\", q.name)\n\t_ = q.AddWorkers(q.workers, 0)\n}", "func Run(ctx context.Context) error {\n\targsLen := len(mngr.args)\n\tmngr.wkrs = make([]*worker, argsLen)\n\tmngr.pbs = make([]*progressbar, argsLen)\n\tmngr.errChs = make([]chan error, argsLen)\n\twg := new(sync.WaitGroup)\n\tfor i := 0; i < argsLen; i++ {\n\t\twg.Add(1)\n\t\targ := mngr.args[i]\n\t\terrCh := make(chan error, 1)\n\t\tmngr.wkrs[i] = newWorker(arg.name, arg.src, 
arg.dst, errCh)\n\t\tmngr.pbs[i] = newProgressbar(arg.name, arg.src.Size(), arg.dst, errCh)\n\t\tmngr.errChs[i] = errCh\n\t}\n\n\tlimit := make(chan struct{}, mngr.concurrent)\n\tgo func() {\n\t\tfor i := 0; i < argsLen; i++ {\n\t\t\tlimit <- struct{}{}\n\t\t\tgo func(i int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tmngr.wkrs[i].run(ctx)\n\t\t\t\tclose(mngr.errChs[i])\n\t\t\t\t<-limit\n\t\t\t}(i)\n\t\t}\n\t}()\n\n\tif mngr.disableProgressbar {\n\t\twg.Wait()\n\t} else {\n\t\tmngr.printProgressbars(wg)\n\t}\n\n\treturn mngr.composeErrors()\n}", "func (e *Executor) Run() { e.loop() }", "func (b *Task) Run(manager WorkManager) {\n\tb.stopCh = make(chan struct{}, 1)\n\tb.Start = time.Now()\n\n\tb.runWorkers(manager)\n\tb.Finish(manager)\n}", "func Run(run func()) {\n\t// Note: Initializing global `callQueue`. This is potentially unsafe, as `callQueue` might\n\t// have been already initialized.\n\t// TODO(yarcat): Decide whether we should panic at this point or do something else.\n\tcallQueue = make(chan func())\n\n\ttasks := make(chan func())\n\tdone := make(chan struct{})\n\tgo transferTasks(tasks, done)\n\n\tgo func() {\n\t\trun()\n\t\tclose(done) // `close` broadcasts it to all receivers.\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase f := <-tasks:\n\t\t\tf()\n\t\tcase <-done:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (a App) Run() error {\n\ta.log.Printf(\"config %+v\", a.params)\n\twg := &sync.WaitGroup{}\n\tqueue := make(chan string)\n\tresults := make(chan result)\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer close(queue)\n\t\ta.log.Printf(\"queue sender started\")\n\t\tfor _, url := range a.params.URLs {\n\t\t\ta.log.Printf(\"send to queue: %s\", url)\n\t\t\tqueue <- url\n\t\t}\n\t\twg.Done()\n\t}()\n\n\tfor i := 0; i < a.params.Parallel; i++ {\n\t\ti := i\n\t\twg.Add(1)\n\t\tgo func(queue <-chan string, results chan<- result, wg *sync.WaitGroup) {\n\t\t\ta.log.Printf(\"worker %d started\", i)\n\t\t\tfor job := range queue {\n\t\t\t\tif requestedURL, body, err := download(a.client, 
job); err != nil {\n\t\t\t\t\ta.log.Printf(\"downloaded with error: %s\", err)\n\t\t\t\t\tresults <- result{\n\t\t\t\t\t\terr: err,\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\ta.log.Printf(\"%s downloaded successfully\", requestedURL)\n\t\t\t\t\tresults <- result{\n\t\t\t\t\t\tbody: fmt.Sprintf(\"%x\", md5.Sum(body)),\n\t\t\t\t\t\turl: requestedURL,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\twg.Done()\n\t\t\ta.log.Printf(\"worker done: %d\", i)\n\t\t}(queue, results, wg)\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\ta.log.Printf(\"close results\")\n\t\tclose(results)\n\t}()\n\n\tfor r := range results {\n\t\tif r.err != nil {\n\t\t\ta.log.Printf(\"error: %s\", r.err)\n\t\t} else {\n\t\t\tif _, err := fmt.Fprintf(a.w, \"%s %s\\n\", r.url, r.body); err != nil {\n\t\t\t\treturn fmt.Errorf(\"error writing results: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (pm *PipelineManager) runWorker() {\n\tfor pm.processNextWorkItem() {\n\t}\n}", "func (d *Dispatcher) Run() {\n\t// starting n number of workers\n\tfor i := 0; i < d.MaxWorkers; i++ {\n\t\tworker := NewWorker(d.WorkerPool, d.WaitGroup)\n\t\tworker.Start()\n\t}\n\n\t// start the dispatcher routine\n\tgo d.dispatch()\n}", "func (t *TestRun) Run() {\n\tstartedAt := time.Now()\n\n\tlog.Printf(\"Spawning %d clients\", t.ConcurrencyLevel)\n\tt.waiting.Add(t.ConcurrencyLevel)\n\tfor i := 0; i < t.ConcurrencyLevel; i++ {\n\t\tgo t.startWorker(i)\n\t}\n\tt.waiting.Wait()\n\n\tduration := time.Since(startedAt)\n\tlog.Printf(\"TESTRUN - FINISHED (took %dms %v)\", durationInMillis(duration), duration)\n}", "func (q *Queue) Run() {\n\tfor i := uint8(0); i < q.concurrency; i++ {\n\t\tgo q.work()\n\t}\n\tfor {\n\t\t// dequeue the job\n\t\t// jobJSONSlice will always be 2 length\n\t\tjobJSONSlice, err := client.BLPop(0, q.queueName).Result()\n\t\tif err != nil {\n\t\t\tq.errorHandler(q, \"\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tq.jobChannel <- jobJSONSlice[1]\n\t}\n}", "func (p *GaugeCollectionProcess) Run() {\n\tdefer 
close(p.stopped)\n\n\t// Wait a random amount of time\n\tstopReceived := p.delayStart()\n\tif stopReceived {\n\t\treturn\n\t}\n\n\t// Create a ticker to start each cycle\n\tp.resetTicker()\n\n\t// Loop until we get a signal to stop\n\tfor {\n\t\tselect {\n\t\tcase <-p.ticker.C:\n\t\t\tp.collectAndFilterGauges()\n\t\tcase <-p.stop:\n\t\t\t// Can't use defer because this might\n\t\t\t// not be the original ticker.\n\t\t\tp.ticker.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (a *asyncProvidersFinder) Run(ctx context.Context, numWorkers int) {\n\ta.ctx = ctx\n\tfor i := 0; i < numWorkers; i++ {\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tcase req := <-a.workQueue:\n\t\t\t\t\ta.handleRequest(ctx, req)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\t// periodic metric publishing\n\tgo func() {\n\t\tfor {\n\t\t\tdefer a.metricsTicker.Stop()\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase <-a.metricsTicker.C:\n\t\t\t\ta.pendingMut.RLock()\n\t\t\t\tpending := len(a.pending)\n\t\t\t\ta.pendingMut.RUnlock()\n\n\t\t\t\tstats.Record(ctx, metrics.PrefetchesPending.M(int64(pending)))\n\t\t\t\tstats.Record(ctx, metrics.PrefetchNegativeCacheSize.M(int64(a.negativeCache.Len())))\n\t\t\t\tstats.Record(ctx, metrics.PrefetchNegativeCacheTTLSeconds.M(int64(a.negativeCacheTTL.Seconds())))\n\t\t\t\tstats.Record(ctx, metrics.PrefetchesPendingLimit.M(int64(a.workQueueSize)))\n\n\t\t\t\ta.onMetricsPublished()\n\t\t\t}\n\t\t}\n\t}()\n}", "func (d *Dispatcher) Run() {\n\t// starting n number of workers\n\tfor i := 0; i < d.Config.MaxWorkers; i++ {\n\t\tworker := NewWorker(fmt.Sprintf(\"NO_%d\", i), d.WorkerPool, d.handler, uint64(d.Config.WorkerRate), true)\n\t\tworker.Start()\n\t}\n\n\tgo d.dispatch()\n}", "func (worker *Worker) Run() error {\n\tworker.log.WithField(\"queueNames\", worker.getQueueNames()).Info(\"Run\")\n\tdefer func() {\n\t\tworker.log.Info(\"Exiting\")\n\t\tif worker.onStop != nil 
{\n\t\t\tworker.onStop()\n\t\t}\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase <-worker.StopChan:\n\t\t\treturn nil\n\t\tdefault:\n\t\t\tif attemptedJob, err := worker.PerformNextJob(); err != nil {\n\t\t\t\treturn errorx.Decorate(err, \"exiting job runner\")\n\t\t\t} else if !attemptedJob {\n\t\t\t\t// we didn't find a job. Take a nap.\n\t\t\t\ttime.Sleep(worker.jobPollingInterval)\n\t\t\t}\n\t\t}\n\t}\n}", "func (p *Pool) Start() {\n\tfor i := 0; i < p.nWorkers; i++ {\n\t\tw := newWorker(p.WorkerPool, p.jobfunc, &p.WaitGroup)\n\t\tw.Start()\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase job := <-p.jobs:\n\t\t\t\tp.WaitGroup.Add(1)\n\t\t\t\tgo func(job interface{}) {\n\t\t\t\t\tjobChan := <-p.WorkerPool\n\t\t\t\t\tjobChan <- job\n\t\t\t\t}(job)\n\t\t\tcase <-p.quit:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func (g *Worker) Run() {\n\tgo func() {\n\t\tfor {\n\t\t\t// Be part of the Worker Queue\n\t\t\tg.WorkerQueue <- g.JobChan\n\n\t\t\tselect {\n\n\t\t\tcase job := <-g.JobChan:\n\t\t\t\t// Received a Job Request, process it\n\t\t\t\tlogrus.Infof(\"Worker: %d, Received job request %d\", g.WorkerID, job.JobID)\n\n\t\t\t\tg.Callback(job.context, job.operation, job.key, job.value)\n\n\t\t\t\td := 10 * time.Millisecond\n\t\t\t\ttime.Sleep(d)\n\t\t\t\tlogrus.Debugf(\"Worker: %d, Slept for %s\", g.WorkerID, d)\n\n\t\t\tcase <-g.ExitChan:\n\t\t\t\tlogrus.Infof(\"Worker: %d exiting\", g.WorkerID)\n\t\t\t\treturn\n\n\t\t\t}\n\t\t}\n\t}()\n}", "func (worker *Worker) Run(ctx context.Context) error {\n\tgroup, ctx := errgroup.WithContext(ctx)\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn group.Wait()\n\t\tcase task := <-worker.taskCh:\n\t\t\tgroup.Go(func() error {\n\t\t\t\tdefer worker.RemoveTask(task)\n\n\t\t\t\treturn task.Run(ctx)\n\t\t\t})\n\t\t}\n\t}\n}", "func (f *Fetcher) Run(ctx context.Context, fn func(EntryBatch)) error {\n\tklog.V(1).Infof(\"%s: Starting up Fetcher...\", f.uri)\n\tif _, err := f.Prepare(ctx); err != nil {\n\t\treturn 
err\n\t}\n\n\tcctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\tf.mu.Lock()\n\tf.cancel = cancel\n\tf.mu.Unlock()\n\n\t// Use a separately-cancelable context for the range generator, so we can\n\t// close it down (in Stop) but still let the fetchers below run to\n\t// completion.\n\tranges := f.genRanges(cctx)\n\n\t// Run fetcher workers.\n\tvar wg sync.WaitGroup\n\tfor w, cnt := 0, f.opts.ParallelFetch; w < cnt; w++ {\n\t\twg.Add(1)\n\t\tgo func(idx int) {\n\t\t\tdefer wg.Done()\n\t\t\tklog.V(1).Infof(\"%s: Fetcher worker %d starting...\", f.uri, idx)\n\t\t\tf.runWorker(ctx, ranges, fn)\n\t\t\tklog.V(1).Infof(\"%s: Fetcher worker %d finished\", f.uri, idx)\n\t\t}(w)\n\t}\n\twg.Wait()\n\n\tklog.V(1).Infof(\"%s: Fetcher terminated\", f.uri)\n\treturn nil\n}", "func (e *Signer) Run(ctx context.Context) {\n\t// Shut down queues\n\tdefer utilruntime.HandleCrash()\n\tdefer e.syncQueue.ShutDown()\n\n\tif !cache.WaitForNamedCacheSync(\"bootstrap_signer\", ctx.Done(), e.configMapSynced, e.secretSynced) {\n\t\treturn\n\t}\n\n\tlogger := klog.FromContext(ctx)\n\tlogger.V(5).Info(\"Starting workers\")\n\tgo wait.UntilWithContext(ctx, e.serviceConfigMapQueue, 0)\n\t<-ctx.Done()\n\tlogger.V(1).Info(\"Shutting down\")\n}", "func (p *Pool) Start() {\n\tgo func() {\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tdefer cancel()\n\t\t\n\t\t// create workers\n\t\tfor i := uint32(0); i < p.workerCount; i++ {\n\t\t\tw := NewWorker(p)\n\t\t\tw.Run(ctx)\n\t\t}\n\t\t\n\t\tvar (\n\t\t\tactiveWorker chan Task\n\t\t\tactiveTask Task\n\t\t)\n\t\t\n\t\tfor {\n\t\t\tif !p.workerQ.IsEmpty() && !p.taskQ.IsEmpty() {\n\t\t\t\tw := p.workerQ.GetTop().(*Worker)\n\t\t\t\tactiveWorker = w.GetChan()\n\t\t\t\tactiveTask = p.taskQ.GetTop().(Task)\n\t\t\t} else {\n\t\t\t\t// activeTask = nil\n\t\t\t\tactiveWorker = nil\n\t\t\t}\n\t\t\t\n\t\t\tselect {\n\t\t\tcase task := <-p.taskChan:\n\t\t\t\tp.taskQ.Push(task)\n\t\t\tcase worker := 
<-p.workerChan:\n\t\t\t\tp.workerQ.Push(worker)\n\t\t\tcase activeWorker <- activeTask:\n\t\t\t\tp.workerQ.Pop()\n\t\t\t\tp.taskQ.Pop()\n\t\t\tcase <-p.exitChan:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func (b *Boomer) Run() {\n\tb.results = make(chan *result, b.N)\n\n\ttotalRequests = 0\n\trequestCountChan = make(chan int)\n\tquit = make(chan bool)\n\n\tstart := time.Now()\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\n\n\tgo func() {\n\t\t<-c\n\t\tfmt.Print(\"quit\\n\")\n\t\t// TODO(jbd): Progress bar should not be finalized.\n\t\tnewReport(b.N, b.results, b.Output, time.Now().Sub(start)).finalize()\n\n\t\tclose(requestCountChan)\n\t\tos.Exit(1)\n\t}()\n\tgo showProgress(b.C) // sunny\n\n\tif b.EnableEngineIo {\n\t\tgo b.readConsole()\n\t\tb.clients = make([]*engineioclient2.Client, b.C)\n\t\tb.startTimes = make([]time.Time, b.C)\n\t\tb.durations = make([]time.Duration, b.C)\n\t}\n\tb.runWorkers()\n\n\tfmt.Printf(\"Finished %d requests\\n\\n\", totalRequests)\n\tclose(requestCountChan)\n\n\tnewReport(b.N, b.results, b.Output, time.Now().Sub(start)).finalize()\n\tclose(b.results)\n\t//<-quit\n}", "func (s *Worker) Run(_ chan struct{}) error {\n\tfor notification := range s.queueNotification {\n\t\t// run custom process function\n\t\t_ = s.runFunc(notification)\n\t}\n\treturn nil\n}", "func (b *Boomer) Run() {\n\tvar shutdownTimer *time.Timer\n\tb.results = make(chan *result, b.C)\n\tb.stop = make(chan struct{})\n\tb.startProgress()\n\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\n\tgo func() {\n\t\t<-c\n\t\tshutdownTimer = time.AfterFunc(10*time.Second, func() {\n\t\t\tb.finalizeProgress()\n\t\t\tclose(b.stop)\n\t\t\tos.Exit(1)\n\t\t})\n\t\tb.finalizeProgress()\n\t\tclose(b.stop)\n\t}()\n\n\tr := newReport(b.N, b.results, b.Output)\n\tb.runWorkers()\n\tif shutdownTimer != nil {\n\t\tshutdownTimer.Stop()\n\t}\n\tclose(b.results)\n\tb.finalizeProgress()\n\tr.finalize()\n}", "func (w *Worker) Run() {\n wg.Add(1)\n go 
func() {\n defer wg.Done()\n for {\n w.WorkerQueue <- w.Work\n select {\n case work := <-w.Work:\n for attempt := 1 ; attempt <= w.Retry ; attempt++ {\n rc := w.ProcessWork(work, attempt)\n if rc == true {\n break\n }\n }\n case <-w.QuitChan:\n log.Debug(\"msg\", fmt.Sprintf(\"Worker #%d is stopping\", w.ID))\n return\n }\n }\n }()\n}", "func (w Worker) Start() {\n\tgo func() {\n\t\tfor {\n\t\t\tw.workerPool <- w.jobChannel\n\n\t\t\tselect {\n\t\t\tcase job := <-w.jobChannel:\n\t\t\t\tjob.Run()\n\t\t\tcase <-w.quit:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func (d *Deployer) Run(stopCh <-chan struct{}) {\n\t// handle a panic with logging and exiting\n\tdefer utilruntime.HandleCrash()\n\n\t// Start the deployQueue processing\n\tgo d.processDeployQueue(stopCh)\n\n\t// run the runWorker method every second with a stop channel\n\twait.Until(d.runWorker, time.Second, stopCh)\n}", "func (il *inputLoop) Run(ctx context.Context) error {\n\tdefer close(il.execQueue)\n\tg, lctx := errgroup.WithContext(ctx)\n\tif len(il.spec.Inputs) > 0 {\n\t\t// Watch inputs\n\t\tfor _, tis := range il.spec.Inputs {\n\t\t\ttis := tis // Bring in scope\n\t\t\tstats := il.statistics.InputByName(tis.Name)\n\t\t\tg.Go(func() error {\n\t\t\t\treturn il.watchInput(lctx, il.spec.SnapshotPolicy, tis, stats)\n\t\t\t})\n\t\t}\n\t}\n\tif il.spec.HasLaunchPolicyCustom() {\n\t\t// Custom launch policy, run executor all the time\n\t\tg.Go(func() error {\n\t\t\treturn il.runExecWithCustomLaunchPolicy(lctx)\n\t\t})\n\t}\n\tg.Go(func() error {\n\t\treturn il.processExecQueue(lctx)\n\t})\n\tif err := g.Wait(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *ProjectFinalizerController) Run(stopCh <-chan struct{}, workers int) {\n\tdefer runtime.HandleCrash()\n\tdefer c.queue.ShutDown()\n\n\t// Wait for the stores to fill\n\tif !cache.WaitForCacheSync(stopCh, c.controller.HasSynced) {\n\t\treturn\n\t}\n\n\tglog.V(5).Infof(\"Starting workers\")\n\tfor i := 0; i < workers; i++ {\n\t\tgo 
c.worker()\n\t}\n\t<-stopCh\n\tglog.V(1).Infof(\"Shutting down\")\n}", "func (d *balancerPools) Run(ctx context.Context) {\n\td.run()\n\tgo func(ctx context.Context) {\n\t\tfor {\n\t\t\ttime.Sleep(DefaultLifeSpanHalf)\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\td.run()\n\t\t\t}\n\t\t}\n\t}(ctx)\n}", "func (p *Pool) Run(j job) {\n\t_, from, to := j.info()\n\tp.sugar.Infow(\"putting new job to queue\",\n\t\t\"func\", caller.GetCurrentFunctionName(),\n\t\t\"from\", from.String(),\n\t\t\"to\", to.String())\n\tp.jobCh <- j\n}", "func (p Process) Start() {\n\n\tvar wg sync.WaitGroup\n\tgo func() {\n\t\tp.pollKEBForRuntimes()\n\t}()\n\n\tfor i := 0; i < p.WorkersPoolSize; i++ {\n\t\tj := i\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tp.execute(j)\n\t\t\tp.namedLogger().Debugf(\"######## Worker exits ########\")\n\t\t}()\n\t}\n\twg.Wait()\n}", "func (q Queue) Run() {\n\tif q.Concurrency < 1 {\n\t\tpanic(\"concurrency must not be less than 1\")\n\t}\n\n\tif q.AddJob == nil {\n\t\tpanic(\"AddJob() must not be nil\")\n\t}\n\n\tif q.DoJob == nil {\n\t\tpanic(\"DoJob() must not be nil\")\n\t}\n\n\tjobs := make(chan interface{})\n\tgo func() {\n\t\tdefer close(jobs)\n\t\tq.AddJob(&jobs)\n\t}()\n\n\tvar wg sync.WaitGroup\n\twg.Add(q.Concurrency)\n\tfor i := 0; i < q.Concurrency; i++ {\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tfor job := range jobs {\n\t\t\t\tq.DoJob(&job)\n\t\t\t}\n\t\t}()\n\t}\n\twg.Wait()\n}", "func Run(workers []Worker, duration time.Duration) *Result {\n\tdebug = os.Getenv(\"DEBUG\") != \"\"\n\tc := len(workers)\n\tlog.Printf(\"starting benchmark: concurrency: %d, time: %s, GOMAXPROCS: %d\", c, duration, runtime.GOMAXPROCS(0))\n\tstartCh := make(chan bool, c)\n\treadyCh := make(chan bool, c)\n\tvar stopFlag int32\n\tscoreCh := make(chan int, c)\n\tvar wg sync.WaitGroup\n\n\t// spawn worker goroutines\n\tfor i, w := range workers {\n\t\tdebugLog(\"spwan worker[%d]\", i)\n\t\tgo func(n int, worker Worker) 
{\n\t\t\twg.Add(1)\n\t\t\tdefer wg.Done()\n\t\t\tscore := 0\n\t\t\tworker.Setup()\n\t\t\treadyCh <- true // ready of worker:n\n\t\t\t<-startCh // notified go benchmark from Runner\n\t\t\tdebugLog(\"worker[%d] starting Benchmark()\", n)\n\t\t\tfor atomic.LoadInt32(&stopFlag) == 0 {\n\t\t\t\tscore += worker.Process()\n\t\t\t}\n\t\t\tscoreCh <- score\n\t\t\tdebugLog(\"worker[%d] done Benchmark() score: %d\", n, score)\n\t\t\tworker.Teardown()\n\t\t\tdebugLog(\"worker[%d] exit\", n)\n\t\t}(i, w)\n\t}\n\n\t// wait for ready of workres\n\tdebugLog(\"waiting for all workers finish Setup()\")\n\tfor i := 0; i < c; i++ {\n\t\t<-readyCh\n\t}\n\n\t// notify \"start\" to workers\n\tclose(startCh)\n\tstart := time.Now()\n\n\t// wait for catching signal or timed out\n\tsignalCh := make(chan os.Signal, 1)\n\tsignal.Notify(signalCh, TrapSignals...)\n\tselect {\n\tcase s := <-signalCh:\n\t\tswitch sig := s.(type) {\n\t\tcase syscall.Signal:\n\t\t\tlog.Printf(\"Got signal: %s(%d)\", sig, sig)\n\t\tdefault:\n\t\t\tlog.Printf(\"interrupted %s\", s)\n\t\t\tbreak\n\t\t}\n\tcase <-time.After(duration):\n\t\tdebugLog(\"timed out\")\n\t\tbreak\n\t}\n\n\t// notify \"stop\" to workers\n\tatomic.StoreInt32(&stopFlag, 1)\n\n\t// collect scores from workers\n\ttotalScore := 0\n\tfor i := 0; i < c; i++ {\n\t\ttotalScore += <-scoreCh\n\t}\n\tend := time.Now()\n\telapsed := end.Sub(start)\n\tlog.Printf(\"done benchmark: score %d, elapsed %s = %f / sec\\n\", totalScore, elapsed, float64(totalScore)/float64(elapsed)*float64(time.Second))\n\n\twg.Wait()\n\treturn &Result{Score: totalScore, Elapsed: elapsed}\n}", "func (w *worker) run() {\n\tfor {\n\t\tj, more := <-w.jobs\n\t\tif more {\n\t\t\terr := w.processJob(j)\n\t\t\tw.results <- &jobResult{job: j, err: err}\n\t\t} else {\n\t\t\tw.logger.Info(\"received all jobs, closing worker\")\n\t\t\treturn\n\t\t}\n\t}\n}", "func (w *Worker) Run() {\n\tconn := w.Pool.Get()\n\tdefer conn.Close()\n\n\tvar msgs = make([]Impression, 0, w.Size)\n\ttick := 
time.NewTicker(w.Interval)\n\n\tw.verbose(\"active\")\n\n\tfor {\n\t\tselect {\n\t\tcase msg := <-w.Queue:\n\t\t\tw.verbose(\"buffer (%d/%d) %v\", len(msgs), w.Size, msg)\n\t\t\tmsgs = append(msgs, msg)\n\t\t\tif len(msgs) >= w.Size {\n\t\t\t\tw.verbose(\"exceeded %d messages – flushing\", w.Size)\n\t\t\t\tw.send(conn, msgs)\n\t\t\t\tmsgs = make([]Impression, 0, w.Size)\n\t\t\t}\n\t\tcase <-tick.C:\n\t\t\tif len(msgs) > 0 {\n\t\t\t\tw.verbose(\"interval reached - flushing %d\", len(msgs))\n\t\t\t\tw.send(conn, msgs)\n\t\t\t\tmsgs = make([]Impression, 0, w.Size)\n\t\t\t} else {\n\t\t\t\tw.verbose(\"interval reached – nothing to send\")\n\t\t\t}\n\t\tcase <-w.quit:\n\t\t\ttick.Stop()\n\t\t\tw.verbose(\"exit requested – draining msgs\")\n\t\t\t// drain the msg channel.\n\t\t\tfor msg := range w.Queue {\n\t\t\t\tw.verbose(\"buffer (%d/%d) %v\", len(msgs), w.Size, msg)\n\t\t\t\tmsgs = append(msgs, msg)\n\t\t\t}\n\t\t\tw.verbose(\"exit requested – flushing %d\", len(msgs))\n\t\t\tw.send(conn, msgs)\n\t\t\tw.verbose(\"exit\")\n\t\t\tw.shutdown <- struct{}{}\n\t\t\treturn\n\t\t}\n\t}\n}", "func (p *parallel) Run() map[string]error {\n\tp.eC = make(chan err, len(p.f))\n\n\tif p.poolNum > len(p.f) {\n\t\tp.poolNum = len(p.f)\n\t}\n\n\tgo func() {\n\t\tfor i := range p.f {\n\t\t\tp.poolChan <- p.f[i]\n\t\t}\n\t\tclose(p.poolChan)\n\t}()\n\tp.wg.Add(p.poolNum)\n\tfor i := 0; i < p.poolNum; i++ {\n\t\tgo func() {\n\t\t\tdefer p.wg.Done()\n\t\t\tfor f := range p.poolChan {\n\t\t\t\tp.eC <- err{\n\t\t\t\t\ttag: f.Tag,\n\t\t\t\t\terr: handle(p.timeout, f.F),\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\tp.wg.Wait()\n\tclose(p.eC)\n\tfor e := range p.eC {\n\t\tp.e.errors[e.tag] = e.err\n\t}\n\n\tresult := p.e.errors\n\n\tp.init()\n\n\treturn result\n}", "func (p *Pump) Run() {\n\tgo p.run()\n}", "func (w *Worker) Start() {\n\tgo func() {\n\t\tfor {\n\t\t\tWorkerPool.workerChan <- w\n\t\t\tselect {\n\t\t\tcase jobID := <-w.RepJobs:\n\t\t\t\tlog.Debugf(\"worker: %d, will handle job: %d\", 
w.ID, jobID)\n\t\t\t\tw.handleRepJob(jobID)\n\t\t\tcase q := <-w.quit:\n\t\t\t\tif q {\n\t\t\t\t\tlog.Debugf(\"worker: %d, will stop.\", w.ID)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}", "func Run(cfg Config, stdout, stderr io.Writer, maxWorkers uint64) error {\n\tgraph, err := NewGraph(cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tdoneNodes := make(map[*Node]struct{})\n\twaitingNodes := make(map[*Node]struct{})\n\n\tqueueSize := 1024\n\tdoneQueue := make(chan ResultNode, queueSize)\n\tsubmit := PoolStart(ctx, maxWorkers)\n\tsubmitNode := func(n *Node) {\n\t\tsubmit(func(worker Worker) {\n\t\t\tworker.Stdout = stdout\n\t\t\tworker.Stderr = stderr\n\t\t\terr := worker.Execute(n)\n\t\t\tdoneQueue <- ResultNode{n, err}\n\t\t})\n\t}\n\n\tfor runnableNode := range graph.Dependents {\n\t\tsubmitNode(runnableNode)\n\t}\n\n\ttotalTasks := len(cfg.Jobs)\n\tfor len(doneNodes) < totalTasks {\n\t\tresult := <-doneQueue\n\t\tif result.Err != nil {\n\t\t\treturn result.Err\n\t\t}\n\n\t\tnode := result.Node\n\t\tdoneNodes[node] = struct{}{}\n\t\tfor dependent := range node.Dependents {\n\t\t\tif _, ok := doneNodes[dependent]; !ok {\n\t\t\t\twaitingNodes[dependent] = struct{}{}\n\t\t\t}\n\t\t}\n\n\t\trunnableNodes := nextRunnableNodes(waitingNodes, doneNodes)\n\t\tfor _, runnableNode := range runnableNodes {\n\t\t\tsubmitNode(runnableNode)\n\t\t\tdelete(waitingNodes, runnableNode)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (r *Runner) run() {\n\tfor {\n\t\ttask := r.rq.Pop()\n\t\tr.process(task)\n\t}\n}", "func (p *Pool) worker() {\n\tdefer p.wg.Done()\n\tfor job := range p.In {\n\t\tatomic.AddInt64(&p.Stats.Pending, -1)\n\t\tatomic.AddInt64(&p.Stats.Running, 1)\n\t\tjob.Result, job.Error = job.F(job.Args...)\n\t\tatomic.AddInt64(&p.Stats.Running, -1)\n\t\tatomic.AddInt64(&p.Stats.Completed, 1)\n\t\tp.Out <- job\n\t}\n}", "func Run(concurrent int, round int, interval int) {\n\tsenders := 
pickupPeersButMyself(peer.ID(\"\"), concurrent, mnet.Peers())\n\tlog.Info(senders)\n\tvar wg sync.WaitGroup\n\twg.Add(len(senders))\n\tfor _, pid := range senders {\n\t\t// 随机化开始时间\n\t\ttime.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond)\n\t\tgo Work(&wg, pid, round, interval)\n\t}\n\twg.Wait()\n\tlog.Info(\"run ok\")\n}", "func (p *Pool) Start() {\n\tlog.Println(\"Starting pool\", p)\n\n\tfor index, worker := range p.Workers {\n\t\tlog.Println(\" Starting worker:\", worker, index)\n\t\tworker.Prepare()\n\t\tworker.Start()\n\t}\n}", "func (d *Downloads) Run() {\n\tfor {\n\t\tselect {\n\t\tcase performer := <-d.performers:\n\t\t\tgo d.start(performer)\n\t\tcase <-d.quit:\n\t\t\td.close()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (gen *Generator) Run(workers int, stopCh <-chan struct{}) {\n\tlogger := gen.log\n\tdefer utilruntime.HandleCrash()\n\tlogger.Info(\"start\")\n\tdefer logger.Info(\"shutting down\")\n\n\tif !cache.WaitForCacheSync(stopCh, gen.reportReqSynced, gen.clusterReportReqSynced, gen.cpolListerSynced, gen.polListerSynced) {\n\t\tlogger.Info(\"failed to sync informer cache\")\n\t}\n\n\tfor i := 0; i < workers; i++ {\n\t\tgo wait.Until(gen.runWorker, time.Second, stopCh)\n\t}\n\n\tgo gen.requestCreator.run(stopCh)\n\n\t<-stopCh\n}", "func (b *Work) Run() {\n\t// append hey's user agent\n\tua := b.Request.UserAgent()\n\tif ua == \"\" {\n\t\tua = megSenderUA\n\t} else {\n\t\tua += \" \" + megSenderUA\n\t}\n\n\tb.results = make(chan *result)\n\tb.stopCh = make(chan struct{}, b.C)\n\tb.startTime = time.Now()\n\tb.report = newReport(b.writer(), b.results, b.Output)\n\tb.report.start()\n\n\tb.runWorkers()\n\tb.Finish()\n}", "func (d *Dispatcher) Run(ctx context.Context, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\n\t// Start workers\n\twwg := &sync.WaitGroup{}\n\twctx, stopWorkers := context.WithCancel(context.Background())\n\tfor _, w := range d.workers {\n\t\twwg.Add(1)\n\t\tgo w.Run(wctx, wwg)\n\t}\n\n\t// Stop workers when dispatcher is asked to 
stop\n\t<-ctx.Done()\n\tstopWorkers()\n\twwg.Wait()\n}", "func (w *WorkerContainer) Run(ctx context.Context, meta *scriptpb.RunMeta, chunkReader *common.ChunkReader) (<-chan interface{}, error) {\n\tworkerStream, err := w.Worker.scriptCli.Run(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Send meta header.\n\tif err := workerStream.Send(&scriptpb.RunRequest{\n\t\tValue: &scriptpb.RunRequest_Meta{\n\t\t\tMeta: meta,\n\t\t},\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Send chunks.\n\tif chunkReader != nil {\n\t\tvar chunk *scriptpb.RunChunk\n\n\t\tfor {\n\t\t\tchunk, err = chunkReader.Get()\n\t\t\tif err == io.EOF {\n\t\t\t\terr = nil\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"reading script chunk failed: %w\", err)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif err = workerStream.Send(&scriptpb.RunRequest{\n\t\t\t\tValue: &scriptpb.RunRequest_Chunk{\n\t\t\t\t\tChunk: chunk,\n\t\t\t\t},\n\t\t\t}); err != nil {\n\t\t\t\terr = fmt.Errorf(\"sending script request failed: %w\", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif err := workerStream.CloseSend(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tch := make(chan interface{}, 1)\n\n\tgo func() {\n\t\tfor {\n\t\t\trunRes, err := workerStream.Recv()\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\tch <- err\n\t\t\t\t}\n\n\t\t\t\tclose(ch)\n\n\t\t\t\treturn\n\t\t\t}\n\t\t\tch <- runRes\n\t\t}\n\t}()\n\n\treturn ch, nil\n}", "func (d *Dispatcher) Run(ctx context.Context) error {\n\tcur := d.initInterval\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\t// process pooled alerts before quit, if any.\n\t\t\tif d.alertPool.empty() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase <-time.After(cur):\n\t\t}\n\n\t\talerts := d.alertPool.take()\n\t\tif len(alerts) == 0 {\n\t\t\tcur = d.initInterval\n\t\t\tcontinue\n\t\t}\n\n\t\td.handler.Handle(alerts)\n\n\t\tcur = cur * 2\n\t\tif cur > d.maxInterval {\n\t\t\tcur = 
d.maxInterval\n\t\t}\n\t}\n}", "func (p *Parallel) Run() {\n\tfor _, child := range p.children {\n\t\t// this func will never panic\n\t\tgo func(ch *Parallel) {\n\t\t\tch.Run()\n\t\t\tp.wgChild.Done()\n\t\t}(child)\n\t}\n\tp.wgChild.Wait()\n\tp.do()\n\tp.wg.Wait()\n}", "func (wh *Webhooks) Run(ctx context.Context) {\n\twh.workersNum.Inc()\n\tdefer wh.workersNum.Dec()\n\tfor {\n\t\tenqueuedItem, err := wh.queue.Pop(ctx)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\twh.queuedNum.Dec()\n\t\ttmpFile, err := wh.openStoredRequestFile(enqueuedItem)\n\t\tif err != nil {\n\t\t\tlog.Println(\"failed to process\", enqueuedItem.RequestFile, \"-\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\twh.processRequestAsync(ctx, enqueuedItem.Manifest, tmpFile)\n\t\t_ = tmpFile.Close()\n\t\t_ = os.RemoveAll(tmpFile.Name())\n\t}\n}", "func (s *SubscriptionWatcher) Run(ctx context.Context) error {\n\tvar wg sync.WaitGroup\n\tdone := make(chan struct{})\n\tout := make(chan ResourceAudits)\n\n\t// setup worker pool\n\t// workers receive jobs and send results to output channel\n\tworkers := make(map[schema.ContentType]chan ResourceSubscription)\n\tcontentTypes := schema.GetContentTypes()\n\n\twg.Add(len(contentTypes))\n\tfor _, ct := range contentTypes {\n\t\ts.logger.WithField(\"content-type\", ct.String()).Info(\"starting worker\")\n\t\tch := make(chan ResourceSubscription, 1)\n\t\tworkers[ct] = ch\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tfor res := range ch {\n\t\t\t\tcontentCh := s.fetchContent(ctx, done, res)\n\t\t\t\tauditCh := s.fetchAudits(ctx, done, contentCh)\n\n\t\t\t\tfor a := range auditCh {\n\t\t\t\t\tout <- a\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\t// this goroutine is responsible for closing output channel\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(out)\n\t}()\n\n\t// setup ticker that will periodically fetch subscriptions\n\t// and create jobs for workers.\n\t// this goroutine is responsible for closing worker channels\n\tgo func() {\n\t\ttickerDur := 
time.Duration(s.config.TickerIntervalSeconds) * time.Second\n\t\tticker := time.NewTicker(tickerDur)\n\t\tdefer ticker.Stop()\n\n\t\ts.logger.Infoln(\"start main\")\n\t\ts.logger.Infof(\"using config: %+v\", s.config)\n\n\t\tfetch := func(t time.Time) {\n\t\t\tsubCh := s.fetchSubscriptions(ctx, done, t)\n\t\t\tfor sub := range subCh {\n\t\t\t\tctLogger := s.logger.WithField(\"content-type\", sub.ContentType.String())\n\t\t\t\tworkerCh, ok := workers[*sub.ContentType]\n\t\t\t\tif !ok {\n\t\t\t\t\tctLogger.Error(\"no worker registered for content-type\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tselect {\n\t\t\t\tdefault:\n\t\t\t\t\tctLogger.Warn(\"worker is busy, skipping\")\n\t\t\t\tcase workerCh <- sub:\n\t\t\t\t\tctLogger.Debugln(\"sent work\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfetch(time.Now())\n\tLoop:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\tfor ct, workerCh := range workers {\n\t\t\t\t\ts.logger.WithField(\"content-type\", ct.String()).Info(\"closing worker\")\n\t\t\t\t\tclose(workerCh)\n\t\t\t\t}\n\t\t\t\tbreak Loop\n\t\t\tcase t := <-ticker.C:\n\t\t\t\tfetch(t)\n\t\t\t}\n\t\t}\n\t\ts.logger.Infoln(\"end main\")\n\t}()\n\n\t// this goroutine is responsible for notifying\n\t// everyone that we want to exit\n\tgo func() {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tclose(done)\n\t\t\treturn\n\t\t}\n\t}()\n\n\treturn s.Handler.Handle(out)\n}", "func workerpool() {\n\tworkers := 3\n\tworkchan := make(chan int)\n\tfor i := 0; i < workers; i++ {\n\t\tgo func() {\n\t\t\tfor i := range workchan {\n\t\t\t\ttime.Sleep(3 * time.Second)\n\t\t\t\tfmt.Println(\"Workerpool worked on \", i)\n\t\t\t}\n\t\t}()\n\t}\n\tamountOfWork := 10\n\tfor i := 0; i < amountOfWork; i++ {\n\t\tworkchan <- i\n\t}\n\tfmt.Println(\"Finished workerpool work\")\n\t//Give some time for goroutines to finish. 
To avoid using WaitGroup and loosing focus.\n\ttime.Sleep(5 * time.Second)\n}", "func (s *Consumer) Run() error {\n\t// check queue status\n\tselect {\n\tcase <-s.stop:\n\t\treturn ErrQueueShutdown\n\tdefault:\n\t}\n\n\tfor task := range s.taskQueue {\n\t\tvar data Job\n\t\t_ = json.Unmarshal(task.Bytes(), &data)\n\t\tif v, ok := task.(Job); ok {\n\t\t\tif v.Task != nil {\n\t\t\t\tdata.Task = v.Task\n\t\t\t}\n\t\t}\n\t\tif err := s.handle(data); err != nil {\n\t\t\ts.logger.Error(err.Error())\n\t\t}\n\t}\n\treturn nil\n}", "func (f *Fetcher) Run() {\n\tLog.Println(\"Fetcher is running...\")\n\tf.status = RUNNING\n\tfor {\n\t\tselect {\n\t\tcase <-f.stop:\n\t\t\tLog.Println(\"the Fetcher is stop!\")\n\t\t\tf.stop = nil\n\t\t\tf.status = STOP\n\t\t\treturn\n\t\tcase reqs := <-f.pop:\n\t\t\tfor _, req := range reqs {\n\t\t\t\tcrawler := GetNodeInstance().GetCrawler(req.Crawler)\n\t\t\t\tttl := time.After(time.Second * time.Duration(crawler.TTL))\n\t\t\t\t// handle the req in goroutine\n\t\t\t\tgo f.handle(req)\n\t\t\t\t<-ttl\n\t\t\t}\n\t\t}\n\t}\n}", "func (t *Tapa) Run() {\n\n\tvar progressBars []*pb.ProgressBar\n\tfor _, req := range t.requests {\n\t\tsize := req.users * req.requestsPerUser\n\t\treq.progressBar = pb.New(size)\n\t\treq.progressBar.Width = 120\n\t\tprogressBars = append(progressBars, req.progressBar)\n\t}\n\n\tpool := pb.NewPool(progressBars...)\n\tpool.Start()\n\tdefer pool.Stop()\n\n\tjobs := make(chan *Request, t.concurrency)\n\tresponses := make(chan bool, len(t.requests))\n\n\tfor worker := 1; worker <= t.concurrency; worker++ {\n\t\tgo t.runRequest(jobs, responses)\n\t}\n\n\tfor _, req := range t.requests {\n\t\tjobs <- req\n\t}\n\tclose(jobs)\n\tfor worker := 1; worker <= len(t.requests); worker++ {\n\t\t<-responses\n\t}\n\n}", "func (p Parallel) Run() {\n\tvar wg sync.WaitGroup\n\twg.Add(len(p))\n\tfor _, f := range p {\n\t\tgo func(f func()) {\n\t\t\tf()\n\t\t\twg.Done()\n\t\t}(f)\n\t}\n\twg.Wait()\n}", "func (c *Consumer) Run() error 
{\n\tc.Add(c.NumWorkers) // HL\n\tfor idx := 0; idx < c.NumWorkers; idx++ {\n\t\tw := NewWorker(c.todo)\n\t\tgo eat(idx) // HL\n\t\tc.workers[idx] = w\n\t}\n\tgo c.feed() // HL\n\n\tgo func() {\n\t\tc.Wait() // HL\n\t\tclose(todo)\n\t}()\n\n\tc.loop() // HL\n}", "func (g *Gossiper) Run(ctx context.Context) {\n\tsths := make(chan sthInfo, g.bufferSize)\n\n\tvar wg sync.WaitGroup\n\twg.Add(1 + len(g.srcs))\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tglog.Info(\"starting Submitter\")\n\t\tg.Submitter(ctx, sths)\n\t\tglog.Info(\"finished Submitter\")\n\t}()\n\tfor _, src := range g.srcs {\n\t\tgo func(src *sourceLog) {\n\t\t\tdefer wg.Done()\n\t\t\tglog.Infof(\"starting Retriever(%s)\", src.Name)\n\t\t\tsrc.Retriever(ctx, g, sths)\n\t\t\tglog.Infof(\"finished Retriever(%s)\", src.Name)\n\t\t}(src)\n\t}\n\twg.Wait()\n}", "func (w *Worker) Run(done <-chan interface{}) error {\n\tdefer close(w.resultStream)\n\tfor {\n\t\tselect {\n\t\tcase <-done:\n\t\t\tlog.Println(\n\t\t\t\t\"level\", \"INFO\",\n\t\t\t\t\"object\", \"workers.worker\",\n\t\t\t\t\"method\", \"Run\",\n\t\t\t\t\"msg\", \"terminating operations by application request\",\n\t\t\t)\n\t\t\treturn nil\n\t\tcase order, ok := <-w.orderStream:\n\t\t\tif !ok {\n\t\t\t\tlog.Println(\"level\", \"INFO\",\n\t\t\t\t\t\"object\", \"workers.worker\",\n\t\t\t\t\t\"method\", \"Run\",\n\t\t\t\t\t\"msg\", \"terminating operations because order stream was closed\",\n\t\t\t\t)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tw.processOrder(order)\n\t\t}\n\t}\n}", "func (w *worker) run() {\n\tlogrus.Infof(\"start prober worker %s\", w.thirdComponent.GetEndpointID(&w.endpoint))\n\n\tprobeTickerPeriod := time.Duration(w.spec.PeriodSeconds) * time.Second\n\n\t// If kubelet restarted the probes could be started in rapid succession.\n\t// Let the worker wait for a random portion of tickerPeriod before probing.\n\ttime.Sleep(time.Duration(rand.Float64() * float64(probeTickerPeriod)))\n\n\tprobeTicker := time.NewTicker(probeTickerPeriod)\n\n\tdefer 
func() {\n\t\t// Clean up.\n\t\tprobeTicker.Stop()\n\t\tw.resultsManager.Remove(w.thirdComponent.GetEndpointID(&w.endpoint))\n\n\t\tw.probeManager.removeWorker(&w.endpoint)\n\t\tProberResults.Delete(w.proberResultsSuccessfulMetricLabels)\n\t\tProberResults.Delete(w.proberResultsFailedMetricLabels)\n\t\tProberResults.Delete(w.proberResultsUnknownMetricLabels)\n\t}()\n\nprobeLoop:\n\tfor w.doProbe() {\n\t\t// Wait for next probe tick.\n\t\tselect {\n\t\tcase <-w.stopCh:\n\t\t\tbreak probeLoop\n\t\tcase <-probeTicker.C:\n\t\t\t// continue\n\t\t}\n\t}\n}", "func (p *Producer) Run() {\n\tp.wg.Add(1)\n\tdefer p.wg.Done()\n\n\tsendMsg := func(routingKey string, data []byte) {\n\t\ttimeStamp := time.Now()\n\t\terr := p.rabbitChannel.Publish(\n\t\t\tp.rabbitExchange,\n\t\t\troutingKey,\n\t\t\tfalse,\n\t\t\tfalse,\n\t\t\tamqp.Publishing{\n\t\t\t\tDeliveryMode: amqp.Persistent,\n\t\t\t\tTimestamp: timeStamp,\n\t\t\t\tContentType: \"text/plain\",\n\t\t\t\tBody: data,\n\t\t\t})\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error publishing %s\", string(data))\n\t\t\tp.writeFailure(\n\t\t\t\tfmt.Sprintf(\"%s/%s-%d.txt\",\n\t\t\t\t\tp.failureDir,\n\t\t\t\t\troutingKey,\n\t\t\t\t\ttimeStamp.UnixNano()),\n\t\t\t\tdata)\n\t\t}\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase event := <-p.eventsChan:\n\t\t\tsendMsg(\"raw_events\", event)\n\t\tcase meter := <-p.metersChan:\n\t\t\tsendMsg(\"raw_meters\", meter)\n\t\tcase <-p.quitChan:\n\t\t\tp.rabbitChannel.Close()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (gs *ShutdownHandler) Run() {\n\n\tgo func() {\n\t\tselect {\n\t\tcase <-gs.gracefulStop:\n\t\t\tgs.log.Warnf(\"Received shutdown request - waiting (max %d seconds) to finish processing ...\", waitToKillTimeInSeconds)\n\t\tcase msg := <-gs.nodeSelfShutdown:\n\t\t\tgs.log.Warnf(\"Node self-shutdown: %s; waiting (max %d seconds) to finish processing ...\", msg, waitToKillTimeInSeconds)\n\t\t}\n\n\t\tgo func() {\n\t\t\tstart := time.Now()\n\t\t\tfor x := range time.Tick(1 * time.Second) 
{\n\t\t\t\tsecondsSinceStart := x.Sub(start).Seconds()\n\n\t\t\t\tif secondsSinceStart <= waitToKillTimeInSeconds {\n\t\t\t\t\tprocessList := \"\"\n\t\t\t\t\trunningBackgroundWorkers := gs.daemon.GetRunningBackgroundWorkers()\n\t\t\t\t\tif len(runningBackgroundWorkers) >= 1 {\n\t\t\t\t\t\tprocessList = \"(\" + strings.Join(runningBackgroundWorkers, \", \") + \") \"\n\t\t\t\t\t}\n\n\t\t\t\t\tgs.log.Warnf(\"Received shutdown request - waiting (max %d seconds) to finish processing %s...\", waitToKillTimeInSeconds-int(secondsSinceStart), processList)\n\t\t\t\t} else {\n\t\t\t\t\tgs.log.Fatal(\"Background processes did not terminate in time! Forcing shutdown ...\")\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\n\t\tgs.daemon.ShutdownAndWait()\n\t}()\n}", "func (c *Config) Worker(sig *signal.Config, cbPool *cbpool.Client, goPool *gopool.Client) {\n\tfor sig.Run {\n\t\tc.SetCBQueueCount(cbPool.GetQueueCount())\n\t\tc.SetCBWorkerCount(cbPool.GetWorkerCount())\n\t\tc.SetGoPoolQueueCount(goPool.GetQueueCount())\n\t\tc.SetGoPoolWorkerCount(goPool.GetWorkerCount())\n\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n}", "func (c *Controller) RunWorker(stopCh <-chan struct{}) {\n\n\tgo c.waitForShutdown(stopCh)\n\n\tfor c.processNextWorkItem() {\n\t}\n}", "func (b Blackbox) Run() error {\n\tlistener, err := net.Listen(\"tcp\", b.cfg.PrometheusListenAddr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo b.runProbes()\n\n\treturn servePrometheus(listener)\n}", "func (w *Worker) Start(workersAmmount int) {\n\tclose(w.tasks)\n\n\tfor i := 0; i < workersAmmount; i++ {\n\t\tgo func(id int) {\n\t\t\tfor t := range w.tasks {\n\t\t\t\tw.results <- t.F(t.Args)\n\t\t\t}\n\t\t}(i)\n\t}\n\n\tif w.ResultHandler == nil {\n\t\tw.ResultHandler = func(*Result) {}\n\t}\n\n\tfor r := 0; r < w.queueLength; r++ {\n\t\tw.ResultHandler(<-w.results)\n\t}\n\n\tclose(w.results)\n\n\tif w.FinishedHandler != nil {\n\t\tw.FinishedHandler()\n\t}\n}", "func Run(wg *sync.WaitGroup, ctl chan bool) {\n\tbroker.ctl = 
ctl\n\tbroker.wg = wg\n\tbroker.listen()\n}", "func (s *HelloWordModule) Run(mainParams *application.MainParams) error {\n\n\t// make the local context for this module instance\n\tvar localCtx context.Context\n\tlocalCtx, s.cancel = context.WithCancel(mainParams.Ctx)\n\n\t// run module workers\n\tmainParams.Wg.Add(1) // every time increment the WaitGroup before start goroutine\n\tgo printHello(localCtx, mainParams.Wg, cfg.Name) // run goroutine\n\n\treturn nil\n}", "func (j *Job) Run() {\n\tleftRunningTimes := j.times.Add(-1)\n\tif leftRunningTimes < 0 {\n\t\tj.status.Set(StatusClosed)\n\t\treturn\n\t}\n\t// This means it does not limit the running times.\n\t// I know it's ugly, but it is surely high performance for running times limit.\n\tif leftRunningTimes < 2000000000 && leftRunningTimes > 1000000000 {\n\t\tj.times.Set(math.MaxInt32)\n\t}\n\tgo func() {\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tif err != panicExit {\n\t\t\t\t\tpanic(err)\n\t\t\t\t} else {\n\t\t\t\t\tj.Close()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tif j.Status() == StatusRunning {\n\t\t\t\tj.SetStatus(StatusReady)\n\t\t\t}\n\t\t}()\n\t\tj.job()\n\t}()\n}", "func (p *Pool) Exec() {\n\tfor i := 0; i < p.workers; i++ {\n\t\t//creating workers to receive and execute the tasks\n\t\tgo p.work()\n\t}\n\n\tp.wg.Add(len(p.Tasks))\n\tfor _, task := range p.Tasks {\n\t\t//tasks are added in the channel.\n\t\t//workers are listening this channel and when a worker is idle\n\t\t//it will receive the task to execute\n\t\tp.queueTasks <- task\n\t}\n\n\t// close the channel when all task was executed\n\tclose(p.queueTasks)\n\n\tp.wg.Wait()\n}", "func (s *Server) Run() (chan *Message, error) {\n\ts.logger.Notice(\"Starting %v server...\", s.name)\n\ts.commsChan = make(chan *Message)\n\ts.killChan = make(chan bool)\n\tgo s.listenAndServe()\n\treturn s.commsChan, nil\n}", "func (mh* MsgHandle) StartWorkerPool() {\n\t// start worker individually, one worker one goroutine\n\tfor 
i:=0; i < int(mh.WorkerPoolSize); i++ {\n\t\t// create channel for new worker\n\t\tmh.TaskQueue[i] = make(chan viface.IRequest, utils.GlobalObject.MaxWorkerTaskLen)\n\t\t// start worker\n\t\tgo mh.StartOneWorker(i, mh.TaskQueue[i])\n\t}\n}", "func (w *worker) Start() {\n\tgo func() {\n\t\tfor {\n\t\t\t// consume done ,then worker reenter workerPool\n\t\t\tw.workerPool <- w.taskChannel\n\t\t\tselect {\n\t\t\tcase task := <-w.taskChannel:\n\t\t\t\t// received a work request and consume it\n\t\t\t\tif err := task.Consume(); err != nil {\n\t\t\t\t\tlog.Printf(\"Task Consume fail: %v\", err.Error())\n\t\t\t\t}\n\t\t\tcase <-w.quit:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func (p *Poller) Run() {\n\tgo util.Forever(func() {\n\t\te, err := p.getFunc()\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"failed to list: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tp.sync(e)\n\t}, p.period)\n}", "func (m *Manager) Run() {\n\tgo func() {\n\t\tif err := m.Start(context.Background()); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n}", "func (jr *joinReader) Run(wg *sync.WaitGroup) {\n\terr := jr.mainLoop()\n\tjr.output.Close(err)\n\tif wg != nil {\n\t\twg.Done()\n\t}\n}" ]
[ "0.8138579", "0.7590641", "0.7388474", "0.7091481", "0.70803887", "0.7080261", "0.6920038", "0.6850398", "0.67854327", "0.67674756", "0.67474246", "0.668862", "0.6676274", "0.6672302", "0.6565446", "0.65594107", "0.6554532", "0.6526274", "0.65239984", "0.6519984", "0.65196973", "0.6518286", "0.6511769", "0.65057427", "0.64827067", "0.64434016", "0.64142424", "0.64027846", "0.64011", "0.63929063", "0.63826394", "0.63798636", "0.6372741", "0.6355458", "0.6354919", "0.63456327", "0.6337341", "0.6319018", "0.63052386", "0.6305185", "0.630352", "0.62894535", "0.6280951", "0.6279224", "0.62704206", "0.62639236", "0.6252748", "0.6249709", "0.62368816", "0.6221174", "0.6217032", "0.6196866", "0.61963737", "0.61956596", "0.61714584", "0.61634743", "0.61630833", "0.6161481", "0.6155509", "0.6120705", "0.61196077", "0.61182874", "0.61125934", "0.6103553", "0.6091948", "0.6091869", "0.60847545", "0.608333", "0.60764116", "0.60725826", "0.60704815", "0.60615313", "0.6060669", "0.6032718", "0.60315776", "0.6027302", "0.6020428", "0.60174006", "0.6016168", "0.60001457", "0.5997574", "0.59969634", "0.5996789", "0.59850794", "0.5966064", "0.5965798", "0.5960869", "0.59595495", "0.5956672", "0.5949622", "0.5948387", "0.5945715", "0.5940854", "0.5937817", "0.59295505", "0.5924974", "0.5917243", "0.5916581", "0.5914217", "0.5914202", "0.59041137" ]
0.0
-1
getSourcegraphVersion queries the Sourcegraph GraphQL API to get the current version of the Sourcegraph instance.
func (svc *Service) getSourcegraphVersion(ctx context.Context) (string, error) { var result struct { Site struct { ProductVersion string } } ok, err := svc.client.NewQuery(sourcegraphVersionQuery).Do(ctx, &result) if err != nil || !ok { return "", err } return result.Site.ProductVersion, err }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (obj *Edge) GetVersion() int {\n\treturn obj.getVersion()\n}", "func GetVersion() string {\n\treturn version\n}", "func GetVersion() string {\n\treturn version\n}", "func (c *Context) GetVersion() string { // 获取版本号\n\treturn c.GetGinCtx().Param(\"version\")\n}", "func (o VirtualDatabaseSpecBuildSourceOutput) Version() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v VirtualDatabaseSpecBuildSource) *string { return v.Version }).(pulumi.StringPtrOutput)\n}", "func (a *BaseAggregateSourced) GetVersion() int {\n\treturn a.Version\n}", "func GetVersion() string {\n\treturn version.GetVersion()\n}", "func getVersion(agentInstall DotNetAgentInstall) (result tasks.Result) {\n\n\tagentVersion, err := tasks.GetFileVersion(agentInstall.AgentPath)\n\n\tif err != nil {\n\t\tresult.Status = tasks.Error\n\t\tresult.Summary = \"Error finding .Net Agent version\"\n\t\tlog.Info(\"Error finding .Net Agent version. The error is \", err)\n\t\treturn result\n\t}\n\n\tresult.Status = tasks.Info\n\tresult.Summary = agentVersion\n\tresult.Payload = agentVersion\n\treturn result\n\n}", "func (_Bridge *BridgeCallerSession) GetVersion() (string, error) {\n\treturn _Bridge.Contract.GetVersion(&_Bridge.CallOpts)\n}", "func SourceVersion() string {\n\treturn fmt.Sprintf(\"%s commit: %s / nearest-git-\"+\n\t\t\"tag: %s / branch: %s / %s\\n\",\n\t\tProgramName, LAST_GIT_COMMIT_HASH,\n\t\tNEAREST_GIT_TAG, GIT_BRANCH, GO_VERSION)\n}", "func GetVersion() string {\n\treturn Version\n}", "func GetVersion() string {\n\treturn Version\n}", "func (o VirtualDatabaseSpecBuildSourcePtrOutput) Version() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *VirtualDatabaseSpecBuildSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Version\n\t}).(pulumi.StringPtrOutput)\n}", "func CurrentSourceVersion() string {\n\tif environ.HasValue(\"SOURCE_VERSION_OVERRIDE\") {\n\t\treturn environ.GetValueStr(\"SOURCE_VERSION_OVERRIDE\")\n\t}\n\n\tmanifestPath := path.Join(RootDir(), 
\"src\", \"appengine\", \"resources\", \"clusterfuzz-source.manifest\")\n\tresult, err := ioutil.ReadFile(manifestPath)\n\n\tif err != nil {\n\t\tlogs.Panicf(\"Failed to get current source version: %v\", err)\n\t}\n\n\treturn string(result)\n}", "func (u UserInviteCodeServiceServer) GetVersion(_ context.Context, _ *rfpb.VersionReq) (*rfpb.VersionRes, error) {\n\n\t_handleSimLoadLatency()\n\treturn &rfpb.VersionRes{Version: fmt.Sprintf(\"v%s\", metaServiceVersion)}, nil\n}", "func GetVersion() string {\n\treturn VersionString\n}", "func (_Bridge *BridgeCaller) GetVersion(opts *bind.CallOpts) (string, error) {\n\tvar (\n\t\tret0 = new(string)\n\t)\n\tout := ret0\n\terr := _Bridge.contract.Call(opts, out, \"getVersion\")\n\treturn *ret0, err\n}", "func (_Bridge *BridgeSession) GetVersion() (string, error) {\n\treturn _Bridge.Contract.GetVersion(&_Bridge.CallOpts)\n}", "func getVersion() string {\n\tif metadata == \"\" {\n\t\treturn version\n\t}\n\treturn version + \"-\" + metadata\n}", "func (m *GraphDef) GetVersion() int32 {\n\tif m != nil {\n\t\treturn m.Version\n\t}\n\treturn 0\n}", "func Version() string {\n\treturn getData().Version\n}", "func (m *SynchronizationSchema) GetVersion()(*string) {\n val, err := m.GetBackingStore().Get(\"version\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func GetVersion() string {\n\tif metadata == \"\" {\n\t\treturn version\n\t}\n\treturn version + \"+\" + metadata\n}", "func (_PlasmaFramework *PlasmaFrameworkCallerSession) GetVersion() (string, error) {\n\treturn _PlasmaFramework.Contract.GetVersion(&_PlasmaFramework.CallOpts)\n}", "func getVersion(driver *neo4j.Driver) (Version, error) {\n\tversion := Version{}\n\tsession := (*driver).NewSession(neo4j.SessionConfig{})\n\tdefer session.Close()\n\n\tresult, err := session.Run(VERSION_QUERY, nil)\n\tif err != nil {\n\t\treturn version, nil\n\t}\n\n\trecord, err := result.Single()\n\tif err != nil {\n\t\treturn version, 
nil\n\t}\n\n\tval, found := record.Get(\"version\")\n\tif !found {\n\t\treturn version, errors.New(\"couldn't find 'version' in query results\")\n\t}\n\tdata, ok := val.([]interface{})\n\tif !ok {\n\t\treturn version, errors.New(\"'version' isn't an array\")\n\t}\n\tif len(data) < 2 {\n\t\treturn version, errors.New(\"'version' array is empty or too small\")\n\t}\n\n\tval, found = record.Get(\"extra\")\n\tif !found {\n\t\treturn version, errors.New(\"couldn't find 'extra' version info\")\n\t}\n\textra, ok := val.(string)\n\tif !ok {\n\t\treturn version, errors.New(\"'extra' value isn't a string\")\n\t}\n\n\t// yolo for now\n\tversion.Major = uint8(data[0].(int64))\n\tversion.Minor = uint8(data[1].(int64))\n\n\tif len(data) > 2 {\n\t\tversion.Patch = uint8(data[2].(int64))\n\t}\n\tversion.Extra = extra\n\n\treturn version, nil\n}", "func (o *VirtualizationIweHost) GetVersion() string {\n\tif o == nil || o.Version == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func (_PlasmaFramework *PlasmaFrameworkCaller) GetVersion(opts *bind.CallOpts) (string, error) {\n\tvar (\n\t\tret0 = new(string)\n\t)\n\tout := ret0\n\terr := _PlasmaFramework.contract.Call(opts, out, \"getVersion\")\n\treturn *ret0, err\n}", "func (f *Features) getVersion(ctx context.Context, adminDB *mongo.Database) {\n\tcmd := bson.D{\n\t\t{\n\t\t\tKey: \"buildInfo\",\n\t\t\tValue: 1,\n\t\t},\n\t}\n\tvar result buildInfo\n\terr := adminDB.RunCommand(ctx, cmd).Decode(&result)\n\tif err != nil {\n\t\tf.MongoVersion = &semver.Version{}\n\t\treturn\n\t}\n\n\tf.MongoVersion = semver.MustParse(result.Version)\n}", "func (pr LocalPackageReference) GeneratorVersion() string {\n\treturn pr.generatorVersion\n}", "func (c *Connection) Version(ctx context.Context) (string, error) {\n\tresp, err := c.Request(ctx).\n\t\tSetResult(&api.VersionResponse{}).\n\t\tGet(\"/version\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn resp.Result().(*api.VersionResponse).Version, nil\n}", "func 
(s *Store) GetVersion(ctx context.Context) (string, error) {\n\t// We treat the existence of cells_subscriptions as running on the initial\n\t// version, 1.0.0\n\tconst query = `\n\t\tSELECT EXISTS (\n \t\tSELECT *\n\t\t FROM information_schema.tables \n \t\tWHERE table_name = 'cells_subscriptions'\n )`\n\trow := s.db.QueryRowContext(ctx, query)\n\tvar ret bool\n\terr := row.Scan(&ret)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif ret {\n\t\t// Base version\n\t\treturn \"v1.0.0\", nil\n\t}\n\t// Version without cells joins table.\n\t// TODO: leverage proper migrations and use something like the query below.\n\treturn \"v2.0.0\", nil\n}", "func (_PlasmaFramework *PlasmaFrameworkSession) GetVersion() (string, error) {\n\treturn _PlasmaFramework.Contract.GetVersion(&_PlasmaFramework.CallOpts)\n}", "func GetVersion() string {\n\treturn __VERSION__\n}", "func (c *Container) GetVersion(ctx echo.Context) error {\n tabletServersFuture := make(chan helpers.TabletServersFuture)\n go helpers.GetTabletServersFuture(helpers.HOST, tabletServersFuture)\n\n // Get response from tabletServersFuture\n tabletServersResponse := <-tabletServersFuture\n if tabletServersResponse.Error != nil {\n return ctx.String(http.StatusInternalServerError,\n tabletServersResponse.Error.Error())\n }\n nodeList := helpers.GetNodesList(tabletServersResponse)\n versionInfoFutures := []chan helpers.VersionInfoFuture{}\n for _, nodeHost := range nodeList {\n versionInfoFuture := make(chan helpers.VersionInfoFuture)\n versionInfoFutures = append(versionInfoFutures, versionInfoFuture)\n go helpers.GetVersionFuture(nodeHost, versionInfoFuture)\n }\n smallestVersion := helpers.GetSmallestVersion(versionInfoFutures)\n return ctx.JSON(http.StatusOK, models.VersionInfo{\n Version: smallestVersion,\n })\n}", "func GetVersion() string {\n\tif len(Version) == 0 {\n\t\treturn \"dev\"\n\t}\n\treturn Version\n}", "func GetVersion() string {\n\tif len(Version) == 0 {\n\t\treturn \"dev\"\n\t}\n\treturn 
Version\n}", "func (d TinkDB) GetWorkflowDataVersion(ctx context.Context, workflowID string) (int32, error) {\n\treturn getLatestVersionWfData(ctx, d.instance, workflowID)\n}", "func (o *MicrosoftGraphSharedPcConfiguration) GetVersion() int32 {\n\tif o == nil || o.Version == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func GetVersion(c *gin.Context) {\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"version\": Version,\n\t\t\"merchant_name\": MerchantName,\n\t})\n}", "func (dataChannel *DataChannel) GetAgentVersion() string {\n\treturn dataChannel.agentVersion\n}", "func Version(node client.ABCIClient) (\n\tstring, *rpctypes.ResultABCIQuery, error,\n) {\n\t// perform the query\n\tres, err := node.ABCIQuery(query.VersionEndpoint, []byte{})\n\tif err != nil {\n\t\treturn \"\", res, err\n\t}\n\treturn string(res.Response.GetValue()), res, err\n}", "func (o InstanceS3ImportOutput) SourceEngineVersion() pulumi.StringOutput {\n\treturn o.ApplyT(func(v InstanceS3Import) string { return v.SourceEngineVersion }).(pulumi.StringOutput)\n}", "func (r Source) GetAPIVersion() string {\n\treturn r.APIVersion\n}", "func Version() string {\n\t// TODO: Implement version tracking\n\treturn \"0.0.1\"\n}", "func (store *GSStore) GetVersion() int {\n\treturn store.Version\n}", "func (m *GroupPolicyDefinition) GetVersion()(*string) {\n val, err := m.GetBackingStore().Get(\"version\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func GetCurrentUsingVersion() (*string, error) {\n\tdenoFilepath := getInstalledDenoFilepath()\n\n\tif denoFilepath == \"\" {\n\t\treturn nil, nil\n\t}\n\n\targs := []string{\"--version\"}\n\tcmd := exec.Command(denoFilepath, args...)\n\n\toutput, err := cmd.CombinedOutput()\n\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"`deno --version` failed\\n%s\", string(output))\n\t}\n\n\tif cmd.ProcessState.ExitCode() != 0 {\n\t\treturn nil, errors.New(string(output))\n\t}\n\n\tarr := 
strings.Split(strings.Split(string(output), \"\\n\")[0], \" \")\n\n\tversion := strings.TrimSpace(\"v\" + strings.TrimSpace(arr[1]))\n\n\treturn &version, nil\n}", "func (o *SoftwareTechs) GetVersion() string {\n\tif o == nil || o.Version == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func GetImageVersion(image string) (string, error) {\n\treturn getImageLabel(image, alterVersionLabelKey)\n}", "func (o ClusterS3ImportOutput) SourceEngineVersion() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ClusterS3Import) string { return v.SourceEngineVersion }).(pulumi.StringOutput)\n}", "func (d Dispatcher) Version() (string, error) {\n\theight, err := d.GetBC().GetLatestHeight()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thashes, err := d.GetBC().GetBlockHashesHex()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tversionBytes, err := helpers.Serialize(NewVersion(GizoVersion, height, hashes))\n\treturn string(versionBytes), nil\n}", "func Version() string {\n\treturn version\n}", "func Version() string {\n\treturn version\n}", "func Version() string {\n\treturn version\n}", "func Version() string {\n\treturn version\n}", "func Version() string {\n\treturn version\n}", "func Version() string {\n\treturn version\n}", "func Version() string {\n\treturn version\n}", "func Version() string {\n\treturn version\n}", "func Version() string {\n\treturn versionNumber\n}", "func (p OpenFlow10Protocol) GetVersion() uint8 {\n\treturn goloxi.VERSION_1_0\n}", "func GitVersion() string { return gitVersion }", "func (o InstanceS3ImportPtrOutput) SourceEngineVersion() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *InstanceS3Import) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.SourceEngineVersion\n\t}).(pulumi.StringPtrOutput)\n}", "func (api *API) Version(ctx context.Context) (string, error) {\n\taddr := api.host + \":\" + api.port\n\treturn version(addr)\n}", "func (o ClusterS3ImportPtrOutput) SourceEngineVersion() 
pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ClusterS3Import) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.SourceEngineVersion\n\t}).(pulumi.StringPtrOutput)\n}", "func (m *DeviceManagementConfigurationSettingDefinition) GetVersion()(*string) {\n val, err := m.GetBackingStore().Get(\"version\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func (o *LoraNetworkTrigger) GetVersion() int32 {\n\tif o == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\n\treturn o.Version\n}", "func (c *ServerConfig) GetVersionEndpoint() string {\n\tnurl := *c.ParsedEndpoint\n\tnurl.Path = path.Join(nurl.Path, c.APIPaths.Version)\n\treturn nurl.String()\n}", "func (o ApplicationStatusWorkflowContextbackendOutput) ResourceVersion() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusWorkflowContextbackend) *string { return v.ResourceVersion }).(pulumi.StringPtrOutput)\n}", "func (o *ConnectorTypeAllOf) GetVersion() string {\n\tif o == nil || o.Version == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func (o *ConnectorTypeAllOf) GetVersion() string {\n\tif o == nil || o.Version == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func (self PostgresDatabase) getDBVersion() (version int) {\n var val string\n var vers int64\n err := self.conn.QueryRow(\"SELECT value FROM Settings WHERE name = $1\", \"version\").Scan(&val)\n if err == nil {\n vers, err = strconv.ParseInt(val, 10, 32)\n if err == nil {\n version = int(vers)\n } else {\n log.Fatal(\"cannot figure out db version\", err)\n }\n } else {\n version = -1\n }\n return\n}", "func (r *Resolver) Version() VersionResolver { return &versionResolver{r} }", "func (o *SchemaDefinitionRestDto) GetVersion() string {\n\tif o == nil || o.Version == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func (o *Version) GetVersion() string {\n\tif o == nil || o.Version == nil 
{\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func (o *Ga4ghExternalIdentifier) GetVersion() string {\n\tif o == nil || o.Version == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func (o ApplicationStatusWorkflowContextbackendPtrOutput) ResourceVersion() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ApplicationStatusWorkflowContextbackend) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ResourceVersion\n\t}).(pulumi.StringPtrOutput)\n}", "func GetVersion() string {\n\tv := Map[\"version\"]\n\treturn v\n}", "func Version() (version string) {\n\treturn GetVersion()\n}", "func (p Plugin) GetVersion() string {\n\treturn \"v0.0.0\"\n}", "func getVersion() string {\n\tslurp, err := ioutil.ReadFile(filepath.Join(camRoot, \"VERSION\"))\n\tif err == nil {\n\t\treturn strings.TrimSpace(string(slurp))\n\t}\n\treturn gitVersion()\n}", "func (r *SoftwareVolumeResource) GetVersion() (string, error) {\n\tvar list SoftwareVolumeConfigList\n\tif err := r.c.ReadQuery(BasePath+SoftwareVolumeEndpoint, &list); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar version string\n\n\tfor _, vol := range list.Items {\n\t\tif vol.Active == true {\n\t\t\tversion = vol.Version + \" \" + vol.Build\n\t\t}\n\t}\n\n\treturn version, nil\n}", "func (o *SoftwarerepositoryCategoryMapper) GetVersion() string {\n\tif o == nil || o.Version == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func (terraformSource Source) EncodeSourceVersion() (string, error) {\n\tif IsLocalSource(terraformSource.CanonicalSourceURL) {\n\t\tsourceHash := sha256.New()\n\t\tsourceDir := filepath.Clean(terraformSource.CanonicalSourceURL.Path)\n\n\t\terr := filepath.Walk(sourceDir, func(path string, info os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\t// If we've encountered an error while walking the tree, give up\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif info.IsDir() {\n\t\t\t\t// We don't use any info from 
directories to calculate our hash\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// avoid checking files in .terragrunt-cache directory since contents is auto-generated\n\t\t\tif strings.Contains(path, util.TerragruntCacheDir) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// avoid checking files in .terraform directory since contents is auto-generated\n\t\t\tif info.Name() == util.TerraformLockFile {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tfileModified := info.ModTime().UnixMicro()\n\t\t\thashContents := fmt.Sprintf(\"%s:%d\", path, fileModified)\n\t\t\tsourceHash.Write([]byte(hashContents))\n\n\t\t\treturn nil\n\t\t})\n\n\t\tif err == nil {\n\t\t\thash := fmt.Sprintf(\"%x\", sourceHash.Sum(nil))\n\n\t\t\treturn hash, nil\n\t\t}\n\n\t\tterraformSource.Logger.WithError(err).Warningf(\"Could not encode version for local source\")\n\t\treturn \"\", err\n\t}\n\n\treturn util.EncodeBase64Sha1(terraformSource.CanonicalSourceURL.Query().Encode()), nil\n}", "func (o *TeamConfiguration) GetVersion() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Version\n}", "func (s *Structured) GetVersion() string {\n\treturn s.cloudEvent.CloudEventsVersion\n}", "func (o *DeviceResourceVersionValueWeb) GetVersion() string {\n\tif o == nil || o.Version == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func (pr LocalPackageReference) Version() string {\n\treturn pr.version\n}", "func (v *VersionSelector) getUpgradeVersion() (string, string, error) {\n\tvar selectedVersionSelector upgradeselectors.Interface = nil\n\n\tcurPriority := math.MinInt32\n\n\tversionSelectors := upgradeselectors.GetVersionSelectors()\n\n\tfor _, versionSelector := range versionSelectors {\n\t\tif versionSelector.ShouldUse() && versionSelector.Priority() > curPriority {\n\t\t\tselectedVersionSelector = versionSelector\n\t\t\tcurPriority = versionSelector.Priority()\n\t\t}\n\t}\n\n\t// If no version selector has been found for an upgrade, assume that an upgrade is not being 
asked for.\n\tif selectedVersionSelector == nil {\n\t\treturn \"\", \"\", nil\n\t}\n\n\trelease, selector, err := selectedVersionSelector.SelectVersion(spi.NewVersionBuilder().Version(v.clusterVersion).Build(), v.versionList)\n\n\tif release == nil || release.Version().Original() == \"\" {\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error selecting version: %s\", err.Error())\n\t\t}\n\t\treturn util.NoVersionFound, \"\", err\n\t}\n\n\topenshiftRelease := fmt.Sprintf(\"openshift-v%s\", release.Version().Original())\n\n\tlog.Printf(\"Selected %s using selector `%s`\", openshiftRelease, selector)\n\n\treturn openshiftRelease, \"\", err\n}", "func (native *OpenGL) GetShadingLanguageVersion() string {\n\treturn gl.GoStr(gl.GetString(gl.SHADING_LANGUAGE_VERSION))\n}", "func (o *NetworkElementSummaryAllOf) GetVersion() string {\n\tif o == nil || o.Version == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func detectClientVersion(gql *graphql.Client) (clientVersion, error) {\n\thandlesOwnerId, err := orbQueryHandleOwnerId(gql)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif !handlesOwnerId {\n\t\treturn v1_string, nil\n\t}\n\treturn v2_string, nil\n}", "func (e *Event) GetVersion() string {\n\treturn e.BaseEvent.Version\n}", "func (_PlasmaFramework *PlasmaFrameworkCallerSession) Version() (string, error) {\n\treturn _PlasmaFramework.Contract.Version(&_PlasmaFramework.CallOpts)\n}", "func (s *AzsbSource) GetGroupVersionKind() schema.GroupVersionKind {\n\treturn SchemeGroupVersion.WithKind(\"AzsbSource\")\n}", "func Version() string {\n\treturn \"1.0.6\"\n}", "func (vdb *VspDatabase) Version() (uint32, error) {\n\tvar version uint32\n\terr := vdb.db.View(func(tx *bolt.Tx) error {\n\t\tbytes := tx.Bucket(vspBktK).Get(versionK)\n\t\tversion = bytesToUint32(bytes)\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn version, nil\n}", "func (l *Libvirt) ConnectGetVersion() (rHvVer uint64, err error) {\n\tvar buf []byte\n\n\tvar 
r response\n\tr, err = l.requestStream(4, constants.Program, buf, nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Return value unmarshaling\n\ttpd := typedParamDecoder{}\n\tct := map[string]xdr.TypeDecoder{\"libvirt.TypedParam\": tpd}\n\trdr := bytes.NewReader(r.Payload)\n\tdec := xdr.NewDecoderCustomTypes(rdr, 0, ct)\n\t// HvVer: uint64\n\t_, err = dec.Decode(&rHvVer)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func (c *Client) GetVersion(ctx context.Context) (rpc.GetVersionResult, error) {\n\tres, err := c.RpcClient.GetVersion(ctx)\n\terr = checkRpcResult(res.GeneralResponse, err)\n\tif err != nil {\n\t\treturn rpc.GetVersionResult{}, err\n\t}\n\treturn res.Result, nil\n}", "func (db *DB) Version() (string, error) {\n\tvar v string\n\tif _, err := db.QueryOne(pg.Scan(&v), \"select version()\"); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn v, nil\n}" ]
[ "0.5781868", "0.5636606", "0.5636606", "0.5576799", "0.55693847", "0.5562794", "0.55050653", "0.549268", "0.54771435", "0.5471268", "0.5450064", "0.5450064", "0.5440955", "0.5429904", "0.541623", "0.5398498", "0.5388633", "0.53843856", "0.53595656", "0.5346013", "0.53183573", "0.5314673", "0.52918404", "0.52900976", "0.5273753", "0.52654076", "0.52536184", "0.524013", "0.5227394", "0.52224934", "0.5217413", "0.5206825", "0.51875746", "0.517901", "0.51703006", "0.51703006", "0.5136207", "0.5130812", "0.5127151", "0.51125", "0.5106135", "0.50981164", "0.50977176", "0.5088438", "0.50795656", "0.5071805", "0.50662947", "0.5063983", "0.50634223", "0.5062896", "0.5058294", "0.50548035", "0.50548035", "0.50548035", "0.50548035", "0.50548035", "0.50548035", "0.50548035", "0.50548035", "0.5053434", "0.5049862", "0.50472075", "0.5035477", "0.5031258", "0.502338", "0.5020306", "0.50062615", "0.5000556", "0.49989983", "0.49975413", "0.49975413", "0.49969625", "0.49957564", "0.49889177", "0.49723837", "0.49667087", "0.49591374", "0.4958394", "0.49517083", "0.49514994", "0.49500775", "0.49481383", "0.4939891", "0.49380833", "0.49339134", "0.49336314", "0.4931089", "0.4927297", "0.49255714", "0.49226543", "0.49190333", "0.4915234", "0.4915011", "0.49137366", "0.49108097", "0.49107507", "0.49097213", "0.4904745", "0.49013713", "0.49012962" ]
0.80999446
0
DetermineFeatureFlags fetches the version of the configured Sourcegraph instance and then sets flags on the Service itself to use features available in that version, e.g. gzip compression.
func (svc *Service) DetermineFeatureFlags(ctx context.Context) error { version, err := svc.getSourcegraphVersion(ctx) if err != nil { return errors.Wrap(err, "failed to query Sourcegraph version to check for available features") } return svc.features.setFromVersion(version) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func InitFeatureFlags(flag *pflag.FlagSet) {\n\tflag.Bool(FeatureFlagAccessCode, false, \"Flag (bool) to enable requires-access-code\")\n\tflag.Bool(FeatureFlagRoleBasedAuth, false, \"Flag (bool) to enable role-based-auth\")\n\tflag.Bool(FeatureFlagConvertPPMsToGHC, false, \"Flag (bool) to enable convert-ppms-to-ghc\")\n}", "func (o *Options) AddFlags(fs *pflag.FlagSet) {\n\tfs.BoolVar(&o.InstallCRDs, \"install-crds\", true, \"install the CRDs used by the controller as part of startup\")\n\tfs.Var(flagutil.NewMapStringBool(&o.FeatureGates), \"feature-gates\", \"A set of key=value pairs that describe feature gates for alpha/experimental features. \"+\n\t\t\"Options are:\\n\"+strings.Join(utilfeature.DefaultFeatureGate.KnownFeatures(), \"\\n\"))\n\n\tfs.StringVar(&o.Config.FederationNamespace, \"federation-namespace\", util.DefaultFederationSystemNamespace, \"The namespace the federation control plane is deployed in.\")\n\tfs.StringVar(&o.Config.ClusterNamespace, \"registry-namespace\", util.MulticlusterPublicNamespace, \"The cluster registry namespace.\")\n\tfs.DurationVar(&o.Config.ClusterAvailableDelay, \"cluster-available-delay\", util.DefaultClusterAvailableDelay, \"Time to wait before reconciling on a healthy cluster.\")\n\tfs.DurationVar(&o.Config.ClusterUnavailableDelay, \"cluster-unavailable-delay\", util.DefaultClusterUnavailableDelay, \"Time to wait before giving up on an unhealthy cluster.\")\n\n\tfs.BoolVar(&o.LimitedScope, \"limited-scope\", false, \"Whether the federation namespace will be the only target for federation.\")\n\tfs.DurationVar(&o.ClusterMonitorPeriod, \"cluster-monitor-period\", time.Second*40, \"How often to monitor the cluster health\")\n\n\tfs.DurationVar(&o.LeaderElection.LeaseDuration, \"leader-elect-lease-duration\", util.DefaultLeaderElectionLeaseDuration, \"\"+\n\t\t\"The duration that non-leader candidates will wait after observing a leadership \"+\n\t\t\"renewal until attempting to acquire leadership of a led but unrenewed 
leader \"+\n\t\t\"slot. This is effectively the maximum duration that a leader can be stopped \"+\n\t\t\"before it is replaced by another candidate. This is only applicable if leader \"+\n\t\t\"election is enabled.\")\n\tfs.DurationVar(&o.LeaderElection.RenewDeadline, \"leader-elect-renew-deadline\", util.DefaultLeaderElectionRenewDeadline, \"\"+\n\t\t\"The interval between attempts by the acting master to renew a leadership slot \"+\n\t\t\"before it stops leading. This must be less than or equal to the lease duration. \"+\n\t\t\"This is only applicable if leader election is enabled.\")\n\tfs.DurationVar(&o.LeaderElection.RetryPeriod, \"leader-elect-retry-period\", util.DefaultLeaderElectionRetryPeriod, \"\"+\n\t\t\"The duration the clients should wait between attempting acquisition and renewal \"+\n\t\t\"of a leadership. This is only applicable if leader election is enabled.\")\n\tfs.StringVar(&o.LeaderElection.ResourceLock, \"leader-elect-resource-lock\", \"configmaps\", \"\"+\n\t\t\"The type of resource object that is used for locking during \"+\n\t\t\"leader election. 
Supported options are `configmaps` (default) and `endpoints`.\")\n}", "func (a *AdminApiService) GetAllFeatureFlags(ctx _context.Context) (FeatureFlag, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue FeatureFlag\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/admin/feature-flag\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := 
GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn 
localVarReturnValue, localVarHTTPResponse, nil\n}", "func InitFlags(fs *pflag.FlagSet) {\n\tfs.StringVar(&metricsBindAddr, \"metrics-bind-addr\", \":8080\",\n\t\t\"The address the metric endpoint binds to.\")\n\n\tfs.BoolVar(&enableLeaderElection, \"leader-elect\", false,\n\t\t\"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.\")\n\n\tfs.StringVar(&watchNamespace, \"namespace\", \"\",\n\t\t\"Namespace that the controller watches to reconcile objects. If unspecified, the controller watches for objects across all namespaces.\")\n\n\tfs.StringVar(&profilerAddress, \"profiler-address\", \"\",\n\t\t\"Bind address to expose the pprof profiler (e.g. localhost:6060)\")\n\n\tfs.IntVar(&eksControlPlaneConcurrency, \"ekscontrolplane-concurrency\", 10,\n\t\t\"Number of EKS control planes to process simultaneously\")\n\n\tfs.DurationVar(&syncPeriod, \"sync-period\", 10*time.Minute,\n\t\t\"The minimum interval at which watched resources are reconciled (e.g. 15m)\")\n\n\tfs.IntVar(&webhookPort, \"webhook-port\", 9443,\n\t\t\"Webhook Server port, disabled by default. When enabled, the manager will only work as webhook server, no reconcilers are installed.\")\n\n\tfs.StringVar(&webhookCertDir, \"webhook-cert-dir\", \"/tmp/k8s-webhook-server/serving-certs/\",\n\t\t\"Webhook cert dir, only used when webhook-port is specified.\")\n\n\tfs.StringVar(&serviceEndpoints, \"service-endpoints\", \"\",\n\t\t\"Set custom AWS service endpoins in semi-colon separated format: ${SigningRegion1}:${ServiceID1}=${URL},${ServiceID2}=${URL};${SigningRegion2}...\")\n\n\tfs.StringVar(\n\t\t&watchFilterValue,\n\t\t\"watch-filter\",\n\t\t\"\",\n\t\tfmt.Sprintf(\"Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. 
If unspecified, the controller watches for all cluster-api objects.\", clusterv1.WatchLabel),\n\t)\n\n\tfeature.MutableGates.AddFlag(fs)\n}", "func InitializeFeatures(featuresClient managementv3.FeatureClient, featureArgs string) {\n\t// applies any default values assigned in --features flag to feature map\n\tif err := applyArgumentDefaults(featureArgs); err != nil {\n\t\tlogrus.Errorf(\"failed to apply feature args: %v\", err)\n\t}\n\n\tif featuresClient == nil {\n\t\treturn\n\t}\n\n\t// creates any features in map that do not exist, updates features with new default value\n\tfor key, f := range features {\n\t\tfeatureState, err := featuresClient.Get(key, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tif !errors.IsNotFound(err) {\n\t\t\t\tlogrus.Errorf(\"unable to retrieve feature %s in initialize features: %v\", f.name, err)\n\t\t\t}\n\n\t\t\tif f.install {\n\t\t\t\t// value starts off as nil, that way rancher can determine if value has been manually assigned\n\t\t\t\tnewFeature := &v3.Feature{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: f.name,\n\t\t\t\t\t},\n\t\t\t\t\tSpec: v3.FeatureSpec{\n\t\t\t\t\t\tValue: nil,\n\t\t\t\t\t},\n\t\t\t\t\tStatus: v3.FeatureStatus{\n\t\t\t\t\t\tDefault: f.def,\n\t\t\t\t\t\tDynamic: f.dynamic,\n\t\t\t\t\t\tDescription: f.description,\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tif _, err := featuresClient.Create(newFeature); err != nil {\n\t\t\t\t\tlogrus.Errorf(\"unable to create feature %s in initialize features: %v\", f.name, err)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tnewFeatureState := featureState.DeepCopy()\n\t\t\t// checks if default value has changed\n\t\t\tif featureState.Status.Default != f.def {\n\t\t\t\tnewFeatureState.Status.Default = f.def\n\t\t\t}\n\n\t\t\t// checks if developer has changed dynamic value from previous rancher version\n\t\t\tif featureState.Status.Dynamic != f.dynamic {\n\t\t\t\tnewFeatureState.Status.Dynamic = f.dynamic\n\t\t\t}\n\n\t\t\t// checks if developer has changed description 
value from previous rancher version\n\t\t\tif featureState.Status.Description != f.description {\n\t\t\t\tnewFeatureState.Status.Description = f.description\n\t\t\t}\n\n\t\t\tnewFeatureState, err = featuresClient.Update(newFeatureState)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"unable to update feature %s in initialize features: %v\", f.name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif newFeatureState.Status.LockedValue != nil {\n\t\t\t\tf.Set(*newFeatureState.Status.LockedValue)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif featureState.Spec.Value == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif *featureState.Spec.Value == f.val {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tf.Set(*featureState.Spec.Value)\n\t\t}\n\t}\n}", "func BuildServerFlags(cmd *cobra.Command, srv *server.Command) {\n\tflags := cmd.Flags()\n\tflags.StringVar(&srv.Config.Name, \"name\", srv.Config.Name, \"Name of the node in the cluster.\")\n\tflags.StringVarP(&srv.Config.DataDir, \"data-dir\", \"d\", srv.Config.DataDir, \"Directory to store FeatureBase data files.\")\n\tflags.StringVarP(&srv.Config.Bind, \"bind\", \"b\", srv.Config.Bind, \"Default URI on which FeatureBase should listen.\")\n\tflags.StringVar(&srv.Config.BindGRPC, \"bind-grpc\", srv.Config.BindGRPC, \"URI on which FeatureBase should listen for gRPC requests.\")\n\tflags.StringVar(&srv.Config.Advertise, \"advertise\", srv.Config.Advertise, \"Address to advertise externally.\")\n\tflags.StringVar(&srv.Config.AdvertiseGRPC, \"advertise-grpc\", srv.Config.AdvertiseGRPC, \"Address to advertise externally for gRPC.\")\n\tflags.IntVar(&srv.Config.MaxWritesPerRequest, \"max-writes-per-request\", srv.Config.MaxWritesPerRequest, \"Number of write commands per request.\")\n\tflags.StringVar(&srv.Config.LogPath, \"log-path\", srv.Config.LogPath, \"Log path\")\n\tflags.BoolVar(&srv.Config.Verbose, \"verbose\", srv.Config.Verbose, \"Enable verbose logging\")\n\tflags.Uint64Var(&srv.Config.MaxMapCount, \"max-map-count\", srv.Config.MaxMapCount, 
\"Limits the maximum number of active mmaps. FeatureBase will fall back to reading files once this is exhausted. Set below your system's vm.max_map_count.\")\n\tflags.Uint64Var(&srv.Config.MaxFileCount, \"max-file-count\", srv.Config.MaxFileCount, \"Soft limit on the maximum number of fragment files FeatureBase keeps open simultaneously.\")\n\tflags.DurationVar((*time.Duration)(&srv.Config.LongQueryTime), \"long-query-time\", time.Duration(srv.Config.LongQueryTime), \"Duration that will trigger log and stat messages for slow queries. Zero to disable.\")\n\tflags.IntVar(&srv.Config.QueryHistoryLength, \"query-history-length\", srv.Config.QueryHistoryLength, \"Number of queries to remember in history.\")\n\tflags.Int64Var(&srv.Config.MaxQueryMemory, \"max-query-memory\", srv.Config.MaxQueryMemory, \"Maximum memory allowed per Extract() or SELECT query.\")\n\n\t// TLS\n\tSetTLSConfig(flags, \"\", &srv.Config.TLS.CertificatePath, &srv.Config.TLS.CertificateKeyPath, &srv.Config.TLS.CACertPath, &srv.Config.TLS.SkipVerify, &srv.Config.TLS.EnableClientVerification)\n\n\t// Handler\n\tflags.StringSliceVar(&srv.Config.Handler.AllowedOrigins, \"handler.allowed-origins\", []string{}, \"Comma separated list of allowed origin URIs (for CORS/Web UI).\")\n\n\t// Cluster\n\tflags.IntVar(&srv.Config.Cluster.ReplicaN, \"cluster.replicas\", 1, \"Number of hosts each piece of data should be stored on.\")\n\tflags.DurationVar((*time.Duration)(&srv.Config.Cluster.LongQueryTime), \"cluster.long-query-time\", time.Duration(srv.Config.Cluster.LongQueryTime), \"RENAMED TO 'long-query-time': Duration that will trigger log and stat messages for slow queries.\") // negative duration indicates invalid value because 0 is meaningful\n\tflags.StringVar(&srv.Config.Cluster.Name, \"cluster.name\", srv.Config.Cluster.Name, \"Human-readable name for the cluster.\")\n\tflags.StringVar(&srv.Config.Cluster.PartitionToNodeAssignment, \"cluster.partition-to-node-assignment\", 
srv.Config.Cluster.PartitionToNodeAssignment, \"How to assign partitions to nodes. jmp-hash or modulus\")\n\n\t// Translation\n\tflags.StringVar(&srv.Config.Translation.PrimaryURL, \"translation.primary-url\", srv.Config.Translation.PrimaryURL, \"DEPRECATED: URL for primary translation node for replication.\")\n\tflags.IntVar(&srv.Config.Translation.MapSize, \"translation.map-size\", srv.Config.Translation.MapSize, \"Size in bytes of mmap to allocate for key translation.\")\n\n\t// Etcd\n\t// Etcd.Name used Config.Name for its value.\n\tflags.StringVar(&srv.Config.Etcd.Dir, \"etcd.dir\", srv.Config.Etcd.Dir, \"Directory to store etcd data files. If not provided, a directory will be created under the main data-dir directory.\")\n\t// Etcd.ClusterName uses Cluster.Name for its value\n\tflags.StringVar(&srv.Config.Etcd.LClientURL, \"etcd.listen-client-address\", srv.Config.Etcd.LClientURL, \"Listen client address.\")\n\tflags.StringVar(&srv.Config.Etcd.AClientURL, \"etcd.advertise-client-address\", srv.Config.Etcd.AClientURL, \"Advertise client address. If not provided, uses the listen client address.\")\n\tflags.StringVar(&srv.Config.Etcd.LPeerURL, \"etcd.listen-peer-address\", srv.Config.Etcd.LPeerURL, \"Listen peer address.\")\n\tflags.StringVar(&srv.Config.Etcd.APeerURL, \"etcd.advertise-peer-address\", srv.Config.Etcd.APeerURL, \"Advertise peer address. 
If not provided, uses the listen peer address.\")\n\tflags.StringVar(&srv.Config.Etcd.ClusterURL, \"etcd.cluster-url\", srv.Config.Etcd.ClusterURL, \"Cluster URL to join.\")\n\tflags.StringVar(&srv.Config.Etcd.InitCluster, \"etcd.initial-cluster\", srv.Config.Etcd.InitCluster, \"Initial cluster name1=apurl1,name2=apurl2\")\n\tflags.Int64Var(&srv.Config.Etcd.HeartbeatTTL, \"etcd.heartbeat-ttl\", srv.Config.Etcd.HeartbeatTTL, \"Timeout used to determine cluster status\")\n\n\tflags.StringVar(&srv.Config.Etcd.Cluster, \"etcd.static-cluster\", srv.Config.Etcd.Cluster, \"EXPERIMENTAL static featurebase cluster name1=apurl1,name2=apurl2\")\n\t_ = flags.MarkHidden(\"etcd.static-cluster\")\n\tflags.StringVar(&srv.Config.Etcd.EtcdHosts, \"etcd.etcd-hosts\", srv.Config.Etcd.EtcdHosts, \"EXPERIMENTAL etcd server host:port comma separated list\")\n\t_ = flags.MarkHidden(\"etcd.etcd-hosts\") // TODO (twg) expose when ready for public consumption\n\n\t// External postgres database for ExternalLookup\n\tflags.StringVar(&srv.Config.LookupDBDSN, \"lookup-db-dsn\", \"\", \"external (postgres) database DSN to use for ExternalLookup calls\")\n\n\t// AntiEntropy\n\tflags.DurationVar((*time.Duration)(&srv.Config.AntiEntropy.Interval), \"anti-entropy.interval\", (time.Duration)(srv.Config.AntiEntropy.Interval), \"Interval at which to run anti-entropy routine.\")\n\n\t// Metric\n\tflags.StringVar(&srv.Config.Metric.Service, \"metric.service\", srv.Config.Metric.Service, \"Where to send stats: can be expvar (in-memory served at /debug/vars), prometheus, statsd or none.\")\n\tflags.StringVar(&srv.Config.Metric.Host, \"metric.host\", srv.Config.Metric.Host, \"URI to send metrics when metric.service is statsd.\")\n\tflags.DurationVar((*time.Duration)(&srv.Config.Metric.PollInterval), \"metric.poll-interval\", (time.Duration)(srv.Config.Metric.PollInterval), \"Polling interval metrics.\")\n\tflags.BoolVar((&srv.Config.Metric.Diagnostics), \"metric.diagnostics\", srv.Config.Metric.Diagnostics, 
\"Enabled diagnostics reporting.\")\n\n\t// Tracing\n\tflags.StringVar(&srv.Config.Tracing.AgentHostPort, \"tracing.agent-host-port\", srv.Config.Tracing.AgentHostPort, \"Jaeger agent host:port.\")\n\tflags.StringVar(&srv.Config.Tracing.SamplerType, \"tracing.sampler-type\", srv.Config.Tracing.SamplerType, \"Jaeger sampler type (remote, const, probabilistic, ratelimiting) or 'off' to disable tracing completely.\")\n\tflags.Float64Var(&srv.Config.Tracing.SamplerParam, \"tracing.sampler-param\", srv.Config.Tracing.SamplerParam, \"Jaeger sampler parameter.\")\n\n\t// Profiling\n\tflags.IntVar(&srv.Config.Profile.BlockRate, \"profile.block-rate\", srv.Config.Profile.BlockRate, \"Sampling rate for goroutine blocking profiler. One sample per <rate> ns.\")\n\tflags.IntVar(&srv.Config.Profile.MutexFraction, \"profile.mutex-fraction\", srv.Config.Profile.MutexFraction, \"Sampling fraction for mutex contention profiling. Sample 1/<rate> of events.\")\n\n\tflags.StringVar(&srv.Config.Storage.Backend, \"storage.backend\", storage.DefaultBackend, \"Storage backend to use: 'rbf' is only supported value.\")\n\tflags.BoolVar(&srv.Config.Storage.FsyncEnabled, \"storage.fsync\", true, \"enable fsync fully safe flush-to-disk\")\n\n\t// RBF specific flags. See pilosa/rbf/cfg/cfg.go for definitions.\n\tsrv.Config.RBFConfig.DefineFlags(flags)\n\n\tflags.BoolVar(&srv.Config.SQL.EndpointEnabled, \"sql.endpoint-enabled\", srv.Config.SQL.EndpointEnabled, \"Enable FeatureBase SQL /sql endpoint (default false)\")\n\n\t// Future flags.\n\tflags.BoolVar(&srv.Config.Future.Rename, \"future.rename\", false, \"Present application name as FeatureBase. 
Defaults to false, will default to true in an upcoming release.\")\n\n\t// OAuth2.0 identity provider configuration\n\tflags.BoolVar(&srv.Config.Auth.Enable, \"auth.enable\", false, \"Enable AuthN/AuthZ of featurebase, disabled by default.\")\n\tflags.StringVar(&srv.Config.Auth.ClientId, \"auth.client-id\", srv.Config.Auth.ClientId, \"Identity Provider's Application/Client ID.\")\n\tflags.StringVar(&srv.Config.Auth.ClientSecret, \"auth.client-secret\", srv.Config.Auth.ClientSecret, \"Identity Provider's Client Secret.\")\n\tflags.StringVar(&srv.Config.Auth.AuthorizeURL, \"auth.authorize-url\", srv.Config.Auth.AuthorizeURL, \"Identity Provider's Authorize URL.\")\n\tflags.StringVar(&srv.Config.Auth.RedirectBaseURL, \"auth.redirect-base-url\", srv.Config.Auth.RedirectBaseURL, \"Base URL of the featurebase instance used to redirect IDP.\")\n\tflags.StringVar(&srv.Config.Auth.TokenURL, \"auth.token-url\", srv.Config.Auth.TokenURL, \"Identity Provider's Token URL.\")\n\tflags.StringVar(&srv.Config.Auth.GroupEndpointURL, \"auth.group-endpoint-url\", srv.Config.Auth.GroupEndpointURL, \"Identity Provider's Group endpoint URL.\")\n\tflags.StringVar(&srv.Config.Auth.LogoutURL, \"auth.logout-url\", srv.Config.Auth.LogoutURL, \"Identity Provider's Logout URL.\")\n\tflags.StringSliceVar(&srv.Config.Auth.Scopes, \"auth.scopes\", srv.Config.Auth.Scopes, \"Comma separated list of scopes obtained from IdP\")\n\tflags.StringVar(&srv.Config.Auth.SecretKey, \"auth.secret-key\", srv.Config.Auth.SecretKey, \"Secret key used for auth.\")\n\tflags.StringVar(&srv.Config.Auth.PermissionsFile, \"auth.permissions\", srv.Config.Auth.PermissionsFile, \"Permissions' file with group authorization.\")\n\tflags.StringVar(&srv.Config.Auth.QueryLogPath, \"auth.query-log-path\", srv.Config.Auth.QueryLogPath, \"Path to log user queries\")\n\tflags.StringSliceVar(&srv.Config.Auth.ConfiguredIPs, \"auth.configured-ips\", srv.Config.Auth.ConfiguredIPs, \"List of configured IPs allowed for 
ingest\")\n\n\tflags.BoolVar(&srv.Config.DataDog.Enable, \"datadog.enable\", false, \"enable continuous profiling with DataDog cloud service, Note you must have DataDog agent installed\")\n\tflags.StringVar(&srv.Config.DataDog.Service, \"datadog.service\", \"default-service\", \"The Datadog service name, for example my-web-app\")\n\tflags.StringVar(&srv.Config.DataDog.Env, \"datadog.env\", \"default-env\", \"The Datadog environment name, for example, production\")\n\tflags.StringVar(&srv.Config.DataDog.Version, \"datadog.version\", \"default-version\", \"The version of your application\")\n\tflags.StringVar(&srv.Config.DataDog.Tags, \"datadog.tags\", \"molecula\", \"The tags to apply to an uploaded profile. Must be a list of in the format <KEY1>:<VALUE1>,<KEY2>:<VALUE2>\")\n\tflags.BoolVar(&srv.Config.DataDog.CPUProfile, \"datadog.cpu-profile\", true, \"golang pprof cpu profile \")\n\tflags.BoolVar(&srv.Config.DataDog.HeapProfile, \"datadog.heap-profile\", true, \"golang pprof heap profile\")\n\tflags.BoolVar(&srv.Config.DataDog.MutexProfile, \"datadog.mutex-profile\", false, \"golang pprof mutex profile\")\n\tflags.BoolVar(&srv.Config.DataDog.GoroutineProfile, \"datadog.goroutine-profile\", false, \"golang pprof goroutine profile\")\n\tflags.BoolVar(&srv.Config.DataDog.BlockProfile, \"datadog.block-profile\", false, \"golang pprof goroutine \")\n}", "func (b *AdapterBase) InstallFlags() {\n\tb.initFlagSet()\n\tb.flagOnce.Do(func() {\n\t\tif b.CustomMetricsAdapterServerOptions == nil {\n\t\t\tb.CustomMetricsAdapterServerOptions = server.NewCustomMetricsAdapterServerOptions()\n\t\t\tb.CustomMetricsAdapterServerOptions.OpenAPIConfig = b.OpenAPIConfig\n\t\t}\n\n\t\tb.SecureServing.AddFlags(b.FlagSet)\n\t\tb.Authentication.AddFlags(b.FlagSet)\n\t\tb.Authorization.AddFlags(b.FlagSet)\n\t\tb.Audit.AddFlags(b.FlagSet)\n\t\tb.Features.AddFlags(b.FlagSet)\n\n\t\tb.FlagSet.StringVar(&b.RemoteKubeConfigFile, \"lister-kubeconfig\", b.RemoteKubeConfigFile,\n\t\t\t\"kubeconfig 
file pointing at the 'core' kubernetes server with enough rights to list \"+\n\t\t\t\t\"any described objects\")\n\t\tb.FlagSet.DurationVar(&b.DiscoveryInterval, \"discovery-interval\", b.DiscoveryInterval,\n\t\t\t\"interval at which to refresh API discovery information\")\n\t})\n}", "func (s *VMTServer) AddFlags(fs *pflag.FlagSet) {\n\tfs.StringVar(&s.ClusterKeyInjected, \"cluster-key-injected\", \"\", \"Injected cluster key to enable pod move across cluster\")\n\tfs.IntVar(&s.Port, \"port\", s.Port, \"The port that kubeturbo's http service runs on.\")\n\tfs.StringVar(&s.Address, \"ip\", s.Address, \"the ip address that kubeturbo's http service runs on.\")\n\t// TODO: The flagset that is included by vendoring k8s uses the same names i.e. \"master\" and \"kubeconfig\".\n\t// This for some reason conflicts with the names introduced by kubeturbo after upgrading the k8s vendored code\n\t// to version 1.19.1. Right now we have changed the names of kubeturbo flags as a quick fix. These flags are\n\t// not user facing and are useful only when running kubeturbo outside the cluster. 
Find a better solution\n\t// when need be.\n\tfs.StringVar(&s.Master, \"k8s-master\", s.Master, \"The address of the Kubernetes API server (overrides any value in kubeconfig).\")\n\tfs.StringVar(&s.K8sTAPSpec, \"turboconfig\", s.K8sTAPSpec, \"Path to the config file.\")\n\tfs.StringVar(&s.TestingFlagPath, \"testingflag\", s.TestingFlagPath, \"Path to the testing flag.\")\n\tfs.StringVar(&s.KubeConfig, \"k8s-kubeconfig\", s.KubeConfig, \"Path to kubeconfig file with authorization and master location information.\")\n\tfs.BoolVar(&s.EnableProfiling, \"profiling\", false, \"Enable profiling via web interface host:port/debug/pprof/.\")\n\tfs.BoolVar(&s.UseUUID, \"stitch-uuid\", true, \"Use VirtualMachine's UUID to do stitching, otherwise IP is used.\")\n\tfs.IntVar(&s.KubeletPort, \"kubelet-port\", DefaultKubeletPort, \"The port of the kubelet runs on.\")\n\tfs.BoolVar(&s.EnableKubeletHttps, \"kubelet-https\", DefaultKubeletHttps, \"Indicate if Kubelet is running on https server.\")\n\tfs.BoolVar(&s.UseNodeProxyEndpoint, \"use-node-proxy-endpoint\", false, \"Indicate if Kubelet queries should be routed through APIServer node proxy endpoint.\")\n\tfs.BoolVar(&s.ForceSelfSignedCerts, \"kubelet-force-selfsigned-cert\", true, \"Indicate if we must use self-signed cert.\")\n\tfs.BoolVar(&s.FailVolumePodMoves, \"fail-volume-pod-moves\", true, \"Indicate if kubeturbo should fail to move pods which have volumes attached. Default is set to true.\")\n\tfs.BoolVar(&s.UpdateQuotaToAllowMoves, \"update-quota-to-allow-moves\", true, \"Indicate if kubeturbo should try to update namespace quotas to allow pod moves when quota(s) is/are full. 
Default is set to true.\")\n\tfs.StringVar(&k8sVersion, \"k8sVersion\", k8sVersion, \"[deprecated] the kubernetes server version; for openshift, it is the underlying Kubernetes' version.\")\n\tfs.StringVar(&noneSchedulerName, \"noneSchedulerName\", noneSchedulerName, \"[deprecated] a none-exist scheduler name, to prevent controller to create Running pods during move Action.\")\n\tfs.IntVar(&s.DiscoveryIntervalSec, \"discovery-interval-sec\", defaultDiscoveryIntervalSec, \"The discovery interval in seconds.\")\n\tfs.IntVar(&s.ValidationWorkers, \"validation-workers\", DefaultValidationWorkers, \"The validation workers\")\n\tfs.IntVar(&s.ValidationTimeout, \"validation-timeout-sec\", DefaultValidationTimeout, \"The validation timeout in seconds.\")\n\tfs.IntVar(&s.DiscoveryWorkers, \"discovery-workers\", DefaultDiscoveryWorkers, \"The number of discovery workers.\")\n\tfs.IntVar(&s.DiscoveryTimeoutSec, \"discovery-timeout-sec\", DefaultDiscoveryTimeoutSec, \"The discovery timeout in seconds for each discovery worker.\")\n\tfs.IntVar(&s.DiscoverySamples, \"discovery-samples\", DefaultDiscoverySamples, \"The number of resource usage data samples to be collected from kubelet in each full discovery cycle. This should be no larger than 60.\")\n\tfs.IntVar(&s.DiscoverySampleIntervalSec, \"discovery-sample-interval\", DefaultDiscoverySampleIntervalSec, \"The discovery interval in seconds to collect additional resource usage data samples from kubelet. This should be no smaller than 10 seconds.\")\n\tfs.IntVar(&s.GCIntervalMin, \"garbage-collection-interval\", DefaultGCIntervalMin, \"The garbage collection interval in minutes for possible leaked pods from actions failed because of kubeturbo restarts. 
Default value is 20 mins.\")\n\tfs.IntVar(&s.ItemsPerListQuery, \"items-per-list-query\", 0, \"Number of workload controller items the list api call should request for.\")\n\tfs.StringSliceVar(&s.sccSupport, \"scc-support\", defaultSccSupport, \"The SCC list allowed for executing pod actions, e.g., --scc-support=restricted,anyuid or --scc-support=* to allow all. Default allowed scc is [*].\")\n\t// So far we have noticed cluster api support only in openshift clusters and our implementation works only for openshift\n\t// It thus makes sense to have openshifts machine api namespace as our default cluster api namespace\n\tfs.StringVar(&s.ClusterAPINamespace, \"cluster-api-namespace\", \"openshift-machine-api\", \"The Cluster API namespace.\")\n\tfs.StringVar(&s.BusyboxImage, \"busybox-image\", \"busybox\", \"The complete image uri used for fallback node cpu frequency getter job.\")\n\tfs.StringVar(&s.BusyboxImagePullSecret, \"busybox-image-pull-secret\", \"\", \"The name of the secret that stores the image pull credentials for busybox image.\")\n\tfs.StringVar(&s.CpufreqJobExcludeNodeLabels, \"cpufreq-job-exclude-node-labels\", \"\", \"The comma separated list of key=value node label pairs for the nodes (for example windows nodes) to be excluded from running job based cpufrequency getter.\")\n\tfs.StringVar(&s.containerUtilizationDataAggStrategy, \"cnt-utilization-data-agg-strategy\", agg.DefaultContainerUtilizationDataAggStrategy, \"Container utilization data aggregation strategy.\")\n\tfs.StringVar(&s.containerUsageDataAggStrategy, \"cnt-usage-data-agg-strategy\", agg.DefaultContainerUsageDataAggStrategy, \"Container usage data aggregation strategy.\")\n\tfs.IntVar(&s.readinessRetryThreshold, \"readiness-retry-threshold\", DefaultReadinessRetryThreshold, \"When the pod readiness check fails, Kubeturbo will try readinessRetryThreshold times before giving up. 
Defaults to 60.\")\n\t// Flags for gitops based action execution\n\tfs.StringVar(&s.gitConfig.GitSecretNamespace, \"git-secret-namespace\", \"\", \"The namespace of the secret which holds the git credentials.\")\n\tfs.StringVar(&s.gitConfig.GitSecretName, \"git-secret-name\", \"\", \"The name of the secret which holds the git credentials.\")\n\tfs.StringVar(&s.gitConfig.GitUsername, \"git-username\", \"\", \"The user name to be used to push changes to git.\")\n\tfs.StringVar(&s.gitConfig.GitEmail, \"git-email\", \"\", \"The email to be used to push changes to git.\")\n\tfs.StringVar(&s.gitConfig.CommitMode, \"git-commit-mode\", \"direct\", \"The commit mode that should be used for git action executions. One of request|direct. Defaults to direct.\")\n\t// CpuFreqGetter image and secret\n\tfs.StringVar(&s.CpuFrequencyGetterImage, \"cpufreqgetter-image\", \"icr.io/cpopen/turbonomic/cpufreqgetter\", \"The complete cpufreqgetter image uri used for fallback node cpu frequency getter job.\")\n\tfs.StringVar(&s.CpuFrequencyGetterPullSecret, \"cpufreqgetter-image-pull-secret\", \"\", \"The name of the secret that stores the image pull credentials for cpufreqgetter image.\")\n\tfs.BoolVar(&s.CleanupSccRelatedResources, \"cleanup-scc-impersonation-resources\", true, \"Enable cleanup the resources for scc impersonation.\")\n}", "func (o *FailoverAgentOptions) AddFlags(fs *pflag.FlagSet) {\n\tfs.StringVar(&o.ClusterName, \"cluster-name\", o.ClusterName,\n\t\t\"If non-empty, will use as cluster name instead of generated random name.\")\n\tfs.StringVar(&o.BootstrapKubeconfig, \"bootstrap-kubeconfig\", o.BootstrapKubeconfig,\n\t\t\"The path of the kubeconfig file for agent bootstrap.\")\n\tfs.StringVar(&o.HubKubeconfigSecret, \"hub-kubeconfig-secret\", o.HubKubeconfigSecret,\n\t\t\"The name of secret in component namespace storing kubeconfig for hub.\")\n\tfs.StringVar(&o.HubKubeconfigDir, \"hub-kubeconfig-dir\", o.HubKubeconfigDir,\n\t\t\"The mount path of hub-kubeconfig-secret 
in the container.\")\n\tfs.StringArrayVar(&o.SpokeExternalServerURLs, \"spoke-external-server-urls\", o.SpokeExternalServerURLs,\n\t\t\"A list of reachable spoke cluster api server URLs for hub cluster.\")\n\tfs.DurationVar(&o.ClusterHealthCheckPeriod, \"cluster-healthcheck-period\", o.ClusterHealthCheckPeriod,\n\t\t\"The period to check managed cluster kube-apiserver health\")\n\tfs.IntVar(&o.MaxCustomClusterClaims, \"max-custom-cluster-claims\", o.MaxCustomClusterClaims,\n\t\t\"The max number of custom cluster claims to expose.\")\n}", "func (c *client) GetFeatureFlags() (map[string]string, error) {\n\tjsonFile, err := os.Open(c.featureFlagConfigPath)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to open feature flag file\")\n\t}\n\tdefer jsonFile.Close()\n\tbyteValue, err := io.ReadAll(jsonFile)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to read feature flag file\")\n\t}\n\tresult := make(map[string]string)\n\terr = json.Unmarshal(byteValue, &result)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to unmarshal feature flag json data\")\n\t}\n\treturn result, nil\n}", "func (s *VMTServer) AddFlags(fs *pflag.FlagSet) {\n\tfs.IntVar(&s.Port, \"port\", s.Port, \"The port that kubeturbo's http service runs on\")\n\tfs.StringVar(&s.Address, \"ip\", s.Address, \"the ip address that kubeturbo's http service runs on\")\n\tfs.IntVar(&s.CAdvisorPort, \"cadvisor-port\", K8sCadvisorPort, \"The port of the cadvisor service runs on\")\n\tfs.StringVar(&s.Master, \"master\", s.Master, \"The address of the Kubernetes API server (overrides any value in kubeconfig)\")\n\tfs.StringVar(&s.K8sTAPSpec, \"turboconfig\", s.K8sTAPSpec, \"Path to the config file.\")\n\tfs.StringVar(&s.TestingFlagPath, \"testingflag\", s.TestingFlagPath, \"Path to the testing flag.\")\n\tfs.StringVar(&s.KubeConfig, \"kubeconfig\", s.KubeConfig, \"Path to kubeconfig file with authorization and master location information.\")\n\tfs.BoolVar(&s.EnableProfiling, 
\"profiling\", false, \"Enable profiling via web interface host:port/debug/pprof/.\")\n\tfs.BoolVar(&s.UseVMWare, \"usevmware\", false, \"If the underlying infrastructure is VMWare.\")\n\tfs.IntVar(&s.KubeletPort, \"kubelet-port\", kubelet.DefaultKubeletPort, \"The port of the kubelet runs on\")\n\tfs.BoolVar(&s.EnableKubeletHttps, \"kubelet-https\", kubelet.DefaultKubeletHttps, \"Indicate if Kubelet is running on https server\")\n\tfs.StringVar(&s.K8sVersion, \"k8sVersion\", executor.HigherK8sVersion, \"the kubernetes server version; for openshift, it is the underlying Kubernetes' version.\")\n\tfs.StringVar(&s.NoneSchedulerName, \"noneSchedulerName\", executor.DefaultNoneExistSchedulerName, \"a none-exist scheduler name, to prevent controller to create Running pods during move Action.\")\n\n\t//leaderelection.BindFlags(&s.LeaderElection, fs)\n}", "func (a *AssembliesApiService) GetFeatures(ctx _context.Context, did string, wvm string, wvmid string, eid string, localVarOptionals *GetFeaturesOpts) (BtAssemblyFeatureListResponse1174, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue BtAssemblyFeatureListResponse1174\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/api/assemblies/d/{did}/{wvm}/{wvmid}/e/{eid}/features\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"did\"+\"}\", _neturl.QueryEscape(parameterToString(did, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"wvm\"+\"}\", _neturl.QueryEscape(parameterToString(wvm, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"wvmid\"+\"}\", _neturl.QueryEscape(parameterToString(wvmid, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"eid\"+\"}\", _neturl.QueryEscape(parameterToString(eid, \"\")) , -1)\n\n\tlocalVarHeaderParams 
:= make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif localVarOptionals != nil && localVarOptionals.FeatureId.IsSet() {\n\t\tt:=localVarOptionals.FeatureId.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"featureId\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"featureId\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.LinkDocumentId.IsSet() {\n\t\tlocalVarQueryParams.Add(\"linkDocumentId\", parameterToString(localVarOptionals.LinkDocumentId.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/vnd.onshape.v2+json;charset=UTF-8;qs=0.2\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil 
{\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v BtAssemblyFeatureListResponse1174\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func ParseFeatures(queryString string) error {\n\tfeatureMutex.Lock()\n\tdefer featureMutex.Unlock()\n\n\tfeatures := map[Feature]bool{}\n\t// copy the defaults into this map\n\tfor k, v := range featureDefaults {\n\t\tfeatures[k] = v\n\t}\n\n\tvalues, err := url.ParseQuery(queryString)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error parsing query string for feature gates\")\n\t}\n\n\tfor k := range values {\n\t\tf := Feature(k)\n\n\t\tif _, ok := featureDefaults[f]; !ok {\n\t\t\treturn errors.Errorf(\"Feature Gate %q is not a valid Feature Gate\", f)\n\t\t}\n\n\t\tb, err := strconv.ParseBool(values.Get(k))\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"error parsing bool value from flag %s \", k)\n\t\t}\n\t\tfeatures[f] = b\n\t}\n\n\tfeatureGates = features\n\treturn nil\n}", "func (o *Options) InitFlags(fs *flag.FlagSet) {\n\tif fs == nil {\n\t\tfs = 
flag.CommandLine\n\t}\n\n\tflag.StringVar(\n\t\t&o.MetricsAddr,\n\t\t\"metrics-addr\",\n\t\t\":8080\",\n\t\t\"The address the metric endpoint binds to.\")\n\tflag.BoolVar(\n\t\t&o.LeaderElectionEnabled,\n\t\t\"enable-leader-election\",\n\t\ttrue,\n\t\t\"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.\")\n\tflag.StringVar(\n\t\t&o.LeaderElectionID,\n\t\t\"leader-election-id\",\n\t\t\"\",\n\t\t\"Name of the config map to use as the locking resource when configuring leader election.\")\n\tflag.StringVar(\n\t\t&o.LeaderElectionNamespace,\n\t\t\"leader-election-namespace\",\n\t\t\"\",\n\t\t\"Name of the namespace to use for the configmap locking resource when configuring leader election.\")\n\tflag.StringVar(\n\t\t&o.WatchNamespace,\n\t\t\"namespace\",\n\t\t\"\",\n\t\t\"Namespace that the controller watches to reconcile cluster-api objects. If unspecified, the controller watches for cluster-api objects across all namespaces.\")\n\tflag.DurationVar(\n\t\t&o.SyncPeriod,\n\t\t\"sync-period\",\n\t\tDefaultSyncPeriod,\n\t\t\"The interval at which cluster-api objects are synchronized\")\n\tflag.IntVar(\n\t\t&o.MaxConcurrentReconciles,\n\t\t\"max-concurrent-reconciles\",\n\t\t10,\n\t\t\"The maximum number of allowed, concurrent reconciles.\")\n\tflag.StringVar(\n\t\t&o.PodNameSuffix,\n\t\t\"pod-name-suffix\",\n\t\t\"controller-manager\",\n\t\t\"The suffix name of the pod running the controller manager.\")\n\tflag.StringVar(\n\t\t&o.PodNamespaceSuffix,\n\t\t\"pod-namespace-suffix\",\n\t\t\"controller-manager\",\n\t\t\"The suffix name of the pod namespace running the controller manager.\")\n\tflag.IntVar(\n\t\t&o.WebhookPort,\n\t\t\"webhook-port\",\n\t\tDefaultWebhookServiceContainerPort,\n\t\t\"Webhook Server port (set to 0 to disable)\")\n\tflag.StringVar(\n\t\t&o.HealthAddr,\n\t\t\"health-addr\",\n\t\t\":9440\",\n\t\t\"The address the health endpoint binds to.\",\n\t)\n}", "func (s *APIEnablementOptions) 
AddFlags(fs *pflag.FlagSet) {\n\tfs.Var(&s.RuntimeConfig, \"runtime-config\", \"\"+\n\t\t\"A set of key=value pairs that describe runtime configuration that may be passed \"+\n\t\t\"to apiserver. apis/<groupVersion> key can be used to turn on/off specific api versions. \"+\n\t\t\"apis/<groupVersion>/<resource> can be used to turn on/off specific resources. api/all and \"+\n\t\t\"api/legacy are special keys to control all and legacy api versions respectively.\")\n}", "func (t *T) AddFlags(fs *flag.FlagSet) {\n\tt.RequirementLevels.AddFlags(fs)\n\tt.FeatureStates.AddFlags(fs)\n}", "func (a *AdminApiService) GetFeatureFlag(ctx _context.Context, id string) (FeatureFlag, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue FeatureFlag\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/admin/feature-flag/{id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(parameterToString(id, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, 
localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v 
Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func collectFeaturesInfo(gates []corev1alpha2.FeatureGate, features []corev1alpha2.Feature) map[string]*FeatureInfo {\n\tinfos := map[string]*FeatureInfo{}\n\n\tfor i := range features {\n\t\tpolicy := corev1alpha2.GetPolicyForStabilityLevel(features[i].Spec.Stability)\n\n\t\tinfos[features[i].Name] = &FeatureInfo{\n\t\t\tName: features[i].Name,\n\t\t\tDescription: features[i].Spec.Description,\n\t\t\tStability: features[i].Spec.Stability,\n\t\t\tActivated: features[i].Status.Activated,\n\t\t\tImmutable: policy.Immutable,\n\t\t\tDiscoverable: policy.Discoverable,\n\t\t\tFeatureGate: \"--\",\n\t\t}\n\t}\n\n\tfor i := range gates {\n\t\tfor _, featRef := range gates[i].Spec.Features {\n\t\t\tinfo, ok := infos[featRef.Name]\n\t\t\tif ok {\n\t\t\t\t// FeatureGate referenced Feature is in cluster.\n\t\t\t\tinfo.FeatureGate = gates[i].Name\n\t\t\t}\n\n\t\t\tif !ok {\n\t\t\t\t// FeatureGate referenced Feature is not in cluster. 
Since the Discoverable policy\n\t\t\t\t// cannot be known until the Feature shows up in cluster, set it to true for now.\n\t\t\t\tinfos[featRef.Name] = &FeatureInfo{\n\t\t\t\t\tName: featRef.Name,\n\t\t\t\t\tDiscoverable: true,\n\t\t\t\t\tFeatureGate: gates[i].Name,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn infos\n}", "func WithFeatureFlags() metadata.MD {\n\treturn metadata.Pairs(\"bigtable-features\", featureFlags)\n}", "func ParseFeaturesFromEnv() error {\n\treturn ParseFeatures(viper.GetString(FeatureGateFlag))\n}", "func NewFromFlags() (*Framework, error) {\n\tusr, err := user.Current()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tviper.SetDefault(kubeconfigFlag, filepath.Join(usr.HomeDir, \".kube\", \"config\"))\n\tviper.SetDefault(gsimageFlag, \"us-docker.pkg.dev/agones-images/examples/simple-game-server:0.17\")\n\tviper.SetDefault(pullSecretFlag, \"\")\n\tviper.SetDefault(stressTestLevelFlag, 0)\n\tviper.SetDefault(perfOutputDirFlag, \"\")\n\tviper.SetDefault(versionFlag, \"\")\n\tviper.SetDefault(runtime.FeatureGateFlag, \"\")\n\tviper.SetDefault(namespaceFlag, \"\")\n\tviper.SetDefault(cloudProductFlag, \"generic\")\n\n\tpflag.String(kubeconfigFlag, viper.GetString(kubeconfigFlag), \"kube config path, e.g. 
$HOME/.kube/config\")\n\tpflag.String(gsimageFlag, viper.GetString(gsimageFlag), \"gameserver image to use for those tests\")\n\tpflag.String(pullSecretFlag, viper.GetString(pullSecretFlag), \"optional secret to be used for pulling the gameserver and/or Agones SDK sidecar images\")\n\tpflag.Int(stressTestLevelFlag, viper.GetInt(stressTestLevelFlag), \"enable stress test at given level 0-100\")\n\tpflag.String(perfOutputDirFlag, viper.GetString(perfOutputDirFlag), \"write performance statistics to the specified directory\")\n\tpflag.String(versionFlag, viper.GetString(versionFlag), \"agones controller version to be tested, consists of release version plus a short hash of the latest commit\")\n\tpflag.String(namespaceFlag, viper.GetString(namespaceFlag), \"namespace is used to isolate test runs to their own namespaces\")\n\tpflag.String(cloudProductFlag, viper.GetString(cloudProductFlag), \"cloud product of cluster references by kubeconfig; defaults to 'generic'; options are 'generic', 'gke-autopilot'\")\n\truntime.FeaturesBindFlags()\n\tpflag.Parse()\n\n\tviper.SetEnvKeyReplacer(strings.NewReplacer(\"-\", \"_\"))\n\truntime.Must(viper.BindEnv(kubeconfigFlag))\n\truntime.Must(viper.BindEnv(gsimageFlag))\n\truntime.Must(viper.BindEnv(pullSecretFlag))\n\truntime.Must(viper.BindEnv(stressTestLevelFlag))\n\truntime.Must(viper.BindEnv(perfOutputDirFlag))\n\truntime.Must(viper.BindEnv(versionFlag))\n\truntime.Must(viper.BindEnv(namespaceFlag))\n\truntime.Must(viper.BindEnv(cloudProductFlag))\n\truntime.Must(viper.BindPFlags(pflag.CommandLine))\n\truntime.Must(runtime.FeaturesBindEnv())\n\truntime.Must(runtime.ParseFeaturesFromEnv())\n\n\tframework, err := newFramework(viper.GetString(kubeconfigFlag), 0, 0)\n\tif err != nil {\n\t\treturn framework, err\n\t}\n\tframework.GameServerImage = viper.GetString(gsimageFlag)\n\tframework.PullSecret = viper.GetString(pullSecretFlag)\n\tframework.StressTestLevel = viper.GetInt(stressTestLevelFlag)\n\tframework.PerfOutputDir = 
viper.GetString(perfOutputDirFlag)\n\tframework.Version = viper.GetString(versionFlag)\n\tframework.Namespace = viper.GetString(namespaceFlag)\n\tframework.CloudProduct = viper.GetString(cloudProductFlag)\n\tframework.WaitForState = 5 * time.Minute\n\tif framework.CloudProduct == \"gke-autopilot\" {\n\t\tframework.WaitForState = 10 * time.Minute // Autopilot can take a little while due to autoscaling, be a little liberal.\n\t}\n\n\tlogrus.WithField(\"gameServerImage\", framework.GameServerImage).\n\t\tWithField(\"pullSecret\", framework.PullSecret).\n\t\tWithField(\"stressTestLevel\", framework.StressTestLevel).\n\t\tWithField(\"perfOutputDir\", framework.PerfOutputDir).\n\t\tWithField(\"version\", framework.Version).\n\t\tWithField(\"namespace\", framework.Namespace).\n\t\tWithField(\"cloudProduct\", framework.CloudProduct).\n\t\tWithField(\"featureGates\", runtime.EncodeFeatures()).\n\t\tInfo(\"Starting e2e test(s)\")\n\n\treturn framework, nil\n}", "func (ctx *zedmanagerContext) AddAgentSpecificCLIFlags(flagSet *flag.FlagSet) {\n\tctx.versionPtr = flagSet.Bool(\"v\", false, \"Version\")\n}", "func (ctx *verifierContext) AddAgentSpecificCLIFlags(flagSet *flag.FlagSet) {\n\tctx.versionPtr = flagSet.Bool(\"v\", false, \"Version\")\n}", "func AddAndParseFlags(fs *flag.FlagSet) error { return nil }", "func (c *cmdVersion) AddFlags(fs *flag.FlagSet) {\n\t// no flags\n}", "func (s *DeprecatedInsecureServingOptions) AddFlags(fs *pflag.FlagSet) {\n\tif s == nil {\n\t\treturn\n\t}\n\n\tfs.IPVar(&s.BindAddress, \"insecure-bind-address\", s.BindAddress, \"\"+\n\t\t\"The IP address on which to serve the --insecure-port (set to 0.0.0.0 or :: for listening on all interfaces and IP address families).\")\n\t// Though this flag is deprecated, we discovered security concerns over how to do health checks without it e.g. 
#43784\n\tfs.MarkDeprecated(\"insecure-bind-address\", \"This flag will be removed in a future version.\")\n\tfs.Lookup(\"insecure-bind-address\").Hidden = false\n\n\tfs.IntVar(&s.BindPort, \"insecure-port\", s.BindPort, \"\"+\n\t\t\"The port on which to serve unsecured, unauthenticated access.\")\n\t// Though this flag is deprecated, we discovered security concerns over how to do health checks without it e.g. #43784\n\tfs.MarkDeprecated(\"insecure-port\", \"This flag will be removed in a future version.\")\n\tfs.Lookup(\"insecure-port\").Hidden = false\n}", "func (c *Cmd) Flags() *flag.FlagSet {\n\tif c.flags == nil {\n\t\tc.flags = flag.NewFlagSet(\"reflow\", flag.ExitOnError)\n\t\tc.flags.Usage = func() { c.usage(c.flags) }\n\t\tc.flags.StringVar(&flow.Universe, \"universe\", \"\", \"digest namespace\")\n\t\tc.flags.StringVar(&c.ConfigFile, \"config\", c.DefaultConfigFile, \"path to configuration file; otherwise use default (builtin) config\")\n\t\tc.flags.StringVar(&c.httpFlag, \"http\", \"\", \"run a diagnostic HTTP server on this port\")\n\t\tc.flags.StringVar(&c.cpuProfileFlag, \"cpuprofile\", \"\", \"capture a CPU profile and deposit it to the provided path\")\n\t\tc.flags.StringVar(&c.memProfileFlag, \"memprofile\", \"\", \"capture a Memory profile and deposit it to the provided path\")\n\t\tc.flags.DurationVar(&c.memStatsDuration, \"memstatsduration\", 0, \"log high-level memory stats at this frequency (eg: 100ms)\")\n\t\tc.flags.BoolVar(&c.memStatsGC, \"memstatsgc\", false, \"whether to GC before collecting memstats (at each memstatsduration interval)\")\n\t\tc.flags.StringVar(&c.logFlag, \"log\", \"info\", \"set the log level: off, error, info, debug\")\n\t\tc.flags.IntVar(&c.filesetOpLim, \"fileset_op_limit\", -1, \"set the number of concurrent reflow fileset operations allowed (if unset or non-positive, uses default which is number of CPUs)\")\n\n\t\t// Add flags to override configuration.\n\t\tc.configFlags = make(map[string]*string)\n\t\tfor key := 
range c.SchemaKeys {\n\t\t\tc.configFlags[key] = c.flags.String(key, \"\", fmt.Sprintf(\"override %s from config; see reflow config -help\", key))\n\t\t}\n\t}\n\treturn c.flags\n}", "func (s *ClusterOperatorServerRunOptions) AddFlags(fs *pflag.FlagSet) {\n\t// Add the generic flags.\n\ts.GenericServerRunOptions.AddUniversalFlags(fs)\n\ts.Etcd.AddFlags(fs)\n\ts.SecureServing.AddFlags(fs)\n\ts.SecureServing.AddDeprecatedFlags(fs)\n\t//s.InsecureServing.AddFlags(fs)\n\t//s.InsecureServing.AddDeprecatedFlags(fs)\n\ts.Audit.AddFlags(fs)\n\ts.Features.AddFlags(fs)\n\ts.Authentication.AddFlags(fs)\n\ts.Authorization.AddFlags(fs)\n\ts.Admission.AddFlags(fs)\n\n\t// Note: the weird \"\"+ in below lines seems to be the only way to get gofmt to\n\t// arrange these text blocks sensibly. Grrr.\n\n\tfs.BoolVar(&s.EnableLogsHandler, \"enable-logs-handler\", s.EnableLogsHandler,\n\t\t\"If true, install a /logs handler for the apiserver logs.\")\n\n\tfs.IntVar(&s.MasterCount, \"apiserver-count\", s.MasterCount,\n\t\t\"The number of apiservers running in the cluster, must be a positive number.\")\n\n\tfs.BoolVar(&s.DisableAuth, \"disable-auth\", false,\n\t\t\"Disable authentication and authorization for testing purposes\")\n}", "func FeatureFlag(flag string) string {\n\tsess := session.Must(session.NewSession(&aws.Config{\n\t\tRegion: aws.String(\"us-west-2\"),\n\t}))\n\n\tssmsvc := ssm.New(sess, aws.NewConfig().WithRegion(\"us-west-2\"))\n\tkeyname := flag\n\tdecryption := false\n\tparam, err := ssmsvc.GetParameter(&ssm.GetParameterInput{\n\t\tName: &keyname,\n\t\tWithDecryption: &decryption,\n\t})\n\n\tif err != nil {\n\t\t//Later, it may be worth syncing configs in another region if failed to\n\t\t//pick up in us-west-2\n\t\tlog.Print(err)\n\t\treturn \"Error\"\n\t}\n\n\tvalue := *param.Parameter.Value\n\treturn value\n}", "func ComputeFeatureStates(featureGateSpec configv1alpha1.FeatureGateSpec, features []configv1alpha1.Feature) (activated, deactivated, unavailable []string) 
{\n\t// Collect features to be activated/deactivated in the spec.\n\ttoActivate := sets.String{}\n\ttoDeactivate := sets.String{}\n\tfor _, f := range featureGateSpec.Features {\n\t\tif f.Activate {\n\t\t\ttoActivate.Insert(f.Name)\n\t\t} else {\n\t\t\ttoDeactivate.Insert(f.Name)\n\t\t}\n\t}\n\n\t// discovered is set a set of available features that are discoverable.\n\tdiscovered := sets.String{}\n\t// discoveredDefaultActivated is a set of available features that are discoverable and activated by default.\n\tdiscoveredDefaultActivated := sets.String{}\n\t// discoveredDefaultDeactivated is a set of available features that are discoverable and deactivated by default.\n\tdiscoveredDefaultDeactivated := sets.String{}\n\tfor i := range features {\n\t\tfeature := features[i]\n\t\tif !feature.Spec.Discoverable {\n\t\t\tcontinue\n\t\t}\n\t\tdiscovered.Insert(feature.Name)\n\t\tif feature.Spec.Activated {\n\t\t\tdiscoveredDefaultActivated.Insert(feature.Name)\n\t\t} else {\n\t\t\tdiscoveredDefaultDeactivated.Insert(feature.Name)\n\t\t}\n\t}\n\n\t// activate is all the features that the spec intends to be activated and features that are default activated.\n\tactivate := discoveredDefaultActivated.Union(toActivate)\n\t// activationCandidates are features that are discovered, but are explicitly set *not* to be activated in this feature gate.\n\t// Only these features can be activated regardless of what the intent in the spec is.\n\tactivationCandidates := discovered.Difference(toDeactivate)\n\t// Intersection gives us the actual activated features.\n\tactivated = activationCandidates.Intersection(activate).List()\n\n\t// deactivate is all the features that the spec intends to be deactivated and features that are default deactivated.\n\tdeactivate := discoveredDefaultDeactivated.Union(toDeactivate)\n\t// deactivationCandidates are features that are discovered, but are explicitly set *not* to be deactivated in this feature gate.\n\t// Only these features can be deactivated 
regardless of what the intent in the spec is.\n\tdeactivationCandidates := discovered.Difference(toActivate)\n\t// Intersection gives us the actual deactivated features.\n\tdeactivated = deactivationCandidates.Intersection(deactivate).List()\n\n\t// Set of all features specified in the current spec.\n\tallFeaturesInSpec := toActivate.Union(toDeactivate)\n\t// Set difference with all the discovered features gives unavailable features.\n\tunavailable = allFeaturesInSpec.Difference(discovered).List()\n\n\treturn activated, deactivated, unavailable\n}", "func (c *client) WriteFeatureFlags(featureFlags map[string]string) error {\n\tfeatureFlagsList := make(map[string]string)\n\tfor feature, value := range featureFlags {\n\t\tfeatureFlagsList[strings.TrimSpace(feature)] = value\n\t}\n\tjsonString, err := json.Marshal(featureFlagsList)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to parse the feature flags as valid json\")\n\t}\n\tfile, err := os.Create(c.featureFlagConfigPath)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to create feature flag file\")\n\t}\n\tdefer file.Close()\n\t_, err = file.WriteString(string(jsonString))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to write to feature flag file\")\n\t}\n\treturn nil\n}", "func (f *Factory) SetFlags() {\n}", "func InitFlags() *FactoryOptions {\n\ttesting.Init()\n\t_, err := types.NewAttachedGinkgoFlagSet(flag.CommandLine, types.GinkgoFlags{}, nil, types.GinkgoFlagSections{}, types.GinkgoFlagSection{})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\ttestOptions := &FactoryOptions{}\n\ttestOptions.BindFlags(flag.CommandLine)\n\tflag.Parse()\n\n\treturn testOptions\n}", "func FeaturesBindFlags() {\n\tviper.SetDefault(FeatureGateFlag, \"\")\n\tpflag.String(FeatureGateFlag, viper.GetString(FeatureGateFlag), \"Flag to pass in the url query list of feature flags to enable or disable\")\n}", "func addFlags(s *server.Server, fs *pflag.FlagSet) {\n\tfs.StringVar(&s.APIServer, \"api-server\", 
s.APIServer, \"Endpoint for the api server\")\n\tfs.StringVar(&s.APIToken, \"api-token\", s.APIToken, \"Token to authenticate with the api server\")\n\tfs.StringVar(&s.AppPort, \"app-port\", s.AppPort, \"Kube2iam server http port\")\n\tfs.StringVar(&s.MetricsPort, \"metrics-port\", s.MetricsPort, \"Metrics server http port (default: same as kube2iam server port)\")\n\tfs.StringVar(&s.BaseRoleARN, \"base-role-arn\", s.BaseRoleARN, \"Base role ARN\")\n\tfs.BoolVar(&s.Debug, \"debug\", s.Debug, \"Enable debug features\")\n\tfs.StringVar(&s.DefaultIAMRole, \"default-role\", s.DefaultIAMRole, \"Fallback role to use when annotation is not set\")\n\tfs.StringVar(&s.IAMRoleKey, \"iam-role-key\", s.IAMRoleKey, \"Pod annotation key used to retrieve the IAM role\")\n\tfs.StringVar(&s.IAMExternalID, \"iam-external-id\", s.IAMExternalID, \"Pod annotation key used to retrieve the IAM ExternalId\")\n\tfs.DurationVar(&s.IAMRoleSessionTTL, \"iam-role-session-ttl\", s.IAMRoleSessionTTL, \"TTL for the assume role session\")\n\tfs.BoolVar(&s.Insecure, \"insecure\", false, \"Kubernetes server should be accessed without verifying the TLS. 
Testing only\")\n\tfs.StringVar(&s.MetadataAddress, \"metadata-addr\", s.MetadataAddress, \"Address for the ec2 metadata\")\n\tfs.BoolVar(&s.AddIPTablesRule, \"iptables\", false, \"Add iptables rule (also requires --host-ip)\")\n\tfs.BoolVar(&s.AutoDiscoverBaseArn, \"auto-discover-base-arn\", false, \"Queries EC2 Metadata to determine the base ARN\")\n\tfs.BoolVar(&s.AutoDiscoverDefaultRole, \"auto-discover-default-role\", false, \"Queries EC2 Metadata to determine the default Iam Role and base ARN, cannot be used with --default-role, overwrites any previous setting for --base-role-arn\")\n\tfs.StringVar(&s.HostInterface, \"host-interface\", \"docker0\", \"Host interface for proxying AWS metadata\")\n\tfs.BoolVar(&s.NamespaceRestriction, \"namespace-restrictions\", false, \"Enable namespace restrictions\")\n\tfs.StringVar(&s.NamespaceRestrictionFormat, \"namespace-restriction-format\", s.NamespaceRestrictionFormat, \"Namespace Restriction Format (glob/regexp)\")\n\tfs.StringVar(&s.NamespaceKey, \"namespace-key\", s.NamespaceKey, \"Namespace annotation key used to retrieve the IAM roles allowed (value in annotation should be json array)\")\n\tfs.DurationVar(&s.CacheResyncPeriod, \"cache-resync-period\", s.CacheResyncPeriod, \"Kubernetes caches resync period\")\n\tfs.BoolVar(&s.ResolveDupIPs, \"resolve-duplicate-cache-ips\", false, \"Queries the k8s api server to find the source of truth when the pod cache contains multiple pods with the same IP\")\n\tfs.StringVar(&s.HostIP, \"host-ip\", s.HostIP, \"IP address of host\")\n\tfs.StringVar(&s.NodeName, \"node\", s.NodeName, \"Name of the node where kube2iam is running\")\n\tfs.DurationVar(&s.BackoffMaxInterval, \"backoff-max-interval\", s.BackoffMaxInterval, \"Max interval for backoff when querying for role.\")\n\tfs.DurationVar(&s.BackoffMaxElapsedTime, \"backoff-max-elapsed-time\", s.BackoffMaxElapsedTime, \"Max elapsed time for backoff when querying for role.\")\n\tfs.StringVar(&s.LogFormat, \"log-format\", 
s.LogFormat, \"Log format (text/json)\")\n\tfs.StringVar(&s.LogLevel, \"log-level\", s.LogLevel, \"Log level\")\n\tfs.BoolVar(&s.UseRegionalStsEndpoint, \"use-regional-sts-endpoint\", false, \"use the regional sts endpoint if AWS_REGION is set\")\n\tfs.BoolVar(&s.Verbose, \"verbose\", false, \"Verbose\")\n\tfs.BoolVar(&s.Version, \"version\", false, \"Print the version and exits\")\n}", "func (ss *SousServer) AddFlags(fs *flag.FlagSet) {\n\tfs.StringVar(&ss.flags.laddr, `listen`, `:80`, \"The address to listen on, like '127.0.0.1:https'\")\n\tfs.StringVar(&ss.flags.gdmRepo, \"gdm-repo\", \"\", \"Git repo containing the GDM (cloned into config.SourceLocation)\")\n}", "func (v *VersionCommand) addFlags() {\n\t// TODO: add flags here\n}", "func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) {\n\tfs.Var(componentconfig.IPVar{&s.BindAddress}, \"bind-address\", \"The IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces)\")\n\tfs.StringVar(&s.Master, \"master\", s.Master, \"The address of the Kubernetes API server (overrides any value in kubeconfig)\")\n\tfs.IntVar(&s.HealthzPort, \"healthz-port\", s.HealthzPort, \"The port to bind the health check server. Use 0 to disable.\")\n\tfs.Var(componentconfig.IPVar{&s.HealthzBindAddress}, \"healthz-bind-address\", \"The IP address for the health check server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)\")\n\tfs.IntVar(s.OOMScoreAdj, \"oom-score-adj\", util.IntPtrDerefOr(s.OOMScoreAdj, qos.KubeProxyOOMScoreAdj), \"The oom-score-adj value for kube-proxy process. 
Values must be within the range [-1000, 1000]\")\n\tfs.StringVar(&s.ResourceContainer, \"resource-container\", s.ResourceContainer, \"Absolute name of the resource-only container to create and run the Kube-proxy in (Default: /kube-proxy).\")\n\tfs.MarkDeprecated(\"resource-container\", \"This feature will be removed in a later release.\")\n\tfs.StringVar(&s.Kubeconfig, \"kubeconfig\", s.Kubeconfig, \"Path to kubeconfig file with authorization information (the master location is set by the master flag).\")\n\tfs.Var(componentconfig.PortRangeVar{&s.PortRange}, \"proxy-port-range\", \"Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.\")\n\tfs.StringVar(&s.HostnameOverride, \"hostname-override\", s.HostnameOverride, \"If non-empty, will use this string as identification instead of the actual hostname.\")\n\tfs.Var(&s.Mode, \"proxy-mode\", \"Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If blank, look at the Node object on the Kubernetes API and respect the '\"+ExperimentalProxyModeAnnotation+\"' annotation if provided. Otherwise use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.\")\n\tfs.DurationVar(&s.IPTablesSyncPeriod.Duration, \"iptables-sync-period\", s.IPTablesSyncPeriod.Duration, \"How often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.\")\n\tfs.DurationVar(&s.ConfigSyncPeriod, \"config-sync-period\", s.ConfigSyncPeriod, \"How often configuration from the apiserver is refreshed. 
Must be greater than 0.\")\n\tfs.BoolVar(&s.MasqueradeAll, \"masquerade-all\", false, \"If using the pure iptables proxy, SNAT everything\")\n\tfs.BoolVar(&s.CleanupAndExit, \"cleanup-iptables\", false, \"If true cleanup iptables rules and exit.\")\n\tfs.Float32Var(&s.KubeAPIQPS, \"kube-api-qps\", s.KubeAPIQPS, \"QPS to use while talking with kubernetes apiserver\")\n\tfs.IntVar(&s.KubeAPIBurst, \"kube-api-burst\", s.KubeAPIBurst, \"Burst to use while talking with kubernetes apiserver\")\n\tfs.DurationVar(&s.UDPIdleTimeout.Duration, \"udp-timeout\", s.UDPIdleTimeout.Duration, \"How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace\")\n\tfs.IntVar(&s.ConntrackMax, \"conntrack-max\", s.ConntrackMax, \"Maximum number of NAT connections to track (0 to leave as-is)\")\n\tfs.DurationVar(&s.ConntrackTCPEstablishedTimeout.Duration, \"conntrack-tcp-timeout-established\", s.ConntrackTCPEstablishedTimeout.Duration, \"Idle timeout for established TCP connections (0 to leave as-is)\")\n}", "func (m *UserExperienceAnalyticsDeviceStartupHistory) SetIsFeatureUpdate(value *bool)() {\n err := m.GetBackingStore().Set(\"isFeatureUpdate\", value)\n if err != nil {\n panic(err)\n }\n}", "func AddFlags(flags *flag.FlagSet) {\n\tflags.Int(collectorQueueSize, DefaultQueueSize, \"The queue size of the collector\")\n\tflags.Int(collectorNumWorkers, DefaultNumWorkers, \"The number of workers pulling items from the queue\")\n\tflags.Int(collectorHTTPPort, 0, collectorHTTPPortWarning+\" see --\"+CollectorHTTPHostPort)\n\tflags.Int(collectorGRPCPort, 0, collectorGRPCPortWarning+\" see --\"+CollectorGRPCHostPort)\n\tflags.Int(collectorZipkinHTTPPort, 0, collectorZipkinHTTPPortWarning+\" see --\"+CollectorZipkinHTTPHostPort)\n\tflags.Uint(collectorDynQueueSizeMemory, 0, \"(experimental) The max memory size in MiB to use for the dynamic queue.\")\n\tflags.String(collectorTags, \"\", \"One or more tags to be added to 
the Process tags of all spans passing through this collector. Ex: key1=value1,key2=${envVar:defaultValue}\")\n\tflags.String(collectorZipkinAllowedOrigins, \"*\", \"Comma separated list of allowed origins for the Zipkin collector service, default accepts all\")\n\tflags.String(collectorZipkinAllowedHeaders, \"content-type\", \"Comma separated list of allowed headers for the Zipkin collector service, default content-type\")\n\tAddOTELJaegerFlags(flags)\n\tAddOTELZipkinFlags(flags)\n}", "func processFlags() *flags {\n\tflags := flags{}\n\n\tflag.StringVar(&flags.configFile, \"configfile\", \"\", \"Config File to process\")\n\tflag.StringVar(&flags.consulAddress, \"consuladdr\", \"\", \"Consul Address\")\n\tflag.StringVar(&flags.consulDatacenter, \"consuldc\", \"global\", \"Consul Datacentre\")\n\tflag.StringVar(&flags.consulPrefix, \"consulprefix\", \"\", \"Consul Prefix\")\n\tflag.StringVar(&flags.consulScheme, \"consulscheme\", \"http\", \"Consul Scheme\")\n\tflag.StringVar(&flags.consulToken, \"consultoken\", \"\", \"Consul Token\")\n\tflag.BoolVar(&flags.displayVer, \"version\", false, \"Display version and exit\")\n\tflag.Parse()\n\n\tif flags.displayVer == true {\n\t\tfmt.Printf(\"Build Type: %s\\n\", buildinfo.BuildType)\n\t\tfmt.Printf(\"Build TimeStamp: %s\\n\", buildinfo.BuildStamp)\n\t\tfmt.Printf(\"Build Revision: %s\\n\", buildinfo.BuildRevision)\n\t\tos.Exit(0)\n\t}\n\n\treturn &flags\n}", "func configureFlags(api *operations.EsiAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... 
}\n}", "func (o *ClientOptions) AddFlags(fs *flag.FlagSet) {\n\tfs.StringVar(&o.LogID, \"cloud-logging-logs-id\", o.LogID,\n\t\t\"For cloud logging, the log stream ID.\")\n\tfs.StringVar(&o.ServiceName, \"cloud-logging-service\", o.ServiceName,\n\t\t\"For cloud logging, the service name.\")\n\tfs.StringVar(&o.ProjectID, \"cloud-logging-project-name\", o.ProjectID,\n\t\t\"For cloud logging, the project name.\")\n\tfs.StringVar(&o.ResourceType, \"cloud-logging-resource-type\", o.ResourceType,\n\t\t\"For cloud logging, the instance name.\")\n\tfs.StringVar(&o.ResourceID, \"cloud-logging-resource-id\", o.ResourceID,\n\t\t\"For cloud logging, the instance ID.\")\n\tfs.StringVar(&o.Region, \"cloud-logging-region\", o.Region,\n\t\t\"For cloud logging, the region.\")\n\tfs.StringVar(&o.UserID, \"cloud-logging-user\", o.UserID,\n\t\t\"For cloud logging, the user ID.\")\n\tfs.StringVar(&o.Zone, \"cloud-logging-zone\", o.Zone,\n\t\t\"For cloud logging, the zone.\")\n}", "func (s *StatsGraph) SetFlags() {\n\tif !(s.ZoomToken == \"\") {\n\t\ts.Flags.Set(0)\n\t}\n}", "func (d *Driver) GetCreateFlags() []mcnflag.Flag {\n\treturn []mcnflag.Flag{\n\t\tmcnflag.IntFlag{\n\t\t\tEnvVar: \"VSPHERE_CPU_COUNT\",\n\t\t\tName: \"vmwarevsphere-cpu-count\",\n\t\t\tUsage: \"vSphere CPU number for docker VM\",\n\t\t\tValue: defaultCpus,\n\t\t},\n\t\tmcnflag.IntFlag{\n\t\t\tEnvVar: \"VSPHERE_MEMORY_SIZE\",\n\t\t\tName: \"vmwarevsphere-memory-size\",\n\t\t\tUsage: \"vSphere size of memory for docker VM (in MB)\",\n\t\t\tValue: defaultMemory,\n\t\t},\n\t\tmcnflag.IntFlag{\n\t\t\tEnvVar: \"VSPHERE_DISK_SIZE\",\n\t\t\tName: \"vmwarevsphere-disk-size\",\n\t\t\tUsage: \"vSphere size of disk for docker VM (in MB)\",\n\t\t\tValue: defaultDiskSize,\n\t\t},\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"VSPHERE_BOOT2DOCKER_URL\",\n\t\t\tName: \"vmwarevsphere-boot2docker-url\",\n\t\t\tUsage: \"vSphere URL for boot2docker image\",\n\t\t},\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"VSPHERE_VCENTER\",\n\t\t\tName: 
\"vmwarevsphere-vcenter\",\n\t\t\tUsage: \"vSphere IP/hostname for vCenter\",\n\t\t},\n\t\tmcnflag.IntFlag{\n\t\t\tEnvVar: \"VSPHERE_VCENTER_PORT\",\n\t\t\tName: \"vmwarevsphere-vcenter-port\",\n\t\t\tUsage: \"vSphere Port for vCenter\",\n\t\t\tValue: defaultSDKPort,\n\t\t},\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"VSPHERE_USERNAME\",\n\t\t\tName: \"vmwarevsphere-username\",\n\t\t\tUsage: \"vSphere username\",\n\t\t},\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"VSPHERE_PASSWORD\",\n\t\t\tName: \"vmwarevsphere-password\",\n\t\t\tUsage: \"vSphere password\",\n\t\t},\n\t\tmcnflag.StringSliceFlag{\n\t\t\tEnvVar: \"VSPHERE_NETWORK\",\n\t\t\tName: \"vmwarevsphere-network\",\n\t\t\tUsage: \"vSphere network where the docker VM will be attached\",\n\t\t},\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"VSPHERE_DATASTORE\",\n\t\t\tName: \"vmwarevsphere-datastore\",\n\t\t\tUsage: \"vSphere datastore for docker VM\",\n\t\t},\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"VSPHERE_DATACENTER\",\n\t\t\tName: \"vmwarevsphere-datacenter\",\n\t\t\tUsage: \"vSphere datacenter for docker VM\",\n\t\t},\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"VSPHERE_POOL\",\n\t\t\tName: \"vmwarevsphere-pool\",\n\t\t\tUsage: \"vSphere resource pool for docker VM\",\n\t\t},\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"VSPHERE_HOSTSYSTEM\",\n\t\t\tName: \"vmwarevsphere-hostsystem\",\n\t\t\tUsage: \"vSphere compute resource where the docker VM will be instantiated. 
This can be omitted if using a cluster with DRS.\",\n\t\t},\n\t\tmcnflag.StringSliceFlag{\n\t\t\tEnvVar: \"VSPHERE_CFGPARAM\",\n\t\t\tName: \"vmwarevsphere-cfgparam\",\n\t\t\tUsage: \"vSphere vm configuration parameters (used for guestinfo)\",\n\t\t},\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"VSPHERE_CLOUDINIT\",\n\t\t\tName: \"vmwarevsphere-cloudinit\",\n\t\t\tUsage: \"vSphere cloud-init file or url to set in the guestinfo\",\n\t\t},\n\t}\n}", "func (o *Options) AddFlags(fs *pflag.FlagSet) {\n\tfs.StringVar(&o.HelperImage, \"helper-image\", defaultHelperImage,\n\t\t\"The image that instrumentate mysql.\")\n\n\tfs.StringVar(&o.MetricsExporterImage, \"metrics-exporter-image\", defaultExporterImage,\n\t\t\"The image for mysql metrics exporter.\")\n\tfs.StringVar(&o.ImagePullSecretName, \"image-pull-secret\", \"\",\n\t\t\"The secret name for used as pull secret.\")\n\n\tfs.VarP(newPullPolicyValue(defaultImagePullPolicy, &o.ImagePullPolicy),\n\t\t\"image-pull-policy\", \"\", \"Set image pull policy.\")\n\n\tfs.StringVar(&o.OrchestratorURI, \"orchestrator-uri\", \"\",\n\t\t\"The orchestrator uri\")\n\tfs.StringVar(&o.OrchestratorTopologyPassword, \"orchestrator-topology-password\", defaultOrchestratorTopologyUser,\n\t\t\"The orchestrator topology password. Can also be set as ORC_TOPOLOGY_PASSWORD environment variable.\")\n\tfs.StringVar(&o.OrchestratorTopologyUser, \"orchestrator-topology-user\", defaultOrchestratorTopologyPassword,\n\t\t\"The orchestrator topology user. 
Can also be set as ORC_TOPOLOGY_USER environment variable.\")\n\tfs.DurationVar(&o.JobCompleteSuccessGraceTime, \"job-grace-time\", defaultJobGraceTime,\n\t\t\"The time in hours how jobs after completion are keept.\")\n\n\tfs.StringVar(&o.HTTPServeAddr, \"http-serve-addr\", defaultHTTPServerAddr,\n\t\t\"The address for http server.\")\n\n\tfs.StringVar(&o.LeaderElectionNamespace, \"leader-election-namespace\", defaultLeaderElectionNamespace,\n\t\t\"The leader election namespace.\")\n\tfs.StringVar(&o.LeaderElectionID, \"leader-election-id\", defaultLeaderElectionID,\n\t\t\"The leader election id.\")\n}", "func FeatureOverride(dda *DatadogAgentSpec, dso *DatadogAgentSpec) {\n\tif dda.Features.NetworkMonitoring != nil && apiutils.BoolValue(dda.Features.NetworkMonitoring.Enabled) {\n\t\t// If the Network Monitoring Feature is enabled, enable the System Probe.\n\t\tif !apiutils.BoolValue(dda.Agent.Enabled) {\n\t\t\tif dda.Agent.SystemProbe == nil {\n\t\t\t\tdda.Agent.SystemProbe = DefaultDatadogAgentSpecAgentSystemProbe(&dda.Agent)\n\t\t\t}\n\t\t\tdda.Agent.SystemProbe.Enabled = apiutils.NewBoolPointer(true)\n\t\t\tdso.Agent.SystemProbe = DefaultDatadogAgentSpecAgentSystemProbe(&dda.Agent)\n\t\t\tdso.Agent.SystemProbe.Enabled = apiutils.NewBoolPointer(true)\n\t\t}\n\t}\n\tif dda.Features.NetworkMonitoring != nil && apiutils.BoolValue(dda.Features.NetworkMonitoring.Enabled) ||\n\t\tdda.Features.OrchestratorExplorer != nil && apiutils.BoolValue(dda.Features.OrchestratorExplorer.Enabled) {\n\t\t// If the Network Monitoring or the Orchestrator Explorer Feature is enabled, enable the Process Agent.\n\t\tif !apiutils.BoolValue(dda.Agent.Enabled) {\n\t\t\tif dda.Agent.Process == nil {\n\t\t\t\tdda.Agent.Process = DefaultDatadogAgentSpecAgentProcess(&dda.Agent)\n\t\t\t}\n\t\t\tdda.Agent.Process.Enabled = apiutils.NewBoolPointer(true)\n\t\t\tdso.Agent.Process = DefaultDatadogAgentSpecAgentProcess(&dda.Agent)\n\t\t\tdso.Agent.Process.Enabled = 
apiutils.NewBoolPointer(true)\n\t\t}\n\t}\n}", "func CheckFeatureFlag(v *viper.Viper) error {\n\treturn nil\n}", "func AddFlags(flags *flag.FlagSet) {\n\tflags.Int(collectorQueueSize, app.DefaultQueueSize, \"The queue size of the collector\")\n\tflags.Int(collectorNumWorkers, app.DefaultNumWorkers, \"The number of workers pulling items from the queue\")\n\tflags.Duration(collectorWriteCacheTTL, time.Hour*12, \"The duration to wait before rewriting an existing service or operation name\")\n\tflags.Int(collectorPort, 14267, \"The tchannel port for the collector service\")\n\tflags.Int(collectorHTTPPort, 14268, \"The http port for the collector service\")\n\tflags.Int(collectorZipkinHTTPort, 0, \"The http port for the Zipkin collector service e.g. 9411\")\n\tflags.Int(collectorHealthCheckHTTPPort, 14269, \"The http port for the health check service\")\n\tflags.Bool(collectorAuthSpan, false, \"Defines if incoming spans should be authenticated\")\n\tflags.String(collectorSpanAuthTagKey, app.DefaultSpanAuthTagKey, \"The name of the tag's key associated with password / api token\")\n\tflags.Int(collectorAuthManagerCacheSize, 1000, \"The size of the authentication manager cache\")\n\tflags.Duration(collectorAuthManagerCacheTTL, time.Second * 3600, \"The TTL of the auth manager cache items\")\n}", "func (s *EtcdOptions) AddFlags(fs *pflag.FlagSet) {\n\tif s == nil {\n\t\treturn\n\t}\n\n\tfs.StringSliceVar(&s.EtcdServersOverrides, \"etcd-servers-overrides\", s.EtcdServersOverrides, \"\"+\n\t\t\"Per-resource etcd servers overrides, comma separated. The individual override \"+\n\t\t\"format: group/resource#servers, where servers are URLs, semicolon separated. \"+\n\t\t\"Note that this applies only to resources compiled into this server binary. \")\n\n\tfs.StringVar(&s.DefaultStorageMediaType, \"storage-media-type\", s.DefaultStorageMediaType, \"\"+\n\t\t\"The media type to use to store objects in storage. 
\"+\n\t\t\"Some resources or storage backends may only support a specific media type and will ignore this setting.\")\n\tfs.IntVar(&s.DeleteCollectionWorkers, \"delete-collection-workers\", s.DeleteCollectionWorkers,\n\t\t\"Number of workers spawned for DeleteCollection call. These are used to speed up namespace cleanup.\")\n\n\tfs.BoolVar(&s.EnableGarbageCollection, \"enable-garbage-collector\", s.EnableGarbageCollection, \"\"+\n\t\t\"Enables the generic garbage collector. MUST be synced with the corresponding flag \"+\n\t\t\"of the kube-controller-manager.\")\n\n\tfs.BoolVar(&s.EnableWatchCache, \"watch-cache\", s.EnableWatchCache,\n\t\t\"Enable watch caching in the apiserver\")\n\n\tfs.IntVar(&s.DefaultWatchCacheSize, \"default-watch-cache-size\", s.DefaultWatchCacheSize,\n\t\t\"Default watch cache size. If zero, watch cache will be disabled for resources that do not have a default watch size set.\")\n\n\tfs.StringSliceVar(&s.WatchCacheSizes, \"watch-cache-sizes\", s.WatchCacheSizes, \"\"+\n\t\t\"Watch cache size settings for some resources (pods, nodes, etc.), comma separated. \"+\n\t\t\"The individual setting format: resource[.group]#size, where resource is lowercase plural (no version), \"+\n\t\t\"group is omitted for resources of apiVersion v1 (the legacy core API) and included for others, \"+\n\t\t\"and size is a number. It takes effect when watch-cache is enabled. \"+\n\t\t\"Some resources (replicationcontrollers, endpoints, nodes, pods, services, apiservices.apiregistration.k8s.io) \"+\n\t\t\"have system defaults set by heuristics, others default to default-watch-cache-size\")\n\n\tfs.StringVar(&s.StorageConfig.Type, \"storage-backend\", s.StorageConfig.Type,\n\t\t\"The storage backend for persistence. 
Options: 'etcd3' (default).\")\n\n\tfs.StringSliceVar(&s.StorageConfig.Transport.ServerList, \"etcd-servers\", s.StorageConfig.Transport.ServerList,\n\t\t\"List of etcd servers to connect with (scheme://ip:port), comma separated.\")\n\n\tfs.StringVar(&s.StorageConfig.Prefix, \"etcd-prefix\", s.StorageConfig.Prefix,\n\t\t\"The prefix to prepend to all resource paths in etcd.\")\n\n\tfs.StringVar(&s.StorageConfig.Transport.KeyFile, \"etcd-keyfile\", s.StorageConfig.Transport.KeyFile,\n\t\t\"SSL key file used to secure etcd communication.\")\n\n\tfs.StringVar(&s.StorageConfig.Transport.CertFile, \"etcd-certfile\", s.StorageConfig.Transport.CertFile,\n\t\t\"SSL certification file used to secure etcd communication.\")\n\n\tfs.StringVar(&s.StorageConfig.Transport.TrustedCAFile, \"etcd-cafile\", s.StorageConfig.Transport.TrustedCAFile,\n\t\t\"SSL Certificate Authority file used to secure etcd communication.\")\n\n\tfs.StringVar(&s.EncryptionProviderConfigFilepath, \"encryption-provider-config\", s.EncryptionProviderConfigFilepath,\n\t\t\"The file containing configuration for encryption providers to be used for storing secrets in etcd\")\n\n\tfs.DurationVar(&s.StorageConfig.CompactionInterval, \"etcd-compaction-interval\", s.StorageConfig.CompactionInterval,\n\t\t\"The interval of compaction requests. If 0, the compaction request from apiserver is disabled.\")\n\n\tfs.DurationVar(&s.StorageConfig.CountMetricPollPeriod, \"etcd-count-metric-poll-period\", s.StorageConfig.CountMetricPollPeriod, \"\"+\n\t\t\"Frequency of polling etcd for number of resources per type. 0 disables the metric collection.\")\n\n\tfs.DurationVar(&s.StorageConfig.DBMetricPollInterval, \"etcd-db-metric-poll-interval\", s.StorageConfig.DBMetricPollInterval,\n\t\t\"The interval of requests to poll etcd and update metric. 
0 disables the metric collection\")\n\n\tfs.DurationVar(&s.StorageConfig.HealthcheckTimeout, \"etcd-healthcheck-timeout\", s.StorageConfig.HealthcheckTimeout,\n\t\t\"The timeout to use when checking etcd health.\")\n\n\tfs.Int64Var(&s.StorageConfig.LeaseManagerConfig.ReuseDurationSeconds, \"lease-reuse-duration-seconds\", s.StorageConfig.LeaseManagerConfig.ReuseDurationSeconds,\n\t\t\"The time in seconds that each lease is reused. A lower value could avoid large number of objects reusing the same lease. Notice that a too small value may cause performance problems at storage layer.\")\n}", "func AddFlags(flags *flag.FlagSet) {\n\tflags.String(reporterType, string(GRPC), fmt.Sprintf(\"Reporter type to use e.g. %s\", string(GRPC)))\n\tif !setupcontext.IsAllInOne() {\n\t\tflags.String(agentTags, \"\", \"One or more tags to be added to the Process tags of all spans passing through this agent. Ex: key1=value1,key2=${envVar:defaultValue}\")\n\t}\n}", "func (fft FeatureFlagToggles) SetRouteServices(toggle bool) {\n\tfft[routeServicesFlag] = toggle\n}", "func (s *cpuSource) Discover() error {\n\ts.features = nfdv1alpha1.NewFeatures()\n\n\t// Detect CPUID\n\ts.features.Flags[CpuidFeature] = nfdv1alpha1.NewFlagFeatures(getCpuidFlags()...)\n\n\t// Detect CPU model\n\ts.features.Attributes[Cpumodel] = nfdv1alpha1.NewAttributeFeatures(getCPUModel())\n\n\t// Detect cstate configuration\n\tcstate, err := detectCstate()\n\tif err != nil {\n\t\tklog.ErrorS(err, \"failed to detect cstate\")\n\t} else {\n\t\ts.features.Attributes[CstateFeature] = nfdv1alpha1.NewAttributeFeatures(cstate)\n\t}\n\n\t// Detect pstate features\n\tpstate, err := detectPstate()\n\tif err != nil {\n\t\tklog.ErrorS(err, \"failed to detect pstate\")\n\t}\n\ts.features.Attributes[PstateFeature] = nfdv1alpha1.NewAttributeFeatures(pstate)\n\n\t// Detect RDT features\n\ts.features.Attributes[RdtFeature] = nfdv1alpha1.NewAttributeFeatures(discoverRDT())\n\n\t// Detect available guest protection(SGX,TDX,SEV) 
features\n\ts.features.Attributes[SecurityFeature] = nfdv1alpha1.NewAttributeFeatures(discoverSecurity())\n\n\t// Detect SGX features\n\t//\n\t// DEPRECATED in v0.12: will be removed in the future\n\tif val, ok := s.features.Attributes[SecurityFeature].Elements[\"sgx.enabled\"]; ok {\n\t\ts.features.Attributes[SgxFeature] = nfdv1alpha1.NewAttributeFeatures(map[string]string{\"enabled\": val})\n\t}\n\n\t// Detect Secure Execution features\n\t//\n\t// DEPRECATED in v0.12: will be removed in the future\n\tif val, ok := s.features.Attributes[SecurityFeature].Elements[\"se.enabled\"]; ok {\n\t\ts.features.Attributes[SeFeature] = nfdv1alpha1.NewAttributeFeatures(map[string]string{\"enabled\": val})\n\t}\n\n\t// Detect SST features\n\ts.features.Attributes[SstFeature] = nfdv1alpha1.NewAttributeFeatures(discoverSST())\n\n\t// Detect hyper-threading\n\ts.features.Attributes[TopologyFeature] = nfdv1alpha1.NewAttributeFeatures(discoverTopology())\n\n\t// Detect Coprocessor features\n\ts.features.Attributes[CoprocessorFeature] = nfdv1alpha1.NewAttributeFeatures(discoverCoprocessor())\n\n\tklog.V(3).InfoS(\"discovered features\", \"featureSource\", s.Name(), \"features\", utils.DelayedDumper(s.features))\n\n\treturn nil\n}", "func featureInfoList(ctx context.Context, cl *featuregateclient.FeatureGateClient, featuregate string) ([]FeatureInfo, error) {\n\tclusterFeatures, err := cl.GetFeatureList(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgateList, err := cl.GetFeatureGateList(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfeatureInfos := collectFeaturesInfo(gateList.Items, clusterFeatures.Items)\n\n\tsetShowInList(featureInfos, includeExperimental, featuregate)\n\n\tfilteredList := featuresFilteredByFlags(featureInfos, activated, deactivated)\n\treturn filteredList, nil\n}", "func configureFlags(api *operations.CalculatorAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... 
}\n}", "func configureFlags(api *operations.ConfigServerAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func (f *EnvFlags) Feature() string {\n\treturn f.feature\n}", "func configureFlags(api *operations.SwaggertestAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func (sc *ServerConn) Features(ctx context.Context) (*ServerFeatures, error) {\n\tvar feats ServerFeatures\n\terr := sc.Request(ctx, \"server.features\", nil, &feats)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &feats, nil\n}", "func (sd *SousNewDeploy) AddFlags(fs *flag.FlagSet) {\n\tMustAddFlags(fs, &sd.DeployFilterFlags, NewDeployFilterFlagsHelp)\n\n\tfs.BoolVar(&sd.force, \"force\", false,\n\t\t\"force deploy no matter if GDM already is at the correct version\")\n\tfs.BoolVar(&sd.waitStable, \"wait-stable\", true,\n\t\t\"wait for the deploy to complete before returning (otherwise, use --wait-stable=false)\")\n\tfs.StringVar(&sd.dryrunOption, \"dry-run\", \"none\",\n\t\t\"prevent rectify from actually changing things - \"+\n\t\t\t\"values are none,scheduler,registry,both\")\n}", "func featuresFilteredByFlags(infos map[string]*FeatureInfo, activated, deactivated bool) []FeatureInfo {\n\tvar filteredList []FeatureInfo\n\tfor _, v := range infos {\n\t\tif activated && v.Activated && v.ShowInList {\n\t\t\tfilteredList = append(filteredList, *v)\n\t\t}\n\n\t\tif deactivated && !v.Activated && v.ShowInList {\n\t\t\tfilteredList = append(filteredList, *v)\n\t\t}\n\n\t\t// No flags were provided, so only filter out features that shouldn't be listed.\n\t\tif !activated && !deactivated && v.ShowInList {\n\t\t\tfilteredList = append(filteredList, *v)\n\t\t}\n\t}\n\treturn filteredList\n}", "func ServerFlags(cfg *config.Config) []cli.Flag {\n\treturn []cli.Flag{\n\t\t&cli.StringFlag{\n\t\t\tName: \"web.address\",\n\t\t\tValue: \"0.0.0.0:9000\",\n\t\t\tUsage: \"Address to bind the metrics server\",\n\t\t\tEnvVars: 
[]string{\"PROMETHEUS_SCW_WEB_ADDRESS\"},\n\t\t\tDestination: &cfg.Server.Addr,\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"web.path\",\n\t\t\tValue: \"/metrics\",\n\t\t\tUsage: \"Path to bind the metrics server\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_WEB_PATH\"},\n\t\t\tDestination: &cfg.Server.Path,\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"web.config\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"Path to web-config file\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_WEB_CONFIG\"},\n\t\t\tDestination: &cfg.Server.Web,\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"output.engine\",\n\t\t\tValue: \"file\",\n\t\t\tUsage: \"Enabled engine like file or http\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_OUTPUT_ENGINE\"},\n\t\t\tDestination: &cfg.Target.Engine,\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"output.file\",\n\t\t\tValue: \"/etc/prometheus/scw.json\",\n\t\t\tUsage: \"Path to write the file_sd config\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_OUTPUT_FILE\"},\n\t\t\tDestination: &cfg.Target.File,\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName: \"output.refresh\",\n\t\t\tValue: 30,\n\t\t\tUsage: \"Discovery refresh interval in seconds\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_OUTPUT_REFRESH\"},\n\t\t\tDestination: &cfg.Target.Refresh,\n\t\t},\n\t\t&cli.BoolFlag{\n\t\t\tName: \"scw.check_instance\",\n\t\t\tValue: true,\n\t\t\tUsage: \"Enable instance gathering\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_CHECK_INSTANCE\"},\n\t\t\tDestination: &cfg.Target.CheckInstance,\n\t\t},\n\t\t&cli.BoolFlag{\n\t\t\tName: \"scw.check_baremetal\",\n\t\t\tValue: true,\n\t\t\tUsage: \"Enable baremetal gathering\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_CHECK_BAREMETAL\"},\n\t\t\tDestination: &cfg.Target.CheckBaremetal,\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"scw.access_key\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"Access key for the Scaleway API\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_ACCESS_KEY\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"scw.secret_key\",\n\t\t\tValue: 
\"\",\n\t\t\tUsage: \"Secret key for the Scaleway API\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_SECRET_KEY\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"scw.org\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"Organization for the Scaleway API\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_ORG\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"scw.zone\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"Zone for the Scaleway API\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_ZONE\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"scw.config\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"Path to Scaleway configuration file\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_CONFIG\"},\n\t\t},\n\t\t&cli.StringSliceFlag{\n\t\t\tName: \"scw.instance_zone\",\n\t\t\tValue: cli.NewStringSlice(\"fr-par-1\", \"nl-ams-1\"),\n\t\t\tUsage: \"List of available zones for instance API\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_INSTANCE_ZONES\"},\n\t\t\tHidden: true,\n\t\t},\n\t\t&cli.StringSliceFlag{\n\t\t\tName: \"scw.baremetal_zone\",\n\t\t\tValue: cli.NewStringSlice(\"fr-par-2\"),\n\t\t\tUsage: \"List of available zones for baremetal API\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_BAREMETAL_ZONES\"},\n\t\t\tHidden: true,\n\t\t},\n\t}\n}", "func (s *ServerOption) AddFlags(fs *flag.FlagSet) {\n\tfs.BoolVar(&s.JsonLogFormat, \"json-log-format\", true, \"Set true to use json style log format. Set false to use plaintext style log format\")\n\tfs.StringVar(&s.AreaConfigPath, \"area-config-path\", \"https://raw.githubusercontent.com/kubeflow/community/master/labels-owners.yaml\", \"Path to the YAML file mapping area labels to owners.\")\n\tfs.IntVar(&s.Port, \"port\", 8080, \"The port to use for an http server.\")\n}", "func processCommandLineFlags(s *suite.Suite) {\n\tgetopt.HelpColumn = 35\n\tgetopt.DisplayWidth = 120\n\tgetopt.SetParameters(\"\")\n\tgetopt.Parse()\n\n\t// Lets check to see if the version command line flag was given. 
If it is\n\t// lets print out the version information and exit.\n\tif *bOptVer {\n\t\tprintOutputHeader()\n\t\tos.Exit(0)\n\t}\n\n\t// Lets check to see if the help command line flag was given. If it is lets\n\t// print out the help information and exit.\n\tif *bOptHelp {\n\t\tprintOutputHeader()\n\t\tgetopt.Usage()\n\t\tos.Exit(0)\n\t}\n\n\t// ------------------------------------------------------------\n\t// Map command line parameters to struct values\n\t// ------------------------------------------------------------\n\ts.Verbose = *bOptVerbose\n\ts.Debug = *bOptDebug\n\n\ts.Settings.URL = *sOptURL\n\ts.Settings.Proxy = *sOptProxy\n\ts.Settings.Discovery = *sOptDiscovery\n\ts.Settings.APIRoot = *sOptAPIRoot\n\ts.Settings.Username = *sOptUsername\n\ts.Settings.Password = *sOptPassword\n\n\ts.CollectionIDs.ReadOnly = *sOptReadOnly\n\ts.CollectionIDs.WriteOnly = *sOptWriteOnly\n\ts.CollectionIDs.ReadWrite = *sOptReadWrite\n}", "func (s *StorageSerializationOptions) AddFlags(fs *pflag.FlagSet) {\n\t// Note: the weird \"\"+ in below lines seems to be the only way to get gofmt to\n\t// arrange these text blocks sensibly. Grrr.\n\n\tdeprecatedStorageVersion := \"\"\n\tfs.StringVar(&deprecatedStorageVersion, \"storage-version\", deprecatedStorageVersion,\n\t\t\"DEPRECATED: the version to store the legacy v1 resources with. Defaults to server preferred.\")\n\tfs.MarkDeprecated(\"storage-version\", \"--storage-version is deprecated and will be removed when the v1 API \"+\n\t\t\"is retired. Setting this has no effect. See --storage-versions instead.\")\n\n\tfs.StringVar(&s.StorageVersions, \"storage-versions\", s.StorageVersions, \"\"+\n\t\t\"The per-group version to store resources in. \"+\n\t\t\"Specified in the format \\\"group1/version1,group2/version2,...\\\". \"+\n\t\t\"In the case where objects are moved from one group to the other, \"+\n\t\t\"you may specify the format \\\"group1=group2/v1beta1,group3/v1beta1,...\\\". 
\"+\n\t\t\"You only need to pass the groups you wish to change from the defaults. \"+\n\t\t\"It defaults to a list of preferred versions of all registered groups, \"+\n\t\t\"which is derived from the KUBE_API_VERSIONS environment variable.\")\n\n}", "func (s *Server) ConfigureFlags() {\n\tif s.api != nil {\n\t\tconfigureFlags(s.api)\n\t}\n}", "func (cfg *Config) AddFlags(fs *flag.FlagSet) {\n\tfs.StringVar(&cfg.GatewayIP, \"gateway.ip\", cfg.GatewayIP, \"ScaleIO Gateway IP\")\n\tfs.StringVar(&cfg.Username, \"gateway.username\", cfg.Username, \"ScaleIO Gateway Username\")\n\tfs.StringVar(&cfg.Password, \"gateway.password\", cfg.Password, \"ScaleIO Gateway Password\")\n\tfs.StringVar(&cfg.Version, \"gateway.version\", cfg.Version, \"ScaleIO Gateway Version\")\n\tfs.StringVar(&cfg.SdsList, \"gateway.sds\", cfg.SdsList, \"ScaleIO SDS List\")\n}", "func configureFlags(api *operations.ControlAsistenciaAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func (a *AdminApiService) CreateFeatureFlag(ctx _context.Context, featureFlag FeatureFlag) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/admin/feature-flag\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept 
header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &featureFlag\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = 
v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 409 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func (s *MesosTurboService) AddFlags(fs *pflag.FlagSet) {\n\tfs.StringVar(&s.MesosMasterConfig, \"mesosconfig\", s.MesosMasterConfig, \"Path to the mesos config file.\")\n\tfs.StringVar(&s.TurboCommConfig, \"turboconfig\", s.TurboCommConfig, \"Path to the turbo config flag.\")\n\n\tfs.StringVar(&s.Master, \"mesostype\", s.Master, \"Mesos Master Type 'Apache Mesos'|'Mesosphere DCOS'\")\n\tfs.StringVar(&s.MasterIPPort, \"masteripport\", s.MasterIPPort, \"Comma separated list of IP:port of each Mesos Master in the cluster\")\n\tfs.StringVar(&s.MasterUsername, \"masteruser\", s.MasterUsername, \"User for the Mesos Master\")\n\tfs.StringVar(&s.MasterPassword, \"masterpwd\", s.MasterPassword, \"Password for the Mesos Master\")\n\n\tfs.StringVar(&s.TurboServerUrl, \"turboserverurl\", s.TurboServerUrl, \"Url for Turbo Server\")\n\tfs.StringVar(&s.TurboServerVersion, \"turboserverversion\", s.TurboServerVersion, \"Version for Turbo Server\")\n\tfs.StringVar(&s.OpsManagerUsername, \"opsmanagerusername\", s.OpsManagerUsername, \"Username for Ops Manager\")\n\tfs.StringVar(&s.OpsManagerPassword, \"opsmanagerpassword\", s.OpsManagerPassword, \"Password for Ops Manager\")\n}", "func (b 
*AdapterBase) Flags() *pflag.FlagSet {\n\tb.initFlagSet()\n\tb.InstallFlags()\n\n\treturn b.FlagSet\n}", "func (o *Options) AddFlags(fs *pflag.FlagSet) {\n\tif o == nil {\n\t\treturn\n\t}\n\tfs.StringVar(&o.ShowHiddenMetricsForVersion, \"show-hidden-metrics-for-version\", o.ShowHiddenMetricsForVersion,\n\t\t\"The previous version for which you want to show hidden metrics. \"+\n\t\t\t\"Only the previous minor version is meaningful, other values will not be allowed. \"+\n\t\t\t\"The format is <major>.<minor>, e.g.: '1.16'. \"+\n\t\t\t\"The purpose of this format is make sure you have the opportunity to notice if the next release hides additional metrics, \"+\n\t\t\t\"rather than being surprised when they are permanently removed in the release after that.\")\n\tfs.StringSliceVar(&o.DisabledMetrics,\n\t\t\"disabled-metrics\",\n\t\to.DisabledMetrics,\n\t\t\"This flag provides an escape hatch for misbehaving metrics. \"+\n\t\t\t\"You must provide the fully qualified metric name in order to disable it. \"+\n\t\t\t\"Disclaimer: disabling metrics is higher in precedence than showing hidden metrics.\")\n\tfs.StringToStringVar(&o.AllowListMapping, \"allow-metric-labels\", o.AllowListMapping,\n\t\t\"The map from metric-label to value allow-list of this label. The key's format is <MetricName>,<LabelName>. \"+\n\t\t\t\"The value's format is <allowed_value>,<allowed_value>...\"+\n\t\t\t\"e.g. metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3' metric2,label1='v1,v2,v3'.\")\n}", "func (fft FeatureFlagToggles) RouteServices() *FeatureFlag {\n\treturn &FeatureFlag{\n\t\tName: routeServicesFlag,\n\t\tDefault: false,\n\t\tisEnabled: func(ff *FeatureFlag) bool {\n\t\t\tif setValue, ok := fft[routeServicesFlag]; ok {\n\t\t\t\treturn setValue\n\t\t\t}\n\t\t\treturn ff.Default\n\t\t},\n\t}\n}", "func configureFlags(api *operations.JiliAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... 
}\n}", "func (c *cmdCreate) AddFlags(fs *flag.FlagSet) {\n\tfs.StringVar(&(c.fileName), \"f\", \"\", \"gateway app file\")\n\tfs.StringVar(&(c.pingport), \"pingport\", \"\", \"ping port\")\n}", "func (o *SAControllerOptions) AddFlags(fs *pflag.FlagSet) {\n\tif o == nil {\n\t\treturn\n\t}\n\n\tfs.StringVar(&o.ServiceAccountKeyFile, \"service-account-private-key-file\", o.ServiceAccountKeyFile, \"Filename containing a PEM-encoded private RSA or ECDSA key used to sign service account tokens.\")\n\tfs.Int32Var(&o.ConcurrentSATokenSyncs, \"concurrent-serviceaccount-token-syncs\", o.ConcurrentSATokenSyncs, \"The number of service account token objects that are allowed to sync concurrently. Larger number = more responsive token generation, but more CPU (and network) load\")\n\tfs.StringVar(&o.RootCAFile, \"root-ca-file\", o.RootCAFile, \"If set, this root certificate authority will be included in service account's token secret. This must be a valid PEM-encoded CA bundle.\")\n}", "func (a *AssembliesApiService) UpdateFeature(ctx _context.Context, did string, wid string, eid string, fid string, localVarOptionals *UpdateFeatureOpts) (BtFeatureDefinitionResponse1617, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue BtFeatureDefinitionResponse1617\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/api/assemblies/d/{did}/w/{wid}/e/{eid}/features/featureid/{fid}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"did\"+\"}\", _neturl.QueryEscape(parameterToString(did, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"wid\"+\"}\", _neturl.QueryEscape(parameterToString(wid, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"eid\"+\"}\", _neturl.QueryEscape(parameterToString(eid, \"\")) , -1)\n\n\tlocalVarPath = 
strings.Replace(localVarPath, \"{\"+\"fid\"+\"}\", _neturl.QueryEscape(parameterToString(fid, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json;charset=UTF-8; qs=0.09\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1\", \"application/json;charset=UTF-8; qs=0.09\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tif localVarOptionals != nil && localVarOptionals.Body.IsSet() {\n\t\tlocalVarPostBody = localVarOptionals.Body.Value()\n\t}\n\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\t\tvar v 
BtFeatureDefinitionResponse1617\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (a *AdminApiService) DeleteFeatureFlag(ctx _context.Context, id string) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodDelete\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/admin/feature-flag/{id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(parameterToString(id, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = 
localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, 
localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func SetFeatureGates(flags map[string][]string, featureGates featuregate.MutableFeatureGate) ([]string, error) {\n\tfeatureGatesMap := map[string]bool{}\n\tfeatureGateParser := flag.NewMapStringBool(&featureGatesMap)\n\tfor _, val := range flags[\"feature-gates\"] {\n\t\tif err := featureGateParser.Set(val); err != nil {\n\t\t\treturn []string{}, err\n\t\t}\n\t}\n\n\treturn setFeatureGates(featureGatesMap, featureGates)\n}", "func AddFlags(flags *flag.FlagSet) {\n\tflags.String(\n\t\thttpServerHostPort,\n\t\tdefaultHTTPServerHostPort,\n\t\t\"host:port of the http server (e.g. for /sampling point and /baggageRestrictions endpoint)\")\n\n\tfor _, p := range defaultProcessors {\n\t\tprefix := fmt.Sprintf(processorPrefixFmt, p.model, p.protocol)\n\t\tflags.Int(prefix+suffixWorkers, defaultServerWorkers, \"how many workers the processor should run\")\n\t\tflags.Int(prefix+suffixServerQueueSize, defaultQueueSize, \"length of the queue for the UDP server\")\n\t\tflags.Int(prefix+suffixServerMaxPacketSize, defaultMaxPacketSize, \"max packet size for the UDP server\")\n\t\tflags.Int(prefix+suffixServerSocketBufferSize, 0, \"socket buffer size for UDP packets in bytes\")\n\t\tflags.String(prefix+suffixServerHostPort, \":\"+strconv.Itoa(p.port), \"host:port for the UDP server\")\n\t}\n}", "func NewSvcFlag(p *SvcFlagParam) ServiceFlags {\n\tvar flags ServiceFlags\n\n\tswitch p.SvcType {\n\tcase SVCTypeExternalIPs:\n\t\tflags |= serviceFlagExternalIPs\n\tcase SVCTypeNodePort:\n\t\tflags |= serviceFlagNodePort\n\tcase SVCTypeLoadBalancer:\n\t\tflags |= serviceFlagLoadBalancer\n\tcase SVCTypeHostPort:\n\t\tflags |= serviceFlagHostPort\n\t\tif p.LoopbackHostport {\n\t\t\tflags |= 
serviceFlagLoopback\n\t\t}\n\tcase SVCTypeLocalRedirect:\n\t\tflags |= serviceFlagLocalRedirect\n\t}\n\n\tswitch p.SvcNatPolicy {\n\tcase SVCNatPolicyNat46:\n\t\tfallthrough\n\tcase SVCNatPolicyNat64:\n\t\tflags |= serviceFlagNat46x64\n\t}\n\n\tif p.SvcExtLocal {\n\t\tflags |= serviceFlagExtLocalScope\n\t}\n\tif p.SvcIntLocal {\n\t\tflags |= serviceFlagIntLocalScope\n\t}\n\tif p.SessionAffinity {\n\t\tflags |= serviceFlagSessionAffinity\n\t}\n\tif p.IsRoutable {\n\t\tflags |= serviceFlagRoutable\n\t}\n\tif p.CheckSourceRange {\n\t\tflags |= serviceFlagSourceRange\n\t}\n\tif p.L7LoadBalancer {\n\t\tflags |= serviceFlagL7LoadBalancer\n\t}\n\tif p.SvcExtLocal != p.SvcIntLocal && p.SvcType != SVCTypeClusterIP {\n\t\tflags |= serviceFlagTwoScopes\n\t}\n\n\treturn flags\n}", "func (sdr SDR) HandleFlags() (err error) {\n\t// Catch any errors panicked while visiting flags.\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = r.(error)\n\t\t}\n\t}()\n\n\tflag.CommandLine.Visit(func(f *flag.Flag) {\n\t\tvar err error\n\t\tswitch f.Name {\n\t\tcase \"centerfreq\":\n\t\t\terr = sdr.SetCenterFreq(uint32(sdr.Flags.CenterFreq))\n\t\tcase \"samplerate\":\n\t\t\terr = sdr.SetSampleRate(uint32(sdr.Flags.SampleRate))\n\t\tcase \"tunergainmode\":\n\t\t\terr = sdr.SetGainMode(sdr.Flags.TunerGainMode)\n\t\tcase \"tunergain\":\n\t\t\terr = sdr.SetGain(uint32(sdr.Flags.TunerGain * 10.0))\n\t\tcase \"freqcorrection\":\n\t\t\terr = sdr.SetFreqCorrection(uint32(sdr.Flags.FreqCorrection))\n\t\tcase \"testmode\":\n\t\t\terr = sdr.SetTestMode(sdr.Flags.TestMode)\n\t\tcase \"agcmode\":\n\t\t\terr = sdr.SetAGCMode(sdr.Flags.AgcMode)\n\t\tcase \"directsampling\":\n\t\t\terr = sdr.SetDirectSampling(sdr.Flags.DirectSampling)\n\t\tcase \"offsettuning\":\n\t\t\terr = sdr.SetOffsetTuning(sdr.Flags.OffsetTuning)\n\t\tcase \"rtlxtalfreq\":\n\t\t\terr = sdr.SetRTLXtalFreq(uint32(sdr.Flags.RtlXtalFreq))\n\t\tcase \"tunerxtalfreq\":\n\t\t\terr = 
sdr.SetTunerXtalFreq(uint32(sdr.Flags.TunerXtalFreq))\n\t\tcase \"gainbyindex\":\n\t\t\terr = sdr.SetGainByIndex(uint32(sdr.Flags.GainByIndex))\n\t\t}\n\n\t\t// If we encounter an error, panic to catch in parent scope.\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t})\n\n\treturn\n}", "func (s *LifecyclerRPCServer) Flags(args interface{}, resp *PluginFlags) (err error) {\n\t*resp, err = s.Plugin.Flags()\n\treturn err\n}", "func configureFlags(api *operations.LolchestWinAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func (o *ResourcepoolPoolMember) SetFeatures(v []string) {\n\to.Features = v\n}", "func configureFlags(api *operations.ReservoirAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func configureFlags(api *operations.KubernikusAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func (o *SetupOptions) AddFlags(flagSet *pflag.FlagSet) {\n\t// Add flags for generic options\n\tflagSet.StringVarP(&o.LogLevel, \"log-level\", \"l\", \"INFO\", \"log print level\")\n\tflagSet.StringVarP(&o.ConfigPath, \"config\", \"c\", \"./cmd/client/conf/config-dev.toml\", \"init client by given config\")\n}", "func registerFlags(td *OsmTestData) {\n\tflag.BoolVar(&td.CleanupTest, \"cleanupTest\", true, \"Cleanup test resources when done\")\n\tflag.BoolVar(&td.WaitForCleanup, \"waitForCleanup\", true, \"Wait for effective deletion of resources\")\n\tflag.BoolVar(&td.IgnoreRestarts, \"ignoreRestarts\", false, \"When true, will not make tests fail if restarts of control plane processes are observed\")\n\n\tflag.StringVar(&td.TestDirBase, \"testDirBase\", testFolderBase, \"Test directory base. 
Test directory name will be created inside.\")\n\n\tflag.StringVar((*string)(&td.InstType), \"installType\", string(SelfInstall), \"Type of install/deployment for OSM\")\n\tflag.StringVar((*string)(&td.CollectLogs), \"collectLogs\", string(CollectLogsIfErrorOnly), \"Defines if/when to collect logs.\")\n\n\tflag.StringVar(&td.ClusterName, \"kindClusterName\", \"osm-e2e\", \"Name of the Kind cluster to be created\")\n\n\tflag.BoolVar(&td.CleanupKindCluster, \"cleanupKindCluster\", true, \"Cleanup kind cluster upon exit\")\n\tflag.BoolVar(&td.CleanupKindClusterBetweenTests, \"cleanupKindClusterBetweenTests\", false, \"Cleanup kind cluster between tests\")\n\tflag.StringVar(&td.ClusterVersion, \"kindClusterVersion\", \"\", \"Kind cluster version, ex. v.1.20.2\")\n\n\tflag.StringVar(&td.CtrRegistryServer, \"ctrRegistry\", os.Getenv(\"CTR_REGISTRY\"), \"Container registry\")\n\tflag.StringVar(&td.CtrRegistryUser, \"ctrRegistryUser\", os.Getenv(\"CTR_REGISTRY_USER\"), \"Container registry\")\n\tflag.StringVar(&td.CtrRegistryPassword, \"ctrRegistrySecret\", os.Getenv(\"CTR_REGISTRY_PASSWORD\"), \"Container registry secret\")\n\n\tflag.StringVar(&td.OsmImageTag, \"osmImageTag\", utils.GetEnv(\"CTR_TAG\", defaultImageTag), \"OSM image tag\")\n\tflag.StringVar(&td.OsmNamespace, \"OsmNamespace\", utils.GetEnv(\"K8S_NAMESPACE\", defaultOsmNamespace), \"OSM Namespace\")\n\tflag.StringVar(&td.OsmMeshConfigName, \"OsmMeshConfig\", defaultMeshConfigName, \"OSM MeshConfig name\")\n\n\tflag.BoolVar(&td.EnableNsMetricTag, \"EnableMetricsTag\", true, \"Enable tagging Namespaces for metrics collection\")\n\tflag.BoolVar(&td.DeployOnOpenShift, \"deployOnOpenShift\", false, \"Configure tests to run on OpenShift\")\n\tflag.BoolVar(&td.DeployOnWindowsWorkers, \"deployOnWindowsWorkers\", false, \"Configure tests to run on Windows workers\")\n\tflag.BoolVar(&td.RetryAppPodCreation, \"retryAppPodCreation\", true, \"Retry app pod creation on error\")\n\tflag.BoolVar(&td.EnableSPIFFE, 
\"enableSPIFFE\", false, \"Globally Enables SPIFFE IDs when running tests\")\n}", "func (client BaseClient) CreateFeature(ctx context.Context, body *Feature) (result Feature, err error) {\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: body,\n\t\t\tConstraints: []validation.Constraint{{Target: \"body\", Name: validation.Null, Rule: false,\n\t\t\t\tChain: []validation.Constraint{{Target: \"body.Name\", Name: validation.Null, Rule: true, Chain: nil},\n\t\t\t\t\t{Target: \"body.Version\", Name: validation.Null, Rule: true,\n\t\t\t\t\t\tChain: []validation.Constraint{{Target: \"body.Version\", Name: validation.Pattern, Rule: `^v?((\\d+)\\.(\\d+)\\.(\\d+))(?:-([\\dA-Za-z\\-]+(?:\\.[\\dA-Za-z\\-]+)*))?(?:\\+([\\dA-Za-z\\-]+(?:\\.[\\dA-Za-z\\-]+)*))?$`, Chain: nil}}},\n\t\t\t\t\t{Target: \"body.Path\", Name: validation.Null, Rule: false,\n\t\t\t\t\t\tChain: []validation.Constraint{{Target: \"body.Path\", Name: validation.Pattern, Rule: `^nrn:beacon:(?<tenant>[^:]+:(?<type>sys|exp|ftr|fin):(?<feature>[^:]+)?:(?<version>[^:]+)?:(?<instance>[^:]*)?:(?<system>[^:]*)?:(?<name>[^:]*)?)$`, Chain: nil}}},\n\t\t\t\t}}}}}); err != nil {\n\t\treturn result, validation.NewError(\"beacon.BaseClient\", \"CreateFeature\", err.Error())\n\t}\n\n\treq, err := client.CreateFeaturePreparer(ctx, body)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"beacon.BaseClient\", \"CreateFeature\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.CreateFeatureSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"beacon.BaseClient\", \"CreateFeature\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.CreateFeatureResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"beacon.BaseClient\", \"CreateFeature\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func (a *AssembliesApiService) 
AddFeature(ctx _context.Context, did string, wvm string, wvmid string, eid string, localVarOptionals *AddFeatureOpts) (BtFeatureDefinitionResponse1617, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue BtFeatureDefinitionResponse1617\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/api/assemblies/d/{did}/{wvm}/{wvmid}/e/{eid}/features\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"did\"+\"}\", _neturl.QueryEscape(parameterToString(did, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"wvm\"+\"}\", _neturl.QueryEscape(parameterToString(wvm, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"wvmid\"+\"}\", _neturl.QueryEscape(parameterToString(wvmid, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"eid\"+\"}\", _neturl.QueryEscape(parameterToString(eid, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json;charset=UTF-8; qs=0.09\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1\", \"application/json;charset=UTF-8; qs=0.09\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tif 
localVarOptionals != nil && localVarOptionals.Body.IsSet() {\n\t\tlocalVarPostBody = localVarOptionals.Body.Value()\n\t}\n\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\t\tvar v BtFeatureDefinitionResponse1617\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (ctx *Ctx) ParseFlags() {\n\tflag.Var(ctx.BugZilla.Origin, \"bugzilla-origin\", \"Bugzilla origin url\")\n\tflag.Var(ctx.BugZilla.EsIndex, \"bugzilla-es-index\", \"Bugzilla es index base name\")\n\tflag.Var(ctx.BugZilla.FromDate, \"bugzilla-from-date\", \"Optional, date to start syncing from\")\n\tflag.Var(ctx.BugZilla.Project, 
\"bugzilla-project\", \"Slug name of a project e.g. yocto\")\n\tflag.Var(ctx.BugZilla.DoFetch, \"bugzilla-do-fetch\", \"To decide whether will fetch raw data or not\")\n\tflag.Var(ctx.BugZilla.DoEnrich, \"bugzilla-do-enrich\", \"To decide whether will do enrich raw data or not.\")\n\tflag.Var(ctx.BugZilla.FetchSize, \"bugzilla-fetch-size\", \"Total number of fetched items per request.\")\n\tflag.Var(ctx.BugZilla.EnrichSize, \"bugzilla-enrich-size\", \"Total number of enriched items per request.\")\n\tflag.Var(ctx.PiperMail.ProjectSlug, \"bugzilla-slug\", \"Bugzilla project slug\")\n\n\tflag.Var(ctx.PiperMail.Origin, \"pipermail-origin\", \"Pipermail origin url\")\n\tflag.Var(ctx.PiperMail.ProjectSlug, \"pipermail-slug\", \"Pipermail project slug\")\n\tflag.Var(ctx.PiperMail.GroupName, \"pipermail-groupname\", \"Pipermail group name\")\n\tflag.Var(ctx.PiperMail.EsIndex, \"pipermail-es-index\", \"Pipermail es index base name\")\n\tflag.Var(ctx.PiperMail.FromDate, \"pipermail-from-date\", \"Optional, date to start syncing from\")\n\tflag.Var(ctx.PiperMail.Project, \"pipermail-project\", \"Slug name of a project e.g. 
yocto\")\n\tflag.Var(ctx.PiperMail.DoFetch, \"pipermail-do-fetch\", \"To decide whether will fetch raw data or not\")\n\tflag.Var(ctx.PiperMail.DoEnrich, \"pipermail-do-enrich\", \"To decide whether will do enrich raw data or not.\")\n\tflag.Var(ctx.PiperMail.FetchSize, \"pipermail-fetch-size\", \"Total number of fetched items per request.\")\n\tflag.Var(ctx.PiperMail.EnrichSize, \"pipermail-enrich-size\", \"Total number of enriched items per request.\")\n\n\tflag.Var(ctx.GoogleGroups.ProjectSlug, \"googlegroups-slug\", \"GoogleGroups project slug\")\n\tflag.Var(ctx.GoogleGroups.GroupName, \"googlegroups-groupname\", \"GoogleGroups email address\")\n\tflag.Var(ctx.GoogleGroups.EsIndex, \"googlegroups-es-index\", \"GoogleGroups es index base name\")\n\tflag.Var(ctx.GoogleGroups.FromDate, \"googlegroups-from-date\", \"Optional, date to start syncing from\")\n\tflag.Var(ctx.GoogleGroups.Project, \"googlegroups-project\", \"Slug name of a project e.g. yocto\")\n\tflag.Var(ctx.GoogleGroups.DoFetch, \"googlegroups-do-fetch\", \"To decide whether will fetch raw data or not\")\n\tflag.Var(ctx.GoogleGroups.DoEnrich, \"googlegroups-do-enrich\", \"To decide whether will do enrich raw data or not.\")\n\tflag.Var(ctx.GoogleGroups.FetchSize, \"googlegroups-fetch-size\", \"Total number of fetched items per request.\")\n\tflag.Var(ctx.GoogleGroups.EnrichSize, \"googlegroups-enrich-size\", \"Total number of enriched items per request.\")\n\n\tflag.Parse()\n}", "func FeatureVector(b *Board, version int) (f []float32) {\n\tif version > AllFeaturesDim {\n\t\tlog.Panicf(\"Requested %d features, but only know about %d\", version, AllFeaturesDim)\n\t}\n\tf = make([]float32, AllFeaturesDim)\n\tfor ii := range AllFeatures {\n\t\tfeatDef := &AllFeatures[ii]\n\t\tif featDef.Version <= version {\n\t\t\tfeatDef.Setter(b, featDef, f)\n\t\t}\n\t}\n\n\tif version != AllFeaturesDim {\n\t\t// Filter only features for given version.\n\t\tnewF := make([]float32, 0, version)\n\t\tfor ii := range 
AllFeatures {\n\t\t\tfeatDef := &AllFeatures[ii]\n\t\t\tif featDef.Version <= version {\n\t\t\t\tnewF = append(newF, f[featDef.VecIndex:featDef.VecIndex+featDef.Dim]...)\n\t\t\t}\n\t\t}\n\t\tf = newF\n\t}\n\n\treturn\n}", "func configureFlags(api *operations.OpenMockAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func configureFlags(api *operations.MonocularAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func setupFlags(params, paramsJSON string) *pflag.FlagSet {\n\tflagSet := pflag.NewFlagSet(\"TestGetParamsFromFlags\", pflag.PanicOnError)\n\tregisterParamsFlags(flagSet)\n\t// mirror actual usage by using Parse rather than Set\n\tcmdline := []string{\"apply\"}\n\tif params != \"\" {\n\t\tcmdline = append(cmdline, \"--params\", params)\n\t}\n\tif paramsJSON != \"\" {\n\t\tcmdline = append(cmdline, \"--paramsJSON\", paramsJSON)\n\t}\n\n\tif err := flagSet.Parse(append(cmdline, \"samples/test.hcl\")); err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn flagSet\n}", "func (h *HyperCommand) Flags() *pflag.FlagSet {\n\treturn h.root.Flags()\n}", "func configureFlags(api *operations.OpenPitrixAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func (VS *Server) features(c *gin.Context) {\n\trender(c, gin.H{}, \"presentation-features.html\")\n}", "func (client *LDClient) AllFlagsState(context ldcontext.Context, options ...flagstate.Option) flagstate.AllFlags {\n\tvalid := true\n\tif client.IsOffline() {\n\t\tclient.loggers.Warn(\"Called AllFlagsState in offline mode. Returning empty state\")\n\t\tvalid = false\n\t} else if !client.Initialized() {\n\t\tif client.store.IsInitialized() {\n\t\t\tclient.loggers.Warn(\"Called AllFlagsState before client initialization; using last known values from data store\")\n\t\t} else {\n\t\t\tclient.loggers.Warn(\"Called AllFlagsState before client initialization. 
Data store not available; returning empty state\") //nolint:lll\n\t\t\tvalid = false\n\t\t}\n\t}\n\n\tif !valid {\n\t\treturn flagstate.AllFlags{}\n\t}\n\n\titems, err := client.store.GetAll(datakinds.Features)\n\tif err != nil {\n\t\tclient.loggers.Warn(\"Unable to fetch flags from data store. Returning empty state. Error: \" + err.Error())\n\t\treturn flagstate.AllFlags{}\n\t}\n\n\tclientSideOnly := false\n\tfor _, o := range options {\n\t\tif o == flagstate.OptionClientSideOnly() {\n\t\t\tclientSideOnly = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tstate := flagstate.NewAllFlagsBuilder(options...)\n\tfor _, item := range items {\n\t\tif item.Item.Item != nil {\n\t\t\tif flag, ok := item.Item.Item.(*ldmodel.FeatureFlag); ok {\n\t\t\t\tif clientSideOnly && !flag.ClientSideAvailability.UsingEnvironmentID {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tresult := client.evaluator.Evaluate(flag, context, nil)\n\n\t\t\t\tstate.AddFlag(\n\t\t\t\t\titem.Key,\n\t\t\t\t\tflagstate.FlagState{\n\t\t\t\t\t\tValue: result.Detail.Value,\n\t\t\t\t\t\tVariation: result.Detail.VariationIndex,\n\t\t\t\t\t\tReason: result.Detail.Reason,\n\t\t\t\t\t\tVersion: flag.Version,\n\t\t\t\t\t\tTrackEvents: flag.TrackEvents || result.IsExperiment,\n\t\t\t\t\t\tTrackReason: result.IsExperiment,\n\t\t\t\t\t\tDebugEventsUntilDate: flag.DebugEventsUntilDate,\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn state.Build()\n}" ]
[ "0.6283442", "0.58581585", "0.5759627", "0.56996477", "0.56361234", "0.5633944", "0.5601203", "0.5553357", "0.5548972", "0.55474377", "0.53957814", "0.53191704", "0.5276696", "0.5251556", "0.52426904", "0.5235017", "0.5213624", "0.5191827", "0.5179796", "0.5133154", "0.5120499", "0.51138586", "0.5092476", "0.5061599", "0.50592816", "0.50440687", "0.50232065", "0.49985152", "0.49919987", "0.49903297", "0.49874517", "0.49853578", "0.49526414", "0.49375442", "0.49143115", "0.48979914", "0.48936018", "0.48913407", "0.48873863", "0.48645976", "0.48630604", "0.4857825", "0.48540926", "0.4851144", "0.48507842", "0.4837795", "0.48188666", "0.48136187", "0.47989306", "0.47966653", "0.47813874", "0.4773839", "0.47676682", "0.4765815", "0.4745114", "0.4743934", "0.47432616", "0.47102305", "0.4708159", "0.47001722", "0.46999368", "0.4689274", "0.46850637", "0.46823114", "0.46820906", "0.46818152", "0.46815935", "0.46803293", "0.46718532", "0.4666791", "0.46652535", "0.46509233", "0.4648145", "0.46424124", "0.4641459", "0.4640468", "0.46400046", "0.46387824", "0.46294883", "0.46260738", "0.46257395", "0.4611324", "0.46098238", "0.4602865", "0.45977983", "0.45973852", "0.45812213", "0.45743674", "0.45725963", "0.45717955", "0.45549417", "0.45528668", "0.45518783", "0.45470873", "0.45296225", "0.452792", "0.4524855", "0.45227766", "0.45217437", "0.45112896" ]
0.78591985
0
Handler maps the different existing endpoints with the functions they must call
func Handler(a adding.Service, l listing.Service, d deleting.Service) *gin.Engine { router := gin.Default() router.GET("/movies", listMovies(l)) router.GET("/movies/:id", getMovie(l)) router.POST("/movies", addMovie(a)) router.DELETE("/movies/:id", deleteMovie(d)) return router }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (service *MetadataService) Endpoints() map[string]map[string]http.HandlerFunc {\n\thandlers := map[string]map[string]http.HandlerFunc{}\n\n\tfor index, value := range service.config.MetadataPrefixes {\n\t\tserver.Log.Info(\"adding Metadata prefix (\", index, \") \", value)\n\t\thandlers[value+\"/\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetMetadataIndex),\n\t\t}\n\t\thandlers[value+\"/ami-id\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetAmiId),\n\t\t}\n\t\thandlers[value+\"/iam\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetMetadataIAM),\n\t\t}\n\t\thandlers[value+\"/ami-launch-index\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetAmiLaunchIndex),\n\t\t}\n\t\thandlers[value+\"/ami-manifest-path\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetAmiManifestPath),\n\t\t}\n\t\thandlers[value+\"/placement/availability-zone\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetAvailabilityZone),\n\t\t}\n\t\thandlers[value+\"/hostname\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetHostName),\n\t\t}\n\t\thandlers[value+\"/public-hostname\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetPublicHostName),\n\t\t}\n\t\thandlers[value+\"/public-ipv4\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetPublicIpv4),\n\t\t}\n\t\thandlers[value+\"/instance-action\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetInstanceAction),\n\t\t}\n\t\thandlers[value+\"/instance-id\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetInstanceId),\n\t\t}\n\t\thandlers[value+\"/instance-type\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetInstanceType),\n\t\t}\n\t\thandlers[value+\"/iam/\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetIAM),\n\t\t}\n\t\thandlers[value+\"/iam/security-credentials\"] = 
map[string]http.HandlerFunc{\n\t\t\t\"GET\": movedPermanently(value + \"/iam/security-credentials/\"),\n\t\t}\n\t\thandlers[value+\"/iam/security-credentials/\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetSecurityCredentials),\n\t\t}\n\t\thandlers[value+\"/iam/security-credentials/{username}\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": service.GetSecurityCredentialDetails,\n\t\t}\n\t\thandlers[value+\"/local-hostname\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetLocalHostName),\n\t\t}\n\t\thandlers[value+\"/local-ipv4\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetLocalIpv4),\n\t\t}\n\t\thandlers[value+\"/mac\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetMac),\n\t\t}\n\t\thandlers[value+\"/profile\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetProfile),\n\t\t}\n\t\thandlers[value+\"/reservation-id\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetReservationId),\n\t\t}\n\t\thandlers[value+\"/security-groups\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetSecurityGroups),\n\t\t}\n\t\thandlers[value+\"/network/interfaces/macs/{mac}/subnet-id\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetSubnetId),\n\t\t}\n\t\thandlers[value+\"/network/interfaces/macs/{mac}/vpc-id\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetVpcId),\n\t\t}\n\t}\n\n\tfor index, value := range service.config.UserdataPrefixes {\n\t\tserver.Log.Info(\"adding Userdata prefix (\", index, \") \", value)\n\n\t\thandlers[value+\"/\"] = map[string]http.HandlerFunc{\n\t\t\t\"GET\": plainText(service.GetUserData),\n\t\t}\n\t}\n\thandlers[\"/latest/dynamic/\"] = map[string]http.HandlerFunc{\n\t\t\"GET\": service.GetDynamicIndex,\n\t}\n\thandlers[\"/latest/dynamic/instance-identity/\"] = map[string]http.HandlerFunc{\n\t\t\"GET\": 
service.GetDynamicInstanceIdentityIndex,\n\t}\n\thandlers[\"/latest/dynamic/instance-identity/document\"] = map[string]http.HandlerFunc{\n\t\t\"GET\": service.GetDynamicDocument,\n\t}\n\thandlers[\"/latest/api/token\"] = map[string]http.HandlerFunc{\n\t\t\"PUT\": service.GetToken,\n\t}\n\thandlers[\"/\"] = map[string]http.HandlerFunc{\n\t\t\"GET\": service.GetIndex,\n\t}\n\treturn handlers\n}", "func (ro *Route) handler(uri []string, httpMethod string, ids idMap) (*handler, error) {\n\n\t//log.Println(\"Route Handling\", uri, \"in the\", ro)\n\n\t// Check if is trying to request some Handler of this Route\n\tif len(uri) == 0 {\n\t\th, exist := ro.Handlers[httpMethod]\n\t\tif !exist {\n\t\t\treturn nil, fmt.Errorf(\"Method %s not found in the %s\", httpMethod, ro)\n\t\t}\n\t\treturn h, nil\n\t}\n\n\t// Check if is trying to request some Action Handler of this Route\n\tif len(uri) == 1 {\n\n\t\th, exist := ro.Handlers[httpMethod+uri[0]]\n\t\tif exist {\n\t\t\treturn h, nil\n\t\t}\n\n\t\t// It is not an error, cause could have an resources with this name, not an action\n\t\t//log.Println(\"Action \" + httpMethod + uri[0] + \" NOT FOUND\")\n\t}\n\n\t// If we are in a Slice Route, get its ID and search in the Elem Route\n\tif ro.IsSlice {\n\t\t// Add its ID to the Map\n\t\tid := &ID{id: uri[0]}\n\t\tids[ro.Elem.Value.Type()] = reflect.ValueOf(id)\n\n\t\treturn ro.Elem.handler(uri[1:], httpMethod, ids)\n\t}\n\n\t// If we are in an Elem Route, the only possibility is to have a Child with this Name\n\tchild, exist := ro.Children[uri[0]]\n\tif exist {\n\t\treturn child.handler(uri[1:], httpMethod, ids)\n\t}\n\n\treturn nil, fmt.Errorf(\"Not exist any Child '%s' or Action '%s' in the %s\", uri[0], httpMethod+strings.Title(uri[0]), ro)\n}", "func (e *Endpoints) Use(m func(goa.Endpoint) goa.Endpoint) {\n\te.Post = m(e.Post)\n\te.Show = m(e.Show)\n\te.TimeSearch = m(e.TimeSearch)\n\te.ListByTimeAndPath = m(e.ListByTimeAndPath)\n\te.ListByPath = m(e.ListByPath)\n\te.Update = 
m(e.Update)\n\te.Deactivate = m(e.Deactivate)\n\te.Delete = m(e.Delete)\n}", "func CreateEndpointHandlers(lbc *controller.LoadBalancerController) cache.ResourceEventHandlerFuncs {\n\treturn cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\tendpoint := obj.(*api_v1.Endpoints)\n\t\t\tlog.Printf(\"Adding endpoints: %v\", endpoint.Name)\n\t\t\tlbc.AddSyncQueue(obj)\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\tendpoint, isEndpoint := obj.(*api_v1.Endpoints)\n\t\t\tif !isEndpoint {\n\t\t\t\tdeletedState, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Printf(\"Error received unexpected object: %v\", obj)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tendpoint, ok = deletedState.Obj.(*api_v1.Endpoints)\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Printf(\"Error DeletedFinalStateUnknown contained non-Endpoints object: %v\", deletedState.Obj)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tlog.Printf(\"Removing endpoints: %v\", endpoint.Name)\n\t\t\tlbc.AddSyncQueue(obj)\n\t\t},\n\t\tUpdateFunc: func(old, cur interface{}) {\n\t\t\tif !reflect.DeepEqual(old, cur) {\n\t\t\t\tlog.Printf(\"Endpoints %v changed, syncing\", cur.(*api_v1.Endpoints).Name)\n\t\t\t\tlbc.AddSyncQueue(cur)\n\t\t\t}\n\t\t},\n\t}\n}", "func SetupHandlers(r *mux.Router) {\n\t//object operations\n\tr.HandleFunc(\"/v1/file/upload/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(UploadHandler))))\n\tr.HandleFunc(\"/v1/file/download/{allocation}\", common.UserRateLimit(common.ToByteStream(WithConnection(DownloadHandler)))).Methods(\"POST\")\n\tr.HandleFunc(\"/v1/file/rename/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(RenameHandler))))\n\tr.HandleFunc(\"/v1/file/copy/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(CopyHandler))))\n\tr.HandleFunc(\"/v1/file/attributes/{allocation}\", 
common.UserRateLimit(common.ToJSONResponse(WithConnection(UpdateAttributesHandler))))\n\tr.HandleFunc(\"/v1/dir/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(CreateDirHandler)))).Methods(\"POST\")\n\tr.HandleFunc(\"/v1/dir/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(CreateDirHandler)))).Methods(\"DELETE\")\n\tr.HandleFunc(\"/v1/dir/rename/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(CreateDirHandler)))).Methods(\"POST\")\n\n\tr.HandleFunc(\"/v1/connection/commit/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(CommitHandler))))\n\tr.HandleFunc(\"/v1/file/commitmetatxn/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(CommitMetaTxnHandler))))\n\tr.HandleFunc(\"/v1/file/collaborator/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(CollaboratorHandler))))\n\tr.HandleFunc(\"/v1/file/calculatehash/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(CalculateHashHandler))))\n\n\t//object info related apis\n\tr.HandleFunc(\"/allocation\", common.UserRateLimit(common.ToJSONResponse(WithConnection(AllocationHandler))))\n\tr.HandleFunc(\"/v1/file/meta/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithReadOnlyConnection(FileMetaHandler))))\n\tr.HandleFunc(\"/v1/file/stats/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithReadOnlyConnection(FileStatsHandler))))\n\tr.HandleFunc(\"/v1/file/list/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithReadOnlyConnection(ListHandler))))\n\tr.HandleFunc(\"/v1/file/objectpath/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithReadOnlyConnection(ObjectPathHandler))))\n\tr.HandleFunc(\"/v1/file/referencepath/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithReadOnlyConnection(ReferencePathHandler))))\n\tr.HandleFunc(\"/v1/file/objecttree/{allocation}\", 
common.UserRateLimit(common.ToJSONResponse(WithReadOnlyConnection(ObjectTreeHandler))))\n\tr.HandleFunc(\"/v1/file/refs/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithReadOnlyConnection(RefsHandler)))).Methods(\"GET\")\n\t//admin related\n\tr.HandleFunc(\"/_debug\", common.UserRateLimit(common.ToJSONResponse(DumpGoRoutines)))\n\tr.HandleFunc(\"/_config\", common.UserRateLimit(common.ToJSONResponse(GetConfig)))\n\tr.HandleFunc(\"/_stats\", common.UserRateLimit(stats.StatsHandler))\n\tr.HandleFunc(\"/_statsJSON\", common.UserRateLimit(common.ToJSONResponse(stats.StatsJSONHandler)))\n\tr.HandleFunc(\"/_cleanupdisk\", common.UserRateLimit(common.ToJSONResponse(WithReadOnlyConnection(CleanupDiskHandler))))\n\tr.HandleFunc(\"/getstats\", common.UserRateLimit(common.ToJSONResponse(stats.GetStatsHandler)))\n\n\t//marketplace related\n\tr.HandleFunc(\"/v1/marketplace/shareinfo/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(MarketPlaceShareInfoHandler))))\n}", "func (s *HTTPServer) wrap(handler endpoint, methods []string) http.HandlerFunc {\n\treturn func(resp http.ResponseWriter, req *http.Request) {\n\t\tsetHeaders(resp, s.agent.config.HTTPResponseHeaders)\n\t\tsetTranslateAddr(resp, s.agent.config.TranslateWANAddrs)\n\n\t\t// Obfuscate any tokens from appearing in the logs\n\t\tformVals, err := url.ParseQuery(req.URL.RawQuery)\n\t\tif err != nil {\n\t\t\ts.agent.logger.Printf(\"[ERR] http: Failed to decode query: %s from=%s\", err, req.RemoteAddr)\n\t\t\tresp.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tlogURL := req.URL.String()\n\t\tif tokens, ok := formVals[\"token\"]; ok {\n\t\t\tfor _, token := range tokens {\n\t\t\t\tif token == \"\" {\n\t\t\t\t\tlogURL += \"<hidden>\"\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tlogURL = strings.Replace(logURL, token, \"<hidden>\", -1)\n\t\t\t}\n\t\t}\n\t\tlogURL = aclEndpointRE.ReplaceAllString(logURL, \"$1<hidden>$4\")\n\n\t\tif s.blacklist.Block(req.URL.Path) 
{\n\t\t\terrMsg := \"Endpoint is blocked by agent configuration\"\n\t\t\ts.agent.logger.Printf(\"[ERR] http: Request %s %v, error: %v from=%s\", req.Method, logURL, err, req.RemoteAddr)\n\t\t\tresp.WriteHeader(http.StatusForbidden)\n\t\t\tfmt.Fprint(resp, errMsg)\n\t\t\treturn\n\t\t}\n\n\t\tisForbidden := func(err error) bool {\n\t\t\tif acl.IsErrPermissionDenied(err) || acl.IsErrNotFound(err) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\t_, ok := err.(ForbiddenError)\n\t\t\treturn ok\n\t\t}\n\n\t\tisMethodNotAllowed := func(err error) bool {\n\t\t\t_, ok := err.(MethodNotAllowedError)\n\t\t\treturn ok\n\t\t}\n\n\t\tisBadRequest := func(err error) bool {\n\t\t\t_, ok := err.(BadRequestError)\n\t\t\treturn ok\n\t\t}\n\n\t\tisTooManyRequests := func(err error) bool {\n\t\t\t// Sadness net/rpc can't do nice typed errors so this is all we got\n\t\t\treturn err.Error() == consul.ErrRateLimited.Error()\n\t\t}\n\n\t\taddAllowHeader := func(methods []string) {\n\t\t\tresp.Header().Add(\"Allow\", strings.Join(methods, \",\"))\n\t\t}\n\n\t\thandleErr := func(err error) {\n\t\t\ts.agent.logger.Printf(\"[ERR] http: Request %s %v, error: %v from=%s\", req.Method, logURL, err, req.RemoteAddr)\n\t\t\tswitch {\n\t\t\tcase isForbidden(err):\n\t\t\t\tresp.WriteHeader(http.StatusForbidden)\n\t\t\t\tfmt.Fprint(resp, err.Error())\n\t\t\tcase structs.IsErrRPCRateExceeded(err):\n\t\t\t\tresp.WriteHeader(http.StatusTooManyRequests)\n\t\t\tcase isMethodNotAllowed(err):\n\t\t\t\t// RFC2616 states that for 405 Method Not Allowed the response\n\t\t\t\t// MUST include an Allow header containing the list of valid\n\t\t\t\t// methods for the requested resource.\n\t\t\t\t// https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html\n\t\t\t\taddAllowHeader(err.(MethodNotAllowedError).Allow)\n\t\t\t\tresp.WriteHeader(http.StatusMethodNotAllowed) // 405\n\t\t\t\tfmt.Fprint(resp, err.Error())\n\t\t\tcase isBadRequest(err):\n\t\t\t\tresp.WriteHeader(http.StatusBadRequest)\n\t\t\t\tfmt.Fprint(resp, 
err.Error())\n\t\t\tcase isTooManyRequests(err):\n\t\t\t\tresp.WriteHeader(http.StatusTooManyRequests)\n\t\t\t\tfmt.Fprint(resp, err.Error())\n\t\t\tdefault:\n\t\t\t\tresp.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tfmt.Fprint(resp, err.Error())\n\t\t\t}\n\t\t}\n\n\t\tstart := time.Now()\n\t\tdefer func() {\n\t\t\ts.agent.logger.Printf(\"[DEBUG] http: Request %s %v (%v) from=%s\", req.Method, logURL, time.Since(start), req.RemoteAddr)\n\t\t}()\n\n\t\tvar obj interface{}\n\n\t\t// if this endpoint has declared methods, respond appropriately to OPTIONS requests. Otherwise let the endpoint handle that.\n\t\tif req.Method == \"OPTIONS\" && len(methods) > 0 {\n\t\t\taddAllowHeader(append([]string{\"OPTIONS\"}, methods...))\n\t\t\treturn\n\t\t}\n\n\t\t// if this endpoint has declared methods, check the request method. Otherwise let the endpoint handle that.\n\t\tmethodFound := len(methods) == 0\n\t\tfor _, method := range methods {\n\t\t\tif method == req.Method {\n\t\t\t\tmethodFound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !methodFound {\n\t\t\terr = MethodNotAllowedError{req.Method, append([]string{\"OPTIONS\"}, methods...)}\n\t\t} else {\n\t\t\terr = s.checkWriteAccess(req)\n\n\t\t\tif err == nil {\n\t\t\t\t// Invoke the handler\n\t\t\t\tobj, err = handler(resp, req)\n\t\t\t}\n\t\t}\n\t\tcontentType := \"application/json\"\n\t\thttpCode := http.StatusOK\n\t\tif err != nil {\n\t\t\tif errPayload, ok := err.(CodeWithPayloadError); ok {\n\t\t\t\thttpCode = errPayload.StatusCode\n\t\t\t\tif errPayload.ContentType != \"\" {\n\t\t\t\t\tcontentType = errPayload.ContentType\n\t\t\t\t}\n\t\t\t\tif errPayload.Reason != \"\" {\n\t\t\t\t\tresp.Header().Add(\"X-Consul-Reason\", errPayload.Reason)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\thandleErr(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif obj == nil {\n\t\t\treturn\n\t\t}\n\t\tvar buf []byte\n\t\tif contentType == \"application/json\" {\n\t\t\tbuf, err = s.marshalJSON(req, obj)\n\t\t\tif err != nil 
{\n\t\t\t\thandleErr(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tif strings.HasPrefix(contentType, \"text/\") {\n\t\t\t\tif val, ok := obj.(string); ok {\n\t\t\t\t\tbuf = []byte(val)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tresp.Header().Set(\"Content-Type\", contentType)\n\t\tresp.WriteHeader(httpCode)\n\t\tresp.Write(buf)\n\t}\n}", "func (c *Operation) registerHandler() {\n\t// Add more protocol endpoints here to expose them as controller API endpoints\n\tc.handlers = []Handler{\n\t\tsupport.NewHTTPHandler(login, http.MethodGet, c.login),\n\t\tsupport.NewHTTPHandler(settings, http.MethodGet, c.settings),\n\t\tsupport.NewHTTPHandler(getCreditScore, http.MethodGet, c.getCreditScore),\n\t\tsupport.NewHTTPHandler(callback, http.MethodGet, c.callback),\n\t\tsupport.NewHTTPHandler(oidcRedirectPath, http.MethodGet, c.oidcRedirect),\n\n\t\t// issuer rest apis (html decoupled)\n\t\tsupport.NewHTTPHandler(authPath, http.MethodGet, c.auth),\n\t\tsupport.NewHTTPHandler(searchPath, http.MethodGet, c.search),\n\t\tsupport.NewHTTPHandler(verifyDIDAuthPath, http.MethodPost, c.verifyDIDAuthHandler),\n\t\tsupport.NewHTTPHandler(createCredentialPath, http.MethodPost, c.createCredentialHandler),\n\t\tsupport.NewHTTPHandler(generateCredentialPath, http.MethodPost, c.generateCredentialHandler),\n\n\t\t// chapi\n\t\tsupport.NewHTTPHandler(revoke, http.MethodPost, c.revokeVC),\n\t\tsupport.NewHTTPHandler(generate, http.MethodPost, c.generateVC),\n\n\t\t// didcomm\n\t\tsupport.NewHTTPHandler(didcommToken, http.MethodPost, c.didcommTokenHandler),\n\t\tsupport.NewHTTPHandler(didcommCallback, http.MethodGet, c.didcommCallbackHandler),\n\t\tsupport.NewHTTPHandler(didcommCredential, http.MethodPost, c.didcommCredentialHandler),\n\t\tsupport.NewHTTPHandler(didcommAssuranceData, http.MethodPost, c.didcommAssuraceHandler),\n\n\t\tsupport.NewHTTPHandler(didcommInit, http.MethodGet, c.initiateDIDCommConnection),\n\t\tsupport.NewHTTPHandler(didcommUserEndpoint, http.MethodGet, 
c.getIDHandler),\n\n\t\t// oidc\n\t\tsupport.NewHTTPHandler(oauth2GetRequestPath, http.MethodGet, c.createOIDCRequest),\n\t\tsupport.NewHTTPHandler(oauth2CallbackPath, http.MethodGet, c.handleOIDCCallback),\n\n\t\t// JSON-LD contexts API\n\t\tsupport.NewHTTPHandler(jsonldcontextrest.AddContextPath, http.MethodPost, c.addJSONLDContextHandler),\n\t}\n}", "func mapper(handler http.Handler, url url.URL, proxyConfig *Proxy) *httprouter.Router {\n\trouter := httprouter.New()\n\tif proxyConfig.Connect.To == url.Host {\n\t\tfor _, r := range proxyConfig.Routes {\n\t\t\t// link allow methods\n\t\t\tif r.Allow.Method != nil {\n\t\t\t\tfor _, m := range r.Allow.Method {\n\t\t\t\t\thttpMethodBuilder(m, r, handler, router, \"allow\", r.Route, proxyConfig)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn router\n}", "func Handler(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\t\t\n\t\n\n\tif request.HTTPMethod == \"GET\" {\n\t\tfmt.Printf(\"GET METHOD\\n\")\n\t\tShortLink := request.QueryStringParameters[\"ShortLink\"]\n\t\tPutLinkAnalyticsInDynamoDB(request, ShortLink)\n\t\tfmt.Printf(\"short link %s \", ShortLink)\n\t\tif ShortLink == \"\" {\n\t\t\treturn events.APIGatewayProxyResponse{\n\t\t\t\tBody: \"{\\\"error\\\" : \\\"Short link not provided.\\\"} \", \n\t\t\t\tStatusCode: 400,\n\t\t\t\tHeaders: map[string]string{\n\t\t\t\t\t\"Access-Control-Allow-Origin\": \"*\",\n\t\t\t\t},\n\t\t\t}, nil\n\t\t}\n\t\titem, Err := GetItemFromDynamoDB(\"ShortLink\", ShortLink)\n\t\tif Err != nil {\n\t\t\tErrorMessage := fmt.Sprintf(\" { \\\"error\\\" : \\\"%s\\\" } \", Err.Error())\n\t\t\treturn events.APIGatewayProxyResponse{\n\t\t\t\tBody: ErrorMessage, \n\t\t\t\tStatusCode: 400,\n\t\t\t\tHeaders: map[string]string{\n\t\t\t\t\t\"Access-Control-Allow-Origin\" : \"*\",\n\t\t\t\t},\n\t\t\t}, nil\n\t\t}\n\t\tif (item == Link{}) {\n\t\t\tErrorMessage := fmt.Sprintf(\" { \\\"error\\\" : \\\"Short link not found.\\\" } \")\n\t\t\treturn 
events.APIGatewayProxyResponse{\n\t\t\t\tBody: ErrorMessage, \n\t\t\t\tStatusCode: 404,\n\t\t\t\tHeaders: map[string]string{\n\t\t\t\t\t\"Access-Control-Allow-Origin\" : \"*\",\n\t\t\t\t},\n\t\t\t}, nil\n\t\t}\n\t\tmessage := fmt.Sprintf(\" { \\\"ShortLink\\\" : \\\"%s\\\", \\\"LongURL\\\" : \\\"%s\\\" } \", item.ShortLink, item.LongURL)\n\t\treturn events.APIGatewayProxyResponse{\n\t\t\tBody: message, \n\t\t\tStatusCode: 200,\n\t\t\tHeaders: map[string]string{\n\t\t\t\t\"Access-Control-Allow-Origin\" : \"*\",\n\t\t\t},\n\t\t}, nil\n\t} else if request.HTTPMethod == \"POST\" {\n\t\tfmt.Printf(\"POST METHOD\\n\")\n\n\t\t// BodyRequest will be used to take the json response from client and build it\n\t\tbodyRequest := BodyRequest{\n\t\t\tRequestLongURL: \"\",\n\t\t}\n\t\tfmt.Printf(\"bodyRequest: %+v\\n\", bodyRequest)\n\t\tfmt.Printf(\"request.Body: %+v\\n\", request.Body)\n\t\t// Unmarshal the json, return 404 if Error\n\t\tErr := json.Unmarshal([]byte(request.Body), &bodyRequest)\n\t\tif Err != nil {\n\t\t\treturn events.APIGatewayProxyResponse{\n\t\t\t\tBody: Err.Error(), \n\t\t\t\tStatusCode: 404,\n\t\t\t\tHeaders: map[string]string{\n\t\t\t\t\t\"Access-Control-Allow-Origin\" : \"*\",\n\t\t\t\t},\n\t\t\t}, nil\n\t\t}\n\t\tfmt.Printf(\"bodyRequest: %+v\\n\", bodyRequest)\n\t\tLongURLErr := validation.Validate(bodyRequest.RequestLongURL,\n\t\t\t\t\t\tvalidation.Required, // not empty\t\t\t\t\t\t\n\t\t\t\t\t\tis.URL, // is a valid URL\n\t\t\t\t\t)\n\t\tfmt.Printf(\"LongURLErr: %s\\n\", LongURLErr)\n\t\tif LongURLErr != nil {\n\t\t\treturn events.APIGatewayProxyResponse{\n\t\t\t\tBody: \"{\\\"error\\\" : \\\"URL is not valid\\\"}\", \n\t\t\t\tStatusCode: 404,\n\t\t\t\tHeaders: map[string]string{\n\t\t\t\t\t\"Access-Control-Allow-Origin\" : \"*\",\n\t\t\t\t},\n\t\t\t}, nil\n\t\t}\n\t\tfmt.Printf(\"RequestLongURL: %s\\n\", bodyRequest.RequestLongURL)\t\t\n\t\tLongURLHashBytes := sha256.Sum256([]byte(bodyRequest.RequestLongURL))\n\t\tLongURLHash := 
hex.EncodeToString(LongURLHashBytes[:])\n\t\tfmt.Printf(\"LongURLHash %s\\n\", LongURLHash)\n\t\titem, Err := GetShortLinkFromHash(LongURLHash)\n\t\tif Err != nil {\n\t\t\tErrorMessage := fmt.Sprintf(\" { \\\"error\\\" : \\\"%s\\\" } \", Err.Error())\n\t\t\treturn events.APIGatewayProxyResponse{\n\t\t\t\tBody: ErrorMessage, \n\t\t\t\tStatusCode: 400,\n\t\t\t\tHeaders: map[string]string{\n\t\t\t\t\t\"Access-Control-Allow-Origin\" : \"*\",\n\t\t\t\t},\n\t\t\t}, nil\n\t\t}\n\t\tfmt.Printf(\"item %s\\n\", item)\n\t\tbodyResponse := BodyResponse{}\n\t\tif (item == Link{}) {\n\t\t\tShortLinkRedisVal := CalcBase64()\t\n\t\t\tfmt.Println(ShortLinkRedisVal)\n\t\t\titem := Link{\n\t\t\t\tShortLink: ShortLinkRedisVal,\n\t\t\t\tHash: LongURLHash,\n\t\t\t\tLongURL: bodyRequest.RequestLongURL,\n\t\t\t}\n\t\t\t_, Err = PutLinkItemFromDynamoDB(item)\n\t\t\tif Err != nil {\n\t\t\t\tErrorMessage := fmt.Sprintf(\" { \\\"error\\\" : \\\"%s\\\" } \", Err.Error())\n\t\t\t\treturn events.APIGatewayProxyResponse{\n\t\t\t\t\tBody: ErrorMessage, \n\t\t\t\t\tStatusCode: 400,\n\t\t\t\t\tHeaders: map[string]string{\n\t\t\t\t\t\t\"Access-Control-Allow-Origin\" : \"*\",\n\t\t\t\t\t},\n\t\t\t\t}, nil\n\t\t\t}\n\t\t\tbodyResponse = BodyResponse{\n\t\t\t\tResponseShortLink: ShortLinkRedisVal,\n\t\t\t}\n\t\t} else {\n\t\t\t// We will build the BodyResponse and send it back in json form\n\t\t\tbodyResponse = BodyResponse{\n\t\t\t\tResponseShortLink: item.ShortLink,\n\t\t\t}\n\t\t\t\n\t\t}\n\t\tresponse, Err := json.Marshal(&bodyResponse)\n\t\tif Err != nil {\n\t\t\tErrorMessage := fmt.Sprintf(\" { \\\"error\\\" : \\\"%s\\\" } \", Err.Error())\n\t\t\treturn events.APIGatewayProxyResponse{\n\t\t\t\tBody: ErrorMessage, \n\t\t\t\tStatusCode: 404,\n\t\t\t\tHeaders: map[string]string{\n\t\t\t\t\t\"Access-Control-Allow-Origin\" : \"*\",\n\t\t\t\t},\n\t\t\t}, nil\n\t\t}\n\t\t\n\t\treturn events.APIGatewayProxyResponse{\n\t\t\tBody: string(response), \n\t\t\tStatusCode: 200,\n\t\t\tHeaders: 
map[string]string{\n\t\t\t\t\"Access-Control-Allow-Origin\" : \"*\",\n\t\t\t},\n\t\t}, nil\n\t} else {\n\t\tfmt.Printf(\"NEITHER\\n\")\n\t\treturn events.APIGatewayProxyResponse{\n\t\t\tStatusCode: 200,\n\t\t\tHeaders: map[string]string{\n\t\t\t\t\"Access-Control-Allow-Origin\" : \"*\",\n\t\t\t},\n\t\t}, nil\n\t}\n}", "func (c *HAProxyController) handleEndpoints(namespace *store.Namespace, ingress *store.Ingress, path *store.IngressPath, service *store.Service, backendName string, newBackend bool) (reload bool) {\n\treload = newBackend\n\tendpoints := c.getEndpoints(namespace, ingress, path, service)\n\tif endpoints == nil {\n\t\tif c.Client.BackendServerDeleteAll(backendName) {\n\t\t\treload = true\n\t\t}\n\t\treturn reload\n\t}\n\t// Handle Backend servers\n\tendpoints.BackendName = backendName\n\tannotations, activeAnnotations := c.getServerAnnotations(ingress, service)\n\tsrvsNbrChanged := c.alignHAproxySrvs(endpoints)\n\treload = reload || srvsNbrChanged || activeAnnotations\n\tfor srvName, srv := range endpoints.HAProxySrvs {\n\t\tif srv.Modified || reload {\n\t\t\tc.handleHAProxSrv(srvName, srv.Address, backendName, endpoints.Port, annotations)\n\t\t}\n\t}\n\treturn reload\n}", "func Handler(ctx context.Context, payload events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\tvar resp []byte\n\n\tswitch path := payload.Path; path {\n\tcase PRODUCT_PATH:\n\t\tswitch method := payload.HTTPMethod; method {\n\t\tcase GET:\n\t\t\tfmt.Printf(\"GET method for products.\\n\")\n\t\tcase POST:\n\t\t\tnewStock, err := event.CreateStock(payload.Body)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tresp, _ = json.Marshal(newStock)\n\t\tcase PUT:\n\t\t\tfmt.Printf(\"PUT method for products.\\n\")\n\t\tcase DELETE:\n\t\t\tfmt.Printf(\"DELETE method for products.\\n\")\n\t\t}\n\n\tcase STORE_PATH:\n\t\tswitch method := payload.HTTPMethod; method {\n\t\tcase GET:\n\t\t\tfmt.Printf(\"GET method for stocks.\\n\")\n\t\tcase 
POST:\n\t\t\tnewStockLoc, err := event.CreateStockLocation(payload.Body)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tresp, _ = json.Marshal(newStockLoc)\n\t\tcase PUT:\n\t\t\tfmt.Printf(\"PUT method for stocks.\\n\")\n\t\tcase DELETE:\n\t\t\tfmt.Printf(\"DELETE method for stocks.\\n\")\n\t\t}\n\tdefault:\n\t\tfmt.Printf(\"panik: %s.\\n\", path)\n\t}\n\n\treturn events.APIGatewayProxyResponse{\n\t\tBody: string(resp),\n\t\tStatusCode: 200,\n\t}, nil\n}", "func Handler(w http.ResponseWriter, r *http.Request) {\n\n\tdefer catchPanic(w, r)\n\n\tif basePath := \"/Foogazi\"; strings.Contains(r.URL.Path, basePath) && r.Method == \"GET\" {\n\n\t\ttools.ShortenPath(basePath, r)\n\n\t\tw.Write([]byte(\"Hello\"))\n\t\treturn\n\t}\n\tif basePath := \"/Foo\"; strings.Contains(r.URL.Path, basePath) {\n\n\t\tif basePath := \"/Foo/Bar\"; strings.Contains(r.URL.Path, basePath) && r.Method == \"GET\" {\n\n\t\t\ttools.ShortenPath(basePath, r)\n\n\t\t\tw.Write([]byte(r.URL.Path))\n\t\t\treturn\n\t\t}\n\n\t\ttools.ShortenPath(basePath, r)\n\n\t\tw.Write([]byte(\"Hello world\"))\n\t\treturn\n\t}\n\tif basePath := \"/hello\"; strings.Contains(r.URL.Path, basePath) && r.Method == \"GET\" {\n\n\t\ttools.ShortenPath(basePath, r)\n\n\t\tw.Write([]byte(\"Hello World\"))\n\t\treturn\n\t}\n\tif basePath := \"/hello_POST\"; strings.Contains(r.URL.Path, basePath) && r.Method == \"POST\" {\n\n\t\ttools.ShortenPath(basePath, r)\n\t\tprintln(\"Request to Hello_post\")\n\t\tw.Write([]byte(\"Hello World\"))\n\t\treturn\n\t}\n}", "func (c *HAProxyController) handleEndpoints(namespace *store.Namespace, ingress *store.Ingress, path *store.IngressPath, service *store.Service, backendName string, newBackend bool) (reload bool) {\n\treload = newBackend\n\t// fetch Endpoints\n\tendpoints, ok := namespace.Endpoints[service.Name]\n\tif !ok {\n\t\tif service.DNS == \"\" {\n\t\t\tlogger.Warningf(\"No Endpoints for service '%s'\", service.Name)\n\t\t\treturn false // not an end of world scenario, 
just log this\n\t\t}\n\t\t//TODO: currently HAProxy will only resolve server name at startup/reload\n\t\t// This needs to be improved by using HAProxy resolvers to have resolution at runtime\n\t\tlogger.Debugf(\"Configuring service '%s', of type ExternalName\", service.Name)\n\t\tendpoints = &store.Endpoints{\n\t\t\tNamespace: \"external\",\n\t\t\tHAProxySrvs: map[string]*store.HAProxySrv{\n\t\t\t\t\"external-service\": &store.HAProxySrv{\n\t\t\t\t\tIP: service.DNS,\n\t\t\t\t\tDisabled: false,\n\t\t\t\t\tModified: true,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tnamespace.Endpoints[service.Name] = endpoints\n\t}\n\tendpoints.BackendName = backendName\n\t// resolve TargetPort\n\tportUpdated, err := c.setTargetPort(path, service, endpoints)\n\treload = reload || portUpdated\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn false\n\t}\n\t// Handle Backend servers\n\tif len(endpoints.HAProxySrvs) < len(endpoints.Addresses) {\n\t\treload = c.createSrvSlots(endpoints) || reload\n\t}\n\treload = c.alignSrvSlots(endpoints) || reload\n\tannotations, activeAnnotations := c.getServerAnnotations(ingress, service)\n\treload = reload || activeAnnotations\n\tfor srvName, srv := range endpoints.HAProxySrvs {\n\t\tif !srv.Modified && !reload {\n\t\t\tcontinue\n\t\t}\n\t\tc.handleHAProxSrv(endpoints, srvName, path.TargetPort, annotations)\n\t}\n\treturn reload\n}", "func RegisterRestEndpoints(endpointInsts map[string]RestEndpointInst) {\n\n\tfor url, endpointInst := range endpointInsts {\n\t\tregistered[url] = endpointInst\n\n\t\tHandleFunc(url, func() func(w http.ResponseWriter, r *http.Request) {\n\t\t\tvar handlerURL = url\n\t\t\tvar handlerInst = endpointInst\n\n\t\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\t\t\t// Create a new handler instance\n\n\t\t\t\thandler := handlerInst()\n\n\t\t\t\t// Handle request in appropriate method\n\n\t\t\t\tres := strings.TrimSpace(r.URL.Path[len(handlerURL):])\n\n\t\t\t\tif len(res) > 0 && res[len(res)-1] == '/' {\n\t\t\t\t\tres 
= res[:len(res)-1]\n\t\t\t\t}\n\n\t\t\t\tvar resources []string\n\n\t\t\t\tif res != \"\" {\n\t\t\t\t\tresources = strings.Split(res, \"/\")\n\t\t\t\t}\n\n\t\t\t\tswitch r.Method {\n\t\t\t\tcase \"GET\":\n\t\t\t\t\thandler.HandleGET(w, r, resources)\n\n\t\t\t\tcase \"POST\":\n\t\t\t\t\thandler.HandlePOST(w, r, resources)\n\n\t\t\t\tcase \"PUT\":\n\t\t\t\t\thandler.HandlePUT(w, r, resources)\n\n\t\t\t\tcase \"DELETE\":\n\t\t\t\t\thandler.HandleDELETE(w, r, resources)\n\n\t\t\t\tdefault:\n\t\t\t\t\thttp.Error(w, http.StatusText(http.StatusMethodNotAllowed),\n\t\t\t\t\t\thttp.StatusMethodNotAllowed)\n\t\t\t\t}\n\t\t\t}\n\t\t}())\n\t}\n}", "func (e *Endpoints) Use(m func(goa.Endpoint) goa.Endpoint) {\n\te.GetSimpleCardList = m(e.GetSimpleCardList)\n\te.GetCardInfo = m(e.GetCardInfo)\n\te.PostCardInfo = m(e.PostCardInfo)\n\te.PutCardInfo = m(e.PutCardInfo)\n\te.DeleteCardInfo = m(e.DeleteCardInfo)\n}", "func endpointManagement(r common.Router) common.Router {\n\t// programatically set swagger info\n\tdocs.SwaggerInfo.Title = \"gin swagger test\"\n\tdocs.SwaggerInfo.Description = \"This is a sample server for Swagger.!!!!!!\"\n\tdocs.SwaggerInfo.Version = \"1.0\"\n\tdocs.SwaggerInfo.Host = \"localhost:9000\"\n\tdocs.SwaggerInfo.BasePath = \"/v1\"\n\n\tr.Version = r.Engine.Group(\"/v1\")\n\n\t//SECTION x endpoints by function\n\tuser.NewUserV1Router(r, \"/user\")\n\tauth.NewAuthV1Router(r, \"/auth\")\n\n\tr.Engine.GET(\"/swagger/*any\", ginSwagger.WrapHandler(swaggerFiles.Handler))\n\n\treturn r\n}", "func Handler(ctx context.Context, req events.APIGatewayProxyRequest) (Response, error) {\n\tvar buf bytes.Buffer\n\n\tvar message string\n\tmessage = req.Path\n\n\tlog.Print(fmt.Sprint(\"Called with path: \", req.Path))\n\tstatusCode := 200\n\n\t// Could use a third party routing library at this point, but being hacky for now\n\titems := strings.Split(req.Path, \"/\")\n\tvar item string\n\tif len(items) > 1 {\n\t\titem = strings.Join(items[2:], \"/\")\n\t}\n\n\t// If we 
actually have an action to take\n\tif len(items) >= 1 {\n\t\tswitch items[1] {\n\t\tcase \"list\":\n\t\t\titems, err := List()\n\t\t\tif err != nil {\n\t\t\t\tstatusCode = 500\n\t\t\t\tmessage = fmt.Sprint(err)\n\t\t\t} else {\n\t\t\t\tmessage = strings.Join(items, \"\\n\")\n\t\t\t}\n\t\tcase \"add\":\n\t\t\t// Should probably be doing this on PUT or POST only\n\t\t\terr := Add(item)\n\t\t\tif err != nil {\n\t\t\t\tstatusCode = 500\n\t\t\t\tmessage = fmt.Sprint(err)\n\t\t\t} else {\n\t\t\t\tmessage = \"Added\"\n\t\t\t}\n\n\t\tcase \"complete\":\n\t\t\t// Should only be doing this on POST, but demo\n\t\t\terr := Complete(item)\n\t\t\tif err != nil {\n\t\t\t\tstatusCode = 500\n\t\t\t\tmessage = fmt.Sprint(err)\n\t\t\t} else {\n\t\t\t\tmessage = \"Completed\"\n\t\t\t}\n\t\t}\n\t}\n\n\tbody, err := json.Marshal(map[string]interface{}{\n\t\t\"message\": message,\n\t})\n\tif err != nil {\n\t\treturn Response{StatusCode: 404}, err\n\t}\n\tjson.HTMLEscape(&buf, body)\n\n\tresp := Response{\n\t\tStatusCode: statusCode,\n\t\tIsBase64Encoded: false,\n\t\tBody: buf.String(),\n\t\tHeaders: map[string]string{\n\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t\"X-MyCompany-Func-Reply\": \"hello-handler\",\n\t\t},\n\t}\n\n\treturn resp, nil\n}", "func Use(handler http.HandlerFunc, mid ...func(http.Handler) http.HandlerFunc) http.HandlerFunc {\n\tfor _, m := range mid {\n\t\thandler = m(handler)\n\t}\n\treturn handler\n}", "func (api *API) Start(dir, defaultCert, defaultKey string) error {\n\tcontextLogger := api.log.WithFields(logrus.Fields{\n\t\tlog.FuncField: ref.GetFuncName(),\n\t\tlog.DefaultCertFileField: defaultCert,\n\t\tlog.DefaultKeyFileField: defaultKey,\n\t\tlog.APIDirField: dir,\n\t})\n\tcontextLogger.Debug(\"starting API\")\n\n\tapi.handlers[\"OPTIONS\"] = make(map[string]func(http.ResponseWriter, *http.Request))\n\tfor endpointName, endpoint := range api.endpoints {\n\t\tvar path string\n\t\tif len(api.baseURL) > 0 {\n\t\t\tpath = fmt.Sprintf(\"%s/%s\", 
api.baseURL, endpoint.Path)\n\t\t} else {\n\t\t\tpath = endpoint.Path\n\t\t}\n\t\tregisteredRoute := api.ensureRouteRegistered(path)\n\t\tfile := endpoint.File\n\t\tmethod := strings.ToUpper(endpoint.Method)\n\t\tcontextLoggerEndpoint := api.log.WithFields(logrus.Fields{\n\t\t\tlog.PathField: path,\n\t\t\tlog.RouteField: registeredRoute,\n\t\t\tlog.FileField: file,\n\t\t\tlog.MethodField: method,\n\t\t\tlog.EndpointNameField: endpointName,\n\t\t\tlog.ResponseHeadersField: endpoint.Headers,\n\t\t})\n\n\t\tcontextLoggerEndpoint.Debug(\"registering endpoint\")\n\t\tif route, methodExists := api.handlers[method]; !methodExists {\n\t\t\tapi.handlers[method] = make(map[string]func(http.ResponseWriter, *http.Request))\n\t\t} else {\n\t\t\tif _, routeExists := route[registeredRoute]; routeExists {\n\t\t\t\tcontextLoggerEndpoint.Warn(\"endpoint already exists, moving on to next endpoint...\")\n\t\t\t\tdelete(api.endpoints, endpointName)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tcontextLoggerEndpoint.Debug(\"registered endpoint; now assigning handler\")\n\t\tapi.handlers[method][registeredRoute] = api.creator.getHandler(endpoint.EnforceValidJSON, endpoint.AllowCORS, endpoint.HTTPStatusCode, endpoint.Headers, dir, file, api.file)\n\t\tif endpoint.AllowCORS {\n\t\t\tapi.handlers[\"OPTIONS\"][registeredRoute] = func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t\t\t\tw.Header().Set(\"Access-Control-Allow-Methods\", \"*\")\n\t\t\t\tw.Header().Set(\"Access-Control-Allow-headers\", \"*\")\n\t\t\t}\n\t\t}\n\t}\n\n\treturn api.creator.startAPI(defaultCert, defaultKey, api.server, api.httpConfig)\n}", "func (r *Route) getHandler(method string, ex *routeExecution) {\n\t// check specific method match\n\tif h, ok := r.handlers[method]; ok {\n\t\tex.handler = h\n\t\treturn\n\t}\n\n\t// if this is a HEAD we can fall back on GET\n\tif method == http.MethodHead {\n\t\tif h, ok := r.handlers[http.MethodGet]; ok {\n\t\t\tex.handler 
= h\n\t\t\treturn\n\t\t}\n\t}\n\n\t// check the ANY handler\n\tif h, ok := r.handlers[methodAny]; ok {\n\t\tex.handler = h\n\t\treturn\n\t}\n\n\t// last ditch effort is to generate our own method not allowed handler\n\t// this is regenerated each time in case routes are added during runtime\n\t// not generated if a previous handler is already set\n\tif ex.handler == nil {\n\t\tex.handler = r.methodNotAllowed()\n\t}\n\treturn\n}", "func mapRoutes() {\n\t//http.HandleFunc(\"/user\", controllers.GetUser)\n}", "func InitRoutes(echoEngine *echo.Echo, controller Controller) {\n\n //Login endpoints\n echoEngine.OPTIONS(\"/login\", web.OptionsMethodHandler)\n echoEngine.OPTIONS(\"logout\", web.OptionsMethodHandler)\n echoEngine.POST(\"/login\", controller.Login)\n echoEngine.POST(\"/logout\", controller.Logout)\n\n}", "func (self *CentralBooking) InstallHandlers(router *mux.Router) {\n router.\n Methods(\"POST\").\n Path(\"/register/instance\").\n HandlerFunc(self.RegisterInstance)\n\n // apeing vault\n router.\n Methods(\"GET\").\n Path(\"/sys/health\").\n HandlerFunc(self.CheckHealth)\n}", "func (engine *Engine) Any(relativePath string, handlers ...HandlerFunc) IRoutes {\n\tengine.handle(http.MethodGet, relativePath, handlers)\n\tengine.handle(http.MethodPost, relativePath, handlers)\n\tengine.handle(http.MethodPut, relativePath, handlers)\n\tengine.handle(http.MethodPatch, relativePath, handlers)\n\tengine.handle(http.MethodHead, relativePath, handlers)\n\tengine.handle(http.MethodOptions, relativePath, handlers)\n\tengine.handle(http.MethodDelete, relativePath, handlers)\n\tengine.handle(http.MethodConnect, relativePath, handlers)\n\tengine.handle(http.MethodTrace, relativePath, handlers)\n\treturn engine\n}", "func switchRouter(defaultHandler http.Handler, proxySrv *pServer.HttpServer) func(config dynamic.Configuration) {\n\treturn func(config dynamic.Configuration) {\n\t\tlog.Info(\"===Starting SwitchRouter====\")\n\t\trouterTemp, err := router.NewRouter()\n\t\tif 
err != nil {\n\t\t\tlog.Info(\"Failed to create router \", err)\n\t\t\t// return nil, err\n\t\t}\n\t\tlog.Infof(\"buildHandler : %v \\n\", config.Routers)\n\t\tfor name, value := range config.Routers {\n\t\t\tlog.Infof(\"Create Hypercloud proxy based on %v: %v \\n\", name, value)\n\t\t\tbackURL, err := url.Parse(value.Server)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(errors.Wrapf(err, \"URL Parsing failed for: %s\", value.Server))\n\t\t\t}\n\t\t\tdhconfig := &proxy.Config{\n\t\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t\tCipherSuites: crypto.DefaultCiphers(),\n\t\t\t\t},\n\t\t\t\tHeaderBlacklist: []string{\"X-CSRFToken\"},\n\t\t\t\tEndpoint: backURL,\n\t\t\t}\n\t\t\tdhproxy := proxy.NewProxy(dhconfig)\n\t\t\terr = routerTemp.AddRoute(value.Rule, 0, http.StripPrefix(value.Path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\ttoken := r.Header.Clone().Get(\"Authorization\")\n\t\t\t\ttemp := strings.Split(token, \"Bearer \")\n\t\t\t\tif len(temp) > 1 {\n\t\t\t\t\ttoken = temp[1]\n\t\t\t\t} else {\n\t\t\t\t\ttoken = temp[0]\n\t\t\t\t}\n\t\t\t\t// NOTE: query에 token 정보가 있을 시 해당 token으로 설정\n\t\t\t\tqueryToken := r.URL.Query().Get(\"token\")\n\t\t\t\tif queryToken != \"\" && token == \"\" {\n\t\t\t\t\tr.URL.Query().Del(\"token\")\n\t\t\t\t\ttoken = queryToken\n\t\t\t\t}\n\t\t\t\tr.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", token))\n\t\t\t\tdhproxy.ServeHTTP(w, r)\n\t\t\t})))\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"failed to put proxy handler into Router\", err)\n\t\t\t}\n\t\t}\n\t\terr = routerTemp.AddRoute(\"PathPrefix(`/api/console/dynamic`)\", 0, http.HandlerFunc(\n\t\t\tfunc(rw http.ResponseWriter, r *http.Request) {\n\t\t\t\trw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\terr := json.NewEncoder(rw).Encode(config)\n\t\t\t\tif err != nil {\n\t\t\t\t\thttp.NotFound(rw, r)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t},\n\t\t))\n\t\tif err != nil {\n\t\t\tlog.Error(\"/api/k8sAll/ has 
a problem\", err)\n\t\t}\n\n\t\terr = routerTemp.AddRoute(\"PathPrefix(`/`)\", 0, defaultHandler)\n\t\tif err != nil {\n\t\t\tlog.Error(\"failed to put hypercloud proxy\", err)\n\t\t\t// return nil, err\n\t\t}\n\n\t\tlog.Info(\"===End SwitchRouter ===\")\n\t\tlog.Info(\"Call updateHandler --> routerTemp.Router\")\n\t\t// olderSrv:=proxySrv.Handler.Switcher.GetHandler()\n\n\t\tif proxySrv.Switcher.GetHandler() == nil {\n\t\t\tproxySrv.Switcher.UpdateHandler(http.NotFoundHandler())\n\t\t}\n\n\t\tproxySrv.Switcher.UpdateHandler(routerTemp)\n\n\t}\n}", "func (r *router) handle(c *Context){\n\tn, params := r.getRoute(c.Method, c.Path)\n\tif n != nil {\n\t\tc.Params = params\n\t\t// connection between Context and Router!\n\t\t// it's important\n\t\tkey := c.Method + \"-\" + n.pattern\n\t\t// 两种函数都放到一起了\n\t\tc.handlers = append(c.handlers, r.handlers[key])\n\t\t//r.handlers[key](c)\n\t}else{\n\t\tc.handlers = append(c.handlers, func(c *Context){\n\t\t\tc.String(http.StatusNotFound, \"404 NOT FOUND%s\\n\", c.Path)\n\t\t})\n\t}\n\t//放在这里一起执行, 中间执行, 其逻辑导致\"并行\"效果\n\tc.Next()\n}", "func (ro *Route) scanMethods(r *Resource) error {\n\n\tt := r.Value.Type()\n\n\t//log.Println(\"Scanning methods from type\", t, \"is slice:\", isSliceType(t))\n\n\tfor i := 0; i < t.NumMethod(); i++ {\n\n\t\tm := t.Method(i)\n\n\t\t//log.Println(\"Testing:\", m.Name, isMappedMethod(m))\n\n\t\t// We will accept all methods that\n\t\t// has GET, POST, PUT, DELETE, HEAD\n\t\t// in the prefix of the method name\n\t\tif isMappedMethod(m) {\n\n\t\t\th, err := newHandler(m, r)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t//log.Printf(\"Adding Handler %s for route %s\\n\", h, ro)\n\n\t\t\t// Check if this new Handler will conflict with some address of Handler that already exist\n\t\t\t// Action Handlers Names could conflict with Children Names...\n\t\t\terr = ro.checkAddrConflict(h)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Index: GETLogin, POST, or 
POSTMessage...\n\t\t\tro.Handlers[h.Method.HTTPMethod+h.Method.Name] = h\n\t\t}\n\t}\n\n\treturn nil\n}", "func (e *Endpoints) Use(m func(goa.Endpoint) goa.Endpoint) {\n\te.GetPerson = m(e.GetPerson)\n\te.GetFam = m(e.GetFam)\n}", "func mapRoutes() {\n \n /*\n Add a pre-handler to save the referrer\n */\n goweb.MapBefore(func(c context.Context) error {\n \n // add a custom header\n c.HttpResponseWriter().Header().Set(\"X-Custom-Header\", \"Goweb\")\n \n return nil\n })\n \n /*\n Add a post-handler to log someBook\n */\n goweb.MapAfter(func(c context.Context) error {\n // TODO: log this\n return nil\n })\n \n /*\n Map the homepage...\n */\n goweb.Map(\"/\", func(c context.Context) error {\n return goweb.Respond.With(c, 200, []byte(\"Welcome to the Goweb example app - see the terminal for instructions.\"))\n })\n \n /*\n /status-code/xxx\n Where xxx is any HTTP status code.\n */\n goweb.Map(\"/status-code/{code}\", func(c context.Context) error {\n \n // get the path value as an integer\n statusCodeInt, statusCodeIntErr := strconv.Atoi(c.PathValue(\"code\"))\n if statusCodeIntErr != nil {\n return goweb.Respond.With(c, http.StatusInternalServerError, []byte(\"Failed to convert 'code' into a real status code number.\"))\n }\n \n // respond with the status\n return goweb.Respond.WithStatusText(c, statusCodeInt)\n })\n \n // /errortest should throw a system error and be handled by the\n // DefaultHttpHandler().ErrorHandler() Handler.\n\n goweb.Map(\"/errortest\", func(c context.Context) error {\n return errors.New(\"This is a test error!\")\n })\n \n /*\n Map a RESTful controller\n (see the BooksController for all the methods that will get\n mapped)\n */\n BooksController := new(BooksController)\n goweb.MapController(BooksController)\n \n goweb.Map(func(c context.Context) error {\n return goweb.API.RespondWithData(c, \"Just a number!\")\n }, goweb.RegexPath(`^[0-9]+$`))\n \n /*\n Catch-all handler for everything that we don't understand\n */\n goweb.Map(func(c 
context.Context) error {\n \n // just return a 404 message\n return goweb.API.Respond(c, 404, nil, []string{\"File not found\"})\n \n })\n \n}", "func (srv *Server) handleGet(res http.ResponseWriter, req *http.Request) {\n\tfor _, rute := range srv.routeGets {\n\t\tvals, ok := rute.parse(req.URL.Path)\n\t\tif ok {\n\t\t\trute.endpoint.call(res, req, srv.evals, vals)\n\t\t\treturn\n\t\t}\n\t}\n\n\tsrv.handleFS(res, req, RequestMethodGet)\n}", "func (e *Endpoints) Use(m func(goa.Endpoint) goa.Endpoint) {\n\te.List = m(e.List)\n\te.Reserve = m(e.Reserve)\n\te.Pickup = m(e.Pickup)\n\te.Return = m(e.Return)\n\te.Subscribe = m(e.Subscribe)\n}", "func AddressBookHandler(w http.ResponseWriter, r *http.Request) {\n switch r.Method {\n case \"GET\":\n fetchData(r)\n case \"POST\":\n postData(r)\n default:\n fmt.Println(\"No match to route\")\n }\n\n}", "func routePath(w http.ResponseWriter, r *http.Request, trimURL string) {\n\n\t/***********************************************/\n\t//TODO: add your custom web API here:\n\t/**********************************************/\n\n\tif strings.HasPrefix(trimURL, \"login\") && webServer.IsPOST(r) { //>>>>authentication\n\t\tauthenticateHandler.HandleHTTPLogin(w, r)\n\t} else if strings.HasPrefix(trimURL, \"logout\") && webServer.IsPOST(r) {\n\t\tauthenticateHandler.HandleHTTPLogout(w, r)\n\t} else if strings.Compare(trimURL, \"current-user\") == 0 && webServer.IsGET(r) {\n\t\tauthenticateHandler.HandleCurrentUser(w, r)\n\t} else if strings.Compare(trimURL, \"role\") == 0 && webServer.IsPOST(r) { //>>>>authorization\n\t\tauthorizeHandler.HandleAddRole(w, r)\n\t} else if strings.Compare(trimURL, \"role\") == 0 && webServer.IsGET(r) {\n\t\tauthorizeHandler.HandleGetRole(w, r)\n\t} else if strings.Compare(trimURL, \"role-access\") == 0 && webServer.IsGET(r) {\n\t\tauthorizeHandler.HandleGetAccessRole(w, r)\n\t} else if strings.Compare(trimURL, \"role-access-count\") == 0 && webServer.IsGET(r) 
{\n\t\tauthorizeHandler.HandleGetAccessRoleCount(w, r)\n\t} else if strings.Compare(trimURL, \"access\") == 0 && webServer.IsGET(r) {\n\t\tauthorizeHandler.HandleGetAccess(w, r)\n\t} else if strings.HasPrefix(trimURL, \"meals\") { //>>>>sample return JSON\n\t\tw.Header().Set(\"Content-Type\", \"application/json\") //MIME to application/json\n\t\tw.WriteHeader(http.StatusOK) //status code 200, OK\n\t\tw.Write([]byte(\"{ \\\"msg\\\": \\\"this is meal A \\\" }\")) //body text\n\t\treturn\n\t} else if strings.HasPrefix(trimURL, \"img/\") { //>>>>sample return virtual JPG file to client\n\t\tlogicalFilePath := \"./logic-files/\"\n\t\tphysicalFileName := \"neon.jpg\"\n\n\t\t// try read file\n\t\tdata, err := ioutil.ReadFile(logicalFilePath + physicalFileName)\n\t\tif err != nil {\n\t\t\t// show error page if failed to read file\n\t\t\thandleErrorCode(500, \"Unable to retrieve image file\", w)\n\t\t} else {\n\t\t\t//w.Header().Set(\"Content-Type\", \"image/jpg\") // #optional HTTP header info\n\n\t\t\t// uncomment if image file is meant to download instead of display on web browser\n\t\t\t// clientDisplayFileName = \"customName.jpg\"\n\t\t\t//w.header().Set(\"Content-Disposition\", \"attachment; filename=\\\"\" + clientDisplayFileName + \"\\\"\")\n\n\t\t\t// write file (in binary format) direct into HTTP return content\n\t\t\tw.Write(data)\n\t\t}\n\t} else {\n\t\t// show error code 404 not found\n\t\t//(since the requested URL doesn't match any of it)\n\t\thandleErrorCode(404, \"Path not found.\", w)\n\t}\n\n}", "func (ws *WebServer) registerHandlers() {\n\t// --------------------------------\n\t// AVAILABLE WITHOUT AUTH\n\n\tws.router.Use(\n\t\tws.addHeaders, ws.optionsHandler,\n\t\tws.handlerFiles, ws.handleMetrics)\n\n\tws.router.Get(\"/ota\", ws.handlerGetOta)\n\n\timagestore := ws.router.Group(\"/imagestore\")\n\timagestore.\n\t\tGet(\"/<id>\", ws.handlerGetImage)\n\n\tutils := ws.router.Group(\"/api/util\")\n\tutils.\n\t\tGet(`/color/<hexcode:[\\da-fA-F]{6,8}>`, 
ws.handlerGetColor)\n\tutils.\n\t\tGet(\"/commands\", ws.handlerGetCommands)\n\tutils.\n\t\tGet(\"/landingpageinfo\", ws.handlerGetLandingPageInfo)\n\n\tws.router.Get(\"/invite\", ws.handlerGetInvite)\n\n\t// --------------------------------\n\t// ONLY AVAILABLE AFTER AUTH\n\n\tws.router.Get(endpointLogInWithDC, ws.dcoauth.HandlerInit)\n\tws.router.Get(endpointAuthCB, ws.dcoauth.HandlerCallback)\n\n\tws.router.Use(ws.auth.checkAuth)\n\tif !util.DevModeEnabled {\n\t\tws.router.Use(ws.af.Handler)\n\t}\n\n\tapi := ws.router.Group(\"/api\")\n\tapi.\n\t\tGet(\"/me\", ws.af.SessionSetHandler, ws.handlerGetMe)\n\tapi.\n\t\tPost(\"/logout\", ws.auth.LogOutHandler)\n\tapi.\n\t\tGet(\"/sysinfo\", ws.handlerGetSystemInfo)\n\n\tsettings := api.Group(\"/settings\")\n\tsettings.\n\t\tGet(\"/presence\", ws.handlerGetPresence).\n\t\tPost(ws.handlerPostPresence)\n\tsettings.\n\t\tGet(\"/noguildinvite\", ws.handlerGetInviteSettings).\n\t\tPost(ws.handlerPostInviteSettings)\n\n\tguilds := api.Group(\"/guilds\")\n\tguilds.\n\t\tGet(\"\", ws.handlerGuildsGet)\n\n\tguild := guilds.Group(\"/<guildid:[0-9]+>\")\n\tguild.\n\t\tGet(\"\", ws.handlerGuildsGetGuild)\n\tguild.\n\t\tGet(\"/permissions\", ws.handlerGetGuildPermissions).\n\t\tPost(ws.handlerPostGuildPermissions)\n\tguild.\n\t\tGet(\"/members\", ws.handlerGetGuildMembers)\n\tguild.\n\t\tPost(\"/inviteblock\", ws.handlerPostGuildInviteBlock)\n\tguild.\n\t\tGet(\"/scoreboard\", ws.handlerGetGuildScoreboard)\n\tguild.\n\t\tGet(\"/antiraid/joinlog\", ws.handlerGetGuildAntiraidJoinlog).\n\t\tDelete(ws.handlerDeleteGuildAntiraidJoinlog)\n\n\tguildUnbanRequests := guild.Group(\"/unbanrequests\")\n\tguildUnbanRequests.\n\t\tGet(\"\", ws.handlerGetGuildUnbanrequests)\n\tguildUnbanRequests.\n\t\tGet(\"/count\", ws.handlerGetGuildUnbanrequestsCount)\n\tguildUnbanRequests.\n\t\tGet(\"/<id:[0-9]+>\", ws.handlerGetGuildUnbanrequest).\n\t\tPost(ws.handlerPostGuildUnbanrequest)\n\n\tguildSettings := 
guild.Group(\"/settings\")\n\tguildSettings.\n\t\tGet(\"/karma\", ws.handlerGetGuildSettingsKarma).\n\t\tPost(ws.handlerPostGuildSettingsKarma)\n\tguildSettings.\n\t\tGet(\"/antiraid\", ws.handlerGetGuildSettingsAntiraid).\n\t\tPost(ws.handlerPostGuildSettingsAntiraid)\n\n\tguildSettingsKarmaBlocklist := guildSettings.Group(\"/karma/blocklist\")\n\tguildSettingsKarmaBlocklist.\n\t\tGet(\"\", ws.handlerGetGuildSettingsKarmaBlocklist)\n\tguildSettingsKarmaBlocklist.\n\t\tPut(\"/<memberid>\", ws.handlerPutGuildSettingsKarmaBlocklist).\n\t\tDelete(ws.handlerDeleteGuildSettingsKarmaBlocklist)\n\n\tguild.\n\t\tGet(\"/settings\", ws.handlerGetGuildSettings).\n\t\tPost(ws.handlerPostGuildSettings)\n\n\tguildReports := guild.Group(\"/reports\")\n\tguildReports.\n\t\tGet(\"\", ws.handlerGetReports)\n\tguildReports.\n\t\tGet(\"/count\", ws.handlerGetReportsCount)\n\n\tguildBackups := guild.Group(\"/backups\")\n\tguildBackups.\n\t\tGet(\"\", ws.handlerGetGuildBackups)\n\tguildBackups.\n\t\tPost(\"/toggle\", ws.handlerPostGuildBackupsToggle)\n\tguildBackups.\n\t\tGet(\"/<backupid:[0-9]+>/download\", ws.handlerGetGuildBackupDownload)\n\n\tmember := guilds.Group(\"/<guildid:[0-9]+>/<memberid:[0-9]+>\")\n\tmember.\n\t\tGet(\"\", ws.handlerGuildsGetMember)\n\tmember.\n\t\tGet(\"/permissions\", ws.handlerGetMemberPermissions)\n\tmember.\n\t\tGet(\"/permissions/allowed\", ws.handlerGetMemberPermissionsAllowed)\n\tmember.\n\t\tPost(\"/kick\", ws.handlerPostGuildMemberKick)\n\tmember.\n\t\tPost(\"/ban\", ws.handlerPostGuildMemberBan)\n\tmember.\n\t\tPost(\"/mute\", ws.handlerPostGuildMemberMute)\n\tmember.\n\t\tPost(\"/unmute\", ws.handlerPostGuildMemberUnmute)\n\tmember.\n\t\tGet(\"/unbanrequests\", ws.handlerGetGuildMemberUnbanrequests)\n\n\tmemberReports := member.Group(\"/reports\")\n\tmemberReports.\n\t\tGet(\"\", ws.handlerGetReports).\n\t\tPost(ws.handlerPostGuildMemberReport)\n\tmemberReports.\n\t\tGet(\"/count\", ws.handlerGetReportsCount)\n\n\treports := 
api.Group(\"/reports\")\n\treport := reports.Group(\"/<id:[0-9]+>\")\n\treport.\n\t\tGet(\"\", ws.handlerGetReport)\n\treport.\n\t\tPost(\"/revoke\", ws.handlerPostReportRevoke)\n\n\tunbanReqeusts := api.Group(\"/unbanrequests\")\n\tunbanReqeusts.\n\t\tGet(\"\", ws.handlerGetUnbanrequest).\n\t\tPost(ws.handlerPostUnbanrequest)\n\tunbanReqeusts.\n\t\tGet(\"/bannedguilds\", ws.handlerGetUnbanrequestBannedguilds)\n\n\tapi.\n\t\tGet(\"/token\", ws.handlerGetToken).\n\t\tPost(ws.handlerPostToken).\n\t\tDelete(ws.handlerDeleteToken)\n\n\tusersettings := api.Group(\"/usersettings\")\n\tusersettings.\n\t\tGet(\"/ota\", ws.handlerGetUsersettingsOta).\n\t\tPost(ws.handlerPostUsersettingsOta)\n}", "func mapUrls() {\n\t// Ping Test\n\trouter.GET(\"/gin/ping\", ping.Ping)\n\n\t// User Routes\n\trouter.GET(\"/gin/user/:user_id\", users.Get)\n\trouter.GET(\"/gin/internal/users/search\", users.Search)\n\trouter.POST(\"/gin/user/new\", users.Create)\n\trouter.PUT(\"/gin/user/:user_id\", users.Update)\n\trouter.PATCH(\"/gin/user/:user_id\", users.Update)\n\trouter.DELETE(\"/gin/user/:user_id\", users.Delete)\n}", "func Endpoint(url string, configureFunc func()) {\n\touterCurrentMockHandler := currentMockHandler\n\tSwitch(extractor.ExtractMethod(), configureFunc)\n\tcurrentMockery.Handle(url, currentMockHandler)\n\tcurrentMockHandler = outerCurrentMockHandler\n}", "func (uc UserController) Endpoints() map[string]map[string]http.HandlerFunc {\n\treturn map[string]map[string]http.HandlerFunc{\n\t\t\"/{id}\": {\n\t\t\t\"GET\": uc.ViewUser,\n\t\t},\n\t\t\"/login\": {\n\t\t\t\"POST\": uc.Login,\n\t\t},\n\t\t\"/new\": {\n\t\t\t\"POST\": uc.NewUser,\n\t\t},\n\t}\n}", "func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL string) {\n\n\twrapper := ServerInterfaceWrapper{\n\t\tHandler: si,\n\t}\n\n\trouter.GET(baseURL+\"/accounts\", wrapper.GetAccounts)\n\trouter.GET(baseURL+\"/accounts/:account/:currency\", 
wrapper.GetAccountByTypeAndCurrency)\n\trouter.GET(baseURL+\"/deposits\", wrapper.GetDeposits)\n\trouter.POST(baseURL+\"/deposits/:currency\", wrapper.GetDepositAddress)\n\trouter.GET(baseURL+\"/deposits/:depositId\", wrapper.GetDepositById)\n\trouter.GET(baseURL+\"/fees\", wrapper.GetFees)\n\trouter.GET(baseURL+\"/fills\", wrapper.GetFills)\n\trouter.GET(baseURL+\"/l2/:symbol\", wrapper.GetL2OrderBook)\n\trouter.GET(baseURL+\"/l3/:symbol\", wrapper.GetL3OrderBook)\n\trouter.DELETE(baseURL+\"/orders\", wrapper.DeleteAllOrders)\n\trouter.GET(baseURL+\"/orders\", wrapper.GetOrders)\n\trouter.POST(baseURL+\"/orders\", wrapper.CreateOrder)\n\trouter.DELETE(baseURL+\"/orders/:orderId\", wrapper.DeleteOrder)\n\trouter.GET(baseURL+\"/orders/:orderId\", wrapper.GetOrderById)\n\trouter.GET(baseURL+\"/symbols\", wrapper.GetSymbols)\n\trouter.GET(baseURL+\"/symbols/:symbol\", wrapper.GetSymbolByName)\n\trouter.GET(baseURL+\"/tickers\", wrapper.GetTickers)\n\trouter.GET(baseURL+\"/tickers/:symbol\", wrapper.GetTickerBySymbol)\n\trouter.GET(baseURL+\"/trades\", wrapper.GetTrades)\n\trouter.GET(baseURL+\"/whitelist\", wrapper.GetWhitelist)\n\trouter.GET(baseURL+\"/whitelist/:currency\", wrapper.GetWhitelistByCurrency)\n\trouter.GET(baseURL+\"/withdrawals\", wrapper.GetWithdrawals)\n\trouter.POST(baseURL+\"/withdrawals\", wrapper.CreateWithdrawal)\n\trouter.GET(baseURL+\"/withdrawals/:withdrawalId\", wrapper.GetWithdrawalById)\n\n}", "func (a *Router) MultiMap(mapping map[string]restful.RouteFunction) error {\n\t// All the given keys must have a common root path, return it if it's ok\n\trootPath, err := checkMapping(mapping)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Instanciate a new route at /{path} that returns json data\n\tws := new(restful.WebService)\n\tws.Path(a.prefix + rootPath).\n\t\tConsumes(\"*/*\").\n\t\tProduces(restful.MIME_JSON)\n\n\tfor request, callback := range mapping {\n\t\t// Extract common root path\n\t\thttpMethod, path, err := 
parseRequest(request)\n\t\t_, paramPath, err := parsePath(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlog.Infof(\"Map %s endpoint (%s)\\n\", path, httpMethod)\n\t\terr = a.registerRoute(ws, httpMethod, paramPath, callback)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\trestful.Add(ws)\n\treturn nil\n}", "func (e *Endpoints) Use(m func(goa.Endpoint) goa.Endpoint) {\n\te.Add = m(e.Add)\n\te.Resta = m(e.Resta)\n\te.Multiplicacion = m(e.Multiplicacion)\n\te.Division = m(e.Division)\n}", "func (*router) routing(\n\tname, _ string, m []string, h rest.Func,\n) http.Handler {\n\treturn http.HandlerFunc(\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tvar (\n\t\t\t\terr error\n\t\t\t\tcode int\n\t\t\t)\n\n\t\t\tfor _, method := range m {\n\t\t\t\tif strings.EqualFold(r.Method, method) {\n\t\t\t\t\t// execute only if the request method is inside the method list\n\t\t\t\t\tcode, err = h(w, r)\n\t\t\t\t\tif err != nil && code != http.StatusServiceUnavailable {\n\t\t\t\t\t\terr = json.ErrorHandler(w, r,\n\t\t\t\t\t\t\terr.Error()+\" at handler \"+name,\n\t\t\t\t\t\t\tcode,\n\t\t\t\t\t\t\terr)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Error(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\terr = json.ErrorHandler(w, r,\n\t\t\t\t\"Invalid Request Method\",\n\t\t\t\thttp.StatusMethodNotAllowed,\n\t\t\t\terrors.ErrInvalidRequest)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t}\n\t\t})\n}", "func (r *Router) executeHandler(w http.ResponseWriter, req *http.Request) {\n\n\tvar url string\n\t// dont jump away from function if not necessary\n\tif r.prefix != \"\" {\n\t\turl = strings.TrimPrefix(req.URL.Path, r.prefix)\n\t} else {\n\t\turl = req.URL.Path\n\t}\n\n\tmethod := methodToInt(req.Method)\n\tvar parameters *ParameterList\n\n\tif method == -1 {\n\t\tr.notAllowedMethod(w, req, nil)\n\t\tlog.Println(\"Method not allowed: \" + req.Method)\n\t\treturn\n\t}\n\n\tcurrentNode := r.pathTrees[method]\n\t// check 
if there is an handler for the request method\n\tif currentNode == nil {\n\t\tr.notAllowedMethod(w, req, nil)\n\t\tlog.Println(\"Method not allowed, no handler set for: \" + req.Method)\n\t\treturn\n\t}\n\n\t// return index page\n\tif len(url) == 0 || url == \"/\" {\n\t\tstaticNode := currentNode.staticRoutes[1]\n\n\t\tif staticNode != nil {\n\t\t\tex := staticNode.get(\"/\")\n\t\t\tif ex != nil {\n\t\t\t\tex.handler(w, req, nil)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else if pn := currentNode.parameterHandler; pn != nil { // paramter node is set\n\t\t\tpn.handler(w, req, nil)\n\t\t\treturn\n\t\t}\n\t\tr.notFound(w, req, nil)\n\t\treturn\n\t}\n\n\tlastSlash := 0\n\t// start from one to skip the first /\n\tsize := len(url)\n\tvar sch string\n\n\tfor i := 1; i < size; i++ {\n\t\t// do something only when a / is found (or end of url is reached)\n\t\tif url[i] == '/' || i == size-1 {\n\t\t\t// grab the paramter from the url with a slice\n\t\t\tif i != size-1 {\n\t\t\t\tsch = url[lastSlash:i]\n\t\t\t} else {\n\t\t\t\tsch = url[lastSlash:]\n\t\t\t}\n\n\t\t\t// first search static nodes\n\t\t\tvar staticNode *pathNode\n\t\t\tif currentNode.staticRoutes != nil {\n\t\t\t\tsz := len(sch)\n\t\t\t\tif sz < len(currentNode.staticRoutes) {\n\t\t\t\t\tstaticNode = currentNode.staticRoutes[sz].get(sch)\n\t\t\t\t} else {\n\t\t\t\t\tstaticNode = nil\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tstaticNode = nil\n\t\t\t}\n\n\t\t\tif staticNode != nil {\n\t\t\t\tcurrentNode = staticNode\n\t\t\t} else if currentNode.parameterHandler != nil { // then check if the value could be a paramter\n\t\t\t\tcurrentNode = currentNode.parameterHandler\n\t\t\t\tif parameters == nil {\n\t\t\t\t\tparameters = r.paramPool.Get()\n\t\t\t\t}\n\t\t\t\tif currentNode.name == \"*\" { // * parameter require that everything is matched\n\t\t\t\t\tparameters.Set(currentNode.name, url[lastSlash:])\n\t\t\t\t\tbreak // they also must stop the cycle and jump to handlers execution\n\t\t\t\t} else 
{\n\t\t\t\t\tparameters.Set(currentNode.name, sch[1:])\n\t\t\t\t}\n\t\t\t} else { // in nothing is found then we can print a not found message\n\t\t\t\tr.paramPool.Push(parameters)\n\t\t\t\tr.notFound(w, req, nil) // not found any possible match\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlastSlash = i // update last slash pos after operations\n\t\t}\n\t}\n\n\t// when we are here we are in the last node of the url so we can execute the action\n\tif currentNode.handler != nil {\n\t\tcurrentNode.handler(w, req, parameters)\n\t} else {\n\t\tr.notFound(w, req, nil)\n\t}\n\tr.paramPool.Push(parameters)\n}", "func pharosHandler(w http.ResponseWriter, r *http.Request) {\n\turl := r.URL.String()\n\tif strings.Contains(url, \"/item_state/\") {\n\t\tif r.Method == http.MethodGet {\n\t\t\tworkItemStateGetHandler(w, r)\n\t\t} else {\n\t\t\tworkItemStatePutHandler(w, r)\n\t\t}\n\t} else if strings.Contains(url, \"/items/\") {\n\t\tif r.Method == http.MethodGet {\n\t\t\tworkItemGetHandler(w, r)\n\t\t} else if r.Method == http.MethodPut {\n\t\t\tworkItemPutHandler(w, r)\n\t\t} else if r.Method == http.MethodPost {\n\t\t\tworkItemPostHandler(w, r)\n\t\t}\n\n\t} else if strings.Contains(url, \"/objects/\") {\n\t\tintellectualObjectGetHandler(w, r)\n\t} else if strings.Contains(url, \"/institutions/\") {\n\t\tinstitutionListHandler(w, r)\n\t} else if strings.Contains(url, \"/files/\") {\n\t\tgenericFileGetHandler(w, r)\n\t} else {\n\t\tpanic(fmt.Sprintf(\"Don't know how to handle request for %s\", url))\n\t}\n}", "func bindProxyRoutes(r *gin.Engine, h *app.Handlers) {\n\tqueue := h.Requester.Queue()\n\tproxy := r.Group(\"/proxy\", http.TraceEndpoint(h.ProxyAuthentication, queue))\n\n\tproxy.POST(\"/orders\", http.TraceEndpoint(h.CopyOrder, queue))\n\tproxy.POST(\"/orders/\", http.TraceEndpoint(h.CopyOrder, queue))\n\tproxy.GET(\"/orders/confirmation/:order_id\", http.TraceEndpoint(h.OrderConfirmation, queue))\n\tproxy.GET(\"/orders/confirmation/:order_id/\", 
http.TraceEndpoint(h.OrderConfirmation, queue))\n\tproxy.GET(\"/carts/orders/confirmation\", http.TraceEndpoint(h.CartOrderConfirmation, queue))\n\tproxy.GET(\"/carts/orders/confirmation/\", http.TraceEndpoint(h.CartOrderConfirmation, queue))\n\tproxy.POST(\"/cart/shipping\", http.TraceEndpoint(h.CartShippingOptions, queue))\n\tproxy.POST(\"/cart/shipping/\", http.TraceEndpoint(h.CartShippingOptions, queue))\n\tproxy.POST(\"/cart/tax\", http.TraceEndpoint(h.CartTaxTotal, queue))\n\tproxy.POST(\"/cart/tax/\", http.TraceEndpoint(h.CartTaxTotal, queue))\n\tproxy.POST(\"/cart/tax/draftorder\", http.TraceEndpoint(h.CartTaxTotalDraftOrder, queue))\n\tproxy.POST(\"/cart/tax/draftorder/\", http.TraceEndpoint(h.CartTaxTotalDraftOrder, queue))\n\n\tproxy.POST(\"/errors\", http.TraceEndpoint(h.LogFrontEndError, queue))\n\tproxy.POST(\"/errors/\", http.TraceEndpoint(h.LogFrontEndError, queue))\n}", "func mapUrlsToControllers(router *gin.Engine, gameController *controllers.GameController) {\n\trouter.GET(\"/ping\", 
gameController.Pong)\n\n\trouter.GET(\"minesweeper/users/:user_id/games\",\n\t\tmiddlewares.AdaptHandler(gameController.ValidateGetGamesByUserID),\n\t\tmiddlewares.AdaptHandler(gameController.GetGamesByUserID),\n\t)\n\n\trouter.GET(\"minesweeper/users/:user_id/games/:game_id\",\n\t\tmiddlewares.AdaptHandler(gameController.ValidateGetGameByGameID),\n\t\tmiddlewares.AdaptHandler(gameController.GetGameByGameID),\n\t)\n\n\trouter.GET(\"minesweeper/users/:user_id/games/:game_id/solution\",\n\t\tmiddlewares.AdaptHandler(gameController.ValidateGetGameByGameID),\n\t\tmiddlewares.AdaptHandler(gameController.ShowSolution),\n\t)\n\n\trouter.GET(\"minesweeper/users/:user_id/games/:game_id/status\",\n\t\tmiddlewares.AdaptHandler(gameController.ValidateGetGameByGameID),\n\t\tmiddlewares.AdaptHandler(gameController.ShowStatus),\n\t)\n\n\trouter.POST(\"minesweeper/users/:user_id/games\",\n\t\tmiddlewares.AdaptHandler(gameController.ValidatePost),\n\t\tmiddlewares.AdaptHandler(gameController.CreateNewGame),\n\t)\n\n\trouter.POST(\"minesweeper/users/:user_id/games/:game_id/flag\",\n\t\tmiddlewares.AdaptHandler(gameController.ValidateFlag),\n\t\tmiddlewares.AdaptHandler(gameController.FlagCell),\n\t)\n\n\trouter.POST(\"minesweeper/users/:user_id/games/:game_id/reveal\",\n\t\tmiddlewares.AdaptHandler(gameController.ValidateReveal),\n\t\tmiddlewares.AdaptHandler(gameController.RevealCell),\n\t)\n\n\trouter.DELETE(\"minesweeper/games\",\n\t\tmiddlewares.AdaptHandler(gameController.DeleteAllGames),\n\t)\n\n\trouter.GET(\"minesweeper/games\",\n\t\tmiddlewares.AdaptHandler(gameController.GetllGames),\n\t)\n}", "func (e *Endpoints) Use(m func(goa.Endpoint) goa.Endpoint) {\n\te.Upload = m(e.Upload)\n\te.Download = m(e.Download)\n}", "func ApplyRoutes(r *gin.Engine, auth *auth.Authenticator, db *gorm.DB) {\n\tmodels.SetRepoDB(db)\n\tauthenticator = auth\n\tapiV1 := r.Group(\"/v1\")\n\t{\n\t\tapiV1.GET(\"/ping\", pingHandler)\n\t\tapiV1.POST(\"/login\", 
loginHandler)\n\t\tapiV1.POST(\"/comment\", commentHandler)\n\t\tapiV1.DELETE(\"/comment\", commentHandler)\n\t\tapiV1.POST(\"/users\", userHandler)\n\t\tapiV1.DELETE(\"/users\", userHandler)\n\n\t}\n}", "func (api *API) addEndpoint(endpoint APIEndpoint) {\n\t// httpMethod check\n\tif endpoint.httpMethod != http.MethodGet &&\n\t\tendpoint.httpMethod != http.MethodPost &&\n\t\tendpoint.httpMethod != http.MethodPatch &&\n\t\tendpoint.httpMethod != http.MethodPut &&\n\t\tendpoint.httpMethod != http.MethodDelete {\n\t\tapi.logger.Fatal(1, \"Cannot call 'AddHandler' an invalid method \\\"%s\\\" for URL %s/%s/%s\",\n\t\t\tendpoint.httpMethod, api.root, endpoint.version, endpoint.url)\n\t}\n\n\t// endpoint handler check\n\tvar handler http.HandlerFunc\n\n\tif endpoint.publicHandler != nil {\n\t\t// Public handler: leverage ServeHTTP method\n\t\thandler = endpoint.publicHandler\n\t} else if endpoint.protectedHandler != nil {\n\t\t// Protected handler\n\t\thandler = DoIfAccess(endpoint.accessChecker, endpoint.protectedHandler).ServeHTTP\n\t} else {\n\t\t// Error: missing handler\n\t\tapi.logger.Fatal(1, \"[API] Endpoint %s:%s does not have any handler\", endpoint.httpMethod, endpoint.url)\n\t\treturn\n\t}\n\n\t// CORS config is the same for both public and protected\n\tcorsConfig := CorsConfig{\n\t\tHosts: api.corsHosts,\n\t\tHeaders: api.corsHeaders,\n\t\tMethods: endpoint.httpMethod,\n\t}\n\n\t// Apply CORS handers\n\tendpoint.handler = AddCorsHeaders(handler, corsConfig).ServeHTTP\n\n\t// Add new endpoints to the list\n\tapi.endpoints = append(api.endpoints, endpoint)\n}", "func (e *Endpoints) Use(m func(goa.Endpoint) goa.Endpoint) {\n\te.List = m(e.List)\n\te.Get = m(e.Get)\n\te.RandomFacts = m(e.RandomFacts)\n}", "func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL string) {\n\n\twrapper := ServerInterfaceWrapper{\n\t\tHandler: si,\n\t}\n\n\trouter.GET(baseURL+\"/customers\", wrapper.GetCustomers)\n\trouter.POST(baseURL+\"/customers\", 
wrapper.PostCustomers)\n\trouter.DELETE(baseURL+\"/customers/:id\", wrapper.DeleteCustomersId)\n\trouter.GET(baseURL+\"/customers/:id\", wrapper.GetCustomersId)\n\trouter.PUT(baseURL+\"/customers/:id\", wrapper.PutCustomersId)\n\trouter.GET(baseURL+\"/employees\", wrapper.GetEmployees)\n\trouter.POST(baseURL+\"/employees\", wrapper.PostEmployees)\n\trouter.DELETE(baseURL+\"/employees/:id\", wrapper.DeleteEmployeesId)\n\trouter.GET(baseURL+\"/employees/:id\", wrapper.GetEmployeesId)\n\trouter.PUT(baseURL+\"/employees/:id\", wrapper.PutEmployeesId)\n\trouter.GET(baseURL+\"/expenses\", wrapper.GetExpenses)\n\trouter.POST(baseURL+\"/expenses\", wrapper.PostExpenses)\n\trouter.DELETE(baseURL+\"/expenses/:id\", wrapper.DeleteExpensesId)\n\trouter.GET(baseURL+\"/expenses/:id\", wrapper.GetExpensesId)\n\trouter.PUT(baseURL+\"/expenses/:id\", wrapper.PutExpensesId)\n\trouter.GET(baseURL+\"/invoices\", wrapper.GetInvoices)\n\trouter.POST(baseURL+\"/invoices\", wrapper.PostInvoices)\n\trouter.DELETE(baseURL+\"/invoices/:id\", wrapper.DeleteInvoicesId)\n\trouter.GET(baseURL+\"/invoices/:id\", wrapper.GetInvoicesId)\n\trouter.PUT(baseURL+\"/invoices/:id\", wrapper.PutInvoicesId)\n\trouter.GET(baseURL+\"/misc_records\", wrapper.GetMiscRecords)\n\trouter.POST(baseURL+\"/misc_records\", wrapper.PostMiscRecords)\n\trouter.DELETE(baseURL+\"/misc_records/:id\", wrapper.DeleteMiscRecordsId)\n\trouter.GET(baseURL+\"/misc_records/:id\", wrapper.GetMiscRecordsId)\n\trouter.PUT(baseURL+\"/misc_records/:id\", wrapper.PutMiscRecordsId)\n\trouter.GET(baseURL+\"/projects\", wrapper.GetProjects)\n\trouter.POST(baseURL+\"/projects\", wrapper.PostProjects)\n\trouter.DELETE(baseURL+\"/projects/:id\", wrapper.DeleteProjectsId)\n\trouter.GET(baseURL+\"/projects/:id\", wrapper.GetProjectsId)\n\trouter.PUT(baseURL+\"/projects/:id\", wrapper.PutProjectsId)\n\n}", "func (srv *Server) handlePost(res http.ResponseWriter, req *http.Request) {\n\tfor _, rute := range srv.routePosts {\n\t\tvals, ok := 
rute.parse(req.URL.Path)\n\t\tif ok {\n\t\t\trute.endpoint.call(res, req, srv.evals, vals)\n\t\t\treturn\n\t\t}\n\t}\n\tres.WriteHeader(http.StatusNotFound)\n}", "func (e *Endpoints) Use(m func(goa.Endpoint) goa.Endpoint) {\n\te.Show = m(e.Show)\n\te.PlainList0 = m(e.PlainList0)\n\te.PlainList1 = m(e.PlainList1)\n\te.List0 = m(e.List0)\n\te.List1 = m(e.List1)\n\te.List2 = m(e.List2)\n\te.List3 = m(e.List3)\n}", "func (e *Endpoints) Use(m func(goa.Endpoint) goa.Endpoint) {\n\te.DataEventsEndpoint = m(e.DataEventsEndpoint)\n\te.AddDataEvent = m(e.AddDataEvent)\n\te.UpdateDataEvent = m(e.UpdateDataEvent)\n\te.DeleteDataEvent = m(e.DeleteDataEvent)\n}", "func _initRoutes() {\n\t// e.Use(fasthttp.WrapMiddleware(server_stats.Handler))\n\n\te.Get(\"/stats\", func(c echo.Context) error {\n\t\treturn c.JSON(http.StatusOK, server_stats.Data())\n\t})\n\n\te.Post(\"/login\", login)\n\te.Get(\"/logout\", logout)\n\n\te.Post(\"/syslog\", querySyslog)\n\te.Post(\"/upload\", uploadDocument)\n\te.Get(\"/docs/:type/:id\", queryDocs)\n\te.Get(\"/wodocs/:id\", queryWODocs)\n\te.Get(\"/doc/:id\", serveDoc)\n\n\te.Get(\"/users\", queryUsers)\n\te.Get(\"/users/skill/:id\", queryUsersWithSkill)\n\te.Get(\"/users/:id\", getUser)\n\te.Post(\"/users\", newUser)\n\te.Put(\"/users/:id\", saveUser)\n\te.Delete(\"/users/:id\", deleteUser)\n\n\te.Get(\"/sites\", querySites)\n\te.Get(\"/sites/:id\", getSite)\n\te.Get(\"/site/supplies/:id\", querySiteSupplies)\n\te.Get(\"/site/users/:id\", querySiteUsers)\n\te.Post(\"/sites\", newSite)\n\te.Put(\"/sites/:id\", saveSite)\n\te.Delete(\"/sites/:id\", deleteSite)\n\te.Get(\"/site/status\", siteStatus)\n\n\te.Get(\"/skills\", querySkills)\n\te.Get(\"/skills/:id\", getSkill)\n\te.Post(\"/skills\", newSkill)\n\te.Put(\"/skills/:id\", saveSkill)\n\te.Delete(\"/skills/:id\", deleteSkill)\n\n\te.Get(\"/parts\", queryParts)\n\te.Get(\"/part/components/:id\", queryPartComponents)\n\te.Get(\"/part/vendors/:id\", queryPartVendors)\n\te.Get(\"/parts/:id\", 
getPart)\n\te.Post(\"/parts\", newPart)\n\te.Put(\"/parts/:id\", savePart)\n\te.Delete(\"/parts/:id\", deletePart)\n\n\te.Get(\"/machine\", queryMachineFull)\n\te.Get(\"/site/machines/:id\", querySiteMachines)\n\te.Get(\"/machine/:id\", getMachine)\n\te.Post(\"/machine\", newMachine)\n\te.Put(\"/machine/:id\", saveMachine)\n\te.Delete(\"/machine/:id\", deleteMachine)\n\te.Get(\"/machine/components/:id\", queryMachineComponents)\n\te.Get(\"/machine/parts/:id\", queryMachineParts)\n\te.Get(\"/machine/clear/:id\", clearMachine)\n\te.Get(\"/machine/tasks/:id\", queryMachineTasks)\n\n\te.Get(\"/component\", queryComponents)\n\te.Get(\"/component/:id\", getComponent)\n\te.Post(\"/component\", newComponent)\n\te.Put(\"/component/:id\", saveComponent)\n\te.Delete(\"/component/:id\", deleteComponent)\n\te.Get(\"/component/parts/:id\", queryComponentParts)\n\te.Get(\"/component/machine/:id\", getComponentMachine)\n\n\te.Get(\"/vendor\", queryVendor)\n\te.Get(\"/vendor/part/:id\", queryVendorParts)\n\te.Get(\"/vendor/:id\", getVendor)\n\te.Post(\"/vendor\", newVendor)\n\te.Post(\"/vendor/prices/:id\", newVendorPrices)\n\te.Put(\"/vendor/:id\", saveVendor)\n\te.Delete(\"/vendor/:id\", deleteVendor)\n\n\te.Get(\"/events\", queryEvents)\n\te.Get(\"/events/:id\", getEvent)\n\te.Put(\"/events/:id\", saveEvent)\n\te.Post(\"/event/raise/machine\", raiseEventMachine)\n\te.Post(\"/event/raise/tool\", raiseEventTool)\n\te.Delete(\"/event/raise/tool/:id\", clearTempEventTool)\n\te.Get(\"/machine/events/:id\", queryMachineEvents)\n\te.Get(\"/machine/compevents/:id/:type\", queryMachineCompEvents)\n\te.Get(\"/tool/events/:id\", queryToolEvents)\n\te.Post(\"/event/cost\", addCostToEvent)\n\te.Get(\"/eventdocs/:id\", queryEventDocs)\n\te.Get(\"/event/workorders/:id\", queryEventWorkorders)\n\te.Get(\"/workorder\", queryWorkOrders)\n\te.Post(\"/workorder\", newWorkOrder)\n\te.Get(\"/workorder/:id\", getWorkOrder)\n\te.Put(\"/workorder/:id\", updateWorkOrder)\n\n\t// Add a websocket 
handler\n\t// e.WebSocket(\"/ws\", webSocket)\n\te.Get(\"/ws\", standard.WrapHandler(websocket.Handler(webSocket)))\n\t// e.Get(\"/ws\", doNothing)\n\t// e.Get(\"/ws\", standard.WrapHandler(websocket.Handler(func(ws *websocket.Conn) {\n\t// \tfor {\n\t// \t\twebsocket.Message.Send(ws, \"Hello, Client!\")\n\t// \t\tmsg := \"\"\n\t// \t\twebsocket.Message.Receive(ws, &msg)\n\t// \t\tprintln(msg)\n\t// \t}\n\t// })))\n\n}", "func walkFunc(method string, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error {\n\tlog.Printf(\"method: %s - - route: %s\\n\", method, route)\n\treturn nil\n}", "func svcHandler()", "func (o *Operation) registerHandler() {\n\t// Add more protocol endpoints here to expose them as controller API endpoints\n\to.handlers = []operation.Handler{\n\t\tsupport.NewHTTPHandler(createPublicDIDPath, http.MethodPost, o.CreatePublicDID),\n\t\tsupport.NewHTTPHandler(registerMsgService, http.MethodPost, o.RegisterMessageService),\n\t\tsupport.NewHTTPHandler(unregisterMsgService, http.MethodPost, o.UnregisterMessageService),\n\t\tsupport.NewHTTPHandler(msgServiceList, http.MethodGet, o.RegisteredServices),\n\t\tsupport.NewHTTPHandler(sendNewMsg, http.MethodPost, o.SendNewMessage),\n\t\tsupport.NewHTTPHandler(sendReplyMsg, http.MethodPost, o.SendReplyMessage),\n\t\tsupport.NewHTTPHandler(registerHTTPOverDIDCommService, http.MethodPost, o.RegisterHTTPMessageService),\n\t}\n}", "func Handlers(router *gin.Engine) {\n\t// GET /status\n\trouter.GET(\"/status\", controllers.Status)\n}", "func RegisterHandlers(router EchoRouter, si ServerInterface) {\n\n\twrapper := ServerInterfaceWrapper{\n\t\tHandler: si,\n\t}\n\n\trouter.GET(\"/v1/api/claims\", wrapper.GetClaims)\n\trouter.POST(\"/v1/api/claims\", wrapper.CreateClaim)\n\trouter.GET(\"/v1/api/claims/find\", wrapper.FindClaimByName)\n\trouter.DELETE(\"/v1/api/claims/:id\", wrapper.DeleteClaim)\n\trouter.GET(\"/v1/api/claims/:id\", 
wrapper.GetClaim)\n\trouter.PUT(\"/v1/api/claims/:id\", wrapper.UpdateClaim)\n\trouter.GET(\"/v1/api/scopes\", wrapper.GetScopes)\n\trouter.POST(\"/v1/api/scopes\", wrapper.CreateScope)\n\trouter.GET(\"/v1/api/scopes/find\", wrapper.FindScopeByName)\n\trouter.DELETE(\"/v1/api/scopes/:id\", wrapper.DeleteScope)\n\trouter.GET(\"/v1/api/scopes/:id\", wrapper.GetScope)\n\trouter.PUT(\"/v1/api/scopes/:id\", wrapper.UpdateScope)\n\trouter.POST(\"/v1/api/scopes/:id/claim\", wrapper.AddClaimToScope)\n\trouter.DELETE(\"/v1/api/scopes/:id/claim/:claimId\", wrapper.RemoveClaimFromScope)\n\trouter.GET(\"/v1/api/secretchannels\", wrapper.GetSecretChannels)\n\trouter.POST(\"/v1/api/secretchannels\", wrapper.CreateSecretChannel)\n\trouter.GET(\"/v1/api/secretchannels/find/algouse\", wrapper.FindSecretChannelByAlgouse)\n\trouter.GET(\"/v1/api/secretchannels/find/name\", wrapper.FindSecretChannelByName)\n\trouter.DELETE(\"/v1/api/secretchannels/:id\", wrapper.DeleteSecretChannel)\n\trouter.GET(\"/v1/api/secretchannels/:id\", wrapper.GetSecretChannel)\n\trouter.POST(\"/v1/api/secretchannels/:id\", wrapper.RenewSecretChannel)\n\trouter.GET(\"/v1/api/serviceproviders\", wrapper.GetServiceProviders)\n\trouter.POST(\"/v1/api/serviceproviders\", wrapper.CreateServiceProvider)\n\trouter.GET(\"/v1/api/serviceproviders/find\", wrapper.FindServiceProvider)\n\trouter.DELETE(\"/v1/api/serviceproviders/:id\", wrapper.DeleteServiceProvider)\n\trouter.GET(\"/v1/api/serviceproviders/:id\", wrapper.GetServiceProvider)\n\trouter.PATCH(\"/v1/api/serviceproviders/:id\", wrapper.PatchServiceProvider)\n\trouter.PUT(\"/v1/api/serviceproviders/:id\", wrapper.UpdateServiceProvider)\n\trouter.GET(\"/v1/api/serviceproviders/:id/credentials\", wrapper.GetCredentials)\n\trouter.POST(\"/v1/api/serviceproviders/:id/credentials\", wrapper.GenerateCredentials)\n\trouter.POST(\"/v1/api/serviceproviders/:id/status\", wrapper.UpdateServiceProviderStatus)\n\trouter.GET(\"/v1/api/users\", 
wrapper.GetUsers)\n\trouter.POST(\"/v1/api/users\", wrapper.CreateUser)\n\trouter.GET(\"/v1/api/users/find\", wrapper.FindUser)\n\trouter.POST(\"/v1/api/users/recover/password\", wrapper.InitiatePasswordRecovery)\n\trouter.PUT(\"/v1/api/users/recover/password\", wrapper.ResetUserPassword)\n\trouter.DELETE(\"/v1/api/users/:id\", wrapper.DeleteUser)\n\trouter.GET(\"/v1/api/users/:id\", wrapper.GetUser)\n\trouter.PUT(\"/v1/api/users/:id\", wrapper.UpdateUser)\n\trouter.POST(\"/v1/api/users/:id/password\", wrapper.ChangeUserPassword)\n\trouter.POST(\"/v1/api/users/:id/status\", wrapper.UpdateUserStatus)\n\n}", "func ExecEndpoint(i interface{}, m string, w http.ResponseWriter, r *http.Request) {\n\ta := reflect.ValueOf(i)\n\tlogger.Debugf(\"Method call : %s\", strings.Title(m))\n\tf := a.MethodByName(strings.Title(m))\n\tif f.IsZero() {\n\t\tDefaultHandler(w, r, m)\n\t} else {\n\t\tq := []reflect.Value{\n\t\t\treflect.ValueOf(w),\n\t\t\treflect.ValueOf(r),\n\t\t}\n\t\tf.Call(q)\n\t}\n}", "func (r *Router) RegisterHandler(irisHandler Handler) (*Route, error) {\n\tvar route *Route\n\tvar methods []string\n\tvar path string\n\tvar handleFunc reflect.Value\n\tvar template string\n\tvar templateIsGLob = false\n\tvar err = errors.New(\"\")\n\tval := reflect.ValueOf(irisHandler).Elem()\n\n\tfor i := 0; i < val.NumField(); i++ {\n\t\ttypeField := val.Type().Field(i)\n\n\t\tif typeField.Name == \"Handler\" {\n\t\t\ttags := strings.Split(strings.TrimSpace(string(typeField.Tag)), \" \")\n\t\t\t//we can have two keys, one is the tag starts with the method (GET,POST: \"/user/api/{userId(int)}\")\n\t\t\t//and the other if exists is the OPTIONAL TEMPLATE/TEMPLATE-GLOB: \"file.html\"\n\n\t\t\t//check for Template first because on the method we break and return error if no method found , for now.\n\t\t\tif len(tags) > 1 {\n\t\t\t\tsecondTag := tags[1]\n\n\t\t\t\ttemplateIdx := strings.Index(string(secondTag), \":\")\n\n\t\t\t\ttemplateTagName := 
strings.ToUpper(string(secondTag[:templateIdx]))\n\n\t\t\t\t//check if it's regex pattern\n\n\t\t\t\tif templateTagName == \"TEMPLATE-GLOB\" {\n\t\t\t\t\ttemplateIsGLob = true\n\t\t\t\t}\n\n\t\t\t\ttemlateTagValue, templateUnqerr := strconv.Unquote(string(secondTag[templateIdx+1:]))\n\n\t\t\t\tif templateUnqerr != nil {\n\t\t\t\t\terr = errors.New(err.Error() + \"\\niris.RegisterHandler: Error on getting template: \" + templateUnqerr.Error())\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\ttemplate = temlateTagValue\n\t\t\t}\n\n\t\t\tfirstTag := tags[0]\n\n\t\t\tidx := strings.Index(string(firstTag), \":\")\n\n\t\t\ttagName := strings.ToUpper(string(firstTag[:idx]))\n\t\t\ttagValue, unqerr := strconv.Unquote(string(firstTag[idx+1:]))\n\n\t\t\tif unqerr != nil {\n\t\t\t\terr = errors.New(err.Error() + \"\\niris.RegisterHandler: Error on getting path: \" + unqerr.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tpath = tagValue\n\n\t\t\tif strings.Index(tagName, \",\") != -1 {\n\t\t\t\t//has multi methods seperate by commas\n\n\t\t\t\tif !strings.Contains(avalaibleMethodsStr, tagName) {\n\t\t\t\t\t//wrong methods passed\n\t\t\t\t\terr = errors.New(err.Error() + \"\\niris.RegisterHandler: Wrong methods passed to Handler -> \" + tagName)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tmethods = strings.Split(tagName, \",\")\n\t\t\t\terr = nil\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\t//it is single 'GET','POST' .... 
method\n\t\t\t\tmethods = []string{tagName}\n\t\t\t\terr = nil\n\t\t\t\tbreak\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tif err == nil {\n\t\t//route = r.server.Router.Route(path, irisHandler.Handle, methods...)\n\n\t\t//now check/get the Handle method from the irisHandler 'obj'.\n\t\thandleFunc = reflect.ValueOf(irisHandler).MethodByName(\"Handle\")\n\n\t\tif !handleFunc.IsValid() {\n\t\t\terr = errors.New(\"Missing Handle function inside iris.Handler\")\n\t\t}\n\n\t\tif err == nil {\n\t\t\troute = r.Handle(path, handleFunc.Interface(), methods...)\n\t\t\t//check if template string has stored by the tag ( look before this block )\n\n\t\t\tif template != \"\" {\n\t\t\t\tif templateIsGLob {\n\t\t\t\t\troute.Template().SetGlob(template)\n\t\t\t\t} else {\n\t\t\t\t\troute.Template().Add(template)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\treturn route, err\n}", "func (s *Server) setupEndpoints(r *chi.Mux) {\n\tr.Route(\"/api/v1\", func(r chi.Router) {\n\t\tr.Route(\"/users\", func(r chi.Router) {\n\n\t\t})\n\t})\n}", "func WrapEndpoints(in svc.Endpoints) svc.Endpoints {\n\t\n\t// Pass a middleware you want applied to every endpoint.\n\t// optionally pass in endpoints by name that you want to be excluded\n\t// e.g.\n\t// in.WrapAllExcept(authMiddleware, \"Status\", \"Ping\")\n\t\n\t// Pass in a svc.LabeledMiddleware you want applied to every endpoint.\n\t// These middlewares get passed the endpoints name as their first argument when applied.\n\t// This can be used to write generic metric gathering middlewares that can\n\t// report the endpoint name for free.\n\t// github.com/metaverse/truss/_example/middlewares/labeledmiddlewares.go for examples.\n\t// in.WrapAllLabeledExcept(errorCounter(statsdCounter), \"Status\", \"Ping\")\n\t\n\t// How to apply a middleware to a single endpoint.\n\t// in.ExampleEndpoint = authMiddleware(in.ExampleEndpoint)\n\t\n\t//创建限流器 1r/s 每秒请求数\n\tlimiter := rate.NewLimiter(rate.Every(time.Second*1), 
10)\n\t\n\t//通过DelayingLimiter中间件,在bookListEndPoint的外层再包裹一层限流的endPoint\n\tlimtMw := ratelimit.NewDelayingLimiter(limiter)\n\tin.GetBookInfoEndpoint = limtMw(in.GetBookInfoEndpoint)\n\tin.GetBookListEndpoint = limtMw(in.GetBookListEndpoint)\n\treturn in\n}", "func initApiHandlers(router *itineris.ApiRouter) {\n\trouter.SetHandler(\"pgsqlListDepartments\", apiListDepartments)\n\trouter.SetHandler(\"pgsqlCreateDepartment\", apiCreateDepartment)\n\trouter.SetHandler(\"pgsqlGetDepartment\", apiGetDepartment)\n\trouter.SetHandler(\"pgsqlUpdateDepartment\", apiUpdateDepartment)\n\trouter.SetHandler(\"pgsqlDeleteDepartment\", apiDeleteDepartment)\n}", "func (h *Handler) URLMapping(r *echo.Group) {\n\tr.GET(\"\", h.get, auth.CheckPrivilege(\"sales_return_read\"))\n\tr.GET(\"/:id\", h.show, auth.CheckPrivilege(\"sales_return_show\"))\n\tr.POST(\"\", h.create, auth.CheckPrivilege(\"sales_return_create\"))\n\tr.PUT(\"/:id\", h.put, auth.CheckPrivilege(\"sales_return_update\"))\n\tr.PUT(\"/:id/cancel\", h.cancel, auth.CheckPrivilege(\"sales_return_cancel\"))\n}", "func (router *Router) MapRoutes() {\n http.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n found := false\n url := html.EscapeString(r.URL.Path)\n log.Printf(\"%q %q\", r.Method, url)\n\n for _, route := range router.Routes {\n if url == route.Pattern && r.Method == route.Method {\n found = true\n route.Handler.ServeHTTP(w, r)\n }\n }\n\n if !found {\n http.NotFound(w, r)\n }\n }) \n}", "func (srv *Server) handlePatch(res http.ResponseWriter, req *http.Request) {\n\tfor _, rute := range srv.routePatches {\n\t\tvals, ok := rute.parse(req.URL.Path)\n\t\tif ok {\n\t\t\trute.endpoint.call(res, req, srv.evals, vals)\n\t\t\treturn\n\t\t}\n\t}\n\tres.WriteHeader(http.StatusNotFound)\n}", "func commonGetRouteSetup(routes ...*mux.Route) {\n\tfor _, route := range routes {\n\t\troute.\n\t\t\tMethods(\"GET\")\n\t}\n}", "func (s *WebService) Match(method string, route string, handler interface{}) 
{\r\n\ts.addRoute(route, method, handler)\r\n}", "func NewHandler(s service.Service) http.Handler {\n\tr := mux.NewRouter()\n\t// base handler\n\tbase := alice.New(newSetUserMid(s))\n\t// handler with auth required\n\tauthRequired := base.Append(newAuthRequiredMid)\n\n\th := &handler{s}\n\n\t// r.PathPrefix(\"/images\").Handler(httputil.NewSingleHostReverseProxy(proxyURL))\n\tr.Handle(\"/v1/login\", base.Then(errHandler(h.register))).Methods(http.MethodPost)\n\tr.Handle(\"/v1/me\", authRequired.Then(errHandler(h.me))).Methods(http.MethodGet)\n\tr.Handle(\"/v1/me\", authRequired.Then(errHandler(h.update))).Methods(http.MethodPatch)\n\tr.Handle(\"/v1/me/reacts\", authRequired.Then(errHandler(h.react))).Methods(http.MethodPost)\n\tr.Handle(\"/v1/me/abuses\", authRequired.Then(errHandler(h.reportAbuse))).Methods(http.MethodPost)\n\n\tr.Handle(\"/v1/me/discover-people\", authRequired.Then(errHandler(h.discoverPeople))).Methods(http.MethodGet)\n\n\tr.Handle(\"/v1/me/pictures\", authRequired.Then(errHandler(h.uploadPicture))).Methods(http.MethodPost)\n\tr.Handle(\"/v1/me/pictures\", authRequired.Then(errHandler(h.pictures))).Methods(http.MethodGet)\n\tr.Handle(\"/v1/me/pictures/{id}\", authRequired.Then(errHandler(h.deletePicture))).Methods(http.MethodDelete)\n\tr.Handle(\"/v1/me/pictures/{id}/profile\", authRequired.Then(errHandler(h.setProfilePicture))).Methods(http.MethodPut)\n\n\treturn r\n}", "func (o *Operation) registerHandler() {\n\t// Add more protocol endpoints here to expose them as controller API endpoints\n\to.handlers = []rest.Handler{\n\t\tcmdutil.NewHTTPHandler(RegisterPath, http.MethodPost, o.Register),\n\t\tcmdutil.NewHTTPHandler(UnregisterPath, http.MethodDelete, o.Unregister),\n\t\tcmdutil.NewHTTPHandler(GetConnectionsPath, http.MethodGet, o.Connections),\n\t\tcmdutil.NewHTTPHandler(ReconnectPath, http.MethodPost, o.Reconnect),\n\t\tcmdutil.NewHTTPHandler(StatusPath, http.MethodPost, o.Status),\n\t\tcmdutil.NewHTTPHandler(BatchPickupPath, 
http.MethodPost, o.BatchPickup),\n\t\tcmdutil.NewHTTPHandler(ReconnectAllPath, http.MethodGet, o.ReconnectAll),\n\t}\n}", "func (s *Server) createRoutes() {\n\tvar routes = util.Routes{\n\t\tutil.Route{\n\t\t\tName: \"pong\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/\",\n\t\t\tHandlerFunc: s.pong(),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"healthz\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/healthz\",\n\t\t\tHandlerFunc: util.Healthz(),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"getAllItems\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/items\",\n\t\t\tHandlerFunc: s.getAllItems(),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"setItemsPOST\",\n\t\t\tMethod: \"POST\",\n\t\t\tPattern: \"/items\",\n\t\t\tHandlerFunc: s.setItem(false),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"setItemsPUT\",\n\t\t\tMethod: \"PUT\",\n\t\t\tPattern: \"/items\",\n\t\t\tHandlerFunc: s.setItem(true),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"getItem\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/items/{id:[a-zA-Z0-9]+}\",\n\t\t\tHandlerFunc: s.getItem(),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"delItem\",\n\t\t\tMethod: \"DELETE\",\n\t\t\tPattern: \"/items/{id:[a-zA-Z0-9]+}\",\n\t\t\tHandlerFunc: s.delItem(),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"delay\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/delay\",\n\t\t\tHandlerFunc: s.delay(),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"simulateError\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/error\",\n\t\t\tHandlerFunc: s.simulateError(),\n\t\t},\n\t}\n\n\tfor _, route := range routes {\n\t\th := route.HandlerFunc\n\n\t\t// Tracing each request\n\t\th = util.TracerMiddleware(h, route)\n\n\t\t// Logging each request\n\t\th = util.LoggerMiddleware(h, s.logger)\n\n\t\t// Assign requestID to each request\n\t\th = util.AssignRequestID(h, s.logger)\n\n\t\t// Monitoring each request\n\t\t// TODO: pass proper handler\n\t\tpromHandler := util.PrometheusMiddleware(h, route.Pattern, 
rm)\n\n\t\ts.router.\n\t\t\tMethods(route.Method).\n\t\t\tPath(route.Pattern).\n\t\t\tName(route.Name).\n\t\t\tHandler(promHandler)\n\t}\n\n\t// Prometheus endpoint\n\troute := util.Route{\n\t\tName: \"metrics\",\n\t\tMethod: \"GET\",\n\t\tPattern: \"/metrics\",\n\t\tHandlerFunc: nil,\n\t}\n\n\tpromHandler := promhttp.HandlerFor(s.promReg, promhttp.HandlerOpts{})\n\tpromHandler = promhttp.InstrumentMetricHandler(s.promReg, promHandler)\n\ts.router.\n\t\tMethods(route.Method).\n\t\tPath(route.Pattern).\n\t\tName(route.Name).\n\t\tHandler(promHandler)\n\n\t// 404 handler\n\tnotFound := util.PrometheusMiddleware(s.notFound(), \"metrics\", rm)\n\ts.router.NotFoundHandler = notFound\n}", "func (r *Router) handle(c *Ctx) {\n\tvar handler HandlerFunc\n\treq := c.Request()\n\tw := c.Writer()\n\tpath := req.URL.Path\n\tmethod := req.Method\n\tres := r.trie.Match(path)\n\n\tif res.Node == nil {\n\t\t// FixedPathRedirect or TrailingSlashRedirect\n\t\tif res.TSR != \"\" || res.FPR != \"\" {\n\t\t\treq.URL.Path = res.TSR\n\t\t\tif res.FPR != \"\" {\n\t\t\t\treq.URL.Path = res.FPR\n\t\t\t}\n\t\t\tcode := 301\n\t\t\tif method != \"GET\" {\n\t\t\t\tcode = 307\n\t\t\t}\n\t\t\thttp.Redirect(w, req, req.URL.String(), code)\n\t\t\treturn\n\t\t}\n\t\tif r.noRoute == nil {\n\t\t\thttp.Error(w, fmt.Sprintf(`\"%s\" not implemented`, path), 501)\n\t\t\treturn\n\t\t}\n\t\thandler = r.noRoute\n\t} else {\n\t\t// ok := false\n\t\thd := res.Node.GetHandler(method)\n\t\thandler, _ = hd.(HandlerFunc)\n\t\t// handler = r.wrapHandler(hd)\n\t\t// if !ok {\n\t\t// \tpanic(\"handler error\")\n\t\t// }\n\t\tif handler == nil {\n\t\t\t// OPTIONS support\n\t\t\tif method == http.MethodOptions {\n\t\t\t\tw.Header().Set(\"Allow\", res.Node.GetAllow())\n\t\t\t\tw.WriteHeader(204)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif r.noMethod == nil {\n\t\t\t\t// If no route handler is returned, it's a 405 error\n\t\t\t\tw.Header().Set(\"Allow\", res.Node.GetAllow())\n\t\t\t\thttp.Error(w, fmt.Sprintf(`\"%s\" not allowed 
in \"%s\"`, method, path), 405)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandler = r.noMethod\n\t\t}\n\t}\n\n\tif len(res.Params) != 0 {\n\t\tc.params = res.Params\n\t}\n\tc.handlers = append(c.handlers, handler)\n\tc.Next()\n}", "func MakeHandler(svc Service, opts ...kithttp.ServerOption) http.Handler {\n\tr := mux.NewRouter()\n\tr.StrictSlash(true)\n\n\tr.Methods(\"GET\").Path(`/`).Name(\"ruleList\").Handler(\n\t\tkithttp.NewServer(\n\t\t\tlistEndpoint(svc),\n\t\t\tdecodeListRequest,\n\t\t\tkithttp.EncodeJSONResponse,\n\t\t\topts...,\n\t\t),\n\t)\n\n\tr.Methods(\"GET\").Path(`/{id:[a-zA-Z0-9]+}`).Name(\"ruleGet\").Handler(\n\t\tkithttp.NewServer(\n\t\t\tgetEndpoint(svc),\n\t\t\tdecodeGetRequest,\n\t\t\tkithttp.EncodeJSONResponse,\n\t\t\tappend(\n\t\t\t\topts,\n\t\t\t\tkithttp.ServerBefore(extractMuxVars(varID)),\n\t\t\t)...,\n\t\t),\n\t)\n\n\tr.Methods(\"PUT\").Path(`/{id:[a-zA-Z0-9]+}/activate`).Name(\"ruleActivate\").Handler(\n\t\tkithttp.NewServer(\n\t\t\tactivateEndpoint(svc),\n\t\t\tdecodeActivateRequest,\n\t\t\tkithttp.EncodeJSONResponse,\n\t\t\tappend(\n\t\t\t\topts,\n\t\t\t\tkithttp.ServerBefore(extractMuxVars(varID)),\n\t\t\t)...,\n\t\t),\n\t)\n\n\tr.Methods(\"PUT\").Path(`/{id:[a-zA-Z0-9]+}/deactivate`).Name(\"ruleDeactivate\").Handler(\n\t\tkithttp.NewServer(\n\t\t\tdeactivateEndpoint(svc),\n\t\t\tdecodeDeactivateRequest,\n\t\t\tkithttp.EncodeJSONResponse,\n\t\t\tappend(\n\t\t\t\topts,\n\t\t\t\tkithttp.ServerBefore(extractMuxVars(varID)),\n\t\t\t)...,\n\t\t),\n\t)\n\n\tr.Methods(\"PUT\").Path(`/{id:[a-zA-Z0-9]+}/rollout`).Name(\"ruleUpdateRollout\").Handler(\n\t\tkithttp.NewServer(\n\t\t\tupdateRolloutEndpoint(svc),\n\t\t\tdecodeUpdateRolloutRequest,\n\t\t\tkithttp.EncodeJSONResponse,\n\t\t\tappend(\n\t\t\t\topts,\n\t\t\t\tkithttp.ServerBefore(extractMuxVars(varID)),\n\t\t\t)...,\n\t\t),\n\t)\n\n\treturn r\n}", "func (e *Endpoints) Use(m func(goa.Endpoint) goa.Endpoint) {\n\te.Login = m(e.Login)\n\te.UpdatePassword = m(e.UpdatePassword)\n\te.CaptchaImage = 
m(e.CaptchaImage)\n}", "func DockerEndpointHandler(w http.ResponseWriter, r *http.Request) {\n\treq_id := conf.GetReqId()\n\tglog.Infof(\"------> DockerEndpointHandler triggered, req_id=%s, URI=%s\\n\", req_id, r.RequestURI)\n\n\t// check if uri pattern is accepted\n\tif !IsSupportedPattern(r.RequestURI, dockerPatterns) {\n\t\tglog.Infof(\"Docker pattern not accepted, req_id=%s, URI=%s\", req_id, r.RequestURI)\n\t\tNoEndpointHandler(w, r)\n\t\tglog.Infof(\"------ Completed processing of request req_id=%s\\n\", req_id)\n\t\treturn\n\t}\n\n\tdata, _ := httputil.DumpRequest(r, true)\n\tglog.Infof(\"Request dump req_id=%s req_length=%d:\\n%s\", req_id, len(data), string(data))\n\n\tvar creds auth.Creds\n\n\t// workaround defective sharding in dev-mon\n\tcreds = auth.FileAuth(r)\n\tif creds.Status == 200 {\n\t\tglog.Infof(\"Authentication from FILE succeeded for req_id=%s status=%d\", req_id, creds.Status)\n\t\t//glog.Infof(\"***** creds: %+v\", creds)\n\t} else {\n\t\tglog.Errorf(\"Authentication failed for req_id=%s status=%d\", req_id, creds.Status)\n\t\tif creds.Status == 401 {\n\t\t\tNotAuthorizedHandler(w, r)\n\t\t} else {\n\t\t\tErrorHandler(w, r, creds.Status)\n\t\t}\n\t\tglog.Infof(\"------ Completed processing of request req_id=%s\\n\", req_id)\n\t\treturn\n\t}\n\n\tbody, _ := ioutil.ReadAll(r.Body)\n\n\t//Call conn limiting interceptor(s) pre-processing\n\t//\tif !limit.OpenConn(creds.Container, conf.GetMaxContainerConn()) {\n\t//\t\tglog.Infof(\"Max conn limit reached for container...aborting request\")\n\t//\t\tglog.Infof(\"------ Completed processing of request req_id=%s\\n\", req_id)\n\t//\t\treturn\n\t//\t}\n\t//\tif !limit.OpenConn(creds.Node, conf.GetMaxNodeConn()) {\n\t//\t\tglog.Infof(\"Max conn limit reached for host node...aborting request\")\n\t//\t\tglog.Infof(\"------ Completed processing of request req_id=%s\\n\", req_id)\n\t//\t\treturn\n\t//\t}\n\n\t//Handle request\n\t//dockerHandler(w, r, body, creds, nil /*vars*/, 
req_id)\n\tdockerRouter.DoRoute(w, r, body, creds, req_id)\n\n\t//Call conn limiting interceptor(s) post-processing, to decrement conn count(s)\n\t//\tlimit.CloseConn(creds.Container, conf.GetMaxContainerConn())\n\t//\tlimit.CloseConn(creds.Node, conf.GetMaxNodeConn())\n\n\tglog.Infof(\"------ Completed processing of request req_id=%s\\n\", req_id)\n}", "func (sim *SimService) handle_all_the_things(w http.ResponseWriter, r *http.Request) {\n\twat := r.URL.Path[1:]\n\tfmt.Println(\"Handling\", wat)\n\n\tmethod, exists := sim.methods[wat]\n\n\t// check some stuff\n\tif !exists {\n\t\thttp.Error(w, \"Method does not exist: \"+wat, http.StatusNotFound)\n\t\treturn\n\t}\n\n\tif method.method_type != r.Method {\n\t\thttp.Error(w, \"Method type should be \"+method.method_type, http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\t// check params\n\tr.ParseMultipartForm(10 * 1024) // implicitly does ParseForm() in case there are some url params\n\n\tfor _, param := range method.required_parameters {\n\t\tif _, present := r.Form[param]; !present {\n\t\t\thttp.Error(w, \"Missing required parameter: \"+param, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// we're ok to handle it\n\tmethod.function(sim, w, r.Form)\n}", "func MapUrls(router *gin.Engine, authorizationController Authorization) *gin.RouterGroup {\n\n\tapiRoutes := router.Group(\"/api/authorization\")\n\t{\n\t\tapiRoutes.POST(\"/signin\", authorizationController.SignIn)\n\t\tapiRoutes.POST(\"/signup\", authorizationController.SignUp)\n\t}\n\n\treturn apiRoutes\n}", "func commonPostRouteSetup(routes ...*mux.Route) {\n\tfor _, route := range routes {\n\t\troute.\n\t\t\tMethods(\"POST\").\n\t\t\tHeaders(\"Content-Type\", \"application/json\")\n\t}\n}", "func (base linkableHandler) Post(handlerFunc ...PostHandler) linkableHandler {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tbase(w, r)\n\t\tif r.Context().Err() != nil {\n\t\t\treturn\n\t\t}\n\t\tfor _, handler := range handlerFunc 
{\n\t\t\thandler(w, r)\n\t\t}\n\t}\n}", "func (e *Endpoints) Use(m func(goa.Endpoint) goa.Endpoint) {\n\te.CreateSession = m(e.CreateSession)\n\te.UseSession = m(e.UseSession)\n}", "func Handler(ctx context.Context, request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\tvar buf bytes.Buffer\n\n\tbody, err := json.Marshal(map[string]interface{}{\n\t\t\"message\": \"Okay so your other function also executed successfully!\",\n\t})\n\tif err != nil {\n\t\treturn events.APIGatewayProxyResponse{StatusCode: 404}, err\n\t}\n\tjson.HTMLEscape(&buf, body)\n\n\tresp := events.APIGatewayProxyResponse{\n\t\tStatusCode: 200,\n\t\tIsBase64Encoded: false,\n\t\tBody: buf.String(),\n\t\tHeaders: map[string]string{\n\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t\"X-MyCompany-Func-Reply\": \"world-handler\",\n\t\t},\n\t}\n\n\treturn resp, nil\n}", "func MapHandler(pathsToUrls map[string]string) func(string) (string, bool) {\n\treturn func(path string) (string, bool) {\n\t\turl, exists := pathsToUrls[path]\n\t\tfmt.Println(pathsToUrls[path])\n\t\treturn url, exists\n\t}\n}", "func RequestHandler(inner http.Handler, name string, method string) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\t\terr := false\n\n\t\t// Set server header\n\t\tw.Header().Add(\"Server\", \"Flock/\"+version)\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t\tw.Header().Set(\"Cache-control\", \"no-cache\")\n\t\tw.Header().Set(\"Pragma\", \"no-cache\")\n\n\t\tif r.Method != method {\n\t\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\t\tjson.NewEncoder(w).Encode(&ServiceError{\n\t\t\t\tMessage: \"Endpoint does not accept method\",\n\t\t\t\tSolution: \"Change HTTP method\",\n\t\t\t\tErrorCode: http.StatusMethodNotAllowed,\n\t\t\t})\n\t\t\terr = true\n\t\t}\n\n\t\t// Serve reponse\n\t\tif !err {\n\t\t\tinner.ServeHTTP(w, r)\n\t\t}\n\n\t\tif verbose {\n\t\t\tlog.Printf(\"%s -> 
%s\\t%s\\t%s\\t%s\",\n\t\t\t\tr.RemoteAddr,\n\t\t\t\tr.Method,\n\t\t\t\tr.RequestURI,\n\t\t\t\tname,\n\t\t\t\ttime.Since(start),\n\t\t\t)\n\t\t}\n\t})\n}", "func Handlers() *httprouter.Router {\n\n\t// Create a new router\n\tr := apirouter.New()\n\n\t// Based on service mode\n\tif config.Values.ServiceMode == config.ServiceModeAPI {\n\t\t// r.CrossOriginAllowOriginAll = false\n\t\t// r.CrossOriginAllowOrigin = \"*\"\n\n\t\t// This is used for the \"Origin\" to be returned as the origin\n\t\tr.CrossOriginAllowOriginAll = true\n\n\t\t// Create a middleware stack:\n\t\t// s := apirouter.NewStack()\n\n\t\t// Use your middleware:\n\t\t// s.Use(passThrough)\n\n\t\tapi.RegisterRoutes(r)\n\t\tpersons.RegisterRoutes(r)\n\n\t} // else (another service mode?)\n\n\t// Return the router\n\treturn r.HTTPRouter.Router\n}", "func ApplyRoutes(r *gin.RouterGroup) {\n\tr.GET(\"/status\", GetStatus)\n\tr.GET(\"/status-page\", GetStatusPage)\n\tr.GET(\"/info\", GetInfo)\n}", "func RegisterHandlers(router EchoRouter, si ServerInterface) {\n\n\twrapper := ServerInterfaceWrapper{\n\t\tHandler: si,\n\t}\n\n\trouter.GET(\"/gateways\", wrapper.ListGateways)\n\trouter.POST(\"/gateways\", wrapper.PostGateways)\n\trouter.PUT(\"/gateways\", wrapper.PutGateways)\n\trouter.DELETE(\"/gateways/:gatewayId\", wrapper.DeleteGatewayById)\n\trouter.GET(\"/gateways/:gatewayId\", wrapper.GetGatewayById)\n\trouter.PUT(\"/gateways/:gatewayId\", wrapper.PutGatewayById)\n\trouter.GET(\"/gateways/:gatewayId/endpoint\", wrapper.GetGatewayEndpoint1)\n\n}", "func (srv *Server) handlePut(res http.ResponseWriter, req *http.Request) {\n\tfor _, rute := range srv.routePuts {\n\t\tvals, ok := rute.parse(req.URL.Path)\n\t\tif ok {\n\t\t\trute.endpoint.call(res, req, srv.evals, vals)\n\t\t\treturn\n\t\t}\n\t}\n\tres.WriteHeader(http.StatusNotFound)\n}", "func (e *Endpoints) Use(m func(goa.Endpoint) goa.Endpoint) {\n\te.Hello = m(e.Hello)\n}", "func (e *Endpoints) Use(m func(goa.Endpoint) goa.Endpoint) {\n\te.DeviceLayout 
= m(e.DeviceLayout)\n\te.FirmwareStatistics = m(e.FirmwareStatistics)\n}", "func (ms *ManagerService) setupRoutes(ws *fibre.WebService) {\n\t// [1, 2]: POST /api/0box/v1/mail/\n\tws.Router.HandleFunc(\"/api/0box/v1/mail/\", ms.PostMailHandler).Methods(\"POST\")\n\n\t// [3]: GET /api/0box/v1/mail/ - return list of boxes\n\tws.Router.HandleFunc(\"/api/0box/v1/mail/\", ms.GetMailboxesHandler).Methods(\"GET\")\n\n\t// [3]: GET /api/0box/v1/mail/<username from /var/spool/mail>[/mail #]\n\tws.Router.HandleFunc(\"/api/0box/v1/mail/{user}\", ms.GetMailUserHandler).Methods(\"GET\")\n\tws.Router.HandleFunc(\"/api/0box/v1/mail/{user}/{number}\", ms.GetMailUserNumberHandler).Methods(\"GET\")\n\n\t// [4]: DELETE /api/0box/v1/mail/<username>/<mail #>\n\tws.Router.HandleFunc(\"/api/0box/v1/mail/{user}\", ms.DeleteMailUserHandler).Methods(\"DELETE\")\n\tws.Router.HandleFunc(\"/api/0box/v1/mail/{user}/{number}\", ms.DeleteMailUserNumberHandler).Methods(\"DELETE\")\n\n\t// [6] Secure the API with an API key for all operations.\n\tws.Apikey = config.Getenv(\"0BOX_API_KEY\", ms.GetSectionPropertyOrDefault(\"0box\", \"apikey\", \"\"))\n\n\tws.Router.Use(ws.LogMiddleware)\n\tws.Router.Use(ws.APIKeyMiddleware)\n\n}", "func (this *Routes) Handler(app *iris.Application) {\n\tcontroller := Controller{DB: this.DB}\n\tapi := app.Party(this.RoutesPrefix + \"/connection\")\n\t{\n\t\tapi.Post(\"/show\", controller.Index)\n\t\tapi.Post(\"/\", controller.Create)\n\t\tapi.Delete(\"/\", controller.Remove)\n\t\tapi.Post(\"/common\", controller.Common)\n\t}\n}", "func main() {\n\t//Advertisement Handler\n\tadHandler := handler.NewAdHandler()\n\t//Regular Expression Handler for matching the pattern of restful type and fetching path params\n\tregexHnd := new(handler.RegexpHandler)\n\n\t//Chain Handlers\n\trequestValidatorHandler := handler.NewRequestValidatorHandler()\n\ttraceableHandler := handler.NewTraceableHandler()\n\theadersHandler := handler.NewResponseHeaderHandler()\n\n\t//Handler Chain 
configuration\n\thandlerChain := requestValidatorHandler.Next(\n\t\t\t\t\t\t\ttraceableHandler.Next(\n\t\t\t\t\t\t\theadersHandler.Next(regexHnd)))\n\n\tregexHnd.HandleFunc(\"/service$\", adHandler.FindAdByServiceHandler)\n\tregexHnd.HandleFunc(\"/service/[a-zA-Z_0-9]*$\", adHandler.FindAdByCategoryHandler)\n\tregexHnd.HandleFunc(\"/service/[a-zA-Z_0-9]*/[a-zA-Z._0-9]*$\", adHandler.SearchAdHandler)\n\tlog.Fatal(http.ListenAndServe(\":8080\", handlerChain))\n}", "func (c *Operation) registerHandler() {\n\t// Add more protocol endpoints here to expose them as controller API endpoints\n\tc.handlers = []rest.Handler{\n\t\tcmdutil.NewHTTPHandler(CreateBlocDIDPath, http.MethodPost, c.CreateTrustBlocDID),\n\t\tcmdutil.NewHTTPHandler(CreatePeerDIDPath, http.MethodPost, c.CreatePeerDID),\n\t}\n}", "func (h *Handler) serveInterfaces(w http.ResponseWriter, r *http.Request) {}", "func (s *Server) setupRoutes() {\n\ts.Router.Static(\"/app\", \"./public\")\n\trouter := s.ApiRouter\n\n\t// This handler will match /user/john but will not match neither /user/ or /user\n\trouter.GET(\"/apps\", func(c *gin.Context) {\n\t\tapps, err := getAllApps()\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tc.JSON(200, apps)\n\t})\n\n\t// This handler will match /user/john but will not match neither /user/ or /user\n\trouter.GET(\"/apps/:id\", func(c *gin.Context) {\n\t\tid := c.Param(\"id\")\n\t\tidInt, err := strconv.Atoi(id)\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tapp, err := getApp(uint(idInt))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tc.JSON(200, app)\n\t})\n\n\t// This handler will match /user/john but will not match neither /user/ or /user\n\trouter.GET(\"/apps/:id/history\", func(c *gin.Context) {\n\t\tid := getId(c.Param(\"id\"))\n\n\t\thistories, err := getAppHistory(uint(id))\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tc.JSON(200, histories)\n\t})\n\n\trouter.POST(\"/apps\", func(c *gin.Context) {\n\t\tvar app = domain.App{}\n\t\tif err := 
c.BindJSON(&app); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\terr := insertApp(&app)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t} else {\n\t\t\tregisterCheck(app)\n\t\t\tc.JSON(http.StatusOK, app)\n\t\t}\n\t})\n\n\trouter.PUT(\"/apps/:id\", func(c *gin.Context) {\n\t\tid := getId(c.Param(\"id\"))\n\n\t\tvar app domain.App\n\t\tif err := c.BindJSON(&app); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\toldApp, _ := getApp(uint(id))\n\n\t\terr := updateApp(uint(id), app)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t} else {\n\t\t\tif app.CheckStatus != oldApp.CheckStatus {\n\t\t\t\tlastApp, _ := getApp(uint(id))\n\t\t\t\tupdateCheck(lastApp)\n\t\t\t}\n\t\t\tc.JSON(http.StatusOK, app)\n\t\t}\n\t})\n\n\trouter.DELETE(\"/apps/:id\", func(c *gin.Context) {\n\t\tid := getId(c.Param(\"id\"))\n\n\t\terr := deleteApp(uint(id))\n\t\tif err != nil {\n\t\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n\t\t} else {\n\t\t\tc.JSON(http.StatusOK, gin.H{\"status\": \"ok\"})\n\t\t}\n\t})\n}", "func (e *Endpoints) Use(m func(goa.Endpoint) goa.Endpoint) {\n\te.Add = m(e.Add)\n}", "func Register(handler Handler) error {\n\tif (handler.Methods & ^ALL) != 0 {\n\t\treturn fmt.Errorf(\"Invalid handler method[s]: %b\", handler.Methods)\n\t}\n\n\tif len(handler.Path) == 0 {\n\t\treturn errors.New(\"Empty path is not supported\")\n\t}\n\tfor method := GET; method < ALL; method <<= 1 {\n\t\tif (method & handler.Methods) != 0 {\n\t\t\terr := register(registries[method], handler)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (srv *Server) handleOptions(res http.ResponseWriter, req *http.Request) {\n\tmethods := make(map[string]bool)\n\n\tnode := srv.getFSNode(req.URL.Path)\n\tif node != nil {\n\t\tmethods[http.MethodGet] = true\n\t\tmethods[http.MethodHead] = true\n\t}\n\n\tfor _, rute := range srv.routeDeletes {\n\t\t_, ok := rute.parse(req.URL.Path)\n\t\tif ok {\n\t\t\tmethods[http.MethodDelete] = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor 
_, rute := range srv.routeGets {\n\t\t_, ok := rute.parse(req.URL.Path)\n\t\tif ok {\n\t\t\tmethods[http.MethodGet] = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor _, rute := range srv.routePatches {\n\t\t_, ok := rute.parse(req.URL.Path)\n\t\tif ok {\n\t\t\tmethods[http.MethodPatch] = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor _, rute := range srv.routePosts {\n\t\t_, ok := rute.parse(req.URL.Path)\n\t\tif ok {\n\t\t\tmethods[http.MethodPost] = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor _, rute := range srv.routePuts {\n\t\t_, ok := rute.parse(req.URL.Path)\n\t\tif ok {\n\t\t\tmethods[http.MethodPut] = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif len(methods) == 0 {\n\t\tres.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\tmethods[http.MethodOptions] = true\n\n\tvar x int\n\tallows := make([]string, len(methods))\n\tfor k, v := range methods {\n\t\tif v {\n\t\t\tallows[x] = k\n\t\t\tx++\n\t\t}\n\t}\n\n\tsort.Strings(allows)\n\n\tres.Header().Set(\"Allow\", strings.Join(allows, \", \"))\n\tres.WriteHeader(http.StatusOK)\n}", "func APILogHandler(c echo.Context, req, res []byte) {\n\tc.Response().Header().Set(\"X-mobileloket-ResponseTime\", time.Now().Format(time.RFC3339))\n\treqTime, err := time.Parse(time.RFC3339, c.Request().Header.Get(\"X-mobileloket-RequestTime\"))\n\tvar elapstime time.Duration\n\tif err == nil {\n\t\telapstime = time.Since(reqTime)\n\t}\n\n\tvar handler string\n\tr := c.Echo().Routes()\n\tcpath := strings.Replace(c.Path(), \"/\", \"\", -1)\n\tfor _, v := range r {\n\t\tvpath := strings.Replace(v.Path, \"/\", \"\", -1)\n\t\tif vpath == cpath && v.Method == c.Request().Method {\n\t\t\thandler = v.Name\n\t\t\t// Handler for wrong route.\n\t\t\tif strings.Contains(handler, \"func1\") {\n\t\t\t\thandler = \"UndefinedRoute\"\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Get Handler Name\n\tdir, file := path.Split(handler)\n\tfileStrings := strings.Split(file, \".\")\n\tpackHandler := dir + fileStrings[0]\n\tfuncHandler := strings.Replace(handler, packHandler+\".\", 
\"\", -1)\n\n\trespHeader, _ := json.Marshal(c.Response().Header())\n\treqHeader := httpdump.DumpRequest(c.Request())\n\n\tInfo().\n\t\tStr(\"Identifier\", viper.GetString(\"log_identifier\")+\"_http\").\n\t\tStr(\"package\", packHandler).\n\t\tInt64(\"elapsed_time\", elapstime.Nanoseconds()/int64(time.Millisecond)).\n\t\tStr(\"handler\", funcHandler).\n\t\tStr(\"ip\", c.RealIP()).\n\t\tStr(\"host\", c.Request().Host).\n\t\tStr(\"method\", c.Request().Method).\n\t\tStr(\"url\", c.Request().RequestURI).\n\t\tStr(\"request_time\", c.Request().Header.Get(\"X-mobileloket-RequestTime\")).\n\t\tStr(\"request_header\", reqHeader).\n\t\tStr(\"request\", string(req)).\n\t\tInt(\"httpcode\", c.Response().Status).\n\t\tStr(\"response_time\", c.Response().Header().Get(\"X-mobileloket-ResponseTime\")).\n\t\tStr(\"response_header\", string(respHeader)).\n\t\tStr(\"response\", string(res)).\n\t\tMsg(\"\")\n}", "func Handler(ctx context.Context, request events.APIGatewayProxyRequest,\n\tdynamoDB *dynamodb.DynamoDB, cfg config.Configuration) (\n\tevents.APIGatewayProxyResponse, error) {\n\n\t//Instantiate item API Handler\n\tih, err := item.New(dynamoDB, cfg.AWS.DynamoDB.Table.Store)\n\tif err != nil {\n\t\treturn web.GetResponse(ctx, err.Error(), http.StatusInternalServerError)\n\t}\n\n\tlog.Debug().Msgf(\"Executing method %s for path: %s with body: %v\",\n\t\trequest.HTTPMethod, request.Path, request.Body)\n\n\tswitch request.HTTPMethod {\n\tcase http.MethodGet:\n\n\t\treturn getItems(ctx, request, ih)\n\n\t}\n\n\t//APIGateway would not allow the function to get to this point\n\t//Since all the supported http methods are in the switch\n\treturn web.GetResponse(ctx, struct{}{}, http.StatusMethodNotAllowed)\n\n}" ]
[ "0.66714734", "0.6644675", "0.635691", "0.6305351", "0.62427664", "0.6203105", "0.61850226", "0.61681867", "0.616791", "0.6160118", "0.6154112", "0.6140186", "0.61255753", "0.6122404", "0.6095826", "0.60942966", "0.6088335", "0.60751134", "0.6050221", "0.6041102", "0.6012753", "0.6000956", "0.5979543", "0.5977173", "0.5973399", "0.595662", "0.5950311", "0.59452564", "0.5939493", "0.59365296", "0.5929921", "0.5919678", "0.5918461", "0.58946687", "0.5893986", "0.5888883", "0.5881638", "0.58463", "0.5835392", "0.5832495", "0.5830086", "0.5827052", "0.58262193", "0.5825027", "0.58203536", "0.58164763", "0.5813495", "0.5810605", "0.58081436", "0.5807829", "0.5796484", "0.5788971", "0.5784645", "0.57843703", "0.5780383", "0.57745236", "0.5766842", "0.57663995", "0.57592183", "0.57573617", "0.5754827", "0.57529074", "0.575217", "0.57411224", "0.5740669", "0.5739638", "0.5737041", "0.57341737", "0.57083714", "0.5699766", "0.5697007", "0.5690279", "0.5689329", "0.5688349", "0.56805706", "0.567741", "0.5676242", "0.56756866", "0.5674213", "0.56739616", "0.56519395", "0.56516767", "0.56502384", "0.56487787", "0.5648212", "0.5648134", "0.5645332", "0.5642332", "0.563684", "0.56345046", "0.5631675", "0.5628056", "0.5626928", "0.5618793", "0.56103325", "0.56079763", "0.5601923", "0.55991876", "0.55980366", "0.55948174", "0.55943006" ]
0.0
-1
ApplyOptions applies given opts and returns the resulting Options. This function should not be used directly by end users; it's only exposed as a side effect of Option.
func ApplyOptions(opt ...Option) Options { opts := Options{ MaxTraversalLinks: math.MaxInt64, //default: traverse all MaxAllowedHeaderSize: carv1.DefaultMaxAllowedHeaderSize, MaxAllowedSectionSize: carv1.DefaultMaxAllowedSectionSize, } for _, o := range opt { o(&opts) } // Set defaults for zero valued fields. if opts.IndexCodec == 0 { opts.IndexCodec = multicodec.CarMultihashIndexSorted } if opts.MaxIndexCidSize == 0 { opts.MaxIndexCidSize = DefaultMaxIndexCidSize } return opts }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *MatchOptions) ApplyOptions(opts []MatchOption) *MatchOptions {\n\tfor _, opt := range opts {\n\t\topt.ApplyToMatcher(o)\n\t}\n\treturn o\n}", "func (o *PatchOptions) ApplyOptions(opts []PatchOption) {\n\tfor _, opt := range opts {\n\t\topt.ApplyToHelper(o)\n\t}\n}", "func (uo *SubResourceUpdateOptions) ApplyOptions(opts []SubResourceUpdateOption) *SubResourceUpdateOptions {\n\tfor _, o := range opts {\n\t\to.ApplyToSubResourceUpdate(uo)\n\t}\n\n\treturn uo\n}", "func (po *SubResourcePatchOptions) ApplyOptions(opts []SubResourcePatchOption) *SubResourcePatchOptions {\n\tfor _, o := range opts {\n\t\to.ApplyToSubResourcePatch(po)\n\t}\n\n\treturn po\n}", "func (o *Options) Apply(opts ...Option) error {\n\tfor i, opt := range opts {\n\t\tif err := opt(o); err != nil {\n\t\t\treturn fmt.Errorf(\"option %d failed: %s\", i, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (o *JSONPb) ApplyOptions(options ...JSONPbOption) *JSONPb {\n\tfor _, opt := range options {\n\t\tif opt == nil {\n\t\t\tcontinue\n\t\t}\n\t\topt.apply(o)\n\t}\n\treturn o\n}", "func (opts *Options) Apply(options ...Option) error {\n\tfor _, o := range options {\n\t\tif err := o(opts); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func ApplyOptions(f *pflag.Flag, opts ...FlagOption) {\n\tfor _, opt := range opts {\n\t\topt(f)\n\t}\n}", "func applyOptions(c *Container, opts ...Option) error {\n\tfor _, opt := range opts {\n\t\tif err := opt.set(c); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (getOpt *SubResourceGetOptions) ApplyOptions(opts []SubResourceGetOption) *SubResourceGetOptions {\n\tfor _, o := range opts {\n\t\to.ApplyToSubResourceGet(getOpt)\n\t}\n\n\treturn getOpt\n}", "func (cfg *Config) Apply(opts ...Option) error {\n\tfor _, opt := range opts {\n\t\tif opt == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif err := opt(cfg); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func NewApplyOptions() *ApplyOptions {\n\treturn &ApplyOptions{}\n}", 
"func (co *SubResourceCreateOptions) ApplyOptions(opts []SubResourceCreateOption) *SubResourceCreateOptions {\n\tfor _, o := range opts {\n\t\to.ApplyToSubResourceCreate(co)\n\t}\n\n\treturn co\n}", "func (o *Options) Apply() {\n\tif o == nil {\n\t\treturn\n\t}\n\tif len(o.ShowHiddenMetricsForVersion) > 0 {\n\t\tSetShowHidden()\n\t}\n\t// set disabled metrics\n\tfor _, metricName := range o.DisabledMetrics {\n\t\tSetDisabledMetric(metricName)\n\t}\n\tif o.AllowListMapping != nil {\n\t\tSetLabelAllowListFromCLI(o.AllowListMapping)\n\t}\n}", "func (o Opts) ApplyOpts(opts *redisconn.Opts) {\n\n\tif o.DB != nil {\n\t\topts.DB = *o.DB\n\t}\n\tif o.WritePause > 0 {\n\t\topts.WritePause = o.WritePause\n\t}\n\tif o.ReconnectPause > 0 {\n\t\topts.ReconnectPause = o.ReconnectPause\n\t}\n\tif o.TCPKeepAlive > 0 {\n\t\topts.TCPKeepAlive = o.TCPKeepAlive\n\t}\n\to.Timeouts.ApplyOpts(opts)\n}", "func (o *Options) Apply(opts ...Option) error {\n\tfor i, opt := range opts {\n\t\tif err := opt(o); err != nil {\n\t\t\treturn fmt.Errorf(\"dht option %d failed: %s\", i, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (o *DB) ApplyOptions(options ...DBOption) *DB {\n\tfor _, opt := range options {\n\t\tif opt == nil {\n\t\t\tcontinue\n\t\t}\n\t\topt.apply(o)\n\t}\n\treturn o\n}", "func Options(opts ...Option) Option {\n\treturn optionFunc(func(app *App) {\n\t\tfor _, opt := range opts {\n\t\t\topt.apply(app)\n\t\t}\n\t})\n}", "func (c *AppConfig) Apply(opts []AppOption) error {\r\n\tfor _, o := range opts {\r\n\t\tif err := o(c); err != nil {\r\n\t\t\treturn err\r\n\t\t}\r\n\t}\r\n\treturn nil\r\n}", "func (ro *RequesterOptions) apply(opts ...RequesterOption) {\n\tfor _, opt := range opts {\n\t\topt(ro)\n\t}\n}", "func (o *DeleteOptions) ApplyOptions(opts []DeleteOption) {\n\tfor _, opt := range opts {\n\t\topt.ApplyToDeleteOptions(o)\n\t}\n}", "func (o optionFunc) ApplyOption(opts *Options) {\n\to(opts)\n}", "func ApplyGaugeOptions(opts *Options, gos ...GaugeOptionApplier) {\n\tfor _, o := 
range gos {\n\t\to.ApplyGaugeOption(opts)\n\t}\n}", "func (o *ListImplementationRevisionsOptions) Apply(opts ...GetImplementationOption) {\n\tfor _, opt := range opts {\n\t\topt(o)\n\t}\n}", "func ApplyOptions() metav1.ApplyOptions {\n\treturn metav1.ApplyOptions{\n\t\tForce: true,\n\t\tFieldManager: ReflectionFieldManager,\n\t}\n}", "func (o *Number[T]) ApplyOptions(options ...NumberOption[T]) *Number[T] {\n\tfor _, opt := range options {\n\t\tif opt == nil {\n\t\t\tcontinue\n\t\t}\n\t\topt.apply(o)\n\t}\n\treturn o\n}", "func ApplyLoggerOpts(opts ...Option) LoggerOpts {\n\t// set some defaults\n\tl := LoggerOpts{\n\t\tAdditionalLocationOffset: 1,\n\t\tIncludeLocation: true,\n\t\tIncludeTime: true,\n\t\tOutput: os.Stderr,\n\t}\n\tfor _, opt := range opts {\n\t\tl = opt(l)\n\t}\n\treturn l\n}", "func (op *ReadOptions) Apply(opts ...ReadOption) {\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n}", "func (r RequestCryptoSignerOpts) ApplyCryptoSignerOpts(opts *crypto.SignerOpts) {\n\t*opts = r.opts\n}", "func (t Timeouts) ApplyOpts(opts *redisconn.Opts) {\n\tif t.Dial > 0 {\n\t\topts.DialTimeout = t.Dial\n\t}\n\tif t.IO > 0 {\n\t\topts.IOTimeout = t.IO\n\t}\n}", "func ApplyCounterOptions(opts *Options, cos ...CounterOptionApplier) {\n\tfor _, o := range cos {\n\t\to.ApplyCounterOption(opts)\n\t}\n}", "func (in *ApplyWaitOptions) DeepCopy() *ApplyWaitOptions {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ApplyWaitOptions)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func Options(opts ...Option) Option {\n\treturn func(s *Settings) error {\n\t\tfor _, opt := range opts {\n\t\t\tif err := opt(s); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n}", "func ApplyOptions(optsGetter Getter, store *genericetcd.Etcd, etcdPrefix string) error {\n\tif store.QualifiedResource.IsEmpty() {\n\t\treturn fmt.Errorf(\"store must have a non-empty qualified resource\")\n\t}\n\tif store.NewFunc == nil {\n\t\treturn fmt.Errorf(\"store for %s must have NewFunc 
set\", store.QualifiedResource.String())\n\t}\n\tif store.NewListFunc == nil {\n\t\treturn fmt.Errorf(\"store for %s must have NewListFunc set\", store.QualifiedResource.String())\n\t}\n\tif store.CreateStrategy == nil {\n\t\treturn fmt.Errorf(\"store for %s must have CreateStrategy set\", store.QualifiedResource.String())\n\t}\n\n\topts, err := optsGetter.GetRESTOptions(store.QualifiedResource)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error building RESTOptions for %s store: %v\", store.QualifiedResource.String(), err)\n\t}\n\n\tstore.DeleteCollectionWorkers = opts.DeleteCollectionWorkers\n\tstore.Storage = opts.Decorator(opts.Storage, UseConfiguredCacheSize, store.NewFunc(), etcdPrefix, store.CreateStrategy, store.NewListFunc)\n\treturn nil\n\n}", "func ApplyMeasureOptions(opts *Options, mos ...MeasureOptionApplier) {\n\tfor _, o := range mos {\n\t\to.ApplyMeasureOption(opts)\n\t}\n}", "func (o Options) Apply(i *Important) {\n\tfor _, opt := range o {\n\t\topt(i)\n\t}\n}", "func (opts Options) ToOption() Option {\n\tvar opt Option\n\tfor idx := range opts {\n\t\tvar val = opts[idx]\n\t\tif val.Code > 0 {\n\t\t\topt.Code = val.Code\n\t\t}\n\n\t\tif val.HTTPCode > 0 {\n\t\t\topt.HTTPCode = val.HTTPCode\n\t\t}\n\n\t\tif val.Message != \"\" {\n\t\t\topt.Message = val.Message\n\t\t}\n\n\t\topt.IsTraced = val.IsTraced\n\n\t}\n\treturn opt\n}", "func (d *OverloadServiceDesc) Apply(oo ...transport.DescOption) {\n\tfor _, o := range oo {\n\t\to.Apply(&d.opts)\n\t}\n}", "func (o *OptionsProvider) Options(opts ...Option) *OptionsProvider {\n\tfor _, opt := range opts {\n\t\toptType := reflect.TypeOf(opt)\n\t\tif _, ok := optType.FieldByName(\"Value\"); !ok {\n\t\t\tpanic(fmt.Sprintf(\"Option %v doesn't have a Value field.\", optType.Name()))\n\t\t}\n\t\tfieldName := o.m[optType.Name()]\n\t\tfield := reflect.ValueOf(o.spec).Elem().FieldByName(fieldName)\n\t\tif !field.CanSet() || !field.IsValid() {\n\t\t\tpanic(fmt.Sprintf(\"There is no option %v.\", 
optType.Name()))\n\t\t}\n\t\tfield.Set(reflect.ValueOf(opt))\n\t\to.set[fieldName] = true\n\t}\n\treturn o\n}", "func (lOpts *logOptions) apply(options ...Option) {\n\tfor _, opt := range options {\n\t\topt(lOpts)\n\t}\n}", "func (r *Repo) Apply(opts ...func(*Repo)) {\n\tif r == nil {\n\t\treturn\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(r)\n\t}\n}", "func Apply(rw ReadWriter, options ...Option) ReadWriter {\n\tfor _, option := range options {\n\t\toption(rw)\n\t}\n\treturn applyOptionsForState(rw)\n}", "func (*SchematicsV1) NewApplyWorkspaceCommandOptions(wID string, refreshToken string) *ApplyWorkspaceCommandOptions {\n\treturn &ApplyWorkspaceCommandOptions{\n\t\tWID: core.StringPtr(wID),\n\t\tRefreshToken: core.StringPtr(refreshToken),\n\t}\n}", "func (opts RequestOpts) Apply(req *http.Request) {\n\t// apply per-request options\n\tfor _, o := range opts {\n\t\tif o != nil {\n\t\t\to(req)\n\t\t}\n\t}\n}", "func flattenOptions(dst, src Options) Options {\n\tfor _, opt := range src {\n\t\tswitch opt := opt.(type) {\n\t\tcase nil:\n\t\t\tcontinue\n\t\tcase Options:\n\t\t\tdst = flattenOptions(dst, opt)\n\t\tcase coreOption:\n\t\t\tdst = append(dst, opt)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid option type: %T\", opt))\n\t\t}\n\t}\n\treturn dst\n}", "func (in *ApplyPruneOptions) DeepCopy() *ApplyPruneOptions {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ApplyPruneOptions)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func CollectOptions(opts ...Option) Options {\n\tvar suiteOpts Options\n\tfor _, o := range opts {\n\t\to(&suiteOpts)\n\t}\n\treturn suiteOpts\n}", "func (o ClusterOpts) ApplyOpts(opts *rediscluster.Opts) {\n\tif o.Name != \"\" {\n\t\topts.Name = o.Name\n\t}\n}", "func MergeUpdateOptions(opts ...*UpdateOptions) *UpdateOptions {\n\tuOpts := Update()\n\tfor _, uo := range opts {\n\t\tif uo == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif uo.ArrayFilters != nil {\n\t\t\tuOpts.ArrayFilters = uo.ArrayFilters\n\t\t}\n\t\tif 
uo.BypassDocumentValidation != nil {\n\t\t\tuOpts.BypassDocumentValidation = uo.BypassDocumentValidation\n\t\t}\n\t\tif uo.Upsert != nil {\n\t\t\tuOpts.Upsert = uo.Upsert\n\t\t}\n\t}\n\n\treturn uOpts\n}", "func WithAttrs(a attrs.Attrs) Option {\n\treturn func(o *Options) {\n\t\to.Attrs = a\n\t}\n}", "func WithOptions(opt CliOptions) Option {\n\treturn func(a *App) {\n\t\ta.options = opt\n\t}\n}", "func (opts *lateInitOptions) apply(opt ...LateInitOption) {\n\tfor _, o := range opt {\n\t\to.apply(opts)\n\t}\n}", "func ApplyGatewayDefaultOptions(opts *Options) error {\n\tif opts.Stack.Tenants == nil {\n\t\treturn nil\n\t}\n\n\tif !opts.Gates.OpenShift.Enabled {\n\t\treturn nil\n\t}\n\n\to := openshift.NewOptions(\n\t\topts.Name,\n\t\topts.Namespace,\n\t\tGatewayName(opts.Name),\n\t\tserviceNameGatewayHTTP(opts.Name),\n\t\tgatewayHTTPPortName,\n\t\topts.Timeouts.Gateway.WriteTimeout,\n\t\tComponentLabels(LabelGatewayComponent, opts.Name),\n\t\tRulerName(opts.Name),\n\t)\n\n\tswitch opts.Stack.Tenants.Mode {\n\tcase lokiv1.Static, lokiv1.Dynamic:\n\t\t// Do nothing as per tenants provided by LokiStack CR\n\tcase lokiv1.OpenshiftLogging, lokiv1.OpenshiftNetwork:\n\t\ttenantData := make(map[string]openshift.TenantData)\n\t\tfor name, tenant := range opts.Tenants.Configs {\n\t\t\ttenantData[name] = openshift.TenantData{\n\t\t\t\tCookieSecret: tenant.OpenShift.CookieSecret,\n\t\t\t}\n\t\t}\n\n\t\to.WithTenantsForMode(opts.Stack.Tenants.Mode, opts.GatewayBaseDomain, tenantData)\n\t}\n\n\tif err := mergo.Merge(&opts.OpenShiftOptions, o, mergo.WithOverride); err != nil {\n\t\treturn kverrors.Wrap(err, \"failed to merge defaults for mode openshift\")\n\t}\n\n\treturn nil\n}", "func (opts *Opts) updateOptions() {\n\tfor option := range opts.rs.edges {\n\t\tif _, ok := opts.options[option]; !ok {\n\t\t\t// by default the option state is false\n\t\t\topts.options[option] = false\n\t\t\t// if a parent exist the new option takes its state\n\t\t\tif len(opts.rs.edges[option]) > 0 
{\n\t\t\t\tparent := opts.rs.edges[option][0] // TODO: what if there are multiple parents?\n\t\t\t\topts.options[option] = opts.options[parent]\n\t\t\t}\n\t\t}\n\t}\n\n\tfor option := range opts.rs.conflicts {\n\t\tif _, ok := opts.options[option]; !ok {\n\t\t\topts.options[option] = false\n\t\t}\n\t}\n}", "func CollectOptions(opts []MarshalOption) (*MarshalConfig, error) {\n\tvar cfg MarshalConfig\n\tfor _, o := range opts {\n\t\tif err := o(&cfg); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t}\n\treturn &cfg, nil\n}", "func (s *snapshot) ApplyOptions() Actionable {\n\ts.options = s.context.Options().(*options.SnapshotOptions)\n\n\tif len(s.options.Name) == 0 {\n\t\ts.options.Name = \"github.com/alejandro-carstens/scrubber-\" + time.Now().Format(\"1992-06-02\")\n\t}\n\n\tif !s.options.Exists(\"wait_for_completion\") {\n\t\ts.options.WaitForCompletion = DEFAULT_WAIT_FOR_COMPLETION\n\t}\n\n\tif !s.options.WaitForCompletion {\n\t\tif !s.options.Exists(\"max_wait\") {\n\t\t\ts.options.MaxWait = DEFAULT_MAX_WAIT\n\t\t}\n\n\t\tif !s.options.Exists(\"wait_interval\") {\n\t\t\ts.options.WaitInterval = DEFAULT_WAIT_INTERVAL\n\t\t}\n\t}\n\n\ts.indexer.SetOptions(&golastic.IndexOptions{\n\t\tTimeout: s.options.TimeoutInSeconds(),\n\t\tWaitForCompletion: s.options.WaitForCompletion,\n\t\tPartial: s.options.Partial,\n\t\tIncludeGlobalState: s.options.IncludeGlobalState,\n\t\tIgnoreUnavailable: s.options.IgnoreUnavailable,\n\t})\n\n\treturn s\n}", "func (f OptionFunc) Apply(ratelimiter Ratelimiter) error {\n\treturn f(ratelimiter)\n}", "func ApplyAll(rw ReadWriter, optionSets ...[]Option) ReadWriter {\n\toptions := []Option{}\n\tfor _, optionSet := range optionSets {\n\t\toptions = append(options, optionSet...)\n\t}\n\treturn Apply(rw, options...)\n}", "func transformOptions(options ...interface{}) map[string]interface{} {\n\tvar base map[string]interface{}\n\tvar option interface{}\n\t// Case 1: No options are given\n\tif len(options) == 0 {\n\t\treturn 
make(map[string]interface{})\n\t}\n\tif len(options) == 1 {\n\t\t// Case 2: a single value (either struct or map) is given.\n\t\tbase = make(map[string]interface{})\n\t\toption = options[0]\n\t} else if len(options) == 2 {\n\t\t// Case 3: two values are given. The first one needs to be a map and the\n\t\t// second one can be a struct or map. It will be then get merged into the first\n\t\t// base map.\n\t\tbase = transformStructIntoMapIfNeeded(options[0])\n\t\toption = options[1]\n\t}\n\tv := reflect.ValueOf(option)\n\tif v.Kind() == reflect.Slice {\n\t\tif v.Len() == 0 {\n\t\t\treturn base\n\t\t}\n\t\toption = v.Index(0).Interface()\n\t}\n\n\tif option == nil {\n\t\treturn base\n\t}\n\tv = reflect.ValueOf(option)\n\n\tif v.Kind() == reflect.Ptr {\n\t\tv = v.Elem()\n\t}\n\n\toptionMap := transformStructIntoMapIfNeeded(v.Interface())\n\tfor key, value := range optionMap {\n\t\tbase[key] = value\n\t}\n\treturn base\n}", "func mergeOptions(pas []v1.Patch) []resource.ApplyOption {\n\topts := make([]resource.ApplyOption, 0, len(pas))\n\tfor _, p := range pas {\n\t\tif p.Policy == nil || p.ToFieldPath == nil {\n\t\t\tcontinue\n\t\t}\n\t\topts = append(opts, withMergeOptions(*p.ToFieldPath, p.Policy.MergeOptions))\n\t}\n\treturn opts\n}", "func getOpts(opt ...wrapping.Option) (*options, error) {\n\t// First, separate out options into local and global\n\topts := getDefaultOptions()\n\tvar wrappingOptions []wrapping.Option\n\tvar localOptions []OptionFunc\n\tfor _, o := range opt {\n\t\tif o == nil {\n\t\t\tcontinue\n\t\t}\n\t\tiface := o()\n\t\tswitch to := iface.(type) {\n\t\tcase wrapping.OptionFunc:\n\t\t\twrappingOptions = append(wrappingOptions, o)\n\t\tcase OptionFunc:\n\t\t\tlocalOptions = append(localOptions, to)\n\t\t}\n\t}\n\n\t// Parse the global options\n\tvar err error\n\topts.Options, err = wrapping.GetOpts(wrappingOptions...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Don't ever return blank options\n\tif opts.Options == nil {\n\t\topts.Options = 
new(wrapping.Options)\n\t}\n\n\t// Local options can be provided either via the WithConfigMap field\n\t// (for over the plugin barrier or embedding) or via local option functions\n\t// (for embedding). First pull from the option.\n\tif opts.WithConfigMap != nil {\n\t\tfor k, v := range opts.WithConfigMap {\n\t\t\tswitch k {\n\t\t\tcase \"key_not_required\":\n\t\t\t\tkeyNotRequired, err := strconv.ParseBool(v)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\topts.withKeyNotRequired = keyNotRequired\n\t\t\tcase \"user_agent\":\n\t\t\t\topts.withUserAgent = v\n\t\t\tcase \"credentials\":\n\t\t\t\topts.withCredentials = v\n\t\t\tcase \"project\":\n\t\t\t\topts.withProject = v\n\t\t\tcase \"region\":\n\t\t\t\topts.withRegion = v\n\t\t\tcase \"key_ring\":\n\t\t\t\topts.withKeyRing = v\n\t\t\tcase \"crypto_key\":\n\t\t\t\topts.withCryptoKey = v\n\t\t\t}\n\t\t}\n\t}\n\n\t// Now run the local options functions. This may overwrite options set by\n\t// the options above.\n\tfor _, o := range localOptions {\n\t\tif o != nil {\n\t\t\tif err := o(&opts); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &opts, nil\n}", "func (ca *Appender) SetOptions(args ...interface{}) *Appender {\n\tops, idx, _ := driver.ArgsToMap(args...)\n\tfor _, k := range idx {\n\t\tca.Set(k, ops[k])\n\t}\n\treturn ca\n}", "func (bo *BoolOptions) Apply(n models.ConfigurationMap, changed ChangedFunc, data interface{}) int {\n\tchanges := []changedOptions{}\n\n\tbo.optsMU.Lock()\n\tfor k, v := range n {\n\t\tval, ok := bo.Opts[k]\n\n\t\tif boolVal, _ := NormalizeBool(v); boolVal {\n\t\t\t/* Only enable if not enabled already */\n\t\t\tif !ok || !val {\n\t\t\t\tbo.enable(k)\n\t\t\t\tchanges = append(changes, changedOptions{key: k, value: true})\n\t\t\t}\n\t\t} else {\n\t\t\t/* Only disable if enabled already */\n\t\t\tif ok && val {\n\t\t\t\tbo.disable(k)\n\t\t\t\tchanges = append(changes, changedOptions{key: k, value: 
false})\n\t\t\t}\n\t\t}\n\t}\n\tbo.optsMU.Unlock()\n\n\tfor _, change := range changes {\n\t\tchanged(change.key, change.value, data)\n\t}\n\n\treturn len(changes)\n}", "func (e *ExternalService) Apply(opts ...func(*ExternalService)) {\n\tif e == nil {\n\t\treturn\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(e)\n\t}\n}", "func CreateOptions(opts ...Option) *options {\n\tallOpts := &options{}\n\n\tfor _, opt := range opts {\n\t\topt.apply(allOpts)\n\t}\n\n\tsetDefaultOpts(allOpts)\n\treturn allOpts\n}", "func (c *Currency) Option(opts ...OptionFunc) (previous OptionFunc) {\n\tfor _, o := range opts {\n\t\tif o != nil {\n\t\t\tprevious = o(c)\n\t\t}\n\t}\n\treturn previous\n}", "func CommonOptions(ctx context.Context, scope kouch.TargetScope, flags *pflag.FlagSet) (*kouch.Options, error) {\n\to := kouch.NewOptions()\n\tvar err error\n\to.Target, err = kouch.NewTarget(ctx, scope, flags)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif e := o.SetParam(flags, kouch.FlagRev); e != nil {\n\t\treturn nil, e\n\t}\n\tif e := setAutoRev(ctx, o, flags); e != nil {\n\t\treturn nil, e\n\t}\n\n\treturn o, nil\n}", "func newOptions(opts ...Option) Options {\n\topt := Options{}\n\n\tfor _, o := range opts {\n\t\to(&opt)\n\t}\n\n\treturn opt\n}", "func newOptions(opts ...Option) Options {\n\topt := Options{}\n\n\tfor _, o := range opts {\n\t\to(&opt)\n\t}\n\n\treturn opt\n}", "func NewOptions(opts ...Option) Options {\n\toptions := Options{\n\t\tContext: context.Background(),\n\t\tContentType: DefaultContentType,\n\t\tCodecs: make(map[string]codec.Codec),\n\t\tCallOptions: CallOptions{\n\t\t\tContext: context.Background(),\n\t\t\tBackoff: DefaultBackoff,\n\t\t\tRetry: DefaultRetry,\n\t\t\tRetries: DefaultRetries,\n\t\t\tRequestTimeout: DefaultRequestTimeout,\n\t\t\tDialTimeout: transport.DefaultDialTimeout,\n\t\t},\n\t\tLookup: LookupRoute,\n\t\tPoolSize: DefaultPoolSize,\n\t\tPoolTTL: DefaultPoolTTL,\n\t\tSelector: random.NewSelector(),\n\t\tLogger: 
logger.DefaultLogger,\n\t\tBroker: broker.DefaultBroker,\n\t\tMeter: meter.DefaultMeter,\n\t\tTracer: tracer.DefaultTracer,\n\t}\n\n\tfor _, o := range opts {\n\t\to(&options)\n\t}\n\n\treturn options\n}", "func NewOptions() *Options {\n\treturn &Options{\n\t\tOverwrite: true,\n\t\tmergeFuncs: newFuncSelector(),\n\t}\n}", "func (c *MockClient) ApplyOption(opt MockClientOption) {\n\topt(c)\n}", "func newOptions(opts ...Option) Options {\n\topt := Options{}\n\n\tfor _, o := range opts {\n\t\to(&opt)\n\t}\n\treturn opt\n}", "func (e *Exclusive) CallWithOptions(options ...ExclusiveOption) <-chan *ExclusiveOutcome {\n\tvar config exclusiveConfig\n\tfor _, option := range options {\n\t\toption(&config)\n\t}\n\tfor _, wrapper := range config.wrappers {\n\t\tconfig.work = wrapper(config.work)\n\t}\n\tconfig.wrappers = nil\n\treturn e.call(config)\n}", "func BindOptions(fs *flag.FlagSet) *Options {\n\to := Options{}\n\tfs.StringVar(&o.SubDir, \"sub-dir\", \"\", \"Optional sub-directory of the job's path to which artifacts are uploaded\")\n\n\tfs.StringVar(&o.PathStrategy, \"path-strategy\", pathStrategyExplicit, \"how to encode org and repo into GCS paths\")\n\tfs.StringVar(&o.DefaultOrg, \"default-org\", \"\", \"optional default org for GCS path encoding\")\n\tfs.StringVar(&o.DefaultRepo, \"default-repo\", \"\", \"optional default repo for GCS path encoding\")\n\n\tfs.StringVar(&o.GcsBucket, \"gcs-bucket\", \"\", \"GCS bucket to upload into\")\n\tfs.StringVar(&o.GceCredentialsFile, \"gcs-credentials-file\", \"\", \"file where Google Cloud authentication credentials are stored\")\n\tfs.BoolVar(&o.DryRun, \"dry-run\", true, \"do not interact with GCS\")\n\treturn &o\n}", "func WithCallOptions(opts []grpc.CallOption) Option {\n\treturn func(c *cfg) {\n\t\tc.callOpts = opts\n\t}\n}", "func WithCallOptions(opts []grpc.CallOption) Option {\n\treturn func(c *cfg) {\n\t\tc.callOpts = opts\n\t}\n}", "func fromOptions(options []Option) *baseSettings {\n\t// Start from the default 
options:\n\topts := &baseSettings{\n\t\tconsumerOptions: []consumer.Option{consumer.WithCapabilities(consumer.Capabilities{MutatesData: true})},\n\t}\n\n\tfor _, op := range options {\n\t\top(opts)\n\t}\n\n\treturn opts\n}", "func WithDialOpts(d ...grpc.DialOption) Option {\n\treturn func(opts *backendOptions) {\n\t\topts.dialOpts = append(opts.dialOpts, d...)\n\t}\n}", "func NewRequestOptions(opts []RequestOption) *RequestOptions {\n\toptions := &RequestOptions{\n\t\tcache: true,\n\t}\n\tfor _, o := range opts {\n\t\to.apply(options)\n\t}\n\treturn options\n}", "func makeOptions(opts ...ClientOption) (*clientOptions, error) {\n\to := &clientOptions{}\n\tfor _, opt := range opts {\n\t\tif err := opt.ApplyToGithubClientOptions(o); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn o, nil\n}", "func ApplyLayerWithOpts(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts []snapshots.Opt, applyOpts []diff.ApplyOpt) (bool, error) {\n\tvar (\n\t\tchainID = identity.ChainID(append(chain, layer.Diff.Digest)).String()\n\t\tapplied bool\n\t)\n\tif _, err := sn.Stat(ctx, chainID); err != nil {\n\t\tif !errdefs.IsNotFound(err) {\n\t\t\treturn false, fmt.Errorf(\"failed to stat snapshot %s: %w\", chainID, err)\n\t\t}\n\n\t\tif err := applyLayers(ctx, []Layer{layer}, append(chain, layer.Diff.Digest), sn, a, opts, applyOpts); err != nil {\n\t\t\tif !errdefs.IsAlreadyExists(err) {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t} else {\n\t\t\tapplied = true\n\t\t}\n\t}\n\treturn applied, nil\n\n}", "func (c *Default) Update(opts ...DefaultOption) {\n\tfor _, opt := range opts {\n\t\topt.ApplyDefault(c)\n\t}\n}", "func SetOptions(opts ...optionFn) {\n\tfor _, opt := range opts {\n\t\topt()\n\t}\n}", "func withMergeOptions(opts ...func(*mergo.Config)) func(*mergeConfig) {\n\treturn func(config *mergeConfig) {\n\t\tconfig.mergeOptions = opts\n\t}\n}", "func getOpts(opt ...wrapping.Option) (*options, error) {\n\t// First, separate 
out options into local and global\n\topts := getDefaultOptions()\n\tvar wrappingOptions []wrapping.Option\n\tvar localOptions []OptionFunc\n\tfor _, o := range opt {\n\t\tif o == nil {\n\t\t\tcontinue\n\t\t}\n\t\tiface := o()\n\t\tswitch to := iface.(type) {\n\t\tcase wrapping.OptionFunc:\n\t\t\twrappingOptions = append(wrappingOptions, o)\n\t\tcase OptionFunc:\n\t\t\tlocalOptions = append(localOptions, to)\n\t\t}\n\t}\n\n\t// Parse the global options\n\tvar err error\n\topts.Options, err = wrapping.GetOpts(wrappingOptions...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Don't ever return blank options\n\tif opts.Options == nil {\n\t\topts.Options = new(wrapping.Options)\n\t}\n\n\t// Local options can be provided either via the WithConfigMap field\n\t// (for over the plugin barrier or embedding) or via local option functions\n\t// (for embedding). First pull from the option.\n\tif opts.WithConfigMap != nil {\n\t\tvar err error\n\t\tfor k, v := range opts.WithConfigMap {\n\t\t\tswitch k {\n\t\t\tcase \"aead_type\":\n\t\t\t\topts.WithAeadType = wrapping.AeadTypeMap(v)\n\t\t\tcase \"hash_type\":\n\t\t\t\topts.WithHashType = wrapping.HashTypeMap(v)\n\t\t\tcase \"key\":\n\t\t\t\topts.WithKey, err = base64.StdEncoding.DecodeString(v)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error base64-decoding key value: %w\", err)\n\t\t\t\t}\n\t\t\tcase \"salt\":\n\t\t\t\topts.WithSalt, err = base64.StdEncoding.DecodeString(v)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error base64-decoding salt value: %w\", err)\n\t\t\t\t}\n\t\t\tcase \"info\":\n\t\t\t\topts.WithInfo, err = base64.StdEncoding.DecodeString(v)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error base64-decoding info value: %w\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Now run the local options functions. 
This may overwrite options set by\n\t// the options above.\n\tfor _, o := range localOptions {\n\t\tif o != nil {\n\t\t\tif err := o(&opts); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &opts, nil\n}", "func (c *Config) SetOpt(opts ...Option) Option {\n\t// apply all the options, and replace each with its inverse\n\tfor i, opt := range opts {\n\t\topts[i] = opt(c)\n\t}\n\n\tfor i, j := 0, len(opts)-1; i <= j; i, j = i+1, j-1 {\n\t\topts[i], opts[j] = opts[j], opts[i]\n\t}\n\n\treturn func(c *Config) Option {\n\t\treturn c.SetOpt(opts...)\n\t}\n}", "func getOpts(opt ...wrapping.Option) (*options, error) {\n\t// First, separate out options into local and global\n\topts := getDefaultOptions()\n\tvar wrappingOptions []wrapping.Option\n\tvar localOptions []OptionFunc\n\tfor _, o := range opt {\n\t\tif o == nil {\n\t\t\tcontinue\n\t\t}\n\t\tiface := o()\n\t\tswitch to := iface.(type) {\n\t\tcase wrapping.OptionFunc:\n\t\t\twrappingOptions = append(wrappingOptions, o)\n\t\tcase OptionFunc:\n\t\t\tlocalOptions = append(localOptions, to)\n\t\t}\n\t}\n\n\t// Parse the global options\n\tvar err error\n\topts.Options, err = wrapping.GetOpts(wrappingOptions...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Don't ever return blank options\n\tif opts.Options == nil {\n\t\topts.Options = new(wrapping.Options)\n\t}\n\n\t// Local options can be provided either via the WithConfigMap field\n\t// (for over the plugin barrier or embedding) or via local option functions\n\t// (for embedding). 
First pull from the option.\n\tif opts.WithConfigMap != nil {\n\t\tfor k, v := range opts.WithConfigMap {\n\t\t\tswitch k {\n\t\t\tcase \"mount_path\":\n\t\t\t\topts.withMountPath = v\n\t\t\tcase \"key_name\":\n\t\t\t\topts.withKeyName = v\n\t\t\tcase \"disable_renewal\":\n\t\t\t\topts.withDisableRenewal = v\n\t\t\tcase \"namespace\":\n\t\t\t\topts.withNamespace = v\n\t\t\tcase \"address\":\n\t\t\t\topts.withAddress = v\n\t\t\tcase \"tls_ca_cert\":\n\t\t\t\topts.withTlsCaCert = v\n\t\t\tcase \"tls_ca_path\":\n\t\t\t\topts.withTlsCaPath = v\n\t\t\tcase \"tls_client_cert\":\n\t\t\t\topts.withTlsClientCert = v\n\t\t\tcase \"tls_client_key\":\n\t\t\t\topts.withTlsClientKey = v\n\t\t\tcase \"tls_server_name\":\n\t\t\t\topts.withTlsServerName = v\n\t\t\tcase \"tls_skip_verify\":\n\t\t\t\tvar err error\n\t\t\t\topts.withTlsSkipVerify, err = strconv.ParseBool(v)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\tcase \"token\":\n\t\t\t\topts.withToken = v\n\t\t\t}\n\t\t}\n\t}\n\n\t// Now run the local options functions. 
This may overwrite options set by\n\t// the options above.\n\tfor _, o := range localOptions {\n\t\tif o != nil {\n\t\t\tif err := o(&opts); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &opts, nil\n}", "func SetOptions(c Conv, opts []SchemaOption) Conv {\n\tfor _, opt := range opts {\n\t\tc = opt(c)\n\t}\n\treturn c\n}", "func NewOptions(opts ...Option) Options {\n\toptions := Options{\n\t\tLogger: logger.DefaultLogger,\n\t\tMeter: meter.DefaultMeter,\n\t\tTracer: tracer.DefaultTracer,\n\t\tContext: context.Background(),\n\t}\n\tfor _, o := range opts {\n\t\to(&options)\n\t}\n\treturn options\n}", "func WithConnectionOptions(fn func(...ConnectionOption) []ConnectionOption) ServerOption {\n\treturn func(cfg *serverConfig) {\n\t\tcfg.connectionOpts = fn(cfg.connectionOpts...)\n\t}\n}", "func (c *operatorConfig) apply(options []OperatorOption) {\n\tfor _, option := range options {\n\t\toption(c)\n\t}\n}", "func (opts *clientOptions) ApplyToGithubClientOptions(target *clientOptions) error {\n\t// Apply common values, if any\n\tif err := opts.CommonClientOptions.ApplyToCommonClientOptions(&target.CommonClientOptions); err != nil {\n\t\treturn err\n\t}\n\n\tif opts.AuthTransport != nil {\n\t\t// Make sure the user didn't specify the AuthTransport twice\n\t\tif target.AuthTransport != nil {\n\t\t\treturn fmt.Errorf(\"option AuthTransport already configured: %w\", gitprovider.ErrInvalidClientOptions)\n\t\t}\n\t\ttarget.AuthTransport = opts.AuthTransport\n\t}\n\n\tif opts.EnableConditionalRequests != nil {\n\t\t// Make sure the user didn't specify the EnableConditionalRequests twice\n\t\tif target.EnableConditionalRequests != nil {\n\t\t\treturn fmt.Errorf(\"option EnableConditionalRequests already configured: %w\", gitprovider.ErrInvalidClientOptions)\n\t\t}\n\t\ttarget.EnableConditionalRequests = opts.EnableConditionalRequests\n\t}\n\treturn nil\n}", "func (o Options) Copy() Options {\n\tto := o\n\tto.APIOptions = make([]func(*middleware.Stack) 
error, len(o.APIOptions))\n\tcopy(to.APIOptions, o.APIOptions)\n\treturn to\n}", "func WithConnectionOptions(fn func(...ConnectionOption) []ConnectionOption) ServerOption {\n\treturn func(cfg *serverConfig) error {\n\t\tcfg.connectionOpts = fn(cfg.connectionOpts...)\n\t\treturn nil\n\t}\n}", "func (o *EphemeralVolumeControllerOptions) ApplyTo(cfg *ephemeralvolumeconfig.EphemeralVolumeControllerConfiguration) error {\n\tif o == nil {\n\t\treturn nil\n\t}\n\n\tcfg.ConcurrentEphemeralVolumeSyncs = o.ConcurrentEphemeralVolumeSyncs\n\n\treturn nil\n}", "func (cu *CodeUtils) HandleOptions(args []string) error {\n\tvar name, value string\nnext:\n\tfor _, a := range args {\n\t\tparts := strings.SplitN(a, \"=\", 2)\n\t\tswitch len(parts) {\n\t\tcase 0:\n\t\t\tcontinue\n\t\tcase 1:\n\t\t\tname, value = parts[0], \"\"\n\t\tcase 2:\n\t\t\tname, value = parts[0], parts[1]\n\t\t}\n\n\t\tfor _, p := range allParams {\n\t\t\tif p.match(name) {\n\t\t\t\terr := p.action(value, cu)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcu.Info(\"option:\", a)\n\t\t\t\tcontinue next\n\t\t\t}\n\t\t}\n\t\tcu.Info(\"unsupported option:\", a)\n\t}\n\treturn nil\n}", "func NewOptions(opts ...Option) Options {\n\toptions := Options{\n\t\tLogger: logger.DefaultLogger,\n\t\tMeter: meter.DefaultMeter,\n\t\tTracer: tracer.DefaultTracer,\n\t}\n\n\tfor _, o := range opts {\n\t\to(&options)\n\t}\n\n\treturn options\n}", "func (sso *StartSpanOptions) Apply(opt opentracing.StartSpanOption) {\n\topt.Apply(&sso.OpenTracingOptions)\n\tif o, ok := opt.(StartSpanOption); ok {\n\t\to.ApplyBP(sso)\n\t}\n}", "func (c ConvertOptions) With(other ConvertOptions) ConvertOptions {\n\tif other.Compile != nil {\n\t\tc.Compile = other.Compile\n\t}\n\tif other.FilterName != \"\" {\n\t\tc.FilterName = other.FilterName\n\t}\n\tif other.Skip != \"\" {\n\t\tc.Skip = other.Skip\n\t}\n\treturn c\n}" ]
[ "0.7181345", "0.7011728", "0.66453886", "0.6608931", "0.66003937", "0.6593034", "0.6515448", "0.6360162", "0.6322618", "0.63175213", "0.6217988", "0.6186789", "0.6160557", "0.6152657", "0.6120525", "0.60960215", "0.6058859", "0.6029311", "0.5828136", "0.57501745", "0.5745696", "0.5742247", "0.56983376", "0.5643337", "0.5609597", "0.5531804", "0.5448485", "0.54356515", "0.54131013", "0.540391", "0.5386442", "0.5325901", "0.5308208", "0.520992", "0.5200191", "0.5185803", "0.51783687", "0.5148837", "0.50565696", "0.50540763", "0.50334805", "0.50289416", "0.4983286", "0.4933984", "0.4929008", "0.48693347", "0.4865718", "0.48382825", "0.48070213", "0.4793664", "0.4750733", "0.47409517", "0.47389492", "0.47353494", "0.47283638", "0.47254065", "0.47239", "0.47203282", "0.47198975", "0.47059202", "0.4696868", "0.46959975", "0.46549916", "0.46395975", "0.46263498", "0.46220234", "0.4616623", "0.46030417", "0.46030417", "0.46020612", "0.45982587", "0.4577735", "0.45724326", "0.45720658", "0.4551774", "0.45450997", "0.45450997", "0.4539589", "0.4536336", "0.45318884", "0.45136616", "0.45058492", "0.44990933", "0.44981775", "0.44972005", "0.44890592", "0.4473952", "0.44733822", "0.44524708", "0.44510737", "0.44502664", "0.44452807", "0.4443537", "0.4439175", "0.44295833", "0.4425762", "0.44164675", "0.44137615", "0.44060475", "0.43801886" ]
0.6390833
7
ZeroLengthSectionAsEOF sets whether to allow the CARv1 decoder to treat a zerolength section as the end of the input CAR file. For example, this can be useful to allow "null padding" after a CARv1 without knowing where the padding begins.
func ZeroLengthSectionAsEOF(enable bool) Option { return func(o *Options) { o.ZeroLengthSectionAsEOF = enable } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestReadEmptyAtEOF(t *testing.T) {\n\tb := new(Builder)\n\tslice := make([]byte, 0)\n\tn, err := b.Read(slice)\n\tif err != nil {\n\t\tt.Errorf(\"read error: %v\", err)\n\t}\n\tif n != 0 {\n\t\tt.Errorf(\"wrong count; got %d want 0\", n)\n\t}\n}", "func IsEOF(c rune, n int) bool {\n\treturn n == 0\n}", "func (*testObject) MaxHeaderLength() uint16 {\n\treturn 0\n}", "func IsZeroFilled(b []byte) bool {\n\thdr := (*reflect.SliceHeader)((unsafe.Pointer)(&b))\n\tdata := unsafe.Pointer(hdr.Data)\n\tlength := hdr.Len\n\tif length == 0 {\n\t\treturn true\n\t}\n\n\tif uintptr(data)&0x07 != 0 {\n\t\t// the data is not aligned, fallback to a simple way\n\t\treturn isZeroFilledSimple(b)\n\t}\n\n\tdataEnd := uintptr(data) + uintptr(length)\n\tdataWordsEnd := uintptr(dataEnd) & ^uintptr(0x07)\n\t// example:\n\t//\n\t// 012345678901234567\n\t// wwwwwwwwWWWWWWWWtt : w -- word 0; W -- word 1; t -- tail\n\t// ^\n\t// |\n\t// +-- dataWordsEnd\n\tfor ; uintptr(data) < dataWordsEnd; data = unsafe.Pointer(uintptr(data) + 8) {\n\t\tif *(*uint64)(data) != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor ; uintptr(data) < dataEnd; data = unsafe.Pointer(uintptr(data) + 1) {\n\t\tif *(*uint8)(data) != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func TestZeroLength(t *testing.T) {\n\tkey1, err := NewFixedLengthKeyFromReader(os.Stdin, 0)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer key1.Wipe()\n\tif key1.data != nil {\n\t\tt.Error(\"Fixed length key from reader contained data\")\n\t}\n\n\tkey2, err := NewKeyFromReader(bytes.NewReader(nil))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer key2.Wipe()\n\tif key2.data != nil {\n\t\tt.Error(\"Key from empty reader contained data\")\n\t}\n}", "func (d *Decoder) ZeroEmpty(z bool) {\n\td.zeroEmpty = z\n}", "func forceEOF(yylex interface{}) {\n\tyylex.(*Tokenizer).ForceEOF = true\n}", "func forceEOF(yylex interface{}) {\n\tyylex.(*Tokenizer).ForceEOF = true\n}", "func forceEOF(yylex interface{}) {\n\tyylex.(*Tokenizer).ForceEOF 
= true\n}", "func forceEOF(yylex interface{}) {\n\tyylex.(*Tokenizer).ForceEOF = true\n}", "func forceEOF(yylex interface{}) {\n\tyylex.(*Tokenizer).ForceEOF = true\n}", "func TestIgnoreTruncatedPacketEOF(t *testing.T) {\n\toutputFile, err := ioutil.TempFile(\"\", \"joincap_output_\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\toutputFile.Close()\n\tdefer os.Remove(outputFile.Name())\n\n\terr = joincap([]string{\"joincap\",\n\t\t\"-v\", \"-w\", outputFile.Name(),\n\t\t\"test_pcaps/unexpected_eof_on_second_packet.pcap\"})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttestIsOrdered(t, outputFile.Name())\n\n\tif packetCount(t, outputFile.Name()) != 1 {\n\t\tt.Fatal(\"error counting\")\n\t}\n}", "func (f *Feature) EndZero() uint64 {\n\treturn f.StartZero()\n}", "func (ps *Parser) EOF() bool {\n\treturn ps.Offset >= len(ps.Runes)\n}", "func ReadSection0(reader io.Reader) (section0 Section0, err error) {\n\tsection0.Indicator = 255\n\terr = binary.Read(reader, binary.BigEndian, &section0)\n\tif err != nil {\n\t\treturn section0, err\n\t}\n\n\tif section0.Indicator == Grib {\n\t\tif section0.Edition != SupportedGribEdition {\n\t\t\treturn section0, fmt.Errorf(\"Unsupported grib edition %d\", section0.Edition)\n\t\t}\n\t} else {\n\t\treturn section0, fmt.Errorf(\"Unsupported grib indicator %d\", section0.Indicator)\n\t}\n\n\treturn\n\n}", "func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }", "func (suite *RunePartTestSuite) TestReadToZeroLengthBuffer() {\n\tpart := runePart{runeVal: 'a'}\n\tbuff := make([]byte, 0, 0)\n\tcount, err := part.Read(buff)\n\tsuite.Nil(err)\n\tsuite.Equal(0, count)\n\tsuite.Equal(\"\", string(buff))\n}", "func AtEnd() OffsetOpt {\n\treturn offsetOpt{func(o *Offset) { o.request = -1 }}\n}", "func ZeroHeader() Header {\n\treturn Header{}\n}", "func eof(err error) bool { return err == io.EOF }", "func (h Header) IsOneway() bool {\n\treturn h[2]&0x20 == 0x20\n}", "func fmtChunkWithExtraOfZeroLen(t *testing.T) io.Reader {\n\tsrc := 
&bytes.Buffer{}\n\ttest.ReadFrom(t, src, Uint32(IDfmt)) // ( 0) 4 - Chunk ID\n\ttest.WriteUint32LE(t, src, 16+2) // ( 4) 4 - Chunk size\n\ttest.WriteUint16LE(t, src, CompPCM) // ( 6) 2 - CompCode\n\ttest.WriteUint16LE(t, src, 1) // ( 8) 2 - ChannelCnt\n\ttest.WriteUint32LE(t, src, 44100) // (10) 4 - SampleRate\n\ttest.WriteUint32LE(t, src, 88200) // (14) 4 - AvgByteRate\n\ttest.WriteUint16LE(t, src, 2) // (18) 2 - BlockAlign\n\ttest.WriteUint16LE(t, src, 16) // (20) 2 - BitsPerSample\n\ttest.WriteUint16LE(t, src, 0) // (22) 2 - ExtraBytes\n\t// Total length: 8+16+2+0=26\n\treturn src\n}", "func (d *Document) IsZero() bool {\n\treturn d == nil || (d.Version == \"\" && len(d.Markups) == 0 &&\n\t\tlen(d.Atoms) == 0 && len(d.Cards) == 0 && len(d.Sections) == 0)\n}", "func (c *Conn) parseEOFPacket(b []byte) bool {\n\tvar off int\n\n\toff++ // [fe] the EOF header (= _PACKET_EOF)\n\t// TODO: reset warning count\n\tc.warnings += binary.LittleEndian.Uint16(b[off : off+2])\n\toff += 2\n\tc.statusFlags = binary.LittleEndian.Uint16(b[off : off+2])\n\n\treturn c.reportWarnings()\n}", "func (dc *FixedLenByteArrayDictConverter) FillZero(out interface{}) {\n\to := out.([]parquet.FixedLenByteArray)\n\to[0] = dc.zeroVal\n\tfor i := 1; i < len(o); i *= 2 {\n\t\tcopy(o[i:], o[:i])\n\t}\n}", "func (e LogEntry) IsEOF() bool {\n\treturn e.LineNo == -2\n}", "func (z *Stream) End() {\n\tC.lzma_end(z.C())\n}", "func (w *Writer) WriteZeros(len int) error {\n\tzeros := make([]byte, len)\n\t_, err := w.out.Write(zeros)\n\treturn err\n}", "func TestEOF(t *testing.T) {\n\tc, s := setUp(t)\n\t// Since we're not using tearDown() here, manually call Finish()\n\tdefer s.ctrl.Finish()\n\n\t// Set up a handler to detect whether disconnected handlers are called\n\tdcon := callCheck(t)\n\tc.Handle(DISCONNECTED, dcon)\n\n\t// Simulate EOF from server\n\ts.nc.Close()\n\n\t// Verify that disconnected handler was called\n\tdcon.assertWasCalled(\"Conn did not call disconnected handlers.\")\n\n\t// Verify 
that the connection no longer thinks it's connected\n\tif c.Connected() {\n\t\tt.Errorf(\"Conn still thinks it's connected to the server.\")\n\t}\n}", "func TestMultiReaderFinalEOF(t *testing.T) {\n\tr := MultiReader(bytes.NewReader(nil), byteAndEOFReader('a'))\n\tbuf := make([]byte, 2)\n\tn, err := r.Read(buf)\n\tif n != 1 || err != EOF {\n\t\tt.Errorf(\"got %v, %v; want 1, EOF\", n, err)\n\t}\n}", "func (h *Header) SetOneway(oneway bool) {\n\tif oneway {\n\t\th[2] = h[2] | 0x20\n\t} else {\n\t\th[2] = h[2] &^ 0x20\n\t}\n}", "func TestZeroLengthTagError(t *testing.T) {\n\tname := filepath.Join(*dataDir, \"corrupt/infinite_loop_exif.jpg\")\n\tf, err := os.Open(name)\n\tif err != nil {\n\t\tt.Fatalf(\"%v\\n\", err)\n\t}\n\tdefer f.Close()\n\n\t_, err = Decode(f)\n\tif err == nil {\n\t\tt.Fatal(\"no error on bad exif data\")\n\t}\n\tif !strings.Contains(err.Error(), \"exif: decode failed (tiff: recursive IFD)\") {\n\t\tt.Fatal(\"wrong error:\", err.Error())\n\t}\n}", "func TestExactReadCloserExpectEOF(t *testing.T) {\n\tbuf := bytes.NewBuffer(make([]byte, 10))\n\trc := NewExactReadCloser(&readerNilCloser{buf}, 1)\n\tif _, err := rc.Read(make([]byte, 10)); err != ErrExpectEOF {\n\t\tt.Fatalf(\"expected %v, got %v\", ErrExpectEOF, err)\n\t}\n}", "func (dc *ByteArrayDictConverter) FillZero(out interface{}) {\n\to := out.([]parquet.ByteArray)\n\to[0] = dc.zeroVal\n\tfor i := 1; i < len(o); i *= 2 {\n\t\tcopy(o[i:], o[:i])\n\t}\n}", "func (p *Buffer) EOF() bool {\n\treturn ulen(p.buf) == p.index\n}", "func (self *bipbuf_t) IsEmpty() bool {\n\treturn self.a_start >= self.a_end\n}", "func (b ByteSlice) IsZero() bool {\n\treturn !b.Valid || len(b.ByteSlice) == 0\n}", "func (ctx *Context) outzero(size uintptr) unsafe.Pointer {\n\tstart := ctx.off + int(headerOutSize)\n\tif size > 0 {\n\t\tbuf := ctx.buf[start : start+int(size)]\n\t\tfor i := range buf {\n\t\t\tbuf[i] = 0\n\t\t}\n\t}\n\treturn unsafe.Pointer(&ctx.buf[start])\n}", "func treatEOFErrorsAsNil(err error) error 
{\n\tif err == nil {\n\t\treturn nil\n\t}\n\tif errors.Is(err, io.EOF) {\n\t\treturn nil\n\t}\n\tvar te TTransportException\n\tif errors.As(err, &te) && te.TypeId() == END_OF_FILE {\n\t\treturn nil\n\t}\n\treturn err\n}", "func (mes *MarkerEncodingScheme) EndOfStream() Marker { return mes.endOfStream }", "func (r *chanReader) eof() {\n\tif !r.dataClosed {\n\t\tr.dataClosed = true\n\t\tclose(r.data)\n\t}\n}", "func TestConnReadNonzeroAndEOF(t *testing.T) {\n\t// This test is racy: it assumes that after a write to a\n\t// localhost TCP connection, the peer TCP connection can\n\t// immediately read it. Because it's racy, we skip this test\n\t// in short mode, and then retry it several times with an\n\t// increasing sleep in between our final write (via srv.Close\n\t// below) and the following read.\n\tif testing.Short() {\n\t\tt.Skip(\"skipping in short mode\")\n\t}\n\tvar err error\n\tfor delay := time.Millisecond; delay <= 64*time.Millisecond; delay *= 2 {\n\t\tif err = testConnReadNonzeroAndEOF(t, delay); err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\tt.Error(err)\n}", "func TestConnReadNonzeroAndEOF(t *testing.T) {\n\t// This test is racy: it assumes that after a write to a\n\t// localhost TCP connection, the peer TCP connection can\n\t// immediately read it. 
Because it's racy, we skip this test\n\t// in short mode, and then retry it several times with an\n\t// increasing sleep in between our final write (via srv.Close\n\t// below) and the following read.\n\tif testing.Short() {\n\t\tt.Skip(\"skipping in short mode\")\n\t}\n\tvar err error\n\tfor delay := time.Millisecond; delay <= 64*time.Millisecond; delay *= 2 {\n\t\tif err = testConnReadNonzeroAndEOF(t, delay); err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\tt.Error(err)\n}", "func isEmptyOrEnd(line string) bool {\n\treturn len(line) == 0 || strings.HasPrefix(line, \"!\")\n}", "func IsZeroFilled(b []byte) bool {\n\treturn isZeroFilledSimple(b)\n}", "func EnsureEmpty(r io.Reader, stage string) error {\n\tbuf := bytesPool.Get().(*[]byte)\n\tdefer bytesPool.Put(buf)\n\n\tn, err := r.Read(*buf)\n\tif n > 0 {\n\t\treturn fmt.Errorf(\"found unexpected bytes after %s, found (upto 128 bytes): %x\", stage, (*buf)[:n])\n\t}\n\tif err == io.EOF {\n\t\treturn nil\n\t}\n\treturn err\n}", "func TestInvalidLength(t *testing.T) {\n\tkey, err := NewFixedLengthKeyFromReader(ConstReader(1), -1)\n\tif err == nil {\n\t\tkey.Wipe()\n\t\tt.Error(\"Negative lengths should cause failure\")\n\t}\n}", "func (me TClipFillRuleType) IsNonzero() bool { return me.String() == \"nonzero\" }", "func (b Bytes) IsEmpty() bool { return len(b) == 0 }", "func NewEmptyAcraBlock(length int) AcraBlock {\n\tb := make([]byte, length)\n\tcopy(b[:len(tagBegin)], tagBegin)\n\treturn b\n}", "func (e EndElement) isZero() bool {\n\treturn len(e.Name.Local) == 0\n}", "func (UTF8Decoder) FullRune(p []byte) bool { return utf8.FullRune(p) }", "func isEOF(tk Token) bool {\n\treturn tk.GetName() == EOF\n}", "func (b *buffer) isEmpty() bool {\n\tif b == nil {\n\t\treturn true\n\t}\n\tif len(b.buf)-b.offset <= 0 {\n\t\treturn true\n\t}\n\treturn false\n}", "func commitEOF() error {\n\treturn cliutil.ActionError(clitypes.CommitEOF)\n}", "func (h HexBytes) IsEmpty() bool { return len(h) == 0 }", "func 
headerWithNoFileMetaInformationGroupLength() (*headerData, error) {\n\theaderData := new(headerData)\n\n\telements := []*Element{\n\t\tmustNewElement(tag.MediaStorageSOPClassUID, []string{\"SecondaryCapture\"}),\n\t\tmustNewElement(tag.MediaStorageSOPInstanceUID, []string{\"1.3.6.1.4.1.35190.4.1.20210608.607733549593\"}),\n\t\tmustNewElement(tag.TransferSyntaxUID, []string{\"=RLELossless\"}),\n\t\tmustNewElement(tag.ImplementationClassUID, []string{\"1.6.6.1.4.1.9590.100.1.0.100.4.0\"}),\n\t\tmustNewElement(tag.SOPInstanceUID, []string{\"1.3.6.1.4.1.35190.4.1.20210608.607733549593\"}),\n\t}\n\tdata, err := writeElements(elements)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Construct valid DICOM header preamble.\n\tmagicWord := []byte(\"DICM\")\n\tpreamble := make([]byte, 128)\n\tpreamble = append(preamble, magicWord...)\n\theaderBytes := append(preamble, data...)\n\theaderData.HeaderBytes = bytes.NewBuffer(headerBytes)\n\theaderData.Elements = elements[0 : len(elements)-1]\n\treturn headerData, nil\n}", "func (pe *PEFile) calculateHeaderEnd(offset uint32) {\n\tvar rawDataPointers []uint32\n\tfor _, section := range pe.Sections {\n\t\tprd := section.Data.PointerToRawData\n\t\tif prd > uint32(0x0) {\n\t\t\trawDataPointers = append(rawDataPointers, pe.adjustFileAlignment(prd))\n\t\t}\n\t}\n\tminSectionOffset := uint32(0x0)\n\tif len(rawDataPointers) > 0 {\n\t\tminSectionOffset = rawDataPointers[0]\n\t\tfor _, pointer := range rawDataPointers {\n\t\t\tif pointer < minSectionOffset {\n\t\t\t\tminSectionOffset = pointer\n\t\t\t}\n\t\t}\n\t}\n\tif minSectionOffset == 0 || minSectionOffset < offset {\n\t\tpe.headerEnd = offset\n\t} else {\n\t\tpe.headerEnd = minSectionOffset\n\t}\n}", "func IsEOF(err error) bool {\n\terr = errs.Cause(err)\n\tif err == io.EOF {\n\t\treturn true\n\t}\n\tif ok, err := libCause(err); ok {\n\t\treturn IsEOF(err)\n\t}\n\treturn false\n}", "func (f genHelperDecoder) DecReadArrayEnd() { f.d.arrayEnd() }", "func (r *Reader) NextSection() 
error {\n\tbeginOffset, err := r.fl.Seek(int64(r.nextOffset), io.SeekStart)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvals := make([]byte, 16)\n\tbytesRead, err := r.fl.Read(vals)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// end marker\n\tif bytesRead == 8 && bytes.Equal(vals[:8], []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}) {\n\t\treturn io.EOF\n\t}\n\n\tsectionSize := binary.LittleEndian.Uint64(vals[:8])\n\trowCount := binary.LittleEndian.Uint64(vals[8:16])\n\n\tstr, err := readZeroTerminatedString(r.fl)\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\treturn fmt.Errorf(\"EOF while reading string section (partial: %s)\", str)\n\t\t}\n\t\treturn err\n\t}\n\n\tr.nextOffset = uint64(beginOffset) + sectionSize + 8 // well well, sectionSize includes the rowCount I guess?\n\n\tr.CurrentSection = &Section{\n\t\tName: SectionName(strings.TrimRight(str, string([]byte{0x00}))),\n\t\tOffset: uint64(beginOffset),\n\t\tSize: sectionSize,\n\t\tRowCount: rowCount,\n\t\tBufferSize: sectionSize - uint64(len(str)) - 1 /* str-pad 0x00 byte */ - 8,\n\t\tBuffer: r.fl,\n\t}\n\treturn nil\n}", "func TestDecodeHeader(t *testing.T) {\n\tdata := []byte{\n\t\t// header\n\t\t0x00, 0x00, 0x00, 0x0B, 0x27, 0x00, 0x02, 0x00, 0x00, 0x00, 0x23,\n\n\t\t// data\n\t\t0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x04,\n\t\t0x00, 0x00, 0x00, 0x0B, 0x00, 0x08, 0x03, 0xFF, 0xFD, 0xFF, 0x02, 0xFE,\n\t\t0xFE, 0xFE, 0x04, 0xEE, 0xED, 0x87, 0xFB, 0xCB, 0x2B, 0xFF, 0xAC,\n\t}\n\n\tr := reader.New(data)\n\td := &document{}\n\th, err := NewHeader(d, r, 0, OSequential)\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, int64(11), h.HeaderLength)\n\tassert.Equal(t, uint64(11), h.SegmentDataStartOffset)\n\n\ts, err := h.subInputReader()\n\trequire.NoError(t, err)\n\n\tb, err := s.ReadByte()\n\trequire.NoError(t, err)\n\tassert.Equal(t, byte(0x00), b)\n\n\tthree := make([]byte, 3)\n\tread, err := s.Read(three)\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, 3, 
read)\n\tassert.Equal(t, byte(0x36), three[2])\n}", "func TestEmptyBlockMarshall(t *testing.T) {\n\tconst scheme = TestNetScheme\n\n\tb1 := Block{}\n\tbts, err := b1.MarshalBinary(scheme)\n\trequire.NoError(t, err)\n\n\tb2 := Block{}\n\terr = b2.UnmarshalBinary(bts, scheme)\n\trequire.Error(t, err)\n}", "func (p *Parser) jumpIfZero() {\n\tp.emitByte(OP_JUMPZ)\n\tp.primary()\n}", "func (m *ModifyBearerResponse) SetLength() {\n\tm.Header.Length = uint16(m.MarshalLen() - 4)\n}", "func emptyCf() []byte {\n\n\trc := &ringcf.Ring{\n\t\tVersion: ringcf.VERSION,\n\t\tParts: []ringcf.Part{\n\t\t\t{Shard: []uint32{0}},\n\t\t},\n\t}\n\n\tres, _ := ringcf.ToBytes(rc)\n\tres = append([]byte(\"# config not found\\n# autogenerated example\\n\"), res...)\n\treturn res\n}", "func (word ControlWord) IsLongOffset() bool {\n\treturn word.Count() == 0\n}", "func hasPESOptionalHeader(streamID uint8) bool {\n\treturn streamID != StreamIDPaddingStream && streamID != StreamIDPrivateStream2\n}", "func IsEndOfStream(msg []byte) bool {\n\treturn bytes.Equal(msg, EndStreamHeader)\n}", "func (options *Options) isIncludableZero() bool {\n\tb, ok := options.HashProp(\"includeZero\").(bool)\n\tif ok && b {\n\t\tnb, ok := options.Param(0).(int)\n\t\tif ok && nb == 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (suite *IntPartTestSuite) TestReadToZeroLengthBuffer() {\n\tpart, _ := newIntPartFromString(\"9\")\n\tbuff := make([]byte, 0, 0)\n\tcount, _ := part.Read(buff)\n\tsuite.Equal(0, count)\n}", "func indexNullTerminator(b []byte) int {\n\tif len(b) < 2 {\n\t\treturn -1\n\t}\n\n\tfor i := 0; i < len(b); i += 2 {\n\t\tif b[i] == 0 && b[i+1] == 0 {\n\t\t\treturn i\n\t\t}\n\t}\n\n\treturn -1\n}", "func (r *Reader) Len() int {\n\tif r.file_v0 != nil {\n\t\treturn r.file_v0.Len()\n\t}\n\treturn int(r.header.num)\n}", "func (lex *Lexer) IsEOF() bool {\n\treturn lex.Token == scanner.TEOF\n}", "func isZero(buffer []byte) bool {\n\tfor i := range buffer {\n\t\tif buffer[i] != 0 
{\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (o *OptionalString) IsZero() bool {\n\treturn len(*o) == 0\n}", "func (*endpoint) MaxHeaderLength() uint16 {\n\treturn header.EthernetMinimumSize\n}", "func TestZeros(t *testing.T) {\n\tvect, err := os.Open(\"randvect.txt\")\n\tif err != nil {\n\t\tt.Error(\"could not find text vector file\")\n\t}\n\tdefer vect.Close()\n\tscanner := bufio.NewScanner(vect)\n\tscanner.Scan()\n\n\tvar rng ISAAC\n\trng.randInit(true)\n\n\tvar buf bytes.Buffer\n\tfor i := 0; i < 2; i++ {\n\t\trng.isaac()\n\t\tfor j := 0; j < 256; j++ {\n\t\t\tbuf.WriteString(fmt.Sprintf(\"%.8x\", rng.randrsl[j]))\n\t\t\tif (j & 7) == 7 {\n\t\t\t\tvar output = buf.String()\n\t\t\t\tif scanner.Text() == output {\n\t\t\t\t\tscanner.Scan()\n\t\t\t\t\tbuf.Reset()\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Printf(\"o: \" + output + \"\\n\" + \"v: \" + scanner.Text() + \"\\n\")\n\t\t\t\t\tt.Fail()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (bc ByteCount) IsZero() bool {\n\treturn bc == 0\n}", "func (h *Headers) IsEmpty() bool {\n\tif h.public == true {\n\t\treturn false\n\t} else if h.private == true {\n\t\treturn false\n\t} else if h.maxAge.Valid {\n\t\treturn false\n\t} else if h.sharedMaxAge.Valid {\n\t\treturn false\n\t} else if h.noCache == true {\n\t\treturn false\n\t} else if h.noStore == true {\n\t\treturn false\n\t} else if h.noTransform == true {\n\t\treturn false\n\t} else if h.mustRevalidate == true {\n\t\treturn false\n\t} else if h.proxyRevalidate == true {\n\t\treturn false\n\t}\n\treturn true\n}", "func EndOfObject(b *bytes.Buffer) (int, error) {\n\treturn b.Write([]byte{0x00, 0x00, 0x09})\n}", "func (e *Encoder) avoidFlush() bool {\n\tswitch {\n\tcase e.tokens.last.length() == 0:\n\t\t// Never flush after ObjectStart or ArrayStart since we don't know yet\n\t\t// if the object or array will end up being empty.\n\t\treturn true\n\tcase e.tokens.last.needObjectValue():\n\t\t// Never flush before the object value since we don't 
know yet\n\t\t// if the object value will end up being empty.\n\t\treturn true\n\tcase e.tokens.last.needObjectName() && len(e.buf) >= 2:\n\t\t// Never flush after the object value if it does turn out to be empty.\n\t\tswitch string(e.buf[len(e.buf)-2:]) {\n\t\tcase `ll`, `\"\"`, `{}`, `[]`: // last two bytes of every empty value\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (t *FileType) IsEmpty() bool {\n\treturn len(t.FileTypes) < 1 &&\n\t\tt.MinLength == 0 &&\n\t\tt.MaxLength == 2147483647\n}", "func parsePESOptionalHeader(i []byte, offset *int) (h *PESOptionalHeader, dataStart int) {\n\t// Init\n\th = &PESOptionalHeader{}\n\n\t// Marker bits\n\th.MarkerBits = uint8(i[*offset]) >> 6\n\n\t// Scrambling control\n\th.ScramblingControl = uint8(i[*offset]) >> 4 & 0x3\n\n\t// Priority\n\th.Priority = uint8(i[*offset])&0x8 > 0\n\n\t// Data alignment indicator\n\th.DataAlignmentIndicator = uint8(i[*offset])&0x4 > 0\n\n\t// Copyrighted\n\th.IsCopyrighted = uint(i[*offset])&0x2 > 0\n\n\t// Original or copy\n\th.IsOriginal = uint8(i[*offset])&0x1 > 0\n\t*offset += 1\n\n\t// PTS DST indicator\n\th.PTSDTSIndicator = uint8(i[*offset]) >> 6 & 0x3\n\n\t// Flags\n\th.HasESCR = uint8(i[*offset])&0x20 > 0\n\th.HasESRate = uint8(i[*offset])&0x10 > 0\n\th.HasDSMTrickMode = uint8(i[*offset])&0x8 > 0\n\th.HasAdditionalCopyInfo = uint8(i[*offset])&0x4 > 0\n\th.HasCRC = uint8(i[*offset])&0x2 > 0\n\th.HasExtension = uint8(i[*offset])&0x1 > 0\n\t*offset += 1\n\n\t// Header length\n\th.HeaderLength = uint8(i[*offset])\n\t*offset += 1\n\n\t// Data start\n\tdataStart = *offset + int(h.HeaderLength)\n\n\t// PTS/DTS\n\tif h.PTSDTSIndicator == PTSDTSIndicatorOnlyPTS {\n\t\th.PTS = parsePTSOrDTS(i[*offset:])\n\t\t*offset += 5\n\t} else if h.PTSDTSIndicator == PTSDTSIndicatorBothPresent {\n\t\th.PTS = parsePTSOrDTS(i[*offset:])\n\t\t*offset += 5\n\t\th.DTS = parsePTSOrDTS(i[*offset:])\n\t\t*offset += 5\n\t}\n\n\t// ESCR\n\tif h.HasESCR {\n\t\th.ESCR = 
parseESCR(i[*offset:])\n\t\t*offset += 6\n\t}\n\n\t// ES rate\n\tif h.HasESRate {\n\t\th.ESRate = uint32(i[*offset])&0x7f<<15 | uint32(i[*offset+1])<<7 | uint32(i[*offset+2])>>1\n\t\t*offset += 3\n\t}\n\n\t// Trick mode\n\tif h.HasDSMTrickMode {\n\t\th.DSMTrickMode = parseDSMTrickMode(i[*offset])\n\t\t*offset += 1\n\t}\n\n\t// Additional copy info\n\tif h.HasAdditionalCopyInfo {\n\t\th.AdditionalCopyInfo = i[*offset] & 0x7f\n\t\t*offset += 1\n\t}\n\n\t// CRC\n\tif h.HasCRC {\n\t\th.CRC = uint16(i[*offset])>>8 | uint16(i[*offset+1])\n\t\t*offset += 2\n\t}\n\n\t// Extension\n\tif h.HasExtension {\n\t\t// Flags\n\t\th.HasPrivateData = i[*offset]&0x80 > 0\n\t\th.HasPackHeaderField = i[*offset]&0x40 > 0\n\t\th.HasProgramPacketSequenceCounter = i[*offset]&0x20 > 0\n\t\th.HasPSTDBuffer = i[*offset]&0x10 > 0\n\t\th.HasExtension2 = i[*offset]&0x1 > 0\n\t\t*offset += 1\n\n\t\t// Private data\n\t\tif h.HasPrivateData {\n\t\t\th.PrivateData = i[*offset : *offset+16]\n\t\t\t*offset += 16\n\t\t}\n\n\t\t// Pack field length\n\t\tif h.HasPackHeaderField {\n\t\t\th.PackField = uint8(i[*offset])\n\t\t\t*offset += 1\n\t\t}\n\n\t\t// Program packet sequence counter\n\t\tif h.HasProgramPacketSequenceCounter {\n\t\t\th.PacketSequenceCounter = uint8(i[*offset]) & 0x7f\n\t\t\th.MPEG1OrMPEG2ID = uint8(i[*offset+1]) >> 6 & 0x1\n\t\t\th.OriginalStuffingLength = uint8(i[*offset+1]) & 0x3f\n\t\t\t*offset += 2\n\t\t}\n\n\t\t// P-STD buffer\n\t\tif h.HasPSTDBuffer {\n\t\t\th.PSTDBufferScale = i[*offset] >> 5 & 0x1\n\t\t\th.PSTDBufferSize = uint16(i[*offset])&0x1f<<8 | uint16(i[*offset+1])\n\t\t\t*offset += 2\n\t\t}\n\n\t\t// Extension 2\n\t\tif h.HasExtension2 {\n\t\t\t// Length\n\t\t\th.Extension2Length = uint8(i[*offset]) & 0x7f\n\t\t\t*offset += 2\n\n\t\t\t// Data\n\t\t\th.Extension2Data = i[*offset : *offset+int(h.Extension2Length)]\n\t\t\t*offset += int(h.Extension2Length)\n\t\t}\n\t}\n\treturn\n}", "func ZeroTruncate(t BlockType, buf []byte) []byte {\n\tif t.depth() > 0 {\n\t\t// ignore 
slop at end of block\n\t\ti := (len(buf) / ScoreSize) * ScoreSize\n\t\tzero := ZeroScore()\n\t\tzeroBytes := zero.Bytes()\n\t\tfor i >= ScoreSize {\n\t\t\tif bytes.Equal(buf[i-ScoreSize:i], zeroBytes) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ti -= ScoreSize\n\t\t}\n\t\treturn buf[:i]\n\t} else if t == RootType {\n\t\tif len(buf) < RootSize {\n\t\t\treturn buf\n\t\t}\n\t\treturn buf[:RootSize]\n\t} else {\n\t\tvar i int\n\t\tfor i = len(buf); i > 0; i-- {\n\t\t\tif buf[i-1] != 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\treturn buf[:i]\n\t}\n}", "func (s CommitmentLengthObject) IsEmpty() bool {\n\treturn s.commitmentLength == nil\n}", "func TestEOFOrLengthEncodedIntFuzz(t *testing.T) {\n\tfor i := 0; i < 100; i++ {\n\t\tbytes := make([]byte, rand.Intn(16)+1)\n\t\t_, err := crypto_rand.Read(bytes)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"error doing rand.Read\")\n\t\t}\n\t\tbytes[0] = 0xfe\n\n\t\t_, _, isInt := readLenEncInt(bytes, 0)\n\t\tisEOF := isEOFPacket(bytes)\n\t\tif (isInt && isEOF) || (!isInt && !isEOF) {\n\t\t\tt.Fatalf(\"0xfe bytestring is EOF xor Int. 
Bytes %v\", bytes)\n\t\t}\n\t}\n}", "func (x *Secp256k1N) IsZero() bool {\n\tvar z Secp256k1N\n\tz.Set(x)\n\tz.Normalize()\n\treturn (z.limbs[0] | z.limbs[1] | z.limbs[2] | z.limbs[3] | z.limbs[4]) == 0\n}", "func (bio *BinaryIO) Zero(off int64, count int) {\n\tbuf := makeBuf(count)\n\tfor count > 0 {\n\t\tbuf = truncBuf(buf, count)\n\t\tbio.WriteAt(off, buf)\n\t\tcount -= len(buf)\n\t\toff += int64(len(buf))\n\t}\n}", "func HasZeroWidthCharacters(s string) bool {\n\treturn strings.ContainsRune(s, ZWSP) ||\n\t\tstrings.ContainsRune(s, ZWNBSP) ||\n\t\tstrings.ContainsRune(s, ZWJ) ||\n\t\tstrings.ContainsRune(s, ZWNJ)\n}", "func TestHB11ZeroHeader(t *testing.T) {\n\tn, _ := openConn(t)\n\tconn_headers := check11(TEST_HEADERS)\n\tc, _ := Connect(n, conn_headers.Add(\"heart-beat\", \"0,0\"))\n\tif c.protocol == SPL_10 {\n\t\t_ = closeConn(t, n)\n\t\treturn\n\t}\n\tif c.hbd != nil {\n\t\tt.Errorf(\"Expected no heartbeats for 1.1, zero header\")\n\t}\n\t_ = c.Disconnect(empty_headers)\n\t_ = closeConn(t, n)\n}", "func (dc *Int96DictConverter) FillZero(out interface{}) {\n\to := out.([]parquet.Int96)\n\to[0] = dc.zeroVal\n\tfor i := 1; i < len(o); i *= 2 {\n\t\tcopy(o[i:], o[:i])\n\t}\n}", "func (p *Parameter) IsDefaultHeaderEncoding() bool {\n\tif p.Explode == nil && (p.Style == \"\" || p.Style == \"simple\") {\n\t\treturn true\n\t}\n\tif p.Explode != nil && !*p.Explode && (p.Style == \"\" || p.Style == \"simple\") {\n\t\treturn true\n\t}\n\treturn false\n}", "func (o ParserConfigOutput) AllowNullHeader() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v ParserConfig) *bool { return v.AllowNullHeader }).(pulumi.BoolPtrOutput)\n}", "func (n NoOp) OutputLength() int {\n\treturn 0\n}", "func (o ParserConfigResponseOutput) AllowNullHeader() pulumi.BoolOutput {\n\treturn o.ApplyT(func(v ParserConfigResponse) bool { return v.AllowNullHeader }).(pulumi.BoolOutput)\n}", "func ScanNullTerminatedEntries(data []byte, atEOF bool) (advance int, token []byte, err error) {\n\tif i := 
bytes.IndexByte(data, 0); i >= 0 {\n\t\t// Valid record found.\n\t\treturn i + 1, data[0:i], nil\n\t} else if atEOF && len(data) != 0 {\n\t\t// Data at the end of the file without a null terminator.\n\t\treturn 0, nil, errors.New(\"Expected null byte terminator\")\n\t} else {\n\t\t// Request more data.\n\t\treturn 0, nil, nil\n\t}\n}", "func (d *MyDecimal) IsZero() bool {\n\tisZero := true\n\tfor _, val := range d.wordBuf {\n\t\tif val != 0 {\n\t\t\tisZero = false\n\t\t\tbreak\n\t\t}\n\t}\n\treturn isZero\n}", "func parseSectionV2(data []byte) ([]byte, []packetV2, error) {\n\tprevFieldType := fieldType(-1)\n\tvar packets []packetV2\n\tfor {\n\t\tif len(data) == 0 {\n\t\t\treturn nil, nil, fmt.Errorf(\"section extends past end of buffer\")\n\t\t}\n\t\trest, p, err := parsePacketV2(data)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tif p.fieldType == fieldEOS {\n\t\t\treturn rest, packets, nil\n\t\t}\n\t\tif p.fieldType <= prevFieldType {\n\t\t\treturn nil, nil, fmt.Errorf(\"fields out of order\")\n\t\t}\n\t\tpackets = append(packets, p)\n\t\tprevFieldType = p.fieldType\n\t\tdata = rest\n\t}\n}", "func (a ACME) IsZero() bool {\n\treturn !a.Enable &&\n\t\ta.Endpoint == \"\" &&\n\t\ta.Dir == \"\" &&\n\t\ta.Email == \"\" &&\n\t\tlen(a.Hosts) == 0\n}" ]
[ "0.52038777", "0.4886099", "0.46940255", "0.45598406", "0.45515433", "0.44806105", "0.44333175", "0.44333175", "0.44333175", "0.44333175", "0.44333175", "0.4400239", "0.43830788", "0.43444872", "0.43374717", "0.43305448", "0.43090913", "0.42828125", "0.42710194", "0.4262915", "0.4258667", "0.42167443", "0.4212383", "0.41943243", "0.41935387", "0.41891184", "0.41706467", "0.41652796", "0.4159023", "0.41405106", "0.41369393", "0.41142532", "0.40852728", "0.4079865", "0.40797725", "0.4079137", "0.40747815", "0.40716282", "0.4052887", "0.40360418", "0.40203223", "0.40081647", "0.40081605", "0.39913192", "0.3982908", "0.39751372", "0.39555615", "0.39538753", "0.3949698", "0.39153326", "0.3911693", "0.3908169", "0.38899034", "0.38893932", "0.3873781", "0.38735417", "0.38670933", "0.38438997", "0.3841547", "0.38360387", "0.38334435", "0.38330272", "0.38322303", "0.38279572", "0.38278058", "0.38267866", "0.382449", "0.38227883", "0.38136756", "0.38132867", "0.38099095", "0.38086256", "0.37995127", "0.37992713", "0.37944978", "0.37887704", "0.37846586", "0.37728453", "0.37655768", "0.37521088", "0.3748824", "0.37387827", "0.37338495", "0.3733493", "0.3731067", "0.37243837", "0.37214983", "0.3719686", "0.37153205", "0.3710973", "0.37103164", "0.37053216", "0.37016818", "0.3697489", "0.36878216", "0.36808568", "0.3676672", "0.3671102", "0.36588928", "0.36528853" ]
0.81493616
0
UseDataPadding sets the padding to be added between CARv2 header and its data payload on Finalize.
func UseDataPadding(p uint64) Option { return func(o *Options) { o.DataPadding = p } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func padData(rawData []byte) []byte {\n\tneedPadding := aes.BlockSize - ((len(rawData) + 2) % aes.BlockSize)\n\n\tvar dataBuf bytes.Buffer\n\tdataBuf.Grow(2 + len(rawData) + (aes.BlockSize % (len(rawData) + 2)))\n\n\tdataBuf.Write([]byte(\"|\"))\n\tdataBuf.Write(rawData)\n\tdataBuf.Write([]byte(\"|\"))\n\n\tfor i := 0; i < needPadding; i++ {\n\t\tdataBuf.Write([]byte(\" \"))\n\t}\n\n\treturn dataBuf.Bytes()\n}", "func (d *DataPacket) SetData(data []byte) {\n\tif len(data) > 512 {\n\t\tdata = data[0:512]\n\t}\n\t//make the length a multiply of 2\n\tif len(data)%2 != 0 { //add a 0 to make the length sufficient\n\t\tdata = append(data, 0)\n\t}\n\td.setFAL(uint16(126 + len(data)))\n\td.replace(126, data)\n}", "func WithPaddingAllowed() ParserOption {\n\treturn func(p *Parser) {\n\t\tp.decodePaddingAllowed = true\n\t}\n}", "func UseIndexPadding(p uint64) Option {\n\treturn func(o *Options) {\n\t\to.IndexPadding = p\n\t}\n}", "func (ctc *CustomTransactionContext) SetData(data []byte) {\n\tctc.data = data\n}", "func PKCS(data []byte, mode string) (padded_data []byte) {\r\n\tvar pad_num int\r\n\r\n\tif mode == \"add\" {\r\n\t\trem := len(data) % userlib.AESBlockSizeBytes\r\n\t\tpad_num = userlib.AESBlockSizeBytes - rem //number to pad by\r\n\t\t//pad := make([]byte, pad_num) //pad array we are appending later\r\n\t\tpadded_data = data[:]\r\n\t\tfor i := 0; i < pad_num; i++ {\r\n\t\t\t//pad = append(pad, byte(pad_num))\r\n\t\t\tpadded_data = append(padded_data, byte(pad_num))\r\n\t\t}\r\n\r\n\t\t//userlib.DebugMsg(\"%d\", padded_data)\r\n\t} else { //remove padding\r\n\t\t//last byte is amount of padding there is\r\n\t\t//ex: d = [1022] means 2 bytes of padding so return d[:2] which is [10]\r\n\r\n\t\tnum := len(data) - 1\r\n\t\tpad_num = len(data) - int(data[num]) //piazza: convert to byte > hex string > int?\r\n\t\tpadded_data = data[:pad_num]\r\n\t}\r\n\r\n\treturn padded_data\r\n}", "func (p *IPv4) SetData(data []byte) {\n\tp.data = data\n}", "func (socket *Socket) 
SetFinalData(data string) {\n\tsocket.Lock()\n\tdefer socket.Unlock()\n\tsocket.finalData = data\n}", "func (p *Patch) SetPadding(value mat.AABB) {\n\tp.Padding = value\n\tp.SetRegion(p.Region)\n}", "func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }", "func (t DNSOverTCP) RequiresPadding() bool {\n\treturn t.requiresPadding\n}", "func (p *Packet) SetData(data []byte) {\n\tp.Data = data\n}", "func padFile(data []byte) (padData []byte) {\n\tif len(data) % userlib.AESBlockSize != 0{\n\t\t//padding\n\t\tif len(data) < userlib.AESBlockSize {\n\t\t\tpad := userlib.AESBlockSize - len(data)\n\t\t\tfor i := 0; i < pad; i++ {\n\t\t\t\tdata = append(data, byte(pad))\n\t\t\t}\n\t\t} else {\n\t\t\ttemp := userlib.AESBlockSize\n\t\t\tfor temp < len(data){\n\t\t\t\ttemp += userlib.AESBlockSize\n\t\t\t}\n\t\t\tpad := temp - len(data)\n\t\t\tfor i := 0; i < pad; i++ {\n\t\t\t\tdata = append(data, byte(pad))\n\t\t\t}\n\t\t}\n\t} else {\n\t\tpad := 0\n\t\tfor i := 0; i < userlib.AESBlockSize; i++ {\n\t\t\tdata = append(data, byte(pad))\n\t\t}\n\t}\n\treturn data\n}", "func (key Key) SetPadding(padding C.DWORD) error {\n\tif C.CryptSetKeyParam(key.hKey, C.KP_PADDING, C.LPBYTE(unsafe.Pointer(&padding)), 0) == 0 {\n\t\treturn getErr(\"Error setting padding for key\")\n\t}\n\treturn nil\n}", "func EncodeBytesWithPadding(data []byte, targetLength int) []byte {\n\tvar buf bytes.Buffer\n\n\tfor i := 0; i < targetLength-len(data); i++ {\n\t\tbuf.WriteByte(0)\n\t}\n\n\tbuf.Write(data)\n\treturn buf.Bytes()\n}", "func (enc Encoding) WithPadding(padding rune) *Encoding {\n\tswitch {\n\tcase padding < NoPadding || padding == '\\r' || padding == '\\n' || padding > 0xff:\n\t\tpanic(\"invalid padding\")\n\tcase padding != NoPadding && enc.decodeMap[byte(padding)] != invalidIndex:\n\t\tpanic(\"padding contained in alphabet\")\n\t}\n\tenc.padChar = padding\n\treturn &enc\n}", "func (o *SecretBagWritable) SetData(v map[string]string) {\n\to.Data = v\n}", "func (t *DNSOverTCPTransport) 
RequiresPadding() bool {\n\treturn t.requiresPadding\n}", "func (o *SecretBagPatchable) SetData(v map[string]string) {\n\to.Data = v\n}", "func (o *SwiftObject) SetData(size int64) (io.Writer, error) {\n\treturn o.newFile(\"data\", size)\n}", "func (znp *Znp) UtilDataReq(securityUse uint8) (rsp *StatusResponse, err error) {\n\treq := &UtilDataReq{SecurityUse: securityUse}\n\terr = znp.ProcessRequest(unp.C_SREQ, unp.S_UTIL, 0x11, req, &rsp)\n\treturn\n}", "func DecryptUseCBC(cipherText, key []byte, iv []byte) ([]byte, error) {\n\tblockKey, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tblockSize := blockKey.BlockSize()\n\tif len(cipherText)%blockSize != 0 {\n\t\treturn nil, errors.New(\"cipher text is not an integral multiple of the block size\")\n\t}\n\tdecryptTool := cipher.NewCBCDecrypter(blockKey, iv)\n\t// CryptBlocks can work in-place if the two arguments are the same.\n\tdecryptTool.CryptBlocks(cipherText, cipherText)\n\treturn PKCS5UnPadding(cipherText), nil\n}", "func (k *Item) SetData(b []byte) {\n\tif b != nil {\n\t\tk.attr[DataKey] = b\n\t} else {\n\t\tdelete(k.attr, DataKey)\n\t}\n}", "func WithData(value string) OptFn {\n\treturn func(o *Opt) {\n\t\to.data = value\n\t}\n}", "func (_Withdrawable *WithdrawableSession) IsSigDataUsed(arg0 [32]byte) (bool, error) {\n\treturn _Withdrawable.Contract.IsSigDataUsed(&_Withdrawable.CallOpts, arg0)\n}", "func setupPadding() {\n\n\tpaddingMap[0] = \"10101010101010101010101010101010\"\n\tpaddingMap[1] = \"0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f\"\n\tpaddingMap[2] = \"0e0e0e0e0e0e0e0e0e0e0e0e0e0e\"\n\tpaddingMap[3] = \"0d0d0d0d0d0d0d0d0d0d0d0d0d\"\n\tpaddingMap[4] = \"0c0c0c0c0c0c0c0c0c0c0c0c\"\n\tpaddingMap[5] = \"0b0b0b0b0b0b0b0b0b0b0b\"\n\tpaddingMap[6] = \"0a0a0a0a0a0a0a0a0a0a\"\n\tpaddingMap[7] = \"090909090909090909\"\n\tpaddingMap[8] = \"0808080808080808\"\n\tpaddingMap[9] = \"07070707070707\"\n\tpaddingMap[10] = \"060606060606\"\n\tpaddingMap[11] = \"0505050505\"\n\tpaddingMap[12] = 
\"04040404\"\n\tpaddingMap[13] = \"030303\"\n\tpaddingMap[14] = \"0202\"\n\tpaddingMap[15] = \"01\"\n}", "func (e Des3CbcSha1Kd) DecryptData(key, data []byte) ([]byte, error) {\n\treturn rfc3961.DES3DecryptData(key, data, e)\n}", "func (g *GroupedAVP) Padding() int {\n\treturn 0\n}", "func UseExternalData(flag bool) {\n\tuseExternalData = flag\n}", "func (o *PostHAProxyConfigurationParams) SetData(data string) {\n\to.Data = data\n}", "func (dc *dataContainer) SetForceData(key, value interface{}) {\n\tdc.data[key] = value\n}", "func (e Aes128CtsHmacSha256128) DecryptData(key, data []byte) ([]byte, error) {\n\treturn rfc8009.DecryptData(key, data, e)\n}", "func (c *digisparkI2cConnection) WriteBlockData(reg uint8, data []byte) error {\n\tc.mtx.Lock()\n\tdefer c.mtx.Unlock()\n\n\tif len(data) > 32 {\n\t\tdata = data[:32]\n\t}\n\n\tbuf := make([]byte, len(data)+1)\n\tcopy(buf[1:], data)\n\tbuf[0] = reg\n\treturn c.writeAndCheckCount(buf, true)\n}", "func WithDataBus(conn stan.Conn) Option {\n\treturn func(a *App) {\n\t\ta.stanConn = conn\n\t}\n}", "func (_Withdrawable *WithdrawableCallerSession) IsSigDataUsed(arg0 [32]byte) (bool, error) {\n\treturn _Withdrawable.Contract.IsSigDataUsed(&_Withdrawable.CallOpts, arg0)\n}", "func (d PrinterCallbacks) OnDataUsage(dloadKiB, uploadKiB float64) {\n\td.Logger.Infof(\"experiment: recv %s, sent %s\",\n\t\thumanizex.SI(dloadKiB*1024, \"byte\"),\n\t\thumanizex.SI(uploadKiB*1024, \"byte\"),\n\t)\n}", "func WithCreateData(val map[string][]byte) CreateOption {\n\treturn func(cfg *createConfig) {\n\t\tcfg.Data = val\n\t}\n}", "func (o *PrivilegedBagData) SetData(v map[string]string) {\n\to.Data = v\n}", "func pad(data []byte, blockSize int, padder Padder) []byte {\n \tdataLen := len(data)\n\tpadLen := blockSize - (dataLen % blockSize)\n\tpadding := padder(padLen)\n\treturn append(data, padding...)\n}", "func (_Vault *VaultSession) SigDataUsed(arg0 [32]byte) (bool, error) {\n\treturn _Vault.Contract.SigDataUsed(&_Vault.CallOpts, 
arg0)\n}", "func EncodeStringWithPadding(data string, targetLength int) []byte {\n\tvar buf bytes.Buffer\n\n\tif len(data) < targetLength {\n\t\tfor i := 0; i < targetLength-len(data); i++ {\n\t\t\tbuf.WriteByte(0)\n\t\t}\n\t}\n\n\tbuf.Write([]byte(data))\n\treturn buf.Bytes()\n}", "func SetUserDataLayer(ud UserData) {\n\tuserData = ud\n}", "func PKCS7Padding(plainUnpaddedData []byte, blockSize int) []byte {\n\tpaddingSize := blockSize - len(plainUnpaddedData)%blockSize\n\tpadData := bytes.Repeat([]byte{byte(paddingSize)}, paddingSize)\n\treturn append(plainUnpaddedData, padData...)\n}", "func PKCS7Padding(plainUnpaddedData []byte, blockSize int) []byte {\n\tpaddingSize := blockSize - len(plainUnpaddedData)%blockSize\n\tpadData := bytes.Repeat([]byte{byte(paddingSize)}, paddingSize)\n\treturn append(plainUnpaddedData, padData...)\n}", "func (g *generator) InitData(kt *kit.Kit) error {\n\theader := http.Header{}\n\theader.Set(constant.UserKey, constant.BKUserForTestPrefix+\"gen-data\")\n\theader.Set(constant.RidKey, kt.Rid)\n\theader.Set(constant.AppCodeKey, \"test\")\n\theader.Add(\"Cookie\", \"bk_token=\"+constant.BKTokenForTest)\n\n\tg.data = make(AppReleaseMeta, 0)\n\n\t//if err := g.initApp1(kt.Ctx, header); err != nil {\n\t//\treturn err\n\t//}\n\n\tif err := g.initApp2(kt.Ctx, header); err != nil {\n\t\treturn err\n\t}\n\n\t//if err := g.initApp3(kt.Ctx, header); err != nil {\n\t//\treturn err\n\t//}\n\n\treturn nil\n}", "func ExpectKeyUsageUsageDataEncipherment(csr *certificatesv1.CertificateSigningRequest, _ crypto.Signer) error {\n\tcert, err := pki.DecodeX509CertificateBytes(csr.Status.Certificate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// taking the key usage here and use a binary OR to flip all non\n\t// KeyUsageDataEncipherment bits to 0 so if KeyUsageDataEncipherment the\n\t// value will be exactly x509.KeyUsageDataEncipherment\n\tusage := cert.KeyUsage\n\tusage &= x509.KeyUsageDataEncipherment\n\tif usage != x509.KeyUsageDataEncipherment 
{\n\t\treturn fmt.Errorf(\"Expected certificate to have KeyUsageDataEncipherment %#b, but got %v %#b\", x509.KeyUsageDataEncipherment, usage, usage)\n\t}\n\n\treturn nil\n}", "func (c *Context) WriteData(data interface{}) (err error) {\n\tvar bytes []byte\n\tif bytes, err = c.Serialize(data); err == nil {\n\t\t_, err = c.Write(bytes)\n\t}\n\treturn\n}", "func (decryptor *PgDecryptor) SetDataProcessor(processor base.DataProcessor) {\n\tdecryptor.dataProcessor = processor\n}", "func (b AcraBlock) SetDataEncryptionType(t DataEncryptionBackendType) error {\n\tdataEncryptionType, err := t.MarshalBinary()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcopy(b[DataEncryptionTypePosition:DataEncryptionTypePosition+DataEncryptionTypeSize], dataEncryptionType[:DataEncryptionTypeSize])\n\treturn nil\n}", "func (o *V2TcpConfiguration) SetData(v string) {\n\to.Data = &v\n}", "func (d *weakChecksum) addData(p ...byte) {\n\ts1, s2 := d.digest&0xffff, d.digest>>16\n\tfor len(p) > 0 {\n\t\tvar q []byte\n\t\tif len(p) > nmax {\n\t\t\tp, q = p[:nmax], p[nmax:]\n\t\t}\n\t\tfor _, x := range p {\n\t\t\ts1 += uint32(x)\n\t\t\ts2 += s1\n\t\t}\n\t\ts1 %= mod\n\t\ts2 %= mod\n\t\tp = q\n\t}\n\n\td.digest = (s2<<16 | s1)\n}", "func MergeDataKeyOptions(opts ...*DataKeyOptions) *DataKeyOptions {\n\tdko := DataKey()\n\tfor _, opt := range opts {\n\t\tif opt == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif opt.MasterKey != nil {\n\t\t\tdko.MasterKey = opt.MasterKey\n\t\t}\n\t\tif opt.KeyAltNames != nil {\n\t\t\tdko.KeyAltNames = opt.KeyAltNames\n\t\t}\n\t}\n\n\treturn dko\n}", "func (client *Client) DescribeVodTranscodeDataWithOptions(request *DescribeVodTranscodeDataRequest, runtime *util.RuntimeOptions) (_result *DescribeVodTranscodeDataResponse, _err error) {\n\t_err = util.ValidateModel(request)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\tquery := map[string]interface{}{}\n\tif !tea.BoolValue(util.IsUnset(request.EndTime)) {\n\t\tquery[\"EndTime\"] = request.EndTime\n\t}\n\n\tif 
!tea.BoolValue(util.IsUnset(request.Interval)) {\n\t\tquery[\"Interval\"] = request.Interval\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.OwnerId)) {\n\t\tquery[\"OwnerId\"] = request.OwnerId\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.Region)) {\n\t\tquery[\"Region\"] = request.Region\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.Specification)) {\n\t\tquery[\"Specification\"] = request.Specification\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.StartTime)) {\n\t\tquery[\"StartTime\"] = request.StartTime\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.Storage)) {\n\t\tquery[\"Storage\"] = request.Storage\n\t}\n\n\treq := &openapi.OpenApiRequest{\n\t\tQuery: openapiutil.Query(query),\n\t}\n\tparams := &openapi.Params{\n\t\tAction: tea.String(\"DescribeVodTranscodeData\"),\n\t\tVersion: tea.String(\"2017-03-21\"),\n\t\tProtocol: tea.String(\"HTTPS\"),\n\t\tPathname: tea.String(\"/\"),\n\t\tMethod: tea.String(\"POST\"),\n\t\tAuthType: tea.String(\"AK\"),\n\t\tStyle: tea.String(\"RPC\"),\n\t\tReqBodyType: tea.String(\"formData\"),\n\t\tBodyType: tea.String(\"json\"),\n\t}\n\t_result = &DescribeVodTranscodeDataResponse{}\n\t_body, _err := client.CallApi(params, req, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_err = tea.Convert(_body, &_result)\n\treturn _result, _err\n}", "func (_Vault *VaultCallerSession) SigDataUsed(arg0 [32]byte) (bool, error) {\n\treturn _Vault.Contract.SigDataUsed(&_Vault.CallOpts, arg0)\n}", "func pkcs7Padding(ciphertext []byte, blockSize int) []byte {\n\t// The bytes need to padding.\n\tpadding := blockSize - len(ciphertext)%blockSize\n\tpadtext := bytes.Repeat([]byte{byte(padding)}, padding)\n\treturn append(ciphertext, padtext...)\n}", "func (o *V2TcpConfiguration) HasData() bool {\n\tif o != nil && o.Data != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (d *DriverDMA) SetupData(mode sdcard.DataMode, buf []uint64, nbytes int) {\n\tif nbytes == 0 {\n\t\tpanicNoData()\n\t}\n\tif len(buf)*8 < nbytes 
{\n\t\tpanicShortBuf()\n\t}\n\tif uint(d.err)|uint(d.dmaErr) != 0 {\n\t\treturn\n\t}\n\td.dtc = DTEna | UseDMA | DataCtrl(mode)\n\tdmacfg := dma.PFC | dma.IncM\n\tif d.dtc&Recv == 0 {\n\t\tdmacfg |= dma.MTP\n\t}\n\tif nbytes&15 == 0 {\n\t\tdmacfg |= dma.FT4 | dma.PB4 | dma.MB4\n\t} else {\n\t\tdmacfg |= dma.FT2\n\t}\n\tch := d.dma\n\tch.Disable()\n\tch.Clear(dma.EvAll, dma.ErrAll)\n\tch.Setup(dmacfg)\n\tch.SetAddrP(unsafe.Pointer(&d.p.raw.FIFO))\n\tch.SetAddrM(unsafe.Pointer(&buf[0]))\n\tch.SetWordSize(4, 4)\n\t//ch.SetLen(len(buf) * 2) // Does STM32F1 require this? Use nbytes?\n\tch.Enable()\n\td.p.SetDataLen(nbytes)\n}", "func (o *CartaoProduto) SetDataNil() {\n\to.Data.Set(nil)\n}", "func (m *Msg) SetData(b []byte) {\n\tm.data = b\n}", "func (_Vault *VaultCaller) SigDataUsed(opts *bind.CallOpts, arg0 [32]byte) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Vault.contract.Call(opts, out, \"sigDataUsed\", arg0)\n\treturn *ret0, err\n}", "func PKCS7Padding(ciphertext []byte, blockSize int) []byte {\n\tpadding := blockSize - len(ciphertext)%blockSize\n\tpadtext := bytes.Repeat([]byte{byte(padding)}, padding)\n\treturn append(ciphertext, padtext...)\n}", "func PKCS7Padding(ciphertext []byte, blockSize int) []byte {\n\tpadding := blockSize - len(ciphertext)%blockSize\n\tpadtext := bytes.Repeat([]byte{byte(padding)}, padding)\n\treturn append(ciphertext, padtext...)\n}", "func TestNoDataObfuscate(t *testing.T) {\n\tif *fstest.RemoteName != \"\" {\n\t\tt.Skip(\"Skipping as -remote set\")\n\t}\n\tif runtime.GOOS == \"darwin\" {\n\t\tt.Skip(\"Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with\")\n\t}\n\ttempdir := filepath.Join(os.TempDir(), \"rclone-crypt-test-obfuscate\")\n\tname := \"TestCrypt4\"\n\tfstests.Run(t, &fstests.Opt{\n\t\tRemoteName: name + \":\",\n\t\tNilObject: (*crypt.Object)(nil),\n\t\tExtraConfig: []fstests.ExtraConfigItem{\n\t\t\t{Name: name, Key: \"type\", Value: 
\"crypt\"},\n\t\t\t{Name: name, Key: \"remote\", Value: tempdir},\n\t\t\t{Name: name, Key: \"password\", Value: obscure.MustObscure(\"potato2\")},\n\t\t\t{Name: name, Key: \"filename_encryption\", Value: \"obfuscate\"},\n\t\t\t{Name: name, Key: \"no_data_encryption\", Value: \"true\"},\n\t\t},\n\t\tSkipBadWindowsCharacters: true,\n\t\tUnimplementableFsMethods: []string{\"OpenWriterAt\", \"OpenChunkWriter\"},\n\t\tUnimplementableObjectMethods: []string{\"MimeType\"},\n\t\tQuickTestOK: true,\n\t})\n}", "func blockPadding(offset int64) (n int64) {\n\treturn -offset & (blockSize - 1)\n}", "func (_Withdrawable *WithdrawableCaller) IsSigDataUsed(opts *bind.CallOpts, arg0 [32]byte) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Withdrawable.contract.Call(opts, out, \"isSigDataUsed\", arg0)\n\treturn *ret0, err\n}", "func (o *SessionDataUpdateParams) WithTimeout(timeout time.Duration) *SessionDataUpdateParams {\n\to.SetTimeout(timeout)\n\treturn o\n}", "func Data(data []byte, format string) (cleartext []byte, err error) {\n\t// Initialize a Sops JSON store\n\tvar store sops.Store\n\tswitch format {\n\tcase \"json\":\n\t\tstore = &sopsjson.Store{}\n\tcase \"yaml\":\n\t\tstore = &sopsyaml.Store{}\n\tdefault:\n\t\tstore = &sopsjson.BinaryStore{}\n\t}\n\t// Load SOPS file and access the data key\n\ttree, err := store.LoadEncryptedFile(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkey, err := tree.Metadata.GetDataKey()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Decrypt the tree\n\tcipher := aes.NewCipher()\n\tmac, err := tree.Decrypt(key, cipher)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Compute the hash of the cleartext tree and compare it with\n\t// the one that was stored in the document. 
If they match,\n\t// integrity was preserved\n\toriginalMac, err := cipher.Decrypt(\n\t\ttree.Metadata.MessageAuthenticationCode,\n\t\tkey,\n\t\ttree.Metadata.LastModified.Format(time.RFC3339),\n\t)\n\tif originalMac != mac {\n\t\treturn nil, fmt.Errorf(\"Failed to verify data integrity. expected mac %q, got %q\", originalMac, mac)\n\t}\n\n\treturn store.EmitPlainFile(tree.Branch)\n}", "func ccData(val string, size int32, commit bool) *btspb.ReadRowsResponse_CellChunk {\n\treturn cc(nilStr, nilStr, nilStr, 0, val, size, commit, []string{})\n}", "func getPadding(packetLen int) int {\n\tif packetLen%4 == 0 {\n\t\treturn 0\n\t}\n\treturn 4 - (packetLen % 4)\n}", "func (o *StorageHyperFlexStorageContainer) HasDataBlockSize() bool {\n\tif o != nil && o.DataBlockSize != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func prepareData(data []byte) ([]byte, error) {\n\n\t// convert nil data to empty data\n\tif data == nil {\n\t\tdata = []byte{}\n\t}\n\n\t// check data length\n\tif len(data) > MaxDataLength {\n\t\treturn []byte{}, errors.New(\"too much data for one image\")\n\t}\n\n\t//-------------------------------------------\n\n\t// extend data slice for header\n\tret := make([]byte, header+len(data))\n\tcopy(ret[header:], data)\n\n\t// add HEADER: version\n\tret[0] = 0x01 // add 1 byte\n\n\t// add HEADER: data length\n\tbinary.BigEndian.PutUint32(ret[1:5], uint32(len(data))) // add 4 byte\n\n\t// add HEADER: checksum\n\tsum := md5.Sum(data)\n\tcopy(ret[5:21], sum[:]) // add 16 byte\n\n\t//-------------------------------------------\n\n\t// final size check\n\tif len(ret) > dimension*dimension {\n\t\treturn []byte{}, errors.New(\"final size check fail\")\n\t}\n\n\t// success: return data with header\n\treturn ret, nil\n}", "func (o *IntegrationsManualHTTPSCreateParams) SetData(data IntegrationsManualHTTPSCreateBody) {\n\to.Data = data\n}", "func (o *CartaoProduto) HasData() bool {\n\tif o != nil && o.Data.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func 
(_Vault *VaultCallerSession) IsSigDataUsed(hash [32]byte) (bool, error) {\n\treturn _Vault.Contract.IsSigDataUsed(&_Vault.CallOpts, hash)\n}", "func (_Vault *VaultSession) IsSigDataUsed(hash [32]byte) (bool, error) {\n\treturn _Vault.Contract.IsSigDataUsed(&_Vault.CallOpts, hash)\n}", "func (decryptor *PgDecryptor) ReadData(symmetricKey, zoneID []byte, reader io.Reader) ([]byte, error) {\n\t/* due to using two decryptors can be case when one decryptor match 2 bytes\n\tfrom TagBegin then didn't match anymore but another decryptor matched at\n\tthis time and was successfully used for decryption, we need return 2 bytes\n\tmatched and buffered by first decryptor and decrypted data from the second\n\n\tfor example case of matching begin tag:\n\tBEGIN_TA - failed decryptor1\n\t00BEGIN_TAG - successful decryptor2\n\tin this case first decryptor1 matched not full begin_tag and failed on 'G' but\n\tat this time was matched decryptor2 and successfully matched next bytes and decrypted data\n\tso we need return diff of two matches 'BE' and decrypted data\n\t*/\n\n\t// add zone_id to log if it used\n\tlogger := log.NewEntry(decryptor.logger.Logger)\n\tif decryptor.GetMatchedZoneID() != nil {\n\t\tlogger = decryptor.logger.WithField(\"zone_id\", string(decryptor.GetMatchedZoneID()))\n\t\t// use temporary logger in matched decryptor\n\t\tdecryptor.binaryDecryptor.SetLogger(logger)\n\t\t// reset to default logger without zone_id\n\t\tdefer decryptor.binaryDecryptor.SetLogger(decryptor.logger)\n\t}\n\n\t// take length of fully matched tag begin (each decryptor match tag begin with different length)\n\tcorrectMatchBeginTagLength := len(decryptor.binaryDecryptor.GetMatched())\n\t// take diff count of matched between two decryptors\n\tfalseBufferedBeginTagLength := decryptor.matchIndex - correctMatchBeginTagLength\n\tif falseBufferedBeginTagLength > 0 {\n\t\tlogger.Debugf(\"Return with false matched %v bytes\", falseBufferedBeginTagLength)\n\t\tdecrypted, err := 
decryptor.binaryDecryptor.ReadData(symmetricKey, zoneID, reader)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlogger.Debugln(\"Decrypted AcraStruct\")\n\t\treturn append(decryptor.matchBuffer[:falseBufferedBeginTagLength], decrypted...), nil\n\t}\n\n\tdecrypted, err := decryptor.binaryDecryptor.ReadData(symmetricKey, zoneID, reader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlogger.Debugln(\"Decrypted AcraStruct\")\n\treturn decrypted, nil\n}", "func DecryptUseCBCWithDefaultProtocol(cipherText, key []byte) ([]byte, error) {\n\tif len(cipherText) < 16 {\n\t\treturn nil, errors.New(\"decrypt excepted iv parameter\")\n\t}\n\tplainText, err := DecryptUseCBC(cipherText[16:], key, cipherText[:16])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn plainText, nil\n}", "func NewDnsViewparamDataData() *DnsViewparamDataData {\n\tthis := DnsViewparamDataData{}\n\treturn &this\n}", "func MaxDataBytes(maxBytes int64, keyType crypto.KeyType, evidenceBytes int64, valsCount int) int64 {\n\tmaxDataBytes := maxBytes -\n\t\tMaxOverheadForBlock -\n\t\tMaxHeaderBytes -\n\t\tMaxCoreChainLockSize -\n\t\tMaxCommitOverheadBytes -\n\t\tevidenceBytes\n\n\tif maxDataBytes < 0 {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"Negative MaxDataBytes. 
Block.MaxBytes=%d is too small to accommodate header&lastCommit&evidence=%d\",\n\t\t\tmaxBytes,\n\t\t\t-(maxDataBytes - maxBytes),\n\t\t))\n\t}\n\n\treturn maxDataBytes\n}", "func DataServiceDatasetV2DataTransferProtocol(value string) DataServiceDatasetV2Attr {\n\treturn func(m optionalAttr) {\n\t\tm[\"data_transfer_protocol\"] = value\n\t}\n}", "func PKCS7Padding(text string, length int) string {\n\tpaddingLength := length - (len(text) % length)\n\n\tbs := make([]byte, 1)\n\tbinary.PutUvarint(bs, uint64(paddingLength))\n\n\tpadding := bytes.Repeat(bs, paddingLength)\n\n\treturn text + string(padding)\n}", "func (o *StorageHyperFlexStorageContainer) SetDataBlockSize(v int64) {\n\to.DataBlockSize = &v\n}", "func (ic *Context) InitNonceData() {\n\tif tx, ok := ic.Container.(*transaction.Transaction); ok {\n\t\tcopy(ic.NonceData[:], tx.Hash().BytesBE())\n\t}\n\tif ic.Block != nil {\n\t\tnonce := ic.Block.Nonce\n\t\tnonce ^= binary.LittleEndian.Uint64(ic.NonceData[:])\n\t\tbinary.LittleEndian.PutUint64(ic.NonceData[:], nonce)\n\t}\n}", "func (self *BytecodeReader) SkipPadding() {\n\tfor self.pc%4 != 0 {\n\t\tself.ReadUint8()\n\t}\n}", "func (ce *ClientEncryption) CreateDataKey(ctx context.Context, kmsProvider string,\n\topts ...*options.DataKeyOptions) (primitive.Binary, error) {\n\n\t// translate opts to mcopts.DataKeyOptions\n\tdko := options.MergeDataKeyOptions(opts...)\n\tco := mcopts.DataKey().SetKeyAltNames(dko.KeyAltNames)\n\tif dko.MasterKey != nil {\n\t\tkeyDoc, err := marshal(\n\t\t\tdko.MasterKey,\n\t\t\tce.keyVaultClient.bsonOpts,\n\t\t\tce.keyVaultClient.registry)\n\t\tif err != nil {\n\t\t\treturn primitive.Binary{}, err\n\t\t}\n\t\tco.SetMasterKey(keyDoc)\n\t}\n\tif dko.KeyMaterial != nil {\n\t\tco.SetKeyMaterial(dko.KeyMaterial)\n\t}\n\n\t// create data key document\n\tdataKeyDoc, err := ce.crypt.CreateDataKey(ctx, kmsProvider, co)\n\tif err != nil {\n\t\treturn primitive.Binary{}, err\n\t}\n\n\t// insert key into key vault\n\t_, err = 
ce.keyVaultColl.InsertOne(ctx, dataKeyDoc)\n\tif err != nil {\n\t\treturn primitive.Binary{}, err\n\t}\n\n\tsubtype, data := bson.Raw(dataKeyDoc).Lookup(\"_id\").Binary()\n\treturn primitive.Binary{Subtype: subtype, Data: data}, nil\n}", "func PKCSSPadding(ciphertext []byte, blockSize int) []byte {\n\tpadding := blockSize - len(ciphertext)%blockSize\n\tpadtext := bytes.Repeat([]byte{byte(padding)}, padding)\n\treturn append(ciphertext, padtext...)\n}", "func SetColumnPadding(padding int) {\n\tif padding < 0 {\n\t\tcolumnPadding = 2 // default value\n\t} else {\n\t\tcolumnPadding = padding\n\t}\n}", "func (b AcraBlock) setEncryptedData(data []byte) error {\n\tif len(b) < EncryptedDataEncryptionKeyPosition {\n\t\treturn ErrInvalidAcraBlock\n\t}\n\tkeySize := b.EncryptedDataEncryptionKeyLength()\n\tif len(b) < EncryptedDataEncryptionKeyPosition+keySize {\n\t\treturn ErrInvalidAcraBlock\n\t}\n\tif n := copy(b[EncryptedDataEncryptionKeyPosition+keySize:], data); n != len(data) {\n\t\treturn ErrInvalidAcraBlock\n\t}\n\treturn nil\n}", "func BufferData(target uint32, size int, data unsafe.Pointer, usage uint32) {\n\tsyscall.Syscall6(gpBufferData, 4, uintptr(target), uintptr(size), uintptr(data), uintptr(usage), 0, 0)\n}", "func (o *Wireless) SetData(v map[string]string) {\n\to.Data = &v\n}", "func (o *PrivilegedTextDataAllOf) SetData(v string) {\n\to.Data = &v\n}", "func NewDataEncryptor(keyStore keystore.DataEncryptorKeyStore) (*DataEncryptor, error) {\n\treturn &DataEncryptor{keyStore: keyStore, needSkipEncryptionFunc: encryptor.EmptyCheckFunction}, nil\n}", "func Conv3DBackpropFilterV2DataFormat(value string) Conv3DBackpropFilterV2Attr {\n\treturn func(m optionalAttr) {\n\t\tm[\"data_format\"] = value\n\t}\n}", "func (a *Attributes) RemoveDataAttribute(name string) bool {\n\tsuffix, _ := ToDataAttr(name)\n\tname = \"data-\" + suffix\n\treturn a.RemoveAttribute(name)\n}", "func (d *DV4Mini) WriteTXBufferData(data []byte) {\n\tvar packetSize int = 34 // full 36\n\t// var 
counter int = 0\n\t// var crcValue byte\n\n\t// []byte{0x04, data}\n\tcmd := []byte{ADFWRITE}\n\n\tfor i := 0; i < len(data); i += packetSize {\n\t\ttime.Sleep(time.Millisecond * 30)\n\n\t\tbatch := data[i:min(i+packetSize, len(data))]\n\n\t\t// if (counter % 2) == 0 {\n\t\t// \tbatch = append([]byte{0x91}, batch...)\n\t\t// } else {\n\t\t// \tbatch = append([]byte{0x23}, batch...)\n\t\t// }\n\n\t\t// counter++\n\t\tlog.Printf(\"[>>>] \\n%s\", hex.Dump(batch))\n\n\t\tfullPacket := cmd\n\t\tfullPacket = append(fullPacket, batch...)\n\n\t\td.sendCmd(fullPacket)\n\t}\n\n\t// d.FlushTXBuffer()\n\td.Port.Flush()\n}", "func (_TestClient *TestClientTransactorSession) SetOptEpochData(epoch *big.Int, fullSizeIn128Resultion *big.Int, branchDepth *big.Int, merkleNodes []*big.Int, start *big.Int, numElems *big.Int) (*types.Transaction, error) {\n\treturn _TestClient.Contract.SetOptEpochData(&_TestClient.TransactOpts, epoch, fullSizeIn128Resultion, branchDepth, merkleNodes, start, numElems)\n}", "func (_Vault *VaultCaller) IsSigDataUsed(opts *bind.CallOpts, hash [32]byte) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Vault.contract.Call(opts, out, \"isSigDataUsed\", hash)\n\treturn *ret0, err\n}", "func (s *State) SetSpecData(data []byte) {\n\ts.specData = data\n}", "func (_ERC725 *ERC725TransactorSession) SetData(_key [32]byte, _value []byte) (*types.Transaction, error) {\n\treturn _ERC725.Contract.SetData(&_ERC725.TransactOpts, _key, _value)\n}", "func (o LicenseOutput) Datacenter() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *License) pulumi.StringPtrOutput { return v.Datacenter }).(pulumi.StringPtrOutput)\n}" ]
[ "0.5258283", "0.4555469", "0.4480234", "0.44573864", "0.43533745", "0.4337491", "0.4326558", "0.42585558", "0.42437828", "0.42047027", "0.41544282", "0.41253066", "0.41225743", "0.41110697", "0.40878302", "0.40734228", "0.4069261", "0.40326965", "0.40245518", "0.40231052", "0.4017667", "0.4004762", "0.40028828", "0.3981506", "0.39749408", "0.39524114", "0.39291194", "0.39017254", "0.3901331", "0.39003903", "0.38983864", "0.38898408", "0.3888365", "0.38866386", "0.38855574", "0.38787165", "0.38774544", "0.38761184", "0.38349083", "0.38249984", "0.38229233", "0.381717", "0.3800436", "0.3800436", "0.37910527", "0.37802708", "0.37755", "0.37729806", "0.37717575", "0.37642992", "0.3755665", "0.37515277", "0.3742783", "0.3741775", "0.37301615", "0.37272277", "0.37262666", "0.37131295", "0.37040007", "0.3703508", "0.3694991", "0.3694991", "0.36933163", "0.36739767", "0.36727476", "0.36682856", "0.36660922", "0.3662528", "0.36623806", "0.3659459", "0.36552295", "0.36548847", "0.3653241", "0.3645022", "0.36347246", "0.36323908", "0.36302263", "0.36103266", "0.36076522", "0.36073667", "0.3605999", "0.3605248", "0.35974205", "0.35925213", "0.35916698", "0.35916293", "0.35876635", "0.35860714", "0.35770276", "0.35668093", "0.35622153", "0.35488746", "0.35482797", "0.3547052", "0.3544767", "0.3544515", "0.35394973", "0.3536466", "0.3536421", "0.35343453" ]
0.7320334
0
UseIndexPadding sets the padding between data payload and its index on Finalize.
func UseIndexPadding(p uint64) Option { return func(o *Options) { o.IndexPadding = p } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *BasePlSqlParserListener) ExitUsing_index_clause(ctx *Using_index_clauseContext) {}", "func UseDataPadding(p uint64) Option {\n\treturn func(o *Options) {\n\t\to.DataPadding = p\n\t}\n}", "func (vm *vrfManager) releaseIndex(vrf *VRF) {\n\tvm.byIndex[int(vrf.index)] = nil\n}", "func (dict *Dictionary) DropIndex() {\n\tdict.shortIndex = nil\n\tdict.longIndex = nil\n}", "func IndexFixer(index int, listSize int) int {\n\tindex = index - 1\n\n\tif index <= 0 {\n\t\tindex = 0\n\t} else if index > listSize-1 {\n\t\tindex = listSize - 1\n\t}\n\n\treturn index\n}", "func indexTruncateInTxn(\n\tctx context.Context,\n\ttxn *kv.Txn,\n\texecCfg *ExecutorConfig,\n\tevalCtx *tree.EvalContext,\n\ttableDesc catalog.TableDescriptor,\n\tidx *descpb.IndexDescriptor,\n\ttraceKV bool,\n) error {\n\talloc := &rowenc.DatumAlloc{}\n\tvar sp roachpb.Span\n\tfor done := false; !done; done = sp.Key == nil {\n\t\trd := row.MakeDeleter(execCfg.Codec, tableDesc, nil /* requestedCols */)\n\t\ttd := tableDeleter{rd: rd, alloc: alloc}\n\t\tif err := td.init(ctx, txn, evalCtx); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar err error\n\t\tsp, err = td.deleteIndex(\n\t\t\tctx, idx, sp, indexTruncateChunkSize, traceKV,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t// Remove index zone configs.\n\treturn RemoveIndexZoneConfigs(ctx, txn, execCfg, tableDesc, []descpb.IndexDescriptor{*idx})\n}", "func (i *Index) Write(off uint32, pos uint64) error {\n\tif uint64(len(i.mmap)) < i.size+entWidth {\n\t\treturn lib.Wrap(io.EOF, \"Not enough space to append index data\")\n\t}\n\n\tenc.PutUint32(i.mmap[i.size:i.size+offWidth], off)\n\tenc.PutUint64(i.mmap[i.size+offWidth:i.size+entWidth], pos)\n\n\ti.size += entWidth\n\n\treturn nil\n}", "func IndexWrite(x *suffixarray.Index, w io.Writer) error", "func clearIndex(\n\tctx context.Context,\n\texecCfg *sql.ExecutorConfig,\n\ttableDesc catalog.TableDescriptor,\n\tindex descpb.IndexDescriptor,\n) error {\n\tlog.Infof(ctx, \"clearing index %d 
from table %d\", index.ID, tableDesc.GetID())\n\tif index.IsInterleaved() {\n\t\treturn errors.Errorf(\"unexpected interleaved index %d\", index.ID)\n\t}\n\n\tsp := tableDesc.IndexSpan(execCfg.Codec, index.ID)\n\tstart, err := keys.Addr(sp.Key)\n\tif err != nil {\n\t\treturn errors.Errorf(\"failed to addr index start: %v\", err)\n\t}\n\tend, err := keys.Addr(sp.EndKey)\n\tif err != nil {\n\t\treturn errors.Errorf(\"failed to addr index end: %v\", err)\n\t}\n\trSpan := roachpb.RSpan{Key: start, EndKey: end}\n\treturn clearSpanData(ctx, execCfg.DB, execCfg.DistSender, rSpan)\n}", "func (mc *MockContiv) SetPodAppNsIndex(pod podmodel.ID, nsIndex uint32) {\n\tmc.podAppNs[pod] = nsIndex\n}", "func (self *SinglePad) SetIndexA(member int) {\n self.Object.Set(\"index\", member)\n}", "func (s *BaseCymbolListener) ExitIndex(ctx *IndexContext) {}", "func WriteIndex(index common.Index) error {\n\tbytes, err := json.Marshal(index)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(indexCachePath, bytes, 0600)\n\treturn err\n}", "func (rb *RB) CommitIndex_ignore_gap(index int) error {\n\tif index < 0 {\n\t\tmsg := fmt.Sprintf(\"index:%v < 0\", index)\n\t\treturn errors.New(msg)\n\t}\n\tidx := rb.Arrayindex(index)\n\trb.mu[idx].Lock()\n\tdefer rb.mu[idx].Unlock()\n\t// Index messed up\n\tif rb.idx[idx] != index {\n\t\tmsg := fmt.Sprintf(\"commit index:%v != stored index %v\", index, rb.idx[idx])\n\t\treturn errors.New(msg)\n\t}\n\trb.commit[idx] = true\n\treturn nil\n}", "func (s *BasevhdlListener) ExitIndex_specification(ctx *Index_specificationContext) {}", "func dataframeResetIndex(_ *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {\n\tif err := starlark.UnpackArgs(\"reset_index\", args, kwargs); err != nil {\n\t\treturn nil, err\n\t}\n\tself := b.Receiver().(*DataFrame)\n\n\tif self.index == nil {\n\t\treturn self, nil\n\t}\n\n\tnewColumns := append([]string{\"index\"}, 
self.columns.texts...)\n\tnewBody := make([]Series, 0, self.numCols())\n\n\tnewBody = append(newBody, Series{which: typeObj, valObjs: self.index.texts})\n\tfor _, col := range self.body {\n\t\tnewBody = append(newBody, col)\n\t}\n\n\treturn &DataFrame{\n\t\tcolumns: NewIndex(newColumns, \"\"),\n\t\tbody: newBody,\n\t}, nil\n}", "func (_e *MockDataCoord_Expecter) DropIndex(ctx interface{}, req interface{}) *MockDataCoord_DropIndex_Call {\n\treturn &MockDataCoord_DropIndex_Call{Call: _e.mock.On(\"DropIndex\", ctx, req)}\n}", "func (w *worker) cleanupPhysicalTableIndex(t table.PhysicalTable, reorgInfo *reorgInfo) error {\n\tlogutil.BgLogger().Info(\"start to clean up index\", zap.String(\"category\", \"ddl\"), zap.String(\"job\", reorgInfo.Job.String()), zap.String(\"reorgInfo\", reorgInfo.String()))\n\treturn w.writePhysicalTableRecord(w.sessPool, t, typeCleanUpIndexWorker, reorgInfo)\n}", "func (du *DatumUpdate) SetNillableIndex(i *int) *DatumUpdate {\n\tif i != nil {\n\t\tdu.SetIndex(*i)\n\t}\n\treturn du\n}", "func (wou *WorkOrderUpdate) ClearIndex() *WorkOrderUpdate {\n\twou.index = nil\n\twou.clearindex = true\n\treturn wou\n}", "func (s *BasePlSqlParserListener) ExitAlter_index(ctx *Alter_indexContext) {}", "func (m *hasher) maskIndex(index []byte, depth int) []byte {\n\tif got, want := len(index), m.Size(); got != want {\n\t\tpanic(fmt.Sprintf(\"index len: %d, want %d\", got, want))\n\t}\n\tif got, want := depth, m.BitLen(); got < 0 || got > want {\n\t\tpanic(fmt.Sprintf(\"depth: %d, want <= %d && > 0\", got, want))\n\t}\n\n\t// Create an empty index Size() bytes long.\n\tret := make([]byte, m.Size())\n\tif depth > 0 {\n\t\t// Copy the first depthBytes.\n\t\tdepthBytes := (depth + 7) >> 3\n\t\tcopy(ret, index[:depthBytes])\n\t\t// Mask off unwanted bits in the last byte.\n\t\tret[depthBytes-1] = ret[depthBytes-1] & leftmask[depth%8]\n\t}\n\treturn ret\n}", "func (s *BasePlSqlParserListener) ExitDrop_index(ctx *Drop_indexContext) {}", "func (wou 
*WorkOrderUpdate) SetNillableIndex(i *int) *WorkOrderUpdate {\n\tif i != nil {\n\t\twou.SetIndex(*i)\n\t}\n\treturn wou\n}", "func (o *KeyValueOrdered) RemoveIndex(idx int) (cell KeyValueCapsule) {\n\tcell = o.s[idx]\n\tdelete(o.m, o.s[idx].K)\n\to.shift(idx+1, len(o.s), -1)\n\to.s = append(o.s[:idx], o.s[idx+1:]...)\n\treturn\n}", "func (x *Index) Write(w io.Writer) error", "func UseIndex(designDocument, name string) Parameter {\n\treturn func(pa Parameterizable) {\n\t\tif name == \"\" {\n\t\t\tpa.SetParameter(\"use_index\", designDocument)\n\t\t} else {\n\t\t\tpa.SetParameter(\"use_index\", []string{designDocument, name})\n\t\t}\n\t}\n}", "func (s *BasePlSqlParserListener) ExitDrop_index_partition(ctx *Drop_index_partitionContext) {}", "func UseIndexCodec(c multicodec.Code) Option {\n\treturn func(o *Options) {\n\t\to.IndexCodec = c\n\t}\n}", "func (o *Object) SetIdx(idx uint32, val interface{}) error {\n\treturn set(o, \"\", idx, val)\n}", "func (duo *DatumUpdateOne) SetNillableIndex(i *int) *DatumUpdateOne {\n\tif i != nil {\n\t\tduo.SetIndex(*i)\n\t}\n\treturn duo\n}", "func (mgr *Manager) ClosePIndex(pindex *PIndex) error {\n\treturn syncWorkReq(mgr.janitorCh, JANITOR_CLOSE_PINDEX,\n\t\t\"api-ClosePIndex\", pindex)\n}", "func (s *BasePlSqlParserListener) ExitIndex_properties(ctx *Index_propertiesContext) {}", "func (s *BasePlSqlParserListener) ExitModify_index_default_attrs(ctx *Modify_index_default_attrsContext) {\n}", "func TestAfterIndexWrapAroundCorrectIndex(t *testing.T) {\n\tremoveTestFiles()\n\tstartID := 999989\n\tendID := 1000000\n\n\t//Create some old files to clean up\n\tfor i := startID; i < endID; i = i + 1 {\n\t\tcreateTestFile(i, t)\n\t}\n\n\tfor i := 2; i < 10; i = i + 1 {\n\t\tcreateTestFile(i, t)\n\t}\n\n\tstorage := getNewStorageManager()\n\tlog.Println(storage.FileList())\n\n\tif storage.Index() != 10 {\n\t\tt.Errorf(\"Filename index (%d) is incorrect\", storage.Index())\n\t}\n\n\tfileList := storage.FileList()\n\n\tassert.NotNil(t, 
fileList, \"FileList should not be nil\")\n\tassert.NotEmpty(t, fileList, \"FileList should not be empty\")\n\n\tx := 0\n\tfor i := startID; i < endID; i = i + 1 {\n\t\tassert.Equal(t, fileList[x], storage.WorkDir()+C.SLASH+T_PREFIX+fmt.Sprintf(FILENAME_FORMAT, i)+T_SUFFIX)\n\t\tx = x + 1\n\t}\n}", "func (wouo *WorkOrderUpdateOne) SetNillableIndex(i *int) *WorkOrderUpdateOne {\n\tif i != nil {\n\t\twouo.SetIndex(*i)\n\t}\n\treturn wouo\n}", "func (x *Index) Write(w io.Writer) error {}", "func (i *index) Write(off uint32, pos uint64) error {\n\tif uint64(len(i.mmap)) < i.size+entWidth {\n\t\treturn io.EOF\n\t}\n\n\tenc.PutUint32(i.mmap[i.size:i.size+offWidth], off)\n\tenc.PutUint64(i.mmap[i.size+offWidth:i.size+entWidth], pos)\n\ti.size += uint64(entWidth)\n\treturn nil\n}", "func indexEnc() {\n\tfor i := 0; i < indexSize; i++ {\n\t\tindexItemEnc(testData[i], i)\n\t}\n}", "func DeleteIndex(a interface{}, index int) interface{} {\n\tswitch a.(type) {\n\tcase []int:\n\t\treturn DeleteIndexInt(a.([]int), index)\n\tdefault:\n\t\tpanic(\"not support type\")\n\t}\n}", "func (pw *PixelWand) SetIndex(index *IndexPacket) {\n\tC.PixelSetIndex(pw.pw, C.IndexPacket(*index))\n\truntime.KeepAlive(pw)\n}", "func (g *Index) ResetIndex(c *Client, tree Treeish) error {\n\tnewEntries, err := ExpandGitTreeIntoIndexes(c, tree, true, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\tg.NumberIndexEntries = uint32(len(newEntries))\n\tg.Objects = newEntries\n\treturn nil\n}", "func (this *KeyspaceTerm) SetIndexJoinNest() {\n\tthis.property |= TERM_INDEX_JOIN_NEST\n}", "func (c *Chip8) SetIndex() {\n\tc.index = c.inst & 0x0FFF\n}", "func (i ImageIndexer) DeleteFromIndex(request DeleteFromIndexRequest) error {\n\tbuildDir, outDockerfile, cleanup, err := buildContext(request.Generate, request.OutDockerfile)\n\tdefer cleanup()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdatabasePath, err := i.ExtractDatabase(buildDir, request.FromIndex, request.CaFile, request.SkipTLSVerify, 
request.PlainHTTP)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Run opm registry delete on the database\n\tdeleteFromRegistryReq := registry.DeleteFromRegistryRequest{\n\t\tPackages: request.Operators,\n\t\tInputDatabase: databasePath,\n\t\tPermissive: request.Permissive,\n\t}\n\n\t// Delete the bundles from the registry\n\terr = i.RegistryDeleter.DeleteFromRegistry(deleteFromRegistryReq)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// generate the dockerfile\n\tdockerfile := i.DockerfileGenerator.GenerateIndexDockerfile(request.BinarySourceImage, databasePath)\n\terr = write(dockerfile, outDockerfile, i.Logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif request.Generate {\n\t\treturn nil\n\t}\n\n\t// build the dockerfile\n\terr = build(outDockerfile, request.Tag, i.CommandRunner, i.Logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (va *VertexArray) SetIndexData(data []uint32) {\n\t// Index Buffer Object\n\tgl.GenBuffers(1, &va.ibo) // generates the buffer (or multiple)\n\tgl.BindBuffer(gl.ELEMENT_ARRAY_BUFFER, va.ibo) // tells OpenGL what kind of buffer this is\n\n\t// BufferData assigns data to the buffer.\n\tgl.BufferData(gl.ELEMENT_ARRAY_BUFFER, len(data)*4, gl.Ptr(data), gl.STATIC_DRAW)\n\n\tva.vertices = len(data)\n}", "func (wouo *WorkOrderUpdateOne) ClearIndex() *WorkOrderUpdateOne {\n\twouo.index = nil\n\twouo.clearindex = true\n\treturn wouo\n}", "func (ll *LevelLedger) SetClassIndex(ref *record.Reference, idx *index.ClassLifeline) error {\n\tk := prefixkey(scopeIDLifeline, ref.Key())\n\tencoded, err := index.EncodeClassLifeline(idx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ll.ldb.Put(k, encoded, nil)\n}", "func (c *index) Create(sctx sessionctx.Context, rm kv.RetrieverMutator, indexedValues []types.Datum, h int64, opts ...table.CreateIdxOptFunc) (int64, error) {\n\tvar opt table.CreateIdxOpt\n\tfor _, fn := range opts {\n\t\tfn(&opt)\n\t}\n\tss := opt.AssertionProto\n\twriteBufs := 
sctx.GetSessionVars().GetWriteStmtBufs()\n\tskipCheck := sctx.GetSessionVars().LightningMode || sctx.GetSessionVars().StmtCtx.BatchCheck\n\tkey, distinct, err := c.GenIndexKey(sctx.GetSessionVars().StmtCtx, indexedValues, h, writeBufs.IndexKeyBuf)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tctx := opt.Ctx\n\tif opt.Untouched {\n\t\ttxn, err1 := sctx.Txn(true)\n\t\tif err1 != nil {\n\t\t\treturn 0, err1\n\t\t}\n\t\t// If the index kv was untouched(unchanged), and the key/value already exists in mem-buffer,\n\t\t// should not overwrite the key with un-commit flag.\n\t\t// So if the key exists, just do nothing and return.\n\t\t_, err = txn.GetMemBuffer().Get(ctx, key)\n\t\tif err == nil {\n\t\t\treturn 0, nil\n\t\t}\n\t}\n\n\t// save the key buffer to reuse.\n\twriteBufs.IndexKeyBuf = key\n\tif !distinct {\n\t\t// non-unique index doesn't need store value, write a '0' to reduce space\n\t\tvalue := []byte{'0'}\n\t\tif opt.Untouched {\n\t\t\tvalue[0] = kv.UnCommitIndexKVFlag\n\t\t}\n\t\terr = rm.Set(key, value)\n\t\tif ss != nil {\n\t\t\tss.SetAssertion(key, kv.None)\n\t\t}\n\t\treturn 0, err\n\t}\n\n\tif skipCheck {\n\t\tvalue := EncodeHandle(h)\n\t\tif opt.Untouched {\n\t\t\tvalue = append(value, kv.UnCommitIndexKVFlag)\n\t\t}\n\t\terr = rm.Set(key, value)\n\t\tif ss != nil {\n\t\t\tss.SetAssertion(key, kv.None)\n\t\t}\n\t\treturn 0, err\n\t}\n\n\tif ctx != nil {\n\t\tif span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {\n\t\t\tspan1 := span.Tracer().StartSpan(\"index.Create\", opentracing.ChildOf(span.Context()))\n\t\t\tdefer span1.Finish()\n\t\t\tctx = opentracing.ContextWithSpan(ctx, span1)\n\t\t}\n\t} else {\n\t\tctx = context.TODO()\n\t}\n\n\tvar value []byte\n\tvalue, err = rm.Get(ctx, key)\n\t// If (opt.Untouched && err == nil) is true, means the key is exists and exists in TiKV, not in txn mem-buffer,\n\t// then should also write the untouched index key/value to mem-buffer to make sure the data\n\t// is consistent with the 
index in txn mem-buffer.\n\tif kv.IsErrNotFound(err) || (opt.Untouched && err == nil) {\n\t\tv := EncodeHandle(h)\n\t\tif opt.Untouched {\n\t\t\tv = append(v, kv.UnCommitIndexKVFlag)\n\t\t}\n\t\terr = rm.Set(key, v)\n\t\tif ss != nil {\n\t\t\tss.SetAssertion(key, kv.NotExist)\n\t\t}\n\t\treturn 0, err\n\t}\n\n\thandle, err := DecodeHandle(value)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn handle, kv.ErrKeyExists\n}", "func (s *BasePlSqlParserListener) ExitCreate_index(ctx *Create_indexContext) {}", "func (k Keeper) RemoveCdpOwnerIndex(ctx sdk.Context, cdp types.CDP) {\n\tstore := prefix.NewStore(ctx.KVStore(k.key), types.CdpIDKeyPrefix)\n\tcdpIDs, found := k.GetCdpIdsByOwner(ctx, cdp.Owner)\n\tif !found {\n\t\treturn\n\t}\n\tupdatedCdpIds := []uint64{}\n\tfor _, id := range cdpIDs {\n\t\tif id != cdp.ID {\n\t\t\tupdatedCdpIds = append(updatedCdpIds, id)\n\t\t}\n\t}\n\tif len(updatedCdpIds) == 0 {\n\t\tstore.Delete(cdp.Owner)\n\t}\n\tstore.Set(cdp.Owner, k.cdc.MustMarshalBinaryLengthPrefixed(updatedCdpIds))\n\n}", "func (s *BasePlSqlParserListener) ExitCluster_index_clause(ctx *Cluster_index_clauseContext) {}", "func FixMaxEntryIndex(rdb *Store, profile *pb.Profile) error {\n\tuuid1, err := uuid.FromString(profile.Uuid)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// MAX Delimiter Key\n\tkey := MaxUUIDFlakeKey(TableEntryIndex, uuid1)\n\treturn rdb.Put(key.Bytes(), []byte(\"0000\"))\n}", "func UseIndex() *ishell.Cmd {\n\n\treturn &ishell.Cmd{\n\t\tName: \"use\",\n\t\tHelp: \"Select index to use for subsequent document operations\",\n\t\tFunc: func(c *ishell.Context) {\n\t\t\tif context == nil {\n\t\t\t\terrorMsg(c, errNotConnected)\n\t\t\t} else {\n\t\t\t\tdefer restorePrompt(c)\n\t\t\t\tif len(c.Args) < 1 {\n\t\t\t\t\tif context.ActiveIndex != \"\" {\n\t\t\t\t\t\tcprintlist(c, \"Using index \", cy(context.ActiveIndex))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcprintln(c, \"No index is in use\")\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif c.Args[0] == \"--\" 
{\n\t\t\t\t\tif context.ActiveIndex != \"\" {\n\t\t\t\t\t\tcprintlist(c, \"Index \", cy(context.ActiveIndex), \" is no longer in use\")\n\t\t\t\t\t\tcontext.ActiveIndex = \"\"\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcprintln(c, \"No index is in use\")\n\t\t\t\t\t}\n\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ts, err := context.ResolveAndValidateIndex(c.Args[0])\n\t\t\t\tif err != nil {\n\t\t\t\t\terrorMsg(c, err.Error())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcontext.ActiveIndex = s\n\t\t\t\tif s != c.Args[0] {\n\t\t\t\t\tcprintlist(c, \"For alias \", cyb(c.Args[0]), \" selected index \", cy(s))\n\t\t\t\t} else {\n\t\t\t\t\tcprintlist(c, \"Selected index \", cy(s))\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t}\n\n}", "func (g Index) WriteIndex(file io.Writer) error {\n\tsort.Sort(ByPath(g.Objects))\n\ts := sha1.New()\n\tw := io.MultiWriter(file, s)\n\tbinary.Write(w, binary.BigEndian, g.fixedGitIndex)\n\tfor _, entry := range g.Objects {\n\t\tbinary.Write(w, binary.BigEndian, entry.FixedIndexEntry)\n\t\tbinary.Write(w, binary.BigEndian, []byte(entry.PathName))\n\t\tpadding := 8 - ((82 + len(entry.PathName) + 4) % 8)\n\t\tp := make([]byte, padding)\n\t\tbinary.Write(w, binary.BigEndian, p)\n\t}\n\tbinary.Write(w, binary.BigEndian, s.Sum(nil))\n\treturn nil\n}", "func (s *BasevhdlListener) ExitIndex_constraint(ctx *Index_constraintContext) {}", "func (a *AliasRemoveAction) Index(index ...string) *AliasRemoveAction {\n\ta.index = append(a.index, index...)\n\treturn a\n}", "func (s *BaseMySqlParserListener) ExitIndexHint(ctx *IndexHintContext) {}", "func (gen *AddressGenerator) SetIndex(i uint) *AddressGenerator {\n\tgen.state = addressState(i)\n\treturn gen\n}", "func (pal *CGBPalette) updateIndex(value byte) {\n\tpal.index = value & 0x3F\n\tpal.inc = bits.Test(value, 7)\n}", "func (s *BasePlSqlParserListener) ExitIndex_attributes(ctx *Index_attributesContext) {}", "func (rb *ShardsRecordBuilder) IndexingIndexTotal(indexingindextotal string) *ShardsRecordBuilder {\n\trb.v.IndexingIndexTotal = 
&indexingindextotal\n\treturn rb\n}", "func processIndex(length, index int) int {\n\tif index >= 0 {\n\t\tif index >= length {\n\t\t\treturn -1\n\t\t}\n\t\treturn index\n\t}\n\tindex = length + index\n\tif index < 0 || index >= length {\n\t\treturn -1\n\t}\n\treturn index\n}", "func (s *BaseDMLListener) ExitIndexType(ctx *IndexTypeContext) {}", "func (du *DatumUpdate) SetIndex(i int) *DatumUpdate {\n\tdu.mutation.ResetIndex()\n\tdu.mutation.SetIndex(i)\n\treturn du\n}", "func DecodeIndex(buf []byte) ([]byte, int64, int) {\n\tn := decodeInt(buf[0:4])\n\tif n+10 > len(buf) {\n\t\treturn nil, -1, 0\n\t}\n\tkey := buf[4 : n+4]\n\toff := decodeInt48(buf[n+4 : n+10])\n\treturn key, off, n + 10\n}", "func (b *FlushingBatch) Index(id string, data any) error {\n\tif err := b.batch.Index(id, data); err != nil {\n\t\treturn err\n\t}\n\treturn b.flushIfFull()\n}", "func (ci *createIndex) ApplyFilters() error {\n\treturn nil\n}", "func (s *StashList) RemoveStashAtIdx(ctx context.Context, vw types.ValueWriter, idx int) (hash.Hash, error) {\n\tamCount, err := s.am.Count()\n\tif err != nil {\n\t\treturn hash.Hash{}, err\n\t}\n\tif amCount <= idx {\n\t\treturn hash.Hash{}, fmt.Errorf(\"fatal: log for 'stash' only has %v entries\", amCount)\n\t}\n\n\tstash, err := getNthStash(ctx, s.am, amCount, idx)\n\tif err != nil {\n\t\treturn hash.Hash{}, err\n\t}\n\n\tame := s.am.Editor()\n\terr = ame.Delete(ctx, strconv.Itoa(stash.key))\n\tif err != nil {\n\t\treturn hash.Hash{}, err\n\t}\n\n\ts.am, err = ame.Flush(ctx)\n\tif err != nil {\n\t\treturn hash.Hash{}, err\n\t}\n\treturn s.updateStashListMap(ctx, vw)\n}", "func TestEnsureSkipListIndex(t *testing.T) {\n\tc := createClient(t, nil)\n\tdb := ensureDatabase(nil, c, \"index_test\", nil, t)\n\n\ttestOptions := []*driver.EnsureSkipListIndexOptions{\n\t\tnil,\n\t\t{Unique: true, Sparse: false, NoDeduplicate: true},\n\t\t{Unique: true, Sparse: true, NoDeduplicate: true},\n\t\t{Unique: false, Sparse: false, NoDeduplicate: 
false},\n\t\t{Unique: false, Sparse: true, NoDeduplicate: false},\n\t}\n\n\tfor i, options := range testOptions {\n\t\tcol := ensureCollection(nil, db, fmt.Sprintf(\"skiplist_index_test_%d\", i), nil, t)\n\n\t\tidx, created, err := col.EnsureSkipListIndex(nil, []string{\"name\", \"title\"}, options)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to create new index: %s\", describe(err))\n\t\t}\n\t\tif !created {\n\t\t\tt.Error(\"Expected created to be true, got false\")\n\t\t}\n\t\tif idxType := idx.Type(); idxType != driver.SkipListIndex {\n\t\t\tt.Errorf(\"Expected SkipListIndex, found `%s`\", idxType)\n\t\t}\n\t\tif options != nil && idx.Unique() != options.Unique {\n\t\t\tt.Errorf(\"Expected Unique to be %t, found `%t`\", options.Unique, idx.Unique())\n\t\t}\n\t\tif options != nil && idx.Sparse() != options.Sparse {\n\t\t\tt.Errorf(\"Expected Sparse to be %t, found `%t`\", options.Sparse, idx.Sparse())\n\t\t}\n\t\tif options != nil && !idx.Deduplicate() != options.NoDeduplicate {\n\t\t\tt.Errorf(\"Expected NoDeduplicate to be %t, found `%t`\", options.NoDeduplicate, idx.Deduplicate())\n\t\t}\n\n\t\t// Index must exists now\n\t\tif found, err := col.IndexExists(nil, idx.Name()); err != nil {\n\t\t\tt.Fatalf(\"Failed to check index '%s' exists: %s\", idx.Name(), describe(err))\n\t\t} else if !found {\n\t\t\tt.Errorf(\"Index '%s' does not exist, expected it to exist\", idx.Name())\n\t\t}\n\n\t\t// Ensure again, created must be false now\n\t\t_, created, err = col.EnsureSkipListIndex(nil, []string{\"name\", \"title\"}, options)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to re-create index: %s\", describe(err))\n\t\t}\n\t\tif created {\n\t\t\tt.Error(\"Expected created to be false, got true\")\n\t\t}\n\n\t\t// Remove index\n\t\tif err := idx.Remove(nil); err != nil {\n\t\t\tt.Fatalf(\"Failed to remove index '%s': %s\", idx.Name(), describe(err))\n\t\t}\n\n\t\t// Index must not exists now\n\t\tif found, err := col.IndexExists(nil, idx.Name()); err != nil 
{\n\t\t\tt.Fatalf(\"Failed to check index '%s' exists: %s\", idx.Name(), describe(err))\n\t\t} else if found {\n\t\t\tt.Errorf(\"Index '%s' does exist, expected it not to exist\", idx.Name())\n\t\t}\n\t}\n}", "func (p *Buffer) saveIndex(ptr unsafe.Pointer, idx uint) {\n\tif p.array_indexes == nil {\n\t\t// the 1st time we need to allocate\n\t\tp.array_indexes = make(map[unsafe.Pointer]uint)\n\t}\n\tp.array_indexes[ptr] = idx\n}", "func (s *BasePlSqlParserListener) ExitAlter_index_partitioning(ctx *Alter_index_partitioningContext) {\n}", "func poolSetIndex(a interface{}, i int) {\n\ta.(*freeClientPoolEntry).index = i\n}", "func (s *LDBStore) CleanGCIndex() error {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\n\tbatch := leveldb.Batch{}\n\n\tvar okEntryCount uint64\n\tvar totalEntryCount uint64\n\n\t// throw out all gc indices, we will rebuild from cleaned index\n\tit := s.db.NewIterator()\n\tit.Seek([]byte{keyGCIdx})\n\tvar gcDeletes int\n\tfor it.Valid() {\n\t\trowType, _ := parseIdxKey(it.Key())\n\t\tif rowType != keyGCIdx {\n\t\t\tbreak\n\t\t}\n\t\tbatch.Delete(it.Key())\n\t\tgcDeletes++\n\t\tit.Next()\n\t}\n\tlog.Debug(\"gc\", \"deletes\", gcDeletes)\n\tif err := s.db.Write(&batch); err != nil {\n\t\treturn err\n\t}\n\tbatch.Reset()\n\n\tit.Release()\n\n\t// corrected po index pointer values\n\tvar poPtrs [256]uint64\n\n\t// set to true if chunk count not on 4096 iteration boundary\n\tvar doneIterating bool\n\n\t// last key index in previous iteration\n\tlastIdxKey := []byte{keyIndex}\n\n\t// counter for debug output\n\tvar cleanBatchCount int\n\n\t// go through all key index entries\n\tfor !doneIterating {\n\t\tcleanBatchCount++\n\t\tvar idxs []dpaDBIndex\n\t\tvar chunkHashes [][]byte\n\t\tvar pos []uint8\n\t\tit := s.db.NewIterator()\n\n\t\tit.Seek(lastIdxKey)\n\n\t\t// 4096 is just a nice number, don't look for any hidden meaning here...\n\t\tvar i int\n\t\tfor i = 0; i < 4096; i++ {\n\n\t\t\t// this really shouldn't happen unless database is empty\n\t\t\t// 
but let's keep it to be safe\n\t\t\tif !it.Valid() {\n\t\t\t\tdoneIterating = true\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// if it's not keyindex anymore we're done iterating\n\t\t\trowType, chunkHash := parseIdxKey(it.Key())\n\t\t\tif rowType != keyIndex {\n\t\t\t\tdoneIterating = true\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// decode the retrieved index\n\t\t\tvar idx dpaDBIndex\n\t\t\terr := decodeIndex(it.Value(), &idx)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"corrupt index: %v\", err)\n\t\t\t}\n\t\t\tpo := s.po(chunkHash)\n\t\t\tlastIdxKey = it.Key()\n\n\t\t\t// if we don't find the data key, remove the entry\n\t\t\t// if we find it, add to the array of new gc indices to create\n\t\t\tdataKey := getDataKey(idx.Idx, po)\n\t\t\t_, err = s.db.Get(dataKey)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warn(\"deleting inconsistent index (missing data)\", \"key\", chunkHash)\n\t\t\t\tbatch.Delete(it.Key())\n\t\t\t} else {\n\t\t\t\tidxs = append(idxs, idx)\n\t\t\t\tchunkHashes = append(chunkHashes, chunkHash)\n\t\t\t\tpos = append(pos, po)\n\t\t\t\tokEntryCount++\n\t\t\t\tif idx.Idx > poPtrs[po] {\n\t\t\t\t\tpoPtrs[po] = idx.Idx\n\t\t\t\t}\n\t\t\t}\n\t\t\ttotalEntryCount++\n\t\t\tit.Next()\n\t\t}\n\t\tit.Release()\n\n\t\t// flush the key index corrections\n\t\terr := s.db.Write(&batch)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbatch.Reset()\n\n\t\t// add correct gc indices\n\t\tfor i, okIdx := range idxs {\n\t\t\tgcIdxKey := getGCIdxKey(&okIdx)\n\t\t\tgcIdxData := getGCIdxValue(&okIdx, pos[i], chunkHashes[i])\n\t\t\tbatch.Put(gcIdxKey, gcIdxData)\n\t\t\tlog.Trace(\"clean ok\", \"key\", chunkHashes[i], \"gcKey\", gcIdxKey, \"gcData\", gcIdxData)\n\t\t}\n\n\t\t// flush them\n\t\terr = s.db.Write(&batch)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbatch.Reset()\n\n\t\tlog.Debug(\"clean gc index pass\", \"batch\", cleanBatchCount, \"checked\", i, \"kept\", len(idxs))\n\t}\n\n\tlog.Debug(\"gc cleanup entries\", \"ok\", okEntryCount, \"total\", totalEntryCount, 
\"batchlen\", batch.Len())\n\n\t// lastly add updated entry count\n\tvar entryCount [8]byte\n\tbinary.BigEndian.PutUint64(entryCount[:], okEntryCount)\n\tbatch.Put(keyEntryCnt, entryCount[:])\n\n\t// and add the new po index pointers\n\tvar poKey [2]byte\n\tpoKey[0] = keyDistanceCnt\n\tfor i, poPtr := range poPtrs {\n\t\tpoKey[1] = uint8(i)\n\t\tif poPtr == 0 {\n\t\t\tbatch.Delete(poKey[:])\n\t\t} else {\n\t\t\tvar idxCount [8]byte\n\t\t\tbinary.BigEndian.PutUint64(idxCount[:], poPtr)\n\t\t\tbatch.Put(poKey[:], idxCount[:])\n\t\t}\n\t}\n\n\t// if you made it this far your harddisk has survived. Congratulations\n\treturn s.db.Write(&batch)\n}", "func (o *IssueRemoveLabelParams) WithIndex(index int64) *IssueRemoveLabelParams {\n\to.SetIndex(index)\n\treturn o\n}", "func (s *store) afterIndex(index uint64) <-chan struct{} {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\n\tif index < s.data.Data.Index {\n\t\t// Client needs update so return a closed channel.\n\t\tch := make(chan struct{})\n\t\tclose(ch)\n\t\treturn ch\n\t}\n\n\treturn s.dataChanged\n}", "func ConvertToIndexUsage(defn *common.IndexDefn, localMeta *LocalIndexMetadata) (*IndexUsage, error) {\n\n\t// find the topology metadata\n\ttopology := findTopologyByBucket(localMeta.IndexTopologies, defn.Bucket)\n\tif topology == nil {\n\t\tlogging.Errorf(\"Planner::getIndexLayout: Fail to find index topology for bucket %v.\", defn.Bucket)\n\t\treturn nil, nil\n\t}\n\n\t// find the index instance from topology metadata\n\tinst := topology.GetIndexInstByDefn(defn.DefnId)\n\tif inst == nil {\n\t\tlogging.Errorf(\"Planner::getIndexLayout: Fail to find index instance for definition %v.\", defn.DefnId)\n\t\treturn nil, nil\n\t}\n\n\t// Check the index state. 
Only handle index that is active or being built.\n\t// For index that is in the process of being deleted, planner expects the resource\n\t// will eventually be freed, so it won't included in planning.\n\tstate, _ := topology.GetStatusByDefn(defn.DefnId)\n\tif state != common.INDEX_STATE_CREATED &&\n\t\tstate != common.INDEX_STATE_DELETED &&\n\t\tstate != common.INDEX_STATE_ERROR &&\n\t\tstate != common.INDEX_STATE_NIL {\n\n\t\t// create an index usage object\n\t\tindex := newIndexUsage(defn.DefnId, common.IndexInstId(inst.InstId), defn.Name, defn.Bucket)\n\n\t\t// index is pinned to a node\n\t\tif len(defn.Nodes) != 0 {\n\t\t\tindex.Hosts = defn.Nodes\n\t\t}\n\n\t\t// update sizing\n\t\tindex.IsPrimary = defn.IsPrimary\n\t\tindex.IsMOI = (defn.Using == common.IndexType(common.MemoryOptimized) || defn.Using == common.IndexType(common.MemDB))\n\t\tindex.NoUsage = defn.Deferred && state == common.INDEX_STATE_READY\n\n\t\t// Is the index being deleted by user? Thsi will read the delete token from metakv. 
If untable read from metakv,\n\t\t// pendingDelete is false (cannot assert index is to-be-delete).\n\t\tpendingDelete, err := client.DeleteCommandTokenExist(defn.DefnId)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tindex.pendingDelete = pendingDelete\n\n\t\t// update internal info\n\t\tindex.Instance = &common.IndexInst{\n\t\t\tInstId: common.IndexInstId(inst.InstId),\n\t\t\tDefn: *defn,\n\t\t\tState: common.IndexState(inst.State),\n\t\t\tStream: common.StreamId(inst.StreamId),\n\t\t\tError: inst.Error,\n\t\t\tReplicaId: int(inst.ReplicaId),\n\t\t\tVersion: int(inst.Version),\n\t\t\tRState: common.RebalanceState(inst.RState),\n\t\t}\n\n\t\tlogging.Debugf(\"Create Index usage %v %v %v %v\", index.Name, index.Bucket, index.Instance.InstId, index.Instance.ReplicaId)\n\n\t\treturn index, nil\n\t}\n\n\treturn nil, nil\n}", "func (p Permutator) Index() int {\n\t<- p.idle\n\tj := p.index - 1\n\tp.idle <- true\n\treturn j\n}", "func (o *IssueRemoveLabelParams) SetIndex(index int64) {\n\to.Index = index\n}", "func (s *storageMgr) updateIndexSnapMapForIndex(idxInstId common.IndexInstId, idxInst common.IndexInst,\n\tpartnMap PartitionInstMap, streamId common.StreamId, keyspaceId string) {\n\n\tpartitionIDs, _ := idxInst.Pc.GetAllPartitionIds()\n\tlogging.Infof(\"StorageMgr::updateIndexSnapMapForIndex IndexInst %v Partitions %v\",\n\t\tidxInstId, partitionIDs)\n\n\tneedRestart := false\n\t//if keyspace and stream have been provided\n\tif keyspaceId != \"\" && streamId != common.ALL_STREAMS {\n\t\t//skip the index if either keyspaceId or stream don't match\n\t\tif idxInst.Defn.KeyspaceId(idxInst.Stream) != keyspaceId || idxInst.Stream != streamId {\n\t\t\treturn\n\t\t}\n\t\t//skip deleted indexes\n\t\tif idxInst.State == common.INDEX_STATE_DELETED {\n\t\t\treturn\n\t\t}\n\t}\n\n\tindexSnapMap := s.indexSnapMap.Clone()\n\tsnapC := indexSnapMap[idxInstId]\n\tif snapC != nil {\n\t\tsnapC.Lock()\n\t\tDestroyIndexSnapshot(snapC.snap)\n\t\tdelete(indexSnapMap, 
idxInstId)\n\t\ts.indexSnapMap.Set(indexSnapMap)\n\t\tsnapC.Unlock()\n\t\ts.notifySnapshotDeletion(idxInstId)\n\t}\n\n\tvar tsVbuuid *common.TsVbuuid\n\tvar err error\n\tpartnSnapMap := make(PartnSnapMap)\n\n\tfor _, partnInst := range partnMap {\n\t\tpartnSnapMap, tsVbuuid, err = s.openSnapshot(idxInstId, partnInst, partnSnapMap)\n\t\tif err != nil {\n\t\t\tif err == errStorageCorrupted {\n\t\t\t\tneedRestart = true\n\t\t\t} else {\n\t\t\t\tpanic(\"Unable to open snapshot -\" + err.Error())\n\t\t\t}\n\t\t}\n\n\t\tif partnSnapMap == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t//if OSO snapshot, rollback all partitions to 0\n\t\tif tsVbuuid != nil && tsVbuuid.GetSnapType() == common.DISK_SNAP_OSO {\n\t\t\tfor _, partnInst := range partnMap {\n\t\t\t\tpartnId := partnInst.Defn.GetPartitionId()\n\t\t\t\tsc := partnInst.Sc\n\n\t\t\t\tfor _, slice := range sc.GetAllSlices() {\n\t\t\t\t\t_, err := s.rollbackToSnapshot(idxInstId, partnId,\n\t\t\t\t\t\tslice, nil, false)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tpanic(\"Unable to rollback to 0 - \" + err.Error())\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tpartnSnapMap = nil\n\t\t\tbreak\n\t\t}\n\t}\n\n\tbucket, _, _ := SplitKeyspaceId(keyspaceId)\n\tif len(partnSnapMap) != 0 {\n\t\tis := &indexSnapshot{\n\t\t\tinstId: idxInstId,\n\t\t\tts: tsVbuuid,\n\t\t\tpartns: partnSnapMap,\n\t\t}\n\t\tindexSnapMap = s.indexSnapMap.Clone()\n\t\tif snapC == nil {\n\t\t\tsnapC = &IndexSnapshotContainer{snap: is}\n\t\t} else {\n\t\t\tsnapC.Lock()\n\t\t\tsnapC.snap = is\n\t\t\tsnapC.Unlock()\n\t\t}\n\n\t\tindexSnapMap[idxInstId] = snapC\n\t\ts.indexSnapMap.Set(indexSnapMap)\n\t\ts.notifySnapshotCreation(is)\n\t} else {\n\t\tlogging.Infof(\"StorageMgr::updateIndexSnapMapForIndex IndexInst %v Adding Nil Snapshot.\",\n\t\t\tidxInstId)\n\t\ts.addNilSnapshot(idxInstId, bucket)\n\t}\n\n\tif needRestart {\n\t\tos.Exit(1)\n\t}\n}", "func (s *BasePlSqlParserListener) ExitTable_index_clause(ctx *Table_index_clauseContext) {}", "func ClearIndex(c float32) 
{\n\tsyscall.Syscall(gpClearIndex, 1, uintptr(math.Float32bits(c)), 0, 0)\n}", "func (d *Dao) ZRemIdx(c context.Context, category int, id int64) (err error) {\n\tvar (\n\t\tconn = d.redis.Get(c)\n\t\tkey = keyZone(category)\n\t)\n\tif _, err = conn.Do(\"ZREM\", key, id); err != nil {\n\t\tlog.Error(\"conn.Send(ZADD %s - %v) error(%v)\", key, id, err)\n\t}\n\tconn.Close()\n\treturn\n}", "func completeDroppedIndex(\n\tctx context.Context,\n\texecCfg *sql.ExecutorConfig,\n\ttable catalog.TableDescriptor,\n\tindexID descpb.IndexID,\n\tprogress *jobspb.SchemaChangeGCProgress,\n) error {\n\tif err := updateDescriptorGCMutations(ctx, execCfg, table.GetID(), indexID); err != nil {\n\t\treturn errors.Wrapf(err, \"updating GC mutations\")\n\t}\n\n\tmarkIndexGCed(ctx, indexID, progress)\n\n\treturn nil\n}", "func RemoveAtIndex(data interface{}, index int) (interface{}, error) {\n\t// Get concrete value of data\n\tvalue := reflect.ValueOf(data)\n\n\t// Get the type of value\n\tvalueType := value.Type()\n\n\tif valueType.Kind() != reflect.Array && valueType.Kind() != reflect.Slice {\n\t\terr := errors.New(\"Data parameter is not an array or slice\")\n\t\treturn nil, err\n\t}\n\n\tif index >= value.Len() {\n\t\terr := errors.New(\"Index is greater than data length\")\n\t\treturn nil, err\n\t}\n\n\t// Create slice from value\n\tresultSlice := reflect.AppendSlice(value.Slice(0, index), value.Slice(index+1, value.Len()))\n\n\treturn resultSlice.Interface(), nil\n}", "func (m *MockDriver) UseIndexPlaceholders() bool {\n\treturn false\n}", "func (i ImageIndexer) DeprecateFromIndex(request DeprecateFromIndexRequest) error {\n\tbuildDir, outDockerfile, cleanup, err := buildContext(request.Generate, request.OutDockerfile)\n\tdefer cleanup()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdatabasePath, err := i.ExtractDatabase(buildDir, request.FromIndex, request.CaFile, request.SkipTLSVerify, request.PlainHTTP)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdeprecateFromRegistryReq := 
registry.DeprecateFromRegistryRequest{\n\t\tBundles: request.Bundles,\n\t\tInputDatabase: databasePath,\n\t\tPermissive: request.Permissive,\n\t\tAllowPackageRemoval: request.AllowPackageRemoval,\n\t}\n\n\t// Deprecate the bundles from the registry\n\terr = i.RegistryDeprecator.DeprecateFromRegistry(deprecateFromRegistryReq)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// generate the dockerfile\n\tdockerfile := i.DockerfileGenerator.GenerateIndexDockerfile(request.BinarySourceImage, databasePath)\n\terr = write(dockerfile, outDockerfile, i.Logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif request.Generate {\n\t\treturn nil\n\t}\n\n\t// build the dockerfile with requested tooling\n\terr = build(outDockerfile, request.Tag, i.CommandRunner, i.Logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (v Value) SetIndex(i int, x interface{}) {\n\tpanic(message)\n}", "func (rb *ShardsRecordBuilder) IndexingIndexFailed(indexingindexfailed string) *ShardsRecordBuilder {\n\trb.v.IndexingIndexFailed = &indexingindexfailed\n\treturn rb\n}", "func (w *Writer) writeIndex() (int64, error) {\n\tw.written = true\n\n\tbuf := new(bytes.Buffer)\n\tst := sst.NewWriter(buf)\n\n\tw.spaceIds.Sort()\n\n\t// For each defined space, we index the space's\n\t// byte offset in the file and the length in bytes\n\t// of all data in the space.\n\tfor _, spaceId := range w.spaceIds {\n\t\tb := new(bytes.Buffer)\n\n\t\tbinary.WriteInt64(b, w.spaceOffsets[spaceId])\n\t\tbinary.WriteInt64(b, w.spaceLengths[spaceId])\n\n\t\tif err := st.Set([]byte(spaceId), b.Bytes()); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tif err := st.Close(); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn buf.WriteTo(w.file)\n}", "func (w *Writer) Close() (err error) {\n\tdefer func() {\n\t\tif w.closer == nil {\n\t\t\treturn\n\t\t}\n\t\terr1 := w.closer.Close()\n\t\tif err == nil {\n\t\t\terr = err1\n\t\t}\n\t\tw.closer = nil\n\t}()\n\tif w.err != nil {\n\t\treturn w.err\n\t}\n\n\t// Finish 
the last data block, or force an empty data block if there\n\t// aren't any data blocks at all.\n\tif w.nEntries > 0 || len(w.indexEntries) == 0 {\n\t\tbh, err := w.finishBlock()\n\t\tif err != nil {\n\t\t\tw.err = err\n\t\t\treturn w.err\n\t\t}\n\t\tw.pendingBH = bh\n\t\tw.flushPendingBH(nil)\n\t}\n\n\t// Write the (empty) metaindex block.\n\tmetaindexBlockHandle, err := w.finishBlock()\n\tif err != nil {\n\t\tw.err = err\n\t\treturn w.err\n\t}\n\n\t// Write the index block.\n\t// writer.append uses w.tmp[:3*binary.MaxVarintLen64].\n\ti0, tmp := 0, w.tmp[3*binary.MaxVarintLen64:5*binary.MaxVarintLen64]\n\tfor _, ie := range w.indexEntries {\n\t\tn := encodeBlockHandle(tmp, ie.bh)\n\t\ti1 := i0 + ie.keyLen\n\t\tw.append(w.indexKeys[i0:i1], tmp[:n], true)\n\t\ti0 = i1\n\t}\n\tindexBlockHandle, err := w.finishBlock()\n\tif err != nil {\n\t\tw.err = err\n\t\treturn w.err\n\t}\n\n\t// Write the table footer.\n\tfooter := w.tmp[:footerLen]\n\tfor i := range footer {\n\t\tfooter[i] = 0\n\t}\n\tn := encodeBlockHandle(footer, metaindexBlockHandle)\n\tencodeBlockHandle(footer[n:], indexBlockHandle)\n\tcopy(footer[footerLen-len(magic):], magic)\n\tif _, err := w.writer.Write(footer); err != nil {\n\t\tw.err = err\n\t\treturn w.err\n\t}\n\n\t// Flush the buffer.\n\tif w.bufWriter != nil {\n\t\tif err := w.bufWriter.Flush(); err != nil {\n\t\t\tw.err = err\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Make any future calls to Set or Close return an error.\n\tw.err = errors.New(\"leveldb/table: writer is closed\")\n\treturn nil\n}", "func blockPadding(offset int64) (n int64) {\n\treturn -offset & (blockSize - 1)\n}", "func (m *MockDBStorage) DeleteIndex(arg0 string, arg1, arg2 common.Resource, arg3 string) (sql.Result, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteIndex\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(sql.Result)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (s *BasePlSqlParserListener) EnterUsing_index_clause(ctx 
*Using_index_clauseContext) {}", "func (app *fetchRegistryBuilder) WithIndex(index IntPointer) FetchRegistryBuilder {\n\tapp.index = index\n\treturn app\n}", "func (t *BenchmarkerChaincode) updateIndex(stub shim.ChaincodeStubInterface, key, indexName string, indexValueSpace [][]string) error {\n\tif indexName == \"\" {\n\t\treturn nil\n\t}\n\n\tvar indexValues []string\n\tfor _, validValues := range indexValueSpace {\n\t\tchoice := rand.Intn(len(validValues))\n\t\tindexValues = append(indexValues, validValues[choice])\n\t}\n\n\tindexKey, err := stub.CreateCompositeKey(indexName+\"~id\", append(indexValues, key))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvalue := []byte{0x00}\n\tif err := stub.PutState(indexKey, value); err != nil {\n\t\treturn err\n\t}\n\tfmt.Printf(\"Set composite key '%s' to '%s' for key '%s'\\n\", indexKey, value, key)\n\n\treturn nil\n}", "func (e *Engine) setIndex(index int64) {\n\te.Index = index\n\te.Name = naming.Name(index)\n}", "func ListDiffOutIdx(value tf.DataType) ListDiffAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"out_idx\"] = value\n\t}\n}", "func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }", "func (duo *DatumUpdateOne) SetIndex(i int) *DatumUpdateOne {\n\tduo.mutation.ResetIndex()\n\tduo.mutation.SetIndex(i)\n\treturn duo\n}" ]
[ "0.496226", "0.49470598", "0.49359372", "0.4929836", "0.48813668", "0.48754716", "0.48505393", "0.47770718", "0.47507846", "0.4717412", "0.47086638", "0.47060275", "0.46133706", "0.45682114", "0.45536166", "0.45449305", "0.45405245", "0.45287335", "0.45117015", "0.45112503", "0.4510972", "0.44861117", "0.4467708", "0.44529444", "0.44493714", "0.44267884", "0.44216266", "0.44153744", "0.44027048", "0.43976918", "0.43774116", "0.43747947", "0.43645355", "0.4357643", "0.43573222", "0.43363515", "0.43358374", "0.43344334", "0.43200192", "0.4319228", "0.43187538", "0.43155512", "0.43152407", "0.42927882", "0.42908022", "0.42855412", "0.42820108", "0.42651019", "0.42601702", "0.42455602", "0.42396376", "0.423732", "0.42330626", "0.42318133", "0.42210773", "0.42208946", "0.4210823", "0.41934806", "0.41857693", "0.41814056", "0.4176728", "0.4170431", "0.41697422", "0.41654098", "0.41579354", "0.4157296", "0.41569877", "0.415525", "0.41478556", "0.41431764", "0.41425395", "0.41392374", "0.4136004", "0.41348842", "0.4128971", "0.41243795", "0.41213477", "0.41203418", "0.4119939", "0.4100516", "0.40961242", "0.4095788", "0.4088729", "0.40872386", "0.4082269", "0.4080401", "0.4079942", "0.40757367", "0.40735227", "0.40682673", "0.4065863", "0.4059869", "0.4059548", "0.40586948", "0.40577215", "0.40572336", "0.40524673", "0.40519613", "0.40494913", "0.4046687" ]
0.7548068
0
UseIndexCodec sets the codec used for index generation.
func UseIndexCodec(c multicodec.Code) Option { return func(o *Options) { o.IndexCodec = c } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func UseIndex(designDocument, name string) Parameter {\n\treturn func(pa Parameterizable) {\n\t\tif name == \"\" {\n\t\t\tpa.SetParameter(\"use_index\", designDocument)\n\t\t} else {\n\t\t\tpa.SetParameter(\"use_index\", []string{designDocument, name})\n\t\t}\n\t}\n}", "func (o *BlockBasedTableOptions) SetIndexType(value IndexType) {\n\tC.rocksdb_block_based_options_set_index_type(o.c, C.int(value))\n}", "func NewIndexDriver(root string) sql.IndexDriver {\n\treturn NewDriver(root, pilosa.DefaultClient())\n}", "func UseIndex() *ishell.Cmd {\n\n\treturn &ishell.Cmd{\n\t\tName: \"use\",\n\t\tHelp: \"Select index to use for subsequent document operations\",\n\t\tFunc: func(c *ishell.Context) {\n\t\t\tif context == nil {\n\t\t\t\terrorMsg(c, errNotConnected)\n\t\t\t} else {\n\t\t\t\tdefer restorePrompt(c)\n\t\t\t\tif len(c.Args) < 1 {\n\t\t\t\t\tif context.ActiveIndex != \"\" {\n\t\t\t\t\t\tcprintlist(c, \"Using index \", cy(context.ActiveIndex))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcprintln(c, \"No index is in use\")\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif c.Args[0] == \"--\" {\n\t\t\t\t\tif context.ActiveIndex != \"\" {\n\t\t\t\t\t\tcprintlist(c, \"Index \", cy(context.ActiveIndex), \" is no longer in use\")\n\t\t\t\t\t\tcontext.ActiveIndex = \"\"\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcprintln(c, \"No index is in use\")\n\t\t\t\t\t}\n\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ts, err := context.ResolveAndValidateIndex(c.Args[0])\n\t\t\t\tif err != nil {\n\t\t\t\t\terrorMsg(c, err.Error())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcontext.ActiveIndex = s\n\t\t\t\tif s != c.Args[0] {\n\t\t\t\t\tcprintlist(c, \"For alias \", cyb(c.Args[0]), \" selected index \", cy(s))\n\t\t\t\t} else {\n\t\t\t\t\tcprintlist(c, \"Selected index \", cy(s))\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t}\n\n}", "func WithoutIndex() Option {\n\treturn func(o *Options) {\n\t\to.IndexCodec = index.CarIndexNone\n\t}\n}", "func (s *Store) SetCodec(codec types.Codec) {\n\ts.codec = codec\n}", "func (idx 
*IndexMap) SetIndexType(indtype string) *IndexMap {\n\tidx.IndexType = indtype\n\treturn idx\n}", "func (c *Chip8) SetIndex() {\n\tc.index = c.inst & 0x0FFF\n}", "func WithIndexCtx(ctx context.Context, indexCtx IndexCtx) context.Context {\n\treturn context.WithValue(ctx, indexCtxKey{}, indexCtx)\n}", "func (o *NearestUsingGET1Params) SetIndexType(indexType *string) {\n\to.IndexType = indexType\n}", "func (u UserConfig) IndexType() string {\n\treturn \"hnsw\"\n}", "func (self *FileBaseDataStore) SetIndex(\n\tconfig_obj *api_proto.Config,\n\tindex_urn string,\n\tentity string,\n\tkeywords []string) error {\n\n\tfor _, keyword := range keywords {\n\t\tsubject := path.Join(index_urn, strings.ToLower(keyword), entity)\n\t\terr := writeContentToFile(config_obj, subject, []byte{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func setIndex(resp http.ResponseWriter, index uint64) {\n\t// If we ever return X-Consul-Index of 0 blocking clients will go into a busy\n\t// loop and hammer us since ?index=0 will never block. 
It's always safe to\n\t// return index=1 since the very first Raft write is always an internal one\n\t// writing the raft config for the cluster so no user-facing blocking query\n\t// will ever legitimately have an X-Consul-Index of 1.\n\tif index == 0 {\n\t\tindex = 1\n\t}\n\tresp.Header().Set(\"X-Consul-Index\", strconv.FormatUint(index, 10))\n}", "func (g *GenOpts) BlobIndex() (string, error) {\n\tbp, err := g.blobIndexPrefix()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tjk, err := g.jsonKey()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ts := bp + jk\n\treturn s, nil\n}", "func EncodingIndexer(encoding string) Indexer {\n\treturn func(r *http.Request) interface{} {\n\t\tp := r.Method\n\t\tif strings.Contains(r.Header.Get(header.AcceptEncoding), encoding) {\n\t\t\tp += \":\" + encoding\n\t\t}\n\t\tp += \":\" + path.Clean(r.URL.Path)\n\t\treturn p\n\t}\n}", "func (m *metricEventDimensions) SetIndex(val *int32) {\n\tm.indexField = val\n}", "func WriteIndex(index common.Index) error {\n\tbytes, err := json.Marshal(index)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(indexCachePath, bytes, 0600)\n\treturn err\n}", "func WithIndexBy(val IndexBy) Option {\n\treturn func(o *Options) {\n\t\to.IndexBy = val\n\t}\n}", "func (e *Engine) setIndex(index int64) {\n\te.Index = index\n\te.Name = naming.Name(index)\n}", "func (d *dbBasePostgres) GenerateSpecifyIndex(tableName string, useIndex int, indexes []string) string {\n\tDebugLog.Println(\"[WARN] Not support any specifying index action, so that action is ignored\")\n\treturn ``\n}", "func NewIndexClient(name string, cfg Config, schemaCfg config.SchemaConfig, limits StoreLimits, cm ClientMetrics, ownsTenantFn downloads.IndexGatewayOwnsTenant, registerer prometheus.Registerer) (index.Client, error) {\n\tswitch name {\n\tcase config.StorageTypeInMemory:\n\t\tstore := testutils.NewMockStorage()\n\t\treturn store, nil\n\tcase config.StorageTypeAWS, config.StorageTypeAWSDynamo:\n\t\tif 
cfg.AWSStorageConfig.DynamoDB.URL == nil {\n\t\t\treturn nil, fmt.Errorf(\"Must set -dynamodb.url in aws mode\")\n\t\t}\n\t\tpath := strings.TrimPrefix(cfg.AWSStorageConfig.DynamoDB.URL.Path, \"/\")\n\t\tif len(path) > 0 {\n\t\t\tlevel.Warn(util_log.Logger).Log(\"msg\", \"ignoring DynamoDB URL path\", \"path\", path)\n\t\t}\n\t\treturn aws.NewDynamoDBIndexClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg, registerer)\n\tcase config.StorageTypeGCP:\n\t\treturn gcp.NewStorageClientV1(context.Background(), cfg.GCPStorageConfig, schemaCfg)\n\tcase config.StorageTypeGCPColumnKey, config.StorageTypeBigTable:\n\t\treturn gcp.NewStorageClientColumnKey(context.Background(), cfg.GCPStorageConfig, schemaCfg)\n\tcase config.StorageTypeBigTableHashed:\n\t\tcfg.GCPStorageConfig.DistributeKeys = true\n\t\treturn gcp.NewStorageClientColumnKey(context.Background(), cfg.GCPStorageConfig, schemaCfg)\n\tcase config.StorageTypeCassandra:\n\t\treturn cassandra.NewStorageClient(cfg.CassandraStorageConfig, schemaCfg, registerer)\n\tcase config.StorageTypeBoltDB:\n\t\treturn local.NewBoltDBIndexClient(cfg.BoltDBConfig)\n\tcase config.StorageTypeGrpc:\n\t\treturn grpc.NewStorageClient(cfg.GrpcConfig, schemaCfg)\n\tcase config.BoltDBShipperType:\n\t\tif boltDBIndexClientWithShipper != nil {\n\t\t\treturn boltDBIndexClientWithShipper, nil\n\t\t}\n\n\t\tif shouldUseIndexGatewayClient(cfg.BoltDBShipperConfig.Config) {\n\t\t\tgateway, err := gatewayclient.NewGatewayClient(cfg.BoltDBShipperConfig.IndexGatewayClientConfig, registerer, util_log.Logger)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tboltDBIndexClientWithShipper = gateway\n\t\t\treturn gateway, nil\n\t\t}\n\n\t\tobjectClient, err := NewObjectClient(cfg.BoltDBShipperConfig.SharedStoreType, cfg, cm)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttableRanges := getIndexStoreTableRanges(config.BoltDBShipperType, schemaCfg.Configs)\n\n\t\tboltDBIndexClientWithShipper, err = 
shipper.NewShipper(cfg.BoltDBShipperConfig, objectClient, limits,\n\t\t\townsTenantFn, tableRanges, registerer)\n\n\t\treturn boltDBIndexClientWithShipper, err\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Unrecognized storage client %v, choose one of: %v, %v, %v, %v, %v, %v\", name, config.StorageTypeAWS, config.StorageTypeCassandra, config.StorageTypeInMemory, config.StorageTypeGCP, config.StorageTypeBigTable, config.StorageTypeBigTableHashed)\n\t}\n}", "func NewIndexResetter(s dbworkerstore.Store, interval time.Duration, metrics *metrics, observationContext *observation.Context) *dbworker.Resetter {\n\treturn dbworker.NewResetter(s, dbworker.ResetterOptions{\n\t\tName: \"precise_code_intel_index_worker_resetter\",\n\t\tInterval: interval,\n\t\tMetrics: dbworker.ResetterMetrics{\n\t\t\tRecordResets: metrics.numIndexResets,\n\t\t\tRecordResetFailures: metrics.numIndexResetFailures,\n\t\t\tErrors: metrics.numErrors,\n\t\t},\n\t})\n}", "func (pw *PixelWand) SetIndex(index *IndexPacket) {\n\tC.PixelSetIndex(pw.pw, C.IndexPacket(*index))\n\truntime.KeepAlive(pw)\n}", "func (wouo *WorkOrderUpdateOne) SetIndex(i int) *WorkOrderUpdateOne {\n\twouo.index = &i\n\twouo.addindex = nil\n\treturn wouo\n}", "func CompressIndex(ctx context.Context, dbo Database) error {\n\tdb := dbo.(*database)\n\tsql := db.getRawDB()\n\n\tconn, err := sql.Conn(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\ttx, err := conn.BeginTx(ctx, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif tx != nil {\n\t\t\ttx.Rollback()\n\t\t}\n\t}()\n\n\t_, err = tx.ExecContext(ctx, `update docs set txt=compress(txt) where not iscompressed(txt)`)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = tx.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttx = nil\n\treturn nil\n}", "func (wou *WorkOrderUpdate) SetIndex(i int) *WorkOrderUpdate {\n\twou.index = &i\n\twou.addindex = nil\n\treturn wou\n}", "func (defintion *IndexDefinition) SetIndexOn(value IndexType) (outDef 
*IndexDefinition) {\n\toutDef = defintion\n\toutDef.IndexOn = value.String()\n\treturn\n}", "func (d *dbBase) GenerateSpecifyIndex(tableName string, useIndex int, indexes []string) string {\n\tvar s []string\n\tQ := d.TableQuote()\n\tfor _, index := range indexes {\n\t\ttmp := fmt.Sprintf(`%s%s%s`, Q, index, Q)\n\t\ts = append(s, tmp)\n\t}\n\n\tvar useWay string\n\n\tswitch useIndex {\n\tcase hints.KeyUseIndex:\n\t\tuseWay = `USE`\n\tcase hints.KeyForceIndex:\n\t\tuseWay = `FORCE`\n\tcase hints.KeyIgnoreIndex:\n\t\tuseWay = `IGNORE`\n\tdefault:\n\t\tDebugLog.Println(\"[WARN] Not a valid specifying action, so that action is ignored\")\n\t\treturn ``\n\t}\n\n\treturn fmt.Sprintf(` %s INDEX(%s) `, useWay, strings.Join(s, `,`))\n}", "func NewIndexClient(name string, cfg Config, schemaCfg chunk.SchemaConfig, registerer prometheus.Registerer) (chunk.IndexClient, error) {\n\tif indexClientFactory, ok := customIndexStores[name]; ok {\n\t\tif indexClientFactory.indexClientFactoryFunc != nil {\n\t\t\treturn indexClientFactory.indexClientFactoryFunc()\n\t\t}\n\t}\n\n\tswitch name {\n\tcase StorageTypeInMemory:\n\t\tstore := chunk.NewMockStorage()\n\t\treturn store, nil\n\tcase StorageTypeAWS, StorageTypeAWSDynamo:\n\t\tif cfg.AWSStorageConfig.DynamoDB.URL == nil {\n\t\t\treturn nil, fmt.Errorf(\"Must set -dynamodb.url in aws mode\")\n\t\t}\n\t\tpath := strings.TrimPrefix(cfg.AWSStorageConfig.DynamoDB.URL.Path, \"/\")\n\t\tif len(path) > 0 {\n\t\t\tlevel.Warn(util_log.Logger).Log(\"msg\", \"ignoring DynamoDB URL path\", \"path\", path)\n\t\t}\n\t\treturn aws.NewDynamoDBIndexClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg, registerer)\n\tcase StorageTypeGCP:\n\t\treturn gcp.NewStorageClientV1(context.Background(), cfg.GCPStorageConfig, schemaCfg)\n\tcase StorageTypeGCPColumnKey, StorageTypeBigTable:\n\t\treturn gcp.NewStorageClientColumnKey(context.Background(), cfg.GCPStorageConfig, schemaCfg)\n\tcase StorageTypeBigTableHashed:\n\t\tcfg.GCPStorageConfig.DistributeKeys = 
true\n\t\treturn gcp.NewStorageClientColumnKey(context.Background(), cfg.GCPStorageConfig, schemaCfg)\n\tcase StorageTypeCassandra:\n\t\treturn cassandra.NewStorageClient(cfg.CassandraStorageConfig, schemaCfg, registerer)\n\tcase StorageTypeBoltDB:\n\t\treturn local.NewBoltDBIndexClient(cfg.BoltDBConfig)\n\tcase StorageTypeGrpc:\n\t\treturn grpc.NewStorageClient(cfg.GrpcConfig, schemaCfg)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Unrecognized storage client %v, choose one of: %v, %v, %v, %v, %v, %v\", name, StorageTypeAWS, StorageTypeCassandra, StorageTypeInMemory, StorageTypeGCP, StorageTypeBigTable, StorageTypeBigTableHashed)\n\t}\n}", "func (dagOpts) StoreCodec(codec string) DagPutOption {\n\treturn func(opts *DagPutSettings) error {\n\t\topts.StoreCodec = codec\n\t\treturn nil\n\t}\n}", "func NewIndex(addr, name, typ string, md *index.Metadata) (*Index, error) {\n\n\tfmt.Println(\"Get a new index: \", addr, name)\n client := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\t//MaxIdleConnsPerHost: 200,\n\t\t\tMaxIdleConnsPerHost: 2000000,\n\t\t},\n\t\tTimeout: 2500000 * time.Millisecond,\n\t}\n\tconn, err := elastic.NewClient(elastic.SetURL(addr), elastic.SetHttpClient(client))\n\tif err != nil {\n fmt.Println(\"Get error here\");\n\t\treturn nil, err\n\t}\n\tret := &Index{\n\t\tconn: conn,\n\t\tmd: md,\n\t\tname: name,\n\t\ttyp: typ,\n\t}\n fmt.Println(\"get here ======\");\n\n\treturn ret, nil\n\n}", "func UseIndexPadding(p uint64) Option {\n\treturn func(o *Options) {\n\t\to.IndexPadding = p\n\t}\n}", "func NewAutoincrementIndex(o ...option.Option) index.Index {\n\topts := &option.Options{}\n\tfor _, opt := range o {\n\t\topt(opts)\n\t}\n\n\tu := &Autoincrement{\n\t\tindexBy: opts.IndexBy,\n\t\ttypeName: opts.TypeName,\n\t\tfilesDir: opts.FilesDir,\n\t\tbound: opts.Bound,\n\t\tindexBaseDir: path.Join(opts.DataDir, \"index.cs3\"),\n\t\tindexRootDir: path.Join(path.Join(opts.DataDir, \"index.cs3\"), strings.Join([]string{\"autoincrement\", opts.TypeName, 
opts.IndexBy}, \".\")),\n\t\tcs3conf: &Config{\n\t\t\tProviderAddr: opts.ProviderAddr,\n\t\t\tDataURL: opts.DataURL,\n\t\t\tDataPrefix: opts.DataPrefix,\n\t\t\tJWTSecret: opts.JWTSecret,\n\t\t\tServiceUser: opts.ServiceUser,\n\t\t},\n\t\tdataProvider: dataProviderClient{\n\t\t\tbaseURL: singleJoiningSlash(opts.DataURL, opts.DataPrefix),\n\t\t\tclient: http.Client{\n\t\t\t\tTransport: http.DefaultTransport,\n\t\t\t},\n\t\t},\n\t}\n\n\treturn u\n}", "func (db *DB) Index(ctx context.Context, i services.Consumable) error {\n\tvar (\n\t\terr error\n\t\tjob = db.stream.NewJob(\"index\")\n\t\tsess = db.db.NewSession(job)\n\t)\n\tjob.KeyValue(\"id\", i.ID())\n\tjob.KeyValue(\"chain_id\", i.ChainID())\n\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tjob.CompleteKv(health.Error, health.Kvs{\"err\": err.Error()})\n\t\t\treturn\n\t\t}\n\t\tjob.Complete(health.Success)\n\t}()\n\n\t// Create db tx\n\tvar dbTx *dbr.Tx\n\tdbTx, err = sess.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer dbTx.RollbackUnlessCommitted()\n\n\t// Ingest the tx and commit\n\terr = db.ingestTx(services.NewConsumerContext(ctx, job, dbTx, i.Timestamp()), i.Body())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = dbTx.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (t Ticker) IsIndex() bool {\r\n\treturn strings.Contains(t.Exchange, \"INDEX\")\r\n}", "func ShowIndex(ctx context.Context, db QueryExecutor, schemaName string, table string) ([]*IndexInfo, error) {\n\t/*\n\t\tshow index example result:\n\t\tmysql> show index from test;\n\t\t+-------+------------+----------+--------------+-------------+-----------+-------------+----------+--------+------+------------+---------+---------------+\n\t\t| Table | Non_unique | Key_name | Seq_in_index | Column_name | Collation | Cardinality | Sub_part | Packed | Null | Index_type | Comment | Index_comment 
|\n\t\t+-------+------------+----------+--------------+-------------+-----------+-------------+----------+--------+------+------------+---------+---------------+\n\t\t| test | 0 | PRIMARY | 1 | id | A | 0 | NULL | NULL | | BTREE | | |\n\t\t| test | 0 | aid | 1 | aid | A | 0 | NULL | NULL | YES | BTREE | | |\n\t\t+-------+------------+----------+--------------+-------------+-----------+-------------+----------+--------+------+------------+---------+---------------+\n\t*/\n\tindices := make([]*IndexInfo, 0, 3)\n\tquery := fmt.Sprintf(\"SHOW INDEX FROM %s\", TableName(schemaName, table))\n\trows, err := db.QueryContext(ctx, query)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tfields, err1 := ScanRow(rows)\n\t\tif err1 != nil {\n\t\t\treturn nil, errors.Trace(err1)\n\t\t}\n\t\tseqInIndex, err1 := strconv.Atoi(string(fields[\"Seq_in_index\"].Data))\n\t\tif err1 != nil {\n\t\t\treturn nil, errors.Trace(err1)\n\t\t}\n\t\tcardinality, err1 := strconv.Atoi(string(fields[\"Cardinality\"].Data))\n\t\tif err1 != nil {\n\t\t\treturn nil, errors.Trace(err1)\n\t\t}\n\t\tindex := &IndexInfo{\n\t\t\tTable: string(fields[\"Table\"].Data),\n\t\t\tNoneUnique: string(fields[\"Non_unique\"].Data) == \"1\",\n\t\t\tKeyName: string(fields[\"Key_name\"].Data),\n\t\t\tColumnName: string(fields[\"Column_name\"].Data),\n\t\t\tSeqInIndex: seqInIndex,\n\t\t\tCardinality: cardinality,\n\t\t}\n\t\tindices = append(indices, index)\n\t}\n\n\treturn indices, nil\n}", "func (i *Index) Encode() (string, error) {\n\tout, err := yaml.Marshal(i)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(out), nil\n}", "func (o *EmsEventCollectionGetParams) SetIndex(index *int64) {\n\to.Index = index\n}", "func (mc *MockContiv) SetContainerIndex(ci *containeridx.ConfigIndex) {\n\tmc.containerIndex = ci\n}", "func WithCodec(codec eh.EventCodec) Option {\n\treturn func(b *EventBus) error {\n\t\tb.codec = codec\n\n\t\treturn nil\n\t}\n}", 
"func (i indexer) Index(ctx context.Context, req IndexQuery) (\n\tresp *IndexResult, err error) {\n\n\tlog.Info(\"index [%v] root [%v] len_dirs=%v len_files=%v\",\n\t\treq.Key, req.Root, len(req.Dirs), len(req.Files))\n\tstart := time.Now()\n\t// Setup the response\n\tresp = NewIndexResult()\n\tif err = req.Normalize(); err != nil {\n\t\tlog.Info(\"index [%v] error: %v\", req.Key, err)\n\t\tresp.Error = errs.NewStructError(err)\n\t\treturn\n\t}\n\n\t// create index shards\n\tvar nshards int\n\tif nshards = i.cfg.NumShards; nshards == 0 {\n\t\tnshards = 1\n\t}\n\tnshards = utils.MinInt(nshards, maxShards)\n\ti.shards = make([]index.IndexWriter, nshards)\n\ti.root = getRoot(i.cfg, &req)\n\n\tfor n := range i.shards {\n\t\tname := path.Join(i.root, shardName(req.Key, n))\n\t\tixw, err := getIndexWriter(ctx, name)\n\t\tif err != nil {\n\t\t\tresp.Error = errs.NewStructError(err)\n\t\t\treturn resp, nil\n\t\t}\n\t\ti.shards[n] = ixw\n\t}\n\n\tfs := getFileSystem(ctx, i.root)\n\trepo := newRepoFromQuery(&req, i.root)\n\trepo.SetMeta(i.cfg.RepoMeta, req.Meta)\n\tresp.Repo = repo\n\n\t// Add query Files and scan Dirs for files to index\n\tnames, err := i.scanner(fs, &req)\n\tch := make(chan int, nshards)\n\tchnames := make(chan string, 100)\n\tgo func() {\n\t\tfor _, name := range names {\n\t\t\tchnames <- name\n\t\t}\n\t\tclose(chnames)\n\t}()\n\treqch := make(chan par.RequestFunc, nshards)\n\tfor _, shard := range i.shards {\n\t\treqch <- indexShard(&i, &req, shard, fs, chnames, ch)\n\t}\n\tclose(reqch)\n\terr = par.Requests(reqch).WithConcurrency(nshards).DoWithContext(ctx)\n\tclose(ch)\n\n\t// Await results, each indicating the number of files scanned\n\tfor num := range ch {\n\t\trepo.NumFiles += num\n\t}\n\n\trepo.NumShards = len(i.shards)\n\t// Flush our index shard files\n\tfor _, shard := range i.shards {\n\t\tshard.Flush()\n\t\trepo.SizeIndex += ByteSize(shard.IndexBytes())\n\t\trepo.SizeData += ByteSize(shard.DataBytes())\n\t\tlog.Debug(\"index flush %v (data) 
%v (index)\",\n\t\t\trepo.SizeData, repo.SizeIndex)\n\t}\n\trepo.ElapsedIndexing = time.Since(start)\n\trepo.TimeUpdated = time.Now().UTC()\n\n\tvar msg string\n\tif err != nil {\n\t\trepo.State = ERROR\n\t\tresp.SetError(err)\n\t\tmsg = \"error: \" + resp.Error.Error()\n\t} else {\n\t\trepo.State = OK\n\t\tmsg = \"ok \" + fmt.Sprintf(\n\t\t\t\"(%v files, %v data, %v index)\",\n\t\t\trepo.NumFiles, repo.SizeData, repo.SizeIndex)\n\t}\n\tlog.Info(\"index [%v] %v [%v]\", req.Key, msg, repo.ElapsedIndexing)\n\treturn\n}", "func (s *BasevhdlListener) EnterIndex_specification(ctx *Index_specificationContext) {}", "func (idx *ManualIndex) Index() error {\n\tvar buf bytes.Buffer\n\n\tfor pkg := range idx.packages {\n\t\t_, err := fmt.Fprintf(&buf, \"\\x00%s\", pkg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tidx.index = suffixarray.New(buf.Bytes())\n\treturn nil\n}", "func (engine *Engine) Index(docId uint64, data types.DocData,\n\tforceUpdate ...bool) {\n\n\tvar force bool\n\tif len(forceUpdate) > 0 {\n\t\tforce = forceUpdate[0]\n\t}\n\n\t// if engine.HasDoc(docId) {\n\t// \tengine.RemoveDoc(docId)\n\t// }\n\n\t// data.Tokens\n\tengine.internalIndexDoc(docId, data, force)\n\n\thash := murmur.Sum32(fmt.Sprintf(\"%d\", docId)) %\n\t\tuint32(engine.initOptions.StoreShards)\n\n\tif engine.initOptions.UseStore && docId != 0 {\n\t\tengine.storeIndexDocChans[hash] <- storeIndexDocReq{\n\t\t\tdocId: docId, data: data}\n\t}\n}", "func Codec(contentType string, c encoding.Codec) client.Option {\n\treturn func(o *client.Options) {\n\t\tcodecs := make(map[string]encoding.Codec)\n\t\tif o.Context == nil {\n\t\t\to.Context = context.Background()\n\t\t}\n\t\tif v := o.Context.Value(codecsKey{}); v != nil {\n\t\t\tcodecs = v.(map[string]encoding.Codec)\n\t\t}\n\t\tcodecs[contentType] = c\n\t\to.Context = context.WithValue(o.Context, codecsKey{}, codecs)\n\t}\n}", "func (s *ChartStreamServer) IndexHandler(c *gin.Context) {\n\tindex, err := s.chartProvider.GetIndexFile()\n\tif 
err != nil {\n\t\tc.AbortWithError(500, err)\n\t}\n\n\tc.YAML(200, index)\n}", "func NewIndex(f *os.File, c Config) (*Index, error) {\n\tidx := &Index{\n\t\tfile: f,\n\t}\n\n\tfi, err := os.Stat(f.Name())\n\tif err != nil {\n\t\treturn nil, lib.Wrap(err, \"Unable to get file stats\")\n\t}\n\n\tidx.size = uint64(fi.Size())\n\tif err = os.Truncate(\n\t\tf.Name(), int64(c.Segment.MaxIndexBytes),\n\t); err != nil {\n\t\treturn nil, lib.Wrap(err, \"Unable to truncate file\")\n\t}\n\n\tif idx.mmap, err = gommap.Map(\n\t\tidx.file.Fd(),\n\t\tgommap.PROT_READ|gommap.PROT_WRITE,\n\t\tgommap.MAP_SHARED,\n\t); err != nil {\n\t\treturn nil, lib.Wrap(err, \"Unable to create gommap map\")\n\t}\n\n\treturn idx, nil\n}", "func (b *mysql) Index(table *Table, index *Index) string {\n\tlog.Printf(\"create index:%+v\", index)\n\tvar obj = \"INDEX\"\n\tif index.Unique {\n\t\tobj = \"UNIQUE INDEX\"\n\t}\n\treturn fmt.Sprintf(\"CREATE %s %s ON %s (%s);\", obj, index.Name, table.Name, b.columns(nil, index.Fields, true, false, false))\n}", "func indexHandler(w http.ResponseWriter, req *http.Request) {\n\tlayout, err := template.ParseFile(PATH_PUBLIC + TEMPLATE_LAYOUT)\n\tif err != nil {\n\t\thttp.Error(w, ERROR_TEMPLATE_NOT_FOUND, http.StatusNotFound)\n\t\treturn\n\t}\n\tindex, err := template.ParseFile(PATH_PUBLIC + TEMPLATE_INDEX)\n\t//artical, err := template.ParseFile(PATH_PUBLIC + TEMPLATE_ARTICAL)\n\tif err != nil {\n\t\thttp.Error(w, ERROR_TEMPLATE_NOT_FOUND, http.StatusNotFound)\n\t\treturn\n\t}\n\tmapOutput := map[string]interface{}{\"Title\": \"炫酷的网站技术\" + TITLE, \"Keyword\": KEYWORD, \"Description\": DESCRIPTION, \"Base\": BASE_URL, \"Url\": BASE_URL, \"Carousel\": getAddition(PREFIX_INDEX), \"Script\": getAddition(PREFIX_SCRIPT), \"Items\": leveldb.GetRandomContents(20, &Filter{})}\n\tcontent := []byte(index.RenderInLayout(layout, mapOutput))\n\tw.Write(content)\n\tgo cacheFile(\"index\", content)\n}", "func (i *IndexDB) SetIndex(ctx context.Context, pn insolar.PulseNumber, 
bucket record.Index) error {\n\ti.lock.Lock()\n\tdefer i.lock.Unlock()\n\n\terr := i.setBucket(pn, bucket.ObjID, &bucket)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstats.Record(ctx, statIndexesAddedCount.M(1))\n\n\tinslogger.FromContext(ctx).Debugf(\"[SetIndex] bucket for obj - %v was set successfully. Pulse: %d\", bucket.ObjID.DebugString(), pn)\n\n\treturn nil\n}", "func (api *API) GetIndex(w http.ResponseWriter, r *http.Request) {\n\n\tinfo := Info{Port: api.Session.Config.API.Port, Versions: Version}\n\td := Metadata{Info: info}\n\n\tres := CodeToResult[CodeOK]\n\tres.Data = d\n\tres.Message = \"Documentation available at https://github.com/netm4ul/netm4ul\"\n\tw.WriteHeader(res.HTTPCode)\n\tjson.NewEncoder(w).Encode(res)\n}", "func (i *Index) Encode() []byte {\n\tvar buf bytes.Buffer\n\t_ = gob.NewEncoder(&buf).Encode(i)\n\treturn buf.Bytes()\n}", "func indexEnc() {\n\tfor i := 0; i < indexSize; i++ {\n\t\tindexItemEnc(testData[i], i)\n\t}\n}", "func (a *TarArchiver) Index(fn func(k string) error) error {\n\treturn fn(slashpath.Join(a.keyPrefix, TarArchiverKey))\n}", "func MakeIndex() error {\n\n\treturn nil\n}", "func Codec(contentType string, c codec.Codec) Option {\n\treturn func(o *Options) {\n\t\to.Codecs[contentType] = c\n\t}\n}", "func (bA *CompactBitArray) SetIndex(i int, v bool) bool {\n\tif bA == nil {\n\t\treturn false\n\t}\n\n\tif i < 0 || i >= bA.Count() {\n\t\treturn false\n\t}\n\n\tif v {\n\t\tbA.Elems[i>>3] |= (1 << uint8(7-(i%8)))\n\t} else {\n\t\tbA.Elems[i>>3] &= ^(1 << uint8(7-(i%8)))\n\t}\n\n\treturn true\n}", "func (es *Connection) Index(\n\tindex, docType, id string,\n\tparams map[string]string,\n\tbody interface{},\n) (int, *QueryResult, error) {\n\tmethod := \"PUT\"\n\tif id == \"\" {\n\t\tmethod = \"POST\"\n\t}\n\treturn withQueryResult(es.apiCall(method, index, docType, id, \"\", params, body))\n}", "func CacheIndex() ReadOption {\n\treturn func(r *Reader) {\n\t\tr.cacheIndex = true\n\t}\n}", "func (gen *AddressGenerator) 
SetIndex(i uint) *AddressGenerator {\n\tgen.state = addressState(i)\n\treturn gen\n}", "func loadIndex(ctx context.Context, repo restic.Repository, id restic.ID) (*index.Index, error) {\n\tbuf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tidx, oldFormat, err := index.DecodeIndex(buf, id)\n\tif oldFormat {\n\t\tfmt.Fprintf(os.Stderr, \"index %v has old format\\n\", id.Str())\n\t}\n\treturn idx, err\n}", "func NewIndex(mapping IndexMapping, opts ...IndexOption) *Index {\n\tindex := &Index{\n\t\tIndexMapping: mapping,\n\t\tpopulateBatchSize: defaultPopulateBatchSize,\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(index)\n\t}\n\n\treturn index\n}", "func (o *Output) WriteIndex(ctx context.Context, cluster string, timestamp time.Time, clusterSummary *api.ClusterSummary) error {\n\tbuffer, err := o.exporter.ExportIndex(ctx, clusterSummary)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfullpath := path.Join(o.path, \"index\", fmt.Sprintf(\"%s%s\", cluster, o.exporter.FileExtension()))\n\tinfo, err := os.Stat(fullpath)\n\n\tif os.IsNotExist(err) {\n\t\treturn writeBufferToPath(fullpath, buffer)\n\t} else if err != nil {\n\t\treturn err\n\t} else if info.IsDir() {\n\t\treturn fmt.Errorf(\"%q is an existing directory\", fullpath)\n\t} else {\n\t\tlog.Printf(\"%q is an existing index, overwriting...\", fullpath)\n\t\treturn writeBufferToPath(fullpath, buffer)\n\t}\n}", "func (c AppConfig) IndexGen() int {\n\tval, ok := c.ConfigVars[\"IndexGen\"]\n\tif !ok {\n\t\tval = \"0\"\n\t\tlog.Printf(\"config.IndexGen: no value found for IndexGen using %s\", val)\n\t}\n\tgen, err := strconv.Atoi(val)\n\tif err != nil {\n\t\tgen = 0\n\t\tlog.Printf(\"config.IndexGen: bad value %s found for IndexGen using %d\", val, gen)\n\t}\n\treturn gen\n}", "func (rc *Cache) PutIndex(key, name string) error {\n\tvar err error\n\tif _, err = rc.do(\"HSET\", key, name, \"1\"); err != nil {\n\t\treturn err\n\t}\n\treturn err\n}", "func 
New(indexRegistry *registry.IndexRegistry, options ...func(*Index)) (I *Index, err error) {\n\tI = &Index{\n\t\tindexRegistry: indexRegistry,\n\t}\n\n\tfor _, option := range options {\n\t\toption(I)\n\t}\n\n\treturn\n}", "func (as *API) Index(ctx context.Context, req *pbreq.Index) (*pbresp.Index, error) {\n\tswitch req.GetType() {\n\tcase \"ipld\":\n\t\tbreak\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"invalid data type '%s'\", req.GetType())\n\t}\n\n\tvar name = req.GetIdentifier()\n\tvar reindex = req.GetReindex()\n\tmetaData, err := as.lens.Magnify(name, reindex)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to perform indexing for '%s': %s\",\n\t\t\tname, err.Error())\n\t}\n\n\tvar resp *lens.Object\n\tif !reindex {\n\t\tif resp, err = as.lens.Store(name, metaData); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tb, err := as.lens.Get(name)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to find ID for object '%s'\", name)\n\t\t}\n\t\tid, err := uuid.FromBytes(b)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid uuid found for '%s' ('%s'): %s\",\n\t\t\t\tname, string(b), err.Error())\n\t\t}\n\t\tif resp, err = as.lens.Update(id, name, metaData); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to update object: %s\", err.Error())\n\t\t}\n\t}\n\n\treturn &pbresp.Index{\n\t\tId: resp.LensID.String(),\n\t\tKeywords: metaData.Summary,\n\t}, nil\n}", "func (app *fetchRegistryBuilder) WithIndex(index IntPointer) FetchRegistryBuilder {\n\tapp.index = index\n\treturn app\n}", "func (es *ElasticSearch) Index(esIndex string, esType string, body interface{}) {\n\t// Add a document to the index\n\t_, err := client.Index().\n\t\tIndex(esIndex).\n\t\tType(\"project\").\n\t\tBodyJson(body).\n\t\tRefresh(true).\n\t\tDo()\n\tif err != nil {\n\t\t// TODO: Handle error\n\t\tpanic(err)\n\t}\n}", "func (m *MonkeyWrench) ReadUsingIndex(table, index string, keys []spanner.KeySet, columns []string) ([]*spanner.Row, error) {\n\t// Default 
to all keys.\n\tvar spannerKeys = spanner.AllKeys()\n\n\t// If we have some specified keys, use those instead.\n\tif len(keys) > 0 {\n\t\tspannerKeys = spanner.KeySets(keys...)\n\t}\n\n\t// Execute the query.\n\titer := m.Client.Single().ReadUsingIndex(m.Context, table, index, spannerKeys, columns)\n\treturn getResultSlice(iter)\n}", "func ConvertToIndexUsage(defn *common.IndexDefn, localMeta *LocalIndexMetadata) (*IndexUsage, error) {\n\n\t// find the topology metadata\n\ttopology := findTopologyByBucket(localMeta.IndexTopologies, defn.Bucket)\n\tif topology == nil {\n\t\tlogging.Errorf(\"Planner::getIndexLayout: Fail to find index topology for bucket %v.\", defn.Bucket)\n\t\treturn nil, nil\n\t}\n\n\t// find the index instance from topology metadata\n\tinst := topology.GetIndexInstByDefn(defn.DefnId)\n\tif inst == nil {\n\t\tlogging.Errorf(\"Planner::getIndexLayout: Fail to find index instance for definition %v.\", defn.DefnId)\n\t\treturn nil, nil\n\t}\n\n\t// Check the index state. Only handle index that is active or being built.\n\t// For index that is in the process of being deleted, planner expects the resource\n\t// will eventually be freed, so it won't included in planning.\n\tstate, _ := topology.GetStatusByDefn(defn.DefnId)\n\tif state != common.INDEX_STATE_CREATED &&\n\t\tstate != common.INDEX_STATE_DELETED &&\n\t\tstate != common.INDEX_STATE_ERROR &&\n\t\tstate != common.INDEX_STATE_NIL {\n\n\t\t// create an index usage object\n\t\tindex := newIndexUsage(defn.DefnId, common.IndexInstId(inst.InstId), defn.Name, defn.Bucket)\n\n\t\t// index is pinned to a node\n\t\tif len(defn.Nodes) != 0 {\n\t\t\tindex.Hosts = defn.Nodes\n\t\t}\n\n\t\t// update sizing\n\t\tindex.IsPrimary = defn.IsPrimary\n\t\tindex.IsMOI = (defn.Using == common.IndexType(common.MemoryOptimized) || defn.Using == common.IndexType(common.MemDB))\n\t\tindex.NoUsage = defn.Deferred && state == common.INDEX_STATE_READY\n\n\t\t// Is the index being deleted by user? 
Thsi will read the delete token from metakv. If untable read from metakv,\n\t\t// pendingDelete is false (cannot assert index is to-be-delete).\n\t\tpendingDelete, err := client.DeleteCommandTokenExist(defn.DefnId)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tindex.pendingDelete = pendingDelete\n\n\t\t// update internal info\n\t\tindex.Instance = &common.IndexInst{\n\t\t\tInstId: common.IndexInstId(inst.InstId),\n\t\t\tDefn: *defn,\n\t\t\tState: common.IndexState(inst.State),\n\t\t\tStream: common.StreamId(inst.StreamId),\n\t\t\tError: inst.Error,\n\t\t\tReplicaId: int(inst.ReplicaId),\n\t\t\tVersion: int(inst.Version),\n\t\t\tRState: common.RebalanceState(inst.RState),\n\t\t}\n\n\t\tlogging.Debugf(\"Create Index usage %v %v %v %v\", index.Name, index.Bucket, index.Instance.InstId, index.Instance.ReplicaId)\n\n\t\treturn index, nil\n\t}\n\n\treturn nil, nil\n}", "func (o SecondaryIndexOutput) IndexType() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *SecondaryIndex) pulumi.StringOutput { return v.IndexType }).(pulumi.StringOutput)\n}", "func (c ClientWrapper) Index() es.IndexService {\n\tr := elastic.NewBulkIndexRequest()\n\treturn WrapESIndexService(r, c.bulkService, c.esVersion)\n}", "func (o *LogQueryDefinition) SetIndex(v string) {\n\to.Index = &v\n}", "func (stqu *SurveyTemplateQuestionUpdate) SetIndex(i int) *SurveyTemplateQuestionUpdate {\n\tstqu.index = &i\n\tstqu.addindex = nil\n\treturn stqu\n}", "func (h *indexHandler) Index() gin.HandlerFunc {\n\treturn func(context *gin.Context) {\n\t\tvar requestFromJson indexRequest\n\n\t\tif err := context.ShouldBindJSON(&requestFromJson); nil != err {\n\t\t\th.errorDispatcher.Dispatch(context, err)\n\n\t\t\treturn\n\t\t}\n\n\t\tvar payload *index.Index = h.indexBuilder.Build(\n\t\t\trequestFromJson.BuilderContext,\n\t\t\trequestFromJson.Locale,\n\t\t)\n\n\t\tcontext.JSON(\n\t\t\thttp.StatusOK,\n\t\t\t&indexResponse{response.NewOkResponse(), *payload},\n\t\t)\n\t}\n}", "func (i ImageIndexer) 
ExportFromIndex(request ExportFromIndexRequest) error {\n\t// set a temp directory\n\tworkingDir, err := ioutil.TempDir(\"./\", tmpDirPrefix)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.RemoveAll(workingDir)\n\n\t// extract the index database to the file\n\tdatabaseFile, err := i.getDatabaseFile(workingDir, request.Index, request.CaFile, request.SkipTLSVerify, request.PlainHTTP)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdb, err := sqlite.Open(databaseFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\tdbQuerier := sqlite.NewSQLLiteQuerierFromDb(db)\n\n\t// fetch all packages from the index image if packages is empty\n\tif len(request.Packages) == 0 {\n\t\trequest.Packages, err = dbQuerier.ListPackages(context.TODO())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tbundles, err := getBundlesToExport(dbQuerier, request.Packages)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ti.Logger.Infof(\"Preparing to pull bundles %+q\", bundles)\n\n\t// Creating downloadPath dir\n\tif err := os.MkdirAll(request.DownloadPath, 0777); err != nil {\n\t\treturn err\n\t}\n\n\tvar errs []error\n\tvar wg sync.WaitGroup\n\twg.Add(len(bundles))\n\tvar mu = &sync.Mutex{}\n\n\tsem := make(chan struct{}, concurrencyLimitForExport)\n\n\tfor bundleImage, bundleDir := range bundles {\n\t\tgo func(bundleImage string, bundleDir bundleDirPrefix) {\n\t\t\tdefer wg.Done()\n\n\t\t\tsem <- struct{}{}\n\t\t\tdefer func() {\n\t\t\t\t<-sem\n\t\t\t}()\n\n\t\t\t// generate a random folder name if bundle version is empty\n\t\t\tif bundleDir.bundleVersion == \"\" {\n\t\t\t\tbundleDir.bundleVersion = strconv.Itoa(rand.Intn(10000))\n\t\t\t}\n\t\t\texporter := bundle.NewExporterForBundle(bundleImage, filepath.Join(request.DownloadPath, bundleDir.pkgName, bundleDir.bundleVersion), request.ContainerTool)\n\t\t\tif err := exporter.Export(request.SkipTLSVerify, request.PlainHTTP); err != nil {\n\t\t\t\terr = fmt.Errorf(\"exporting bundle image:%s failed with %s\", bundleImage, 
err)\n\t\t\t\tmu.Lock()\n\t\t\t\terrs = append(errs, err)\n\t\t\t\tmu.Unlock()\n\t\t\t}\n\t\t}(bundleImage, bundleDir)\n\t}\n\t// Wait for all the go routines to finish export\n\twg.Wait()\n\n\tif errs != nil {\n\t\treturn utilerrors.NewAggregate(errs)\n\t}\n\n\tfor _, packageName := range request.Packages {\n\t\terr := generatePackageYaml(dbQuerier, packageName, filepath.Join(request.DownloadPath, packageName))\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\treturn utilerrors.NewAggregate(errs)\n}", "func (r *Search) Index(index string) *Search {\n\tr.paramSet |= indexMask\n\tr.index = index\n\n\treturn r\n}", "func (s *Storage) PutIndex(ctx context.Context, uri string, acl string, r io.Reader) error {\n\tif strings.HasPrefix(uri, \"index.yaml\") {\n\t\treturn errors.New(\"uri must not contain \\\"index.yaml\\\" suffix, it appends automatically\")\n\t}\n\turi += \"/index.yaml\"\n\n\tbucket, key, err := parseURI(uri)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = s3manager.NewUploader(s.session).UploadWithContext(\n\t\tctx,\n\t\t&s3manager.UploadInput{\n\t\t\tBucket: aws.String(bucket),\n\t\t\tKey: aws.String(key),\n\t\t\tACL: aws.String(acl),\n\t\t\tServerSideEncryption: getSSE(),\n\t\t\tBody: r,\n\t\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"upload index to S3 bucket\")\n\t}\n\n\treturn nil\n}", "func NewIndex(addrs []string, pass string, temporary int, name string, md *index.Metadata) *Index {\n\n\tret := &Index{\n\n\t\thosts: addrs,\n\n\t\tmd: md,\n\t\tpassword: pass,\n\t\ttemporary: temporary,\n\n\t\tname: name,\n\n\t\tcommandPrefix: \"FT\",\n\t}\n\tif md != nil && md.Options != nil {\n\t\tif opts, ok := md.Options.(IndexingOptions); ok {\n\t\t\tif opts.Prefix != \"\" {\n\t\t\t\tret.commandPrefix = md.Options.(IndexingOptions).Prefix\n\t\t\t}\n\t\t}\n\t}\n\t//ret.pool.MaxActive = ret.pool.MaxIdle\n\n\treturn ret\n\n}", "func (wc *WriterBase) ShouldWriteIndex() bool {\n\treturn wc.currentResolution == wc.resolutions[0][0] || 
len(wc.resolutions) == 1\n}", "func NewIndex(kind IndexKind, table string) Index {\n\treturn &index{\n\t\tkind: kind,\n\t\ttable: table,\n\t}\n}", "func Index(w http.ResponseWriter, data *IndexData) {\n\trender(tpIndex, w, data)\n}", "func Index(w http.ResponseWriter, data *IndexData) {\n\trender(tpIndex, w, data)\n}", "func SaveIndex(target string, source QueryList, verbose bool) {\n\tlogm(\"INFO\", fmt.Sprintf(\"saving index to %s...\", target), verbose)\n\tfile, err := os.Create(target)\n\tcheckResult(err)\n\tdefer file.Close()\n\n\tgr := gzip.NewWriter(file)\n\tdefer gr.Close()\n\n\tencoder := gob.NewEncoder(gr)\n\n\terr = encoder.Encode(source.Names)\n\tcheckResult(err)\n\tlogm(\"INFO\", fmt.Sprintf(\"%v sequence names saved\", len(source.Names)), verbose)\n\n\terr = encoder.Encode(source.SeedSize)\n\tcheckResult(err)\n\n\terr = encoder.Encode(source.Cgst)\n\tcheckResult(err)\n\n\t// save the index, but go has a size limit\n\tindexSize := len(source.Index)\n\terr = encoder.Encode(indexSize)\n\tcheckResult(err)\n\tlogm(\"INFO\", fmt.Sprintf(\"%v queries to save...\", indexSize), verbose)\n\n\tcount := 0\n\tfor key, value := range source.Index {\n\t\terr = encoder.Encode(key)\n\t\tcheckResult(err)\n\t\terr = encoder.Encode(value)\n\t\tcheckResult(err)\n\t\tcount++\n\t\tif count%10000 == 0 {\n\t\t\tlogm(\"INFO\", fmt.Sprintf(\"processing: saved %v items\", count), false)\n\t\t}\n\t}\n\n\tlogm(\"INFO\", fmt.Sprintf(\"saving index to %s: done\", target), verbose)\n}", "func OpenIndex(collectionName, indexName string, fd *feed.API, ai *account.Info, user utils.Address, client blockstore.Client, logger logging.Logger) (*Index, error) {\n\tactualIndexName := collectionName + indexName\n\tmanifest := getRootManifestOfIndex(actualIndexName, fd, user, client) // this will load the entire Manifest for immutable indexes\n\tif manifest == nil {\n\t\treturn nil, ErrIndexNotPresent\n\t}\n\n\tidx := &Index{\n\t\tname: manifest.Name,\n\t\tmutable: 
manifest.Mutable,\n\t\tindexType: manifest.IdxType,\n\t\tpodFile: manifest.PodFile,\n\t\tuser: user,\n\t\taccountInfo: ai,\n\t\tfeed: fd,\n\t\tclient: client,\n\t\tcount: 0,\n\t\tmemDB: manifest,\n\t\tlogger: logger,\n\t}\n\treturn idx, nil\n}", "func (b *Blueprint) Index(columns []string, name string, algorithm string) *Blueprint {\n\treturn b.indexCommand(\"index\", columns, name, algorithm)\n}", "func Codec(codec *encoding.Codec) Opt {\n\treturn func(c *Client) Opt {\n\t\told := c.codec\n\t\tc.codec = codec\n\t\treturn Codec(old)\n\t}\n}", "func (rb *ShardsRecordBuilder) IndexingIndexTime(indexingindextime string) *ShardsRecordBuilder {\n\trb.v.IndexingIndexTime = &indexingindextime\n\treturn rb\n}", "func (c *rawConnection) Index(repo string, idx []FileInfo) {\n\tc.imut.Lock()\n\tvar msgType int\n\tif c.indexSent[repo] == nil {\n\t\t// This is the first time we send an index.\n\t\tmsgType = messageTypeIndex\n\n\t\tc.indexSent[repo] = make(map[string][2]int64)\n\t\tfor _, f := range idx {\n\t\t\tc.indexSent[repo][f.Name] = [2]int64{f.Modified, int64(f.Version)}\n\t\t}\n\t} else {\n\t\t// We have sent one full index. 
Only send updates now.\n\t\tmsgType = messageTypeIndexUpdate\n\t\tvar diff []FileInfo\n\t\tfor _, f := range idx {\n\t\t\tif vs, ok := c.indexSent[repo][f.Name]; !ok || f.Modified != vs[0] || int64(f.Version) != vs[1] {\n\t\t\t\tdiff = append(diff, f)\n\t\t\t\tc.indexSent[repo][f.Name] = [2]int64{f.Modified, int64(f.Version)}\n\t\t\t}\n\t\t}\n\t\tidx = diff\n\t}\n\tc.imut.Unlock()\n\n\tc.send(header{0, -1, msgType}, IndexMessage{repo, idx})\n}", "func (m *RecurrencePattern) SetIndex(value *WeekIndex)() {\n m.index = value\n}", "func (i *Index) Index(docs []index.Document, options interface{}) error {\n\n\tvar opts IndexingOptions\n\thasOpts := false\n\tif options != nil {\n\t\tif opts, hasOpts = options.(IndexingOptions); !hasOpts {\n\t\t\treturn errors.New(\"invalid indexing options\")\n\t\t}\n\t}\n\n\tconn := i.getConn()\n\tdefer conn.Close()\n\n\tn := 0\n\n\tfor _, doc := range docs {\n\t\targs := make(redis.Args, 0, len(i.md.Fields)*2+4)\n\t\targs = append(args, i.name, doc.Id, doc.Score)\n\t\t// apply options\n\t\tif hasOpts {\n\t\t\tif opts.NoSave {\n\t\t\t\targs = append(args, \"NOSAVE\")\n\t\t\t}\n\t\t\tif opts.Language != \"\" {\n\t\t\t\targs = append(args, \"LANGUAGE\", opts.Language)\n\t\t\t}\n\t\t}\n\n\t\targs = append(args, \"FIELDS\")\n\n\t\tfor k, f := range doc.Properties {\n\t\t\targs = append(args, k, f)\n\t\t}\n\n\t\tif err := conn.Send(i.commandPrefix+\".ADD\", args...); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tn++\n\t}\n\n\tif err := conn.Flush(); err != nil {\n\t\treturn err\n\t}\n\n\tfor n > 0 {\n\t\tif _, err := conn.Receive(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tn--\n\t}\n\n\treturn nil\n}", "func (r *Redis) Index(i services.Consumable) error {\n\tvar (\n\t\tpipe = r.client.TxPipeline()\n\n\t\ttxByIDKey = redisIndexKeysTxByID(r.chainID.String(), i.ID())\n\t\ttxCountKey = redisIndexKeysTxCount(r.chainID.String())\n\t\trecentTxsKey = redisIndexKeysRecentTxs(r.chainID.String())\n\n\t\tctx, cancelFn = 
context.WithTimeout(context.Background(), redisTimeout)\n\t)\n\tdefer cancelFn()\n\n\tif err := pipe.Set(ctx, txByIDKey, i.Body(), 0).Err(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := pipe.Incr(ctx, txCountKey).Err(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := pipe.LPush(ctx, recentTxsKey, i.ID()).Err(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := pipe.LTrim(ctx, recentTxsKey, 0, redisRecentTxsSize-1).Err(); err != nil {\n\t\treturn err\n\t}\n\n\t_, err := pipe.Exec(ctx)\n\treturn err\n}", "func (o *IssueRemoveLabelParams) SetIndex(index int64) {\n\to.Index = index\n}", "func generateIssueIndexMapping() (mapping.IndexMapping, error) {\n\tmapping := bleve.NewIndexMapping()\n\tdocMapping := bleve.NewDocumentMapping()\n\n\tnumericFieldMapping := bleve.NewNumericFieldMapping()\n\tnumericFieldMapping.IncludeInAll = false\n\tdocMapping.AddFieldMappingsAt(\"RepoID\", numericFieldMapping)\n\n\ttextFieldMapping := bleve.NewTextFieldMapping()\n\ttextFieldMapping.Store = false\n\ttextFieldMapping.IncludeInAll = false\n\tdocMapping.AddFieldMappingsAt(\"Title\", textFieldMapping)\n\tdocMapping.AddFieldMappingsAt(\"Content\", textFieldMapping)\n\tdocMapping.AddFieldMappingsAt(\"Comments\", textFieldMapping)\n\n\tif err := addUnicodeNormalizeTokenFilter(mapping); err != nil {\n\t\treturn nil, err\n\t} else if err = mapping.AddCustomAnalyzer(issueIndexerAnalyzer, map[string]interface{}{\n\t\t\"type\": custom.Name,\n\t\t\"char_filters\": []string{},\n\t\t\"tokenizer\": unicode.Name,\n\t\t\"token_filters\": []string{unicodeNormalizeName, camelcase.Name, lowercase.Name},\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\n\tmapping.DefaultAnalyzer = issueIndexerAnalyzer\n\tmapping.AddDocumentMapping(issueIndexerDocType, docMapping)\n\tmapping.AddDocumentMapping(\"_all\", bleve.NewDocumentDisabledMapping())\n\n\treturn mapping, nil\n}", "func (c *index) GenIndexKey(sc *stmtctx.StatementContext, indexedValues []types.Datum, h int64, buf []byte) (key []byte, distinct bool, 
err error) {\n\tif c.idxInfo.Unique {\n\t\t// See https://dev.mysql.com/doc/refman/5.7/en/create-index.html\n\t\t// A UNIQUE index creates a constraint such that all values in the index must be distinct.\n\t\t// An error occurs if you try to add a new row with a key value that matches an existing row.\n\t\t// For all engines, a UNIQUE index permits multiple NULL values for columns that can contain NULL.\n\t\tdistinct = true\n\t\tfor _, cv := range indexedValues {\n\t\t\tif cv.IsNull() {\n\t\t\t\tdistinct = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t// For string columns, indexes can be created using only the leading part of column values,\n\t// using col_name(length) syntax to specify an index prefix length.\n\tindexedValues = TruncateIndexValuesIfNeeded(c.tblInfo, c.idxInfo, indexedValues)\n\tkey = c.getIndexKeyBuf(buf, len(c.prefix)+len(indexedValues)*9+9)\n\tkey = append(key, []byte(c.prefix)...)\n\tkey, err = codec.EncodeKey(sc, key, indexedValues...)\n\tif !distinct && err == nil {\n\t\tkey, err = codec.EncodeKey(sc, key, types.NewDatum(h))\n\t}\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\treturn\n}", "func (s *Server) RegisterCodec(codec Codec, contentType string) {\n s.codecs[strings.ToLower(contentType)] = codec\n}", "func Isindex(attrs []htmlgo.Attribute, children ...HTML) HTML {\n\treturn &htmlgo.Tree{Tag: \"isindex\", Attributes: attrs, Children: children}\n}", "func (_e *MockDataCoord_Expecter) DescribeIndex(ctx interface{}, req interface{}) *MockDataCoord_DescribeIndex_Call {\n\treturn &MockDataCoord_DescribeIndex_Call{Call: _e.mock.On(\"DescribeIndex\", ctx, req)}\n}", "func (s ConsoleIndexStore) StoreIndex(name string, idx Index) error {\n\t_, err := idx.WriteTo(os.Stdout)\n\treturn err\n}" ]
[ "0.5502938", "0.52192473", "0.51957196", "0.51697326", "0.5116415", "0.5042837", "0.50319487", "0.48964846", "0.48657367", "0.48536038", "0.48389342", "0.4827276", "0.48076987", "0.4801819", "0.47965235", "0.4753826", "0.47460607", "0.4742304", "0.47193858", "0.47038877", "0.4692363", "0.46868733", "0.46837276", "0.46707043", "0.46681562", "0.46651402", "0.46597168", "0.46380782", "0.4627767", "0.4624632", "0.46226254", "0.4613507", "0.46049985", "0.46007589", "0.4594684", "0.45830384", "0.4570369", "0.45666355", "0.4563887", "0.45582423", "0.4557208", "0.4527146", "0.45203593", "0.451343", "0.45121947", "0.45016536", "0.4500156", "0.44881213", "0.44687417", "0.4468659", "0.44579467", "0.44543144", "0.44469458", "0.4436678", "0.44208673", "0.4416574", "0.441252", "0.44060874", "0.4402094", "0.44005042", "0.43964043", "0.43947968", "0.43873066", "0.43831855", "0.43789312", "0.43743363", "0.43726158", "0.43500626", "0.43313614", "0.432883", "0.4323144", "0.43074837", "0.42871547", "0.42828578", "0.42799073", "0.42763367", "0.42760184", "0.42684796", "0.42634934", "0.4257159", "0.4249881", "0.42488283", "0.42481467", "0.42481467", "0.42472848", "0.4247067", "0.42452997", "0.42407218", "0.42377353", "0.422243", "0.42178258", "0.42147046", "0.4211853", "0.42115742", "0.42098293", "0.4208507", "0.4207388", "0.42053095", "0.42048842", "0.42034593" ]
0.8574102
0
WithoutIndex flags that no index should be included in generation.
func WithoutIndex() Option { return func(o *Options) { o.IndexCodec = index.CarIndexNone } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func IndexOptionsNone() IndexOptions {\n\tresult := IndexOptions{}\n\n\treturn result\n}", "func (dict *Dictionary) DropIndex() {\n\tdict.shortIndex = nil\n\tdict.longIndex = nil\n}", "func (_m *DirectRepositoryWriter) DisableIndexRefresh() {\n\t_m.Called()\n}", "func (r *Search) AllowNoIndices(allownoindices bool) *Search {\n\tr.values.Set(\"allow_no_indices\", strconv.FormatBool(allownoindices))\n\n\treturn r\n}", "func (wou *WorkOrderUpdate) ClearIndex() *WorkOrderUpdate {\n\twou.index = nil\n\twou.clearindex = true\n\treturn wou\n}", "func (wouo *WorkOrderUpdateOne) ClearIndex() *WorkOrderUpdateOne {\n\twouo.index = nil\n\twouo.clearindex = true\n\treturn wouo\n}", "func WithoutPosition() OptionFunc {\n\treturn func(opt *Options) {\n\t\topt.ShowFlag = Fnopos\n\t}\n}", "func (s *FieldStatsService) AllowNoIndices(allowNoIndices bool) *FieldStatsService {\n\ts.allowNoIndices = &allowNoIndices\n\treturn s\n}", "func WithoutTimestamp() Option {\n\treturn func(l LoggerOpts) LoggerOpts {\n\t\tl.IncludeTime = false\n\t\treturn l\n\t}\n}", "func (o *DatasetEvent) UnsetSourceMapIndex() {\n\to.SourceMapIndex.Unset()\n}", "func (g *GeneratedFile) Unskip() {\n\tg.skip = false\n}", "func WithoutLocation() Option {\n\treturn func(l LoggerOpts) LoggerOpts {\n\t\tl.IncludeLocation = false\n\t\treturn l\n\t}\n}", "func (index *spdIndex) Clear() {\n\tindex.mapping.Clear()\n}", "func WithNoVersion() Option {\n\treturn func(a *App) {\n\t\ta.noVersion = true\n\t}\n}", "func TestEngine_WriteIndex_NoPoints(t *testing.T) {\n\te := OpenDefaultEngine()\n\tdefer e.Close()\n\tif err := e.WriteIndex(map[string][][]byte{\"cpu\": nil}, nil, nil); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func NotIt() {\n\toutput.EmitLn(\"EOR #-1,D0\")\n}", "func DisableVertexAttribArray(index uint32) {\n C.glowDisableVertexAttribArray(gpDisableVertexAttribArray, (C.GLuint)(index))\n}", "func (s *IndicesSyncedFlushService) AllowNoIndices(allowNoIndices bool) *IndicesSyncedFlushService {\n\ts.allowNoIndices 
= &allowNoIndices\n\treturn s\n}", "func (b CreateIndexBuilder) IfNotExists() CreateIndexBuilder {\n\treturn builder.Set(b, \"IfNotExists\", true).(CreateIndexBuilder)\n}", "func (c RawConfiguration) WithoutNodes(ids ...uint32) NodeListOption {\n\trmIDs := make(map[uint32]bool)\n\tfor _, id := range ids {\n\t\trmIDs[id] = true\n\t}\n\tkeepIDs := make([]uint32, 0, len(c))\n\tfor _, cNode := range c {\n\t\tif !rmIDs[cNode.id] {\n\t\t\tkeepIDs = append(keepIDs, cNode.id)\n\t\t}\n\t}\n\treturn &nodeIDs{nodeIDs: keepIDs}\n}", "func NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) Option {\n\treturn func(c *Options) {\n\t\tc.noDebugFlagOnForcedSampling = noDebugFlagOnForcedSampling\n\t}\n}", "func IndexNotIn(vs ...int) predicate.Step {\n\tv := make([]interface{}, len(vs))\n\tfor i := range v {\n\t\tv[i] = vs[i]\n\t}\n\treturn predicate.Step(func(s *sql.Selector) {\n\t\t// if not arguments were provided, append the FALSE constants,\n\t\t// since we can't apply \"IN ()\". This will make this predicate falsy.\n\t\tif len(v) == 0 {\n\t\t\ts.Where(sql.False())\n\t\t\treturn\n\t\t}\n\t\ts.Where(sql.NotIn(s.C(FieldIndex), v...))\n\t})\n}", "func GenerateNotFoundIndex(datatypeName string) string {\n\tswitch datatypeName {\n\tcase field.TypeString:\n\t\treturn \"strconv.Itoa(100000)\"\n\tcase field.TypeUint, field.TypeInt:\n\t\treturn \"100000\"\n\tcase field.TypeBool:\n\t\treturn valueFalse\n\tcase field.TypeCustom:\n\t\treturn valueNull\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown type %s\", datatypeName))\n\t}\n}", "func GetIndicesWithoutIgnored() (parsedIndices []types.Index, err string) {\n\tvar indices, getErr = GetIndices()\n\tcleanIndices := make([]types.Index, len(parsedIndices))\n\tfor _, indexed := range indices {\n\t\tvar ignorable bool\n\t\tfor _, ignored := range singleton.GetConfig().Parser.Ignorelist {\n\t\t\tif ignored != \"\" {\n\t\t\t\tr, _ := regexp.Compile(ignored)\n\n\t\t\t\tif r.MatchString(indexed.Name) {\n\n\t\t\t\t\tignorable = 
true\n\t\t\t\t\tif singleton.GetVerbose() {\n\t\t\t\t\t\tlog.Println(\"Index name: \" + indexed.Name + \" matches the regex: \" + ignored)\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif !ignorable {\n\t\t\tcleanIndices = append(cleanIndices, indexed)\n\t\t}\n\t\tignorable = false\n\t}\n\treturn cleanIndices, getErr\n}", "func (list BuildpackV2List) WithoutDisabled() BuildpackV2List {\n\tvar out BuildpackV2List\n\n\tfor _, buildpack := range list {\n\t\tif !buildpack.Disabled {\n\t\t\tout = append(out, buildpack)\n\t\t}\n\t}\n\n\treturn out\n}", "func (n Noop) Index() int {\n\treturn 0\n}", "func (m *MockDriver) UseIndexPlaceholders() bool {\n\treturn false\n}", "func (ibt *IndexBehaviorTest) TestCheckIndexNoIndexNeeded(c *C) {\n\tctx := context.Background()\n\tdsClient, err := datastore.NewClient(ctx, TestProject)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tnoIndex := func(q *datastore.Query) {\n\t\tt := dsClient.Run(ctx, q)\n\t\tvar x testKind\n\t\t_, err := t.Next(&x)\n\t\tif err != iterator.Done {\n\t\t\tc.Assert(err, IsNil)\n\t\t}\n\t}\n\n\t// Kindless queries using only ancestor and key filters\n\t// (these are scary though, so we don't use them)\n\tnoIndex(datastore.NewQuery(\"\").Filter(\"__key__ >\", testKey).Ancestor(testKey))\n\n\t// Queries using only ancestor and equality filters\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\"))\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Filter(\"singleField =\", 0))\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Filter(\"singleField =\", 0).Ancestor(testKey))\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Filter(\"singleField =\", 0).Filter(\"otherField =\", 1).Ancestor(testKey))\n\n\t// Queries using only inequality filters (which are limited to a single property)\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Filter(\"singleField >\", 0))\n\t// Even if there's two on the same field.\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Filter(\"singleField >\", 
0).Filter(\"singleField <\", 200))\n\n\t// Queries using only ancestor filters, equality filters on properties, and inequality filters on keys\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Filter(\"singleField =\", 0).Filter(\"__key__ >\", testKey).Ancestor(testKey))\n\n\t// Queries with no filters and only one sort order on a property, either ascending or descending\n\t// (unless descending key)\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Order(\"singleField\"))\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Order(\"-singleField\"))\n\n\t// Also with a filter on the ordered property (undocumented)\n\t// (Ordering of query results is undefined when no sort order is specified)\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Filter(\"singleField >\", 0).Order(\"singleField\"))\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Filter(\"singleField >\", 0).Filter(\"singleField <\", 0).Order(\"-singleField\"))\n\n\t// If a query does not need an index, making it keys-only does not make you need one.\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").KeysOnly())\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Filter(\"singleField >\", 0).KeysOnly())\n\n\t// Single project + Order\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Project(\"A\").Order(\"-A\"))\n}", "func TestEnsureSkipListIndex(t *testing.T) {\n\tc := createClient(t, nil)\n\tdb := ensureDatabase(nil, c, \"index_test\", nil, t)\n\n\ttestOptions := []*driver.EnsureSkipListIndexOptions{\n\t\tnil,\n\t\t{Unique: true, Sparse: false, NoDeduplicate: true},\n\t\t{Unique: true, Sparse: true, NoDeduplicate: true},\n\t\t{Unique: false, Sparse: false, NoDeduplicate: false},\n\t\t{Unique: false, Sparse: true, NoDeduplicate: false},\n\t}\n\n\tfor i, options := range testOptions {\n\t\tcol := ensureCollection(nil, db, fmt.Sprintf(\"skiplist_index_test_%d\", i), nil, t)\n\n\t\tidx, created, err := col.EnsureSkipListIndex(nil, []string{\"name\", \"title\"}, options)\n\t\tif err != nil 
{\n\t\t\tt.Fatalf(\"Failed to create new index: %s\", describe(err))\n\t\t}\n\t\tif !created {\n\t\t\tt.Error(\"Expected created to be true, got false\")\n\t\t}\n\t\tif idxType := idx.Type(); idxType != driver.SkipListIndex {\n\t\t\tt.Errorf(\"Expected SkipListIndex, found `%s`\", idxType)\n\t\t}\n\t\tif options != nil && idx.Unique() != options.Unique {\n\t\t\tt.Errorf(\"Expected Unique to be %t, found `%t`\", options.Unique, idx.Unique())\n\t\t}\n\t\tif options != nil && idx.Sparse() != options.Sparse {\n\t\t\tt.Errorf(\"Expected Sparse to be %t, found `%t`\", options.Sparse, idx.Sparse())\n\t\t}\n\t\tif options != nil && !idx.Deduplicate() != options.NoDeduplicate {\n\t\t\tt.Errorf(\"Expected NoDeduplicate to be %t, found `%t`\", options.NoDeduplicate, idx.Deduplicate())\n\t\t}\n\n\t\t// Index must exists now\n\t\tif found, err := col.IndexExists(nil, idx.Name()); err != nil {\n\t\t\tt.Fatalf(\"Failed to check index '%s' exists: %s\", idx.Name(), describe(err))\n\t\t} else if !found {\n\t\t\tt.Errorf(\"Index '%s' does not exist, expected it to exist\", idx.Name())\n\t\t}\n\n\t\t// Ensure again, created must be false now\n\t\t_, created, err = col.EnsureSkipListIndex(nil, []string{\"name\", \"title\"}, options)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to re-create index: %s\", describe(err))\n\t\t}\n\t\tif created {\n\t\t\tt.Error(\"Expected created to be false, got true\")\n\t\t}\n\n\t\t// Remove index\n\t\tif err := idx.Remove(nil); err != nil {\n\t\t\tt.Fatalf(\"Failed to remove index '%s': %s\", idx.Name(), describe(err))\n\t\t}\n\n\t\t// Index must not exists now\n\t\tif found, err := col.IndexExists(nil, idx.Name()); err != nil {\n\t\t\tt.Fatalf(\"Failed to check index '%s' exists: %s\", idx.Name(), describe(err))\n\t\t} else if found {\n\t\t\tt.Errorf(\"Index '%s' does exist, expected it not to exist\", idx.Name())\n\t\t}\n\t}\n}", "func WithoutAll() Option {\n\treturn func(d *Decoder) {\n\t\td.proto = false\n\t\td.byteDec = false\n\t\td.hex = 
false\n\t\td.base64 = false\n\t}\n}", "func TestEngine_WriteIndex_NoKeys(t *testing.T) {\n\te := OpenDefaultEngine()\n\tdefer e.Close()\n\tif err := e.WriteIndex(nil, nil, nil); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func WithoutFields(fields ...string) ReqOption {\n\treturn func(v url.Values) {\n\t\tv.Set(\"fields\", strings.Join(fields, \",\"))\n\t\tv.Set(\"include_fields\", \"false\")\n\t}\n}", "func (uh *UserHandler) IndexNot(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"Not Sucessfull!\"))\n}", "func (s *ValidateService) AllowNoIndices(allowNoIndices bool) *ValidateService {\n\ts.allowNoIndices = &allowNoIndices\n\treturn s\n}", "func ignorePredicate() predicate.Predicate {\n\treturn predicate.Funcs{\n\t\tUpdateFunc: func(e event.UpdateEvent) bool {\n\t\t\treturn e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration()\n\t\t},\n\t}\n}", "func Not(p predicate.AllocationStrategy) predicate.AllocationStrategy {\n\treturn predicate.AllocationStrategy(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func DisableVertexAttribArray(index uint32) {\n\tgl.DisableVertexAttribArray(index)\n}", "func (s *IndicesClearCacheService) AllowNoIndices(allowNoIndices bool) *IndicesClearCacheService {\n\ts.allowNoIndices = &allowNoIndices\n\treturn s\n}", "func WithoutLogLocation() ServeOpt {\n\treturn serveConfigFunc(func(in *ServeConfig) error {\n\t\tin.disableLogLocation = true\n\t\treturn nil\n\t})\n}", "func generateNoOpBlock() {\n\tfmt.Println(\"Generating a NoOp Block...\")\n\tfmt.Println(\"Block chain size:\", len(blockChain), \"number transactions:\", len(transactions))\n\t// TODO this printstate() actually seemed to help performance... 
Maybe could use a tiny sleep here?\n\tprintState()\n\tif len(leafBlocks) > 1 {\n\t\tfmt.Println(\"We have a fork!!!!!!!!!!!!!!\")\n\t}\n\tnoOpBlock := Block{HashBlock: HashBlock{TxID: 0, NodeID: myNodeID, Nonce: 0}}\n\tnoOpBlock = setCorrectParentHashAndDepth(noOpBlock)\n\tfor isGenerateNoOps {\n\t\tsuccess, _ := generateBlock(&noOpBlock)\n\t\tif success {\n\t\t\treturn\n\t\t}\n\t}\n\t// received a call to commit or AddBlock which set isGenerateNoOps = false\n\treturn\n}", "func DisableVertexAttribArray(index uint32) {\n\tC.glowDisableVertexAttribArray(gpDisableVertexAttribArray, (C.GLuint)(index))\n}", "func DisableVertexAttribArray(index uint32) {\n\tC.glowDisableVertexAttribArray(gpDisableVertexAttribArray, (C.GLuint)(index))\n}", "func (b *ProxyBuilder) NoProxy(value string) *ProxyBuilder {\n\tb.noProxy = value\n\tb.bitmap_ |= 4\n\treturn b\n}", "func WithSingleSampleDisabled() Option {\n\treturn func(e *Engine) {\n\t\te.graph.singleSampleDisabled = true\n\t}\n}", "func (x EntIndex) IsUnique() bool { return (x.Flags & EntIndexUnique) != 0 }", "func Not(p predicate.OutcomeOverview) predicate.OutcomeOverview {\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func (gq *GoodsQuery) OnlyIDX(ctx context.Context) string {\n\tid, err := gq.OnlyID(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn id\n}", "func (c Capabilities) Without(feature string) Capabilities {\n\tc[feature] = false\n\treturn c\n}", "func (g *Generator) generateDDLsForAbsentIndex(currentIndex Index, currentTable Table, desiredTable Table) ([]string, error) {\n\tddls := []string{}\n\n\tif currentIndex.primary {\n\t\tvar primaryKeyColumn *Column\n\t\tfor _, column := range desiredTable.columns {\n\t\t\tif column.keyOption == ColumnKeyPrimary {\n\t\t\t\tprimaryKeyColumn = &column\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// If nil, it will be `DROP COLUMN`-ed. 
Ignore it.\n\t\tif primaryKeyColumn != nil && primaryKeyColumn.name != currentIndex.columns[0].column { // TODO: check length of currentIndex.columns\n\t\t\t// TODO: handle this. Rename primary key column...?\n\t\t\treturn ddls, fmt.Errorf(\n\t\t\t\t\"primary key column name of '%s' should be '%s' but currently '%s'. This is not handled yet.\",\n\t\t\t\tcurrentTable.name, primaryKeyColumn.name, currentIndex.columns[0].column,\n\t\t\t)\n\t\t}\n\t} else if currentIndex.unique {\n\t\tvar uniqueKeyColumn *Column\n\t\tfor _, column := range desiredTable.columns {\n\t\t\tif column.name == currentIndex.columns[0].column && column.keyOption.isUnique() {\n\t\t\t\tuniqueKeyColumn = &column\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif uniqueKeyColumn == nil {\n\t\t\t// No unique column. Drop unique key index.\n\t\t\tddls = append(ddls, g.generateDropIndex(currentTable.name, currentIndex.name))\n\t\t}\n\t} else {\n\t\tddls = append(ddls, g.generateDropIndex(currentTable.name, currentIndex.name))\n\t}\n\n\treturn ddls, nil\n}", "func DisableVertexArrayAttrib(vaobj uint32, index uint32) {\n\tC.glowDisableVertexArrayAttrib(gpDisableVertexArrayAttrib, (C.GLuint)(vaobj), (C.GLuint)(index))\n}", "func DisableVertexArrayAttrib(vaobj uint32, index uint32) {\n\tC.glowDisableVertexArrayAttrib(gpDisableVertexArrayAttrib, (C.GLuint)(vaobj), (C.GLuint)(index))\n}", "func (adminAPIOp) SkipVerification() bool { return true }", "func (gsuo *GameServerUpdateOne) ClearDisabledAt() *GameServerUpdateOne {\n\tgsuo.mutation.ClearDisabledAt()\n\treturn gsuo\n}", "func DisableVertexAttribArray(index Uint) {\n\tcindex, _ := (C.GLuint)(index), cgoAllocsUnknown\n\tC.glDisableVertexAttribArray(cindex)\n}", "func (b *Base) Omit(keys ...string) Serializer {\n\treturn b.OmitIf(alwaysTrue, keys...)\n}", "func WithoutHeaderIgnore(ks []string) Option {\n\treturn func(c *Config) {\n\t\tc.withoutHeaderIgnore = ks\n\t}\n}", "func (h History) AttachIndexIfNoExists() {\n\tif len(h) != 0 && h[0].Index.Present() 
{\n\t\treturn\n\t}\n\tfor i := range h {\n\t\th[i].Index = IntOptional{i}\n\t}\n}", "func (td TupleDesc) WithoutFixedAccess() TupleDesc {\n\treturn TupleDesc{Types: td.Types, cmp: td.cmp}\n}", "func NonMaxSuppression(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, optional ...NonMaxSuppressionAttr) (selected_indices tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\tattrs := map[string]interface{}{}\n\tfor _, a := range optional {\n\t\ta(attrs)\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"NonMaxSuppression\",\n\t\tInput: []tf.Input{\n\t\t\tboxes, scores, max_output_size,\n\t\t},\n\t\tAttrs: attrs,\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func DisablesTestReadIndexTask_IgnoreHiddenPics(t *testing.T) {\n\tc := Container(t)\n\tdefer c.Close()\n\n\tu := c.CreateUser()\n\tu.User.Capability = append(u.User.Capability, schema.User_PIC_INDEX)\n\tu.Update()\n\n\tp1 := c.CreatePic()\n\tp3 := c.CreatePic()\n\t// A hard deletion\n\tp3.Pic.DeletionStatus = &schema.Pic_DeletionStatus{\n\t\tActualDeletedTs: schema.ToTs(time.Now()),\n\t}\n\tp3.Update()\n\n\ttask := ReadIndexPicsTask{\n\t\tDB: c.DB(),\n\t\tCtx: CtxFromUserID(context.Background(), u.User.UserId),\n\t}\n\n\tif err := task.Run(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif len(task.Pics) != 1 || !proto.Equal(p1.Pic, task.Pics[0]) {\n\t\tt.Fatalf(\"Unable to find %s in\\n %s\", p1, task.Pics)\n\t}\n}", "func clearIndex(\n\tctx context.Context,\n\texecCfg *sql.ExecutorConfig,\n\ttableDesc catalog.TableDescriptor,\n\tindex descpb.IndexDescriptor,\n) error {\n\tlog.Infof(ctx, \"clearing index %d from table %d\", index.ID, tableDesc.GetID())\n\tif index.IsInterleaved() {\n\t\treturn errors.Errorf(\"unexpected interleaved index %d\", index.ID)\n\t}\n\n\tsp := tableDesc.IndexSpan(execCfg.Codec, index.ID)\n\tstart, err := keys.Addr(sp.Key)\n\tif err != nil {\n\t\treturn errors.Errorf(\"failed to addr index start: %v\", err)\n\t}\n\tend, err := 
keys.Addr(sp.EndKey)\n\tif err != nil {\n\t\treturn errors.Errorf(\"failed to addr index end: %v\", err)\n\t}\n\trSpan := roachpb.RSpan{Key: start, EndKey: end}\n\treturn clearSpanData(ctx, execCfg.DB, execCfg.DistSender, rSpan)\n}", "func (h *Headers) NoTransform() *Headers {\n\th.noTransform = true\n\treturn h\n}", "func NoDecorator() Decorator { return func(c Context) Context { return c } }", "func (b *ShardBuilder) WithNoProgress() *ShardBuilder {\n\tb.shard.Progress = spanner.NullInt64{Valid: false}\n\treturn b\n}", "func NoopHook(index string) error {\n\treturn nil\n}", "func GenerateValidIndex(datatypeName string) string {\n\tswitch datatypeName {\n\tcase field.TypeString:\n\t\treturn \"strconv.Itoa(0)\"\n\tcase field.TypeUint, field.TypeInt:\n\t\treturn \"0\"\n\tcase field.TypeBool:\n\t\treturn valueFalse\n\tcase field.TypeCustom:\n\t\treturn valueNull\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown type %s\", datatypeName))\n\t}\n}", "func generateNoOpBlocks() {\n\tfor {\n\t\tif isGenerateNoOps && !isWorkingOnCommit {\n\t\t\tisWorkingOnNoOp = true\n\t\t\tgenerateNoOpBlock()\n\t\t\tisWorkingOnNoOp = false\n\t\t\ttime.Sleep(time.Millisecond * 100)\n\t\t} else {\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}\n}", "func (g *GeneratedFile) Skip() {\n\tg.skip = true\n}", "func (t *Tags) NoFinalize() {\n\tt.noFinalize = true\n\tfor _, tag := range t.values {\n\t\ttag.NoFinalize()\n\t}\n}", "func Without[T comparable](collection []T, exclude ...T) []T {\n\tresult := make([]T, 0, len(collection))\n\tfor _, e := range collection {\n\t\tif !Contains(exclude, e) {\n\t\t\tresult = append(result, e)\n\t\t}\n\t}\n\treturn result\n}", "func DeleteIndexWithoutOrder(a interface{}, index int) interface{} {\n\tswitch a.(type) {\n\tcase []int:\n\t\treturn DeleteIndexWithoutOrderInt(a.([]int), index)\n\tdefault:\n\t\tpanic(\"not support type\")\n\t}\n}", "func (w *Writer) ZeroUntil(index int64)", "func NoopFilter(index string, data []byte) ([]byte, error) {\n\treturn data, nil\n}", 
"func (h *Headers) NoStore() *Headers {\n\th.noStore = true\n\treturn h\n}", "func Exclude(attributes ...string) Options {\n\treturn exclude{attributes: attributes}\n}", "func Not(p predicate.MetaSchema) predicate.MetaSchema {\n\treturn predicate.MetaSchema(\n\t\tfunc(s *sql.Selector) {\n\t\t\tp(s.Not())\n\t\t},\n\t)\n}", "func NoOp(scope *Scope) (o *tf.Operation) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"NoOp\",\n\t}\n\treturn scope.AddOperation(opspec)\n}", "func (d *dbBasePostgres) GenerateSpecifyIndex(tableName string, useIndex int, indexes []string) string {\n\tDebugLog.Println(\"[WARN] Not support any specifying index action, so that action is ignored\")\n\treturn ``\n}", "func DisableVertexAttribArray(index uint32) {\n\tsyscall.Syscall(gpDisableVertexAttribArray, 1, uintptr(index), 0, 0)\n}", "func DisableVertexArrayAttrib(vaobj uint32, index uint32) {\n\tsyscall.Syscall(gpDisableVertexArrayAttrib, 2, uintptr(vaobj), uintptr(index), 0)\n}", "func noHeader(predeterminedHeader *Header) headerOption {\n\tif predeterminedHeader == nil {\n\t\tpanic(\"nil predeterminedHeader\")\n\t}\n\treturn headerOption{noHeader: true, predeterminedHeader: predeterminedHeader}\n}", "func NoParallel() TestOptionsFunc {\n\treturn func(_ *testing.T, test *Test) { test.RunOptions.NoParallel = true }\n}", "func Not(p predicate.Bulk) predicate.Bulk {\n\treturn predicate.Bulk(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func DropIndexColumnFlag(tblInfo *model.TableInfo, indexInfo *model.IndexInfo) {\n\tif indexInfo.Primary {\n\t\tfor _, col := range indexInfo.Columns {\n\t\t\ttblInfo.Columns[col.Offset].DelFlag(mysql.PriKeyFlag)\n\t\t}\n\t} else if indexInfo.Unique && len(indexInfo.Columns) == 1 {\n\t\ttblInfo.Columns[indexInfo.Columns[0].Offset].DelFlag(mysql.UniqueKeyFlag)\n\t} else {\n\t\ttblInfo.Columns[indexInfo.Columns[0].Offset].DelFlag(mysql.MultipleKeyFlag)\n\t}\n\n\tcol := indexInfo.Columns[0]\n\t// other index may still cover 
this col\n\tfor _, index := range tblInfo.Indices {\n\t\tif index.Name.L == indexInfo.Name.L {\n\t\t\tcontinue\n\t\t}\n\n\t\tif index.Columns[0].Name.L != col.Name.L {\n\t\t\tcontinue\n\t\t}\n\n\t\tAddIndexColumnFlag(tblInfo, index)\n\t}\n}", "func NoTrace() zap.Option {\n\treturn zap.AddStacktrace(noTrace{})\n}", "func Not(d Dense) Dense {\n\tr := Dense{\n\t\tbits: make([]byte, 0, BytesFor(d.len)),\n\t\tlen: d.len,\n\t\tnegated: !d.negated,\n\t}\n\tfor i := range d.bits {\n\t\tr.bits = append(r.bits, ^d.bits[i])\n\t}\n\treturn r\n}", "func (HashValidationOption) NoCheck() HashValidationOption { return HashValidationOption(1) }", "func (m Map) Without(filter Coord) Map {\n\tfiltered := make(Map, 0)\n\n\tfor _, c := range m {\n\t\tif c != filter {\n\t\t\tfiltered = append(filtered, c)\n\t\t}\n\t}\n\n\treturn filtered\n}", "func GenMissingData(hasMissingData bool) Option {\n\treturn func(f *genBowOptions) { f.missingData = hasMissingData }\n}", "func WithoutProfilerEndpoints() Option {\n\treturn func(ctx context.Context, s *Server) error {\n\t\ts.noProfilerEndpoint = true\n\t\treturn nil\n\t}\n}", "func Exclude(fields []string) Option {\n\treturn func(op *PartialMutation) {\n\t\top.excludeFields = fields\n\t}\n}", "func (iob *IndexOptionsBuilder) Unique(unique bool) *IndexOptionsBuilder {\n\tiob.document = append(iob.document, bson.E{\"unique\", unique})\n\treturn iob\n}", "func WithDisable() *CallOption {\n\treturn WithMax(0)\n}", "func WithoutFileInfo() Option {\n\treturn func(o *options) {\n\t\to.withoutFileInfo = true\n\t}\n}", "func (d *Document) False() Node {\n\tid := uint(len(d.nodes))\n\tn := d.grow()\n\tn.reset(vBoolean|infRoot, strFalse, n.values[:0])\n\treturn d.Node(id)\n}", "func WithNoBuild(r *v1alpha1.Revision) {\n\tr.Status.PropagateBuildStatus(duckv1alpha1.KResourceStatus{\n\t\tConditions: []duckv1alpha1.Condition{{\n\t\t\tType: duckv1alpha1.ConditionSucceeded,\n\t\t\tStatus: corev1.ConditionTrue,\n\t\t\tReason: \"NoBuild\",\n\t\t}},\n\t})\n}", 
"func (t *Tag) NoFinalize() {\n\tt.noFinalize = true\n\tt.Name.NoFinalize()\n\tt.Value.NoFinalize()\n}", "func (gsu *GameServerUpdate) ClearDisabledAt() *GameServerUpdate {\n\tgsu.mutation.ClearDisabledAt()\n\treturn gsu\n}", "func (i *SGIndex) shouldIndexTombstones(useXattrs bool) bool {\n\treturn (i.flags&IdxFlagIndexTombstones != 0 && useXattrs)\n}", "func (s *sectionHeader) WithoutEnv() *sectionHeader {\n\ts.env = false\n\treturn s\n}" ]
[ "0.62528235", "0.6120058", "0.6103179", "0.5899328", "0.5636369", "0.56347483", "0.56101", "0.5556883", "0.54259133", "0.5418361", "0.54178435", "0.53887594", "0.5346785", "0.5267003", "0.52648", "0.52637637", "0.5242605", "0.5237273", "0.52297145", "0.5219901", "0.5217866", "0.5215443", "0.51945925", "0.51792794", "0.5176227", "0.5172019", "0.51641273", "0.5146433", "0.5145777", "0.51309407", "0.5123686", "0.5092554", "0.50898963", "0.50892127", "0.50815916", "0.5067341", "0.5066315", "0.5066008", "0.50632924", "0.50631475", "0.50372595", "0.50372595", "0.50094604", "0.50091827", "0.5001731", "0.49948606", "0.49848473", "0.49826127", "0.4972266", "0.49674815", "0.49674815", "0.49450326", "0.4942936", "0.49356323", "0.4930749", "0.4921363", "0.49126956", "0.4912399", "0.4911827", "0.4909832", "0.49091545", "0.49055356", "0.4899774", "0.48947293", "0.48906347", "0.48836088", "0.48827437", "0.48786896", "0.48778895", "0.48743352", "0.48720172", "0.48621243", "0.48569548", "0.4842319", "0.48255187", "0.48092124", "0.48062918", "0.4805452", "0.48043838", "0.47993225", "0.47985488", "0.47972873", "0.479307", "0.47862405", "0.47829956", "0.47778806", "0.47764432", "0.4774434", "0.47712976", "0.47702038", "0.47651252", "0.47602248", "0.47474357", "0.47454923", "0.47440064", "0.47373834", "0.47329834", "0.47303718", "0.47196382", "0.47185692" ]
0.7322742
0
StoreIdentityCIDs sets whether to persist sections that are referenced by CIDs with multihash.IDENTITY digest. When writing CAR files with this option, Characteristics.IsFullyIndexed will be set. By default, the blockstore interface will always return true for Has() called with identity CIDs, but when this option is turned on, it will defer to the index. When creating an index (or loading a CARv1 as a blockstore), when this option is on, identity CIDs will be included in the index. This option is disabled by default.
func StoreIdentityCIDs(b bool) Option { return func(o *Options) { o.StoreIdentityCIDs = b } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (cosi *cosiAggregate) StoreIdentities(idents map[string]proto.Message) {\n\tfor k, v := range idents {\n\t\tpoint := suite.G2().Point()\n\t\terr := point.UnmarshalBinary(v.(*BdnIdentity).PublicKey)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tcosi.skipchain.identities[k] = point\n\t}\n}", "func StoreIdentityInFiles(i *security.Identity, keyFile string, crtFile string, csrFile string) error {\n\tvar err error\n\n\tif i.Key != nil {\n\t\tif err = CreatePEM(keyFile, i.Key); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif i.Certificate != nil {\n\t\tif err = CreatePEM(crtFile, i.Certificate); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif i.Request != nil {\n\t\tif err = CreatePEM(csrFile, i.Request); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (k Keeper) SetIdentityCount(ctx sdk.Context, count int64) {\n\tstore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.IdentityCountKey))\n\tbyteKey := types.KeyPrefix(types.IdentityCountKey)\n\tbz := []byte(strconv.FormatInt(count, 10))\n\tstore.Set(byteKey, bz)\n}", "func (m *cidsMap) Sync(vmis []*virtv1.VirtualMachineInstance) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tfor _, vmi := range vmis {\n\t\tif vmi.Status.VSOCKCID == nil {\n\t\t\tcontinue\n\t\t}\n\t\tkey := controller.VirtualMachineInstanceKey(vmi)\n\t\tm.cids[key] = *vmi.Status.VSOCKCID\n\t\tm.reverse[*vmi.Status.VSOCKCID] = key\n\t}\n}", "func (m *User) SetIdentities(value []ObjectIdentityable)() {\n m.identities = value\n}", "func (ic *IdentityCache) StoreIdentity(identity Identity) error {\n\tcache := cacheData{\n\t\tIdentity: identity,\n\t}\n\n\treturn ic.writeCache(cache)\n}", "func (s *Secrets) IdentityStoreID() (string, error) {\n\treturn s.getSecret(\"SSOSyncIdentityStoreID\")\n}", "func SetIdentity(storageDir string, cid, nid uint64) (err error) {\n\tif cid == 0 {\n\t\treturn errors.New(\"raft: cid is zero\")\n\t}\n\tif nid == 0 {\n\t\treturn errors.New(\"raft: nid is zero\")\n\t}\n\td, err := 
os.Stat(storageDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !d.IsDir() {\n\t\treturn fmt.Errorf(\"raft: %q is not a diretory\", storageDir)\n\t}\n\tif err := lockDir(storageDir); err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\terr = unlockDir(storageDir)\n\t}()\n\tval, err := openValue(storageDir, \".id\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif cid == val.v1 && nid == val.v2 {\n\t\treturn nil\n\t}\n\tif val.v1 != 0 && val.v2 != 0 {\n\t\treturn ErrIdentityAlreadySet\n\t}\n\treturn val.set(cid, nid)\n}", "func AddIndependentPropertyGeneratorsForManagedClusterStorageProfileFileCSIDriver(gens map[string]gopter.Gen) {\n\tgens[\"Enabled\"] = gen.PtrOf(gen.Bool())\n}", "func (o GetKubernetesClusterIdentityOutput) IdentityIds() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetKubernetesClusterIdentity) []string { return v.IdentityIds }).(pulumi.StringArrayOutput)\n}", "func (o *SparseClaims) Identity() elemental.Identity {\n\n\treturn ClaimsIdentity\n}", "func (o *VirtualizationBaseHostPciDeviceAllOf) SetIdentity(v string) {\n\to.Identity = &v\n}", "func StoreCAUniqueIDToCNMap(c context.Context, mapping map[int64]string) error {\n\tbuf := bytes.Buffer{}\n\tenc := gob.NewEncoder(&buf)\n\tif err := enc.Encode(mapping); err != nil {\n\t\treturn err\n\t}\n\t// Note that in practice 'mapping' is usually very small, so we are not\n\t// concerned about 1MB entity size limit.\n\treturn errors.WrapTransient(datastore.Get(c).Put(&CAUniqueIDToCNMap{\n\t\tGobEncodedMap: buf.Bytes(),\n\t}))\n}", "func (o *SnapmirrorCreateRequest) SetIdentityPreserve(newValue bool) *SnapmirrorCreateRequest {\n\to.IdentityPreservePtr = &newValue\n\treturn o\n}", "func (o KubernetesClusterIdentityOutput) IdentityIds() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v KubernetesClusterIdentity) []string { return v.IdentityIds }).(pulumi.StringArrayOutput)\n}", "func NewAtomixStore(client primitive.Client) (Store, error) {\n\tconfigurations, err := 
_map.NewBuilder[configapi.ConfigurationID, *configapi.Configuration](client, \"configurations\").\n\t\tTag(\"onos-config\", \"configuration\").\n\t\tCodec(generic.Proto[*configapi.Configuration](&configapi.Configuration{})).\n\t\tGet(context.Background())\n\tif err != nil {\n\t\treturn nil, errors.FromAtomix(err)\n\t}\n\treturn &configurationStore{\n\t\tconfigurations: configurations,\n\t}, nil\n}", "func (cs *ClientStore) Set(id string, cli oauth2.ClientInfo) (err error) {\n\tcs.Lock()\n\tdefer cs.Unlock()\n\tcs.data[id] = cli\n\treturn\n}", "func (o *UserDisco) HasIdentity() bool {\n\tif o != nil && o.Identity != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (mvccs *KVMVCCStore) Set(datas *types.StoreSet, hash []byte, sync bool) ([]byte, error) {\n\tif hash == nil {\n\t\thash = calcHash(datas)\n\t}\n\tkvlist, err := mvccs.mvcc.AddMVCC(datas.KV, hash, datas.StateHash, datas.Height)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmvccs.saveKVSets(kvlist, sync)\n\treturn hash, nil\n}", "func (c *Component) ClaimIDs(ctx context.Context, ids cluster.EntityIdentifiers) error {\n\treturn c.cluster.ClaimIDs(ctx, ids)\n}", "func SetIntInStore(id, key string, value int) error {\n\treturn ecs.AddOrUpdateIntInMapComponent(id, \"store\", key, value)\n}", "func (o *VirtualizationBaseHostPciDeviceAllOf) HasIdentity() bool {\n\tif o != nil && o.Identity != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func ContactIdentityKey(id string) ([]byte, error) {\n\ts := textSecureStore\n\tidkeyfile := filepath.Join(s.identityDir, \"remote_\"+id)\n\tif !exists(idkeyfile) {\n\t\treturn nil, UnknownContactError{id}\n\t}\n\tb, err := s.readFile(idkeyfile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn append([]byte{5}, b...), nil\n}", "func AddIndependentPropertyGeneratorsForManagedClusterIdentity(gens map[string]gopter.Gen) {\n\tgens[\"Type\"] = gen.PtrOf(gen.AlphaString())\n}", "func (o SparseClaimsList) Identity() elemental.Identity {\n\n\treturn 
ClaimsIdentity\n}", "func storeCandidates(candidateMap map[hash.Hash160]*state.Candidate, sm protocol.StateManager, blkHeight uint64) error {\n\tcandidateList, err := state.MapToCandidates(candidateMap)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to convert candidate map to candidate list\")\n\t}\n\tsort.Sort(candidateList)\n\tcandidatesKey := ConstructKey(blkHeight)\n\treturn sm.PutState(candidatesKey, &candidateList)\n}", "func (c *ClusterClient) IdentityProviders() *IdentityProvidersClient {\n\treturn NewIdentityProvidersClient(\n\t\tc.transport,\n\t\tpath.Join(c.path, \"identity_providers\"),\n\t)\n}", "func NewSparseClaims() *SparseClaims {\n\treturn &SparseClaims{}\n}", "func (s *Store) PutBulk(suffixes []string, cid string) error {\n\toperations := make([]storage.Operation, len(suffixes))\n\n\tfor i, suffix := range suffixes {\n\t\top := storage.Operation{\n\t\t\tKey: suffix,\n\t\t\tValue: []byte(cid),\n\t\t}\n\n\t\toperations[i] = op\n\t}\n\n\terr := s.store.Batch(operations)\n\tif err != nil {\n\t\treturn orberrors.NewTransient(fmt.Errorf(\"failed to add cid[%s] to suffixes%s: %w\", cid, suffixes, err))\n\t}\n\n\tlogger.Debugf(\"updated latest anchor[%s] for suffixes: %s\", cid, suffixes)\n\n\treturn nil\n}", "func (o ServiceIdentityOutput) IdentityIds() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v ServiceIdentity) []string { return v.IdentityIds }).(pulumi.StringArrayOutput)\n}", "func (bc *Baiducloud) HasClusterID() bool {\n\treturn true\n}", "func (c *IdentityConfig) loadIdentityConfigEntities() error {\n\tconfigEntity := identityConfigEntity{}\n\n\terr := c.backend.UnmarshalKey(\"client\", &configEntity.Client)\n\tlogger.Debugf(\"Client is: %+v\", configEntity.Client)\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"failed to parse 'client' config item to identityConfigEntity.Client type\")\n\t}\n\n\terr = c.backend.UnmarshalKey(\"organizations\", &configEntity.Organizations)\n\tlogger.Debugf(\"organizations are: %+v\", 
configEntity.Organizations)\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"failed to parse 'organizations' config item to identityConfigEntity.Organizations type\")\n\t}\n\n\terr = c.backend.UnmarshalKey(\"certificateAuthorities\", &configEntity.CertificateAuthorities)\n\tlogger.Debugf(\"certificateAuthorities are: %+v\", configEntity.CertificateAuthorities)\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"failed to parse 'certificateAuthorities' config item to identityConfigEntity.CertificateAuthorities type\")\n\t}\n\t// Populate ID from the lookup keys\n\tfor caID := range configEntity.CertificateAuthorities {\n\t\tca := configEntity.CertificateAuthorities[caID]\n\t\tca.ID = caID\n\t\tconfigEntity.CertificateAuthorities[caID] = ca\n\t}\n\n\t//compile CA matchers\n\terr = c.compileMatchers()\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"failed to compile certificate authority matchers\")\n\t}\n\n\terr = c.loadClientTLSConfig(&configEntity)\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"failed to load client TLSConfig \")\n\t}\n\n\terr = c.loadCATLSConfig(&configEntity)\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"failed to load CA TLSConfig \")\n\t}\n\n\terr = c.loadAllCAConfigs(&configEntity)\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"failed to load all CA configs \")\n\t}\n\n\terr = c.loadTLSCertPool(&configEntity)\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"failed to load TLS Cert Pool\")\n\t}\n\n\tc.caKeyStorePath = pathvar.Subst(c.backend.GetString(\"client.credentialStore.cryptoStore.path\"))\n\tc.credentialStorePath = pathvar.Subst(c.backend.GetString(\"client.credentialStore.path\"))\n\n\treturn nil\n}", "func (me TxsdComponentTransferFunctionAttributesType) IsIdentity() bool {\n\treturn me.String() == \"identity\"\n}", "func AddIndependentPropertyGeneratorsForManagedClusterStorageProfileDiskCSIDriver(gens map[string]gopter.Gen) {\n\tgens[\"Enabled\"] = 
gen.PtrOf(gen.Bool())\n}", "func AddIndependentPropertyGeneratorsForManagedClusterOIDCIssuerProfile(gens map[string]gopter.Gen) {\n\tgens[\"Enabled\"] = gen.PtrOf(gen.Bool())\n}", "func (e *basicEvent) SetIdentity(i string) {\n\te.Ident = i\n}", "func (cs *ClientStore) Set(id string, cli oauth2.Client) (err error) {\n\tcs.Lock()\n\tdefer cs.Unlock()\n\tcs.data[id] = cli\n\treturn\n}", "func (o GetServiceIdentityOutput) IdentityIds() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetServiceIdentity) []string { return v.IdentityIds }).(pulumi.StringArrayOutput)\n}", "func idcList() []string {\n\tm, _ := idcRegion.Load().(map[string]string)\n\tidcList := make([]string, 0, len(m))\n\tfor dc, _ := range m {\n\t\tidcList = append(idcList, dc)\n\t}\n\treturn idcList\n}", "func (j *DSRocketchat) HasIdentities() bool {\n\treturn true\n}", "func (sID SemanticID) Is(identity string) bool {\n\tif sID.IsNil() {\n\t\treturn false\n\t}\n\n\treturn fmt.Sprintf(\"%s.%s\", sID.Namespace, sID.Collection) == identity\n}", "func (o AnalyzerIdentityOutput) IdentityIds() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v AnalyzerIdentity) []string { return v.IdentityIds }).(pulumi.StringArrayOutput)\n}", "func (s *Cluster) SetIdentity(v *Identity) *Cluster {\n\ts.Identity = v\n\treturn s\n}", "func (ctx *Context) InitializeStores() error {\n\tvar err error\n\tctx.stores = make(map[string]*CoreStores)\n\tfor _, store := range ctx.config.Stores {\n\t\t// Initialize primary in memory store\n\t\tlog.Printf(\"Initializing Primary InMemoryStore %s\\n\", store.Name)\n\t\tnewstore := &CoreStores{}\n\t\tvar localerr error\n\t\tif newstore.primary, localerr = createStore(\"InMemory\", \"\"); localerr != nil {\n\t\t\terr = localerr\n\t\t}\n\t\t// Initialize backup store if defined\n\t\tif len(store.Backup) > 0 {\n\t\t\tlog.Printf(\"Initializing Backup Store %s of type %s, backup directory %s\\n\", store.Name, store.Backup, store.Backupdir)\n\t\t\tvar localerr error\n\t\t\tif 
newstore.backup, localerr = createStore(store.Backup, store.Backupdir); localerr != nil {\n\t\t\t\terr = localerr\n\t\t\t} else {\n\t\t\t\t// Once initialized we need to restore the primary store from backup store\n\t\t\t\tjsStore, serr := core.SerializeStore(newstore.backup)\n\t\t\t\tif serr != nil {\n\t\t\t\t\terr = serr\n\t\t\t\t} else {\n\t\t\t\t\tif dserr := core.DeSerializeStore(newstore.primary, jsStore); dserr != nil {\n\t\t\t\t\t\terr = dserr\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(store.AggregateURLs) > 0 {\n\t\t\tnewstore.shutdown = make(chan bool)\n\t\t\tgo ctx.SyncAggregateURLs(newstore, store.AggregateURLs, time.Duration(store.SyncIntervalSec)*time.Second)\n\t\t}\n\t\tctx.stores[store.Name] = newstore\n\t}\n\treturn err\n}", "func AddIndependentPropertyGeneratorsForManagedClusterSecurityProfileWorkloadIdentity(gens map[string]gopter.Gen) {\n\tgens[\"Enabled\"] = gen.PtrOf(gen.Bool())\n}", "func (engine *Engine) InitStore() {\n\tengine.storeIndexDocChans = make(\n\t\t[]chan storeIndexDocReq, engine.initOptions.StoreShards)\n\n\tfor shard := 0; shard < engine.initOptions.StoreShards; shard++ {\n\t\tengine.storeIndexDocChans[shard] = make(\n\t\t\tchan storeIndexDocReq)\n\t}\n\tengine.storeInitChan = make(\n\t\tchan bool, engine.initOptions.StoreShards)\n}", "func (b *ClusterBuilder) IdentityProviders(value *IdentityProviderListBuilder) *ClusterBuilder {\n\tb.identityProviders = value\n\tb.bitmap_ |= 8388608\n\treturn b\n}", "func (o *UserDisco) SetIdentity(v FullIdentity) {\n\to.Identity = &v\n}", "func (m *ServerContext) CAS() batch.CASClient {\n\treturn m.StoreClient\n}", "func Store(ctx context.Context, isClient bool, config *VFConfig) {\n\tmetadata.Map(ctx, isClient).Store(key{}, config)\n}", "func AddIndependentPropertyGeneratorsForManagedClusterStorageProfileBlobCSIDriver(gens map[string]gopter.Gen) {\n\tgens[\"Enabled\"] = gen.PtrOf(gen.Bool())\n}", "func (m *IosDeviceFeaturesConfiguration) 
SetIdentityCertificateForClientAuthentication(value IosCertificateProfileBaseable)() {\n err := m.GetBackingStore().Set(\"identityCertificateForClientAuthentication\", value)\n if err != nil {\n panic(err)\n }\n}", "func (az *Cloud) HasClusterID() bool {\n\treturn true\n}", "func (backend *ESClient) InitializeStore(ctx context.Context) {\n\tlogrus.Info(\"Initialize elastic with mappings\")\n\tif !backend.initialized {\n\t\tfor _, esMap := range mappings.AllMappings {\n\t\t\tbackend.CreateTemplate(ctx, esMap.Index, esMap.Mapping)\n\t\t\tif !esMap.Timeseries {\n\t\t\t\tbackend.createStoreIfNotExists(ctx, esMap.Index, esMap.Mapping)\n\t\t\t\tbackend.createStoreAliasIfNotExists(ctx, esMap.Alias, esMap.Index)\n\t\t\t}\n\t\t}\n\t}\n\tbackend.initialized = true\n}", "func (c *cbConfigStore) ConfigurationStore() clustering.ConfigurationStore {\n\treturn c\n}", "func (s *SplitStore) view(c cid.Cid, cb func([]byte) error) error {\n\tif isIdentiyCid(c) {\n\t\tdata, err := decodeIdentityCid(c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn cb(data)\n\t}\n\n\terr := s.hot.View(s.ctx, c, cb)\n\tif ipld.IsNotFound(err) {\n\t\treturn s.cold.View(s.ctx, c, cb)\n\t}\n\treturn err\n}", "func (c *CIDOffer) GetCIDs() []cid.ContentID {\n\treturn c.cids\n}", "func (cs *ClientStore) Set(info oauth2.ClientInfo) (err error) {\n\tcs.cHandler(cs.ccfg.ClientsCName, func(c *mongo.Collection) {\n\t\tentity := &client{\n\t\t\tID: info.GetID(),\n\t\t\tSecret: info.GetSecret(),\n\t\t\tDomain: info.GetDomain(),\n\t\t\tUserID: info.GetUserID(),\n\t\t}\n\n\t\tif _, cerr := c.InsertOne(context.TODO(), entity); cerr != nil {\n\t\t\terr = cerr\n\t\t\treturn\n\t\t}\n\t})\n\n\treturn\n}", "func (d *Dao) ArcMetas(c context.Context, aids []int64) (metas map[int64]*model.ArcCMS, err error) {\n\tmetas = make(map[int64]*model.ArcCMS)\n\trows, err := d.db.Query(c, fmt.Sprintf(_arcMetas, xstr.JoinInts(aids)))\n\tif err != nil {\n\t\tlog.Error(\"ArcMetas d.db.Query error(%v)\", 
err)\n\t\treturn\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tli := &model.ArcCMS{}\n\t\tif err = rows.Scan(&li.Title, &li.AID, &li.Content, &li.Cover, &li.TypeID, &li.Pubtime, &li.Videos, &li.Valid, &li.Deleted, &li.Result); err != nil {\n\t\t\tlog.Error(\"ArcMetas row.Scan error(%v)\", err)\n\t\t\treturn\n\t\t}\n\t\tmetas[li.AID] = li\n\t}\n\treturn\n}", "func (o SparseAPIChecksList) Identity() elemental.Identity {\n\n\treturn APICheckIdentity\n}", "func (o SparseOAUTHKeysList) Identity() elemental.Identity {\n\n\treturn OAUTHKeyIdentity\n}", "func (s *PollForDecisionTaskInput) SetIdentity(v string) *PollForDecisionTaskInput {\n\ts.Identity = &v\n\treturn s\n}", "func (o *SparseAPICheck) Identity() elemental.Identity {\n\n\treturn APICheckIdentity\n}", "func GetStoreIdentKey() []byte {\n\treturn storeIdentKey\n}", "func (o FluxConfigurationBlobStorageOutput) ManagedIdentity() FluxConfigurationBlobStorageManagedIdentityPtrOutput {\n\treturn o.ApplyT(func(v FluxConfigurationBlobStorage) *FluxConfigurationBlobStorageManagedIdentity {\n\t\treturn v.ManagedIdentity\n\t}).(FluxConfigurationBlobStorageManagedIdentityPtrOutput)\n}", "func (o SparseEnforcerReportsList) Identity() elemental.Identity {\n\n\treturn EnforcerReportIdentity\n}", "func (cloud *Cloud) HasClusterID() bool {\n\treturn false\n}", "func (m *ServicePrincipalRiskDetection) SetKeyIds(value []string)() {\n err := m.GetBackingStore().Set(\"keyIds\", value)\n if err != nil {\n panic(err)\n }\n}", "func (o SparseClaimsList) Append(objects ...elemental.Identifiable) elemental.Identifiables {\n\n\tout := append(SparseClaimsList{}, o...)\n\tfor _, obj := range objects {\n\t\tout = append(out, obj.(*SparseClaims))\n\t}\n\n\treturn out\n}", "func MakeIdentityFile(filePath string, key *Key, format IdentityFileFormat, certAuthorities []services.CertAuthority) (err error) {\n\tconst (\n\t\t// the files and the dir will be created with these permissions:\n\t\tfileMode = 0600\n\t\tdirMode = 0700\n\t)\n\n\tif 
filePath == \"\" {\n\t\treturn trace.BadParameter(\"identity location is not specified\")\n\t}\n\n\tvar output io.Writer = os.Stdout\n\tswitch format {\n\t// dump user identity into a single file:\n\tcase IdentityFormatFile:\n\t\tf, err := os.OpenFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, fileMode)\n\t\tif err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\t\toutput = f\n\t\tdefer f.Close()\n\n\t\t// write key:\n\t\tif _, err = output.Write(key.Priv); err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\t\t// append ssh cert:\n\t\tif _, err = output.Write(key.Cert); err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\t\t// append tls cert:\n\t\tif _, err = output.Write(key.TLSCert); err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\t\t// append trusted host certificate authorities\n\t\tfor _, ca := range certAuthorities {\n\t\t\t// append ssh ca certificates\n\t\t\tfor _, publicKey := range ca.GetCheckingKeys() {\n\t\t\t\tdata, err := sshutils.MarshalAuthorizedHostsFormat(ca.GetClusterName(), publicKey, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn trace.Wrap(err)\n\t\t\t\t}\n\t\t\t\tif _, err = output.Write([]byte(data)); err != nil {\n\t\t\t\t\treturn trace.Wrap(err)\n\t\t\t\t}\n\t\t\t\tif _, err = output.Write([]byte(\"\\n\")); err != nil {\n\t\t\t\t\treturn trace.Wrap(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// append tls ca certificates\n\t\t\tfor _, keyPair := range ca.GetTLSKeyPairs() {\n\t\t\t\tif _, err = output.Write(keyPair.Cert); err != nil {\n\t\t\t\t\treturn trace.Wrap(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t// dump user identity into separate files:\n\tcase IdentityFormatOpenSSH:\n\t\tkeyPath := filePath\n\t\tcertPath := keyPath + \"-cert.pub\"\n\n\t\terr = ioutil.WriteFile(certPath, key.Cert, fileMode)\n\t\tif err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\n\t\terr = ioutil.WriteFile(keyPath, key.Priv, fileMode)\n\t\tif err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\n\tcase IdentityFormatTLS:\n\t\tkeyPath := filePath + 
\".key\"\n\t\tcertPath := filePath + \".crt\"\n\t\tcasPath := filePath + \".cas\"\n\n\t\terr = ioutil.WriteFile(certPath, key.TLSCert, fileMode)\n\t\tif err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\n\t\terr = ioutil.WriteFile(keyPath, key.Priv, fileMode)\n\t\tif err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\t\tvar caCerts []byte\n\t\tfor _, ca := range certAuthorities {\n\t\t\tfor _, keyPair := range ca.GetTLSKeyPairs() {\n\t\t\t\tcaCerts = append(caCerts, keyPair.Cert...)\n\t\t\t}\n\t\t}\n\t\terr = ioutil.WriteFile(casPath, caCerts, fileMode)\n\t\tif err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\tdefault:\n\t\treturn trace.BadParameter(\"unsupported identity format: %q, use one of %q, %q, or %q\",\n\t\t\tformat, IdentityFormatFile, IdentityFormatOpenSSH, IdentityFormatTLS)\n\t}\n\treturn nil\n}", "func Ocis(cfg *config.Config) map[string]interface{} {\n\treturn map[string]interface{}{\n\t\t\"metadata_backend\": cfg.Drivers.OCIS.MetadataBackend,\n\t\t\"propagator\": cfg.Drivers.OCIS.Propagator,\n\t\t\"async_propagator_options\": map[string]interface{}{\n\t\t\t\"propagation_delay\": cfg.Drivers.OCIS.AsyncPropagatorOptions.PropagationDelay,\n\t\t},\n\t\t\"root\": cfg.Drivers.OCIS.Root,\n\t\t\"user_layout\": cfg.Drivers.OCIS.UserLayout,\n\t\t\"share_folder\": cfg.Drivers.OCIS.ShareFolder,\n\t\t\"personalspacealias_template\": cfg.Drivers.OCIS.PersonalSpaceAliasTemplate,\n\t\t\"generalspacealias_template\": cfg.Drivers.OCIS.GeneralSpaceAliasTemplate,\n\t\t\"treetime_accounting\": true,\n\t\t\"treesize_accounting\": true,\n\t\t\"permissionssvc\": cfg.Drivers.OCIS.PermissionsEndpoint,\n\t\t\"permissionssvc_tls_mode\": cfg.Commons.GRPCClientTLS.Mode,\n\t\t\"max_acquire_lock_cycles\": cfg.Drivers.OCIS.MaxAcquireLockCycles,\n\t\t\"lock_cycle_duration_factor\": cfg.Drivers.OCIS.LockCycleDurationFactor,\n\t\t\"max_concurrency\": cfg.Drivers.OCIS.MaxConcurrency,\n\t\t\"asyncfileuploads\": cfg.Drivers.OCIS.AsyncUploads,\n\t\t\"max_quota\": 
cfg.Drivers.OCIS.MaxQuota,\n\t\t\"statcache\": map[string]interface{}{\n\t\t\t\"cache_store\": cfg.StatCache.Store,\n\t\t\t\"cache_nodes\": cfg.StatCache.Nodes,\n\t\t\t\"cache_database\": cfg.StatCache.Database,\n\t\t\t\"cache_ttl\": cfg.StatCache.TTL / time.Second,\n\t\t\t\"cache_size\": cfg.StatCache.Size,\n\t\t},\n\t\t\"filemetadatacache\": map[string]interface{}{\n\t\t\t\"cache_store\": cfg.FilemetadataCache.Store,\n\t\t\t\"cache_nodes\": cfg.FilemetadataCache.Nodes,\n\t\t\t\"cache_database\": cfg.FilemetadataCache.Database,\n\t\t\t\"cache_ttl\": cfg.FilemetadataCache.TTL / time.Second,\n\t\t\t\"cache_size\": cfg.FilemetadataCache.Size,\n\t\t},\n\t\t\"idcache\": map[string]interface{}{\n\t\t\t\"cache_store\": cfg.IDCache.Store,\n\t\t\t\"cache_nodes\": cfg.IDCache.Nodes,\n\t\t\t\"cache_database\": cfg.IDCache.Database,\n\t\t\t\"cache_ttl\": cfg.IDCache.TTL / time.Second,\n\t\t\t\"cache_size\": cfg.IDCache.Size,\n\t\t},\n\t\t\"events\": map[string]interface{}{\n\t\t\t\"natsaddress\": cfg.Events.Addr,\n\t\t\t\"natsclusterid\": cfg.Events.ClusterID,\n\t\t\t\"tlsinsecure\": cfg.Events.TLSInsecure,\n\t\t\t\"tlsrootcacertificate\": cfg.Events.TLSRootCaCertPath,\n\t\t\t\"numconsumers\": cfg.Events.NumConsumers,\n\t\t},\n\t\t\"tokens\": map[string]interface{}{\n\t\t\t\"transfer_shared_secret\": cfg.Commons.TransferSecret,\n\t\t\t\"transfer_expires\": cfg.TransferExpires,\n\t\t\t\"download_endpoint\": cfg.DataServerURL,\n\t\t\t\"datagateway_endpoint\": cfg.DataGatewayURL,\n\t\t},\n\t}\n}", "func (m *OnAuthenticationMethodLoadStartExternalUsersSelfServiceSignUp) SetIdentityProviders(value []IdentityProviderBaseable)() {\n err := m.GetBackingStore().Set(\"identityProviders\", value)\n if err != nil {\n panic(err)\n }\n}", "func (c *Cluster) Store() error {\n\treturn c.PersistStore.Store(*c)\n}", "func IpfsClientBlockstore(ipfsMaddr string, onlineMode bool) func(helpers.MetricsCtx, fx.Lifecycle, dtypes.ClientImportMgr) (dtypes.ClientBlockstore, error) {\n\treturn func(mctx 
helpers.MetricsCtx, lc fx.Lifecycle, localStore dtypes.ClientImportMgr) (dtypes.ClientBlockstore, error) {\n\t\tvar err error\n\t\tvar ipfsbs blockstore.BasicBlockstore\n\t\tif ipfsMaddr != \"\" {\n\t\t\tvar ma multiaddr.Multiaddr\n\t\t\tma, err = multiaddr.NewMultiaddr(ipfsMaddr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, xerrors.Errorf(\"parsing ipfs multiaddr: %w\", err)\n\t\t\t}\n\t\t\tipfsbs, err = blockstore.NewRemoteIPFSBlockstore(helpers.LifecycleCtx(mctx, lc), ma, onlineMode)\n\t\t} else {\n\t\t\tipfsbs, err = blockstore.NewLocalIPFSBlockstore(helpers.LifecycleCtx(mctx, lc), onlineMode)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, xerrors.Errorf(\"constructing ipfs blockstore: %w\", err)\n\t\t}\n\t\treturn blockstore.WrapIDStore(ipfsbs), nil\n\t}\n}", "func (o *SparseOAUTHKey) Identity() elemental.Identity {\n\n\treturn OAUTHKeyIdentity\n}", "func (s *Store) Set(entity workloadmeta.Entity) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tentityID := entity.GetID()\n\n\tif _, ok := s.store[entityID.Kind]; !ok {\n\t\ts.store[entityID.Kind] = make(map[string]workloadmeta.Entity)\n\t}\n\n\ts.store[entityID.Kind][entityID.ID] = entity\n}", "func (o *SparseEnforcerReport) Identity() elemental.Identity {\n\n\treturn EnforcerReportIdentity\n}", "func (m *HeavySyncMock) StoreIndicesMinimockCounter() uint64 {\n\treturn atomic.LoadUint64(&m.StoreIndicesCounter)\n}", "func (m *User) GetIdentities()([]ObjectIdentityable) {\n return m.identities\n}", "func (mc *Chain) SaveClients(ctx context.Context, clients []*client.Client) error {\n\tvar err error\n\tclientKeys := make([]datastore.Key, len(clients))\n\tfor idx, c := range clients {\n\t\tclientKeys[idx] = c.GetKey()\n\t}\n\tclientEntityMetadata := datastore.GetEntityMetadata(\"client\")\n\tcEntities := datastore.AllocateEntities(len(clients), clientEntityMetadata)\n\tctx = memorystore.WithEntityConnection(common.GetRootContext(), clientEntityMetadata)\n\tdefer memorystore.Close(ctx)\n\terr = 
clientEntityMetadata.GetStore().MultiRead(ctx, clientEntityMetadata, clientKeys, cEntities)\n\tif err != nil {\n\t\treturn err\n\t}\n\tctx = datastore.WithAsyncChannel(ctx, client.ClientEntityChannel)\n\tfor idx, c := range clients {\n\t\tif !datastore.IsEmpty(cEntities[idx].GetKey()) {\n\t\t\tcontinue\n\t\t}\n\t\t_, cerr := client.PutClient(ctx, c)\n\t\tif cerr != nil {\n\t\t\terr = cerr\n\t\t}\n\t}\n\treturn err\n}", "func (j *DSGit) HasIdentities() bool {\n\treturn true\n}", "func (o KubernetesClusterIdentityPtrOutput) IdentityIds() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *KubernetesClusterIdentity) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.IdentityIds\n\t}).(pulumi.StringArrayOutput)\n}", "func (c *Cluster) StartStore(storeID uint64) {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tif store := c.stores[storeID]; store != nil {\n\t\tstore.meta.State = metapb.StoreState_Up\n\t}\n}", "func (o AnalyzerIdentityPtrOutput) IdentityIds() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *AnalyzerIdentity) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.IdentityIds\n\t}).(pulumi.StringArrayOutput)\n}", "func (o *SparseDependencyMap) Identity() elemental.Identity {\n\n\treturn DependencyMapIdentity\n}", "func ManagedClusterIdentityGenerator() gopter.Gen {\n\tif managedClusterIdentityGenerator != nil {\n\t\treturn managedClusterIdentityGenerator\n\t}\n\n\tgenerators := make(map[string]gopter.Gen)\n\tAddIndependentPropertyGeneratorsForManagedClusterIdentity(generators)\n\tmanagedClusterIdentityGenerator = gen.Struct(reflect.TypeOf(ManagedClusterIdentity{}), generators)\n\n\t// The above call to gen.Struct() captures the map, so create a new one\n\tgenerators = make(map[string]gopter.Gen)\n\tAddIndependentPropertyGeneratorsForManagedClusterIdentity(generators)\n\tAddRelatedPropertyGeneratorsForManagedClusterIdentity(generators)\n\tmanagedClusterIdentityGenerator = gen.Struct(reflect.TypeOf(ManagedClusterIdentity{}), 
generators)\n\n\treturn managedClusterIdentityGenerator\n}", "func WithIDs(ids IDs) ContextOption {\n\treturn func(c *ContextConfig) {\n\t\tc.IDs = ids\n\t}\n}", "func (this *cbCluster) ConfigurationStoreId() string {\n\treturn this.configStore.Id()\n}", "func NewStore(\n\tcfg Config,\n\tstoreCfg chunk.StoreConfig,\n\tschemaCfg chunk.SchemaConfig,\n\tlimits StoreLimits,\n\treg prometheus.Registerer,\n\tcacheGenNumLoader chunk.CacheGenNumLoader,\n\tlogger log.Logger,\n) (chunk.Store, error) {\n\tchunkMetrics := newChunkClientMetrics(reg)\n\n\tindexReadCache, err := cache.New(cfg.IndexQueriesCacheConfig, reg, logger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\twriteDedupeCache, err := cache.New(storeCfg.WriteDedupeCacheConfig, reg, logger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tchunkCacheCfg := storeCfg.ChunkCacheConfig\n\tchunkCacheCfg.Prefix = \"chunks\"\n\tchunksCache, err := cache.New(chunkCacheCfg, reg, logger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Cache is shared by multiple stores, which means they will try and Stop\n\t// it more than once. Wrap in a StopOnce to prevent this.\n\tindexReadCache = cache.StopOnce(indexReadCache)\n\tchunksCache = cache.StopOnce(chunksCache)\n\twriteDedupeCache = cache.StopOnce(writeDedupeCache)\n\n\t// Lets wrap all caches except chunksCache with CacheGenMiddleware to facilitate cache invalidation using cache generation numbers.\n\t// chunksCache is not wrapped because chunks content can't be anyways modified without changing its ID so there is no use of\n\t// invalidating chunks cache. 
Also chunks can be fetched only by their ID found in index and we are anyways removing the index and invalidating index cache here.\n\tindexReadCache = cache.NewCacheGenNumMiddleware(indexReadCache)\n\twriteDedupeCache = cache.NewCacheGenNumMiddleware(writeDedupeCache)\n\n\terr = schemaCfg.Load()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error loading schema config\")\n\t}\n\tstores := chunk.NewCompositeStore(cacheGenNumLoader)\n\n\tfor _, s := range schemaCfg.Configs {\n\t\tindexClientReg := prometheus.WrapRegistererWith(\n\t\t\tprometheus.Labels{\"component\": \"index-store-\" + s.From.String()}, reg)\n\n\t\tindex, err := NewIndexClient(s.IndexType, cfg, schemaCfg, indexClientReg)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"error creating index client\")\n\t\t}\n\t\tindex = newCachingIndexClient(index, indexReadCache, cfg.IndexCacheValidity, limits, logger)\n\n\t\tobjectStoreType := s.ObjectType\n\t\tif objectStoreType == \"\" {\n\t\t\tobjectStoreType = s.IndexType\n\t\t}\n\n\t\tchunkClientReg := prometheus.WrapRegistererWith(\n\t\t\tprometheus.Labels{\"component\": \"chunk-store-\" + s.From.String()}, reg)\n\n\t\tchunks, err := NewChunkClient(objectStoreType, cfg, schemaCfg, chunkClientReg)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"error creating object client\")\n\t\t}\n\n\t\tchunks = newMetricsChunkClient(chunks, chunkMetrics)\n\n\t\terr = stores.AddPeriod(storeCfg, s, index, chunks, limits, chunksCache, writeDedupeCache)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn stores, nil\n}", "func (us *ClusterStore) GetAll() ([]model.Cluster, error) {\n\tvar cs []model.Cluster\n\tif err := us.db.Preload(clause.Associations).Find(&cs).Error; err != nil {\n\t\tif errors.Is(err, gorm.ErrRecordNotFound) {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn cs, nil\n}", "func (o ServiceIdentityPtrOutput) IdentityIds() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *ServiceIdentity) 
[]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.IdentityIds\n\t}).(pulumi.StringArrayOutput)\n}", "func (v *version) AWSCognitoIdentitySources() AWSCognitoIdentitySourceInformer {\n\treturn &aWSCognitoIdentitySourceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}\n}", "func (v *version) AWSCognitoIdentitySources() AWSCognitoIdentitySourceInformer {\n\treturn &aWSCognitoIdentitySourceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}\n}", "func (o SyncAuthorizationOutput) Identities() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *SyncAuthorization) pulumi.StringArrayOutput { return v.Identities }).(pulumi.StringArrayOutput)\n}", "func (m *MacOSEnterpriseWiFiConfiguration) SetIdentityCertificateForClientAuthentication(value MacOSCertificateProfileBaseable)() {\n err := m.GetBackingStore().Set(\"identityCertificateForClientAuthentication\", value)\n if err != nil {\n panic(err)\n }\n}", "func NewAtomixStore(client atomix.Client) (Store, error) {\n\tconfigurations, err := client.GetMap(context.Background(), \"onos-config-configurations\")\n\tif err != nil {\n\t\treturn nil, errors.FromAtomix(err)\n\t}\n\treturn &configurationStore{\n\t\tconfigurations: configurations,\n\t}, nil\n}", "func (o *SecurityProblem) HasCveIds() bool {\n\tif o != nil && o.CveIds != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s *Store) KVSSetCAS(idx uint64, entry *structs.DirEntry) (bool, error) {\n\ttx := s.db.Txn(true)\n\tdefer tx.Abort()\n\n\tset, err := s.kvsSetCASTxn(tx, idx, entry)\n\tif !set || err != nil {\n\t\treturn false, err\n\t}\n\n\ttx.Commit()\n\treturn true, nil\n}", "func (c *cloud) HasClusterID() bool {\n\treturn true\n}", "func TestIndex_SeriesIDSet(t *testing.T) {\n\tengine := MustOpenEngine()\n\tdefer engine.Close()\n\n\t// Add some series.\n\tengine.MustAddSeries(\"cpu\", map[string]string{\"host\": \"a\", \"region\": 
\"west\"})\n\tengine.MustAddSeries(\"cpu\", map[string]string{\"host\": \"b\", \"region\": \"west\"})\n\tengine.MustAddSeries(\"cpu\", map[string]string{\"host\": \"b\"})\n\tengine.MustAddSeries(\"gpu\", nil)\n\tengine.MustAddSeries(\"gpu\", map[string]string{\"host\": \"b\"})\n\tengine.MustAddSeries(\"mem\", map[string]string{\"host\": \"z\"})\n\n\t// Collect series IDs.\n\tseriesIDMap := map[string]tsdb.SeriesID{}\n\tvar e tsdb.SeriesIDElem\n\tvar err error\n\n\titr := engine.sfile.SeriesIDIterator()\n\tfor e, err = itr.Next(); ; e, err = itr.Next() {\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t} else if e.SeriesID.IsZero() {\n\t\t\tbreak\n\t\t}\n\n\t\tname, tags := tsdb.ParseSeriesKey(engine.sfile.SeriesKey(e.SeriesID))\n\t\tkey := fmt.Sprintf(\"%s%s\", name, tags.HashKey())\n\t\tseriesIDMap[key] = e.SeriesID\n\t}\n\n\tfor _, id := range seriesIDMap {\n\t\tif !engine.SeriesIDSet().Contains(id) {\n\t\t\tt.Fatalf(\"bitmap does not contain ID: %d\", id)\n\t\t}\n\t}\n\n\t// Drop all the series for the gpu measurement and they should no longer\n\t// be in the series ID set.\n\tif err := engine.DeleteMeasurement([]byte(\"gpu\")); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif engine.SeriesIDSet().Contains(seriesIDMap[\"gpu\"]) {\n\t\tt.Fatalf(\"bitmap does not contain ID: %d for key %s, but should\", seriesIDMap[\"gpu\"], \"gpu\")\n\t} else if engine.SeriesIDSet().Contains(seriesIDMap[\"gpu,host=b\"]) {\n\t\tt.Fatalf(\"bitmap does not contain ID: %d for key %s, but should\", seriesIDMap[\"gpu,host=b\"], \"gpu,host=b\")\n\t}\n\tdelete(seriesIDMap, \"gpu\")\n\tdelete(seriesIDMap, \"gpu,host=b\")\n\n\t// Drop the specific mem series\n\tditr := &seriesIterator{keys: [][]byte{[]byte(\"mem,host=z\")}}\n\tif err := engine.DeleteSeriesRange(ditr, math.MinInt64, math.MaxInt64); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif engine.SeriesIDSet().Contains(seriesIDMap[\"mem,host=z\"]) {\n\t\tt.Fatalf(\"bitmap does not contain ID: %d for key %s, but should\", 
seriesIDMap[\"mem,host=z\"], \"mem,host=z\")\n\t}\n\tdelete(seriesIDMap, \"mem,host=z\")\n\n\t// The rest of the keys should still be in the set.\n\tfor key, id := range seriesIDMap {\n\t\tif !engine.SeriesIDSet().Contains(id) {\n\t\t\tt.Fatalf(\"bitmap does not contain ID: %d for key %s, but should\", id, key)\n\t\t}\n\t}\n\n\t// Reopen the engine, and the series should be re-added to the bitmap.\n\tif err := engine.Reopen(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Check bitset is expected.\n\texpected := tsdb.NewSeriesIDSet()\n\tfor _, id := range seriesIDMap {\n\t\texpected.Add(id)\n\t}\n\n\tif !engine.SeriesIDSet().Equals(expected) {\n\t\tt.Fatalf(\"got bitset %s, expected %s\", engine.SeriesIDSet().String(), expected.String())\n\t}\n}" ]
[ "0.5328413", "0.48285127", "0.46915764", "0.460706", "0.45859408", "0.45038688", "0.4317314", "0.43106538", "0.4235155", "0.42342368", "0.42137128", "0.41937977", "0.41885242", "0.41864616", "0.41690555", "0.4165227", "0.41583925", "0.41418397", "0.41260913", "0.41253287", "0.41217098", "0.41095707", "0.41057232", "0.40949655", "0.40918368", "0.40892765", "0.40840143", "0.40777856", "0.40609792", "0.40491024", "0.40390414", "0.40334505", "0.40270102", "0.4021322", "0.40086365", "0.4008617", "0.40041596", "0.4004142", "0.3996454", "0.39925405", "0.3988772", "0.39771202", "0.39658627", "0.39635772", "0.39557546", "0.39268655", "0.39151704", "0.3909075", "0.390739", "0.390492", "0.39017266", "0.38969705", "0.3890941", "0.38896367", "0.3881301", "0.3871456", "0.38688153", "0.38638106", "0.38625348", "0.38542196", "0.38471228", "0.3845318", "0.38369405", "0.38313797", "0.38254425", "0.38252723", "0.38212258", "0.38183287", "0.38118747", "0.3809889", "0.38080397", "0.38061434", "0.38042086", "0.38032156", "0.37942684", "0.37903547", "0.37838894", "0.3778488", "0.3778108", "0.37744382", "0.37701714", "0.3768992", "0.3767646", "0.37670982", "0.3765687", "0.37606084", "0.37596768", "0.3755687", "0.37511492", "0.3745524", "0.37369764", "0.37364498", "0.37364498", "0.37346548", "0.37331414", "0.3730449", "0.37300745", "0.37297153", "0.37297025", "0.37290096" ]
0.7659749
0
MaxIndexCidSize specifies the maximum allowed size for indexed CIDs in bytes. Indexing a CID with larger than the allowed size results in ErrCidTooLarge error.
func MaxIndexCidSize(s uint64) Option { return func(o *Options) { o.MaxIndexCidSize = s } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Cache) MaxSize() (maxSize int64) {\n\tfor _, shard := range c.shards {\n\t\tmaxSize += shard.maxSize\n\t}\n\treturn int64(bytesToMB(int(maxSize)))\n}", "func (p *MessagePartition) calculateMaxMessageIdFromIndex(fileId uint64) (uint64, error) {\n\tstat, err := os.Stat(p.indexFilenameByMessageId(fileId))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tentriesInIndex := uint64(stat.Size() / int64(INDEX_ENTRY_SIZE))\n\n\treturn (entriesInIndex - 1 + fileId), nil\n}", "func MaxValSize(max int) Option {\n\treturn func(lc cacheWithOpts) error {\n\t\treturn lc.setMaxValSize(max)\n\t}\n}", "func (o ClusterNodeGroupOptionsOutput) MaxSize() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v ClusterNodeGroupOptions) *int { return v.MaxSize }).(pulumi.IntPtrOutput)\n}", "func MaxValSize(max int) Option {\n\treturn func(lc *memoryCache) error {\n\t\tlc.maxValueSize = max\n\t\treturn nil\n\t}\n}", "func (r *Redis) MaxSize() int64 {\n\treturn r.maxSize\n}", "func GetMaxIndexKey(shardID uint64, key []byte) []byte {\n\tkey = getKeySlice(key, idKeyLength)\n\treturn getIDKey(maxIndexSuffix, shardID, key)\n}", "func (ch *clientSecureChannel) MaxMessageSize() uint32 {\n\treturn ch.maxMessageSize\n}", "func (cc *ContinueCompress) MaxMessageSize() int {\n\treturn cc.maxMessageSize\n}", "func (cd *ContinueDecompress) MaxMessageSize() int {\n\treturn cd.maxMessageSize\n}", "func MaxValSize(max int) Option {\n\treturn func(lc *loadingCache) error {\n\t\tlc.maxValueSize = max\n\t\treturn nil\n\t}\n}", "func MaxBufferSize(size int) Options {\n\treturn func(c *config) {\n\t\tc.maxBufferSize = size\n\t}\n}", "func (d *DHCPv4) MaxMessageSize() (uint16, error) {\n\treturn GetUint16(OptionMaximumDHCPMessageSize, d.Options)\n}", "func (c PktCnf1) MaxLen() int {\n\treturn int(c & 0xff)\n}", "func (o ClusterNodeGroupOptionsPtrOutput) MaxSize() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *ClusterNodeGroupOptions) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn 
v.MaxSize\n\t}).(pulumi.IntPtrOutput)\n}", "func GetMaxIndexes() int {\r\n\treturn converter.StrToInt(SysString(MaxIndexes))\r\n}", "func (c *Cache) SizeMaxBytes() int {\n\tn := 0\n\tfor _, shard := range c.shards {\n\t\tn += shard.SizeMaxBytes()\n\t}\n\treturn n\n}", "func RaggedCountSparseOutputMaxlength(value int64) RaggedCountSparseOutputAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"maxlength\"] = value\n\t}\n}", "func (group *NodeGroup) MaxSize() int {\n\tdefer group.lk.Unlock()\n\tgroup.lk.Lock()\n\treturn group.maxSize\n}", "func GetMaxBlockSize() int64 {\r\n\treturn converter.StrToInt64(SysString(MaxBlockSize))\r\n}", "func (ch *clientSecureChannel) MaxChunkCount() uint32 {\n\treturn ch.maxChunkCount\n}", "func SparseCountSparseOutputMaxlength(value int64) SparseCountSparseOutputAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"maxlength\"] = value\n\t}\n}", "func MaxLen(n int) PktCnf1 {\n\treturn PktCnf1(n & 0xff)\n}", "func (builder *Builder) MaxSizeInKb(maxSizeInKb uint64) *Builder {\n\tbuilder.maxSizeInKb = maxSizeInKb\n\treturn builder\n}", "func (o *VolumeInfinitevolAttributesType) SetMaxNamespaceConstituentSize(newValue SizeType) *VolumeInfinitevolAttributesType {\n\to.MaxNamespaceConstituentSizePtr = &newValue\n\treturn o\n}", "func getMaxSize() int {\n\tMaxInt := 1 << 31\n\tcount := 0\n\tfor MaxInt > 0 {\n\t\tMaxInt /= 10\n\t\tcount++\n\t}\n\treturn count\n}", "func MaxAllowedHeaderSize(max uint64) Option {\n\treturn func(o *Options) {\n\t\to.MaxAllowedHeaderSize = max\n\t}\n}", "func (w *Whisper) MaxMessageSize() uint32 {\n\tval, _ := w.settings.Load(maxMsgSizeIdx)\n\treturn val.(uint32)\n}", "func (c *Config) MaxSize(stream string) (uint, error) {\n\tkey, err := keyName(stream, \"maxsize\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn c.v.GetSizeInBytes(key), nil\n}", "func (m Logon) SetMaxMessageSize(v int) {\n\tm.Set(field.NewMaxMessageSize(v))\n}", "func (context *context) SetMaxSegmentLength(n uint) 
{\n\tcontext.params.SetMaxSegmentLength(int(n))\n}", "func (c Conn) MaxLength() int {\n\treturn MaxLength\n}", "func (o *VolumeInfinitevolAttributesType) SetMaxDataConstituentSize(newValue SizeType) *VolumeInfinitevolAttributesType {\n\to.MaxDataConstituentSizePtr = &newValue\n\treturn o\n}", "func NamespaceIDSize(size int) Option {\n\tif size < 0 || size > namespace.IDMaxSize {\n\t\tpanic(\"Got invalid namespace.IDSize. Expected 0 <= size <= namespace.IDMaxSize.\")\n\t}\n\treturn func(opts *Options) {\n\t\topts.NamespaceIDSize = namespace.IDSize(size)\n\t}\n}", "func NewMaxMessageSize(val int) MaxMessageSizeField {\n\treturn MaxMessageSizeField{quickfix.FIXInt(val)}\n}", "func (c ClientProperties) MaxMsgLength() uint32 {\n\treturn binary.LittleEndian.Uint32(c[:4])\n}", "func (asg *Asg) MaxSize() int {\n\treturn asg.maxSize\n}", "func MaxSize32(length int) int {\n\tnumControlBytes := (length + 3) / 4\n\tmaxNumDataBytes := 4 * length\n\treturn numControlBytes + maxNumDataBytes\n}", "func MaxKeys(max int) Option {\n\treturn func(lc *memoryCache) error {\n\t\tlc.maxKeys = max\n\t\treturn nil\n\t}\n}", "func (d *Decoder) SetMaxArraySize(size uint) {\n\td.maxArraySize = int(size)\n}", "func (r *Search) MaxConcurrentShardRequests(maxconcurrentshardrequests string) *Search {\n\tr.values.Set(\"max_concurrent_shard_requests\", maxconcurrentshardrequests)\n\n\treturn r\n}", "func (o *VolumeInfinitevolAttributesType) MaxNamespaceConstituentSize() SizeType {\n\tvar r SizeType\n\tif o.MaxNamespaceConstituentSizePtr == nil {\n\t\treturn r\n\t}\n\tr = *o.MaxNamespaceConstituentSizePtr\n\treturn r\n}", "func (indexer Indexer) GetIndexSize() int {\n\tindex := *indexer.GetIndex()\n\tsize := 8 * len(index)\n\tfor _, postings := range index {\n\t\tsize += 16 * len(postings)\n\t}\n\tkb := int(math.Pow(2, 10))\n\tsize = int(size / kb)\n\treturn size\n}", "func DenseCountSparseOutputMaxlength(value int64) DenseCountSparseOutputAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"maxlength\"] 
= value\n\t}\n}", "func (query *ContractCallQuery) SetMaxResultSize(size uint64) *ContractCallQuery {\n\tquery.pb.MaxResultSize = int64(size)\n\treturn query\n}", "func MaxMsgSize(s int) server.Option {\n\treturn server.SetOption(maxMsgSizeKey{}, s)\n}", "func (ng *NodeGroup) MaxSize() int {\n\treturn int(ng.MaxNodes)\n}", "func MaxKeys(max int) Option {\n\treturn func(lc cacheWithOpts) error {\n\t\treturn lc.setMaxKeys(max)\n\t}\n}", "func (s *FilesystemStore) MaxLength(l int) {\n\tfor _, c := range s.Codecs {\n\t\tif codec, ok := c.(*securecookie.SecureCookie); ok {\n\t\t\tcodec.MaxLength(l)\n\t\t}\n\t}\n}", "func MaxAllowedSectionSize(max uint64) Option {\n\treturn func(o *Options) {\n\t\to.MaxAllowedSectionSize = max\n\t}\n}", "func (f *AnalyzerFingerprint) MaxOutputSize(maxOutputSize int) *AnalyzerFingerprint {\n\tf.maxOutputSize = &maxOutputSize\n\treturn f\n}", "func (e *BaseExecutor) SetMaxChunkSize(size int) {\n\te.maxChunkSize = size\n}", "func AthensMaxConcurrency() int {\n\tdefaultMaxConcurrency := runtime.NumCPU()\n\tmaxConcurrencyEnv, err := envy.MustGet(\"ATHENS_MAX_CONCURRENCY\")\n\tif err != nil {\n\t\treturn defaultMaxConcurrency\n\t}\n\n\tmc, err := strconv.Atoi(maxConcurrencyEnv)\n\tif err != nil {\n\t\treturn defaultMaxConcurrency\n\t}\n\n\treturn mc\n}", "func MaxKeys(max int) Option {\n\treturn func(lc *cacheImpl) error {\n\t\tlc.maxKeys = max\n\t\treturn nil\n\t}\n}", "func MaxMessageSize(size int64) Option {\n\tif size < 0 {\n\t\tpanic(\"size must be non-negative\")\n\t}\n\treturn func(ws *websocket) {\n\t\tws.options.maxMessageSize = size\n\t}\n}", "func (o *VolumeInfinitevolAttributesType) MaxDataConstituentSize() SizeType {\n\tvar r SizeType\n\tif o.MaxDataConstituentSizePtr == nil {\n\t\treturn r\n\t}\n\tr = *o.MaxDataConstituentSizePtr\n\treturn r\n}", "func FixMaxEntryIndex(rdb *Store, profile *pb.Profile) error {\n\tuuid1, err := uuid.FromString(profile.Uuid)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// MAX Delimiter Key\n\tkey := 
MaxUUIDFlakeKey(TableEntryIndex, uuid1)\n\treturn rdb.Put(key.Bytes(), []byte(\"0000\"))\n}", "func getMaxID() int {\n\n\tif len(cdb.classMap) != 0 {\n\t\tkeys := make([]int, 0, len(cdb.classMap))\n\t\tfor k := range cdb.classMap {\n\t\t\tkeys = append(keys, k)\n\t\t}\n\t\tsort.Ints(keys)\n\t\treturn keys[len(keys)-1]\n\t}\n\n\treturn -1\n\n}", "func AllocateIndexID(tblInfo *model.TableInfo) int64 {\n\ttblInfo.MaxIndexID++\n\treturn tblInfo.MaxIndexID\n}", "func (st *Settings) MaxHeaderListSize() uint32 {\n\treturn st.headerSize\n}", "func (st *Settings) MaxHeaderListSize() uint32 {\n\treturn st.headerSize\n}", "func SectorDealsMax(size abi.SectorSize) uint64 {\n\treturn max64(256, uint64(size/DealLimitDenominator))\n}", "func (o StorageClusterSpecCloudStorageCapacitySpecsOutput) MaxCapacityInGiB() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v StorageClusterSpecCloudStorageCapacitySpecs) *int { return v.MaxCapacityInGiB }).(pulumi.IntPtrOutput)\n}", "func (e SszNetworkEncoder) GetMaxChunkSize() uint64 {\n\treturn MaxChunkSize\n}", "func MaxHeaderBytes(v int) Option {\n\treturn optionSetter(func(opt *Options) {\n\t\topt.MaxHeaderBytes = v\n\t})\n}", "func MaxCacheEntries(n int) CacheOption { return maxEntriesOption(n) }", "func (opts *FIFOCompactionOptions) GetMaxTableFilesSize() uint64 {\n\treturn uint64(C.rocksdb_fifo_compaction_options_get_max_table_files_size(opts.c))\n}", "func MaxCallRecvMsgSize(v int) Configer {\n\treturn func(c *clientv3.Config) {\n\t\tc.MaxCallRecvMsgSize = v\n\t}\n}", "func MaxSizeBatchOption(size int) BatchOption {\n\treturn func(o *batchOptions) {\n\t\to.maxSize = size\n\t}\n}", "func MaxKeys(max int) Option {\n\treturn func(lc *loadingCache) error {\n\t\tlc.maxKeys = max\n\t\treturn nil\n\t}\n}", "func (m *MessageReplies) SetMaxID(value int) {\n\tm.Flags.Set(2)\n\tm.MaxID = value\n}", "func (iob *IndexOptionsBuilder) Max(max float64) *IndexOptionsBuilder {\n\tiob.document = append(iob.document, bson.E{\"max\", max})\n\treturn iob\n}", 
"func MaxRequestMaxBytes(max int) ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.MaxRequestMaxBytes = max\n\t\treturn nil\n\t}\n}", "func CfgSessionIdLength(length int64) ManagerConfigOpt {\n\treturn func(config *ManagerConfig) {\n\t\tconfig.SessionIDLength = length\n\t}\n}", "func IndexLimits(ctx context.Context, data id.ID, count int, size int, littleEndian bool) (*IndexRange, error) {\n\tobj, err := database.Build(ctx, &IndexLimitsResolvable{\n\t\tIndexSize: uint64(size),\n\t\tCount: uint64(count),\n\t\tLittleEndian: littleEndian,\n\t\tData: path.NewBlob(data),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*IndexRange), nil\n}", "func (b *IndexBuilder) ContentSize() uint32 {\n\t// Add the name too so we don't skip building index if we have\n\t// lots of empty files.\n\treturn b.contentEnd + b.nameEnd\n}", "func MaxRecvMsgSize(s int) client.Option {\n\treturn func(o *client.Options) {\n\t\tif o.Context == nil {\n\t\t\to.Context = context.Background()\n\t\t}\n\t\to.Context = context.WithValue(o.Context, maxRecvMsgSizeKey{}, s)\n\t}\n}", "func (m Logon) GetMaxMessageSize() (v int, err quickfix.MessageRejectError) {\n\tvar f field.MaxMessageSizeField\n\tif err = m.Get(&f); err == nil {\n\t\tv = f.Value()\n\t}\n\treturn\n}", "func (o OceanOutput) MaxSize() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *Ocean) pulumi.IntOutput { return v.MaxSize }).(pulumi.IntOutput)\n}", "func (u *UserStories) SetMaxReadID(value int) {\n\tu.Flags.Set(0)\n\tu.MaxReadID = value\n}", "func MaxMsgSize(n int) Option {\n\treturn func(o *Options) {\n\t\to.MaxMsgSize = n\n\t}\n}", "func idIndex() mgo.Index {\n\treturn mgo.Index{\n\t\tKey: []string{\"id\"},\n\t\tUnique: true,\n\t\tDropDups: true,\n\t\tBackground: true,\n\t\tSparse: true,\n\t}\n}", "func (_SmartTgStats *SmartTgStatsCaller) MaxRequestID(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _SmartTgStats.contract.Call(opts, out, 
\"maxRequestID\")\n\treturn *ret0, err\n}", "func (mim *metricIDMapping) GetMaxSeriesIDsLimit() uint32 {\n\treturn mim.maxSeriesIDsLimit.Load()\n}", "func (o ClusterScalingConfigurationOutput) MaxCapacity() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v ClusterScalingConfiguration) *int { return v.MaxCapacity }).(pulumi.IntPtrOutput)\n}", "func MaxConcurrency(n int) ParallelOption {\n\treturn func(p *ParallelConfig) *ParallelConfig {\n\t\tp.maxConcurrency = n\n\t\treturn p\n\t}\n}", "func MaxBlockLen(ct CompressionType) uint64 {\n\tif ct == Snappy {\n\t\t// https://github.com/golang/snappy/blob/2a8bb927dd31d8daada140a5d09578521ce5c36a/encode.go#L76\n\t\treturn 6 * (0xffffffff - 32) / 7\n\t}\n\treturn math.MaxUint64\n}", "func (gq *Dispatch) MaxLen() int {\n return gq.maxlength\n}", "func (e *Lint) MaxConcurrency() int {\n\tvar limit syscall.Rlimit\n\terr := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\toldLimit := limit.Cur\n\tlimit.Cur = limit.Max\n\terr = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit)\n\tif err != nil {\n\t\tlog.Debug(err)\n\t\treturn convertLimit(oldLimit)\n\t}\n\n\treturn convertLimit(limit.Cur)\n}", "func (m *MailTips) GetMaxMessageSize()(*int32) {\n val, err := m.GetBackingStore().Get(\"maxMessageSize\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*int32)\n }\n return nil\n}", "func (c *gcsCore) getContainerIDFromIndex(index uint32) string {\n\tc.containerIndexMutex.Lock()\n\tdefer c.containerIndexMutex.Unlock()\n\n\tif int(index) < len(c.containerIndex) {\n\t\treturn c.containerIndex[index]\n\t}\n\n\treturn \"\"\n}", "func NewLabelSizesIndex(size, label uint64) dvid.IndexBytes {\n\tindex := make([]byte, 17)\n\tindex[0] = byte(KeyLabelSizes)\n\tbinary.BigEndian.PutUint64(index[1:9], size)\n\tbinary.BigEndian.PutUint64(index[9:17], label)\n\treturn dvid.IndexBytes(index)\n}", "func (_Contract *ContractSession) MaxOptions() (*big.Int, error) {\n\treturn 
_Contract.Contract.MaxOptions(&_Contract.CallOpts)\n}", "func (m *MailTips) SetMaxMessageSize(value *int32)() {\n err := m.GetBackingStore().Set(\"maxMessageSize\", value)\n if err != nil {\n panic(err)\n }\n}", "func (_SmartTgStats *SmartTgStatsSession) MaxRequestID() (*big.Int, error) {\n\treturn _SmartTgStats.Contract.MaxRequestID(&_SmartTgStats.CallOpts)\n}", "func (this AliasCodec) MaxEncodedLen(srcLen int) int {\n\treturn srcLen + 1024\n}", "func (c *ColumnMap) SetMaxSize(size int) *ColumnMap {\n\tc.MaxSize = size\n\treturn c\n}", "func (c *ColumnMap) SetMaxSize(size int) *ColumnMap {\n\tc.MaxSize = size\n\treturn c\n}", "func (o *KubernetesNodeGroupProfile) SetMaxsize(v int64) {\n\to.Maxsize = &v\n}", "func (s *Set) SetMaxLineSize(i int) {\n\ts.MaxLineSize = i\n}" ]
[ "0.54128367", "0.5239222", "0.5225754", "0.5207175", "0.517067", "0.5167994", "0.51654756", "0.514978", "0.5139812", "0.5123632", "0.51004505", "0.5087268", "0.50867826", "0.507669", "0.5049227", "0.50329137", "0.5025627", "0.5024108", "0.49574798", "0.49260557", "0.49127924", "0.4910644", "0.49033746", "0.48947075", "0.4872246", "0.48620787", "0.48423564", "0.48419514", "0.4841062", "0.4829615", "0.48268786", "0.47892594", "0.4788429", "0.47796482", "0.4775992", "0.47722614", "0.47634223", "0.4760452", "0.47583646", "0.47499043", "0.4729253", "0.4720951", "0.47206277", "0.47096175", "0.47038263", "0.46909288", "0.468887", "0.46855697", "0.4684044", "0.46717122", "0.46707758", "0.46693173", "0.46468368", "0.46363613", "0.46346977", "0.46225426", "0.46199554", "0.46146235", "0.46120372", "0.45987394", "0.45987394", "0.45959568", "0.4592378", "0.4588514", "0.4588392", "0.45825782", "0.45725843", "0.45687947", "0.45644632", "0.4564417", "0.45592505", "0.45562232", "0.45523256", "0.45332283", "0.45264214", "0.4516632", "0.4503763", "0.45023876", "0.44988713", "0.4493675", "0.4488355", "0.44858772", "0.44771847", "0.44752157", "0.44736332", "0.44629294", "0.44612032", "0.44571182", "0.44530383", "0.444591", "0.44380736", "0.4436111", "0.44211733", "0.44202057", "0.4418277", "0.44143802", "0.44136763", "0.44136763", "0.4413555", "0.43959624" ]
0.8566187
0
WithTraversalPrototypeChooser specifies the prototype chooser that should be used when performing traversals in writes from a linksystem.
func WithTraversalPrototypeChooser(t traversal.LinkTargetNodePrototypeChooser) Option { return func(o *Options) { o.TraversalPrototypeChooser = t } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewSocketsTraversalExtension() *SocketsTraversalExtension {\n\treturn &SocketsTraversalExtension{\n\t\tSocketsToken: traversalSocketsToken,\n\t}\n}", "func WithSortingByPathAscAndRevisionDesc() GetImplementationOption {\n\treturn func(options *ListImplementationRevisionsOptions) {\n\t\toptions.sortByPathAscAndRevisionDesc = true\n\t}\n}", "func (result *Result) WithGraphTraversal(graphTraversals []*GraphTraversal) *Result {\n\tresult.GraphTraversals = graphTraversals\n\treturn result\n}", "func (shim *QueryDirectClient) Traversal(ctx context.Context, in *GraphQuery, opts ...grpc.CallOption) (Query_TraversalClient, error) {\n md, _ := metadata.FromOutgoingContext(ctx)\n ictx := metadata.NewIncomingContext(ctx, md)\n\n\tw := &directQueryTraversal{ictx, make(chan *QueryResult, 100), in, nil}\n if shim.streamServerInt != nil {\n go func() {\n defer w.close()\n info := grpc.StreamServerInfo{\n FullMethod: \"/gripql.Query/Traversal\",\n IsServerStream: true,\n }\n w.e = shim.streamServerInt(shim.server, w, &info, _Query_Traversal_Handler)\n } ()\n return w, nil\n }\n\tgo func() {\n defer w.close()\n\t\tw.e = shim.server.Traversal(in, w)\n\t}()\n\treturn w, nil\n}", "func (_options *CreateConfigurationOptions) SetConfigurationPrototype(configurationPrototype ConfigurationPrototypeIntf) *CreateConfigurationOptions {\n\t_options.ConfigurationPrototype = configurationPrototype\n\treturn _options\n}", "func WithTransferCallback(callback base.TransferCallback) Option {\n\treturn func(node base.Node) {\n\t\tnode.SetTransferCallback(callback)\n\t}\n}", "func NewTraversal() (g String) {\n\tg.string = \"g\"\n\tg.buffer = bytes.NewBufferString(\"\")\n\treturn\n}", "func WithPrinter(p io.Writer) Option {\n\treturn func(s *initSpec) {\n\t\ts.Printer = p\n\t}\n}", "func AddIndependentPropertyGeneratorsForVirtualNetworkGateway_Spec(gens map[string]gopter.Gen) {\n\tgens[\"ActiveActive\"] = gen.PtrOf(gen.Bool())\n\tgens[\"AzureName\"] = gen.AlphaString()\n\tgens[\"EnableBgp\"] = 
gen.PtrOf(gen.Bool())\n\tgens[\"EnableDnsForwarding\"] = gen.PtrOf(gen.Bool())\n\tgens[\"EnablePrivateIpAddress\"] = gen.PtrOf(gen.Bool())\n\tgens[\"GatewayType\"] = gen.PtrOf(gen.OneConstOf(VirtualNetworkGatewayPropertiesFormat_GatewayType_ExpressRoute, VirtualNetworkGatewayPropertiesFormat_GatewayType_LocalGateway, VirtualNetworkGatewayPropertiesFormat_GatewayType_Vpn))\n\tgens[\"Location\"] = gen.PtrOf(gen.AlphaString())\n\tgens[\"Tags\"] = gen.MapOf(gen.AlphaString(), gen.AlphaString())\n\tgens[\"VpnGatewayGeneration\"] = gen.PtrOf(gen.OneConstOf(VirtualNetworkGatewayPropertiesFormat_VpnGatewayGeneration_Generation1, VirtualNetworkGatewayPropertiesFormat_VpnGatewayGeneration_Generation2, VirtualNetworkGatewayPropertiesFormat_VpnGatewayGeneration_None))\n\tgens[\"VpnType\"] = gen.PtrOf(gen.OneConstOf(VirtualNetworkGatewayPropertiesFormat_VpnType_PolicyBased, VirtualNetworkGatewayPropertiesFormat_VpnType_RouteBased))\n}", "func WithTracesURLPath(urlPath string) Option {\n\treturn wrappedOption{otlpconfig.WithTracesURLPath(urlPath)}\n}", "func WithProtocol(protocol Protocol) OptionPortScanner {\n\treturn protocolOption(protocol)\n}", "func (o *Outbound) Chooser() peer.Chooser {\n\treturn o.chooser\n}", "func (o *Object) SetPrototype(proto *Object) error {\n\treturn o.runtime.try(func() {\n\t\to.self.setProto(proto, true)\n\t})\n}", "func (_options *CreateConfigurationActionOptions) SetConfigActionPrototype(configActionPrototype ConfigurationActionPrototypeIntf) *CreateConfigurationActionOptions {\n\t_options.ConfigActionPrototype = configActionPrototype\n\treturn _options\n}", "func NavigateExtensionApprovalFlow(ctx context.Context, cr *chrome.Chrome, tconn *chrome.TestConn, bt browser.Type, parentEmail, parentPassword string) error {\n\ttesting.ContextLog(ctx, \"Adding extension as a supervised user\")\n\n\t// Reserve ten seconds for cleanup.\n\tcleanupCtx := ctx\n\tctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)\n\tdefer cancel()\n\n\t// Set up 
browser.\n\tbr, closeBrowser, err := browserfixt.SetUp(ctx, cr, bt)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to set up browser\")\n\t}\n\tdefer closeBrowser(cleanupCtx)\n\n\t// Open webstore in browser.\n\tconst extensionID = \"djflhoibgkdhkhhcedjiklpkjnoahfmg\" // Google-developed extension from Chrome Store.\n\tconst extensionURL = \"https://chrome.google.com/webstore/detail/\" + extensionID + \"?hl=en\"\n\tconn, err := br.NewConn(ctx, extensionURL, browser.WithNewWindow())\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to open webstore\")\n\t}\n\tdefer conn.Close()\n\n\t// Load page contents.\n\tui := uiauto.New(tconn).WithTimeout(time.Minute)\n\n\t// Install extension parent permission flow.\n\ttesting.ContextLog(ctx, \"Finding button that adds the extension\")\n\taddButton := nodewith.Name(\"Add to Chrome\").Role(role.Button).First()\n\tif err := ui.WaitUntilExists(addButton)(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"failed to load page\")\n\t}\n\n\ttesting.ContextLog(ctx, \"Clicking button that adds the extension\")\n\tif err := ui.LeftClick(addButton)(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"failed to click add extension\")\n\t}\n\n\ttesting.ContextLog(ctx, \"Clicking ask parent\")\n\taskParentButton := nodewith.Name(\"Ask a parent\").Role(role.Button)\n\t// The \"Ask parent\" button may not immediately be clickable.\n\tif err := ui.LeftClickUntil(askParentButton, ui.Gone(askParentButton))(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"failed to click ask parent\")\n\t}\n\n\ttesting.ContextLog(ctx, \"Selecting parent email\"+strings.ToLower(parentEmail))\n\tparentEmailRadio := nodewith.Name(strings.ToLower(parentEmail)).Role(role.RadioButton)\n\tparentEmailText := nodewith.Name(strings.ToLower(parentEmail))\n\t// If there are two parents, the dialog contains a radio button with both parent emails.\n\tif err := ui.LeftClick(parentEmailRadio)(ctx); err != nil {\n\t\t// If there is no radio button, this indicates 
that there is only one parent. Verify\n\t\t// that the email is present as text, and return an error if it is not present.\n\t\tif err := ui.Exists(parentEmailText)(ctx); err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to find parent email %q\", parentEmail)\n\t\t}\n\t}\n\n\ttesting.ContextLog(ctx, \"Clicking the parent password text field\")\n\tparentPasswordField := nodewith.Name(\"Enter password\").Role(role.TextField)\n\tif err := ui.LeftClick(parentPasswordField)(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"failed to click parent password text\")\n\t}\n\n\ttesting.ContextLog(ctx, \"Setting up keyboard\")\n\tkb, err := input.Keyboard(ctx)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to get keyboard\")\n\t}\n\tdefer kb.Close()\n\n\ttesting.ContextLog(ctx, \"Typing the parent password\")\n\tif err := kb.Type(ctx, parentPassword); err != nil {\n\t\treturn errors.Wrap(err, \"failed to type parent password\")\n\t}\n\n\ttesting.ContextLog(ctx, \"Verifying Approve and Cancel buttons enabled\")\n\tapproveButton := nodewith.Name(\"Approve\").Role(role.Button)\n\tif err := ui.CheckRestriction(approveButton, restriction.None)(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"failed to verify Approve button enabled\")\n\t}\n\tcancelButton := nodewith.Name(\"Cancel\").Role(role.Button)\n\tif err := ui.CheckRestriction(cancelButton, restriction.None)(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"failed to verify Cancel button enabled\")\n\t}\n\n\treturn nil\n}", "func EchoPresenterWith(w io.Writer) Presenter {\n\treturn func(r interacter.Res) interacter.Res {\n\t\tbuf := []byte(fmt.Sprintln(r))\n\t\tw.Write(buf)\n\t\treturn r\n\t}\n}", "func (p Path) TraverseWithOptions(opts TraverseOptions) (creds.Creds, error) {\n\tlogger.InfoMsgf(\"traversing path %+v with options %+v\", p, opts)\n\n\terr := clearEnvironment()\n\tif err != nil {\n\t\treturn creds.Creds{}, err\n\t}\n\n\tprofileHop, stack := p[0], p[1:]\n\tlogger.InfoMsgf(\"loading origin hop: 
%+v\", profileHop)\n\tprofileCreds, err := opts.Store.Lookup(profileHop.Profile)\n\tif err != nil {\n\t\treturn creds.Creds{}, err\n\t}\n\n\tuai := []creds.UserAgentItem{{\n\t\tName: \"voyager\",\n\t\tVersion: version.Version,\n\t}}\n\tfor _, x := range opts.UserAgentItems {\n\t\tuai = append(uai, x)\n\t}\n\n\tc := creds.Creds{\n\t\tAccessKey: profileCreds.AccessKeyID,\n\t\tSecretKey: profileCreds.SecretAccessKey,\n\t\tUserAgentItems: uai,\n\t}\n\n\tfor _, thisHop := range stack {\n\t\tc, err = thisHop.Traverse(c, opts)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn c, err\n}", "func WithTransHandlerFactory(f remote.ServerTransHandlerFactory) Option {\n\treturn Option{F: func(o *internal_server.Options, di *utils.Slice) {\n\t\to.Once.OnceOrPanic()\n\t\tdi.Push(fmt.Sprintf(\"WithTransHandlerFactory(%T)\", f))\n\n\t\to.RemoteOpt.SvrHandlerFactory = f\n\t}}\n}", "func AddRelatedPropertyGeneratorsForVirtualNetworkGateway_Spec(gens map[string]gopter.Gen) {\n\tgens[\"BgpSettings\"] = gen.PtrOf(BgpSettingsGenerator())\n\tgens[\"CustomRoutes\"] = gen.PtrOf(AddressSpaceGenerator())\n\tgens[\"ExtendedLocation\"] = gen.PtrOf(ExtendedLocationGenerator())\n\tgens[\"GatewayDefaultSite\"] = gen.PtrOf(SubResourceGenerator())\n\tgens[\"IpConfigurations\"] = gen.SliceOf(VirtualNetworkGatewayIPConfigurationGenerator())\n\tgens[\"Sku\"] = gen.PtrOf(VirtualNetworkGatewaySkuGenerator())\n\tgens[\"VpnClientConfiguration\"] = gen.PtrOf(VpnClientConfigurationGenerator())\n}", "func (codec *LibvirtProviderConfigCodec) EncodeToProviderSpec(in runtime.Object) (*machinev1.ProviderSpec, error) {\n\tvar buf bytes.Buffer\n\tif err := codec.encoder.Encode(in, &buf); err != nil {\n\t\treturn nil, fmt.Errorf(\"encoding failed: %v\", err)\n\t}\n\treturn &machinev1.ProviderSpec{\n\t\tValue: &runtime.RawExtension{Raw: buf.Bytes()},\n\t}, nil\n}", "func WithRoundTripper(roundTripper nethttp.RoundTripper) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn 
fmt.Errorf(\"http round tripper option can not set nil protocol\")\n\t\t}\n\t\tp.roundTripper = roundTripper\n\t\treturn nil\n\t}\n}", "func (_options *CreateSecretOptions) SetSecretPrototype(secretPrototype SecretPrototypeIntf) *CreateSecretOptions {\n\t_options.SecretPrototype = secretPrototype\n\treturn _options\n}", "func (m *PrinterDefaults) SetDuplexMode(value *PrintDuplexMode)() {\n err := m.GetBackingStore().Set(\"duplexMode\", value)\n if err != nil {\n panic(err)\n }\n}", "func WithPager(p string) Option {\n\treturn option{\n\t\ttable: func(enc *TableEncoder) error {\n\t\t\tenc.pagerCmd = p\n\t\t\treturn nil\n\t\t},\n\t\texpanded: func(enc *ExpandedEncoder) error {\n\t\t\tenc.pagerCmd = p\n\t\t\treturn nil\n\t\t},\n\t}\n}", "func AddIndependentPropertyGeneratorsForVirtualNetworks_VirtualNetworkPeering_Spec(gens map[string]gopter.Gen) {\n\tgens[\"AllowForwardedTraffic\"] = gen.PtrOf(gen.Bool())\n\tgens[\"AllowGatewayTransit\"] = gen.PtrOf(gen.Bool())\n\tgens[\"AllowVirtualNetworkAccess\"] = gen.PtrOf(gen.Bool())\n\tgens[\"AzureName\"] = gen.AlphaString()\n\tgens[\"DoNotVerifyRemoteGateways\"] = gen.PtrOf(gen.Bool())\n\tgens[\"OriginalVersion\"] = gen.AlphaString()\n\tgens[\"PeeringState\"] = gen.PtrOf(gen.AlphaString())\n\tgens[\"UseRemoteGateways\"] = gen.PtrOf(gen.Bool())\n}", "func WithWriter(w io.Writer) PrintingOpt {\n\treturn func(p *Printing) {\n\t\tp.writer = w\n\t}\n}", "func ImplementationWrapAlgCopy(pointer unsafe.Pointer) (Alg, error) {\n\tctx := (*C.vscf_impl_t)(pointer)\n\tshallowCopy := C.vscf_impl_shallow_copy(ctx)\n\treturn ImplementationWrapAlg(unsafe.Pointer(shallowCopy))\n}", "func WithProxy(p proxy.BackwardProxy) Option {\n\treturn Option{F: func(o *internal_server.Options, di *utils.Slice) {\n\t\to.Once.OnceOrPanic()\n\t\tdi.Push(fmt.Sprintf(\"WithProxy(%T)\", p))\n\n\t\tif o.Proxy != nil {\n\t\t\tpanic(fmt.Errorf(\"reassignment of Proxy is not allowed: %T -> %T\", o.Proxy, p))\n\t\t}\n\t\to.Proxy = p\n\t}}\n}", "func (v *vAVL) 
Traverser(root *vAVLNode, ch chan<- *Venue) {\r\n\tv.mu.RLock()\r\n\tdefer v.mu.RUnlock()\r\n\tv.traverse(root, ch)\r\n\tclose(ch)\r\n}", "func WithFollowing(following *url.URL) Opt {\n\treturn func(opts *Options) {\n\t\topts.Following = following\n\t}\n}", "func (s *BasevhdlListener) EnterSubprogram_kind(ctx *Subprogram_kindContext) {}", "func (_options *CreateSecretActionOptions) SetSecretActionPrototype(secretActionPrototype SecretActionPrototypeIntf) *CreateSecretActionOptions {\n\t_options.SecretActionPrototype = secretActionPrototype\n\treturn _options\n}", "func WithConnectObserver(observer gocql.ConnectObserver) Option {\n\treturn optionFunc(func(cfg *config) {\n\t\tcfg.connectObserver = observer\n\t})\n}", "func (p *Printer) WriteProtoFlow(f v1.Flow) error {\n\tswitch p.opts.output {\n\tcase TabOutput:\n\t\tif p.line == 0 {\n\t\t\t_, err := fmt.Fprint(p.tw,\n\t\t\t\t\"TIMESTAMP\", tab,\n\t\t\t\t\"SOURCE\", tab,\n\t\t\t\t\"DESTINATION\", tab,\n\t\t\t\t\"TYPE\", tab,\n\t\t\t\t\"VERDICT\", tab,\n\t\t\t\t\"SUMMARY\", newline,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tsrc, dst := p.GetHostNames(f)\n\t\t_, err := fmt.Fprint(p.tw,\n\t\t\tgetTimestamp(f), tab,\n\t\t\tsrc, tab,\n\t\t\tdst, tab,\n\t\t\tGetFlowType(f), tab,\n\t\t\tf.GetVerdict().String(), tab,\n\t\t\tf.GetSummary(), newline,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to write out packet: %v\", err)\n\t\t}\n\tcase DictOutput:\n\t\tif p.line != 0 {\n\t\t\t// TODO: line length?\n\t\t\t_, err := fmt.Fprintln(p.opts.w, \"------------\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tsrc, dst := p.GetHostNames(f)\n\t\t// this is a little crude, but will do for now. 
should probably find the\n\t\t// longest header and auto-format the keys\n\t\t_, err := fmt.Fprint(p.opts.w,\n\t\t\t\" TIMESTAMP: \", getTimestamp(f), newline,\n\t\t\t\" SOURCE: \", src, newline,\n\t\t\t\"DESTINATION: \", dst, newline,\n\t\t\t\" TYPE: \", GetFlowType(f), newline,\n\t\t\t\" VERDICT: \", f.GetVerdict().String(), newline,\n\t\t\t\" SUMMARY: \", f.GetSummary(), newline,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to write out packet: %v\", err)\n\t\t}\n\tcase CompactOutput:\n\t\tsrc, dst := p.GetHostNames(f)\n\t\t_, err := fmt.Fprintf(p.opts.w,\n\t\t\t\"%s [%s]: %s -> %s %s %s (%s)\\n\",\n\t\t\tgetTimestamp(f),\n\t\t\tf.GetNodeName(),\n\t\t\tsrc,\n\t\t\tdst,\n\t\t\tGetFlowType(f),\n\t\t\tf.GetVerdict().String(),\n\t\t\tf.GetSummary(),\n\t\t)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to write out packet: %v\", err)\n\t\t}\n\tcase JSONOutput:\n\t\treturn p.jsonEncoder.Encode(f)\n\t}\n\tp.line++\n\treturn nil\n}", "func WithIPPUSBDescriptors() Option {\n\treturn WithDescriptors(\"ippusb_printer.json\")\n}", "func (client *Client) DescribeExplorerWithCallback(request *DescribeExplorerRequest, callback func(response *DescribeExplorerResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *DescribeExplorerResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.DescribeExplorer(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func OptionUpstreamURLScheme(scheme string) Option {\n\treturn func(cfg *gwconfig) {\n\t\tcfg.upstreamURLScheme = scheme\n\t}\n}", "func WithFormatter(formatter Formatter) Option {\n\treturn option{\n\t\ttable: func(enc *TableEncoder) error {\n\t\t\tenc.formatter = formatter\n\t\t\treturn nil\n\t\t},\n\t\texpanded: func(enc *ExpandedEncoder) error {\n\t\t\tenc.formatter = 
formatter\n\t\t\treturn nil\n\t\t},\n\t\tjson: func(enc *JSONEncoder) error {\n\t\t\tenc.formatter = formatter\n\t\t\treturn nil\n\t\t},\n\t\tunaligned: func(enc *UnalignedEncoder) error {\n\t\t\tenc.formatter = formatter\n\t\t\treturn nil\n\t\t},\n\t\ttemplate: func(enc *TemplateEncoder) error {\n\t\t\tenc.formatter = formatter\n\t\t\treturn nil\n\t\t},\n\t\tcrosstab: func(view *CrosstabView) error {\n\t\t\tview.formatter = formatter\n\t\t\treturn nil\n\t\t},\n\t}\n}", "func HandlerWithProtocPath(protocPath string) HandlerOption {\n\treturn func(handlerOptions *handlerOptions) {\n\t\thandlerOptions.protocPath = protocPath\n\t}\n}", "func (_options *CreateSecretVersionActionOptions) SetSecretVersionActionPrototype(secretVersionActionPrototype SecretVersionActionPrototypeIntf) *CreateSecretVersionActionOptions {\n\t_options.SecretVersionActionPrototype = secretVersionActionPrototype\n\treturn _options\n}", "func (e *Extension) Outgoing(ms *bayeux.Message) {\n\tswitch ms.Channel {\n\tcase bayeux.MetaHandshake:\n\t\text := ms.GetExt(true)\n\t\text[ExtensionName] = true\n\tcase bayeux.MetaSubscribe:\n\t\tif e.isSupported() {\n\t\t\text := ms.GetExt(true)\n\t\t\text[ExtensionName] = e.replayStore.AsMap()\n\t\t}\n\t}\n}", "func ProtoToOSPolicyAssignment(p *osconfigpb.OsconfigOSPolicyAssignment) *osconfig.OSPolicyAssignment {\n\tobj := &osconfig.OSPolicyAssignment{\n\t\tName: dcl.StringOrNil(p.GetName()),\n\t\tDescription: dcl.StringOrNil(p.GetDescription()),\n\t\tInstanceFilter: ProtoToOsconfigOSPolicyAssignmentInstanceFilter(p.GetInstanceFilter()),\n\t\tRollout: ProtoToOsconfigOSPolicyAssignmentRollout(p.GetRollout()),\n\t\tRevisionId: dcl.StringOrNil(p.GetRevisionId()),\n\t\tRevisionCreateTime: dcl.StringOrNil(p.GetRevisionCreateTime()),\n\t\tEtag: dcl.StringOrNil(p.GetEtag()),\n\t\tRolloutState: ProtoToOsconfigOSPolicyAssignmentRolloutStateEnum(p.GetRolloutState()),\n\t\tBaseline: dcl.Bool(p.GetBaseline()),\n\t\tDeleted: dcl.Bool(p.GetDeleted()),\n\t\tReconciling: 
dcl.Bool(p.GetReconciling()),\n\t\tUid: dcl.StringOrNil(p.GetUid()),\n\t\tProject: dcl.StringOrNil(p.GetProject()),\n\t\tLocation: dcl.StringOrNil(p.GetLocation()),\n\t\tSkipAwaitRollout: dcl.Bool(p.GetSkipAwaitRollout()),\n\t}\n\tfor _, r := range p.GetOsPolicies() {\n\t\tobj.OSPolicies = append(obj.OSPolicies, *ProtoToOsconfigOSPolicyAssignmentOSPolicies(r))\n\t}\n\treturn obj\n}", "func ProvokingVertex(mode uint32) {\n\tsyscall.Syscall(gpProvokingVertex, 1, uintptr(mode), 0, 0)\n}", "func (t *Link) PrependPreviewObject(v ObjectType) {\n\tt.preview = append([]*previewIntermediateType{&previewIntermediateType{Object: v}}, t.preview...)\n\n}", "func WithDescriptors(path string) Option {\n\treturn func(o *config) error {\n\t\tif len(path) == 0 {\n\t\t\treturn errors.New(\"empty descriptors path\")\n\t\t}\n\t\to.args = append(o.args, \"--descriptors_path=\"+absoluteConfigPath(path))\n\t\to.descriptors = absoluteConfigPath(path)\n\t\treturn nil\n\t}\n}", "func (o *GetOrganizationPrototypePermissionsParams) WithHTTPClient(client *http.Client) *GetOrganizationPrototypePermissionsParams {\n\to.SetHTTPClient(client)\n\treturn o\n}", "func WithDataPageV2() FileWriterOption {\n\treturn func(fw *FileWriter) {\n\t\tfw.newPageFunc = newDataPageV2Writer\n\t}\n}", "func AddRelatedPropertyGeneratorsForVirtualMachine_Spec(gens map[string]gopter.Gen) {\n\tgens[\"AdditionalCapabilities\"] = gen.PtrOf(AdditionalCapabilitiesGenerator())\n\tgens[\"ApplicationProfile\"] = gen.PtrOf(ApplicationProfileGenerator())\n\tgens[\"AvailabilitySet\"] = gen.PtrOf(SubResourceGenerator())\n\tgens[\"BillingProfile\"] = gen.PtrOf(BillingProfileGenerator())\n\tgens[\"CapacityReservation\"] = gen.PtrOf(CapacityReservationProfileGenerator())\n\tgens[\"DiagnosticsProfile\"] = gen.PtrOf(DiagnosticsProfileGenerator())\n\tgens[\"ExtendedLocation\"] = gen.PtrOf(ExtendedLocationGenerator())\n\tgens[\"HardwareProfile\"] = gen.PtrOf(HardwareProfileGenerator())\n\tgens[\"Host\"] = 
gen.PtrOf(SubResourceGenerator())\n\tgens[\"HostGroup\"] = gen.PtrOf(SubResourceGenerator())\n\tgens[\"Identity\"] = gen.PtrOf(VirtualMachineIdentityGenerator())\n\tgens[\"NetworkProfile\"] = gen.PtrOf(NetworkProfileGenerator())\n\tgens[\"OsProfile\"] = gen.PtrOf(OSProfileGenerator())\n\tgens[\"Plan\"] = gen.PtrOf(PlanGenerator())\n\tgens[\"ProximityPlacementGroup\"] = gen.PtrOf(SubResourceGenerator())\n\tgens[\"ScheduledEventsProfile\"] = gen.PtrOf(ScheduledEventsProfileGenerator())\n\tgens[\"SecurityProfile\"] = gen.PtrOf(SecurityProfileGenerator())\n\tgens[\"StorageProfile\"] = gen.PtrOf(StorageProfileGenerator())\n\tgens[\"VirtualMachineScaleSet\"] = gen.PtrOf(SubResourceGenerator())\n}", "func newSeparatorFromNative(obj unsafe.Pointer) interface{} {\n\tsep := &Separator{}\n\tsep.object = C.to_GtkSeparator(obj)\n\n\tif gobject.IsObjectFloating(sep) {\n\t\tgobject.RefSink(sep)\n\t} else {\n\t\tgobject.Ref(sep)\n\t}\n\tsep.Widget = NewWidget(obj)\n\tsep.Orientable = newOrientableFromNative(obj).(*Orientable)\n\tseparatorFinalizer(sep)\n\n\treturn sep\n}", "func (o *GetOrganizationPrototypePermissionsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param orgname\n\tif err := r.SetPathParam(\"orgname\", o.Orgname); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (t *Link) AppendPreviewObject(v ObjectType) {\n\tt.preview = append(t.preview, &previewIntermediateType{Object: v})\n\n}", "func WithKnowledgeGraphProcessor(s kgProcessor) Option {\n\treturn func(o *options) {\n\t\to.kg = s\n\t}\n}", "func ImplementationWrapAlgInfoSerializerCopy(pointer unsafe.Pointer) (AlgInfoSerializer, error) {\n\tctx := (*C.vscf_impl_t)(pointer)\n\tshallowCopy := C.vscf_impl_shallow_copy(ctx)\n\treturn 
ImplementationWrapAlgInfoSerializer(unsafe.Pointer(shallowCopy))\n}", "func WithGenerator(g *Generator) OptionFunc {\n\treturn func(b *Bot) {\n\t\tb.generator = g\n\t}\n}", "func (_DelegationController *DelegationControllerFilterer) WatchDelegationProposed(opts *bind.WatchOpts, sink chan<- *DelegationControllerDelegationProposed) (event.Subscription, error) {\n\n\tlogs, sub, err := _DelegationController.contract.WatchLogs(opts, \"DelegationProposed\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(DelegationControllerDelegationProposed)\n\t\t\t\tif err := _DelegationController.contract.UnpackLog(event, \"DelegationProposed\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (qs ControlQS) OrderByKpDesc() ControlQS {\n\tqs.order = append(qs.order, `\"kp\" DESC`)\n\n\treturn qs\n}", "func (t *Link) PrependPreviewLink(v LinkType) {\n\tt.preview = append([]*previewIntermediateType{&previewIntermediateType{Link: v}}, t.preview...)\n\n}", "func WithTracing(tracing bool) Option {\n\treturn func(b *builder) {\n\t\tb.useTracing = tracing\n\t}\n}", "func AddIndependentPropertyGeneratorsForVirtualNetworkGatewaySku(gens map[string]gopter.Gen) {\n\tgens[\"Name\"] = 
gen.PtrOf(gen.OneConstOf(\n\t\tVirtualNetworkGatewaySku_Name_Basic,\n\t\tVirtualNetworkGatewaySku_Name_ErGw1AZ,\n\t\tVirtualNetworkGatewaySku_Name_ErGw2AZ,\n\t\tVirtualNetworkGatewaySku_Name_ErGw3AZ,\n\t\tVirtualNetworkGatewaySku_Name_HighPerformance,\n\t\tVirtualNetworkGatewaySku_Name_Standard,\n\t\tVirtualNetworkGatewaySku_Name_UltraPerformance,\n\t\tVirtualNetworkGatewaySku_Name_VpnGw1,\n\t\tVirtualNetworkGatewaySku_Name_VpnGw1AZ,\n\t\tVirtualNetworkGatewaySku_Name_VpnGw2,\n\t\tVirtualNetworkGatewaySku_Name_VpnGw2AZ,\n\t\tVirtualNetworkGatewaySku_Name_VpnGw3,\n\t\tVirtualNetworkGatewaySku_Name_VpnGw3AZ,\n\t\tVirtualNetworkGatewaySku_Name_VpnGw4,\n\t\tVirtualNetworkGatewaySku_Name_VpnGw4AZ,\n\t\tVirtualNetworkGatewaySku_Name_VpnGw5,\n\t\tVirtualNetworkGatewaySku_Name_VpnGw5AZ))\n\tgens[\"Tier\"] = gen.PtrOf(gen.OneConstOf(\n\t\tVirtualNetworkGatewaySku_Tier_Basic,\n\t\tVirtualNetworkGatewaySku_Tier_ErGw1AZ,\n\t\tVirtualNetworkGatewaySku_Tier_ErGw2AZ,\n\t\tVirtualNetworkGatewaySku_Tier_ErGw3AZ,\n\t\tVirtualNetworkGatewaySku_Tier_HighPerformance,\n\t\tVirtualNetworkGatewaySku_Tier_Standard,\n\t\tVirtualNetworkGatewaySku_Tier_UltraPerformance,\n\t\tVirtualNetworkGatewaySku_Tier_VpnGw1,\n\t\tVirtualNetworkGatewaySku_Tier_VpnGw1AZ,\n\t\tVirtualNetworkGatewaySku_Tier_VpnGw2,\n\t\tVirtualNetworkGatewaySku_Tier_VpnGw2AZ,\n\t\tVirtualNetworkGatewaySku_Tier_VpnGw3,\n\t\tVirtualNetworkGatewaySku_Tier_VpnGw3AZ,\n\t\tVirtualNetworkGatewaySku_Tier_VpnGw4,\n\t\tVirtualNetworkGatewaySku_Tier_VpnGw4AZ,\n\t\tVirtualNetworkGatewaySku_Tier_VpnGw5,\n\t\tVirtualNetworkGatewaySku_Tier_VpnGw5AZ))\n}", "func (p *ZkEstablishAccept) Encode(w io.Writer, pver uint32) error {\n\treturn WriteElements(w,\n\t\tp.EscrowTxid,\n\t\tp.ToSelfDelay,\n\t\tp.MerchPayoutPk,\n\t\tp.MerchChildPk,\n\t\tp.ChannelState)\n}", "func (*PacketBrokerRoutingPolicyUplink) Descriptor() ([]byte, []int) {\n\treturn file_ttn_lorawan_v3_packetbrokeragent_proto_rawDescGZIP(), []int{9}\n}", "func (o *GenericSorting) 
WithFlavor(flavor sql.DbFlavor) *GenericSorting {\n\to.Flavor = flavor\n\treturn o\n}", "func (m *BusinessScenarioPlanner) SetPlanConfiguration(value PlannerPlanConfigurationable)() {\n err := m.GetBackingStore().Set(\"planConfiguration\", value)\n if err != nil {\n panic(err)\n }\n}", "func AddIndependentPropertyGeneratorsForOriginGroupOverride(gens map[string]gopter.Gen) {\n\tgens[\"ForwardingProtocol\"] = gen.PtrOf(gen.AlphaString())\n}", "func (g *Generation) ApplySurvivorSelection(outgoingParents []Individual,\n\tchildren []Individual) ([]Individual, error) {\n\n\tswitch g.engine.Parameters.Selection.Survivor.Type {\n\tcase SurvivorSelectionHalfAndHalf:\n\t\treturn HalfAndHalfSurvivorSelection(outgoingParents, children, g.engine.Parameters.Selection.Survivor.SurvivorPercentage, g.engine.Parameters.EachPopulationSize)\n\tcase SurvivorSelectionParentVsChild:\n\t\treturn ParentVsChildSurvivorSelection(outgoingParents, children, g.engine.Parameters)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"invalid survivor selection selected\")\n\t}\n}", "func (p *Peer) negotiateOutboundProtocol() error {\n\tif err := p.writeLocalVersionMsg(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := p.readRemoteVersionMsg(); err != nil {\n\t\treturn err\n\t}\n\n\tvar protoVersion uint32\n\tp.flagsMtx.Lock()\n\tprotoVersion = p.protocolVersion\n\tp.flagsMtx.Unlock()\n\n\tif err := p.writeSendAddrV2Msg(protoVersion); err != nil {\n\t\treturn err\n\t}\n\n\terr := p.writeMessage(wire.NewMsgVerAck(), wire.LatestEncoding)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Finish the negotiation by waiting for negotiable messages or verack.\n\treturn p.waitToFinishNegotiation(protoVersion)\n}", "func (pp *PathProcessor) SetChainProviderIfApplicable(chainProvider provider.ChainProvider) bool {\n\tif chainProvider == nil {\n\t\treturn false\n\t}\n\tif pp.pathEnd1.info.ChainID == chainProvider.ChainId() {\n\t\tpp.pathEnd1.chainProvider = chainProvider\n\n\t\tif pp.isLocalhost 
{\n\t\t\tpp.pathEnd2.chainProvider = chainProvider\n\t\t}\n\n\t\treturn true\n\t} else if pp.pathEnd2.info.ChainID == chainProvider.ChainId() {\n\t\tpp.pathEnd2.chainProvider = chainProvider\n\n\t\tif pp.isLocalhost {\n\t\t\tpp.pathEnd1.chainProvider = chainProvider\n\t\t}\n\n\t\treturn true\n\t}\n\treturn false\n}", "func NewCustomTraversal(str string) (g String) {\n\tg.string = str\n\tg.buffer = bytes.NewBufferString(\"\")\n\treturn g\n}", "func (w *Writer) SetTransferSyntax(bo binary.ByteOrder, implicit bool) {\n\tw.bo = bo\n\tw.implicit = implicit\n}", "func NewTraverser(config *config.WeaviateConfig, locks locks,\n\tlogger logrus.FieldLogger, authorizer authorizer,\n\tvectorSearcher VectorSearcher,\n\texplorer explorer, schemaGetter schema.SchemaGetter) *Traverser {\n\treturn &Traverser{\n\t\tconfig: config,\n\t\tlocks: locks,\n\t\tlogger: logger,\n\t\tauthorizer: authorizer,\n\t\tvectorSearcher: vectorSearcher,\n\t\texplorer: explorer,\n\t\tschemaGetter: schemaGetter,\n\t}\n}", "func (m *VirtualEndpoint) SetExternalPartnerSettings(value []CloudPcExternalPartnerSettingable)() {\n err := m.GetBackingStore().Set(\"externalPartnerSettings\", value)\n if err != nil {\n panic(err)\n }\n}", "func (v SetInterceptFileChooserDialogParams) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC5a4559bEncodeGithubComChromedpCdprotoPage11(w, v)\n}", "func (p *Proxy) SetProtoProxy(proxy *proto.Proxy) { p.protoProxy = proxy }", "func (d *RPCFactory) CreateGRPCDispatcherForOutbound(\n\tcallerName string,\n\tserviceName string,\n\thostName string,\n) (*yarpc.Dispatcher, error) {\n\treturn d.createOutboundDispatcher(callerName, serviceName, hostName, d.grpc.NewSingleOutbound(hostName))\n}", "func (prf *proof) prover(p Predicate, sval map[string]abstract.Scalar,\n\tpval map[string]abstract.Point,\n\tchoice map[Predicate]int) Prover {\n\n\treturn Prover(func(ctx ProverContext) error {\n\t\treturn prf.prove(p, sval, pval, choice, ctx)\n\t})\n}", "func (t *Link) AppendPreviewLink(v 
LinkType) {\n\tt.preview = append(t.preview, &previewIntermediateType{Link: v})\n\n}", "func WithTracePropagation() Option {\n\treturn func(i interface{}) error {\n\t\treturn nil\n\t}\n}", "func (w *Walker) DescendentsWith(predicate Predicate) *Walker {\n\tif w == nil {\n\t\treturn nil\n\t}\n\tif predicate == nil {\n\t\tw.pipe.errors <- ErrInvalidFilter\n\t} else {\n\t\terr := w.appendFilterForTask(descendentsWith, predicate, 5) // need a helper queue\n\t\tif err != nil { // this should never happen here\n\t\t\tT().Errorf(err.Error())\n\t\t\tpanic(err) // for debugging as long as this is unstable\n\t\t}\n\t}\n\treturn w\n}", "func WithIntentGraphProcessor(s intentGraphProcessor) Option {\n\treturn func(o *options) {\n\t\to.intentGraph = s\n\t}\n}", "func (m *RemoteAssistancePartner) SetOnboardingUrl(value *string)() {\n m.onboardingUrl = value\n}", "func EncodeProtoDescriptorSource(value string) EncodeProtoAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"descriptor_source\"] = value\n\t}\n}", "func WithTracerProvider(provider trace.TracerProvider) Option {\n\treturn func(p *otelPlugin) {\n\t\tp.provider = provider\n\t}\n}", "func ProtoToOsconfigOSPolicyAssignmentInstanceFilter(p *osconfigpb.OsconfigOSPolicyAssignmentInstanceFilter) *osconfig.OSPolicyAssignmentInstanceFilter {\n\tif p == nil {\n\t\treturn nil\n\t}\n\tobj := &osconfig.OSPolicyAssignmentInstanceFilter{\n\t\tAll: dcl.Bool(p.GetAll()),\n\t}\n\tfor _, r := range p.GetInclusionLabels() {\n\t\tobj.InclusionLabels = append(obj.InclusionLabels, *ProtoToOsconfigOSPolicyAssignmentInstanceFilterInclusionLabels(r))\n\t}\n\tfor _, r := range p.GetExclusionLabels() {\n\t\tobj.ExclusionLabels = append(obj.ExclusionLabels, *ProtoToOsconfigOSPolicyAssignmentInstanceFilterExclusionLabels(r))\n\t}\n\tfor _, r := range p.GetInventories() {\n\t\tobj.Inventories = append(obj.Inventories, *ProtoToOsconfigOSPolicyAssignmentInstanceFilterInventories(r))\n\t}\n\treturn obj\n}", "func compileTraversal(\n\tfs *Bcpfs,\n\tshares 
Exports,\n\treals RealExports,\n) RealExports {\n\ttravs := make([]ExportEntry, 0, len(reals)*5)\n\ttravsByRealpath := make(map[string]int)\n\n\t// Allow directory traversal along realpaths.\n\tfor _, r := range reals {\n\t\tacl := NewTraversalAclWithGroups(r.Acl.Groups())\n\t\tparts := strings.Split(r.Path, \"/\")\n\n\t\t// Do not add `--x` to `<srvdir>/<srv>` but only to subdirs, so\n\t\t// that srv group membership is required to access realpath.\n\t\tbegin := 2\n\t\tif fs.IsServiceRealpath(r.Path) {\n\t\t\tbegin = 3\n\t\t}\n\t\tfor i := begin; i < len(parts); i++ {\n\t\t\tpath := slashpath.Join(parts[:i]...)\n\t\t\tif idx, ok := travsByRealpath[path]; ok {\n\t\t\t\t// Update existing.\n\t\t\t\ttravs[idx].Acl = travs[idx].Acl.Union(acl)\n\t\t\t} else {\n\t\t\t\t// Append new.\n\t\t\t\ttravsByRealpath[path] = len(travs)\n\t\t\t\ttravs = append(travs, ExportEntry{\n\t\t\t\t\tPath: path,\n\t\t\t\t\tAcl: acl,\n\t\t\t\t\tManagingGroups: r.ManagingGroups,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\t// Allow traversal of ou toplevel directories to reach symlinks.\n\tfor _, shr := range shares {\n\t\tacl := NewTraversalAclWithGroups(shr.Acl.Groups())\n\t\tou := strings.Split(shr.Path, \"/\")[0]\n\t\tpath := slashpath.Join(fs.OrgUnitDir, ou)\n\t\tif idx, ok := travsByRealpath[path]; ok {\n\t\t\t// Update existing.\n\t\t\ttravs[idx].Acl = travs[idx].Acl.Union(acl)\n\t\t} else {\n\t\t\t// Append new.\n\t\t\ttravsByRealpath[path] = len(travs)\n\t\t\ttravs = append(travs, ExportEntry{\n\t\t\t\tPath: path,\n\t\t\t\tAcl: acl,\n\t\t\t})\n\t\t}\n\t}\n\n\treturn travs\n}", "func NewWithPrinter(h *human.Printer) *Printer {\n\treturn &Printer{Printer: h}\n}", "func (d *RPCFactory) CreateDispatcherForOutbound(\n\tcallerName string,\n\tserviceName string,\n\thostName string,\n) (*yarpc.Dispatcher, error) {\n\treturn d.createOutboundDispatcher(callerName, serviceName, hostName, d.ch.NewSingleOutbound(hostName))\n}", "func (local *Node) traverseBackpointers(neighbors []RemoteNode, level int) 
(err error) {\n\tif level >= 0 {\n\t\t// copy neighbors set as basis for nextNeighbors\n\t\tnextNeighbors := make([]RemoteNode, 0)\n\n\t\tfor _, neighbor := range neighbors {\n\t\t\t// for each neighbor, grab all backpointers it has\n\t\t\t// that exist at this level in that neighbor's routing table\n\t\t\t// (also pass our node so that that node can add us to it's routing table)\n\t\t\tbackpointers, err := neighbor.GetBackpointersRPC(local.node, level)\n\t\t\tif err != nil {\n\t\t\t\t// continue to try and add more backpointers if we get an err,\n\t\t\t\t// but return error if nothing else occurs\n\t\t\t\tError.Printf(\"Unreachable node notice: %v unreachable while traversing backpointers for %v\", neighbor, local)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// append ALL the backpointers, as a set\n\t\t\tnextNeighbors = InsertWithoutDuplicates(nextNeighbors, backpointers)\n\t\t}\n\n\t\t// add all to our routing table\n\t\tfor _, neighbor := range nextNeighbors {\n\t\t\t//Debug.Printf(\"Added %v to %v via backpointer traversal\", neighbor, local.node)\n\t\t\tlocal.addRoute(neighbor)\n\t\t}\n\n\t\t// trim the list of neighbors to K (they're already sorted such\n\t\t// that the best (closest) K are at the front, and duplicates have\n\t\t// been removed (i.e. 
ignored when adding)\n\t\tnextNeighbors = local.SortListByCloseness(nextNeighbors)\n\t\tif len(nextNeighbors) > K {\n\t\t\tnextNeighbors = nextNeighbors[:K]\n\t\t}\n\n\t\t// move on to the next level, using our updated set of neighbors as a starting point\n\t\terr = local.traverseBackpointers(nextNeighbors, level-1)\n\t}\n\n\treturn\n}", "func AddRelatedPropertyGeneratorsForVirtualNetworks_VirtualNetworkPeering_Spec(gens map[string]gopter.Gen) {\n\tgens[\"RemoteAddressSpace\"] = gen.PtrOf(AddressSpaceGenerator())\n\tgens[\"RemoteBgpCommunities\"] = gen.PtrOf(VirtualNetworkBgpCommunitiesGenerator())\n\tgens[\"RemoteVirtualNetwork\"] = gen.PtrOf(SubResourceGenerator())\n}", "func (gui *Gui) setUpstreamToBranch(g *gocui.Gui, v *gocui.View) error {\n\tmaxX, maxY := g.Size()\n\n\te := gui.getSelectedRepository()\n\tv, err := g.SetView(confirmationViewFeature.Name, maxX/2-30, maxY/2-2, maxX/2+30, maxY/2+2)\n\tif err != nil {\n\t\tif err != gocui.ErrUnknownView {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintln(v, \"branch.\"+e.Branch.Name+\".\"+\"remote\"+\"=\"+e.Remote.Name)\n\t\tfmt.Fprintln(v, \"branch.\"+e.Branch.Name+\".\"+\"merge\"+\"=\"+e.Branch.Reference.Name().String())\n\t}\n\treturn gui.focusToView(confirmationViewFeature.Name)\n}", "func AddIndependentPropertyGeneratorsForSshPublicKeySpec(gens map[string]gopter.Gen) {\n\tgens[\"KeyData\"] = gen.PtrOf(gen.AlphaString())\n\tgens[\"Path\"] = gen.PtrOf(gen.AlphaString())\n}", "func WithCount(count int) Option {\n\treturn option{\n\t\ttable: func(enc *TableEncoder) error {\n\t\t\tenc.count = count\n\t\t\treturn nil\n\t\t},\n\t\texpanded: func(enc *ExpandedEncoder) error {\n\t\t\tenc.count = count\n\t\t\treturn nil\n\t\t},\n\t}\n}", "func (t *DesktopTracer) TraceConfiguration(ctx context.Context) (*service.DeviceTraceConfiguration, error) {\n\tapis := make([]*service.TraceTypeCapabilities, 0, 1)\n\tif len(t.b.Instance().GetConfiguration().GetDrivers().GetVulkan().GetPhysicalDevices()) > 0 {\n\t\tapis = append(apis, 
tracer.VulkanTraceOptions())\n\t}\n\n\tpreferredRoot, err := t.b.GetWorkingDirectory(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tisLocal, err := t.b.IsLocal(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif t.b.SupportsPerfetto(ctx) {\n\t\tapis = append(apis, tracer.PerfettoTraceOptions())\n\t}\n\n\treturn &service.DeviceTraceConfiguration{\n\t\tApis: apis,\n\t\tServerLocalPath: isLocal,\n\t\tCanSpecifyCwd: true,\n\t\tCanUploadApplication: false,\n\t\tCanSpecifyEnv: true,\n\t\tPreferredRootUri: preferredRoot,\n\t\tHasCache: false,\n\t}, nil\n}", "func AddRelatedPropertyGeneratorsForServerfarm_Spec(gens map[string]gopter.Gen) {\n\tgens[\"ExtendedLocation\"] = gen.PtrOf(ExtendedLocationGenerator())\n\tgens[\"HostingEnvironmentProfile\"] = gen.PtrOf(HostingEnvironmentProfileGenerator())\n\tgens[\"KubeEnvironmentProfile\"] = gen.PtrOf(KubeEnvironmentProfileGenerator())\n\tgens[\"Sku\"] = gen.PtrOf(SkuDescriptionGenerator())\n}", "func (t *TablePrinter) PrintObjWithKind(kind string, obj interface{}, writer io.Writer) error {\n\titemsValue := reflect.ValueOf(obj)\n\tif itemsValue.Kind() != reflect.Slice {\n\t\treturn errors.Errorf(\"table printer expects a slice but the kind was %v\", itemsValue.Kind())\n\t}\n\n\tif itemsValue.Len() == 0 {\n\t\tw := bufio.NewWriter(writer)\n\t\tif _, err := w.WriteString(fmt.Sprintf(\"No %s found\\n\", strings.ToLower(kind))); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn w.Flush()\n\t}\n\n\treturn t.table.Render(obj, writer, t.columnames...)\n}", "func (m *PrinterCreateOperation) SetPrinter(value Printerable)() {\n m.printer = value\n}", "func Proto(p Protocol) Option {\n\treturn func(o *Options) {\n\t\to.Protocol = p\n\t}\n}", "func WithSeparator(sep rune) Option {\n\treturn option{\n\t\tunaligned: func(enc *UnalignedEncoder) error {\n\t\t\tenc.sep = sep\n\t\t\treturn nil\n\t\t},\n\t}\n}", "func WithTracerProvider(provider trace.TracerProvider) Option {\n\treturn func(cfg *config) {\n\t\tcfg.TracerProvider = 
provider\n\t}\n}", "func (m *PrinterCreateOperation) SetPrinter(value Printerable)() {\n err := m.GetBackingStore().Set(\"printer\", value)\n if err != nil {\n panic(err)\n }\n}", "func (self *TraitPixbuf) SaveToCallbackv(save_func C.GdkPixbufSaveFunc, user_data unsafe.Pointer, type_ string, option_keys []string, option_values []string) (return__ bool, __err__ error) {\n\t__cgo__type_ := C.CString(type_)\n\t__header__option_keys := (*reflect.SliceHeader)(unsafe.Pointer(&option_keys))\n\t__header__option_values := (*reflect.SliceHeader)(unsafe.Pointer(&option_values))\n\tvar __cgo_error__ *C.GError\n\tvar __cgo__return__ C.gboolean\n\t__cgo__return__ = C.gdk_pixbuf_save_to_callbackv(self.CPointer, save_func, (C.gpointer)(user_data), __cgo__type_, (**C.char)(unsafe.Pointer(__header__option_keys.Data)), (**C.char)(unsafe.Pointer(__header__option_values.Data)), &__cgo_error__)\n\tC.free(unsafe.Pointer(__cgo__type_))\n\treturn__ = __cgo__return__ == C.gboolean(1)\n\tif __cgo_error__ != nil {\n\t\t__err__ = errors.New(C.GoString((*C.char)(unsafe.Pointer(__cgo_error__.message))))\n\t}\n\treturn\n}" ]
[ "0.39877933", "0.39078513", "0.38892928", "0.374556", "0.37302673", "0.36932385", "0.35834628", "0.33725303", "0.336618", "0.33358172", "0.33044896", "0.32975402", "0.32719445", "0.32489514", "0.32471502", "0.32446983", "0.32418078", "0.3223724", "0.32211462", "0.3219564", "0.31985813", "0.31830966", "0.31760514", "0.31588054", "0.31440192", "0.3141801", "0.30836493", "0.30783927", "0.30701286", "0.306409", "0.30614546", "0.3061356", "0.30604845", "0.30558637", "0.30522615", "0.30431074", "0.3038779", "0.30383423", "0.30345133", "0.30331722", "0.30304193", "0.3024443", "0.30201948", "0.3014918", "0.3009031", "0.30077714", "0.30028412", "0.3002485", "0.2988408", "0.2982713", "0.2980789", "0.2973464", "0.29722822", "0.29675153", "0.29651174", "0.2960937", "0.2960134", "0.29594392", "0.2953175", "0.2952642", "0.29522535", "0.2949601", "0.2948989", "0.29449996", "0.29408625", "0.29389837", "0.29282105", "0.29272437", "0.29197353", "0.29152918", "0.2908451", "0.29057407", "0.2902107", "0.29001072", "0.28938612", "0.28936002", "0.28916973", "0.28898177", "0.2882875", "0.2882114", "0.28814328", "0.2878907", "0.2875215", "0.28685427", "0.2862765", "0.2862166", "0.2861266", "0.28551176", "0.28550905", "0.28549674", "0.2854254", "0.28509104", "0.28503555", "0.28488305", "0.2845224", "0.28386188", "0.2837562", "0.2834852", "0.28341997", "0.28318638" ]
0.7862286
0
WithTrustedCAR specifies whether CIDs match the block data as they are read from the CAR files.
func WithTrustedCAR(t bool) Option { return func(o *Options) { o.TrustedCAR = t } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func isSpecTrustedCASet(proxyConfig *configv1.ProxySpec) bool {\n\treturn len(proxyConfig.TrustedCA.Name) > 0\n}", "func WithTrusted(trusted bool) Option {\n\treturn func(linter *Linter) {\n\t\tlinter.trusted = trusted\n\t}\n}", "func (_Casper *CasperTransactor) SetTrusted(opts *bind.TransactOpts, addr common.Address) (*types.Transaction, error) {\n\treturn _Casper.contract.Transact(opts, \"setTrusted\", addr)\n}", "func RequireTrusted(req bool) Opt {\n\treturn func(p *params) { p.requireTrust = req }\n}", "func (_Casper *CasperSession) SetTrusted(addr common.Address) (*types.Transaction, error) {\n\treturn _Casper.Contract.SetTrusted(&_Casper.TransactOpts, addr)\n}", "func (_Casper *CasperTransactorSession) SetTrusted(addr common.Address) (*types.Transaction, error) {\n\treturn _Casper.Contract.SetTrusted(&_Casper.TransactOpts, addr)\n}", "func TrustedOrigins(origins []string) Option {\n\treturn func(cs *csrf) {\n\t\tcs.opts.TrustedOrigins = origins\n\t}\n}", "func findTrustedCerts(cfg *Config, objects []*Object) ([]*x509.Certificate, error) {\n\tvar out []*x509.Certificate\n\n\tcerts := filterObjectsByClass(objects, \"CKO_CERTIFICATE\")\n\ttrusts := filterObjectsByClass(objects, \"CKO_NSS_TRUST\")\n\n\tfor _, cert := range certs {\n\t\tderBytes := cert.attrs[\"CKA_VALUE\"].value\n\t\thash := sha1.New()\n\t\thash.Write(derBytes)\n\t\tdigest := hash.Sum(nil)\n\n\t\tx509, err := x509.ParseCertificate(derBytes)\n\t\tif err != nil {\n\t\t\t// This is known to occur because of a broken certificate in NSS.\n\t\t\t// https://bugzilla.mozilla.org/show_bug.cgi?id=707995\n\t\t\tcontinue\n\t\t}\n\n\t\t// TODO(agl): wtc tells me that Mozilla might get rid of the\n\t\t// SHA1 records in the future and use issuer and serial number\n\t\t// to match trust records to certificates (which is what NSS\n\t\t// currently uses). 
This needs some changes to the crypto/x509\n\t\t// package to keep the raw names around.\n\n\t\tvar trust *Object\n\t\tfor _, possibleTrust := range trusts {\n\t\t\tif bytes.Equal(digest, possibleTrust.attrs[\"CKA_CERT_SHA1_HASH\"].value) {\n\t\t\t\ttrust = possibleTrust\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\ttrustType := trust.attrs[\"CKA_TRUST_SERVER_AUTH\"].value\n\n\t\tvar trusted bool\n\t\tswitch string(trustType) {\n\t\tcase \"CKT_NSS_NOT_TRUSTED\":\n\t\t\t// An explicitly distrusted cert\n\t\t\ttrusted = false\n\t\tcase \"CKT_NSS_TRUSTED_DELEGATOR\":\n\t\t\t// A cert trusted for issuing SSL server certs.\n\t\t\ttrusted = true\n\t\tcase \"CKT_NSS_TRUST_UNKNOWN\", \"CKT_NSS_MUST_VERIFY_TRUST\":\n\t\t\t// A cert not trusted for issuing SSL server certs, but is trusted for other purposes.\n\t\t\ttrusted = false\n\t\t}\n\n\t\tif !trusted && !cfg.IncludedUntrustedFlag {\n\t\t\tcontinue\n\t\t}\n\n\t\tout = append(out, x509)\n\t}\n\n\treturn out, nil\n}", "func (_Casper *CasperCaller) Trusted(opts *bind.CallOpts) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _Casper.contract.Call(opts, out, \"trusted\")\n\treturn *ret0, err\n}", "func newLightClientAttackEvidence(conflicted, trusted, common *types.LightBlock) *types.LightClientAttackEvidence {\n\tev := &types.LightClientAttackEvidence{ConflictingBlock: conflicted}\n\t// if this is an equivocation or amnesia attack, i.e. 
the validator sets are the same, then we\n\t// return the height of the conflicting block else if it is a lunatic attack and the validator sets\n\t// are not the same then we send the height of the common header.\n\tif ev.ConflictingHeaderIsInvalid(trusted.Header) {\n\t\tev.CommonHeight = common.Height\n\t\tev.Timestamp = common.Time\n\t\tev.TotalVotingPower = common.ValidatorSet.TotalVotingPower()\n\t} else {\n\t\tev.CommonHeight = trusted.Height\n\t\tev.Timestamp = trusted.Time\n\t\tev.TotalVotingPower = trusted.ValidatorSet.TotalVotingPower()\n\t}\n\tev.ByzantineValidators = ev.GetByzantineValidators(common.ValidatorSet, trusted.SignedHeader)\n\treturn ev\n}", "func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, trustedHeader *types.SignedHeader,\n\tcommonVals *types.ValidatorSet, now time.Time, trustPeriod time.Duration) error {\n\t// In the case of lunatic attack there will be a different commonHeader height. Therefore the node perform a single\n\t// verification jump between the common header and the conflicting one\n\tif commonHeader.Height != e.ConflictingBlock.Height {\n\t\terr := commonVals.VerifyCommitLightTrusting(trustedHeader.ChainID, e.ConflictingBlock.Commit, light.DefaultTrustLevel)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"skipping verification of conflicting block failed: %w\", err)\n\t\t}\n\n\t\t// In the case of equivocation and amnesia we expect all header hashes to be correctly derived\n\t} else if e.ConflictingHeaderIsInvalid(trustedHeader.Header) {\n\t\treturn errors.New(\"common height is the same as conflicting block height so expected the conflicting\" +\n\t\t\t\" block to be correctly derived yet it wasn't\")\n\t}\n\n\t// Verify that the 2/3+ commits from the conflicting validator set were for the conflicting header\n\tif err := e.ConflictingBlock.ValidatorSet.VerifyCommitLight(trustedHeader.ChainID, e.ConflictingBlock.Commit.BlockID,\n\t\te.ConflictingBlock.Height, e.ConflictingBlock.Commit); err != nil 
{\n\t\treturn fmt.Errorf(\"invalid commit from conflicting block: %w\", err)\n\t}\n\n\t// Assert the correct amount of voting power of the validator set\n\tif evTotal, valsTotal := e.TotalVotingPower, commonVals.TotalVotingPower(); evTotal != valsTotal {\n\t\treturn fmt.Errorf(\"total voting power from the evidence and our validator set does not match (%d != %d)\",\n\t\t\tevTotal, valsTotal)\n\t}\n\n\t// check in the case of a forward lunatic attack that monotonically increasing time has been violated\n\tif e.ConflictingBlock.Height > trustedHeader.Height && e.ConflictingBlock.Time.After(trustedHeader.Time) {\n\t\treturn fmt.Errorf(\"conflicting block doesn't violate monotonically increasing time (%v is after %v)\",\n\t\t\te.ConflictingBlock.Time, trustedHeader.Time,\n\t\t)\n\n\t\t// In all other cases check that the hashes of the conflicting header and the trusted header are different\n\t} else if bytes.Equal(trustedHeader.Hash(), e.ConflictingBlock.Hash()) {\n\t\treturn fmt.Errorf(\"trusted header hash matches the evidence's conflicting header hash: %X\",\n\t\t\ttrustedHeader.Hash())\n\t}\n\n\treturn validateABCIEvidence(e, commonVals, trustedHeader)\n}", "func ExampleMicroStellar_AllowTrust() {\n\t// Create a new MicroStellar client connected to a fake network. 
To\n\t// use a real network replace \"fake\" below with \"test\" or \"public\".\n\tms := New(\"fake\")\n\n\t// Custom USD asset issued by specified issuer.\n\tUSD := NewAsset(\"USD\", \"GAIUIQNMSXTTR4TGZETSQCGBTIF32G2L5P4AML4LFTMTHKM44UHIN6XQ\", Credit4Type)\n\n\t// Issuer sets AUTH_REQUIRED flag on account.\n\terr := ms.SetFlags(\"SDPLQEABOETMI7PPKJZYBHHW2BSA3424CI3V5ZRNN3NP2H7KYQOKY5ST\", FlagAuthRequired)\n\tif err != nil {\n\t\tlog.Fatalf(\"SetFlags: %v\", ErrorString(err))\n\t}\n\n\t// Customer creates a trustline to the custom asset with no limit.\n\terr = ms.CreateTrustLine(\"SCSMBQYTXKZYY7CLVT6NPPYWVDQYDOQ6BB3QND4OIXC7762JYJYZ3RMK\", USD, \"\")\n\tif err != nil {\n\t\tlog.Fatalf(\"CreateTrustLine: %v\", err)\n\t}\n\n\t// Issuer then authorizes the trustline that was just created.\n\terr = ms.AllowTrust(\"SDPLQEABOETMI7PPKJZYBHHW2BSA3424CI3V5ZRNN3NP2H7KYQOKY5ST\",\n\t\t\"GAIUIQNMSXTTR4TGZETSQCGBTIF32G2L5P4AML4LFTMTHKM44UHIN6XQ\", \"USD\", true)\n\tif err != nil {\n\t\tlog.Fatalf(\"AllowTrust: %v\", err)\n\t}\n\n\tfmt.Printf(\"ok\")\n\t// Output: ok\n}", "func (c *ClientWithResponses) GetaspecificTrustedSourceWithResponse(ctx context.Context, id string) (*GetaspecificTrustedSourceResponse, error) {\n\trsp, err := c.GetaspecificTrustedSource(ctx, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseGetaspecificTrustedSourceResponse(rsp)\n}", "func (c *ClientWithResponses) ChangeaspecificTrustedSourceWithBodyWithResponse(ctx context.Context, id string, contentType string, body io.Reader) (*ChangeaspecificTrustedSourceResponse, error) {\n\trsp, err := c.ChangeaspecificTrustedSourceWithBody(ctx, id, contentType, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseChangeaspecificTrustedSourceResponse(rsp)\n}", "func runERC20Lock(ctx *action.Context, tx action.RawTx) (bool, action.Response) {\n\terc20lock := &ERC20Lock{}\n\n\terr := erc20lock.Unmarshal(tx.Data)\n\tif err != nil {\n\t\tctx.Logger.Error(\"wrong tx type\", err)\n\t\treturn 
false, action.Response{Log: \"wrong tx type\"}\n\t}\n\n\tethTx, err := ethchaindriver.DecodeTransaction(erc20lock.ETHTxn)\n\tif err != nil {\n\t\tctx.Logger.Error(\"decode eth txn err\", err)\n\t\treturn false, action.Response{\n\t\t\tLog: \"decode eth txn error\" + err.Error(),\n\t\t}\n\t}\n\n\tethOptions, err := ctx.GovernanceStore.GetETHChainDriverOption()\n\tif err != nil {\n\t\treturn helpers.LogAndReturnFalse(ctx.Logger, gov.ErrGetEthOptions, erc20lock.Tags(), err)\n\t}\n\ttoken, err := ethchaindriver.GetToken(ethOptions.TokenList, *ethTx.To())\n\tif err != nil {\n\t\treturn false, action.Response{\n\t\t\tLog: err.Error(),\n\t\t}\n\t}\n\n\tok, err := ethchaindriver.VerfiyERC20Lock(erc20lock.ETHTxn, token.TokAbi, ethOptions.ERCContractAddress)\n\tif err != nil {\n\t\tctx.Logger.Error(\"Unable to verify ERC LOCK transaction\")\n\t\treturn false, action.Response{\n\t\t\tLog: \"Unable to verify transaction\" + err.Error(),\n\t\t}\n\t}\n\n\tif !ok {\n\t\tctx.Logger.Error(\"To field of Transaction does not match OneLedger Contract Address\")\n\t\treturn false, action.Response{\n\t\t\tLog: \"To field of Transaction does not match OneLedger Contract Address\" + err.Error(),\n\t\t}\n\t}\n\n\twitnesses, err := ctx.Witnesses.GetWitnessAddresses(chain.ETHEREUM)\n\tif err != nil {\n\t\tctx.Logger.Error(\"err in getting witness address\", err)\n\t\treturn false, action.Response{Log: \"error in getting validator addresses\" + err.Error()}\n\t}\n\n\tcurr, ok := ctx.Currencies.GetCurrencyByName(token.TokName)\n\tif !ok {\n\t\treturn false, action.Response{Log: fmt.Sprintf(\"Token not Supported : %s \", token.TokName)}\n\t}\n\n\terc20Params, err := ethchaindriver.ParseErc20Lock(ethOptions.TokenList, erc20lock.ETHTxn)\n\tif err != nil {\n\t\treturn false, action.Response{\n\t\t\tLog: err.Error(),\n\t\t}\n\t}\n\n\tlockToken := curr.NewCoinFromString(erc20Params.TokenAmount.String())\n\t// Adding lock amount to common address to maintain count of total oToken 
minted\n\ttokenSupply := action.Address(ethOptions.TotalSupplyAddr)\n\n\tbalCoin, err := ctx.Balances.GetBalanceForCurr(tokenSupply, &curr)\n\tif err != nil {\n\t\treturn false, action.Response{Log: fmt.Sprintf(\"Unable to get Eth lock total balance %s\", erc20lock.Locker)}\n\t}\n\n\ttotalSupplyToken := curr.NewCoinFromString(token.TokTotalSupply)\n\tif !balCoin.Plus(lockToken).LessThanEqualCoin(totalSupplyToken) {\n\t\treturn false, action.Response{Log: fmt.Sprintf(\"Token lock exceeded limit ,for Token : %s \", token.TokName)}\n\t}\n\n\ttracker := ethereum.NewTracker(\n\t\tethereum.ProcessTypeLockERC,\n\t\terc20lock.Locker,\n\t\terc20lock.ETHTxn,\n\t\tethcommon.BytesToHash(erc20lock.ETHTxn),\n\t\twitnesses,\n\t)\n\n\terr = ctx.ETHTrackers.WithPrefixType(ethereum.PrefixOngoing).Set(tracker)\n\tif err != nil {\n\t\tctx.Logger.Error(\"error saving eth tracker\", err)\n\t\treturn false, action.Response{Log: \"error saving eth tracker: \" + err.Error()}\n\t}\n\n\treturn true, action.Response{\n\t\tEvents: action.GetEvent(erc20lock.Tags(), \"erc20_lock\"),\n\t}\n}", "func TrustedTag(tag string) bool {\n\tif tag == core.DNS || tag == core.CERT || tag == core.ARCHIVE || tag == core.AXFR {\n\t\treturn true\n\t}\n\treturn false\n}", "func withCar(node *Car) carOption {\n\treturn func(m *CarMutation) {\n\t\tm.oldValue = func(context.Context) (*Car, error) {\n\t\t\treturn node, nil\n\t\t}\n\t\tm.id = &node.ID\n\t}\n}", "func withCar(node *Car) carOption {\n\treturn func(m *CarMutation) {\n\t\tm.oldValue = func(context.Context) (*Car, error) {\n\t\t\treturn node, nil\n\t\t}\n\t\tm.id = &node.ID\n\t}\n}", "func DoAllowTrust(gClient gasservice.GasServiceClient, trustorAccountAddress string, assetCode string, issuerAddress string,\n\tauthorize bool, cClient crypto_client.CryptoServiceClient) (error, string) {\n\n\tLOGGER.Debugf(\"DoAllowTrust\")\n\tif GetAssetType(assetCode) != model.AssetAssetTypeDO && GetAssetType(assetCode) != model.AssetAssetTypeDA {\n\t\tmsg := \"Asset code 
is not DO nor DA: \" + assetCode\n\t\tLOGGER.Error(msg)\n\t\treturn errors.New(\"1201\"), msg\n\t}\n\n\tLOGGER.Debugf(\"DoAllowTrust Transaction issuingAccount =%v, assetCode =%v, trustorAddr=%v, authorize=%v\",\n\t\tissuerAddress, assetCode, trustorAccountAddress, authorize)\n\n\t//check AccountFlags\n\tif !isAccountFlagsValid(issuerAddress) {\n\t\tmsg := \"Issuing account's flags is not valid: \" + issuerAddress\n\t\tLOGGER.Errorf(msg)\n\t\treturn errors.New(\"1211\"), msg\n\t}\n\n\t/*\n\t\tSubmit a AllowTrust operation with authorize=boolean\n\t*/\n\n\tstellarNetwork := comn.GetStellarNetwork(os.Getenv(global_environment.ENV_KEY_STELLAR_NETWORK))\n\n\t//Get IBM gas account\n\tibmAccount, sequenceNum, err := gClient.GetAccountAndSequence()\n\n\ttx, err := b.Transaction(\n\t\tb.SourceAccount{AddressOrSeed: ibmAccount},\n\t\tstellarNetwork,\n\t\tb.Sequence{Sequence: sequenceNum},\n\t\t//\tSubmit a AllowTrust operation with authorize=boolean\n\t\tb.AllowTrust(\n\t\t\tb.SourceAccount{AddressOrSeed: issuerAddress},\n\t\t\tb.Trustor{Address: trustorAccountAddress},\n\t\t\tb.AllowTrustAsset{Code: assetCode},\n\t\t\tb.Authorize{Value: authorize}),\n\t)\n\tif err != nil {\n\t\tmsg := \"Error while allowing trust: \" + err.Error()\n\t\tLOGGER.Error(msg)\n\t\treturn errors.New(\"1222\"), msg\n\t}\n\tvar txe b.TransactionEnvelopeBuilder\n\terr = txe.Mutate(tx)\n\n\tif err != nil {\n\t\tmsg := \"Error during building Mutate\"\n\t\tLOGGER.Error(msg)\n\t\treturn errors.New(\"1208\"), msg\n\t}\n\n\ttxeB64, err := txe.Base64()\n\t//TBD: will have to integrate with gas service\n\txdrB, _ := base64.StdEncoding.DecodeString(txeB64)\n\n\t//Get signed by issuing account on crypto service\n\tsigXdr, errorMsg, status, _ := cClient.ParticipantSignXdr(comn.ISSUING, xdrB)\n\n\tif status != http.StatusCreated {\n\t\tLOGGER.Errorf(\"Error creating allow trust %v\", errorMsg.Error())\n\t\treturn errors.New(\"1208\"), errorMsg.Error()\n\t}\n\tLOGGER.Debugf(\"signed transaction: %v\", 
base64.StdEncoding.EncodeToString(sigXdr))\n\n\tif errorMsg != nil {\n\t\tmsg := \"Signing trust went through. Error during encoding\"\n\t\tLOGGER.Error(msg)\n\t\treturn errors.New(\"1209\"), msg\n\t}\n\n\ttxeB64 = base64.StdEncoding.EncodeToString(sigXdr)\n\n\t//Post to gas service\n\thash, ledger, err := gClient.SubmitTxe(txeB64)\n\tif err != nil {\n\t\tLOGGER.Warningf(\"AllowTrust failed gas service error... %v \", err.Error())\n\t\treturn err, \"AllowTrust failed:\" + err.Error()\n\t}\n\tLOGGER.Debugf(\"Hash:%v, Ledger:%v\", hash, ledger)\n\n\tmsg := \"Transaction posted in ledger: \" + hash\n\treturn nil, msg\n}", "func (t *osCinderCSITranslator) CanSupportInline(volume *v1.Volume) bool {\n\treturn volume != nil && volume.Cinder != nil\n}", "func HasCarWith(preds ...predicate.Car) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(CarInverseTable, CarFieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.O2M, false, CarTable, CarColumn),\n\t\t)\n\t\tsqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {\n\t\t\tfor _, p := range preds {\n\t\t\t\tp(s)\n\t\t\t}\n\t\t})\n\t})\n}", "func HasCarWith(preds ...predicate.Car) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(CarInverseTable, CarFieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.O2M, false, CarTable, CarColumn),\n\t\t)\n\t\tsqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {\n\t\t\tfor _, p := range preds {\n\t\t\t\tp(s)\n\t\t\t}\n\t\t})\n\t})\n}", "func bindSafeERC20(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(SafeERC20ABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func 
bindSafeERC20(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(SafeERC20ABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func bindSafeERC20(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(SafeERC20ABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func validateClientCertificate(certificate *x509.Certificate, trustedCertsFile string,\n\tsuppressCertificateTimeInvalid, suppressCertificateChainIncomplete bool) (bool, error) {\n\tif certificate == nil {\n\t\treturn false, ua.BadCertificateInvalid\n\t}\n\tvar intermediates, roots *x509.CertPool\n\tif buf, err := os.ReadFile(trustedCertsFile); err == nil {\n\t\tfor len(buf) > 0 {\n\t\t\tvar block *pem.Block\n\t\t\tblock, buf = pem.Decode(buf)\n\t\t\tif block == nil {\n\t\t\t\t// maybe its der\n\t\t\t\tcert, err := x509.ParseCertificate(buf)\n\t\t\t\tif err == nil {\n\t\t\t\t\t// is self-signed?\n\t\t\t\t\tif bytes.Equal(cert.RawIssuer, cert.RawSubject) {\n\t\t\t\t\t\tif roots == nil {\n\t\t\t\t\t\t\troots = x509.NewCertPool()\n\t\t\t\t\t\t}\n\t\t\t\t\t\troots.AddCert(cert)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tif intermediates == nil {\n\t\t\t\t\t\t\tintermediates = x509.NewCertPool()\n\t\t\t\t\t\t}\n\t\t\t\t\t\tintermediates.AddCert(cert)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif block.Type != \"CERTIFICATE\" || len(block.Headers) != 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcert, err := x509.ParseCertificate(block.Bytes)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// is self-signed?\n\t\t\tif bytes.Equal(cert.RawIssuer, 
cert.RawSubject) {\n\t\t\t\tif roots == nil {\n\t\t\t\t\troots = x509.NewCertPool()\n\t\t\t\t}\n\t\t\t\troots.AddCert(cert)\n\t\t\t} else {\n\t\t\t\tif intermediates == nil {\n\t\t\t\t\tintermediates = x509.NewCertPool()\n\t\t\t\t}\n\t\t\t\tintermediates.AddCert(cert)\n\t\t\t}\n\t\t}\n\t}\n\n\topts := x509.VerifyOptions{\n\t\tIntermediates: intermediates,\n\t\tRoots: roots,\n\t\tKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},\n\t}\n\n\tif suppressCertificateTimeInvalid {\n\t\topts.CurrentTime = certificate.NotAfter // causes test to pass\n\t}\n\n\tif suppressCertificateChainIncomplete {\n\t\tif opts.Roots == nil {\n\t\t\topts.Roots = x509.NewCertPool()\n\t\t}\n\t\topts.Roots.AddCert(certificate)\n\t}\n\n\t// build chain and verify\n\tif _, err := certificate.Verify(opts); err != nil {\n\t\tswitch se := err.(type) {\n\t\tcase x509.CertificateInvalidError:\n\t\t\tswitch se.Reason {\n\t\t\tcase x509.Expired:\n\t\t\t\treturn false, ua.BadCertificateTimeInvalid\n\t\t\tcase x509.IncompatibleUsage:\n\t\t\t\treturn false, ua.BadCertificateUseNotAllowed\n\t\t\tdefault:\n\t\t\t\treturn false, ua.BadSecurityChecksFailed\n\t\t\t}\n\t\tcase x509.UnknownAuthorityError:\n\t\t\treturn false, ua.BadSecurityChecksFailed\n\t\tdefault:\n\t\t\treturn false, ua.BadSecurityChecksFailed\n\t\t}\n\t}\n\treturn true, nil\n}", "func withCarInspection(node *CarInspection) carinspectionOption {\n\treturn func(m *CarInspectionMutation) {\n\t\tm.oldValue = func(context.Context) (*CarInspection, error) {\n\t\t\treturn node, nil\n\t\t}\n\t\tm.id = &node.ID\n\t}\n}", "func (_Casper *CasperSession) Trusted() (common.Address, error) {\n\treturn _Casper.Contract.Trusted(&_Casper.CallOpts)\n}", "func bindClinic(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ClinicABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn 
bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func AllowTrustDomainWorkload(trustDomain string) ValidationMode {\n\treturn validationMode{\n\t\toptions: validationOptions{\n\t\t\ttrustDomain: trustDomain,\n\t\t\ttrustDomainRequired: true,\n\t\t\tidType: workloadId,\n\t\t},\n\t}\n}", "func CarmaintenanceContainsFold(v string) predicate.CarRepairrecord {\n\treturn predicate.CarRepairrecord(func(s *sql.Selector) {\n\t\ts.Where(sql.ContainsFold(s.C(FieldCarmaintenance), v))\n\t})\n}", "func (o FluxConfigurationBlobStorageServicePrincipalOutput) ClientCertificateSendChain() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v FluxConfigurationBlobStorageServicePrincipal) *bool { return v.ClientCertificateSendChain }).(pulumi.BoolPtrOutput)\n}", "func WithTrustForwardHeader(enable bool) Option {\n\treturn func(o *Options) {\n\t\to.TrustForwardHeader = enable\n\t}\n}", "func (uc *UserCreate) AddCarIDs(ids ...string) *UserCreate {\n\tuc.mutation.AddCarIDs(ids...)\n\treturn uc\n}", "func (_Casper *CasperCallerSession) Trusted() (common.Address, error) {\n\treturn _Casper.Contract.Trusted(&_Casper.CallOpts)\n}", "func withCarID(id int) carOption {\n\treturn func(m *CarMutation) {\n\t\tvar (\n\t\t\terr error\n\t\t\tonce sync.Once\n\t\t\tvalue *Car\n\t\t)\n\t\tm.oldValue = func(ctx context.Context) (*Car, error) {\n\t\t\tonce.Do(func() {\n\t\t\t\tif m.done {\n\t\t\t\t\terr = fmt.Errorf(\"querying old values post mutation is not allowed\")\n\t\t\t\t} else {\n\t\t\t\t\tvalue, err = m.Client().Car.Get(ctx, id)\n\t\t\t\t}\n\t\t\t})\n\t\t\treturn value, err\n\t\t}\n\t\tm.id = &id\n\t}\n}", "func withCarID(id int) carOption {\n\treturn func(m *CarMutation) {\n\t\tvar (\n\t\t\terr error\n\t\t\tonce sync.Once\n\t\t\tvalue *Car\n\t\t)\n\t\tm.oldValue = func(ctx context.Context) (*Car, error) {\n\t\t\tonce.Do(func() {\n\t\t\t\tif m.done {\n\t\t\t\t\terr = fmt.Errorf(\"querying old values post mutation is not allowed\")\n\t\t\t\t} else {\n\t\t\t\t\tvalue, 
err = m.Client().Car.Get(ctx, id)\n\t\t\t\t}\n\t\t\t})\n\t\t\treturn value, err\n\t\t}\n\t\tm.id = &id\n\t}\n}", "func (c *ClientWithResponses) GetTrustedSourceitemsWithResponse(ctx context.Context, params *GetTrustedSourceitemsParams) (*GetTrustedSourceitemsResponse, error) {\n\trsp, err := c.GetTrustedSourceitems(ctx, params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseGetTrustedSourceitemsResponse(rsp)\n}", "func TrustAsset(hclient *equator.Client, seed, code, issuer string) error {\n\ttx, err := b.Transaction(\n\t\tb.SourceAccount{AddressOrSeed: seed},\n\t\tb.TestNetwork,\n\t\tb.AutoSequence{SequenceProvider: hclient},\n\t\tb.Trust(code, issuer),\n\t)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"building tx\")\n\t}\n\t_, err = SignAndSubmitTx(hclient, tx, seed)\n\treturn err\n}", "func NewRawCardSignerWithCtx(pointer unsafe.Pointer) *RawCardSigner {\n\tctx := (*C.vssc_raw_card_signer_t /*ct2*/)(pointer)\n\tobj := &RawCardSigner{\n\t\tcCtx: ctx,\n\t}\n\truntime.SetFinalizer(obj, (*RawCardSigner).Delete)\n\treturn obj\n}", "func WithTlsCaCert(with string) wrapping.Option {\n\treturn func() interface{} {\n\t\treturn OptionFunc(func(o *options) error {\n\t\t\to.withTlsCaCert = with\n\t\t\treturn nil\n\t\t})\n\t}\n}", "func (r *Resolver) Trusted() []int {\n\tm := r.Modules[THETAFD].(*ThetafdModule)\n\treturn m.Trusted()\n}", "func (c *Conn) EnableTrustedSchema(b bool) (bool, error) {\n\tif C.SQLITE_VERSION_NUMBER < 3031000 {\n\t\t// SQLITE_DBCONFIG_TRUSTED_SCHEMA was added in SQLite 3.31.0:\n\t\t// https://github.com/sqlite/sqlite/commit/b77da374ab6dfeaac5def640da91f219da7fa5c0\n\t\treturn false, errors.New(\"SQLITE_DBCONFIG_TRUSTED_SCHEMA isn't present in the called SQLite library\")\n\t}\n\treturn c.queryOrSetEnableDbConfig(C.SQLITE_DBCONFIG_TRUSTED_SCHEMA, btocint(b))\n}", "func (cic *CarInspectionCreate) AddCarrepairrecordIDs(ids ...int) *CarInspectionCreate {\n\tcic.mutation.AddCarrepairrecordIDs(ids...)\n\treturn cic\n}", "func (c *ca) 
TrustAnchors() []byte {\n\treturn c.bundle.TrustAnchors\n}", "func (m *MockConfig) TrustedCAFile() string {\n\targs := m.Called()\n\treturn args.String(0)\n}", "func (ftyp FileTypeBox) IsCR3() bool {\n\treturn ftyp.MajorBrand == brandCrx\n}", "func (mp *TxPool) verifyCRRelatedTx(txn *Transaction) ErrCode {\n\tswitch txn.TxType {\n\tcase RegisterCR:\n\t\tp, ok := txn.Payload.(*payload.CRInfo)\n\t\tif !ok {\n\t\t\tlog.Error(\"register CR payload cast failed, tx:\", txn.Hash())\n\t\t\treturn ErrCRProcessing\n\t\t}\n\t\tif err := mp.verifyDuplicateCRAndProducer(p.CID, p.Code, p.NickName); err != nil {\n\t\t\tlog.Warn(err)\n\t\t\treturn ErrCRProcessing\n\t\t}\n\tcase UpdateCR:\n\t\tp, ok := txn.Payload.(*payload.CRInfo)\n\t\tif !ok {\n\t\t\tlog.Error(\"update CR payload cast failed, tx:\", txn.Hash())\n\t\t\treturn ErrCRProcessing\n\t\t}\n\t\tif err := mp.verifyDuplicateCRAndNickname(p.CID, p.NickName); err != nil {\n\t\t\tlog.Warn(err)\n\t\t\treturn ErrCRProcessing\n\t\t}\n\tcase UnregisterCR:\n\t\tp, ok := txn.Payload.(*payload.UnregisterCR)\n\t\tif !ok {\n\t\t\tlog.Error(\"update producer payload cast failed, tx:\", txn.Hash())\n\t\t\treturn ErrCRProcessing\n\t\t}\n\t\tif err := mp.verifyDuplicateCR(p.CID); err != nil {\n\t\t\tlog.Warn(err)\n\t\t\treturn ErrCRProcessing\n\t\t}\n\tcase ReturnCRDepositCoin:\n\t\terr := mp.verifyDuplicateCode(BytesToHexString(txn.Programs[0].Code))\n\t\tif err != nil {\n\t\t\tlog.Warn(err)\n\t\t\treturn ErrCRProcessing\n\t\t}\n\t}\n\n\treturn Success\n}", "func NeedsLicense(kind string) bool {\n\treturn kind == \"car\" || kind == \"truck\"\n}", "func (_Privileges *PrivilegesTransactor) SetTrusted(opts *bind.TransactOpts, addr common.Address) (*types.Transaction, error) {\n\treturn _Privileges.contract.Transact(opts, \"setTrusted\", addr)\n}", "func (k *Keeper) SetCiphertext(ctx sdk.Context, ctShare *types.CiphertextShare) sdk.Error {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Println(\"PANIC:\", 
r)\n\t\t}\n\t}()\n\tif ctShare.EntropyProvider.Empty() {\n\t\treturn sdk.ErrInvalidAddress(\"entropy provider can't be empty!\")\n\t}\n\tround := k.CurrentRound(ctx)\n\tstage := k.GetStage(ctx, round)\n\tpubKey, err1 := k.GetCommonPublicKey(ctx)\n\tif err1 != nil {\n\t\treturn err1\n\t}\n\terr := elgamal.CEVerify(P256, k.group.Point().Base(), pubKey, ctShare.Ciphertext.PointA, ctShare.Ciphertext.PointB, ctShare.CEproof)\n\tif err != nil {\n\t\treturn sdk.ErrUnknownRequest(fmt.Sprintf(\"CE proof isn't correct: %v\", err))\n\t}\n\n\tif k.CurrentRound(ctx) == 0 && stage == stageUnstarted {\n\t\terr1 = k.InitializeVerificationKeys(ctx)\n\t\tif err1 != nil {\n\t\t\treturn err1\n\t\t}\n\t\tstage = stageCtCollecting\n\t\tk.setStage(ctx, round, stage)\n\t}\n\n\tif stage != stageCtCollecting {\n\t\treturn sdk.ErrUnknownRequest(fmt.Sprintf(\"round is not on the ciphertext collecting stage. Current stage: %v\", stage))\n\t}\n\tctStore := ctx.KVStore(k.storeCiphertextSharesKey)\n\tkeyBytesAllCt := []byte(fmt.Sprintf(\"rd_%d\", round))\n\tt, err1 := k.GetThresholdCiphertexts(ctx)\n\tif err1 != nil {\n\t\treturn err1\n\t}\n\n\tvar addrList []string\n\tif ctStore.Has(keyBytesAllCt) {\n\t\taddrListBytes := ctStore.Get(keyBytesAllCt)\n\t\terr := k.cdc.UnmarshalJSON(addrListBytes, &addrList)\n\t\tif err != nil {\n\t\t\treturn sdk.ErrUnknownRequest(fmt.Sprintf(\"can't unmarshal list of all addresses from the store: %v\", err))\n\t\t}\n\t}\n\taddrList = append(addrList, ctShare.EntropyProvider.String())\n\tnewAddrListBytes, err := k.cdc.MarshalJSON(addrList)\n\tif err != nil {\n\t\treturn sdk.ErrUnknownRequest(fmt.Sprintf(\"can't marshal list of all addresses: %v\", err))\n\t}\n\n\tkeyBytesCt := createKeyBytesByAddr(round, ctShare.EntropyProvider)\n\tif ctStore.Has(keyBytesCt) {\n\t\treturn sdk.ErrInvalidAddress(\"entropy provider has already sentf ciphertext share\")\n\t}\n\tctJSON, err := types.NewCiphertextShareJSON(ctShare)\n\tif err != nil {\n\t\treturn 
sdk.ErrUnknownRequest(fmt.Sprintf(\"can't serialize ctShare: %v\", err))\n\t}\n\tctBytes, err := k.cdc.MarshalJSON(ctJSON)\n\tif err != nil {\n\t\treturn sdk.ErrUnknownRequest(fmt.Sprintf(\"can't marshall ctShare: %v\", err))\n\t}\n\taggregatedCt, err1 := k.GetAggregatedCiphertext(ctx, round)\n\tif err1 != nil {\n\t\treturn err1\n\t}\n\tvar newAggregatedCt elgamal.Ciphertext\n\tif aggregatedCt == nil {\n\t\tnewAggregatedCt = ctShare.Ciphertext\n\t} else {\n\t\tnewAggregatedCt = elgamal.AggregateCiphertext(P256, []elgamal.Ciphertext{ctShare.Ciphertext, *aggregatedCt})\n\t}\n\terr1 = k.SetAggregatedCiphertext(ctx, round, &newAggregatedCt)\n\tif err1 != nil {\n\t\treturn err1\n\t}\n\tctStore.Set(keyBytesCt, ctBytes)\n\tctStore.Set(keyBytesAllCt, newAddrListBytes)\n\n\tif uint64(len(addrList)) >= t {\n\t\tk.setStage(ctx, round, stageDSCollecting)\n\t}\n\treturn nil\n}", "func (d *DataEncryptor) EncryptWithClientID(clientID, data []byte, setting config.ColumnEncryptionSetting) ([]byte, error) {\n\tif d.needSkipEncryptionFunc(setting) {\n\t\treturn data, nil\n\t}\n\t// skip already encrypted AcraBlock\n\tif _, _, err := ExtractAcraBlockFromData(data); err == nil {\n\t\treturn data, nil\n\t}\n\tif setting.ShouldReEncryptAcraStructToAcraBlock() {\n\t\t// decrypt AcraStruct to encrypt it with AcraBlock\n\t\tif err := acrastruct.ValidateAcraStructLength(data); err == nil {\n\t\t\tdataContext := base.NewDataProcessorContext(d.keyStore)\n\t\t\taccessContext := base.NewAccessContext(base.WithClientID(clientID))\n\t\t\tdataContext.Context = base.SetAccessContextToContext(context.Background(), accessContext)\n\t\t\tdecrypted, err := base.DecryptProcessor{}.Process(data, dataContext)\n\t\t\tif err != nil {\n\t\t\t\treturn data, err\n\t\t\t}\n\t\t\tdata = decrypted\n\t\t}\n\t}\n\tkeys, err := d.keyStore.GetClientIDSymmetricKey(clientID)\n\tif err != nil {\n\t\treturn data, err\n\t}\n\treturn CreateAcraBlock(data, keys, nil)\n}", "func withCarRepairrecord(node *CarRepairrecord) 
carrepairrecordOption {\n\treturn func(m *CarRepairrecordMutation) {\n\t\tm.oldValue = func(context.Context) (*CarRepairrecord, error) {\n\t\t\treturn node, nil\n\t\t}\n\t\tm.id = &node.ID\n\t}\n}", "func (cfg *Config) TrustedCACerts() []string {\n\tcerts := make([]string, 0, len(cfg.TrustedCAs))\n\tfor _, ca := range cfg.TrustedCAs {\n\t\tcerts = append(certs, ca.Cert)\n\t}\n\treturn certs\n}", "func (c *ClientWithResponses) ReplacechangeaspecificTrustedSourceWithBodyWithResponse(ctx context.Context, id string, contentType string, body io.Reader) (*ReplacechangeaspecificTrustedSourceResponse, error) {\n\trsp, err := c.ReplacechangeaspecificTrustedSourceWithBody(ctx, id, contentType, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseReplacechangeaspecificTrustedSourceResponse(rsp)\n}", "func (o *PublicRemoveTrustedDeviceV4Params) WithContext(ctx context.Context) *PublicRemoveTrustedDeviceV4Params {\n\to.SetContext(ctx)\n\treturn o\n}", "func (uu *UserUpdate) AddCarrepairrecordIDs(ids ...int) *UserUpdate {\n\tuu.mutation.AddCarrepairrecordIDs(ids...)\n\treturn uu\n}", "func bindSmartchef(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(SmartchefABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func trustedOriginExists(d *schema.ResourceData, m interface{}) (bool, error) {\n\tclient := m.(*Config).articulateOktaClient\n\t_, _, err := client.TrustedOrigins.GetTrustedOrigin(d.Id())\n\n\tif client.OktaErrorCode == \"E0000007\" {\n\t\treturn false, nil\n\t}\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"[ERROR] Error Getting Trusted Origin in Okta: %v\", err)\n\t}\n\treturn true, nil\n}", "func (o *PublicRemoveTrustedDeviceV4Params) WithTimeout(timeout time.Duration) *PublicRemoveTrustedDeviceV4Params 
{\n\to.SetTimeout(timeout)\n\treturn o\n}", "func HasCarinspectionWith(preds ...predicate.CarInspection) predicate.CarRepairrecord {\n\treturn predicate.CarRepairrecord(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(CarinspectionInverseTable, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2O, true, CarinspectionTable, CarinspectionColumn),\n\t\t)\n\t\tsqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {\n\t\t\tfor _, p := range preds {\n\t\t\t\tp(s)\n\t\t\t}\n\t\t})\n\t})\n}", "func (c *Client) CarsDriven(custID *string) (*CarsDriven, *http.Response, error) {\n\tv := url.Values{\"custid\": {*custID}}\n\tcarsDriven := &CarsDriven{}\n\tresp, err := c.do(URLPathCarsDriven, &v, carsDriven)\n\treturn carsDriven, resp, err\n}", "func VerifyOnChain(msg []byte, signature []byte, signers EthAddresses,\n) (types.OracleID, error) {\n\tauthor, err := crypto.SigToPub(onChainHash(msg), signature)\n\tif err != nil {\n\t\treturn types.OracleID(-1), errors.Wrapf(err, \"while trying to recover \"+\n\t\t\t\"sender from sig %x on msg %+v\", signature, msg)\n\t}\n\toid, ok := signers[(*OnChainPublicKey)(author).Address()]\n\tif ok {\n\t\treturn oid, nil\n\t} else {\n\t\treturn types.OracleID(-1), errors.Errorf(\"signer is not on whitelist\")\n\t}\n}", "func AllowTrustDomain(trustDomain string) ValidationMode {\n\treturn validationMode{\n\t\toptions: validationOptions{\n\t\t\ttrustDomain: trustDomain,\n\t\t\ttrustDomainRequired: true,\n\t\t\tidType: trustDomainId,\n\t\t},\n\t}\n}", "func (o FluxConfigurationBlobStorageServicePrincipalPtrOutput) ClientCertificateSendChain() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *FluxConfigurationBlobStorageServicePrincipal) *bool {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ClientCertificateSendChain\n\t}).(pulumi.BoolPtrOutput)\n}", "func WithACRVerifier(verifier oidc.ACRVerifier) VerifierOption {\n\treturn func(v *idTokenVerifier) {\n\t\tv.acr = 
verifier\n\t}\n}", "func (tracker *PeerTracker) listTrusted() []*types.ChainInfo {\n\ttracker.mu.Lock()\n\tdefer tracker.mu.Unlock()\n\n\tvar tracked []*types.ChainInfo\n\tfor p, ci := range tracker.peers {\n\t\tif _, trusted := tracker.trusted[p]; trusted {\n\t\t\ttracked = append(tracked, ci)\n\t\t}\n\t}\n\tout := make([]*types.ChainInfo, len(tracked))\n\tcopy(out, tracked)\n\treturn out\n}", "func (s *Setting) IsTrusted(adr string) bool {\n\tok := false\n\tfor _, t := range s.TrustedNodes {\n\t\tif t == adr {\n\t\t\tok = true\n\t\t}\n\t}\n\treturn ok\n}", "func (drc *DummyRegistryClient) BecomeFoolishlyTrusting() {}", "func (s *SmartContract) ChangeCarOwner(ctx contractapi.TransactionContextInterface, carNumber string, newOwner string) error {\n\tcar, err := s.QueryCar(ctx, carNumber)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcar.Owner = newOwner\n\n\tcarAsBytes, _ := json.Marshal(car)\n\n\treturn ctx.GetStub().PutState(carNumber, carAsBytes)\n}", "func (_Privileges *PrivilegesCaller) Trusted(opts *bind.CallOpts) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _Privileges.contract.Call(opts, out, \"trusted\")\n\treturn *ret0, err\n}", "func (o *Operation) SecuredWith(name string, scopes ...string) *Operation {\n\to.Operation.SecuredWith(name, scopes...)\n\treturn o\n}", "func ClientIDContainsFold(v string) predicate.DeviceRequest {\n\treturn predicate.DeviceRequest(sql.FieldContainsFold(FieldClientID, v))\n}", "func (uq *UserQuery) WithCars(opts ...func(*CarQuery)) *UserQuery {\n\tquery := &CarQuery{config: uq.config}\n\tfor _, opt := range opts {\n\t\topt(query)\n\t}\n\tuq.withCars = query\n\treturn uq\n}", "func (o ServiceSecurityOutput) TlsRsaWithAes256CbcSha256CiphersEnabled() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v ServiceSecurity) *bool { return v.TlsRsaWithAes256CbcSha256CiphersEnabled }).(pulumi.BoolPtrOutput)\n}", "func bindMainnetCryptoCardsContract(address common.Address, caller 
bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(MainnetCryptoCardsContractABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func CompareCar(w http.ResponseWriter, r *http.Request) {\n\tallVehicles := services.GetAllVehicle()\n\tids := make(map[string]uint, 0)\n\n\tfor _, vehicle := range allVehicles {\n\t\tids[\"car\"+strconv.Itoa(int(vehicle.ID))] = vehicle.ID\n\t}\n\n\tfmt.Println(ids)\n\n\tcusttpl.ExecuteTemplate(w, \"compareCar.html\", struct {\n\t\tAllVehicles []model.Vehicle\n\t\tIds map[string]uint\n\t}{allVehicles, ids})\n}", "func discoverTrident() (installed bool, err error) {\n\tif installed, _, err = isCSITridentInstalled(); err != nil {\n\t\terr = fmt.Errorf(\"could not check if CSI Trident is installed; %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}", "func (rc *RentalCreate) SetCar(c *Car) *RentalCreate {\n\treturn rc.SetCarID(c.ID)\n}", "func IsOwner(tree *dag.Dag, blockWithHeaders *chaintree.BlockWithHeaders) (bool, chaintree.CodedError) {\n\tctx := context.TODO()\n\tid, _, err := tree.Resolve(context.TODO(), []string{\"id\"})\n\tif err != nil {\n\t\treturn false, &consensus.ErrorCode{Memo: fmt.Sprintf(\"error: %v\", err), Code: consensus.ErrUnknown}\n\t}\n\n\theaders := &consensus.StandardHeaders{}\n\n\terr = typecaster.ToType(blockWithHeaders.Headers, headers)\n\tif err != nil {\n\t\treturn false, &consensus.ErrorCode{Memo: fmt.Sprintf(\"error: %v\", err), Code: consensus.ErrUnknown}\n\t}\n\n\tvar addrs []string\n\n\tuncastAuths, _, err := tree.Resolve(context.TODO(), strings.Split(\"tree/\"+consensus.TreePathForAuthentications, \"/\"))\n\tif err != nil {\n\t\treturn false, &consensus.ErrorCode{Code: consensus.ErrUnknown, Memo: fmt.Sprintf(\"err resolving: %v\", err)}\n\t}\n\t// If there are no authentications then the Chain Tree is still owned 
by its genesis key\n\tif uncastAuths == nil {\n\t\taddrs = []string{consensus.DidToAddr(id.(string))}\n\t} else {\n\t\terr = typecaster.ToType(uncastAuths, &addrs)\n\t\tif err != nil {\n\t\t\treturn false, &consensus.ErrorCode{Code: consensus.ErrUnknown, Memo: fmt.Sprintf(\"err casting: %v\", err)}\n\t\t}\n\t}\n\n\tfor _, addr := range addrs {\n\t\tisSigned, err := consensus.IsBlockSignedBy(ctx, blockWithHeaders, addr)\n\t\tif err != nil {\n\t\t\treturn false, &consensus.ErrorCode{Memo: fmt.Sprintf(\"error finding if signed: %v\", err), Code: consensus.ErrUnknown}\n\t\t}\n\n\t\tif isSigned {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}", "func (i Identifiable) AsCivicStructure() (*CivicStructure, bool) {\n\treturn nil, false\n}", "func WithCORS(cfg CORSConfig) Middleware {\n\treturn func(next Handler) Handler {\n\t\treturn func(ctx context.Context, req Request) (interface{}, error) {\n\t\t\tallowMethods := strings.Join(cfg.AllowMethods, \",\")\n\t\t\tallowHeaders := strings.Join(cfg.AllowHeaders, \",\")\n\n\t\t\thttpReq := req.HTTPRequest()\n\t\t\torigin := httpReq.Header.Get(HeaderOrigin)\n\t\t\tallowOrigin := getAllowOrigin(origin, cfg)\n\t\t\theader := ResponseHeaderFromCtx(ctx)\n\n\t\t\t// non-OPTIONS requests\n\t\t\tif httpReq.Method != http.MethodOptions {\n\t\t\t\theader.Add(HeaderVary, HeaderOrigin)\n\t\t\t\theader.Set(HeaderAccessControlAllowOrigin, allowOrigin)\n\t\t\t\tif cfg.AllowCredentials {\n\t\t\t\t\theader.Set(HeaderAccessControlAllowCredentials, \"true\")\n\t\t\t\t}\n\t\t\t\treturn next(ctx, req)\n\t\t\t}\n\n\t\t\t// Preflight requests\n\t\t\theader.Add(HeaderVary, HeaderOrigin)\n\t\t\theader.Add(HeaderVary, HeaderAccessControlRequestMethod)\n\t\t\theader.Add(HeaderVary, HeaderAccessControlRequestHeaders)\n\t\t\theader.Set(HeaderAccessControlAllowOrigin, allowOrigin)\n\t\t\theader.Set(HeaderAccessControlAllowMethods, allowMethods)\n\t\t\tif cfg.AllowCredentials {\n\t\t\t\theader.Set(HeaderAccessControlAllowCredentials, 
\"true\")\n\t\t\t}\n\t\t\tif allowHeaders != \"\" {\n\t\t\t\theader.Set(HeaderAccessControlAllowHeaders, allowHeaders)\n\t\t\t} else {\n\t\t\t\th := httpReq.Header.Get(HeaderAccessControlRequestHeaders)\n\t\t\t\tif h != \"\" {\n\t\t\t\t\theader.Set(HeaderAccessControlAllowHeaders, h)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif cfg.MaxAge > 0 {\n\t\t\t\theader.Set(HeaderAccessControlMaxAge, strconv.Itoa(cfg.MaxAge))\n\t\t\t}\n\n\t\t\treturn nil, nil\n\t\t}\n\t}\n}", "func (o KubernetesClusterHttpProxyConfigOutput) TrustedCa() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v KubernetesClusterHttpProxyConfig) *string { return v.TrustedCa }).(pulumi.StringPtrOutput)\n}", "func (o *OIDCIDVerifier) Verify(ctx context.Context, rawIDToken string) (*Claims, error) {\n\tidToken, err := o.Verifier.Verify(ctx, rawIDToken)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to verify rawIDToken\")\n\t}\n\tvar claims Claims\n\tif err = idToken.Claims(&claims); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &claims, nil\n}", "func WithTLSConfig(tlsConfig *tls.Config) Option {\n\treturn func(opts *VDRI) {\n\t\topts.client.Transport = &http.Transport{\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t}\n\t}\n}", "func WithChoriaConfig(bi BuildInfoProvider, c *config.Config) Option {\n\tcfg := Config{\n\t\tAllowList: c.Choria.CertnameAllowList,\n\t\tCA: c.Choria.FileSecurityCA,\n\t\tCertificate: c.Choria.FileSecurityCertificate,\n\t\tKey: c.Choria.FileSecurityKey,\n\t\tDisableTLSVerify: c.DisableTLSVerify,\n\t\tPrivilegedUsers: c.Choria.PrivilegedUsers,\n\t\tIdentity: c.Identity,\n\t\tRemoteSignerURL: c.Choria.RemoteSignerURL,\n\t\tRemoteSignerTokenFile: c.Choria.RemoteSignerTokenFile,\n\t\tRemoteSignerSeedFile: c.Choria.RemoteSignerTokenSeedFile,\n\t\tTLSConfig: tlssetup.TLSConfig(c),\n\t\tBackwardCompatVerification: c.Choria.SecurityAllowLegacyCerts,\n\t\tIdentitySuffix: bi.ClientIdentitySuffix(),\n\t}\n\n\tif cfg.IdentitySuffix == \"\" {\n\t\tcfg.IdentitySuffix = 
\"mcollective\"\n\t}\n\n\tif cn, ok := os.LookupEnv(\"MCOLLECTIVE_CERTNAME\"); ok {\n\t\tc.OverrideCertname = cn\n\t}\n\n\tif c.OverrideCertname != \"\" {\n\t\tcfg.Identity = c.OverrideCertname\n\t} else if !(runtimeOs() == \"windows\" || uid() == 0) {\n\t\tif u, ok := os.LookupEnv(\"USER\"); ok {\n\t\t\tcfg.Identity = fmt.Sprintf(\"%s.%s\", u, cfg.IdentitySuffix)\n\t\t}\n\t}\n\n\treturn WithConfig(&cfg)\n}", "func (_Privileges *PrivilegesTransactorSession) SetTrusted(addr common.Address) (*types.Transaction, error) {\n\treturn _Privileges.Contract.SetTrusted(&_Privileges.TransactOpts, addr)\n}", "func VerifyClientCertificateChain(verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {\n\treturn func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {\n\t\t// Offload verification to S2Av2.\n\t\tif grpclog.V(1) {\n\t\t\tgrpclog.Infof(\"Sending request to S2Av2 for client peer cert chain validation.\")\n\t\t}\n\t\tif err := s2AStream.Send(&s2av2pb.SessionReq{\n\t\t\tReqOneof: &s2av2pb.SessionReq_ValidatePeerCertificateChainReq{\n\t\t\t\tValidatePeerCertificateChainReq: &s2av2pb.ValidatePeerCertificateChainReq{\n\t\t\t\t\tMode: verificationMode,\n\t\t\t\t\tPeerOneof: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer_{\n\t\t\t\t\t\tClientPeer: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer{\n\t\t\t\t\t\t\tCertificateChain: rawCerts,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}); err != nil {\n\t\t\tgrpclog.Infof(\"Failed to send request to S2Av2 for client peer cert chain validation.\")\n\t\t\treturn err\n\t\t}\n\n\t\t// Get the response from S2Av2.\n\t\tresp, err := s2AStream.Recv()\n\t\tif err != nil {\n\t\t\tgrpclog.Infof(\"Failed to receive client peer cert chain validation response from S2Av2.\")\n\t\t\treturn err\n\t\t}\n\n\t\t// Parse the response.\n\t\tif (resp.GetStatus() != nil) && (resp.GetStatus().Code != 
uint32(codes.OK)) {\n\t\t\treturn fmt.Errorf(\"failed to offload client cert verification to S2A: %d, %v\", resp.GetStatus().Code, resp.GetStatus().Details)\n\n\t\t}\n\n\t\tif resp.GetValidatePeerCertificateChainResp().ValidationResult != s2av2pb.ValidatePeerCertificateChainResp_SUCCESS {\n\t\t\treturn fmt.Errorf(\"client cert verification failed: %v\", resp.GetValidatePeerCertificateChainResp().ValidationDetails)\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func (uu *UserUpdate) AddCarinspectionIDs(ids ...int) *UserUpdate {\n\tuu.mutation.AddCarinspectionIDs(ids...)\n\treturn uu\n}", "func CORSMiddleware(devMode bool, allowedOrigins []string) gin.HandlerFunc {\n\tcorsConfig := cors.DefaultConfig()\n\tif devMode {\n\t\tcorsConfig.AllowAllOrigins = true\n\t\tcorsConfig.AllowCredentials = false\n\t} else if !devMode && len(allowedOrigins) == 0 {\n\t\t// not dev mode, and no specified origins\n\t\t// so we should allow all\n\t\tcorsConfig.AllowAllOrigins = true\n\t\tcorsConfig.AllowOrigins = nil\n\t} else {\n\t\t// configure allowed origins\n\t\tcorsConfig.AllowOrigins = allowedOrigins\n\t}\n\t// allow the DELETE method, allowed methods are now\n\t// DELETE GET POST PUT HEAD\n\tcorsConfig.AddAllowMethods(\"DELETE\")\n\tcorsConfig.AddAllowHeaders(\"cache-control\", \"Authorization\", \"Content-Type\", \"X-Request-ID\")\n\treturn cors.New(corsConfig)\n}", "func InjectTrustedContext(ctx context.Context, t HeaderTrustHandler, r *http.Request) context.Context {\n\tif t.TrustEdgeContext(r) {\n\t\tctx = SetHeader(ctx, EdgeContextContextKey, r.Header.Get(EdgeContextHeader))\n\t}\n\n\tif t.TrustSpan(r) {\n\t\tfor k, v := range map[HeaderContextKey]string{\n\t\t\tTraceIDContextKey: r.Header.Get(TraceIDHeader),\n\t\t\tParentIDContextKey: r.Header.Get(ParentIDHeader),\n\t\t\tSpanIDContextKey: r.Header.Get(SpanIDHeader),\n\t\t\tSpanFlagsContextKey: r.Header.Get(SpanFlagsHeader),\n\t\t\tSpanSampledContextKey: r.Header.Get(SpanSampledHeader),\n\t\t} {\n\t\t\tctx = SetHeader(ctx, k, 
v)\n\t\t}\n\t}\n\n\treturn ctx\n}", "func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.LightBlock, now time.Time) error {\n\tif primaryTrace == nil || len(primaryTrace) < 2 {\n\t\treturn errors.New(\"nil or single block primary trace\")\n\t}\n\tvar (\n\t\theaderMatched bool\n\t\tlastVerifiedHeader = primaryTrace[len(primaryTrace)-1].SignedHeader\n\t\twitnessesToRemove = make([]int, 0)\n\t)\n\tc.logger.Debug(\"Running detector against trace\", \"endBlockHeight\", lastVerifiedHeader.Height,\n\t\t\"endBlockHash\", lastVerifiedHeader.Hash, \"length\", len(primaryTrace))\n\n\tc.providerMutex.Lock()\n\tdefer c.providerMutex.Unlock()\n\n\tif len(c.witnesses) == 0 {\n\t\treturn ErrNoWitnesses\n\t}\n\n\t// launch one goroutine per witness to retrieve the light block of the target height\n\t// and compare it with the header from the primary\n\terrc := make(chan error, len(c.witnesses))\n\tfor i, witness := range c.witnesses {\n\t\tgo c.compareNewHeaderWithWitness(ctx, errc, lastVerifiedHeader, witness, i)\n\t}\n\n\t// handle errors from the header comparisons as they come in\n\tfor i := 0; i < cap(errc); i++ {\n\t\terr := <-errc\n\n\t\tswitch e := err.(type) {\n\t\tcase nil: // at least one header matched\n\t\t\theaderMatched = true\n\t\tcase errConflictingHeaders:\n\t\t\t// We have conflicting headers. 
This could possibly imply an attack on the light client.\n\t\t\t// First we need to verify the witness's header using the same skipping verification and then we\n\t\t\t// need to find the point that the headers diverge and examine this for any evidence of an attack.\n\t\t\t//\n\t\t\t// We combine these actions together, verifying the witnesses headers and outputting the trace\n\t\t\t// which captures the bifurcation point and if successful provides the information to create valid evidence.\n\t\t\terr := c.handleConflictingHeaders(ctx, primaryTrace, e.Block, e.WitnessIndex, now)\n\t\t\tif err != nil {\n\t\t\t\t// return information of the attack\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// if attempt to generate conflicting headers failed then remove witness\n\t\t\twitnessesToRemove = append(witnessesToRemove, e.WitnessIndex)\n\n\t\tcase errBadWitness:\n\t\t\t// these are all melevolent errors and should result in removing the\n\t\t\t// witness\n\t\t\tc.logger.Info(\"witness returned an error during header comparison, removing...\",\n\t\t\t\t\"witness\", c.witnesses[e.WitnessIndex], \"err\", err)\n\t\t\twitnessesToRemove = append(witnessesToRemove, e.WitnessIndex)\n\t\tdefault:\n\t\t\t// Benign errors which can be ignored unless there was a context\n\t\t\t// canceled\n\t\t\tif errors.Is(e, context.Canceled) || errors.Is(e, context.DeadlineExceeded) {\n\t\t\t\treturn e\n\t\t\t}\n\t\t\tc.logger.Info(\"error in light block request to witness\", \"err\", err)\n\t\t}\n\t}\n\n\t// remove witnesses that have misbehaved\n\tif err := c.removeWitnesses(witnessesToRemove); err != nil {\n\t\treturn err\n\t}\n\n\t// 1. If we had at least one witness that returned the same header then we\n\t// conclude that we can trust the header\n\tif headerMatched {\n\t\treturn nil\n\t}\n\n\t// 2. 
Else all witnesses have either not responded, don't have the block or sent invalid blocks.\n\treturn ErrFailedHeaderCrossReferencing\n}", "func AllowAnyTrustDomainWorkload() ValidationMode {\n\treturn validationMode{\n\t\toptions: validationOptions{\n\t\t\tidType: workloadId,\n\t\t},\n\t}\n}", "func (uuo *UserUpdateOne) AddCarrepairrecordIDs(ids ...int) *UserUpdateOne {\n\tuuo.mutation.AddCarrepairrecordIDs(ids...)\n\treturn uuo\n}", "func (tracker *PeerTracker) UpdateTrusted(ctx context.Context) error {\n\treturn tracker.updatePeers(ctx, tracker.trustedPeers()...)\n}", "func bindERC20(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ERC20ABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func bindERC20(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ERC20ABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func bindERC20(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ERC20ABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func (s *Server) isTrustedCluster(clusterID string) bool {\n\tfor _, trusted := range s.config.TrustedPeers {\n\t\tif trusted == clusterID || trusted == \"*\" {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (rc *RentalCreate) SetCarID(u uuid.UUID) *RentalCreate {\n\trc.mutation.SetCarID(u)\n\treturn rc\n}" ]
[ "0.5050521", "0.46469218", "0.44809136", "0.4400281", "0.43353248", "0.43324968", "0.42241368", "0.41937608", "0.41843116", "0.41048527", "0.4037473", "0.40296867", "0.4016661", "0.40072775", "0.39855403", "0.39722", "0.39688164", "0.39688164", "0.3953601", "0.39267403", "0.39037538", "0.39037538", "0.38855118", "0.38855118", "0.38855118", "0.38792187", "0.38600627", "0.38358578", "0.3831689", "0.38294238", "0.38286698", "0.38278177", "0.3825741", "0.3823116", "0.38202572", "0.3819775", "0.3819775", "0.38182354", "0.3813514", "0.38132417", "0.38013384", "0.37956598", "0.37915957", "0.37778845", "0.37745786", "0.3773235", "0.37673563", "0.376591", "0.37615287", "0.37474442", "0.3746116", "0.37421405", "0.37405303", "0.3733487", "0.37323007", "0.3730742", "0.37241894", "0.3710357", "0.37065145", "0.36977935", "0.36957306", "0.3695522", "0.3693082", "0.36705357", "0.36693332", "0.36658245", "0.36569363", "0.36546367", "0.3645987", "0.3638687", "0.36348802", "0.36336762", "0.3626719", "0.36222655", "0.36163336", "0.36067876", "0.36067086", "0.36066255", "0.3606336", "0.3605582", "0.36048195", "0.35927635", "0.35826197", "0.35820895", "0.35797834", "0.35782483", "0.35743597", "0.35737845", "0.3567621", "0.35652193", "0.35619518", "0.35566851", "0.35464576", "0.35450062", "0.35422057", "0.35410666", "0.35410666", "0.35410666", "0.3540865", "0.3539437" ]
0.7244435
0
MaxAllowedHeaderSize overrides the default maximum size (of 32 MiB) that a CARv1 decode (including within a CARv2 container) will allow a header to be without erroring.
func MaxAllowedHeaderSize(max uint64) Option { return func(o *Options) { o.MaxAllowedHeaderSize = max } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *fseEncoder) maxHeaderSize() uint32 {\n\tif s.preDefined {\n\t\treturn 0\n\t}\n\tif s.useRLE {\n\t\treturn 8\n\t}\n\treturn (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8\n}", "func (*testObject) MaxHeaderLength() uint16 {\n\treturn 0\n}", "func (*endpoint) MaxHeaderLength() uint16 {\n\treturn header.EthernetMinimumSize\n}", "func (e *endpoint) MaxHeaderLength() uint16 {\n\treturn uint16(e.hdrSize)\n}", "func MaxHeaderBytes(v int) Option {\n\treturn optionSetter(func(opt *Options) {\n\t\topt.MaxHeaderBytes = v\n\t})\n}", "func (st *Settings) MaxHeaderListSize() uint32 {\n\treturn st.headerSize\n}", "func (st *Settings) MaxHeaderListSize() uint32 {\n\treturn st.headerSize\n}", "func HeaderFieldsTooLarge(message ...interface{}) Err {\n\treturn Boomify(http.StatusRequestHeaderFieldsTooLarge, message...)\n}", "func (c Config) MaxHeaderBytesOrDefault() int {\n\tif c.MaxHeaderBytes > 0 {\n\t\treturn c.MaxHeaderBytes\n\t}\n\treturn DefaultMaxHeaderBytes\n}", "func (st *Settings) SetMaxHeaderListSize(size uint32) {\n\tst.headerSize = size\n}", "func (st *Settings) SetMaxHeaderListSize(size uint32) {\n\tst.headerSize = size\n}", "func (r *Responder) RequestHeaderFieldsTooLarge() { r.write(http.StatusRequestHeaderFieldsTooLarge) }", "func (e *endpoint) MaxHeaderLength() uint16 {\n\treturn e.lower.MaxHeaderLength()\n}", "func (r *Route) MaxHeaderLength() uint16 {\n\treturn r.outgoingNIC.getNetworkEndpoint(r.NetProto()).MaxHeaderLength()\n}", "func (msg *MsgFetchSmartContractInfo) MaxPayloadLength(pver uint32) uint32 {\n\t// 10k. 
In theory this message is very small.\n\treturn 10240\n}", "func (s *Server) SetMaxHeaderBytes(b int) {\n\ts.config.MaxHeaderBytes = b\n}", "func estimatedHeaderWireSize(hs http.Header) (res int) {\n\tfor h, vs := range hs {\n\t\tres += len(h) + 4 // account for \": \" and \"\\r\\n\"\n\t\tfor _, v := range vs {\n\t\t\tres += len(v)\n\t\t\tbreak // no duplicates allowed\n\t\t}\n\t}\n\treturn res\n}", "func DecodeHeader(data []byte, bytesRead int, h *Header) (int, error) {\n\tif h == nil {\n\t\treturn 0, errors.New(\"Cannot decode bytes to nil Header\")\n\t}\n\n\tif len(data) < maxHeaderSize {\n\t\treturn 0, fmt.Errorf(\"Header bytes should be %d bytes, found %d\", maxHeaderSize, len(data))\n\t}\n\n\tvar err error\n\n\th.ID, bytesRead, err = decodeUint16(data, bytesRead)\n\tif err != nil {\n\t\treturn bytesRead, err\n\t}\n\n\tcurrentByte := data[bytesRead]\n\th.QR = QRType(getBitsAtIdx(currentByte, 0, 1))\n\th.OPCODE = Opcode(getBitsAtIdx(currentByte, 1, 4))\n\th.AA = getBitsAtIdx(currentByte, 5, 1)\n\th.TC = getBitsAtIdx(currentByte, 6, 1)\n\th.RD = getBitsAtIdx(currentByte, 7, 1)\n\tbytesRead++\n\n\tcurrentByte = data[bytesRead]\n\th.RA = getBitsAtIdx(currentByte, 0, 1)\n\th.Z = getBitsAtIdx(currentByte, 1, 3)\n\th.RCODE = ResponseCode(getBitsAtIdx(currentByte, 4, 4))\n\tbytesRead++\n\n\t// Set the remaining data\n\th.QDCOUNT, bytesRead, err = decodeUint16(data, bytesRead)\n\tif err != nil {\n\t\treturn bytesRead, err\n\t}\n\n\th.ANCOUNT, bytesRead, err = decodeUint16(data, bytesRead)\n\tif err != nil {\n\t\treturn bytesRead, err\n\t}\n\n\th.NSCOUNT, bytesRead, err = decodeUint16(data, bytesRead)\n\tif err != nil {\n\t\treturn bytesRead, err\n\t}\n\n\th.ARCOUNT, bytesRead, err = decodeUint16(data, bytesRead)\n\tif err != nil {\n\t\treturn bytesRead, err\n\t}\n\n\treturn bytesRead, err\n}", "func (f *frame) headerLength() int {\n\treturn 8 + 1 + 4\n}", "func writeHeaderSize(headerLength int) []byte {\n\ttotalHeaderLen := make([]byte, 4)\n\ttotalLen := 
uint32(headerLength)\n\tbinary.BigEndian.PutUint32(totalHeaderLen, totalLen)\n\treturn totalHeaderLen\n}", "func (this SnappyCodec) MaxEncodedLen(srcLen int) int {\n\treturn 32 + srcLen + srcLen/6\n}", "func (r Response) RequestHeaderFieldsTooLarge(code string, payload Payload, header ...ResponseHeader) {\n\tr.Response(code, http.RequestHeaderFieldsTooLarge, payload, header...)\n}", "func HeaderSize(h http.Header) int {\n\tl := 0\n\tfor field, value := range h {\n\t\tl += len(field)\n\t\tfor _, v := range value {\n\t\t\tl += len(v)\n\t\t}\n\t}\n\n\treturn l\n}", "func (z *Writer) writeHeader() error {\n\t// Default to 4Mb if BlockMaxSize is not set.\n\tif z.Header.BlockMaxSize == 0 {\n\t\tz.Header.BlockMaxSize = blockSize4M\n\t}\n\t// The only option that needs to be validated.\n\tbSize := z.Header.BlockMaxSize\n\tif !isValidBlockSize(z.Header.BlockMaxSize) {\n\t\treturn fmt.Errorf(\"lz4: invalid block max size: %d\", bSize)\n\t}\n\t// Allocate the compressed/uncompressed buffers.\n\t// The compressed buffer cannot exceed the uncompressed one.\n\tz.newBuffers()\n\tz.idx = 0\n\n\t// Size is optional.\n\tbuf := z.buf[:]\n\n\t// Set the fixed size data: magic number, block max size and flags.\n\tbinary.LittleEndian.PutUint32(buf[0:], frameMagic)\n\tflg := byte(Version << 6)\n\tflg |= 1 << 5 // No block dependency.\n\tif z.Header.BlockChecksum {\n\t\tflg |= 1 << 4\n\t}\n\tif z.Header.Size > 0 {\n\t\tflg |= 1 << 3\n\t}\n\tif !z.Header.NoChecksum {\n\t\tflg |= 1 << 2\n\t}\n\tbuf[4] = flg\n\tbuf[5] = blockSizeValueToIndex(z.Header.BlockMaxSize) << 4\n\n\t// Current buffer size: magic(4) + flags(1) + block max size (1).\n\tn := 6\n\t// Optional items.\n\tif z.Header.Size > 0 {\n\t\tbinary.LittleEndian.PutUint64(buf[n:], z.Header.Size)\n\t\tn += 8\n\t}\n\n\t// The header checksum includes the flags, block max size and optional Size.\n\tbuf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF)\n\tz.checksum.Reset()\n\n\t// Header ready, write it out.\n\tif _, err := 
z.dst.Write(buf[0 : n+1]); err != nil {\n\t\treturn err\n\t}\n\tz.Header.done = true\n\tif debugFlag {\n\t\tdebug(\"wrote header %v\", z.Header)\n\t}\n\n\treturn nil\n}", "func (this X86Codec) MaxEncodedLen(srcLen int) int {\n\t// Since we do not check the dst index for each byte (for speed purpose)\n\t// allocate some extra buffer for incompressible data.\n\tif srcLen >= 1<<30 {\n\t\treturn srcLen\n\t}\n\n\tif srcLen <= 512 {\n\t\treturn srcLen + 32\n\t}\n\n\treturn srcLen + srcLen/16\n}", "func (p ZkEstablishAccept) MaxPayloadLength(uint32) uint32 {\n\treturn 65532\n}", "func (cd *ContinueDecompress) MaxMessageSize() int {\n\treturn cd.maxMessageSize\n}", "func (msg *MsgVersion) MaxPayloadSize(pver uint32) uint32 {\n\treturn 48\n}", "func (t ResponseHeader) Size(version int16) int32 {\n\tvar sz int32\n\tsz += sizeof.Int32 // CorrelationId\n\treturn sz\n}", "func (request *RequestNFrame) Size() int {\n\treturn request.Header.Size() + reqsSize\n}", "func (d *DHCPv4) MaxMessageSize() (uint16, error) {\n\treturn GetUint16(OptionMaximumDHCPMessageSize, d.Options)\n}", "func (e *encoder) configureHeader() error {\n\t// Header - Defaults\n\tif e.h.Depth == 0 {\n\t\te.h.Depth = 32\n\t}\n\tif e.h.Compression == \"\" {\n\t\te.h.Compression = CompressionGzip\n\t}\n\tif e.h.RasterMode == \"\" {\n\t\te.h.RasterMode = RasterModeNormal\n\t}\n\tif e.h.Format == \"\" {\n\t\tswitch e.m.ColorModel() {\n\t\tcase hdrcolor.RGBModel:\n\t\t\te.h.Format = FormatRGBE\n\t\tcase hdrcolor.XYZModel:\n\t\t\te.h.Format = FormatXYZE\n\t\tdefault:\n\t\t\treturn UnsupportedError(\"color model\")\n\t\t}\n\t}\n\n\t// Header - Format\n\tswitch e.h.Format {\n\tcase FormatRGBE:\n\t\te.channelSize = 1\n\t\te.nbOfchannel = 4\n\t\te.bytesAt = func(x, y int) []byte {\n\t\t\tr, g, b, _ := e.m.HDRAt(x, y).HDRRGBA()\n\t\t\treturn format.ToRadianceBytes(r, g, b)\n\t\t}\n\tcase FormatXYZE:\n\t\te.channelSize = 1\n\t\te.nbOfchannel = 4\n\t\te.bytesAt = func(x, y int) []byte {\n\t\t\txx, yy, zz, _ := e.m.HDRAt(x, 
y).HDRXYZA()\n\t\t\treturn format.ToRadianceBytes(xx, yy, zz)\n\t\t}\n\tcase FormatRGB:\n\t\te.channelSize = 4\n\t\te.nbOfchannel = 3\n\t\te.bytesAt = func(x, y int) []byte {\n\t\t\tr, g, b, _ := e.m.HDRAt(x, y).HDRRGBA()\n\t\t\treturn format.ToBytes(binary.LittleEndian, r, g, b)\n\t\t}\n\tcase FormatXYZ:\n\t\te.channelSize = 4\n\t\te.nbOfchannel = 3\n\t\te.bytesAt = func(x, y int) []byte {\n\t\t\txx, yy, zz, _ := e.m.HDRAt(x, y).HDRXYZA()\n\t\t\treturn format.ToBytes(binary.LittleEndian, xx, yy, zz)\n\t\t}\n\tcase FormatLogLuv:\n\t\te.channelSize = 1\n\t\te.nbOfchannel = 4\n\t\te.bytesAt = func(x, y int) []byte {\n\t\t\txx, yy, zz, _ := e.m.HDRAt(x, y).HDRXYZA()\n\t\t\treturn format.XYZToLogLuv(xx, yy, zz)\n\t\t}\n\t}\n\n\t// Header - Size\n\td := e.m.Bounds().Size()\n\te.h.Width = d.X\n\te.h.Height = d.Y\n\n\treturn nil\n}", "func Size(n uint32) uint32 {\n\treturn align(NEEDLE_HEADER_SIZE + n + NEEDLE_FOOTER_SIZE)\n}", "func (msg *MsgGetCFilterV2) MaxPayloadLength(pver uint32) uint32 {\n\t// Block hash.\n\treturn chainhash.HashSize\n}", "func validHeaderFieldByte(b byte) bool {\n\treturn int(b) < len(isTokenTable) && isTokenTable[b]\n}", "func (rw *RequestHeader) Size() int32 {\n\tencoder := NewSizingEncoder()\n\trw.Write(encoder)\n\treturn encoder.Size()\n}", "func Size(n int) int {\n\treturn int(align(_headerSize + int32(n) + _footerSize))\n}", "func (t RequestHeader) Size(version int16) int32 {\n\tvar sz int32\n\tsz += sizeof.Int16 // RequestApiKey\n\tsz += sizeof.Int16 // RequestApiVersion\n\tsz += sizeof.Int32 // CorrelationId\n\tif version >= 1 {\n\t\tsz += sizeof.String(t.ClientId) // ClientId\n\t}\n\treturn sz\n}", "func (cc *ContinueCompress) MaxMessageSize() int {\n\treturn cc.maxMessageSize\n}", "func headerWithNoFileMetaInformationGroupLength() (*headerData, error) {\n\theaderData := new(headerData)\n\n\telements := []*Element{\n\t\tmustNewElement(tag.MediaStorageSOPClassUID, 
[]string{\"SecondaryCapture\"}),\n\t\tmustNewElement(tag.MediaStorageSOPInstanceUID, []string{\"1.3.6.1.4.1.35190.4.1.20210608.607733549593\"}),\n\t\tmustNewElement(tag.TransferSyntaxUID, []string{\"=RLELossless\"}),\n\t\tmustNewElement(tag.ImplementationClassUID, []string{\"1.6.6.1.4.1.9590.100.1.0.100.4.0\"}),\n\t\tmustNewElement(tag.SOPInstanceUID, []string{\"1.3.6.1.4.1.35190.4.1.20210608.607733549593\"}),\n\t}\n\tdata, err := writeElements(elements)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Construct valid DICOM header preamble.\n\tmagicWord := []byte(\"DICM\")\n\tpreamble := make([]byte, 128)\n\tpreamble = append(preamble, magicWord...)\n\theaderBytes := append(preamble, data...)\n\theaderData.HeaderBytes = bytes.NewBuffer(headerBytes)\n\theaderData.Elements = elements[0 : len(elements)-1]\n\treturn headerData, nil\n}", "func MaxBlockLen(ct CompressionType) uint64 {\n\tif ct == Snappy {\n\t\t// https://github.com/golang/snappy/blob/2a8bb927dd31d8daada140a5d09578521ce5c36a/encode.go#L76\n\t\treturn 6 * (0xffffffff - 32) / 7\n\t}\n\treturn math.MaxUint64\n}", "func (request *RequestResponseFrame) Size() int {\n\treturn request.Header.Size() + request.Metadata.Size() + len(request.Data)\n}", "func (this AliasCodec) MaxEncodedLen(srcLen int) int {\n\treturn srcLen + 1024\n}", "func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int {\n\tconst max = 512 << 10\n\tn := int64(maxFrameSize)\n\tif n > max {\n\t\tn = max\n\t}\n\tif cl := cs.reqBodyContentLength; cl != -1 && cl+1 < n {\n\t\t// Add an extra byte past the declared content-length to\n\t\t// give the caller's Request.Body io.Reader a chance to\n\t\t// give us more bytes than they declared, so we can catch it\n\t\t// early.\n\t\tn = cl + 1\n\t}\n\tif n < 1 {\n\t\treturn 1\n\t}\n\treturn int(n) // doesn't truncate; max is 512K\n}", "func ErrRequestHeaderFieldsTooLargef(format string, arguments ...interface{}) *Status {\n\treturn &Status{Code: http.StatusRequestHeaderFieldsTooLarge, Text: 
fmt.Sprintf(format, arguments...)}\n}", "func (hdr *Header) Unmarshal(data []byte) error {\n\thdr.Source = binary.BigEndian.Uint16(data[0:2])\n\thdr.Destination = binary.BigEndian.Uint16(data[2:4])\n\thdr.SeqNum = binary.BigEndian.Uint32(data[4:8])\n\thdr.AckNum = binary.BigEndian.Uint32(data[8:12])\n\n\thdr.DataOffset = data[12] >> 4\n\thdr.ECN = byte(data[13] >> 6 & 7) // 3 bits\n\thdr.Ctrl = Flag(byte(data[13] & 0x3f)) // bottom 6 bits\n\n\thdr.Window = binary.BigEndian.Uint16(data[14:16])\n\thdr.Checksum = binary.BigEndian.Uint16(data[16:18])\n\thdr.Urgent = binary.BigEndian.Uint16(data[18:20])\n\n\thdr.Options = hdr.opts[:0]\n\n\tif hdr.DataOffset < 5 {\n\t\treturn fmt.Errorf(\"Invalid TCP data offset %d < 5\", hdr.DataOffset)\n\t}\n\n\tdataStart := int(hdr.DataOffset) * 4\n\tif dataStart > len(data) {\n\t\thdr.Payload = nil\n\t\t//hdr.Contents = data\n\t\treturn errors.New(\"TCP data offset greater than packet length\")\n\t}\n\t//hdr.Contents = data[:dataStart]\n\thdr.Payload = data[dataStart:]\n\t// From here on, data points just to the header options.\n\tdata = data[20:dataStart]\nLoop:\n\tfor len(data) > 0 {\n\t\tif hdr.Options == nil {\n\t\t\t// Pre-allocate to avoid allocating a slice.\n\t\t\thdr.Options = hdr.opts[:0]\n\t\t}\n\t\thdr.Options = append(hdr.Options, Option{OptionType: OptionKind(data[0])})\n\t\topt := &hdr.Options[len(hdr.Options)-1]\n\t\tswitch opt.OptionType {\n\t\tcase optionKindEndList: // End of options\n\t\t\topt.OptionLength = 1\n\t\t\thdr.Padding = data[1:]\n\t\t\tbreak Loop\n\t\tcase optionKindNop: // 1 byte padding\n\t\t\topt.OptionLength = 1\n\t\tdefault:\n\t\t\topt.OptionLength = data[1]\n\t\t\tif opt.OptionLength < 2 {\n\t\t\t\treturn fmt.Errorf(\"Invalid TCP option length %d < 2\", opt.OptionLength)\n\t\t\t} else if int(opt.OptionLength) > len(data) {\n\t\t\t\treturn fmt.Errorf(\"Ivalid TCP option length %d exceeds remaining %d bytes\", opt.OptionLength, len(data))\n\t\t\t}\n\t\t\topt.OptionData = 
data[2:opt.OptionLength]\n\t\t}\n\t\tdata = data[opt.OptionLength:]\n\t}\n\n\treturn nil\n}", "func FixedLengthRecordReaderV2HeaderBytes(value int64) FixedLengthRecordReaderV2Attr {\n\treturn func(m optionalAttr) {\n\t\tm[\"header_bytes\"] = value\n\t}\n}", "func MaxEncodedLen(ct CompressionType, srcLen uint64) (uint64, bool) {\n\tif ct == Snappy {\n\t\tif srcLen > MaxBlockLen(ct) {\n\t\t\treturn 0, false\n\t\t}\n\t\tsz := snappy.MaxEncodedLen(int(srcLen))\n\t\tif sz == -1 {\n\t\t\treturn 0, false\n\t\t}\n\t\treturn uint64(sz), true\n\t}\n\tpanic(\"not supported compression type\")\n}", "func (h *blockHeader) setSize(v uint32) {\n\tconst mask = 7\n\t*h = (*h)&mask | blockHeader(v<<3)\n}", "func parseAcceptedBlobSize(rangeHeader string) (int64, error) {\n\t// Range: Range indicating the current progress of the upload.\n\t// https://github.com/opencontainers/distribution-spec/blob/master/spec.md#get-blob-upload\n\tif rangeHeader == \"\" {\n\t\treturn 0, fmt.Errorf(\"range header required\")\n\t}\n\n\tparts := strings.SplitN(rangeHeader, \"-\", 2)\n\tif len(parts) != 2 {\n\t\treturn 0, fmt.Errorf(\"range header bad value: %s\", rangeHeader)\n\t}\n\n\tsize, err := strconv.ParseInt(parts[1], 10, 64)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t// docker registry did '-1' in the response\n\tif size > 0 {\n\t\tsize = size + 1\n\t}\n\n\treturn size, nil\n}", "func (h literalsHeader) size() int {\n\treturn int(h >> 60)\n}", "func (m *SlimBlock) MaxPayloadLength(pver uint32) uint32 {\n\treturn MaxBlockPayload\n}", "func parseHeader(header []byte, frame *MP3Frame) bool {\n\n // MPEG version. (2 bits)\n frame.MPEGVersion = (header[1] & 0x18) >> 3\n if frame.MPEGVersion == MPEGVersionReserved {\n return false\n }\n\n // MPEG layer. (2 bits.)\n frame.MPEGLayer = (header[1] & 0x06) >> 1\n if frame.MPEGLayer == MPEGLayerReserved {\n return false\n }\n\n // CRC (cyclic redundency check) protection. 
(1 bit.)\n frame.CrcProtection = (header[1] & 0x01) == 0x00\n\n // Bit rate index. (4 bits.)\n bitRateIndex := (header[2] & 0xF0) >> 4\n if bitRateIndex == 0 || bitRateIndex == 15 {\n return false\n }\n\n // Bit rate.\n if frame.MPEGVersion == MPEGVersion1 {\n switch frame.MPEGLayer {\n case MPEGLayerI: frame.BitRate = v1l1_br[bitRateIndex] * 1000\n case MPEGLayerII: frame.BitRate = v1l2_br[bitRateIndex] * 1000\n case MPEGLayerIII: frame.BitRate = v1l3_br[bitRateIndex] * 1000\n }\n } else {\n switch frame.MPEGLayer {\n case MPEGLayerI: frame.BitRate = v2l1_br[bitRateIndex] * 1000\n case MPEGLayerII: frame.BitRate = v2l2_br[bitRateIndex] * 1000\n case MPEGLayerIII: frame.BitRate = v2l3_br[bitRateIndex] * 1000\n }\n }\n\n // Sampling rate index. (2 bits.)\n samplingRateIndex := (header[2] & 0x0C) >> 2\n if samplingRateIndex == 3 {\n return false\n }\n\n // Sampling rate.\n switch frame.MPEGVersion {\n case MPEGVersion1: frame.SamplingRate = v1_sr[samplingRateIndex]\n case MPEGVersion2: frame.SamplingRate = v2_sr[samplingRateIndex]\n case MPEGVersion2_5: frame.SamplingRate = v25_sr[samplingRateIndex]\n }\n\n // Padding bit. (1 bit.)\n frame.PaddingBit = (header[2] & 0x02) == 0x02\n\n // Private bit. (1 bit.)\n frame.PrivateBit = (header[2] & 0x01) == 0x01\n\n // Channel mode. (2 bits.)\n frame.ChannelMode = (header[3] & 0xC0) >> 6\n\n // Mode Extension. Valid only for Joint Stereo mode. (2 bits.)\n frame.ModeExtension = (header[3] & 0x30) >> 4\n if frame.ChannelMode != JointStereo && frame.ModeExtension != 0 {\n return false\n }\n\n // Copyright bit. (1 bit.)\n frame.CopyrightBit = (header[3] & 0x08) == 0x08\n\n // Original bit. (1 bit.)\n frame.OriginalBit = (header[3] & 0x04) == 0x04\n\n // Emphasis. (2 bits.)\n frame.Emphasis = (header[3] & 0x03)\n if frame.Emphasis == 2 {\n return false\n }\n\n // Number of samples in the frame. 
We need this to determine the frame size.\n if frame.MPEGVersion == MPEGVersion1 {\n switch frame.MPEGLayer {\n case MPEGLayerI: frame.SampleCount = 384\n case MPEGLayerII: frame.SampleCount = 1152\n case MPEGLayerIII: frame.SampleCount = 1152\n }\n } else {\n switch frame.MPEGLayer {\n case MPEGLayerI: frame.SampleCount = 384\n case MPEGLayerII: frame.SampleCount = 1152\n case MPEGLayerIII: frame.SampleCount = 576\n }\n }\n\n // If the padding bit is set we add an extra 'slot' to the frame length.\n // A layer I slot is 4 bytes long; layer II and III slots are 1 byte long.\n var padding int = 0\n\n if frame.PaddingBit {\n if frame.MPEGLayer == MPEGLayerI {\n padding = 4\n } else {\n padding = 1\n }\n }\n\n // Calculate the frame length in bytes. There's a lot of confusion online\n // about how to do this and definitive documentation is hard to find as\n // the official MP3 specification is not publicly available. The\n // basic formula seems to boil down to:\n //\n // bytes_per_sample = (bit_rate / sampling_rate) / 8\n // frame_length = sample_count * bytes_per_sample + padding\n //\n // In practice we need to rearrange this formula to avoid rounding errors.\n //\n // I can't find any definitive statement on whether this length is\n // supposed to include the 4-byte header and the optional 2-byte CRC.\n // Experimentation on mp3 files captured from the wild indicates that it\n // includes the header at least.\n frame.FrameLength =\n (frame.SampleCount / 8) * frame.BitRate / frame.SamplingRate + padding\n\n return true\n}", "func MaxSize32(length int) int {\n\tnumControlBytes := (length + 3) / 4\n\tmaxNumDataBytes := 4 * length\n\treturn numControlBytes + maxNumDataBytes\n}", "func defaultMaxInflightBytes(n int) option.ClientOption {\n\treturn &defaultInflightBytesSetting{maxBytes: n}\n}", "func (fixedLenByteArrayDecoderTraits) BytesRequired(n int) int {\n\treturn parquet.FixedLenByteArrayTraits.BytesRequired(n)\n}", "func readHeaderDataSize(reader *bytes.Reader, 
totalSize uint64) (*dataSizeHeader, error) {\n\thdrDataSize := dataSizeHeader{}\n\terr := binary.Read(reader, binary.LittleEndian, &hdrDataSize)\n\n\t/*\n\t * Check if data size header was read.\n\t */\n\tif err != nil {\n\t\tmsg := err.Error()\n\t\treturn nil, fmt.Errorf(\"Failed to read data size header: %s\", msg)\n\t} else {\n\t\tchunkId := hdrDataSize.ChunkID\n\t\tchunkSize := hdrDataSize.ChunkSize\n\t\tsizeRiff := hdrDataSize.SizeRIFF\n\t\texpectedRiffChunkSize := totalSize - 8\n\n\t\t/*\n\t\t * Check data size header for validity.\n\t\t */\n\t\tif chunkId != ID_DATASIZE {\n\t\t\treturn nil, fmt.Errorf(\"Data size header contains invalid chunk id. Expected %#08x, found %#08x.\", ID_DATASIZE, chunkId)\n\t\t} else if chunkSize < MIN_DATASIZE_CHUNK_SIZE {\n\t\t\treturn nil, fmt.Errorf(\"Data size header has too small size. Expected at least %#08x, found %#08x.\", MIN_DATASIZE_CHUNK_SIZE, chunkSize)\n\t\t} else if sizeRiff != expectedRiffChunkSize {\n\t\t\treturn nil, fmt.Errorf(\"Unexpected RIFF chunk size in data size header. 
Expected %#08x, found %0#8x.\", expectedRiffChunkSize, sizeRiff)\n\t\t} else {\n\t\t\treturn &hdrDataSize, nil\n\t\t}\n\n\t}\n\n}", "func (b IPv4Header) TotalLen() int {\n\treturn int(binary.BigEndian.Uint16(b[2:4]))\n}", "func (request *RequestChannelFrame) Size() int {\n\treturn request.Header.Size() + initReqsSize + request.Metadata.Size() + len(request.Data)\n}", "func (o BucketV2CorsRuleOutput) AllowedHeaders() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v BucketV2CorsRule) []string { return v.AllowedHeaders }).(pulumi.StringArrayOutput)\n}", "func expectedResponseLenth(responseCode uint8, responseLength uint8) (byteCount int, err error) {\n\tswitch responseCode {\n\tcase fcReadHoldingRegisters,\n\t fcReadInputRegisters,\n\t fcReadCoils,\n\t fcReadDiscreteInputs: byteCount = int(responseLength)\n\tcase fcWriteSingleRegister,\n\t fcWriteMultipleRegisters,\n\t fcWriteSingleCoil,\n\t fcWriteMultipleCoils: byteCount = 3\n\tcase fcMaskWriteRegister: byteCount = 5\n\tcase fcReadHoldingRegisters | 0x80,\n\t fcReadInputRegisters | 0x80,\n\t fcReadCoils | 0x80,\n\t fcReadDiscreteInputs | 0x80,\n\t fcWriteSingleRegister | 0x80,\n\t fcWriteMultipleRegisters | 0x80,\n\t fcWriteSingleCoil | 0x80,\n\t fcWriteMultipleCoils | 0x80,\n\t fcMaskWriteRegister | 0x80: byteCount = 0\n\tdefault: err = fmt.Errorf(\"unexpected response code (%v)\", responseCode)\n\t}\n\n\treturn\n}", "func (request *RequestStreamFrame) Size() int {\n\treturn request.Header.Size() + initReqsSize + request.Metadata.Size() + len(request.Data)\n}", "func (f *Framer) ReadHeader() (head *frameHeader, err error) {\n\tv, err := f.r.ReadByte()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tversion := v & protoVersionMask\n\n\tif version < protoVersion1 || version > protoVersion4 {\n\t\treturn nil, fmt.Errorf(\"unsupported version: %x \", v)\n\t}\n\n\tf.proto = version\n\n\thead = &frameHeader{}\n\n\thead.Version = protoVersion(v)\n\n\tflag, err := f.r.ReadByte()\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\thead.Flags = flag\n\n\tif version > protoVersion2 {\n\t\tstream, err := f.r.ReadNetUint16()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thead.Stream = int(stream)\n\n\t\tb, err := f.r.ReadByte()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\thead.Op = FrameOp(b)\n\t\tl, err := f.r.ReadNetUint32()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thead.BodyLength = int(l)\n\t} else {\n\t\tstream, err := f.r.ReadNetUint8()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thead.Stream = int(stream)\n\n\t\tb, err := f.r.ReadByte()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\thead.Op = FrameOp(b)\n\t\tl, err := f.r.ReadNetUint32()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thead.BodyLength = int(l)\n\t}\n\n\tif head.BodyLength < 0 {\n\t\treturn nil, fmt.Errorf(\"frame body length can not be less than 0: %d\", head.BodyLength)\n\t} else if head.BodyLength > maxFrameSize {\n\t\t// need to free up the connection to be used again\n\t\tlogp.Err(\"head length is too large\")\n\t\treturn nil, ErrFrameTooBig\n\t}\n\n\theadSize := f.r.BufferConsumed()\n\thead.HeadLength = headSize\n\n\tdebugf(\"header: %v\", head)\n\n\tf.Header = head\n\treturn head, nil\n}", "func (t DescribeAclsRequest) Size(version int16) int32 {\n\tvar sz int32\n\tsz += sizeof.Int8 // ResourceType\n\tsz += sizeof.String(t.ResourceNameFilter) // ResourceNameFilter\n\tif version >= 1 {\n\t\tsz += sizeof.Int8 // ResourcePatternType\n\t}\n\tsz += sizeof.String(t.PrincipalFilter) // PrincipalFilter\n\tsz += sizeof.String(t.HostFilter) // HostFilter\n\tsz += sizeof.Int8 // Operation\n\tsz += sizeof.Int8 // PermissionType\n\treturn sz\n}", "func (h *Header) MarshalFrameHeader() ([]byte, error) {\n\t// NOTE(jc): Header contains a uint32 but the protocol demands a uint24,\n\t// unavailable in Go, throw ErrFrameTooBig if given >uint24.\n\tif h.Length >= (1 << 24) {\n\t\treturn nil, ErrFrameTooBig\n\t}\n\n\tb := make([]byte, 
HeaderLength)\n\n\tputUint24(b, h.Length)\n\tb[3] = byte(h.Type)\n\tb[4] = byte(h.Flags)\n\tputUint31(b[5:], h.StreamID)\n\n\treturn b, nil\n}", "func (UTF8Decoder) Max() int { return utf8.UTFMax }", "func (v *blockValidator) headerEstimatedSerializedSize(header externalapi.BlockHeader) uint64 {\n\tsize := uint64(0)\n\tsize += 2 // Version (uint16)\n\n\tsize += 8 // number of block levels (uint64)\n\tfor _, blockLevelParents := range header.Parents() {\n\t\tsize += 8 // number of parents in the block level (uint64)\n\t\tsize += uint64(externalapi.DomainHashSize * len(blockLevelParents)) // parents\n\t}\n\n\tsize += externalapi.DomainHashSize // HashMerkleRoot\n\tsize += externalapi.DomainHashSize // AcceptedIDMerkleRoot\n\tsize += externalapi.DomainHashSize // UTXOCommitment\n\tsize += 8 // TimeInMilliseconds (int64)\n\tsize += 4 // Bits (uint32)\n\tsize += 8 // Nonce (uint64)\n\n\treturn size\n}", "func (w *response) requestTooLarge() {\n\tw.closeAfterReply = true\n\tw.requestBodyLimitHit = true\n\tif !w.wroteHeader {\n\t\tw.Header().Set(\"Connection\", \"close\")\n\t}\n}", "func (o BucketCorsConfigurationV2CorsRuleOutput) AllowedHeaders() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v BucketCorsConfigurationV2CorsRule) []string { return v.AllowedHeaders }).(pulumi.StringArrayOutput)\n}", "func (hf *HeaderFrame) FrameType() byte { return 2 }", "func UnmarshalHeader(data []byte, m *MessageHeader) error {\n\tif len(data) < 16 {\n\t\treturn ErrMessageTooSmall\n\t}\n\td := decoder{buffer: data}\n\tm.Txid = uint32(d.readUint(4))\n\tm.Reserved = uint32(d.readUint(4))\n\tm.Flags = uint32(d.readUint(4))\n\tm.Ordinal = uint32(d.readUint(4))\n\treturn nil\n}", "func readFrame(r io.Reader, aead cipher.AEAD, maxEncryptedFrameSize, packetSize uint32) (int, frame, []byte, error) {\n\t// Read header.\n\theaderSize := encryptedHeaderSize(aead)\n\tencryptedHeader := make([]byte, headerSize)\n\tn1, err := io.ReadFull(r, encryptedHeader)\n\tif err, ok := err.(net.Error); ok && 
err.Timeout() {\n\t\treturn 0, frame{}, []byte{}, err // don't wrap timeout error\n\t}\n\tif err != nil {\n\t\treturn 0, frame{}, []byte{}, errors.AddContext(err, \"failed to read encrypted header\")\n\t}\n\t// Decrypt header.\n\theader, err := decryptFrameHeader(encryptedHeader, aead)\n\tif err != nil {\n\t\treturn 0, frame{}, []byte{}, errors.AddContext(err, \"failed to decrypt frame header\")\n\t}\n\t// Unmarshal header.\n\tvar fh frameHeader\n\tif err := fh.Unmarshal(header); err != nil {\n\t\treturn 0, frame{}, []byte{}, errors.AddContext(err, \"failed to unmarshal frame header\")\n\t}\n\t// Check payload length.\n\tmaxPayloadSize := maxFramePayloadSize(maxEncryptedFrameSize, aead)\n\tif fh.length > uint32(maxPayloadSize) {\n\t\treturn 0, frame{}, []byte{}, fmt.Errorf(\"frame payload is too large %v > %v\", fh.length, maxPayloadSize)\n\t}\n\t// Compute encrypted payload length. We expect the amount of padding after\n\t// the payload to be < packetSize.\n\tencryptionOverhead := aead.Overhead() + aead.NonceSize()\n\tencryptedPayloadSize := fh.length + uint32(encryptionOverhead)\n\tif mod := (encryptedPayloadSize + uint32(n1)) % packetSize; mod != 0 {\n\t\tencryptedPayloadSize += (packetSize - mod)\n\t}\n\t// Read payload.\n\tencryptedPayload := make([]byte, encryptedPayloadSize)\n\tn2, err := io.ReadFull(r, encryptedPayload)\n\tif err, ok := err.(net.Error); ok && err.Timeout() {\n\t\treturn 0, frame{}, []byte{}, err // don't wrap timeout error\n\t}\n\tif err != nil {\n\t\treturn 0, frame{}, []byte{}, errors.AddContext(err, \"failed to read encrypted payload\")\n\t}\n\t// Decrypt the payload.\n\tpayload, err := decryptFramePayload(encryptedPayload, aead)\n\tif err != nil {\n\t\treturn 0, frame{}, []byte{}, errors.AddContext(err, \"failed to decrypt payload\")\n\t}\n\tf := frame{\n\t\tframeHeader: fh,\n\t\tpayload: payload[:fh.length],\n\t}\n\treturn n1 + n2, f, payload[fh.length:], nil\n}", "func (d *Decoder) readFrame() (*Frame, error) {\n\theader := 
[10]byte{}\n\tn, err := io.ReadFull(d.r, header[:])\n\td.n += n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tallZero := [10]byte{}\n\n\tif bytes.Equal(header[:], allZero[:]) {\n\t\t// Reached padding. Exit.\n\t\treturn nil, nil\n\t}\n\n\t// Frame ID $xx xx xx xx (four characters)\n\t// Size $xx xx xx xx\n\t// Flags $xx xx\n\n\t// verify if the id is a valid string\n\tidRaw := header[0:4]\n\tfor _, c := range idRaw {\n\t\tif !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9')) {\n\t\t\treturn nil, fmt.Errorf(\"invalid header: %v\", idRaw)\n\t\t}\n\t}\n\n\tid := string(idRaw)\n\n\t// It's safe to represent size as a 32-bit signed int, even if the spec says\n\t// it uses 32-bit integer without specifying it's signed or unsigned,\n\t// because the Size section of tag header can only store an 28-bit signed\n\t// integer.\n\t//\n\t// See decodeTagSize for details.\n\t//\n\t// FIXME: find a way to read signed int directly, without explicit type conversion\n\tsize := int(binary.BigEndian.Uint32(header[4:8]))\n\tflags := binary.BigEndian.Uint16(header[8:10])\n\tdata := make([]byte, size)\n\t// In case of HTTP response body, r is a bufio.Reader, and in some cases\n\t// r.Read() may not fill the whole len(data). 
Using io.ReadFull ensures it\n\t// fills the whole len(data) slice.\n\tn, err = io.ReadFull(d.r, data)\n\n\td.n += n\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tframe := new(Frame)\n\tframe.ID = id\n\tframe.Flags = flags\n\tframe.Data = data\n\n\treturn frame, nil\n}", "func (h NalHeader) MarshalSize() int {\n\t// NOTE: Be careful to match the MarshalTo() method.\n\treturn 1\n}", "func (h *Header) Size() common.StorageSize {\n\treturn common.StorageSize(unsafe.Sizeof(*h)) + common.StorageSize(len(h.Extra)+\n\t\t(h.SnailNumber.BitLen()+h.Number.BitLen()+h.Time.BitLen())/8)\n}", "func (t ExpireDelegationTokenResponse) Size(version int16) int32 {\n\tvar sz int32\n\tsz += sizeof.Int16 // ErrorCode\n\tsz += sizeof.Int64 // ExpiryTimestampMs\n\tsz += sizeof.Int32 // ThrottleTimeMs\n\treturn sz\n}", "func (c PktCnf1) MaxLen() int {\n\treturn int(c & 0xff)\n}", "func ValidateMsgHeader(msgData []byte) bool {\n return len(msgData) >= HEADER_LEN_B\n}", "func (byteArrayDecoderTraits) BytesRequired(n int) int {\n\treturn parquet.ByteArrayTraits.BytesRequired(n)\n}", "func parseRequestHeader(bufHeader []byte) (RequestHeader, error) {\n\tret := RequestHeader{}\n\tbuf := bufHeader\n\n\tret.Magic = uint8(buf[0])\n\tif ret.Magic != MagicRequest {\n\t\treturn RequestHeader{}, fmt.Errorf(\"Magic byte is not 0x80: %x\", ret.Magic)\n\t}\n\tbuf = buf[1:]\n\n\tret.Opcode = uint8(buf[0])\n\t_, ok := OpHandler[ret.Opcode]\n\tif !ok {\n\t\treturn RequestHeader{}, fmt.Errorf(\"Opcode byte is not recognized: %x\", ret.Opcode)\n\t}\n\tbuf = buf[1:]\n\n\tret.KeyLength = GetUint16(buf)\n\tbuf = buf[2:]\n\n\tret.ExtraLength = uint8(buf[0])\n\tbuf = buf[1:]\n\n\tret.DataType = uint8(buf[0])\n\tif ret.DataType != 0x00 {\n\t\treturn RequestHeader{}, fmt.Errorf(\"DataType byte is supposed to be 0x00: %x\", ret.DataType)\n\t}\n\tbuf = buf[1:]\n\n\tret.VBucketID = GetUint16(buf)\n\tbuf = buf[2:]\n\n\tret.TotalBodyLength = GetUint32(buf)\n\tif uint64(ret.TotalBodyLength) < 
uint64(ret.KeyLength)+uint64(ret.ExtraLength) {\n\t\treturn RequestHeader{}, fmt.Errorf(\"TotaoBodyLength is supposed to be no less than KeyLength + ExtraLength: total: %d key: %d extra %d\", ret.TotalBodyLength, ret.KeyLength, ret.ExtraLength)\n\t}\n\tbuf = buf[4:]\n\n\tret.Opaque = GetUint32(buf)\n\tbuf = buf[4:]\n\n\tret.CAS = GetUint64(buf)\n\n\treturn ret, nil\n}", "func (f FormatHeader) BlockSize() uint16 {\n\treturn (f.BitsPerSample / 8) * f.NumChannels\n}", "func RenderHeaderFieldsTooLarge(w http.ResponseWriter, message ...interface{}) {\n\tRender(w, HeaderFieldsTooLarge(message...))\n}", "func (o NetworkPacketCaptureOutput) MaximumBytesPerPacket() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *NetworkPacketCapture) pulumi.IntPtrOutput { return v.MaximumBytesPerPacket }).(pulumi.IntPtrOutput)\n}", "func (msg *Message) parseFormat1HeaderBytes(data []byte) error {\n\tif headerSize := len(data); headerSize < Format1HeaderSize {\n\t\treturn fmt.Errorf(errorShortMessageSize, (headerSize + FrameHeaderSize), (Format1HeaderSize + FrameHeaderSize))\n\t}\n\n\t// SEOJ\n\n\tmsg.seoj[0] = data[0]\n\tmsg.seoj[1] = data[1]\n\tmsg.seoj[2] = data[2]\n\n\t// DEOJ\n\n\tmsg.deoj[0] = data[3]\n\tmsg.deoj[1] = data[4]\n\tmsg.deoj[2] = data[5]\n\n\t// ESV\n\n\tmsg.esv = ESV(data[6])\n\n\t// OPC\n\n\terr := msg.SetOPC(int(data[7]))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (pk PacketBufferPtr) HeaderSize() int {\n\treturn pk.pushed + pk.consumed\n}", "func (fm *FieldModelMapInt32OptionalBytes) FBESize() int { return 4 }", "func MaxAllowedSectionSize(max uint64) Option {\n\treturn func(o *Options) {\n\t\to.MaxAllowedSectionSize = max\n\t}\n}", "func (msg *Message) parseFrameHeaderBytes(data []byte) error {\n\tif headerSize := len(data); headerSize < FrameHeaderSize {\n\t\treturn fmt.Errorf(errorShortMessageSize, headerSize, FrameHeaderSize)\n\t}\n\n\t// Check Headers\n\n\tif data[0] != EHD1Echonet {\n\t\treturn fmt.Errorf(errorInvalidMessageHeader, 0, 
data[0], EHD1Echonet)\n\t}\n\n\tif data[1] != EHD2Format1 {\n\t\treturn fmt.Errorf(errorInvalidMessageHeader, 1, data[1], EHD2Format1)\n\t}\n\n\t// TID\n\n\tmsg.tid[0] = data[2]\n\tmsg.tid[1] = data[3]\n\n\treturn nil\n}", "func TestSmallTagHeader(t *testing.T) {\n\tt.Parallel()\n\n\t_, err := parseHeader(bytes.NewReader([]byte{0, 0, 0}))\n\tif err != ErrSmallHeaderSize {\n\t\tt.Fatalf(\"Expected err contains %q, got %q\", \"less than expected\", err)\n\t}\n}", "func RequestHeaderContentLengthValidator(req http.Request, bodyMaxSize int64) int {\n\tvar contentLength int64\n\tcontentLengthHeader := req.Header.Get(\"Content-Length\")\n\tif contentLengthHeader != \"\" {\n\t\tvar err error\n\t\tcontentLength, err = strconv.ParseInt(contentLengthHeader, 10, 64)\n\t\tif err != nil {\n\t\t\treturn http.StatusBadRequest\n\t\t}\n\t}\n\tif contentLength > bodyMaxSize || req.ContentLength > bodyMaxSize {\n\t\treturn http.StatusRequestEntityTooLarge\n\t}\n\treturn 0\n}", "func DeserializeHeader(bytes []byte) (Header, error) {\n\tvar header Header\n\tif len(bytes) < 12 {\n\t\treturn header, errors.New(fmt.Sprint(\"bytes too short to deserialize dnsmessage.Header, expected at least 12 bytes but got\", len(bytes)))\n\t}\n\theader.ID = binary.BigEndian.Uint16(bytes[0:2])\n\theader.parseFlag(binary.BigEndian.Uint16(bytes[2:4]))\n\theader.QuestionCount = binary.BigEndian.Uint16(bytes[4:6])\n\theader.AnswerRecordCount = binary.BigEndian.Uint16(bytes[6:8])\n\theader.AuthorityRecordCount = binary.BigEndian.Uint16(bytes[8:10])\n\theader.AdditionalRecordCount = binary.BigEndian.Uint16(bytes[10:12])\n\treturn header, nil\n}", "func ReadHeader(buffer *BytePacketBuffer) (Header, error) {\n\tid, err := buffer.ReadU16()\n\tif err != nil {\n\t\treturn Header{}, err\n\t}\n\n\tflags, err := buffer.ReadU16()\n\tif err != nil {\n\t\treturn Header{}, err\n\t}\n\n\tlowFlags := uint8(flags >> 8)\n\trecursionDesired := lowFlags&(1<<0) > 0\n\ttruncatedMessage := lowFlags&(1<<1) > 
0\n\tauthoritativeAnswer := lowFlags&(1<<2) > 0\n\topcode := (lowFlags >> 3) & 0x0F\n\tresponse := lowFlags&(1<<7) > 0\n\n\thighFlags := uint8(flags & 0xFF)\n\trescode := ResultCode(highFlags & 0x0F)\n\tcheckingDisabled := highFlags&(1<<4) > 0\n\tauthedData := highFlags&(1<<5) > 0\n\tz := highFlags&(1<<6) > 0\n\trecursionAvailable := highFlags&(1<<7) > 0\n\n\tquestions, err := buffer.ReadU16()\n\tif err != nil {\n\t\treturn Header{}, err\n\t}\n\n\tanswers, err := buffer.ReadU16()\n\tif err != nil {\n\t\treturn Header{}, err\n\t}\n\n\tauthoritativeEntires, err := buffer.ReadU16()\n\tif err != nil {\n\t\treturn Header{}, err\n\t}\n\n\tresourceEntries, err := buffer.ReadU16()\n\tif err != nil {\n\t\treturn Header{}, err\n\t}\n\n\treturn Header{\n\t\tid: id,\n\t\trecursionDesired: recursionDesired,\n\t\ttruncatedMessage: truncatedMessage,\n\t\tauthoritativeAnswer: authoritativeAnswer,\n\t\topcode: opcode,\n\t\tresponse: response,\n\t\trescode: rescode,\n\t\tcheckingDisabled: checkingDisabled,\n\t\tauthedData: authedData,\n\t\tz: z,\n\t\trecursionAvailable: recursionAvailable,\n\t\tquestions: questions,\n\t\tanswers: answers,\n\t\tauthoritativeEntires: authoritativeEntires,\n\t\tresourceEntries: resourceEntries,\n\t}, nil\n}", "func MaxDataBytesNoEvidence(maxBytes int64, keyType crypto.KeyType, valsCount int) int64 {\n\tmaxDataBytes := maxBytes -\n\t\tMaxOverheadForBlock -\n\t\tMaxHeaderBytes -\n\t\tMaxCoreChainLockSize -\n\t\tMaxCommitOverheadBytes\n\n\tif maxDataBytes < 0 {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"Negative MaxDataBytesUnknownEvidence. 
Block.MaxBytes=%d is too small to accommodate header&lastCommit&evidence=%d\",\n\t\t\tmaxBytes,\n\t\t\t-(maxDataBytes - maxBytes),\n\t\t))\n\t}\n\n\treturn maxDataBytes\n}", "func (m *ModifyBearerResponse) SetLength() {\n\tm.Header.Length = uint16(m.MarshalLen() - 4)\n}", "func extractGIF1stFrame(bytes []byte) (int, error) {\n\tsize := len(bytes)\n\tif size < 13 {\n\t\treturn size, errors.New(\"too short header\")\n\t}\n\tflags := bytes[10]\n\tglobalColorTableFlag := (flags & 0x80) >> 7\n\tsizeOfGlobalColorTable := (flags & 0x07)\n\tvar offset = 13\n\tif globalColorTableFlag != 0 {\n\t\tcolorTableSize := int(math.Pow(2, float64(sizeOfGlobalColorTable+1)))\n\t\toffset += 3 * colorTableSize\n\t\tif size < offset {\n\t\t\treturn size, errors.New(\"too short global colorTable\")\n\t\t}\n\t}\n\tfor {\n\t\tif size < (offset + 1) {\n\t\t\treturn size, errors.New(\"missing separator\")\n\t\t}\n\t\tseparator := bytes[offset]\n\t\toffset++\n\t\tswitch separator {\n\t\tcase 0x3B: // Trailer\n\t\tcase 0x21: // Extention\n\t\t\tif size < (offset + 2) {\n\t\t\t\treturn size, errors.New(\"missing extention block header\")\n\t\t\t}\n\t\t\textensionBlockLabel := bytes[offset]\n\t\t\textensionDataSize := bytes[offset+1]\n\t\t\toffset += 2 + int(extensionDataSize)\n\t\t\tif size < offset {\n\t\t\t\treturn size, errors.New(\"too short extension block\")\n\t\t\t}\n\t\t\tif extensionBlockLabel == 0xff { // Application Extension\n\t\t\t\tfor {\n\t\t\t\t\tif size < (offset + 1) {\n\t\t\t\t\t\treturn size, errors.New(\"missing extension subblock size field\")\n\t\t\t\t\t}\n\t\t\t\t\tsubBlockSize := bytes[offset]\n\t\t\t\t\toffset++\n\t\t\t\t\tif subBlockSize == 0 {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\toffset += int(subBlockSize)\n\t\t\t\t\tif size < offset {\n\t\t\t\t\t\treturn size, errors.New(\"to short extension subblock\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\toffset++ // extensionBlock Trailer\n\t\t\t}\n\t\tcase 0x2C: // Image\n\t\t\tif size < (offset + 9) 
{\n\t\t\t\treturn size, errors.New(\"too short image header\")\n\t\t\t}\n\t\t\tflags := bytes[offset+8]\n\t\t\tlocalColorTableFlag := (flags & 0x80) >> 7\n\t\t\tsizeOfLocalColorTable := (flags & 0x07)\n\t\t\toffset += 9\n\t\t\tif localColorTableFlag != 0 {\n\t\t\t\tcolorTableSize := int(math.Pow(2, float64(sizeOfLocalColorTable+1)))\n\t\t\t\toffset += 3 * colorTableSize\n\t\t\t\tif size < offset {\n\t\t\t\t\treturn size, errors.New(\"too short local colorTable\")\n\t\t\t\t}\n\t\t\t}\n\t\t\toffset++ // LZWMinimumCodeSize\n\t\t\tfor {\n\t\t\t\tif size < (offset + 1) {\n\t\t\t\t\treturn size, errors.New(\"missing image subblock size field\")\n\t\t\t\t}\n\t\t\t\tsubBlockSize := bytes[offset]\n\t\t\t\toffset++\n\t\t\t\tif subBlockSize == 0 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\toffset += int(subBlockSize)\n\t\t\t\tif size < offset {\n\t\t\t\t\treturn size, errors.New(\"too short image subblock\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tif size < (offset + 1) {\n\t\t\t\treturn size, errors.New(\"missing separator for trailer overwrite\")\n\t\t\t}\n\t\t\tbytes[offset] = 0x3B // trailer overwrite\n\t\tdefault:\n\t\t\t// nothing to do\n\t\t}\n\t\tif separator == 0x3B {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn offset, nil\n}", "func readHeaderData(reader *bytes.Reader, totalSize uint64) (*dataHeader, error) {\n\thdrData := dataHeader{}\n\terr := binary.Read(reader, binary.LittleEndian, &hdrData)\n\n\t/*\n\t * Check if data header was read.\n\t */\n\tif err != nil {\n\t\tmsg := err.Error()\n\t\treturn nil, fmt.Errorf(\"Failed to read data header: %s\", msg)\n\t} else {\n\t\tmaxDataLength := totalSize - MIN_TOTAL_HEADER_SIZE\n\t\tmaxDataLength32 := uint32(maxDataLength)\n\t\tchunkId := hdrData.ChunkID\n\t\tchunkSize := hdrData.ChunkSize\n\n\t\t/*\n\t\t * Check data header for validity.\n\t\t */\n\t\tif chunkId != ID_DATA {\n\t\t\treturn nil, fmt.Errorf(\"Data header contains invalid chunk id. 
Expected %#08x, found %#08x.\", ID_DATA, chunkId)\n\t\t} else if (chunkSize > maxDataLength32) && (chunkSize != math.MaxUint32) {\n\t\t\treturn nil, fmt.Errorf(\"Data header contains invalid chunk size. Expected at most %#08x (or %#08x), found %#08x.\", maxDataLength32, uint32(math.MaxUint32), chunkSize)\n\t\t} else {\n\t\t\treturn &hdrData, nil\n\t\t}\n\n\t}\n\n}", "func handleConn(conn net.Conn) {\n\tdefer conn.Close()\n\n\tlog.Print(\"read request to buffer\")\n\tconst maxHeaderSize = 4096\n\treader := bufio.NewReaderSize(conn, maxHeaderSize)\n\twriter := bufio.NewWriter(conn)\n\tcounter := 0\n\tbuf := [maxHeaderSize]byte{}\n\t// naive header limit\n\tfor {\n\t\tif counter == maxHeaderSize {\n\t\t\tlog.Printf(\"too long request header\")\n\t\t\twriter.WriteString(\"HTTP/1.1 413 Payload Too Large\\r\\n\")\n\t\t\twriter.WriteString(\"Content-Length: 0\\r\\n\")\n\t\t\twriter.WriteString(\"Connection: close\\r\\n\")\n\t\t\twriter.WriteString(\"\\r\\n\")\n\t\t\twriter.Flush()\n\t\t\treturn\n\t\t}\n\n\t\tread, err := reader.ReadByte()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"can't read request line: %v\", err)\n\t\t\twriter.WriteString(\"HTTP/1.1 400 Bad Request\\r\\n\")\n\t\t\twriter.WriteString(\"Content-Length: 0\\r\\n\")\n\t\t\twriter.WriteString(\"Connection: close\\r\\n\")\n\t\t\twriter.WriteString(\"\\r\\n\")\n\t\t\twriter.Flush()\n\t\t\treturn\n\t\t}\n\t\tbuf[counter] = read\n\t\tcounter++\n\n\t\tif counter < 4 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif string(buf[counter-4:counter]) == \"\\r\\n\\r\\n\" {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tlog.Print(\"headers found\")\n\theadersStr := string(buf[:counter - 4])\n\n\theaders := make(map[string]string) // TODO: в оригинале map[string][]string\n\trequestHeaderParts := strings.Split(headersStr, \"\\r\\n\")\n\n\tlog.Print(\"parse request line\")\n\trequestLine := requestHeaderParts[0]\n\tlog.Printf(\"request line: %s\", requestLine)\n\n\tlog.Print(\"parse headers\")\n\tfor _, headerLine := range requestHeaderParts[1:] 
{\n\t\theaderParts := strings.SplitN(headerLine, \": \", 2)\n\t\theaders[strings.TrimSpace(headerParts[0])] = strings.TrimSpace(headerParts[1]) // TODO: are we allow empty header?\n\t}\n\tlog.Printf(\"headers: %v\", headers)\n\n\thtml := fmt.Sprintf(`<!doctype html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <meta name=\"viewport\"\n content=\"width=device-width, user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0\">\n <meta http-equiv=\"X-UA-Compatible\" content=\"ie=edge\">\n <title>Document</title>\n</head>\n<body>\n <h1>Hello from golang %s</h1>\n</body>\n</html>`, runtime.Version())\n\n\tlog.Print(\"send response\")\n\twriter.WriteString(\"HTTP/1.1 200 OK\\r\\n\")\n\twriter.WriteString(fmt.Sprintf(\"Content-Length: %d\\r\\n\", len(html)))\n\twriter.WriteString(\"Connection: close\\r\\n\")\n\twriter.WriteString(\"\\r\\n\")\n\twriter.WriteString(html)\n\twriter.Flush()\n\n\tlog.Print(\"done\")\n\treturn\n}", "func MaxEncodedLen(b []byte) int {\n\tmaxlen := int(math.Ceil(float64(len(b)) / BitsPerDigit * 8))\n\treturn maxlen\n}", "func TestHeader(t *testing.T) {\n\n\thdr := Header{\"MyHdr1\", []byte(\"a string\")}\n\tif hdr.String() != \"MyHdr1=\\\"a string\\\"\" {\n\t\tt.Errorf(\"Unexpected: %s\", hdr.String())\n\t}\n\n\thdr = Header{\"MyHdr2\", []byte(\"a longer string that will be truncated right here <-- so you wont see this part.\")}\n\tif hdr.String() != \"MyHdr2=\\\"a longer string that will be truncated right here \\\"(30 more bytes)\" {\n\t\tt.Errorf(\"Unexpected: %s\", hdr.String())\n\t}\n\n\thdr = Header{\"MyHdr3\", []byte{1, 2, 3, 4}}\n\tif hdr.String() != \"MyHdr3=\\\"\\\\x01\\\\x02\\\\x03\\\\x04\\\"\" {\n\t\tt.Errorf(\"Unexpected: %s\", hdr.String())\n\t}\n\n}", "func MakeHeader(data []byte) ([]byte, error) {\n\theader := make([]byte, 4)\n\n\tlength := uint32(len(data))\n\n\tif length > 0x7fffffff {\n\t\treturn nil, errors.New(\"Data to large\")\n\t}\n\n\theader[0] = byte((length >> 24) & 0xff)\n\theader[1] = 
byte((length >> 16) & 0xff)\n\theader[2] = byte((length >> 8) & 0xff)\n\theader[3] = byte((length >> 0) & 0xff)\n\n\treturn header, nil\n}" ]
[ "0.67647296", "0.63822037", "0.60273457", "0.5796491", "0.5741116", "0.5712794", "0.5712794", "0.55037737", "0.54724437", "0.54210025", "0.54210025", "0.54165447", "0.53984994", "0.5197604", "0.5164738", "0.5153342", "0.50923467", "0.50772697", "0.5059694", "0.502238", "0.50169086", "0.49834615", "0.49759427", "0.4951584", "0.49227318", "0.49224046", "0.48794144", "0.48630556", "0.48425454", "0.4832467", "0.48213804", "0.47730905", "0.47366476", "0.47333872", "0.47256845", "0.47112405", "0.46951443", "0.4694762", "0.4691685", "0.4687955", "0.46761042", "0.4673901", "0.46635115", "0.46540776", "0.46343866", "0.46118063", "0.461048", "0.45955878", "0.45948973", "0.45898893", "0.45897633", "0.45674506", "0.45554063", "0.45526445", "0.45463008", "0.45450586", "0.45429006", "0.45399374", "0.45315114", "0.45217878", "0.4513446", "0.45129082", "0.4511414", "0.45107633", "0.4506571", "0.4495195", "0.44900244", "0.44867113", "0.44774127", "0.4475397", "0.44598675", "0.44526452", "0.44490567", "0.44461152", "0.44458944", "0.4440208", "0.44320333", "0.4429126", "0.4427023", "0.441777", "0.44159305", "0.44145042", "0.43947443", "0.43945086", "0.4393924", "0.43883517", "0.43822607", "0.43816468", "0.43802887", "0.43750298", "0.43741423", "0.43734777", "0.43724313", "0.4371097", "0.43698978", "0.43698528", "0.4366269", "0.43618563", "0.43614975", "0.4357589" ]
0.7392962
0
MaxAllowedSectionSize overrides the default maximum size (of 8 MiB) that a CARv1 decode (including within a CARv2 container) will allow a header to be without erroring. Typically IPLD blocks should be under 2 MiB (ideally under 1 MiB), so unless atypical data is expected, this should not be a large value.
func MaxAllowedSectionSize(max uint64) Option { return func(o *Options) { o.MaxAllowedSectionSize = max } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func MaxAllowedHeaderSize(max uint64) Option {\n\treturn func(o *Options) {\n\t\to.MaxAllowedHeaderSize = max\n\t}\n}", "func (*endpoint) MaxHeaderLength() uint16 {\n\treturn header.EthernetMinimumSize\n}", "func (*testObject) MaxHeaderLength() uint16 {\n\treturn 0\n}", "func (st *Settings) MaxHeaderListSize() uint32 {\n\treturn st.headerSize\n}", "func (st *Settings) MaxHeaderListSize() uint32 {\n\treturn st.headerSize\n}", "func (s *fseEncoder) maxHeaderSize() uint32 {\n\tif s.preDefined {\n\t\treturn 0\n\t}\n\tif s.useRLE {\n\t\treturn 8\n\t}\n\treturn (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8\n}", "func (e *endpoint) MaxHeaderLength() uint16 {\n\treturn uint16(e.hdrSize)\n}", "func (f FormatHeader) BlockSize() uint16 {\n\treturn (f.BitsPerSample / 8) * f.NumChannels\n}", "func (s Section) Size() uint32 { return bytes.ReadUint32(s[6:]) }", "func MaxBlockLen(ct CompressionType) uint64 {\n\tif ct == Snappy {\n\t\t// https://github.com/golang/snappy/blob/2a8bb927dd31d8daada140a5d09578521ce5c36a/encode.go#L76\n\t\treturn 6 * (0xffffffff - 32) / 7\n\t}\n\treturn math.MaxUint64\n}", "func (d *DHCPv4) MaxMessageSize() (uint16, error) {\n\treturn GetUint16(OptionMaximumDHCPMessageSize, d.Options)\n}", "func MaxHeaderBytes(v int) Option {\n\treturn optionSetter(func(opt *Options) {\n\t\topt.MaxHeaderBytes = v\n\t})\n}", "func BlockSize() int {\n\treturn config.Record.BlockSize\n}", "func (constr Construction) BlockSize() int { return 16 }", "func Size(n int) int {\n\treturn int(align(_headerSize + int32(n) + _footerSize))\n}", "func (s *SE) PMRLLimitTotalSize() uint64 {\n\treturn 4\n}", "func (msg *MsgFetchSmartContractInfo) MaxPayloadLength(pver uint32) uint32 {\n\t// 10k. 
In theory this message is very small.\n\treturn 10240\n}", "func Size(n uint32) uint32 {\n\treturn align(NEEDLE_HEADER_SIZE + n + NEEDLE_FOOTER_SIZE)\n}", "func (s *IBBSegment) SizeTotalSize() uint64 {\n\treturn 4\n}", "func (e *endpoint) MaxHeaderLength() uint16 {\n\treturn e.lower.MaxHeaderLength()\n}", "func (c Config) MaxHeaderBytesOrDefault() int {\n\tif c.MaxHeaderBytes > 0 {\n\t\treturn c.MaxHeaderBytes\n\t}\n\treturn DefaultMaxHeaderBytes\n}", "func (m *SlimBlock) MaxPayloadLength(pver uint32) uint32 {\n\treturn MaxBlockPayload\n}", "func (st *Settings) SetMaxHeaderListSize(size uint32) {\n\tst.headerSize = size\n}", "func (st *Settings) SetMaxHeaderListSize(size uint32) {\n\tst.headerSize = size\n}", "func (d *RabinKarp64) BlockSize() int { return 1 }", "func (sm3 *SM3) BlockSize() int { return 64 }", "func (cd *ContinueDecompress) MaxMessageSize() int {\n\treturn cd.maxMessageSize\n}", "func readSection(r io.Reader, sSize int, maxSize uint64) (data []byte, nr io.Reader, size int, err error) {\n\t// We are not going to lose data by copying a smaller var into a larger one.\n\tvar sectionSize uint64\n\tswitch sSize {\n\tcase 2:\n\t\t// Read uint16.\n\t\tvar size16 uint16\n\t\terr = binary.Read(r, binary.LittleEndian, &size16)\n\t\tif err != nil {\n\t\t\treturn data, nr, size, fmt.Errorf(\"golnk.readSection: read size %d bytes - %s\", sSize, err.Error())\n\t\t}\n\t\tsectionSize = uint64(size16)\n\t\t// Add bytes to the start of data []byte.\n\t\tdata = uint16Byte(size16)\n\tcase 4:\n\t\t// Read uint32.\n\t\tvar size32 uint32\n\t\terr = binary.Read(r, binary.LittleEndian, &size32)\n\t\tif err != nil {\n\t\t\treturn data, nr, size, fmt.Errorf(\"golnk.readSection: read size %d bytes - %s\", sSize, err.Error())\n\t\t}\n\t\tsectionSize = uint64(size32)\n\t\t// Add bytes to the start of data []byte.\n\t\tdata = uint32Byte(size32)\n\tcase 8:\n\t\t// Read uint64 or sectionSize.\n\t\terr = binary.Read(r, binary.LittleEndian, &sectionSize)\n\t\tif err != nil 
{\n\t\t\treturn data, nr, size, fmt.Errorf(\"golnk.readSection: read size %d bytes - %s\", sSize, err.Error())\n\t\t}\n\t\t// Add bytes to the start of data []byte.\n\t\tdata = uint64Byte(sectionSize)\n\tdefault:\n\t\treturn data, nr, size, fmt.Errorf(\"golnk.readSection: invalid sSize - got %v\", sSize)\n\t}\n\n\t// Create a []byte of sectionSize-4 and read that many bytes from io.Reader.\n\tcomputedSize := sectionSize - uint64(sSize)\n\tif computedSize > maxSize {\n\t\treturn data, nr, size, fmt.Errorf(\"golnk.readSection: invalid computed size got %d; expected a size < %d\", computedSize, maxSize)\n\t}\n\n\ttempData := make([]byte, computedSize)\n\terr = binary.Read(r, binary.LittleEndian, &tempData)\n\tif err != nil {\n\t\treturn data, nr, size, fmt.Errorf(\"golnk.readSection: read section %d bytes - %s\", sectionSize-uint64(sSize), err.Error())\n\t}\n\n\t// If this is successful, append it to data []byte.\n\tdata = append(data, tempData...)\n\n\t// Create a reader from the unread bytes.\n\tnr = bytes.NewReader(tempData)\n\n\treturn data, nr, int(sectionSize), nil\n}", "func (h *SnailHeader) Size() common.StorageSize {\n\treturn common.StorageSize(unsafe.Sizeof(*h)) + common.StorageSize(len(h.Extra)+\n\t\tlen(h.Publickey)+(h.Difficulty.BitLen()+h.FastNumber.BitLen()+h.FruitDifficulty.BitLen()+\n\t\th.PointerNumber.BitLen()+h.Number.BitLen()+h.Time.BitLen())/8)\n}", "func MaxSize32(length int) int {\n\tnumControlBytes := (length + 3) / 4\n\tmaxNumDataBytes := 4 * length\n\treturn numControlBytes + maxNumDataBytes\n}", "func (v *blockValidator) headerEstimatedSerializedSize(header externalapi.BlockHeader) uint64 {\n\tsize := uint64(0)\n\tsize += 2 // Version (uint16)\n\n\tsize += 8 // number of block levels (uint64)\n\tfor _, blockLevelParents := range header.Parents() {\n\t\tsize += 8 // number of parents in the block level (uint64)\n\t\tsize += uint64(externalapi.DomainHashSize * len(blockLevelParents)) // parents\n\t}\n\n\tsize += externalapi.DomainHashSize // 
HashMerkleRoot\n\tsize += externalapi.DomainHashSize // AcceptedIDMerkleRoot\n\tsize += externalapi.DomainHashSize // UTXOCommitment\n\tsize += 8 // TimeInMilliseconds (int64)\n\tsize += 4 // Bits (uint32)\n\tsize += 8 // Nonce (uint64)\n\n\treturn size\n}", "func isMinAllowedPartSize(size int64) bool {\n\treturn size >= minPartSize\n}", "func (s *SRP) FieldSize() int {\n\treturn s.pf.n * 8\n}", "func (msg *MsgVersion) MaxPayloadSize(pver uint32) uint32 {\n\treturn 48\n}", "func (fm *FieldModelMapInt32OptionalBytes) Size() int {\n if (fm.buffer.Offset() + fm.FBEOffset() + fm.FBESize()) > fm.buffer.Size() {\n return 0\n }\n\n fbeMapOffset := int(fbe.ReadUInt32(fm.buffer.Data(), fm.buffer.Offset() + fm.FBEOffset()))\n if (fbeMapOffset == 0) || ((fm.buffer.Offset() + fbeMapOffset + 4) > fm.buffer.Size()) {\n return 0\n }\n\n fbeMapSize := int(fbe.ReadUInt32(fm.buffer.Data(), fm.buffer.Offset() + fbeMapOffset))\n return fbeMapSize\n}", "func (r *Reader) NextSection() error {\n\tbeginOffset, err := r.fl.Seek(int64(r.nextOffset), io.SeekStart)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvals := make([]byte, 16)\n\tbytesRead, err := r.fl.Read(vals)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// end marker\n\tif bytesRead == 8 && bytes.Equal(vals[:8], []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}) {\n\t\treturn io.EOF\n\t}\n\n\tsectionSize := binary.LittleEndian.Uint64(vals[:8])\n\trowCount := binary.LittleEndian.Uint64(vals[8:16])\n\n\tstr, err := readZeroTerminatedString(r.fl)\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\treturn fmt.Errorf(\"EOF while reading string section (partial: %s)\", str)\n\t\t}\n\t\treturn err\n\t}\n\n\tr.nextOffset = uint64(beginOffset) + sectionSize + 8 // well well, sectionSize includes the rowCount I guess?\n\n\tr.CurrentSection = &Section{\n\t\tName: SectionName(strings.TrimRight(str, string([]byte{0x00}))),\n\t\tOffset: uint64(beginOffset),\n\t\tSize: sectionSize,\n\t\tRowCount: rowCount,\n\t\tBufferSize: sectionSize - 
uint64(len(str)) - 1 /* str-pad 0x00 byte */ - 8,\n\t\tBuffer: r.fl,\n\t}\n\treturn nil\n}", "func (c *Config) MaxSize(stream string) (uint, error) {\n\tkey, err := keyName(stream, \"maxsize\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn c.v.GetSizeInBytes(key), nil\n}", "func (e SszNetworkEncoder) GetMaxChunkSize() uint64 {\n\treturn MaxChunkSize\n}", "func (s *IBBSegment) ReservedTotalSize() uint64 {\n\treturn 2\n}", "func (h *BlockHeader) BlockHeaderLen() int {\n\tnSol := len(h.Solution)\n\treturn 140 + VarIntSerializeSize(uint64(nSol)) + nSol\n}", "func (msg *Block) MaxPayloadLength(pver uint32) uint32 {\n\t// Block header at 80 bytes + transaction count + max transactions which can vary up to the MaxBlockPayload\n\t// (including the block header and transaction count).\n\treturn MaxBlockPayload\n}", "func (u *uploader) initSize() {\n\tu.totalSize = -1\n\n\tswitch r := u.in.Body.(type) {\n\tcase io.Seeker:\n\t\tn, err := aws.SeekerLen(r)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tu.totalSize = n\n\n\t\t// Try to adjust partSize if it is too small and account for\n\t\t// integer division truncation.\n\t\tif u.totalSize/u.cfg.PartSize >= int64(u.cfg.MaxUploadParts) {\n\t\t\t// Add one to the part size to account for remainders\n\t\t\t// during the size calculation. 
e.g odd number of bytes.\n\t\t\tu.cfg.PartSize = (u.totalSize / int64(u.cfg.MaxUploadParts)) + 1\n\t\t}\n\t}\n}", "func (r *Route) MaxHeaderLength() uint16 {\n\treturn r.outgoingNIC.getNetworkEndpoint(r.NetProto()).MaxHeaderLength()\n}", "func (x *gcm) BlockSize() int { return x.blockSize }", "func (s *IBBSegment) BaseTotalSize() uint64 {\n\treturn 4\n}", "func (b IPv4Header) TotalLen() int {\n\treturn int(binary.BigEndian.Uint16(b[2:4]))\n}", "func (s *SE) PMRLBaseTotalSize() uint64 {\n\treturn 4\n}", "func readHeaderDataSize(reader *bytes.Reader, totalSize uint64) (*dataSizeHeader, error) {\n\thdrDataSize := dataSizeHeader{}\n\terr := binary.Read(reader, binary.LittleEndian, &hdrDataSize)\n\n\t/*\n\t * Check if data size header was read.\n\t */\n\tif err != nil {\n\t\tmsg := err.Error()\n\t\treturn nil, fmt.Errorf(\"Failed to read data size header: %s\", msg)\n\t} else {\n\t\tchunkId := hdrDataSize.ChunkID\n\t\tchunkSize := hdrDataSize.ChunkSize\n\t\tsizeRiff := hdrDataSize.SizeRIFF\n\t\texpectedRiffChunkSize := totalSize - 8\n\n\t\t/*\n\t\t * Check data size header for validity.\n\t\t */\n\t\tif chunkId != ID_DATASIZE {\n\t\t\treturn nil, fmt.Errorf(\"Data size header contains invalid chunk id. Expected %#08x, found %#08x.\", ID_DATASIZE, chunkId)\n\t\t} else if chunkSize < MIN_DATASIZE_CHUNK_SIZE {\n\t\t\treturn nil, fmt.Errorf(\"Data size header has too small size. Expected at least %#08x, found %#08x.\", MIN_DATASIZE_CHUNK_SIZE, chunkSize)\n\t\t} else if sizeRiff != expectedRiffChunkSize {\n\t\t\treturn nil, fmt.Errorf(\"Unexpected RIFF chunk size in data size header. 
Expected %#08x, found %0#8x.\", expectedRiffChunkSize, sizeRiff)\n\t\t} else {\n\t\t\treturn &hdrDataSize, nil\n\t\t}\n\n\t}\n\n}", "func (s *SE) IBBEntryPointTotalSize() uint64 {\n\treturn 4\n}", "func (s *SE) IBBMCHBARTotalSize() uint64 {\n\treturn 8\n}", "func (t RequestHeader) Size(version int16) int32 {\n\tvar sz int32\n\tsz += sizeof.Int16 // RequestApiKey\n\tsz += sizeof.Int16 // RequestApiVersion\n\tsz += sizeof.Int32 // CorrelationId\n\tif version >= 1 {\n\t\tsz += sizeof.String(t.ClientId) // ClientId\n\t}\n\treturn sz\n}", "func (cc *ContinueCompress) MaxMessageSize() int {\n\treturn cc.maxMessageSize\n}", "func (h *Header) Size() common.StorageSize {\n\treturn common.StorageSize(unsafe.Sizeof(*h)) + common.StorageSize(len(h.Extra)+\n\t\t(h.SnailNumber.BitLen()+h.Number.BitLen()+h.Time.BitLen())/8)\n}", "func (request *RequestNFrame) Size() int {\n\treturn request.Header.Size() + reqsSize\n}", "func SectorDealsMax(size abi.SectorSize) uint64 {\n\treturn max64(256, uint64(size/DealLimitDenominator))\n}", "func (r *Responder) RequestHeaderFieldsTooLarge() { r.write(http.StatusRequestHeaderFieldsTooLarge) }", "func ValidateBlockLength(blockLength uint32) (bool) {\n if blockLength <= 4294967295 && blockLength > 0 { //2^32 -1 ~ 4GB or maximum possible block length\n return true\n }\n return false\n}", "func (b *BlockSplitterSimple) MaxSize() int64 {\n\treturn b.maxSize\n}", "func (pe *PEFile) calculateHeaderEnd(offset uint32) {\n\tvar rawDataPointers []uint32\n\tfor _, section := range pe.Sections {\n\t\tprd := section.Data.PointerToRawData\n\t\tif prd > uint32(0x0) {\n\t\t\trawDataPointers = append(rawDataPointers, pe.adjustFileAlignment(prd))\n\t\t}\n\t}\n\tminSectionOffset := uint32(0x0)\n\tif len(rawDataPointers) > 0 {\n\t\tminSectionOffset = rawDataPointers[0]\n\t\tfor _, pointer := range rawDataPointers {\n\t\t\tif pointer < minSectionOffset {\n\t\t\t\tminSectionOffset = pointer\n\t\t\t}\n\t\t}\n\t}\n\tif minSectionOffset == 0 || minSectionOffset < 
offset {\n\t\tpe.headerEnd = offset\n\t} else {\n\t\tpe.headerEnd = minSectionOffset\n\t}\n}", "func (h *blockHeader) setSize(v uint32) {\n\tconst mask = 7\n\t*h = (*h)&mask | blockHeader(v<<3)\n}", "func (h *literalsHeader) setSize(regenLen int) {\n\tinBits := bits.Len32(uint32(regenLen))\n\t// Only retain 2 bits\n\tconst mask = 3\n\tlh := uint64(*h & mask)\n\tswitch {\n\tcase inBits < 5:\n\t\tlh |= (uint64(regenLen) << 3) | (1 << 60)\n\t\tif debugEncoder {\n\t\t\tgot := int(lh>>3) & 0xff\n\t\t\tif got != regenLen {\n\t\t\t\tpanic(fmt.Sprint(\"litRegenSize = \", regenLen, \"(want) != \", got, \"(got)\"))\n\t\t\t}\n\t\t}\n\tcase inBits < 12:\n\t\tlh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60)\n\tcase inBits < 20:\n\t\tlh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60)\n\tdefault:\n\t\tpanic(fmt.Errorf(\"internal error: block too big (%d)\", regenLen))\n\t}\n\t*h = literalsHeader(lh)\n}", "func (d *Decoder) IDnSize() ([4]byte, uint32, error) {\n\tvar ID [4]byte\n\tvar blockSize uint32\n\tif err := binary.Read(d.r, binary.BigEndian, &ID); err != nil {\n\t\treturn ID, blockSize, err\n\t}\n\tif err := binary.Read(d.r, binary.BigEndian, &blockSize); err != nil {\n\t\treturn ID, blockSize, err\n\t}\n\treturn ID, blockSize, nil\n}", "func SectionLength(psi []byte) uint16 {\n\toffset := int(1 + PointerField(psi))\n\tif offset >= len(psi) {\n\t\treturn 0\n\t}\n\treturn sectionLength(psi[offset:])\n}", "func MaxBlockBodySize() uint32 {\n\treturn chain.MaxBlockBodySize()\n}", "func (msg *MsgInv) MaxPayloadSize(pver uint32) uint32 {\n\treturn MaxInvSize*InvVectSize + 9\n}", "func (fixedLenByteArrayDecoderTraits) BytesRequired(n int) int {\n\treturn parquet.FixedLenByteArrayTraits.BytesRequired(n)\n}", "func (o FioSpecVolumeVolumeSourceEmptyDirOutput) SizeLimit() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceEmptyDir) *string { return v.SizeLimit }).(pulumi.StringPtrOutput)\n}", "func (h literalsHeader) size() int {\n\treturn int(h >> 
60)\n}", "func (d *digest) BlockSize() int { return 1 }", "func (t ListedGroup16) Size(version int16) int32 {\n\tvar sz int32\n\tsz += sizeof.String(t.GroupId) // GroupId\n\tsz += sizeof.String(t.ProtocolType) // ProtocolType\n\treturn sz\n}", "func ReadSection4(f io.Reader, length int) (section Section4, err error) {\n\terr = read(f, &section.CoordinatesCount, &section.ProductDefinitionTemplateNumber)\n\tif err != nil {\n\t\treturn section, err\n\t}\n\n\tswitch section.ProductDefinitionTemplateNumber {\n\tcase 0:\n\t\terr = read(f, &section.ProductDefinitionTemplate)\n\tdefault:\n\t\t//return section, fmt.Errorf(\"Category definition template number %d not implemented yet\", section.ProductDefinitionTemplateNumber)\n\t\treturn section, nil\n\t}\n\n\tif err != nil {\n\t\treturn section, err\n\t}\n\n\tsection.Coordinates = make([]byte, section.CoordinatesCount)\n\n\treturn section, read(f, &section.Coordinates)\n}", "func (ch *clientSecureChannel) MaxMessageSize() uint32 {\n\treturn ch.maxMessageSize\n}", "func (p *Policy) setMaxBlockSize(ic *interop.Context, args []stackitem.Item) stackitem.Item {\n\tvalue := uint32(toBigInt(args[0]).Int64())\n\tif value > payload.MaxSize {\n\t\tpanic(fmt.Errorf(\"MaxBlockSize cannot be more than the maximum payload size = %d\", payload.MaxSize))\n\t}\n\tok, err := p.checkValidators(ic)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif !ok {\n\t\treturn stackitem.NewBool(false)\n\t}\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\terr = p.setUint32WithKey(ic.DAO, maxBlockSizeKey, value)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tp.isValid = false\n\treturn stackitem.NewBool(true)\n}", "func SetMaxMemory(maxMemory int64) {\n\tmaxMemoryForMultipartForm = maxMemory\n}", "func (t ReassignablePartitionResponse45) Size(version int16) int32 {\n\tvar sz int32\n\tsz += sizeof.Int32 // PartitionIndex\n\tsz += sizeof.Int16 // ErrorCode\n\tsz += sizeof.String(t.ErrorMessage) // ErrorMessage\n\treturn sz\n}", "func (msg *MsgGetCFilterV2) 
MaxPayloadLength(pver uint32) uint32 {\n\t// Block hash.\n\treturn chainhash.HashSize\n}", "func GetMaxBlockSize() int64 {\r\n\treturn converter.StrToInt64(SysString(MaxBlockSize))\r\n}", "func (t AlterConfigsResourceResponse44) Size(version int16) int32 {\n\tvar sz int32\n\tsz += sizeof.Int16 // ErrorCode\n\tsz += sizeof.String(t.ErrorMessage) // ErrorMessage\n\tsz += sizeof.Int8 // ResourceType\n\tsz += sizeof.String(t.ResourceName) // ResourceName\n\treturn sz\n}", "func MaxValSize(max int) Option {\n\treturn func(lc *loadingCache) error {\n\t\tlc.maxValueSize = max\n\t\treturn nil\n\t}\n}", "func (t DescribedGroupMember15) Size(version int16) int32 {\n\tvar sz int32\n\tsz += sizeof.String(t.MemberId) // MemberId\n\tif version >= 4 {\n\t\tsz += sizeof.String(t.GroupInstanceId) // GroupInstanceId\n\t}\n\tsz += sizeof.String(t.ClientId) // ClientId\n\tsz += sizeof.String(t.ClientHost) // ClientHost\n\tsz += sizeof.Bytes(t.MemberMetadata) // MemberMetadata\n\tsz += sizeof.Bytes(t.MemberAssignment) // MemberAssignment\n\treturn sz\n}", "func (t DescribedGroup15) Size(version int16) int32 {\n\tvar sz int32\n\tsz += sizeof.Int16 // ErrorCode\n\tsz += sizeof.String(t.GroupId) // GroupId\n\tsz += sizeof.String(t.GroupState) // GroupState\n\tsz += sizeof.String(t.ProtocolType) // ProtocolType\n\tsz += sizeof.String(t.ProtocolData) // ProtocolData\n\tsz += sizeof.ArrayLength // Members\n\tfor i := len(t.Members) - 1; i >= 0; i-- {\n\t\tsz += t.Members[i].Size(version)\n\t}\n\tif version >= 3 {\n\t\tsz += sizeof.Int32 // AuthorizedOperations\n\t}\n\treturn sz\n}", "func (c *Configurer) GetMaxPVCSize(nn NameNamespace) (pvc *resource.Quantity, unlimited bool) {\n\tc.m.RLock()\n\tdefer c.m.RUnlock()\n\n\tif limit, ok := c.excludedNames[nn]; ok {\n\t\tif limit.Unlimited {\n\t\t\treturn nil, true\n\t\t}\n\n\t\tif limit.PVCSize != nil {\n\t\t\tq := limit.PVCSize.DeepCopy()\n\t\t\tpvc = &q\n\t\t}\n\n\t\treturn pvc, false\n\t}\n\n\tif limit, ok := 
c.excludedNamespaces[nn.Namespace]; ok {\n\t\tif limit.Unlimited {\n\t\t\treturn nil, true\n\t\t}\n\n\t\tif limit.PVCSize != nil {\n\t\t\tq := limit.PVCSize.DeepCopy()\n\t\t\tpvc = &q\n\t\t}\n\t\treturn pvc, false\n\t}\n\tif c.maxPvcSize != nil {\n\t\tq := c.maxPvcSize.DeepCopy()\n\t\tpvc = &q\n\t}\n\n\treturn pvc, false\n}", "func MaxDataBytesNoEvidence(maxBytes int64, keyType crypto.KeyType, valsCount int) int64 {\n\tmaxDataBytes := maxBytes -\n\t\tMaxOverheadForBlock -\n\t\tMaxHeaderBytes -\n\t\tMaxCoreChainLockSize -\n\t\tMaxCommitOverheadBytes\n\n\tif maxDataBytes < 0 {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"Negative MaxDataBytesUnknownEvidence. Block.MaxBytes=%d is too small to accommodate header&lastCommit&evidence=%d\",\n\t\t\tmaxBytes,\n\t\t\t-(maxDataBytes - maxBytes),\n\t\t))\n\t}\n\n\treturn maxDataBytes\n}", "func (p *Policy) getMaxBlockSize(ic *interop.Context, _ []stackitem.Item) stackitem.Item {\n\treturn stackitem.NewBigInteger(big.NewInt(int64(p.GetMaxBlockSizeInternal(ic.DAO))))\n}", "func (o StorageClusterSpecCloudStorageCapacitySpecsOutput) MaxCapacityInGiB() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v StorageClusterSpecCloudStorageCapacitySpecs) *int { return v.MaxCapacityInGiB }).(pulumi.IntPtrOutput)\n}", "func MaxDataBytes(maxBytes int64, keyType crypto.KeyType, evidenceBytes int64, valsCount int) int64 {\n\tmaxDataBytes := maxBytes -\n\t\tMaxOverheadForBlock -\n\t\tMaxHeaderBytes -\n\t\tMaxCoreChainLockSize -\n\t\tMaxCommitOverheadBytes -\n\t\tevidenceBytes\n\n\tif maxDataBytes < 0 {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"Negative MaxDataBytes. 
Block.MaxBytes=%d is too small to accommodate header&lastCommit&evidence=%d\",\n\t\t\tmaxBytes,\n\t\t\t-(maxDataBytes - maxBytes),\n\t\t))\n\t}\n\n\treturn maxDataBytes\n}", "func (packet *Packet18) Size() int {\n\treturn 1\n}", "func HeaderSize(h http.Header) int {\n\tl := 0\n\tfor field, value := range h {\n\t\tl += len(field)\n\t\tfor _, v := range value {\n\t\t\tl += len(v)\n\t\t}\n\t}\n\n\treturn l\n}", "func estimatedHeaderWireSize(hs http.Header) (res int) {\n\tfor h, vs := range hs {\n\t\tres += len(h) + 4 // account for \": \" and \"\\r\\n\"\n\t\tfor _, v := range vs {\n\t\t\tres += len(v)\n\t\t\tbreak // no duplicates allowed\n\t\t}\n\t}\n\treturn res\n}", "func MaxValSize(max int) Option {\n\treturn func(lc *memoryCache) error {\n\t\tlc.maxValueSize = max\n\t\treturn nil\n\t}\n}", "func (o IopingSpecVolumeVolumeSourceEmptyDirOutput) SizeLimit() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceEmptyDir) *string { return v.SizeLimit }).(pulumi.StringPtrOutput)\n}", "func (h *ihash) BlockSize() int { return h.blockSize }", "func (t AclDescription29) Size(version int16) int32 {\n\tvar sz int32\n\tsz += sizeof.String(t.Principal) // Principal\n\tsz += sizeof.String(t.Host) // Host\n\tsz += sizeof.Int8 // Operation\n\tsz += sizeof.Int8 // PermissionType\n\treturn sz\n}", "func (rr *Reader) ReadSizeWithLimit(limit uint32) int {\n\tif rr.Err != nil {\n\t\treturn 0\n\t}\n\tvar size32 uint32\n\tsize32, rr.Err = size32Decode(func() (byte, error) {\n\t\treturn rr.ReadByte(), rr.Err\n\t})\n\tif size32 > limit && rr.Err == nil {\n\t\trr.Err = errors.New(\"read size limit overflow\")\n\t\treturn 0\n\t}\n\treturn int(size32)\n}", "func (o *FieldArrayPoolOptions) Size() int { return o.size }", "func (p *Policy) GetMaxBlockSizeInternal(dao dao.DAO) uint32 {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\tif p.isValid {\n\t\treturn p.maxBlockSize\n\t}\n\treturn p.getUint32WithKey(dao, maxBlockSizeKey)\n}", "func TestBlockHeaderSerializeSize(t 
*testing.T) {\n\tnonce := uint64(123123) // 0x1e0f3\n\tbits := uint32(0x1d00ffff)\n\ttimestamp := mstime.UnixMilliseconds(0x495fab29000)\n\tbaseBlockHdr := &BlockHeader{\n\t\tVersion: 1,\n\t\tParentHashes: []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash},\n\t\tHashMerkleRoot: mainnetGenesisMerkleRoot,\n\t\tAcceptedIDMerkleRoot: &daghash.ZeroHash,\n\t\tUTXOCommitment: &daghash.ZeroHash,\n\t\tTimestamp: timestamp,\n\t\tBits: bits,\n\t\tNonce: nonce,\n\t}\n\n\tgenesisBlockHdr := &BlockHeader{\n\t\tVersion: 1,\n\t\tParentHashes: []*daghash.Hash{},\n\t\tHashMerkleRoot: mainnetGenesisMerkleRoot,\n\t\tAcceptedIDMerkleRoot: &daghash.ZeroHash,\n\t\tUTXOCommitment: &daghash.ZeroHash,\n\t\tTimestamp: timestamp,\n\t\tBits: bits,\n\t\tNonce: nonce,\n\t}\n\ttests := []struct {\n\t\tin *BlockHeader // Block header to encode\n\t\tsize int // Expected serialized size\n\t}{\n\t\t// Block with no transactions.\n\t\t{genesisBlockHdr, 121},\n\n\t\t// First block in the mainnet block DAG.\n\t\t{baseBlockHdr, 185},\n\t}\n\n\tt.Logf(\"Running %d tests\", len(tests))\n\tfor i, test := range tests {\n\t\tserializedSize := test.in.SerializeSize()\n\t\tif serializedSize != test.size {\n\t\t\tt.Errorf(\"BlockHeader.SerializeSize: #%d got: %d, want: \"+\n\t\t\t\t\"%d\", i, serializedSize, test.size)\n\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func (t DescribeConfigsRequest) Size(version int16) int32 {\n\tvar sz int32\n\tsz += sizeof.ArrayLength // Resources\n\tfor i := len(t.Resources) - 1; i >= 0; i-- {\n\t\tsz += t.Resources[i].Size(version)\n\t}\n\tif version >= 1 {\n\t\tsz += sizeof.Bool // IncludeSynoyms\n\t}\n\treturn sz\n}", "func (c PktCnf1) MaxLen() int {\n\treturn int(c & 0xff)\n}", "func (w *Whisper) MaxMessageSize() uint32 {\n\tval, _ := w.settings.Load(maxMsgSizeIdx)\n\treturn val.(uint32)\n}" ]
[ "0.64527285", "0.56640345", "0.5559902", "0.5421008", "0.5421008", "0.5341554", "0.5331863", "0.53089076", "0.519577", "0.5166597", "0.51638806", "0.5088928", "0.5074577", "0.50688905", "0.50639635", "0.5061279", "0.50553644", "0.5040562", "0.5002771", "0.49799952", "0.49752024", "0.49535057", "0.49268168", "0.49268168", "0.4899951", "0.48966953", "0.48922592", "0.48745963", "0.48695055", "0.4839397", "0.48359692", "0.48181736", "0.47965342", "0.4769312", "0.47568843", "0.4708391", "0.4693535", "0.46868917", "0.46811512", "0.46751276", "0.46716633", "0.465734", "0.46409968", "0.46396565", "0.4593983", "0.4592881", "0.45792848", "0.4576392", "0.45631498", "0.4560069", "0.45577434", "0.45469505", "0.4546222", "0.4545396", "0.45445228", "0.45430136", "0.45412958", "0.4538766", "0.45224753", "0.4521533", "0.45182842", "0.45101395", "0.45027676", "0.45027435", "0.4491092", "0.44886464", "0.4472495", "0.44717517", "0.44705015", "0.446708", "0.44628704", "0.4460413", "0.44533595", "0.44520155", "0.44512802", "0.44502598", "0.44437385", "0.44390392", "0.4438729", "0.44323426", "0.44320676", "0.44310668", "0.44299176", "0.44271156", "0.44240788", "0.4422555", "0.44187742", "0.44148064", "0.43967783", "0.43957248", "0.439556", "0.43897873", "0.4389476", "0.43839282", "0.4383859", "0.43834355", "0.4379695", "0.43766767", "0.43728888", "0.43669283" ]
0.6830897
0
WriteAsCarV1 is a write option which makes a CAR interface (blockstore or storage) write the output as a CARv1 only, with no CARv2 header or index. Indexing is used internally during write but is discarded upon finalization. Note that this option only affects the storage interfaces (blockstore or storage), and is ignored by the root gocar/v2 package.
func WriteAsCarV1(asCarV1 bool) Option { return func(o *Options) { o.WriteAsCarV1 = asCarV1 } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *RAMOutputStream) WriteToV1(bytes []byte) error {\n\terr := r.flush()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tend := int(r.file.length)\n\tpos, buffer, bytesUpto := 0, 0, 0\n\n\tfor pos < end {\n\t\tlength := r.bufferSize\n\t\tnextPos := pos + length\n\t\tif nextPos > end {\n\t\t\tlength = end - pos\n\t\t}\n\n\t\tsrc := r.file.getBuffer(buffer)[:length]\n\t\tcopy(bytes[bytesUpto:], src)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbuffer++\n\t\tbytesUpto += length\n\t\tpos = nextPos\n\t}\n\treturn nil\n}", "func (crc *CasbinRuleCreate) SetV1(s string) *CasbinRuleCreate {\n\tcrc.mutation.SetV1(s)\n\treturn crc\n}", "func (c ConfChange) AsV1() (ConfChange, bool) {\n\treturn c, true\n}", "func (_PBridge *PBridgeTransactor) UpgradeContractS1(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _PBridge.contract.Transact(opts, \"upgradeContractS1\")\n}", "func (c *Controller) Write(value byte) {\n\tc.strobe = value&1 == 1\n\tif c.strobe {\n\t\tc.index = 0\n\t}\n}", "func (d *Encoder) One(v interface{}) error {\n\theader := deriveHeader(v)\n\trecord := makeRecord(v, header)\n\tif !d.headWritten {\n\t\td.Csvwriter.Write(header)\n\t\td.headWritten = true\n\t}\n\n\terr := d.Csvwriter.Write(record)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (cc *CarCreate) SetCarNo(s string) *CarCreate {\n\tcc.mutation.SetCarNo(s)\n\treturn cc\n}", "func NewV1Encoder(b []byte) *V1Encoder {\n\treturn &V1Encoder{\n\t\tdata: b,\n\t}\n}", "func (a *Client) PutCredentialV1(params *PutCredentialV1Params) (*PutCredentialV1OK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPutCredentialV1Params()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"putCredentialV1\",\n\t\tMethod: \"PUT\",\n\t\tPathPattern: \"/v1/credentials\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: 
[]string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &PutCredentialV1Reader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*PutCredentialV1OK), nil\n\n}", "func (w *Writer) Put1(n int) *Writer {\n\tif n < 0 || 1<<8 <= n {\n\t\tpanic(\"stor.Writer.Put1 value outside range\")\n\t}\n\tw.buf = append(w.buf,\n\t\tbyte(n))\n\treturn w\n}", "func (w *RWWrapper) WriteHeader(statusCode int) {\n\tif w.statusWritten {\n\t\treturn\n\t}\n\n\tw.configureHeader()\n\tw.rw.WriteHeader(statusCode)\n\tw.statusWritten = true\n}", "func (m *DigicamControl) Write(version int) (output []byte, err error) {\n\tvar buffer bytes.Buffer\n\n\t// Ensure only Version 1 or Version 2 were specified\n\tif version != 1 && version != 2 {\n\t\terr = mavlink2.ErrUnsupportedVersion\n\t\treturn\n\t}\n\n\t// Don't attempt to Write V2 messages to V1 bodies\n\tif m.GetID() > 255 && version < 2 {\n\t\terr = mavlink2.ErrEncodeV2MessageV1Frame\n\t\treturn\n\t}\n\n\terr = binary.Write(&buffer, binary.LittleEndian, *m)\n\tif err != nil {\n\t\treturn\n\t}\n\n\toutput = buffer.Bytes()\n\n\t// V1 uses fixed message lengths and does not include any extension fields\n\t// Truncate the byte slice to the correct length\n\t// This also removes the trailing extra byte written for HasExtensionFieldValues\n\tif version == 1 {\n\t\toutput = output[:m.getV1Length()]\n\t}\n\n\t// V2 uses variable message lengths and includes extension fields\n\t// The variable length is caused by truncating any trailing zeroes from\n\t// the end of the message before it is added to a frame\n\tif version == 2 {\n\t\t// Set HasExtensionFieldValues to zero so that it doesn't interfere with V2 truncation\n\t\toutput[len(output)-1] = 0\n\t\toutput = util.TruncateV2(buffer.Bytes())\n\t}\n\n\treturn\n\n}", "func (mb *client) WriteSingleCoil(address, value uint16) (results []byte, err error) {\n\t// The requested ON/OFF state can only 
be 0xFF00 and 0x0000\n\tif value != 0xFF00 && value != 0x0000 {\n\t\terr = fmt.Errorf(\"modbus: state '%v' must be either 0xFF00 (ON) or 0x0000 (OFF)\", value)\n\t\treturn\n\t}\n\trequest := ProtocolDataUnit{\n\t\tFunctionCode: FuncCodeWriteSingleCoil,\n\t\tData: dataBlock(address, value),\n\t}\n\tresponse, err := mb.send(&request)\n\tif err != nil {\n\t\treturn\n\t}\n\t// Fixed response length\n\tif len(response.Data) != 4 {\n\t\terr = fmt.Errorf(\"modbus: response data size '%v' does not match expected '%v'\", len(response.Data), 4)\n\t\treturn\n\t}\n\trespValue := binary.BigEndian.Uint16(response.Data)\n\tif address != respValue {\n\t\terr = fmt.Errorf(\"modbus: response address '%v' does not match request '%v'\", respValue, address)\n\t\treturn\n\t}\n\tresults = response.Data[2:]\n\trespValue = binary.BigEndian.Uint16(results)\n\tif value != respValue {\n\t\terr = fmt.Errorf(\"modbus: response value '%v' does not match request '%v'\", respValue, value)\n\t\treturn\n\t}\n\treturn\n}", "func (_PBridge *PBridgeSession) UpgradeContractS1() (*types.Transaction, error) {\n\treturn _PBridge.Contract.UpgradeContractS1(&_PBridge.TransactOpts)\n}", "func PutBufioWriter1K(w *bufio.Writer) bool {\n\tif w == nil {\n\t\treturn false\n\t}\n\tif l := w.Size(); l < 1024 || l >= 2048 {\n\t\treturn PutBufioWriter(w)\n\t}\n\tw.Reset(nil) // to not keep the parent writer alive\n\tputw1K(w)\n\treturn true\n}", "func (t *Type1) Write(w io.Writer) error {\n\tvar err error\n\tfor i := 0; i < 3; i++ {\n\t\terr = t.writeSegment(w, i)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tpostfix := []byte{128, 3}\n\t_, err = w.Write(postfix)\n\treturn err\n}", "func (r1cs *R1CS) WriteTo(w io.Writer) (int64, error) {\n\t_w := ioutils.WriterCounter{W: w} // wraps writer to count the bytes written\n\tencoder := cbor.NewEncoder(&_w)\n\n\t// encode our object\n\terr := encoder.Encode(r1cs)\n\treturn _w.N, err\n}", "func v1beta1Tov1(csc *storagev1beta1.CSIStorageCapacity) 
*storagev1.CSIStorageCapacity {\n\treturn &storagev1.CSIStorageCapacity{\n\t\tObjectMeta: csc.ObjectMeta,\n\t\tNodeTopology: csc.NodeTopology,\n\t\tStorageClassName: csc.StorageClassName,\n\t\tCapacity: csc.Capacity,\n\t\tMaximumVolumeSize: csc.MaximumVolumeSize,\n\t}\n}", "func V1() (*v1.Client, error) {\n\treturn nil, docker.ErrDockerNotCompiled\n}", "func (_PBridge *PBridgeTransactorSession) UpgradeContractS1() (*types.Transaction, error) {\n\treturn _PBridge.Contract.UpgradeContractS1(&_PBridge.TransactOpts)\n}", "func (a *api) DescribeClassroomV1(ctx context.Context,\n\treq *grpcApi.DescribeClassroomV1Request) (res *grpcApi.DescribeClassroomV1Response, err error) {\n\n\tdefer utils.LogGrpcCall(\"CreateClassroomV1\", &req, &res, &err)\n\n\tif err = req.Validate(); err != nil {\n\n\t\terr = status.Error(codes.InvalidArgument, err.Error())\n\t\treturn nil, err\n\t}\n\n\tclassroom, err := a.classroomRepo.DescribeClassroom(ctx, req.ClassroomId)\n\tif err != nil {\n\n\t\terr = status.Error(codes.Unavailable, err.Error())\n\t\treturn nil, err\n\t}\n\n\tprotoClassroom := classroom.ToProtoClassroom()\n\n\tres = &grpcApi.DescribeClassroomV1Response{Classroom: protoClassroom}\n\treturn res, nil\n}", "func (s *SmartContract) CreateCar(ctx contractapi.TransactionContextInterface, carNumber string, make string) error {\n\tcar := Car{\n\t\tMessage: make,\n\t}\n\n\tcarAsBytes, _ := json.Marshal(car)\n\n\treturn ctx.GetStub().PutState(carNumber, carAsBytes)\n}", "func (r *restApiImpl) V1() (v1.CoreV1) {\n return r.v1\n}", "func ImplementationWrapAsn1WriterCopy(pointer unsafe.Pointer) (Asn1Writer, error) {\n\tctx := (*C.vscf_impl_t)(pointer)\n\tshallowCopy := C.vscf_impl_shallow_copy(ctx)\n\treturn ImplementationWrapAsn1Writer(unsafe.Pointer(shallowCopy))\n}", "func (c *CardScanClient) UpdateOne(cs *CardScan) *CardScanUpdateOne {\n\tmutation := newCardScanMutation(c.config, OpUpdateOne, withCardScan(cs))\n\treturn &CardScanUpdateOne{config: c.config, hooks: c.Hooks(), mutation: 
mutation}\n}", "func (c *CompressingResponseWriter) WriteHeader(status int) {\n\tc.writer.WriteHeader(status)\n}", "func (a *Client) CreateIOAExclusionsV1(params *CreateIOAExclusionsV1Params, opts ...ClientOption) (*CreateIOAExclusionsV1OK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCreateIOAExclusionsV1Params()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"createIOAExclusionsV1\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/policy/entities/ioa-exclusions/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &CreateIOAExclusionsV1Reader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*CreateIOAExclusionsV1OK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for createIOAExclusionsV1: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func registerModelBridge1Flags(depth int, cmdPrefix string, cmd *cobra.Command) error {\n\n\tif err := registerBridge1ID(depth, cmdPrefix, cmd); err != nil {\n\t\treturn err\n\t}\n\n\tif err := registerBridge1Interfaces(depth, cmdPrefix, cmd); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c *Clientset) RbacV1() rbacv1.RbacV1Interface {\n\treturn c.rbacV1\n}", "func (mr *MockInterfaceMockRecorder) BkbcsV1() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"BkbcsV1\", reflect.TypeOf((*MockInterface)(nil).BkbcsV1))\n}", "func (c *Controller) Write(path string, v interface{}) error {\n\n\tvar buf bytes.Buffer\n\tvar err error\n\n\tswitch strings.ToLower(path[strings.LastIndex(path, \".\"):]) {\n\tcase \".json\":\n\t\terr = json.NewEncoder(&buf).Encode(v)\n\tcase \".gob\":\n\t\terr = gob.NewEncoder(&buf).Encode(v)\n\tdefault:\n\t\treturn errors.New(\"invalid file extension\")\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr := bytes.NewReader(buf.Bytes())\n\n\tinput := &s3.PutObjectInput{\n\t\tBody: aws.ReadSeekCloser(r),\n\t\tBucket: aws.String(c.bucket),\n\t\tKey: aws.String(path),\n\t\tServerSideEncryption: aws.String(\"AES256\"),\n\t}\n\n\tresult, err := c.c3svc.PutObject(input)\n\tif err != nil {\n\t\treturn util.Err(err)\n\t}\n\n\tc.verIDs[path] = result.VersionId\n\n\treturn nil\n}", "func NewMBC1(rom []byte) *MBC1 {\n\treturn &MBC1{\n\t\trom: rom,\n\t\tram: make([]byte, 0x8000),\n\t\tromBankNumber: 1,\n\t\tromBanking: true,\n\t}\n}", "func (m *MockInterface) BkbcsV1() v1.BkbcsV1Interface {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"BkbcsV1\")\n\tret0, _ := ret[0].(v1.BkbcsV1Interface)\n\treturn ret0\n}", "func (es *ElasticClientV5) WriteDirect(index string, id string,\n\ttyp string, v interface{}) error {\n\t_, err := es.client.Index().Index(index).Type(typ).Id(id).BodyJson(v).Do(context.Background())\n\treturn 
err\n}", "func (rc *RentalCreate) SetCar(c *Car) *RentalCreate {\n\treturn rc.SetCarID(c.ID)\n}", "func (w *reqResWriter) writeArg1(arg Output) error {\n\treturn w.writeArg(arg, false, reqResWriterPreArg1, reqResWriterPreArg2)\n}", "func (c *Car) Name() string {\n\treturn \"car\"\n}", "func (c *ChannelData) WriteHeader() {\n\tif len(c.Raw) < channelDataHeaderSize {\n\t\t// Making WriteHeader call valid even when c.Raw\n\t\t// is nil or len(c.Raw) is less than needed for header.\n\t\tc.grow(channelDataHeaderSize)\n\t}\n\t// Early bounds check to guarantee safety of writes below.\n\t_ = c.Raw[:channelDataHeaderSize]\n\tbinary.BigEndian.PutUint16(c.Raw[:channelDataNumberSize], uint16(c.Number))\n\tbinary.BigEndian.PutUint16(c.Raw[channelDataNumberSize:channelDataHeaderSize],\n\t\tuint16(len(c.Data)),\n\t)\n}", "func (mapper *MapperMMC1) WriteByte(addr uint16, data byte) {\n\tif addr < 0x6000 {\n\t\tif addr <= 0x0FFF {\n\t\t\tmapper.memory.cartridge.chr[mapper.getCHR1Index(addr)] = data\n\t\t} else if addr <= 0x1FFF {\n\t\t\tmapper.memory.cartridge.chr[mapper.getCHR2Index(addr)] = data\n\t\t} else if addr <= 0x2FFF {\n\t\t\tmapper.memory.ppu.vram[TranslateVRamAddress(addr, mapper.mirrorMode)] = data\n\t\t}\n\t} else if addr <= 0x7FFF {\n\t\tif mapper.registerPRG&0x10 == 0 {\n\t\t\tmapper.prgRAM[addr-0x6000] = data\n\t\t}\n\t} else {\n\t\tif data&0x80 > 0 {\n\t\t\t// clear shift register\n\t\t\tmapper.shiftNumber = 0\n\t\t\tmapper.shiftRegister = 0\n\t\t} else {\n\t\t\t// add to shift register\n\t\t\tmapper.shiftRegister = mapper.shiftRegister | ((data & 0x1) << uint(mapper.shiftNumber))\n\t\t\tmapper.shiftNumber++\n\t\t}\n\n\t\tif mapper.shiftNumber == 5 {\n\t\t\tswitch (addr >> 13) & 0x3 {\n\t\t\tcase 0:\n\t\t\t\tmapper.registerControl = mapper.shiftRegister\n\n\t\t\t\tswitch mapper.registerControl & 0x3 {\n\t\t\t\tcase 0:\n\t\t\t\t\tmapper.mirrorMode = MirrorSingleA\n\t\t\t\tcase 1:\n\t\t\t\t\tmapper.mirrorMode = MirrorSingleB\n\t\t\t\tcase 
2:\n\t\t\t\t\tmapper.mirrorMode = MirrorVertical\n\t\t\t\tcase 3:\n\t\t\t\t\tmapper.mirrorMode = MirrorHorizontal\n\t\t\t\t}\n\t\t\tcase 1:\n\t\t\t\tmapper.registerCHR0 = mapper.shiftRegister\n\t\t\tcase 2:\n\t\t\t\tmapper.registerCHR1 = mapper.shiftRegister\n\t\t\tcase 3:\n\t\t\t\tmapper.registerPRG = mapper.shiftRegister\n\t\t\t}\n\t\t\tmapper.shiftNumber = 0\n\t\t\tmapper.shiftRegister = 0\n\t\t}\n\t}\n}", "func (t *Type1) writeSegment(w io.Writer, segment int) error {\n\tl := len(t.Segments[segment])\n\tvar asciiBinary byte\n\tif segment == 1 {\n\t\tasciiBinary = 2\n\t} else {\n\t\tasciiBinary = 1\n\t}\n\tprefix := []byte{128, asciiBinary, byte(l & 0xFF), byte(l >> 8 & 0xFF), byte(l >> 16 & 0xFF), byte(l >> 24 & 0xFF)}\n\t_, err := w.Write(prefix)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(t.Segments[segment])\n\treturn err\n}", "func (c *Client) IsV1API() bool {\n\treturn c.isV1\n}", "func (w *BodylessResponseWriter) WriteHeader(s int) {\n\tif w.wroteHeader {\n\t\treturn\n\t}\n\tw.wroteHeader = true\n\tw.status = s\n\n\tw.Header().Del(\"Content-Type\")\n\tw.ResponseWriter.WriteHeader(s)\n}", "func (rc *RentalCreate) SetCarID(u uuid.UUID) *RentalCreate {\n\trc.mutation.SetCarID(u)\n\treturn rc\n}", "func (c *Clientset) CoreV1() corev1.CoreV1Interface {\n\treturn c.coreV1\n}", "func AddCar(Myconn *adabas.Connection, vendeur string, modele string, couleur string) string {\n\t// creating Store Request with MAP\n\tstoreRequest, cerr := Myconn.CreateMapStoreRequest(&Carinfo{})\n\tif cerr != nil {\n\t\treturn cerr.Error()\n\t}\n\t// Assigning query's fields\n\terr := storeRequest.StoreFields(\"Vendor,Model,Color\")\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\tenreg := &carrec{Vendor: vendeur, Model: modele, Color: couleur}\n\tsterr := storeRequest.StoreData(enreg)\n\tif sterr != nil {\n\t\treturn sterr.Error()\n\t}\n\ttranserr := storeRequest.EndTransaction()\n\tif transerr != nil {\n\t\treturn transerr.Error()\n\t}\n\treturn \"\"\n}", "func (a 
*api) CreateClassroomV1(ctx context.Context,\n\treq *grpcApi.CreateClassroomV1Request) (res *grpcApi.CreateClassroomV1Response, err error) {\n\n\tdefer utils.LogGrpcCall(\"CreateClassroomV1\", &req, &res, &err)\n\tdefer func() {\n\t\t_ = a.logProducer.Send(producer.Created, req, res, err)\n\t}()\n\n\tif err = req.Validate(); err != nil {\n\n\t\terr = status.Error(codes.InvalidArgument, err.Error())\n\t\treturn nil, err\n\t}\n\n\tclassroomId, err := a.classroomRepo.AddClassroom(ctx, models.Classroom{\n\t\tTenantId: req.TenantId,\n\t\tCalendarId: req.CalendarId,\n\t})\n\tif err != nil {\n\n\t\terr = status.Error(codes.Unavailable, err.Error())\n\t\treturn nil, err\n\t}\n\n\tmetrics.IncCreateCounter()\n\n\tres = &grpcApi.CreateClassroomV1Response{ClassroomId: classroomId}\n\treturn res, nil\n}", "func writeResponseHeader(header ResponseHeader, rw *bufio.ReadWriter) error {\n\terr := rw.WriteByte(header.Magic)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = rw.WriteByte(header.Opcode)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = rw.WriteByte(GetNthByteFromUint16(header.KeyLength, 0))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = rw.WriteByte(GetNthByteFromUint16(header.KeyLength, 1))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = rw.WriteByte(header.ExtraLength)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = rw.WriteByte(header.DataType)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = rw.WriteByte(GetNthByteFromUint16(header.Status, 0))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = rw.WriteByte(GetNthByteFromUint16(header.Status, 1))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor pos := 0; pos < 4; pos++ {\n\t\terr = rw.WriteByte(GetNthByteFromUint32(header.TotalBodyLength, pos))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor pos := 0; pos < 4; pos++ {\n\t\terr = rw.WriteByte(GetNthByteFromUint32(header.Opaque, pos))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tl := uint32(header.CAS >> 32)\n\tr := uint32(header.CAS & 
0x00000000ffffffff)\n\tfor pos := 0; pos < 4; pos++ {\n\t\terr = rw.WriteByte(GetNthByteFromUint32(l, pos))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfor pos := 0; pos < 4; pos++ {\n\t\terr = rw.WriteByte(GetNthByteFromUint32(r, pos))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (r *Response) WriteHeader(statusCode int) {\n\tif r.committed {\n\t\treturn\n\t}\n\tr.committed = true\n\tr.statusCode = statusCode\n\tr.ResponseWriter.WriteHeader(r.statusCode)\n}", "func (d *Device) SetDCDC1VoltageSet(a uint8) {\n\td.write1Byte(RegDCDC1VoltageSet, a)\n}", "func (c *V1) Encode(w io.Writer, prettify bool) error {\n\tencoder := json.NewEncoder(w)\n\tif prettify {\n\t\tencoder.SetIndent(\"\", strings.Repeat(\" \", 2))\n\t}\n\treturn encoder.Encode(c)\n}", "func InitV1(opts *InitOpts) error {\n\tctx := context.Background()\n\n\tctxResource := &model.ContextResource{}\n\tif err := ctxResource.UpdateNamespace(opts.Namespace); err != nil {\n\t\treturn err\n\t}\n\n\tif err := ctxResource.UpdateContext(opts.Context); err != nil {\n\t\treturn err\n\t}\n\tctxOptions := &contextCMD.ContextOptions{\n\t\tContext: ctxResource.Context,\n\t\tNamespace: ctxResource.Namespace,\n\t\tShow: true,\n\t}\n\tif err := contextCMD.NewContextCommand().Run(ctx, ctxOptions); err != nil {\n\t\treturn err\n\t}\n\n\topts.Language = os.Getenv(model.OktetoLanguageEnvVar)\n\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\topts.Workdir = cwd\n\n\tmc := &ManifestCommand{}\n\tif err := mc.RunInitV1(ctx, opts); err != nil {\n\t\treturn err\n\t}\n\n\toktetoLog.Success(fmt.Sprintf(\"okteto manifest (%s) created\", opts.DevPath))\n\n\tif opts.DevPath == utils.DefaultManifest {\n\t\toktetoLog.Information(\"Run 'okteto up' to activate your development container\")\n\t} else {\n\t\toktetoLog.Information(\"Run 'okteto up -f %s' to activate your development container\", opts.DevPath)\n\t}\n\treturn nil\n}", "func (c *Clientset) CoordinationV1() 
coordinationv1.CoordinationV1Interface {\n\treturn c.coordinationV1\n}", "func ImplementationWrapAsn1Writer(pointer unsafe.Pointer) (Asn1Writer, error) {\n\tctx := (*C.vscf_impl_t)(pointer)\n\tif !C.vscf_asn1_writer_is_implemented(ctx) {\n\t\treturn nil, &FoundationError{-1, \"Given C implementation does not implement interface Asn1Writer.\"}\n\t}\n\n\timplTag := C.vscf_impl_tag(ctx)\n\tswitch implTag {\n\tcase C.vscf_impl_tag_ASN1WR:\n\t\treturn NewAsn1wrWithCtx(unsafe.Pointer(ctx)), nil\n\tdefault:\n\t\treturn nil, &FoundationError{-1, \"Unexpected C implementation cast to the Go implementation.\"}\n\t}\n}", "func (c *CarBuilder) SetStructure() BuildProcess {\n\tc.v.Structure = \"Car\"\n\treturn c\n}", "func (p *Plugin) IsV1() bool {\n\treturn false\n}", "func GenGenesisCar(cfg *GenesisCfg, out io.Writer) (*RenderedGenInfo, error) {\n\tctx := context.Background()\n\n\tbstore := blockstoreutil.WrapIDStore(blockstore.NewBlockstore(ds.NewMapDatastore()))\n\tdserv := dag.NewDAGService(bserv.New(bstore, offline.Exchange(bstore)))\n\tinfo, err := GenGen(ctx, cfg, bstore)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Ignore cids that make it on chain but that should not be read through\n\t// and therefore don't have corresponding blocks in store\n\tignore := cid.NewSet()\n\tfor _, m := range cfg.Miners {\n\t\tfor _, comm := range m.CommittedSectors {\n\t\t\tignore.Add(comm.CommR)\n\t\t\tignore.Add(comm.CommD)\n\t\t\tignore.Add(comm.DealCfg.CommP)\n\t\t}\n\t}\n\n\tignoreWalkFunc := func(nd format.Node) (out []*format.Link, err error) {\n\t\tlinks := nd.Links()\n\t\tvar filteredLinks []*format.Link\n\t\tfor _, l := range links {\n\t\t\tif ignore.Has(l.Cid) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfilteredLinks = append(filteredLinks, l)\n\t\t}\n\n\t\treturn filteredLinks, nil\n\t}\n\n\treturn info, car.WriteCarWithWalker(ctx, dserv, []cid.Cid{info.GenesisCid}, out, ignoreWalkFunc)\n}", "func NewMBC1(rom []byte, ram []byte) (*MBC1, error) {\n\tif rom == nil || ram == nil 
{\n\t\tpanic(fmt.Errorf(\"the rom or ram are nil\"))\n\t}\n\n\tif len(rom) < 2*romBankSize {\n\t\treturn nil, errors.E(\"rom size insufficient: must contain at least two banks\", errors.Cart)\n\t}\n\n\t// The ROM bank is initialized to 0x01 to avoid access to ROM banks 0x00, 0x20, 0x40 and 0x60\n\t// from the switchable ROM addresses on startup.\n\t// The SetByte method verifies that the lower two bits of the bank are also != 00 to impose this\n\t// after startup.\n\treturn &MBC1{rom: rom, ram: ram, romBank: 0x01}, nil\n}", "func (s *SmartContract) CreateCar(ctx contractapi.TransactionContextInterface, carNumber string, make string, model string, colour string, owner string) error {\n\tcar := Car{\n\t\tMake: make,\n\t\tModel: model,\n\t\tColour: colour,\n\t\tOwner: owner,\n\t}\n\n\tcarAsBytes, _ := json.Marshal(car)\n\n\treturn ctx.GetStub().PutState(carNumber, carAsBytes)\n}", "func (d *Device) SetGPIO1Control(a uint8) {\n\td.write1Byte(RegGPIO1Control, a)\n}", "func (z *zpoolctl) Upgrade1(ctx context.Context, v bool) *execute {\n\targs := []string{\"upgrade\"}\n\tif v {\n\t\targs = append(args, \"-v\")\n\t}\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}", "func (r *response) WriteHeader(status int) {\n\tr.wrote = true\n\tr.rw.WriteHeader(status)\n}", "func NewV1Client(region string) (*vpcv1.VpcV1, error) {\n\tsvcEndpoint := \"https://\" + region + \".iaas.cloud.ibm.com/v1\"\n\n\treturn vpcv1.NewVpcV1(&vpcv1.VpcV1Options{\n\t\tServiceName: \"vpcs\",\n\t\tAuthenticator: iam.GetIAMAuth(),\n\t\tURL: svcEndpoint,\n\t})\n}", "func RVSA1() representation.Chooser {\n\treturn rvsa1{}\n}", "func (a *Client) UpdateIOAExclusionsV1(params *UpdateIOAExclusionsV1Params, opts ...ClientOption) (*UpdateIOAExclusionsV1OK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewUpdateIOAExclusionsV1Params()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"updateIOAExclusionsV1\",\n\t\tMethod: \"PATCH\",\n\t\tPathPattern: 
\"/policy/entities/ioa-exclusions/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &UpdateIOAExclusionsV1Reader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*UpdateIOAExclusionsV1OK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for updateIOAExclusionsV1: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (t *ssh2Server) WriteHeader(s *Stream, md metadata.MD) error {\n\n\tlogrus.Debugln(\"WriteHeader\")\n\treturn nil\n\n\t// =================================== original code ======================================\n\t// s.mu.Lock()\n\t// if s.headerOk || s.state == streamDone {\n\t// \ts.mu.Unlock()\n\t// \treturn ErrIllegalHeaderWrite\n\t// }\n\t// s.headerOk = true\n\t// s.mu.Unlock()\n\t// if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {\n\t// \treturn err\n\t// }\n\t// t.hBuf.Reset()\n\t// t.hEnc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"200\"})\n\t// t.hEnc.WriteField(hpack.HeaderField{Name: \"content-type\", Value: \"application/grpc\"})\n\t// for k, v := range md {\n\t// \tt.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})\n\t// }\n\t// if err := t.writeHeaders(s, t.hBuf, false); err != nil {\n\t// \treturn err\n\t// }\n\t// t.writableChan <- 0\n\t// return nil\n}", "func (c *Conn) WriteHeader(header protocommon.Header) error {\n\terr := protocommon.HeaderEncode(c.writeBuf, header)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\treturn c.writeBuf.Flush()\n}", "func (cs *Cars) ListCars(stub shim.ChaincodeStubInterface) ([]byte, error) {\n\tidxCarsByte, _ := stub.GetState(\"idx_Cars\")\n\tcarIDs := strings.Split(string(idxCarsByte), \",\")\n\tcarList := \"{\\\"Cars\\\":\"\n\tfor i, carID := range carIDs {\n\t\tif i != 0 {\n\t\t\tcarList = carList + \",\"\n\t\t}\n\t\tcJsonIndent, _ := stub.GetState(carID)\n\t\tcarList = carList + string(cJsonIndent)\n\t}\n\tcarList = carList + \"\\n}\"\n\treturn []byte(carList), nil\n}", "func (w *responseWriter) WriteHeader(s int) {\n\tif w.wroteHeader {\n\t\treturn\n\t}\n\tw.wroteHeader = true\n\tw.status = s\n\n\tif s == http.StatusNoContent {\n\t\tw.ResponseWriter = &BodylessResponseWriter{ResponseWriter: w.ResponseWriter}\n\t}\n\n\t// Set Content-Type header if missing and not using the BodylessResponseWriter.\n\tif _, ok := w.ResponseWriter.(*BodylessResponseWriter); !ok && w.Header().Get(\"Content-Type\") == \"\" {\n\t\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\t}\n\tw.ResponseWriter.WriteHeader(s)\n}", "func ConvertToCAR(ctx context.Context, in io.Reader, out io.Writer) (cid.Cid, uint64, error) {\n\treturn convertToCAR(ctx, in, out, false)\n}", "func (w *Writer) WriteHeader(hdr *index.Header) error {\n\t// Flush out preceding file's content before starting new range.\n\tif !w.first {\n\t\tif err := w.tw.Flush(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tw.first = false\n\t// Setup index header for next file.\n\t// (bryce) might want to deep copy the passed in header.\n\tw.hdr = &index.Header{\n\t\tHdr: hdr.Hdr,\n\t\tIdx: &index.Index{DataOp: &index.DataOp{}},\n\t}\n\tw.cw.StartRange(w.callback(w.hdr))\n\tif err := w.tw.WriteHeader(w.hdr.Hdr); err != nil {\n\t\treturn err\n\t}\n\t// Setup first tag for header.\n\tw.hdr.Idx.DataOp.Tags = []*index.Tag{&index.Tag{Id: headerTag, SizeBytes: w.cw.RangeSize()}}\n\treturn nil\n}", "func (d *Encoder) All(v interface{}) error {\n\theader := deriveHeader(v)\n\trecord := 
makeRecords(v, header)\n\tif !d.headWritten {\n\t\td.Csvwriter.Write(header)\n\t\td.headWritten = true\n\t}\n\n\terr := d.Csvwriter.WriteAll(record)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (w *Writer) WriteHeader(hdr *Header) (err error) {\n\tif w.closed {\n\t\treturn ErrWriteAfterClose\n\t}\n\tif w.err == nil {\n\t\tw.Flush()\n\t}\n\tif w.err != nil {\n\t\treturn w.err\n\t}\n\n\tif hdr.Name != headerEOF {\n\t\t// TODO: should we be mutating hdr here?\n\t\t// ensure all inodes are unique\n\t\tw.inode++\n\t\tif hdr.Inode == 0 {\n\t\t\thdr.Inode = w.inode\n\t\t}\n\n\t\t// ensure file type is set\n\t\tif hdr.Mode&^ModePerm == 0 {\n\t\t\thdr.Mode |= ModeRegular\n\t\t}\n\n\t\t// ensure regular files have at least 1 inbound link\n\t\tif hdr.Links < 1 && hdr.Mode.IsRegular() {\n\t\t\thdr.Links = 1\n\t\t}\n\t}\n\n\tw.nb = hdr.Size\n\tw.pad, w.err = writeSVR4Header(w.w, hdr)\n\treturn\n}", "func (rr *responseRecorder) WriteHeader(statusCode int) {\n\tif rr.wroteHeader {\n\t\treturn\n\t}\n\n\t// save statusCode always, in case HTTP middleware upgrades websocket\n\t// connections by manually setting headers and writing status 101\n\trr.statusCode = statusCode\n\n\t// 1xx responses aren't final; just informational\n\tif statusCode < 100 || statusCode > 199 {\n\t\trr.wroteHeader = true\n\n\t\t// decide whether we should buffer the response\n\t\tif rr.shouldBuffer == nil {\n\t\t\trr.stream = true\n\t\t} else {\n\t\t\trr.stream = !rr.shouldBuffer(rr.statusCode, rr.ResponseWriterWrapper.Header())\n\t\t}\n\t}\n\n\t// if informational or not buffered, immediately write header\n\tif rr.stream || (100 <= statusCode && statusCode <= 199) {\n\t\trr.ResponseWriterWrapper.WriteHeader(statusCode)\n\t}\n}", "func (c *ResponseCapture) WriteHeader(statusCode int) {\n\tc.status = statusCode\n\tc.wroteHeader = true\n\tc.ResponseWriter.WriteHeader(statusCode)\n}", "func (oo *OmciCC) SendCreateDot1PMapper(ctx context.Context, timeout int, highPrio bool,\n\taInstID uint16, 
rxChan chan Message) (*me.ManagedEntity, error) {\n\ttid := oo.GetNextTid(highPrio)\n\tlogger.Debugw(ctx, \"send .1pMapper-Create-msg:\", log.Fields{\"device-id\": oo.deviceID,\n\t\t\"SequNo\": strconv.FormatInt(int64(tid), 16), \"InstId\": strconv.FormatInt(int64(aInstID), 16)})\n\n\tmeParams := me.ParamData{\n\t\tEntityID: aInstID,\n\t\tAttributes: me.AttributeValueMap{\n\t\t\t//workaround for unsuitable omci-lib default values, cmp VOL-3729\n\t\t\tme.Ieee8021PMapperServiceProfile_TpPointer: 0xFFFF,\n\t\t\tme.Ieee8021PMapperServiceProfile_InterworkTpPointerForPBitPriority0: 0xFFFF,\n\t\t\tme.Ieee8021PMapperServiceProfile_InterworkTpPointerForPBitPriority1: 0xFFFF,\n\t\t\tme.Ieee8021PMapperServiceProfile_InterworkTpPointerForPBitPriority2: 0xFFFF,\n\t\t\tme.Ieee8021PMapperServiceProfile_InterworkTpPointerForPBitPriority3: 0xFFFF,\n\t\t\tme.Ieee8021PMapperServiceProfile_InterworkTpPointerForPBitPriority4: 0xFFFF,\n\t\t\tme.Ieee8021PMapperServiceProfile_InterworkTpPointerForPBitPriority5: 0xFFFF,\n\t\t\tme.Ieee8021PMapperServiceProfile_InterworkTpPointerForPBitPriority6: 0xFFFF,\n\t\t\tme.Ieee8021PMapperServiceProfile_InterworkTpPointerForPBitPriority7: 0xFFFF,\n\t\t},\n\t}\n\tmeInstance, omciErr := me.NewIeee8021PMapperServiceProfile(meParams)\n\tif omciErr.GetError() == nil {\n\t\t//we have to set all 'untouched' parameters to default by some additional option parameter!!\n\t\tomciLayer, msgLayer, err := oframe.EncodeFrame(meInstance, omci.CreateRequestType,\n\t\t\toframe.TransactionID(tid), oframe.AddDefaults(true))\n\t\tif err != nil {\n\t\t\tlogger.Errorw(ctx, \"Cannot encode .1pMapper for create\", log.Fields{\n\t\t\t\t\"Err\": err, \"device-id\": oo.deviceID})\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpkt, err := SerializeOmciLayer(ctx, omciLayer, msgLayer)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(ctx, \"Cannot serialize .1pMapper create\", log.Fields{\n\t\t\t\t\"Err\": err, \"device-id\": oo.deviceID})\n\t\t\treturn nil, err\n\t\t}\n\n\t\tomciRxCallbackPair := 
CallbackPair{\n\t\t\tCbKey: tid,\n\t\t\tCbEntry: CallbackPairEntry{rxChan, oo.receiveOmciResponse, true},\n\t\t}\n\t\terr = oo.Send(ctx, pkt, timeout, CDefaultRetries, highPrio, omciRxCallbackPair)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(ctx, \"Cannot send .1pMapper create\", log.Fields{\n\t\t\t\t\"Err\": err, \"device-id\": oo.deviceID})\n\t\t\treturn nil, err\n\t\t}\n\t\tlogger.Debug(ctx, \"send .1pMapper-create-msg done\")\n\t\treturn meInstance, nil\n\t}\n\tlogger.Errorw(ctx, \"Cannot generate .1pMapper\", log.Fields{\n\t\t\"Err\": omciErr.GetError(), \"device-id\": oo.deviceID})\n\treturn nil, omciErr.GetError()\n}", "func (x *MQQueueManager) Put1(good *MQOD, gomd *MQMD,\n\tgopmo *MQPMO, buffer []byte) error {\n\tvar mqrc C.MQLONG\n\tvar mqcc C.MQLONG\n\tvar mqmd C.MQMD\n\tvar mqpmo C.MQPMO\n\tvar mqod C.MQOD\n\tvar ptr C.PMQVOID\n\n\terr := checkMD(gomd, \"MQPUT1\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcopyODtoC(&mqod, good)\n\tcopyMDtoC(&mqmd, gomd)\n\tcopyPMOtoC(&mqpmo, gopmo)\n\n\tbufflen := len(buffer)\n\n\tif bufflen > 0 {\n\t\tptr = (C.PMQVOID)(unsafe.Pointer(&buffer[0]))\n\t} else {\n\t\tptr = nil\n\t}\n\n\tC.MQPUT1(x.hConn, (C.PMQVOID)(unsafe.Pointer(&mqod)),\n\t\t(C.PMQVOID)(unsafe.Pointer(&mqmd)),\n\t\t(C.PMQVOID)(unsafe.Pointer(&mqpmo)),\n\t\t(C.MQLONG)(bufflen),\n\t\tptr,\n\t\t&mqcc, &mqrc)\n\n\tcopyODfromC(&mqod, good)\n\tcopyMDfromC(&mqmd, gomd)\n\tcopyPMOfromC(&mqpmo, gopmo)\n\n\tmqreturn := MQReturn{MQCC: int32(mqcc),\n\t\tMQRC: int32(mqrc),\n\t\tverb: \"MQPUT1\",\n\t}\n\n\tif mqcc != C.MQCC_OK {\n\t\treturn &mqreturn\n\t}\n\n\treturn nil\n\n}", "func (mb *client) WriteSingleRegister(address, value uint16) (results []byte, err error) {\n\trequest := ProtocolDataUnit{\n\t\tFunctionCode: FuncCodeWriteSingleRegister,\n\t\tData: dataBlock(address, value),\n\t}\n\tresponse, err := mb.send(&request)\n\tif err != nil {\n\t\treturn\n\t}\n\t// Fixed response length\n\tif len(response.Data) != 4 {\n\t\terr = fmt.Errorf(\"modbus: response data 
size '%v' does not match expected '%v'\", len(response.Data), 4)\n\t\treturn\n\t}\n\trespValue := binary.BigEndian.Uint16(response.Data)\n\tif address != respValue {\n\t\terr = fmt.Errorf(\"modbus: response address '%v' does not match request '%v'\", respValue, address)\n\t\treturn\n\t}\n\tresults = response.Data[2:]\n\trespValue = binary.BigEndian.Uint16(results)\n\tif value != respValue {\n\t\terr = fmt.Errorf(\"modbus: response value '%v' does not match request '%v'\", respValue, value)\n\t\treturn\n\t}\n\treturn\n}", "func VersionCar() string {\n\tversions()\n\tmessage := \"Aayez anhy Version ya Amar?\\n\"\n\n\tfor key, _ := range CarVersionMap {\n\t\tfmt.Println(key)\n\t\tmessage += \" \" + key + \" ,\\n\"\n\t}\n\treturn message\n\n}", "func (w responseWriterNoBody) WriteHeader(statusCode int) {\n\tw.ResponseWriter.WriteHeader(statusCode)\n}", "func NewObjectCDNV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {\n\teo.ApplyDefaults(\"rax:object-cdn\")\n\turl, err := client.EndpointLocator(eo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil\n}", "func (a *api) UpdateClassroomV1(ctx context.Context,\n\treq *grpcApi.UpdateClassroomV1Request) (res *grpcApi.UpdateClassroomV1Response, err error) {\n\n\tdefer utils.LogGrpcCall(\"UpdateClassroomV1\", &req, &res, &err)\n\tdefer func() {\n\t\t_ = a.logProducer.Send(producer.Updated, req, res, err)\n\t}()\n\n\tif err = req.Validate(); err != nil {\n\n\t\terr = status.Error(codes.InvalidArgument, err.Error())\n\t\treturn nil, err\n\t}\n\n\tclassroom := models.FromProtoClassroom(req.Classroom)\n\n\tfound, err := a.classroomRepo.UpdateClassroom(ctx, *classroom)\n\tif err != nil {\n\n\t\terr = status.Error(codes.Unavailable, err.Error())\n\t\treturn nil, err\n\t}\n\n\tif found {\n\t\tmetrics.IncUpdateCounter()\n\t}\n\n\tres = &grpcApi.UpdateClassroomV1Response{Found: found}\n\treturn res, 
nil\n}", "func (res Responder) WriteOne() int {\n\tn, _ := res.b.Write(binONE)\n\treturn n\n}", "func (a *Client) DeleteIOAExclusionsV1(params *DeleteIOAExclusionsV1Params, opts ...ClientOption) (*DeleteIOAExclusionsV1OK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewDeleteIOAExclusionsV1Params()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"deleteIOAExclusionsV1\",\n\t\tMethod: \"DELETE\",\n\t\tPathPattern: \"/policy/entities/ioa-exclusions/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &DeleteIOAExclusionsV1Reader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*DeleteIOAExclusionsV1OK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for deleteIOAExclusionsV1: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func CreateCar(w http.ResponseWriter, r *http.Request) {\n\t// Set the way we will serve data between frontend and backend\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t// Allow cross origin connections making the routes accessible for everyone\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t// Allow the server to perform post operation\n\tw.Header().Set(\"Access-Control-Allow-Methods\", \"POST\")\n\t// Allow the content type that is specified by client to be processed on server\n\tw.Header().Set(\"Access-Control-Allow-Headers\", \"Content-Type\")\n\t// Declare an empty car\n\tvar car models.Car\n\t// Take the car json from the client and decode it into car struct\n\t_ = json.NewDecoder(r.Body).Decode(&car)\n\tpayload := createCar(car)\n\tjson.NewEncoder(w).Encode(payload)\n}", "func (a *PipelineControllerApiService) CancelPipelineUsingPUT1(ctx _context.Context, id string) apiCancelPipelineUsingPUT1Request {\n\treturn apiCancelPipelineUsingPUT1Request{\n\t\tapiService: a,\n\t\tctx: ctx,\n\t\tid: id,\n\t}\n}", "func (a *Client) V1Version(params *V1VersionParams) (*V1VersionOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewV1VersionParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"V1Version\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/v1/version\",\n\t\tProducesMediaTypes: []string{\"text/plain\"},\n\t\tConsumesMediaTypes: []string{\"text/plain\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &V1VersionReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*V1VersionOK), nil\n\n}", "func carRegister(c router.Context) (interface{}, error) {\n\t// arg name defined in router method definition\n\tp := c.Arg(`car`).(CarPayload)\n\n\tt, _ := 
c.Time() // tx time\n\tcar := &Car{ // data for chaincode state\n\t\tId: p.Id,\n\t\tTitle: p.Title,\n\t\tOwner: p.Owner,\n\t\tUpdatedAt: t,\n\t}\n\n\t// trigger event\n\tc.SetEvent(CarRegisteredEvent, car)\n\n\treturn car, // peer.Response payload will be json serialized car data\n\t\t//put json serialized data to state\n\t\t// create composite key using CarKeyPrefix and car.Id\n\t\tc.State().Insert(car)\n}", "func (o *IndicatorCreateV1Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.IgnoreWarnings != nil {\n\n\t\t// query param ignore_warnings\n\t\tvar qrIgnoreWarnings bool\n\n\t\tif o.IgnoreWarnings != nil {\n\t\t\tqrIgnoreWarnings = *o.IgnoreWarnings\n\t\t}\n\t\tqIgnoreWarnings := swag.FormatBool(qrIgnoreWarnings)\n\t\tif qIgnoreWarnings != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"ignore_warnings\", qIgnoreWarnings); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Retrodetects != nil {\n\n\t\t// query param retrodetects\n\t\tvar qrRetrodetects bool\n\n\t\tif o.Retrodetects != nil {\n\t\t\tqrRetrodetects = *o.Retrodetects\n\t\t}\n\t\tqRetrodetects := swag.FormatBool(qrRetrodetects)\n\t\tif qRetrodetects != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"retrodetects\", qRetrodetects); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (b *CPUBus) Write(addr uint16, v uint8) {\n\t// Gate VRAM and OAM off from the CPU if necessary.\n\tif b.mmu.ppu != nil {\n\t\tif addr >= AddrVRAM && addr < AddrCartRAM && !b.mmu.ppu.VRAMAccessible() {\n\t\t\treturn\n\t\t}\n\t\tif addr >= AddrOAM && addr < AddrOAM && !b.mmu.ppu.OAMAccessible() {\n\t\t\treturn\n\t\t}\n\t}\n\n\tb.mmu.write(addr, v)\n}", "func (a *Client) 
CmsBlockRepositoryV1SavePut(params *CmsBlockRepositoryV1SavePutParams) (*CmsBlockRepositoryV1SavePutOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCmsBlockRepositoryV1SavePutParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"cmsBlockRepositoryV1SavePut\",\n\t\tMethod: \"PUT\",\n\t\tPathPattern: \"/V1/cmsBlock/{id}\",\n\t\tProducesMediaTypes: []string{\"\"},\n\t\tConsumesMediaTypes: []string{\"\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &CmsBlockRepositoryV1SavePutReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*CmsBlockRepositoryV1SavePutOK), nil\n\n}", "func (a *Client) V1Version(params *V1VersionParams) (*V1VersionOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewV1VersionParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"V1Version\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/v1/version\",\n\t\tProducesMediaTypes: []string{\"text/plain\"},\n\t\tConsumesMediaTypes: []string{\"text/plain\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &V1VersionReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*V1VersionOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*V1VersionDefault)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}", "func writeBitacora(file *os.File, index int64, log *bitacora) {\n\tfile.Seek(index, 0)\n\t//Empezamos el proceso de guardar en binario la data en memoria del struct\n\tvar binaryDisc 
bytes.Buffer\n\tbinary.Write(&binaryDisc, binary.BigEndian, log)\n\twriteNextBytes(file, binaryDisc.Bytes())\n}", "func registerModelTraffic1Flags(depth int, cmdPrefix string, cmd *cobra.Command) error {\n\n\tif err := registerTraffic1Download(depth, cmdPrefix, cmd); err != nil {\n\t\treturn err\n\t}\n\n\tif err := registerTraffic1Time(depth, cmdPrefix, cmd); err != nil {\n\t\treturn err\n\t}\n\n\tif err := registerTraffic1Upload(depth, cmdPrefix, cmd); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (k *Keptn) APIV1() api.KeptnInterface {\n\treturn k.api\n}", "func PostCar(w http.ResponseWriter, r *http.Request) {\n\tclaims := GetToken(jwtauth.TokenFromHeader(r))\n\n\tvar cars []Car\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\thttp.Error(w, \"can't read body\", http.StatusBadRequest)\n\t\tpanic(err)\n\t}\n\n\terr = json.Unmarshal(body, &cars)\n\tif err != nil {\n\t\thttp.Error(w, \"wrong body structure\", http.StatusBadRequest)\n\t\tpanic(err)\n\t}\n\n\tfor i := range cars {\n\t\tsql := \"INSERT INTO public.cars(\" +\n\t\t\t\"model, manufacturer, plate, color, caradded, year, fk_user, vin)\" +\n\t\t\t\"VALUES ($1, $2, $3, $4, CURRENT_DATE, $5, $6, $7);\"\n\n\t\terr = Database.QueryRow(sql, cars[i].Model, cars[i].Manufacturer, cars[i].Plate, cars[i].Color, cars[i].Year, claims[\"id\"], cars[i].Vin).Err()\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"wrong body structure\", http.StatusBadRequest)\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tw.WriteHeader(http.StatusCreated)\n}", "func (asc *AsenaSmartContract) QueryAllCars(ctx contractapi.TransactionContextInterface, args []string) error {\n\n\treturn nil\n}", "func (*Car) Descriptor() ([]byte, []int) {\n\treturn file_carz_proto_rawDescGZIP(), []int{1}\n}", "func (j *JSendWriterBuffer) WriteHeader(statusCode int) {\n\tj.responseWriter.WriteHeader(statusCode)\n}", "func (_e *MockWriteBufferJsonBased_Expecter) WriteInt16(logicalName interface{}, bitLength interface{}, value interface{}, writerArgs 
...interface{}) *MockWriteBufferJsonBased_WriteInt16_Call {\n\treturn &MockWriteBufferJsonBased_WriteInt16_Call{Call: _e.mock.On(\"WriteInt16\",\n\t\tappend([]interface{}{logicalName, bitLength, value}, writerArgs...)...)}\n}", "func (a *Client) CreateCredentialV1(params *CreateCredentialV1Params) (*CreateCredentialV1OK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCreateCredentialV1Params()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"createCredentialV1\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/v1/credentials\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &CreateCredentialV1Reader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*CreateCredentialV1OK), nil\n\n}" ]
[ "0.48664775", "0.44362253", "0.43605676", "0.43476164", "0.4318693", "0.42778337", "0.42568678", "0.4225701", "0.4217781", "0.41908678", "0.41819924", "0.41723937", "0.41719002", "0.41454837", "0.41401184", "0.4108826", "0.41082826", "0.4107811", "0.41051665", "0.40967038", "0.40580535", "0.40521723", "0.40388066", "0.40175205", "0.3992928", "0.3955306", "0.39389408", "0.39358416", "0.39285725", "0.39253452", "0.3910767", "0.39009103", "0.38891062", "0.38713565", "0.38712138", "0.38668376", "0.38611388", "0.38584313", "0.38524273", "0.38515586", "0.38501245", "0.38452137", "0.38435796", "0.38339466", "0.38333103", "0.38329363", "0.38163406", "0.38143417", "0.38127536", "0.38055724", "0.3801852", "0.38014343", "0.38002422", "0.3791948", "0.3775124", "0.37738147", "0.3768505", "0.37660995", "0.3757572", "0.37467152", "0.37372935", "0.37348953", "0.3734253", "0.37306267", "0.37279645", "0.37208253", "0.37201226", "0.3718483", "0.37142712", "0.37126788", "0.37081617", "0.37064162", "0.3696917", "0.36968547", "0.3696271", "0.36872035", "0.36833364", "0.36832616", "0.36801222", "0.3679878", "0.3679603", "0.36761034", "0.3670744", "0.36676556", "0.36652827", "0.36617583", "0.3651883", "0.3648777", "0.36483365", "0.36449426", "0.36401987", "0.3639062", "0.36377156", "0.36301425", "0.3627037", "0.36238784", "0.3622887", "0.36224213", "0.3619914", "0.36159506" ]
0.8315218
0
AllowDuplicatePuts is a write option which makes a CAR interface (blockstore or storage) not deduplicate blocks in Put and PutMany. The default is to deduplicate, which matches the current semantics of goipfsblockstore v1. Note that this option only affects the storage interfaces (blockstore or storage), and is ignored by the root gocar/v2 package.
func AllowDuplicatePuts(allow bool) Option { return func(o *Options) { o.BlockstoreAllowDuplicatePuts = allow } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func DisallowDuplicateKey() DecodeOption {\n\treturn func(d *Decoder) error {\n\t\td.disallowDuplicateKey = true\n\t\treturn nil\n\t}\n}", "func (c *Client) PutDuplicate(oldName, newName upspin.PathName) (*upspin.DirEntry, error) {\n\tconst op errors.Op = \"client.PutDuplicate\"\n\tm, s := newMetric(op)\n\tdefer m.Done()\n\n\treturn c.dupOrRename(op, oldName, newName, false, s)\n}", "func (blk *Block) duplicate() *Block {\n\tdup := &Block{}\n\n\t// Copy over.\n\t*dup = *blk\n\n\tdupContents := contentstream.ContentStreamOperations{}\n\tfor _, op := range *blk.contents {\n\t\tdupContents = append(dupContents, op)\n\t}\n\tdup.contents = &dupContents\n\n\treturn dup\n}", "func (handle Handle) Duplicate(src, dest Handle, access DuplicateAccess) (Handle, error) {\n\tvar destHandle Handle\n\terrno, _, err := duplicateHandle.Call(\n\t\tuintptr(src),\n\t\tuintptr(handle),\n\t\tuintptr(dest),\n\t\tuintptr(unsafe.Pointer(&destHandle)),\n\t\tuintptr(access),\n\t\t0,\n\t\t0,\n\t)\n\tif winerrno.Errno(errno) != winerrno.Success {\n\t\treturn destHandle, nil\n\t}\n\treturn Handle(0), os.NewSyscallError(\"DuplicateHandle\", err)\n}", "func AllowOverwrite(existing, new Source) bool {\n\tswitch existing {\n\n\t// KubeAPIServer state can only be overwritten by other kube-apiserver\n\t// state.\n\tcase KubeAPIServer:\n\t\treturn new == KubeAPIServer\n\n\t// Local state can only be overwritten by other local state or\n\t// kube-apiserver state.\n\tcase Local:\n\t\treturn new == Local || new == KubeAPIServer\n\n\t// KVStore can be overwritten by other kvstore, local state, or\n\t// kube-apiserver state.\n\tcase KVStore:\n\t\treturn new == KVStore || new == Local || new == KubeAPIServer\n\n\t// Custom-resource state can be overwritten by everything except\n\t// generated, unspecified and Kubernetes (non-CRD) state\n\tcase CustomResource:\n\t\treturn new != Generated && new != Unspec && new != Kubernetes\n\n\t// Kubernetes state can be overwritten by everything except generated\n\t// 
and unspecified state\n\tcase Kubernetes:\n\t\treturn new != Generated && new != Unspec\n\n\t// Generated can be overwritten by everything except by Unspecified\n\tcase Generated:\n\t\treturn new != Unspec\n\n\t// Unspecified state can be overwritten by everything\n\tcase Unspec:\n\t\treturn true\n\t}\n\n\treturn true\n}", "func (o TransferJobTransferSpecTransferOptionsOutput) OverwriteObjectsAlreadyExistingInSink() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v TransferJobTransferSpecTransferOptions) *bool { return v.OverwriteObjectsAlreadyExistingInSink }).(pulumi.BoolPtrOutput)\n}", "func (f *PushFilter) Duplicate() *PushFilter {\n\n\tnf := NewPushFilter()\n\n\tfor id, types := range f.Identities {\n\t\tnf.FilterIdentity(id, types...)\n\t}\n\n\tfor k, v := range f.Params {\n\t\tnf.SetParameter(k, v...)\n\t}\n\n\treturn nf\n}", "func (o TransferJobTransferSpecTransferOptionsPtrOutput) OverwriteObjectsAlreadyExistingInSink() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *TransferJobTransferSpecTransferOptions) *bool {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.OverwriteObjectsAlreadyExistingInSink\n\t}).(pulumi.BoolPtrOutput)\n}", "func IsDup(err error) bool {\n\twriteException, ok := err.(mongo.WriteException)\n\n\tif !ok {\n\t\treturn false\n\t}\n\n\tfor _, writeError := range writeException.WriteErrors {\n\t\treturn writeError.Code == 11000 || writeError.Code == 11001 || writeError.Code == 12582 || writeError.Code == 16460 && strings.Contains(writeError.Message, \" E11000 \")\n\t}\n\n\treturn false\n}", "func (me TxsdFeConvolveMatrixTypeEdgeMode) IsDuplicate() bool { return me.String() == \"duplicate\" }", "func (cache *diskBlockCacheWrapped) Put(ctx context.Context, tlfID tlf.ID,\n\tblockID kbfsblock.ID, buf []byte,\n\tserverHalf kbfscrypto.BlockCryptKeyServerHalf) error {\n\t// This is a write operation but we are only reading the pointers to the\n\t// caches. 
So we use a read lock.\n\tcache.mtx.RLock()\n\tdefer cache.mtx.RUnlock()\n\tif cache.config.IsSyncedTlf(tlfID) && cache.syncCache != nil {\n\t\tworkingSetCache := cache.workingSetCache\n\t\terr := cache.syncCache.Put(ctx, tlfID, blockID, buf, serverHalf)\n\t\tif err == nil {\n\t\t\tgo workingSetCache.Delete(ctx, []kbfsblock.ID{blockID})\n\t\t\treturn nil\n\t\t}\n\t\t// Otherwise drop through and put it into the working set cache.\n\t}\n\t// TODO: Allow more intelligent transitioning from the sync cache to\n\t// the working set cache.\n\tif cache.syncCache != nil {\n\t\tsyncCache := cache.syncCache\n\t\tgo syncCache.Delete(ctx, []kbfsblock.ID{blockID})\n\t}\n\treturn cache.workingSetCache.Put(ctx, tlfID, blockID, buf, serverHalf)\n}", "func CheckDupe(domain, instance, class, id string) error {\n\n\tfileName := strings.ToLower(fileName(domain, instance, class, id))\n\n\tmux.Lock()\n\tdefer mux.Unlock()\n\n\t_, found := sources[fileName]\n\tif found {\n\t\treturn ErrDup\n\t}\n\n\tsources[fileName] = true\n\n\treturn nil\n}", "func (rs *replicationScheme) ensureBlockIsReplicated(ctx context.Context, id ulid.ULID) error {\n\tblockID := id.String()\n\tchunksDir := path.Join(blockID, thanosblock.ChunksDirname)\n\tindexFile := path.Join(blockID, thanosblock.IndexFilename)\n\tmetaFile := path.Join(blockID, thanosblock.MetaFilename)\n\n\tlevel.Debug(rs.logger).Log(\"msg\", \"ensuring block is replicated\", \"block_uuid\", blockID)\n\n\toriginMetaFile, err := rs.fromBkt.Get(ctx, metaFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"get meta file from origin bucket: %w\", err)\n\t}\n\n\tdefer runutil.CloseWithLogOnErr(rs.logger, originMetaFile, \"close original meta file\")\n\n\ttargetMetaFile, err := rs.toBkt.Get(ctx, metaFile)\n\tif targetMetaFile != nil {\n\t\tdefer runutil.CloseWithLogOnErr(rs.logger, targetMetaFile, \"close target meta file\")\n\t}\n\n\tif err != nil && !rs.toBkt.IsObjNotFoundErr(err) && err != io.EOF {\n\t\treturn fmt.Errorf(\"get meta file from target 
bucket: %w\", err)\n\t}\n\n\toriginMetaFileContent, err := ioutil.ReadAll(originMetaFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"read origin meta file: %w\", err)\n\t}\n\n\tif targetMetaFile != nil && !rs.toBkt.IsObjNotFoundErr(err) {\n\t\ttargetMetaFileContent, err := ioutil.ReadAll(targetMetaFile)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"read target meta file: %w\", err)\n\t\t}\n\n\t\tif bytes.Equal(originMetaFileContent, targetMetaFileContent) {\n\t\t\t// If the origin meta file content and target meta file content is\n\t\t\t// equal, we know we have already successfully replicated\n\t\t\t// previously.\n\t\t\tlevel.Debug(rs.logger).Log(\"msg\", \"skipping block as already replicated\", \"block_uuid\", id.String())\n\t\t\trs.metrics.blocksAlreadyReplicated.Inc()\n\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif err := rs.fromBkt.Iter(ctx, chunksDir, func(objectName string) error {\n\t\terr := rs.ensureObjectReplicated(ctx, objectName)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"replicate object %v: %w\", objectName, err)\n\t\t}\n\n\t\treturn nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\tif err := rs.ensureObjectReplicated(ctx, indexFile); err != nil {\n\t\treturn fmt.Errorf(\"replicate index file: %w\", err)\n\t}\n\n\tlevel.Debug(rs.logger).Log(\"msg\", \"replicating meta file\", \"object\", metaFile)\n\n\tif err := rs.toBkt.Upload(ctx, metaFile, bytes.NewReader(originMetaFileContent)); err != nil {\n\t\treturn fmt.Errorf(\"upload meta file: %w\", err)\n\t}\n\n\trs.metrics.blocksReplicated.Inc()\n\n\treturn nil\n}", "func (o BucketOutput) LifecycleRuleAllowSameActionOverlap() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *Bucket) pulumi.BoolPtrOutput { return v.LifecycleRuleAllowSameActionOverlap }).(pulumi.BoolPtrOutput)\n}", "func IsDuplicate(err error) bool {\n\tvar e mongo.WriteException\n\tif errors.As(err, &e) {\n\t\tfor _, we := range e.WriteErrors {\n\t\t\tif we.Code == 11000 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", 
"func TestAddExportFileSkipsDuplicates(t *testing.T) {\n\tt.Parallel()\n\n\ttestDB := database.NewTestDatabase(t)\n\texportDB := New(testDB)\n\tctx := context.Background()\n\n\t// Add foreign key records.\n\tec := &model.ExportConfig{Period: time.Hour}\n\tif err := exportDB.AddExportConfig(ctx, ec); err != nil {\n\t\tt.Fatal(err)\n\t}\n\teb := &model.ExportBatch{ConfigID: ec.ConfigID, Status: model.ExportBatchOpen}\n\tif err := exportDB.AddExportBatches(ctx, []*model.ExportBatch{eb}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Lease the batch to get the ID.\n\teb, err := exportDB.LeaseBatch(ctx, time.Hour, time.Now())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twantBucketName := \"bucket-1\"\n\tef := &model.ExportFile{\n\t\tFilename: \"file\",\n\t\tBucketName: wantBucketName,\n\t\tBatchID: eb.BatchID,\n\t}\n\n\t// Add a record.\n\terr = testDB.InTx(ctx, pgx.Serializable, func(tx pgx.Tx) error {\n\t\tif err := addExportFile(ctx, tx, ef); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Check that the row is present.\n\tgot, err := exportDB.LookupExportFile(ctx, ef.Filename)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got.BucketName != wantBucketName {\n\t\tt.Fatalf(\"bucket name mismatch got %q, want %q\", got.BucketName, wantBucketName)\n\t}\n\n\t// Add a second record with same filename, must return ErrKeyConflict, and not overwrite.\n\tef.BucketName = \"bucket-2\"\n\terr = testDB.InTx(ctx, pgx.Serializable, func(tx pgx.Tx) error {\n\t\tif err := addExportFile(ctx, tx, ef); err != nil {\n\t\t\tif err == database.ErrKeyConflict {\n\t\t\t\treturn nil // Expected result.\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\treturn errors.New(\"missing expected ErrKeyConflict\")\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Row must not be updated.\n\tgot, err = exportDB.LookupExportFile(ctx, ef.Filename)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got.BucketName != wantBucketName 
{\n\t\tt.Fatalf(\"bucket name mismatch got %q, want %q\", got.BucketName, wantBucketName)\n\t}\n}", "func NoConcurrentDupes(f OnMissHandler) (OnMissHandler, chan<- bool) {\n\terrClosed := errors.New(\"NoConcurrentDupes wrapper has been closed\")\n\topchan := make(chan reqGet)\n\tgo nocondupesMainloop(f, opchan)\n\tquit := make(chan bool, 1)\n\twrap := func(key string) (Cacheable, error) {\n\t\tif opchan == nil {\n\t\t\treturn nil, errClosed\n\t\t}\n\t\tselect {\n\t\tcase <-quit:\n\t\t\tclose(opchan)\n\t\t\topchan = nil\n\t\t\treturn nil, errClosed\n\t\tdefault:\n\t\t}\n\t\treplychan := make(chan replyGet)\n\t\topchan <- reqGet{key, replychan}\n\t\treply := <-replychan\n\t\treturn reply.val, reply.err\n\t}\n\treturn wrap, quit\n}", "func (w *binWriter) WriteAvoidRepetitionWhenPossible(v interface{}) {\n\tif w.err != nil {\n\t\treturn\n\t}\n\tif w.err = binary.Write(w.w, binary.LittleEndian, v); w.err == nil {\n\t\tw.size += int64(binary.Size(v))\n\t}\n}", "func (mw *Writer) DedupWriteIsDup(v interface{}) (res bool, err error) {\n\tdefer func() {\n\t\t// This recover allows test 911 (_generated/gen_test.go:67) to run green.\n\t\t// It turns indexing by []byte msgp.Raw into a no-op. 
Which it\n\t\t// should be.\n\t\tif recover() != nil {\n\t\t\treturn\n\t\t}\n\t}()\n\tif v == nil || reflect.ValueOf(v).IsNil() {\n\t\treturn false, nil\n\t}\n\tk, dup := mw.ptrWrit[v]\n\tif !dup {\n\t\tmw.ptrWrit[v] = mw.ptrCountNext\n\t\t//fmt.Printf(\"\\n\\n $$$ NOT dup write %p -> k=%v / %#v\\n\\n\", v, mw.ptrCountNext, v)\n\t\tmw.ptrCountNext++\n\t\treturn false, nil\n\t} else {\n\t\t//fmt.Printf(\"\\n\\n $$$ DUP write %p -> k=%v / %#v\\n\\n\", v, k, v)\n\t}\n\treturn true, mw.DedupWriteExt(k)\n}", "func (me *Container) Duplicate(r ...Registries) *Container {\n\tinstance := Container{sync.Mutex{}, make(map[string]interface{})}\n\n\tfor k, v := range globalContainerInstance.Container.bag {\n\t\tinstance.bag[k] = v\n\t}\n\n\tif len(r) > 0 {\n\t\tfor _, v := range r {\n\t\t\tinstance.Register(v)\n\t\t}\n\t}\n\n\treturn &instance\n}", "func (bs *GasChargeBlockStore) Put(ctx context.Context, blk blocks.Block) error {\n\tbs.gasTank.Charge(bs.pricelist.OnIpldPut(len(blk.RawData())), \"%s storage put %d bytes\", blk.Cid(), len(blk.RawData()))\n\n\tif err := bs.inner.Put(ctx, blk); err != nil {\n\t\tpanic(xerrors.WithMessage(err, \"failed to write data to disk\"))\n\t}\n\treturn nil\n}", "func (fc finderClient) Overwrite(ctx context.Context,\n\thost, index, shard string,\n\txs []*objects.VObject,\n) ([]RepairResponse, error) {\n\treturn fc.cl.OverwriteObjects(ctx, host, index, shard, xs)\n}", "func (endpointSliceStrategy) AllowCreateOnUpdate() bool {\n\treturn false\n}", "func (d *Duplicator) Duplicate(in chan Any, count int) (outs []chan Any) {\n\t// Create duplicate channels\n\touts = make([]chan Any, 0, count)\n\tfor i := 0; i < count; i++ {\n\t\touts = append(outs, make(chan Any))\n\t}\n\n\t// Pipe input to all of the outputs\n\tgo func(outs []chan Any) {\n\t\tfor x := range in {\n\t\t\tfor _, o := range outs {\n\t\t\t\to <- x\n\t\t\t}\n\t\t}\n\t\tfor _, o := range outs {\n\t\t\tclose(o)\n\t\t}\n\t}(outs)\n\n\treturn outs\n}", "func TestMoveMultipleToSameBlock(t 
*testing.T) {\n\tt.Parallel()\n\n\ts, db, teardown, err := testStore()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer teardown()\n\n\tdbtx, err := db.BeginReadWriteTx()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer dbtx.Commit()\n\tns := dbtx.ReadWriteBucket(namespaceKey)\n\n\tb100 := BlockMeta{\n\t\tBlock: Block{Height: 100},\n\t\tTime: time.Now(),\n\t}\n\n\tcb := newCoinBase(20e8, 30e8)\n\tcbRec, err := NewTxRecordFromMsgTx(cb, b100.Time)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Insert coinbase and mark both outputs as credits.\n\terr = s.InsertTx(ns, cbRec, &b100)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = s.AddCredit(ns, cbRec, &b100, 0, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = s.AddCredit(ns, cbRec, &b100, 1, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Create and insert two unmined transactions which spend both coinbase\n\t// outputs.\n\tspenderATime := time.Now()\n\tspenderA := spendOutput(&cbRec.Hash, 0, 1e8, 2e8, 18e8)\n\tspenderARec, err := NewTxRecordFromMsgTx(spenderA, spenderATime)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = s.InsertTx(ns, spenderARec, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = s.AddCredit(ns, spenderARec, nil, 0, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = s.AddCredit(ns, spenderARec, nil, 1, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tspenderBTime := time.Now()\n\tspenderB := spendOutput(&cbRec.Hash, 1, 4e8, 8e8, 18e8)\n\tspenderBRec, err := NewTxRecordFromMsgTx(spenderB, spenderBTime)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = s.InsertTx(ns, spenderBRec, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = s.AddCredit(ns, spenderBRec, nil, 0, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = s.AddCredit(ns, spenderBRec, nil, 1, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcoinbaseMaturity := int32(chaincfg.TestNet3Params.CoinbaseMaturity)\n\n\t// Mine both transactions in the block that matures the 
coinbase.\n\tbMaturity := BlockMeta{\n\t\tBlock: Block{Height: b100.Height + coinbaseMaturity},\n\t\tTime: time.Now(),\n\t}\n\terr = s.InsertTx(ns, spenderARec, &bMaturity)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = s.InsertTx(ns, spenderBRec, &bMaturity)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Check that both transactions can be queried at the maturity block.\n\tdetailsA, err := s.UniqueTxDetails(ns, &spenderARec.Hash, &bMaturity.Block)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif detailsA == nil {\n\t\tt.Fatal(\"No details found for first spender\")\n\t}\n\tdetailsB, err := s.UniqueTxDetails(ns, &spenderBRec.Hash, &bMaturity.Block)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif detailsB == nil {\n\t\tt.Fatal(\"No details found for second spender\")\n\t}\n\n\t// Verify that the balance was correctly updated on the block record\n\t// append and that no unmined transactions remain.\n\tbalTests := []struct {\n\t\theight int32\n\t\tminConf int32\n\t\tbal btcutil.Amount\n\t}{\n\t\t// Maturity height\n\t\t{\n\t\t\theight: bMaturity.Height,\n\t\t\tminConf: 0,\n\t\t\tbal: 15e8,\n\t\t},\n\t\t{\n\t\t\theight: bMaturity.Height,\n\t\t\tminConf: 1,\n\t\t\tbal: 15e8,\n\t\t},\n\t\t{\n\t\t\theight: bMaturity.Height,\n\t\t\tminConf: 2,\n\t\t\tbal: 0,\n\t\t},\n\n\t\t// Next block after maturity height\n\t\t{\n\t\t\theight: bMaturity.Height + 1,\n\t\t\tminConf: 0,\n\t\t\tbal: 15e8,\n\t\t},\n\t\t{\n\t\t\theight: bMaturity.Height + 1,\n\t\t\tminConf: 2,\n\t\t\tbal: 15e8,\n\t\t},\n\t\t{\n\t\t\theight: bMaturity.Height + 1,\n\t\t\tminConf: 3,\n\t\t\tbal: 0,\n\t\t},\n\t}\n\tfor i, tst := range balTests {\n\t\tbal, err := s.Balance(ns, tst.minConf, tst.height)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Balance test %d: Store.Balance failed: %v\", i, err)\n\t\t}\n\t\tif bal != tst.bal {\n\t\t\tt.Errorf(\"Balance test %d: Got %v Expected %v\", i, bal, tst.bal)\n\t\t}\n\t}\n\tif t.Failed() {\n\t\tt.Fatal(\"Failed balance checks after moving both coinbase 
spenders\")\n\t}\n\tunminedTxs, err := s.UnminedTxs(ns)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(unminedTxs) != 0 {\n\t\tt.Fatalf(\"Should have no unmined transactions mining both, found %d\", len(unminedTxs))\n\t}\n}", "func (kv *ShardKV) isDuplicateRequest(clientId int64, requestId int64) bool {\n\toperationContext, ok := kv.lastOperations[clientId]\n\treturn ok && requestId <= operationContext.MaxAppliedCommandId\n}", "func BenchmarkPut(b *testing.B) {\n\tdefer cleanTestFiles()\n\n\tblk, err := createTestBlock()\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\n\tdefer blk.Close()\n\n\trpos, err := blk.New()\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\n\tpld := []byte{1, 2, 3, 4}\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tblk.Put(rpos, 2, pld)\n\t}\n}", "func Duplicate(h handle.Handle, pid uint32, access handle.DuplicateAccess) (handle.Handle, error) {\n\ttargetPs, err := process.Open(process.DupHandle, false, pid)\n\tif err != nil {\n\t\treturn ^handle.Handle(0), err\n\t}\n\tdefer targetPs.Close()\n\tcurrentPs, err := process.Open(process.DupHandle, false, uint32(os.Getpid()))\n\tif err != nil {\n\t\treturn ^handle.Handle(0), err\n\t}\n\tdefer currentPs.Close()\n\t// duplicate the remote handle in the current process's address space.\n\t// Note that for certain handle types this operation might fail\n\t// as they don't permit duplicate operations\n\tdup, err := h.Duplicate(targetPs, currentPs, access)\n\tif err != nil {\n\t\treturn ^handle.Handle(0), fmt.Errorf(\"couldn't duplicate handle: %v\", err)\n\t}\n\treturn dup, nil\n}", "func (mp *TxPool) replaceDuplicateSideChainPowTx(txn *Transaction) {\n\tvar replaceList []*Transaction\n\n\tfor _, v := range mp.txnList {\n\t\tif v.TxType == SideChainPow {\n\t\t\toldPayload := v.Payload.Data(payload.SideChainPowVersion)\n\t\t\toldGenesisHashData := oldPayload[32:64]\n\n\t\t\tnewPayload := txn.Payload.Data(payload.SideChainPowVersion)\n\t\t\tnewGenesisHashData := newPayload[32:64]\n\n\t\t\tif 
bytes.Equal(oldGenesisHashData, newGenesisHashData) {\n\t\t\t\treplaceList = append(replaceList, v)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, txn := range replaceList {\n\t\ttxid := txn.Hash()\n\t\tlog.Info(\"replace sidechainpow transaction, txid=\", txid.String())\n\t\tmp.removeTransaction(txn)\n\t}\n}", "func (mcc *mapChunkCache) Put(chnks []nbs.CompressedChunk) bool {\n\tmcc.mu.Lock()\n\tdefer mcc.mu.Unlock()\n\n\tfor i := 0; i < len(chnks); i++ {\n\t\tc := chnks[i]\n\t\th := c.Hash()\n\n\t\tif curr, ok := mcc.hashToChunk[h]; ok {\n\t\t\tif !curr.IsEmpty() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif mcc.cm.CapacityExceeded(len(c.FullCompressedChunk)) {\n\t\t\treturn true\n\t\t}\n\n\t\tmcc.hashToChunk[h] = c\n\n\t\tif !c.IsEmpty() {\n\t\t\tmcc.toFlush[h] = c\n\t\t}\n\t}\n\n\treturn false\n}", "func checkDuplicate(recvPath []string, confs []*Config) bool {\n\tif len(confs) == 0 {\n\t\treturn false\n\t}\n\n\tfor _, c := range confs {\n\t\tif reflect.DeepEqual(c.Path, recvPath) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func TestCheckDuplicateConfigs(t *testing.T) {\n\ttestCases := []struct {\n\t\tqConfigs []queueConfig\n\t\texpectedErrCode APIErrorCode\n\t}{\n\t\t// Error for duplicate queue configs.\n\t\t{\n\t\t\tqConfigs: []queueConfig{\n\t\t\t\t{\n\t\t\t\t\tQueueARN: \"arn:minio:sqs:us-east-1:1:redis\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tQueueARN: \"arn:minio:sqs:us-east-1:1:redis\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErrCode: ErrOverlappingConfigs,\n\t\t},\n\t\t// Valid queue configs.\n\t\t{\n\t\t\tqConfigs: []queueConfig{\n\t\t\t\t{\n\t\t\t\t\tQueueARN: \"arn:minio:sqs:us-east-1:1:redis\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErrCode: ErrNone,\n\t\t},\n\t}\n\n\t// ... 
validate for duplicate queue configs.\n\tfor i, testCase := range testCases {\n\t\terrCode := checkDuplicateQueueConfigs(testCase.qConfigs)\n\t\tif errCode != testCase.expectedErrCode {\n\t\t\tt.Errorf(\"Test %d: Expected %d, got %d\", i+1, testCase.expectedErrCode, errCode)\n\t\t}\n\t}\n}", "func (t *ACLRole) DBCreateIgnoreDuplicate(ctx context.Context, db DB) (sql.Result, error) {\n\tq := \"INSERT INTO `acl_role` (`acl_role`.`id`,`acl_role`.`checksum`,`acl_role`.`name`,`acl_role`.`description`,`acl_role`.`admin_user_id`,`acl_role`.`customer_id`,`acl_role`.`created_at`,`acl_role`.`updated_at`) VALUES (?,?,?,?,?,?,?,?) ON DUPLICATE KEY UPDATE `id` = `id`\"\n\tchecksum := t.CalculateChecksum()\n\tif t.GetChecksum() == checksum {\n\t\treturn nil, nil\n\t}\n\tt.Checksum = &checksum\n\treturn db.ExecContext(ctx, q,\n\t\torm.ToSQLString(t.ID),\n\t\torm.ToSQLString(t.Checksum),\n\t\torm.ToSQLString(t.Name),\n\t\torm.ToSQLString(t.Description),\n\t\torm.ToSQLString(t.AdminUserID),\n\t\torm.ToSQLString(t.CustomerID),\n\t\torm.ToSQLInt64(t.CreatedAt),\n\t\torm.ToSQLInt64(t.UpdatedAt),\n\t)\n}", "func (b Bucket) Put(args ...Params) error {\n\theader, query := getHeaderQuery(args)\n\tif b.ACL != \"\" {\n\t\theader.Set(\"x-oss-acl\", b.ACL)\n\t}\n\tvar body interface{}\n\tif b.Location != \"\" {\n\t\tbody = CreateBucketConfiguration{b.Location}\n\t}\n\treturn b.Do(\"PUT\", \"\", body, nil, header, query)\n}", "func SaveMissedBlock(vals []types.Validator, validatorSets []types.ValidatorOfValidatorSet, block types.BlockResult) {\n\theight, _ := utils.ParseInt(block.Block.Header.Height)\n\tvalidatorSetsFormat := client.FormatValidatorSetPubkeyToIndex(validatorSets)\n\tfor _, validator := range vals {\n\t\tif val, ok := validatorSetsFormat[validator.ConsensusPubkey.Key]; ok {\n\t\t\tif len(block.Block.LastCommit.Signatures) > 0 {\n\t\t\t\tsignedInfo := block.Block.LastCommit.Signatures[val]\n\t\t\t\tif signedInfo.Signature == \"\" {\n\t\t\t\t\tb := 
schema.NewMissedBlock(schema.MissedBlock{\n\t\t\t\t\t\tHeight: height,\n\t\t\t\t\t\tOperatorAddr: validator.OperatorAddress,\n\t\t\t\t\t\tTimestamp: block.Block.Header.Time,\n\t\t\t\t\t})\n\t\t\t\t\torm.Save(\"missed_block\", b)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func DupSecOpt(src string) ([]string, error) {\n\treturn dupSecOpt(src)\n}", "func (c *cache) Put(ctx context.Context, hash string, data []byte) error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tif err := c.bs.Put(ctx, hash, data); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.blobsCache.Add(hash, data); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func IsDuplicated(err error) bool {\n\tif we, ok := err.(mongo.WriteException); ok {\n\t\tfor _, e := range we.WriteErrors {\n\t\t\tif e.Code == 11000 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func RegisterDeepCopies(scheme *runtime.Scheme) error {\n\treturn scheme.AddGeneratedDeepCopyFuncs(\n\t\tconversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {\n\t\t\tin.(*PodPolicy).DeepCopyInto(out.(*PodPolicy))\n\t\t\treturn nil\n\t\t}, InType: reflect.TypeOf(&PodPolicy{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {\n\t\t\tin.(*Redis).DeepCopyInto(out.(*Redis))\n\t\t\treturn nil\n\t\t}, InType: reflect.TypeOf(&Redis{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {\n\t\t\tin.(*RedisList).DeepCopyInto(out.(*RedisList))\n\t\t\treturn nil\n\t\t}, InType: reflect.TypeOf(&RedisList{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {\n\t\t\tin.(*SentinelSpec).DeepCopyInto(out.(*SentinelSpec))\n\t\t\treturn nil\n\t\t}, InType: reflect.TypeOf(&SentinelSpec{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error 
{\n\t\t\tin.(*SentinelStatus).DeepCopyInto(out.(*SentinelStatus))\n\t\t\treturn nil\n\t\t}, InType: reflect.TypeOf(&SentinelStatus{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {\n\t\t\tin.(*ServerCondition).DeepCopyInto(out.(*ServerCondition))\n\t\t\treturn nil\n\t\t}, InType: reflect.TypeOf(&ServerCondition{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {\n\t\t\tin.(*ServerSpec).DeepCopyInto(out.(*ServerSpec))\n\t\t\treturn nil\n\t\t}, InType: reflect.TypeOf(&ServerSpec{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {\n\t\t\tin.(*ServerStatus).DeepCopyInto(out.(*ServerStatus))\n\t\t\treturn nil\n\t\t}, InType: reflect.TypeOf(&ServerStatus{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {\n\t\t\tin.(*SlaveSpec).DeepCopyInto(out.(*SlaveSpec))\n\t\t\treturn nil\n\t\t}, InType: reflect.TypeOf(&SlaveSpec{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {\n\t\t\tin.(*SlaveStatus).DeepCopyInto(out.(*SlaveStatus))\n\t\t\treturn nil\n\t\t}, InType: reflect.TypeOf(&SlaveStatus{})},\n\t)\n}", "func (c *CopyCmd) Replicate(ctx context.Context, opt *Option, srcFs, dstFs *Firestore) error {\n\tvar err error\n\tif c.IsDelete {\n\t\tPrintInfof(opt.Stdout, \"delete original document? 
(y/n) \\n\")\n\t\tyes := askForConfirmation(opt)\n\t\tif !yes {\n\t\t\treturn errors.New(\"exit\")\n\t\t}\n\t}\n\n\treaderList, err := srcFs.Scan(ctx, c.FirestorePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor k, reader := range readerList {\n\t\tdstPath := strings.Replace(c.DestinationFirestorePath, \"*\", k, -1)\n\t\tsrcPath := strings.Replace(c.FirestorePath, \"*\", k, -1)\n\t\tDebugf(\"save with : %v from %v \\n\", srcPath, srcPath)\n\n\t\tvar m map[string]interface{}\n\t\terr = json.NewDecoder(reader).Decode(&m)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tom := dstFs.InterpretationEachValueForTime(m)\n\n\t\terr = dstFs.SaveData(ctx, opt, dstPath, om)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif c.IsDelete {\n\t\t\terr = dstFs.DeleteData(ctx, opt, srcPath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tPrintInfof(opt.Stdout, \"Copy complete! \\n\\n\")\n\treturn nil\n}", "func StatsdDuplicate(watchType string, watchID string) {\n\tif DogStatsd {\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(watchType, watchID, \"\", \"\")\n\t\tmetricName := fmt.Sprintf(\"%s.duplicate\", MetricPrefix)\n\t\tstatsd.Incr(metricName, tags)\n\t}\n\tLog(fmt.Sprintf(\"dogstatsd='%t' %s='%s' action='duplicate'\", DogStatsd, watchType, watchID), \"debug\")\n}", "func recordAdaptorDuplicateBidIDs(metricsEngine metrics.MetricsEngine, adapterBids map[openrtb_ext.BidderName]*entities.PbsOrtbSeatBid) bool {\n\tbidIDCollisionFound := false\n\tif nil == adapterBids {\n\t\treturn false\n\t}\n\tfor bidder, bid := range adapterBids {\n\t\tbidIDColisionMap := make(map[string]int, len(adapterBids[bidder].Bids))\n\t\tfor _, thisBid := range bid.Bids {\n\t\t\tif collisions, ok := bidIDColisionMap[thisBid.Bid.ID]; ok {\n\t\t\t\tbidIDCollisionFound = true\n\t\t\t\tbidIDColisionMap[thisBid.Bid.ID]++\n\t\t\t\tglog.Warningf(\"Bid.id %v :: %v collision(s) [imp.id = %v] for bidder '%v'\", thisBid.Bid.ID, collisions, 
thisBid.Bid.ImpID, string(bidder))\n\t\t\t\tmetricsEngine.RecordAdapterDuplicateBidID(string(bidder), 1)\n\t\t\t} else {\n\t\t\t\tbidIDColisionMap[thisBid.Bid.ID] = 1\n\t\t\t}\n\t\t}\n\t}\n\treturn bidIDCollisionFound\n}", "func (pb *PBServer) SubPut(args *PutArgs, reply *PutReply) error {\n pb.mu.Lock()\n\n fmt.Printf(\"sub put %s received at %s\\n\", args, pb.me)\n \n if pb.view.Backup != pb.me {\n reply.Err = ErrWrongServer\n pb.mu.Unlock()\n return nil\n }\n\n // filter duplicated requests\n if pb.processed[args.Id] {\n reply.Err = OK\n pb.mu.Unlock()\n return nil\n }\n\n //pb.processed[args.Id] = true\n pb.doPut(args, reply)\n \n reply.Err = OK\n pb.mu.Unlock()\n return nil\n}", "func TestPut(t *testing.T) {\n\tconf := withTmpBoltStore(t, defaultConf(t, secret))\n\tcases := []struct {\n\t\tname string\n\t\tinput bits.ChunkReader\n\t\tconf bits.Config\n\t\tminKeys int\n\t\texpectedErr string\n\t\tkeyw bits.KeyWriter\n\t}{{\n\t\t\"9MiB_random_default_conf\", //chunker max size is 8Mib, so expect at least 2 chunks\n\t\trandBytesInput(bytes.NewBuffer(randb(9*1024*1024)), secret),\n\t\tconf,\n\t\t2,\n\t\t\"\",\n\t\tnil,\n\t}, {\n\t\t\"1MiB_random_storage_failed\",\n\t\trandBytesInput(bytes.NewBuffer(randb(1024*1024)), secret),\n\t\twithStore(t, defaultConf(t, secret), &failingStore{}),\n\t\t0,\n\t\t\"storage_failed\",\n\t\tnil,\n\t}, {\n\t\t\"1MiB_random_chunker_failed\",\n\t\t&failingChunker{},\n\t\tconf,\n\t\t0,\n\t\t\"chunking_failed\",\n\t\tnil,\n\t}, {\n\t\t\"1MiB_chunking_fail\",\n\t\t&failingChunker{},\n\t\tconf,\n\t\t0,\n\t\t\"chunking_failed\",\n\t\tnil,\n\t}, {\n\t\t\"1MiB_handler_failed\",\n\t\trandBytesInput(bytes.NewBuffer(randb(1024*1024)), secret),\n\t\tconf,\n\t\t0,\n\t\t\"handler_failed\",\n\t\t&failingKeyHandler{},\n\t}}\n\n\tfor _, c := range cases {\n\t\tt.Run(c.name, func(t *testing.T) {\n\n\t\t\tvar keys []bits.K\n\t\t\tvar err error\n\t\t\tif c.keyw == nil {\n\t\t\t\th := bitskeys.NewMemIterator()\n\t\t\t\terr = bits.Put(c.input, h, 
c.conf)\n\t\t\t\tkeys = h.Keys\n\n\t\t\t\tif len(keys) < c.minKeys {\n\t\t\t\t\tt.Errorf(\"expected at least '%d' keys, got: '%d'\", c.minKeys, len(keys))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terr = bits.Put(c.input, c.keyw, c.conf)\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tif c.expectedErr == \"\" {\n\t\t\t\t\tt.Errorf(\"splitting shouldnt fail but got: %v\", err)\n\t\t\t\t} else if !strings.Contains(err.Error(), c.expectedErr) {\n\t\t\t\t\tt.Errorf(\"expected an error that contains message '%s', got: %v\", c.expectedErr, err)\n\t\t\t\t}\n\t\t\t} else if c.expectedErr != \"\" {\n\t\t\t\tt.Errorf(\"expected an error, got success\")\n\t\t\t}\n\t\t})\n\t}\n}", "func (this *Block) Unique(b []int) {\n\tfor _, vb := range b {\n\t\tfor i, dup := range this.Possible {\n\t\t\tif dup == vb {\n\t\t\t\tthis.Possible = append(this.Possible[:i], this.Possible[i+1:]...)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}", "func (bfs *BruteForceService) duplicate(sub string) bool {\n\tbfs.Lock()\n\tdefer bfs.Unlock()\n\n\tif _, found := bfs.subdomains[sub]; found {\n\t\treturn true\n\t}\n\tbfs.subdomains[sub] = struct{}{}\n\treturn false\n}", "func (ihs *IPHistoryService) duplicate(domain string) bool {\n\tihs.Lock()\n\tdefer ihs.Unlock()\n\n\tif _, found := ihs.filter[domain]; found {\n\t\treturn true\n\t}\n\tihs.filter[domain] = struct{}{}\n\treturn false\n}", "func DuplicateFile(n *net_node.Node, filename string, send_to_idx int32) {\n\t// First, determine if the file we are putting actually exists\n\tf, err := os.Stat(filename)\n\tif os.IsNotExist(err) {\n\t\tfmt.Println(filename, \"does not exist ,cant duplicate this file\")\n\t\treturn\n\t}\n\tfile_size := f.Size()\n\n\t// Do not begin writing until we have waited for all\n\t// other writes and reads on the file to finish and notified\n\t// other servers that we are writing\n\n\tacquire_distributed_write_lock(n, filename)\n\n\tSend_file_tcp(n, send_to_idx, filename, filename, file_size, \"\", false)\n\n\t// Send a message to the 
remaining servers that the file has been put\n\tservers := n.Files[filename].Servers\n\tfor _, idx := range servers {\n\t\tif idx == -1 {\n\t\t\tcontinue\n\t\t}\n\t\tif n.Table[idx].Status != net_node.ACTIVE {\n\t\t\tn.Files[filename].Servers[idx] = send_to_idx\n\t\t}\n\t}\n\tnotify_servers_of_file_put_complete(n, servers, filename, file_size)\n}", "func BenchmarkBTreeDeleteInsertCloneOnce(b *testing.B) {\n\tforBenchmarkSizes(b, func(b *testing.B, count int) {\n\t\tinsertP := perm(count)\n\t\tvar tr btree\n\t\tfor _, item := range insertP {\n\t\t\ttr.Set(item)\n\t\t}\n\t\ttr = tr.Clone()\n\t\tb.ResetTimer()\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\titem := insertP[i%count]\n\t\t\ttr.Delete(item)\n\t\t\ttr.Set(item)\n\t\t}\n\t})\n}", "func (_ BufferPtrPool2M) Put(b *[]byte) {\n\tPutBytesSlicePtr2M(b)\n}", "func duplicateExists(vs []string) bool {\n\tm := make(map[string]bool, len(vs))\n\n\tfor _, v := range vs {\n\t\tif _, ok := m[v]; ok {\n\t\t\treturn true\n\t\t}\n\t\tm[v] = true\n\t}\n\treturn false\n}", "func (meta *predicateMetadata) ShallowCopy() algorithm.PredicateMetadata {\n\tnewPredMeta := &predicateMetadata{\n\t\tpod: meta.pod,\n\t\tpodBestEffort: meta.podBestEffort,\n\t\tpodRequest: meta.podRequest,\n\t\tserviceAffinityInUse: meta.serviceAffinityInUse,\n\t\tignoredExtendedResources: meta.ignoredExtendedResources,\n\t}\n\tnewPredMeta.podPorts = append([]*v1.ContainerPort(nil), meta.podPorts...)\n\tnewPredMeta.matchingAntiAffinityTerms = map[string][]matchingPodAntiAffinityTerm{}\n\tfor k, v := range meta.matchingAntiAffinityTerms {\n\t\tnewPredMeta.matchingAntiAffinityTerms[k] = append([]matchingPodAntiAffinityTerm(nil), v...)\n\t}\n\tnewPredMeta.nodeNameToMatchingAffinityPods = make(map[string][]*v1.Pod)\n\tfor k, v := range meta.nodeNameToMatchingAffinityPods {\n\t\tnewPredMeta.nodeNameToMatchingAffinityPods[k] = append([]*v1.Pod(nil), v...)\n\t}\n\tnewPredMeta.nodeNameToMatchingAntiAffinityPods = make(map[string][]*v1.Pod)\n\tfor k, v := range 
meta.nodeNameToMatchingAntiAffinityPods {\n\t\tnewPredMeta.nodeNameToMatchingAntiAffinityPods[k] = append([]*v1.Pod(nil), v...)\n\t}\n\tnewPredMeta.serviceAffinityMatchingPodServices = append([]*v1.Service(nil),\n\t\tmeta.serviceAffinityMatchingPodServices...)\n\tnewPredMeta.serviceAffinityMatchingPodList = append([]*v1.Pod(nil),\n\t\tmeta.serviceAffinityMatchingPodList...)\n\treturn (algorithm.PredicateMetadata)(newPredMeta)\n}", "func (d *VaultPKIQuery) CanShare() bool {\n\treturn false\n}", "func (ei ei) Share(cfg upspin.Config, readers []upspin.PublicKey, packdata []*[]byte) {\n}", "func ValidateNoDuplicateNetworkRules(attribute string, rules []*NetworkRule) error {\n\n\ttype indexedRule struct {\n\t\tindex int\n\t\trule *NetworkRule\n\t}\n\tseen := make(map[[sha256.Size]byte]*indexedRule, len(rules))\n\tfor iRule, rule := range rules {\n\n\t\tif rule == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\thash := sha256.New()\n\n\t\t// hash the action\n\t\tfmt.Fprintf(hash, \"%s/\", rule.Action)\n\n\t\t// hash the object\n\t\tobj := make([]string, len(rule.Object))\n\t\tfor i, subExpr := range rule.Object {\n\t\t\tcpy := append([]string{}, subExpr...)\n\t\t\tsort.Strings(cpy)\n\t\t\tobj[i] = strings.Join(cpy, \"/\")\n\t\t}\n\t\tsort.Strings(obj)\n\t\tfor _, subExpr := range obj {\n\t\t\tfmt.Fprintf(hash, \"[%s]/\", subExpr)\n\t\t}\n\n\t\t// hash the ports\n\t\tprotoPortCpy := append([]string{}, rule.ProtocolPorts...)\n\t\tfor i, port := range protoPortCpy {\n\t\t\tprotoPortCpy[i] = strings.ToLower(port)\n\t\t}\n\t\tsort.Strings(protoPortCpy)\n\t\tfor _, port := range protoPortCpy {\n\t\t\tfmt.Fprintf(hash, \"%s/\", port)\n\t\t}\n\n\t\t// check if hash was seen before\n\t\tvar digest [sha256.Size]byte\n\t\tcopy(digest[:], hash.Sum(nil))\n\t\tif prevRule, ok := seen[digest]; ok {\n\t\t\treturn makeValidationError(\n\t\t\t\tattribute,\n\t\t\t\tfmt.Sprintf(\"Duplicate network rules at the following indexes: [%d, %d]\", prevRule.index+1, 
iRule+1),\n\t\t\t)\n\t\t}\n\n\t\tseen[digest] = &indexedRule{index: iRule, rule: rule}\n\t}\n\n\treturn nil\n}", "func (cp *ComposerPool) Put(composer *Composer) (result bool) {\n\tlogger.Debug(\"Entering ComposerPool.Put\", composer)\n\tdefer func() { logger.Debug(\"Exiting ComposerPool.Put\", result) }()\n\n\tcp.PoolMutex.Lock()\n\tcp.Pool <- composer\n\tcp.PoolMutex.Unlock()\n\treturn true\n}", "func (k *keeper) StoreMultiWithoutBlocking(items []Item) error {\n\tif k.disableCaching {\n\t\treturn nil\n\t}\n\n\tclient := k.connPool.Get()\n\tdefer client.Close()\n\n\terr := client.Send(\"MULTI\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, item := range items {\n\t\terr = client.Send(\"SETEX\", item.GetKey(), k.decideCacheTTL(item), item.GetValue())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t_, err = client.Do(\"EXEC\")\n\treturn err\n}", "func Put(\n\tctx context.Context, batch engine.ReadWriter, cArgs CommandArgs, resp roachpb.Response,\n) (result.Result, error) {\n\targs := cArgs.Args.(*roachpb.PutRequest)\n\th := cArgs.Header\n\tms := cArgs.Stats\n\n\tvar ts hlc.Timestamp\n\tif !args.Inline {\n\t\tts = h.Timestamp\n\t}\n\tif h.DistinctSpans {\n\t\tif b, ok := batch.(engine.Batch); ok {\n\t\t\t// Use the distinct batch for both blind and normal ops so that we don't\n\t\t\t// accidentally flush mutations to make them visible to the distinct\n\t\t\t// batch.\n\t\t\tbatch = b.Distinct()\n\t\t\tdefer batch.Close()\n\t\t}\n\t}\n\tif args.Blind {\n\t\treturn result.Result{}, engine.MVCCBlindPut(ctx, batch, ms, args.Key, ts, args.Value, h.Txn)\n\t}\n\treturn result.Result{}, engine.MVCCPut(ctx, batch, ms, args.Key, ts, args.Value, h.Txn)\n}", "func (c *Client) ShareSecret() {\n\tgen := c.g.Point().Base()\n\trand := c.suite.RandomStream()\n\tsecret1 := c.g.Scalar().Pick(rand)\n\tsecret2 := c.g.Scalar().Pick(rand)\n\tpublic1 := c.g.Point().Mul(secret1, gen)\n\tpublic2 := c.g.Point().Mul(secret2, gen)\n\n\t//generate share secrets via Diffie-Hellman w/ 
all servers\n\t//one used for masks, one used for one-time pad\n\tcs1 := ClientDH{\n\t\tPublic: MarshalPoint(public1),\n\t\tId: c.id,\n\t}\n\tcs2 := ClientDH{\n\t\tPublic: MarshalPoint(public2),\n\t\tId: c.id,\n\t}\n\n\tmasks := make([][]byte, len(c.servers))\n\tsecrets := make([][]byte, len(c.servers))\n\n\tvar wg sync.WaitGroup\n\tfor i, rpcServer := range c.rpcServers {\n\t\twg.Add(1)\n\t\tgo func(i int, rpcServer *rpc.Client, cs1 ClientDH, cs2 ClientDH) {\n\t\t\tdefer wg.Done()\n\t\t\tservPub1 := make([]byte, SecretSize)\n\t\t\tservPub2 := make([]byte, SecretSize)\n\t\t\tservPub3 := make([]byte, SecretSize)\n\t\t\tcall1 := rpcServer.Go(\"Server.ShareMask\", &cs1, &servPub1, nil)\n\t\t\tcall2 := rpcServer.Go(\"Server.ShareSecret\", &cs2, &servPub2, nil)\n\t\t\tcall3 := rpcServer.Go(\"Server.GetEphKey\", 0, &servPub3, nil)\n\t\t\t<-call1.Done\n\t\t\t<-call2.Done\n\t\t\t<-call3.Done\n\t\t\tmasks[i] = MarshalPoint(c.g.Point().Mul(secret1, UnmarshalPoint(c.g, servPub1)))\n\t\t\t// c.masks[i] = make([]byte, SecretSize)\n\t\t\t// c.masks[i][c.id] = 1\n\t\t\tsecrets[i] = MarshalPoint(c.g.Point().Mul(secret2, UnmarshalPoint(c.g, servPub2)))\n\t\t\t//secrets[i] = make([]byte, SecretSize)\n\t\t\tc.ephKeys[i] = UnmarshalPoint(c.suite, servPub3)\n\t\t}(i, rpcServer, cs1, cs2)\n\t}\n\twg.Wait()\n\n\tfor r := range c.secretss {\n\t\tfor i := range c.secretss[r] {\n\t\t\tif r == 0 {\n\t\t\t\tsha3.ShakeSum256(c.secretss[r][i], secrets[i])\n\t\t\t} else {\n\t\t\t\tsha3.ShakeSum256(c.secretss[r][i], c.secretss[r-1][i])\n\t\t\t}\n\t\t}\n\t}\n\n\tfor r := range c.maskss {\n\t\tfor i := range c.maskss[r] {\n\t\t\tif r == 0 {\n\t\t\t\tsha3.ShakeSum256(c.maskss[r][i], masks[i])\n\t\t\t} else {\n\t\t\t\tsha3.ShakeSum256(c.maskss[r][i], c.maskss[r-1][i])\n\t\t\t}\n\t\t}\n\t}\n\n}", "func (s *s3ManifestService) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (godigest.Digest, error) {\n\tif err := s.r.init(); err != nil 
{\n\t\treturn \"\", err\n\t}\n\tmediaType, payload, err := manifest.Payload()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdgst := godigest.FromBytes(payload)\n\tblob := fmt.Sprintf(\"/v2/%s/blobs/%s\", s.r.repoName, dgst)\n\n\tif err := s.r.conditionalUpload(&s3manager.UploadInput{\n\t\tBucket: aws.String(s.r.bucket),\n\t\tContentType: aws.String(mediaType),\n\t\tBody: bytes.NewBuffer(payload),\n\t\tKey: aws.String(blob),\n\t}, dgst.String()); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// set manifests\n\ttags := []string{dgst.String()}\n\tfor _, option := range options {\n\t\tif opt, ok := option.(distribution.WithTagOption); ok {\n\t\t\ttags = append(tags, opt.Tag)\n\t\t}\n\t}\n\tfor _, tag := range tags {\n\t\tif _, err := s.r.s3.CopyObject(&s3.CopyObjectInput{\n\t\t\tBucket: aws.String(s.r.bucket),\n\t\t\tContentType: aws.String(mediaType),\n\t\t\tCopySource: aws.String(path.Join(s.r.bucket, blob)),\n\t\t\tKey: aws.String(fmt.Sprintf(\"/v2/%s/manifests/%s\", s.r.repoName, tag)),\n\t\t}); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn dgst, nil\n}", "func createDuplicates(cfg *duplicate.Config) {\n\tif cfg.File == \"\" {\n\t\tfmt.Println(\"Please specify the original file path (flag \\\"-file\\\")\")\n\t} else {\n\t\tfmt.Println(\"Functionality in development\")\n\t}\n}", "func (c *ConfigMapVault) Put(key, val string, createOnly bool) error {\n\tc.storeLock.Lock()\n\tdefer c.storeLock.Unlock()\n\tapiObj := &api_v1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: c.name,\n\t\t\tNamespace: c.namespace,\n\t\t},\n\t}\n\tcfgMapKey := fmt.Sprintf(\"%v/%v\", c.namespace, c.name)\n\n\titem, exists, err := c.configMapStore.GetByKey(cfgMapKey)\n\tif err == nil && exists {\n\t\tdata := item.(*api_v1.ConfigMap).Data\n\t\tif createOnly {\n\t\t\treturn fmt.Errorf(\"failed to create configmap %v, it is already existed with data %v.\", cfgMapKey, data)\n\t\t}\n\t\texistingVal, ok := data[key]\n\t\tif ok && existingVal == val {\n\t\t\t// duplicate, 
no need to update.\n\t\t\treturn nil\n\t\t}\n\t\tdata[key] = val\n\t\tapiObj.Data = data\n\t\tif existingVal != val {\n\t\t\tklog.Infof(\"Configmap %v has key %v but wrong value %v, updating to %v\", cfgMapKey, key, existingVal, val)\n\t\t} else {\n\t\t\tklog.Infof(\"Configmap %v will be updated with %v = %v\", cfgMapKey, key, val)\n\t\t}\n\t\tif err := c.configMapStore.Update(apiObj); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to update %v: %v\", cfgMapKey, err)\n\t\t}\n\t} else {\n\t\tapiObj.Data = map[string]string{key: val}\n\t\tif err := c.configMapStore.Add(apiObj); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to add %v: %v\", cfgMapKey, err)\n\t\t}\n\t}\n\tklog.Infof(\"Successfully stored key %v = %v in config map %v\", key, val, cfgMapKey)\n\treturn nil\n}", "func (pmp *PrivateMarketplace) Copy() *PrivateMarketplace {\n\tpmpCopy := *pmp\n\n\tif pmp.Deals != nil {\n\t\tpmpCopy.Deals = []*Deal{}\n\t\tfor i := range pmp.Deals {\n\t\t\tpmpCopy.Deals = append(pmpCopy.Deals, pmp.Deals[i].Copy())\n\t\t}\n\t}\n\n\treturn &pmpCopy\n}", "func BenchmarkSigsOnlyWriter64K(t *testing.B) {\n\tconst totalinput = 10 << 20\n\tinput := getBufferSize(totalinput)\n\n\tconst size = 64 << 10\n\tb := input.Bytes()\n\t// Create some duplicates\n\tfor i := 0; i < 50; i++ {\n\t\t// Read from 10 first blocks\n\t\tsrc := b[(i%10)*size : (i%10)*size+size]\n\t\t// Write into the following ones\n\t\tdst := b[(10+i)*size : (i+10)*size+size]\n\t\tcopy(dst, src)\n\t}\n\tt.ResetTimer()\n\tt.SetBytes(totalinput)\n\tfor i := 0; i < t.N; i++ {\n\t\tinput = bytes.NewBuffer(b)\n\t\tw, _ := dedup.NewWriter(ioutil.Discard, ioutil.Discard, dedup.ModeSignaturesOnly, size, 0)\n\t\tio.Copy(w, input)\n\t\terr := w.Close()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}", "func (_ BufferPtrPool16M) Put(b *[]byte) {\n\tPutBytesSlicePtr16M(b)\n}", "func (ms Mutations) Put(m Mutation) Mutations {\n\tif m.Context == immutable {\n\t\treturn ms\n\t}\n\tif ms == nil {\n\t\treturn 
map[Context][]MutatorFunc{m.Context: {m.mutator}}\n\t}\n\n\tif _, ok := ms[m.Context]; !ok {\n\t\tms[m.Context] = []MutatorFunc{m.mutator}\n\t} else {\n\t\tms[m.Context] = append(ms[m.Context], m.mutator)\n\t}\n\n\treturn ms\n}", "func BenchmarkDupMap(b *testing.B) {\n\tdupInit(b)\n\tfor n := 0; n < b.N; n++ {\n\t\tdupIntMapData.Dup()\n\t}\n}", "func RegisterDeepCopies(scheme *runtime.Scheme) error {\n\treturn scheme.AddGeneratedDeepCopyFuncs(\n\t\tconversion.GeneratedDeepCopyFunc{Fn: DeepCopy_route_Route, InType: reflect.TypeOf(&Route{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: DeepCopy_route_RouteIngress, InType: reflect.TypeOf(&RouteIngress{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: DeepCopy_route_RouteIngressCondition, InType: reflect.TypeOf(&RouteIngressCondition{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: DeepCopy_route_RouteList, InType: reflect.TypeOf(&RouteList{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: DeepCopy_route_RoutePort, InType: reflect.TypeOf(&RoutePort{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: DeepCopy_route_RouteSpec, InType: reflect.TypeOf(&RouteSpec{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: DeepCopy_route_RouteStatus, InType: reflect.TypeOf(&RouteStatus{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: DeepCopy_route_RouteTargetReference, InType: reflect.TypeOf(&RouteTargetReference{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: DeepCopy_route_RouterShard, InType: reflect.TypeOf(&RouterShard{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: DeepCopy_route_TLSConfig, InType: reflect.TypeOf(&TLSConfig{})},\n\t)\n}", "func RenameDuplicateColumns(RenameDuplicateColumns bool) ConfigFunc {\n\treturn func(c *Config) {\n\t\tc.RenameDuplicateColumns = RenameDuplicateColumns\n\t}\n}", "func CheckDuplicateMountPoint(mounts []*types.MountPoint, destination string) bool {\n\tfor _, sm := range mounts {\n\t\tif sm.Destination == destination {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (c *Cache) PutBlock(k Key, b Block) 
{\n\tidx := uint64(0)\n\tif len(c.shards) > 1 {\n\t\th := k.hashUint64()\n\t\tidx = h % uint64(len(c.shards))\n\t}\n\tshard := c.shards[idx]\n\tshard.PutBlock(k, b)\n}", "func (m *Setting) SetOverwriteAllowed(value *bool)() {\n err := m.GetBackingStore().Set(\"overwriteAllowed\", value)\n if err != nil {\n panic(err)\n }\n}", "func (b *BlockProcessorQueue) Put(block uint64) bool {\n\n\tresp := make(chan bool)\n\treq := Request{\n\t\tBlockNumber: block,\n\t\tResponseChan: resp,\n\t}\n\n\tb.PutChan <- req\n\treturn <-resp\n\n}", "func (s *Sync) fixDupes(claims []jsonrpc.Claim) (bool, error) {\n\tstart := time.Now()\n\tdefer func(start time.Time) {\n\t\ttiming.TimedComponent(\"fixDupes\").Add(time.Since(start))\n\t}(start)\n\tabandonedClaims := false\n\tvideoIDs := make(map[string]jsonrpc.Claim)\n\tfor _, c := range claims {\n\t\tif !isYtsyncClaim(c, s.DbChannelData.ChannelClaimID) {\n\t\t\tcontinue\n\t\t}\n\t\ttn := c.Value.GetThumbnail().GetUrl()\n\t\tvideoID := tn[strings.LastIndex(tn, \"/\")+1:]\n\n\t\tcl, ok := videoIDs[videoID]\n\t\tif !ok || cl.ClaimID == c.ClaimID {\n\t\t\tvideoIDs[videoID] = c\n\t\t\tcontinue\n\t\t}\n\t\t// only keep the most recent one\n\t\tclaimToAbandon := c\n\t\tvideoIDs[videoID] = cl\n\t\tif c.Height > cl.Height {\n\t\t\tclaimToAbandon = cl\n\t\t\tvideoIDs[videoID] = c\n\t\t}\n\t\t//it's likely that all we need is s.DbChannelData.PublishAddress.IsMine but better be safe than sorry I guess\n\t\tif (claimToAbandon.Address != s.DbChannelData.PublishAddress.Address || s.DbChannelData.PublishAddress.IsMine) && !s.syncedVideos[videoID].Transferred {\n\t\t\tlog.Debugf(\"abandoning %+v\", claimToAbandon)\n\t\t\t_, err := s.daemon.StreamAbandon(claimToAbandon.Txid, claimToAbandon.Nout, nil, true)\n\t\t\tif err != nil {\n\t\t\t\treturn true, err\n\t\t\t}\n\t\t\tabandonedClaims = true\n\t\t} else {\n\t\t\tlog.Debugf(\"claim is not ours. 
Have the user run this: lbrynet stream abandon --txid=%s --nout=%d\", claimToAbandon.Txid, claimToAbandon.Nout)\n\t\t}\n\t}\n\treturn abandonedClaims, nil\n}", "func Copy(source KVStore, target KVStore) error {\n\n\tvar innerErr error\n\tif err := source.Iterate(EmptyPrefix, func(key, value Value) bool {\n\t\tif err := target.Set(key, value); err != nil {\n\t\t\tinnerErr = err\n\t\t}\n\n\t\treturn innerErr == nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\tif innerErr != nil {\n\t\treturn innerErr\n\t}\n\n\treturn target.Flush()\n}", "func (_ BufferPtrPool512) Put(b *[]byte) {\n\tPutBytesSlicePtr512(b)\n}", "func duplicateIP(ip net.IP) net.IP {\n\tdup := make(net.IP, len(ip))\n\tcopy(dup, ip)\n\treturn dup\n}", "func (_ BufferPtrPool512K) Put(b *[]byte) {\n\tPutBytesSlicePtr512K(b)\n}", "func (dao *blockDAO) putBlock(blk *block.Block) error {\n\tbatch := db.NewBatch()\n\n\theight := byteutil.Uint64ToBytes(blk.Height())\n\thash := blk.HashBlock()\n\tserHeader, err := blk.Header.Serialize()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to serialize block header\")\n\t}\n\tserBody, err := blk.Body.Serialize()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to serialize block body\")\n\t}\n\tserFooter, err := blk.Footer.Serialize()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to serialize block footer\")\n\t}\n\tif dao.compressBlock {\n\t\ttimer := dao.timerFactory.NewTimer(\"compress_header\")\n\t\tserHeader, err = compress.Compress(serHeader)\n\t\ttimer.End()\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"error when compressing a block header\")\n\t\t}\n\t\ttimer = dao.timerFactory.NewTimer(\"compress_body\")\n\t\tserBody, err = compress.Compress(serBody)\n\t\ttimer.End()\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"error when compressing a block body\")\n\t\t}\n\t\ttimer = dao.timerFactory.NewTimer(\"compress_footer\")\n\t\tserFooter, err = compress.Compress(serFooter)\n\t\ttimer.End()\n\t\tif err != nil 
{\n\t\t\treturn errors.Wrapf(err, \"error when compressing a block footer\")\n\t\t}\n\t}\n\tbatch.Put(blockHeaderNS, hash[:], serHeader, \"failed to put block header\")\n\tbatch.Put(blockBodyNS, hash[:], serBody, \"failed to put block body\")\n\tbatch.Put(blockFooterNS, hash[:], serFooter, \"failed to put block footer\")\n\n\thashKey := append(hashPrefix, hash[:]...)\n\tbatch.Put(blockHashHeightMappingNS, hashKey, height, \"failed to put hash -> height mapping\")\n\n\theightKey := append(heightPrefix, height...)\n\tbatch.Put(blockHashHeightMappingNS, heightKey, hash[:], \"failed to put height -> hash mapping\")\n\n\tvalue, err := dao.kvstore.Get(blockNS, topHeightKey)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to get top height\")\n\t}\n\ttopHeight := enc.MachineEndian.Uint64(value)\n\tif blk.Height() > topHeight {\n\t\tbatch.Put(blockNS, topHeightKey, height, \"failed to put top height\")\n\t}\n\n\tvalue, err = dao.kvstore.Get(blockNS, totalActionsKey)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to get total actions\")\n\t}\n\ttotalActions := enc.MachineEndian.Uint64(value)\n\ttotalActions += uint64(len(blk.Actions))\n\ttotalActionsBytes := byteutil.Uint64ToBytes(totalActions)\n\tbatch.Put(blockNS, totalActionsKey, totalActionsBytes, \"failed to put total actions\")\n\n\tif !dao.writeIndex {\n\t\treturn dao.kvstore.Commit(batch)\n\t}\n\tif err := indexBlock(dao.kvstore, blk, batch); err != nil {\n\t\treturn err\n\t}\n\treturn dao.kvstore.Commit(batch)\n}", "func (stack *StackNode) performDuplicate() bool {\n\ttop, err := stack.Top()\n\tif err != nil {\n\t\t//\t\tfmt.Println(err)\n\t\treturn false\n\t}\n\terr = stack.Push(top)\n\tif err != nil {\n\t\t//\t\tfmt.Println(err)\n\t\treturn false\n\t}\n\treturn true\n}", "func handleWriteBlock(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tvar m model.Message\n\n\t// Decode http request into message struct\n\tdecoder := 
json.NewDecoder(r.Body)\n\tif err := decoder.Decode(&m); err != nil {\n\t\trespondWithJSON(w, r, http.StatusBadRequest, r.Body)\n\t\treturn\n\t}\n\n\t// checks if the password is correct\n\t// if !authenticate(m.Password) {\n\t// \trespondWithJSON(w, r, http.StatusUnauthorized, r.Body)\n\t// }\n\n\tdefer r.Body.Close()\n\n\t//ensure atomicity when creating new block\n\tvar mutex = &sync.Mutex{}\n\tmutex.Lock()\n\tnewBlock := blockchainhelpers.GenerateBlock(model.Blockchain[len(model.Blockchain)-1], m.BPM)\n\tmutex.Unlock()\n\n\tif blockchainhelpers.IsBlockValid(newBlock, model.Blockchain[len(model.Blockchain)-1]) {\n\t\tmodel.Blockchain = append(model.Blockchain, newBlock)\n\t\tspew.Dump(model.Blockchain)\n\t}\n\n\trespondWithJSON(w, r, http.StatusCreated, newBlock)\n\n}", "func AddCopy(ctx context.Context, config *config.Config, mgr manager.Manager) error {\n\tctx = ctxlog.NewContextWithRecorder(ctx, \"copy-reconciler\", mgr.GetEventRecorderFor(\"copy-recorder\"))\n\tlog := ctxlog.ExtractLogger(ctx)\n\tr := NewCopyReconciler(ctx, config, mgr, credsgen.NewInMemoryGenerator(log), controllerutil.SetControllerReference)\n\n\tc, err := controller.New(\"copy-controller\", mgr, controller.Options{\n\t\tReconciler: r,\n\t\tMaxConcurrentReconciles: config.MaxQuarksSecretWorkers,\n\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Adding copy controller to manager failed.\")\n\t}\n\n\tnsPred := newNSPredicate(ctx, mgr.GetClient(), config.MonitoredID)\n\n\t// Watch for changes to the copied status of QuarksSecrets\n\tp := predicate.Funcs{\n\t\tCreateFunc: func(e event.CreateEvent) bool { return false },\n\t\tDeleteFunc: func(e event.DeleteEvent) bool { return false },\n\t\tGenericFunc: func(e event.GenericEvent) bool { return false },\n\t\tUpdateFunc: func(e event.UpdateEvent) bool {\n\t\t\tn := e.ObjectNew.(*qsv1a1.QuarksSecret)\n\n\t\t\tif n.Status.Copied != nil {\n\t\t\t\tctxlog.Debugf(ctx, \"Skipping QuarksSecret '%s', if copy status '%v' is true\", n.Name, 
*n.Status.Copied)\n\t\t\t\treturn !(*n.Status.Copied)\n\t\t\t}\n\n\t\t\treturn true\n\t\t},\n\t}\n\terr = c.Watch(&source.Kind{Type: &qsv1a1.QuarksSecret{}}, &handler.EnqueueRequestForObject{}, nsPred, p)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Watching quarks secrets failed in copy controller.\")\n\t}\n\n\t// Watch for changes to user created secrets\n\tp = predicate.Funcs{\n\t\tCreateFunc: func(e event.CreateEvent) bool { return false },\n\t\tDeleteFunc: func(e event.DeleteEvent) bool { return false },\n\t\tGenericFunc: func(e event.GenericEvent) bool { return false },\n\t\tUpdateFunc: func(e event.UpdateEvent) bool {\n\t\t\tn := e.ObjectNew.(*corev1.Secret)\n\t\t\to := e.ObjectOld.(*corev1.Secret)\n\n\t\t\tshouldProcessReconcile := isUserCreatedSecret(n)\n\t\t\tif reflect.DeepEqual(n.Data, o.Data) && reflect.DeepEqual(n.Labels, o.Labels) &&\n\t\t\t\treflect.DeepEqual(n.Annotations, o.Annotations) {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\treturn shouldProcessReconcile\n\t\t},\n\t}\n\terr = c.Watch(&source.Kind{Type: &corev1.Secret{}}, handler.EnqueueRequestsFromMapFunc(\n\t\tfunc(a crc.Object) []reconcile.Request {\n\t\t\tsecret := a.(*corev1.Secret)\n\n\t\t\tif skip.Reconciles(ctx, mgr.GetClient(), secret) {\n\t\t\t\treturn []reconcile.Request{}\n\t\t\t}\n\n\t\t\treconciles, err := listQuarksSecretsReconciles(ctx, mgr.GetClient(), secret, secret.Namespace)\n\t\t\tif err != nil {\n\t\t\t\tctxlog.Errorf(ctx, \"Failed to calculate reconciles for secret '%s/%s': %v\", secret.Namespace, secret.Name, err)\n\t\t\t}\n\t\t\tif len(reconciles) > 0 {\n\t\t\t\treturn reconciles\n\t\t\t}\n\n\t\t\treturn reconciles\n\t\t}), nsPred, p)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Watching user defined secrets failed in copy controller.\")\n\t}\n\n\treturn nil\n}", "func (b Board) SafePut(pos Position, sap SideAndPiece) {\n\tif b.Has(pos) {\n\t\tpanic(fmt.Sprintf(\"%s must be empty.\", pos.String()))\n\t}\n\tb[pos] = sap\n}", "func (h *KVHandler) Put(kvPair 
*api.KVPair, wOptions *api.WriteOptions) (*api.WriteMeta, error) {\n\ttxnItem := &api.KVTxnOp{\n\t\tVerb: api.KVSet,\n\t\tKey: kvPair.Key,\n\t\tValue: kvPair.Value,\n\t}\n\th.KVTxnOps = append(h.KVTxnOps, txnItem)\n\treturn nil, nil\n}", "func (r *KeyRing) isDuplicate(e *openpgp.Entity) bool {\n\tfor _, re := range r.entities {\n\t\tif re.PrimaryKey.Fingerprint == e.PrimaryKey.Fingerprint {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (s *Set) ShallowCopy() *Set {\n\tcopiedSet := NewSet()\n\n\tfor k := range s.set {\n\t\tcopiedSet.Add(k)\n\t}\n\n\treturn copiedSet\n}", "func WriteManifest(manifestWriter io.Writer, compression *pwr.CompressionSettings, container *tlc.Container, blockHashes *BlockHashMap) error {\n\trawWire := wire.NewWriteContext(manifestWriter)\n\terr := rawWire.WriteMagic(pwr.ManifestMagic)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\terr = rawWire.WriteMessage(&pwr.ManifestHeader{\n\t\tCompression: compression,\n\t\tAlgorithm: pwr.HashAlgorithm_SHAKE128_32,\n\t})\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\twire, err := pwr.CompressWire(rawWire, compression)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\terr = wire.WriteMessage(container)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tsh := &pwr.SyncHeader{}\n\tmbh := &pwr.ManifestBlockHash{}\n\n\tfor fileIndex, f := range container.Files {\n\t\tsh.Reset()\n\t\tsh.FileIndex = int64(fileIndex)\n\t\terr = wire.WriteMessage(sh)\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\n\t\tnumBlocks := ComputeNumBlocks(f.Size)\n\n\t\tfor blockIndex := int64(0); blockIndex < numBlocks; blockIndex++ {\n\t\t\tloc := BlockLocation{FileIndex: int64(fileIndex), BlockIndex: blockIndex}\n\t\t\thash := blockHashes.Get(loc)\n\t\t\tif hash == nil {\n\t\t\t\terr = fmt.Errorf(\"missing BlockHash for block %+v\", loc)\n\t\t\t\treturn errors.WithStack(err)\n\t\t\t}\n\n\t\t\tmbh.Reset()\n\t\t\tmbh.Hash = hash\n\n\t\t\terr = 
wire.WriteMessage(mbh)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.WithStack(err)\n\t\t\t}\n\t\t}\n\t}\n\n\terr = wire.Close()\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\treturn nil\n}", "func IsDuplicate(b types.ButtonEvent, ElevSlice []types.Elevator, ID int) bool {\n\n\tbtnInt := types.ButtonMap[b.Button]\n\n\tif btnInt == 2 {\n\t\treturn (ElevSlice[ID].Orders[b.Floor][btnInt] == 1)\n\t}\n\tfor elevIndex := 0; elevIndex < types.NumElevators; elevIndex++ {\n\t\tif ElevSlice[ID].Orders[b.Floor][btnInt] == 1 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func Share(mod *big.Int, nPieces int, secret *big.Int) []*big.Int {\n\tif nPieces == 0 {\n\t\tpanic(\"Number of shares must be at least 1\")\n\t} else if nPieces == 1 {\n\t\treturn []*big.Int{secret}\n\t}\n\n\tout := make([]*big.Int, nPieces)\n\n\tacc := new(big.Int)\n\tfor i := 0; i < nPieces-1; i++ {\n\t\tout[i] = utils.RandInt(mod)\n\n\t\tacc.Add(acc, out[i])\n\t}\n\n\tacc.Sub(secret, acc)\n\tacc.Mod(acc, mod)\n\tout[nPieces-1] = acc\n\n\treturn out\n}", "func spannerBatchPut(ctx context.Context, db string, m []*spanner.Mutation) error {\n\tclient, err := spanner.NewClient(ctx, db)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to create client %v\", err)\n\t\treturn err\n\t}\n\tdefer client.Close()\n\n\tif _, err = client.Apply(ctx, m); err != nil {\n\t\treturn errors.New(\"ResourceNotFoundException: \" + err.Error())\n\t}\n\treturn nil\n}", "func validateAffinityGroupDuplicate(agList []ovirt.AffinityGroup) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tfor i, ag1 := range agList {\n\t\tfor _, ag2 := range agList[i+1:] {\n\t\t\tif ag1.Name == ag2.Name {\n\t\t\t\tif ag1.Priority != ag2.Priority ||\n\t\t\t\t\tag1.Description != ag2.Description ||\n\t\t\t\t\tag1.Enforcing != ag2.Enforcing {\n\t\t\t\t\tallErrs = append(\n\t\t\t\t\t\tallErrs,\n\t\t\t\t\t\t&field.Error{\n\t\t\t\t\t\t\tType: field.ErrorTypeDuplicate,\n\t\t\t\t\t\t\tBadValue: errors.Errorf(\"Error validating affinity 
groups: found same \"+\n\t\t\t\t\t\t\t\t\"affinity group defined twice with different fields %v anf %v\", ag1, ag2)})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn allErrs\n}", "func (_Ethdkg *EthdkgTransactor) DistributeShares(opts *bind.TransactOpts, encrypted_shares []*big.Int, commitments [][2]*big.Int) (*types.Transaction, error) {\n\treturn _Ethdkg.contract.Transact(opts, \"distribute_shares\", encrypted_shares, commitments)\n}", "func (f *FakeImagesClient) Put(ctx context.Context, putOpts *images.PutRequest, opts ...grpc.CallOption) (*googleprotobuf.Empty, error) {\n\tf.Lock()\n\tdefer f.Unlock()\n\tf.appendCalled(\"put\", putOpts)\n\tif err := f.getError(\"put\"); err != nil {\n\t\treturn nil, err\n\t}\n\tf.ImageList[putOpts.Image.Name] = putOpts.Image\n\treturn &googleprotobuf.Empty{}, nil\n}", "func (h *Handle) HandleWrite(\n\tctx context.Context,\n\ttxn txnif.AsyncTxn,\n\treq *db.WriteReq,\n\tresp *db.WriteResp) (err error) {\n\tdefer func() {\n\t\tif req.Cancel != nil {\n\t\t\treq.Cancel()\n\t\t}\n\t}()\n\tctx = perfcounter.WithCounterSetFrom(ctx, h.db.Opts.Ctx)\n\tswitch req.PkCheck {\n\tcase db.FullDedup:\n\t\ttxn.SetDedupType(txnif.FullDedup)\n\tcase db.IncrementalDedup:\n\t\tif h.db.Opts.IncrementalDedup {\n\t\t\ttxn.SetDedupType(txnif.IncrementalDedup)\n\t\t} else {\n\t\t\ttxn.SetDedupType(txnif.FullSkipWorkSpaceDedup)\n\t\t}\n\tcase db.FullSkipWorkspaceDedup:\n\t\ttxn.SetDedupType(txnif.FullSkipWorkSpaceDedup)\n\t}\n\tcommon.DoIfDebugEnabled(func() {\n\t\tlogutil.Debugf(\"[precommit] handle write typ: %v, %d-%s, %d-%s txn: %s\",\n\t\t\treq.Type, req.TableID,\n\t\t\treq.TableName, req.DatabaseId, req.DatabaseName,\n\t\t\ttxn.String(),\n\t\t)\n\t\tlogutil.Debugf(\"[precommit] write batch: %s\", common.DebugMoBatch(req.Batch))\n\t})\n\tvar dbase handle.Database\n\tvar tb handle.Relation\n\tdefer func() {\n\t\tcommon.DoIfDebugEnabled(func() {\n\t\t\tlogutil.Debugf(\"[precommit] handle write end txn: %s\", txn.String())\n\t\t})\n\t\tif err != nil && 
moerr.IsMoErrCode(err, moerr.ErrDuplicateEntry) && (strings.HasPrefix(req.TableName, \"bmsql\") || strings.HasPrefix(req.TableName, \"sbtest\")) {\n\t\t\tlogutil.Infof(\"[precommit] dup handle catalog on dup %s \", tb.GetMeta().(*catalog2.TableEntry).PPString(common.PPL1, 0, \"\"))\n\t\t}\n\t}()\n\n\tdbase, err = txn.GetDatabaseByID(req.DatabaseId)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttb, err = dbase.GetRelationByID(req.TableID)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif req.Type == db.EntryInsert {\n\t\t//Add blocks which had been bulk-loaded into S3 into table.\n\t\tif req.FileName != \"\" {\n\t\t\tlocations := make([]objectio.Location, 0)\n\t\t\tfor _, metLoc := range req.MetaLocs {\n\t\t\t\tlocation, err := blockio.EncodeLocationFromString(metLoc)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tlocations = append(locations, location)\n\t\t\t}\n\t\t\terr = tb.AddBlksWithMetaLoc(ctx, locations)\n\t\t\treturn\n\t\t}\n\t\t//check the input batch passed by cn is valid.\n\t\tlen := 0\n\t\tfor i, vec := range req.Batch.Vecs {\n\t\t\tif vec == nil {\n\t\t\t\tlogutil.Errorf(\"the vec:%d in req.Batch is nil\", i)\n\t\t\t\tpanic(\"invalid vector : vector is nil\")\n\t\t\t}\n\t\t\tif vec.Length() == 0 {\n\t\t\t\tlogutil.Errorf(\"the vec:%d in req.Batch is empty\", i)\n\t\t\t\tpanic(\"invalid vector: vector is empty\")\n\t\t\t}\n\t\t\tif i == 0 {\n\t\t\t\tlen = vec.Length()\n\t\t\t}\n\t\t\tif vec.Length() != len {\n\t\t\t\tlogutil.Errorf(\"the length of vec:%d in req.Batch is not equal to the first vec\", i)\n\t\t\t\tpanic(\"invalid batch : the length of vectors in batch is not the same\")\n\t\t\t}\n\t\t}\n\t\t//Appends a batch of data into table.\n\t\terr = AppendDataToTable(ctx, tb, req.Batch)\n\t\treturn\n\t}\n\n\t//handle delete\n\tif req.FileName != \"\" {\n\t\t//wait for loading deleted row-id done.\n\t\tnctx := context.Background()\n\t\tif deadline, ok := ctx.Deadline(); ok {\n\t\t\t_, req.Cancel = context.WithTimeout(nctx, 
time.Until(deadline))\n\t\t}\n\t\trowidIdx := 0\n\t\tpkIdx := 1\n\t\tfor _, key := range req.DeltaLocs {\n\t\t\tvar location objectio.Location\n\t\t\tlocation, err = blockio.EncodeLocationFromString(key)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tvar ok bool\n\t\t\tvar bat *batch.Batch\n\t\t\tbat, err = blockio.LoadTombstoneColumns(\n\t\t\t\tctx,\n\t\t\t\t[]uint16{uint16(rowidIdx), uint16(pkIdx)},\n\t\t\t\tnil,\n\t\t\t\th.db.Runtime.Fs.Service,\n\t\t\t\tlocation,\n\t\t\t\tnil,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tblkids := getBlkIDsFromRowids(bat.Vecs[0])\n\t\t\tid := tb.GetMeta().(*catalog2.TableEntry).AsCommonID()\n\t\t\tif len(blkids) == 1 {\n\t\t\t\tfor blkID := range blkids {\n\t\t\t\t\tid.BlockID = blkID\n\t\t\t\t}\n\t\t\t\tok, err = tb.TryDeleteByDeltaloc(id, location)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tlogutil.Warnf(\"blk %v try delete by deltaloc failed\", id.BlockID.String())\n\t\t\t} else {\n\t\t\t\tlogutil.Warnf(\"multiply blocks in one deltalocation\")\n\t\t\t}\n\t\t\trowIDVec := containers.ToTNVector(bat.Vecs[0])\n\t\t\tdefer rowIDVec.Close()\n\t\t\tpkVec := containers.ToTNVector(bat.Vecs[1])\n\t\t\t//defer pkVec.Close()\n\t\t\tif err = tb.DeleteByPhyAddrKeys(rowIDVec, pkVec); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif len(req.Batch.Vecs) != 2 {\n\t\tpanic(fmt.Sprintf(\"req.Batch.Vecs length is %d, should be 2\", len(req.Batch.Vecs)))\n\t}\n\trowIDVec := containers.ToTNVector(req.Batch.GetVector(0))\n\tdefer rowIDVec.Close()\n\tpkVec := containers.ToTNVector(req.Batch.GetVector(1))\n\t//defer pkVec.Close()\n\terr = tb.DeleteByPhyAddrKeys(rowIDVec, pkVec)\n\treturn\n}", "func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {\n\tparams := make(map[string]string, 1)\n\tif p.Flags != 0 {\n\t\tparams[\"flags\"] = strconv.FormatUint(p.Flags, 10)\n\t}\n\t_, wm, err := k.put(p.Key, params, p.Value, 
q)\n\treturn wm, err\n}", "func (_ BufferPtrPool1M) Put(b *[]byte) {\n\tPutBytesSlicePtr1M(b)\n}", "func AllowPartialUpdates() BatchOption {\n\treturn batchOptionFunc(func(b Batch) Batch {\n\t\tb.AllowPartialUpdates = true\n\t\treturn b\n\t})\n}", "func (_ BufferPtrPool256K) Put(b *[]byte) {\n\tPutBytesSlicePtr256K(b)\n}", "func ProcessDuplicates(file *File, flag bool) error {\n\tif flag {\n\t\terr := os.Remove(file.Path)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"can't remove file: %s by this error: %v\",\n\t\t\t\tfile.Path, err)\n\t\t}\n\t\treturn nil\n\t}\n\tfmt.Printf(\"Duplicate: %s, with size %d byte(-s);\\n\",\n\t\tfile.Path,\n\t\tfile.Size)\n\treturn nil\n}", "func WithShares(shares *url.URL) Opt {\n\treturn func(opts *Options) {\n\t\topts.Shares = shares\n\t}\n}" ]
[ "0.5433435", "0.51779854", "0.5121265", "0.45189086", "0.44898877", "0.43579203", "0.43035212", "0.42886138", "0.4212644", "0.4183098", "0.4140767", "0.4131019", "0.40938774", "0.40888754", "0.40803897", "0.40694353", "0.40229222", "0.40060905", "0.4005224", "0.39877382", "0.39833277", "0.39755702", "0.3975145", "0.3966641", "0.39559793", "0.39407322", "0.39376116", "0.39349532", "0.39329475", "0.39174366", "0.390392", "0.39012468", "0.38975948", "0.38917676", "0.3886696", "0.38827488", "0.38791662", "0.38659146", "0.38654685", "0.38630858", "0.38547602", "0.38509023", "0.38452682", "0.38410434", "0.38353196", "0.38326362", "0.3821901", "0.38206184", "0.38186675", "0.3818266", "0.38182202", "0.38098136", "0.3806894", "0.38016403", "0.3799871", "0.3796002", "0.37925392", "0.37900123", "0.37887105", "0.37867868", "0.3780418", "0.37682056", "0.37662706", "0.37661535", "0.37596896", "0.37563112", "0.37508842", "0.37455806", "0.37425756", "0.37409097", "0.3727431", "0.37252766", "0.37230128", "0.37182", "0.37171265", "0.3712487", "0.370881", "0.3701506", "0.37001193", "0.36981037", "0.3696458", "0.36953008", "0.36948428", "0.36875415", "0.36875114", "0.3685966", "0.3684511", "0.36804813", "0.36782986", "0.3675728", "0.3674411", "0.36696813", "0.3669003", "0.36680636", "0.3665064", "0.3660059", "0.3657303", "0.36561692", "0.36529803", "0.3650922" ]
0.8344131
0
Hours returns the duration as a floating point number of hours.
func (d Duration) Hours() float64 { return time.Duration(d).Hours() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (d duration) hours() float64 {\n\thour := d / hour\n\tnsec := d % hour\n\treturn float64(hour) + float64(nsec)*(1e-9/60/60)\n}", "func (d Duration) Hours() float64 {\n\thour := d / Hour\n\tusec := d % Hour\n\treturn float64(hour) + float64(usec)/(60*60*1e6)\n}", "func (f *Formatter) Hours() string {\n\tvar format string\n\tif f.withoutUnit {\n\t\tformat = \"%.2f\\n\"\n\t} else {\n\t\tformat = \"%.2f hours\\n\"\n\t}\n\treturn fmt.Sprintf(format, f.duration.Hours())\n}", "func hour(n int) float64 { return float64(n * 60 * 60) }", "func (i ISODuration) GetHours() int {\r\n\treturn i.duration.Hours\r\n}", "func (o TransferJobScheduleStartTimeOfDayOutput) Hours() pulumi.IntOutput {\n\treturn o.ApplyT(func(v TransferJobScheduleStartTimeOfDay) int { return v.Hours }).(pulumi.IntOutput)\n}", "func getHours(time *int) int {\n\treturn getTimeScale(time, 3600)\n}", "func hoursToDuration(n uint8) Duration {\n\treturn Duration(time.Duration(n) * time.Hour)\n}", "func (o *Gojwt) GetNumHoursDuration()(time.Duration){\n return o.numHoursDuration\n}", "func (c *Job) Hours() *Job {\n\tif c.delayUnit == delayNone {\n\t\tc.unit = hours\n\t} else {\n\t\tc.delayUnit = delayHours\n\t}\n\treturn c\n}", "func (o *PeriodModel) GetHours() int64 {\n\tif o == nil || o.Hours == nil {\n\t\tvar ret int64\n\t\treturn ret\n\t}\n\treturn *o.Hours\n}", "func (o InstanceMaintenanceWindowStartTimeOutput) Hours() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v InstanceMaintenanceWindowStartTime) *int { return v.Hours }).(pulumi.IntPtrOutput)\n}", "func (o TransferJobScheduleStartTimeOfDayPtrOutput) Hours() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *TransferJobScheduleStartTimeOfDay) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Hours\n\t}).(pulumi.IntPtrOutput)\n}", "func (o InstanceDenyMaintenancePeriodTimeOutput) Hours() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v InstanceDenyMaintenancePeriodTime) *int { return v.Hours }).(pulumi.IntPtrOutput)\n}", "func (o 
InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput) Hours() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime) *int { return v.Hours }).(pulumi.IntPtrOutput)\n}", "func (o InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput) Hours() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime) *int { return v.Hours }).(pulumi.IntPtrOutput)\n}", "func (o InstanceMaintenanceWindowStartTimePtrOutput) Hours() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *InstanceMaintenanceWindowStartTime) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Hours\n\t}).(pulumi.IntPtrOutput)\n}", "func (c Clock) Hour() int {\n\treturn int(time.Duration(c) / time.Hour)\n}", "func halfHoursToDuration(n uint8) Duration {\n\treturn Duration(time.Duration(n) * 30 * time.Minute)\n}", "func (o InstanceDenyMaintenancePeriodTimePtrOutput) Hours() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *InstanceDenyMaintenancePeriodTime) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Hours\n\t}).(pulumi.IntPtrOutput)\n}", "func (t TimeOfDay) HalfHours() uint8 {\n\tn := time.Duration(t) / (30 * time.Minute)\n\tif n > 255 {\n\t\tlog.Panicf(\"time of day %v is too large\", t)\n\t}\n\treturn uint8(n)\n}", "func (t Time) Hour() int {}", "func (d *duration) setHours(h float64) {\n\t*d = duration(h) * hour\n}", "func (j *Job) Hours() (job *Job) {\n\tj.unit = JOB_UNIT_TYPE_HOUR\n\treturn j\n}", "func (p Project) HoursMinutes() (h int, m int) {\n\tDB.Preload(\"Entries\").Find(&p)\n\tvar total float64\n\tfor _, i := range p.Entries {\n\t\ttotal += i.TotalTime.Seconds()\n\t}\n\thours := int(total) / 3600\n\tf := float64((int(total) % 3600.0) / 60.0)\n\ti := float64(f) + float64(0.5)\n\tminutes := int(i)\n\treturn hours, minutes\n}", "func NsHours(count int64) int64 { return NsMinutes(count * 60) }", "func (i ISODuration) SetHours(hours int) 
{\r\n\ti.duration.Hours = hours\r\n}", "func normalizeHours(hrs int) int {\n\tflag := false\n\n\tif hrs < 0 {\n\t\tflag = !flag\n\t\thrs = -1 * hrs\n\t}\n\n\thrs = hrs % 24\n\n\tif flag && hrs > 0 {\n\t\thrs = 24 - hrs\n\t}\n\n\treturn hrs\n}", "func (s *ApprovalThresholdPolicy) SetProposalDurationInHours(v int64) *ApprovalThresholdPolicy {\n\ts.ProposalDurationInHours = &v\n\treturn s\n}", "func (o SchedulingPtrOutput) MaintenanceFreezeDurationHours() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *Scheduling) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.MaintenanceFreezeDurationHours\n\t}).(pulumi.IntPtrOutput)\n}", "func (o KubernetesClusterMaintenanceWindowAllowedOutput) Hours() pulumi.IntArrayOutput {\n\treturn o.ApplyT(func(v KubernetesClusterMaintenanceWindowAllowed) []int { return v.Hours }).(pulumi.IntArrayOutput)\n}", "func (o SchedulingResponseOutput) MaintenanceFreezeDurationHours() pulumi.IntOutput {\n\treturn o.ApplyT(func(v SchedulingResponse) int { return v.MaintenanceFreezeDurationHours }).(pulumi.IntOutput)\n}", "func (o SchedulingOutput) MaintenanceFreezeDurationHours() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v Scheduling) *int { return v.MaintenanceFreezeDurationHours }).(pulumi.IntPtrOutput)\n}", "func (o *StoragePhysicalDisk) GetPowerOnHours() int64 {\n\tif o == nil || o.PowerOnHours == nil {\n\t\tvar ret int64\n\t\treturn ret\n\t}\n\treturn *o.PowerOnHours\n}", "func (o *Gojwt) SetNumHoursDuration(hours time.Duration){\n o.numHoursDuration = hours\n}", "func (t HighresTimestamp) Duration() time.Duration {\n\treturn time.Duration(uint64(t) * uint64(tbinfo.numer) / uint64(tbinfo.denom)))\n}", "func convertEstimationToHours(jiraEstimation string) string {\n\t//Use regexp\n\ttotalHours := 0\n\n\t//Convert weeks to hours\n\tre := regexp.MustCompile(`(\\d*)w`)\n\tsubmatch := re.FindStringSubmatch(jiraEstimation)\n\n\tif submatch != nil {\n\t\tif weeks, err := strconv.Atoi(submatch[1]); err == nil {\n\t\t\ttotalHours = 
totalHours + weeks*40\n\t\t}\n\t}\n\n\t//Convert days to hours\n\tre = regexp.MustCompile(`(\\d*)d`)\n\tsubmatch = re.FindStringSubmatch(jiraEstimation)\n\n\tif submatch != nil {\n\t\tif days, err := strconv.Atoi(submatch[1]); err == nil {\n\t\t\ttotalHours = totalHours + days*8\n\t\t}\n\t}\n\n\t//Add hours to already calculated value\n\tre = regexp.MustCompile(`(\\d*)h`)\n\tsubmatch = re.FindStringSubmatch(jiraEstimation)\n\n\tif submatch != nil {\n\t\tif hours, err := strconv.Atoi(submatch[1]); err == nil {\n\t\t\ttotalHours = totalHours + hours\n\t\t}\n\t}\n\n\t//Add one hour, if some minutes are present(rounding to the up)\n\tre = regexp.MustCompile(`(\\d*)m`)\n\tsubmatch = re.FindStringSubmatch(jiraEstimation)\n\n\tif submatch != nil {\n\t\ttotalHours += 1\n\t}\n\n\treturn strconv.Itoa(totalHours)\n}", "func (dt *DateTime) Hour() *Number {\n\topChain := dt.chain.enter(\"Hour()\")\n\tdefer opChain.leave()\n\n\tif opChain.failed() {\n\t\treturn newNumber(opChain, float64(0))\n\t}\n\n\treturn newNumber(opChain, float64(dt.value.Hour()))\n}", "func (o *PeriodModel) GetHoursOk() (*int64, bool) {\n\tif o == nil || o.Hours == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Hours, true\n}", "func (o SchedulingResponsePtrOutput) MaintenanceFreezeDurationHours() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *SchedulingResponse) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.MaintenanceFreezeDurationHours\n\t}).(pulumi.IntPtrOutput)\n}", "func (t Time) Hour() int {\n\treturn time.Time(t).Hour()\n}", "func (o *PeriodModel) SetHours(v int64) {\n\to.Hours = &v\n}", "func (et ExfatTimestamp) Hour() int {\n\treturn int(et&63488) >> 11\n}", "func (o StreamOutput) RetentionPeriodHours() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *Stream) pulumi.IntPtrOutput { return v.RetentionPeriodHours }).(pulumi.IntPtrOutput)\n}", "func (fs *FlowStats) Duration() float64 {\n\tendTime := fs.ReadTime(EndTime)\n\tif endTime.Equal(time.Time{}) {\n\t\tendTime = 
time.Now()\n\t}\n\tduration := endTime.Sub(fs.ReadTime(StartTime))\n\treturn float64(duration) / float64(time.Second)\n}", "func MeasureHoursSince(name string, field string, t time.Time) Measurement {\n\treturn NewMeasurement(name).AddHoursSince(field, t)\n}", "func DecadetoHour(decade float32) float32 {\n\treturn decade * 87600\n}", "func (tcr *TestCaseReporter) Duration() time.Duration {\n\tif tcr.startTime.IsZero() || tcr.endTime.IsZero() {\n\t\treturn 0\n\t}\n\n\treturn tcr.endTime.Sub(tcr.startTime)\n}", "func (o AllocationSpecificSKUAllocationReservedInstancePropertiesResponseOutput) MaintenanceFreezeDurationHours() pulumi.IntOutput {\n\treturn o.ApplyT(func(v AllocationSpecificSKUAllocationReservedInstancePropertiesResponse) int {\n\t\treturn v.MaintenanceFreezeDurationHours\n\t}).(pulumi.IntOutput)\n}", "func (h *Header) Duration() time.Duration {\n\tsamples := h.Data.Size / uint32(h.Format.BlockSize())\n\tseconds := float64(samples) / float64(h.Format.SampleRate)\n\treturn time.Duration(seconds * float64(time.Second))\n}", "func (xt XSDTime) Hour() int {\n\treturn xt.innerTime.Hour()\n}", "func (s *HoursOfOperationTimeSlice) SetHours(v int64) *HoursOfOperationTimeSlice {\n\ts.Hours = &v\n\treturn s\n}", "func (dt DateTime) Hour() int {\n\treturn dt.Time().Hour()\n}", "func (t timeFlag) Duration() time.Duration {\n\treturn time.Duration(t)\n}", "func (o AllocationSpecificSKUAllocationReservedInstancePropertiesPtrOutput) MaintenanceFreezeDurationHours() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *AllocationSpecificSKUAllocationReservedInstanceProperties) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.MaintenanceFreezeDurationHours\n\t}).(pulumi.IntPtrOutput)\n}", "func (gdb *Gdb) getTimeDuration(duration int) int64 {\n\treturn time.Now().Add(time.Duration(duration)*time.Second).Unix() + 8*3600\n}", "func days(d float64) time.Duration {\n\treturn time.Duration(24*d) * time.Hour // Close enough\n}", "func (dt DateTime) Hour() int 
{\n\treturn dt.src.Hour()\n}", "func (o *StoragePhysicalDisk) GetPowerOnHoursPercentage() int64 {\n\tif o == nil || o.PowerOnHoursPercentage == nil {\n\t\tvar ret int64\n\t\treturn ret\n\t}\n\treturn *o.PowerOnHoursPercentage\n}", "func (tsr *TestSuiteReporter) Duration() time.Duration {\n\tif tsr.startTime.IsZero() || tsr.endTime.IsZero() {\n\t\treturn 0\n\t}\n\n\treturn tsr.endTime.Sub(tsr.startTime)\n}", "func (o AllocationSpecificSKUAllocationReservedInstancePropertiesResponsePtrOutput) MaintenanceFreezeDurationHours() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *AllocationSpecificSKUAllocationReservedInstancePropertiesResponse) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.MaintenanceFreezeDurationHours\n\t}).(pulumi.IntPtrOutput)\n}", "func HoursBetweenDates(stime, etime time.Time) float64 {\n\treturn stime.Sub(etime).Hours()\n}", "func (t ntpTimeShort) Duration() time.Duration {\n\tsec := uint64(t>>16) * nanoPerSec\n\tfrac := uint64(t&0xffff) * nanoPerSec\n\tnsec := frac >> 16\n\tif uint16(frac) >= 0x8000 {\n\t\tnsec++\n\t}\n\treturn time.Duration(sec + nsec)\n}", "func (d Dispatcher) ExecTTLHours(id string, hash string) (float64, error) {\n\te, err := d.GetBC().FindExec(id, hash)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn e.GetTTL().Hours(), nil\n}", "func (r *Reporter) Duration() time.Duration {\n\tif r.startTime.IsZero() || r.endTime.IsZero() {\n\t\treturn 0\n\t}\n\n\treturn r.endTime.Sub(r.startTime)\n}", "func (o AllocationSpecificSKUAllocationReservedInstancePropertiesOutput) MaintenanceFreezeDurationHours() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v AllocationSpecificSKUAllocationReservedInstanceProperties) *int {\n\t\treturn v.MaintenanceFreezeDurationHours\n\t}).(pulumi.IntPtrOutput)\n}", "func (d duration) Duration() time.Duration {\n\treturn time.Duration(d)\n}", "func getDuration(seconds int) time.Duration {\n\treturn time.Duration(seconds) * time.Second\n}", "func CountSegmentTargetHours(ects uint) uint 
{\n\tresponseUint := ects * ectsHours\n\treturn responseUint\n}", "func (t ntpTime) Duration() time.Duration {\n\tsec := (t >> 32) * nanoPerSec\n\tfrac := (t & 0xffffffff) * nanoPerSec >> 32\n\treturn time.Duration(sec + frac)\n}", "func (tod *ValidatedTimeOfDay) Hour() int {\n\treturn tod.hour\n}", "func (o ResourcePolicyHourlyCycleResponseOutput) HoursInCycle() pulumi.IntOutput {\n\treturn o.ApplyT(func(v ResourcePolicyHourlyCycleResponse) int { return v.HoursInCycle }).(pulumi.IntOutput)\n}", "func SecondtoHour(second float32) float32 {\n\treturn second * secondhour\n}", "func (t ntpTime) Duration() time.Duration {\n\tsec := (t >> 32) * nanoPerSec\n\tfrac := (t & 0xffffffff) * nanoPerSec\n\tnsec := frac >> 32\n\tif uint32(frac) >= 0x80000000 {\n\t\tnsec++\n\t}\n\treturn time.Duration(sec + nsec)\n}", "func (o GetSnapshotPolicyDailyScheduleOutput) Hour() pulumi.IntOutput {\n\treturn o.ApplyT(func(v GetSnapshotPolicyDailySchedule) int { return v.Hour }).(pulumi.IntOutput)\n}", "func WeektoHour(week float32) float32 {\n\treturn week * 168\n}", "func YeartoHour(year float32) float32 {\n\treturn year * 8760\n}", "func (timeout *Timeout) Duration() time.Duration {\n\treturn timeout.d\n}", "func (o GetSnapshotPolicyWeeklyScheduleOutput) Hour() pulumi.IntOutput {\n\treturn o.ApplyT(func(v GetSnapshotPolicyWeeklySchedule) int { return v.Hour }).(pulumi.IntOutput)\n}", "func (fn *formulaFuncs) HOUR(argsList *list.List) formulaArg {\n\tif argsList.Len() != 1 {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, \"HOUR requires exactly 1 argument\")\n\t}\n\tdate := argsList.Front().Value.(formulaArg)\n\tnum := date.ToNumber()\n\tif num.Type != ArgNumber {\n\t\ttimeString := strings.ToLower(date.Value())\n\t\tif !isTimeOnlyFmt(timeString) {\n\t\t\t_, _, _, _, err := strToDate(timeString)\n\t\t\tif err.Type == ArgError {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\th, _, _, pm, _, err := strToTime(timeString)\n\t\tif err.Type == ArgError {\n\t\t\treturn err\n\t\t}\n\t\tif pm 
{\n\t\t\th += 12\n\t\t}\n\t\treturn newNumberFormulaArg(float64(h))\n\t}\n\tif num.Number < 0 {\n\t\treturn newErrorFormulaArg(formulaErrorNUM, \"HOUR only accepts positive argument\")\n\t}\n\treturn newNumberFormulaArg(float64(timeFromExcelTime(num.Number, false).Hour()))\n}", "func (o ResourcePolicyHourlyCycleOutput) HoursInCycle() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v ResourcePolicyHourlyCycle) *int { return v.HoursInCycle }).(pulumi.IntPtrOutput)\n}", "func (t *Track) Duration() float64 {\n\treturn float64(t.duration) / float64(t.globalTimescale)\n}", "func (v Value) Duration() uint64 {\n\tstart := big.NewInt(v.StartSeconds)\n\tend := big.NewInt(v.EndSeconds)\n\n\tduration := (&big.Int{}).Sub(end, start)\n\n\treturn duration.Uint64()\n}", "func (r RecordTTL) Duration() time.Duration {\n\treturn (time.Second * time.Duration(int(r)))\n}", "func (ac *ActivityCreate) SetHours(i int) *ActivityCreate {\n\tac.mutation.SetHours(i)\n\treturn ac\n}", "func (zap ChZap) Duration(provided *ChZap) time.Duration { //added *!\n\t//TODO write this method (1p)\n//\t\tmy_duration := zap.Time //- provided.Time\n\t//\tfmt.Println(my_duration)\n\tduration := zap.Time.Sub(provided.Time)\n\tif duration < 0 {\n\t\treturn provided.Time.Sub(zap.Time)\n\t}\n\treturn duration\n\n}", "func (o SnapshotPolicyDailyScheduleOutput) Hour() pulumi.IntOutput {\n\treturn o.ApplyT(func(v SnapshotPolicyDailySchedule) int { return v.Hour }).(pulumi.IntOutput)\n}", "func (dt *DateTime) GetHour() *Number {\n\treturn dt.Hour()\n}", "func (c Certificate) Duration() time.Duration {\n\treturn c.ExpiresAt.Sub(c.IssuedAt)\n}", "func GetHours(conn io.ReadWriter) (hours Hours, err error) {\n\n\tresp, err := getQuery(getHours, conn)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn *resp.(*Hours), err\n}", "func (o SnapshotPolicyWeeklyScheduleOutput) Hour() pulumi.IntOutput {\n\treturn o.ApplyT(func(v SnapshotPolicyWeeklySchedule) int { return v.Hour }).(pulumi.IntOutput)\n}", "func (t *task) Duration() 
time.Duration {\n\treturn t.duration\n}", "func (p *PunchCard) GetHour() int {\n\tif p == nil || p.Hour == nil {\n\t\treturn 0\n\t}\n\treturn *p.Hour\n}", "func Hhmmss(secs int64) string {\n\tdd := secs / 86400\n\tsecs -= dd * 86400\n\thr := secs / 3600\n\tsecs -= hr * 3600\n\tmin := secs / 60\n\tsecs -= min * 60\n\n\tif dd > 0 {\n\t\treturn fmt.Sprintf(\"%dd%0dh%02dm%02ds\", dd, hr, min, secs)\n\t}\n\tif hr > 0 {\n\t\treturn fmt.Sprintf(\"%dh%02dm%02ds\", hr, min, secs)\n\t}\n\tif min > 0 {\n\t\treturn fmt.Sprintf(\"%dm%02ds\", min, secs)\n\t}\n\treturn fmt.Sprintf(\"%ds\", secs)\n}", "func (s *KinesisVideoStreamConfig) SetRetentionPeriodHours(v int64) *KinesisVideoStreamConfig {\n\ts.RetentionPeriodHours = &v\n\treturn s\n}", "func CacheExpireHours() int {\n\tif DefaultMgr() != nil {\n\t\treturn DefaultMgr().Get(backgroundCtx, common.CacheExpireHours).GetInt()\n\t}\n\t// backoff read from env.\n\thours, err := strconv.Atoi(os.Getenv(\"CACHE_EXPIRE_HOURS\"))\n\tif err != nil {\n\t\t// use default if parse error.\n\t\thours = common.DefaultCacheExpireHours\n\t}\n\n\treturn hours\n}", "func fseconds(d time.Duration) float64 { return float64(d) / float64(time.Second) }", "func (ti *TimeInterval) Duration() time.Duration {\n\treturn ti.End.Sub(ti.Start)\n}", "func getEstimate(length float64) string {\n\t// Convert length to hours\n\thours := math.Floor(length / 3600)\n\t// Convert length to minutes\n\tminutes := (int(length) % 3600) / 60\n\n\t// Convert to strings and add leading zeros\n\tvar strMinutes, strHours string\n\tif hours < 10 {\n\t\tstrHours = \"0\" + strconv.FormatFloat(hours, 'f', -1, 64)\n\t} else {\n\t\tstrHours = strconv.FormatFloat(hours, 'f', -1, 64)\n\t}\n\n\tif minutes < 10 {\n\t\tstrMinutes = \"0\" + strconv.Itoa(minutes)\n\t} else {\n\t\tstrMinutes = strconv.Itoa(minutes)\n\t}\n\n\t// Return string formated estimate\n\treturn strHours + \":\" + strMinutes\n}", "func (d *Duration) GetTimeDuration() time.Duration {\n\treturn time.Duration(d.Year * 
yToNano + d.Month * monthToNano +\n\t\td.Day * dToNano + d.Hour * hToNano + d.Minute * mToNano + \n\t\td.Second * sToNano)\n}" ]
[ "0.8240469", "0.7533591", "0.7021137", "0.6808942", "0.6702991", "0.64857084", "0.6369934", "0.6315462", "0.62868947", "0.6244098", "0.6225003", "0.61721426", "0.60986143", "0.6051746", "0.60097885", "0.60097885", "0.5976994", "0.59059703", "0.58845806", "0.5852544", "0.5846431", "0.58234674", "0.57949054", "0.5755549", "0.56976444", "0.567004", "0.56015956", "0.5525525", "0.5482933", "0.5471766", "0.54655117", "0.54533446", "0.54107374", "0.5389831", "0.53860986", "0.53356636", "0.53116554", "0.53014755", "0.52990663", "0.5297033", "0.52792007", "0.526402", "0.52386755", "0.52186966", "0.5205518", "0.51825404", "0.5173663", "0.5145758", "0.51418287", "0.51379734", "0.51216584", "0.5100347", "0.5073954", "0.5071503", "0.5066677", "0.5058367", "0.5047536", "0.5037494", "0.5020621", "0.5004238", "0.4997653", "0.49880812", "0.4982662", "0.49800646", "0.49780473", "0.49495494", "0.49492887", "0.4933437", "0.49288136", "0.49150175", "0.4911526", "0.49087363", "0.49003685", "0.48834136", "0.48803547", "0.48690644", "0.48661432", "0.4859701", "0.4836178", "0.48205832", "0.48197168", "0.48151755", "0.48140055", "0.48079187", "0.48053822", "0.47939518", "0.47933486", "0.47893795", "0.47889912", "0.47880962", "0.4786664", "0.47825518", "0.47789362", "0.47769168", "0.47675756", "0.4766593", "0.47546473", "0.4739335", "0.47332105", "0.47316313" ]
0.73899436
2
Minutes returns the duration as a floating point number of minutes.
func (d Duration) Minutes() float64 { return time.Duration(d).Minutes() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (f *Formatter) Minutes() string {\n\tvar format string\n\tif f.withoutUnit {\n\t\tformat = \"%d\\n\"\n\t} else {\n\t\tformat = \"%d minutes\\n\"\n\t}\n\treturn fmt.Sprintf(format, int(f.duration.Minutes()))\n}", "func (d Duration) Minutes() float64 {\n\tmin := d / Minute\n\tusec := d % Minute\n\treturn float64(min) + float64(usec)/(60*1e6)\n}", "func GetDurationInMillseconds(start time.Time) float64 {\n\tend := time.Now()\n\tduration := end.Sub(start)\n\tmilliseconds := float64(duration) / float64(time.Millisecond)\n\trounded := float64(int(milliseconds*100+.5)) / 100\n\treturn rounded\n}", "func GetDurationInMillseconds(start time.Time) float64 {\n\tend := time.Now()\n\tduration := end.Sub(start)\n\tmilliseconds := float64(duration) / float64(time.Millisecond)\n\trounded := float64(int(milliseconds*100+.5)) / 100\n\treturn rounded\n}", "func (p *pvc) minutes() float64 {\n\tif p == nil {\n\t\treturn 0.0\n\t}\n\n\treturn p.End.Sub(p.Start).Minutes()\n}", "func (i ISODuration) GetMinutes() int {\r\n\treturn i.duration.Minutes\r\n}", "func (c *Job) Minutes() *Job {\n\tif c.delayUnit == delayNone {\n\t\tc.unit = minutes\n\t} else {\n\t\tc.delayUnit = delayMinutes\n\t}\n\treturn c\n}", "func getMinutes(time *int) int {\n\treturn getTimeScale(time, 60)\n}", "func minutesToDuration(n uint8) Duration {\n\treturn Duration(time.Duration(n) * time.Minute)\n}", "func (o BucketReplicationConfigurationRuleDestinationReplicationTimeOutput) Minutes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v BucketReplicationConfigurationRuleDestinationReplicationTime) *int { return v.Minutes }).(pulumi.IntPtrOutput)\n}", "func (j *Job) Minutes() (job *Job) {\n\tj.unit = JOB_UNIT_TYPE_MINUTE\n\treturn j\n}", "func Ms(duration time.Duration) float64 {\n\treturn float64(duration / time.Millisecond)\n}", "func (o BucketV2ReplicationConfigurationRuleDestinationReplicationTimeOutput) Minutes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v 
BucketV2ReplicationConfigurationRuleDestinationReplicationTime) *int { return v.Minutes }).(pulumi.IntPtrOutput)\n}", "func (o BucketReplicationConfigRuleDestinationReplicationTimeTimeOutput) Minutes() pulumi.IntOutput {\n\treturn o.ApplyT(func(v BucketReplicationConfigRuleDestinationReplicationTimeTime) int { return v.Minutes }).(pulumi.IntOutput)\n}", "func roundDuration(d time.Duration) time.Duration {\n\trd := time.Duration(d.Minutes()) * time.Minute\n\tif rd < d {\n\t\trd += time.Minute\n\t}\n\treturn rd\n}", "func NsMinutes(count int64) int64 { return NsSeconds(count * 60) }", "func DaytoMinutes(day float32) float32 {\n\treturn day * 1440\n}", "func (s *GetChannelScheduleInput) SetDurationMinutes(v string) *GetChannelScheduleInput {\n\ts.DurationMinutes = &v\n\treturn s\n}", "func (o TransferJobScheduleStartTimeOfDayOutput) Minutes() pulumi.IntOutput {\n\treturn o.ApplyT(func(v TransferJobScheduleStartTimeOfDay) int { return v.Minutes }).(pulumi.IntOutput)\n}", "func (o BucketReplicationConfigurationRuleDestinationReplicationTimePtrOutput) Minutes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *BucketReplicationConfigurationRuleDestinationReplicationTime) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Minutes\n\t}).(pulumi.IntPtrOutput)\n}", "func durToMsec(dur time.Duration) string {\n\treturn fmt.Sprintf(\"%dms\", dur/time.Millisecond)\n}", "func (o InstanceDenyMaintenancePeriodTimeOutput) Minutes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v InstanceDenyMaintenancePeriodTime) *int { return v.Minutes }).(pulumi.IntPtrOutput)\n}", "func (d Dispatcher) ExecDurationMinutes(id string, hash string) (float64, error) {\n\te, err := d.GetBC().FindExec(id, hash)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn e.GetDuration().Minutes(), nil\n}", "func (fn *formulaFuncs) MDURATION(argsList *list.List) formulaArg {\n\targs := fn.prepareDurationArgs(\"MDURATION\", argsList)\n\tif args.Type != ArgList {\n\t\treturn args\n\t}\n\tduration := 
fn.duration(args.List[0], args.List[1], args.List[2], args.List[3], args.List[4], args.List[5])\n\tif duration.Type != ArgNumber {\n\t\treturn duration\n\t}\n\treturn newNumberFormulaArg(duration.Number / (1 + args.List[3].Number/args.List[4].Number))\n}", "func (o *ViewTimelogTotals) GetMinutes() int32 {\n\tif o == nil || o.Minutes == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.Minutes\n}", "func (o BucketV2ReplicationConfigurationRuleDestinationMetricOutput) Minutes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v BucketV2ReplicationConfigurationRuleDestinationMetric) *int { return v.Minutes }).(pulumi.IntPtrOutput)\n}", "func (o BucketReplicationConfigRuleDestinationMetricsEventThresholdOutput) Minutes() pulumi.IntOutput {\n\treturn o.ApplyT(func(v BucketReplicationConfigRuleDestinationMetricsEventThreshold) int { return v.Minutes }).(pulumi.IntOutput)\n}", "func (o BucketReplicationConfigRuleDestinationReplicationTimeTimePtrOutput) Minutes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *BucketReplicationConfigRuleDestinationReplicationTimeTime) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Minutes\n\t}).(pulumi.IntPtrOutput)\n}", "func (o BucketReplicationConfigurationRuleDestinationMetricsOutput) Minutes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v BucketReplicationConfigurationRuleDestinationMetrics) *int { return v.Minutes }).(pulumi.IntPtrOutput)\n}", "func (o InstanceMaintenanceWindowStartTimeOutput) Minutes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v InstanceMaintenanceWindowStartTime) *int { return v.Minutes }).(pulumi.IntPtrOutput)\n}", "func (e PrecisionTiming) durationToMs(x time.Duration) float64 {\n\treturn float64(x) / float64(time.Millisecond)\n}", "func (o TransferJobScheduleStartTimeOfDayPtrOutput) Minutes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *TransferJobScheduleStartTimeOfDay) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Minutes\n\t}).(pulumi.IntPtrOutput)\n}", 
"func (cvr Converter) SecondsToMinutes(s Seconds) Minutes {\n\treturn Minutes(s / 60)\n}", "func (cvr Converter) SecondsToMinutes(s Seconds) Minutes {\n\treturn Minutes(s / 60)\n}", "func (o BucketReplicationConfigurationRuleDestinationMetricsPtrOutput) Minutes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *BucketReplicationConfigurationRuleDestinationMetrics) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Minutes\n\t}).(pulumi.IntPtrOutput)\n}", "func (i ISODuration) SetMinutes(minutes int) {\r\n\ti.duration.Minutes = minutes\r\n}", "func (o BucketReplicationConfigRuleDestinationMetricsEventThresholdPtrOutput) Minutes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *BucketReplicationConfigRuleDestinationMetricsEventThreshold) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Minutes\n\t}).(pulumi.IntPtrOutput)\n}", "func (o InstanceDenyMaintenancePeriodTimePtrOutput) Minutes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *InstanceDenyMaintenancePeriodTime) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Minutes\n\t}).(pulumi.IntPtrOutput)\n}", "func (o InstanceMaintenanceWindowStartTimePtrOutput) Minutes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *InstanceMaintenanceWindowStartTime) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Minutes\n\t}).(pulumi.IntPtrOutput)\n}", "func (o InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput) Minutes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime) *int { return v.Minutes }).(pulumi.IntPtrOutput)\n}", "func (o InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput) Minutes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime) *int { return v.Minutes }).(pulumi.IntPtrOutput)\n}", "func (p Project) HoursMinutes() (h int, m int) {\n\tDB.Preload(\"Entries\").Find(&p)\n\tvar total float64\n\tfor _, i := range 
p.Entries {\n\t\ttotal += i.TotalTime.Seconds()\n\t}\n\thours := int(total) / 3600\n\tf := float64((int(total) % 3600.0) / 60.0)\n\ti := float64(f) + float64(0.5)\n\tminutes := int(i)\n\treturn hours, minutes\n}", "func WeektoMinutes(week float32) float32 {\n\treturn week * 10080\n}", "func (t *Track) Duration() float64 {\n\treturn float64(t.duration) / float64(t.globalTimescale)\n}", "func (q MetricTicks) Duration(tempoBPM uint32, deltaTicks uint32) time.Duration {\n\tif q == 0 {\n\t\tq = defaultMetric\n\t}\n\t// (60000 / T) * (d / R) = D[ms]\n\t//\tdurQnMilli := 60000 / float64(tempoBPM)\n\t//\t_4thticks := float64(deltaTicks) / float64(uint16(q))\n\tres := 60000000000 * float64(deltaTicks) / (float64(tempoBPM) * float64(uint16(q)))\n\t//fmt.Printf(\"what: %vns\\n\", res)\n\treturn time.Duration(int64(math.Round(res)))\n\t//\treturn time.Duration(roundFloat(durQnMilli*_4thticks, 0)) * time.Millisecond\n}", "func (dt *DateTime) GetMinute() *Number {\n\treturn dt.Minute()\n}", "func (v Value) Duration() uint64 {\n\tstart := big.NewInt(v.StartSeconds)\n\tend := big.NewInt(v.EndSeconds)\n\n\tduration := (&big.Int{}).Sub(end, start)\n\n\treturn duration.Uint64()\n}", "func (s *HoursOfOperationTimeSlice) SetMinutes(v int64) *HoursOfOperationTimeSlice {\n\ts.Minutes = &v\n\treturn s\n}", "func (c *Job) MilliSeconds() *Job {\n\tif c.delayUnit == delayNone {\n\t\tc.unit = milliseconds\n\t} else {\n\t\tc.delayUnit = delayMilliseconds\n\t}\n\treturn c\n}", "func (r Rest) Duration(measure time.Duration) time.Duration {\n\tif Duration(r) == None {\n\t\treturn 0\n\t}\n\t//the fraction of the measure the note takes\n\tfraq := 1. 
/ math.Pow(2., float64(r))\n\n\treturn time.Duration(float64(measure) * fraq)\n}", "func (dt *DateTime) Minute() *Number {\n\topChain := dt.chain.enter(\"Minute()\")\n\tdefer opChain.leave()\n\n\tif opChain.failed() {\n\t\treturn newNumber(opChain, float64(0))\n\t}\n\n\treturn newNumber(opChain, float64(dt.value.Minute()))\n}", "func (o *ViewTimelogTotals) SetMinutes(v int32) {\n\to.Minutes = &v\n}", "func (t NormalizedDuration) Duration(m time.Duration) time.Duration {\n\treturn time.Duration(float64(m.Nanoseconds()) * float64(t))\n}", "func fseconds(d time.Duration) float64 { return float64(d) / float64(time.Second) }", "func mdiff(start time.Time) float64 {\n\treturn float64(time.Now().Sub(start) / time.Microsecond)\n}", "func HourtoMinutes(hour float32) float32 {\n\treturn hour * 60\n}", "func (o QuotaLimitOutput) Duration() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v QuotaLimit) *string { return v.Duration }).(pulumi.StringPtrOutput)\n}", "func (fs *FlowStats) Duration() float64 {\n\tendTime := fs.ReadTime(EndTime)\n\tif endTime.Equal(time.Time{}) {\n\t\tendTime = time.Now()\n\t}\n\tduration := endTime.Sub(fs.ReadTime(StartTime))\n\treturn float64(duration) / float64(time.Second)\n}", "func (i Interval) Duration() time.Duration {\n\tswitch i {\n\tcase Interval1m:\n\t\treturn time.Minute\n\tcase Interval3m:\n\t\treturn time.Minute * 3\n\tcase Interval5m:\n\t\treturn time.Minute * 5\n\tcase Interval15m:\n\t\treturn time.Minute * 15\n\tcase Interval30m:\n\t\treturn time.Minute * 30\n\tcase Interval1h:\n\t\treturn time.Hour\n\tcase Interval2h:\n\t\treturn time.Hour * 2\n\tcase Interval4h:\n\t\treturn time.Hour * 4\n\tcase Interval6h:\n\t\treturn time.Hour * 6\n\tcase Interval8h:\n\t\treturn time.Hour * 8\n\tcase Interval12h:\n\t\treturn time.Hour * 12\n\tcase Interval1d:\n\t\treturn time.Hour * 24\n\tcase Interval3d:\n\t\treturn time.Hour * 24 * 3\n\tcase Interval1w:\n\t\treturn time.Hour * 24 * 7\n\tcase Interval1M:\n\t\treturn time.Hour * 24 * 
30\n\t}\n\n\treturn time.Duration(0)\n}", "func DurationValue(s string, step int64) (int64, error) {\n\tif len(s) == 0 {\n\t\treturn 0, fmt.Errorf(\"duration cannot be empty\")\n\t}\n\tlastChar := s[len(s)-1]\n\tif lastChar >= '0' && lastChar <= '9' || lastChar == '.' {\n\t\t// Try parsing floating-point duration\n\t\td, err := strconv.ParseFloat(s, 64)\n\t\tif err == nil {\n\t\t\t// Convert the duration to milliseconds.\n\t\t\treturn int64(d * 1000), nil\n\t\t}\n\t}\n\tisMinus := false\n\td := float64(0)\n\tfor len(s) > 0 {\n\t\tn := scanSingleDuration(s, true)\n\t\tif n <= 0 {\n\t\t\treturn 0, fmt.Errorf(\"cannot parse duration %q\", s)\n\t\t}\n\t\tds := s[:n]\n\t\ts = s[n:]\n\t\tdLocal, err := parseSingleDuration(ds, step)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tif isMinus && dLocal > 0 {\n\t\t\tdLocal = -dLocal\n\t\t}\n\t\td += dLocal\n\t\tif dLocal < 0 {\n\t\t\tisMinus = true\n\t\t}\n\t}\n\tif math.Abs(d) > 1<<63-1 {\n\t\treturn 0, fmt.Errorf(\"too big duration %.0fms\", d)\n\t}\n\treturn int64(d), nil\n}", "func (q *Quantity) MilliValue() int64 {\n\treturn q.ScaledValue(Milli)\n}", "func MeasureMinutesSince(name string, field string, t time.Time) Measurement {\n\treturn NewMeasurement(name).AddMinutesSince(field, t)\n}", "func (v Volume) Millilitres() float64 {\n\treturn float64(v / Millilitre)\n}", "func (d *Duration) Duration() time.Duration {\n\tif d == nil {\n\t\treturn 0\n\t}\n\treturn (time.Duration(d.Seconds) * time.Second) + (time.Duration(d.Nanos) * time.Nanosecond)\n}", "func milliseconds(ms int64) time.Duration {\n\treturn time.Duration(ms * 1000 * 1000)\n}", "func (d duration) Duration() time.Duration {\n\treturn time.Duration(d)\n}", "func (r *Rhythm) Duration() float64 {\n\treturn r.duration\n}", "func (p *parser) duration() Node {\n\ttoken := p.expect(TokenDuration)\n\tnum, err := newDur(token.pos, token.val)\n\tif err != nil {\n\t\tp.error(err)\n\t}\n\treturn num\n}", "func (r *Reporter) Duration() time.Duration {\n\tif 
r.startTime.IsZero() || r.endTime.IsZero() {\n\t\treturn 0\n\t}\n\n\treturn r.endTime.Sub(r.startTime)\n}", "func (d Duration) TimeDuration() time.Duration {\n\treturn time.Duration(int64(d) / Millisecond * int64(time.Millisecond))\n}", "func (l *Loan) DurationInMonths() int {\n\treturn l.length\n}", "func (o *InlineResponse20051TodoItems) GetEstimatedMinutes() string {\n\tif o == nil || o.EstimatedMinutes == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.EstimatedMinutes\n}", "func (s *ParticipantTimerValue) SetParticipantTimerDurationInMinutes(v int64) *ParticipantTimerValue {\n\ts.ParticipantTimerDurationInMinutes = &v\n\treturn s\n}", "func ToMillis(t time.Time) int64 {\n\treturn t.UnixNano() / 1e6\n}", "func (t Time) Minute() int {\n\treturn time.Time(t).Minute()\n}", "func (t HighresTimestamp) Duration() time.Duration {\n\treturn time.Duration(uint64(t) * uint64(tbinfo.numer) / uint64(tbinfo.denom)))\n}", "func funcMinute(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {\n\treturn dateWrapper(vals, enh, func(t time.Time) float64 {\n\t\treturn float64(t.Minute())\n\t})\n}", "func (fn *formulaFuncs) MINUTE(argsList *list.List) formulaArg {\n\tif argsList.Len() != 1 {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, \"MINUTE requires exactly 1 argument\")\n\t}\n\tdate := argsList.Front().Value.(formulaArg)\n\tnum := date.ToNumber()\n\tif num.Type != ArgNumber {\n\t\ttimeString := strings.ToLower(date.Value())\n\t\tif !isTimeOnlyFmt(timeString) {\n\t\t\t_, _, _, _, err := strToDate(timeString)\n\t\t\tif err.Type == ArgError {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\t_, m, _, _, _, err := strToTime(timeString)\n\t\tif err.Type == ArgError {\n\t\t\treturn err\n\t\t}\n\t\treturn newNumberFormulaArg(float64(m))\n\t}\n\tif num.Number < 0 {\n\t\treturn newErrorFormulaArg(formulaErrorNUM, \"MINUTE only accepts positive argument\")\n\t}\n\treturn newNumberFormulaArg(float64(timeFromExcelTime(num.Number, false).Minute()))\n}", "func 
(dt DateTime) SpanMinute() (DateTime, DateTime) {\n\treturn dt.FloorMinute(), dt.CeilMinute()\n}", "func MTime(file string) (int64, error) {\n\tf, err := os.Stat(file)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn f.ModTime().Unix(), nil\n}", "func MinutesInRadians(t time.Time) float64 {\n\treturn SecondsInRadians(t)/secondsInClock + XInRadians(t.Minute(), minutesInHalfClock)\n}", "func toMilliseconds(duration time.Duration) float64 {\n\tif duration < time.Microsecond*10 {\n\t\treturn 0\n\t}\n\n\tms := float64(duration) / float64(time.Millisecond)\n\t// Round time to 0.02 precision\n\treturn math.Round(ms*100) / 100\n}", "func normalizeMinutes(min int) (int, int) {\n\tflag := false\n\taddHours := 0\n\tif min < 0 {\n\t\tflag = !flag\n\t\tmin = -1 * min\n\t}\n\n\tif min >= 60 {\n\t\taddHours = min / 60\n\t\tmin = min % 60\n\t}\n\n\tif flag {\n\t\tif min > 0 {\n\t\t\tmin = 60 - min\n\t\t\taddHours += 1\n\t\t}\n\t\taddHours = -1 * addHours\n\t}\n\n\treturn min, addHours\n}", "func computeDelayMinutesLinear(d time.Duration, rc int) time.Duration {\n\tret := time.Duration((float64(1.6) * float64(rc)) * float64(d))\n\tif ret >= maxMinutes {\n\t\tret = maxMinutes\n\t}\n\treturn ret\n}", "func (d Duration) Round(m Duration) Duration {\n\tt := time.Duration(d * 1000).Round(time.Duration(m * 1000))\n\treturn Duration(t / 1000)\n}", "func durationInSeconds(d time.Duration) int64 {\n\t// converting a floating-point number to an integer discards\n\t// the fraction (truncation towards zero)\n\treturn int64(d.Seconds())\n}", "func (q MetricTicks) FractionalDuration(fractionalBPM float64, deltaTicks uint32) time.Duration {\n\tif q == 0 {\n\t\tq = defaultMetric\n\t}\n\t// (60000 / T) * (d / R) = D[ms]\n\t//\tdurQnMilli := 60000 / float64(tempoBPM)\n\t//\t_4thticks := float64(deltaTicks) / float64(uint16(q))\n\tres := 60000000000 * float64(deltaTicks) / (fractionalBPM * float64(uint16(q)))\n\t//fmt.Printf(\"what: %vns\\n\", res)\n\treturn 
time.Duration(int64(math.Round(res)))\n\t//\treturn time.Duration(roundFloat(durQnMilli*_4thticks, 0)) * time.Millisecond\n}", "func to_ms(nano int64) int64 {\n\treturn nano / int64(time.Millisecond)\n}", "func (s *StartChatContactInput) SetChatDurationInMinutes(v int64) *StartChatContactInput {\n\ts.ChatDurationInMinutes = &v\n\treturn s\n}", "func MinutesToSeconds(minutes int) int {\n\treturn minutes * 60\n}", "func (sw *Stopwatch) ElapsedMillis() float64 {\n\treturn float64(time.Now().Sub(sw.Time).Nanoseconds()) / float64(time.Millisecond.Nanoseconds())\n}", "func (p Profile) Duration() time.Duration {\n\treturn p.Finish.Sub(p.Start)\n}", "func (d Duration) Seconds() float64 {\n\treturn time.Duration(d).Seconds()\n}", "func (d Duration) Seconds() float64 {\n\tsec := d / Second\n\tusec := d % Second\n\treturn float64(sec) + float64(usec)/1e6\n}", "func SetDurationMinutes(co *ConfigOption) error {\n\t*(co.ConfigKey.(*time.Duration)) = time.Duration(viper.GetInt(co.Name)) * time.Minute\n\treturn nil\n}", "func getDuration(seconds int) time.Duration {\n\treturn time.Duration(seconds) * time.Second\n}", "func secToMins(t ClockTime) (mins, secs uint) {\n\tmins = uint(t) / 60\n\tsecs = uint(t) % 60\n\n\treturn mins, secs\n}", "func (t Time) Minute() int {}", "func (dt DateTime) Minute() int {\n\treturn dt.Time().Minute()\n}", "func (i Interval) Duration() time.Duration {\n\treturn time.Duration(i)\n}" ]
[ "0.67800033", "0.67238533", "0.671037", "0.671037", "0.6626685", "0.64240044", "0.6328356", "0.61948246", "0.6168142", "0.6068626", "0.6063561", "0.6036192", "0.6029605", "0.60184914", "0.5966828", "0.58673483", "0.5823611", "0.581647", "0.5814458", "0.5779258", "0.57744646", "0.5768834", "0.57192487", "0.5716791", "0.57070494", "0.5689318", "0.5681536", "0.5668813", "0.5634188", "0.55873287", "0.5544299", "0.5533592", "0.5509488", "0.5509488", "0.5484791", "0.54834735", "0.53963816", "0.5359409", "0.53422034", "0.53373057", "0.53373057", "0.53362286", "0.5314515", "0.52955735", "0.52883875", "0.5240633", "0.5239759", "0.52312565", "0.52105576", "0.5174284", "0.51589906", "0.51579213", "0.51490504", "0.51487887", "0.5143806", "0.5133488", "0.512793", "0.5120589", "0.5120201", "0.5117849", "0.51111853", "0.51004136", "0.5075491", "0.5060814", "0.5054956", "0.5039422", "0.503514", "0.50169283", "0.5007333", "0.5001277", "0.49994206", "0.49919614", "0.49898", "0.49710742", "0.49572685", "0.49552616", "0.49545038", "0.4947115", "0.49447787", "0.49331617", "0.49177608", "0.48987144", "0.48940718", "0.48879296", "0.4886014", "0.48768294", "0.48639703", "0.48615518", "0.4858483", "0.4855955", "0.48513672", "0.48507532", "0.48494172", "0.48400524", "0.48378754", "0.48323533", "0.48287478", "0.48277658", "0.48227596", "0.4808905" ]
0.6980067
0
Nanoseconds returns the duration as an integer nanosecond count.
func (d Duration) Nanoseconds() int64 { return time.Duration(d).Nanoseconds() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s Stopwatch) Nanoseconds() int64 {\n\treturn s.acc.Nanoseconds()\n}", "func (ft *filetime) Nanoseconds() int64 {\n\t// 100-nanosecond intervals since January 1, 1601\n\tnsec := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime)\n\t// change starting time to the Epoch (00:00:00 UTC, January 1, 1970)\n\tnsec -= 116444736000000000\n\t// convert into nanoseconds\n\tnsec *= 100\n\treturn nsec\n}", "func (f *Formatter) Nanoseconds() string {\n\tvar format string\n\tif f.withoutUnit {\n\t\tformat = \"%d\\n\"\n\t} else {\n\t\tformat = \"%d nanoseconds\\n\"\n\t}\n\treturn fmt.Sprintf(format, f.duration.Nanoseconds())\n}", "func (t Time) Nanosecond() int {}", "func nanotime() int64", "func nanotime() int64", "func nanotime() int64", "func nanotime() int64", "func (dt DateTime) Nanosecond() int {\n\treturn dt.Time().Nanosecond()\n}", "func TimeUnitNano(unit string) int64 {\n\tswitch unit {\n\tcase TimeUnitSeconds:\n\t\treturn int64(time.Second)\n\tcase TimeUnitMilliseconds:\n\t\treturn int64(time.Millisecond)\n\tcase TimeUnitMicroseconds:\n\t\treturn int64(time.Microsecond)\n\tdefault:\n\t\treturn int64(time.Nanosecond)\n\t}\n}", "func (dt DateTime) Nanosecond() int {\n\treturn dt.src.Nanosecond()\n}", "func (xt XSDTime) Nanosecond() int {\n\treturn xt.innerTime.Nanosecond()\n}", "func TimevalToNsec(tv Timeval) int64 { return tv.Nano() }", "func NsMicroseconds(count int64) int64 { return count * 1e3 }", "func run_timeNano() int64", "func (t Time) Microseconds() int64 {\n\treturn time.Time(t).UnixNano() / DivideMicroseconds\n}", "func to_ms(nano int64) int64 {\n\treturn nano / int64(time.Millisecond)\n}", "func NanoTime() int64", "func (d Duration) Microseconds() int64 {\n\treturn int64(d)\n}", "func TimespecToNsec(ts Timespec) int64 { return ts.Nano() }", "func (dt *DateTime) GetNanosecond() *Number {\n\treturn dt.Nanosecond()\n}", "func (ts Timespec) ToNsec() int64 {\n\treturn int64(ts.Sec)*1e9 + int64(ts.Nsec)\n}", "func (dt *DateTime) Nanosecond() *Number 
{\n\topChain := dt.chain.enter(\"Nanosecond()\")\n\tdefer opChain.leave()\n\n\tif opChain.failed() {\n\t\treturn newNumber(opChain, float64(0))\n\t}\n\n\treturn newNumber(opChain, float64(dt.value.Nanosecond()))\n}", "func tickspersecond() int64", "func NsSeconds(count int64) int64 { return NsMilliseconds(count * 1e3) }", "func durationInSeconds(d time.Duration) int64 {\n\t// converting a floating-point number to an integer discards\n\t// the fraction (truncation towards zero)\n\treturn int64(d.Seconds())\n}", "func Ms(duration time.Duration) float64 {\n\treturn float64(duration / time.Millisecond)\n}", "func Nanotime() int64 {\n\treturn nanotime()\n}", "func Nanotime() int64 {\n\treturn nanotime()\n}", "func (t ntpTime) Duration() time.Duration {\n\tsec := (t >> 32) * nanoPerSec\n\tfrac := (t & 0xffffffff) * nanoPerSec\n\tnsec := frac >> 32\n\tif uint32(frac) >= 0x80000000 {\n\t\tnsec++\n\t}\n\treturn time.Duration(sec + nsec)\n}", "func NsMilliseconds(count int64) int64 { return NsMicroseconds(count * 1e3) }", "func (sxts StatxTimestamp) ToNsec() int64 {\n\treturn int64(sxts.Sec)*1e9 + int64(sxts.Nsec)\n}", "func humanToNanoTime(value []byte) ([]byte) {\n\tdura, err := time.ParseDuration(string(value))\n\tif err != nil {\n\t\treturn value\n\t}\n\treturn []byte(strconv.FormatInt(dura.Nanoseconds(), 10))\n}", "func (o DurationOutput) Nanos() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v Duration) *int { return v.Nanos }).(pulumi.IntPtrOutput)\n}", "func durationToMilliseconds(d time.Duration) (uint64, error) {\n\tif d < 0 {\n\t\treturn 0, fmt.Errorf(\"report period cannot be negative: %v\", d)\n\t}\n\n\treturn uint64(d / time.Millisecond), nil\n}", "func currentTimeMillis() int64 {\n\tresult := time.Nanoseconds()\n\treturn result / 1e6\n}", "func micros(d time.Duration) int {\n\treturn int(d.Seconds() * 1000000)\n}", "func Nanosec() int64 {\n\treturn internal.Syscall0r64(NANOSEC)\n}", "func MeasureNanosecondsSince(name string, field string, t time.Time) 
Measurement {\n\treturn NewMeasurement(name).AddNanosecondsSince(field, t)\n}", "func (t ntpTime) Duration() time.Duration {\n\tsec := (t >> 32) * nanoPerSec\n\tfrac := (t & 0xffffffff) * nanoPerSec >> 32\n\treturn time.Duration(sec + frac)\n}", "func eps(n int, d time.Duration) float64 {\n\treturn float64(n) / d.Seconds()\n}", "func (t Time) Milliseconds() int64 {\n\treturn time.Time(t).UnixNano() / DivideMilliseconds\n}", "func ToUsec(t time.Time) int64 {\n\treturn t.UnixNano() / 1e3\n}", "func CurrentNanosecond() int64 {\n\treturn CurrentMicrosecond() * 1e3\n}", "func ToMillis(t time.Time) int64 {\n\treturn t.UnixNano() / 1e6\n}", "func toMilliseconds(duration time.Duration) float64 {\n\tif duration < time.Microsecond*10 {\n\t\treturn 0\n\t}\n\n\tms := float64(duration) / float64(time.Millisecond)\n\t// Round time to 0.02 precision\n\treturn math.Round(ms*100) / 100\n}", "func (o DurationResponseOutput) Nanos() pulumi.IntOutput {\n\treturn o.ApplyT(func(v DurationResponse) int { return v.Nanos }).(pulumi.IntOutput)\n}", "func (t ntpTimeShort) Duration() time.Duration {\n\tsec := uint64(t>>16) * nanoPerSec\n\tfrac := uint64(t&0xffff) * nanoPerSec\n\tnsec := frac >> 16\n\tif uint16(frac) >= 0x8000 {\n\t\tnsec++\n\t}\n\treturn time.Duration(sec + nsec)\n}", "func (o DurationPtrOutput) Nanos() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *Duration) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Nanos\n\t}).(pulumi.IntPtrOutput)\n}", "func NsMinutes(count int64) int64 { return NsSeconds(count * 60) }", "func (dt DateTime) ShiftNanoseconds(nanosecond int) DateTime {\n\tduration, _ := time.ParseDuration(fmt.Sprintf(\"%dns\", nanosecond))\n\treturn DateTime(dt.Time().Add(duration))\n}", "func ToMilliSec(date time.Time) int64 {\n\treturn date.UnixNano() / 1000000\n}", "func DurationInMilliseconds(d time.Duration) string {\n\treturn fmt.Sprintf(\"%.0fms\", d.Seconds()*1e3)\n}", "func Snotime() uint64 {\n\t// Note: Division is left here instead of 
being impl in asm since the compiler optimizes this\n\t// into mul+shift, which is easier to read when left in as simple division.\n\t// This doesn't affect performance. The asm won't get inlined anyway while this function\n\t// will.\n\t//\n\t// 4e4 instead of TimeUnit (4e6) because the time we get from the OS is in units of 100ns.\n\treturn ostime() / 4e4\n}", "func GetMonoTime() int64 {\n\tsec, nsec := getRawMonoTime()\n\n\t// to milliseconds\n\treturn sec * 1000 + (nsec / (1 * 1000 * 1000))\n}", "func (t *Track) Duration() float64 {\n\treturn float64(t.duration) / float64(t.globalTimescale)\n}", "func fseconds(d time.Duration) float64 { return float64(d) / float64(time.Second) }", "func (d Dispatcher) ExecDurationNanoseconds(id string, hash string) (int64, error) {\n\te, err := d.GetBC().FindExec(id, hash)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn e.GetDuration().Nanoseconds(), nil\n}", "func Time(t time.Time) int64 {\n\treturn t.UnixNano() / 1000000\n}", "func (tv Timeval) ToNsecCapped() int64 {\n\tif tv.Sec > maxSecInDuration {\n\t\treturn math.MaxInt64\n\t}\n\treturn int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3\n}", "func TimeElapsed() int64 {\n\telapsed := time.Since(start)\n\treturn elapsed.Nanoseconds() / 1000\n}", "func (d Duration) Seconds() float64 {\n\tsec := d / Second\n\tusec := d % Second\n\treturn float64(sec) + float64(usec)/1e6\n}", "func UnixMilliseconds(t time.Time) float64 {\n\tnanosPerSecond := float64(time.Second) / float64(time.Nanosecond)\n\treturn float64(t.UnixNano()) / nanosPerSecond\n}", "func (i ISODuration) GetMilliSeconds() int {\r\n\treturn i.duration.MilliSeconds\r\n}", "func toUnixMsec(t time.Time) int64 {\n\treturn t.UnixNano() / 1e6\n}", "func (v Value) Duration() uint64 {\n\tstart := big.NewInt(v.StartSeconds)\n\tend := big.NewInt(v.EndSeconds)\n\n\tduration := (&big.Int{}).Sub(end, start)\n\n\treturn duration.Uint64()\n}", "func DiffNano(startTime time.Time) (diff int64) {\n\n\tstartTimeStamp := 
startTime.UnixNano()\n\tendTimeStamp := time.Now().UnixNano()\n\n\tdiff = endTimeStamp - startTimeStamp\n\n\treturn\n}", "func (d *Duration) Duration() time.Duration {\n\tif d == nil {\n\t\treturn 0\n\t}\n\treturn (time.Duration(d.Seconds) * time.Second) + (time.Duration(d.Nanos) * time.Nanosecond)\n}", "func TestNanoTime(t *testing.T) {\n\tt1 := time.Now().UnixNano()\n\tt2 := time.Now().UnixNano()\n\tinterval := t2 - t1\n\tfmt.Println(interval)\n}", "func (v TimestampNano) Int() int {\n\treturn int(v.Int64())\n}", "func millisI(nanos int64) float64 {\n\treturn millisF(float64(nanos))\n}", "func (dt DateTime) UnixNano() int64 {\n\treturn dt.src.UnixNano()\n}", "func ToVNCTime(t time.Time) string {\n\tvar format string\n\tif t.Nanosecond() < 1000 {\n\t\tformat = vncFormatWithoutNanoseconds\n\t} else {\n\t\tformat = vncFormatWithNanoseconds\n\t}\n\treturn t.UTC().Format(format)\n}", "func (p Packet) TimeUnixNano() int64 {\n\t// 1.0737... is 2^30 (collectds' subsecond interval) / 10^-9 (nanoseconds)\n\treturn int64(float64(p.CdTime) / 1.073741824)\n}", "func (tm *CompilationTelemetry) CompilationDurationNS() int64 {\n\treturn tm.compilationDuration.Nanoseconds()\n}", "func (v TimestampNano) Int64() int64 {\n\tif !v.Valid() || v.time.UnixNano() == 0 {\n\t\treturn 0\n\t}\n\treturn v.time.UnixNano()\n}", "func Nanosec() int64 {\n\treturn syscall.Nanosec()\n}", "func ToUnixMillis(t time.Time) int64 {\n\treturn t.UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond))\n}", "func durtoTV(d time.Duration) (int64, int64) {\n\tsec := int64(d / nanoPerSec)\n\tmicro := int64((int64(d) - sec*nanoPerSec) / 1000)\n\n\treturn sec, micro\n}", "func (sw Stopwatch) ElapsedMilliseconds() float64 {\n\tduration := time.Since(sw.startTime)\n\treturn duration.Seconds() * 1000\n}", "func timeToUnixMS(t time.Time) int64 {\n\treturn t.UnixNano() / int64(time.Millisecond)\n}", "func (p *parser) duration() Node {\n\ttoken := p.expect(TokenDuration)\n\tnum, err := newDur(token.pos, 
token.val)\n\tif err != nil {\n\t\tp.error(err)\n\t}\n\treturn num\n}", "func (t Time) UnixMilli() int64 {\n\treturn (time.Time)(t).UnixNano() / int64(time.Millisecond)\n}", "func (e PrecisionTiming) durationToMs(x time.Duration) float64 {\n\treturn float64(x) / float64(time.Millisecond)\n}", "func (d *Decoder) TotalTimeMs() int64 {\n\tif d.start.IsZero() {\n\t\treturn 0\n\t}\n\tdur := time.Since(d.start)\n\treturn int64(dur / time.Millisecond)\n}", "func TicksToUnixNano(ticks int64) int64 {\n\treturn TicksToTime(ticks).UnixNano()\n}", "func average(data []int64) string {\n var total int64\n for _, n := range data {\n total += n\n }\n duration, _ := time.ParseDuration(fmt.Sprintf(\"%dns\", (total / int64(len(data)))))\n return fmt.Sprintf(\"%.3f\", duration.Seconds())\n}", "func milliseconds(ms int64) time.Duration {\n\treturn time.Duration(ms * 1000 * 1000)\n}", "func ToHuman(nano int64) string {\n\tvar base int64 = 1\n\tif nano < 1000*base {\n\t\treturn strconv.Itoa(int(nano/base)) + \"ns\"\n\t}\n\n\tbase *= 1000\n\tif nano < 1000*base {\n\t\tvar us = int(nano / base)\n\t\tif nano%base >= base/2 {\n\t\t\tus++\n\t\t}\n\n\t\treturn strconv.Itoa(us) + \"us\"\n\t}\n\n\tbase *= 1000\n\tif nano < 1000*base {\n\t\tvar ms = int(nano / base)\n\t\tif nano%base >= base/2 {\n\t\t\tms++\n\t\t}\n\t\treturn strconv.Itoa(ms) + \"ms\"\n\t}\n\n\tbase *= 1000\n\tvar s = int(nano / base)\n\tif nano%base >= base/2 {\n\t\ts++\n\t}\n\treturn strconv.Itoa(s) + \"s\"\n}", "func seconds(ttl time.Duration) int64 {\n\ti := int64(ttl / time.Second)\n\tif i <= 0 {\n\t\ti = 1\n\t}\n\treturn i\n}", "func TimeTrack(start time.Time) int64 {\n\telapsed := time.Since(start)\n\treturn elapsed.Nanoseconds() / 1000\n}", "func (o KubernetesClusterMaintenanceWindowNodeOsOutput) Duration() pulumi.IntOutput {\n\treturn o.ApplyT(func(v KubernetesClusterMaintenanceWindowNodeOs) int { return v.Duration }).(pulumi.IntOutput)\n}", "func ExampleTime_TimestampNano() {\n\tt := 
gtime.TimestampNano()\n\n\tfmt.Println(t)\n\n\t// May output:\n\t// 1533686888000000\n}", "func (t HighresTimestamp) Duration() time.Duration {\n\treturn time.Duration(uint64(t) * uint64(tbinfo.numer) / uint64(tbinfo.denom)))\n}", "func (s *Stopwatch) ElapsedMilliSeconds() float64 {\n\ts.RLock()\n\tdefer s.RUnlock()\n\n\treturn float64(s.Elapsed() / time.Millisecond)\n}", "func formatNano(nanosec uint, n int, trim bool) []byte {\n\tu := nanosec\n\tvar buf [9]byte\n\tfor start := len(buf); start > 0; {\n\t\tstart--\n\t\tbuf[start] = byte(u%10 + '0')\n\t\tu /= 10\n\t}\n\n\tif n > 9 {\n\t\tn = 9\n\t}\n\tif trim {\n\t\tfor n > 0 && buf[n-1] == '0' {\n\t\t\tn--\n\t\t}\n\t\tif n == 0 {\n\t\t\treturn buf[:0]\n\t\t}\n\t}\n\treturn buf[:n]\n}", "func unixMilli(t time.Time) int64 {\n\treturn t.UnixNano() / int64(time.Millisecond)\n}", "func ConvertNanosecondsToHz(val float64) float64 {\n\treturn val / 1e7\n}", "func (d Duration) Seconds() float64 {\n\treturn time.Duration(d).Seconds()\n}", "func (ti *TimeInterval) EndUnixNano() int64 {\n\treturn ti.End.UTC().UnixNano()\n}" ]
[ "0.7365707", "0.71677697", "0.71562576", "0.7091952", "0.68987876", "0.68987876", "0.68987876", "0.68987876", "0.6809148", "0.66780186", "0.66759145", "0.6439207", "0.64132607", "0.62661654", "0.62491643", "0.62183124", "0.6110412", "0.61070853", "0.60488445", "0.6042173", "0.59795725", "0.5977492", "0.5929279", "0.5921957", "0.59147143", "0.5858022", "0.58271694", "0.5811278", "0.5811278", "0.5794857", "0.5784236", "0.5782882", "0.57552207", "0.57082695", "0.5685207", "0.56718236", "0.5663621", "0.5662295", "0.56595993", "0.5636959", "0.55587834", "0.55228716", "0.55118525", "0.54740924", "0.54739565", "0.54455686", "0.5443789", "0.53865063", "0.5360199", "0.53380257", "0.5314501", "0.5299935", "0.5292967", "0.52471864", "0.5241303", "0.52387875", "0.52368534", "0.5227604", "0.5224608", "0.52212805", "0.52173704", "0.5211857", "0.51795745", "0.5176492", "0.51642466", "0.5157037", "0.5147364", "0.51464397", "0.5145096", "0.513736", "0.5123099", "0.51183486", "0.50997543", "0.5084079", "0.508348", "0.5066079", "0.5065343", "0.5060721", "0.50313085", "0.50312907", "0.5024725", "0.501503", "0.49933064", "0.4970519", "0.49637896", "0.4928636", "0.4924234", "0.49240726", "0.49154773", "0.49148816", "0.49087816", "0.49059728", "0.49030614", "0.48966634", "0.48881462", "0.48871264", "0.48761582", "0.48709825", "0.48655438", "0.48613238" ]
0.81052667
0
Seconds returns the duration as a floating point number of seconds.
func (d Duration) Seconds() float64 { return time.Duration(d).Seconds() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (d Duration) Seconds() float64 {\n\tsec := d / Second\n\tusec := d % Second\n\treturn float64(sec) + float64(usec)/1e6\n}", "func fseconds(d time.Duration) float64 { return float64(d) / float64(time.Second) }", "func (c *ClockVal) Seconds(d time.Duration) float64 {\n\treturn d.Seconds()\n}", "func (s Stopwatch) Seconds() float64 {\n\treturn s.acc.Seconds()\n}", "func ToFloat(d time.Duration) (seconds float64) {\n\treturn float64(d) / float64(time.Second)\n}", "func (f *Formatter) Seconds() string {\n\tvar format string\n\tif f.withoutUnit {\n\t\tformat = \"%d\\n\"\n\t} else {\n\t\tformat = \"%d seconds\\n\"\n\t}\n\treturn fmt.Sprintf(format, int(f.duration.Seconds()))\n}", "func durationInSeconds(d time.Duration) int64 {\n\t// converting a floating-point number to an integer discards\n\t// the fraction (truncation towards zero)\n\treturn int64(d.Seconds())\n}", "func (o DurationOutput) Seconds() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v Duration) *string { return v.Seconds }).(pulumi.StringPtrOutput)\n}", "func getSeconds(data *speedTestData) float64 {\n\treturn float64(data.Milliseconds) / 1000\n}", "func (i ISODuration) GetSeconds() int {\r\n\treturn i.duration.Seconds\r\n}", "func (sw Stopwatch) ElapsedSeconds() float64 {\n\tduration := time.Since(sw.startTime)\n\treturn duration.Seconds()\n}", "func (s *Stopwatch) ElapsedSeconds() float64 {\n\ts.RLock()\n\tdefer s.RUnlock()\n\n\treturn s.Elapsed().Seconds()\n}", "func ConvertSeconds(s string) float64 {\n\tnum, err := strconv.ParseFloat(s, 64)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn num\n}", "func (time CMTime) Seconds() uint64 {\n\t//prevent division by 0\n\tif time.CMTimeValue == 0 {\n\t\treturn 0\n\t}\n\treturn time.CMTimeValue / uint64(time.CMTimeScale)\n}", "func (o DurationPtrOutput) Seconds() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Duration) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Seconds\n\t}).(pulumi.StringPtrOutput)\n}", "func (o 
DurationResponseOutput) Seconds() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DurationResponse) string { return v.Seconds }).(pulumi.StringOutput)\n}", "func NsSeconds(count int64) int64 { return NsMilliseconds(count * 1e3) }", "func (o TransferJobScheduleStartTimeOfDayOutput) Seconds() pulumi.IntOutput {\n\treturn o.ApplyT(func(v TransferJobScheduleStartTimeOfDay) int { return v.Seconds }).(pulumi.IntOutput)\n}", "func ParseSeconds(d string) (time.Duration, error) {\n\tn, err := strconv.ParseInt(d, 10, 64)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn time.Duration(n) * time.Second, nil\n}", "func (c *Job) Seconds() *Job {\n\tif c.delayUnit == delayNone {\n\t\tc.unit = seconds\n\t} else {\n\t\tc.delayUnit = delaySeconds\n\t}\n\treturn c\n}", "func Ms(duration time.Duration) float64 {\n\treturn float64(duration / time.Millisecond)\n}", "func getDuration(seconds int) time.Duration {\n\treturn time.Duration(seconds) * time.Second\n}", "func seconds(s string) int64 {\n\tt, err := time.Parse(gitime, s)\n\tif err != nil {\n\t\treturn -1\n\t}\n\treturn t.Unix()\n}", "func (o SecurityProfileBehaviorCriteriaOutput) DurationSeconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v SecurityProfileBehaviorCriteria) *int { return v.DurationSeconds }).(pulumi.IntPtrOutput)\n}", "func toTimeSeconds(value string) (int64, error) {\n\t//is serial format?\n\tserial, err := strconv.ParseFloat(value, 64)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\treturn int64(serial * 86400), nil\n}", "func secondsToDuration(seconds float64) time.Duration {\n\tttl := seconds * float64(time.Second)\n\treturn time.Duration(ttl)\n}", "func secondsToDuration(seconds float64) time.Duration {\n\tttl := seconds * float64(time.Second)\n\treturn time.Duration(ttl)\n}", "func StringToSeconds(s string) (r time.Duration) {\n\t_sec := StringToInteger(s)\n\tsec := time.Duration(_sec * 1000 * 1000 * 1000)\n\treturn sec\n}", "func (cvr Converter) MillisecondsToSeconds(msecs Milliseconds) Seconds 
{\n\treturn Seconds(msecs / 60)\n}", "func DurationToFloat(dur time.Duration) float64 {\n\treturn float64(dur) / float64(time.Second)\n}", "func seconds(ttl time.Duration) int64 {\n\ti := int64(ttl / time.Second)\n\tif i <= 0 {\n\t\ti = 1\n\t}\n\treturn i\n}", "func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput {\n\ts.DurationSeconds = &v\n\treturn s\n}", "func (s *GetCredentialsInput) SetDurationSeconds(v int64) *GetCredentialsInput {\n\ts.DurationSeconds = &v\n\treturn s\n}", "func (o SecurityProfileBehaviorCriteriaPtrOutput) DurationSeconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *SecurityProfileBehaviorCriteria) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.DurationSeconds\n\t}).(pulumi.IntPtrOutput)\n}", "func (d Dispatcher) ExecDurationSeconds(id string, hash string) (float64, error) {\n\te, err := d.GetBC().FindExec(id, hash)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn e.GetDuration().Seconds(), nil\n}", "func durationTo8601Seconds(duration time.Duration) string {\n\treturn fmt.Sprintf(\"PT%dS\", duration/time.Second)\n}", "func (o DurationResponsePtrOutput) Seconds() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DurationResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Seconds\n\t}).(pulumi.StringPtrOutput)\n}", "func (o InstanceDenyMaintenancePeriodTimeOutput) Seconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v InstanceDenyMaintenancePeriodTime) *int { return v.Seconds }).(pulumi.IntPtrOutput)\n}", "func (o InstanceMaintenanceWindowStartTimeOutput) Seconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v InstanceMaintenanceWindowStartTime) *int { return v.Seconds }).(pulumi.IntPtrOutput)\n}", "func (j *Job) Seconds() (job *Job) {\n\tj.unit = JOB_UNIT_TYPE_SECOND\n\treturn j\n}", "func (o TransferJobScheduleStartTimeOfDayPtrOutput) Seconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *TransferJobScheduleStartTimeOfDay) *int {\n\t\tif v == 
nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Seconds\n\t}).(pulumi.IntPtrOutput)\n}", "func (o *FeedSyncResult) GetTotalTimeSeconds() float32 {\n\tif o == nil || o.TotalTimeSeconds == nil {\n\t\tvar ret float32\n\t\treturn ret\n\t}\n\treturn *o.TotalTimeSeconds\n}", "func SecondsSince(ts TimeSource, t time.Time) float64 {\n\treturn ts.Now().Sub(t).Seconds()\n}", "func TimestampToSeconds(timestamp int64) float64 {\n\tfloatTime := float64(timestamp)\n\treturn floatTime * 0.000000001\n}", "func getSecondsFromDurationString(s string) (int, error) {\n\tduration, err := time.ParseDuration(s)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn int(duration.Seconds()), nil\n}", "func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput {\n\ts.DurationSeconds = &v\n\treturn s\n}", "func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput {\n\ts.DurationSeconds = &v\n\treturn s\n}", "func DurationValue(s string, step int64) (int64, error) {\n\tif len(s) == 0 {\n\t\treturn 0, fmt.Errorf(\"duration cannot be empty\")\n\t}\n\tlastChar := s[len(s)-1]\n\tif lastChar >= '0' && lastChar <= '9' || lastChar == '.' 
{\n\t\t// Try parsing floating-point duration\n\t\td, err := strconv.ParseFloat(s, 64)\n\t\tif err == nil {\n\t\t\t// Convert the duration to milliseconds.\n\t\t\treturn int64(d * 1000), nil\n\t\t}\n\t}\n\tisMinus := false\n\td := float64(0)\n\tfor len(s) > 0 {\n\t\tn := scanSingleDuration(s, true)\n\t\tif n <= 0 {\n\t\t\treturn 0, fmt.Errorf(\"cannot parse duration %q\", s)\n\t\t}\n\t\tds := s[:n]\n\t\ts = s[n:]\n\t\tdLocal, err := parseSingleDuration(ds, step)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tif isMinus && dLocal > 0 {\n\t\t\tdLocal = -dLocal\n\t\t}\n\t\td += dLocal\n\t\tif dLocal < 0 {\n\t\t\tisMinus = true\n\t\t}\n\t}\n\tif math.Abs(d) > 1<<63-1 {\n\t\treturn 0, fmt.Errorf(\"too big duration %.0fms\", d)\n\t}\n\treturn int64(d), nil\n}", "func SinceInSeconds(start time.Time) float64 {\n\treturn time.Since(start).Seconds()\n}", "func SinceInSeconds(start time.Time) float64 {\n\treturn time.Since(start).Seconds()\n}", "func SinceInSeconds(start time.Time) float64 {\n\treturn time.Since(start).Seconds()\n}", "func formatSeconds(d uint64) string {\n\tif d == 0 {\n\t\treturn \"\"\n\t}\n\n\tdays := d / 86400\n\thours := (d - days*86400) / 3600\n\tminutes := (d - days*86400 - hours*3600) / 60\n\tseconds := d - days*86400 - hours*3600 - minutes*60\n\n\tif days > 0 {\n\t\treturn fmt.Sprintf(\"%dd %dh %dm\", days, hours, minutes)\n\t}\n\tif hours > 0 {\n\t\treturn fmt.Sprintf(\"%dh %dm %ds\", hours, minutes, seconds)\n\t}\n\tif minutes > 0 {\n\t\treturn fmt.Sprintf(\"%dm %ds\", minutes, seconds)\n\t}\n\n\treturn fmt.Sprintf(\"%ds\", seconds)\n}", "func (d Duration) Microseconds() int64 {\n\treturn int64(d)\n}", "func (fs *FlowStats) Duration() float64 {\n\tendTime := fs.ReadTime(EndTime)\n\tif endTime.Equal(time.Time{}) {\n\t\tendTime = time.Now()\n\t}\n\tduration := endTime.Sub(fs.ReadTime(StartTime))\n\treturn float64(duration) / float64(time.Second)\n}", "func FormatDeltaSeconds(delta int) string {\n\treturn time.SecondsToUTC(time.Seconds() + 
int64(delta)).Format(TimeLayout)\n}", "func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput {\n\ts.DurationSeconds = &v\n\treturn s\n}", "func (t *Timespan) unmarshalSeconds(s string) (time.Duration, error) {\n\t// \"03\" = 3 * time.Second\n\t// \"00.099\" = 99 * time.Millisecond\n\t// \"03.0123\" == 3 * time.Second + 12300 * time.Microsecond\n\tsp := strings.Split(s, \".\")\n\tswitch len(sp) {\n\tcase 1:\n\t\tseconds, err := strconv.Atoi(s)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"timespan's seconds field was incorrect, was %s\", s)\n\t\t}\n\t\treturn time.Duration(seconds) * time.Second, nil\n\tcase 2:\n\t\tseconds, err := strconv.Atoi(sp[0])\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"timespan's seconds field was incorrect, was %s\", s)\n\t\t}\n\t\tn, err := strconv.Atoi(sp[1])\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"timespan's seconds field was incorrect, was %s\", s)\n\t\t}\n\t\tvar prec time.Duration\n\t\tswitch len(sp[1]) {\n\t\tcase 1:\n\t\t\tprec = time.Duration(n) * (100 * time.Millisecond)\n\t\tcase 2:\n\t\t\tprec = time.Duration(n) * (10 * time.Millisecond)\n\t\tcase 3:\n\t\t\tprec = time.Duration(n) * time.Millisecond\n\t\tcase 4:\n\t\t\tprec = time.Duration(n) * 100 * time.Microsecond\n\t\tcase 5:\n\t\t\tprec = time.Duration(n) * 10 * time.Microsecond\n\t\tcase 6:\n\t\t\tprec = time.Duration(n) * time.Microsecond\n\t\tcase 7:\n\t\t\tprec = time.Duration(n) * tick\n\t\tcase 8:\n\t\t\tprec = time.Duration(n) * (10 * time.Nanosecond)\n\t\tcase 9:\n\t\t\tprec = time.Duration(n) * time.Nanosecond\n\t\tdefault:\n\t\t\treturn 0, fmt.Errorf(\"timespan's seconds field did not have 1-9 numbers after the decimal, had %v\", s)\n\t\t}\n\n\t\treturn time.Duration(seconds)*time.Second + prec, nil\n\t}\n\treturn 0, fmt.Errorf(\"timespan's seconds field did not have the requisite '.'s, was %s\", s)\n}", "func (o InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput) Seconds() pulumi.IntPtrOutput 
{\n\treturn o.ApplyT(func(v InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime) *int { return v.Seconds }).(pulumi.IntPtrOutput)\n}", "func (o InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput) Seconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime) *int { return v.Seconds }).(pulumi.IntPtrOutput)\n}", "func (cvr Converter) MinutesToSeconds(m Minutes) Seconds {\n\treturn Seconds(m) * Seconds(60)\n}", "func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput {\n\ts.DurationSeconds = &v\n\treturn s\n}", "func GetTimeInSeconds() float64 {\n\treturn float64(C.ovr_GetTimeInSeconds())\n}", "func (i ISODuration) SetSeconds(seconds int) {\r\n\ti.duration.Seconds = seconds\r\n}", "func (o GroupContainerLivenessProbeOutput) PeriodSeconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v GroupContainerLivenessProbe) *int { return v.PeriodSeconds }).(pulumi.IntPtrOutput)\n}", "func (v Value) Duration() uint64 {\n\tstart := big.NewInt(v.StartSeconds)\n\tend := big.NewInt(v.EndSeconds)\n\n\tduration := (&big.Int{}).Sub(end, start)\n\n\treturn duration.Uint64()\n}", "func (e2 *PicoSecondTimeStamp) Duration(e1 *PicoSecondTimeStamp) *PicoSecondDuration {\n\tresult := &PicoSecondDuration{\n\t\tEpoch: int32(e2.Epoch - e1.Epoch),\n\t\tPicoSeconds: int64(e2.PicoSeconds - e1.PicoSeconds),\n\t}\n\n\tif result.PicoSeconds < 0 && result.Epoch > 0 {\n\t\tresult.Epoch = result.Epoch - 1\n\t\tresult.PicoSeconds = result.PicoSeconds + 1000000000000\n\t}\n\treturn result\n}", "func TimeInSec(period string) int {\n\tif strings.HasSuffix(period, \"sec\") {\n\t\ti, _ := strconv.Atoi(strings.Replace(period, \"sec\", \"\", -1))\n\t\treturn i\n\t} else if strings.HasSuffix(period, \"min\") {\n\t\ti, _ := strconv.Atoi(strings.Replace(period, \"min\", \"\", -1))\n\t\treturn i * 60\n\t} else if strings.HasSuffix(period, \"hours\") {\n\t\ti, _ := strconv.Atoi(strings.Replace(period, \"hours\", \"\", 
-1))\n\t\treturn i * 60 * 60\n\t} else if strings.HasSuffix(period, \"days\") {\n\t\ti, _ := strconv.Atoi(strings.Replace(period, \"days\", \"\", -1))\n\t\treturn i * 60 * 60 * 24\n\t} else {\n\t\treturn 0\n\t}\n}", "func (d *Duration) Duration() time.Duration {\n\tif d == nil {\n\t\treturn 0\n\t}\n\treturn (time.Duration(d.Seconds) * time.Second) + (time.Duration(d.Nanos) * time.Nanosecond)\n}", "func ToUsec(t time.Time) int64 {\n\treturn t.UnixNano() / 1e3\n}", "func (i ISODuration) GetMilliSeconds() int {\r\n\treturn i.duration.MilliSeconds\r\n}", "func (t *Track) Duration() float64 {\n\treturn float64(t.duration) / float64(t.globalTimescale)\n}", "func MinutesToSeconds(minutes int) int {\n\treturn minutes * 60\n}", "func getDurationStringFromSeconds(seconds int) string {\n\treturn (time.Duration(seconds) * time.Second).String()\n}", "func (o GroupContainerLivenessProbePtrOutput) PeriodSeconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *GroupContainerLivenessProbe) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PeriodSeconds\n\t}).(pulumi.IntPtrOutput)\n}", "func (o InstanceMaintenanceWindowStartTimePtrOutput) Seconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *InstanceMaintenanceWindowStartTime) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Seconds\n\t}).(pulumi.IntPtrOutput)\n}", "func convertToSeconds(hours, minutes, seconds, microseconds string) {\n\thoursInSeconds, _ := strconv.Atoi(hours)\n\tminutesInSeconds, _ := strconv.Atoi(minutes)\n\tformattedSeconds, _ := strconv.Atoi(seconds)\n\tformattedSeconds = formattedSeconds + (hoursInSeconds * 3600) + (minutesInSeconds * 60)\n\n\tvar buffer bytes.Buffer\n\n\tbuffer.WriteString(strconv.Itoa(formattedSeconds))\n\tbuffer.WriteString(\".\")\n\tbuffer.WriteString(microseconds)\n\n\tfmt.Println(\"BarDuration: \" + buffer.String())\n}", "func (t Time) Microseconds() int64 {\n\treturn time.Time(t).UnixNano() / DivideMicroseconds\n}", "func (o 
InstanceDenyMaintenancePeriodTimePtrOutput) Seconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *InstanceDenyMaintenancePeriodTime) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Seconds\n\t}).(pulumi.IntPtrOutput)\n}", "func (cvr Converter) MinutesToSeconds(m Minutes) Seconds {\n\treturn Seconds(m * 60)\n}", "func daysToSeconds(inDays int64) int64 {\n\treturn int64(inDays * 24 * 60 * 60 )\n}", "func (o BuildStrategySpecBuildStepsLivenessProbeOutput) PeriodSeconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v BuildStrategySpecBuildStepsLivenessProbe) *int { return v.PeriodSeconds }).(pulumi.IntPtrOutput)\n}", "func (m *sdt) Duration() int32 {\n\treturn m.durationField\n}", "func GetTrackedSeconds(ctx context.Context, opts FindTrackedTimesOptions) (trackedSeconds int64, err error) {\n\treturn opts.toSession(db.GetEngine(ctx)).SumInt(&TrackedTime{}, \"time\")\n}", "func (o HPAScalingPolicyOutput) PeriodSeconds() pulumi.IntOutput {\n\treturn o.ApplyT(func(v HPAScalingPolicy) int { return v.PeriodSeconds }).(pulumi.IntOutput)\n}", "func (s *Stopwatch) ElapsedMilliSeconds() float64 {\n\ts.RLock()\n\tdefer s.RUnlock()\n\n\treturn float64(s.Elapsed() / time.Millisecond)\n}", "func FromFloat(seconds float64) time.Duration {\n\treturn time.Duration(seconds*float64(time.Second) + 0.5)\n}", "func (o ClusterBuildStrategySpecBuildStepsLivenessProbeOutput) PeriodSeconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v ClusterBuildStrategySpecBuildStepsLivenessProbe) *int { return v.PeriodSeconds }).(pulumi.IntPtrOutput)\n}", "func (d UnixDuration) Duration() time.Duration {\n\treturn time.Duration(d) * time.Second\n}", "func (c *Client) Duration() (float64, error) {\n\treturn c.GetFloatProperty(\"duration\")\n}", "func fmtDuration(d time.Duration) string {\n\treturn fmt.Sprintf(\"%.2fs\", d.Seconds())\n}", "func (o BuildStrategySpecBuildStepsLivenessProbePtrOutput) PeriodSeconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v 
*BuildStrategySpecBuildStepsLivenessProbe) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PeriodSeconds\n\t}).(pulumi.IntPtrOutput)\n}", "func FormatSeconds(seconds float64) string {\n\t// Make sure localised strings are fetched\n\tlocOnce.Do(func() {\n\t\tlocDay = glib.Local(\"one day\")\n\t\tlocDays = glib.Local(\"days\")\n\t})\n\n\tminutes, secs := int(seconds)/60, int(seconds)%60\n\thours, mins := minutes/60, minutes%60\n\tdays, hrs := hours/24, hours%24\n\tswitch {\n\tcase days > 1:\n\t\treturn fmt.Sprintf(\"%d %s %d:%02d:%02d\", days, locDays, hrs, mins, secs)\n\tcase days == 1:\n\t\treturn fmt.Sprintf(\"%s %d:%02d:%02d\", locDay, hrs, mins, secs)\n\tcase hours >= 1:\n\t\treturn fmt.Sprintf(\"%d:%02d:%02d\", hrs, mins, secs)\n\tdefault:\n\t\treturn fmt.Sprintf(\"%d:%02d\", mins, secs)\n\t}\n}", "func (s Segment) Duration() time.Duration {\n\treturn s.EndsBefore.Sub(s.Start)\n}", "func MeasureSecondsSince(name string, field string, t time.Time) Measurement {\n\treturn NewMeasurement(name).AddSecondsSince(field, t)\n}", "func getSeconds(time *int) int {\n\treturn *time\n}", "func (m *TimerMutation) ElapsedSeconds() (r int, exists bool) {\n\tv := m.elapsedSeconds\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}", "func micros(d time.Duration) int {\n\treturn int(d.Seconds() * 1000000)\n}", "func (o ClusterBuildStrategySpecBuildStepsLivenessProbePtrOutput) PeriodSeconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *ClusterBuildStrategySpecBuildStepsLivenessProbe) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PeriodSeconds\n\t}).(pulumi.IntPtrOutput)\n}", "func (t Time) Second() int {\n\treturn time.Time(t).Second()\n}", "func (dt *DateTime) GetSecond() *Number {\n\treturn dt.Second()\n}" ]
[ "0.8146225", "0.7864046", "0.7570394", "0.72681606", "0.7212462", "0.7174782", "0.7154755", "0.71513623", "0.6977348", "0.6971889", "0.6894677", "0.6776812", "0.6714895", "0.67132235", "0.66712093", "0.65651536", "0.6538294", "0.65263283", "0.65176785", "0.6466086", "0.6434017", "0.6346442", "0.6335964", "0.6320832", "0.6305017", "0.6227518", "0.6227518", "0.6188115", "0.6179615", "0.6160989", "0.6151172", "0.61360115", "0.6134382", "0.6124866", "0.6116353", "0.60917294", "0.60746425", "0.60684717", "0.60326755", "0.60280186", "0.59919035", "0.5981728", "0.5965073", "0.59522265", "0.59358466", "0.5933968", "0.5911186", "0.5909971", "0.5904017", "0.5904017", "0.5904017", "0.59037054", "0.5853884", "0.58501333", "0.5846143", "0.5842225", "0.5826644", "0.58200824", "0.58200824", "0.5814505", "0.58078283", "0.58017075", "0.57867104", "0.5777276", "0.57679474", "0.5767262", "0.57671547", "0.5743368", "0.57286924", "0.5720256", "0.5709292", "0.5700157", "0.5690397", "0.5683481", "0.56823915", "0.56571186", "0.5638545", "0.56276274", "0.5623667", "0.56083286", "0.5604272", "0.5583957", "0.55779094", "0.55776834", "0.5528202", "0.55278444", "0.5521058", "0.5519935", "0.55159116", "0.5504168", "0.5501442", "0.5497751", "0.5496833", "0.5489368", "0.54639864", "0.5434759", "0.5431013", "0.54273176", "0.54169023", "0.54133046" ]
0.81206095
1
Returns a string representation of the duration in russian language
func (d Duration) String() (result string) { var seconds, minutes, hours int seconds = int(d.Seconds()) if seconds > 60 { minutes = (seconds - seconds%60) / 60 seconds = seconds % 60 } if minutes > 59 { hours = (minutes - minutes%60) / 60 minutes = minutes - hours*60 result = numberInString(hours, false) result += " " + hoursTail(hours) } if minutes != 0 { if result != "" { result += ", " } result += strings.ToLower(numberInString(minutes, true)) result += " " + minutesTail(minutes) } if seconds != 0 { if result != "" { result += ", " } result += strings.ToLower(numberInString(seconds, true)) result += " " + secondsTail(seconds) } return }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (d Duration) String() string {}", "func (s TtlDuration) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (d Duration) String() string {\n\tvalue := int64(d)\n\tout := \"\"\n\tif value < 0 {\n\t\tout = \"-\"\n\t\tvalue = -value\n\t}\n\tdivmod := func(divisor, dividend int64) (int64, int64) {\n\t\treturn divisor / dividend, divisor % dividend\n\t}\n\textract := func(symbol string, unit int64) {\n\t\tvar units int64\n\t\tunits, value = divmod(value, unit)\n\t\tif units > 0 {\n\t\t\tout += fmt.Sprintf(\"%d%s\", units, symbol)\n\t\t}\n\t}\n\textract(\"y\", Year)\n\textract(\"m\", Month)\n\textract(\"d\", Day)\n\tif value > 0 {\n\t\tout += \"t\"\n\t}\n\textract(\"h\", Hour)\n\textract(\"m\", Minute)\n\textract(\"s\", Second)\n\textract(\"us\", Microsecond)\n\n\tif out == \"\" {\n\t\t// input duration was 0\n\t\tout = \"t0s\" // seconds are the fundamental unit\n\t}\n\n\treturn out\n}", "func (d *Duration) String() string {\n\treturn time.Duration(*d).String()\n}", "func (channelInfo ChannelInfo) GetStreamDuration() string {\n\n\tif !channelInfo.StreamStatus.Online {\n\t\treturn \"\"\n\t}\n\tminutePrefix := \"минут\"\n\thourPrefix := \"часов\"\n\tduration := time.Now().Sub(channelInfo.StreamStatus.Start)\n\tminutes := float64(int(duration.Minutes() - math.Floor(duration.Minutes()/60)*60))\n\thours := float64(int(duration.Hours()))\n\tif math.Floor(minutes/10) != 1 {\n\t\tswitch int(minutes - math.Floor(minutes/10)*10) {\n\t\tcase 1:\n\t\t\tminutePrefix = \"минуту\"\n\t\t\tbreak\n\t\tcase 2:\n\t\tcase 3:\n\t\tcase 4:\n\t\t\tminutePrefix = \"минуты\"\n\t\t}\n\t}\n\n\tif int(math.Floor(hours/10)) != 1 {\n\t\tswitch int(hours - math.Floor(hours/10)*10) {\n\t\tcase 1:\n\t\t\thourPrefix = \"час\"\n\t\t\tbreak\n\t\tcase 2:\n\t\tcase 3:\n\t\tcase 4:\n\t\t\thourPrefix = \"часа\"\n\t\t}\n\t}\n\tif int(minutes) == 0 {\n\t\treturn fmt.Sprintf(\"%d %s\", int(hours), hourPrefix)\n\n\t}\n\tif int(hours) == 0 {\n\t\treturn fmt.Sprintf(\"%d %s\", int(minutes), 
minutePrefix)\n\t}\n\treturn fmt.Sprintf(\"%d %s %d %s\", int(hours), hourPrefix, int(minutes), minutePrefix)\n\n}", "func RenderDuration(d time.Duration) string {\n\tif d == math.MaxInt64 {\n\t\treturn \"never\"\n\t}\n\n\tif d == 0 {\n\t\treturn \"forever\"\n\t}\n\n\ttsecs := d / time.Second\n\ttmins := tsecs / 60\n\tthrs := tmins / 60\n\ttdays := thrs / 24\n\ttyrs := tdays / 365\n\n\tif tyrs > 0 {\n\t\treturn fmt.Sprintf(\"%dy%dd%dh%dm%ds\", tyrs, tdays%365, thrs%24, tmins%60, tsecs%60)\n\t}\n\n\tif tdays > 0 {\n\t\treturn fmt.Sprintf(\"%dd%dh%dm%ds\", tdays, thrs%24, tmins%60, tsecs%60)\n\t}\n\n\tif thrs > 0 {\n\t\treturn fmt.Sprintf(\"%dh%dm%ds\", thrs, tmins%60, tsecs%60)\n\t}\n\n\tif tmins > 0 {\n\t\treturn fmt.Sprintf(\"%dm%ds\", tmins, tsecs%60)\n\t}\n\n\treturn fmt.Sprintf(\"%.2fs\", d.Seconds())\n}", "func (hms HHMMSS) String() string {\n\treturn time.Duration(hms).String()\n}", "func DurationToString(duration common.Duration) string {\n\t// Shortcut for zero duration\n\tif duration.IsZero() {\n\t\treturn \"0초\"\n\t}\n\tslices := make([]string, 0, 4)\n\t// Using strconv.Itoa is faster than fmt.Sprintf in this case\n\tif duration.Day != 0 {\n\t\tslices = append(slices, strconv.Itoa(duration.Day)+\"일\")\n\t}\n\tif duration.Hour != 0 {\n\t\tslices = append(slices, strconv.Itoa(duration.Hour)+\"시간\")\n\t}\n\tif duration.Minute != 0 {\n\t\tslices = append(slices, strconv.Itoa(duration.Minute)+\"분\")\n\t}\n\tif duration.Second != 0 {\n\t\tslices = append(slices, strconv.Itoa(duration.Second)+\"초\")\n\t}\n\n\treturn strings.Join(slices, \" \")\n}", "func (d *DataGenerator) getDuration() string {\n\t// ISO 8601 format\n\t// P3Y6M4DT12H30M5S = three years, six months, four days, twelve hours, thirty minutes, and five seconds\n\thr := rand.Int31n(12)\n\tmin := rand.Int31n(60)\n\tsec := rand.Int31n(60)\n\treturn fmt.Sprintf(\"P0Y0M0DT%dH%dM%dS\", hr, min, sec)\n}", "func (d Duration) String() string {\n\treturn d.duration\n}", "func Duration(d time.Duration) string 
{\n\treturn d.String()\n}", "func (s Duration) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (d Duration) String() string {\n\treturn d.toString()\n}", "func (d Duration) String() string {\n\tsign := \"\"\n\tif d.Seconds < 0 && d.Nanos > 0 {\n\t\td.Seconds++\n\t\td.Nanos = int(time.Second) - d.Nanos\n\n\t\tif d.Seconds == 0 {\n\t\t\tsign = \"-\"\n\t\t}\n\t}\n\n\ttimePart := \"\"\n\tif d.Nanos == 0 {\n\t\ttimePart = fmt.Sprintf(\"%s%d\", sign, d.Seconds)\n\t} else {\n\t\ttimePart = fmt.Sprintf(\"%s%d.%09d\", sign, d.Seconds, d.Nanos)\n\t}\n\n\treturn fmt.Sprintf(\"P%dM%dDT%sS\", d.Months, d.Days, timePart)\n}", "func (d Duration) String() string {\n\treturn time.Duration(d * 1000).String()\n}", "func (o ResourcePolicyWeeklyCycleDayOfWeekResponseOutput) Duration() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ResourcePolicyWeeklyCycleDayOfWeekResponse) string { return v.Duration }).(pulumi.StringOutput)\n}", "func (d Duration) String() string {\n\treturn time.Duration(d).String()\n}", "func (d Duration) String() string {\n\treturn time.Duration(d).String()\n}", "func DescDuration(d time.Duration) string {\n\tif d < time.Minute {\n\t\treturn fmt.Sprintf(\"%0.1f sec ago\", d.Seconds())\n\t} else if d < time.Hour {\n\t\treturn fmt.Sprintf(\"%0.1f min ago\", d.Minutes())\n\t} else if d < time.Hour*24 {\n\t\treturn fmt.Sprintf(\"%0.1f hrs ago\", d.Hours())\n\t} else {\n\t\treturn fmt.Sprintf(\"%0.1f days ago\", d.Hours()/24.0)\n\t}\n}", "func (o ResourcePolicyDailyCycleResponseOutput) Duration() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ResourcePolicyDailyCycleResponse) string { return v.Duration }).(pulumi.StringOutput)\n}", "func (o ResourcePolicyHourlyCycleResponseOutput) Duration() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ResourcePolicyHourlyCycleResponse) string { return v.Duration }).(pulumi.StringOutput)\n}", "func (tso TimeWebsmsShortOne) String() string { return time.Time(tso).Format(timeWebsmsShortOneFormat) }", "func (d Duration) 
String() string {\n\tif d == 0 {\n\t\treturn \"0s\"\n\t}\n\tswitch {\n\tcase d%Year == 0:\n\t\treturn fmt.Sprintf(\"%dy\", d/Year)\n\tcase d%Week == 0:\n\t\treturn fmt.Sprintf(\"%dw\", d/Week)\n\tcase d%Day == 0:\n\t\treturn fmt.Sprintf(\"%dd\", d/Day)\n\tcase d%Hour == 0:\n\t\treturn fmt.Sprintf(\"%dh\", d/Hour)\n\tcase d%Minute == 0:\n\t\treturn fmt.Sprintf(\"%dm\", d/Minute)\n\tdefault:\n\t\treturn fmt.Sprintf(\"%ds\", d)\n\t}\n}", "func (d Duration) String() string {\n\treturn d.Dur().String()\n}", "func (d Duration) StringApproximate() (result string) {\n\tvar seconds, minutes, hours, days, months, years int\n\tseconds = int(d.Seconds())\n\tif seconds > 60 {\n\t\tminutes = int(d.Minutes())\n\t}\n\tif minutes > 59 {\n\t\thours = int(d.Hours())\n\t\tminutes = minutes - hours*60\n\t}\n\tif hours > 24 {\n\t\tdays = (hours - hours%24) / 24\n\t\thours = hours - days*24\n\t}\n\tif days > 365 {\n\t\tyears = (days - days%365) / 365\n\t\tdays = days - years*365\n\t}\n\tif days > 30 {\n\t\tmonths = (days - days%30) / 30\n\t\tdays = days - months*30\n\t}\n\tif years > 0 {\n\t\tif months < 3 {\n\t\t\tresult = numberInString(years, false) + \" \" + yearsTail(years)\n\t\t} else {\n\t\t\tresult = \"Более\"\n\t\t\tif years > 1 {\n\t\t\t\tresult = \" \" + strings.ToLower(numberStringInGenitiveCase(years, false))\n\t\t\t}\n\t\t\tresult += \" \" + strings.ToLower(numberStringInGenitiveCase(years, false)) + \" \" + strings.ToLower(yearsTailInGenitiveCase(years))\n\t\t}\n\t} else if months > 0 {\n\t\tif days < 8 {\n\t\t\tresult = numberInString(months, false) + \" \" + monthsTail(months)\n\t\t} else {\n\t\t\tresult = \"Более\"\n\t\t\tif months > 1 {\n\t\t\t\tresult = \" \" + strings.ToLower(numberStringInGenitiveCase(months, false))\n\t\t\t}\n\t\t\tresult += \" \" + strings.ToLower(numberStringInGenitiveCase(months, false)) + \" \" + strings.ToLower(monthsTailInGenitiveCase(months))\n\t\t}\n\t} else if days > 0 {\n\t\tif hours < 5 {\n\t\t\tresult = numberInString(days, false) + \" 
\" + daysTail(days)\n\t\t} else {\n\t\t\tresult = \"Более \"\n\t\t\tif days == 1 {\n\t\t\t\tresult += \"суток\"\n\t\t\t} else {\n\t\t\t\tresult += strings.ToLower(numberStringInGenitiveCase(days, false)) + \" суток\"\n\t\t\t}\n\t\t}\n\t} else if hours > 0 {\n\t\tif minutes < 16 {\n\t\t\tresult = numberInString(hours, false) + \" \" + hoursTail(hours)\n\t\t} else {\n\t\t\tresult = \"Более \"\n\t\t\tif hours == 1 {\n\t\t\t\tresult += \"часа\"\n\t\t\t} else {\n\t\t\t\tresult += strings.ToLower(numberStringInGenitiveCase(hours, false))\n\t\t\t\tresult += \" \" + strings.ToLower(hoursTailInGenitiveCase(hours))\n\t\t\t}\n\t\t}\n\t} else if minutes > 0 {\n\t\tif minutes == 1 {\n\t\t\tresult = \"Минуту\"\n\t\t} else {\n\t\t\tresult = numberInString(minutes, true) + \" \" + minutesTail(minutes)\n\t\t}\n\t} else {\n\t\tresult = \"Менее минуты\"\n\t}\n\tresult += \" назад\"\n\treturn\n}", "func (t Time) StringEN() string {\n\treturn t.In(time.UTC).Format(time.RFC1123Z)\n}", "func (s TimeSpan) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (b *Build) HumanDuration() string {\n\td := time.Duration(b.Duration)\n\tif seconds := int(d.Seconds()); seconds < 1 {\n\t\treturn \"Less than a second\"\n\t} else if seconds < 60 {\n\t\treturn fmt.Sprintf(\"%d seconds\", seconds)\n\t} else if minutes := int(d.Minutes()); minutes == 1 {\n\t\treturn \"About a minute\"\n\t} else if minutes < 60 {\n\t\treturn fmt.Sprintf(\"%d minutes\", minutes)\n\t} else if hours := int(d.Hours()); hours == 1 {\n\t\treturn \"About an hour\"\n\t} else if hours < 48 {\n\t\treturn fmt.Sprintf(\"%d hours\", hours)\n\t} else if hours < 24*7*2 {\n\t\treturn fmt.Sprintf(\"%d days\", hours/24)\n\t} else if hours < 24*30*3 {\n\t\treturn fmt.Sprintf(\"%d weeks\", hours/24/7)\n\t} else if hours < 24*365*2 {\n\t\treturn fmt.Sprintf(\"%d months\", hours/24/30)\n\t}\n\treturn fmt.Sprintf(\"%f years\", d.Hours()/24/365)\n}", "func durationToWord(in Interval) string {\n\tswitch in {\n\tcase 
FifteenSecond:\n\t\treturn \"fifteensecond\"\n\tcase OneMin:\n\t\treturn \"onemin\"\n\tcase ThreeMin:\n\t\treturn \"threemin\"\n\tcase FiveMin:\n\t\treturn \"fivemin\"\n\tcase TenMin:\n\t\treturn \"tenmin\"\n\tcase FifteenMin:\n\t\treturn \"fifteenmin\"\n\tcase ThirtyMin:\n\t\treturn \"thirtymin\"\n\tcase OneHour:\n\t\treturn \"onehour\"\n\tcase TwoHour:\n\t\treturn \"twohour\"\n\tcase FourHour:\n\t\treturn \"fourhour\"\n\tcase SixHour:\n\t\treturn \"sixhour\"\n\tcase EightHour:\n\t\treturn \"eighthour\"\n\tcase TwelveHour:\n\t\treturn \"twelvehour\"\n\tcase OneDay:\n\t\treturn \"oneday\"\n\tcase ThreeDay:\n\t\treturn \"threeday\"\n\tcase FifteenDay:\n\t\treturn \"fifteenday\"\n\tcase OneWeek:\n\t\treturn \"oneweek\"\n\tcase TwoWeek:\n\t\treturn \"twoweek\"\n\tcase OneMonth:\n\t\treturn \"onemonth\"\n\tcase OneYear:\n\t\treturn \"oneyear\"\n\tdefault:\n\t\treturn \"notfound\"\n\t}\n}", "func DurationString(duration time.Duration) string {\n\tdurSecs := int64(duration.Seconds())\n\n\tdurStr := \"\"\n\tif durSecs > 0 {\n\t\tif durSecs%SecsPerDay == 0 {\n\t\t\t// convert to days\n\t\t\tdurStr = fmt.Sprintf(\"%dd\", durSecs/SecsPerDay)\n\t\t} else if durSecs%SecsPerHour == 0 {\n\t\t\t// convert to hours\n\t\t\tdurStr = fmt.Sprintf(\"%dh\", durSecs/SecsPerHour)\n\t\t} else if durSecs%SecsPerMin == 0 {\n\t\t\t// convert to mins\n\t\t\tdurStr = fmt.Sprintf(\"%dm\", durSecs/SecsPerMin)\n\t\t} else if durSecs > 0 {\n\t\t\t// default to mins, as long as duration is positive\n\t\t\tdurStr = fmt.Sprintf(\"%ds\", durSecs)\n\t\t}\n\t}\n\n\treturn durStr\n}", "func toElapsedLabel(rfc850time string) string {\n\tcreated, err := time.Parse(time.RFC850, rfc850time)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\telapsed := time.Now().UTC().Sub(created.UTC())\n\tseconds := elapsed.Seconds()\n\tminutes := elapsed.Minutes()\n\thours := elapsed.Hours()\n\tdays := hours / 24\n\tweeks := days / 7\n\tmonths := weeks / 4\n\tyears := months / 12\n\n\tif math.Trunc(years) > 0 {\n\t\treturn 
fmt.Sprintf(\"%d %s ago\", int64(years), plural(int64(years), \"year\"))\n\t} else if math.Trunc(months) > 0 {\n\t\treturn fmt.Sprintf(\"%d %s ago\", int64(months), plural(int64(months), \"month\"))\n\t} else if math.Trunc(weeks) > 0 {\n\t\treturn fmt.Sprintf(\"%d %s ago\", int64(weeks), plural(int64(weeks), \"week\"))\n\t} else if math.Trunc(days) > 0 {\n\t\treturn fmt.Sprintf(\"%d %s ago\", int64(days), plural(int64(days), \"day\"))\n\t} else if math.Trunc(hours) > 0 {\n\t\treturn fmt.Sprintf(\"%d %s ago\", int64(hours), plural(int64(hours), \"hour\"))\n\t} else if math.Trunc(minutes) > 0 {\n\t\treturn fmt.Sprintf(\"%d %s ago\", int64(minutes), plural(int64(minutes), \"minute\"))\n\t}\n\treturn fmt.Sprintf(\"%d %s ago\", int64(seconds), plural(int64(seconds), \"second\"))\n}", "func (pomo *Pomo) GetDuration() string {\n\n\t// if pomo is off do not output anything\n\tif pomo.Status == OFF {\n\t\treturn \"\"\n\t}\n\n\t// if pomo run out of time that was set\n\t// make a blinking animation and send ntification\n\tif pomo.Time < 0 {\n\n\t\t// if user not notified\n\t\tif !pomo.Notified {\n\n\t\t\t// notify the user\n\t\t\tgo notifyUser(NOTIFICATION_MESSAGE)\n\n\t\t\tpomo.Notified = true\n\t\t}\n\n\t\t// emoji_id is a number between 0 and 1\n\t\temoji_id := (pomo.Time.Milliseconds() / 1000 % 2) * (-1)\n\n\t\treturn fmt.Sprintf(\"%s%s\\n\", pomo.Blink[emoji_id], pomo.Time)\n\t}\n\n\treturn fmt.Sprintf(\"%s%s\\n\", pomo.Emoji, pomo.Time)\n}", "func (d *DurationValue) String() string {\n\treturn (*time.Duration)(d).String()\n}", "func formatDuration(d time.Duration) string {\n\t//calculate values\n\tmilliseconds := int64(d/time.Millisecond) % 1000\n\tseconds := int64(d.Seconds()) % 60\n\tminutes := int64(d.Minutes()) % 60\n\thours := int64(d.Hours()) % 24\n\tvar str string = \"\"\n\t//only add values if they are not 0\n\tif hours != 0 {\n\t\tstr = fmt.Sprintf(\"%s%d%s\", str, hours, \" h \")\n\t}\n\tif minutes != 0 {\n\t\tstr = fmt.Sprintf(\"%s%d%s\", str, minutes, \" 
min \")\n\t}\n\tif seconds != 0 {\n\t\tstr = fmt.Sprintf(\"%s%d%s\", str, seconds, \" sec \")\n\t}\n\tif milliseconds != 0 {\n\t\tstr = fmt.Sprintf(\"%s%d%s\", str, milliseconds, \" ms \")\n\t}\n\t//var str string = fmt.Sprintf(\"%d%s%d%s%d%s%d%s\", hours, \" h \", minutes, \" min \", seconds, \" sec \", milliseconds, \" ms\")\n\treturn str\n}", "func (f *Formatter) Long() string {\n\tdays, hours, mins, secs := resolve(f.duration)\n\treturn fmt.Sprintf(\"%d days %d hours %d minutes %d seconds\\n\", days, hours, mins, secs)\n}", "func (d *Duration) String() string {\n\treturn d.valueString\n}", "func (d Duration) StringUsingUnits(unit units.Unit) string {\n\treturn d.convert(units.Second, unit).toString()\n}", "func (tu TimeUnit) String() string {\n\tswitch tu {\n\tcase Nanoseconds:\n\t\treturn \"ns\"\n\tcase Microseconds:\n\t\treturn \"µs\"\n\tcase Milliseconds:\n\t\treturn \"ms\"\n\tcase Seconds:\n\t\treturn \"s\"\n\tdefault:\n\t\treturn \"*\" + time.Duration(tu).String()\n\t}\n}", "func DurationInWords(d time.Duration) string {\n\n\tif d >= time.Second && d <= (time.Second*4) {\n\t\treturn fmt.Sprintf(lssthnd, 5, \"seconds\")\n\t} else if d >= (time.Second*5) && d < (time.Second*10) {\n\t\treturn fmt.Sprintf(lssthnd, 10, \"seconds\")\n\t} else if d >= (time.Second*10) && d < (time.Second*20) {\n\t\treturn fmt.Sprintf(lssthnd, 20, \"seconds\")\n\t} else if d >= (time.Second*20) && d < (time.Second*40) {\n\t\treturn \"half a minute\"\n\t} else if d >= (time.Second*40) && d < (time.Second*60) {\n\t\treturn fmt.Sprintf(lssthns, \"minute\")\n\t} else if d >= (time.Second*60) && d < time.Minute+(time.Second*30) {\n\t\treturn \"1 minute\"\n\t} else if d >= time.Minute+(time.Second*30) && d < (time.Minute*44)+(time.Second*30) {\n\t\treturn fmt.Sprintf(\"%d minutes\", (d / time.Minute))\n\t} else if d >= (time.Minute*44)+(time.Second*30) && d < (time.Minute*89)+(time.Second*30) {\n\t\treturn fmt.Sprintf(aboutnd, d/time.Hour, \"hour\")\n\t} else if d >= 
(time.Minute*89)+(time.Second*30) && d < (time.Hour*29)+(time.Minute*59)+(time.Second*30) {\n\t\treturn fmt.Sprintf(aboutnd, (d / time.Hour), \"hours\")\n\t} else if d >= (time.Hour*23)+(time.Minute*59)+(time.Second*30) && d < (time.Hour*41)+(time.Minute*59)+(time.Second*30) {\n\t\treturn \"1 day\"\n\t} else if d >= (time.Hour*41)+(time.Minute*59)+(time.Second*30) && d < (day*29)+(time.Hour*23)+(time.Minute*59)+(time.Second*30) {\n\t\treturn fmt.Sprintf(\"%d days\", d/(time.Hour*24))\n\t} else if d >= (day*29)+(time.Hour*23)+(time.Minute*59)+(time.Second*30) && d < (day*59)+(time.Hour*23)+(time.Minute*59)+(time.Second*30) {\n\t\treturn fmt.Sprintf(aboutnd, 1, \"month\")\n\t} else if d >= (day*59)+(time.Hour*23)+(time.Minute*59)+(time.Second*30) && d < (year) {\n\t\treturn fmt.Sprintf(aboutnd, d/month+1, \"months\")\n\t} else if d >= year && d < year+(3*month) {\n\t\treturn fmt.Sprintf(aboutnd, 1, \"year\")\n\t} else if d >= year+(3*month) && d < year+(9*month) {\n\t\treturn \"over 1 year\"\n\t} else if d >= year+(9*month) && d < (year*2) {\n\t\treturn \"almost 2 years\"\n\t} else {\n\t\treturn fmt.Sprintf(aboutnd, d/year, \"years\")\n\t}\n}", "func main() {\n\tconst now = 1589570165\n\n\ttimeStamp := time.Unix(now, 0).UTC()\n\n\tscanner := bufio.NewScanner(os.Stdin)\n\tscanner.Scan()\n\ts := scanner.Text()\n\n\telapsedTime := strings.Replace(s, \"мин.\", \"m\", 1)\n\telapsedTime = strings.Replace(elapsedTime, \"сек.\", \"s\", 1)\n\telapsedTime = strings.Replace(elapsedTime, \" \", \"\", -1)\n\n\tdur, err := time.ParseDuration(elapsedTime)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t//dur.Round(time.Hour).Hours()\n\tunitedDate := timeStamp.Add(dur)\n\n\tfmt.Println(unitedDate.Format(time.UnixDate))\n\n}", "func formatDuration(d time.Duration) string {\n\tif d >= time.Hour {\n\t\treturn fmt.Sprintf(\"%6.02fh \", d.Seconds()/3600)\n\t} else if d >= time.Minute {\n\t\treturn fmt.Sprintf(\"%6.02fm \", d.Seconds()/60)\n\t} else if d >= time.Second {\n\t\treturn 
fmt.Sprintf(\"%6.02fs \", d.Seconds())\n\t} else if d >= time.Millisecond {\n\t\treturn fmt.Sprintf(\"%6.02fms\", d.Seconds()/time.Millisecond.Seconds())\n\t}\n\n\treturn fmt.Sprintf(\"%6.02fµs\", d.Seconds()/time.Microsecond.Seconds())\n}", "func (india indianTimeZones) Mauritius() string {return \"Indian/Mauritius\" }", "func formatDuration(d time.Duration) string {\n\tif d > time.Second {\n\t\td = d - d%(100*time.Millisecond)\n\t}\n\treturn d.String()\n}", "func (d Duration) String() string {\n\tif d.IsUnknown() {\n\t\treturn attr.UnknownValueString\n\t}\n\n\tif d.IsNull() {\n\t\treturn attr.NullValueString\n\t}\n\n\treturn d.value.String()\n}", "func (me TdurationType) String() string { return xsdt.Nmtoken(me).String() }", "func youtubeTimify(seconds int) string {\n\tjustSeconds := seconds % 60\n\tjustMinutes := (seconds - justSeconds) / 60\n\n\tsecondsString := \"\"\n\tminutesString := \"\"\n\n\tif justSeconds < 10 {\n\t\tsecondsString = fmt.Sprintf(\"0%d\", justSeconds)\n\t} else {\n\t\tsecondsString = fmt.Sprintf(\"%d\", justSeconds)\n\t}\n\n\tif justMinutes < 10 {\n\t\tminutesString = fmt.Sprintf(\"0%d\", justMinutes)\n\t} else {\n\t\tminutesString = fmt.Sprintf(\"%d\", justMinutes)\n\t}\n\n\treturn fmt.Sprintf(\"%s:%s\", minutesString, secondsString)\n}", "func (me TimePeriod) String() string { return xsdt.String(me).String() }", "func durationTo8601Seconds(duration time.Duration) string {\n\treturn fmt.Sprintf(\"PT%dS\", duration/time.Second)\n}", "func durString(d time.Duration) string {\n\tswitch d {\n\tcase ndp.Infinity:\n\t\treturn \"infinite\"\n\tdefault:\n\t\treturn d.String()\n\t}\n}", "func (o QuotaLimitResponseOutput) Duration() pulumi.StringOutput {\n\treturn o.ApplyT(func(v QuotaLimitResponse) string { return v.Duration }).(pulumi.StringOutput)\n}", "func HumanizeDuration(duration *time.Duration) string {\n\tif duration.Seconds() < 60.0 {\n\t\treturn fmt.Sprintf(\"%d seconds\", int64(duration.Seconds()))\n\t}\n\tif duration.Minutes() < 60.0 
{\n\t\t//remainingSeconds := math.Mod(duration.Seconds(), 60)\n\t\treturn fmt.Sprintf(\"%d min\", int64(duration.Minutes()))\n\t}\n\tif duration.Hours() < 24.0 {\n\t\tremainingMinutes := math.Mod(duration.Minutes(), 60)\n\t\t// remainingSeconds := math.Mod(duration.Seconds(), 60)\n\t\treturn fmt.Sprintf(\"%d hours %d min\",\n\t\t\tint64(duration.Hours()), int64(remainingMinutes))\n\t}\n\tremainingHours := math.Mod(duration.Hours(), 24)\n\tremainingMinutes := math.Mod(duration.Minutes(), 60)\n\t// remainingSeconds := math.Mod(duration.Seconds(), 60)\n\treturn fmt.Sprintf(\"%d days %d hours %d minutes\",\n\t\tint64(duration.Hours()/24), int64(remainingHours),\n\t\tint64(remainingMinutes))\n}", "func (dur ISODuration) String() string {\n\tif dur == zeroDur {\n\t\treturn \"P0D\"\n\t}\n\n\tvar b strings.Builder\n\tb.WriteRune('P')\n\n\tif dur.Years > 0 {\n\t\tfmt.Fprintf(&b, \"%dY\", dur.Years)\n\t}\n\tif dur.Months > 0 {\n\t\tfmt.Fprintf(&b, \"%dM\", dur.Months)\n\t}\n\tif dur.Days/7 > 0 {\n\t\tfmt.Fprintf(&b, \"%dW\", dur.Days/7)\n\t\tdur.Days %= 7\n\t}\n\tif dur.Days > 0 {\n\t\tfmt.Fprintf(&b, \"%dD\", dur.Days)\n\t}\n\n\tif dur.TimePart == 0 {\n\t\treturn b.String()\n\t}\n\n\tb.WriteRune('T')\n\n\tif dur.TimePart/time.Hour > 0 {\n\t\tfmt.Fprintf(&b, \"%dH\", dur.TimePart/time.Hour)\n\t\tdur.TimePart %= time.Hour\n\t}\n\n\tif dur.TimePart/time.Minute > 0 {\n\t\tfmt.Fprintf(&b, \"%dM\", dur.TimePart/time.Minute)\n\t\tdur.TimePart %= time.Minute\n\t}\n\n\tif dur.TimePart.Seconds() > 0 {\n\t\tsec := dur.TimePart.Seconds()\n\t\t// round to microseconds\n\t\tsec = math.Round(sec*1e6) / 1e6\n\t\tfmt.Fprintf(&b, \"%gS\", sec)\n\t}\n\n\treturn b.String()\n}", "func (i ISODuration) String() string {\r\n\treturn i.duration.String()\r\n}", "func FormatDuration(d time.Duration) string {\n\ts := d.String()\n\tif strings.HasSuffix(s, \"µs\") {\n\t\t// for µs we don't want fractions\n\t\tparts := strings.Split(s, \".\")\n\t\tif len(parts) > 1 {\n\t\t\treturn parts[0] + \" 
µs\"\n\t\t}\n\t\treturn strings.ReplaceAll(s, \"µs\", \" µs\")\n\t} else if strings.HasSuffix(s, \"ms\") {\n\t\t// for ms we only want 2 digit fractions\n\t\tparts := strings.Split(s, \".\")\n\t\t//fmt.Printf(\"fmtDur: '%s' => %#v\\n\", s, parts)\n\t\tif len(parts) > 1 {\n\t\t\ts2 := parts[1]\n\t\t\tif len(s2) > 4 {\n\t\t\t\t// 2 for \"ms\" and 2+ for fraction\n\t\t\t\tres := parts[0] + \".\" + s2[:2] + \" ms\"\n\t\t\t\t//fmt.Printf(\"fmtDur: s2: '%s', res: '%s'\\n\", s2, res)\n\t\t\t\treturn res\n\t\t\t}\n\t\t}\n\t\treturn strings.ReplaceAll(s, \"ms\", \" ms\")\n\t}\n\treturn s\n}", "func (f *Formatter) Seconds() string {\n\tvar format string\n\tif f.withoutUnit {\n\t\tformat = \"%d\\n\"\n\t} else {\n\t\tformat = \"%d seconds\\n\"\n\t}\n\treturn fmt.Sprintf(format, int(f.duration.Seconds()))\n}", "func (o TopicRuleTimestreamTimestampOutput) Unit() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TopicRuleTimestreamTimestamp) string { return v.Unit }).(pulumi.StringOutput)\n}", "func getDurationStringFromSeconds(seconds int) string {\n\treturn (time.Duration(seconds) * time.Second).String()\n}", "func DurationToString(dt time.Duration) string {\n\th := dt / time.Hour\n\thm := dt % time.Hour\n\td := h / 24\n\tdh := dt % (24 * time.Hour)\n\n\t// If matches whole day\n\tif d > 0 && dh == 0 {\n\t\treturn fmt.Sprintf(\"%dd\", d)\n\t}\n\n\t// If matches whole hour\n\tif h > 0 && hm == 0 {\n\t\treturn fmt.Sprintf(\"%dh\", h)\n\t}\n\n\t// Otherwise make into minutes\n\tm := dt / time.Minute\n\tif m > 0 {\n\t\treturn fmt.Sprintf(\"%dm\", m)\n\t}\n\n\treturn \"1m\"\n}", "func ShortDuration(d time.Duration) string {\n\tvar s string\n\tswitch {\n\tcase d < time.Microsecond:\n\t\treturn d.String()\n\tcase d < time.Millisecond:\n\t\treturn d.Round(time.Microsecond / 10).String()\n\tcase d < time.Second:\n\t\treturn d.Round(time.Millisecond / 10).String()\n\tcase d < time.Minute:\n\t\treturn d.Round(time.Second / 10).String()\n\tcase d < time.Hour:\n\t\ts = 
d.Round(time.Second).String()\n\tcase d < time.Hour*24:\n\t\ts = d.Round(time.Second).String()\n\tdefault:\n\t\tdays := float64(d) / float64(time.Hour*24)\n\t\td %= (time.Hour * 24)\n\t\ts = fmt.Sprintf(\"%dd%s\", int(days), d.Round(time.Minute).String())\n\t}\n\tif len(s) > 2 {\n\t\ts = strings.TrimSuffix(s, \"0s\")\n\t}\n\tif len(s) > 2 {\n\t\ts = strings.TrimSuffix(s, \"0m\")\n\t}\n\treturn s\n}", "func (r WebRestrictions) TimeDescription() string {\n\tintClass := r.getTimeClass()\n\tswitch intClass {\n\tcase 0:\n\t\treturn \"This token has an infinite lifetime!\"\n\tcase 1:\n\t\treturn \"This token is long-lived.\"\n\tcase 2:\n\t\treturn \"This token will expire within 7days.\"\n\tdefault:\n\t\treturn \"\"\n\t}\n}", "func (dv *DurationValue) String() string {\n\treturn dv.get().String()\n}", "func formatDuration(d time.Duration) string {\n\ts := \"\"\n\tif d < 0 {\n\t\td = -d\n\t\ts = \"-\"\n\t}\n\tif d < day {\n\t\t// handle several special cases\n\t\treturn s + d.String()\n\t}\n\t// Now we know days are the most significant unit,\n\t// so all other units should be present\n\tr := d\n\tdays := int64(r / day)\n\tr %= day // remainder\n\ts += fmt.Sprint(days) + \"d\"\n\thours := int64(r / time.Hour)\n\tr %= time.Hour\n\ts += fmt.Sprint(hours) + \"h\"\n\tminutes := int64(r / time.Minute)\n\tr %= time.Minute\n\ts += fmt.Sprint(minutes) + \"m\"\n\tseconds := int64(r / time.Second)\n\tr %= time.Second\n\ts += fmt.Sprint(seconds) // no suffix yet\n\tif r != 0 {\n\t\t// Follow normal Duration formatting, but need to avoid\n\t\t// the special handling of fractional seconds.\n\t\tr += time.Second\n\t\tfrac := r.String()\n\t\t// append skipping the leading \"1\"\n\t\treturn s + frac[1:] // adds the suffix\n\t}\n\treturn s + \"s\" // now the suffix\n}", "func (d NameableDuration) String() string {\n\tif len(d.Name) > 0 {\n\t\treturn d.Name\n\t}\n\treturn d.Duration.String()\n}", "func sheetDurationFormat(d time.Duration) string {\n\tseconds := int64(d.Seconds()) % 
60\n\tminutes := int64(d.Minutes()) % 60\n\thours := int64(d.Hours())\n\treturn fmt.Sprintf(\"%02d:%02d:%02d.000\", hours, minutes, seconds)\n}", "func prettyDuration(t int64) string {\n\tif t > 1000000000 {\n\t\treturn fmt.Sprintf(\"%.2fs\", float64(t)/float64(1000000000))\n\t}\n\treturn fmt.Sprintf(\"%.2fms\", float64(t)/float64(1000000))\n}", "func (f *Formatter) Short() string {\n\tdays, hours, mins, secs := resolve(f.duration)\n\treturn fmt.Sprintf(\"%dd%dh%dm%ds\\n\", days, hours, mins, secs)\n}", "func (t *Timer) String() string {\n\treturn fmt.Sprintf(\"%.2fs\", t.Elapsed().Seconds())\n}", "func fmtDuration(dur time.Duration) string {\n\tremainingDur := dur.Round(time.Second)\n\thours := remainingDur / time.Hour\n\tremainingDur -= hours * time.Hour\n\tminutes := remainingDur / time.Minute\n\tremainingDur -= minutes * time.Minute\n\tseconds := remainingDur / time.Second\n\treturn fmt.Sprintf(\"%02d:%02d:%02d\", hours, minutes, seconds)\n}", "func EncodeDuration(d time.Duration) string {\n\treturn d.String()\n}", "func (rs *Restake) Duration() uint32 { return rs.duration }", "func (o TopicRuleErrorActionTimestreamTimestampOutput) Unit() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TopicRuleErrorActionTimestreamTimestamp) string { return v.Unit }).(pulumi.StringOutput)\n}", "func fmtDuration(d time.Duration) string {\n\treturn fmt.Sprintf(\"%.2fs\", d.Seconds())\n}", "func (o InstanceMaintenancePolicyWeeklyMaintenanceWindowOutput) Duration() pulumi.StringOutput {\n\treturn o.ApplyT(func(v InstanceMaintenancePolicyWeeklyMaintenanceWindow) string { return v.Duration }).(pulumi.StringOutput)\n}", "func (pacif pacificTimeZones) Majuro() string {return \"Pacific/Majuro\" }", "func (europ europeDeprecatedTimeZones) Ljubljana() string { return \"Europe/Belgrade\" }", "func humanizeDuration(d time.Duration) string {\n\tif d%(24*time.Hour) == 0 {\n\t\treturn fmt.Sprintf(\"%dd\", d/(24*time.Hour))\n\t}\n\ts := d.String()\n\tif strings.HasSuffix(s, \"m0s\") {\n\t\ts = 
s[:len(s)-2]\n\t}\n\tif strings.HasSuffix(s, \"h0m\") {\n\t\ts = s[:len(s)-2]\n\t}\n\treturn s\n}", "func (europ europeTimeZones) Ljubljana() string {return \"Europe/Ljubljana\" }", "func (t TimeCode) String() string {\n\n\tswitch t.FramesPerSecond {\n\tcase 29:\n\t\treturn fmt.Sprintf(\"SMPTE30DropFrame %v subframes\", t.SubFrames)\n\tdefault:\n\t\treturn fmt.Sprintf(\"SMPTE%v %v subframes\", t.FramesPerSecond, t.SubFrames)\n\t}\n\n}", "func DurClock(d time.Duration) string {\n\th := int(d.Hours())\n\tm := int(d.Minutes())\n\ts := int(d.Seconds())\n\tn := int(d.Nanoseconds()) - 1000000000*s\n\ts = s - 60*m\n\tm = m - 60*h\n\tswitch {\n\tcase h > 0:\n\t\treturn fmt.Sprintf(\"%d:%02d:%02d.%09d\", h, m, s, n)\n\tcase m > 0:\n\t\treturn fmt.Sprintf(\"%d:%02d.%09d\", m, s, n)\n\tdefault:\n\t\treturn fmt.Sprintf(\"%d.%09d\", s, n)\n\t}\n}", "func FormatDuration(duration time.Duration) string {\n\tnow := time.Now()\n\treturn formatDurationFrom(now, now.Add(duration), relative)\n}", "func (europ europeDeprecatedTimeZones) Vaduz() string { return \"Europe/Zurich\" }", "func (india indianTimeZones) Maldives() string {return \"Indian/Maldives\" }", "func (europ europeTimeZones) Vaduz() string {return \"Europe/Vaduz\" }", "func durToMsec(dur time.Duration) string {\n\treturn fmt.Sprintf(\"%dms\", dur/time.Millisecond)\n}", "func (asiaD asiaDeprecatedTimeZones) Tel_Aviv() string { return \"Asia/Jerusalem\" }", "func DurationCompute(start, end uint8) string {\n\treturn fmt.Sprintf(\"0天%s时\", strconv.Itoa(int(end-start)))\n}", "func (s UtteranceAggregationDuration) String() string {\n\treturn awsutil.Prettify(s)\n}", "func PrettyDuration(duration time.Duration) string {\n\tseconds := duration.Seconds()\n\n\tremainingSeconds := int64(seconds) % 60\n\tremainingMinutes := (int64(seconds) - remainingSeconds) / 60\n\n\tif remainingMinutes > 0 {\n\t\treturn fmt.Sprintf(\"%d min %d sec\", remainingMinutes, remainingSeconds)\n\t}\n\n\treturn fmt.Sprintf(\"%d sec\", 
remainingSeconds)\n}", "func (t Time) String() string {\n\treturn string(t.AppendTo(make([]byte, 0, 8), ':'))\n}", "func (antar antarcticaTimeZones) DumontDUrville() string {return \"Antarctica/DumontDUrville\" }", "func Duration(key string, val time.Duration) zap.Field {\n\t// Don't use the duration field as its encoder translates to seconds only: https://github.com/uber-go/zap/issues/649\n\treturn zap.String(key, val.String())\n}", "func TTLString(ttl time.Duration) string {\n\tstr := ttl.String()\n\tstr = strings.TrimSuffix(str, \"0s\")\n\tstr = strings.TrimSuffix(str, \"0m\")\n\tstr = strings.TrimSuffix(str, \"0h\")\n\treturn str\n}", "func (o QuotaLimitOutput) Duration() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v QuotaLimit) *string { return v.Duration }).(pulumi.StringPtrOutput)\n}", "func ResetDuration(s string) string {\n\ts = dddPattern.ReplaceAllString(s, \"0.00s\")\n\ts = ddddPattern.ReplaceAllString(s, \"0.000s\")\n\treturn elapsedTimePattern.ReplaceAllString(s, \"elapsed time: 0.000000 sec\")\n}", "func SerializeDuration(this time.Duration) (interface{}, error) {\n\t// Seriously questioning my life choices.\n\ts := \"P\"\n\tif this < 0 {\n\t\ts = \"-P\"\n\t\tthis = -1 * this\n\t}\n\tvar tally time.Duration\n\t// Assume 8760 Hours per 365 days, cannot account for leap years in xsd:duration. :(\n\tif years := this.Hours() / 8760.0; years >= 1 {\n\t\tnYears := int64(math.Floor(years))\n\t\ttally += time.Duration(nYears) * 8760 * time.Hour\n\t\ts = fmt.Sprintf(\"%s%dY\", s, nYears)\n\t}\n\t// Assume 30 days per month, cannot account for months lasting 31, 30, 29, or 28 days in xsd:duration. 
:(\n\tif months := (this.Hours() - tally.Hours()) / 720.0; months >= 1 {\n\t\tnMonths := int64(math.Floor(months))\n\t\ttally += time.Duration(nMonths) * 720 * time.Hour\n\t\ts = fmt.Sprintf(\"%s%dM\", s, nMonths)\n\t}\n\tif days := (this.Hours() - tally.Hours()) / 24.0; days >= 1 {\n\t\tnDays := int64(math.Floor(days))\n\t\ttally += time.Duration(nDays) * 24 * time.Hour\n\t\ts = fmt.Sprintf(\"%s%dD\", s, nDays)\n\t}\n\tif tally < this {\n\t\ts = fmt.Sprintf(\"%sT\", s)\n\t\tif hours := this.Hours() - tally.Hours(); hours >= 1 {\n\t\t\tnHours := int64(math.Floor(hours))\n\t\t\ttally += time.Duration(nHours) * time.Hour\n\t\t\ts = fmt.Sprintf(\"%s%dH\", s, nHours)\n\t\t}\n\t\tif minutes := this.Minutes() - tally.Minutes(); minutes >= 1 {\n\t\t\tnMinutes := int64(math.Floor(minutes))\n\t\t\ttally += time.Duration(nMinutes) * time.Minute\n\t\t\ts = fmt.Sprintf(\"%s%dM\", s, nMinutes)\n\t\t}\n\t\tif seconds := this.Seconds() - tally.Seconds(); seconds >= 1 {\n\t\t\tnSeconds := int64(math.Floor(seconds))\n\t\t\ttally += time.Duration(nSeconds) * time.Second\n\t\t\ts = fmt.Sprintf(\"%s%dS\", s, nSeconds)\n\t\t}\n\t}\n\treturn s, nil\n}", "func (i Interval) String() string {\n\treturn i.Duration().String()\n}", "func (d duration) pretty() string {\n\treturn fmt.Sprintf(\"Duration: %d\", &d) // modify *duration and *d => &d\n}", "func (europ europeTimeZones) Uzhgorod() string {return \"Europe/Uzhgorod\" }", "func sigarUptimeFormatString(u sigar.Uptime) string {\n\tuptime := uint64(u.Length)\n\tdays := uptime / (60 * 60 * 24)\n\n\ts := \"\"\n\tif days != 0 {\n\t\tend := \"\"\n\t\tif days > 1 {\n\t\t\tend = \"s\"\n\t\t}\n\t\ts = fmt.Sprintf(\"%d day%s, \", days, end)\n\t}\n\n\tminutes := uptime / 60\n\thours := minutes / 60\n\thours %= 24\n\tminutes %= 60\n\n\ts += fmt.Sprintf(\"%2d:%02d\", hours, minutes)\n\treturn s\n}", "func (depre deprecatedTimeZones) Portugal() string { return \"Europe/Lisbon\" }" ]
[ "0.6247409", "0.61673176", "0.58863103", "0.58602095", "0.58570576", "0.5843788", "0.57781535", "0.57749504", "0.5758013", "0.5729817", "0.57266206", "0.56786615", "0.5660766", "0.5638222", "0.5626445", "0.56179184", "0.5606179", "0.5606179", "0.5599463", "0.5569892", "0.55640453", "0.5545954", "0.55390596", "0.55295146", "0.5515231", "0.54898804", "0.54733604", "0.54682493", "0.54663175", "0.5458455", "0.5456727", "0.5436488", "0.53917474", "0.53804", "0.53803456", "0.537419", "0.5367474", "0.5359971", "0.53497475", "0.53417945", "0.5310136", "0.5307308", "0.52668095", "0.52640045", "0.5248128", "0.522205", "0.5221641", "0.5207936", "0.5193512", "0.5176288", "0.5172156", "0.5168414", "0.5155577", "0.51531744", "0.5123564", "0.51206374", "0.51027644", "0.5096362", "0.5093389", "0.5089337", "0.50892574", "0.5081852", "0.5078592", "0.506966", "0.5066787", "0.50611323", "0.50591683", "0.505366", "0.5052106", "0.50488216", "0.5035967", "0.50338244", "0.50334936", "0.50326717", "0.5027183", "0.50184304", "0.501824", "0.4998587", "0.4997873", "0.49963462", "0.4980385", "0.4979485", "0.49717644", "0.49695233", "0.49612328", "0.49587935", "0.49574086", "0.49535823", "0.4953042", "0.49459925", "0.49384418", "0.49374208", "0.49307323", "0.49233577", "0.49113134", "0.49036768", "0.49007234", "0.48994386", "0.48861915", "0.48667613" ]
0.5642375
13
Returns a string representation of the approximate duration in russian language
func (d Duration) StringApproximate() (result string) { var seconds, minutes, hours, days, months, years int seconds = int(d.Seconds()) if seconds > 60 { minutes = int(d.Minutes()) } if minutes > 59 { hours = int(d.Hours()) minutes = minutes - hours*60 } if hours > 24 { days = (hours - hours%24) / 24 hours = hours - days*24 } if days > 365 { years = (days - days%365) / 365 days = days - years*365 } if days > 30 { months = (days - days%30) / 30 days = days - months*30 } if years > 0 { if months < 3 { result = numberInString(years, false) + " " + yearsTail(years) } else { result = "Более" if years > 1 { result = " " + strings.ToLower(numberStringInGenitiveCase(years, false)) } result += " " + strings.ToLower(numberStringInGenitiveCase(years, false)) + " " + strings.ToLower(yearsTailInGenitiveCase(years)) } } else if months > 0 { if days < 8 { result = numberInString(months, false) + " " + monthsTail(months) } else { result = "Более" if months > 1 { result = " " + strings.ToLower(numberStringInGenitiveCase(months, false)) } result += " " + strings.ToLower(numberStringInGenitiveCase(months, false)) + " " + strings.ToLower(monthsTailInGenitiveCase(months)) } } else if days > 0 { if hours < 5 { result = numberInString(days, false) + " " + daysTail(days) } else { result = "Более " if days == 1 { result += "суток" } else { result += strings.ToLower(numberStringInGenitiveCase(days, false)) + " суток" } } } else if hours > 0 { if minutes < 16 { result = numberInString(hours, false) + " " + hoursTail(hours) } else { result = "Более " if hours == 1 { result += "часа" } else { result += strings.ToLower(numberStringInGenitiveCase(hours, false)) result += " " + strings.ToLower(hoursTailInGenitiveCase(hours)) } } } else if minutes > 0 { if minutes == 1 { result = "Минуту" } else { result = numberInString(minutes, true) + " " + minutesTail(minutes) } } else { result = "Менее минуты" } result += " назад" return }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func toElapsedLabel(rfc850time string) string {\n\tcreated, err := time.Parse(time.RFC850, rfc850time)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\telapsed := time.Now().UTC().Sub(created.UTC())\n\tseconds := elapsed.Seconds()\n\tminutes := elapsed.Minutes()\n\thours := elapsed.Hours()\n\tdays := hours / 24\n\tweeks := days / 7\n\tmonths := weeks / 4\n\tyears := months / 12\n\n\tif math.Trunc(years) > 0 {\n\t\treturn fmt.Sprintf(\"%d %s ago\", int64(years), plural(int64(years), \"year\"))\n\t} else if math.Trunc(months) > 0 {\n\t\treturn fmt.Sprintf(\"%d %s ago\", int64(months), plural(int64(months), \"month\"))\n\t} else if math.Trunc(weeks) > 0 {\n\t\treturn fmt.Sprintf(\"%d %s ago\", int64(weeks), plural(int64(weeks), \"week\"))\n\t} else if math.Trunc(days) > 0 {\n\t\treturn fmt.Sprintf(\"%d %s ago\", int64(days), plural(int64(days), \"day\"))\n\t} else if math.Trunc(hours) > 0 {\n\t\treturn fmt.Sprintf(\"%d %s ago\", int64(hours), plural(int64(hours), \"hour\"))\n\t} else if math.Trunc(minutes) > 0 {\n\t\treturn fmt.Sprintf(\"%d %s ago\", int64(minutes), plural(int64(minutes), \"minute\"))\n\t}\n\treturn fmt.Sprintf(\"%d %s ago\", int64(seconds), plural(int64(seconds), \"second\"))\n}", "func (d Duration) String() string {}", "func (d *DataGenerator) getDuration() string {\n\t// ISO 8601 format\n\t// P3Y6M4DT12H30M5S = three years, six months, four days, twelve hours, thirty minutes, and five seconds\n\thr := rand.Int31n(12)\n\tmin := rand.Int31n(60)\n\tsec := rand.Int31n(60)\n\treturn fmt.Sprintf(\"P0Y0M0DT%dH%dM%dS\", hr, min, sec)\n}", "func main() {\n\tconst now = 1589570165\n\n\ttimeStamp := time.Unix(now, 0).UTC()\n\n\tscanner := bufio.NewScanner(os.Stdin)\n\tscanner.Scan()\n\ts := scanner.Text()\n\n\telapsedTime := strings.Replace(s, \"мин.\", \"m\", 1)\n\telapsedTime = strings.Replace(elapsedTime, \"сек.\", \"s\", 1)\n\telapsedTime = strings.Replace(elapsedTime, \" \", \"\", -1)\n\n\tdur, err := time.ParseDuration(elapsedTime)\n\tif err != nil 
{\n\t\tpanic(err)\n\t}\n\t//dur.Round(time.Hour).Hours()\n\tunitedDate := timeStamp.Add(dur)\n\n\tfmt.Println(unitedDate.Format(time.UnixDate))\n\n}", "func (india indianTimeZones) Mauritius() string {return \"Indian/Mauritius\" }", "func durationToWord(in Interval) string {\n\tswitch in {\n\tcase FifteenSecond:\n\t\treturn \"fifteensecond\"\n\tcase OneMin:\n\t\treturn \"onemin\"\n\tcase ThreeMin:\n\t\treturn \"threemin\"\n\tcase FiveMin:\n\t\treturn \"fivemin\"\n\tcase TenMin:\n\t\treturn \"tenmin\"\n\tcase FifteenMin:\n\t\treturn \"fifteenmin\"\n\tcase ThirtyMin:\n\t\treturn \"thirtymin\"\n\tcase OneHour:\n\t\treturn \"onehour\"\n\tcase TwoHour:\n\t\treturn \"twohour\"\n\tcase FourHour:\n\t\treturn \"fourhour\"\n\tcase SixHour:\n\t\treturn \"sixhour\"\n\tcase EightHour:\n\t\treturn \"eighthour\"\n\tcase TwelveHour:\n\t\treturn \"twelvehour\"\n\tcase OneDay:\n\t\treturn \"oneday\"\n\tcase ThreeDay:\n\t\treturn \"threeday\"\n\tcase FifteenDay:\n\t\treturn \"fifteenday\"\n\tcase OneWeek:\n\t\treturn \"oneweek\"\n\tcase TwoWeek:\n\t\treturn \"twoweek\"\n\tcase OneMonth:\n\t\treturn \"onemonth\"\n\tcase OneYear:\n\t\treturn \"oneyear\"\n\tdefault:\n\t\treturn \"notfound\"\n\t}\n}", "func DescDuration(d time.Duration) string {\n\tif d < time.Minute {\n\t\treturn fmt.Sprintf(\"%0.1f sec ago\", d.Seconds())\n\t} else if d < time.Hour {\n\t\treturn fmt.Sprintf(\"%0.1f min ago\", d.Minutes())\n\t} else if d < time.Hour*24 {\n\t\treturn fmt.Sprintf(\"%0.1f hrs ago\", d.Hours())\n\t} else {\n\t\treturn fmt.Sprintf(\"%0.1f days ago\", d.Hours()/24.0)\n\t}\n}", "func DurationInWords(d time.Duration) string {\n\n\tif d >= time.Second && d <= (time.Second*4) {\n\t\treturn fmt.Sprintf(lssthnd, 5, \"seconds\")\n\t} else if d >= (time.Second*5) && d < (time.Second*10) {\n\t\treturn fmt.Sprintf(lssthnd, 10, \"seconds\")\n\t} else if d >= (time.Second*10) && d < (time.Second*20) {\n\t\treturn fmt.Sprintf(lssthnd, 20, \"seconds\")\n\t} else if d >= (time.Second*20) && d < 
(time.Second*40) {\n\t\treturn \"half a minute\"\n\t} else if d >= (time.Second*40) && d < (time.Second*60) {\n\t\treturn fmt.Sprintf(lssthns, \"minute\")\n\t} else if d >= (time.Second*60) && d < time.Minute+(time.Second*30) {\n\t\treturn \"1 minute\"\n\t} else if d >= time.Minute+(time.Second*30) && d < (time.Minute*44)+(time.Second*30) {\n\t\treturn fmt.Sprintf(\"%d minutes\", (d / time.Minute))\n\t} else if d >= (time.Minute*44)+(time.Second*30) && d < (time.Minute*89)+(time.Second*30) {\n\t\treturn fmt.Sprintf(aboutnd, d/time.Hour, \"hour\")\n\t} else if d >= (time.Minute*89)+(time.Second*30) && d < (time.Hour*29)+(time.Minute*59)+(time.Second*30) {\n\t\treturn fmt.Sprintf(aboutnd, (d / time.Hour), \"hours\")\n\t} else if d >= (time.Hour*23)+(time.Minute*59)+(time.Second*30) && d < (time.Hour*41)+(time.Minute*59)+(time.Second*30) {\n\t\treturn \"1 day\"\n\t} else if d >= (time.Hour*41)+(time.Minute*59)+(time.Second*30) && d < (day*29)+(time.Hour*23)+(time.Minute*59)+(time.Second*30) {\n\t\treturn fmt.Sprintf(\"%d days\", d/(time.Hour*24))\n\t} else if d >= (day*29)+(time.Hour*23)+(time.Minute*59)+(time.Second*30) && d < (day*59)+(time.Hour*23)+(time.Minute*59)+(time.Second*30) {\n\t\treturn fmt.Sprintf(aboutnd, 1, \"month\")\n\t} else if d >= (day*59)+(time.Hour*23)+(time.Minute*59)+(time.Second*30) && d < (year) {\n\t\treturn fmt.Sprintf(aboutnd, d/month+1, \"months\")\n\t} else if d >= year && d < year+(3*month) {\n\t\treturn fmt.Sprintf(aboutnd, 1, \"year\")\n\t} else if d >= year+(3*month) && d < year+(9*month) {\n\t\treturn \"over 1 year\"\n\t} else if d >= year+(9*month) && d < (year*2) {\n\t\treturn \"almost 2 years\"\n\t} else {\n\t\treturn fmt.Sprintf(aboutnd, d/year, \"years\")\n\t}\n}", "func (s TtlDuration) String() string {\n\treturn awsutil.Prettify(s)\n}", "func RenderDuration(d time.Duration) string {\n\tif d == math.MaxInt64 {\n\t\treturn \"never\"\n\t}\n\n\tif d == 0 {\n\t\treturn \"forever\"\n\t}\n\n\ttsecs := d / time.Second\n\ttmins := 
tsecs / 60\n\tthrs := tmins / 60\n\ttdays := thrs / 24\n\ttyrs := tdays / 365\n\n\tif tyrs > 0 {\n\t\treturn fmt.Sprintf(\"%dy%dd%dh%dm%ds\", tyrs, tdays%365, thrs%24, tmins%60, tsecs%60)\n\t}\n\n\tif tdays > 0 {\n\t\treturn fmt.Sprintf(\"%dd%dh%dm%ds\", tdays, thrs%24, tmins%60, tsecs%60)\n\t}\n\n\tif thrs > 0 {\n\t\treturn fmt.Sprintf(\"%dh%dm%ds\", thrs, tmins%60, tsecs%60)\n\t}\n\n\tif tmins > 0 {\n\t\treturn fmt.Sprintf(\"%dm%ds\", tmins, tsecs%60)\n\t}\n\n\treturn fmt.Sprintf(\"%.2fs\", d.Seconds())\n}", "func (hms HHMMSS) String() string {\n\treturn time.Duration(hms).String()\n}", "func (d Duration) String() string {\n\tvalue := int64(d)\n\tout := \"\"\n\tif value < 0 {\n\t\tout = \"-\"\n\t\tvalue = -value\n\t}\n\tdivmod := func(divisor, dividend int64) (int64, int64) {\n\t\treturn divisor / dividend, divisor % dividend\n\t}\n\textract := func(symbol string, unit int64) {\n\t\tvar units int64\n\t\tunits, value = divmod(value, unit)\n\t\tif units > 0 {\n\t\t\tout += fmt.Sprintf(\"%d%s\", units, symbol)\n\t\t}\n\t}\n\textract(\"y\", Year)\n\textract(\"m\", Month)\n\textract(\"d\", Day)\n\tif value > 0 {\n\t\tout += \"t\"\n\t}\n\textract(\"h\", Hour)\n\textract(\"m\", Minute)\n\textract(\"s\", Second)\n\textract(\"us\", Microsecond)\n\n\tif out == \"\" {\n\t\t// input duration was 0\n\t\tout = \"t0s\" // seconds are the fundamental unit\n\t}\n\n\treturn out\n}", "func (b *Build) HumanDuration() string {\n\td := time.Duration(b.Duration)\n\tif seconds := int(d.Seconds()); seconds < 1 {\n\t\treturn \"Less than a second\"\n\t} else if seconds < 60 {\n\t\treturn fmt.Sprintf(\"%d seconds\", seconds)\n\t} else if minutes := int(d.Minutes()); minutes == 1 {\n\t\treturn \"About a minute\"\n\t} else if minutes < 60 {\n\t\treturn fmt.Sprintf(\"%d minutes\", minutes)\n\t} else if hours := int(d.Hours()); hours == 1 {\n\t\treturn \"About an hour\"\n\t} else if hours < 48 {\n\t\treturn fmt.Sprintf(\"%d hours\", hours)\n\t} else if hours < 24*7*2 {\n\t\treturn 
fmt.Sprintf(\"%d days\", hours/24)\n\t} else if hours < 24*30*3 {\n\t\treturn fmt.Sprintf(\"%d weeks\", hours/24/7)\n\t} else if hours < 24*365*2 {\n\t\treturn fmt.Sprintf(\"%d months\", hours/24/30)\n\t}\n\treturn fmt.Sprintf(\"%f years\", d.Hours()/24/365)\n}", "func (pomo *Pomo) GetDuration() string {\n\n\t// if pomo is off do not output anything\n\tif pomo.Status == OFF {\n\t\treturn \"\"\n\t}\n\n\t// if pomo run out of time that was set\n\t// make a blinking animation and send ntification\n\tif pomo.Time < 0 {\n\n\t\t// if user not notified\n\t\tif !pomo.Notified {\n\n\t\t\t// notify the user\n\t\t\tgo notifyUser(NOTIFICATION_MESSAGE)\n\n\t\t\tpomo.Notified = true\n\t\t}\n\n\t\t// emoji_id is a number between 0 and 1\n\t\temoji_id := (pomo.Time.Milliseconds() / 1000 % 2) * (-1)\n\n\t\treturn fmt.Sprintf(\"%s%s\\n\", pomo.Blink[emoji_id], pomo.Time)\n\t}\n\n\treturn fmt.Sprintf(\"%s%s\\n\", pomo.Emoji, pomo.Time)\n}", "func (channelInfo ChannelInfo) GetStreamDuration() string {\n\n\tif !channelInfo.StreamStatus.Online {\n\t\treturn \"\"\n\t}\n\tminutePrefix := \"минут\"\n\thourPrefix := \"часов\"\n\tduration := time.Now().Sub(channelInfo.StreamStatus.Start)\n\tminutes := float64(int(duration.Minutes() - math.Floor(duration.Minutes()/60)*60))\n\thours := float64(int(duration.Hours()))\n\tif math.Floor(minutes/10) != 1 {\n\t\tswitch int(minutes - math.Floor(minutes/10)*10) {\n\t\tcase 1:\n\t\t\tminutePrefix = \"минуту\"\n\t\t\tbreak\n\t\tcase 2:\n\t\tcase 3:\n\t\tcase 4:\n\t\t\tminutePrefix = \"минуты\"\n\t\t}\n\t}\n\n\tif int(math.Floor(hours/10)) != 1 {\n\t\tswitch int(hours - math.Floor(hours/10)*10) {\n\t\tcase 1:\n\t\t\thourPrefix = \"час\"\n\t\t\tbreak\n\t\tcase 2:\n\t\tcase 3:\n\t\tcase 4:\n\t\t\thourPrefix = \"часа\"\n\t\t}\n\t}\n\tif int(minutes) == 0 {\n\t\treturn fmt.Sprintf(\"%d %s\", int(hours), hourPrefix)\n\n\t}\n\tif int(hours) == 0 {\n\t\treturn fmt.Sprintf(\"%d %s\", int(minutes), minutePrefix)\n\t}\n\treturn fmt.Sprintf(\"%d %s %d %s\", 
int(hours), hourPrefix, int(minutes), minutePrefix)\n\n}", "func Duration(d time.Duration) string {\n\treturn d.String()\n}", "func (t Time) StringEN() string {\n\treturn t.In(time.UTC).Format(time.RFC1123Z)\n}", "func DurationCompute(start, end uint8) string {\n\treturn fmt.Sprintf(\"0天%s时\", strconv.Itoa(int(end-start)))\n}", "func (d *Duration) String() string {\n\treturn time.Duration(*d).String()\n}", "func prettyDuration(t int64) string {\n\tif t > 1000000000 {\n\t\treturn fmt.Sprintf(\"%.2fs\", float64(t)/float64(1000000000))\n\t}\n\treturn fmt.Sprintf(\"%.2fms\", float64(t)/float64(1000000))\n}", "func (t *Timer) String() string {\n\treturn fmt.Sprintf(\"%.2fs\", t.Elapsed().Seconds())\n}", "func (tso TimeWebsmsShortOne) String() string { return time.Time(tso).Format(timeWebsmsShortOneFormat) }", "func (d Duration) String() string {\n\tsign := \"\"\n\tif d.Seconds < 0 && d.Nanos > 0 {\n\t\td.Seconds++\n\t\td.Nanos = int(time.Second) - d.Nanos\n\n\t\tif d.Seconds == 0 {\n\t\t\tsign = \"-\"\n\t\t}\n\t}\n\n\ttimePart := \"\"\n\tif d.Nanos == 0 {\n\t\ttimePart = fmt.Sprintf(\"%s%d\", sign, d.Seconds)\n\t} else {\n\t\ttimePart = fmt.Sprintf(\"%s%d.%09d\", sign, d.Seconds, d.Nanos)\n\t}\n\n\treturn fmt.Sprintf(\"P%dM%dDT%sS\", d.Months, d.Days, timePart)\n}", "func (d Duration) String() string {\n\treturn time.Duration(d * 1000).String()\n}", "func getEstimate(length float64) string {\n\t// Convert length to hours\n\thours := math.Floor(length / 3600)\n\t// Convert length to minutes\n\tminutes := (int(length) % 3600) / 60\n\n\t// Convert to strings and add leading zeros\n\tvar strMinutes, strHours string\n\tif hours < 10 {\n\t\tstrHours = \"0\" + strconv.FormatFloat(hours, 'f', -1, 64)\n\t} else {\n\t\tstrHours = strconv.FormatFloat(hours, 'f', -1, 64)\n\t}\n\n\tif minutes < 10 {\n\t\tstrMinutes = \"0\" + strconv.Itoa(minutes)\n\t} else {\n\t\tstrMinutes = strconv.Itoa(minutes)\n\t}\n\n\t// Return string formated estimate\n\treturn strHours + \":\" + 
strMinutes\n}", "func timeInWords(h int32, m int32) string {\n if m == 0 {\n return numberToString(h) + \" o' clock\"\n }\n\n var next_h int32 = h + 1\n if h == 12 {\n next_h = 1\n }\n\n var min_str = \"minutes\" \n if m == 1 {\n min_str = \"minute\"\n }\n\n if m % 15 == 0 {\n switch m {\n case 15:\n return \"quarter past \" + numberToString(h)\n case 30:\n return \"half past \" + numberToString(h)\n case 45:\n return \"quarter to \" + numberToString(next_h)\n }\n } else if m > 30 {\n return numberToString(60 - m) + \" \" + min_str + \" to \" + numberToString(next_h)\n }\n \n return numberToString(m) + \" \" + min_str + \" past \" + numberToString(h)\n}", "func getElapsed(start time.Time) string {\n\treturn fmt.Sprintf(\"(%.3fs)\", time.Since(start).Seconds())\n}", "func TestTimeUnit_String(t *testing.T) {\n\ttests := []struct {\n\t\tu arrow.TimeUnit\n\t\texp string\n\t}{\n\t\t{arrow.Nanosecond, \"ns\"},\n\t\t{arrow.Microsecond, \"us\"},\n\t\t{arrow.Millisecond, \"ms\"},\n\t\t{arrow.Second, \"s\"},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(test.exp, func(t *testing.T) {\n\t\t\tassert.Equal(t, test.exp, test.u.String())\n\t\t})\n\t}\n}", "func (tu TimeUnit) String() string {\n\tswitch tu {\n\tcase Nanoseconds:\n\t\treturn \"ns\"\n\tcase Microseconds:\n\t\treturn \"µs\"\n\tcase Milliseconds:\n\t\treturn \"ms\"\n\tcase Seconds:\n\t\treturn \"s\"\n\tdefault:\n\t\treturn \"*\" + time.Duration(tu).String()\n\t}\n}", "func (pacif pacificTimeZones) Majuro() string {return \"Pacific/Majuro\" }", "func (d Duration) String() string {\n\treturn d.duration\n}", "func (s Duration) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (antar antarcticaTimeZones) DumontDUrville() string {return \"Antarctica/DumontDUrville\" }", "func (mes *MarkerEncodingScheme) TimeUnit() Marker { return mes.timeUnit }", "func DurationToString(duration common.Duration) string {\n\t// Shortcut for zero duration\n\tif duration.IsZero() {\n\t\treturn \"0초\"\n\t}\n\tslices := make([]string, 
0, 4)\n\t// Using strconv.Itoa is faster than fmt.Sprintf in this case\n\tif duration.Day != 0 {\n\t\tslices = append(slices, strconv.Itoa(duration.Day)+\"일\")\n\t}\n\tif duration.Hour != 0 {\n\t\tslices = append(slices, strconv.Itoa(duration.Hour)+\"시간\")\n\t}\n\tif duration.Minute != 0 {\n\t\tslices = append(slices, strconv.Itoa(duration.Minute)+\"분\")\n\t}\n\tif duration.Second != 0 {\n\t\tslices = append(slices, strconv.Itoa(duration.Second)+\"초\")\n\t}\n\n\treturn strings.Join(slices, \" \")\n}", "func HumanizeDuration(duration *time.Duration) string {\n\tif duration.Seconds() < 60.0 {\n\t\treturn fmt.Sprintf(\"%d seconds\", int64(duration.Seconds()))\n\t}\n\tif duration.Minutes() < 60.0 {\n\t\t//remainingSeconds := math.Mod(duration.Seconds(), 60)\n\t\treturn fmt.Sprintf(\"%d min\", int64(duration.Minutes()))\n\t}\n\tif duration.Hours() < 24.0 {\n\t\tremainingMinutes := math.Mod(duration.Minutes(), 60)\n\t\t// remainingSeconds := math.Mod(duration.Seconds(), 60)\n\t\treturn fmt.Sprintf(\"%d hours %d min\",\n\t\t\tint64(duration.Hours()), int64(remainingMinutes))\n\t}\n\tremainingHours := math.Mod(duration.Hours(), 24)\n\tremainingMinutes := math.Mod(duration.Minutes(), 60)\n\t// remainingSeconds := math.Mod(duration.Seconds(), 60)\n\treturn fmt.Sprintf(\"%d days %d hours %d minutes\",\n\t\tint64(duration.Hours()/24), int64(remainingHours),\n\t\tint64(remainingMinutes))\n}", "func formatDuration(d time.Duration) string {\n\tif d >= time.Hour {\n\t\treturn fmt.Sprintf(\"%6.02fh \", d.Seconds()/3600)\n\t} else if d >= time.Minute {\n\t\treturn fmt.Sprintf(\"%6.02fm \", d.Seconds()/60)\n\t} else if d >= time.Second {\n\t\treturn fmt.Sprintf(\"%6.02fs \", d.Seconds())\n\t} else if d >= time.Millisecond {\n\t\treturn fmt.Sprintf(\"%6.02fms\", d.Seconds()/time.Millisecond.Seconds())\n\t}\n\n\treturn fmt.Sprintf(\"%6.02fµs\", d.Seconds()/time.Microsecond.Seconds())\n}", "func formatDuration(d time.Duration) string {\n\t//calculate values\n\tmilliseconds := 
int64(d/time.Millisecond) % 1000\n\tseconds := int64(d.Seconds()) % 60\n\tminutes := int64(d.Minutes()) % 60\n\thours := int64(d.Hours()) % 24\n\tvar str string = \"\"\n\t//only add values if they are not 0\n\tif hours != 0 {\n\t\tstr = fmt.Sprintf(\"%s%d%s\", str, hours, \" h \")\n\t}\n\tif minutes != 0 {\n\t\tstr = fmt.Sprintf(\"%s%d%s\", str, minutes, \" min \")\n\t}\n\tif seconds != 0 {\n\t\tstr = fmt.Sprintf(\"%s%d%s\", str, seconds, \" sec \")\n\t}\n\tif milliseconds != 0 {\n\t\tstr = fmt.Sprintf(\"%s%d%s\", str, milliseconds, \" ms \")\n\t}\n\t//var str string = fmt.Sprintf(\"%d%s%d%s%d%s%d%s\", hours, \" h \", minutes, \" min \", seconds, \" sec \", milliseconds, \" ms\")\n\treturn str\n}", "func (europ europeTimeZones) Ljubljana() string {return \"Europe/Ljubljana\" }", "func durString(d time.Duration) string {\n\tswitch d {\n\tcase ndp.Infinity:\n\t\treturn \"infinite\"\n\tdefault:\n\t\treturn d.String()\n\t}\n}", "func (india indianTimeZones) Maldives() string {return \"Indian/Maldives\" }", "func (f *Formatter) Long() string {\n\tdays, hours, mins, secs := resolve(f.duration)\n\treturn fmt.Sprintf(\"%d days %d hours %d minutes %d seconds\\n\", days, hours, mins, secs)\n}", "func (europ europeTimeZones) Vaduz() string {return \"Europe/Vaduz\" }", "func (d Duration) StringUsingUnits(unit units.Unit) string {\n\treturn d.convert(units.Second, unit).toString()\n}", "func (europ europeDeprecatedTimeZones) Vaduz() string { return \"Europe/Zurich\" }", "func (d Duration) String() (result string) {\n\tvar seconds, minutes, hours int\n\tseconds = int(d.Seconds())\n\tif seconds > 60 {\n\t\tminutes = (seconds - seconds%60) / 60\n\t\tseconds = seconds % 60\n\t}\n\tif minutes > 59 {\n\t\thours = (minutes - minutes%60) / 60\n\t\tminutes = minutes - hours*60\n\t\tresult = numberInString(hours, false)\n\t\tresult += \" \" + hoursTail(hours)\n\t}\n\tif minutes != 0 {\n\t\tif result != \"\" {\n\t\t\tresult += \", \"\n\t\t}\n\t\tresult += 
strings.ToLower(numberInString(minutes, true))\n\t\tresult += \" \" + minutesTail(minutes)\n\t}\n\tif seconds != 0 {\n\t\tif result != \"\" {\n\t\t\tresult += \", \"\n\t\t}\n\t\tresult += strings.ToLower(numberInString(seconds, true))\n\t\tresult += \" \" + secondsTail(seconds)\n\n\t}\n\treturn\n}", "func (europ europeDeprecatedTimeZones) Ljubljana() string { return \"Europe/Belgrade\" }", "func (o ResourcePolicyWeeklyCycleDayOfWeekResponseOutput) Duration() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ResourcePolicyWeeklyCycleDayOfWeekResponse) string { return v.Duration }).(pulumi.StringOutput)\n}", "func (d Duration) String() string {\n\treturn d.toString()\n}", "func (o ResourcePolicyHourlyCycleResponseOutput) Duration() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ResourcePolicyHourlyCycleResponse) string { return v.Duration }).(pulumi.StringOutput)\n}", "func DurationString(duration time.Duration) string {\n\tdurSecs := int64(duration.Seconds())\n\n\tdurStr := \"\"\n\tif durSecs > 0 {\n\t\tif durSecs%SecsPerDay == 0 {\n\t\t\t// convert to days\n\t\t\tdurStr = fmt.Sprintf(\"%dd\", durSecs/SecsPerDay)\n\t\t} else if durSecs%SecsPerHour == 0 {\n\t\t\t// convert to hours\n\t\t\tdurStr = fmt.Sprintf(\"%dh\", durSecs/SecsPerHour)\n\t\t} else if durSecs%SecsPerMin == 0 {\n\t\t\t// convert to mins\n\t\t\tdurStr = fmt.Sprintf(\"%dm\", durSecs/SecsPerMin)\n\t\t} else if durSecs > 0 {\n\t\t\t// default to mins, as long as duration is positive\n\t\t\tdurStr = fmt.Sprintf(\"%ds\", durSecs)\n\t\t}\n\t}\n\n\treturn durStr\n}", "func FormatDuration(d time.Duration) string {\n\ts := d.String()\n\tif strings.HasSuffix(s, \"µs\") {\n\t\t// for µs we don't want fractions\n\t\tparts := strings.Split(s, \".\")\n\t\tif len(parts) > 1 {\n\t\t\treturn parts[0] + \" µs\"\n\t\t}\n\t\treturn strings.ReplaceAll(s, \"µs\", \" µs\")\n\t} else if strings.HasSuffix(s, \"ms\") {\n\t\t// for ms we only want 2 digit fractions\n\t\tparts := strings.Split(s, 
\".\")\n\t\t//fmt.Printf(\"fmtDur: '%s' => %#v\\n\", s, parts)\n\t\tif len(parts) > 1 {\n\t\t\ts2 := parts[1]\n\t\t\tif len(s2) > 4 {\n\t\t\t\t// 2 for \"ms\" and 2+ for fraction\n\t\t\t\tres := parts[0] + \".\" + s2[:2] + \" ms\"\n\t\t\t\t//fmt.Printf(\"fmtDur: s2: '%s', res: '%s'\\n\", s2, res)\n\t\t\t\treturn res\n\t\t\t}\n\t\t}\n\t\treturn strings.ReplaceAll(s, \"ms\", \" ms\")\n\t}\n\treturn s\n}", "func (d Duration) String() string {\n\treturn d.Dur().String()\n}", "func (d Duration) String() string {\n\treturn time.Duration(d).String()\n}", "func (d Duration) String() string {\n\treturn time.Duration(d).String()\n}", "func fmtDuration(d time.Duration) string {\n\treturn fmt.Sprintf(\"%.2fs\", d.Seconds())\n}", "func (o ResourcePolicyDailyCycleResponseOutput) Duration() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ResourcePolicyDailyCycleResponse) string { return v.Duration }).(pulumi.StringOutput)\n}", "func (europ europeTimeZones) Malta() string {return \"Europe/Malta\" }", "func (b *Bar) TimeElapsedString() string {\n\treturn strutil.PrettyTime(b.TimeElapsed())\n}", "func (d Duration) String() string {\n\tif d == 0 {\n\t\treturn \"0s\"\n\t}\n\tswitch {\n\tcase d%Year == 0:\n\t\treturn fmt.Sprintf(\"%dy\", d/Year)\n\tcase d%Week == 0:\n\t\treturn fmt.Sprintf(\"%dw\", d/Week)\n\tcase d%Day == 0:\n\t\treturn fmt.Sprintf(\"%dd\", d/Day)\n\tcase d%Hour == 0:\n\t\treturn fmt.Sprintf(\"%dh\", d/Hour)\n\tcase d%Minute == 0:\n\t\treturn fmt.Sprintf(\"%dm\", d/Minute)\n\tdefault:\n\t\treturn fmt.Sprintf(\"%ds\", d)\n\t}\n}", "func (asiaT asiaTimeZones) Urumqi() string {return \"Asia/Urumqi\" }", "func (o TopicRuleTimestreamTimestampOutput) Unit() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TopicRuleTimestreamTimestamp) string { return v.Unit }).(pulumi.StringOutput)\n}", "func (asiaD asiaDeprecatedTimeZones) Tel_Aviv() string { return \"Asia/Jerusalem\" }", "func youtubeTimify(seconds int) string {\n\tjustSeconds := seconds % 60\n\tjustMinutes := (seconds - 
justSeconds) / 60\n\n\tsecondsString := \"\"\n\tminutesString := \"\"\n\n\tif justSeconds < 10 {\n\t\tsecondsString = fmt.Sprintf(\"0%d\", justSeconds)\n\t} else {\n\t\tsecondsString = fmt.Sprintf(\"%d\", justSeconds)\n\t}\n\n\tif justMinutes < 10 {\n\t\tminutesString = fmt.Sprintf(\"0%d\", justMinutes)\n\t} else {\n\t\tminutesString = fmt.Sprintf(\"%d\", justMinutes)\n\t}\n\n\treturn fmt.Sprintf(\"%s:%s\", minutesString, secondsString)\n}", "func (india indianTimeZones) Mahe() string {return \"Indian/Mahe\" }", "func (data Film) GetElapsedTimePretty() string {\n\tvar hours, minutes int\n\tif hours = int(data.ViewOffset / 60); hours < 0 {\n\t\thours = 0\n\t}\n\n\tif minutes = int(data.ViewOffset) % 60; minutes < 0 {\n\t\tminutes = 0\n\t}\n\treturn fmt.Sprintf(\"%02d:%02d\", hours, minutes)\n}", "func formatDuration(d time.Duration) string {\n\tif d > time.Second {\n\t\td = d - d%(100*time.Millisecond)\n\t}\n\treturn d.String()\n}", "func (s TimeSpan) String() string {\n\treturn awsutil.Prettify(s)\n}", "func humanizeDuration(d time.Duration) string {\n\tif d%(24*time.Hour) == 0 {\n\t\treturn fmt.Sprintf(\"%dd\", d/(24*time.Hour))\n\t}\n\ts := d.String()\n\tif strings.HasSuffix(s, \"m0s\") {\n\t\ts = s[:len(s)-2]\n\t}\n\tif strings.HasSuffix(s, \"h0m\") {\n\t\ts = s[:len(s)-2]\n\t}\n\treturn s\n}", "func durationTo8601Seconds(duration time.Duration) string {\n\treturn fmt.Sprintf(\"PT%dS\", duration/time.Second)\n}", "func IsoTime() string {\n\tutcTime := time.Now().UTC()\n\tiso := utcTime.String()\n\tisoBytes := []byte(iso)\n\tiso = string(isoBytes[:10]) + \"T\" + string(isoBytes[11:23]) + \"Z\"\n\tlog.Printf(\"value:%f\\n\", math.Sqrt(200))\n\treturn iso\n}", "func (asiaT asiaTimeZones) Kuala_Lumpur() string {return \"Asia/Kuala_Lumpur\" }", "func (dur ISODuration) String() string {\n\tif dur == zeroDur {\n\t\treturn \"P0D\"\n\t}\n\n\tvar b strings.Builder\n\tb.WriteRune('P')\n\n\tif dur.Years > 0 {\n\t\tfmt.Fprintf(&b, \"%dY\", dur.Years)\n\t}\n\tif dur.Months > 
0 {\n\t\tfmt.Fprintf(&b, \"%dM\", dur.Months)\n\t}\n\tif dur.Days/7 > 0 {\n\t\tfmt.Fprintf(&b, \"%dW\", dur.Days/7)\n\t\tdur.Days %= 7\n\t}\n\tif dur.Days > 0 {\n\t\tfmt.Fprintf(&b, \"%dD\", dur.Days)\n\t}\n\n\tif dur.TimePart == 0 {\n\t\treturn b.String()\n\t}\n\n\tb.WriteRune('T')\n\n\tif dur.TimePart/time.Hour > 0 {\n\t\tfmt.Fprintf(&b, \"%dH\", dur.TimePart/time.Hour)\n\t\tdur.TimePart %= time.Hour\n\t}\n\n\tif dur.TimePart/time.Minute > 0 {\n\t\tfmt.Fprintf(&b, \"%dM\", dur.TimePart/time.Minute)\n\t\tdur.TimePart %= time.Minute\n\t}\n\n\tif dur.TimePart.Seconds() > 0 {\n\t\tsec := dur.TimePart.Seconds()\n\t\t// round to microseconds\n\t\tsec = math.Round(sec*1e6) / 1e6\n\t\tfmt.Fprintf(&b, \"%gS\", sec)\n\t}\n\n\treturn b.String()\n}", "func durToMsec(dur time.Duration) string {\n\treturn fmt.Sprintf(\"%dms\", dur/time.Millisecond)\n}", "func (o TopicRuleErrorActionTimestreamTimestampOutput) Unit() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TopicRuleErrorActionTimestreamTimestamp) string { return v.Unit }).(pulumi.StringOutput)\n}", "func SerializeDuration(this time.Duration) (interface{}, error) {\n\t// Seriously questioning my life choices.\n\ts := \"P\"\n\tif this < 0 {\n\t\ts = \"-P\"\n\t\tthis = -1 * this\n\t}\n\tvar tally time.Duration\n\t// Assume 8760 Hours per 365 days, cannot account for leap years in xsd:duration. :(\n\tif years := this.Hours() / 8760.0; years >= 1 {\n\t\tnYears := int64(math.Floor(years))\n\t\ttally += time.Duration(nYears) * 8760 * time.Hour\n\t\ts = fmt.Sprintf(\"%s%dY\", s, nYears)\n\t}\n\t// Assume 30 days per month, cannot account for months lasting 31, 30, 29, or 28 days in xsd:duration. 
:(\n\tif months := (this.Hours() - tally.Hours()) / 720.0; months >= 1 {\n\t\tnMonths := int64(math.Floor(months))\n\t\ttally += time.Duration(nMonths) * 720 * time.Hour\n\t\ts = fmt.Sprintf(\"%s%dM\", s, nMonths)\n\t}\n\tif days := (this.Hours() - tally.Hours()) / 24.0; days >= 1 {\n\t\tnDays := int64(math.Floor(days))\n\t\ttally += time.Duration(nDays) * 24 * time.Hour\n\t\ts = fmt.Sprintf(\"%s%dD\", s, nDays)\n\t}\n\tif tally < this {\n\t\ts = fmt.Sprintf(\"%sT\", s)\n\t\tif hours := this.Hours() - tally.Hours(); hours >= 1 {\n\t\t\tnHours := int64(math.Floor(hours))\n\t\t\ttally += time.Duration(nHours) * time.Hour\n\t\t\ts = fmt.Sprintf(\"%s%dH\", s, nHours)\n\t\t}\n\t\tif minutes := this.Minutes() - tally.Minutes(); minutes >= 1 {\n\t\t\tnMinutes := int64(math.Floor(minutes))\n\t\t\ttally += time.Duration(nMinutes) * time.Minute\n\t\t\ts = fmt.Sprintf(\"%s%dM\", s, nMinutes)\n\t\t}\n\t\tif seconds := this.Seconds() - tally.Seconds(); seconds >= 1 {\n\t\t\tnSeconds := int64(math.Floor(seconds))\n\t\t\ttally += time.Duration(nSeconds) * time.Second\n\t\t\ts = fmt.Sprintf(\"%s%dS\", s, nSeconds)\n\t\t}\n\t}\n\treturn s, nil\n}", "func getDurationStringFromSeconds(seconds int) string {\n\treturn (time.Duration(seconds) * time.Second).String()\n}", "func (europ europeTimeZones) Uzhgorod() string {return \"Europe/Uzhgorod\" }", "func (argen argentinaTimeZones) La_Rioja() string {return \"America/Argentina/La_Rioja\" }", "func (europ europeDeprecatedTimeZones) Vatican() string { return \"Europe/Rome\" }", "func (t *ElapsedTimeout) String() string { return \"<Always elapsed timeout>\" }", "func DurClock(d time.Duration) string {\n\th := int(d.Hours())\n\tm := int(d.Minutes())\n\ts := int(d.Seconds())\n\tn := int(d.Nanoseconds()) - 1000000000*s\n\ts = s - 60*m\n\tm = m - 60*h\n\tswitch {\n\tcase h > 0:\n\t\treturn fmt.Sprintf(\"%d:%02d:%02d.%09d\", h, m, s, n)\n\tcase m > 0:\n\t\treturn fmt.Sprintf(\"%d:%02d.%09d\", m, s, n)\n\tdefault:\n\t\treturn 
fmt.Sprintf(\"%d.%09d\", s, n)\n\t}\n}", "func (o QuotaLimitResponseOutput) Duration() pulumi.StringOutput {\n\treturn o.ApplyT(func(v QuotaLimitResponse) string { return v.Duration }).(pulumi.StringOutput)\n}", "func (europ europeTimeZones) Monaco() string {return \"Europe/Monaco\" }", "func (india indianTimeZones) Kerguelen() string {return \"Indian/Kerguelen\" }", "func (depre deprecatedTimeZones) Portugal() string { return \"Europe/Lisbon\" }", "func (rs *Restake) Duration() uint32 { return rs.duration }", "func (asiaT asiaTimeZones) Tel_Aviv() string {return \"Asia/Tel_Aviv\" }", "func (f *Formatter) Seconds() string {\n\tvar format string\n\tif f.withoutUnit {\n\t\tformat = \"%d\\n\"\n\t} else {\n\t\tformat = \"%d seconds\\n\"\n\t}\n\treturn fmt.Sprintf(format, int(f.duration.Seconds()))\n}", "func (r WebRestrictions) TimeDescription() string {\n\tintClass := r.getTimeClass()\n\tswitch intClass {\n\tcase 0:\n\t\treturn \"This token has an infinite lifetime!\"\n\tcase 1:\n\t\treturn \"This token is long-lived.\"\n\tcase 2:\n\t\treturn \"This token will expire within 7days.\"\n\tdefault:\n\t\treturn \"\"\n\t}\n}", "func (r Rest) Duration(measure time.Duration) time.Duration {\n\tif Duration(r) == None {\n\t\treturn 0\n\t}\n\t//the fraction of the measure the note takes\n\tfraq := 1. 
/ math.Pow(2., float64(r))\n\n\treturn time.Duration(float64(measure) * fraq)\n}", "func total(data []int64) string {\n var total int64\n for _, n := range data {\n total += n\n }\n duration, _ := time.ParseDuration(fmt.Sprintf(\"%dns\", total))\n return duration.String()\n}", "func adjTime(context interface{}, value string) (time.Time, error) {\n\n\t// The default value is in seconds unless overridden.\n\t// #time:0 Current date/time\n\t// #time:-3600 3600 seconds in the past\n\t// #time:3m\t\t3 minutes in the future.\n\n\t// Possible duration types.\n\t// \"ns\": int64(Nanosecond),\n\t// \"us\": int64(Microsecond),\n\t// \"ms\": int64(Millisecond),\n\t// \"s\": int64(Second),\n\t// \"m\": int64(Minute),\n\t// \"h\": int64(Hour),\n\n\t// Do we have a single value?\n\tif len(value) == 1 {\n\t\tval, err := strconv.Atoi(value[0:1])\n\t\tif err != nil {\n\t\t\treturn time.Time{}.UTC(), fmt.Errorf(\"Invalid duration : %q\", value[0:1])\n\t\t}\n\n\t\tif val == 0 {\n\t\t\treturn time.Now().UTC(), nil\n\t\t}\n\n\t\treturn time.Now().Add(time.Duration(val) * time.Second).UTC(), nil\n\t}\n\n\t// Do we have a duration type and where does the\n\t// actual duration value end\n\tvar typ string\n\tvar end int\n\n\t// The end byte position for the last character in the string.\n\tePos := len(value) - 1\n\n\t// Look at the very last character.\n\tt := value[ePos:]\n\tswitch t {\n\n\t// Is this a minute or hour? [3m]\n\tcase \"m\", \"h\":\n\t\ttyp = t\n\t\tend = ePos // Position of last chr in value.\n\n\t// Is this a second or other duration? [3s or 3us]\n\tcase \"s\":\n\t\ttyp = t // s for 3s\n\t\tend = ePos // 3 for 3s\n\n\t\t// Is this smaller than a second? 
[ns, us, ms]\n\t\tif len(value) > 2 {\n\t\t\tt := value[ePos-1 : ePos]\n\t\t\tswitch t {\n\t\t\tcase \"n\", \"u\", \"m\":\n\t\t\t\ttyp = value[ePos-1:] // us for 3us\n\t\t\t\tend = ePos - 1 // 3 for 3us\n\t\t\t}\n\t\t}\n\n\tdefault:\n\t\ttyp = \"s\" // s for 3600\n\t\tend = ePos + 1 // 0 for 3600\n\t}\n\n\t// Check if we are to negative the value.\n\tvar start int\n\tif value[0] == '-' {\n\t\tstart = 1\n\t}\n\n\t// Check the remaining bytes is an integer value.\n\tval, err := strconv.Atoi(value[start:end])\n\tif err != nil {\n\t\treturn time.Time{}.UTC(), fmt.Errorf(\"Invalid duration : %q\", value[start:end])\n\t}\n\n\t// Do we have to negate the value?\n\tif start == 1 {\n\t\tval *= -1\n\t}\n\n\t// Calcuate the time value.\n\tswitch typ {\n\tcase \"ns\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Nanosecond).UTC(), nil\n\tcase \"us\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Microsecond).UTC(), nil\n\tcase \"ms\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Millisecond).UTC(), nil\n\tcase \"m\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Minute).UTC(), nil\n\tcase \"h\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Hour).UTC(), nil\n\tdefault:\n\t\treturn time.Now().Add(time.Duration(val) * time.Second).UTC(), nil\n\t}\n}", "func (d *Duration) String() string {\n\treturn d.valueString\n}", "func (d *DurationValue) String() string {\n\treturn (*time.Duration)(d).String()\n}", "func (europ europeTimeZones) Rome() string {return \"Europe/Rome\" }", "func (afric africaTimeZones) Timbuktu() string {return \"Africa/Timbuktu\" }", "func (milTz militaryTimeZones) Delta() string {return \"Etc/GMT+4\" }", "func (me TdurationType) String() string { return xsdt.Nmtoken(me).String() }", "func (f *Formatter) Short() string {\n\tdays, hours, mins, secs := resolve(f.duration)\n\treturn fmt.Sprintf(\"%dd%dh%dm%ds\\n\", days, hours, mins, secs)\n}" ]
[ "0.6004345", "0.5819394", "0.5691862", "0.5682199", "0.5630542", "0.55865556", "0.5574526", "0.5537256", "0.5509831", "0.5462976", "0.5461879", "0.5460645", "0.54367495", "0.54135704", "0.5379238", "0.53715813", "0.5345213", "0.5343836", "0.5315461", "0.5292297", "0.52745247", "0.52656907", "0.52570057", "0.5249038", "0.5244291", "0.52386904", "0.5230712", "0.5228613", "0.52250206", "0.5201621", "0.51941705", "0.51890695", "0.51811546", "0.51800233", "0.5176985", "0.5174878", "0.51720273", "0.51710683", "0.5170255", "0.51698166", "0.51663756", "0.51661164", "0.5162093", "0.5161556", "0.5159461", "0.5156062", "0.5151611", "0.51430887", "0.51380527", "0.5131289", "0.51221687", "0.51129967", "0.5112552", "0.5107185", "0.5107185", "0.50901866", "0.5082965", "0.50805354", "0.5073621", "0.506971", "0.50611067", "0.5048214", "0.50434357", "0.5040333", "0.50244874", "0.502169", "0.5021245", "0.5018559", "0.50147974", "0.5009875", "0.50046235", "0.50030106", "0.50000507", "0.49794367", "0.49696404", "0.49533218", "0.4952362", "0.4950943", "0.4950255", "0.49375802", "0.49221885", "0.49160585", "0.49138945", "0.49086997", "0.49006402", "0.49003437", "0.48990023", "0.48946908", "0.4891345", "0.48859444", "0.4881399", "0.48650306", "0.48612577", "0.48610148", "0.48560697", "0.48559156", "0.48553053", "0.4853455", "0.4851063", "0.4842885" ]
0.6517362
0
AddLabel adds a label to the specified PR or issue
func (fc *fakeClient) AddLabel(owner, repo string, number int, label string) error { fc.added = append(fc.added, label) fc.labels = append(fc.labels, label) return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (issue *Issue) AddLabel(labels []string) error {\n\tfor i, val := range labels {\n\t\tlabels[i] = fmt.Sprintf(`{\"add\": \"%s\"}`, val)\n\t}\n\treturn updateLabelsHelper(labels, issue.Key)\n}", "func (issue *Issue) AddLabel(labels []string) error {\n\tfor i, val := range labels {\n\t\tlabels[i] = fmt.Sprintf(`{\"add\": \"%s\"}`, val)\n\t}\n\treturn updateLabelsHelper(labels, issue.Key)\n}", "func (c *client) AddLabel(org, repo string, number int, label string) error {\n\treturn c.AddLabelWithContext(context.Background(), org, repo, number, label)\n}", "func (c *client) AddRepoLabel(org, repo, label, description, color string) error {\n\tdurationLogger := c.log(\"AddRepoLabel\", org, repo, label, description, color)\n\tdefer durationLogger()\n\n\t_, err := c.request(&request{\n\t\tmethod: http.MethodPost,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/labels\", org, repo),\n\t\taccept: \"application/vnd.github.symmetra-preview+json\", // allow the description field -- https://developer.github.com/changes/2018-02-22-label-description-search-preview/\n\t\torg: org,\n\t\trequestBody: Label{Name: label, Description: description, Color: color},\n\t\texitCodes: []int{201},\n\t}, nil)\n\treturn err\n}", "func AddLabel(ctx context.Context, obj *Object, key, value string, override bool) error {\n\tif key == \"\" || value == \"\" {\n\t\treturn fmt.Errorf(\"key and value cannot be empty\")\n\t}\n\n\tif err := addToNestedMap(obj, key, value, override, \"metadata\", \"labels\"); err != nil {\n\t\treturn err\n\t}\n\n\tvar nestedFields []string\n\tswitch kind := ObjectKind(obj); kind {\n\tcase \"CronJob\":\n\t\tnestedFields = []string{\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"labels\"}\n\tcase \"DaemonSet\", \"Deployment\", \"Job\", \"ReplicaSet\", \"ReplicationController\", \"StatefulSet\":\n\t\tnestedFields = []string{\"spec\", \"template\", \"metadata\", \"labels\"}\n\tdefault:\n\t\treturn nil\n\t}\n\tif err := addToNestedMap(obj, key, value, override, 
nestedFields...); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func AddLabel(obj mftest.Object, label string, value string) mftest.Object {\n\tlabels := obj.GetLabels()\n\tif labels == nil {\n\t\tobj.SetLabels(make(map[string]string))\n\t}\n\tobj.GetLabels()[label] = value\n\treturn obj\n}", "func addLabelsToPullRequest(prInfo *PullRequestInfo, labels []string) error {\n\tif prInfo == nil {\n\t\treturn errors.New(\"pull request to label cannot be nil\")\n\t}\n\tpr := prInfo.PullRequest\n\tprovider := prInfo.GitProvider\n\n\tif len(labels) > 0 {\n\t\tnumber := *pr.Number\n\t\tvar err error\n\t\terr = provider.AddLabelsToIssue(pr.Owner, pr.Repo, number, labels)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Logger().Infof(\"Added label %s to Pull Request %s\", util.ColorInfo(strings.Join(labels, \", \")), pr.URL)\n\t}\n\treturn nil\n}", "func (b *Bot) Label(ctx context.Context) error {\n\tfiles, err := b.c.GitHub.ListFiles(ctx,\n\t\tb.c.Environment.Organization,\n\t\tb.c.Environment.Repository,\n\t\tb.c.Environment.Number)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\n\tlabels, err := b.labels(ctx, files)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif len(labels) == 0 {\n\t\treturn nil\n\t}\n\n\terr = b.c.GitHub.AddLabels(ctx,\n\t\tb.c.Environment.Organization,\n\t\tb.c.Environment.Repository,\n\t\tb.c.Environment.Number,\n\t\tlabels)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\n\treturn nil\n}", "func (t *AuroraTask) AddLabel(key string, value string) *AuroraTask {\n\tt.task.Metadata = append(t.task.Metadata, &aurora.Metadata{Key: key, Value: value})\n\treturn t\n}", "func (j *AuroraJob) AddLabel(key string, value string) Job {\n\tif _, ok := j.metadata[key]; !ok {\n\t\tj.metadata[key] = &aurora.Metadata{Key: key}\n\t\tj.jobConfig.TaskConfig.Metadata = append(j.jobConfig.TaskConfig.Metadata, j.metadata[key])\n\t}\n\n\tj.metadata[key].Value = value\n\treturn j\n}", "func (s *Service) AddLabel(label *model.Label) (err 
error) {\n\tif _, err = s.dao.QueryLabel(label); err == nil {\n\t\terr = ecode.MelloiLabelExistErr\n\t\treturn\n\t}\n\tlabel.Active = 1\n\treturn s.dao.AddLabel(label)\n}", "func (i *Icon) AddLabel(label string) {\n\ti.Config.Label = label\n}", "func (m *MockClient) AddRepoLabel(org, repo, label, description, color string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AddRepoLabel\", org, repo, label, description, color)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func NewIssueAddLabelOK() *IssueAddLabelOK {\n\treturn &IssueAddLabelOK{}\n}", "func (m *MockRepositoryClient) AddRepoLabel(org, repo, label, description, color string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AddRepoLabel\", org, repo, label, description, color)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func AddLabelToResource(spec interface{}, key string, val string) error {\n\tif obj, ok := spec.(*v1.PersistentVolumeClaim); ok {\n\t\tif obj.Labels == nil {\n\t\t\tobj.Labels = make(map[string]string)\n\t\t}\n\t\tlogrus.Infof(\"Adding label [%s=%s] to PVC %s\", key, val, obj.Name)\n\t\tobj.Labels[key] = val\n\t\tcore.Instance().UpdatePersistentVolumeClaim(obj)\n\t\treturn nil\n\t} else if obj, ok := spec.(*v1.ConfigMap); ok {\n\t\tif obj.Labels == nil {\n\t\t\tobj.Labels = make(map[string]string)\n\t\t}\n\t\tlogrus.Infof(\"Adding label [%s=%s] to ConfigMap %s\", key, val, obj.Name)\n\t\tobj.Labels[key] = val\n\t\tcore.Instance().UpdateConfigMap(obj)\n\t\treturn nil\n\t} else if obj, ok := spec.(*v1.Secret); ok {\n\t\tif obj.Labels == nil {\n\t\t\tobj.Labels = make(map[string]string)\n\t\t}\n\t\tlogrus.Infof(\"Adding label [%s=%s] to Secret %s\", key, val, obj.Name)\n\t\tobj.Labels[key] = val\n\t\tcore.Instance().UpdateSecret(obj)\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"spec is of unknown resource type\")\n}", "func (ls *LabelService) AddLabel(\n\tlabelID string,\n\tlabelParams *map[string]interface{},\n) (labeledResources []*types.LabeledResource, err error) 
{\n\tlog.Debug(\"AddLabel\")\n\n\tdata, status, err := ls.concertoService.Post(fmt.Sprintf(APIPathLabelResources, labelID), labelParams)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = utils.CheckStandardStatus(status, data); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = json.Unmarshal(data, &labeledResources); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn labeledResources, nil\n}", "func (m *MockRepositoryClient) AddLabel(org, repo string, number int, label string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AddLabel\", org, repo, number, label)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func HasLabel(label string, issueLabels []*github.Label) bool {\n\tfor _, l := range issueLabels {\n\t\tif strings.ToLower(l.GetName()) == strings.ToLower(label) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (m *MockClient) AddLabel(org, repo string, number int, label string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AddLabel\", org, repo, number, label)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (p PRMirror) CreateLabel(labelText string, labelColour string) bool {\n\tlabel := github.Label{\n\t\tName: &labelText,\n\t\tColor: &labelColour,\n\t}\n\n\t_, _, err := p.GitHubClient.Issues.CreateLabel(*p.Context, p.Configuration.DownstreamOwner, p.Configuration.DownstreamRepo, &label)\n\tif err != nil {\n\t\tlog.Errorf(\"Error while creating a label - %s\", err.Error())\n\t\treturn false\n\t}\n\n\treturn true\n}", "func AddLabel(label types.Label) ([]byte, error) {\n\tlog.Trace.Printf(\"Adding the following label: %+v\", label)\n\tvar ret []byte\n\tvar err error\n\n\tif err = store.DB.Create(&label).Error; err == nil {\n\t\tlog.Trace.Printf(\"Successfully added the label to the database: %+v\", label)\n\t\tret, err = json.Marshal(label)\n\t} else {\n\t\tlog.Warning.Printf(err.Error())\n\t}\n\n\treturn ret, err\n}", "func (mr *MockRepositoryClientMockRecorder) AddLabel(org, repo, number, label interface{}) 
*gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AddLabel\", reflect.TypeOf((*MockRepositoryClient)(nil).AddLabel), org, repo, number, label)\n}", "func (p PRMirror) AddLabels(id int, labels []string) bool {\n\t_, _, err := p.GitHubClient.Issues.AddLabelsToIssue(*p.Context, p.Configuration.DownstreamOwner, p.Configuration.DownstreamRepo, id, labels)\n\tif err != nil {\n\t\tlog.Errorf(\"Error while adding a label on issue#:%d - %s\", id, err.Error())\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (d *DeviceInfo) AddLabel(name, value string) *DeviceInfo {\n\tm := d.Labels\n\tif m == nil {\n\t\tm = make(map[string]string)\n\t\td.Labels = m\n\t}\n\tm[name] = value\n\treturn d\n}", "func (b *profileBuilder) pbLabel(tag int, key, str string, num int64) {\n\tstart := b.pb.startMessage()\n\tb.pb.int64Opt(tagLabel_Key, b.stringIndex(key))\n\tb.pb.int64Opt(tagLabel_Str, b.stringIndex(str))\n\tb.pb.int64Opt(tagLabel_Num, num)\n\tb.pb.endMessage(tag, start)\n}", "func (r *Repo) CreateLabel(params *github.CreateLabelParams) (*github.Label, error) {\n\treturn r.cli.CreateLabel(r.path, params)\n}", "func (p *plugin) Label(instance instance.ID, labels map[string]string) error {\n\treturn fmt.Errorf(\"VMware vSphere VM label updates are not implemented yet\")\n}", "func AddPhotoLabel(router *gin.RouterGroup) {\n\trouter.POST(\"/photos/:uid/label\", func(c *gin.Context) {\n\t\ts := Auth(SessionID(c), acl.ResourcePhotos, acl.ActionUpdate)\n\n\t\tif s.Invalid() {\n\t\t\tc.AbortWithStatusJSON(http.StatusUnauthorized, ErrUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\tm, err := query.PhotoByUID(c.Param(\"uid\"))\n\n\t\tif err != nil {\n\t\t\tc.AbortWithStatusJSON(http.StatusNotFound, ErrPhotoNotFound)\n\t\t\treturn\n\t\t}\n\n\t\tvar f form.Label\n\n\t\tif err := c.BindJSON(&f); err != nil {\n\t\t\tc.AbortWithStatusJSON(http.StatusBadRequest, gin.H{\"error\": txt.UcFirst(err.Error())})\n\t\t\treturn\n\t\t}\n\n\t\tlabelEntity := 
entity.FirstOrCreateLabel(entity.NewLabel(f.LabelName, f.LabelPriority))\n\n\t\tif labelEntity == nil {\n\t\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{\"error\": \"failed creating label\"})\n\t\t\treturn\n\t\t}\n\n\t\tif err := labelEntity.Restore(); err != nil {\n\t\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{\"error\": \"could not restore label\"})\n\t\t}\n\n\t\tphotoLabel := entity.FirstOrCreatePhotoLabel(entity.NewPhotoLabel(m.ID, labelEntity.ID, f.Uncertainty, \"manual\"))\n\n\t\tif photoLabel == nil {\n\t\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{\"error\": \"failed updating photo label\"})\n\t\t\treturn\n\t\t}\n\n\t\tif photoLabel.Uncertainty > f.Uncertainty {\n\t\t\tif err := photoLabel.Updates(map[string]interface{}{\n\t\t\t\t\"Uncertainty\": f.Uncertainty,\n\t\t\t\t\"LabelSrc\": entity.SrcManual,\n\t\t\t}); err != nil {\n\t\t\t\tlog.Errorf(\"label: %s\", err)\n\t\t\t}\n\t\t}\n\n\t\tp, err := query.PhotoPreloadByUID(c.Param(\"uid\"))\n\n\t\tif err != nil {\n\t\t\tc.AbortWithStatusJSON(http.StatusNotFound, ErrPhotoNotFound)\n\t\t\treturn\n\t\t}\n\n\t\tif err := p.Save(); err != nil {\n\t\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{\"error\": txt.UcFirst(err.Error())})\n\t\t\treturn\n\t\t}\n\n\t\tPublishPhotoEvent(EntityUpdated, c.Param(\"uid\"), c)\n\n\t\tevent.Success(\"label updated\")\n\n\t\tc.JSON(http.StatusOK, p)\n\t})\n}", "func HasLabel(i *github.Issue, label string) bool {\n\tfor _, l := range i.Labels {\n\t\tif *l.Name == label {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (a *analysis) addLabel(ptr, label nodeid) bool {\n\tb := a.nodes[ptr].solve.pts.add(label)\n\tif b && a.log != nil {\n\t\tfmt.Fprintf(a.log, \"\\t\\tpts(n%d) += n%d\\n\", ptr, label)\n\t}\n\treturn b\n}", "func (mr *MockClientMockRecorder) AddLabel(org, repo, number, label interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, 
\"AddLabel\", reflect.TypeOf((*MockClient)(nil).AddLabel), org, repo, number, label)\n}", "func (r *Release) label(storageBackend string, labels ...string) {\n\tif len(labels) == 0 {\n\t\treturn\n\t}\n\tif r.Enabled.Value {\n\n\t\targs := []string{\"label\", \"--overwrite\", storageBackend, \"-n\", r.Namespace, \"-l\", \"owner=helm,name=\" + r.Name}\n\t\targs = append(args, labels...)\n\t\tcmd := kubectl(args, \"Applying Helmsman labels to [ \"+r.Name+\" ] release\")\n\n\t\tif _, err := cmd.Exec(); err != nil {\n\t\t\tlog.Fatal(err.Error())\n\t\t}\n\t}\n}", "func Label(name string) string {\n\treturn fmt.Sprintf(\"%s/%s\", LabelPrefix, name)\n}", "func AddLabels(newLabels []string, issueURL, authToken string) (*http.Response, error) {\n\tlabelResponse, err := json.Marshal(map[string][]string{\n\t\t\"labels\": newLabels,\n\t})\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error marshalling labels to map[string][]string: %v\", err)\n\t}\n\n\t// converting labelResponse to bytes for making a new request\n\tresponseBody := bytes.NewBuffer(labelResponse)\n\n\turl := fmt.Sprintf(\"%s%s\", issueURL, \"/labels\")\n\n\trequest, err := http.NewRequest(\"POST\", url, responseBody)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error: Writing a new request with labels as bytes buffer: %v\", err)\n\t}\n\n\trequest.Header.Add(\"Authorization\", authToken)\n\trequest.Header.Add(\"Accept\", \"application/vnd.github.v3+json\")\n\n\treturn http.DefaultClient.Do(request)\n}", "func (mr *MockRepositoryClientMockRecorder) AddRepoLabel(org, repo, label, description, color interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AddRepoLabel\", reflect.TypeOf((*MockRepositoryClient)(nil).AddRepoLabel), org, repo, label, description, color)\n}", "func (issue *Issue) RemoveLabel(labels []string) error {\n\tfor i, val := range labels {\n\t\tlabels[i] = fmt.Sprintf(`{\"remove\": \"%s\"}`, val)\n\t}\n\n\treturn 
updateLabelsHelper(labels, issue.Key)\n}", "func (issue *Issue) RemoveLabel(labels []string) error {\n\tfor i, val := range labels {\n\t\tlabels[i] = fmt.Sprintf(`{\"remove\": \"%s\"}`, val)\n\t}\n\n\treturn updateLabelsHelper(labels, issue.Key)\n}", "func (c *evictionClient) LabelPod(podInfo *types.PodInfo, priority string, action string) error {\n\tif podInfo.Name == \"\" {\n\t\treturn fmt.Errorf(\"pod name should not be empty\")\n\t}\n\toldPod, err := c.client.CoreV1().Pods(podInfo.Namespace).Get(podInfo.Name, metav1.GetOptions{})\n\tif err != nil {\n\t\tlog.Errorf(\"get pod %s error\", podInfo.Name)\n\t\treturn err\n\t}\n\toldData, err := json.Marshal(oldPod)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal old node for node %v : %v\", c.nodeName, err)\n\t}\n\tnewPod := oldPod.DeepCopy()\n\n\tif newPod.Labels == nil {\n\t\tlog.Infof(\"there is no label on this pod: %v, create it\", podInfo.Name)\n\t\tnewPod.Labels = make(map[string]string)\n\t}\n\tif action == \"Add\" {\n\t\tnewPod.Labels[priority] = \"true\"\n\t} else if action == \"Delete\" {\n\t\tdelete(newPod.Labels, priority)\n\t}\n\n\tnewData, err := json.Marshal(newPod)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal new pod %v : %v\", podInfo.Name, err)\n\t}\n\n\tpatchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Pod{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create patch for pod %v\", podInfo.Name)\n\t}\n\t_, err = c.client.CoreV1().Pods(oldPod.Namespace).Patch(oldPod.Name, k8stypes.StrategicMergePatchType, patchBytes)\n\n\tlog.Infof(\"Label pod: %v, action:%v\", podInfo.Name, action)\n\treturn err\n}", "func InjectLabel(key, value string, overwritePolicy OverwritePolicy, kinds ...string) mf.Transformer {\n\treturn func(u *unstructured.Unstructured) error {\n\t\tkind := u.GetKind()\n\t\tif len(kinds) != 0 && !ItemInSlice(kind, kinds) {\n\t\t\treturn nil\n\t\t}\n\t\tlabels, found, err := unstructured.NestedStringMap(u.Object, 
\"metadata\", \"labels\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not find labels set, %q\", err)\n\t\t}\n\t\tif overwritePolicy == Retain && found {\n\t\t\tif _, ok := labels[key]; ok {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tlabels = map[string]string{}\n\t\t}\n\t\tlabels[key] = value\n\t\terr = unstructured.SetNestedStringMap(u.Object, labels, \"metadata\", \"labels\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error updateing labes for %s:%s, %s\", kind, u.GetName(), err)\n\t\t}\n\t\treturn nil\n\t}\n}", "func (l Label) Label() string { return string(l) }", "func addLabelToResource(resource *metav1.ObjectMeta, ctx *ReporterContext) {\n\t// k8s labels may be nil,need to make it\n\tif resource.Labels == nil {\n\t\tresource.Labels = make(map[string]string)\n\t}\n\n\tresource.Labels[ClusterLabel] = ctx.ClusterName()\n\t// support for CM sequential checking\n\tresource.Labels[EdgeVersionLabel] = resource.ResourceVersion\n}", "func (r *ImageRef) Label(labelParams *LabelParams) error {\n\tout, err := labelImage(r.image, labelParams)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.setImage(out)\n\treturn nil\n}", "func (g *Generator) AddConfigLabel(label, value string) {\n\tg.image.Config.Labels[label] = value\n}", "func (lbl *LabelService) AddLabel(labelVector *map[string]interface{}, labelID string) (labeledResources []*types.LabeledResource, err error) {\n\tlog.Debug(\"AddLabel\")\n\n\tdata, status, err := lbl.concertoService.Post(fmt.Sprintf(\"/labels/%s/resources\", labelID), labelVector)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = utils.CheckStandardStatus(status, data); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = json.Unmarshal(data, &labeledResources); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn labeledResources, nil\n}", "func (mr *MockClientMockRecorder) AddRepoLabel(org, repo, label, description, color interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AddRepoLabel\", reflect.TypeOf((*MockClient)(nil).AddRepoLabel), org, repo, label, description, color)\n}", "func (b *buffer) Label(label string, indent int) {\n\tb.Write(fmt.Sprintf(\"%s:\\n\", strings.TrimSpace(label)), indent)\n}", "func NewLabel(text string) *Label {\n\tw := &Label{\n\t\tText: text,\n\t}\n\tw.Wrapper = w\n\treturn w\n}", "func Label(key string, value interface{}, args ...context.Context) {\n\tcurrentTracer := ExtractTracer(args)\n\tif currentTracer != nil {\n\t\tcurrentTracer.AddLabel(key, value)\n\t}\n}", "func (t Task) ApplyLabel(node *cgraph.Node) {\n\tif t.Driver == Driver_GitHub {\n\t\tfor _, label := range t.HasLabel {\n\t\t\ts, _ := strings.CutPrefix(label.String(), t.HasOwner.String()+\"/labels/\")\n\t\t\tfor _, dl := range depvizLabels {\n\t\t\t\tif s == dl.label {\n\t\t\t\t\tnode.SetStyle(cgraph.NodeStyle(dl.style))\n\t\t\t\t\tnode.SetColor(dl.color)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (c *client) UpdateRepoLabel(org, repo, label, newName, description, color string) error {\n\tdurationLogger := c.log(\"UpdateRepoLabel\", org, repo, label, newName, color)\n\tdefer durationLogger()\n\n\t_, err := c.request(&request{\n\t\tmethod: http.MethodPatch,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/labels/%s\", org, repo, label),\n\t\taccept: \"application/vnd.github.symmetra-preview+json\", // allow the description field -- https://developer.github.com/changes/2018-02-22-label-description-search-preview/\n\t\torg: org,\n\t\trequestBody: Label{Name: newName, Description: description, Color: color},\n\t\texitCodes: []int{200},\n\t}, nil)\n\treturn err\n}", "func AddMetadataLabel(pod *corev1.Pod, labelName, labelContent string) *corev1.Pod {\n\tif nil == pod.ObjectMeta.Labels {\n\t\tpod.ObjectMeta.Labels = make(map[string]string)\n\t}\n\n\tpod.ObjectMeta.Labels[labelName] = labelContent\n\n\treturn pod\n}", "func (fc *fakeClient) RemoveLabel(owner, repo string, number int, label 
string) error {\n\tfc.removed = append(fc.removed, label)\n\n\t// remove from existing labels\n\tfor k, v := range fc.labels {\n\t\tif label == v {\n\t\t\tfc.labels = append(fc.labels[:k], fc.labels[k+1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn nil\n}", "func (t *Test) Label(labels ...label.Instance) *Test {\n\tt.labels = append(t.labels, labels...)\n\treturn t\n}", "func (a *Agent) LabelFind(name string, label *Label) (err error) {\n\tspec := (&api.LabelSearchSpec{}).Init(name, 0)\n\tif err = a.pc.ExecuteApi(spec); err != nil {\n\t\treturn\n\t}\n\n\tif spec.Result.Total == 0 || spec.Result.List[0].Name != name {\n\t\terr = errors.ErrNotExist\n\t} else {\n\t\tli := spec.Result.List[0]\n\t\tlabel.Id = li.Id\n\t\tlabel.Name = li.Name\n\t\tlabel.Color = labelColorRevMap[li.Color]\n\t}\n\treturn\n}", "func (pull *LibcomposePullProperty) Label() string {\n\treturn \"Pull\"\n}", "func (c *Client) CreateLabel(ctx context.Context, path string, payload *CreateLabelPayload) (*http.Response, error) {\n\treq, err := c.NewCreateLabelRequest(ctx, path, payload)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.Client.Do(ctx, req)\n}", "func (c *client) AddLabels(org, repo string, number int, labels ...string) error {\n\treturn c.AddLabelsWithContext(context.Background(), org, repo, number, labels...)\n}", "func (k *kubectlContext) Label(args ...string) (string, error) {\n\tout, err := k.do(append([]string{\"label\"}, args...)...)\n\treturn string(out), err\n}", "func (c *GlPushGroupMarkerEXT) Label(ctx context.Context, s *api.GlobalState) string {\n\treturn readString(ctx, c, s, c.Marker(), c.Length())\n}", "func (p *Builder) Label(l *Label) *Builder {\n\tif v, ok := p.labels[l]; ok && v >= 0 {\n\t\tlog.Panicln(\"Label failed: label is defined already -\", l.Name)\n\t}\n\tp.labels[l] = p.code.Len()\n\treturn p\n}", "func (c *client) CreateLabel(\n\tid interface{},\n\topt *glab.CreateLabelOptions,\n\toptions ...glab.RequestOptionFunc,\n) (*glab.Label, *glab.Response, 
error) {\n\treturn c.c.Labels.CreateLabel(id, opt, options...)\n}", "func (ls *LabelService) CreateLabel(labelParams *map[string]interface{}) (label *types.Label, err error) {\n\tlog.Debug(\"CreateLabel\")\n\n\tdata, status, err := ls.concertoService.Post(APIPathLabels, labelParams)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = utils.CheckStandardStatus(status, data); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = json.Unmarshal(data, &label); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn label, nil\n}", "func Label(attrs []htmlgo.Attribute, children ...HTML) HTML {\n\treturn &htmlgo.Tree{Tag: \"label\", Attributes: attrs, Children: children}\n}", "func (c *client) RemoveLabel(org, repo string, number int, label string) error {\n\treturn c.RemoveLabelWithContext(context.Background(), org, repo, number, label)\n}", "func (o *AddOn) Label() string {\n\tif o != nil && o.bitmap_&1024 != 0 {\n\t\treturn o.label\n\t}\n\treturn \"\"\n}", "func (o PublishingResponseOutput) GithubLabel() pulumi.StringOutput {\n\treturn o.ApplyT(func(v PublishingResponse) string { return v.GithubLabel }).(pulumi.StringOutput)\n}", "func Label(opts render.TagOpts) render.TagBuilder {\n\treturn render.NewTag(\"label\").WithOpts(opts)\n}", "func (pc *programCode) createLabel(name string) {\n\tcode := \"\"\n\tcode += \"\\n\" + name + \":\\n\"\n\tpc.funcSlice = append(pc.funcSlice, name)\n\tpc.indentLevel += 1 // dive deeper -> next buffer.\n\t// Please have a look to FIXME: Where can I find what?\n\tpc.appendCode(code)\n\n}", "func (o ResourceAnnotationOutput) Label() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ResourceAnnotation) *string { return v.Label }).(pulumi.StringPtrOutput)\n}", "func (a *Agent) LabelCreate(name string, color LabelColor) (labelId string, err error) {\n\tcolorName, ok := labelColorMap[color]\n\tif !ok {\n\t\tcolorName = api.LabelColorBlank\n\t}\n\tspec := (&api.LabelCreateSpec{}).Init(\n\t\tname, colorName,\n\t)\n\tif err = 
a.pc.ExecuteApi(spec); err != nil {\n\t\treturn\n\t}\n\tif len(spec.Result) > 0 {\n\t\tlabelId = spec.Result[0].Id\n\t}\n\treturn\n}", "func WithLabel(k string, v interface{}) Option {\n\treturn labelOption{key: k, value: v}\n}", "func (o PublishingOutput) GithubLabel() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v Publishing) *string { return v.GithubLabel }).(pulumi.StringPtrOutput)\n}", "func (p PRMirror) RemoveLabel(id int, labels string) bool {\n\t_, err := p.GitHubClient.Issues.RemoveLabelForIssue(*p.Context, p.Configuration.DownstreamOwner, p.Configuration.DownstreamRepo, id, labels)\n\tif err != nil {\n\t\tlog.Errorf(\"Error while removing a label on issue#:%d - %s\", id, err.Error())\n\t\treturn false\n\t}\n\n\treturn true\n}", "func label(r interface{}) string {\n\treturn strings.ToLower(migref.ToKind(r))\n}", "func (m *MockRepositoryClient) AddLabelWithContext(ctx context.Context, org, repo string, number int, label string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AddLabelWithContext\", ctx, org, repo, number, label)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func ReleaseLabel(label string) {\n\treleaseLabel(label)\n}", "func (m *Media) SetLabel(value *string)() {\n m.label = value\n}", "func (m *ExecutionManager) addProjectLabels(ctx context.Context, projectName string, initialLabels map[string]string) (map[string]string, error) {\n\tproject, err := m.db.ProjectRepo().Get(ctx, projectName)\n\tif err != nil {\n\t\tlogger.Errorf(ctx, \"Failed to get project for [%+v] with error: %v\", project, err)\n\t\treturn nil, err\n\t}\n\t// passing nil domain as not needed to retrieve labels\n\tprojectLabels := transformers.FromProjectModel(project, nil).Labels.GetValues()\n\n\tif initialLabels == nil {\n\t\tinitialLabels = make(map[string]string)\n\t}\n\n\tfor k, v := range projectLabels {\n\t\tif _, ok := initialLabels[k]; !ok {\n\t\t\tinitialLabels[k] = v\n\t\t}\n\t}\n\treturn initialLabels, nil\n}", "func (zc *Zcounter) Label(l string) 
{\n\tzc.label = l\n}", "func WithLabel(key, value string) Option {\n\treturn func(meta metav1.Object) {\n\t\tlabels := meta.GetLabels()\n\t\tlabels = k8slabels.AddLabel(labels, key, value)\n\t\tmeta.SetLabels(labels)\n\t}\n}", "func (r *Repo) UpdateLabel(name string, params *github.UpdateLabelParams) (*github.Label, error) {\n\treturn r.cli.UpdateLabel(r.path, name, params)\n}", "func (b *button) label(eng vu.Engine, part vu.Part, text string) {\n\tcolour := \"weblySleek22Black\"\n\tif b.banner == nil {\n\t\tb.banner = part.AddPart()\n\t\tb.banner.SetBanner(text, \"uv\", \"weblySleek22\", colour)\n\t\tb.banner.SetLocation(float64(b.x), float64(b.y), 0)\n\t} else {\n\t\tb.banner.UpdateBanner(text)\n\t}\n}", "func (c *GlInsertEventMarkerEXT) Label(ctx context.Context, s *api.GlobalState) string {\n\treturn readString(ctx, c, s, c.Marker(), c.Length())\n}", "func (o *TppCredentialsParams) SetLabel(v string) {\n\to.Label = v\n}", "func execNewLabel(_ int, p *gop.Context) {\n\targs := p.GetArgs(3)\n\tret := types.NewLabel(token.Pos(args[0].(int)), args[1].(*types.Package), args[2].(string))\n\tp.Ret(3, ret)\n}", "func (a *Client) CreateLabel(params *CreateLabelParams, opts ...ClientOption) (*CreateLabelOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCreateLabelParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"CreateLabel\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/create-label\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &CreateLabelReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*CreateLabelOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success 
response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for CreateLabel: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (b *button) label(part *vu.Ent, keyCode int) {\n\tif keysym := vu.Symbol(keyCode); keysym > 0 {\n\t\tif b.banner == nil {\n\t\t\tb.banner = part.AddPart().SetAt(float64(b.x), float64(b.y), 0)\n\t\t\tb.banner.MakeLabel(\"labeled\", \"lucidiaSu22\")\n\t\t\tb.banner.SetColor(0, 0, 0)\n\t\t}\n\t\tif keyCode == 0 {\n\t\t\tkeyCode = vu.KSpace\n\t\t}\n\t\tb.banner.SetStr(string(keysym))\n\t}\n}", "func (p libvirtPlugin) Label(instance instance.ID, labels map[string]string) error {\n\t//l := log.WithField(\"instance\", instance)\n\n\tconn, err := libvirt.NewConnect(p.URI)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Connecting to libvirt\")\n\t}\n\tdefer conn.Close()\n\n\td, err := p.lookupInstanceByID(conn, instance)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Looking up domain\")\n\t}\n\n\tmeta := infrakitMetadata{}\n\tm, err := d.GetMetadata(libvirt.DOMAIN_METADATA_ELEMENT,\n\t\t\"https://github.com/docker/infrakit\",\n\t\tlibvirt.DOMAIN_AFFECT_LIVE)\n\tif err == nil {\n\t\tif err := meta.Unmarshal(m); err != nil {\n\t\t\treturn errors.Wrap(err, \"Unmarshalling domain metadata XML\")\n\t\t}\n\t} else {\n\t\tmeta.LogicalID = string(instance)\n\t}\n\n\tmetaSetTags(&meta, labels)\n\n\txmlbytes, err := xml.MarshalIndent(meta, \"\", \" \")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Marshalling infrakitMetadata\")\n\t}\n\tm = string(xmlbytes)\n\n\terr = d.SetMetadata(libvirt.DOMAIN_METADATA_ELEMENT,\n\t\tm,\n\t\t\"infrakit\",\n\t\t\"https//github.com/docker/infrakit\",\n\t\tlibvirt.DOMAIN_AFFECT_LIVE)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Setting domain metadata\")\n\t}\n\n\treturn nil\n}", "func (o BoundingPolyOutput) Label() 
pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BoundingPoly) *string { return v.Label }).(pulumi.StringPtrOutput)\n}", "func (g Gen) WithLabel(label string) Gen {\n\treturn func(genParams *GenParameters) *GenResult {\n\t\tresult := g(genParams)\n\t\tresult.Labels = append(result.Labels, label)\n\t\treturn result\n\t}\n}", "func onAdd(obj interface{}) {\n\t// Cast the obj as node\n\tpod := obj.(*corev1.Pod)\n\tlabel, ok := pod.GetLabels()[SOME_LABEL]\n\tif ok {\n\t\tfmt.Printf(\"onAdd\\n\")\n\t\tfmt.Printf(\"It has the label: %s\\n\", label)\n\t}\n}", "func runlabelCmd(c *cli.Context) error {\n\tvar (\n\t\timageName string\n\t\tstdErr, stdOut io.Writer\n\t\tstdIn io.Reader\n\t\textraArgs []string\n\t)\n\n\t// Evil images could trick into recursively executing the runlabel\n\t// command. Avoid this by setting the \"PODMAN_RUNLABEL_NESTED\" env\n\t// variable when executing a label first.\n\tnested := os.Getenv(\"PODMAN_RUNLABEL_NESTED\")\n\tif nested == \"1\" {\n\t\treturn fmt.Errorf(\"nested runlabel calls: runlabels cannot execute the runlabel command\")\n\t}\n\n\topts := make(map[string]string)\n\truntime, err := libpodruntime.GetRuntime(c)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"could not get runtime\")\n\t}\n\tdefer runtime.Shutdown(false)\n\n\targs := c.Args()\n\tif len(args) < 2 {\n\t\tlogrus.Errorf(\"the runlabel command requires at least 2 arguments: LABEL IMAGE\")\n\t\treturn nil\n\t}\n\tif err := validateFlags(c, runlabelFlags); err != nil {\n\t\treturn err\n\t}\n\tif c.Bool(\"display\") && c.Bool(\"quiet\") {\n\t\treturn errors.Errorf(\"the display and quiet flags cannot be used together.\")\n\t}\n\n\tif len(args) > 2 {\n\t\textraArgs = args[2:]\n\t}\n\tpull := c.Bool(\"pull\")\n\tlabel := args[0]\n\n\trunlabelImage := args[1]\n\n\tif c.IsSet(\"opt1\") {\n\t\topts[\"opt1\"] = c.String(\"opt1\")\n\t}\n\tif c.IsSet(\"opt2\") {\n\t\topts[\"opt2\"] = c.String(\"opt2\")\n\t}\n\tif c.IsSet(\"opt3\") {\n\t\topts[\"opt3\"] = 
c.String(\"opt3\")\n\t}\n\n\tctx := getContext()\n\n\tstdErr = os.Stderr\n\tstdOut = os.Stdout\n\tstdIn = os.Stdin\n\n\tif c.Bool(\"quiet\") {\n\t\tstdErr = nil\n\t\tstdOut = nil\n\t\tstdIn = nil\n\t}\n\n\tdockerRegistryOptions := image.DockerRegistryOptions{\n\t\tDockerCertPath: c.String(\"cert-dir\"),\n\t}\n\tif c.IsSet(\"tls-verify\") {\n\t\tdockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT(\"tls-verify\"))\n\t}\n\n\tauthfile := getAuthFile(c.String(\"authfile\"))\n\trunLabel, imageName, err := shared.GetRunlabel(label, runlabelImage, ctx, runtime, pull, c.String(\"creds\"), dockerRegistryOptions, authfile, c.String(\"signature-policy\"), stdOut)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif runLabel == \"\" {\n\t\treturn nil\n\t}\n\n\tcmd, env, err := shared.GenerateRunlabelCommand(runLabel, imageName, c.String(\"name\"), opts, extraArgs)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !c.Bool(\"quiet\") {\n\t\tfmt.Printf(\"Command: %s\\n\", strings.Join(cmd, \" \"))\n\t\tif c.Bool(\"display\") {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn utils.ExecCmdWithStdStreams(stdIn, stdOut, stdErr, env, cmd[0], cmd[1:]...)\n}", "func (r *InformationProtectionPolicyLabelsCollectionRequest) Add(ctx context.Context, reqObj *InformationProtectionLabel) (resObj *InformationProtectionLabel, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}", "func AddLabels(obj metav1.Object, additionalLabels map[string]string) {\n\tlabels := obj.GetLabels()\n\tif labels == nil {\n\t\tlabels = map[string]string{}\n\t\tobj.SetLabels(labels)\n\t}\n\tfor k, v := range additionalLabels {\n\t\tlabels[k] = v\n\t}\n}", "func labelExactMatch(pr *gogithub.PullRequest, labelToFind string) bool {\n\tfor _, label := range pr.Labels {\n\t\tif *label.Name == labelToFind {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (o *Board) AddLabels(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*Label) error {\n\tvar 
err error\n\tfor _, rel := range related {\n\t\tif insert {\n\t\t\trel.BoardID = o.ID\n\t\t\tif err = rel.Insert(ctx, exec, boil.Infer()); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"failed to insert into foreign table\")\n\t\t\t}\n\t\t} else {\n\t\t\tupdateQuery := fmt.Sprintf(\n\t\t\t\t\"UPDATE `labels` SET %s WHERE %s\",\n\t\t\t\tstrmangle.SetParamNames(\"`\", \"`\", 0, []string{\"board_id\"}),\n\t\t\t\tstrmangle.WhereClause(\"`\", \"`\", 0, labelPrimaryKeyColumns),\n\t\t\t)\n\t\t\tvalues := []interface{}{o.ID, rel.ID}\n\n\t\t\tif boil.DebugMode {\n\t\t\t\tfmt.Fprintln(boil.DebugWriter, updateQuery)\n\t\t\t\tfmt.Fprintln(boil.DebugWriter, values)\n\t\t\t}\n\n\t\t\tif _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"failed to update foreign table\")\n\t\t\t}\n\n\t\t\trel.BoardID = o.ID\n\t\t}\n\t}\n\n\tif o.R == nil {\n\t\to.R = &boardR{\n\t\t\tLabels: related,\n\t\t}\n\t} else {\n\t\to.R.Labels = append(o.R.Labels, related...)\n\t}\n\n\tfor _, rel := range related {\n\t\tif rel.R == nil {\n\t\t\trel.R = &labelR{\n\t\t\t\tBoard: o,\n\t\t\t}\n\t\t} else {\n\t\t\trel.R.Board = o\n\t\t}\n\t}\n\treturn nil\n}", "func (gauo *GithubAssetUpdateOne) SetLabel(s string) *GithubAssetUpdateOne {\n\tgauo.mutation.SetLabel(s)\n\treturn gauo\n}", "func NewLabel(name string) *Label {\n\treturn &Label{Name: name}\n}", "func (gau *GithubAssetUpdate) SetLabel(s string) *GithubAssetUpdate {\n\tgau.mutation.SetLabel(s)\n\treturn gau\n}" ]
[ "0.75481737", "0.75481737", "0.7350045", "0.71433645", "0.7023004", "0.6740278", "0.655589", "0.64857525", "0.6314333", "0.62937886", "0.62546927", "0.62197626", "0.6193546", "0.61563903", "0.6149052", "0.6143574", "0.61072576", "0.6095906", "0.60725856", "0.60462886", "0.6039999", "0.6028774", "0.60183024", "0.60070443", "0.6003138", "0.5971456", "0.59558547", "0.5934847", "0.591908", "0.58983815", "0.5886662", "0.58699834", "0.58649", "0.58030397", "0.57964486", "0.5769435", "0.5756149", "0.5756149", "0.57519174", "0.5743585", "0.5734936", "0.5722172", "0.57218343", "0.56972075", "0.56812525", "0.567084", "0.5652639", "0.56409985", "0.5636184", "0.5633166", "0.56232935", "0.5603068", "0.5583475", "0.5572795", "0.5557984", "0.5556151", "0.5541782", "0.5521042", "0.55077636", "0.5498153", "0.54762626", "0.5465853", "0.5461253", "0.5452862", "0.54364425", "0.543557", "0.541899", "0.54171103", "0.5408388", "0.5405034", "0.54014134", "0.5397784", "0.5397063", "0.53841186", "0.5383354", "0.537259", "0.5372469", "0.5362483", "0.5361563", "0.5358265", "0.5351364", "0.5342205", "0.53387666", "0.5329523", "0.5327853", "0.5315752", "0.53108233", "0.5305608", "0.529701", "0.5294024", "0.52911294", "0.5280333", "0.5279013", "0.5273981", "0.5273482", "0.5269325", "0.5266934", "0.524788", "0.5246509", "0.524464" ]
0.72734094
3
RemoveLabel removes the label from the specified PR or issue
func (fc *fakeClient) RemoveLabel(owner, repo string, number int, label string) error { fc.removed = append(fc.removed, label) // remove from existing labels for k, v := range fc.labels { if label == v { fc.labels = append(fc.labels[:k], fc.labels[k+1:]...) break } } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (issue *Issue) RemoveLabel(labels []string) error {\n\tfor i, val := range labels {\n\t\tlabels[i] = fmt.Sprintf(`{\"remove\": \"%s\"}`, val)\n\t}\n\n\treturn updateLabelsHelper(labels, issue.Key)\n}", "func (issue *Issue) RemoveLabel(labels []string) error {\n\tfor i, val := range labels {\n\t\tlabels[i] = fmt.Sprintf(`{\"remove\": \"%s\"}`, val)\n\t}\n\n\treturn updateLabelsHelper(labels, issue.Key)\n}", "func (c *client) RemoveLabel(org, repo string, number int, label string) error {\n\treturn c.RemoveLabelWithContext(context.Background(), org, repo, number, label)\n}", "func (p PRMirror) RemoveLabel(id int, labels string) bool {\n\t_, err := p.GitHubClient.Issues.RemoveLabelForIssue(*p.Context, p.Configuration.DownstreamOwner, p.Configuration.DownstreamRepo, id, labels)\n\tif err != nil {\n\t\tlog.Errorf(\"Error while removing a label on issue#:%d - %s\", id, err.Error())\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (c *ContainerWorkloadProfile) RemoveLabel(key string) error {\n\n\t// Create the new label array\n\tnewLabels := []Label{}\n\n\t// Iterate through the existing labels\n\tfor _, existingLabel := range *c.Labels {\n\t\t// If the key isn't target, keep it\n\t\tif existingLabel.Key != key {\n\t\t\tnewLabels = append(newLabels, existingLabel)\n\t\t}\n\t}\n\n\t// Update the labels\n\tc.Labels = &newLabels\n\n\treturn nil\n}", "func (ls *LabelService) RemoveLabel(labelID string, resourceType string, resourceID string) error {\n\tlog.Debug(\"RemoveLabel\")\n\n\tdata, status, err := ls.concertoService.Delete(\n\t\tfmt.Sprintf(APIPathLabelResource, labelID, resourceType, resourceID),\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = utils.CheckStandardStatus(status, data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (lbl *LabelService) RemoveLabel(labelID string, resourceType string, resourceID string) error {\n\tlog.Debug(\"RemoveLabel\")\n\n\tdata, status, err := 
lbl.concertoService.Delete(fmt.Sprintf(\"/labels/%s/resources/%s/%s\", labelID, resourceType, resourceID))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = utils.CheckStandardStatus(status, data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c *client) DeleteRepoLabel(org, repo, label string) error {\n\tdurationLogger := c.log(\"DeleteRepoLabel\", org, repo, label)\n\tdefer durationLogger()\n\n\t_, err := c.request(&request{\n\t\tmethod: http.MethodDelete,\n\t\taccept: \"application/vnd.github.symmetra-preview+json\", // allow the description field -- https://developer.github.com/changes/2018-02-22-label-description-search-preview/\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/labels/%s\", org, repo, label),\n\t\torg: org,\n\t\trequestBody: Label{Name: label},\n\t\texitCodes: []int{204},\n\t}, nil)\n\treturn err\n}", "func (m *MockClient) RemoveLabel(org, repo string, number int, label string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"RemoveLabel\", org, repo, number, label)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockRepositoryClient) RemoveLabel(org, repo string, number int, label string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"RemoveLabel\", org, repo, number, label)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func RemovePhotoLabel(router *gin.RouterGroup) {\n\trouter.DELETE(\"/photos/:uid/label/:id\", func(c *gin.Context) {\n\t\ts := Auth(SessionID(c), acl.ResourcePhotos, acl.ActionUpdate)\n\n\t\tif s.Invalid() {\n\t\t\tc.AbortWithStatusJSON(http.StatusUnauthorized, ErrUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\tm, err := query.PhotoByUID(c.Param(\"uid\"))\n\n\t\tif err != nil {\n\t\t\tc.AbortWithStatusJSON(http.StatusNotFound, ErrPhotoNotFound)\n\t\t\treturn\n\t\t}\n\n\t\tlabelId, err := strconv.Atoi(c.Param(\"id\"))\n\n\t\tif err != nil {\n\t\t\tc.AbortWithStatusJSON(http.StatusNotFound, gin.H{\"error\": txt.UcFirst(err.Error())})\n\t\t\treturn\n\t\t}\n\n\t\tlabel, err := 
query.PhotoLabel(m.ID, uint(labelId))\n\n\t\tif err != nil {\n\t\t\tc.AbortWithStatusJSON(http.StatusNotFound, gin.H{\"error\": txt.UcFirst(err.Error())})\n\t\t\treturn\n\t\t}\n\n\t\tif label.LabelSrc == classify.SrcManual || label.LabelSrc == classify.SrcKeyword {\n\t\t\tlogError(\"label\", entity.Db().Delete(&label).Error)\n\t\t} else {\n\t\t\tlabel.Uncertainty = 100\n\t\t\tlogError(\"label\", entity.Db().Save(&label).Error)\n\t\t}\n\n\t\tp, err := query.PhotoPreloadByUID(c.Param(\"uid\"))\n\n\t\tif err != nil {\n\t\t\tc.AbortWithStatusJSON(http.StatusNotFound, ErrPhotoNotFound)\n\t\t\treturn\n\t\t}\n\n\t\tlogError(\"label\", p.RemoveKeyword(label.Label.LabelName))\n\n\t\tif err := p.Save(); err != nil {\n\t\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{\"error\": txt.UcFirst(err.Error())})\n\t\t\treturn\n\t\t}\n\n\t\tPublishPhotoEvent(EntityUpdated, c.Param(\"uid\"), c)\n\n\t\tevent.Success(\"label removed\")\n\n\t\tc.JSON(http.StatusOK, p)\n\t})\n}", "func (mr *MockRepositoryClientMockRecorder) RemoveLabel(org, repo, number, label interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"RemoveLabel\", reflect.TypeOf((*MockRepositoryClient)(nil).RemoveLabel), org, repo, number, label)\n}", "func (g LabeledUndirected) RemoveEdgeLabel(n1, n2 NI, l LI) (ok bool) {\n\tok, x1, x2 := g.HasEdgeLabel(n1, n2, l)\n\tif !ok {\n\t\treturn\n\t}\n\ta := g.LabeledAdjacencyList\n\tto := a[n1]\n\tlast := len(to) - 1\n\tto[x1] = to[last]\n\ta[n1] = to[:last]\n\tif n1 == n2 {\n\t\treturn\n\t}\n\tto = a[n2]\n\tlast = len(to) - 1\n\tto[x2] = to[last]\n\ta[n2] = to[:last]\n\treturn\n}", "func (g *Generator) RemoveConfigLabel(label string) {\n\tdelete(g.image.Config.Labels, label)\n}", "func (mr *MockClientMockRecorder) RemoveLabel(org, repo, number, label interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"RemoveLabel\", 
reflect.TypeOf((*MockClient)(nil).RemoveLabel), org, repo, number, label)\n}", "func (q *QuestionnaireT) LabelCleanse(s string) string {\n\n\ts = openingDiv.ReplaceAllString(s, \" \")\n\ts = openingP.ReplaceAllString(s, \" \")\n\ts = strings.ReplaceAll(s, \"</div>\", \" \")\n\ts = strings.ReplaceAll(s, \"</p>\", \" \")\n\n\ts = strings.ReplaceAll(s, \"&#931;\", \" sum \") // Σ - greek sum symbol\n\ts = strings.ReplaceAll(s, \"&shy;\", \"\")\n\ts = strings.ReplaceAll(s, \"&nbsp;\", \" \")\n\ts = strings.ReplaceAll(s, \"<br>\", \" \")\n\n\ts = strings.ReplaceAll(s, \"<b>\", \" \")\n\ts = strings.ReplaceAll(s, \"</b>\", \" \")\n\ts = strings.ReplaceAll(s, \"<bx>\", \" \")\n\ts = strings.ReplaceAll(s, \"</bx>\", \" \")\n\n\ts = EnglishTextAndNumbersOnly(s)\n\n\ts = strings.TrimPrefix(s, \"-- \")\n\ts = strings.TrimSuffix(s, \" --\")\n\n\treturn s\n}", "func ClearLabels(issue *models.Issue, doer *models.User) (err error) {\n\tif err = issue.ClearLabels(doer); err != nil {\n\t\treturn\n\t}\n\n\tnotification.NotifyIssueClearLabels(doer, issue)\n\n\treturn nil\n}", "func ReleaseLabel(label string) {\n\treleaseLabel(label)\n}", "func (gauo *GithubAssetUpdateOne) ClearLabel() *GithubAssetUpdateOne {\n\tgauo.mutation.ClearLabel()\n\treturn gauo\n}", "func (removerunning *LibcomposeRemoveRunningProperty) Label() string {\n\treturn \"Remove running\"\n}", "func NewIssueRemoveLabelParams() *IssueRemoveLabelParams {\n\tvar ()\n\treturn &IssueRemoveLabelParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func (gau *GithubAssetUpdate) ClearLabel() *GithubAssetUpdate {\n\tgau.mutation.ClearLabel()\n\treturn gau\n}", "func (f *tmplFuncs) cleanLabel(l *descriptor.FieldDescriptorProto_Label) string {\n\tswitch int32(*l) {\n\tcase 1:\n\t\treturn \"optional\"\n\tcase 2:\n\t\treturn \"required\"\n\tcase 3:\n\t\treturn \"repeated\"\n\tdefault:\n\t\tpanic(\"unknown label\")\n\t}\n}", "func (r *mutationResolver) DeleteLabel(ctx context.Context, id int) (int, error) {\n\treturn id, 
ent.FromContext(ctx).Label.DeleteOneID(id).Exec(ctx)\n}", "func (puo *PostUpdateOne) RemoveLabels(l ...*Label) *PostUpdateOne {\n\tids := make([]int, len(l))\n\tfor i := range l {\n\t\tids[i] = l[i].ID\n\t}\n\treturn puo.RemoveLabelIDs(ids...)\n}", "func (puo *PostUpdateOne) RemoveLabelIDs(ids ...int) *PostUpdateOne {\n\tpuo.mutation.RemoveLabelIDs(ids...)\n\treturn puo\n}", "func DeleteLabelFromResource(spec interface{}, key string) {\n\tif obj, ok := spec.(*v1.PersistentVolumeClaim); ok {\n\t\tif obj.Labels != nil {\n\t\t\t_, ok := obj.Labels[key]\n\t\t\tif ok {\n\t\t\t\tlogrus.Infof(\"Deleting label with key [%s] from PVC %s\", key, obj.Name)\n\t\t\t\tdelete(obj.Labels, key)\n\t\t\t\tcore.Instance().UpdatePersistentVolumeClaim(obj)\n\t\t\t}\n\t\t}\n\t} else if obj, ok := spec.(*v1.ConfigMap); ok {\n\t\tif obj.Labels != nil {\n\t\t\t_, ok := obj.Labels[key]\n\t\t\tif ok {\n\t\t\t\tlogrus.Infof(\"Deleting label with key [%s] from ConfigMap %s\", key, obj.Name)\n\t\t\t\tdelete(obj.Labels, key)\n\t\t\t\tcore.Instance().UpdateConfigMap(obj)\n\t\t\t}\n\t\t}\n\t} else if obj, ok := spec.(*v1.Secret); ok {\n\t\tif obj.Labels != nil {\n\t\t\t_, ok := obj.Labels[key]\n\t\t\tif ok {\n\t\t\t\tlogrus.Infof(\"Deleting label with key [%s] from Secret %s\", key, obj.Name)\n\t\t\t\tdelete(obj.Labels, key)\n\t\t\t\tcore.Instance().UpdateSecret(obj)\n\t\t\t}\n\t\t}\n\t}\n}", "func (p *Plex) RemoveLabelFromMedia(_type, id, label, locked string) (bool, error) {\n\trequestInfo.headers.Token = p.token\n\n\tquery := fmt.Sprintf(\"%s/library/sections/3/all\", p.URL)\n\n\tparsedQuery, parseErr := url.Parse(query)\n\n\tif parseErr != nil {\n\t\treturn false, parseErr\n\t}\n\n\tvals := parsedQuery.Query()\n\n\tvals.Add(\"type\", _type)\n\tvals.Add(\"id\", id)\n\tvals.Add(\"label[].tag.tag-\", label)\n\tvals.Add(\"label.locked\", locked)\n\n\tparsedQuery.RawQuery = vals.Encode()\n\n\tquery = parsedQuery.String()\n\n\tresp, respErr := requestInfo.put(query)\n\n\tif respErr != nil 
{\n\t\treturn false, respErr\n\t}\n\n\tdefer resp.Body.Close()\n\n\treturn resp.StatusCode == 200, nil\n}", "func (o *IssueRemoveLabelParams) WithContext(ctx context.Context) *IssueRemoveLabelParams {\n\to.SetContext(ctx)\n\treturn o\n}", "func (o *IssueRemoveLabelParams) WithHTTPClient(client *http.Client) *IssueRemoveLabelParams {\n\to.SetHTTPClient(client)\n\treturn o\n}", "func (puo *PostUpdateOne) ClearLabels() *PostUpdateOne {\n\tpuo.mutation.ClearLabels()\n\treturn puo\n}", "func (pu *PostUpdate) RemoveLabels(l ...*Label) *PostUpdate {\n\tids := make([]int, len(l))\n\tfor i := range l {\n\t\tids[i] = l[i].ID\n\t}\n\treturn pu.RemoveLabelIDs(ids...)\n}", "func RemoveLabels(o metav1.Object, labels ...string) {\n\tl := o.GetLabels()\n\tif l == nil {\n\t\treturn\n\t}\n\tfor _, k := range labels {\n\t\tdelete(l, k)\n\t}\n\to.SetLabels(l)\n}", "func filterOutLabels(issues []*gitlab.Issue, exLabels string) []*gitlab.Issue {\n\tif exLabels == \"\" {\n\t\treturn issues\n\t}\n\tlabels := map[string]struct{}{}\n\tfor _, l := range strings.Split(exLabels, \",\") {\n\t\tlabels[l] = struct{}{}\n\t}\n\tfor i := 0; i < len(issues); {\n\t\tissue := issues[i]\n\t\tskip := false\n\t\tfor _, l := range issue.Labels {\n\t\t\tif _, ok := labels[l]; ok {\n\t\t\t\tskip = true\n\t\t\t}\n\t\t}\n\t\tif skip {\n\t\t\tissues[i] = issues[len(issues)-1]\n\t\t\tissues = issues[0 : len(issues)-1]\n\t\t} else {\n\t\t\ti++\n\t\t}\n\t}\n\treturn issues\n}", "func (opt RemoveLabel) Run(ctx context.Context, resourceList *framework.ResourceList) error {\n\tpredicate := func(key string) bool {\n\t\tfor _, label := range opt.Labels {\n\t\t\tif label == key {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tfilter, err := BuildRemoveLabelFilter(predicate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn xform.RunFilters(ctx, resourceList, filter)\n}", "func (a *Agent) LabelDelete(labelId string) (err error) {\n\tif labelId == \"\" {\n\t\treturn\n\t}\n\tspec := 
(&api.LabelDeleteSpec{}).Init(labelId)\n\treturn a.pc.ExecuteApi(spec)\n}", "func (s *DataStore) RemoveRecurringJobLabelFromVolume(volume *longhorn.Volume, labelKey string) (*longhorn.Volume, error) {\n\tvar err error\n\tif _, exist := volume.Labels[labelKey]; exist {\n\t\tdelete(volume.Labels, labelKey)\n\t\tvolume, err = s.UpdateVolume(volume)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlogrus.Infof(\"Removed volume %v recurring job label %v\", volume.Name, labelKey)\n\t}\n\treturn volume, nil\n}", "func NewIssueRemoveLabelParamsWithHTTPClient(client *http.Client) *IssueRemoveLabelParams {\n\tvar ()\n\treturn &IssueRemoveLabelParams{\n\t\tHTTPClient: client,\n\t}\n}", "func (removeimagetypes *LibcomposeRemoveImageTypeProperty) Label() string {\n\treturn \"Remove image types\"\n}", "func (o *IssueRemoveLabelParams) WithTimeout(timeout time.Duration) *IssueRemoveLabelParams {\n\to.SetTimeout(timeout)\n\treturn o\n}", "func (s *Service) DeleteLabel(id int64) error {\n\tif id <= 0 {\n\t\treturn ecode.RequestErr\n\t}\n\treturn s.dao.DeleteLabel(id)\n}", "func (pu *PostUpdate) RemoveLabelIDs(ids ...int) *PostUpdate {\n\tpu.mutation.RemoveLabelIDs(ids...)\n\treturn pu\n}", "func (pu *PostUpdate) ClearLabels() *PostUpdate {\n\tpu.mutation.ClearLabels()\n\treturn pu\n}", "func deleteLabelValueOp(metric pmetric.Metric, mtpOp internalOperation) {\n\top := mtpOp.configOperation\n\t//exhaustive:enforce\n\tswitch metric.Type() {\n\tcase pmetric.MetricTypeGauge:\n\t\tmetric.Gauge().DataPoints().RemoveIf(func(dp pmetric.NumberDataPoint) bool {\n\t\t\treturn hasAttr(dp.Attributes(), op.Label, op.LabelValue)\n\t\t})\n\tcase pmetric.MetricTypeSum:\n\t\tmetric.Sum().DataPoints().RemoveIf(func(dp pmetric.NumberDataPoint) bool {\n\t\t\treturn hasAttr(dp.Attributes(), op.Label, op.LabelValue)\n\t\t})\n\tcase pmetric.MetricTypeHistogram:\n\t\tmetric.Histogram().DataPoints().RemoveIf(func(dp pmetric.HistogramDataPoint) bool {\n\t\t\treturn hasAttr(dp.Attributes(), op.Label, 
op.LabelValue)\n\t\t})\n\tcase pmetric.MetricTypeExponentialHistogram:\n\t\tmetric.ExponentialHistogram().DataPoints().RemoveIf(func(dp pmetric.ExponentialHistogramDataPoint) bool {\n\t\t\treturn hasAttr(dp.Attributes(), op.Label, op.LabelValue)\n\t\t})\n\tcase pmetric.MetricTypeSummary:\n\t\tmetric.Summary().DataPoints().RemoveIf(func(dp pmetric.SummaryDataPoint) bool {\n\t\t\treturn hasAttr(dp.Attributes(), op.Label, op.LabelValue)\n\t\t})\n\t}\n}", "func (m *MockClient) DeleteRepoLabel(org, repo, label string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteRepoLabel\", org, repo, label)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockRepositoryClient) DeleteRepoLabel(org, repo, label string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteRepoLabel\", org, repo, label)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (forceremove *LibcomposeForceRemoveProperty) Label() string {\n\treturn \"Force remove\"\n}", "func (instance *Host) UnbindLabel(ctx context.Context, labelInstance resources.Label) (ferr fail.Error) {\n\tdefer fail.OnPanic(&ferr)\n\n\tif instance == nil || valid.IsNil(instance) {\n\t\treturn fail.InvalidInstanceError()\n\t}\n\tif ctx == nil {\n\t\treturn fail.InvalidParameterCannotBeNilError(\"ctx\")\n\t}\n\tif labelInstance == nil {\n\t\treturn fail.InvalidParameterCannotBeNilError(\"labelInstance\")\n\t}\n\n\tinstanceID, err := instance.GetID()\n\tif err != nil {\n\t\treturn fail.ConvertError(err)\n\t}\n\n\txerr := instance.Alter(ctx, func(_ data.Clonable, props *serialize.JSONProperties) fail.Error {\n\t\treturn props.Alter(hostproperty.LabelsV1, func(clonable data.Clonable) fail.Error {\n\t\t\thostLabelsV1, ok := clonable.(*propertiesv1.HostLabels)\n\t\t\tif !ok {\n\t\t\t\treturn fail.InconsistentError(\"'*propertiesv1.HostLabels' expected, '%s' provided\", reflect.TypeOf(clonable).String())\n\t\t\t}\n\n\t\t\tlabelID, err := labelInstance.GetID()\n\t\t\tif err != nil {\n\t\t\t\treturn 
fail.ConvertError(err)\n\t\t\t}\n\n\t\t\t// If the host is not bound to this Label, consider it a success\n\t\t\tif _, ok = hostLabelsV1.ByID[labelID]; ok {\n\t\t\t\tdelete(hostLabelsV1.ByID, labelID)\n\t\t\t\tdelete(hostLabelsV1.ByName, labelInstance.GetName())\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t})\n\txerr = debug.InjectPlannedFail(xerr)\n\tif xerr != nil {\n\t\treturn xerr\n\t}\n\n\txerr = labelInstance.UnbindFromHost(ctx, instance)\n\txerr = debug.InjectPlannedFail(xerr)\n\tif xerr != nil {\n\t\treturn xerr\n\t}\n\n\tsvc := instance.Service()\n\txerr = svc.DeleteTags(ctx, abstract.HostResource, instanceID, []string{labelInstance.GetName()})\n\tif xerr != nil {\n\t\treturn xerr\n\t}\n\n\treturn nil\n}", "func (z *zpoolctl) LabelClear(ctx context.Context, device string, force bool) *execute {\n\targs := []string{\"labelclear\"}\n\tif force {\n\t\targs = append(args, \"-f\", device)\n\t}\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}", "func (o *IssueRemoveLabelParams) WithID(id int64) *IssueRemoveLabelParams {\n\to.SetID(id)\n\treturn o\n}", "func (t Task) ApplyLabel(node *cgraph.Node) {\n\tif t.Driver == Driver_GitHub {\n\t\tfor _, label := range t.HasLabel {\n\t\t\ts, _ := strings.CutPrefix(label.String(), t.HasOwner.String()+\"/labels/\")\n\t\t\tfor _, dl := range depvizLabels {\n\t\t\t\tif s == dl.label {\n\t\t\t\t\tnode.SetStyle(cgraph.NodeStyle(dl.style))\n\t\t\t\t\tnode.SetColor(dl.color)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *MockRepositoryClient) RemoveLabelWithContext(ctx context.Context, org, repo string, number int, label string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"RemoveLabelWithContext\", ctx, org, repo, number, label)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (s *DataStore) RemoveSystemRestoreLabel(systemRestore *longhorn.SystemRestore) (*longhorn.SystemRestore, error) {\n\tkey := types.GetSystemRestoreLabelKey()\n\tif _, exist := systemRestore.Labels[key]; !exist {\n\t\treturn 
systemRestore, nil\n\t}\n\n\tdelete(systemRestore.Labels, key)\n\t_, err := s.lhClient.LonghornV1beta2().SystemRestores(s.namespace).Update(context.TODO(), systemRestore, metav1.UpdateOptions{})\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to remove SystemRestore %v label %v\", systemRestore.Name, key)\n\t}\n\n\tlog := logrus.WithFields(logrus.Fields{\n\t\t\"systemRestore\": systemRestore.Name,\n\t\t\"label\": key,\n\t})\n\tlog.Info(\"Removed SystemRestore label\")\n\treturn systemRestore, nil\n}", "func (a *Client) DeleteLabel(params *DeleteLabelParams, opts ...ClientOption) (*DeleteLabelOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewDeleteLabelParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"DeleteLabel\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/delete-label\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &DeleteLabelReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*DeleteLabelOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for DeleteLabel: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (p *Parser) patchLabel(label string) {\n\tif labels, ok := p.backpatch[label]; ok {\n\t\tfor _, data := range labels {\n\t\t\tp.chunk.code[data.offset] = p.labels[label]\n\t\t}\n\t\t// Label no longer needs backpatching since it is declared\n\t\tdelete(p.backpatch, label)\n\t}\n}", "func AddRemoveLabelsCommand(parent *cobra.Command) {\n\tvar opt RemoveLabel\n\n\tcmd := &cobra.Command{\n\t\tUse: \"remove-label\",\n\t\tAliases: []string{\"remove-labels\"},\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\topt.Labels = append(opt.Labels, args...)\n\t\t\treturn xform.RunXform(cmd.Context(), opt.Run)\n\t\t},\n\t}\n\n\tparent.AddCommand(cmd)\n}", "func (o *IssueRemoveLabelParams) WithIndex(index int64) *IssueRemoveLabelParams {\n\to.SetIndex(index)\n\treturn o\n}", "func (c *Client) VolumeLabelsRemove(lr *types.VolumeLabelsRemoveRequest) (reply *types.Volume, err error) {\n\turl := \"/admin/volumelabelsremove\"\n\tif _, err = c.httpPost(url, lr, &reply); err != nil {\n\t\treturn nil, err\n\t}\n\treturn reply, nil\n}", "func (a *Client) RemoveLabelsFromResources(params *RemoveLabelsFromResourcesParams, opts ...ClientOption) (*RemoveLabelsFromResourcesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewRemoveLabelsFromResourcesParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"RemoveLabelsFromResources\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/remove-labels-from-resources\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &RemoveLabelsFromResourcesReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, 
ok := result.(*RemoveLabelsFromResourcesOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for RemoveLabelsFromResources: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (currentLabel *Label) removeNode(removeLabel *Label, g *Graph) uint16 {\n Assert(nilGraph, g != nil)\n Assert(nilLabelStore, g.labelStore != nil)\n Assert(nilLabel, removeLabel != nil)\n \n // make sure we haven't reached the end of the road\n if (currentLabel == nil) { // TODO should this cause an error?\n return uint16(0)\n }\n \n // this is the one we want\n if removeLabel.Id == currentLabel.Id {\n // remove this label\n cl, _ := currentLabel.left(g) // TODO do not ignore error\n cr, _ := currentLabel.right(g) // TODO do not ignore error\n \n if cl == nil && cr == nil { // no descendents\n return uint16(0)\n } else if cl == nil { // one descendent\n return cr.Id\n } else if cr == nil { // one descendent\n return cl.Id\n } else if cl.height() > cr.height() {\n // get the right most node of the left branch\n rLabel := cl.rightmostNode(g)\n rLabel.l = cl.removeNode(rLabel, g)\n rLabel.r = currentLabel.r\n g.labelStore.writes[rLabel.Id] = rLabel\n return rLabel.balance(g)\n } else {\n // get the left most node of the right branch\n lLabel := cr.leftmostNode(g)\n lLabel.r = cl.removeNode(lLabel, g)\n lLabel.l = currentLabel.l\n g.labelStore.writes[lLabel.Id] = lLabel\n return lLabel.balance(g)\n }\n \n // keep looking\n } else if removeLabel.Value(g) < currentLabel.Value(g) {\n left, _ := currentLabel.left(g) // TODO do not ignore error\n l := left.removeNode(removeLabel, g)\n if (l != currentLabel.l) {\n g.labelStore.writes[currentLabel.Id] = currentLabel\n }\n currentLabel.l = l\n } else {\n right, _ := 
currentLabel.right(g) // TODO do not ignore error\n r := right.removeNode(removeLabel, g)\n if (r != currentLabel.r) {\n g.labelStore.writes[currentLabel.Id] = currentLabel\n }\n currentLabel.r = r\n }\n \n return currentLabel.balance(g)\n \n}", "func (m *MockClient) RemoveLabelWithContext(ctx context.Context, org, repo string, number int, label string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"RemoveLabelWithContext\", ctx, org, repo, number, label)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (g *Generator) ClearConfigLabels() {\n\tg.image.Config.Labels = map[string]string{}\n}", "func (c *client) UpdateRepoLabel(org, repo, label, newName, description, color string) error {\n\tdurationLogger := c.log(\"UpdateRepoLabel\", org, repo, label, newName, color)\n\tdefer durationLogger()\n\n\t_, err := c.request(&request{\n\t\tmethod: http.MethodPatch,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/labels/%s\", org, repo, label),\n\t\taccept: \"application/vnd.github.symmetra-preview+json\", // allow the description field -- https://developer.github.com/changes/2018-02-22-label-description-search-preview/\n\t\torg: org,\n\t\trequestBody: Label{Name: newName, Description: description, Color: color},\n\t\texitCodes: []int{200},\n\t}, nil)\n\treturn err\n}", "func ClearLabels() {\n\tclearLabels()\n}", "func (instance *Host) ResetLabel(ctx context.Context, labelInstance resources.Label) (ferr fail.Error) {\n\tdefer fail.OnPanic(&ferr)\n\n\tif instance == nil || valid.IsNil(instance) {\n\t\treturn fail.InvalidInstanceError()\n\t}\n\tif ctx == nil {\n\t\treturn fail.InvalidParameterCannotBeNilError(\"ctx\")\n\t}\n\tif labelInstance == nil {\n\t\treturn fail.InvalidParameterCannotBeNilError(\"tag\")\n\t}\n\n\tdefaultValue, xerr := labelInstance.DefaultValue(ctx)\n\tif xerr != nil {\n\t\treturn xerr\n\t}\n\n\treturn instance.UpdateLabel(ctx, labelInstance, defaultValue)\n}", "func (o *IssueRemoveLabelParams) WithRepo(repo string) *IssueRemoveLabelParams 
{\n\to.SetRepo(repo)\n\treturn o\n}", "func (c *evictionClient) LabelPod(podInfo *types.PodInfo, priority string, action string) error {\n\tif podInfo.Name == \"\" {\n\t\treturn fmt.Errorf(\"pod name should not be empty\")\n\t}\n\toldPod, err := c.client.CoreV1().Pods(podInfo.Namespace).Get(podInfo.Name, metav1.GetOptions{})\n\tif err != nil {\n\t\tlog.Errorf(\"get pod %s error\", podInfo.Name)\n\t\treturn err\n\t}\n\toldData, err := json.Marshal(oldPod)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal old node for node %v : %v\", c.nodeName, err)\n\t}\n\tnewPod := oldPod.DeepCopy()\n\n\tif newPod.Labels == nil {\n\t\tlog.Infof(\"there is no label on this pod: %v, create it\", podInfo.Name)\n\t\tnewPod.Labels = make(map[string]string)\n\t}\n\tif action == \"Add\" {\n\t\tnewPod.Labels[priority] = \"true\"\n\t} else if action == \"Delete\" {\n\t\tdelete(newPod.Labels, priority)\n\t}\n\n\tnewData, err := json.Marshal(newPod)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal new pod %v : %v\", podInfo.Name, err)\n\t}\n\n\tpatchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Pod{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create patch for pod %v\", podInfo.Name)\n\t}\n\t_, err = c.client.CoreV1().Pods(oldPod.Namespace).Patch(oldPod.Name, k8stypes.StrategicMergePatchType, patchBytes)\n\n\tlog.Infof(\"Label pod: %v, action:%v\", podInfo.Name, action)\n\treturn err\n}", "func RewriteEncryptedDataRemoveLabel(\n\tctx context.Context,\n\tlog logr.Logger,\n\truntimeClient client.Client,\n\ttargetClient client.Client,\n\tkubeAPIServerNamespace string,\n\tnamePrefix string,\n\tgvks ...schema.GroupVersionKind,\n) error {\n\tif err := rewriteEncryptedData(\n\t\tctx,\n\t\tlog,\n\t\ttargetClient,\n\t\tutils.MustNewRequirement(labelKeyRotationKeyName, selection.Exists),\n\t\tfunc(objectMeta *metav1.ObjectMeta) {\n\t\t\tdelete(objectMeta.Labels, labelKeyRotationKeyName)\n\t\t},\n\t\tgvks...,\n\t); err != nil {\n\t\treturn 
err\n\t}\n\n\treturn PatchKubeAPIServerDeploymentMeta(ctx, runtimeClient, kubeAPIServerNamespace, namePrefix, func(meta *metav1.PartialObjectMetadata) {\n\t\tdelete(meta.Annotations, AnnotationKeyEtcdSnapshotted)\n\t})\n}", "func (l label) GetLabel() string {\n\treturn \"\"\n}", "func (mr *MockRepositoryClientMockRecorder) DeleteRepoLabel(org, repo, label interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteRepoLabel\", reflect.TypeOf((*MockRepositoryClient)(nil).DeleteRepoLabel), org, repo, label)\n}", "func (o *IssueRemoveLabelParams) WithOwner(owner string) *IssueRemoveLabelParams {\n\to.SetOwner(owner)\n\treturn o\n}", "func (r *Release) label(storageBackend string, labels ...string) {\n\tif len(labels) == 0 {\n\t\treturn\n\t}\n\tif r.Enabled.Value {\n\n\t\targs := []string{\"label\", \"--overwrite\", storageBackend, \"-n\", r.Namespace, \"-l\", \"owner=helm,name=\" + r.Name}\n\t\targs = append(args, labels...)\n\t\tcmd := kubectl(args, \"Applying Helmsman labels to [ \"+r.Name+\" ] release\")\n\n\t\tif _, err := cmd.Exec(); err != nil {\n\t\t\tlog.Fatal(err.Error())\n\t\t}\n\t}\n}", "func (config *Config) Unset(label string) error {\n\tif !contains(AvailableLabels, label) {\n\t\treturn fmt.Errorf(\"%s: unknown variable name\", label)\n\t}\n\tdelete(config.values, label)\n\treturn nil\n}", "func (removevolumes *LibcomposeRemoveVolumesProperty) Label() string {\n\treturn \"Remove volumes\"\n}", "func (b *Bot) Label(ctx context.Context) error {\n\tfiles, err := b.c.GitHub.ListFiles(ctx,\n\t\tb.c.Environment.Organization,\n\t\tb.c.Environment.Repository,\n\t\tb.c.Environment.Number)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\n\tlabels, err := b.labels(ctx, files)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif len(labels) == 0 {\n\t\treturn nil\n\t}\n\n\terr = 
b.c.GitHub.AddLabels(ctx,\n\t\tb.c.Environment.Organization,\n\t\tb.c.Environment.Repository,\n\t\tb.c.Environment.Number,\n\t\tlabels)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\n\treturn nil\n}", "func (o PublishingResponseOutput) GithubLabel() pulumi.StringOutput {\n\treturn o.ApplyT(func(v PublishingResponse) string { return v.GithubLabel }).(pulumi.StringOutput)\n}", "func (mr *MockClientMockRecorder) DeleteRepoLabel(org, repo, label interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteRepoLabel\", reflect.TypeOf((*MockClient)(nil).DeleteRepoLabel), org, repo, label)\n}", "func BuildRemoveLabelFilter(predicate func(key string) bool) (yaml.Filter, error) {\n\tfieldPaths, err := xform.ParseFieldPaths(\n\t\t[]string{\n\t\t\t\"metadata.labels\",\n\t\t\t\"spec.selector\",\n\t\t\t\"spec.selector.matchLabels\",\n\t\t\t\"spec.template.metadata.labels\",\n\t\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &xform.FieldClearer{\n\t\tFieldPaths: fieldPaths,\n\t\tPredicate: predicate,\n\t}, nil\n}", "func labelIntersection(metric1, metric2 metric.Metric) metric.Metric {\n\tfor label, value := range metric1.Metric {\n\t\tif metric2.Metric[label] != value {\n\t\t\tmetric1.Del(label)\n\t\t}\n\t}\n\treturn metric1\n}", "func (o PublishingOutput) GithubLabel() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v Publishing) *string { return v.GithubLabel }).(pulumi.StringPtrOutput)\n}", "func rcDefLabel(p *TCompiler, code *TCode) (*value.Value, error) {\n\tp.moveNext()\n\treturn nil, nil\n}", "func TestRemoveServiceLabel(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tpoolA.Spec.ServiceSelector = &slim_meta_v1.LabelSelector{\n\t\tMatchLabels: map[string]string{\n\t\t\t\"color\": \"blue\",\n\t\t},\n\t}\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tsvc1 := 
&slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-a\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceAUID,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"color\": \"blue\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t},\n\t}\n\n\tfixture.coreCS.Tracker().Add(\n\t\tsvc1,\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive exactly zero ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tsvc1 = svc1.DeepCopy()\n\tsvc1.Labels = map[string]string{\n\t\t\"color\": \"green\",\n\t}\n\n\t_, err := fixture.svcClient.Services(svc1.Namespace).Update(context.Background(), svc1, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func (removeorphans *LibcomposeRemoveOrphansProperty) Label() string {\n\treturn \"Remove orphans\"\n}", "func (o PublishingPtrOutput) GithubLabel() pulumi.StringPtrOutput {\n\treturn 
o.ApplyT(func(v *Publishing) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.GithubLabel\n\t}).(pulumi.StringPtrOutput)\n}", "func (mr *MockRepositoryClientMockRecorder) RemoveLabelWithContext(ctx, org, repo, number, label interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"RemoveLabelWithContext\", reflect.TypeOf((*MockRepositoryClient)(nil).RemoveLabelWithContext), ctx, org, repo, number, label)\n}", "func labelsWithPrefix(pr *gogithub.PullRequest, prefix string) []string {\n\tlabels := []string{}\n\tfor _, label := range pr.Labels {\n\t\tif strings.HasPrefix(*label.Name, prefix) {\n\t\t\tlabels = append(labels, strings.TrimPrefix(*label.Name, prefix+\"/\"))\n\t\t}\n\t}\n\treturn labels\n}", "func (l Label) Label() string { return string(l) }", "func (p *plugin) Label(instance instance.ID, labels map[string]string) error {\n\treturn fmt.Errorf(\"VMware vSphere VM label updates are not implemented yet\")\n}", "func HasLabel(label string, issueLabels []*github.Label) bool {\n\tfor _, l := range issueLabels {\n\t\tif strings.ToLower(l.GetName()) == strings.ToLower(label) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (w *Wallet) Remove(label string) error {\n\treturn w.store.Remove(label)\n}", "func runlabelCmd(c *cli.Context) error {\n\tvar (\n\t\timageName string\n\t\tstdErr, stdOut io.Writer\n\t\tstdIn io.Reader\n\t\textraArgs []string\n\t)\n\n\t// Evil images could trick into recursively executing the runlabel\n\t// command. 
Avoid this by setting the \"PODMAN_RUNLABEL_NESTED\" env\n\t// variable when executing a label first.\n\tnested := os.Getenv(\"PODMAN_RUNLABEL_NESTED\")\n\tif nested == \"1\" {\n\t\treturn fmt.Errorf(\"nested runlabel calls: runlabels cannot execute the runlabel command\")\n\t}\n\n\topts := make(map[string]string)\n\truntime, err := libpodruntime.GetRuntime(c)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"could not get runtime\")\n\t}\n\tdefer runtime.Shutdown(false)\n\n\targs := c.Args()\n\tif len(args) < 2 {\n\t\tlogrus.Errorf(\"the runlabel command requires at least 2 arguments: LABEL IMAGE\")\n\t\treturn nil\n\t}\n\tif err := validateFlags(c, runlabelFlags); err != nil {\n\t\treturn err\n\t}\n\tif c.Bool(\"display\") && c.Bool(\"quiet\") {\n\t\treturn errors.Errorf(\"the display and quiet flags cannot be used together.\")\n\t}\n\n\tif len(args) > 2 {\n\t\textraArgs = args[2:]\n\t}\n\tpull := c.Bool(\"pull\")\n\tlabel := args[0]\n\n\trunlabelImage := args[1]\n\n\tif c.IsSet(\"opt1\") {\n\t\topts[\"opt1\"] = c.String(\"opt1\")\n\t}\n\tif c.IsSet(\"opt2\") {\n\t\topts[\"opt2\"] = c.String(\"opt2\")\n\t}\n\tif c.IsSet(\"opt3\") {\n\t\topts[\"opt3\"] = c.String(\"opt3\")\n\t}\n\n\tctx := getContext()\n\n\tstdErr = os.Stderr\n\tstdOut = os.Stdout\n\tstdIn = os.Stdin\n\n\tif c.Bool(\"quiet\") {\n\t\tstdErr = nil\n\t\tstdOut = nil\n\t\tstdIn = nil\n\t}\n\n\tdockerRegistryOptions := image.DockerRegistryOptions{\n\t\tDockerCertPath: c.String(\"cert-dir\"),\n\t}\n\tif c.IsSet(\"tls-verify\") {\n\t\tdockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT(\"tls-verify\"))\n\t}\n\n\tauthfile := getAuthFile(c.String(\"authfile\"))\n\trunLabel, imageName, err := shared.GetRunlabel(label, runlabelImage, ctx, runtime, pull, c.String(\"creds\"), dockerRegistryOptions, authfile, c.String(\"signature-policy\"), stdOut)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif runLabel == \"\" {\n\t\treturn nil\n\t}\n\n\tcmd, env, err := 
shared.GenerateRunlabelCommand(runLabel, imageName, c.String(\"name\"), opts, extraArgs)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !c.Bool(\"quiet\") {\n\t\tfmt.Printf(\"Command: %s\\n\", strings.Join(cmd, \" \"))\n\t\tif c.Bool(\"display\") {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn utils.ExecCmdWithStdStreams(stdIn, stdOut, stdErr, env, cmd[0], cmd[1:]...)\n}", "func (pull *LibcomposePullProperty) Label() string {\n\treturn \"Pull\"\n}", "func excludesNonStaleableLabels(issue *github.Issue, config Configuration) bool {\n\tif len(issue.Labels) == 0 {\n\t\treturn true\n\t}\n\n\tfor _, exemptLabel := range config.ExemptLabels {\n\t\tfor _, issueLabel := range issue.Labels {\n\t\t\tif *issueLabel.Name == exemptLabel {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\n\treturn true\n}", "func addLabelsToPullRequest(prInfo *PullRequestInfo, labels []string) error {\n\tif prInfo == nil {\n\t\treturn errors.New(\"pull request to label cannot be nil\")\n\t}\n\tpr := prInfo.PullRequest\n\tprovider := prInfo.GitProvider\n\n\tif len(labels) > 0 {\n\t\tnumber := *pr.Number\n\t\tvar err error\n\t\terr = provider.AddLabelsToIssue(pr.Owner, pr.Repo, number, labels)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Logger().Infof(\"Added label %s to Pull Request %s\", util.ColorInfo(strings.Join(labels, \", \")), pr.URL)\n\t}\n\treturn nil\n}", "func fixLabels(f *File, w *Rewriter) {\n\tjoinLabel := func(p *Expr) {\n\t\tadd, ok := (*p).(*BinaryExpr)\n\t\tif !ok || add.Op != \"+\" {\n\t\t\treturn\n\t\t}\n\t\tstr1, ok := add.X.(*StringExpr)\n\t\tif !ok || !strings.HasPrefix(str1.Value, \"//\") || strings.Contains(str1.Value, \" \") {\n\t\t\treturn\n\t\t}\n\t\tstr2, ok := add.Y.(*StringExpr)\n\t\tif !ok || strings.Contains(str2.Value, \" \") {\n\t\t\treturn\n\t\t}\n\t\tstr1.Value += str2.Value\n\n\t\t// Deleting nodes add and str2.\n\t\t// Merge comments from add, str1, and str2 and save in str1.\n\t\tcom1 := add.Comment()\n\t\tcom2 := str1.Comment()\n\t\tcom3 := 
str2.Comment()\n\t\tcom1.Before = append(com1.Before, com2.Before...)\n\t\tcom1.Before = append(com1.Before, com3.Before...)\n\t\tcom1.Suffix = append(com1.Suffix, com2.Suffix...)\n\t\tcom1.Suffix = append(com1.Suffix, com3.Suffix...)\n\t\t*str1.Comment() = *com1\n\n\t\t*p = str1\n\t}\n\n\tlabelPrefix := \"//\"\n\tif w.StripLabelLeadingSlashes {\n\t\tlabelPrefix = \"\"\n\t}\n\t// labelRE matches label strings, e.g. @r//x/y/z:abc\n\t// where $1 is @r//x/y/z, $2 is @r//, $3 is r, $4 is z, $5 is abc.\n\tlabelRE := regexp.MustCompile(`^(((?:@(\\w+))?//|` + labelPrefix + `)(?:.+/)?([^:]*))(?::([^:]+))?$`)\n\n\tshortenLabel := func(v Expr) {\n\t\tstr, ok := v.(*StringExpr)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\tif w.StripLabelLeadingSlashes && strings.HasPrefix(str.Value, \"//\") {\n\t\t\tif filepath.Dir(f.Path) == \".\" || !strings.HasPrefix(str.Value, \"//:\") {\n\t\t\t\tstr.Value = str.Value[2:]\n\t\t\t}\n\t\t}\n\t\tif w.ShortenAbsoluteLabelsToRelative {\n\t\t\tthisPackage := labelPrefix + filepath.Dir(f.Path)\n\t\t\t// filepath.Dir on Windows uses backslashes as separators, while labels always have slashes.\n\t\t\tif filepath.Separator != '/' {\n\t\t\t\tthisPackage = strings.Replace(thisPackage, string(filepath.Separator), \"/\", -1)\n\t\t\t}\n\n\t\t\tif str.Value == thisPackage {\n\t\t\t\tstr.Value = \":\" + path.Base(str.Value)\n\t\t\t} else if strings.HasPrefix(str.Value, thisPackage+\":\") {\n\t\t\t\tstr.Value = str.Value[len(thisPackage):]\n\t\t\t}\n\t\t}\n\n\t\tm := labelRE.FindStringSubmatch(str.Value)\n\t\tif m == nil {\n\t\t\treturn\n\t\t}\n\t\tif m[4] != \"\" && m[4] == m[5] { // e.g. //foo:foo\n\t\t\tstr.Value = m[1]\n\t\t} else if m[3] != \"\" && m[4] == \"\" && m[3] == m[5] { // e.g. @foo//:foo\n\t\t\tstr.Value = \"@\" + m[3]\n\t\t}\n\t}\n\n\t// Join and shorten labels within a container of labels (which can be a single\n\t// label, e.g. 
a single string expression or a concatenation of them).\n\t// Gracefully finish if the argument is of a different type.\n\tfixLabelsWithinAContainer := func(e *Expr) {\n\t\tif list, ok := (*e).(*ListExpr); ok {\n\t\t\tfor i := range list.List {\n\t\t\t\tif leaveAlone1(list.List[i]) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tjoinLabel(&list.List[i])\n\t\t\t\tshortenLabel(list.List[i])\n\t\t\t}\n\t\t}\n\t\tif set, ok := (*e).(*SetExpr); ok {\n\t\t\tfor i := range set.List {\n\t\t\t\tif leaveAlone1(set.List[i]) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tjoinLabel(&set.List[i])\n\t\t\t\tshortenLabel(set.List[i])\n\t\t\t}\n\t\t} else {\n\t\t\tjoinLabel(e)\n\t\t\tshortenLabel(*e)\n\t\t}\n\t}\n\n\tWalk(f, func(v Expr, stk []Expr) {\n\t\tswitch v := v.(type) {\n\t\tcase *CallExpr:\n\t\t\tif leaveAlone(stk, v) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor i := range v.List {\n\t\t\t\tif leaveAlone1(v.List[i]) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tas, ok := v.List[i].(*AssignExpr)\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tkey, ok := as.LHS.(*Ident)\n\t\t\t\tif !ok || !w.IsLabelArg[key.Name] || w.LabelDenyList[callName(v)+\".\"+key.Name] {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif leaveAlone1(as.RHS) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tfindAndModifyStrings(&as.RHS, fixLabelsWithinAContainer)\n\t\t\t}\n\t\t}\n\t})\n}", "func (b *profileBuilder) pbLabel(tag int, key, str string, num int64) {\n\tstart := b.pb.startMessage()\n\tb.pb.int64Opt(tagLabel_Key, b.stringIndex(key))\n\tb.pb.int64Opt(tagLabel_Str, b.stringIndex(str))\n\tb.pb.int64Opt(tagLabel_Num, num)\n\tb.pb.endMessage(tag, start)\n}", "func (p *PullRequestBranch) GetLabel() string {\n\tif p == nil || p.Label == nil {\n\t\treturn \"\"\n\t}\n\treturn *p.Label\n}", "func (gauo *GithubAssetUpdateOne) SetNillableLabel(s *string) *GithubAssetUpdateOne {\n\tif s != nil {\n\t\tgauo.SetLabel(*s)\n\t}\n\treturn gauo\n}", "func HasLabel(i *github.Issue, label string) bool {\n\tfor _, l := range i.Labels 
{\n\t\tif *l.Name == label {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func NewIssueRemoveLabelParamsWithTimeout(timeout time.Duration) *IssueRemoveLabelParams {\n\tvar ()\n\treturn &IssueRemoveLabelParams{\n\n\t\ttimeout: timeout,\n\t}\n}" ]
[ "0.7710544", "0.7710544", "0.7190297", "0.7131391", "0.69650364", "0.6953825", "0.68455964", "0.66010845", "0.65678805", "0.6539643", "0.63907325", "0.63030523", "0.6289586", "0.6282687", "0.62680924", "0.6208816", "0.620009", "0.61222297", "0.6066965", "0.6064077", "0.6008396", "0.5997355", "0.59954995", "0.59898204", "0.5981833", "0.5887353", "0.5884995", "0.58422863", "0.58316946", "0.5827726", "0.5777512", "0.5775239", "0.57645667", "0.57629985", "0.5757127", "0.57317656", "0.56739324", "0.5665643", "0.5648563", "0.56470597", "0.5627935", "0.55846626", "0.55807805", "0.55793524", "0.5553791", "0.5548886", "0.5544726", "0.5523281", "0.5510326", "0.5489031", "0.5475919", "0.54672134", "0.54635054", "0.5414211", "0.5363655", "0.53558826", "0.5336151", "0.5326367", "0.5325456", "0.5305143", "0.5296521", "0.5260061", "0.52396655", "0.52364874", "0.5231092", "0.5222493", "0.52204925", "0.5203173", "0.5201982", "0.51509845", "0.5140571", "0.51173747", "0.511719", "0.5114513", "0.5096794", "0.50903434", "0.5050774", "0.5041642", "0.5039977", "0.5030515", "0.49881816", "0.49850976", "0.49774644", "0.49482003", "0.49438396", "0.49414015", "0.49325648", "0.49277368", "0.49270138", "0.49170998", "0.49167475", "0.49081", "0.49055392", "0.4892244", "0.4890421", "0.4867597", "0.4864349", "0.4841759", "0.48237693", "0.48236793" ]
0.7706561
2
GetIssueLabels gets the current labels on the specified PR or issue
func (fc *fakeClient) GetIssueLabels(owner, repo string, number int) ([]github.Label, error) { var la []github.Label for _, l := range fc.labels { la = append(la, github.Label{Name: l}) } return la, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *client) GetIssueLabels(org, repo string, number int) ([]Label, error) {\n\tdurationLogger := c.log(\"GetIssueLabels\", org, repo, number)\n\tdefer durationLogger()\n\n\treturn c.getLabels(fmt.Sprintf(\"/repos/%s/%s/issues/%d/labels\", org, repo, number), org)\n}", "func (issue *Issue) GetLabels() []string {\n\treturn issue.Fields.Labels\n}", "func (m *MockRerunClient) GetIssueLabels(org, repo string, number int) ([]github.Label, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetIssueLabels\", org, repo, number)\n\tret0, _ := ret[0].([]github.Label)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockClient) GetIssueLabels(org, repo string, number int) ([]github.Label, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetIssueLabels\", org, repo, number)\n\tret0, _ := ret[0].([]github.Label)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (i *IssueRequest) GetLabels() []string {\n\tif i == nil || i.Labels == nil {\n\t\treturn nil\n\t}\n\treturn *i.Labels\n}", "func (m *MockIssueClient) GetIssueLabels(org, repo string, number int) ([]github.Label, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetIssueLabels\", org, repo, number)\n\tret0, _ := ret[0].([]github.Label)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (a ProblemAdapter) GetLabels() map[string]string {\n\treturn nil\n}", "func GetLabels(repositoryURL string, token string) ([]Label, error) {\n\tURL := fmt.Sprintf(\"%v/labels\", repositoryURL)\n\n\trequest, err := http.NewRequest(\"GET\", URL, nil)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Couldn't make a new request in GetLabel: %v\", err)\n\t}\n\n\trequest.Header.Add(\"Authorization\", token)\n\trequest.Header.Add(\"Accept\", \"application/vnd.github.v3+json\")\n\n\tresponse, err := http.DefaultClient.Do(request)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Response error in GetLabel: %v\", err)\n\t}\n\n\tif response.Body != nil {\n\t\tdefer 
response.Body.Close()\n\t}\n\n\tbody, err := ioutil.ReadAll(response.Body)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"couldn't convert response body to []byte: %v\", err)\n\t}\n\n\tvar labels []Label\n\n\terr = json.Unmarshal(body, &labels)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"problem unmarshalling the response body: %v\", err)\n\t}\n\n\treturn labels, nil\n}", "func NewIssueGetLabelParams() *IssueGetLabelParams {\n\tvar ()\n\treturn &IssueGetLabelParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func Labels(ctx context.Context, client *github.Client, settings *Settings) ([]string, error) {\n\tlabels, _, err := client.Issues.ListLabels(ctx, settings.BaseAccount, settings.BaseRepo, &github.ListOptions{PerPage: 100})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsort.Slice(labels, func(i, j int) bool {\n\t\tswitch {\n\t\tcase labels[i] == nil:\n\t\t\treturn true\n\t\tcase labels[j] == nil:\n\t\t\treturn false\n\t\tdefault:\n\t\t\treturn *labels[i].Name < *labels[j].Name\n\t\t}\n\t})\n\tvar o []string\n\tfor _, l := range labels {\n\t\tif l.Name != nil {\n\t\t\to = append(o, *l.Name)\n\t\t}\n\t}\n\treturn o, nil\n}", "func (c *client) GetRepoLabels(org, repo string) ([]Label, error) {\n\tdurationLogger := c.log(\"GetRepoLabels\", org, repo)\n\tdefer durationLogger()\n\n\treturn c.getLabels(fmt.Sprintf(\"/repos/%s/%s/labels\", org, repo), org)\n}", "func jiraLabels(j *v1alpha1.Jira) map[string]string {\n\tlabels := defaultLabels(j)\n\tfor key, val := range j.ObjectMeta.Labels {\n\t\tlabels[key] = val\n\t}\n\treturn labels\n}", "func (c *client) getLabels(path, org string) ([]Label, error) {\n\tvar labels []Label\n\tif c.fake {\n\t\treturn labels, nil\n\t}\n\terr := c.readPaginatedResults(\n\t\tpath,\n\t\t\"application/vnd.github.symmetra-preview+json\", // allow the description field -- https://developer.github.com/changes/2018-02-22-label-description-search-preview/\n\t\torg,\n\t\tfunc() interface{} {\n\t\t\treturn 
&[]Label{}\n\t\t},\n\t\tfunc(obj interface{}) {\n\t\t\tlabels = append(labels, *(obj.(*[]Label))...)\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn labels, nil\n}", "func GetLabels(component, name, identifier string) map[string]string {\n\t// see https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels\n\treturn map[string]string{\n\t\t\"app.kubernetes.io/managed-by\": \"splunk-operator\",\n\t\t\"app.kubernetes.io/component\": component,\n\t\t\"app.kubernetes.io/name\": name,\n\t\t\"app.kubernetes.io/part-of\": fmt.Sprintf(\"splunk-%s-%s\", identifier, component),\n\t\t\"app.kubernetes.io/instance\": fmt.Sprintf(\"splunk-%s-%s\", identifier, name),\n\t}\n}", "func (mr *MockRerunClientMockRecorder) GetIssueLabels(org, repo, number interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetIssueLabels\", reflect.TypeOf((*MockRerunClient)(nil).GetIssueLabels), org, repo, number)\n}", "func (mr *MockIssueClientMockRecorder) GetIssueLabels(org, repo, number interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetIssueLabels\", reflect.TypeOf((*MockIssueClient)(nil).GetIssueLabels), org, repo, number)\n}", "func getSigLabelsForIssue(issue Issue) []string {\n\tvar sigs []string = nil\n\n\tvar sizeFactor float64 = 400\n\tissueSize := float64(len(issue.Title) + len(issue.Body))\n\tsizeScaling := 0.75 * issueSize / sizeFactor\n\tif sizeScaling < 1 { // Don't weirdly scale tiny issues\n\t\tsizeScaling = 1\n\t}\n\tfmt.Println(\"size scaling\", sizeScaling)\n\n\tfor sigName, scoreData := range getScoresForSigs(issue) {\n\t\tfmt.Println(\"Debug\", sigName, scoreData.scoreItems)\n\t\tif float64(scoreData.scoreTotal) >= scoreThreshhold*sizeScaling {\n\t\t\tsigs = append(sigs, sigName)\n\t\t}\n\t}\n\n\treturn sigs\n}", "func (mr *MockClientMockRecorder) GetIssueLabels(org, repo, number interface{}) *gomock.Call 
{\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetIssueLabels\", reflect.TypeOf((*MockClient)(nil).GetIssueLabels), org, repo, number)\n}", "func GetLabels(component constants.ComponentName, cr_name string) map[string]string {\n\treturn generateComponentLabels(component, cr_name)\n}", "func (a *Awaitility) GetMetricLabels(t *testing.T, family string) []map[string]*string {\n\tlabels, err := metrics.GetMetricLabels(a.RestConfig, a.MetricsURL, family)\n\trequire.NoError(t, err)\n\treturn labels\n}", "func (pc *PodCache) GetLabels(key types.UID) labels.Set {\n\treturn pc.cachedPods[key].LabelSet\n}", "func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {\n\tcfg, err := getAPIConfig(sdc, baseDir)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot get API config: %w\", err)\n\t}\n\tms := getServiceLabels(cfg)\n\treturn ms, nil\n}", "func GetLabels() []string {\n\tvar res []string\n\tlabelsURL := \"https://raw.githubusercontent.com/googlecreativelab/quickdraw-dataset/master/categories.txt\"\n\tresp, err := http.Get(labelsURL)\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tfmt.Printf(\"Unable to fetch labels\")\n\t}\n\tres = strings.Split(string(body), \"\\n\")\n\treturn res\n}", "func NewIssueGetLabelParamsWithHTTPClient(client *http.Client) *IssueGetLabelParams {\n\tvar ()\n\treturn &IssueGetLabelParams{\n\t\tHTTPClient: client,\n\t}\n}", "func (m *Group) GetAssignedLabels()([]AssignedLabelable) {\n return m.assignedLabels\n}", "func (sm SchedulerModel) getLabels(group string, instance InstanceID) map[string]string {\n\tlabels := map[string]string{\n\t\t\"group\": group,\n\t\t\"instance\": string(instance),\n\t}\n\n\treturn labels\n}", "func (m *MockClient) GetRepoLabels(org, repo string) ([]github.Label, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetRepoLabels\", org, repo)\n\tret0, _ := ret[0].([]github.Label)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", 
"func (i *Issue) GetLabelsURL() string {\n\tif i == nil || i.LabelsURL == nil {\n\t\treturn \"\"\n\t}\n\treturn *i.LabelsURL\n}", "func (o *VirtualizationIweVirtualMachine) GetLabels() []InfraMetaData {\n\tif o == nil {\n\t\tvar ret []InfraMetaData\n\t\treturn ret\n\t}\n\treturn o.Labels\n}", "func (i *IssueEvent) GetLabel() *Label {\n\tif i == nil {\n\t\treturn nil\n\t}\n\treturn i.Label\n}", "func (d *RetryDownloader) GetLabels() ([]*Label, error) {\n\tvar (\n\t\tlabels []*Label\n\t\terr error\n\t)\n\n\terr = d.retry(func() error {\n\t\tlabels, err = d.Downloader.GetLabels()\n\t\treturn err\n\t})\n\n\treturn labels, err\n}", "func GetIssueType(issue *github.Issue) string {\n\tfor _, l := range issue.Labels {\n\t\tswitch l.GetName() {\n\t\tcase enhancementTag:\n\t\t\treturn enhancementDisplayName\n\t\tcase bugTag:\n\t\t\treturn bugDisplayName\n\t\tdefault:\n\t\t\treturn closedDisplayName\n\t\t}\n\t}\n\treturn closedDisplayName\n}", "func (wt *WorkspaceTemplateFilter) GetLabels() []*Label {\n\treturn wt.Labels\n}", "func (m *HistogramDataPoint) GetLabels() []v11.StringKeyValue {\n\tif m != nil {\n\t\treturn m.Labels\n\t}\n\treturn nil\n}", "func (a GetSLITriggeredAdapter) GetLabels() map[string]string {\n\treturn a.event.Labels\n}", "func (a *Client) GetLabels(params *GetLabelsParams, opts ...ClientOption) (*GetLabelsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetLabelsParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"GetLabels\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/get-labels\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &GetLabelsReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\tsuccess, ok := result.(*GetLabelsOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for GetLabels: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (o *TemplateSummaryResources) GetLabels() []TemplateSummaryLabel {\n\tif o == nil {\n\t\tvar ret []TemplateSummaryLabel\n\t\treturn ret\n\t}\n\n\treturn o.Labels\n}", "func fetchAllIssuesByLabel(client *github.Client, owner, name, state string, labels []string) []*github.Issue {\n\tpageIndex := 1\n\trepoOptions := github.IssueListByRepoOptions{\n\t\tListOptions: github.ListOptions{\n\t\t\tPage: pageIndex,\n\t\t\tPerPage: 100,\n\t\t},\n\t\tState: state,\n\t\tLabels: labels,\n\t}\n\tvar allIssues []*github.Issue\n\tfor {\n\t\tissues, _, err :=\n\t\t\tclient.Issues.ListByRepo(context.Background(), owner, name, &repoOptions)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tallIssues = append(allIssues, issues...)\n\t\trepoOptions.Page++\n\t\tif len(issues) != 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn allIssues\n}", "func (lbl *LabelService) GetLabelList() (labels []*types.Label, err error) {\n\tlog.Debug(\"GetLabelList\")\n\n\tdata, status, err := lbl.concertoService.Get(\"/labels\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = utils.CheckStandardStatus(status, data); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = json.Unmarshal(data, &labels); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// exclude internal labels (with a Namespace defined)\n\tvar filteredLabels []*types.Label\n\tfor _, label := range labels {\n\t\tif label.Namespace == \"\" {\n\t\t\tfilteredLabels = append(filteredLabels, label)\n\t\t}\n\t}\n\n\treturn filteredLabels, nil\n}", "func (m *MockRepositoryClient) GetRepoLabels(org, repo string) 
([]github.Label, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetRepoLabels\", org, repo)\n\tret0, _ := ret[0].([]github.Label)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (i *IssuesEvent) GetLabel() *Label {\n\tif i == nil {\n\t\treturn nil\n\t}\n\treturn i.Label\n}", "func (o *Channel) GetLabels() []Label {\n\tif o == nil || o.Labels == nil {\n\t\tvar ret []Label\n\t\treturn ret\n\t}\n\treturn *o.Labels\n}", "func GetLabels() ([]byte, error) {\n\tlog.Trace.Printf(\"Getting all the labels.\")\n\tvar ret []byte\n\tvar err error\n\n\tlabels := make([]types.Label, 0)\n\tif err = store.DB.Find(&labels).Error; err == nil {\n\t\tlog.Trace.Printf(\"Successfully got the labels: %+v\", labels)\n\t\tret, err = json.Marshal(labels)\n\t}\n\n\tif err != nil {\n\t\tlog.Warning.Printf(err.Error())\n\t}\n\n\treturn ret, err\n}", "func getTagLabels() ([]string, []string) {\n\tdefer trace()()\n\tvar ntags []string\n\tvar vtags []string\n\tif conf.UCMConfig.AwsTagsToLabels.Enabled {\n\t\ttags := processTagLabelMap(labels, conf.UCMConfig.MetadataReporting.Attributes)\n\t\tfor k, v := range tags {\n\t\t\tntags = append(ntags, strings.ToLower(k))\n\t\t\tvtags = append(vtags, v)\n\t\t}\n\t}\n\treturn ntags, vtags\n}", "func (o ProjectOutput) Labels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Project) pulumi.StringMapOutput { return v.Labels }).(pulumi.StringMapOutput)\n}", "func (o *IssueGetLabelParams) WithHTTPClient(client *http.Client) *IssueGetLabelParams {\n\to.SetHTTPClient(client)\n\treturn o\n}", "func GetLabelsForComputeInstance(t *testing.T, projectID string, zone string, instanceID string) map[string]string {\n\tlabels, err := GetLabelsForComputeInstanceE(t, projectID, zone, instanceID)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn labels\n}", "func filterOutLabels(issues []*gitlab.Issue, exLabels string) []*gitlab.Issue {\n\tif exLabels == \"\" {\n\t\treturn issues\n\t}\n\tlabels := map[string]struct{}{}\n\tfor _, l := range 
strings.Split(exLabels, \",\") {\n\t\tlabels[l] = struct{}{}\n\t}\n\tfor i := 0; i < len(issues); {\n\t\tissue := issues[i]\n\t\tskip := false\n\t\tfor _, l := range issue.Labels {\n\t\t\tif _, ok := labels[l]; ok {\n\t\t\t\tskip = true\n\t\t\t}\n\t\t}\n\t\tif skip {\n\t\t\tissues[i] = issues[len(issues)-1]\n\t\t\tissues = issues[0 : len(issues)-1]\n\t\t} else {\n\t\t\ti++\n\t\t}\n\t}\n\treturn issues\n}", "func (node *Node) GetLabels() *[]string {\n\n\tres := make([]string, 0)\n\tlabel := &en.ELabel{ID: node.NextLabelID}\n\tfor engine.GetObject(label) {\n\t\tlabelStr := &en.ELabelString{ID: label.LabelStringID}\n\t\tengine.GetObject(labelStr)\n\t\tres = append(res, labelStr.String)\n\t\tif label.NextLabelID != -1 {\n\t\t\tlabel = &en.ELabel{ID: label.NextLabelID}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn &res\n}", "func getLabels(\n docker *client.Client,\n containerId string) (labels map[string]string, err error) {\n\n inspect, err := docker.ContainerInspect(context.Background(), containerId)\n if err != nil {\n return\n }\n\n labels = inspect.Config.Labels\n return\n}", "func (m *EnvoyFilter) GetWorkloadLabels() map[string]string {\n\tif m != nil {\n\t\treturn m.WorkloadLabels\n\t}\n\treturn nil\n}", "func (m *CertManagerConfig) GetPodLabels() map[string]interface{} {\n\tif m != nil {\n\t\treturn m.PodLabels\n\t}\n\treturn nil\n}", "func (m *CertManagerConfig) GetPodLabels() map[string]interface{} {\n\tif m != nil {\n\t\treturn m.PodLabels\n\t}\n\treturn nil\n}", "func (m *CertManagerConfig) GetPodLabels() map[string]interface{} {\n\tif m != nil {\n\t\treturn m.PodLabels\n\t}\n\treturn nil\n}", "func (m *CertManagerConfig) GetPodLabels() map[string]interface{} {\n\tif m != nil {\n\t\treturn m.PodLabels\n\t}\n\treturn nil\n}", "func getIssues(user, repo, label string) []Datos {\n\t// Format the http link\n\turl := fmt.Sprintf(\"https://api.github.com/repos/%s/%s/issues?labels=%s&page=1&per_page=100\", user, repo, label)\n\n\t// Get response\n\tbody := 
connectHTML(url)\n\n\t// Filter data\n\tdata := getData(body)\n\n\treturn data\n\n}", "func (manager *Manager) getIdentityLabels(securityIdentity uint32) (labels.Labels, error) {\n\tidentityCtx, cancel := context.WithTimeout(context.Background(), option.Config.KVstoreConnectivityTimeout)\n\tdefer cancel()\n\tif err := manager.identityAllocator.WaitForInitialGlobalIdentities(identityCtx); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to wait for initial global identities: %v\", err)\n\t}\n\n\tidentity := manager.identityAllocator.LookupIdentityByID(identityCtx, identity.NumericIdentity(securityIdentity))\n\tif identity == nil {\n\t\treturn nil, fmt.Errorf(\"identity %d not found\", securityIdentity)\n\t}\n\treturn identity.Labels, nil\n}", "func (r *RedisFailoverHandler) getLabels(rf *redisfailoverv1.RedisFailover) map[string]string {\n\tdynLabels := map[string]string{\n\t\trfLabelNameKey: rf.Name,\n\t}\n\n\t// Filter the labels based on the whitelist\n\tfilteredCustomLabels := make(map[string]string)\n\tif rf.Spec.LabelWhitelist != nil && len(rf.Spec.LabelWhitelist) != 0 {\n\t\tfor _, regex := range rf.Spec.LabelWhitelist {\n\t\t\tcompiledRegexp, err := regexp.Compile(regex)\n\t\t\tif err != nil {\n\t\t\t\tr.logger.Errorf(\"Unable to compile label whitelist regex '%s', ignoring it.\", regex)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor labelKey, labelValue := range rf.Labels {\n\t\t\t\tif match := compiledRegexp.MatchString(labelKey); match {\n\t\t\t\t\tfilteredCustomLabels[labelKey] = labelValue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// If no whitelist is specified then don't filter the labels.\n\t\tfilteredCustomLabels = rf.Labels\n\t}\n\treturn util.MergeLabels(defaultLabels, dynLabels, filteredCustomLabels)\n}", "func (c *Client) GetCronWorkflowLabels(namespace, name, prefix string) (labels map[string]string, err error) {\n\tcwf, err := c.ArgoprojV1alpha1().CronWorkflows(namespace).Get(name, metav1.GetOptions{})\n\tif err != nil 
{\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"Namespace\": namespace,\n\t\t\t\"Name\": name,\n\t\t\t\"Error\": err.Error(),\n\t\t}).Error(\"CronWorkflow not found.\")\n\t\treturn nil, util.NewUserError(codes.NotFound, \"CronWorkflow not found.\")\n\t}\n\n\tlabels = label.FilterByPrefix(prefix, cwf.Labels)\n\tlabels = label.RemovePrefix(prefix, labels)\n\n\treturn\n}", "func (o Iperf3SpecClientConfigurationOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v Iperf3SpecClientConfiguration) map[string]string { return v.PodLabels }).(pulumi.StringMapOutput)\n}", "func (o Iperf3SpecClientConfigurationPtrOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Iperf3SpecClientConfiguration) map[string]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PodLabels\n\t}).(pulumi.StringMapOutput)\n}", "func (m *NumberDataPoint) GetLabels() []v11.StringKeyValue {\n\tif m != nil {\n\t\treturn m.Labels\n\t}\n\treturn nil\n}", "func HasLabel(i *github.Issue, label string) bool {\n\tfor _, l := range i.Labels {\n\t\tif *l.Name == label {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (o *DashboardAllOfLinks) GetLabels() string {\n\tif o == nil || o.Labels == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Labels\n}", "func getPendingPRs(g *u.GithubClient, f *os.File, owner, repo, branch string) error {\n\tlog.Print(\"Getting pending PR status...\")\n\tf.WriteString(\"-------\\n\")\n\tf.WriteString(fmt.Sprintf(\"## PENDING PRs on the %s branch\\n\", branch))\n\n\tif *htmlizeMD {\n\t\tf.WriteString(\"PR | Milestone | User | Date | Commit Message\\n\")\n\t\tf.WriteString(\"-- | --------- | ---- | ---- | --------------\\n\")\n\t}\n\n\tvar query []string\n\tquery = u.AddQuery(query, \"repo\", owner, \"/\", repo)\n\tquery = u.AddQuery(query, \"is\", \"open\")\n\tquery = u.AddQuery(query, \"type\", \"pr\")\n\tquery = u.AddQuery(query, \"base\", branch)\n\tpendingPRs, err := g.SearchIssues(strings.Join(query, \" 
\"))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to search pending PRs: %v\", err)\n\t}\n\n\tfor _, pr := range pendingPRs {\n\t\tvar str string\n\t\t// escape '*' in commit messages so they don't mess up formatting\n\t\tmsg := strings.Replace(*pr.Title, \"*\", \"\", -1)\n\t\tmilestone := \"null\"\n\t\tif pr.Milestone != nil {\n\t\t\tmilestone = *pr.Milestone.Title\n\t\t}\n\t\tif *htmlizeMD {\n\t\t\tstr = fmt.Sprintf(\"#%-8d | %-4s | @%-10s| %s | %s\\n\", *pr.Number, milestone, *pr.User.Login, pr.UpdatedAt.Format(\"Mon Jan 2 15:04:05 MST 2006\"), msg)\n\t\t} else {\n\t\t\tstr = fmt.Sprintf(\"#%-8d %-4s @%-10s %s %s\\n\", *pr.Number, milestone, *pr.User.Login, pr.UpdatedAt.Format(\"Mon Jan 2 15:04:05 MST 2006\"), msg)\n\t\t}\n\t\tf.WriteString(str)\n\t}\n\tf.WriteString(\"\\n\\n\")\n\treturn nil\n}", "func (o *IssueGetLabelParams) WithTimeout(timeout time.Duration) *IssueGetLabelParams {\n\to.SetTimeout(timeout)\n\treturn o\n}", "func (drc *DummyRegistryClient) LabelsForImageName(in string) (labels map[string]string, err error) {\n\tres := drc.Called(in)\n\treturn res.Get(0).(map[string]string), res.Error(1)\n}", "func (p *Plex) GetLibraryLabels(sectionKey, sectionIndex string) (libraryLabels, error) {\n\trequestInfo.headers.Token = p.token\n\n\tif sectionIndex == \"\" {\n\t\tsectionIndex = \"1\"\n\t}\n\n\tquery := fmt.Sprintf(\"%s/library/sections/%s/labels?type=%s\", p.URL, sectionKey, sectionIndex)\n\n\tresp, respErr := requestInfo.get(query)\n\n\tif respErr != nil {\n\t\treturn libraryLabels{}, respErr\n\t}\n\n\tdefer resp.Body.Close()\n\n\tvar result libraryLabels\n\n\tif err := json.NewDecoder(resp.Body).Decode(result); err != nil {\n\t\tfmt.Println(err.Error())\n\n\t\treturn libraryLabels{}, err\n\t}\n\n\treturn result, nil\n}", "func (p *PullRequestEvent) GetLabel() *Label {\n\tif p == nil {\n\t\treturn nil\n\t}\n\treturn p.Label\n}", "func (m *SummaryDataPoint) GetLabels() []v11.StringKeyValue {\n\tif m != nil {\n\t\treturn m.Labels\n\t}\n\treturn 
nil\n}", "func (p *PullRequestBranch) GetLabel() string {\n\tif p == nil || p.Label == nil {\n\t\treturn \"\"\n\t}\n\treturn *p.Label\n}", "func (r *Repo) ListLabels() github.Labels {\n\treturn r.cli.ListLabels(r.path)\n}", "func (o Iperf3SpecServerConfigurationOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v Iperf3SpecServerConfiguration) map[string]string { return v.PodLabels }).(pulumi.StringMapOutput)\n}", "func (r *Registry) Labels(ctx context.Context, ref image.Reference) (map[string]string, error) {\n\t// Set the default namespace if unset\n\tctx = ensureNamespace(ctx)\n\n\tmanifest, err := r.getManifest(ctx, ref)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\timageConfig, err := r.getImage(ctx, *manifest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn imageConfig.Config.Labels, nil\n}", "func (r *InformationProtectionPolicyLabelsCollectionRequest) Get(ctx context.Context) ([]InformationProtectionLabel, error) {\n\treturn r.GetN(ctx, 0)\n}", "func (o Iperf3SpecServerConfigurationPtrOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Iperf3SpecServerConfiguration) map[string]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PodLabels\n\t}).(pulumi.StringMapOutput)\n}", "func addLabelsToPullRequest(prInfo *PullRequestInfo, labels []string) error {\n\tif prInfo == nil {\n\t\treturn errors.New(\"pull request to label cannot be nil\")\n\t}\n\tpr := prInfo.PullRequest\n\tprovider := prInfo.GitProvider\n\n\tif len(labels) > 0 {\n\t\tnumber := *pr.Number\n\t\tvar err error\n\t\terr = provider.AddLabelsToIssue(pr.Owner, pr.Repo, number, labels)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Logger().Infof(\"Added label %s to Pull Request %s\", util.ColorInfo(strings.Join(labels, \", \")), pr.URL)\n\t}\n\treturn nil\n}", "func (o PgbenchSpecPodConfigOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v PgbenchSpecPodConfig) map[string]string { return v.PodLabels 
}).(pulumi.StringMapOutput)\n}", "func (o DrillSpecPodConfigPtrOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *DrillSpecPodConfig) map[string]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PodLabels\n\t}).(pulumi.StringMapOutput)\n}", "func (o FioSpecPodConfigOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v FioSpecPodConfig) map[string]string { return v.PodLabels }).(pulumi.StringMapOutput)\n}", "func (m *Milestone) GetLabelsURL() string {\n\tif m == nil || m.LabelsURL == nil {\n\t\treturn \"\"\n\t}\n\treturn *m.LabelsURL\n}", "func GetLabelsForComputeInstanceE(t *testing.T, projectID string, zone string, instanceID string) (map[string]string, error) {\n\tlogger.Logf(t, \"Getting Labels for Compute Instance %s\", instanceID)\n\n\tctx := context.Background()\n\n\tservice, err := NewComputeServiceE(t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinstance, err := service.Instances.Get(projectID, zone, instanceID).Context(ctx).Do()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Instances.Get(%s) got error: %v\", instanceID, err)\n\t}\n\n\treturn instance.Labels, nil\n}", "func GetClanLabels(qparms rest.QParms) ([]Label, error) {\n\tvar sb strings.Builder\n\tsb.Grow(100)\n\tsb.WriteString(config.Data.BaseURL)\n\tsb.WriteString(\"/labels/clans/\")\n\n\tbody, err := get(sb.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse into an array of clans\n\ttype respType struct {\n\t\tLabels []Label `json:\"items\"`\n\t}\n\tvar resp respType\n\terr = json.Unmarshal(body, &resp)\n\tif err != nil {\n\t\tlog.Debug(\"failed to parse the json response\")\n\t\treturn nil, err\n\t}\n\n\treturn resp.Labels, nil\n}", "func (r *resultImpl) Labels() []Label {\n\treturn r.labels\n}", "func (o *Channel) GetLabelsOk() (*[]Label, bool) {\n\tif o == nil || o.Labels == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Labels, true\n}", "func (o SysbenchSpecOutput) PodLabels() pulumi.StringMapOutput 
{\n\treturn o.ApplyT(func(v SysbenchSpec) map[string]string { return v.PodLabels }).(pulumi.StringMapOutput)\n}", "func GetMetricLabels(restConfig *rest.Config, url string, family string) ([]map[string]*string, error) {\n\turi := fmt.Sprintf(\"https://%s/metrics\", url)\n\tvar metrics []byte\n\n\tclient := http.Client{\n\t\tTimeout: time.Duration(30 * time.Second),\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec\n\t\t},\n\t}\n\trequest, err := http.NewRequest(\"Get\", uri, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trequest.Header.Add(\"Authorization\", fmt.Sprintf(\"Bearer %s\", restConfig.BearerToken))\n\tresp, err := client.Do(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\t_ = resp.Body.Close()\n\t}()\n\tmetrics, err = io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// parse the metrics\n\tparser := expfmt.TextParser{}\n\tfamilies, err := parser.TextToMetricFamilies(bytes.NewReader(metrics))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlabels := make([]map[string]*string, 0, len(families))\n\tfor _, f := range families {\n\t\tif f.GetName() == family {\n\t\t\tlbls := map[string]*string{}\n\t\t\tlabels = append(labels, lbls)\n\t\t\tfor _, m := range f.GetMetric() {\n\t\t\t\tfor _, kv := range m.Label {\n\t\t\t\t\tif kv.Name != nil {\n\t\t\t\t\t\tlbls[*kv.Name] = kv.Value\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// here we can return `0` is the metric does not exist, which may be valid if the expected value is `0`, too.\n\treturn labels, nil\n}", "func (o FioSpecPodConfigPtrOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *FioSpecPodConfig) map[string]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PodLabels\n\t}).(pulumi.StringMapOutput)\n}", "func (o LookupClientTlsPolicyResultOutput) Labels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v LookupClientTlsPolicyResult) 
map[string]string { return v.Labels }).(pulumi.StringMapOutput)\n}", "func HasLabel(label string, issueLabels []*github.Label) bool {\n\tfor _, l := range issueLabels {\n\t\tif strings.ToLower(l.GetName()) == strings.ToLower(label) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (o LookupFeatureResultOutput) Labels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v LookupFeatureResult) map[string]string { return v.Labels }).(pulumi.StringMapOutput)\n}", "func (o QperfSpecClientConfigurationOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v QperfSpecClientConfiguration) map[string]string { return v.PodLabels }).(pulumi.StringMapOutput)\n}", "func (o QperfSpecClientConfigurationPtrOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *QperfSpecClientConfiguration) map[string]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PodLabels\n\t}).(pulumi.StringMapOutput)\n}", "func (o LookupApiResultOutput) Labels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v LookupApiResult) map[string]string { return v.Labels }).(pulumi.StringMapOutput)\n}", "func (b *Bot) labels(ctx context.Context, files []github.PullRequestFile) ([]string, error) {\n\tvar labels []string\n\n\t// The branch name is unsafe, but here we are simply adding a label.\n\tif isReleaseBranch(b.c.Environment.UnsafeBase) {\n\t\tlog.Println(\"Label: Found backport branch.\")\n\t\tlabels = append(labels, \"backport\")\n\t}\n\n\tfor _, file := range files {\n\t\tif strings.HasPrefix(file.Name, \"vendor/\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor k, v := range prefixes {\n\t\t\tif strings.HasPrefix(file.Name, k) {\n\t\t\t\tlog.Printf(\"Label: Found prefix %v, attaching labels: %v.\", k, v)\n\t\t\t\tlabels = append(labels, v...)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn deduplicate(labels), nil\n}", "func (l CurrentLabels) Labels() []string {\n\treturn []string{\"type\"}\n}", "func (o PgbenchSpecPodConfigPtrOutput) PodLabels() pulumi.StringMapOutput 
{\n\treturn o.ApplyT(func(v *PgbenchSpecPodConfig) map[string]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PodLabels\n\t}).(pulumi.StringMapOutput)\n}", "func (o DrillSpecPodConfigOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v DrillSpecPodConfig) map[string]string { return v.PodLabels }).(pulumi.StringMapOutput)\n}", "func (o *CreateOptions) GetLabels() map[string]string {\n\tif o.Labels == nil {\n\t\tvar z map[string]string\n\t\treturn z\n\t}\n\treturn o.Labels\n}", "func GetXWso2Labels(vendorExtensionsMap map[string]interface{}) []string {\n\tvar labelArray []string\n\tif y, found := vendorExtensionsMap[\"x-wso2-label\"]; found {\n\t\tif val, ok := y.([]interface{}); ok {\n\t\t\tfor _, label := range val {\n\t\t\t\tlabelArray = append(labelArray, label.(string))\n\t\t\t}\n\t\t\treturn labelArray\n\t\t}\n\t\tlogger.LoggerOasparser.Errorln(\"Error while parsing the x-wso2-label\")\n\t}\n\treturn []string{\"default\"}\n}" ]
[ "0.8113356", "0.7227975", "0.70755315", "0.7042123", "0.70022243", "0.6956072", "0.69555527", "0.6602383", "0.65491056", "0.65308553", "0.6387276", "0.63280463", "0.63201314", "0.6253154", "0.6241437", "0.62240124", "0.62148833", "0.6124567", "0.6104237", "0.5956916", "0.59501666", "0.59391576", "0.5933868", "0.58997226", "0.5864033", "0.58363646", "0.5805237", "0.5791458", "0.5790756", "0.57645184", "0.57587945", "0.57539576", "0.57486945", "0.5737803", "0.5726392", "0.57115436", "0.5709977", "0.56985253", "0.56836843", "0.5673481", "0.56612855", "0.5648707", "0.5623989", "0.5605312", "0.5588475", "0.55833757", "0.55811244", "0.55766886", "0.5570993", "0.5566398", "0.55643225", "0.5552313", "0.5552313", "0.5552313", "0.5552313", "0.55305886", "0.5522722", "0.5512849", "0.5499617", "0.5499104", "0.5490772", "0.5475641", "0.54685646", "0.54657024", "0.54631054", "0.54460514", "0.5437185", "0.54368925", "0.5432543", "0.5431588", "0.5419353", "0.5414626", "0.5412325", "0.5401767", "0.5399571", "0.5389586", "0.5379682", "0.53742796", "0.53735936", "0.53693944", "0.53693295", "0.5368887", "0.5351958", "0.53516346", "0.5349291", "0.53460735", "0.5343084", "0.5324835", "0.5322699", "0.53184247", "0.53156734", "0.53151214", "0.53145564", "0.53144497", "0.5314368", "0.53136873", "0.5303117", "0.5301994", "0.5290026", "0.5288933" ]
0.8116904
0
CreateComment adds and tracks a comment in the client
func (fc *fakeClient) CreateComment(owner, repo string, number int, comment string) error { fc.commentsAdded[number] = append(fc.commentsAdded[number], comment) return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *Server) CreateComment(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\n\tb, err := ioutil.ReadAll(r.Body)\n\tdefer r.Body.Close()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tvar comment Comment\n\tif err = json.Unmarshal(b, &comment); err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tcommentUUID, err := uuid.NewUUID()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\tcommentID := commentUUID.String()\n\n\tcomment.ID = commentID\n\n\terr1 := s.database.CreateComment(ctx, &comment)\n\tif httperr.HandleError(w, err, http.StatusInternalServerError) {\n\t\ts.logger.For(ctx).Error(\"request failed\", zap.Error(err1))\n\t\treturn\n\t}\n\n\tresponseBody := CreatePostResponse{\"success\"}\n\tjsonResponse, err := json.Marshal(responseBody)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(jsonResponse)\n}", "func (b *Service) CommentCreate(ctx context.Context, TeamID string, UserID string, EventValue string) ([]byte, error, bool) {\n\tvar c struct {\n\t\tCheckinId string `json:\"checkinId\"`\n\t\tUserID string `json:\"userId\"`\n\t\tComment string `json:\"comment\"`\n\t}\n\terr := json.Unmarshal([]byte(EventValue), &c)\n\tif err != nil {\n\t\treturn nil, err, false\n\t}\n\n\terr = b.CheckinService.CheckinComment(ctx, TeamID, c.CheckinId, c.UserID, c.Comment)\n\tif err != nil {\n\t\treturn nil, err, false\n\t}\n\n\tmsg := createSocketEvent(\"comment_added\", \"\", \"\")\n\n\treturn msg, nil, false\n}", "func (s *APIClientService) CreateComment(ctx context.Context, id string, new CommentRequest) (Comment, *http.Response, error) {\n\tresource := Comment{} // new(APIClient)\n\n\treq, err := s.client.NewRequest(ctx, http.MethodPost, \"comments/\"+apiClientBasePath+\"/\"+id, new)\n\tif err != nil {\n\t\treturn resource, nil, err\n\t}\n\n\tresp, _, err := s.client.Do(ctx, req, 
&resource, false)\n\tif err != nil {\n\t\treturn resource, nil, err\n\t}\n\n\treturn resource, resp, nil\n}", "func (c *Client) CreateComment(owner, repo string, number int, comment string) error {\n\tif c.dry {\n\t\treturn nil\n\t}\n\n\tic := IssueComment{\n\t\tBody: comment,\n\t}\n\tresp, err := c.request(http.MethodPost, fmt.Sprintf(\"%s/repos/%s/%s/issues/%d/comments\", c.base, owner, repo, number), ic)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != 201 {\n\t\treturn fmt.Errorf(\"response not 201: %s\", resp.Status)\n\t}\n\treturn nil\n}", "func (b *Client) CreateComment(repo models.Repo, pullNum int, comment string, command string) error {\n\t// NOTE: I tried to find the maximum size of a comment for bitbucket.org but\n\t// I got up to 200k chars without issue so for now I'm not going to bother\n\t// to detect this.\n\tbodyBytes, err := json.Marshal(map[string]map[string]string{\"content\": {\n\t\t\"raw\": comment,\n\t}})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"json encoding\")\n\t}\n\tpath := fmt.Sprintf(\"%s/2.0/repositories/%s/pullrequests/%d/comments\", b.BaseURL, repo.FullName, pullNum)\n\t_, err = b.makeRequest(\"POST\", path, bytes.NewBuffer(bodyBytes))\n\treturn err\n}", "func (c *client) CreateComment(org, repo string, number int, comment string) error {\n\treturn c.CreateCommentWithContext(context.Background(), org, repo, number, comment)\n}", "func CreateComment(w http.ResponseWriter, r *http.Request) {\n\tsessionID := r.Header.Get(\"sessionID\")\n\tuser, err := getUserFromSession(sessionID)\n\n\tif err != nil {\n\t\tmsg := map[string]string{\"error\": \"Sorry there was an internal server error\"}\n\t\tw.Header().Set(\"Content-type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(msg)\n\n\t\treturn\n\t}\n\n\tif !user.Active {\n\t\tmsg := map[string]string{\"error\": \"Sorry your account isn't activated 
yet\"}\n\t\tw.Header().Set(\"Content-type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tjson.NewEncoder(w).Encode(msg)\n\n\t\treturn\n\t}\n\n\tif r.Body == nil {\n\t\tmsg := map[string]string{\"error\": \"Sorry you need to supply an item id and a comment text\"}\n\t\tw.Header().Set(\"Content-type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(msg)\n\n\t\treturn\n\t}\n\n\tvar comment comments.Comment\n\n\terr = json.NewDecoder(r.Body).Decode(&comment)\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tmsg := map[string]string{\"error\": \"Please supply a valid email and password\"}\n\t\tw.Header().Set(\"Content-type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(msg)\n\n\t\treturn\n\t}\n\n\tcomment.Username = user.DisplayName\n\n\terr = comment.Create()\n\n\tif err != nil {\n\t\tmsg := map[string]string{\"error\": \"Sorry there was an internal server error\"}\n\t\tw.Header().Set(\"Content-type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(msg)\n\n\t\treturn\n\t}\n\n\tmsg := map[string]string{\"message\": \"Success!\"}\n\tw.Header().Set(\"Content-type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tjson.NewEncoder(w).Encode(msg)\n\n\treturn\n}", "func (c *CommentApiController) CreateComment(w http.ResponseWriter, r *http.Request) {\n\tcommentParam := Comment{}\n\td := json.NewDecoder(r.Body)\n\td.DisallowUnknownFields()\n\tif err := d.Decode(&commentParam); err != nil {\n\t\tc.errorHandler(w, r, &ParsingError{Err: err}, nil)\n\t\treturn\n\t}\n\tif err := AssertCommentRequired(commentParam); err != nil {\n\t\tc.errorHandler(w, r, err, nil)\n\t\treturn\n\t}\n\tresult, err := c.service.CreateComment(r.Context(), commentParam)\n\t// If an error occurred, encode the error with the status code\n\tif err != nil {\n\t\tc.errorHandler(w, r, err, &result)\n\t\treturn\n\t}\n\t// If 
no error, encode the body and the result code\n\tEncodeJSONResponse(result.Body, &result.Code, result.Headers, w)\n\n}", "func (v Notes) CreateComment(params NotesCreateCommentParams) (NotesCreateCommentResponse, error) {\n\tr, err := v.API.Request(\"notes.createComment\", params)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar resp NotesCreateCommentResponse\n\n\tvar cnv int\n\tcnv, err = strconv.Atoi(string(r))\n\tresp = NotesCreateCommentResponse(cnv)\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn resp, nil\n}", "func (a *ProblemsApiService) CreateComment(ctx _context.Context, problemId string) ApiCreateCommentRequest {\n\treturn ApiCreateCommentRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tproblemId: problemId,\n\t}\n}", "func (dbHandler *Handler) CreateComment(userID uint, entryID uint, text string, ts time.Time) (api.Comment, error) {\n\tcomment := api.Comment{UserID: userID, EntryID: entryID, Text: text}\n\tif !ts.IsZero() {\n\t\tcomment.CreatedAt = ts\n\t\tcomment.UpdatedAt = ts\n\t}\n\n\tdb := dbHandler.DB.Create(&comment)\n\tif db.Error != nil {\n\t\treturn comment, errors.WrapWithDetails(db.Error, \"cannot create comment\", \"userID\", userID, \"entryID\", entryID)\n\t}\n\n\treturn comment, nil\n}", "func (_article *Article) CommentsCreate(am map[string]interface{}) error {\n\t\t\tam[\"article_id\"] = _article.Id\n\t\t_, err := CreateComment(am)\n\treturn err\n}", "func CreateComment(dbp zesty.DBProvider, t *Task, user, content string) (c *Comment, err error) {\n\tdefer errors.DeferredAnnotatef(&err, \"Failed to create comment\")\n\n\tc = &Comment{\n\t\tPublicID: uuid.Must(uuid.NewV4()).String(),\n\t\tTaskID: t.ID,\n\t\tUsername: user,\n\t\tCreated: now.Get(),\n\t\tUpdated: now.Get(),\n\t\tContent: content,\n\t}\n\n\terr = c.Valid()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = dbp.DB().Insert(c)\n\tif err != nil {\n\t\treturn nil, pgjuju.Interpret(err)\n\t}\n\n\treturn c, nil\n}", "func (s *commentService) CreateComment(input 
dto.CreateComment) (entity.Comment, error) {\n\tcomment := entity.Comment{}\n\n\tcomment.Author = input.Author\n\tcomment.Comments = input.Comments\n\tcomment.BlogID = input.BlogID\n\n\t//proceed to the save method in the package repository, which returns the data and error values\n\tnewComment, err := s.commentRepository.Save(comment)\n\tif err != nil {\n\t\treturn newComment, err\n\t}\n\n\treturn newComment, nil\n\n}", "func (s *Server) createComment() http.HandlerFunc {\n\ttype request struct {\n\t\tBody string `json:\"body\"`\n\t}\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tuserToken := getAuthorizationToken(r)\n\t\tif userToken == \"\" {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\tctx := r.Context()\n\n\t\tuser, err := s.Accounts.GetUserByToken(ctx, userToken)\n\t\tif err != nil {\n\t\t\thandleRPCErrors(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tvar req request\n\t\tb, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\thandleRPCErrors(w, err)\n\t\t\treturn\n\t\t}\n\t\terr = json.Unmarshal(b, &req)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusUnprocessableEntity)\n\t\t\treturn\n\t\t}\n\n\t\tvars := mux.Vars(r)\n\t\tnewsUUID := vars[\"newsuuid\"]\n\n\t\tcomment, err := s.Comments.AddComment(ctx, req.Body, user.UID, newsUUID)\n\t\tif err != nil {\n\t\t\thandleRPCErrors(w, err)\n\t\t\treturn\n\t\t}\n\t\tjson, err := json.Marshal(*comment)\n\t\tif err != nil {\n\t\t\thandleRPCErrors(w, err)\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusAccepted)\n\t\tw.Write(json)\n\t}\n}", "func CreateComment(comment *Comment) error {\n\tvar err error\n\tcomment.CreatedAt = time.Now()\n\terr = db.Debug().Create(comment).Error\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func CreateComment(c *gin.Context, in *createCommentIn) (*task.Comment, error) {\n\tmetadata.AddActionMetadata(c, metadata.TaskID, in.TaskID)\n\n\tdbp, err := zesty.NewDBProvider(utask.DBName)\n\tif err != nil {\n\t\treturn 
nil, err\n\t}\n\n\tt, err := task.LoadFromPublicID(dbp, in.TaskID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttt, err := tasktemplate.LoadFromID(dbp, t.TemplateID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmetadata.AddActionMetadata(c, metadata.TemplateName, tt.Name)\n\n\tvar res *resolution.Resolution\n\tif t.Resolution != nil {\n\t\tres, err = resolution.LoadFromPublicID(dbp, *t.Resolution)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tmetadata.AddActionMetadata(c, metadata.ResolutionID, res.PublicID)\n\t}\n\n\tadmin := auth.IsAdmin(c) == nil\n\trequester := auth.IsRequester(c, t) == nil\n\twatcher := auth.IsWatcher(c, t) == nil\n\tresolutionManager := auth.IsResolutionManager(c, tt, t, res) == nil\n\n\tif !requester && !watcher && !resolutionManager && !admin {\n\t\treturn nil, errors.Forbiddenf(\"Can't create comment\")\n\t} else if !requester && !watcher && !resolutionManager {\n\t\tmetadata.SetSUDO(c)\n\t}\n\n\treqUsername := auth.GetIdentity(c)\n\n\tcomment, err := task.CreateComment(dbp, t, reqUsername, in.Content)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn comment, nil\n}", "func (m *GormCommentRepository) Create(ctx context.Context, u *Comment) error {\n\tdefer goa.MeasureSince([]string{\"goa\", \"db\", \"comment\", \"create\"}, time.Now())\n\n\tu.ID = uuid.NewV4()\n\n\terr := m.db.Create(u).Error\n\tif err != nil {\n\t\tgoa.LogError(ctx, \"error adding Comment\", \"error\", err.Error())\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c Comment) Create(commentUid, articleId, content string, parentId int) error {\n\tcomment := Comment{\n\t\tContent: strings.TrimSpace(content),\n\t\tArticleId: articleId,\n\t\tCommentUid: commentUid,\n\t\tParentId: parentId,\n\t}\n\tif err := load.Conn.Create(&comment).Error; err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (r *Resolver) CreateComment(ctx context.Context, args struct {\n\tInput createCommentInput\n}) (Comment, error) {\n\tresult := Comment{}\n\tm := 
dbmodel.Comment{}\n\n\t// Role-based Access Control\n\tif _, err := AssertPermissions(ctx, \"create\", \"Comment\", args, &args.Input); err != nil {\n\t\treturn result, errors.Wrapf(err, \"permission denied\")\n\t}\n\n\tdata, err := json.Marshal(args.Input)\n\tif err != nil {\n\t\treturn result, errors.Wrapf(err, \"json.Marshal(%#v)\", args.Input)\n\t}\n\tif err = json.Unmarshal(data, &m); err != nil {\n\t\treturn result, errors.Wrapf(err, \"json.Unmarshal(%s)\", data)\n\t}\n\n\tif err := m.Insert(r.db(ctx)); err != nil {\n\t\treturn result, errors.Wrapf(err, \"createComment(%#v)\", m)\n\t}\n\treturn Comment{model: m, db: r.db(ctx)}, nil\n}", "func createComment(w http.ResponseWriter, r *http.Request) {\n\n\tsession := sessions.Start(w, r)\n\n\tvars := mux.Vars(r)\n\n\tpost_id := vars[\"id\"]\n\tuser_id := session.GetString(\"user_id\")\n\tbody := r.FormValue(\"body\")\n\timage := r.FormValue(\"image\")\n\turl := r.FormValue(\"url\")\n\n\tif len(body) > 2000 {\n\t\thttp.Error(w, \"Your comment is too long. (2000 characters maximum)\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tif len(body) == 0 && len(image) == 0 {\n\t\thttp.Error(w, \"Your comment is empty.\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tstmt, err := db.Prepare(\"INSERT comments SET created_by=?, post=?, body=?, image=?, url=?\")\n\tif err == nil {\n\n\t\t// If there's no errors, we can go ahead and execute the statement.\n\t\t_, err := stmt.Exec(&user_id, &post_id, &body, &image, &url)\n\t\tif err != nil {\n\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\n\t\t}\n\n\t\tvar comments = comment{}\n\t\tvar timestamp time.Time\n\n\t\tdb.QueryRow(\"SELECT comments.id, created_by, created_at, body, image, username, nickname, avatar FROM comments LEFT JOIN users ON users.id = created_by WHERE created_by = ? 
ORDER BY created_at DESC LIMIT 1\", user_id).\n\t\t\tScan(&comments.ID, &comments.CreatedBy, &timestamp, &comments.Body, &comments.Image, &comments.CommenterUsername, &comments.CommenterNickname, &comments.CommenterIcon)\n\t\tcomments.CreatedAt = humanTiming(timestamp)\n\n\t\tvar data = map[string]interface{}{\n\t\t\t// This is sent to the user who created the comment so they can't yeah it.\n\t\t\t\"CanYeah\": false,\n\t\t\t\"Comment\": comments,\n\t\t}\n\n\t\terr = templates.ExecuteTemplate(w, \"create_comment.html\", data)\n\n\t\tif err != nil {\n\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\n\t\t}\n\n\t\tvar commentTpl bytes.Buffer\n\t\tvar commentPreviewTpl bytes.Buffer\n\n\t\t// This will be sent other users so they can yeah it.\n\t\tdata[\"CanYeah\"] = true\n\n\t\ttemplates.ExecuteTemplate(&commentTpl, \"create_comment.html\", data)\n\t\ttemplates.ExecuteTemplate(&commentPreviewTpl, \"comment_preview.html\", data)\n\n\t\tvar msg wsMessage\n\t\tvar community_id string\n\n\t\tdb.QueryRow(\"SELECT community_id FROM posts WHERE id = ?\", post_id).Scan(&community_id)\n\n\t\tfor client := range clients {\n\t\t\tif clients[client].OnPage == \"/posts/\"+post_id && clients[client].UserID != strconv.Itoa(comments.CreatedBy) {\n\t\t\t\tmsg.Type = \"comment\"\n\t\t\t\tmsg.Content = commentTpl.String()\n\t\t\t\terr := client.WriteJSON(msg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tclient.Close()\n\t\t\t\t\tdelete(clients, client)\n\t\t\t\t}\n\t\t\t} else if clients[client].OnPage == \"/communities/\"+community_id {\n\t\t\t\tmsg.Type = \"commentPreview\"\n\t\t\t\tmsg.ID = post_id\n\t\t\t\tmsg.Content = commentPreviewTpl.String()\n\t\t\t\terr := client.WriteJSON(msg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tclient.Close()\n\t\t\t\t\tdelete(clients, client)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn\n\n\t}\n\n}", "func (c *client) CreateCommentReaction(org, repo string, id int, reaction string) error 
{\n\tc.log(\"CreateCommentReaction\", org, repo, id, reaction)\n\tr := Reaction{Content: reaction}\n\t_, err := c.request(&request{\n\t\tmethod: http.MethodPost,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/issues/comments/%d/reactions\", org, repo, id),\n\t\taccept: \"application/vnd.github.squirrel-girl-preview\",\n\t\torg: org,\n\t\texitCodes: []int{201},\n\t\trequestBody: &r,\n\t}, nil)\n\treturn err\n}", "func CreateNewComment(newComment Comment) {\n\tconfig := LoadConfigurationFile(\"config.json\")\n\n\tdb, err := sql.Open(\"postgres\", config.DatabaseURI)\n\tif err != nil {\n\t\tlog.Fatal(\"[!] Error while running sql.Open(): \", err)\n\t}\n\tdefer db.Close()\n\n\t// Begin a database transaction\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\tlog.Fatal(\"[!] Error in db.Being(): \", err)\n\t}\n\n\t// Prepare the statement\n\tstmt, err := tx.Prepare(\"INSERT INTO go_comments (post_id, user_id, comment, likes) VALUES ($1, $2, $3, $4)\")\n\tif err != nil {\n\t\tlog.Fatal(\"[!] Error preparing statement: \", err)\n\t}\n\tdefer stmt.Close()\n\n\t// Execute the statement\n\t_, err = stmt.Exec(&newComment.PostID, &newComment.UserID, &newComment.Comment, &newComment.Likes)\n\tif err != nil {\n\t\tlog.Fatal(\"[!] Error executing statement: \", err)\n\t}\n\n\t// Commit the transaction\n\ttx.Commit()\n\n\t// Tell the User\n\tfmt.Println(\"[!] 
New user added to database....\")\n\n\t// Update the Comments slice\n\tGetComments(db)\n}", "func CreateComment(cmt Comment) (Comment, error) {\n\t// call existing create comments func\n\tcreatedComments, createCommentsErr := CreateComments([]Comment{\n\t\tcmt,\n\t})\n\tif createCommentsErr != nil {\n\t\treturn Comment{}, createCommentsErr\n\t}\n\n\t//Return success without any error.\n\treturn createdComments[0], nil\n}", "func resourceCommentCreate(d *schema.ResourceData, m interface{}) error {\n\tconfig := m.(*Config)\n\tbody := d.Get(\"body\").(string)\n\tissueKey := d.Get(\"issue_key\").(string)\n\n\tc := jira.Comment{Body: body}\n\n\tcomment, res, err := config.jiraClient.Issue.AddComment(issueKey, &c)\n\n\tif err != nil {\n\t\tbody, _ := ioutil.ReadAll(res.Body)\n\t\treturn errors.Wrapf(err, \"creating jira issue failed: %s\", body)\n\t}\n\n\td.SetId(comment.ID)\n\n\treturn resourceCommentRead(d, m)\n}", "func (db *ConcreteDatastore) CreateComment(Comment model.Comment) (int64, error) {\n\tvar (\n\t\ttx *sql.Tx\n\t\terr error\n\t\tres sql.Result\n\t\tcommentId int64\n\t)\n\n\t// Preparing to request\n\tif tx, err = db.Begin(); err != nil {\n\t\treturn -1, err\n\t}\n\n\t// Setting up the request and executing it\n\trequest := `INSERT INTO Comment(schedule_id, comment, is_important) VALUES (?, ?, ?)`\n\tif res, err = tx.Exec(request, Comment.ScheduleId, Comment.Comment, Comment.IsImportant); err != nil {\n\t\tif errr := tx.Rollback(); errr != nil {\n\t\t\treturn -1, errr\n\t\t}\n\t\treturn -1, err\n\t}\n\n\t// Getting the id of the last item inserted\n\tif commentId, err = res.LastInsertId(); err != nil {\n\t\tif errr := tx.Rollback(); errr != nil {\n\t\t\treturn -1, errr\n\t\t}\n\t\treturn -1, err\n\t}\n\n\t// Saving\n\tif err = tx.Commit(); err != nil {\n\t\tif errr := tx.Rollback(); errr != nil {\n\t\t\treturn -1, errr\n\t\t}\n\t\treturn -1, err\n\t}\n\n\treturn commentId, nil\n}", "func (u *commentUsecase) Create(org, comment string) (*domain.Comment, 
error) {\n\texists, err := u.ghCli.OrgExists(org)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, domain.NewErrorNotFound(fmt.Sprintf(\"Org %s not found\", org))\n\t}\n\n\tnewComment := &domain.Comment{Org: org, Comment: comment}\n\tID, err := u.dbRepo.InsertComment(newComment)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnewComment.ID = ID\n\n\treturn newComment, nil\n}", "func CreateComment(db *sql.DB, c *Comment) (int, error) {\n\tvar id int\n\terr := db.QueryRow(`\n\t\tinsert into comments (\n\t\t\tcomment, created_at\n\t\t) values (\n\t\t\t$1, $2\n\t\t) returning id\n\t`, c.Comment, c.CreatedAt).Scan(&id)\n\tif err != nil {\n\t\treturn 0, nil\n\t}\n\treturn id, nil\n}", "func CreateNewCommentHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tnewComment := Comment{}\n\n\terr := json.NewDecoder(r.Body).Decode(&newComment)\n\tif err != nil {\n\t\tlog.Fatal(\"[!] Error decoding data in request body (CreateNewCommentHandler): \", err)\n\t}\n\n\tCreateNewComment(newComment)\n}", "func (s *Rest) createCommentCtrl(w http.ResponseWriter, r *http.Request) {\n\n\tcomment := store.Comment{}\n\tif err := render.DecodeJSON(http.MaxBytesReader(w, r.Body, hardBodyLimit), &comment); err != nil {\n\t\trest.SendErrorJSON(w, r, http.StatusBadRequest, err, \"can't bind comment\")\n\t\treturn\n\t}\n\n\tuser, err := rest.GetUserInfo(r)\n\tif err != nil { // this not suppose to happen (handled by Auth), just dbl-check\n\t\trest.SendErrorJSON(w, r, http.StatusUnauthorized, err, \"can't get user info\")\n\t\treturn\n\t}\n\tlog.Printf(\"[DEBUG] create comment %+v\", comment)\n\n\tcomment.PrepareUntrusted() // clean all fields user not supposed to set\n\tcomment.User = user\n\tcomment.User.IP = strings.Split(r.RemoteAddr, \":\")[0]\n\n\tcomment.Orig = comment.Text // original comment text, prior to md render\n\tif err = s.DataService.ValidateComment(&comment); err != nil 
{\n\t\trest.SendErrorJSON(w, r, http.StatusBadRequest, err, \"invalid comment\")\n\t\treturn\n\t}\n\tcomment.Text = string(blackfriday.Run([]byte(comment.Text), blackfriday.WithExtensions(mdExt)))\n\tcomment.Text = s.ImageProxy.Convert(comment.Text)\n\t// check if user blocked\n\tif s.adminService.checkBlocked(comment.Locator.SiteID, comment.User) {\n\t\trest.SendErrorJSON(w, r, http.StatusForbidden, errors.New(\"rejected\"), \"user blocked\")\n\t\treturn\n\t}\n\n\tif s.ReadOnlyAge > 0 {\n\t\tif info, e := s.DataService.Info(comment.Locator, s.ReadOnlyAge); e == nil && info.ReadOnly {\n\t\t\trest.SendErrorJSON(w, r, http.StatusForbidden, errors.New(\"rejected\"), \"old post, read-only\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tid, err := s.DataService.Create(comment)\n\tif err != nil {\n\t\trest.SendErrorJSON(w, r, http.StatusInternalServerError, err, \"can't save comment\")\n\t\treturn\n\t}\n\n\t// DataService modifies comment\n\tfinalComment, err := s.DataService.Get(comment.Locator, id)\n\tif err != nil {\n\t\trest.SendErrorJSON(w, r, http.StatusInternalServerError, err, \"can't load created comment\")\n\t\treturn\n\t}\n\ts.Cache.Flush(comment.Locator.URL, \"last\", comment.User.ID)\n\n\trender.Status(r, http.StatusCreated)\n\trender.JSON(w, r, &finalComment)\n}", "func (c CommentRepo) Create(context context.Context, comment model.CommentDTO) (string, error) {\n\tcommentEntity, err := comment.Entity()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tres, err := c.collection.InsertOne(context, commentEntity)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn res.InsertedID.(primitive.ObjectID).Hex(), nil\n}", "func (client *Client) Comment(refType string, refId int64, text string, params map[string]interface{}) (*Comment, error) {\n\tpath := fmt.Sprintf(\"/comment/%s/%d/\", refType, refId)\n\tif params == nil {\n\t\tparams = map[string]interface{}{}\n\t}\n\tparams[\"value\"] = text\n\n\tcomment := &Comment{}\n\terr := client.RequestWithParams(\"POST\", path, nil, 
params, comment)\n\treturn comment, err\n}", "func (*XMLDocument) CreateComment(data string) (w *window.Comment) {\n\tmacro.Rewrite(\"$_.createComment($1)\", data)\n\treturn w\n}", "func (c PostCommentDetailController) Create(ctx *fasthttp.RequestCtx) {\n\tvar e []bool\n\tpostID, notExists := utils.ParseInt(phi.URLParam(ctx, \"postID\"), 10, 64)\n\te = append(e, notExists)\n\tcommentID, notExists := utils.ParseInt(phi.URLParam(ctx, \"commentID\"), 10, 64)\n\te = append(e, notExists)\n\n\tif exists, _ := utils.InArray(true, e); exists {\n\t\tc.JSONResponse(ctx, model2.ResponseError{\n\t\t\tDetail: fasthttp.StatusMessage(fasthttp.StatusBadRequest),\n\t\t}, fasthttp.StatusBadRequest)\n\t\treturn\n\t}\n\n\tcommentDetail := new(model.PostCommentDetail)\n\tc.JSONBody(ctx, &commentDetail)\n\tcommentDetail.PostID = postID\n\tcommentDetail.CommentID = commentID\n\n\tif errs, err := database.ValidateStruct(commentDetail); err != nil {\n\t\tc.JSONResponse(ctx, model2.ResponseError{\n\t\t\tErrors: errs,\n\t\t\tDetail: fasthttp.StatusMessage(fasthttp.StatusUnprocessableEntity),\n\t\t}, fasthttp.StatusUnprocessableEntity)\n\t\treturn\n\t}\n\n\tcommentDetail.Comment = c.App.TextPolicy.Sanitize(commentDetail.Comment)\n\n\terr := c.GetDB().Insert(new(model.PostCommentDetail), commentDetail, \"id\", \"inserted_at\")\n\tif errs, err := database.ValidateConstraint(err, commentDetail); err != nil {\n\t\tc.JSONResponse(ctx, model2.ResponseError{\n\t\t\tErrors: errs,\n\t\t\tDetail: fasthttp.StatusMessage(fasthttp.StatusUnprocessableEntity),\n\t\t}, fasthttp.StatusUnprocessableEntity)\n\t\treturn\n\t}\n\n\tc.JSONResponse(ctx, model2.ResponseSuccessOne{\n\t\tData: commentDetail,\n\t}, fasthttp.StatusCreated)\n}", "func (r *Rietveld) AddComment(issue int64, message string) error {\n\tdata := url.Values{}\n\tdata.Add(\"message\", message)\n\tdata.Add(\"message_only\", \"True\")\n\tdata.Add(\"add_as_reviewer\", \"False\")\n\tdata.Add(\"send_mail\", \"True\")\n\tdata.Add(\"no_redirect\", 
\"True\")\n\treturn r.post(fmt.Sprintf(\"/%d/publish\", issue), data)\n}", "func CreateCommentReaction(doerID, issueID, commentID int64, content string) (*Reaction, error) {\n\treturn CreateReaction(&ReactionOptions{\n\t\tType: content,\n\t\tDoerID: doerID,\n\t\tIssueID: issueID,\n\t\tCommentID: commentID,\n\t})\n}", "func (z *Client) CreateTicketComment(ctx context.Context, ticketID int64, ticketComment TicketComment) (TicketComment, error) {\n\ttype comment struct {\n\t\tTicket struct {\n\t\t\tTicketComment TicketComment `json:\"comment\"`\n\t\t} `json:\"ticket\"`\n\t}\n\n\tdata := &comment{}\n\tdata.Ticket.TicketComment = ticketComment\n\n\tbody, err := z.put(ctx, fmt.Sprintf(\"/tickets/%d.json\", ticketID), data)\n\tif err != nil {\n\t\treturn TicketComment{}, err\n\t}\n\n\tresult := TicketComment{}\n\terr = json.Unmarshal(body, &result)\n\tif err != nil {\n\t\treturn TicketComment{}, err\n\t}\n\n\treturn result, err\n}", "func (m *Client) CreateTicketComment(arg0 context.Context, arg1 int64, arg2 zendesk.TicketComment) (zendesk.TicketComment, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateTicketComment\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(zendesk.TicketComment)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (db *Database) CreateComment(body string, author string, path string, confirmed bool, replyTo *uuid.UUID) (*uuid.UUID, error) {\n\tthread, err := db.GetThread(path)\n\tif err != nil {\n\t\tif err == global.ErrThreadNotFound {\n\t\t\t_, err := db.CreateThread(path)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn db.CreateComment(body, author, path, confirmed, replyTo)\n\t\t}\n\t\treturn nil, err\n\t}\n\tif replyTo != nil {\n\t\tcomment, err := db.GetComment(*replyTo)\n\t\tif err != nil {\n\t\t\tif err == global.ErrCommentNotFound {\n\t\t\t\treturn nil, global.ErrWrongReplyTo\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\t\t// Check if the comment you're replying to actually is a part of the thread\n\t\tif 
!bytes.Equal(comment.ThreadId.Bytes(), thread.Id.Bytes()) {\n\t\t\treturn nil, global.ErrWrongReplyTo\n\t\t}\n\t\t// We allow for only a single layer of nesting. (Maybe just for now? who knows.)\n\t\tif comment.ReplyTo != nil && replyTo != nil {\n\t\t\treplyTo = comment.ReplyTo\n\t\t}\n\t}\n\tuid := global.GetUUID()\n\tvar toReplyTo *string\n\tif replyTo != nil {\n\t\ttrt := replyTo.String()\n\t\ttoReplyTo = &trt\n\t}\n\terr = db.DB.Table(db.TablePrefix + global.DefaultDynamoDbCommentTableName).Put(dynamoModel.Comment{\n\t\tId: uid,\n\t\tThreadId: thread.Id,\n\t\tBody: body,\n\t\tAuthor: author,\n\t\tConfirmed: confirmed,\n\t\tCreatedAt: time.Now().UTC(),\n\t\tReplyTo: toReplyTo,\n\t}).Run()\n\treturn &uid, err\n}", "func (c *Client) NewComment(nc *www.NewComment) (*www.NewCommentReply, error) {\n\tresponseBody, err := c.makeRequest(http.MethodPost,\n\t\twww.PoliteiaWWWAPIRoute, www.RouteNewComment, nc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ncr www.NewCommentReply\n\terr = json.Unmarshal(responseBody, &ncr)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshal NewCommentReply: %v\", err)\n\t}\n\n\tif c.cfg.Verbose {\n\t\terr := prettyPrintJSON(ncr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &ncr, nil\n}", "func (w *ServerInterfaceWrapper) NewComment(ctx echo.Context) error {\n\tvar err error\n\t// ------------- Path parameter \"project_id\" -------------\n\tvar projectId string\n\n\terr = runtime.BindStyledParameter(\"simple\", false, \"project_id\", ctx.Param(\"project_id\"), &projectId)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter project_id: %s\", err))\n\t}\n\n\t// ------------- Path parameter \"issue_id\" -------------\n\tvar issueId string\n\n\terr = runtime.BindStyledParameter(\"simple\", false, \"issue_id\", ctx.Param(\"issue_id\"), &issueId)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format 
for parameter issue_id: %s\", err))\n\t}\n\n\t// HasSecurity is set\n\n\tctx.Set(\"OpenId.Scopes\", []string{\"exitus/comment.write\"})\n\n\t// Invoke the callback with all the unmarshalled arguments\n\terr = w.Handler.NewComment(ctx, projectId, issueId)\n\treturn err\n}", "func (mr *MockClientMockRecorder) CreateComment(org, repo, number, comment interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateComment\", reflect.TypeOf((*MockClient)(nil).CreateComment), org, repo, number, comment)\n}", "func CreateIssueComment(ctx *context.APIContext) {\n\t// swagger:operation POST /repos/{owner}/{repo}/issues/{index}/comments issue issueCreateComment\n\t// ---\n\t// summary: Add a comment to an issue\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: index\n\t// in: path\n\t// description: index of the issue\n\t// type: integer\n\t// format: int64\n\t// required: true\n\t// - name: body\n\t// in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/CreateIssueCommentOption\"\n\t// responses:\n\t// \"201\":\n\t// \"$ref\": \"#/responses/Comment\"\n\t// \"403\":\n\t// \"$ref\": \"#/responses/forbidden\"\n\tform := web.GetForm(ctx).(*api.CreateIssueCommentOption)\n\tissue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(\":index\"))\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetIssueByIndex\", err)\n\t\treturn\n\t}\n\n\tif issue.IsLocked && !ctx.Repo.CanWriteIssuesOrPulls(issue.IsPull) && !ctx.Doer.IsAdmin {\n\t\tctx.Error(http.StatusForbidden, \"CreateIssueComment\", errors.New(ctx.Tr(\"repo.issues.comment_on_locked\")))\n\t\treturn\n\t}\n\n\tcomment, err := 
issue_service.CreateIssueComment(ctx, ctx.Doer, ctx.Repo.Repository, issue, form.Body, nil)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"CreateIssueComment\", err)\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusCreated, convert.ToAPIComment(ctx, ctx.Repo.Repository, comment))\n}", "func (pr *PrMock) CreateCommentEvent(userCreator SenderCreator, content, action string) *gogh.IssueCommentEvent {\n\treturn &gogh.IssueCommentEvent{\n\t\tAction: utils.String(action),\n\t\tIssue: &gogh.Issue{\n\t\t\tNumber: pr.PullRequest.Number,\n\t\t},\n\t\tComment: &gogh.IssueComment{\n\t\t\tBody: utils.String(content),\n\t\t},\n\t\tRepo: pr.PullRequest.Base.Repo,\n\t\tSender: userCreator(pr.PullRequest),\n\t}\n}", "func (a *ProblemsApiService) CreateCommentExecute(r ApiCreateCommentRequest) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"ProblemsApiService.CreateComment\")\n\tif err != nil {\n\t\treturn nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/problems/{problemId}/comments\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"problemId\"+\"}\", _neturl.PathEscape(parameterToString(r.problemId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json; charset=utf-8\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{}\n\n\t// set 
Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = r.commentRequestDtoImpl\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif apiKey, ok := auth[\"Api-Token\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif apiKey.Prefix != \"\" {\n\t\t\t\t\tkey = apiKey.Prefix + \" \" + apiKey.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = apiKey.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func CreateIssueComment(id int64, login, owner, repo string) error {\n\tmessage := fmt.Sprintf(\"Thank you for opening an issue @%s. 
Your contributions are welcome.\", login)\n\n\tissueComment := github.IssueComment{\n\t\tID: &id,\n\t\tBody: &message,\n\t}\n\n\tgithubClient := New()\n\tcomment, _, err := githubClient.Issues.CreateComment(context.Background(), owner, repo, int(id), &issueComment)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.SetOutput(os.Stdout)\n\tlog.Print(comment)\n\n\treturn nil\n}", "func (self *CourseService)AddComment(content, courseId, userId string) (commentVo *course.CourseCommentVo, error *err.HttpError) {\n\tcommentVo = new(course.CourseCommentVo)\n\tcommentTable := new(table.CourseCommentTable)\n\tcommentTable.UUID = uuid.New()\n\tcommentTable.Content = content\n\tcommentTable.CourseId = courseId\n\tcommentTable.CreateUser = userId\n\tcommentTable.CreateTime = time.Now()\n\tcommentTable.FrozenStatus = value.STATUS_ENABLED\n\tinsertNum, insertErr := self.Session.InsertOne(commentTable)\n\tif insertNum == 0 {\n\t\tif insertErr != nil {\n\t\t\tself.Log.Println(insertErr)\n\t\t}\n\t\terror = err.COURSE_COMMENT_INSERT_ERR\n\t\treturn\n\t}\n\tcommentVo = course.NewCommentVo(commentTable, self.Session, self.Log)\n\terror = nil\n\treturn\n}", "func createIssueComment(\n\tctx context.Context,\n\tpr *github.PullRequest,\n\tclient *github.Client,\n\tmessage string,\n) error {\n\tcomment := &github.IssueComment{Body: &message}\n\t_, _, err := client.Issues.CreateComment(\n\t\tctx,\n\t\tpr.Base.Repo.Owner.GetLogin(),\n\t\tpr.Base.Repo.GetName(),\n\t\tpr.GetNumber(),\n\t\tcomment,\n\t)\n\treturn err\n}", "func CreatePRReviewComment(username string, owner string, repo string, id int64) error {\n\tmessage := fmt.Sprintf(\"Thank you for opening an PR @%s. Your contributions are welcomed ! 
:)\", username)\n\n\tpullReqComment := github.PullRequestComment{\n\t\tID: &id,\n\t\tBody: &message,\n\t}\n\n\tgithubClient := New()\n\tcomment, _, err := githubClient.PullRequests.CreateComment(context.Background(), owner, repo, int(id), &pullReqComment)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.SetOutput(os.Stdout)\n\tlog.Print(comment)\n\n\treturn nil\n}", "func TestCommentCreateOnline(t *testing.T) {\n\tvar slideIndex int32 = 3\n\tcommentText := \"Comment text\"\n\tauthor := \"Test author\"\n\tchildCommentText := \"Child comment text\"\n\n\tc := slidescloud.GetTestApiClient()\n\t_, e := c.SlidesApi.CopyFile(\"TempTests/\"+fileName, folderName+\"/\"+fileName, \"\", \"\", \"\")\n\tif e != nil {\n\t\tt.Errorf(\"Error: %v.\", e)\n\t\treturn\n\t}\n\n\tdto := slidescloud.NewSlideComment()\n\tdto.Text = commentText\n\tdto.Author = author\n\n\tchildCommentDto := slidescloud.NewSlideComment()\n\tchildCommentDto.Text = childCommentText\n\tchildCommentDto.Author = author\n\tdto.ChildComments = []slidescloud.ISlideCommentBase{childCommentDto}\n\n\tsource, e := ioutil.ReadFile(localTestFile)\n\t_, _, e = c.SlidesApi.CreateCommentOnline(source, slideIndex, dto, nil, password)\n\n\tif e != nil {\n\t\tt.Errorf(\"Error: %v.\", e)\n\t\treturn\n\t}\n}", "func (c Client) AddComment(ctx context.Context, ID it.IssueID, comment it.Comment) (it.CommentID, error) {\n\tissueID, err := strconv.Atoi(string(ID))\n\tif err != nil {\n\t\treturn it.CommentID(\"\"), err\n\t}\n\tid, err := c.Client.IssueNoteAdd(ctx, issueID, mantis.IssueNoteData{\n\t\t//Reporter:\n\t\tDateSubmitted: mantis.Time(comment.CreatedAt),\n\t\tText: comment.Body,\n\t})\n\treturn it.CommentID(strconv.Itoa(id)), err\n}", "func NewComment(text string) Comment {\n\treturn Comment{\n\t\tID: time.Now().Unix(),\n\t\tText: \"Hello\",\n\t}\n}", "func NewComment(createdAtTimestamp int64) *Comment {\n\tthis := Comment{}\n\tthis.CreatedAtTimestamp = createdAtTimestamp\n\treturn &this\n}", "func NewCreateCommentContext(ctx 
context.Context, r *http.Request, service *goa.Service) (*CreateCommentContext, error) {\n\tvar err error\n\tresp := goa.ContextResponse(ctx)\n\tresp.Service = service\n\treq := goa.ContextRequest(ctx)\n\treq.Request = r\n\trctx := CreateCommentContext{Context: ctx, ResponseData: resp, RequestData: req}\n\treturn &rctx, err\n}", "func NewCreateCommentContext(ctx context.Context, r *http.Request, service *goa.Service) (*CreateCommentContext, error) {\n\tvar err error\n\tresp := goa.ContextResponse(ctx)\n\tresp.Service = service\n\treq := goa.ContextRequest(ctx)\n\treq.Request = r\n\trctx := CreateCommentContext{Context: ctx, ResponseData: resp, RequestData: req}\n\treturn &rctx, err\n}", "func (mr *MockCommentClientMockRecorder) CreateComment(org, repo, number, comment interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateComment\", reflect.TypeOf((*MockCommentClient)(nil).CreateComment), org, repo, number, comment)\n}", "func TestCommentCreate(t *testing.T) {\n\tvar slideIndex int32 = 3\n\tcommentText := \"Comment text\"\n\tauthor := \"Test author\"\n\tchildCommentText := \"Child comment text\"\n\n\tc := slidescloud.GetTestApiClient()\n\t_, e := c.SlidesApi.CopyFile(\"TempTests/\"+fileName, folderName+\"/\"+fileName, \"\", \"\", \"\")\n\tif e != nil {\n\t\tt.Errorf(\"Error: %v.\", e)\n\t\treturn\n\t}\n\n\tdto := slidescloud.NewSlideComment()\n\tdto.Text = commentText\n\tdto.Author = author\n\n\tchildCommentDto := slidescloud.NewSlideComment()\n\tchildCommentDto.Text = childCommentText\n\tchildCommentDto.Author = author\n\tdto.ChildComments = []slidescloud.ISlideCommentBase{childCommentDto}\n\n\tresponse, _, e := c.SlidesApi.CreateComment(fileName, slideIndex, dto, nil, password, folderName, \"\")\n\n\tif e != nil {\n\t\tt.Errorf(\"Error: %v.\", e)\n\t\treturn\n\t}\n\n\tif len(response.GetList()) != 1 {\n\t\tt.Errorf(\"Expected %v, but was %v\", 1, len(response.GetList()))\n\t\treturn\n\t}\n\n\tif 
response.GetList()[0].GetText() != commentText {\n\t\tt.Errorf(\"Expected %v, but was %v\", commentText, response.GetList()[0].GetText())\n\t\treturn\n\t}\n\n\tif response.GetList()[0].GetAuthor() != author {\n\t\tt.Errorf(\"Expected %v, but was %v\", author, response.GetList()[0].GetAuthor())\n\t\treturn\n\t}\n\n\tchildComment := response.GetList()[0].GetChildComments()[0]\n\tif childComment.GetText() != childCommentText {\n\t\tt.Errorf(\"Expected %v, but was %v\", childCommentText, childComment.GetText())\n\t\treturn\n\t}\n\n\tif childComment.GetAuthor() != author {\n\t\tt.Errorf(\"Expected %v, but was %v\", childCommentText, childComment.GetAuthor())\n\t\treturn\n\t}\n}", "func (c *client) CreatePullRequestReviewComment(org, repo string, number int, rc ReviewComment) error {\n\tc.log(\"CreatePullRequestReviewComment\", org, repo, number, rc)\n\n\t// TODO: remove custom Accept headers when their respective API fully launches.\n\tacceptHeaders := []string{\n\t\t// https://developer.github.com/changes/2016-05-12-reactions-api-preview/\n\t\t\"application/vnd.github.squirrel-girl-preview\",\n\t\t// https://developer.github.com/changes/2019-10-03-multi-line-comments/\n\t\t\"application/vnd.github.comfort-fade-preview+json\",\n\t}\n\n\t_, err := c.request(&request{\n\t\tmethod: http.MethodPost,\n\t\taccept: strings.Join(acceptHeaders, \", \"),\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/pulls/%d/comments\", org, repo, number),\n\t\torg: org,\n\t\trequestBody: &rc,\n\t\texitCodes: []int{201},\n\t}, nil)\n\treturn err\n}", "func (ctx *CreateCommentContext) Created() error {\n\tctx.ResponseData.WriteHeader(201)\n\treturn nil\n}", "func (ctx *CreateCommentContext) Created() error {\n\tctx.ResponseData.WriteHeader(201)\n\treturn nil\n}", "func (m *MockClient) CreateComment(org, repo string, number int, comment string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateComment\", org, repo, number, comment)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func 
DefaultCreateComment(ctx context.Context, in *Comment, db *gorm1.DB) (*Comment, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(CommentORMWithBeforeCreate_); ok {\n\t\tif db, err = hook.BeforeCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Create(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(CommentORMWithAfterCreate_); ok {\n\t\tif err = hook.AfterCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func TestModernCommentCreate(t *testing.T) {\n\tvar slideIndex int32 = 3\n\tvar textSelectionStartIndex int32 = 1\n\tvar textSelectionLength int32 = 5\n\tcommentText := \"Comment text\"\n\tauthor := \"Test author\"\n\tchildCommentText := \"Child comment text\"\n\n\tc := slidescloud.GetTestApiClient()\n\t_, e := c.SlidesApi.CopyFile(\"TempTests/\"+fileName, folderName+\"/\"+fileName, \"\", \"\", \"\")\n\tif e != nil {\n\t\tt.Errorf(\"Error: %v.\", e)\n\t\treturn\n\t}\n\n\tchildCommentDto := slidescloud.NewSlideModernComment()\n\tchildCommentDto.Text = childCommentText\n\tchildCommentDto.Author = author\n\tchildCommentDto.Status = \"Resolved\"\n\n\tdto := slidescloud.NewSlideModernComment()\n\tdto.Text = commentText\n\tdto.Author = author\n\tdto.Status = \"Active\"\n\tdto.TextSelectionStart = textSelectionStartIndex\n\tdto.TextSelectionLength = textSelectionLength\n\tdto.ChildComments = []slidescloud.ISlideCommentBase{childCommentDto}\n\n\tresponse, _, e := c.SlidesApi.CreateComment(fileName, slideIndex, dto, nil, password, folderName, \"\")\n\n\tif e != nil {\n\t\tt.Errorf(\"Error: %v.\", e)\n\t\treturn\n\t}\n\n\tif len(response.GetList()) != 1 {\n\t\tt.Errorf(\"Expected %v, but was %v\", 1, len(response.GetList()))\n\t\treturn\n\t}\n\n\tchildComment := 
response.GetList()[0].GetChildComments()[0]\n\tif childComment.GetText() != childCommentText {\n\t\tt.Errorf(\"Expected %v, but was %v\", childCommentText, childComment.GetText())\n\t\treturn\n\t}\n}", "func Comment(c *fiber.Ctx) {\n\tShopID := c.Params(\"shop_id\")\n\tUserID := userIDF(c.Get(\"token\"))\n\n\tvar Data CommentStruct\n\n\tif errorParse := c.BodyParser(&Data); errorParse != nil {\n\t\tfmt.Println(\"Error parsing data\", errorParse)\n\t\tc.JSON(ErrorResponse{MESSAGE: \"Error al parsear información\"})\n\t\tc.Status(400)\n\t\treturn\n\t}\n\n\tid, errorInsert := sq.Insert(\"shop_comments\").\n\t\tColumns(\n\t\t\t\"user_id\",\n\t\t\t\"shop_id\",\n\t\t\t\"comment\",\n\t\t).\n\t\tValues(\n\t\t\tUserID,\n\t\t\tShopID,\n\t\t\tData.Comment,\n\t\t).\n\t\tRunWith(database).\n\t\tExec()\n\n\tif errorInsert != nil {\n\t\tfmt.Println(\"Error to save shop\", errorInsert)\n\t}\n\n\tIDLast, _ := id.LastInsertId()\n\tIDS := strconv.FormatInt(IDLast, 10)\n\n\tc.JSON(SuccessResponse{MESSAGE: IDS})\n}", "func TestShapeModernCommentCreate(t *testing.T) {\n\tvar slideIndex int32 = 3\n\tvar shapeIndex int32 = 1\n\tvar textSelectionStartIndex int32 = 1\n\tvar textSelectionLength int32 = 5\n\tcommentText := \"Comment text\"\n\tauthor := \"Test author\"\n\tchildCommentText := \"Child comment text\"\n\n\tc := slidescloud.GetTestApiClient()\n\t_, e := c.SlidesApi.CopyFile(\"TempTests/\"+fileName, folderName+\"/\"+fileName, \"\", \"\", \"\")\n\tif e != nil {\n\t\tt.Errorf(\"Error: %v.\", e)\n\t\treturn\n\t}\n\n\tchildCommentDto := slidescloud.NewSlideModernComment()\n\tchildCommentDto.Text = childCommentText\n\tchildCommentDto.Author = author\n\tchildCommentDto.Status = \"Resolved\"\n\n\tdto := slidescloud.NewSlideModernComment()\n\tdto.Text = commentText\n\tdto.Author = author\n\tdto.Status = \"Active\"\n\tdto.TextSelectionStart = textSelectionStartIndex\n\tdto.TextSelectionLength = textSelectionLength\n\tdto.ChildComments = 
[]slidescloud.ISlideCommentBase{childCommentDto}\n\n\tresponse, _, e := c.SlidesApi.CreateComment(fileName, slideIndex, dto, &shapeIndex, password, folderName, \"\")\n\n\tif e != nil {\n\t\tt.Errorf(\"Error: %v.\", e)\n\t\treturn\n\t}\n\n\tif len(response.GetList()) != 1 {\n\t\tt.Errorf(\"Expected %v, but was %v\", 1, len(response.GetList()))\n\t\treturn\n\t}\n}", "func (m *MockCommentClient) CreateComment(org, repo string, number int, comment string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateComment\", org, repo, number, comment)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (j *Jira) AddComment(issue *Issue, comment string) error {\n\tvar cMap = make(map[string]string)\n\tcMap[\"body\"] = comment\n\n\tcJson, err := json.Marshal(cMap)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\turi := j.BaseUrl + j.ApiPath + \"/issue/\" + issue.Key + \"/comment\"\n\tbody := bytes.NewBuffer(cJson)\n\n\t_, err = j.postJson(uri, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s *IdeaStorage) AddComment(number int, content string, userID int) (int, error) {\n\treturn 0, nil\n}", "func (s *TeamsService) CreateCommentByID(ctx context.Context, orgID, teamID int64, discsusionNumber int, comment DiscussionComment) (*DiscussionComment, *Response, error) {\n\tu := fmt.Sprintf(\"organizations/%v/team/%v/discussions/%v/comments\", orgID, teamID, discsusionNumber)\n\treq, err := s.client.NewRequest(\"POST\", u, comment)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdiscussionComment := &DiscussionComment{}\n\tresp, err := s.client.Do(ctx, req, discussionComment)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn discussionComment, resp, nil\n}", "func (cs *CommentService) Post(ctx context.Context, diagramID string, opt *CommentOption) (*Comment, *Response, error) {\n\tu := fmt.Sprintf(\"diagrams/%s/comments/post.json\", diagramID)\n\n\tc := new(Comment)\n\n\tresp, err := cs.client.Post(ctx, u, opt, &c)\n\tif err != nil 
{\n\t\treturn nil, resp, err\n\t}\n\n\treturn c, resp, nil\n}", "func NewComment(db boil.Executor, model dbmodel.Comment) Comment {\n\treturn Comment{\n\t\tmodel: model,\n\t\tdb: db,\n\t}\n}", "func (mr *MockFeedUseCaseMockRecorder) CreateComment(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateComment\", reflect.TypeOf((*MockFeedUseCase)(nil).CreateComment), arg0, arg1)\n}", "func (mr *MockServiceBoardMockRecorder) CreateComment(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateComment\", reflect.TypeOf((*MockServiceBoard)(nil).CreateComment), arg0)\n}", "func NewComment(commentid, userid, postid uint64, commenttext string) *Comment {\n\tcomment := new(Comment)\n\tcomment.CommentID = commentid\n\tcomment.UserID = userid\n\tcomment.PostID = postid\n\tcomment.CommentText = commenttext\n\n\treturn comment\n}", "func NewCommentEvent(comment *models.Comment, torrent *models.Torrent) {\n\tcomment.Torrent = torrent\n\turl := \"/view/\" + strconv.FormatUint(uint64(torrent.ID), 10)\n\tif torrent.UploaderID > 0 {\n\t\ttorrent.Uploader.ParseSettings()\n\t\tif torrent.Uploader.Settings.Get(\"new_comment\") {\n\t\t\tT, _, _ := publicSettings.TfuncAndLanguageWithFallback(torrent.Uploader.Language, torrent.Uploader.Language) // We need to send the notification to every user in their language\n\t\t\tnotifications.NotifyUser(torrent.Uploader, comment.Identifier(), fmt.Sprintf(T(\"new_comment_on_torrent\"), torrent.Name), url, torrent.Uploader.Settings.Get(\"new_comment_email\"))\n\t\t}\n\t}\n}", "func (issue *Issue) SetComment(comment io.Reader) (*Comment, error) {\n\turl := fmt.Sprintf(\"%s/issue/%s/comment\", BaseUrl, issue.Key)\n\tcode, body := execRequest(\"POST\", url, comment)\n\tif code == http.StatusCreated {\n\t\tvar jiraComment Comment\n\t\terr := json.Unmarshal(body, &jiraComment)\n\t\tif err != nil {\n\t\t\treturn 
nil, err\n\t\t}\n\t\treturn &jiraComment, nil\n\t} else {\n\t\treturn nil, handleJiraError(body)\n\t}\n}", "func AddComment(tid, nickname, content string) error {\n\ttidNum, err := strconv.ParseInt(tid, 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcomment := new(Comment)\n\tcomment.Tid = tidNum\n\tcomment.Name = nickname\n\tcomment.Content = content\n\tcomment.Created = time.Now()\n\n\to := orm.NewOrm()\n\t/*insert a reply*/\n\t_, err = o.Insert(comment)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t/* update topic reply count */\n\ttopic := new(Topic)\n\tqs := o.QueryTable(\"topic\")\n\terr = qs.Filter(\"Id\", tid).One(topic)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttopic.ReplyCount++\n\ttopic.ReplyTime = time.Now()\n\n\t_, err = o.Update(topic)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s *TeamsService) CreateCommentBySlug(ctx context.Context, org, slug string, discsusionNumber int, comment DiscussionComment) (*DiscussionComment, *Response, error) {\n\tu := fmt.Sprintf(\"orgs/%v/teams/%v/discussions/%v/comments\", org, slug, discsusionNumber)\n\treq, err := s.client.NewRequest(\"POST\", u, comment)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdiscussionComment := &DiscussionComment{}\n\tresp, err := s.client.Do(ctx, req, discussionComment)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn discussionComment, resp, nil\n}", "func (mr *MockFeedRepositoryMockRecorder) CreateComment(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateComment\", reflect.TypeOf((*MockFeedRepository)(nil).CreateComment), arg0, arg1)\n}", "func createRobotComment(c context.Context, runID int64, comment tricium.Data_Comment) *robotCommentInput {\n\troco := &robotCommentInput{\n\t\tMessage: comment.Message,\n\t\tRobotID: comment.Category,\n\t\tRobotRunID: strconv.FormatInt(runID, 10),\n\t\tURL: composeRunURL(c, runID),\n\t\tPath: 
pathForGerrit(comment.Path),\n\t\tProperties: map[string]string{\"tricium_comment_uuid\": comment.Id},\n\t\tFixSuggestions: createFixSuggestions(comment.Suggestions),\n\t}\n\t// If no StartLine is given, the comment is assumed to be a file-level comment,\n\t// and the line field will not be populated so it will be set to zero.\n\tif comment.StartLine > 0 {\n\t\tif comment.EndLine > 0 {\n\t\t\t// If range is set, [the line field] equals the end line of the range.\n\t\t\troco.Line = int(comment.EndLine)\n\t\t\troco.Range = &commentRange{\n\t\t\t\tStartLine: int(comment.StartLine),\n\t\t\t\tEndLine: int(comment.EndLine),\n\t\t\t\tStartCharacter: int(comment.StartChar),\n\t\t\t\tEndCharacter: int(comment.EndChar),\n\t\t\t}\n\t\t} else {\n\t\t\troco.Line = int(comment.StartLine)\n\t\t}\n\t}\n\treturn roco\n}", "func (h *Handler) PostComment(w http.ResponseWriter, r *http.Request) {\n\tvar comment comment.Comment\n\tif err := json.NewDecoder(r.Body).Decode(&comment); err != nil {\n\t\tsendErrorResponse(w, \"Failed to decodde JSON body\", err)\n\t\treturn\n\t}\n\n\tcomment, err := h.Service.PostComment(comment)\n\tif err != nil {\n\t\tsendErrorResponse(w, \"Error posting a new comment\", err)\n\t\treturn\n\t}\n\n\tif err := json.NewEncoder(w).Encode(comment); err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n}", "func (m *MaintainerManager) AddComment(number, comment string) (gh.Comment, error) {\n\treturn m.client.AddComment(m.repo, number, comment)\n}", "func (issue *Issue) SetComment(comment *Comment) (*Comment, error) {\n\turl := fmt.Sprintf(\"%s/issue/%s/comment\", BaseURL, issue.Key)\n\tencodedParams, err := json.Marshal(comment)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcode, body := execRequest(\"POST\", url, bytes.NewBuffer(encodedParams))\n\tif code == http.StatusCreated {\n\t\tvar jiraComment Comment\n\t\terr := json.Unmarshal(body, &jiraComment)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &jiraComment, nil\n\t}\n\treturn nil, 
handleJiraError(body)\n}", "func AddComment(commentValue, threadId, collectionName, projectName, repositoryId, pullRequestId string) error {\n\tusername, pass, _, _, _, domain := GetConfigDatas()\n\turl := domain + collectionName + `/` + projectName + `/_apis/git/repositories/` + repositoryId + `/pullRequests/` + pullRequestId + `/threads/` + threadId + `/comments?api-version=4.1`\n\n\tfmt.Println(\"url: \", url)\n\tbody := `{\n\t\t\t \"content\": \"` + commentValue + `\",\n\t\t\t \"parentCommentId\": 1,\n\t\t\t \"commentType\": 1\n\t\t\t}`\n\n\tfmt.Println(\"body: \", body)\n\n\tclient := &http.Client{}\n\treq, _ := http.NewRequest(\"POST\", url, bytes.NewBuffer([]byte(body)))\n\n\treq.SetBasicAuth(username, pass)\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tresp, err := client.Do(req)\n\n\tif err != nil {\n\t\tCreateLogJson(\"Error\", \"AzureOps/AddComment\", \"Error while requesting to update tasks on Azure DevOps for adding comment on thread.\", err.Error())\n\t\treturn err\n\t}\n\n\tioutil.ReadAll(resp.Body)\n\n\t//CreateLogJson(\"Info\",\"AddComment\",\"Adding comment on pull request thread.\",\"AzureDevops thread comment is added. 
=>\"+bodyString)\n\treturn nil\n}", "func (mr *ClientMockRecorder) CreateTicketComment(arg0, arg1, arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateTicketComment\", reflect.TypeOf((*Client)(nil).CreateTicketComment), arg0, arg1, arg2)\n}", "func (m *MockFeedUseCase) CreateComment(arg0 int, arg1 models.Comment) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateComment\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (c *Client) CreateIssueCommentContext(ctx context.Context, issueIDOrKey string, input *CreateIssueCommentInput) (*IssueComment, error) {\n\tu := fmt.Sprintf(\"/api/v2/issues/%v/comments\", issueIDOrKey)\n\n\treq, err := c.NewRequest(\"POST\", u, input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tissueComment := new(IssueComment)\n\tif err := c.Do(ctx, req, &issueComment); err != nil {\n\t\treturn nil, err\n\t}\n\treturn issueComment, nil\n}", "func (m *MockServiceBoard) CreateComment(arg0 models.CommentInput) (models.CommentOutside, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateComment\", arg0)\n\tret0, _ := ret[0].(models.CommentOutside)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (o *WatchlistScreeningIndividualReviewCreateResponse) SetComment(v string) {\n\to.Comment.Set(&v)\n}", "func (r *AutoRoller) AddComment(ctx context.Context, issueNum int64, message, user string, timestamp time.Time) error {\n\troll, err := r.recent.Get(ctx, issueNum)\n\tif err != nil {\n\t\treturn skerr.Fmt(\"No such issue %d\", issueNum)\n\t}\n\tid := fmt.Sprintf(\"%d_%d\", issueNum, len(roll.Comments))\n\troll.Comments = append(roll.Comments, comment.New(id, message, user))\n\treturn r.recent.Update(ctx, roll)\n}", "func Comment(ctx context.Context, cfg *v1.Config, pr int, contents []byte) error {\n\tc := newClient(ctx, cfg.Github)\n\treturn c.CommentOnPR(pr, string(contents))\n}", "func StoreComment(dbOwner, dbFolder, dbName, 
commenter string, discID int, comText string, discClose bool, mrState MergeRequestState) error {\n\t// Begin a transaction\n\ttx, err := pdb.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Set up an automatic transaction roll back if the function exits without committing\n\tdefer tx.Rollback()\n\n\t// Get the current details for the discussion or MR\n\tvar discCreator string\n\tvar discState bool\n\tvar discType int64\n\tvar discTitle string\n\tdbQuery := `\n\t\tSELECT disc.open, u.user_name, disc.discussion_type, disc.title\n\t\tFROM discussions AS disc, users AS u\n\t\tWHERE disc.db_id = (\n\t\t\t\tSELECT db.db_id\n\t\t\t\tFROM sqlite_databases AS db\n\t\t\t\tWHERE db.user_id = (\n\t\t\t\t\t\tSELECT user_id\n\t\t\t\t\t\tFROM users\n\t\t\t\t\t\tWHERE lower(user_name) = lower($1)\n\t\t\t\t\t)\n\t\t\t\t\tAND folder = $2\n\t\t\t\t\tAND db_name = $3\n\t\t\t)\n\t\t\tAND disc.disc_id = $4\n\t\t\tAND disc.creator = u.user_id`\n\terr = tx.QueryRow(dbQuery, dbOwner, dbFolder, dbName, discID).Scan(&discState, &discCreator, &discType, &discTitle)\n\tif err != nil {\n\t\tlog.Printf(\"Error retrieving current open state for '%s%s%s', discussion '%d': %v\\n\", dbOwner,\n\t\t\tdbFolder, dbName, discID, err)\n\t\treturn err\n\t}\n\n\t// If the discussion is to be closed or reopened, ensure the person doing so is either the database owner or the\n\t// person who started the discussion\n\tif discClose == true {\n\t\tif (strings.ToLower(commenter) != strings.ToLower(dbOwner)) && (strings.ToLower(commenter) != strings.ToLower(discCreator)) {\n\t\t\treturn errors.New(\"Not authorised\")\n\t\t}\n\t}\n\n\t// If comment text was provided, insert it into the database\n\tvar commandTag pgx.CommandTag\n\tvar comID int64\n\tif comText != \"\" {\n\t\tdbQuery = `\n\t\t\tWITH d AS (\n\t\t\t\tSELECT db.db_id\n\t\t\t\tFROM sqlite_databases AS db\n\t\t\t\tWHERE db.user_id = (\n\t\t\t\t\t\tSELECT user_id\n\t\t\t\t\t\tFROM users\n\t\t\t\t\t\tWHERE lower(user_name) = 
lower($1)\n\t\t\t\t\t)\n\t\t\t\t\tAND folder = $2\n\t\t\t\t\tAND db_name = $3\n\t\t\t), int AS (\n\t\t\t\tSELECT internal_id AS int_id\n\t\t\t\tFROM discussions\n\t\t\t\tWHERE db_id = (SELECT db_id FROM d)\n\t\t\t\tAND disc_id = $5\n\t\t\t)\n\t\t\tINSERT INTO discussion_comments (db_id, disc_id, commenter, body, entry_type)\n\t\t\tSELECT (SELECT db_id FROM d), (SELECT int_id FROM int), (SELECT user_id FROM users WHERE lower(user_name) = lower($4)), $6, 'txt'\n\t\t\tRETURNING com_id`\n\t\terr = tx.QueryRow(dbQuery, dbOwner, dbFolder, dbName, commenter, discID, comText).Scan(&comID)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Adding comment for database '%s%s%s', discussion '%d' failed: %v\\n\", dbOwner, dbFolder,\n\t\t\t\tdbName, discID, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// If the discussion is to be closed or reopened, insert a close or reopen record as appropriate\n\tif discClose == true {\n\t\tvar eventTxt, eventType string\n\t\tif discState {\n\t\t\t// Discussion is open, so a close event should be inserted\n\t\t\teventTxt = \"close\"\n\t\t\teventType = \"cls\"\n\t\t} else {\n\t\t\t// Discussion is closed, so a re-open event should be inserted\n\t\t\teventTxt = \"reopen\"\n\t\t\teventType = \"rop\"\n\t\t}\n\n\t\t// Insert the appropriate close or reopen record\n\t\tdbQuery = `\n\t\t\tWITH d AS (\n\t\t\t\tSELECT db.db_id\n\t\t\t\tFROM sqlite_databases AS db\n\t\t\t\tWHERE db.user_id = (\n\t\t\t\t\t\tSELECT user_id\n\t\t\t\t\t\tFROM users\n\t\t\t\t\t\tWHERE lower(user_name) = lower($1)\n\t\t\t\t\t)\n\t\t\t\t\tAND folder = $2\n\t\t\t\t\tAND db_name = $3\n\t\t\t), int AS (\n\t\t\t\tSELECT internal_id AS int_id\n\t\t\t\tFROM discussions\n\t\t\t\tWHERE db_id = (SELECT db_id FROM d)\n\t\t\t\tAND disc_id = $5\n\t\t\t)\n\t\t\tINSERT INTO discussion_comments (db_id, disc_id, commenter, body, entry_type)\n\t\t\tSELECT (SELECT db_id FROM d), (SELECT int_id FROM int), (SELECT user_id FROM users WHERE lower(user_name) = lower($4)), $6, $7`\n\t\tcommandTag, err = 
tx.Exec(dbQuery, dbOwner, dbFolder, dbName, commenter, discID, eventTxt, eventType)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Adding comment for database '%s%s%s', discussion '%d' failed: %v\\n\", dbOwner, dbFolder,\n\t\t\t\tdbName, discID, err)\n\t\t\treturn err\n\t\t}\n\t\tif numRows := commandTag.RowsAffected(); numRows != 1 {\n\t\t\tlog.Printf(\n\t\t\t\t\"Wrong number of rows (%v) affected when adding a comment to database '%s%s%s', discussion '%d'\\n\",\n\t\t\t\tnumRows, dbOwner, dbFolder, dbName, discID)\n\t\t}\n\t}\n\n\t// Update the merge request state for MR's being closed\n\tif discClose == true && discType == MERGE_REQUEST {\n\t\tdbQuery = `\n\t\t\tUPDATE discussions\n\t\t\tSET mr_state = $5\n\t\t\tWHERE db_id = (\n\t\t\t\t\tSELECT db.db_id\n\t\t\t\t\tFROM sqlite_databases AS db\n\t\t\t\t\tWHERE db.user_id = (\n\t\t\t\t\t\t\tSELECT user_id\n\t\t\t\t\t\t\tFROM users\n\t\t\t\t\t\t\tWHERE lower(user_name) = lower($1)\n\t\t\t\t\t\t)\n\t\t\t\t\t\tAND folder = $2\n\t\t\t\t\t\tAND db_name = $3\n\t\t\t\t)\n\t\t\t\tAND disc_id = $4`\n\t\tcommandTag, err = tx.Exec(dbQuery, dbOwner, dbFolder, dbName, discID, mrState)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Updating MR state for database '%s%s%s', discussion '%d' failed: %v\\n\", dbOwner,\n\t\t\t\tdbFolder, dbName, discID, err)\n\t\t\treturn err\n\t\t}\n\t\tif numRows := commandTag.RowsAffected(); numRows != 1 {\n\t\t\tlog.Printf(\n\t\t\t\t\"Wrong number of rows (%v) affected when updating MR state for database '%s%s%s', discussion '%d'\\n\",\n\t\t\t\tnumRows, dbOwner, dbFolder, dbName, discID)\n\t\t}\n\t}\n\n\t// Update the last_modified date for the parent discussion\n\tdbQuery = `\n\t\tUPDATE discussions\n\t\tSET last_modified = now()`\n\tif discClose == true {\n\t\tif discState {\n\t\t\t// Discussion is open, so set it to closed\n\t\t\tdbQuery += `, open = false`\n\t\t} else {\n\t\t\t// Discussion is closed, so set it to open\n\t\t\tdbQuery += `, open = true`\n\t\t}\n\t}\n\tif comText != \"\" {\n\t\tdbQuery += 
`, comment_count = comment_count + 1`\n\t}\n\tdbQuery += `\n\t\tWHERE db_id = (\n\t\t\t\tSELECT db.db_id\n\t\t\t\tFROM sqlite_databases AS db\n\t\t\t\tWHERE db.user_id = (\n\t\t\t\t\t\tSELECT user_id\n\t\t\t\t\t\tFROM users\n\t\t\t\t\t\tWHERE lower(user_name) = lower($1)\n\t\t\t\t\t)\n\t\t\t\t\tAND folder = $2\n\t\t\t\t\tAND db_name = $3\n\t\t\t)\n\t\t\tAND disc_id = $4`\n\tcommandTag, err = tx.Exec(dbQuery, dbOwner, dbFolder, dbName, discID)\n\tif err != nil {\n\t\tlog.Printf(\"Updating last modified date for database '%s%s%s', discussion '%d' failed: %v\\n\", dbOwner,\n\t\t\tdbFolder, dbName, discID, err)\n\t\treturn err\n\t}\n\tif numRows := commandTag.RowsAffected(); numRows != 1 {\n\t\tlog.Printf(\n\t\t\t\"Wrong number of rows (%v) affected when updating last_modified date for database '%s%s%s', discussion '%d'\\n\",\n\t\t\tnumRows, dbOwner, dbFolder, dbName, discID)\n\t}\n\n\t// Update the open discussion and MR counters for the database\n\tdbQuery = `\n\t\tWITH d AS (\n\t\t\tSELECT db.db_id\n\t\t\tFROM sqlite_databases AS db\n\t\t\tWHERE db.user_id = (\n\t\t\t\t\tSELECT user_id\n\t\t\t\t\tFROM users\n\t\t\t\t\tWHERE lower(user_name) = lower($1)\n\t\t\t\t)\n\t\t\t\tAND folder = $2\n\t\t\t\tAND db_name = $3\n\t\t)\n\t\tUPDATE sqlite_databases\n\t\tSET discussions = (\n\t\t\t\tSELECT count(disc.*)\n\t\t\t\tFROM discussions AS disc, d\n\t\t\t\tWHERE disc.db_id = d.db_id\n\t\t\t\t\tAND open = true\n\t\t\t\t\tAND discussion_type = 0\n\t\t\t),\n\t\t\tmerge_requests = (\n\t\t\t\tSELECT count(disc.*)\n\t\t\t\tFROM discussions AS disc, d\n\t\t\t\tWHERE disc.db_id = d.db_id\n\t\t\t\t\tAND open = true\n\t\t\t\t\tAND discussion_type = 1\n\t\t\t)\n\t\tWHERE db_id = (SELECT db_id FROM d)`\n\tcommandTag, err = tx.Exec(dbQuery, dbOwner, dbFolder, dbName)\n\tif err != nil {\n\t\tlog.Printf(\"Updating discussion count for database '%s%s%s' failed: %v\\n\", dbOwner, dbFolder, dbName,\n\t\t\terr)\n\t\treturn err\n\t}\n\tif numRows := commandTag.RowsAffected(); numRows != 1 
{\n\t\tlog.Printf(\n\t\t\t\"Wrong number of rows (%v) affected when updating discussion count for database '%s%s%s'\\n\",\n\t\t\tnumRows, dbOwner, dbFolder, dbName)\n\t}\n\n\t// If comment text was provided, generate an event about the new comment\n\tif comText != \"\" {\n\t\tvar commentURL string\n\t\tif discType == MERGE_REQUEST {\n\t\t\tcommentURL = fmt.Sprintf(\"/merge/%s%s%s?id=%d#c%d\", url.PathEscape(dbOwner), dbFolder,\n\t\t\t\turl.PathEscape(dbName), discID, comID)\n\t\t} else {\n\t\t\tcommentURL = fmt.Sprintf(\"/discuss/%s%s%s?id=%d#c%d\", url.PathEscape(dbOwner), dbFolder,\n\t\t\t\turl.PathEscape(dbName), discID, comID)\n\t\t}\n\t\tdetails := EventDetails{\n\t\t\tDBName: dbName,\n\t\t\tDiscID: discID,\n\t\t\tFolder: dbFolder,\n\t\t\tOwner: dbOwner,\n\t\t\tType: EVENT_NEW_COMMENT,\n\t\t\tTitle: discTitle,\n\t\t\tURL: commentURL,\n\t\t\tUserName: commenter,\n\t\t}\n\t\terr = NewEvent(details)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error when creating a new event: %s\\n\", err.Error())\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Commit the transaction\n\terr = tx.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (p PRMirror) AddComment(id int, comment string) bool {\n\tissueComment := github.IssueComment{}\n\tissueComment.Body = &comment\n\n\t_, _, err := p.GitHubClient.Issues.CreateComment(*p.Context, p.Configuration.DownstreamOwner, p.Configuration.DownstreamRepo, id, &issueComment)\n\tif err != nil {\n\t\tlog.Errorf(\"Error while adding a comment to issue#:%d - %s\", id, err.Error())\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (r *AutoRoller) AddComment(issueNum int64, message, user string, timestamp time.Time) error {\n\troll, err := r.recent.Get(issueNum)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"No such issue %d\", issueNum)\n\t}\n\tid := fmt.Sprintf(\"%d_%d\", issueNum, len(roll.Comments))\n\troll.Comments = append(roll.Comments, comment.New(id, message, user))\n\treturn r.recent.Update(roll)\n}", "func addComment(gh 
*octokat.Client, repo octokat.Repo, prNum, comment, commentType string) error {\n\t// get the comments\n\tcomments, err := gh.Comments(repo, prNum, &octokat.Options{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// check if we already made the comment\n\tfor _, c := range comments {\n\t\t// if we already made the comment return nil\n\t\tif strings.ToLower(c.User.Login) == \"gordontheturtle\" && strings.Contains(c.Body, commentType) {\n\t\t\tlogrus.Debugf(\"Already made comment about %q on PR %s\", commentType, prNum)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// add the comment because we must not have already made it\n\tif _, err := gh.AddComment(repo, prNum, comment); err != nil {\n\t\treturn err\n\t}\n\n\tlogrus.Infof(\"Would have added comment about %q PR %s\", commentType, prNum)\n\treturn nil\n}", "func ajaxCreateComment(w http.ResponseWriter, r *http.Request) {\n\tpr(\"ajaxCreateComment\")\n\tprVal(\"r.Method\", r.Method)\n\n\tif r.Method != \"POST\" {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\tuserId := GetSession(w, r)\n\tif userId == -1 { // Secure cookie not found. 
Either session expired, or someone is hacking.\n\t\t// So go to the register page.\n\t\tpr(\"Must be logged in to create a comment.\")\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tprVal(\"userId\", userId)\n\n\t//parse request to struct\n\tvar newComment struct {\n\t\tId int64\n\t\tPostId int64\n\t\tParentId int64\n\t\tText string\n\t}\n\n\terr := json.NewDecoder(r.Body).Decode(&newComment)\n\tif err != nil {\n\t\tprVal(\"Failed to decode json body\", r.Body)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tprVal(\"=======>>>>> newComment\", newComment)\n\n\t// Get the postId and path from the parent's info, in the database.\n\tnewPath := []int64{} // New path = append(parent's path, num children).\n\t{\n\t\t// Have the database determine what the new path should be.\n\t\t// e.g\tParent path:\t1, 2, 3\n\t\t// Child0 path: \t1, 2, 3, 0\n\t\t// Child1 path: \t1, 2, 3, 1\n\t\t// New Child path: [1, 2, 3] + (NumChildren)\n\t\trows := DbQuery(\"SELECT ARRAY_APPEND(Path, NumChildren) FROM $$Comment WHERE Id = $1::bigint\", newComment.ParentId)\n\t\tdefer rows.Close()\n\t\tif rows.Next() {\n\t\t\tarr := pq.Int64Array{} // This weirdness is required for scanning into []int64\n\n\t\t\terr := rows.Scan(&arr)\n\t\t\tcheck(err)\n\n\t\t\tnewPath = []int64(arr) // This weirdness is required for scanning into []int64\n\t\t} else {\n\t\t\t// If it's not in the database, it must be because it has Id = -1 (the top-level post)...\n\t\t\tassert(newComment.ParentId == -1)\n\n\t\t\t// The head comment of the tree, must be added!\n\t\t\t// This allows us to maintain a count of top-level posts, in this head record's NumChildren.\n\t\t\tDbExec(`INSERT INTO $$Comment (Id, PostId, UserId, ParentId, Text, Path, NumChildren)\n\t\t\t\t\tVALUES (-1, $1::bigint, -1, -1, '', '{}'::bigint[], 0);`,\n\t\t\t\tnewComment.PostId)\n\t\t}\n\t\tcheck(rows.Err())\n\t}\n\n\t// TODO: add a database transaction here.\n\t// See: 
http://go-database-sql.org/prepared.html\n\n\t// Send the new comment to the database.\n\tnewComment.Id = DbInsert(\n\t\t`INSERT INTO $$Comment (PostId, UserId, ParentId, Text, Path)\n\t VALUES ($1::bigint, $2::bigint, $3::bigint, $4, $5::bigint[])\n\t returning Id;`,\n\t\tnewComment.PostId,\n\t\tuserId,\n\t\tnewComment.ParentId,\n\t\tnewComment.Text,\n\t\tpq.Array(newPath))\n\n\t// Increment the parent's number of children.\n\tDbExec(`UPDATE $$Comment SET NumChildren = NumChildren + 1 WHERE Id = $1::bigint`, newComment.ParentId)\n\n\t// Increment the Post's NumComments field here.\n\tDbExec(`UPDATE $$Post SET NumComments = NumComments + 1 WHERE Id = $1::bigint`, newComment.PostId)\n\n\t// Have user like their own comments by default.\n\tvoteUpDown(newComment.Id, userId, true, true, true)\n\n\t// Convert newlines to be HTML-friendly. (Do it here so the JSON response gets it and also it will get reapplied\n\t// in ReadCommentTagsFromDB.)\n\tnewComment.Text = strings.Replace(newComment.Text, \"\\n\", \"<br>\", -1)\n\n\t// create json response from struct. 
It needs to know newCommentId so it knows where to put the focus after the window reload.\n\ta, err := json.Marshal(newComment)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Write(a)\n}", "func (db *Database) AddComment(userID types.UserID, objectID types.ObjectID, content string) (err error) {\n\tif err = userID.Validate(); err != nil {\n\t\treturn\n\t}\n\tif err = objectID.Validate(); err != nil {\n\t\treturn\n\t}\n\tif len(content) > 1024 {\n\t\treturn errors.New(\"content too large\")\n\t}\n\n\terr = db.comments.Insert(types.Comment{\n\t\tUserID: userID,\n\t\tObjectID: objectID,\n\t\tContent: content,\n\t\tDate: time.Now(),\n\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to insert new comment\")\n\t}\n\n\treturn\n}", "func (cmntRepo *CommentGormRepo) StoreComment(comment *entity.Comment) (*entity.Comment, []error) {\n\tcmnt := comment\n\terrs := cmntRepo.conn.Create(cmnt).GetErrors()\n\tif len(errs) > 0 {\n\t\treturn nil, errs\n\t}\n\treturn cmnt, errs\n}", "func (cri *CommentRepositoryImpl) StoreComment(c entity.Comment) error {\r\n\r\n\t_, err := cri.conn.Exec(\"INSERT INTO comments (username,email,messages,placedat) values($1, $2, $3,$4)\", c.UserName, c.Email, c.Message, c.PlacedAt)\r\n\tif err != nil {\r\n\t\treturn errors.New(\"Insertion has failed\")\r\n\t}\r\n\r\n\treturn nil\r\n}" ]
[ "0.78270876", "0.77825135", "0.7703459", "0.76506597", "0.7603772", "0.7488165", "0.74705166", "0.74413574", "0.7431552", "0.7339268", "0.71487004", "0.71214473", "0.7113855", "0.71087986", "0.71036273", "0.70275646", "0.70132875", "0.6990566", "0.6989565", "0.6970975", "0.69571495", "0.693099", "0.6873804", "0.6854227", "0.6839919", "0.6750548", "0.6731324", "0.67124367", "0.6707782", "0.6682779", "0.6676664", "0.6660153", "0.6586898", "0.65529215", "0.6493634", "0.6485769", "0.6478456", "0.64337534", "0.6401435", "0.6382773", "0.6368067", "0.6359626", "0.63572377", "0.6348645", "0.6344735", "0.6332879", "0.6310622", "0.62951976", "0.62923384", "0.62855774", "0.6246597", "0.6217452", "0.62148035", "0.62025076", "0.62025076", "0.61854476", "0.61592567", "0.61534005", "0.61310476", "0.61310476", "0.6087621", "0.60648316", "0.60149616", "0.6002502", "0.5983041", "0.59769154", "0.59700954", "0.59700763", "0.5969006", "0.59686804", "0.59634846", "0.5961855", "0.5955311", "0.5940309", "0.59070957", "0.58910996", "0.58876497", "0.58825576", "0.5873124", "0.58730346", "0.58572865", "0.58410996", "0.5806708", "0.5788734", "0.5772476", "0.5754941", "0.5715491", "0.57022077", "0.56979644", "0.5696392", "0.5677787", "0.56648225", "0.5657109", "0.5652912", "0.5650021", "0.56496716", "0.5641635", "0.5634016", "0.5621224" ]
0.8281089
1
NumComments counts the number of tracked comments
func (fc *fakeClient) NumComments() int { n := 0 for _, comments := range fc.commentsAdded { n += len(comments) } return n }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *commentsQueryBuilder) Count() (int64, error) {\n\tif c.err != nil {\n\t\treturn 0, c.err\n\t}\n\treturn c.builder.Count()\n}", "func (o *ViewMilestone) GetCommentsCount() int32 {\n\tif o == nil || o.CommentsCount == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.CommentsCount\n}", "func (q commentQuery) Count() (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow().Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count comment rows\")\n\t}\n\n\treturn count, nil\n}", "func (o *InlineResponse20033Milestones) GetCommentsCount() string {\n\tif o == nil || o.CommentsCount == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.CommentsCount\n}", "func (o *InlineResponse20034Milestone) GetCommentsCount() string {\n\tif o == nil || o.CommentsCount == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.CommentsCount\n}", "func (o *InlineResponse200115) GetCommentsCount() string {\n\tif o == nil || o.CommentsCount == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.CommentsCount\n}", "func (o *InlineResponse20033Milestones) SetCommentsCount(v string) {\n\to.CommentsCount = &v\n}", "func (o *InlineResponse200115) SetCommentsCount(v string) {\n\to.CommentsCount = &v\n}", "func CountAllCommentsPerPost(postID uint64) uint64 {\n\n\tvar result uint64\n\tDB, err := database.NewOpen()\n\n\tcountedCommentsResult, err := DB.Query(\"SELECT * FROM comment WHERE PostID=?\", postID)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tfor countedCommentsResult.Next() {\n\t\tresult = result + 1\n\t}\n\n\tDB.Close()\n\n\tfmt.Println(\"Number of comments for u:\", result)\n\n\treturn result\n}", "func (o *InlineResponse20051TodoItems) GetCommentsCount() string {\n\tif o == nil || o.CommentsCount == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.CommentsCount\n}", "func (o *InlineResponse20034Milestone) 
SetCommentsCount(v string) {\n\to.CommentsCount = &v\n}", "func (o *ViewMilestone) SetCommentsCount(v int32) {\n\to.CommentsCount = &v\n}", "func (t *TeamDiscussion) GetCommentsCount() int {\n\tif t == nil || t.CommentsCount == nil {\n\t\treturn 0\n\t}\n\treturn *t.CommentsCount\n}", "func (o *InlineResponse20049Post) GetCommentsCount() string {\n\tif o == nil || o.CommentsCount == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.CommentsCount\n}", "func (o *InlineResponse200115) HasCommentsCount() bool {\n\tif o != nil && o.CommentsCount != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *InlineResponse20034Milestone) HasCommentsCount() bool {\n\tif o != nil && o.CommentsCount != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *InlineResponse20049Post) SetCommentsCount(v string) {\n\to.CommentsCount = &v\n}", "func (s *TattooStorage) GetArticleCommentCount(name string) int {\n\tlst_buff, err := s.CommentIndexDB.GetJSON(name)\n\tif err != nil {\n\t\tlog.Printf(\"load comment index failed (%v)!\\n\", err)\n\t\treturn 0\n\t}\n\treturn len(lst_buff.([]interface{}))\n}", "func (o *InlineResponse20033Milestones) HasCommentsCount() bool {\n\tif o != nil && o.CommentsCount != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s *userService) IncrCommentCount(userId int64) int {\n\tt := dao.UserDao.Get(userId)\n\tif t == nil {\n\t\treturn 0\n\t}\n\tcommentCount := t.CommentCount + 1\n\tif err := dao.UserDao.UpdateColumn(userId, \"comment_count\", commentCount); err != nil {\n\t\tlog.Error(err.Error())\n\t} else {\n\t\tcache.UserCache.Invalidate(userId)\n\t}\n\treturn commentCount\n}", "func (o *ViewMilestone) HasCommentsCount() bool {\n\tif o != nil && o.CommentsCount != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *InlineResponse20051TodoItems) HasCommentsCount() bool {\n\tif o != nil && o.CommentsCount != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *ViewMilestone) GetCommentsCountOk() (*int32, bool) 
{\n\tif o == nil || o.CommentsCount == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CommentsCount, true\n}", "func (cs commentsByTimestamp) Len() int { return len(cs) }", "func (o *InlineResponse20051TodoItems) SetCommentsCount(v string) {\n\to.CommentsCount = &v\n}", "func (c *Commit) GetCommentCount() int {\n\tif c == nil || c.CommentCount == nil {\n\t\treturn 0\n\t}\n\treturn *c.CommentCount\n}", "func (b *PhotosGetCommentsBuilder) Count(v int) *PhotosGetCommentsBuilder {\n\tb.Params[\"count\"] = v\n\treturn b\n}", "func (o *InlineResponse20049Post) HasCommentsCount() bool {\n\tif o != nil && o.CommentsCount != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *InlineResponse20034Milestone) GetCommentsCountOk() (*string, bool) {\n\tif o == nil || o.CommentsCount == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CommentsCount, true\n}", "func (q cmfPaidprogramCommentQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count cmf_paidprogram_comment rows\")\n\t}\n\n\treturn count, nil\n}", "func (o *InlineResponse20051TodoItems) GetCommentsCountOk() (*string, bool) {\n\tif o == nil || o.CommentsCount == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CommentsCount, true\n}", "func (o *InlineResponse20049Post) GetCommentsCountOk() (*string, bool) {\n\tif o == nil || o.CommentsCount == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CommentsCount, true\n}", "func (b *PhotosGetAllCommentsBuilder) Count(v int) *PhotosGetAllCommentsBuilder {\n\tb.Params[\"count\"] = v\n\treturn b\n}", "func (o *InlineResponse200115) GetCommentsCountOk() (*string, bool) {\n\tif o == nil || o.CommentsCount == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CommentsCount, true\n}", "func (o *InlineResponse20033Milestones) GetCommentsCountOk() 
(*string, bool) {\n\tif o == nil || o.CommentsCount == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CommentsCount, true\n}", "func (g *Gist) GetComments() int {\n\tif g == nil || g.Comments == nil {\n\t\treturn 0\n\t}\n\treturn *g.Comments\n}", "func (o *ViewMilestone) GetNumCommentsRead() int32 {\n\tif o == nil || o.NumCommentsRead == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.NumCommentsRead\n}", "func ProcessComments(comments []Comment) map[string]int {\n\twords := map[string]int{}\n\tfor _, comment := range comments {\n\t\tfor _, word := range strings.Fields(comment.Body) {\n\t\t\t_, ok := words[word]\n\t\t\tif ok {\n\t\t\t\twords[word]++\n\n\t\t\t} else {\n\t\t\t\twords[word] = 1\n\t\t\t}\n\t\t}\n\t}\n\treturn words\n}", "func CountNbLines(filename string) int {\n\treader, file := ReturnReader(filename, 0)\n\tdefer CloseFile(file)\n\n\tnbLines := 0\n\n\ttStart := time.Now()\n\n\tfor reader.Scan() {\n\t\tnbLines++\n\t}\n\n\ttDiff := time.Since(tStart)\n\tfmt.Printf(\"Count nb lines done in time: %f s \\n\", tDiff.Seconds())\n\n\treturn nbLines\n}", "func (env *Env) NumCon() int {\n\tenv.RLock()\n\tn := env.openCons.len()\n\tenv.RUnlock()\n\treturn n\n}", "func GetAllComments(c *gin.Context) {\n\tcontent, err := comments.GetAllComments()\n\n\tif err != nil {\n\t\tc.JSON(400, gin.H{\"success\": false, \"msg\": \"Unable to fetch translations\", \"errCode\": 38})\n\t\treturn\n\t}\n\n\ttotal :=len(content)\n\tc.Header(\"X-Total-Count\", strconv.Itoa(total))\n\tc.Header(\"Access-Control-Expose-Headers\",\"X-Total-Count\")\n\tc.JSON(http.StatusOK, gin.H{\"success\": true, \"msg\": \"ok\", \"data\": content})\n}", "func (l *ChannelList) Count() int {\n\tc := 0\n\tfor i := 0; i < Conf.ChannelBucketCount; i++ {\n\t\tc += len(l.channels[i].data)\n\t}\n\treturn c\n}", "func (c *Client) GetIssueCommentsCount(issueIDOrKey string) (int, error) {\n\treturn c.GetIssueCommentsCountContext(context.Background(), issueIDOrKey)\n}", "func (node *Node) 
CountBreakpoints() int {\n\tnumBreakpoints := 0\n\n\t// If we find any breakpoints return false\n\t// A breakpoint in a permutation X is a position j such that X(j) + 1 ≠ X(j+1)\n\tfor i, element := range node.contents[:len(node.contents)-1] {\n\t\tif math.Abs(float64(element-node.contents[i+1])) > 1 {\n\t\t\tnumBreakpoints++\n\t\t}\n\t}\n\n\treturn numBreakpoints\n}", "func (c *CommentStats) GetTotalPullRequestComments() int {\n\tif c == nil || c.TotalPullRequestComments == nil {\n\t\treturn 0\n\t}\n\treturn *c.TotalPullRequestComments\n}", "func (c *CommentStats) GetTotalGistComments() int {\n\tif c == nil || c.TotalGistComments == nil {\n\t\treturn 0\n\t}\n\treturn *c.TotalGistComments\n}", "func (s Scope) Count() int {\n\treturn s.m.Count()\n}", "func (c *Counter) Count() int64 { return c.count }", "func (b *Buffer) LinesNum() int {\n\treturn len(b.lines)\n}", "func (h *clientHub) NumClients() int {\n\th.RLock()\n\tdefer h.RUnlock()\n\ttotal := 0\n\tfor _, clientConnections := range h.users {\n\t\ttotal += len(clientConnections)\n\t}\n\treturn total\n}", "func (g *Grid) Count() int32 {\n\treturn int32(len(g.set))\n}", "func (h HMSketch) Count(kvs map[string]string) float64 {\n\thist := h.Sketch(kvs)\n\treturn hist.Total()\n}", "func (i *Issue) GetComments() int {\n\tif i == nil || i.Comments == nil {\n\t\treturn 0\n\t}\n\treturn *i.Comments\n}", "func CountOpenFiles() int {\n\tt.Lock()\n\tdefer t.Unlock()\n\treturn len(t.entries)\n}", "func (p *PostingsList) Count() uint64 {\n\tvar n, e uint64\n\tif p.normBits1Hit != 0 {\n\t\tn = 1\n\t\tif p.except != nil && p.except.Contains(uint32(p.docNum1Hit)) {\n\t\t\te = 1\n\t\t}\n\t} else if p.postings != nil {\n\t\tn = p.postings.GetCardinality()\n\t\tif p.except != nil {\n\t\t\te = p.postings.AndCardinality(p.except)\n\t\t}\n\t}\n\treturn n - e\n}", "func (s *plannerStats) NumFacts() int {\n\tr := s.impl.NumFacts()\n\treturn s.track(r, \"NumFacts\")\n}", "func openDiscussionsCount(discussions []*gitlab.Discussion) int 
{\n\t// check if any of the discussions are unresolved\n\tcount := 0\n\tfor _, d := range discussions {\n\t\tfor _, n := range d.Notes {\n\t\t\tif !n.Resolved && n.Resolvable {\n\t\t\t\tcount++\n\t\t\t}\n\t\t}\n\t}\n\treturn count\n}", "func (cgCache *consumerGroupCache) getNumOpenConns() int32 {\n\treturn int32(cgCache.cgMetrics.Get(load.CGMetricNumOpenConns))\n}", "func (r *SlidingWindow) Count() int {return r.count}", "func (q commentQuery) CountP() int64 {\n\tc, err := q.Count()\n\tif err != nil {\n\t\tpanic(boil.WrapErr(err))\n\t}\n\n\treturn c\n}", "func (c *countHashWriter) Count() int {\n\treturn c.n\n}", "func (e *Editor) NumLines() int {\n\te.makeValid()\n\treturn len(e.lines)\n}", "func (tb *TextBuf) NumLines() int {\n\ttb.LinesMu.RLock()\n\tdefer tb.LinesMu.RUnlock()\n\treturn tb.NLines\n}", "func (fc *fakeClient) ClearComments() {\n\tfc.commentsAdded = map[int][]string{}\n}", "func (m NMap) Count() int {\n\tcount := 0\n\tfor _, inMap := range m {\n\t\tinMap.RLock()\n\t\tcount += len(inMap.objs)\n\t\tinMap.RUnlock()\n\t}\n\treturn count\n}", "func (q oauthClientQuery) Count(exec boil.Executor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow(exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count oauth_clients rows\")\n\t}\n\n\treturn count, nil\n}", "func (c *Client) GetIssueCommentsCountContext(ctx context.Context, issueIDOrKey string) (int, error) {\n\tu := fmt.Sprintf(\"/api/v2/issues/%v/comments/count\", issueIDOrKey)\n\n\treq, err := c.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tr := new(p)\n\tif err := c.Do(ctx, req, &r); err != nil {\n\t\treturn 0, err\n\t}\n\treturn r.Count, nil\n}", "func (t *CountTracker) IncrementCount() {\n\tt.m.Lock()\n\tdefer t.m.Unlock()\n\tt.tokenCount++\n}", "func (t *Todo) Count() int {\n\treturn len(t.todos)\n}", "func ObserveCount(mType, provider string, success, 
timeCritical bool) {\n\tmessageCounter.WithLabelValues(mType, provider, strconv.FormatBool(success), strconv.FormatBool(timeCritical)).Inc()\n}", "func (o *ViewMilestone) HasNumCommentsRead() bool {\n\tif o != nil && o.NumCommentsRead != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (h *StmtHistory) Count() int {\n\treturn len(h.history)\n}", "func (s *Streaming) NumObservations() float64 {\n\treturn s.n\n}", "func (strg *inMemoryStorage) NumConnected() int {\n\tstrg.lock.RLock()\n\tdefer strg.lock.RUnlock()\n\treturn len(strg.connected)\n}", "func (bq *BrowserQuery) Count(ctx context.Context) (int, error) {\n\tif err := bq.prepareQuery(ctx); err != nil {\n\t\treturn 0, err\n\t}\n\treturn bq.sqlCount(ctx)\n}", "func (o *VersionedConnection) HasComments() bool {\n\tif o != nil && o.Comments != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (b *CompactableBuffer) Count() int {\n\treturn int(atomic.LoadInt64(&b.count))\n}", "func CountTrackedTimes(ctx context.Context, opts *FindTrackedTimesOptions) (int64, error) {\n\tsess := db.GetEngine(ctx).Where(opts.toCond())\n\tif opts.RepositoryID > 0 || opts.MilestoneID > 0 {\n\t\tsess = sess.Join(\"INNER\", \"issue\", \"issue.id = tracked_time.issue_id\")\n\t}\n\treturn sess.Count(&TrackedTime{})\n}", "func (o DebugSessionOutput) Count() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *DebugSession) pulumi.IntOutput { return v.Count }).(pulumi.IntOutput)\n}", "func (o *VersionedControllerService) HasComments() bool {\n\tif o != nil && o.Comments != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s *Store) NumLines() int {\n\treturn len(s.lines)\n}", "func (p *MongodbProvider) Count() (total int) {\n\tvar err error\n\ttotal, err = p.c.Count()\n\tif err != nil {\n\t\tpanic(\"session/mgoSession: error counting records: \" + err.Error())\n\t}\n\treturn total\n}", "func (c *countHashReader) Count() int {\n\treturn c.n\n}", "func (s *Server) NumConns() int64 {\n\treturn 
atomic.LoadInt64(&s.activeConns)\n}", "func FlushCommentSum(courseId string, ds *db.DataSource, log *log.Logger) error {\n\tsession := ds.NewSession()\n\tdefer session.Close()\n\tcomment := new(table.CourseCommentTable)\n\tcountSql := `SELECT COUNT(\"UUID\") FROM \"COURSE_COMMENT\" WHERE \"COURSE_ID\" = ?\n\t\tAND \"FROZEN_STATUS\" = ?`\n\tcount, countErr := session.Sql(countSql, courseId, value.STATUS_ENABLED).Count(comment)\n\tif countErr != nil {\n\t\tlog.Println(count)\n\t}\n\tcourse := new(table.CourseTable)\n\tcourse.CommentSum = count\n\tupdateNum, updateErr := session.Id(courseId).Update(course)\n\tif updateNum == 0 {\n\t\tif updateErr != nil {\n\t\t\tlog.Println(updateNum)\n\t\t}\n\t\treturn COURSE_FLUSH_COMMENT_NUM_ERR\n\t}\n\tcommitErr := session.Commit()\n\tif commitErr != nil {\n\t\tlog.Println(commitErr)\n\t\tsession.Rollback()\n\t\treturn COURSE_FLUSH_COMMENT_NUM_ERR\n\t}\n\treturn nil\n}", "func (c connectInfo) numConnections(grid rect) int {\n\tn := 0\n\tif c.up && grid.y > 0 {\n\t\tn++\n\t}\n\tif c.right && grid.x < grid.w-1 {\n\t\tn++\n\t}\n\tif c.down && grid.y < grid.h-1 {\n\t\tn++\n\t}\n\tif c.left && grid.x > 0 {\n\t\tn++\n\t}\n\treturn n\n}", "func (m *Cmap) Count() int {\n\treturn int(atomic.LoadInt64(&m.count))\n}", "func (r *postCommentResolver) NumChildren(ctx context.Context, post *posts.Comment) (int, error) {\n\treturn r.postService.GetNumChildrenOfPost(post.ID)\n}", "func (s MemoryStorage) Count(q Query) (int, error) {\n\tfmt.Println(\"LEN\", len(s.bookmarks))\n\treturn len(s.bookmarks), nil\n}", "func (t *TrafficClones) GetCount() int {\n\tif t == nil || t.Count == nil {\n\t\treturn 0\n\t}\n\treturn *t.Count\n}", "func (s *Server) Count() int {\n\ts.cond.L.Lock()\n\tdefer s.cond.L.Unlock()\n\treturn len(s.points)\n}", "func (state *StateConditions) Count() int {\n\tcount := 0\n\tif state.Exit != nil {\n\t\tcount++\n\t}\n\tif state.Timeout != nil {\n\t\tcount++\n\t}\n\tcount += len(state.FileMonitors)\n\tcount += 
len(state.Outputs)\n\treturn count\n}", "func (o *ViewProjectActivePages) HasComments() bool {\n\tif o != nil && o.Comments != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (c CounterSnapshot) Count() int64 { return int64(c) }", "func (g *Game)Count()(map[string]int,map[string]*DetailScore,map[string]int){\n\troundScore,detailScore := g.CurrentRound.countScore(g.playersById,g.Players[g.CurrentDicoPlayer],true,g.TypeGameNormal)\n\treturn roundScore,detailScore,g.GetTotalScore()\n}", "func (n *NodeServiceImpl) Count(namespace string) (map[string]int, error) {\n\tlist, err := n.List(namespace, &models.ListOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn map[string]int{\n\t\tplugin.QuotaNode: len(list.Items),\n\t}, nil\n}", "func (p *printer) commentSizeBefore(next token.Position) int {\n\t// save/restore current p.commentInfo (p.nextComment() modifies it)\n\tdefer func(info commentInfo) {\n\t\tp.commentInfo = info\n\t}(p.commentInfo)\n\n\tsize := 0\n\tfor p.commentBefore(next) {\n\t\tfor _, c := range p.comment.List {\n\t\t\tsize += len(c.Text)\n\t\t}\n\t\tp.nextComment()\n\t}\n\treturn size\n}", "func (s *gcBlobTaskStore) Count(ctx context.Context) (int, error) {\n\tdefer metrics.InstrumentQuery(\"gc_blob_task_count\")()\n\n\tq := \"SELECT COUNT(*) FROM gc_blob_review_queue\"\n\tvar count int\n\n\tif err := s.db.QueryRowContext(ctx, q).Scan(&count); err != nil {\n\t\treturn count, fmt.Errorf(\"counting GC blob tasks: %w\", err)\n\t}\n\n\treturn count, nil\n}", "func (game *Game) NumDistinctActions() int {\n\treturn int(C.GameNumDistinctActions(game.game))\n}" ]
[ "0.68079066", "0.6796185", "0.6678622", "0.6601007", "0.65994287", "0.6564124", "0.65543514", "0.6539888", "0.6526384", "0.6513839", "0.64962953", "0.6486291", "0.6461657", "0.638486", "0.63353264", "0.63241607", "0.6318741", "0.63061756", "0.6243758", "0.62380064", "0.62378746", "0.6220585", "0.620633", "0.61968046", "0.6171061", "0.6121614", "0.6091763", "0.60822076", "0.6018143", "0.5950989", "0.59445137", "0.59093654", "0.59075516", "0.5896424", "0.5881786", "0.58195764", "0.5753269", "0.5720177", "0.5645601", "0.56334037", "0.5594379", "0.55807394", "0.5565681", "0.55521196", "0.5543793", "0.55103266", "0.55067986", "0.5446873", "0.54441005", "0.5425608", "0.54179865", "0.5397907", "0.5384524", "0.5375621", "0.53722537", "0.53547305", "0.53526103", "0.5343816", "0.5325659", "0.53189856", "0.5295645", "0.5287216", "0.5283478", "0.5274075", "0.52619296", "0.5253266", "0.52498114", "0.5247458", "0.5243047", "0.52423835", "0.52373093", "0.52228785", "0.5214331", "0.5213373", "0.52103764", "0.5208144", "0.52052206", "0.5199832", "0.51956034", "0.51952195", "0.51885915", "0.5183358", "0.5183189", "0.51724726", "0.5167646", "0.5165896", "0.5162264", "0.51577485", "0.51540226", "0.51532954", "0.51527226", "0.5151885", "0.5148952", "0.51458836", "0.5142926", "0.51400924", "0.5138832", "0.5137771", "0.51300544" ]
0.8412568
1
NewOutput instantiates a new output plugin instance publishing to elasticsearch.
func (f elasticsearchOutputPlugin) NewOutput( config *outputs.MothershipConfig, topologyExpire int, ) (outputs.Outputer, error) { // configure bulk size in config in case it is not set if config.BulkMaxSize == nil { bulkSize := defaultBulkSize config.BulkMaxSize = &bulkSize } output := &elasticsearchOutput{} err := output.init(*config, topologyExpire) if err != nil { return nil, err } return output, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Manager) NewOutput(conf loutput.Config, pipelines ...processor.PipelineConstructorFunc) (output.Streamed, error) {\n\treturn bundle.AllOutputs.Init(conf, m, pipelines...)\n}", "func NewOutput() *Output {\n\treturn &Output{}\n}", "func newOutput(node rpcClient, txHash *chainhash.Hash, vout uint32, value uint64, redeem dex.Bytes) *output {\n\treturn &output{\n\t\ttxHash: *txHash,\n\t\tvout: vout,\n\t\tvalue: value,\n\t\tredeem: redeem,\n\t\tnode: node,\n\t}\n}", "func (c *Config) NewOutput(ctx context.Context) (output.Output, error) {\n\tvar e exporter.Exporter\n\tswitch c.Format {\n\tcase exporter.FormatJSON:\n\t\te = exporter.NewJSONExporter()\n\tcase exporter.FormatRaw:\n\t\te = exporter.NewRawExporter()\n\tcase exporter.FormatMD:\n\t\te = exporter.NewMarkdownExporter()\n\tcase exporter.FormatHTML:\n\t\te = exporter.NewHTMLExporter()\n\tcase exporter.FormatIntermediate:\n\t\te = exporter.NewIntermediateExporter()\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"format %q not supported\", c.Format)\n\t}\n\n\to := &Output{\n\t\tpath: c.Path,\n\t\texporter: e,\n\t}\n\treturn o, nil\n}", "func newOutput(txHash *chainhash.Hash, vout uint32, value uint64, tree int8) *output {\n\treturn &output{\n\t\tpt: outPoint{\n\t\t\ttxHash: *txHash,\n\t\t\tvout: vout,\n\t\t},\n\t\tvalue: value,\n\t\ttree: tree,\n\t}\n}", "func newOutput(txHash *chainhash.Hash, vout uint32, value uint64, tree int8) *output {\n\treturn &output{\n\t\tpt: outPoint{\n\t\t\ttxHash: *txHash,\n\t\t\tvout: vout,\n\t\t},\n\t\tvalue: value,\n\t\ttree: tree,\n\t}\n}", "func NewOutput() *Output {\n\treturn &Output{\n\t\tConnections: make(map[Connection]bool),\n\t}\n}", "func NewOutput(source *ValueSource, controlProgram *Program, ordinal uint64) *Output {\n\treturn &Output{\n\t\tSource: source,\n\t\tControlProgram: controlProgram,\n\t\tOrdinal: ordinal,\n\t}\n}", "func NewOutput(value *Thunk) *Thunk {\n\treturn Normal(OutputType{value})\n}", "func NewOutput(t mockConstructorTestingTNewOutput) *Output 
{\n\tmock := &Output{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewOutput(conf output.MongoDBConfig, mgr bundle.NewManagement) (output.Streamed, error) {\n\tm, err := NewWriter(mgr, conf, mgr.Logger(), mgr.Metrics())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar w output.Streamed\n\tif w, err = output.NewAsyncWriter(\"mongodb\", conf.MaxInFlight, m, mgr); err != nil {\n\t\treturn w, err\n\t}\n\treturn batcher.NewFromConfig(conf.Batching, w, mgr)\n}", "func NewOutputter(outputFormat string) (Outputter, error) {\n\tif _, exists := registry.Outputs[outputFormat]; !exists {\n\t\treturn nil, ErrorUnknownOutputter\n\t}\n\tfactory, ok := registry.Outputs[outputFormat]\n\tif !ok {\n\t\treturn nil, ErrorInvalidOutputter\n\t}\n\to := factory()\n\treturn o, nil\n}", "func (a *Agent) StartOutput(ctx context.Context, pluginName string) (string, error) {\n\toutputConfig := models.OutputConfig{\n\t\tName: pluginName,\n\t}\n\n\toutput, err := a.CreateOutput(pluginName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tuniqueId, err := uuid.NewUUID()\n\tif err != nil {\n\t\treturn \"\", errors.New(\"errored while generating UUID for new INPUT\")\n\t}\n\n\tro := models.NewRunningOutput(pluginName, output, &outputConfig,\n\t\ta.Config.Agent.MetricBatchSize, a.Config.Agent.MetricBufferLimit, uniqueId.String())\n\n\terr = ro.Init()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = a.connectOutput(ctx, ro)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = a.RunSingleOutput(ro, ctx)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// add new output to outputunit\n\ta.ou.outputs = append(a.ou.outputs, ro)\n\n\terr = a.Config.UpdateConfig(map[string]interface{}{\"unique_id\": uniqueId.String(), \"name\": pluginName}, uniqueId.String(), \"outputs\", \"START_PLUGIN\")\n\tif err != nil {\n\t\tlog.Printf(\"W! 
[agent] Unable to save configuration for output %s\", uniqueId.String())\n\t}\n\treturn uniqueId.String(), nil\n}", "func NewOutputController(service *goa.Service, om *output.Manager) *OutputController {\n\treturn &OutputController{\n\t\tController: service.NewController(\"OutputController\"),\n\t\tom: om,\n\t}\n}", "func NewOutput(output *Synapse) *Neuron {\n\treturn &Neuron{\n\t\tInputs: []*Synapse{},\n\t\tOutputs: []*Synapse{output},\n\t\tFunction: func(inputs, outputs []*Synapse) {\n\t\t\tvar sum float64\n\t\t\tfor _, s := range inputs {\n\t\t\t\tsum += (*s.Value * *s.Weight)\n\t\t\t}\n\t\t\toutputs[0].Value = &sum\n\t\t},\n\t}\n}", "func (out *elasticsearchOutput) Init(beat string, config outputs.MothershipConfig, topology_expire int) error {\n\n\tif len(config.Protocol) == 0 {\n\t\tconfig.Protocol = \"http\"\n\t}\n\n\tvar urls []string\n\n\tif len(config.Hosts) > 0 {\n\t\t// use hosts setting\n\t\tfor _, host := range config.Hosts {\n\t\t\turl := fmt.Sprintf(\"%s://%s%s\", config.Protocol, host, config.Path)\n\t\t\turls = append(urls, url)\n\t\t}\n\t} else {\n\t\t// use host and port settings\n\t\turl := fmt.Sprintf(\"%s://%s:%d%s\", config.Protocol, config.Host, config.Port, config.Path)\n\t\turls = append(urls, url)\n\t}\n\n\tes := NewElasticsearch(urls, config.Username, config.Password)\n\tout.Conn = es\n\n\tif config.Index != \"\" {\n\t\tout.Index = config.Index\n\t} else {\n\t\tout.Index = beat\n\t}\n\n\tout.TopologyExpire = 15000\n\tif topology_expire != 0 {\n\t\tout.TopologyExpire = topology_expire /*sec*/ * 1000 // millisec\n\t}\n\n\tout.FlushInterval = 1000 * time.Millisecond\n\tif config.Flush_interval != nil {\n\t\tout.FlushInterval = time.Duration(*config.Flush_interval) * time.Millisecond\n\t}\n\tout.BulkMaxSize = 10000\n\tif config.Bulk_size != nil {\n\t\tout.BulkMaxSize = *config.Bulk_size\n\t}\n\n\tif config.Max_retries != nil {\n\t\tout.Conn.SetMaxRetries(*config.Max_retries)\n\t}\n\n\tlogp.Info(\"[ElasticsearchOutput] Using Elasticsearch 
%s\", urls)\n\tlogp.Info(\"[ElasticsearchOutput] Using index pattern [%s-]YYYY.MM.DD\", out.Index)\n\tlogp.Info(\"[ElasticsearchOutput] Topology expires after %ds\", out.TopologyExpire/1000)\n\tif out.FlushInterval > 0 {\n\t\tlogp.Info(\"[ElasticsearchOutput] Insert events in batches. Flush interval is %s. Bulk size is %d.\", out.FlushInterval, out.BulkMaxSize)\n\t} else {\n\t\tlogp.Info(\"[ElasticsearchOutput] Insert events one by one. This might affect the performance of the shipper.\")\n\t}\n\n\tif config.Save_topology {\n\t\terr := out.EnableTTL()\n\t\tif err != nil {\n\t\t\tlogp.Err(\"Fail to set _ttl mapping: %s\", err)\n\t\t\t// keep trying in the background\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\terr := out.EnableTTL()\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tlogp.Err(\"Fail to set _ttl mapping: %s\", err)\n\t\t\t\t\ttime.Sleep(5 * time.Second)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n\n\tout.sendingQueue = make(chan EventMsg, 1000)\n\tgo out.SendMessagesGoroutine()\n\n\treturn nil\n}", "func (pub *Publisher) CreateOutput(nodeHWID string, outputType types.OutputType,\n\tinstance string) *types.OutputDiscoveryMessage {\n\toutput := pub.registeredOutputs.CreateOutput(nodeHWID, outputType, instance)\n\treturn output\n}", "func New(\n\tconf Config,\n\tmgr interop.Manager,\n\tlog log.Modular,\n\tstats metrics.Type,\n\tpipelines ...iprocessor.PipelineConstructorFunc,\n) (output.Streamed, error) {\n\tif mgrV2, ok := mgr.(interface {\n\t\tNewOutput(Config, ...iprocessor.PipelineConstructorFunc) (output.Streamed, error)\n\t}); ok {\n\t\treturn mgrV2.NewOutput(conf, pipelines...)\n\t}\n\tif c, ok := Constructors[conf.Type]; ok {\n\t\treturn c.constructor(conf, mgr, log, stats, pipelines...)\n\t}\n\treturn nil, component.ErrInvalidType(\"output\", conf.Type)\n}", "func newOutput(\n\tnames []Name,\n\tgenera map[string]struct{},\n\tts []token.TokenSN,\n\tversion string,\n\tcfg config.Config,\n) Output {\n\tfor i := range names {\n\t\tlg := 
math.Log10(names[i].Odds)\n\t\tif math.IsInf(lg, 0) {\n\t\t\tlg = 0\n\t\t}\n\t\tnames[i].OddsLog10 = lg\n\t}\n\tmeta := Meta{\n\t\tDate: time.Now(),\n\t\tFinderVersion: version,\n\t\tWithAllMatches: cfg.WithAllMatches,\n\t\tWithAmbiguousNames: cfg.WithAmbiguousNames,\n\t\tWithUniqueNames: cfg.WithUniqueNames,\n\t\tWithBayes: cfg.WithBayes,\n\t\tWithOddsAdjustment: cfg.WithOddsAdjustment,\n\t\tWithVerification: cfg.WithVerification,\n\t\tWordsAround: cfg.TokensAround,\n\t\tLanguage: cfg.Language.String(),\n\t\tLanguageDetected: cfg.LanguageDetected,\n\t\tTotalWords: len(ts),\n\t\tTotalNameCandidates: candidatesNum(ts),\n\t\tTotalNames: len(names),\n\t}\n\tif !cfg.WithAmbiguousNames {\n\t\tnames = FilterNames(names, genera)\n\t}\n\n\tif !cfg.WithBayesOddsDetails || cfg.WithOddsAdjustment {\n\t\tpostprocessNames(names, meta.TotalNameCandidates, cfg)\n\t}\n\to := Output{Meta: meta, Names: names}\n\to.WithLanguageDetection = o.LanguageDetected != \"\"\n\n\treturn o\n}", "func (a *Agent) CreateOutput(name string) (telegraf.Output, error) {\n\tp, exists := outputs.Outputs[name]\n\tif exists {\n\t\treturn p(), nil\n\t}\n\treturn nil, fmt.Errorf(\"could not find output plugin with name: %s\", name)\n}", "func NewOutputField() *OutputField {\n\tthis := OutputField{}\n\treturn &this\n}", "func NewOutputs(outputsCfg config.Outputs) Outputs {\n\toutputs := make(Outputs)\n\tfor _, o := range outputsCfg {\n\t\tName := o.IO.Name\n\t\tType := o.IO.Type\n\t\tRepr := msgs.Representation(o.IO.Representation)\n\t\tChan := o.IO.Channel\n\t\tif !msgs.IsMessageTypeRegistered(Type) {\n\t\t\terrorString := fmt.Sprintf(\"The '%s' message type has not been registered!\", Type)\n\t\t\tpanic(errorString)\n\t\t}\n\t\tif !msgs.DoesMessageTypeImplementsRepresentation(Type, Repr) {\n\t\t\terrorString := fmt.Sprintf(\"'%s' message-type does not implement codec for '%s' representation format\", Type, Repr)\n\t\t\tpanic(errorString)\n\t\t}\n\t\toutputs[Name] = Output{IO{Name: Name, Type: Type, 
Representation: Repr, Channel: Chan}}\n\t}\n\treturn outputs\n}", "func (x *fastReflection_Output) New() protoreflect.Message {\n\treturn new(fastReflection_Output)\n}", "func (p *TwitterOutputPlugin) Build(output *model.OutputDef) (model.OutputProvider, error) {\n\tconsumerKey := output.Props.Get(\"consumerKey\")\n\tif consumerKey == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing consumer key property\")\n\t}\n\tconsumerSecret := output.Props.Get(\"consumerSecret\")\n\tif consumerSecret == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing consumer secret property\")\n\t}\n\taccessToken := output.Props.Get(\"accessToken\")\n\tif accessToken == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing access token property\")\n\t}\n\taccessTokenSecret := output.Props.Get(\"accessTokenSecret\")\n\tif accessTokenSecret == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing access token secret property\")\n\t}\n\tanaconda.SetConsumerKey(consumerKey)\n\tanaconda.SetConsumerSecret(consumerSecret)\n\tapi := anaconda.NewTwitterApi(accessToken, accessTokenSecret)\n\n\treturn &TwitterOutputProvider{\n\t\tid: output.ID,\n\t\talias: output.Alias,\n\t\tspec: spec,\n\t\ttags: output.Tags,\n\t\tenabled: output.Enabled,\n\t\tapi: api,\n\t\tconsumerKey: consumerKey,\n\t\tconsumerSecret: consumerSecret,\n\t}, nil\n}", "func NewExporter(uri string, timeout time.Duration) *Exporter {\n\tcounters := make(map[string]*prometheus.CounterVec)\n\tgauges := make(map[string]*prometheus.GaugeVec)\n\n\tfor name, info := range counterVecMetrics {\n\t\tlog.Printf(\"Registering %s\", name)\n\t\tcounters[name] = prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: name,\n\t\t\tHelp: info.help,\n\t\t}, append([]string{\"cluster\", \"node\"}, info.labels...))\n\t}\n\n\tfor name, info := range gaugeVecMetrics {\n\t\tlog.Printf(\"Registering %s\", name)\n\t\tgauges[name] = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: name,\n\t\t\tHelp: 
info.help,\n\t\t}, append([]string{\"cluster\", \"node\"}, info.labels...))\n\t}\n\n\tfor name, help := range counterMetrics {\n\t\tcounters[name] = prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: name,\n\t\t\tHelp: help,\n\t\t}, []string{\"cluster\", \"node\"})\n\t}\n\n\tfor name, help := range gaugeMetrics {\n\t\tgauges[name] = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: name,\n\t\t\tHelp: help,\n\t\t}, []string{\"cluster\", \"node\"})\n\t}\n\n\t// Init our exporter.\n\treturn &Exporter{\n\t\tURI: uri,\n\n\t\tup: prometheus.NewGauge(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"up\",\n\t\t\tHelp: \"Was the Elasticsearch instance query successful?\",\n\t\t}),\n\n\t\tcounters: counters,\n\t\tgauges: gauges,\n\n\t\tclient: &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tDial: func(netw, addr string) (net.Conn, error) {\n\t\t\t\t\tc, err := net.DialTimeout(netw, addr, timeout)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\tif err := c.SetDeadline(time.Now().Add(timeout)); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\treturn c, nil\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func NewOutput(output string) io.Writer {\n\tswitch output {\n\tcase \"-\":\n\t\treturn os.Stdout\n\tdefault:\n\t\tf, err := os.Create(output)\n\t\tcheck(err)\n\t\treturn bufio.NewWriter(f)\n\t}\n}", "func NewOutput(addr sdk.CUAddress, coins sdk.Coins) Output {\n\treturn Output{\n\t\tAddress: addr,\n\t\tCoins: coins,\n\t}\n}", "func (a *Agent) UpdateOutputPlugin(uid string, config map[string]interface{}) (telegraf.Output, error) {\n\ta.pluginLock.Lock()\n\tplugin, ok := a.runningPlugins[uid]\n\ta.pluginLock.Unlock()\n\n\tif !ok {\n\t\tlog.Printf(\"E! 
[agent] You are trying to update an output that does not exist: %s \\n\", uid)\n\t\treturn nil, errors.New(\"you are trying to update an output that does not exist\")\n\t}\n\n\toutput := plugin.(*models.RunningOutput)\n\n\t// This code creates a copy of the struct and see if JSON Unmarshal works without errors\n\tconfigJSON, err := validateStructConfig(reflect.ValueOf(output.Output), config)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not update output plugin %s with error: %s\", uid, err)\n\t}\n\n\ttomlMap, err := generateTomlKeysMap(reflect.ValueOf(output.Output), config)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not update output plugin %s with error: %s\", uid, err)\n\t}\n\n\tif len(a.Config.Outputs) == 1 {\n\t\ta.incrementOutputCount(1)\n\t}\n\n\terr = a.StopOutputPlugin(uid, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = json.Unmarshal(configJSON, &output.Output)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tro := models.NewRunningOutput(output.Config.Name, output.Output, output.Config,\n\t\ta.Config.Agent.MetricBatchSize, a.Config.Agent.MetricBufferLimit, output.UniqueId)\n\n\terr = a.RunSingleOutput(ro, a.Context)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = a.Config.UpdateConfig(tomlMap, output.UniqueId, \"outputs\", \"UPDATE_PLUGIN\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not update output plugin %s with error: %s\", uid, err)\n\t}\n\n\tif len(a.Config.Outputs) == 1 {\n\t\ta.incrementOutputCount(-1)\n\t}\n\n\treturn output.Output, nil\n}", "func NewOutputter(dt, dtOut, tmax float64, nu int, outFcn OutFcnType) (o *Outputter) {\n\tif dtOut < dt {\n\t\tdtOut = dt\n\t}\n\to = new(Outputter)\n\to.Dt = dt\n\to.DtOut = dtOut\n\to.Nsteps = int(math.Ceil(tmax / o.Dt))\n\to.Tmax = float64(o.Nsteps) * o.Dt // fix tmax\n\to.Every = int(o.DtOut / o.Dt)\n\to.Nmax = int(math.Ceil(float64(o.Nsteps)/float64(o.Every))) + 1\n\to.T = make([]float64, o.Nmax)\n\to.U = Alloc(o.Nmax, nu)\n\tif outFcn != nil 
{\n\t\to.Fcn = outFcn\n\t\to.Fcn(o.U[o.Idx], 0)\n\t\tif o.Every > 1 {\n\t\t\to.Tidx = o.Every - 1 // use -1 here only for the first output\n\t\t}\n\t\to.Idx++\n\t}\n\treturn\n}", "func NewOutput(sink Sink, opts ...OutputOption) Output {\n\tvar config OutputConfig\n\tfor _, opt := range opts {\n\t\topt(&config)\n\t}\n\treturn newOutput(\"\", sink, config)\n}", "func NewOutputWidget(name string, x0, y0, x1, y1 float32, label string, initMsg string) *OutputWidget {\n\treturn &OutputWidget{name: name, x0: x0, y0: y0, x1: x1, y1: y1, label: label, initMsg: initMsg}\n}", "func NewJsonOutput(w http.ResponseWriter) (out JsonOutput) {\n\tout.ResponseWriter = w\n\treturn\n}", "func (s TestingSingleton) Output(file string) TestingBuildParams {\n\treturn buildParamsFromOutput(s.provider, file)\n}", "func (c *SearchCall) Output(output string) *SearchCall {\n\tc.urlParams_.Set(\"output\", output)\n\treturn c\n}", "func (a *Agent) storePluginOutput(plugin PluginOutput) error {\n\n\tif plugin.Data == nil {\n\t\tplugin.Data = make(PluginInventoryDataset, 0)\n\t}\n\n\tsort.Sort(plugin.Data)\n\n\t// Filter out ignored inventory data before writing the file out\n\tvar sortKey string\n\tignore := a.Context.Config().IgnoredInventoryPathsMap\n\tsimplifiedPluginData := make(map[string]interface{})\nDataLoop:\n\tfor _, data := range plugin.Data {\n\t\tif data == nil {\n\t\t\tcontinue\n\t\t}\n\t\tsortKey = data.SortKey()\n\t\tpluginSource := fmt.Sprintf(\"%s/%s\", plugin.Id, sortKey)\n\t\tif _, ok := ignore[strings.ToLower(pluginSource)]; ok {\n\t\t\tcontinue DataLoop\n\t\t}\n\t\tsimplifiedPluginData[sortKey] = data\n\t}\n\n\treturn a.store.SavePluginSource(\n\t\tplugin.Entity.Key.String(),\n\t\tplugin.Id.Category,\n\t\tplugin.Id.Term,\n\t\tsimplifiedPluginData,\n\t)\n}", "func newExporter(cfg component.Config, set exporter.CreateSettings) (*baseExporter, error) {\n\toCfg := cfg.(*Config)\n\n\tif oCfg.Endpoint == \"\" {\n\t\treturn nil, errors.New(\"OTLP exporter config requires an 
Endpoint\")\n\t}\n\n\tuserAgent := fmt.Sprintf(\"%s/%s (%s/%s)\",\n\t\tset.BuildInfo.Description, set.BuildInfo.Version, runtime.GOOS, runtime.GOARCH)\n\n\treturn &baseExporter{config: oCfg, settings: set.TelemetrySettings, userAgent: userAgent}, nil\n}", "func New() *Module {\n\tconstruct()\n\tm := new(Module)\n\tl.Register(m, \"outputFunc\")\n\tm.Output(defaultOutput)\n\treturn m\n}", "func (pub *Publisher) PublishOutputEvent(node *types.NodeDiscoveryMessage) error {\n\treturn PublishOutputEvent(node, pub.registeredOutputs, pub.registeredOutputValues, pub.messageSigner)\n}", "func NewPgoutputEventHandler(s RowSink) *PgoutputEventHandler {\n\treturn &PgoutputEventHandler{\n\t\tsink: s,\n\t\tlog: pkglog.NewLogger(\"replication-event-handler\"),\n\t\trelations: &relationSet{},\n\t}\n}", "func (act *PublishAction) Output() error {\n\t// do nothing.\n\treturn nil\n}", "func (c *StdOutputConfig) CreateOutput() (plugin.Output, error) {\n\treturn &StdOutput{\n\t\tdata: make(chan []byte),\n\t\terrs: make(chan error),\n\t\tstopChan: make(chan struct{}),\n\t\twg: sync.WaitGroup{},\n\t}, nil\n}", "func NewPlugin(plugins func() discovery.Plugins, choices selector.Options) instance.Plugin {\n\tbase := &internal.Base{\n\t\tPlugins: plugins,\n\t\tChoices: choices,\n\t\tSelectFunc: SelectOne,\n\t}\n\treturn &impl{\n\t\tPlugin: base.Init(),\n\t}\n}", "func NewOutput(path string, batchSize int) (*Path, error) {\n\n\tp := &Path{\n\t\tname: path,\n\t\tbatchSize: batchSize,\n\t}\n\n\tif err := p.create(); err != nil {\n\t\treturn p, err\n\t}\n\n\treturn p, nil\n}", "func newExporter(w io.Writer) (trace.SpanExporter, error) {\n\treturn stdouttrace.New(\n\t\tstdouttrace.WithWriter(w),\n\t\t// Use human-readable output.\n\t\tstdouttrace.WithPrettyPrint(),\n\t\t// Do not print timestamps for the demo.\n\t\tstdouttrace.WithoutTimestamps(),\n\t)\n}", "func newTestPublisherWithBulk(response OutputResponse) *testPublisher {\n\treturn newTestPublisher(defaultBulkSize, response)\n}", "func (md 
*MassDns) SetOutput(oc chan dns.RR) {\n\tmd.output = oc\n}", "func (r *Request) NewResult(plugin string) *Result {\n\treturn &Result{\n\t\tPlugin: plugin,\n\t\tVersion: r.Version,\n\t\tMetadata: make(map[string]string),\n\t}\n}", "func NewMockedOutput(txID utxo.TransactionID, index uint16, balance uint64) (out *MockedOutput) {\n\tout = model.NewStorable[utxo.OutputID, MockedOutput](&mockedOutput{\n\t\tTxID: txID,\n\t\tIndex: index,\n\t\tBalance: balance,\n\t})\n\tout.SetID(utxo.OutputID{TransactionID: txID, Index: index})\n\treturn out\n}", "func newTXOutput(value int, address string) TXOutput {\n\ttxo := TXOutput{value, address}\n\treturn txo\n}", "func NewOutputsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) *OutputsClient {\n\tcp := arm.ClientOptions{}\n\tif options != nil {\n\t\tcp = *options\n\t}\n\tif len(cp.Host) == 0 {\n\t\tcp.Host = arm.AzurePublicCloud\n\t}\n\treturn &OutputsClient{subscriptionID: subscriptionID, ep: string(cp.Host), pl: armruntime.NewPipeline(module, version, credential, &cp)}\n}", "func (p *jsonOutputNode) New(attr string) outputNode {\n\treturn &jsonOutputNode{make(map[string]interface{})}\n}", "func NewOutputter(name string, measurements <-chan Measurement, config Config) (Outputter, error) {\n\tswitch name {\n\tcase \"stdoutl2metraw\":\n\t\t{\n\t\t\treturn NewStdOutL2MetRaw(measurements, config), nil\n\t\t}\n\tcase \"stdoutl2metder\":\n\t\t{\n\t\t\treturn NewStdOutL2MetDer(measurements, config), nil\n\t\t}\n\tcase \"librato\":\n\t\t{\n\t\t\treturn NewLibratoOutputter(measurements, config), nil\n\t\t}\n\tcase \"carbon\":\n\t\t{\n\t\t\treturn NewCarbonOutputter(measurements, config), nil\n\t\t}\n\tcase \"statsd\":\n\t\t{\n\t\t\treturn NewStatsdOutputter(measurements, config), nil\n\t\t}\n\t}\n\n\treturn nil, errors.New(\"unknown outputter\")\n}", "func newTestPublisherNoBulk(response OutputResponse) *testPublisher {\n\treturn newTestPublisher(-1, response)\n}", "func NewPlugin(opts 
...Option) *Plugin {\n\tp := &Plugin{}\n\n\tp.SetName(\"generator\")\n\tp.KVStore = &etcd.DefaultPlugin\n\tp.KVScheduler = &kvscheduler.DefaultPlugin\n\n\tfor _, o := range opts {\n\t\to(p)\n\t}\n\n\tp.Setup()\n\n\treturn p\n}", "func NewEs(componentId ...string) *Es {\n id := defaultEsId\n if len(componentId) > 0 {\n id = componentId[0]\n }\n e := &Es{}\n e.client = pgo2.App().Component(id, es.New).(*es.Client)\n\n return e\n}", "func (z *ZapPlugin) New(results []index.Document) (\n\tsegment.Segment, uint64, error) {\n\treturn z.newWithChunkFactor(results, defaultChunkFactor)\n}", "func NewExporter(cfg *Configuration) *Exporter {\n\te := Exporter{}\n\te.Client = ovsdb.NewOvnClient()\n\te.initParas(cfg)\n\treturn &e\n}", "func New(s *lmsensors.Scanner) *Exporter {\n\treturn &Exporter{\n\t\ts: s,\n\t}\n}", "func (sdkLogger SdkLogger) Output(calldepth int, s string) error {\n\tlog.WithField(\"type\", \"nsq driver\").Info(s)\n\treturn nil\n}", "func New(api api.API, inputCommand []string) (p *Publisher, err error) {\n\tp = new(Publisher)\n\n\tp.sdAPI = api\n\tp.specPath, p.tag, err = parsePublishCommand(inputCommand)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to parse command:%v\", err)\n\t}\n\n\tp.commandSpec, err = util.LoadYaml(p.specPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Yaml load failed:%v\", err)\n\t}\n\tp.commandSpec.SpecYamlPath = p.specPath\n\n\treturn\n}", "func (pub *Publisher) CreateInputFromOutput(\n\tnodeHWID string, inputType types.InputType, instance string, outputAddress string,\n\thandler func(input *types.InputDiscoveryMessage, sender string, value string)) {\n\n\tinput := pub.inputFromOutputs.CreateInput(nodeHWID, inputType, instance, outputAddress, handler)\n\n\t_ = input\n}", "func NewPlugin(namespace string, dfn plugin.Definition, cfg *plugin.WorkerConfig) *Plugin {\n\treturn &Plugin{\n\t\tName: dfn.Name,\n\t\tUUID: gouuid.NewV4(),\n\t\tResultType: dfn.ResultType,\n\t\tPodSpec: &dfn.PodSpec,\n\t\tNamespace: 
namespace,\n\t\tConfig: cfg,\n\t}\n}", "func newSinkMetaHandler(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\tvars := mux.Vars(r)\n\tpluginName := vars[\"name\"]\n\n\tlanguage := getLanguage(r)\n\tptrMetadata, err := meta.GetSinkMeta(pluginName, language)\n\tif err != nil {\n\t\thandleError(w, err, \"\", logger)\n\t\treturn\n\t}\n\tjsonResponse(ptrMetadata, w, logger)\n}", "func NewElasticsearchOutboundOp(opts ...Option) *Schema {\n\treturn NewDBOutboundOp(\"elasticsearch\", opts...)\n}", "func Output(props *OutputProps, children ...Element) *OutputElem {\n\trProps := &_OutputProps{\n\t\tBasicHTMLElement: newBasicHTMLElement(),\n\t}\n\n\tif props != nil {\n\t\tprops.assign(rProps)\n\t}\n\n\treturn &OutputElem{\n\t\tElement: createElement(\"output\", rProps, children...),\n\t}\n}", "func GetOutputPlugin() (op model.OutputPlugin, err error) {\n\treturn &TwitterOutputPlugin{}, nil\n}", "func newPlugin() (p *slackscot.Plugin) {\n\tp = new(slackscot.Plugin)\n\tp.Name = \"tester\"\n\tp.Commands = []slackscot.ActionDefinition{{\n\t\tMatch: func(m *slackscot.IncomingMessage) bool {\n\t\t\treturn strings.HasPrefix(m.NormalizedText, \"make\")\n\t\t},\n\t\tUsage: \"make `<something>`\",\n\t\tDescription: \"Have the test bot make something for you\",\n\t\tAnswer: func(m *slackscot.IncomingMessage) *slackscot.Answer {\n\t\t\treturn &slackscot.Answer{Text: \"Ready\"}\n\t\t},\n\t}}\n\n\treturn p\n}", "func New(options ...Option) (metric.Exporter, error) {\n\tcfg := newConfig(options...)\n\texp := &exporter{\n\t\ttemporalitySelector: cfg.temporalitySelector,\n\t\taggregationSelector: cfg.aggregationSelector,\n\t}\n\texp.encVal.Store(*cfg.encoder)\n\treturn exp, nil\n}", "func NewCliOutput(color bool) OutputWriter {\n\tau := aurora.NewAurora(color)\n\tif run.GOOS == \"windows\" {\n\t\tau = aurora.NewAurora(false)\n\t}\n\n\tt := newCliTemplate()\n\n\treturn OutputWriter{\n\t\tout: os.Stdout,\n\t\tau: au,\n\t\ttemplate: t,\n\t}\n}", "func (m *Module) 
Output(outputFunc func(Info) bar.Output) *Module {\n\tm.outputFunc.Set(outputFunc)\n\treturn m\n}", "func New() *Action {\n\treturn &Action{w: os.Stdout}\n}", "func NewOutputStrategy(root string, g g.Generator) (OutputStrategy, error) {\n\toutput := outputStrategies[g.Spec.Build.Output.Strategy]\n\tif output == nil {\n\t\treturn nil, fmt.Errorf(\"Output Strategy %s unknown\", g.Spec.Build.Output.Strategy)\n\t}\n\n\treturn output(root, g), nil\n}", "func New(cfg *Config, logger logger.Logger, registerer prometheus.Registerer) (*Plugin, error) {\n\tservice := &Plugin{\n\t\tcfg: cfg,\n\t\tregisterer: registerer,\n\t\tLogger: logger.NewLogger(\"simplePlugin\"),\n\t}\n\treturn service, nil\n}", "func NewElasticsearch(conf Config, mgr interop.Manager, log log.Modular, stats metrics.Type) (output.Streamed, error) {\n\telasticWriter, err := writer.NewElasticsearchV2(conf.Elasticsearch, mgr, log, stats)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tw, err := NewAsyncWriter(\n\t\tTypeElasticsearch, conf.Elasticsearch.MaxInFlight, elasticWriter, log, stats,\n\t)\n\tif err != nil {\n\t\treturn w, err\n\t}\n\treturn NewBatcherFromConfig(conf.Elasticsearch.Batching, w, mgr, log, stats)\n}", "func (h *MemHome) Output(p, name string) io.WriteCloser {\n\tpkg := h.pkgs[p]\n\tif pkg == nil {\n\t\tpanic(\"pkg not exists\")\n\t}\n\tret := newMemFile()\n\tpkg.outs[name] = ret\n\treturn ret\n}", "func New(config *Config, log *zap.Logger) (exporter.TraceExporter, error) {\n\thttpClient := &http.Client{}\n\toptions := []elastic.ClientOptionFunc{\n\t\telastic.SetURL(config.Servers...),\n\t\telastic.SetBasicAuth(config.Username, config.Password),\n\t\telastic.SetSniff(config.Sniffer),\n\t\telastic.SetHttpClient(httpClient),\n\t}\n\tif config.TokenFile != \"\" {\n\t\ttoken, err := loadToken(config.TokenFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thttpClient.Transport = &tokenAuthTransport{\n\t\t\ttoken: token,\n\t\t\twrapped: &http.Transport{},\n\t\t}\n\t}\n\n\tesRawClient, 
err := elastic.NewClient(options...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create Elasticsearch client for %s, %v\", config.Servers, err)\n\t}\n\tbulk, err := esRawClient.BulkProcessor().\n\t\tBulkActions(config.bulkActions).\n\t\tBulkSize(config.bulkSize).\n\t\tWorkers(config.bulkWorkers).\n\t\tFlushInterval(config.bulkFlushInterval).\n\t\tDo(context.Background())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tversion := config.Version\n\tif version == 0 {\n\t\tversion, err = getVersion(esRawClient, config.Servers[0])\n\t}\n\tvar tags []string\n\tif config.TagsAsFields.AllAsFields && config.TagsAsFields.File != \"\" {\n\t\ttags, err = loadTagsFromFile(config.TagsAsFields.File)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to load tags file: %v\", err)\n\t\t}\n\t}\n\n\tw := esSpanStore.NewSpanWriter(esSpanStore.SpanWriterParams{\n\t\tLogger: log,\n\t\tMetricsFactory: metrics.NullFactory,\n\t\tClient: eswrapper.WrapESClient(esRawClient, bulk, version),\n\t\tIndexPrefix: config.IndexPrefix,\n\t\tUseReadWriteAliases: config.UseWriteAlias,\n\t\tAllTagsAsFields: config.TagsAsFields.AllAsFields,\n\t\tTagKeysAsFields: tags,\n\t\tTagDotReplacement: config.TagsAsFields.DotReplacement,\n\t})\n\n\tif config.CreateTemplates {\n\t\tspanMapping, serviceMapping := es.GetMappings(int64(config.Shards), int64(config.Shards), version)\n\t\terr := w.CreateTemplates(spanMapping, serviceMapping)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tstorage := jexporter.Storage{\n\t\tWriter: w,\n\t}\n\treturn exporterhelper.NewTraceExporter(\n\t\tconfig,\n\t\tstorage.Store,\n\t\texporterhelper.WithShutdown(func() error {\n\t\t\treturn w.Close()\n\t\t}))\n}", "func NewOut(name string, f []float64) *Out {\n\treturn &Out{\n\t\tname: name,\n\t\tframe: f,\n\t}\n}", "func codeintelUploadOutput() (out *output.Output) {\n\tif codeintelUploadFlags.json || codeintelUploadFlags.noProgress || codeintelUploadFlags.verbosity > 0 {\n\t\treturn 
nil\n\t}\n\n\treturn output.NewOutput(flag.CommandLine.Output(), output.OutputOpts{\n\t\tVerbose: true,\n\t})\n}", "func NewLogger(output *os.File, component string) (Logger, error) {\n\tlog := zerolog.New(output).With().\n\t\tStr(\"component\", component).\n\t\tLogger()\n\n\tswitch viper.GetString(\"log-level\") {\n\tcase \"debug\":\n\t\tzerolog.SetGlobalLevel(zerolog.DebugLevel)\n\tcase \"warning\":\n\t\tzerolog.SetGlobalLevel(zerolog.WarnLevel)\n\tcase \"fatal\":\n\t\tzerolog.SetGlobalLevel(zerolog.FatalLevel)\n\tcase \"info\":\n\t\tzerolog.SetGlobalLevel(zerolog.InfoLevel)\n\tdefault:\n\t\tzerolog.SetGlobalLevel(zerolog.InfoLevel)\n\t\tlog.Info().Msgf(\"Unknown log-level %s, using info.\", viper.GetString(\"log-level\"))\n\t}\n\n\treturn logger{\n\t\tLogger: log,\n\t}, nil\n}", "func New(out string, truncate bool) *Engine {\n\tif (len(_extractors) < 1) || (len(_loaders) < 1) {\n\t\t// we need at least 1 extractor and 1 loader for work\n\t\treturn nil\n\t}\n\te := &Engine{\n\t\textractors: _extractors,\n\t\tloaders: _loaders,\n\t\toutputFolder: out,\n\t}\n\tif truncate {\n\t\te.Clean()\n\t}\n\treturn e\n}", "func (p *protoOutputNode) New(attr string) outputNode {\n\tuc := nodePool.Get().(*graph.Node)\n\tuc.Attribute = attr\n\treturn &protoOutputNode{uc}\n}", "func NewExporter(dsIP string, interval time.Duration) (*Exporter, error) {\n\tlog.Infof(\"Setup Syno client using diskstation: %s and interval %s\\n\", dsIP, interval)\n\tclient, err := syno.NewClient(dsIP, interval)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Can't create the Syno client: %s\", err)\n\t}\n\n\tlog.Debugln(\"Init exporter\")\n\treturn &Exporter{\n\t\tClient: client,\n\t}, nil\n}", "func NewExporter(o Options) (*Exporter, error) {\n\tif o.Host == \"\" {\n\t\t// default Host\n\t\to.Host = \"127.0.0.1\"\n\t}\n\n\tif o.Port == 0 {\n\t\t// default Port\n\t\to.Port = 2003\n\t}\n\n\te := &Exporter{\n\t\topts: o,\n\t}\n\n\tfor _, val := range o.Tags {\n\t\te.tags += \";\" + val\n\t}\n\n\tb := 
bundler.NewBundler((*view.Data)(nil), func(items interface{}) {\n\t\tvds := items.([]*view.Data)\n\t\te.sendBundle(vds)\n\t})\n\te.bundler = b\n\n\te.bundler.BufferedByteLimit = defaultBufferedViewDataLimit\n\te.bundler.BundleCountThreshold = defaultBundleCountThreshold\n\te.bundler.DelayThreshold = defaultDelayThreshold\n\n\te.connectGraphite = func() (*client.Graphite, error) {\n\t\treturn client.NewGraphite(o.Host, o.Port)\n\t}\n\n\treturn e, nil\n}", "func NewOutputCodec() *codec.MsgpackHandle {\n\t_codec := &codec.MsgpackHandle{}\n\t_codec.MapType = reflect.TypeOf(map[string]interface{}(nil))\n\t_codec.RawToString = false\n\t// _codec.DecodeOptions.MapValueReset = true\n\t_codec.StructToArray = true\n\treturn _codec\n}", "func NewMySQLOutput(cfg *config.Config, outCfg *config.Output) (outputs.Output, error) {\n\tif outCfg == nil {\n\t\toutCfg = &config.Output{\n\t\t\tMySQL: &config.MySQL{},\n\t\t}\n\t}\n\tm := MySQL{\n\t\tlogger: log.WithFields(logrus.Fields{\"output\": NameMySQL}),\n\t\tconfig: outCfg.MySQL,\n\t\tdbCons: map[string]*sqlx.DB{},\n\t}\n\tif m.config.DSN == \"\" {\n\t\treturn nil, fmt.Errorf(\"no DSN for mysql connection given\")\n\t}\n\tif m.config.TableNamePattern == \"\" {\n\t\tm.config.TableNamePattern = defaultTableNamePattern\n\t}\n\n\treturn m, nil\n}", "func NewExport(info *ExportInfo) *ExportData {\n\tif len(agent.Version) > 0 {\n\t\tinfo.AgentVersion = agent.Version\n\t} else {\n\t\tinfo.AgentVersion = \"debug\"\n\t}\n\n\tinfo.ExportVersion = \"1.0\"\n\tinfo.CreationDate = time.Now()\n\treturn &ExportData{\n\t\tInfo: info,\n\t}\n}", "func RegisterOutput(name string, factory OutputCtr) {\n\tif _, exists := registry.Outputs[name]; !exists {\n\t\tregistry.Outputs[name] = factory\n\t}\n}", "func (all *Widgets) Output() *OutputWidget { return all.widgets[OutputWidgetName].(*OutputWidget) }", "func New(outputToFile bool, outputFile string, pageSize uint, allowOutputOverwrite bool) (*Writer, error) {\n\tif allowOutputOverwrite == false && 
core.IsFileExisting(outputFile) {\n\t\treturn nil, errors.New(\"Output file already existing. You can use `-o` to allow overwrite\")\n\t}\n\n\tfilepool := file.New(outputFile, pageSize)\n\n\treturn &Writer{outputToFile, outputFile, filepool}, nil\n}", "func NewOTExporter(conf *envvar.Configuration) (*prometheus.Exporter, error) {\n\tif err := runtime.Start(runtime.WithMinimumReadMemStatsInterval(time.Second)); err != nil {\n\t\treturn nil, fmt.Errorf(\"runtime.Start %w\", err)\n\t}\n\n\tpromExporter, err := prometheus.NewExportPipeline(prometheus.Config{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"prometheus.NewExportPipeline %w\", err)\n\t}\n\n\tglobal.SetMeterProvider(promExporter.MeterProvider())\n\n\t//-\n\n\tjaegerEndpoint, _ := conf.Get(\"JAEGER_ENDPOINT\")\n\n\tjaegerExporter, err := jaeger.NewRawExporter(\n\t\tjaeger.WithCollectorEndpoint(jaegerEndpoint),\n\t\tjaeger.WithSDKOptions(sdktrace.WithSampler(sdktrace.AlwaysSample())),\n\t\tjaeger.WithProcessFromEnv(),\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"jaeger.NewRawExporter %w\", err)\n\t}\n\n\ttp := sdktrace.NewTracerProvider(\n\t\tsdktrace.WithSampler(sdktrace.AlwaysSample()),\n\t\tsdktrace.WithSyncer(jaegerExporter),\n\t)\n\n\totel.SetTracerProvider(tp)\n\totel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))\n\n\treturn promExporter, nil\n}", "func NewExporter(uri string, timeout time.Duration, logger log.Logger) (*Exporter, error) {\n\treturn &Exporter{\n\t\tURI: uri,\n\t\ttotalScrapes: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"exporter_scrapes_total\",\n\t\t\tHelp: \"Current total iqAir scrapes.\",\n\t\t}),\n\t\tjsonParseFailures: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"exporter_json_parse_failures_total\",\n\t\t\tHelp: \"Number of errors while parsing JSON.\",\n\t\t}),\n\t\tlogger: logger,\n\t}, nil\n}", "func 
NewPluginCommand(cmd *cobra.Command, dockerCli *client.DockerCli) {\n}", "func NewFakeOutput(t testing.TB) *FakeOutput {\n\treturn &FakeOutput{\n\t\tReceived: make(chan *entry.Entry, 100),\n\t\tSugaredLogger: zaptest.NewLogger(t).Sugar(),\n\t}\n}", "func NewHTTPPublisher(endpoint string) *HTTPPublisher { return &HTTPPublisher{endpoint} }", "func newBgMetadataElasticSearchConnector(elasticSearchClient ElasticSearchClient, registry prometheus.Registerer, bulkSize, maxRetry uint, indexName, IndexDateFmt string) *BgMetadataElasticSearchConnector {\n\tvar esc = BgMetadataElasticSearchConnector{\n\t\tclient: elasticSearchClient,\n\t\tBulkSize: bulkSize,\n\t\tbulkBuffer: make([]ElasticSearchDocument, 0, bulkSize),\n\t\tMaxRetry: maxRetry,\n\t\tIndexName: indexName,\n\t\tIndexDateFmt: IndexDateFmt,\n\n\t\tUpdatedDocuments: prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"updated_documents\",\n\t\t\tHelp: \"total number of documents updated in ElasticSearch splited between metrics and directories\",\n\t\t}, []string{\"status\", \"type\"}),\n\n\t\tHTTPErrors: prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"http_errors\",\n\t\t\tHelp: \"total number of http errors encountered partitionned by status code\",\n\t\t}, []string{\"code\"}),\n\n\t\tWriteDurationMs: prometheus.NewHistogram(prometheus.HistogramOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"write_duration_ms\",\n\t\t\tHelp: \"time spent writing to ElasticSearch based on `took` field of response \",\n\t\t\tBuckets: []float64{250, 500, 750, 1000, 1500, 2000, 5000, 10000}}),\n\n\t\tRequestSize: prometheus.NewHistogram(prometheus.HistogramOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"write_request_size_bytes\",\n\t\t\tHelp: \"Size of batch create requests performed on elasticsearch\",\n\t\t\tBuckets: []float64{10000, 100000, 1000000, 5000000, 10000000, 20000000, 50000000}}),\n\n\t\tDocumentBuildDurationMs: 
prometheus.NewHistogram(prometheus.HistogramOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"document_build_duration_ms\",\n\t\t\tHelp: \"time spent building an ElasticSearch document\",\n\t\t\tBuckets: []float64{1, 5, 10, 50, 100, 250, 500, 750, 1000, 2000}}),\n\t\tlogger: zap.L(),\n\t}\n\t_ = registry.Register(esc.UpdatedDocuments)\n\t_ = registry.Register(esc.WriteDurationMs)\n\t_ = registry.Register(esc.DocumentBuildDurationMs)\n\t_ = registry.Register(esc.HTTPErrors)\n\t_ = registry.Register(esc.RequestSize)\n\tif esc.IndexName == \"\" {\n\t\tesc.IndexName = default_metrics_metadata_index\n\t}\n\tif esc.IndexDateFmt == \"\" {\n\t\tesc.DirectoriesIndexAlias = fmt.Sprintf(\"%s_%s\", esc.IndexName, directories_index_suffix)\n\t\tesc.MetricsIndexAlias = fmt.Sprintf(\"%s_%s\", esc.IndexName, metrics_index_suffix)\n\t}\n\n\tesc.KnownIndices = map[string]bool{}\n\treturn &esc\n}", "func NewPlugin(name string, path string, args []string, config skyconfig.Configuration) Plugin {\n\tfactory := transportFactories[name]\n\tif factory == nil {\n\t\tpanic(fmt.Errorf(\"unable to find plugin transport '%v'\", name))\n\t}\n\tp := Plugin{\n\t\ttransport: factory.Open(path, args, config),\n\t\tgatewayMap: map[string]*router.Gateway{},\n\t}\n\treturn p\n}", "func New() *AttestorPlugin {\n\treturn &AttestorPlugin{}\n}", "func New() (*Plugin, error) {\n\treturn &Plugin{\n\t\tHandler: admission.NewHandler(admission.Create, admission.Update),\n\t}, nil\n}", "func NewUsecase(outputPort Outport) Inport {\n\treturn &showPostBySlugInteractor{\n\t\toutport: outputPort,\n\t}\n}", "func NewExporter(endpoint string) (*Exporter, error) {\n\tlog.Infof(\"Setup Pihole exporter using URL: %s\", endpoint)\n\tpihole, err := pihole.NewClient(endpoint, auth)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Exporter{\n\t\tPihole: pihole,\n\t}, nil\n}" ]
[ "0.6398827", "0.6216201", "0.61832756", "0.6142493", "0.600292", "0.600292", "0.58812755", "0.5859912", "0.57929546", "0.57894796", "0.578846", "0.5770203", "0.5722518", "0.5554305", "0.5553186", "0.55329394", "0.55323535", "0.55209005", "0.54609996", "0.5429639", "0.54219884", "0.5405621", "0.53437126", "0.53307337", "0.53292", "0.52711636", "0.5236616", "0.52067727", "0.5201567", "0.51732534", "0.51714903", "0.505765", "0.5002947", "0.5000211", "0.49892643", "0.4986545", "0.4972501", "0.49460304", "0.4929581", "0.49263844", "0.49176493", "0.4913875", "0.4887396", "0.48831442", "0.4876739", "0.48689485", "0.48527572", "0.4850293", "0.48355407", "0.48313758", "0.4824092", "0.48172522", "0.47862184", "0.47666293", "0.4765697", "0.4761946", "0.47535133", "0.47486657", "0.47359955", "0.4728658", "0.47230285", "0.47140732", "0.47137624", "0.47006634", "0.47002882", "0.46996567", "0.4697176", "0.46963033", "0.46652922", "0.46609977", "0.4654663", "0.464867", "0.46448267", "0.46338913", "0.4631898", "0.46292064", "0.46277216", "0.46134928", "0.46100476", "0.46051022", "0.4591692", "0.45904315", "0.45849147", "0.45820555", "0.45789778", "0.45755494", "0.4558131", "0.4555649", "0.4549276", "0.45422187", "0.45321774", "0.45217153", "0.45151034", "0.45108235", "0.4493657", "0.4492377", "0.4489348", "0.44855615", "0.44854528", "0.4485322" ]
0.8399119
0
loadTemplate checks if the index mapping template should be loaded In case template loading is enabled, template is written to index
func loadTemplate(config outputs.Template, clients []mode.ProtocolClient) { // Check if template should be loaded // Not being able to load the template will output an error but will not stop execution if config.Name != "" && len(clients) > 0 { // Always takes the first client esClient := clients[0].(*Client) logp.Info("Loading template enabled. Trying to load template: %v", config.Path) exists := esClient.CheckTemplate(config.Name) // Check if template already exist or should be overwritten if !exists || config.Overwrite { if config.Overwrite { logp.Info("Existing template will be overwritten, as overwrite is enabled.") } // Load template from file content, err := ioutil.ReadFile(config.Path) if err != nil { logp.Err("Could not load template from file path: %s; Error: %s", config.Path, err) } else { reader := bytes.NewReader(content) err = esClient.LoadTemplate(config.Name, reader) if err != nil { logp.Err("Could not load template: %v", err) } } } else { logp.Info("Template already exists and will not be overwritten.") } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func loadTemplate() {\n\tif _, err := os.Stat(indexPath); err == nil {\n\t\tif homeTemplate, err = template.New(\"index.html\").ParseFiles(indexPath); err != nil {\n\t\t\tlog.Errorf(\"Unable to parse template %s: %+v\", indexPath, err)\n\t\t}\n\t\tlog.Infof(\"Running with production template...\")\n\t\treturn\n\t}\n\tlog.Infof(\"Running in dev mode...\")\n\tdevMode = true\n}", "func (e *esearch) ApplyIndexTemplate(indexType string) error {\n\n\t// Remove the existing index template (if exists)\n\tdeleteTemplateRepsonse, err := e.client.IndexDeleteTemplate(e.indexName(indexType)).Do(e.ctx)\n\tif elastic.IsNotFound(err) {\n\t\t// We're good\n\t} else if err != nil {\n\t\treturn fmt.Errorf(\"Failed to remove Elasticsearch template '%s' error: %v\", e.indexName(indexType), err)\n\t} else if !deleteTemplateRepsonse.Acknowledged {\n\t\treturn fmt.Errorf(\"Failed to receive Elasticsearch delete %s template response\", indexType)\n\t}\n\n\t// Load the index mapping\n\tvar mapping = make(map[string]interface{})\n\tmappingFile := config.GetString(\"elasticsearch.\" + indexType + \".template_file\")\n\n\t// Get mapping file\n\tvar rawMapping []byte\n\tif mappingFile == \"\" {\n\t\tmappingFile = \"embedded\"\n\t\trawMapping, err = embed.Asset(\"template-6-\" + indexType + \".json\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Could not retrieve embedded mapping file: %v\", err)\n\t\t}\n\t} else {\n\t\t// Get the default mapping from the mapping file\n\t\trawMapping, err = ioutil.ReadFile(mappingFile)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Could not retrieve mapping from %s error: %s\", mappingFile, err)\n\t\t}\n\t}\n\n\t// Copy the mapping structure to a map we can modify\n\terr = json.Unmarshal(rawMapping, &mapping)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not parse mapping JSON from %s error %s\", mappingFile, err)\n\t}\n\n\t// Update the default mapping settings based on passed in options\n\tsettings := 
mapping[\"settings\"].(map[string]interface{})\n\tsettings[\"number_of_shards\"] = config.GetInt(\"elasticsearch.\" + indexType + \".index_shards\")\n\tsettings[\"number_of_replicas\"] = config.GetInt(\"elasticsearch.\" + indexType + \".index_replicas\")\n\tsettings[\"refresh_interval\"] = config.GetString(\"elasticsearch.\" + indexType + \".refresh_interval\")\n\n\t// Create an index template\n\tmapping[\"index_patterns\"] = e.indexName(indexType) + \"-*\"\n\n\t// Create the new index template\n\tcreateTemplate, err := e.client.IndexPutTemplate(e.indexName(indexType)).BodyJson(mapping).Do(e.ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to create Elasticsearch %s template: %v\", indexType, err)\n\t}\n\tif !createTemplate.Acknowledged {\n\t\treturn fmt.Errorf(\"Failed to receive acknowledgement that Elasticsearch %s template was created\", indexType)\n\t}\n\n\treturn nil\n\n}", "func (t *Pongo2Engine) Load() (err error) {\n\n\terr = recoverTemplateNotFound()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// time point\n\tt.loadedAt = time.Now()\n\n\t// unnamed root template\n\t//var root = template.New(\"\")\n\n\tvar walkFunc = func(path string, info os.FileInfo, err error) (_ error) {\n\n\t\t// handle walking error if any\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// skip all except regular files\n\t\t// TODO (kostyarin): follow symlinks\n\t\tif !info.Mode().IsRegular() {\n\t\t\treturn\n\t\t}\n\n\t\t// filter by extension\n\t\tif filepath.Ext(path) != t.opts.ext {\n\t\t\treturn\n\t\t}\n\n\t\t// get relative path\n\t\tvar rel string\n\t\tif rel, err = filepath.Rel(t.opts.templateDir, path); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// name of a template is its relative path\n\t\t// without extension\n\t\trel = strings.TrimSuffix(rel, t.opts.ext)\n\t\ttplExample := pongo2.Must(pongo2.FromFile(path))\n\t\tt.tmplMap[rel] = tplExample\n\t\treturn err\n\t}\n\n\tif err = filepath.Walk(t.opts.templateDir, walkFunc); err != nil 
{\n\t\treturn\n\t}\n\n\treturn\n}", "func (s *server) loadTemplates() error {\n includePath := \"templates/\"\n layoutPath := \"templates/layout/\"\n\n if s.templates == nil {\n s.templates = make(map[string]*template.Template)\n }\n\n layoutFiles, err := filepath.Glob(layoutPath + \"*.tmpl\")\n if err != nil {\n log.Println(\"failed to get included templates\")\n return err\n }\n\n includeFiles, err := filepath.Glob(includePath + \"*.tmpl\")\n if err != nil {\n log.Println(\"failed to get layout templates\")\n return err\n }\n\n mainTemplate := template.New(\"main\")\n mainTemplate, err = mainTemplate.Parse(mainTmpl)\n if err != nil {\n log.Println(\"failed to parse main template\")\n return err\n }\n\n for _, file := range includeFiles {\n fileName := filepath.Base(file)\n files := append(layoutFiles, file)\n s.templates[fileName], err = mainTemplate.Clone()\n if err != nil {\n return err\n }\n s.templates[fileName] = template.Must(\n s.templates[fileName].ParseFiles(files...))\n }\n\n s.bufpool = bpool.NewBufferPool(64)\n return nil\n}", "func (g *Generator) loadTemplate(t *template.Template, tmplPath string) (*template.Template, error) {\n\t// Make the filepath relative to the filemap.\n\ttmplPath = g.FileMap.relative(tmplPath)[0]\n\n\t// Determine the open function.\n\treadFile := g.ReadFile\n\tif readFile == nil {\n\t\treadFile = ioutil.ReadFile\n\t}\n\n\t// Read the file.\n\tdata, err := readFile(tmplPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create a new template and parse.\n\t_, name := path.Split(tmplPath)\n\treturn t.New(name).Parse(string(data))\n}", "func (st *Stemplate) loadTemplate(tname string) *template.Template {\n\n\ttemplates, terr := filepath.Glob(st.templatesDir + \"*.tmpl\")\n\tif terr != nil {\n\t\tfmt.Println(\"[JIT template]: ERROR ~ \" + terr.Error())\n\t\treturn nil\n\t}\n\n\ttemplates = append(templates, st.templatesDir+tname)\n\n\treturn template.Must(template.ParseFiles(templates...))\n}", "func (self templateEngine) 
loadTemplate(name string) (t string) {\n b, err := ioutil.ReadFile(self.templateFilepath(name))\n if err == nil {\n t = string(b)\n } else {\n log.Println(\"error loading template\", err)\n t = err.Error()\n }\n return\n}", "func (loader *MapLoader) LoadTemplate(name string) (string, error) {\n\tif src, ok := (*loader)[name]; ok {\n\t\treturn src, nil\n\t}\n\treturn \"\", Errorf(\"Could not find template \" + name)\n}", "func Load(pathPrefix string) {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ttpl = template.New(\"index\").Funcs(funcs)\n\ttpl = template.Must(tpl.ParseGlob(filepath.Join(cwd, pathPrefix, templatePath, \"*html\")))\n\ttpl = template.Must(tpl.ParseGlob(filepath.Join(cwd, pathPrefix, partialPath, \"*.html\")))\n}", "func renderTemplate(w http.ResponseWriter, tmpl string, p *Page) {\n\tp.Index = pages\n\n\terr := templates.ExecuteTemplate(w, tmpl+\".html\", p)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}", "func (site *Site) loadTemplate(includes fs.FS, path string, content []byte) error {\n\tpage, err := ParseTemplate(includes, path, content)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"ParseTemplate: %w\", err)\n\t}\n\n\tif _, exists := site.Pages[page.Slug]; exists {\n\t\treturn fmt.Errorf(\"duplicate page %q\", page.Slug)\n\t}\n\n\tsite.Pages[page.Slug] = page\n\n\treturn nil\n}", "func (self templateEngine) reloadTemplate(name string) {\n self.templates[name] = self.loadTemplate(name)\n}", "func mainHandler(w http.ResponseWriter, r *http.Request) {\n\tif *local {\n\t\tloadTemplates()\n\t}\n\tif r.Method == \"GET\" {\n\t\tw.Header().Set(\"Content-Type\", \"text/html\")\n\t\tif err := indexTemplate.Execute(w, struct{}{}); err != nil {\n\t\t\tsklog.Errorln(\"Failed to expand template:\", err)\n\t\t}\n\t}\n}", "func generateIndex(path string, templatePath string) (lo string) {\n homeDir, hmErr := user.Current()\n checkErr(hmErr)\n var lines []string\n var layout string\n if 
templatePath == \"\" {\n layout = randFromFile(homeDir.HomeDir + \"/go/src/git.praetorianlabs.com/mars/sphinx/bslayouts\")\n imgOne := randFile(path + \"/img\")\n imgOneStr := \"imgOne: .\" + imgOne.Name()\n imgTwo := randFile(path + \"/img\")\n imgTwoStr := \"imgTwo: .\" + imgTwo.Name()\n imgThree := randFile(path + \"/img\")\n imgThreeStr := \"imgThree: .\" + imgThree.Name()\n imgFour := randFile(path + \"/img\")\n imgFourStr := \"imgFour: .\" + imgFour.Name()\n imgsStr := imgOneStr + \"\\n\" + imgTwoStr + \"\\n\" + imgThreeStr + \"\\n\" + imgFourStr\n\n lines = append(lines, \"---\")\n lines = append(lines, \"layout: \" + layout)\n lines = append(lines, imgsStr)\n lines = append(lines, \"title: \" + randFromFile(path + \"/titles\"))\n title := randFromFile(path + \"/titles\")\n lines = append(lines, \"navTitle: \" + title)\n lines = append(lines, \"heading: \" + title)\n lines = append(lines, \"subheading: \" + randFromFile(path + \"/subheading\"))\n lines = append(lines, \"aboutHeading: About Us\")\n lines = append(lines, generateServices(path + \"/services\"))\n lines = append(lines, generateCategories(path + \"/categories\"))\n lines = append(lines, \"servicesHeading: Our offerings\")\n lines = append(lines, \"contactDesc: Contact Us Today!\")\n lines = append(lines, \"phoneNumber: \" + randFromFile(homeDir.HomeDir + \"/go/src/git.praetorianlabs.com/mars/sphinx/phone-num\"))\n lines = append(lines, \"email: \" + randFromFile(homeDir.HomeDir + \"/go/src/git.praetorianlabs.com/mars/sphinx/emails\"))\n lines = append(lines, \"---\")\n lines = append(lines, \"\\n\")\n lines = append(lines, randFromFile(path + \"/content\"))\n } else {\n template, err := os.Open(templatePath)\n checkErr(err)\n scanner := bufio.NewScanner(template)\n for scanner.Scan() {\n lines = append(lines, scanner.Text())\n }\n }\n\n writeTemplate(homeDir.HomeDir + \"/go/src/git.praetorianlabs.com/mars/sphinx/index.md\", lines)\n\n return layout\n}", "func (s *server) getIndexTemplate(FSS 
fs.FS) *template.Template {\n\t// get the html template from dist, have it ready for requests\n\ttmplContent, ioErr := fs.ReadFile(FSS, \"index.html\")\n\tif ioErr != nil {\n\t\tlog.Println(\"Error opening index template\")\n\t\tif !embedUseOS {\n\t\t\tlog.Fatal(ioErr)\n\t\t}\n\t}\n\n\ttmplString := string(tmplContent)\n\ttmpl, tmplErr := template.New(\"index\").Parse(tmplString)\n\tif tmplErr != nil {\n\t\tlog.Println(\"Error parsing index template\")\n\t\tif !embedUseOS {\n\t\t\tlog.Fatal(tmplErr)\n\t\t}\n\t}\n\n\treturn tmpl\n}", "func (h *GrafanaTemplateHelper) loadTemplate(name string) ([]byte, error) {\n\tpath := fmt.Sprintf(\"%s/%s.yaml\", h.TemplatePath, name)\n\ttpl, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparsed, err := template.New(\"grafana\").Parse(string(tpl))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar buffer bytes.Buffer\n\terr = parsed.Execute(&buffer, h.Parameters)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buffer.Bytes(), nil\n}", "func (app Datetime) loadTemplate(name string, w http.ResponseWriter, req *http.Request) *template.Template {\n\taccept := req.Header.Get(\"Accept\")\n\ttmpl, contentType, acceptable := app.chooseTemplate(accept, name)\n\tif !acceptable {\n\t\tif name == \"error\" {\n\t\t\tapp.simpleError(HTTPError{http.StatusNotAcceptable, nil}, w, req)\n\t\t\treturn nil\n\t\t}\n\t\tapp.error(HTTPError{http.StatusNotAcceptable, nil}, w, req)\n\t\treturn nil\n\t}\n\tif tmpl == nil {\n\t\terr := fmt.Errorf(\"%w \\\"%s\\\" for \\\"%s\\\"\", ErrTemplateNotFound, name, accept)\n\t\tl.Warn(\"unable to find template\", zap.Error(err), zap.String(\"name\", name), zap.String(\"accept\", accept))\n\t\tif name == \"error\" {\n\t\t\tapp.simpleError(HTTPError{http.StatusNotAcceptable, err}, w, req)\n\t\t}\n\t\tapp.error(HTTPError{http.StatusNotAcceptable, err}, w, req)\n\t\t//app.simpleError(HTTPError{http.StatusInternalServerError, ErrNoTemplate}, w, req)\n\t\treturn 
nil\n\t}\n\tw.Header().Set(\"Content-Type\", contentType)\n\treturn tmpl\n}", "func loadTemplates() {\n\n\tfmt.Println(\"About to load templates\")\n\n\t// get layouts\n\tlayouts, err := filepath.Glob(\"templates/layouts/*.layout\")\n\tpanicOnError(err)\n\n\t// get list of main pages\n\tpages, err := filepath.Glob(\"templates/pages/*.html\")\n\tpanicOnError(err)\n\n\tfor _, page := range pages {\n\t\tfiles := append(layouts, page)\n\t\ttemplateName := filepath.Base(page)\n\n\t\tnewTemplate := template.Must(template.ParseFiles(files...))\n\t\tnewTemplate.Option(\"missingkey=default\")\n\n\t\tappTemplates[templateName] = newTemplate\n\t}\n\n\t// loaded templates\n\tfor file, _ := range appTemplates {\n\t\tfmt.Printf(\"Loaded Template: %s\\n\", file)\n\t\tfmt.Printf(\"loaded: %s\\n\", file)\n\t}\n\n}", "func (self templateEngine) hasTemplate(name string) bool {\n return CheckFile(self.templateFilepath(name))\n}", "func (h *GiteaTemplateHelper) loadTemplate(name string) ([]byte, error) {\n\tpath := fmt.Sprintf(\"%s/%s.yaml\", h.TemplatePath, name)\n\ttpl, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparsed, err := template.New(\"gitea\").Parse(string(tpl))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar buffer bytes.Buffer\n\terr = parsed.Execute(&buffer, h.Parameters)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buffer.Bytes(), nil\n}", "func loadTemplate() (*template.Template, error) {\n\tt := template.New(\"\")\n\tfor name, file := range Assets.Files {\n\t\tif file.IsDir() || !strings.HasSuffix(name, \".gohtml\") {\n\t\t\tcontinue\n\t\t}\n\t\th, err := ioutil.ReadAll(file)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tt, err = t.New(name).Parse(string(h))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn t, nil\n}", "func LoadTemplates(relativePath string, pOpt *ParseOptions) {\n\t// Initializes the template map\n\ttemplates = make(map[string]*template.Template)\n\n\t// Save Path to Base 
file\n\tpOpt.BasePath = relativePath\n\n\t// Check if every option is set\n\tif pOpt.BaseName == \"\" {\n\t\tpOpt.BaseName = DefaultParseOptions.BaseName\n\t}\n\n\tif pOpt.Delimiter == \"\" {\n\t\tpOpt.Delimiter = DefaultParseOptions.Delimiter\n\t}\n\n\tif pOpt.Ext == \"\" {\n\t\tpOpt.Ext = DefaultParseOptions.Ext\n\t}\n\n\tif pOpt.NonBaseFolder == \"\" {\n\t\tpOpt.NonBaseFolder = DefaultParseOptions.NonBaseFolder\n\t}\n\n\t// Start checking the main dir of the views\n\tcheckDir(relativePath, pOpt, false)\n}", "func indexHandler(res http.ResponseWriter, req *http.Request) {\n//Parsing the template\ntpl := template.Must(template.ParseFiles(\"index.html\"))\nerr := tpl.Execute(res, nil)\nlogError(err)\n}", "func renderTemplate(w http.ResponseWriter, name string, template string, context interface{}) {\n //get template from compiled map holding all templates\n tmpl, ok := templates[name]\n if !ok {\n http.Error(w, \"The page does not exist\", http.StatusInternalServerError)\n }\n //execute (render) template\n err := tmpl.ExecuteTemplate(w, template, context)\n \n if err != nil {\n http.Error(w, err.Error(), http.StatusInternalServerError)\n }\n}", "func Load() *template.Template {\n\treturn template.Must(\n\t\ttemplate.New(\n\t\t\t\"index.html\",\n\t\t).Parse(\n\t\t\tstring(MustAsset(\"index.html\")),\n\t\t),\n\t)\n}", "func getIndexHandler() func(http.ResponseWriter, *http.Request) {\n\ttempFiles := []string{\n\t\tfilepath.Join(*resourcesDir, \"templates/index.html\"),\n\t\tfilepath.Join(*resourcesDir, \"templates/header.html\"),\n\t}\n\n\tindexTemplate := template.Must(template.ParseFiles(tempFiles...))\n\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif *local {\n\t\t\tindexTemplate = template.Must(template.ParseFiles(tempFiles...))\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"text/html\")\n\n\t\tif err := indexTemplate.Execute(w, nil); err != nil {\n\t\t\tsklog.Errorf(\"Failed to expand template: %v\", err)\n\t\t}\n\t}\n}", "func handleIndex(w 
http.ResponseWriter, r *http.Request) {\n\n\tif r.URL.Path != \"/\" {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\tc := appengine.NewContext(r)\n\tlog.Infof(c, \"Serving main page.\")\n\n\ttmpl, _ := template.ParseFiles(\"web/tmpl/index.tmpl\")\n\n\ttmpl.Execute(w, time.Since(initTime))\n}", "func indexHandler(w http.ResponseWriter, req *http.Request) {\n\tlayout, err := template.ParseFile(PATH_PUBLIC + TEMPLATE_LAYOUT)\n\tif err != nil {\n\t\thttp.Error(w, ERROR_TEMPLATE_NOT_FOUND, http.StatusNotFound)\n\t\treturn\n\t}\n\tindex, err := template.ParseFile(PATH_PUBLIC + TEMPLATE_INDEX)\n\t//artical, err := template.ParseFile(PATH_PUBLIC + TEMPLATE_ARTICAL)\n\tif err != nil {\n\t\thttp.Error(w, ERROR_TEMPLATE_NOT_FOUND, http.StatusNotFound)\n\t\treturn\n\t}\n\tmapOutput := map[string]interface{}{\"Title\": \"炫酷的网站技术\" + TITLE, \"Keyword\": KEYWORD, \"Description\": DESCRIPTION, \"Base\": BASE_URL, \"Url\": BASE_URL, \"Carousel\": getAddition(PREFIX_INDEX), \"Script\": getAddition(PREFIX_SCRIPT), \"Items\": leveldb.GetRandomContents(20, &Filter{})}\n\tcontent := []byte(index.RenderInLayout(layout, mapOutput))\n\tw.Write(content)\n\tgo cacheFile(\"index\", content)\n}", "func loadTemplate() *template.Template {\n\t// define template\n\tt := &template.Template{\n\t\tDelimiter: delimiter,\n\t\tFilter: filter,\n\t\tFormat: format,\n\t\tOutfile: outfile,\n\t\tPrefix: prefix,\n\t}\n\tif err := validation.Validate.Struct(t); err != nil {\n\t\tlogrus.WithError(err).Fatalln(\"error loading template...\")\n\t}\n\treturn t\n}", "func (loader *FileSystemLoader) LoadTemplate(name string) (string, error) {\n\tf, err := os.Open(path.Join(loader.BaseDir, name))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tb, err := ioutil.ReadAll(f)\n\treturn string(b), err\n}", "func (t *Tmpl) Load() (err error) {\n\t// time point\n\tt.loadedAt = time.Now()\n\n\t// unnamed root template\n\tvar root = template.New(\"\")\n\n\tvar walkFunc = func(path string, info os.FileInfo, err error) (_ 
error) {\n\t\t// handle walking error if any\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// skip all except regular files\n\t\tif !info.Mode().IsRegular() {\n\t\t\treturn\n\t\t}\n\n\t\t// filter by extension\n\t\tif filepath.Ext(path) != t.ext {\n\t\t\treturn\n\t\t}\n\n\t\t// get relative path\n\t\tvar rel string\n\t\tif rel, err = filepath.Rel(t.dir, path); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// name of a template is its relative path\n\t\t// without extension\n\t\trel = strings.TrimSuffix(rel, t.ext)\n\n\t\t// load or reload\n\t\tvar (\n\t\t\tnt = root.New(rel)\n\t\t\tb []byte\n\t\t)\n\n\t\tif b, err = ioutil.ReadFile(path); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = nt.Parse(string(b))\n\t\treturn err\n\t}\n\n\tif err = filepath.Walk(t.dir, walkFunc); err != nil {\n\t\treturn\n\t}\n\n\t// necessary for reloading\n\tif t.funcs != nil {\n\t\troot = root.Funcs(t.funcs)\n\t}\n\n\tt.Template = root // set or replace\n\treturn\n}", "func templateInit(w http.ResponseWriter, templateFile string, templateData page) {\n\tif err := tmpls.ExecuteTemplate(w, templateFile, templateData); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func (h *TemplateHelper) loadTemplate(name string) ([]byte, error) {\n\tpath := fmt.Sprintf(\"%s/%s.yaml\", h.TemplatePath, name)\n\ttpl, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparser := template.New(\"application-monitoring\")\n\tparser.Funcs(template.FuncMap{\n\t\t\"JoinQuote\": joinQuote,\n\t}).Funcs(sprig.TxtFuncMap())\n\n\tparsed, err := parser.Parse(string(tpl))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar buffer bytes.Buffer\n\terr = parsed.Execute(&buffer, h.Parameters)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buffer.Bytes(), nil\n}", "func (h *TemplateHelper) loadTemplate(name string) ([]byte, error) {\n\tpath := fmt.Sprintf(\"%s/%s.yaml\", h.TemplatePath, name)\n\ttpl, err := 
ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparser := template.New(\"application-monitoring\")\n\tparser.Funcs(template.FuncMap{\n\t\t\"JoinQuote\": joinQuote,\n\t})\n\n\tparsed, err := parser.Parse(string(tpl))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar buffer bytes.Buffer\n\terr = parsed.Execute(&buffer, h.Parameters)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buffer.Bytes(), nil\n}", "func (s *Service) prepareIndexHTML() (bool, error) {\n\tindexTMPL := filepath.Join(s.HTMLDir, \"index.tmpl\")\n\tindexTMPLFile, err := os.Open(indexTMPL)\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\tdefer indexTMPLFile.Close()\n\n\tindexHTML := filepath.Join(s.HTMLDir, \"index.html\")\n\tindexHTMLFile, err := os.Create(indexHTML)\n\tif err != nil {\n\t\treturn true, sdk.WrapError(err, \"error while creating %s file\", indexHTML)\n\t}\n\tdefer indexHTMLFile.Close()\n\t_, err = io.Copy(indexHTMLFile, indexTMPLFile)\n\treturn true, sdk.WrapError(err, \"error while copy index.tmpl to index.html file\")\n}", "func templatesIndexTmpl() (*asset, error) {\n\tpath := \"/Volumes/Code/go/src/github.com/schollz/cowyo/templates/index.tmpl\"\n\tname := \"templates/index.tmpl\"\n\tbytes, err := bindataRead(path, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error reading asset info %s at %s: %v\", name, path, err)\n\t}\n\n\ta := &asset{bytes: bytes, info: fi}\n\treturn a, err\n}", "func index(w http.ResponseWriter, r *http.Request){\n\terr := templ.ExecuteTemplate(w, \"index\", nil)\n\tif err != nil {\n\t\tfmt.Print(err.Error())\n\t}\n}", "func LoadTemplate(filename string) (*raymond.Template, error) {\n\ttpl, err := raymond.ParseFile(filename)\n\treturn tpl, err\n}", "func TestNewTemplateMap(t *testing.T) {\n\tconst title = \"Translation Portal\"\n\tconst query = \"謹\"\n\tconst simplified = \"謹\"\n\tconst pinyin = \"jǐn\"\n\tconst english = \"to be 
cautious\"\n\tws := dicttypes.WordSense{\n\t\tId: 42,\n\t\tHeadwordId: 42,\n\t\tSimplified: simplified,\n\t\tTraditional: query,\n\t\tPinyin: pinyin,\n\t\tEnglish: english,\n\t\tGrammar: \"verb\",\n\t\tConcept: \"\\\\N\",\n\t\tConceptCN: \"\\\\N\",\n\t\tDomain: \"Literary Chinese\",\n\t\tDomainCN: \"\\\\N\",\n\t\tSubdomain: \"\\\\N\",\n\t\tSubdomainCN: \"\\\\N\",\n\t\tImage: \"\\\\N\",\n\t\tMP3: \"\\\\N\",\n\t\tNotes: \"\\\\N\",\n\t}\n\tw := dicttypes.Word{\n\t\tSimplified: simplified,\n\t\tTraditional: \"謹\",\n\t\tPinyin: pinyin,\n\t\tHeadwordId: 42,\n\t\tSenses: []dicttypes.WordSense{ws},\n\t}\n\tterm := find.TextSegment{\n\t\tQueryText: query,\n\t\tDictEntry: w,\n\t}\n\tresults := find.QueryResults{\n\t\tQuery: query,\n\t\tCollectionFile: \"\",\n\t\tNumCollections: 0,\n\t\tNumDocuments: 0,\n\t\tCollections: []find.Collection{},\n\t\tDocuments: []find.Document{},\n\t\tTerms: []find.TextSegment{term},\n\t}\n\ttype test struct {\n\t\tname string\n\t\ttemplateName string\n\t\tcontent interface{}\n\t\twant string\n\t}\n\ttests := []test{\n\t\t{\n\t\t\tname: \"Home page\",\n\t\t\ttemplateName: \"index.html\",\n\t\t\tcontent: map[string]string{\"Title\": title},\n\t\t\twant: \"<title>\" + title + \"</title>\",\n\t\t},\n\t\t{\n\t\t\tname: \"Find results\",\n\t\t\ttemplateName: \"find_results.html\",\n\t\t\tcontent: htmlContent{\n\t\t\t\tTitle: title,\n\t\t\t\tResults: results,\n\t\t\t},\n\t\t\twant: english,\n\t\t},\n\t}\n\tfor _, tc := range tests {\n\t\ttemplates := NewTemplateMap(config.WebAppConfig{})\n\t\ttmpl, ok := templates[tc.templateName]\n\t\tif !ok {\n\t\t\tt.Errorf(\"%s, template not found: %s\", tc.name, tc.templateName)\n\t\t}\n\t\tvar buf bytes.Buffer\n\t\terr := tmpl.Execute(&buf, tc.content)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s, error rendering template %v\", tc.name, err)\n\t\t}\n\t\tgot := buf.String()\n\t\tif !strings.Contains(got, tc.want) {\n\t\t\tt.Errorf(\"%s, got %s\\n bug want %s\", tc.name, got, tc.want)\n\t\t}\n\t}\n}", "func 
renderTemplate(basePath string, templateConfig *rt.TemplateConfig, config *rt.Config) error {\n\tfor _, fromTo := range templateConfig.FromTo {\n\t\tif *showOutputDir {\n\t\t\tlog.Printf(\"Rendering %s\", fromTo)\n\t\t}\n\t\ttemplateInstance, err := loadTemplate(basePath, fromTo.From)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, render := range templateConfig.Render {\n\t\t\terr = renderFileFromTemplate(basePath, templateInstance, *render, config, fromTo)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Failed render %s file\", fromTo.To)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func RenderTemplateTest(w http.ResponseWriter) (map[string]*template.Template, error) {\n\ttemplateCache := map[string]*template.Template{}\n\n\tpages, err := filepath.Glob(\"./templates/*.page.html\")\n\tif err != nil {\n\t\treturn templateCache, err\n\t}\n\n\tfor _, page := range pages {\n\t\tfmt.Println(\"selected page path is\", page)\n\t\tname := filepath.Base(page)\n\t\tfmt.Println(\"selected page is\", name)\n\n\t\tts, err := template.New(name).Funcs(functions).ParseFiles(page)\n\t\tif err != nil {\n\t\t\treturn templateCache, err\n\t\t}\n\n\t\tmatches, err := filepath.Glob(\"./templates/*.layout.html\")\n\t\tif err != nil {\n\t\t\treturn templateCache, err\n\t\t}\n\n\t\tif len(matches) > 0 {\n\t\t\tts, err = ts.ParseGlob(\"./templates/*.layout.html\")\n\t\t\tif err != nil {\n\t\t\t\treturn templateCache, err\n\t\t\t}\n\t\t}\n\n\t\ttemplateCache[name] = ts\n\t}\n\n\treturn templateCache, nil\n}", "func (ui *GUI) loadTemplates() error {\n\tvar templates []string\n\tfindTemplate := func(path string, f os.FileInfo, err error) error {\n\t\t// If path doesn't exist, or other error with path, return error so\n\t\t// that Walk will quit and return the error to the caller.\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !f.IsDir() && strings.HasSuffix(f.Name(), \".html\") {\n\t\t\ttemplates = append(templates, path)\n\t\t}\n\t\treturn nil\n\t}\n\n\terr := 
filepath.Walk(ui.cfg.GUIDir, findTemplate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thttpTemplates := template.New(\"template\").Funcs(template.FuncMap{\n\t\t\"hashString\": util.HashString,\n\t\t\"upper\": strings.ToUpper,\n\t\t\"percentString\": util.PercentString,\n\t})\n\n\t// Since template.Must panics with non-nil error, it is much more\n\t// informative to pass the error to the caller to log it and exit\n\t// gracefully.\n\thttpTemplates, err = httpTemplates.ParseFiles(templates...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tui.templates = template.Must(httpTemplates, nil)\n\treturn nil\n}", "func renderTemplate(w http.ResponseWriter, id string, d interface{}) bool {\n\tif t, err := template.New(id).ParseFiles(\"./templates/\" + id); err != nil {\n\t\thttp.Error(w, errors.Wrap(err, \"Could not render template\").Error(), http.StatusInternalServerError)\n\t\treturn false\n\t} else if err := t.Execute(w, d); err != nil {\n\t\thttp.Error(w, errors.Wrap(err, \"Could not render template\").Error(), http.StatusInternalServerError)\n\t\treturn false\n\t}\n\treturn true\n}", "func renderTemplate(w http.ResponseWriter, id string, d interface{}) bool {\n\tif t, err := template.New(id).ParseFiles(\"./templates/\" + id); err != nil {\n\t\thttp.Error(w, errors.Wrap(err, \"Could not render template\").Error(), http.StatusInternalServerError)\n\t\treturn false\n\t} else if err := t.Execute(w, d); err != nil {\n\t\thttp.Error(w, errors.Wrap(err, \"Could not render template\").Error(), http.StatusInternalServerError)\n\t\treturn false\n\t}\n\treturn true\n}", "func indexHandler(res http.ResponseWriter, req *http.Request) {\n\tfmt.Println(\"website index\")\n\t//grab all partials\n\tpartials, err := loadPartials()\n\tif err != nil {\n\t\thttp.Error(res, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\t//get template function based on index and execute to load page\n\tt, _ := template.ParseFiles(\"../index.html\")\n\tt.Execute(res, partials)\n}", "func (c 
*Client) Template(sourceFilePath, destinationFilePath string, perms os.FileMode, appendMap, envMap map[string]string) error {\n\ttemplateText, err := readTemplate(sourceFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttemplateResultBuffer, err := c.renderTemplate(templateText, appendMap, envMap)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn writeTemplateResults(destinationFilePath, templateResultBuffer, perms)\n}", "func runTemplate(operationName string, w io.Writer, data interface{}) error {\n\tt, ok := pageTemplates[operationName]\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"Template %s doesn't exist\", operationName))\n\t}\n\n\terr := t.Execute(w, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func notFound(w http.ResponseWriter) {\n\tt, err := template.ParseFiles(notFoundTemplatePath)\n\tif err != nil {\n\t\trlog.Errorf(\"could not load notfound template [%s]\", err.Error())\n\t\treturn\n\t}\n\tt.ExecuteTemplate(w, notFoundTemplate, nil)\n\treturn\n}", "func RenderTpl(w http.ResponseWriter, r *http.Request, template string, pageTitle string) {\n\n\t// Load given template by name\n\ttpl, err := ace.Load(\"templates/\"+template, \"\", nil)\n\tif err != nil {\n\n\t\t// Invalid resource - hardcode to redirect to 404 page\n\t\tlog.Println(\"Error:\", err.Error(), \"trying 404 instead\")\n\t\tpageTitle, template = \"not found\", \"404\"\n\n\t\t// If this fails for some reason, just quit\n\t\tif tpl, err = ace.Load(\"templates/bodies/404\", \"\", nil); err != nil {\n\t\t\tlog.Println(\"Error:\", err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Print IP, URL, requested path; path to template file\n\tlog.Println(\"Serving template:\", \"templates/bodies/\"+template)\n\n\t// Load our Data obj\n\tdata := Data{Title: \"jm - \" + pageTitle}\n\n\t// Apply parsed template to w, passing in our Data obj\n\tif err := tpl.Execute(w, data); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\tlog.Println(\"Error:\", 
err.Error())\n\t\treturn\n\t}\n}", "func (application *Application) LoadTemplates() error {\n\tvar templates []string\n\n\t// Create function to collect our template files\n\tfn := func(path string, f os.FileInfo, err error) error {\n\t\tif f.IsDir() != true && strings.HasSuffix(f.Name(), \".html\") {\n\t\t\ttemplates = append(templates, path)\n\t\t}\n\t\treturn nil\n\t}\n\n\t// Look for all the template files\n\terr := filepath.Walk(application.Configuration.TemplatePath, fn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Make sure we can parse all the template files\n\tapplication.Template = template.Must(template.ParseFiles(templates...))\n\treturn nil\n}", "func (s *DjangoEngine) Load() error {\n\t// If only custom templates should be loaded.\n\tif (s.fs == nil || context.IsNoOpFS(s.fs)) && len(s.templateCache) > 0 {\n\t\treturn nil\n\t}\n\n\trootDirName := getRootDirName(s.fs)\n\n\treturn walk(s.fs, \"\", func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif info == nil || info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\tif s.extension != \"\" {\n\t\t\tif !strings.HasSuffix(path, s.extension) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\tif s.rootDir == rootDirName {\n\t\t\tpath = strings.TrimPrefix(path, rootDirName)\n\t\t\tpath = strings.TrimPrefix(path, \"/\")\n\t\t}\n\n\t\tcontents, err := asset(s.fs, path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn s.ParseTemplate(path, contents)\n\t})\n}", "func serveTemplate(w http.ResponseWriter, r *http.Request, name string, info interface{}) {\n\tdata, err := Asset(\"templates/\" + name + \".mustache\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\tcontent := mustache.Render(string(data), info)\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\tw.Write([]byte(content))\n}", "func TestTemplateManager_LoadTemplates(t *testing.T) {\n\ttestee := TemplateManager{}\n\n\terr := 
testee.LoadTemplates(testhelpers.GetTestLogger())\n\n\tassert.Nil(t, err, \"All templates should be loaded without error\")\n\tassert.Equal(t, 9, len(testee.Templates), \"All templates should be loaded\")\n}", "func RenderTemplate(tmpfile string, pairs map[string]interface{}) (string, error) {\n\n\tfile, err := os.Open(tmpfile)\n\tif err != nil {\n\t\tLogWarning.Println(err)\n\t}\n\tdefer file.Close()\n\n\tscanner := bufio.NewScanner(file)\n\t//var srcContent string\n\tvar srcContent bytes.Buffer\n\tfor scanner.Scan() {\n\t\tt := fmt.Sprintln(scanner.Text())\n\t\tif strings.Index(t, \"<%file:\") > -1 {\n\t\t\tLogDebug.Println(\"Including file external file\")\n\t\t\tif strings.Index(t, \"%>\") > -1 {\n\t\t\t\tre := regexp.MustCompile(\"\\\\<\\\\%file:(.*?)\\\\%\\\\>\")\n\t\t\t\tmatch := re.FindStringSubmatch(t)\n\t\t\t\tif len(match) == 0 {\n\t\t\t\t\tLogError.Println(\"invalid file: syntax \", t)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tincludeFileName := fmt.Sprintf(\"%s/%s\", path.Dir(tmpfile), match[1])\n\t\t\t\tincludeContent, err := ioutil.ReadFile(includeFileName)\n\t\t\t\tif err != nil {\n\t\t\t\t\tLogWarning.Println(err)\n\t\t\t\t}\n\t\t\t\tLogInfo.Println(\"including file :\", includeFileName)\n\t\t\t\tLogDebug.Println(\"includeContent\", string(includeContent))\n\t\t\t\tsrcContent.WriteString(string(includeContent))\n\t\t\t} else {\n\t\t\t\tLogWarning.Println(\"Found incomplete tag in include from file \", tmpfile)\n\t\t\t}\n\t\t} else if strings.Index(t, \"<%LookupFile:\") > -1 {\n\t\t\tLogDebug.Println(\"Rendering LookupFile\")\n\t\t\tvar lookup LookupList\n\t\t\tre := regexp.MustCompile(\"\\\\<\\\\%LookupFile:(.*?),(.*?),(.*?),(.*?)\\\\%\\\\>\")\n\n\t\t\t/*\n\t\t\t\t//\n\t\t\t\t// Fist we need to find if there is a template within the lookup definition\n\t\t\t\tt := fasttemplate.New(t, \"{{\", \"}}\")\n\t\t\t\ts := t.ExecuteString(pairs)\n\t\t\t*/\n\t\t\t//var tmpl = template.Must(template.ParseFiles(t))\n\t\t\t// Create a new template and parse the 
letter into it.\n\t\t\t// Get the Sprig function map.\n\t\t\tfmap := sprig.TxtFuncMap()\n\t\t\tvar tmpl = template.Must(template.New(\"LookupFile\").Funcs(fmap).Parse(t))\n\n\t\t\tvar bytes bytes.Buffer\n\t\t\twriter := bufio.NewWriter(&bytes)\n\n\t\t\terr = tmpl.Execute(writer, pairs)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\terr = writer.Flush()\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\tLogDebug.Println(bytes.String())\n\n\t\t\tmatch := re.FindStringSubmatch(bytes.String())\n\n\t\t\tif len(match) == 0 {\n\t\t\t\tLogError.Println(\"invalid LookupFile: syntax \", t)\n\t\t\t\t//BUG/FIX: Should push up a error to rest calling function\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tLogDebug.Println(\"LookupFile: \", match[LookupFile])\n\t\t\tLogDebug.Println(\"LookupKey: \", match[LookupKey])\n\t\t\tLogDebug.Println(\"LookupSubkey: \", match[LookupSubkey])\n\t\t\tLogDebug.Println(\"LookupDefaultValue: \", match[LookupDefaultValue])\n\n\t\t\tyamlFile, err := ioutil.ReadFile(fmt.Sprintf(match[LookupFile]))\n\t\t\tif err != nil {\n\t\t\t\tLogError.Println(\"reading LookupFile \", match[LookupFile])\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\terr = yaml.Unmarshal(yamlFile, &lookup)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\tvar lookupvalue string\n\t\t\tvar ok bool\n\t\t\tLogDebug.Println(lookup.Lookup)\n\t\t\tif lookupvalue, ok = lookup.Lookup[match[LookupKey]][match[LookupSubkey]]; ok {\n\t\t\t\tLogDebug.Println(\"Found lookup value in file :\", lookupvalue)\n\t\t\t} else {\n\t\t\t\tlookupvalue = match[LookupDefaultValue]\n\t\t\t\tLogDebug.Println(\"Using default lookup Value :\", lookupvalue)\n\t\t\t}\n\n\t\t\tsrcContent.WriteString(re.ReplaceAllString(bytes.String(), lookupvalue))\n\n\t\t} else {\n\t\t\tsrcContent.WriteString(t)\n\t\t}\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\tLogWarning.Println(err)\n\t}\n\n\t//var tmpl = template.Must(template.ParseFiles(tmpl_file))\n\t// Get the Sprig 
function map.\n\tfmap := sprig.TxtFuncMap()\n\tvar tmpl = template.Must(template.New(\"rendered_template\").Funcs(fmap).Parse(srcContent.String()))\n\n\tvar bytes bytes.Buffer\n\twriter := bufio.NewWriter(&bytes)\n\n\terr = tmpl.Execute(writer, pairs)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = writer.Flush()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tLogDebug.Println(bytes.String())\n\n\treturn bytes.String(), nil\n\n}", "func (self templateEngine) getTemplate(name string) (t string) {\n if ! self.templateCached(name) {\n self.templates[name] = self.loadTemplate(name) \n }\n t, _ = self.templates[name]\n return\n}", "func renderTemplate(w http.ResponseWriter, tmpl string, p interface{}) {\n\terr := templates.ExecuteTemplate(w, tmpl, p)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\tlog.Println(\"GET \" + tmpl)\n}", "func writeTemplate(dirToWrite, dirType, parentDir, templateName string) error {\n\tfmt.Printf(\"\\n\\n\\nAbsolute Directory: %v\\nDirrectory Type: %v\\nParent Dir: %v\\nName of template: %v\\n\\n\\n\", dirToWrite, dirType, parentDir, templateName)\n\n\t// Open index file in components directory and read data\n\tdata, err := getIndexData(filepath.Join(parentDir, \"index.js\"))\n\thandleError(err, \"from Get index data\")\n\n\ttoSliceOn := toSliceOnRoot + \"/\" + dirType\n\n\tdata.RestImports = append(data.RestImports, \"import \"+templateName+\" from '.\"+strings.Split(dirToWrite, toSliceOn)[1]+\"/\"+templateName+\"';\")\n\n\tdata.Name = templateName\n\tfmt.Println(data.RestImports)\n\n\t// Write Template of index\n\twriteNewIndexFile(filepath.Join(parentDir, \"index.js\"), data)\n\tos.Chdir(dirToWrite)\n\tnewComponentFile, err := os.Create(templateName + \".js\")\n\thandleError(err, \"from creating a new component file\")\n\tdefer newComponentFile.Close()\n\n\tsassFile, err := os.Create(templateName + \".css\")\n\thandleError(err, \"from creating new css file\")\n\tdefer 
sassFile.Close()\n\n\tos.Chmod(newComponentFile.Name(), fullPermission)\n\tos.Chmod(sassFile.Name(), fullPermission)\n\n\ttemplateType := getTemplate(dirType)\n\tfmt.Printf(\"Template Type is: %v\\n\", templateType)\n\n\ttmpl := template.Must(template.New(dirType).Delims(\"[){[\", \"]}(]\").Parse(templateType))\n\n\terr = tmpl.Execute(newComponentFile, data)\n\thandleError(err, \"from executing new component template\")\n\treturn nil\n}", "func indexHandler(w http.ResponseWriter, r *http.Request) {\r\n t, _ := template.New(\"webpage\").Parse(indexPage) // parse embeded index page\r\n t.Execute(w, pd) // serve the index page (html template)\r\n}", "func LoadTemplates(rootTemp string, childTemps []string) {\n\trootTemplate = rootTemp\n\tchildTemplates = childTemps\n}", "func (self templateEngine) reloadAllTemplates() {\n loadThese := []string{}\n // get all the names of the templates we have loaded\n for tname, _ := range self.templates {\n loadThese = append(loadThese, tname)\n }\n // for each template we have loaded, reload the contents from file\n for _, tname := range loadThese {\n self.reloadTemplate(tname)\n }\n}", "func (r *oauthProxy) createTemplates() error {\n\tvar list []string\n\n\tif r.config.SignInPage != \"\" {\n\t\tr.log.Debug(\"loading the custom sign in page\", zap.String(\"page\", r.config.SignInPage))\n\t\tlist = append(list, r.config.SignInPage)\n\t}\n\n\tif r.config.ForbiddenPage != \"\" {\n\t\tr.log.Debug(\"loading the custom forbidden page\", zap.String(\"page\", r.config.ForbiddenPage))\n\t\tlist = append(list, r.config.ForbiddenPage)\n\t}\n\n\tif r.config.UnauthorizedPage != \"\" {\n\t\tr.log.Debug(\"loading the custom unauthorized page\", zap.String(\"page\", r.config.UnauthorizedPage))\n\t\tlist = append(list, r.config.UnauthorizedPage)\n\t}\n\n\tif len(list) > 0 {\n\t\tr.log.Info(\"loading the custom templates\", zap.String(\"templates\", strings.Join(list, \",\")))\n\t\tr.templates = 
template.Must(template.ParseFiles(list...))\n\t}\n\n\treturn nil\n}", "func renderIndexPage(ctx *Context, indexPage *model.IndexPage) error {\n\tpagePath := filepath.Join(ctx.TargetDir, indexPage.Path)\n\n\tif err := filesystem.CreateDir(pagePath, true); err != nil {\n\t\treturn err\n\t}\n\n\thandle, err := filesystem.CreateFile(filepath.Join(pagePath, indexFile))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttemplateFile := template.IndexPage\n\n\tif indexPage.Article.Template != \"\" {\n\t\ttemplateFile = indexPage.Article.Template\n\t}\n\n\ttplPath := filepath.Join(ctx.TemplateDir, templateFile)\n\treturn template.Render(tplPath, indexPage, handle)\n}", "func (d *galleryDocument) LoadTemplates(t *template.Template) error {\n\treturn nil\n}", "func ExecuteTemplate(w http.ResponseWriter, name string, data interface{}) error {\n\tt, ok := templateCache[name]\n\tif ok {\n\t\treturn t.Execute(w, data)\n\t}\n\n\ttplGen, ok := templates[name]\n\tif !ok {\n\t\treturn fmt.Errorf(\"No template with name %s\", name)\n\t}\n\n\tt, err := tplGen()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to generate template %s: %s\", name, err)\n\t}\n\n\ttemplateCache[name] = t\n\n\treturn t.Execute(w, data)\n}", "func renderTemplate(w http.ResponseWriter, p *models.Page) {\n\tlp := path.Join(\"views\", \"log.html\")\n\ttmpl, err := template.ParseFiles(lp)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\ttmpl.ExecuteTemplate(w, \"log.html\", p)\n}", "func renderTemplate(layout, name string, w http.ResponseWriter, data interface{}) error {\n\n\tt, ok := appTemplates[name]\n\n\tif !ok {\n\t\terr := fmt.Errorf(\"Unable to find the template: %s\\n\", name)\n\n\t\tfmt.Println(err.Error())\n\n\t\treturn err\n\t}\n\n\terr := t.ExecuteTemplate(w, layout, data)\n\n\treturn err\n}", "func (st *Stemplate) load() error {\n\n\ttemplates, terr := filepath.Glob(st.templatesDir + \"*.tmpl\")\n\tif terr != nil {\n\t\treturn terr\n\t}\n\n\tcontents, err := filepath.Glob(st.templatesDir + 
\"*.html\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, c := range contents {\n\t\tcurrent := append(templates, c)\n\t\tst.templates[filepath.Base(c)] = template.Must(template.ParseFiles(current...))\n\t}\n\n\treturn nil\n\n}", "func (g *Generator) applyTemplate(tplName string, data interface{}) error {\n\ttplFuncMap := make(template.FuncMap)\n\tcache := make(map[string]interface{})\n\ttplFuncMap[\"ToSnake\"] = strcase.ToSnake\n\ttplFuncMap[\"Add\"] = func(a, b int) int {\n\t\treturn a + b\n\t}\n\ttplFuncMap[\"CacheSet\"] = func(key string, value interface{}) string {\n\t\tcache[key] = value\n\t\treturn \"\"\n\t}\n\ttplFuncMap[\"CacheGet\"] = func(key string) interface{} {\n\t\treturn cache[key]\n\t}\n\ttplContent, err := g.box.FindString(tplName)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttpl, err := template.New(\"\").Funcs(tplFuncMap).Parse(tplContent)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn tpl.Execute(&g.buf, data)\n}", "func (f *TemplateFile) IsIndex() bool {\n\treturn f.Name == \"index.html\"\n}", "func (t *Renderer) ReloadTemplates() {\n\tt.template = template.Must(template.ParseGlob(t.location))\n}", "func renderTemplate(w http.ResponseWriter, tmpl string, p *Page) {\r\n t, err := template.ParseFiles(tmpl + \".html\")\r\n if err != nil {\r\n http.Error(w, err.Error(), http.StatusInternalServerError)\r\n return\r\n }\r\n err = t.Execute(w, p)\r\n if err != nil {\r\n http.Error(w, err.Error(), http.StatusInternalServerError)\r\n }\r\n}", "func loadTemplate(filePath string) (string, error) {\n\tcontent, err := ioutil.ReadFile(filePath)\n\tif err != nil {\n\t\treturn \"\", errors.New(fmt.Sprintf(\"failed to read file %s: %v\", filePath, err))\n\t}\n\ttemplateStr := string(content)\n\treturn templateStr, nil\n}", "func index(w http.ResponseWriter, req *http.Request, _ httprouter.Params) {\n\terr := tpl.ExecuteTemplate(w, \"index.html\", nil)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 
http.StatusInternalServerError)\n\t\tlog.Fatalln(err)\n\t}\n\tfmt.Println(\"HERE INDEX\")\n}", "func indexHandler(res http.ResponseWriter, req *http.Request) {\n\n\t// Execute the template and respond with the index page.\n\ttemplates.ExecuteTemplate(res, \"index\", nil)\n}", "func execmTemplateLookup(_ int, p *gop.Context) {\n\targs := p.GetArgs(2)\n\tret := args[0].(*template.Template).Lookup(args[1].(string))\n\tp.Ret(2, ret)\n}", "func LoadTemplates(VersionString string) {\n\tloadConfiguration()\n\tversionString = VersionString\n\tif templates == nil {\n\t\ttemplates = make(map[string]*template.Template)\n\t}\n\n\tlayoutFiles, err := filepath.Glob(templateConfig.TemplateLayoutPath + \"*.html\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tincludeFiles, err := filepath.Glob(templateConfig.TemplateIncludePath + \"*.html\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tmainTemplate := template.New(\"main\")\n\n\tmainTemplate, err = mainTemplate.Parse(mainTmpl)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor _, file := range includeFiles {\n\t\tfileName := filepath.Base(file)\n\t\tfiles := append(layoutFiles, file)\n\t\ttemplates[fileName], err = mainTemplate.Clone()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\ttemplates[fileName] = template.Must(templates[fileName].Funcs(template.FuncMap{\n\t\t\t\"htmlSafe\": func(html string) template.HTML {\n\t\t\t\treturn template.HTML(html)\n\t\t\t},\n\t\t}).ParseFiles(files...))\n\t}\n\n\tlog.Println(\"templates loading successful\")\n\n\tbufpool = bpool.NewBufferPool(64)\n\tlog.Println(\"buffer allocation successful\")\n}", "func index(res http.ResponseWriter, req *http.Request) {\n\ttpl, err := template.ParseFiles(\"index.html\")\n\tif err != nil { // if file does not exist, give user a error\n\t\tlog.Fatalln(err) // stops program if file does not exist\n\t}\n\ttpl.Execute(res, nil) // execute the html file\n}", "func Template(w http.ResponseWriter, r *http.Request, tmpl string, td *models.TemplateData) 
error {\n\n\tvar tc map[string]*template.Template\n\n\t//posso scegliere se usare la cache o no (intanto che sviluppo non la uso, così vedo subito le modifiche)\n\tif app.UseCache {\n\t\t// get the template cach from the app config\n\t\ttc = app.TemplateCache\n\t} else {\n\t\ttc, _ = CreateTemplateCache()\n\t}\n\n\tt, ok := tc[tmpl]\n\tif !ok {\n\t\t//log.Fatal(\"could not get template from template cache\")\n\t\treturn errors.New(\"could not get template from cache\")\n\t}\n\n\tbuf := new(bytes.Buffer)\n\n\ttd = AddDefaultData(td, r)\n\n\t_ = t.Execute(buf, td)\n\n\t_, err := buf.WriteTo(w)\n\tif err != nil {\n\t\tfmt.Println(\"Error writing template to browser\", err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func MustLoadTemplate(filename string) *raymond.Template {\n\ttpl, err := raymond.ParseFile(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn tpl\n}", "func index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tasset, err := Asset(\"static/templates/index.html\")\n\tif err != nil {\n\t\tlog.Panic(\"Unable to read file from bindata: \", err)\n\t}\n\tfmt.Fprint(w, string(asset))\n}", "func (t Tmpl) RenderTemplate(w http.ResponseWriter, req *http.Request, name string, args map[string]interface{}) {\n\t// Check if app is running on dev mode\n\tif Config.Configuration.IsDev() {\n\n\t\t// Lock mutex\n\t\tt.rw.Lock()\n\t\tdefer t.rw.Unlock()\n\n\t\t// Create new template\n\t\tt = NewTemplate(\"castro\")\n\n\t\t// Set template FuncMap\n\t\tt.Tmpl.Funcs(FuncMap)\n\n\t\t// Reload all templates\n\t\tif err := t.LoadTemplates(\"views/\"); err != nil {\n\t\t\tLogger.Logger.Error(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// Reload all templates\n\t\tif err := t.LoadTemplates(\"pages/\"); err != nil {\n\t\t\tLogger.Logger.Error(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// Reload all extension templates\n\t\tif err := t.LoadExtensionTemplates(\"pages\"); err != nil {\n\t\t\tLogger.Logger.Errorf(\"Cannot load extension subtopic template: %v\", 
err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// Reload all template hooks\n\t\tt.LoadTemplateHooks()\n\t}\n\n\t// Check if args is a valid map\n\tif args == nil {\n\t\targs = map[string]interface{}{}\n\t}\n\n\t// Load microtime from the microtimeHandler\n\tmicrotime, ok := req.Context().Value(\"microtime\").(time.Time)\n\tif !ok {\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Cannot read microtime value\"))\n\t\treturn\n\t}\n\n\t// Get csrf token\n\ttkn, ok := req.Context().Value(\"csrf-token\").(*models.CsrfToken)\n\tif !ok {\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Cannot read csrf token value\"))\n\t\treturn\n\t}\n\n\t// Get nonce value\n\tnonce, ok := req.Context().Value(\"nonce\").(string)\n\n\tif !ok {\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Cannot read nonce value\"))\n\t\treturn\n\t}\n\n\t// Get session map\n\tsession, ok := req.Context().Value(\"session\").(map[string]interface{})\n\n\tif !ok {\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Cannot read session map\"))\n\t\treturn\n\t}\n\n\t// Set session map\n\targs[\"session\"] = session\n\n\t// Set nonce value\n\targs[\"nonce\"] = nonce\n\n\t// Set token value\n\targs[\"csrfToken\"] = tkn.Token\n\n\t// Set microtime value\n\targs[\"microtime\"] = fmt.Sprintf(\"%9.4f seconds\", time.Since(microtime).Seconds())\n\n\t// Render template and log error\n\tif err := t.Tmpl.ExecuteTemplate(w, name, args); err != nil {\n\t\tLogger.Logger.Error(err.Error())\n\t}\n}", "func (v *View) RenderSingle(w http.ResponseWriter) {\n\n\t// Get the template collection from cache\n\t/*mutex.RLock()\n\ttc, ok := templateCollection[v.Name]\n\tmutex.RUnlock()*/\n\n\t// Get the plugin collection\n\tmutexPlugins.RLock()\n\tpc := pluginCollection\n\tmutexPlugins.RUnlock()\n\n\ttemplateList := []string{v.Name}\n\n\t// List of template names\n\t/*templateList := make([]string, 0)\n\ttemplateList = append(templateList, rootTemplate)\n\ttemplateList = append(templateList, v.Name)\n\ttemplateList = append(templateList, 
childTemplates...)*/\n\n\t// Loop through each template and test the full path\n\tfor i, name := range templateList {\n\t\t// Get the absolute path of the root template\n\t\tpath, err := filepath.Abs(v.Folder + string(os.PathSeparator) + name + \".\" + v.Extension)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Template Path Error: \"+err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\ttemplateList[i] = path\n\t}\n\n\t// Determine if there is an error in the template syntax\n\ttemplates, err := template.New(v.Name).Funcs(pc).ParseFiles(templateList...)\n\n\tif err != nil {\n\t\thttp.Error(w, \"Template Parse Error: \"+err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Cache the template collection\n\t/*mutex.Lock()\n\ttemplateCollection[v.Name] = templates\n\tmutex.Unlock()*/\n\n\t// Save the template collection\n\ttc := templates\n\n\t// Get session\n\tsess := session.Instance(v.request)\n\n\t// Get the flashes for the template\n\tif flashes := sess.Flashes(); len(flashes) > 0 {\n\t\tv.Vars[\"flashes\"] = make([]Flash, len(flashes))\n\t\tfor i, f := range flashes {\n\t\t\tswitch f.(type) {\n\t\t\tcase Flash:\n\t\t\t\tv.Vars[\"flashes\"].([]Flash)[i] = f.(Flash)\n\t\t\tdefault:\n\t\t\t\tv.Vars[\"flashes\"].([]Flash)[i] = Flash{f.(string), \"alert-box\"}\n\t\t\t}\n\n\t\t}\n\t\tsess.Save(v.request, w)\n\t}\n\n\t// Display the content to the screen\n\terr = tc.Funcs(pc).ExecuteTemplate(w, v.Name+\".\"+v.Extension, v.Vars)\n\n\tif err != nil {\n\t\thttp.Error(w, \"Template File Error: \"+err.Error(), http.StatusInternalServerError)\n\t}\n}", "func (app *Application) Index(w http.ResponseWriter, r *http.Request) {\n\tdata := struct {\n\t\tTime int64\n\t}{\n\t\tTime: time.Now().Unix(),\n\t}\n\n\tt, err := template.ParseFiles(\"views/index.tpl\")\n\n\tif err != nil {\n\t\tlog.Println(\"Template.Parse:\", err)\n\t\thttp.Error(w, \"Internal Server Error 0x0178\", http.StatusInternalServerError)\n\t}\n\n\tif err := t.Execute(w, data); err 
!= nil {\n\t\tlog.Println(\"Template.Execute:\", err)\n\t\thttp.Error(w, \"Internal Server Error 0x0183\", http.StatusInternalServerError)\n\t}\n}", "func Execute(wr io.Writer, data *PageInfo) (err error) {\n\tif len(data.Name) < 1 {\n\t\terr = errors.New(\"PageInfo template name not specified!\")\n\t\treturn\n\t}\n\n\tdata.ModuleName = make(map[string]bool, 1)\n\tdata.ModuleName[moduleName] = true\n\n\tlog.Println(\"ModuleName:\", moduleName, \"Map:\", data.ModuleName)\n\n\tprefix := \"tmpl/desktop/\"\n\tif WouldUseMobile(data.Request) {\n\t\tprefix = \"tmpl/mobile/\"\n\t}\n\n\tstartTime := time.Now()\n\ttemplate, err := GetTemplate(prefix + data.Name)\n\tendTime := time.Now()\n\n\tdeltaTime := endTime.Sub(startTime)\n\tfmt.Println(\"Took\", float32(deltaTime)/(1000.0*1000.0*1000.0), \"seconds to parse template\", data.Name)\n\n\tif err != nil {\n\t\tlog.Println(\"Error parsing template '\", data.Name, \"':\", err)\n\t\treturn err\n\t}\n\n\tstartTime = time.Now()\n\n\ttemplate.Render(wr, data)\n\n\tendTime = time.Now()\n\tdeltaTime = endTime.Sub(startTime)\n\tlog.Println(\"Took\", float32(deltaTime)/(1000.0*1000.0*1000.0), \"seconds to render template\", data.Name)\n\treturn\n}", "func IndexHandler(w http.ResponseWriter, r *http.Request) {\n\tpage := Page{Title: Environ.Config.Title, Logo: Environ.Config.Logo}\n\n\t// Set the document root based on the type of interface that is to be served\n\tvar path []string\n\tif Environ.Config.Interface == InterfaceTypeAdmin {\n\t\tpath = []string{Environ.Config.DocRootAdmin, indexTemplate}\n\t} else {\n\t\tpath = []string{Environ.Config.DocRootUser, indexTemplate}\n\t}\n\n\tt, err := template.ParseFiles(strings.Join(path, \"\"))\n\tif err != nil {\n\t\tlog.Printf(\"Error loading the application template: %v\\n\", err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\terr = t.Execute(w, page)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}", "func 
load(filenames ...string) *template.Template {\n\treturn template.Must(template.ParseFiles(joinTemplateDir(filenames...)...)).Lookup(\"root\")\n}", "func handler(w http.ResponseWriter, r *http.Request) {\n\ttmpl := template.Must(template.ParseFiles(\"index.html\"))\n\tdata := page\n\ttmpl.Execute(w, data)\n}", "func ExecuteTemplate(wr io.Writer, name string, data interface{}) error {\n\treturn ExecuteViewPathTemplate(wr, name, BConfig.WebConfig.ViewsPath, data)\n}", "func (g *Generator) prepare(gen *FileMapGenerate) (*template.Template, error) {\n\t// Preload the function map (or else the functions will fail when\n\t// called due to a lack of valid context).\n\tvar (\n\t\tt = template.New(\"\").Funcs(Preload)\n\t\terr error\n\t)\n\n\t// Parse the included template files.\n\tfor _, inc := range gen.Include {\n\t\t_, err := g.loadTemplate(t, inc)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Parse the template file to execute.\n\ttmpl, err := g.loadTemplate(t, gen.Template)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, name := path.Split(gen.Template)\n\ttmpl = tmpl.Lookup(name)\n\treturn tmpl, nil\n}", "func renderPage(ctx *Context, path, tpl string, data interface{}) error {\n\tif err := filesystem.CreateDir(path, true); err != nil {\n\t\treturn err\n\t}\n\n\thandle, err := filesystem.CreateFile(filepath.Join(path, indexFile))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttplPath := filepath.Join(ctx.TemplateDir, tpl)\n\treturn template.Render(tplPath, data, handle)\n}", "func LoadTemplate(category, file, builtin string) (string, error) {\n\tdir, err := GetTemplateDir(category)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfile = filepath.Join(dir, file)\n\tif !FileExists(file) {\n\t\treturn builtin, nil\n\t}\n\n\tcontent, err := os.ReadFile(file)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(content), nil\n}", "func (site *Site) loadTemplates(templates fs.FS) error {\n\ttempl, err := 
template.New(\"site.tmpl\").ParseFS(templates, \"*.tmpl\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parse failed: %w\", err)\n\t}\n\n\tsite.Template = templ\n\tsite.BaseTemplate, _ = templ.Clone()\n\n\treturn nil\n}", "func TestIndex_badtemplate(t *testing.T) {\n\ttemplateString := \"{{ .ValueNotPresent }}\"\n\ttestTempl := template.Must(template.New(\"test\").Parse(templateString))\n\n\tdone := make(chan struct{})\n\tdefer close(done)\n\n\tlogger := log.New(ioutil.Discard, \"\", 0)\n\tts := httptest.NewServer(Index(logger, \"testdata\", done, testTempl))\n\tdefer ts.Close()\n\n\tres, err := http.Get(ts.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tassert.Equal(t, 500, res.StatusCode, \"got wrong response\")\n}", "func Template(templatePath string) Result {\n\tconfig := config.GetLoadedConfig()\n\tfullPath := filepath.Join(config.GetTemplatePath(), templatePath)\n\n\tif f, err := os.Open(fullPath); err != nil {\n\t\tlog.Printf(\"could not open template file %s\\n\", fullPath)\n\t} else {\n\t\tif bytes, err := io.ReadAll(f); err != nil {\n\t\t\tlog.Printf(\"could not read template file %s\\n\", fullPath)\n\t\t} else {\n\t\t\treturn StringResult(bytes)\n\t\t}\n\t}\n\n\treturn StringResult(\"\")\n}", "func (v *VTemplates) Load(name string, ext string, fileList, delims []string) (*template.Template, error) {\n\tif len(fileList) == 0 {\n\t\treturn nil, fmt.Errorf(\"Empty File Lists\")\n\t}\n\n\tvar tl *template.Template\n\tvar ok bool\n\n\tv.rw.RLock()\n\ttl, ok = v.loaded[name]\n\tv.rw.RUnlock()\n\n\tif ok {\n\t\tif !v.Debug {\n\t\t\treturn tl, nil\n\t\t}\n\t}\n\n\tvar tree = template.New(name)\n\n\t//check if the delimiter array has content if so,set them\n\tif len(delims) > 0 && len(delims) >= 2 {\n\t\ttree.Delims(delims[0], delims[1])\n\t}\n\n\tfor _, fp := range fileList {\n\t\t//is it a file ? 
if no error then use it else try a directory\n\t\tvf, err := v.VDir.GetFile(fp)\n\n\t\tif err == nil {\n\t\t\t_, err = LoadVirtualTemplateFile(vf, tree)\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t} else {\n\t\t\tvd, err := v.VDir.GetDir(fp)\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\terr = LoadVirtualTemplateDir(tree, vd, name, ext)\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tv.rw.Lock()\n\tv.loaded[name] = tree\n\tv.rw.Unlock()\n\n\treturn tree, nil\n}", "func loadTemplates(path string) (*pongo2.TemplateSet, error) {\n\tloader, err := pongo2.NewLocalFileSystemLoader(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not load templates: %s\", err.Error())\n\t}\n\n\ttset := pongo2.NewSet(\"web\", loader)\n\n\treturn tset, nil\n}", "func (h *Handler) execTemplate(name string, writer io.Writer, context Context) {\n\tif h.templateStore == nil {\n\t\tpanic(\"No template store associated with handler\")\n\t}\n\n\terr := h.templateStore.ExecuteTemplate(writer, name, context)\n\tif err != nil {\n\t\tpanic(\"Template \" + name + \" could not be executed\")\n\t}\n}", "func renderTemplate(w http.ResponseWriter, tmpl string, p *Page) {\n err := templates.ExecuteTemplate(w, tmpl+\".html\", p)\n if err != nil {\n http.Error(w, err.Error(), http.StatusInternalServerError)\n }\n}", "func loadTemplate(source string, sourceData interface{}) []byte {\n\tvar byteBuff = new(bytes.Buffer)\n\tdefer byteBuff.Reset()\n\n\ttmpl, err := tpl.Funcs(formatTemplate()).Parse(source)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif err := tmpl.Execute(byteBuff, sourceData); err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn byteBuff.Bytes()\n}", "func renderTemplate(c context.Context, name string, partial bool, data *templateData) error {\n\tif name == \"/\" || name == \"\" {\n\t\tname = \"home\"\n\t}\n\n\tvar layout string\n\tif partial {\n\t\tlayout = \"layout_partial.html\"\n\t} else {\n\t\tlayout = 
\"layout_full.html\"\n\t}\n\n\tt, err := template.New(layout).Delims(\"{%\", \"%}\").Funcs(tmplFunc).ParseFiles(\n\t\tfilepath.Join(rootDir, \"templates\", layout),\n\t\tfilepath.Join(rootDir, \"templates\", name+\".html\"),\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm := pageMeta(t)\n\tif data == nil {\n\t\tdata = &templateData{}\n\t}\n\tif data.Env == \"\" {\n\t\tdata.Env = env(c)\n\t}\n\tdata.Meta = m\n\tdata.Title = pageTitle(m)\n\tdata.Slug = name\n\tif data.OgImage == \"\" {\n\t\tdata.OgImage = ogImageDefault\n\t}\n\treturn t.Execute(writer(c), data)\n}" ]
[ "0.6890042", "0.6643959", "0.624839", "0.62140644", "0.61590606", "0.60328317", "0.6032525", "0.5931923", "0.58915156", "0.58405447", "0.583602", "0.57904", "0.5784774", "0.57673085", "0.5760238", "0.5710334", "0.5703832", "0.5703815", "0.56856227", "0.5679397", "0.56789005", "0.5660735", "0.56435287", "0.5611295", "0.5608623", "0.5584214", "0.5570902", "0.5569693", "0.5564789", "0.5531131", "0.5491807", "0.5483509", "0.54802656", "0.5476608", "0.5476076", "0.5469801", "0.5461199", "0.54347056", "0.54323775", "0.53857195", "0.5359716", "0.5336365", "0.5306766", "0.5306766", "0.52986056", "0.52910525", "0.5285428", "0.52850103", "0.52820176", "0.5264877", "0.5253153", "0.52288175", "0.5215371", "0.5204053", "0.519612", "0.51842916", "0.51830715", "0.51797605", "0.517014", "0.51678246", "0.5141299", "0.51399034", "0.5138882", "0.5135424", "0.5135129", "0.51312584", "0.5131042", "0.51153934", "0.5111471", "0.5106727", "0.50953335", "0.5090522", "0.5089649", "0.5088792", "0.5087663", "0.5087595", "0.5086329", "0.50789875", "0.50727487", "0.5070892", "0.5064756", "0.5064212", "0.50614953", "0.5055282", "0.50549793", "0.5044905", "0.50392663", "0.5034697", "0.50226533", "0.50206417", "0.4998795", "0.49869615", "0.49831566", "0.49792454", "0.49727398", "0.4956126", "0.49469787", "0.49376747", "0.49375144", "0.49275348" ]
0.63263446
2
New returns a new PagerDuty notifier.
func New(c *config.PagerdutyConfig, t *template.Template, l log.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) { client, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, "pagerduty", httpOpts...) if err != nil { return nil, err } n := &Notifier{conf: c, tmpl: t, logger: l, client: client} if c.ServiceKey != "" || c.ServiceKeyFile != "" { n.apiV1 = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" // Retrying can solve the issue on 403 (rate limiting) and 5xx response codes. // https://v2.developer.pagerduty.com/docs/trigger-events n.retrier = &notify.Retrier{RetryCodes: []int{http.StatusForbidden}, CustomDetailsFunc: errDetails} } else { // Retrying can solve the issue on 429 (rate limiting) and 5xx response codes. // https://v2.developer.pagerduty.com/docs/events-api-v2#api-response-codes--retry-logic n.retrier = &notify.Retrier{RetryCodes: []int{http.StatusTooManyRequests}, CustomDetailsFunc: errDetails} } return n, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func New(c *config.DingTalkConfig, t *template.Template, l log.Logger) (*Notifier, error) {\n\tclient, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, \"dingtalk\", false, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Notifier{conf: c, tmpl: t, logger: l, client: client}, nil\n}", "func New(done <-chan bool) *Notifier {\n\tnotifier := Notifier{\n\t\tnotificationMessages: make(chan string),\n\t\tobservers: make(map[chan *model.Notification]bool),\n\t\tdone: done,\n\t}\n\n\tgo notifier.dispatch()\n\n\treturn &notifier\n}", "func New(conf *config.YachConfig, t *template.Template) (*Notifier, error) {\n\tclient, err := commoncfg.NewClientFromConfig(*conf.HTTPConfig, \"yach\", false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Notifier{\n\t\tconf: conf,\n\t\ttmpl: t,\n\t\tclient: client,\n\t\tlogger: logging.DefaultLogger.WithField(\"notify\", \"yach\"),\n\t\tretrier: &notify.Retrier{},\n\t}, nil\n}", "func New(config *model.NotifMail, meta model.Meta) notifier.Notifier {\n\treturn notifier.Notifier{\n\t\tHandler: &Client{\n\t\t\tcfg: config,\n\t\t\tmeta: meta,\n\t\t},\n\t}\n}", "func New(config *model.NotifAmqp, app model.App) notifier.Notifier {\n\treturn notifier.Notifier{\n\t\tHandler: &Client{\n\t\t\tcfg: config,\n\t\t\tapp: app,\n\t\t},\n\t}\n}", "func NewNotifier(cfg Config) (forward.Notifier, error) {\n\terr := cfg.defaults()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: %w\", err, internalerrors.ErrInvalidConfiguration)\n\t}\n\n\treturn &notifier{\n\t\tcfg: cfg,\n\t\ttplRenderer: cfg.TemplateRenderer,\n\t\tclient: cfg.Client,\n\t\tlogger: cfg.Logger.WithValues(log.KV{\"notifier\": \"telegram\"}),\n\t}, nil\n}", "func NewNotifier(slack *chat.Slack) (*Notifier, error) {\n\tnotifier := &Notifier{s: slack, db: slack.DB, conf: slack.Conf}\n\treturn notifier, nil\n}", "func NewNotifier(configs []*pb.NotificationConfig, amURL string) *notifier {\n\tnotifier := &notifier{\n\t\tpendingNotifications: make(chan 
*notificationReq, *notificationBufferSize),\n\t\talertmanagerURL: amURL,\n\t}\n\tnotifier.SetNotificationConfigs(configs)\n\treturn notifier\n}", "func New(d *dut.DUT) *Reporter {\n\treturn &Reporter{d}\n}", "func NewNotifier(config *config.Config) Notifier {\n\t// webhook URL and template are required\n\tif len(config.WebHookURL) > 0 && len(config.WebHookTemplate) > 0 {\n\t\treturn &baseNotifier{config}\n\t}\n\t// otherwise return noop\n\treturn &noopNotifier{baseNotifier{config}}\n}", "func New(url string) *SlackNotify {\n\treturn &SlackNotify{\n\t\tURL: url,\n\t\tc: http.Client{\n\t\t\tTimeout: 10 * time.Second,\n\t\t},\n\t}\n}", "func New(cfg Config) (*Notifier, error) {\n\tparsedProjectID, err := strconv.ParseInt(cfg.ProjectID, 10, 64)\n\tif err != nil {\n\t\treturn nil, ex.New(err)\n\t}\n\t// create a new reporter\n\tclient := gobrake.NewNotifierWithOptions(&gobrake.NotifierOptions{\n\t\tProjectId: parsedProjectID,\n\t\tProjectKey: cfg.ProjectKey,\n\t\tEnvironment: cfg.Environment,\n\t})\n\n\t// filter airbrakes from `dev`, `ci`, and `test`.\n\tclient.AddFilter(func(notice *gobrake.Notice) *gobrake.Notice {\n\t\tif noticeEnv := notice.Context[\"environment\"]; noticeEnv == env.ServiceEnvDev ||\n\t\t\tnoticeEnv == env.ServiceEnvCI ||\n\t\t\tnoticeEnv == env.ServiceEnvTest {\n\t\t\treturn nil\n\t\t}\n\t\treturn notice\n\t})\n\n\treturn &Notifier{\n\t\tClient: client,\n\t}, nil\n}", "func NewNotifier() *Notifier {\n\tnotifier := &Notifier{\n\t\tnotifierMap: new(sync.Map),\n\t\treceiveCh: make(chan Message, 65536),\n\t}\n\treturn notifier\n}", "func New(name, summary, body, icon string, timeout time.Duration, urgency NotificationUrgency) *Notification {\n\treturn &Notification{name, summary, body, icon, timeout, urgency}\n}", "func NewNotifier(c *cobra.Command) *Notifier {\n\tn := &Notifier{}\n\n\tf := c.PersistentFlags()\n\n\tlevel, _ := f.GetString(\"notifications-level\")\n\tlogLevel, err := log.ParseLevel(level)\n\tif err != nil 
{\n\t\tlog.Fatalf(\"Notifications invalid log level: %s\", err.Error())\n\t}\n\n\tacceptedLogLevels := slackrus.LevelThreshold(logLevel)\n\t// slackrus does not allow log level TRACE, even though it's an accepted log level for logrus\n\tif len(acceptedLogLevels) == 0 {\n\t\tlog.Fatalf(\"Unsupported notification log level provided: %s\", level)\n\t}\n\n\t// Parse types and create notifiers.\n\ttypes, err := f.GetStringSlice(\"notifications\")\n\tif err != nil {\n\t\tlog.WithField(\"could not read notifications argument\", log.Fields{\"Error\": err}).Fatal()\n\t}\n\n\tn.types = n.getNotificationTypes(c, acceptedLogLevels, types)\n\n\treturn n\n}", "func NewNotifier(token string) *Notifier {\n\treturn &Notifier{\n\t\tToken: token,\n\t\tClient: nil,\n\t}\n}", "func New(c *config.AliyunSmsConfig, t *template.Template, l log.Logger) (*Notifier, error) {\n\tclient, err := dysmsapi.NewClientWithAccessKey(\"cn-hangzhou\", c.AccessKeyId, c.AccessSecret)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Notifier{conf: c, tmpl: t, logger: l, client: client}, nil\n}", "func New(patterns []string) (*Notify, error) {\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tlogger.ErrorObject(err)\n\t\treturn nil, err\n\t}\n\n\twatchDirs := findDirs(patterns)\n\n\tfor _, t := range watchDirs {\n\t\terr = watcher.Add(t)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"%s: %v\", t, err)\n\t\t} else {\n\t\t\tlogger.Info(\"gazing at: %s\", t)\n\t\t}\n\t}\n\n\tnotify := &Notify{\n\t\tEvents: make(chan Event),\n\t\twatcher: watcher,\n\t\tisClosed: false,\n\t\ttimes: make(map[string]int64),\n\t\tpendingPeriod: 100,\n\t\tregardRenameAsModPeriod: 1000,\n\t\tdetectCreate: false,\n\t}\n\n\tgo notify.wait()\n\n\treturn notify, nil\n}", "func NewNotifier(c config.Config, chat chat.Chat) (*Notifier, error) {\n\tconn, err := storage.NewMySQL(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnotifier := &Notifier{Chat: chat, DB: conn, Config: c}\n\treturn notifier, nil\n}", "func 
NewNotifier() WakeSleepNotifier {\n\treturn new(notifier)\n}", "func NewNotifier(site database.Site, message string, subject string,\n\tsendEmail EmailSender, sendSms SmsSender) *Notifier {\n\tn := Notifier{Site: site, Message: message, Subject: subject,\n\t\tSendEmail: sendEmail, SendSms: sendSms}\n\treturn &n\n}", "func New(dependencies Dependencies) {\n\twriter = dependencies.Writer\n\treader = dependencies.Reader\n\thost = dependencies.Host\n\tnotifierService = dependencies.NotifierService\n}", "func New(cfg config.Queue, n notifier) *Queue {\n\tq := &Queue{\n\t\taddCh: make(chan struct{}, cfg.QueueSize),\n\t\tpopCh: make(chan struct{}, cfg.GoRoutinesSize),\n\t\taddMessage: make(chan entity.NotifierMessage, 1),\n\t\tpopMessage: make(chan entity.NotifierMessage, 1),\n\t\tnotifier: n,\n\t}\n\n\tgo q.pop()\n\tgo q.add()\n\n\treturn q\n}", "func New() *Prober {\n\treturn newForTest(time.Now, newRealTicker)\n}", "func newNotifier() (*notifier, error) {\n\tepfd, err := unix.EpollCreate1(0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tw := &notifier{\n\t\tepFD: epfd,\n\t\tfdMap: make(map[int32]*fdInfo),\n\t}\n\n\tgo w.waitAndNotify() // S/R-SAFE: no waiter exists during save / load.\n\n\treturn w, nil\n}", "func NewNotifier(ec *EngineConfig) (*Notifier, error) {\n\toutgoing := make(chan Notification, 1)\n\tn := &Notifier{\n\t\tC: outgoing,\n\t\toutgoing: outgoing,\n\t\treload: make(chan bool, 1),\n\t\tshutdown: make(chan bool, 1),\n\t\tengineCfg: ec,\n\t\tsource: SourceServer,\n\t}\n\n\tnote, err := n.bootstrap()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnote.Cluster.Vservers = rateLimitVS(note.Cluster.Vservers, nil)\n\n\tn.outgoing <- *note\n\tn.last = note\n\n\t// If the on disk configuration is different, update it.\n\tif note.Source != SourceDisk {\n\t\tdNote, _ := n.pullConfig(SourceDisk)\n\t\tif dNote == nil || !dNote.Cluster.Equal(note.Cluster) {\n\t\t\tif err := saveConfig(note.protobuf, n.engineCfg.ClusterFile, true); err != nil 
{\n\t\t\t\tlog.Warningf(\"Failed to save config to %s: %v\", n.engineCfg.ClusterFile, err)\n\t\t\t}\n\t\t}\n\t}\n\n\tgo n.run()\n\treturn n, nil\n}", "func New(notifier *bugsnag.Notifier) *NegroniBugsnag {\n\treturn &NegroniBugsnag{\n\t\tnotifier: notifier,\n\t}\n}", "func MustNew(cfg Config) *Notifier {\n\tnotifier, err := New(cfg)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn notifier\n}", "func New(reporter metrics.Reporter) *Wrapper {\n\treturn &Wrapper{\n\t\treporter: reporter,\n\t}\n}", "func New(pushURL, owner string) {\n\tSave(pushURL, config.Tart{\n\t\tName: pushURL,\n\t\tPushURL: pushURL,\n\t\tIsRunning: false,\n\t\tOwners: []string{owner},\n\t\tPID: -1,\n\t\tRestartDelaySecs: 30,\n\t\tRestartOnStop: false,\n\t\tLogStdout: true,\n\t})\n}", "func New() *DelayCaller {\n\tvar c DelayCaller\n\tc.p.init(runtime.NumCPU())\n\tc.queue = nil\n\tc.queueIn = make(chan call)\n\tgo c.runner()\n\treturn &c\n}", "func New(reporter services.UsageReporter, log logrus.FieldLogger, inner apievents.Emitter) (*UsageLogger, error) {\n\tif log == nil {\n\t\tlog = logrus.StandardLogger()\n\t}\n\n\treturn &UsageLogger{\n\t\tEntry: log.WithField(\n\t\t\ttrace.Component,\n\t\t\tteleport.Component(teleport.ComponentUsageReporting),\n\t\t),\n\t\treporter: reporter,\n\t\tinner: inner,\n\t}, nil\n}", "func New(label, message string) *Badge {\n\treturn &Badge{\n\t\tVersion: 1,\n\t\tLabel: label,\n\t\tMessage: message,\n\t}\n}", "func New() Email {\n\treturn Email{}\n}", "func NewPagerDuty(apiKey string) *PagerDuty {\n\treturn &PagerDuty{client: pagerduty.NewClient(apiKey)}\n}", "func New(dnsList, domainList []string, checkInterval time.Duration) *Checker {\n\tc := &Checker{}\n\tif len(dnsList) == 0 {\n\t\tc.DNSList = DefaultDNSList\n\t} else {\n\t\tc.DNSList = dnsList\n\t}\n\n\tif len(domainList) == 0 {\n\t\tc.DomainList = DefaultDomainList\n\t} else {\n\t\tc.DomainList = domainList\n\t}\n\n\tif checkInterval == 0 {\n\t\tc.CheckInterval = DefaultCheckInterval\n\t} else 
{\n\t\tc.CheckInterval = checkInterval\n\t}\n\n\tgo c.Monitor()\n\treturn c\n}", "func New(configfile string) *Poloniex {\r\n\treturn NewWithConfig(configfile)\r\n}", "func New(id uint, observerChannel chan common.ObserverMessage, resultChannel chan common.RoundResult) *Diner {\n\treturn &Diner{\n\t\tnil,\n\t\tmake(chan bool, 1),\n\t\tnil,\n\t\tobserverChannel,\n\t\tid,\n\t\tresultChannel}\n}", "func NewPinger(opts *Options) Pinger {\n\topts.setDefaults()\n\treturn &pinger{\n\t\tid: rand.Intn(maxID),\n\t\topts: opts,\n\t\treportChan: make(chan Ping), // TODO: use buffer?\n\t\terrChan: make(chan error, 1),\n\t\tstop: make(chan struct{}, 1),\n\t\tstats: &Stats{},\n\t\tclock: defaultClock{},\n\t}\n}", "func New(wsPath string, webhookURLs []string) *WebNotifier {\n\twebhook := NewHTTPNotifier(webhookURLs)\n\tws := NewWSNotifier(wsPath)\n\n\tn := WebNotifier{\n\t\tnotifiers: []command.Notifier{webhook, ws},\n\t\thandlers: ws.GetRESTHandlers(),\n\t}\n\n\treturn &n\n}", "func New() *Email {\r\n\treturn &Email{\r\n\t\tCreated: time.Now(),\r\n\t\tUpdated: time.Now(),\r\n\t}\r\n}", "func New(apiKey, apiSecret string) *Poloniex {\n\tclient := NewClient(apiKey, apiSecret)\n\treturn &Poloniex{client}\n}", "func New() *PubSub {\n\treturn &PubSub{\n\t\tMaxSubs: 20,\n\t\tregistry: make(map[string]*topic),\n\t}\n}", "func New(host string) *PubSub {\n\tps := PubSub{\n\t\thost: host,\n\t}\n\n\t// PRETEND THERE IS A SPECIFIC IMPLEMENTATION.\n\n\treturn &ps\n}", "func New(logger log.Logger) *Prober {\n\treturn &Prober{logger: logger}\n}", "func New(cfg *Config) *Tailer {\n\tif cfg.Log == nil {\n\t\tcfg.Log = &log.Logger{Out: ioutil.Discard}\n\t}\n\n\treturn &Tailer{\n\t\tcfg: cfg,\n\t\tstripeAuthClient: stripeauth.NewClient(cfg.Key, &stripeauth.Config{\n\t\t\tLog: cfg.Log,\n\t\t\tAPIBaseURL: cfg.APIBaseURL,\n\t\t}),\n\t\tinterruptCh: make(chan os.Signal, 1),\n\t}\n}", "func NewNotifiee(code uint32, messageCh chan Message) *Notifiee {\n\treturn &Notifiee{code: code, messageCh: 
messageCh}\n}", "func New(counter metrics.Counter, latency metrics.Histogram, logger log.Logger) Logger {\n\treturn Logger{\n\t\tcallUpdate: make(chan interface{}),\n\t\tcallError: make(chan error),\n\t\trequestCount: counter,\n\t\trequestLatency: latency,\n\t\tlogger: logger,\n\t}\n}", "func NewNotifier(v *viper.Viper) (Notifier, error) {\n\treturn NewSNSServer(v)\n}", "func New(params Params) (*presignerT, error) {\n\treturn newPresigner(params)\n}", "func New(b *beat.Beat, cfg *common.Config) (beat.Beater, error) {\n\tc := config.DefaultConfig\n\tif err := cfg.Unpack(&c); err != nil {\n\t\treturn nil, fmt.Errorf(\"Error reading config file: %v\", err)\n\t}\n\n\tbt := &Polutbeat{\n\t\tdone: make(chan struct{}),\n\t\tconfig: c,\n\t}\n\treturn bt, nil\n}", "func New() *Updater {\n\treturn &Updater{}\n}", "func New(notifier *gobrake.Notifier) *Handler {\n\th := Handler{notifier}\n\treturn &h\n}", "func NewNotification(title, message string) Notification {\n\tif title == \"\" {\n\t\ttitle = \"notification\"\n\t}\n\treturn Notification{\n\t\tTitle: title,\n\t\tMessage: message,\n\t}\n}", "func NewNotificator(st storage.Storage, settings *storage.Settings) (*Notificator, error) {\n\tns, err := NewNotificationStorage(st)\n\tconnectionCreator := &notificationConnectionCreatorImpl{\n\t\tstorageURI: settings.URI,\n\t\tminReconnectInterval: settings.Notification.MinReconnectInterval,\n\t\tmaxReconnectInterval: settings.Notification.MaxReconnectInterval,\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Notificator{\n\t\tqueueSize: settings.Notification.QueuesSize,\n\t\tconnectionMutex: &sync.Mutex{},\n\t\tconsumersMutex: &sync.Mutex{},\n\t\tconsumers: make(consumers),\n\t\tstorage: ns,\n\t\tconnectionCreator: connectionCreator,\n\t\tlastKnownRevision: invalidRevisionNumber,\n\t}, nil\n}", "func NewNotification(userID, title, subtitle, urlLink, body string) (notification Notification) {\n\tid := uuid.NewV4().String()\n\n\treturn Notification{\n\t\tID: 
id,\n\t\tUserID: userID,\n\t\tTitle: title,\n\t\tSubtitle: subtitle,\n\t\tURLLink: urlLink,\n\t\tBody: body,\n\t}\n}", "func New(numNodes int, outgoing chan packet.Message, timeout time.Duration, numRetries int) *Repeater {\n\tr := Repeater{\n\t\toutgoing: outgoing,\n\t\ttimeout: timeout,\n\t\tnumRetries: numRetries,\n\t\tlock: sync.Mutex{},\n\t\tunackedReqs: make(map[int]map[int]map[packet.Messagetype]bool),\n\t}\n\n\tfor i := 0; i < numNodes; i++ {\n\t\tr.unackedReqs[i] = make(map[int]map[packet.Messagetype]bool)\n\t}\n\n\treturn &r\n}", "func New(config *rpcclient.ConnConfig, chainParams *chaincfg.Params,\n\tspendHintCache chainntnfs.SpendHintCache,\n\tconfirmHintCache chainntnfs.ConfirmHintCache) (*DcrdNotifier, error) {\n\n\tnotifier := &DcrdNotifier{\n\t\tchainParams: chainParams,\n\n\t\tnotificationCancels: make(chan interface{}),\n\t\tnotificationRegistry: make(chan interface{}),\n\n\t\tblockEpochClients: make(map[uint64]*blockEpochRegistration),\n\n\t\tchainUpdates: queue.NewConcurrentQueue(10),\n\n\t\tspendHintCache: spendHintCache,\n\t\tconfirmHintCache: confirmHintCache,\n\n\t\tquit: make(chan struct{}),\n\t}\n\n\tntfnCallbacks := &rpcclient.NotificationHandlers{\n\t\tOnBlockConnected: notifier.onBlockConnected,\n\t\tOnBlockDisconnected: notifier.onBlockDisconnected,\n\t}\n\n\t// Disable connecting to dcrd within the rpcclient.New method. 
We defer\n\t// establishing the connection to our .Start() method.\n\tconfig.DisableConnectOnNew = true\n\tconfig.DisableAutoReconnect = false\n\tchainConn, err := rpcclient.New(config, ntfnCallbacks)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnotifier.chainConn = chainConn\n\tnotifier.cca = &chainConnAdaptor{c: chainConn, ctx: context.TODO()}\n\n\treturn notifier, nil\n}", "func NewNotification(Type string, Body string) *Notification {\n\treturn &Notification{\"notification\", Type, Body}\n}", "func New() *PerfcPublisher {\n\treturn &PerfcPublisher{}\n}", "func New() (*T) {\n\n\tme := T{\n\t\tcount: 0,\n\t\tdatum: \"\",\n\t}\n\n\treturn &me\n}", "func newNotification(format NotificationFormat, payload []byte) (*Notification, error) {\n\tif !format.IsValid() {\n\t\treturn nil, fmt.Errorf(\"unknown format '%s'\", format)\n\t}\n\n\treturn &Notification{format, payload}, nil\n}", "func New() *Beeper { return &Beeper{} }", "func New(conf Config, client *http.Client) (*Deliverer, error) {\n\tvar c Config\n\tvar err error\n\tif c, err = conf.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\tif client == nil {\n\t\tclient = http.DefaultClient\n\t}\n\treturn &Deliverer{\n\t\tconf: c,\n\t\tc: client,\n\t}, nil\n}", "func setUpNotifier(t *testing.T, h *rpctest.Harness) *DcrdNotifier {\n\thintCache := initHintCache(t)\n\n\trpcConfig := h.RPCConfig()\n\tnotifier, err := New(&rpcConfig, netParams, hintCache, hintCache)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to create notifier: %v\", err)\n\t}\n\tif err := notifier.Start(); err != nil {\n\t\tt.Fatalf(\"unable to start notifier: %v\", err)\n\t}\n\n\treturn notifier\n}", "func NewNotification(c NotificationConfig) Elem {\n\treturn driver.NewNotification(c)\n}", "func New(opts ...metrics.Option) *Reporter {\n\treturn &Reporter{\n\t\toptions: metrics.NewOptions(opts...),\n\t}\n}", "func NewMockNotifier(ctrl *gomock.Controller) *MockNotifier {\n\tmock := &MockNotifier{ctrl: ctrl}\n\tmock.recorder = 
&MockNotifierMockRecorder{mock}\n\treturn mock\n}", "func NewMockNotifier(ctrl *gomock.Controller) *MockNotifier {\n\tmock := &MockNotifier{ctrl: ctrl}\n\tmock.recorder = &MockNotifierMockRecorder{mock}\n\treturn mock\n}", "func NewMockNotifier(ctrl *gomock.Controller) *MockNotifier {\n\tmock := &MockNotifier{ctrl: ctrl}\n\tmock.recorder = &MockNotifierMockRecorder{mock}\n\treturn mock\n}", "func New(config *Config) (*Reporter, error) {\n\tvar (\n\t\treporter Reporter\n\t\terr error\n\t)\n\n\tif config == nil {\n\t\treturn nil, nil\n\t}\n\n\tif err := config.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\treporter.config = config\n\n\treporter.D = debug.New(\"reporter/errors\")\n\tif config.Debug {\n\t\treporter.D.On()\n\t}\n\n\treporter.Debug(\"initializing Sentry client\")\n\n\tsentryOpts := sentry.ClientOptions{\n\t\tDsn: config.DSN,\n\t\tAttachStacktrace: true,\n\t}\n\n\tif config.Wait {\n\t\tsentryOpts.Transport = &sentry.HTTPSyncTransport{Timeout: sentryFlushTimeout}\n\t}\n\n\tif config.Debug {\n\t\tsentryOpts.Debug = true\n\t\tsentryOpts.DebugWriter = &sentryDebugWriter{d: reporter.D}\n\t}\n\n\treporter.sentry, err = sentry.NewClient(sentryOpts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &reporter, nil\n}", "func New(\n\tspyInterval, publishInterval time.Duration,\n\tpublisher ReportPublisher,\n\tticksPerFullReport int,\n\tnoControls bool,\n) *Probe {\n\tresult := &Probe{\n\t\tspyInterval: spyInterval,\n\t\tpublishInterval: publishInterval,\n\t\tpublisher: publisher,\n\t\trateLimiter: rate.NewLimiter(rate.Every(publishInterval/100), 1),\n\t\tticksPerFullReport: ticksPerFullReport,\n\t\tnoControls: noControls,\n\t\tquit: make(chan struct{}),\n\t\tspiedReports: make(chan report.Report, spiedReportBufferSize),\n\t\tshortcutReports: make(chan report.Report, shortcutReportBufferSize),\n\t}\n\treturn result\n}", "func NewPinger(p *lt.Port, target uint8) *Pinger {\n\treturn &Pinger{\n\t\tp: p,\n\t\taddress: target,\n\t}\n}", "func New(cfg 
*worker.Config, taskid string, devices []int) *Worker {\n\treturn &Worker{\n\t\ttaskid: taskid,\n\t\tdevices: devices,\n\t\tcfg: cfg,\n\t}\n}", "func NewPinger(addr, network, protocol string, id int) (*Pinger, error) {\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tp := &Pinger{\n\t\tRecordRtts: true,\n\t\tSize: timeSliceLength,\n\t\tTracker: r.Int63n(math.MaxInt64),\n\n\t\taddr: addr,\n\t\tdone: make(chan bool),\n\t\tid: id,\n\t\tipaddr: nil,\n\t\tipv4: false,\n\t\tnetwork: network,\n\t\tprotocol: protocol,\n\t}\n\treturn p, p.Resolve()\n}", "func NewNotifierService(c *Config) *NotifierService {\n\tclient := client.NewHTTP(\n\t\tc.Endpoint,\n\t\t\"\",\n\t\t\"\",\n\t\t\"\",\n\t\t\"\",\n\t\ttrue,\n\t\tnil,\n\t)\n\tclient.Init()\n\n\treturn &NotifierService{\n\t\tinTransactionDoer: c.InTransactionDoer,\n\t\tclient: client,\n\t\tlog: pkglog.NewLogger(\"vnc-api-notifier\"),\n\t}\n}", "func New(w io.Writer, template string) (*Logger, error) {\n\tformatters, isTimeRequired, err := compileFormat(template)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Create a dummy event to see how long the log line is with the provided\n\t// template.\n\tbuf := make([]byte, 0, 64)\n\tvar e event\n\tfor _, formatter := range formatters {\n\t\tformatter(&e, &buf)\n\t}\n\tmin := len(buf) + 64\n\tif min < 128 {\n\t\tmin = 128\n\t}\n\tparent := &base{\n\t\tc: min,\n\t\tformatters: formatters,\n\t\tisTimeRequired: isTimeRequired,\n\t\tw: w,\n\t}\n\treturn &Logger{parent: parent, level: Warning}, nil\n}", "func New(progressAction string, logger slog.Logger) *Logger {\n\treturn &Logger{\n\t\tlastLogTime: time.Now(),\n\t\tprogressAction: progressAction,\n\t\tsubsystemLogger: logger,\n\t}\n}", "func New(auth aws.Auth, region aws.Region, name string) *SQSNotify {\n\treturn &SQSNotify{\n\t\tauth: auth,\n\t\tregion: region,\n\t\tname: name,\n\t\tqueue: nil,\n\t\trunning: false,\n\t}\n}", "func NewReporter(webhook string, d time.Duration) *Reporter {\n\tc := make(chan string, 
1)\n\tdone := make(chan bool)\n\tticker := time.NewTicker(d)\n\tr := &Reporter{c: c, done: done, ticker: ticker}\n\tgo runUpdater(webhook, c, done, ticker, &r.E)\n\treturn r\n}", "func Notification(title, message string) GNotifier {\n\tconfig := &Config{title, message, 5000, \"\"}\n\tn := &notifier{Config: config}\n\treturn n\n}", "func New(fireAfter time.Duration, fireFunc func() ()) *Ticker {\n\treturn &Ticker{\n\t\tlastRestart: time.Now(),\n\t\tfireAfter: fireAfter,\n\t\tfireFunc: fireFunc,\n\t\tactive: false,\n\t}\n}", "func New(owner string, repo string, event string, id int, data *string) (*Labeler, error) {\n\tif data == nil {\n\t\treturn nil, errors.New(\"a JSON string of event data is required\")\n\t}\n\treturn NewWithOptions(\n\t\tWithOwner(owner),\n\t\tWithRepo(repo),\n\t\tWithEvent(event),\n\t\tWithID(id),\n\t\tWithData(*data),\n\t\tWithContext(context.Background()),\n\t\tWithConfigPath(\".github/labeler.yml\"),\n\t)\n}", "func New(l log.Logger, taskInterval, taskDelay time.Duration) Timer {\n\treturn &timer{\n\t\twg: sync.WaitGroup{},\n\t\tl: l.WithModule(\"timer\"),\n\t\ttaskInterval: taskInterval,\n\t\ttaskDelay: taskDelay,\n\t}\n}", "func New(c Config) (*Prober, error) {\n\tpr := &Prober{\n\t\tcfg: c,\n\t\tclock: realClock{},\n\t\tmtu: mtuMax,\n\t\ttransitProbes: newTransitProbes(),\n\t\tmeasurements: measurement.NewDB(),\n\t\tstop: make(chan struct{}),\n\t\tpayload: make(gopacket.Payload, c.PayloadSizeBytes),\n\t}\n\n\treturn pr, nil\n}", "func New() *Mediator {\n\tconfig := cfg.New()\n\taddress := fmt.Sprintf(\"%s:%s\", config.RPCHost, config.RPCPort)\n\tpool := pools.NewResourcePool(func() (pools.Resource, error) {\n\t\tconn, err := grpc.Dial(address, grpc.WithInsecure())\n\t\tclient := pb.NewDailyBonusClient(conn)\n\t\treturn &ResourceConn{\n\t\t\tconn,\n\t\t\tclient,\n\t\t}, err\n\t}, config.RPCConnectionPool.InitialCapacity, config.RPCConnectionPool.MaxCapacity, config.RPCConnectionPool.IdleTimeout)\n\treturn &Mediator{\n\t\tclientPool: 
pool,\n\t\tconfig: &config,\n\t\tpoolMutex: &sync.Mutex{},\n\t}\n}", "func New(w io.Writer) *PLog {\n\tp := &PLog{\n\t\twriter: w,\n\t}\n\n\tdp := &defaultPrinter{now: time.Now}\n\tdp.setOutput(w)\n\tp.printer = dp\n\n\treturn p\n}", "func New(\n\tprotocolID string,\n\tbroadcastChannel net.BroadcastChannel,\n\tmembershipValidator *group.MembershipValidator,\n) *Announcer {\n\tbroadcastChannel.SetUnmarshaler(func() net.TaggedUnmarshaler {\n\t\treturn &announcementMessage{}\n\t})\n\n\treturn &Announcer{\n\t\tprotocolID: protocolID,\n\t\tbroadcastChannel: broadcastChannel,\n\t\tmembershipValidator: membershipValidator,\n\t}\n}", "func New() broker.Broker {\n\treturn &natsBroker{\n\t\tsubscriptionMap: make(map[string]*natsSubscriber),\n\t}\n}", "func NewTracker(timeout time.Duration) *Tracker {\n\treturn &Tracker{\n\t\tlast: time.Now(),\n\t\ttimeout: timeout,\n\t}\n}", "func NewPinger(ds Datastorer) Pinger {\n\treturn Pinger{ds}\n}", "func New(timeout time.Duration, timeoutFunc func()) *Heartbeat {\n\thb := &Heartbeat{\n\t\ttimeout: int64(timeout),\n\t\ttimer: time.AfterFunc(timeout, timeoutFunc),\n\t}\n\treturn hb\n}", "func New() Publisher {\n\treturn &publisher{\n\t\ttopics: make(map[string]*topic),\n\t}\n}", "func New() (*PolicyChecker, error) {\n\treturn NewWithConfig(Config{})\n}", "func NewDutyManager() *DutyManager {\n\treturn &DutyManager{\n\t\tFailures: make(map[string]error),\n\t}\n}", "func New(filename string) *Logger {\n\tl := lumberjack.Logger{\n\t\tFilename: filename,\n\t\tMaxSize: 500,\n\t\tMaxBackups: 3,\n\t\tMaxAge: 30,\n\t\tCompress: true,\n\t}\n\n\treturn &Logger{\n\t\tLogger: l,\n\t}\n}", "func NewInformer(workqueue workqueue.RateLimitingInterface) (Informer, error) {\n\ttOpts := []http.Option{cloudevents.WithBinaryEncoding()}\n\n\t// Make an http transport for the CloudEvents client.\n\tt, err := cloudevents.NewHTTPTransport(tOpts...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error creating HTTP transport: %v\", err)\n\t}\n\n\t// Use the 
transport to make a new CloudEvents client.\n\tc, err := cloudevents.NewClient(t,\n\t\tcloudevents.WithUUIDs(),\n\t\tcloudevents.WithTimeNow(),\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error creating CloudEvent client: %v\", err)\n\t}\n\n\treturn &informer{\n\t\tworkqueue: workqueue,\n\t\tclient: c,\n\t}, nil\n}", "func New(provider Provider) *Module {\n\tm := &Module{\n\t\tprovider: provider,\n\t\tscheduler: timing.NewScheduler(),\n\t}\n\n\tm.notifyFn, m.notifyCh = notifier.New()\n\tm.outputFunc.Set(func(info Info) bar.Output {\n\t\tif info.Updates == 1 {\n\t\t\treturn outputs.Text(\"1 update\")\n\t\t}\n\t\treturn outputs.Textf(\"%d updates\", info.Updates)\n\t})\n\n\tm.Every(time.Hour)\n\n\treturn m\n}", "func New(t testing.TB) lg.Log {\n\treturn NewWith(t, FactoryFn)\n}", "func NewPusher(g prometheus.Gatherer) *Pusher {\n\treturn &Pusher{\n\t\tURL: \"https://telemetry.influxdata.com/metrics/job/influxdb\",\n\t\tGather: &pr.Filter{\n\t\t\tGatherer: g,\n\t\t\tMatcher: telemetryMatcher,\n\t\t},\n\t\tClient: &http.Client{\n\t\t\tTransport: http.DefaultTransport,\n\t\t\tTimeout: 10 * time.Second,\n\t\t},\n\t\tPushFormat: expfmt.FmtText,\n\t}\n}" ]
[ "0.7220423", "0.7142861", "0.6967976", "0.68429434", "0.6688869", "0.6591405", "0.65628475", "0.65613985", "0.6546102", "0.6531209", "0.6517486", "0.6479263", "0.6462606", "0.64456606", "0.64222896", "0.6400352", "0.63652164", "0.6297083", "0.62604415", "0.6237658", "0.6207808", "0.6120669", "0.6021293", "0.60014665", "0.5964776", "0.5932267", "0.5877909", "0.585091", "0.5838484", "0.58356595", "0.5832543", "0.57886803", "0.5757485", "0.5756792", "0.5742825", "0.5726592", "0.5718845", "0.57083595", "0.56761247", "0.5647791", "0.5644625", "0.562607", "0.56080735", "0.55862504", "0.55794656", "0.5563436", "0.556205", "0.55570954", "0.55479115", "0.55475366", "0.55418116", "0.55318344", "0.5523775", "0.5513814", "0.551201", "0.5511859", "0.5509789", "0.5505789", "0.5498581", "0.54880685", "0.5484692", "0.5468045", "0.54561573", "0.54505193", "0.5449757", "0.5446721", "0.5444769", "0.5442755", "0.5442755", "0.5442755", "0.54379106", "0.5431306", "0.54274577", "0.5423401", "0.54212636", "0.54204047", "0.538821", "0.53812236", "0.5379022", "0.53789043", "0.53750265", "0.53599304", "0.5355527", "0.53552276", "0.53541315", "0.53483486", "0.53451526", "0.5344224", "0.5341943", "0.53323084", "0.5331325", "0.5314864", "0.53116214", "0.5311331", "0.53105783", "0.5308818", "0.53070015", "0.5303048", "0.53025985", "0.52994835" ]
0.77650785
0
Notify implements the Notifier interface.
func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error) { key, err := notify.ExtractGroupKey(ctx) if err != nil { return false, err } var ( alerts = types.Alerts(as...) data = notify.GetTemplateData(ctx, n.tmpl, as, n.logger) eventType = pagerDutyEventTrigger ) if alerts.Status() == model.AlertResolved { eventType = pagerDutyEventResolve } level.Debug(n.logger).Log("incident", key, "eventType", eventType) details := make(map[string]string, len(n.conf.Details)) for k, v := range n.conf.Details { detail, err := n.tmpl.ExecuteTextString(v, data) if err != nil { return false, errors.Wrapf(err, "%q: failed to template %q", k, v) } details[k] = detail } if n.apiV1 != "" { return n.notifyV1(ctx, eventType, key, data, details, as...) } return n.notifyV2(ctx, eventType, key, data, details, as...) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r NopReporter) Notify(ctx context.Context, err error) {}", "func (notifier *Notifier) Notify(notification Notification) {\n\n}", "func (n *Notifier) Notify(err interface{}) error {\n\t_, sendErr := n.Client.SendNotice(NewNotice(err, nil))\n\treturn ex.New(sendErr)\n}", "func (n *IFTTTNotifier) Notify(msg string) error {\n\n\treq := &utility.HTTPRequest{\n\t\tURL: fmt.Sprintf(\"https://maker.ifttt.com/trigger/%s/with/key/%s\", EventName, n.Key),\n\t}\n\n\tresp, _, err := n.httpClient.DoRequest(utility.HTTPMethodGET, req, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode < 200 || resp.StatusCode >= 300 {\n\t\treturn errors.New(fmt.Sprintf(\"Unexpected status code %d in IFTTTNotifier\", resp.StatusCode))\n\t}\n\n\treturn nil\n}", "func (a *Animator) Notify() {\n\ta.mux.Lock()\n\tdefer a.mux.Unlock()\n\ta.NotifyCount++\n\tif !a.notifyFlagged {\n\t\ta.notifyFlagged = true\n\t\tclose(a.notifyChan)\n\t}\n}", "func (on *OpsGenieNotifier) Notify(evalContext *alerting.EvalContext) error {\n\tvar err error\n\tswitch evalContext.Rule.State {\n\tcase models.AlertStateOK:\n\t\tif on.AutoClose {\n\t\t\terr = on.closeAlert(evalContext)\n\t\t}\n\tcase models.AlertStateAlerting:\n\t\terr = on.createAlert(evalContext)\n\t}\n\treturn err\n}", "func (p *messagePredicate) Notify(f fact.Fact) {\n\tif p.ok {\n\t\treturn\n\t}\n\n\tif env, ok := p.tracker.Notify(f); ok {\n\t\tp.messageProduced(env)\n\t}\n}", "func (mp *MonitorPool) Notify(n *Notification) {\n\tif timeout, ok := mp.timeouts[n.Code]; ok {\n\t\tsession := mp.session(n.Key)\n\t\tmonitor := session.monitor(n.Code, timeout)\n\t\tmonitor.pulse()\n\t} else {\n\t\tlog.Println(fmt.Sprintf(\"presence: no configuration for notification code: %s\", n.Code))\n\t}\n}", "func (n *ConfirmChanNotifier) Notify(c *Confirmation) {\n\tn.C <- c\n}", "func (notifier *Notifier) Notify(msg Message) {\n\tnotifier.receiveCh <- msg\n}", "func (node *hostNode) Notify(notifee network.Notifiee) 
{\n\tnode.host.Network().Notify(notifee)\n}", "func (c *Client) Notify(m Method) error {\n\tif c.shutdown {\n\t\treturn fmt.Errorf(\"Client is shutdown\")\n\t}\n\treq := &Request{nil, m}\n\tc.requestQueue <- req\n\treturn nil\n}", "func (ed *Editor) Notify(note string) {\n\ted.state.AddNote(note)\n\ted.Redraw(false)\n}", "func (o *observerI) Notify(value interface{}) {\n\to.Lock()\n\tdefer o.Unlock()\n\to.state.Value = value\n\tnext := NewState()\n\to.state.Next = next\n\tclose(o.state.C)\n\to.state = o.state.Next\n}", "func (notifier *ApprovalStatusNotifier) Notify(p *ProductReview, approved bool, msg string) error {\n\ts := \"Hello, this is \" + notifier.Sender + \" from Foo Incorporated.\\n\"\n\tif approved {\n\t\ts += \"Thank you for your review. It has been approved and will be on our site shortly!\\n\"\n\t} else {\n\t\ts += \"Your review has been denied due to not meeting our corporate policies regarding language.\"\n\t\ts += \"Please see our policies listed here: foo.inc/guidelines/community-practices.html\\n\"\n\t}\n\ts += msg\n\tlog.Println(\"Notifying client:\", s)\n\treturn nil\n}", "func (p *Printer) Notify(ctx context.Context, to string, msg Message) error {\n\tb, err := json.Marshal(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = p.writer.Write(b)\n\treturn err\n}", "func Notify(client *gophercloud.ServiceClient, id string) (r NotifyResult) {\n\tresp, err := client.Post(notifyURL(client, id), nil, nil, &gophercloud.RequestOpts{\n\t\tOkCodes: []int{204},\n\t})\n\t_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)\n\treturn\n}", "func Notify(client *ircutil.Client, command *ircutil.Command,\n\tmessage *ircutil.Message) {\n\tircutil.SendNotice(client, message.Args[0], strings.Join(message.Args[1:],\n\t\t\" \"))\n}", "func sendNotification(n notifier) {\n\tn.notify()\n}", "func sendNotification(n notifier) {\n\tn.notify()\n}", "func sendNotification(n notifier) {\n\tn.notify()\n}", "func sendNotification(n notifier) {\n\tn.notify()\n}", 
"func Notify (err error, rawData ...interface{}){\n\tbugsnag.Notify(err, rawData)\n}", "func (wt *WatchTower) Notify(event interface{}) {\n\twt.observer.Range(func(key, value interface{}) bool {\n\t\tif key == nil {\n\t\t\treturn false\n\t\t}\n\n\t\tkey.(handler.Observer).Trigger(event)\n\t\treturn true\n\t})\n}", "func (_m *MockJournal) Notify(_a0 Notifiee, _a1 Index) {\n\t_m.Called(_a0, _a1)\n}", "func (u User) Notify() {\n\tfmt.Println(\"Func User Notify() \" + u.Name)\n}", "func (n *ReturnChanNotifier) Notify(c *amqp.Return) {\n\tn.C <- c\n}", "func (d *DeadmansSwitch) Notify(summary, detail string) {\n\tif err := d.notifier(summary, detail); err != nil {\n\t\tfailedNotifications.Inc()\n\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", err.Error())\n\t}\n}", "func (c *observerSubComponent) notify(notification Notification) {\n\tc.callbackLock.RLock()\n\tdefer c.callbackLock.RUnlock()\n\tif c.callback != nil {\n\t\tc.callback(notification)\n\t}\n}", "func (p *BoxPeer) Notify(msg Message) {\n\tp.notifier.Notify(msg)\n}", "func (s *Store) Notify(events []workloadmeta.CollectorEvent) {\n\tpanic(\"not implemented\")\n}", "func (h *Health) notify() {\n\tfor _, subscriber := range h.subscribers {\n\t\tif subscriber == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tselect {\n\t\tcase subscriber <- struct{}{}:\n\t\tdefault:\n\t\t}\n\t}\n}", "func (c Client) Notify(ctx context.Context, ee logger.ErrorEvent) {\n\tc.Client.CaptureEvent(errEvent(ctx, ee), nil, raven.NewScope())\n\tc.Client.Flush(c.Config.FlushTimeoutOrDefault()) // goose this a bit\n}", "func (b *Broadcaster) Notify(typpedChan interface{}) error {\n\ttp := reflect.TypeOf(typpedChan)\n\toutC := reflect.ValueOf(typpedChan)\n\tif tp.Kind() != reflect.Chan {\n\t\treturn errors.New(\"input parameter should be channel\")\n\t}\n\tif tp.ChanDir() == reflect.RecvDir {\n\t\treturn errors.New(\"channel should be writable\")\n\t}\n\tif tp.Elem() != b.valueType {\n\t\treturn errors.New(\"bad channel value type\")\n\t}\n\tc := make(chan chan 
broadcast)\n\tb.listenc <- c\n\trc := <-c\n\tgo func() {\n\t\tdefer func() {\n\t\t\t_ = recover()\n\t\t}()\n\t\tfor {\n\t\t\tb := <-rc\n\t\t\tv := b.v\n\t\t\trc <- b\n\t\t\trc = b.c\n\t\t\toutC.Send(reflect.ValueOf(v))\n\t\t}\n\t}()\n\treturn nil\n}", "func (recv *Object) Notify(propertyName string) {\n\tc_property_name := C.CString(propertyName)\n\tdefer C.free(unsafe.Pointer(c_property_name))\n\n\tC.g_object_notify((*C.GObject)(recv.native), c_property_name)\n\n\treturn\n}", "func (n *MyNode) Notify(arg string, reply *bool) error {\n\n\tn.PreMutex.Lock()\n\tdefer n.PreMutex.Unlock()\n\n\t*reply = false\n\tif n.Predecessor == \"\" || between(HashString(n.Predecessor), HashString(arg), n.ID, false) {\n\t\tn.Predecessor = arg\n\t\t*reply = true\n\t}\n\treturn nil\n}", "func (n *StatusChangeNotifier) Notify(evt *Event) {\n\tlog.Println(fmt.Sprintf(\"received event: [%s] %s - %s\",\n\t\tevt.Kind, evt.Created, string(evt.Meta)))\n\n\tfor _, obs := range n.observers[evt.Kind] {\n\t\tif obs != nil {\n\t\t\tif err := obs.Notify(evt); err != nil {\n\t\t\t\tlog.Println(\"notify error:\", err)\n\t\t\t\tn.RemoveObserver(obs)\n\t\t\t} else {\n\t\t\t\tlog.Println(fmt.Sprintf(\n\t\t\t\t\t\"notify [%v] - event %s %s %s\",\n\t\t\t\t\tobs.Id,\n\t\t\t\t\tevt.Kind,\n\t\t\t\t\tevt.Created,\n\t\t\t\t\tevt.Meta,\n\t\t\t\t))\n\t\t\t}\n\t\t}\n\t}\n}", "func (n *ErrorChanNotifier) Notify(c error) {\n\tn.C <- c\n}", "func (b *Broadcaster) Notify(nodeUpdate *NodeUpdate) {\n\tb.observersLock.RLock()\n\tobservers := b.observers\n\tb.observersLock.RUnlock()\n\tfor _, observer := range observers {\n\t\tgo observer.OnUpdate(nodeUpdate)\n\t}\n}", "func Notify(m MasterElector, path, id string, s Service) {\n\tn := &notifier{id: Master(id), service: s}\n\tn.cond = sync.NewCond(&n.lock)\n\tgo n.serviceLoop()\n\tfor {\n\t\tw := m.Elect(path, id)\n\t\tfor {\n\t\t\tevent, open := <-w.ResultChan()\n\t\t\tif !open {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif event.Type != watch.Modified 
{\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\telectedMaster, ok := event.Object.(Master)\n\t\t\tif !ok {\n\t\t\t\tglog.Errorf(\"Unexpected object from election channel: %v\", event.Object)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfunc() {\n\t\t\t\tn.lock.Lock()\n\t\t\t\tdefer n.lock.Unlock()\n\t\t\t\tn.desired = electedMaster\n\t\t\t\tif n.desired != n.current {\n\t\t\t\t\tn.cond.Signal()\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n}", "func (n *NotificationService) Notify(ctx context.Context, sub corgi.Subscription) error {\n\tmsg, err := n.messageGenerator.CreateNotification(ctx, sub)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn n.messageSender.Send(ctx, sub.User, msg)\n}", "func (h *Hub) Notify(e event.Event) {\n\t// if the event has not been previously adopted in any pending run then it is a trigger event\n\tif !e.RunRef.Adopted() {\n\t\terr := h.pendFlowFromTrigger(e)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\t\treturn\n\t}\n\t// otherwise it is an adopted run specific event so and is directed to this host\n\th.dispatchToActive(e)\n}", "func (n Notifier) Notify(status int) error {\n\tif n.webHook == \"\" {\n\t\treturn nil\n\t}\n\tstatusStr := \"\"\n\tif status == PROCESS_STARTED {\n\t\tstatusStr = \"\\\"starting\\\"\"\n\t} else if status == PROCESS_RUNNING {\n\t\tstatusStr = \"\\\"up\\\"\"\n\t} else {\n\t\tstatusStr = \"\\\"crashed\\\"\"\n\t}\n\tbody := `{\n\t\t\t\t\t\t\t\"ps\":\n\t\t\t\t\t\t\t\t{ \"status\":` + statusStr + `}\n\t\t\t\t\t\t}`\n\n\treq, err := http.NewRequest(\"PUT\", n.webHook, bytes.NewBufferString(body))\n\tif err != nil {\n\t\treturn errors.New(\"Error in Notify : Failed to construct the HTTP request\" + err.Error())\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tclient := &http.Client{}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn errors.New(\"Error in Notify : Was not able to trigger the hook!\\n\" + err.Error())\n\t}\n\tdefer resp.Body.Close()\n\n\treturn nil\n}", "func Notify(ctx context.Context, subj, msg 
string) error {\n\tif os.Getenv(\"GOPASS_NO_NOTIFY\") != \"\" || !config.Bool(ctx, \"core.notifications\") {\n\t\tdebug.Log(\"Notifications disabled\")\n\n\t\treturn nil\n\t}\n\tconn, err := dbus.SessionBus()\n\tif err != nil {\n\t\tdebug.Log(\"DBus failure: %s\", err)\n\n\t\treturn err\n\t}\n\n\tobj := conn.Object(\"org.freedesktop.Notifications\", \"/org/freedesktop/Notifications\")\n\tcall := obj.Call(\"org.freedesktop.Notifications.Notify\", 0, \"gopass\", uint32(0), iconURI(), subj, msg, []string{}, map[string]dbus.Variant{\"transient\": dbus.MakeVariant(true)}, int32(3000))\n\tif call.Err != nil {\n\t\tdebug.Log(\"DBus notification failure: %s\", call.Err)\n\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (observer Observer) notify(event Event) {\n\tfmt.Printf(\"Observer %s: New event received from channel %s with message %s \\n\", observer.id, event.channelId, event.value)\n}", "func (t *DIC3) Notify(request NodeInfo, reply *NodeInfo) error {\n\tif predecessor.Chordid == -1 ||\n\t\tbelongsto(request.Chordid, predecessor.Chordid, chordid) == true {\n\t\tpredecessor.Chordid = request.Chordid\n\t\tpredecessor.Address = request.Address\n\t\treturn nil\n\t}\n\treturn nil\n}", "func Notify(client *hipchat.Client, cfg *config.Config) error {\n\treq := &hipchat.NotificationRequest{\n\t\tMessage: cfg.FormattedMessage(),\n\t\tNotify: config.ToBool(cfg.Notify),\n\t\tColor: cfg.Color,\n\t}\n\n\tfmt.Printf(\"%+v\\n\", req)\n\n\t_, err := client.Room.Notification(cfg.Room, req)\n\tfmt.Printf(\"%+v\\n\", err)\n\n\treturn err\n}", "func (l *CommandQueueStatusListener) Notify() {\n\tselect {\n\tcase <-l.closeSignal:\n\tcase l.signal <- true:\n\tdefault:\n\t}\n}", "func (ib *indexBuilder) notify() {\n\tselect {\n\tcase ib.notifyChan <- struct{}{}:\n\tdefault:\n\t}\n}", "func Notify(cmd string, msg string) error {\n\terr := execCmd(cmd)\n\texecCmd(notifyCmd(msg))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}", "func (r BatchJobReplicateV1) Notify(ctx 
context.Context, body io.Reader) error {\n\tif r.Flags.Notify.Endpoint == \"\" {\n\t\treturn nil\n\t}\n\n\tctx, cancel := context.WithTimeout(ctx, 10*time.Second)\n\tdefer cancel()\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, r.Flags.Notify.Endpoint, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif r.Flags.Notify.Token != \"\" {\n\t\treq.Header.Set(\"Authorization\", r.Flags.Notify.Token)\n\t}\n\n\tclnt := http.Client{Transport: getRemoteInstanceTransport}\n\tresp, err := clnt.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\txhttp.DrainBody(resp.Body)\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn errors.New(resp.Status)\n\t}\n\n\treturn nil\n}", "func (sn *ViewUpdateNotification) Notify(sub ViewUpdateSubscriber) {\n\tsn.do(func() {\n\t\tsn.register[sub] = len(sn.subs)\n\t\tsn.subs = append(sn.subs, sub)\n\t})\n}", "func notify(ctx context.Context, report string) error {\n\t_, err := sendRequest(ctx, \"POST\", notifyAddr, report)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (w *EventWaiter) Notify(ctx context.Context, event eh.Event) {\n\n\t// TODO: separation of concerns: this should be factored out into a middleware of some sort...\n\tif e, ok := event.(syncEvent); ok {\n\t\t//fmt.Printf(\"ADD(1) in eventwaiter Notify\\n\")\n\t\te.Add(1)\n\t}\n\n\tw.inbox <- event\n}", "func (r *Room) Notify(msg interface{}) {\n\tfor _, m := range r.mobs.GetAll() {\n\t\tm.Send(msg)\n\t}\n\tr.Send(msg)\n}", "func (k *k8sService) notify(e types.K8sPodEvent) error {\n\tvar err error\n\tk.Lock()\n\tdefer k.Unlock()\n\tfor _, o := range k.observers {\n\t\tlog.Infof(\"Calling observer: with k8sPodEvent: %v Name: %s Status: %v\", e.Type, e.Pod.Name, e.Pod.Status)\n\t\ter := o.OnNotifyK8sPodEvent(e)\n\t\tif err == nil && er != nil {\n\t\t\terr = er\n\t\t}\n\t}\n\treturn err\n}", "func (z *Zone) Notify() (*NotifyResult, error) {\n\tnotifyResult := &NotifyResult{}\n\tmyError := new(Error)\n\tnotifySling := 
z.PowerDNSHandle.makeSling()\n\tresp, err := notifySling.New().Put(strings.TrimRight(z.URL, \".\")+\"/notify\").Receive(notifyResult, myError)\n\n\tif err == nil && resp.StatusCode >= 400 {\n\t\tmyError.Message = strings.Join([]string{resp.Status, myError.Message}, \" \")\n\t\treturn &NotifyResult{}, myError\n\t}\n\n\treturn notifyResult, err\n}", "func (r NopReporter) AutoNotify(ctx context.Context) {}", "func (notifier *JenkinsNotifier) Notify() error {\n\tif notifier.JenkinsProject.Name == \"\" || notifier.JenkinsProject.Token == \"\" {\n\t\treturn errors.New(\"Jenkins Project config is not correct.\")\n\t}\n\n\turl := notifier.notifyUrl()\n\treq, err := http.NewRequest(\"POST\", url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tusername, apiToken := notifier.UserName, notifier.UserApiToken\n\tif notifier.JenkinsProject.HasJenkinsConfig() {\n\t\tusername, apiToken = notifier.JenkinsProject.Username, notifier.JenkinsProject.UserApiToken\n\t}\n\treq.SetBasicAuth(username, apiToken)\n\tresp, err := http.DefaultClient.Do(req)\n\tif err == nil && resp.StatusCode == http.StatusOK {\n\t\tdefer resp.Body.Close()\n\t\tlogs.Info(\"Notified to project \", notifier.JenkinsProject.Name)\n\t\treturn nil\n\t} else {\n\t\tif err == nil {\n\t\t\treturn errors.New(\"Notify Status is \" + resp.Status)\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n}", "func (c *Connector) Notify(route string, v interface{} ) error {\n\n\tdata, err := json.Marshal( v )\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmsg := &message.Message{\n\t\tType: message.Notify,\n\t\tRoute: route,\n\t\tData: data,\n\t}\n\treturn c.sendMessage(msg)\n}", "func (i *Indicator) Notify(title string, message string, notifyIcon NotifyIcon, indicatorIcon Icon) {\n\tswitch i.config.notifyLevel {\n\tcase NotifyLevelOff:\n\t\treturn\n\tcase NotifyLevelMin:\n\t\ti.SetIcon(indicatorIcon)\n\tcase NotifyLevelMax:\n\t\ti.SetIcon(indicatorIcon)\n\t\tvar icoName string\n\t\tswitch notifyIcon {\n\t\tcase 
NotifyIconNil:\n\t\t\ticoName = \"\"\n\t\tcase NotifyIconNoConn:\n\t\t\ticoName = \"liqo-no_conn.png\"\n\t\tcase NotifyIconDefault:\n\t\t\ticoName = \"liqo-black.png\"\n\t\tcase NotifyIconGreen:\n\t\t\ticoName = \"liqo-green.png\"\n\t\tcase NotifyIconGray:\n\t\t\ticoName = \"liqo-gray.png\"\n\t\tcase NotifyIconOrange:\n\t\t\ticoName = \"liqo-orange.png\"\n\t\tdefault:\n\t\t\ticoName = \"liqo-black.png\"\n\t\t}\n\t\tif !i.gProvider.Mocked() {\n\t\t\t/*The golang guidelines suggests error messages should not start with a capitalized letter.\n\t\t\tTherefore, since Notify sometimes receives an error as 'message', the Capitalize() function\n\t\t\tovercomes this problem, correctly displaying the string to the user.*/\n\t\t\t_ = bip.Notify(title, stringUtils.Capitalize(message), filepath.Join(i.config.notifyIconPath, icoName))\n\t\t}\n\tdefault:\n\t\treturn\n\t}\n}", "func (l *Lock) Notify(blockTime math.Timestamp, weightedAverageAge math.Duration) error {\n\tif l.UnlocksOn != nil {\n\t\treturn errors.New(\"already notified\")\n\t}\n\tuo := blockTime.Add(l.NoticePeriod)\n\tl.UnlocksOn = &uo\n\treturn nil\n}", "func (w *Watches) Notify(ctx context.Context, name string, events, cookie uint32, et EventType, unlinked bool) {\n\tvar hasExpired bool\n\tw.mu.RLock()\n\tfor _, watch := range w.ws {\n\t\tif unlinked && watch.ExcludeUnlinked() && et == PathEvent {\n\t\t\tcontinue\n\t\t}\n\t\tif watch.Notify(name, events, cookie) {\n\t\t\thasExpired = true\n\t\t}\n\t}\n\tw.mu.RUnlock()\n\n\tif hasExpired {\n\t\tw.cleanupExpiredWatches(ctx)\n\t}\n}", "func (c *Event) Notify(correlationId string, args *run.Parameters) {\n\tfor _, listener := range c.listeners {\n\t\tlistener.OnEvent(correlationId, c, args)\n\t}\n}", "func (s *Subject) Notify(message string) {\n\ts.mu.RLock()\n\tobservers := s.observers\n\ts.mu.RUnlock()\n\n\tfor _, observer := range observers {\n\t\tif observer != nil {\n\t\t\tobserver.send(message)\n\t\t}\n\t}\n}", "func (n *NotifyMail) Notify(mail M.Mail) {\n\tif 
err := n.store.Insert(&mail); err != nil {\n\t\tn.errChan.In() <- err\n\t\treturn\n\t}\n\tn.sendChan <- mail\n}", "func (p *Promises) Notify(c chan *irc.Message) {\n\tgo func() {\n\t\tfor {\n\t\t\ttime.Sleep(*config.NotifyInterval)\n\n\t\t\tif *config.NotifyRepaired {\n\t\t\t\tfor _, v := range p.Repaired {\n\t\t\t\t\tc <- &irc.Message{\n\t\t\t\t\t\tCommand: irc.PRIVMSG,\n\t\t\t\t\t\tParams: []string{*config.Channels},\n\t\t\t\t\t\tTrailing: v.ToString(),\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif *config.NotifyFailed {\n\t\t\t\tfor _, v := range p.Failed {\n\t\t\t\t\tc <- &irc.Message{\n\t\t\t\t\t\tCommand: irc.PRIVMSG,\n\t\t\t\t\t\tParams: []string{*config.Channels},\n\t\t\t\t\t\tTrailing: v.ToString(),\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}", "func (c *Client) Notify(node *Node) error {\n\tctx, cancel := context.WithTimeout(context.Background(), c.rpcTimeout)\n\tdefer cancel()\n\treq := &pb.Node{Id: node.ID, Addr: node.Addr}\n\t_, err := c.rpcClient.Notify(ctx, req)\n\treturn err\n\n}", "func (p Ping) notify(pingErr error) error {\n\tenvelope_ := adapters.Envelope{\n\t\tTitle: \"actuator-failed\",\n\t\tRecipient: \"*\",\n\t}\n\t// TODO proper protocol ?\n\tpayload := fmt.Sprintf(\"endpoint=%s actuator=ping err=%s\", p.Endpoint, pingErr)\n\tif err := p.Adapter.Send(envelope_, payload); err != nil {\n\t\tp.logger.Error(\"Error sending event: %s\", err)\n\t\treturn err\n\t}\n\tp.logger.Info(\"Event '%s' dispatched\", envelope_.Title)\n\treturn pingErr\n}", "func (a *API) Notify(userID uint, msg *model.Message) {\n\tif clients, ok := a.getClients(userID); ok {\n\t\tgo func() {\n\t\t\tfor _, c := range clients {\n\t\t\t\tc.write <- msg\n\t\t\t}\n\t\t}()\n\t}\n}", "func (n *WebNotifier) Notify(topic string, message []byte) error {\n\tvar allErrs error\n\n\tfor _, notifier := range n.notifiers {\n\t\terr := notifier.Notify(topic, message)\n\t\tallErrs = appendError(allErrs, err)\n\t}\n\n\treturn allErrs\n}", "func (agent *Agent) Notify() chan *Request 
{\n\treturn agent.notification\n}", "func (b *MemoryBackend) Notify(_ Feature, payload Payload) error {\n\tnotice, ok := payload.(*Notice)\n\tif !ok {\n\t\treturn fmt.Errorf(\"memory backend does not support payload of type %q\", reflect.TypeOf(payload))\n\t}\n\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\tb.Notices = append(b.Notices, notice)\n\n\treturn nil\n}", "func Notify(e Event) {\n\tglog.Infof(\"event '%s' is notified\", NameOfEvent(e.GetType()))\n\te.SetBrokerId(base.GetBrokerId())\n\tservice := base.GetService(ServiceName).(*eventService)\n\tservice.notify(e)\n}", "func (n *Notifier) Notify(ctx context.Context, text, language string) error {\n\tu := &url.URL{\n\t\tScheme: \"https\",\n\t\tHost: \"translate.google.com\",\n\t\tPath: \"translate_tts\",\n\t}\n\n\tq := u.Query()\n\tq.Add(\"ie\", \"UTF-8\")\n\tq.Add(\"q\", text)\n\tq.Add(\"tl\", language)\n\tq.Add(\"client\", \"tw-ob\")\n\tu.RawQuery = q.Encode()\n\n\treturn n.Play(ctx, u.String())\n}", "func (n *Node) notify(rn *comm.Rnode) {\n\tif n.prev.ID.IsEqual(n.ID) || rn.ID.IsBetween(n.prev.ID, n.ID) {\n\t\tn.setPredecessor(rn)\n\t}\n\tif alive, _ := n.remote.IsAlive(*n.prev); !alive {\n\t\tn.setPredecessor(rn)\n\t}\n\n\tif n.fingers[0].node.ID.IsEqual(n.ID) {\n\t\tn.setSuccessor(rn)\n\t}\n\tif len(n.successors) == 1 && n.successors[0].ID.IsEqual(n.ID) {\n\t\tn.setSuccessor(rn)\n\t}\n\n\t//TODO: Handle key replication\n}", "func (f *Sender) Notify(msg BotMessageInterface, callbackNotification string, showAlert bool) {\n\tf.bot.AnswerCallbackQuery(tgbotapi.CallbackConfig{\n\t\tCallbackQueryID: msg.CallbackID(),\n\t\tShowAlert: showAlert,\n\t\tText: callbackNotification,\n\t})\n}", "func Notify(to tb.Recipient, b *tb.Bot, action tb.ChatAction) {\n\terr := b.Notify(to, action)\n\tif err != nil {\n\t\tlogrus.Error(err)\n\t}\n}", "func (u *user) notify() {\n\tfmt.Printf(\"Sending user email to %s<%s>\\n\",\n\t\tu.name,\n\t\tu.email)\n}", "func (d delegate) NotifyMsg(data []byte) {}", "func (u *user) notify() 
{\n\tfmt.Printf(\"Sending user email to %s %s\\n\", u.name, u.email)\n}", "func (u user) notify() {\n\tfmt.Printf(\"Sending User Email to %s<%s>\\n\",\n\t\tu.name,\n\t\tu.email)\n}", "func (n *Notifier) Notify() {\n\tvar wg sync.WaitGroup\n\tlog.Println(\"Sending Notification of Site Contacts about\", n.Subject+\"...\")\n\tfor _, c := range n.Site.Contacts {\n\t\tif c.SmsActive || c.EmailActive {\n\t\t\t// Notify contact\n\t\t\twg.Add(1)\n\t\t\tgo send(c, n.Message, n.Subject, n.SendEmail, n.SendSms, &wg)\n\t\t} else {\n\t\t\tlog.Println(\"No active contact methods for\", c.Name)\n\t\t}\n\t}\n\twg.Wait()\n}", "func (g *Pin) Notify(sig ...os.Signal) {\n\tc := make(chan os.Signal)\n\tsignal.Notify(c, sig...)\n\tgo func() {\n\t\tn := 0\n\t\tfor sig := range c {\n\t\t\tif n == 1 {\n\t\t\t\tpanic(\"got too many signals\")\n\t\t\t}\n\t\t\tg.Pull(fmt.Errorf(\"Recieved signal %s\", sig))\n\t\t\tn++\n\t\t}\n\t}()\n}", "func Notify(title, message string) error {\n\tnotification := toast.Notification{\n\t\tAppID: \"ntify\",\n\t\tTitle: title,\n\t\tMessage: message,\n\t\tIcon: \"\",\n\t\tActions: nil}\n\n\treturn notification.Push()\n}", "func (u *user) notify() {\n\tfmt.Printf(\"Sending User Email To %s<%s>\\n\",\n\t\tu.name,\n\t\tu.email)\n}", "func (u *user) notify() {\n\tfmt.Printf(\"Send email to %s <%s>\\n\", u.name, u.email)\n}", "func (c *Client) Notify(text string, language ...string) error {\n\tlang := c.lang\n\tif len(language) != 0 {\n\t\tlang = language[0]\n\t}\n\tif c.accent != \"\" {\n\t\tlang = fmt.Sprintf(\"%s-%s\", lang, c.accent)\n\t}\n\n\turl, err := googletts.GetTTSURL(text, lang)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.Play(url)\n}", "func (req *BaseRequest) Notify(err error) {\n\treq.Done <- err\n}", "func (mas MetricAlerts) Notify() error {\n\tclient, err := metrics.NewClient()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to create Prometheus client: %v\", err)\n\t}\n\n\tfor _, ma := range mas {\n\t\tlog.Printf(\"Checking %s\", 
ma.Name)\n\t\tif err := ma.Check(client); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (a *admin) notify() {\n\tfmt.Printf(\"Sending admin email to %s<%s>\\n\",\n\t\ta.name,\n\t\ta.email)\n}", "func Notify(id int) {\n\tresp := \".\\n\"\n\tresp += \"**\" + Config.Feeds[id].Feed.Title + \": **\" + \"\\n\"\n\tresp += Config.Feeds[id].Feed.Items[0].Date.String() + \"\\n\\n\"\n\t// If a 9front feed, extract the user ☺\n\tif strings.Contains(Config.Feeds[id].Feed.Items[0].Link, \"http://code.9front.org/hg/\") {\n\t\tlines := strings.Split(Config.Feeds[id].Feed.Items[0].Summary, \"\\n\")\n\t\tfor i, v := range lines {\n\t\t\tif strings.Contains(v, \"<th style=\\\"text-align:left;vertical-align:top;\\\">user</th>\") {\n\t\t\t\tline := html.UnescapeString((lines[i+1])[6:len(lines[i+1])-5])\n\t\t\t\tresp += line + \"\\n\\n\"\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tresp += \"`\" + Config.Feeds[id].Feed.Items[0].Title + \"`\" + \"\\n\"\n\tresp += \"\\n\" + Config.Feeds[id].Feed.Items[0].Link + \"\\n\"\n\tConfig.Feeds[id].Feed.Items[0].Read = true\n\tresp += \"\\n\"\n\t\n\t// Loop through subbed chans and post notification message\n\tfmt.Println(\"Looping through subs to notify...\")\n\tfor _, v := range Config.Subs {\n\t\tif v.SubID == id {\n\t\t\tSession.ChannelMessageSend(v.ChanID, resp)\n\t\t}\n\t\ttime.Sleep(10 * time.Millisecond)\n\t}\n\n\tfmt.Println(\"No new notifys for \", Config.Feeds[id].Feed.UpdateURL)\n\t\n\t/* Enable for logging if subs break\n\tfmt.Println(Config.Feeds[id].Feed.Items[0])\n\tfmt.Println(Config.Feeds[id].Feed.Items[len(Config.Feeds[id].Feed.Items)-1])\n\t*/\n}", "func Notify(e ExecutionEvent) {\n\tfor _, c := range subscriberRegistry[e.Topic] {\n\t\tc <- e\n\t}\n}", "func (m *Map) Notify(c chan<- string, keys ...string) {\n\tm.bus.Notify(c, keys)\n}", "func Notify(err interface{}, req *http.Request) error {\n\tif Airbrake != nil {\n\t\treturn Airbrake.Notify(err, req)\n\t}\n\tlog.Printf(\"[AIRBRAKE] %v\", err)\n\treturn 
nil\n}", "func (tx Transmitter[M]) Notify(message M) {\n\tfor _, ch := range tx.Chs {\n\t\tif ch != nil {\n\t\t\tch <- message\n\t\t}\n\t}\n}", "func (p *Note) Notification(mt, msg, buid string, out interface{}) error {\n\tctx, cancel := context.WithTimeout(context.Background(), p.Timeout)\n\tdefer cancel()\n\treturn p.client.Do(p.note(ctx, mt, msg, buid), out)\n}", "func Notify(title, message string) error {\n\treturn errors.New(\"beeep: unsupported operating system: %v\", runtime.GOOS)\n}", "func (u user) notify() {\n\tfmt.Printf(\"Sending an email to %s<%s>\\n\", u.name, u.email)\n}", "func (r *room) notify(method string, data interface{}) {\n\tpeers := r.getPeers()\n\tfor _, p := range peers {\n\t\tp.notify(method, data)\n\t}\n}" ]
[ "0.8132382", "0.79153454", "0.73934114", "0.73212564", "0.72838044", "0.7273848", "0.72720206", "0.7210515", "0.71945655", "0.7182227", "0.7165415", "0.7159474", "0.7114332", "0.7078729", "0.7068588", "0.7026278", "0.69815", "0.696589", "0.69469804", "0.69469804", "0.69469804", "0.69469804", "0.6937568", "0.69009155", "0.6886554", "0.6822638", "0.6810061", "0.67947567", "0.6789999", "0.67788386", "0.67720425", "0.6766839", "0.67526776", "0.67390716", "0.6717301", "0.6713334", "0.6712102", "0.6695354", "0.66752946", "0.6644453", "0.66431975", "0.663261", "0.6626374", "0.6618288", "0.6607347", "0.6602734", "0.6602063", "0.6598576", "0.6580561", "0.65786946", "0.657333", "0.6573276", "0.65663075", "0.6561165", "0.6557942", "0.6538158", "0.6533936", "0.652405", "0.6510681", "0.6509748", "0.65047324", "0.64944637", "0.6491393", "0.64906454", "0.6479958", "0.6476687", "0.64714265", "0.6467389", "0.6466137", "0.6459618", "0.64563894", "0.64505005", "0.64224195", "0.64185953", "0.64167315", "0.6414492", "0.6399993", "0.63951", "0.63844585", "0.63674796", "0.63664913", "0.6359819", "0.6357722", "0.6357674", "0.6352841", "0.6333448", "0.6323714", "0.6317469", "0.63140833", "0.63064843", "0.6300937", "0.6299059", "0.62988293", "0.6295612", "0.6287836", "0.6285251", "0.6284661", "0.62775844", "0.62443805", "0.62302804" ]
0.6415623
75
String returns a JSON representation of the model
func (o *Patchintegrationactionfields) String() string { o.RequestMappings = []Requestmapping{{}} j, _ := json.Marshal(o) str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\u`, `\u`, -1)) return str }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Model) JSON() string {\n\tret := \"\"\n\tif m.Emm != nil {\n\t\tret += m.Emm.JSON()\n\t}\n\tif m.Snow != nil {\n\t\tret += m.Snow.JSON()\n\t}\n\treturn ret\n}", "func Stringnify(model interface{}) string {\n\tbyteModel, err := json.Marshal(model)\n\tif err != nil {\n\t\tfmt.Println(\"model Stringnify error:\", err)\n\t}\n\treturn string(byteModel)\n}", "func (s *Siegfried) JSON() string {\n\tversion := config.Version()\n\tstr := fmt.Sprintf(\n\t\t\"{\\\"siegfried\\\":\\\"%d.%d.%d\\\",\\\"scandate\\\":\\\"%v\\\",\\\"signature\\\":\\\"%s\\\",\\\"created\\\":\\\"%v\\\",\\\"identifiers\\\":[\",\n\t\tversion[0], version[1], version[2],\n\t\ttime.Now().Format(time.RFC3339),\n\t\tconfig.SignatureBase(),\n\t\ts.C.Format(time.RFC3339))\n\tfor i, id := range s.ids {\n\t\tif i > 0 {\n\t\t\tstr += \",\"\n\t\t}\n\t\td := id.Describe()\n\t\tstr += fmt.Sprintf(\"{\\\"name\\\":\\\"%s\\\",\\\"details\\\":\\\"%s\\\"}\", d[0], d[1])\n\t}\n\tstr += \"],\"\n\treturn str\n}", "func (o *Createemailrequest) String() string {\n \n \n \n o.SkillIds = []string{\"\"} \n \n \n o.Attributes = map[string]string{\"\": \"\"} \n \n \n \n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (m *Meta) JSON() string {\n\tj, _ := json.MarshalIndent(m, \"\", \" \")\n\treturn string(j)\n}", "func (o *Predictor) String() string {\n o.Queues = []Addressableentityref{{}} \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Coretype) String() string {\n \n \n \n \n \n o.ValidationFields = []string{\"\"} \n \n o.ItemValidationFields = []string{\"\"} \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Object) JSON() string {\n\tif o.URL != nil {\n\t\to.VersionID = 
o.URL.VersionID\n\t}\n\treturn strutil.JSON(o)\n}", "func (s CreateModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Entitytypecriteria) String() string {\n \n o.Values = []string{\"\"} \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (b BudgetLine) String() string {\n\tjb, _ := json.Marshal(b)\n\treturn string(jb)\n}", "func (b BudgetLines) String() string {\n\tjb, _ := json.Marshal(b)\n\treturn string(jb)\n}", "func (l logrec) String() string {\n\tout, _ := json.Marshal(&l)\n\treturn string(out)\n}", "func (s DataModel) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Timeoffbalancerequest) String() string {\n o.ActivityCodeIds = []string{\"\"} \n o.DateRanges = []Localdaterange{{}} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (m Mall) String() string {\n\ts, err := json.Marshal(m)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\treturn string(s)\n}", "func (o *Commonruleconditions) String() string {\n o.Clauses = []Commonruleconditions{{}} \n o.Predicates = []Commonrulepredicate{{}} \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (z Zamowienia) String() string {\n\tjz, _ := json.Marshal(z)\n\treturn string(jz)\n}", "func (r Review) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (uc *UseCase) String() string {\n\treturn string(uc.JSON())\n}", "func (s Model) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Model) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Model) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Model) String() string {\n\treturn 
awsutil.Prettify(s)\n}", "func (s Model) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Integrationtype) String() string {\n\tj, _ := json.Marshal(o)\n\tstr, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n\treturn str\n}", "func (s GetModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Knowledgedocumentreq) String() string {\n \n \n o.Alternatives = []Knowledgedocumentalternative{{}} \n \n o.LabelIds = []string{\"\"} \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (b Bom) String() string {\n\tjb, _ := json.Marshal(b)\n\treturn string(jb)\n}", "func ( fq *Fq_req ) To_json( ) ( *string, error ) {\n\tjbytes, err := json.Marshal( fq )\t\t\t// bundle into a json string\n\n\ts := string( jbytes )\n\n\treturn &s, err\n}", "func (o *Intentdefinition) String() string {\n \n o.EntityTypeBindings = []Namedentitytypebinding{{}} \n o.Utterances = []Nluutterance{{}} \n o.AdditionalLanguages = map[string]Additionallanguagesintent{\"\": {}} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Recording) String() string {\n \n \n \n \n \n \n o.Annotations = []Annotation{{}} \n o.Transcript = []Chatmessage{{}} \n o.EmailTranscript = []Recordingemailmessage{{}} \n o.MessagingTranscript = []Recordingmessagingmessage{{}} \n \n \n o.MediaUris = map[string]Mediaresult{\"\": {}} \n \n \n \n \n \n \n \n \n \n \n \n \n o.Users = []User{{}} \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Createtimeofflimitrequest) String() string {\n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (tr TransmissionRecord) 
String() string {\n\tout, err := json.Marshal(tr)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\treturn string(out)\n}", "func (t Transaction) JSONString() string {\n\ts, _ := toJSONString(t)\n\treturn s\n}", "func (z Zamowienium) String() string {\n\tjz, _ := json.Marshal(z)\n\treturn string(jz)\n}", "func (m *Model) String() string {\n\tret := \"Model:\\n\"\n\tif m.Emm != nil {\n\t\tret += m.Emm.String() + \"\\n\"\n\t}\n\tif m.Snow != nil {\n\t\tret += m.Snow.String() + \"\\n\"\n\t}\n\treturn ret\n}", "func (o *Learningassignmentbulkaddresponse) String() string {\n o.Entities = []Learningassignment{{}} \n o.DisallowedEntities = []Disallowedentitylearningassignmentitem{{}} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (e *Entity) String() (string, error) {\n\tj, err := json.Marshal(e)\n\tif err == nil {\n\t\treturn string(j), err\n\t}\n\treturn \"\", err\n}", "func (o *Createperformanceprofile) String() string {\n \n \n \n o.ReportingIntervals = []Reportinginterval{{}} \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (p DomainModelName) String() string {\n\treturn string(p)\n}", "func (u Users) String() string {\n\tju, _ := json.Marshal(u)\n\treturn string(ju)\n}", "func (u Users) String() string {\n\tju, _ := json.Marshal(u)\n\treturn string(ju)\n}", "func (u Users) String() string {\n\tju, _ := json.Marshal(u)\n\treturn string(ju)\n}", "func (u Users) String() string {\n\tju, _ := json.Marshal(u)\n\treturn string(ju)\n}", "func (u Users) String() string {\n\tju, _ := json.Marshal(u)\n\treturn string(ju)\n}", "func (u Users) String() string {\n\tju, _ := json.Marshal(u)\n\treturn string(ju)\n}", "func (list *LinkList) toJSON() string {\n\t// @TODO Get a module for this\n\treturn \"\"\n}", "func (p *Parms) String() string {\n\tout, _ := json.MarshalIndent(p, 
\"\", \"\\t\")\n\treturn string(out)\n}", "func (b Boms) String() string {\n\tjb, _ := json.Marshal(b)\n\treturn string(jb)\n}", "func (s LanguageModel) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Campaign) String() string {\n \n \n \n \n \n \n \n \n \n o.PhoneColumns = []Phonecolumn{{}} \n \n o.DncLists = []Domainentityref{{}} \n \n \n \n \n \n o.RuleSets = []Domainentityref{{}} \n \n \n \n \n o.ContactSorts = []Contactsort{{}} \n \n \n \n o.ContactListFilters = []Domainentityref{{}} \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (r Roar) toJson() string {\n var result []byte\n result, _ = json.Marshal(r)\n return string(result)\n}", "func (inst *UserN) String() string {\n\trs, _ := json.Marshal(inst)\n\treturn string(rs)\n}", "func (t Terms) String() string {\n\tjt, _ := json.Marshal(t)\n\treturn string(jt)\n}", "func toJSON(v interface{}) string {\n\toutput, _ := json.Marshal(v)\n\treturn string(output)\n}", "func (info *SchemaInfo) JSON() string {\n\treturn fmt.Sprintf(\n\t\t`{\"name\": \"%s\",\"latestRev\": %d,\"desc\":`+\n\t\t\t` \"%s\",\"isActive\": %t,\"hasDraft\": %t}`,\n\t\tinfo.Name, info.LatestRevision,\n\t\tinfo.Description, info.IsActive, info.HasDraft)\n}", "func (s CreateModelInput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateModelInput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateModelInput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (st SourceTables) String() string {\n\ts, _ := st.toJSON()\n\treturn s\n}", "func (s CreateLanguageModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Knowledgedocumentbulkrequest) String() string {\n \n \n \n o.Categories = []Documentcategoryinput{{}} \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o 
*Limitchangerequestdetails) String() string {\n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (f Featureds) String() string {\n\tjf, _ := json.Marshal(f)\n\treturn string(jf)\n}", "func (o *Knowledgeimport) String() string {\n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func ToJSONString(i interface{}) string {\n\tjsonStr, _ := json.Marshal(i)\n\treturn string(jsonStr)\n}", "func (inst *UserExtN) String() string {\n\trs, _ := json.Marshal(inst)\n\treturn string(rs)\n}", "func (q Quests) String() string {\n\tjc, _ := json.Marshal(q)\n\treturn string(jc)\n}", "func (o *Emailcampaignschedule) String() string {\n \n \n o.Intervals = []Scheduleinterval{{}} \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (u User) JSONString() ([]byte, error) {\n\tJSONUser, err := json.Marshal(u)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn JSONUser, nil\n}", "func (st SourceTables) toJSON() (string, error) {\n\tdata, err := json.Marshal(st)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(data), nil\n}", "func (object Object) String() string {\n\tb, _ := json.Marshal(object)\n\treturn string(b)\n}", "func (o *Meteredevaluationassignment) String() string {\n \n o.Evaluators = []User{{}} \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Documentbodyvideoproperties) String() string {\n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (r RoomOccupancies) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (t 
Technology) String() string {\n\tjt, _ := json.Marshal(t)\n\treturn string(jt)\n}", "func (r Rooms) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (user *User) String() string {\n\tjson, _ := json.MarshalIndent(*user, \" \", \" \")\n\treturn string(json)\n}", "func (u User) String() string {\n\tju, _ := json.Marshal(u)\n\treturn string(ju)\n}", "func (u User) String() string {\n\tju, _ := json.Marshal(u)\n\treturn string(ju)\n}", "func (u User) String() string {\n\tju, _ := json.Marshal(u)\n\treturn string(ju)\n}", "func (u User) String() string {\n\tju, _ := json.Marshal(u)\n\treturn string(ju)\n}", "func (u User) String() string {\n\tju, _ := json.Marshal(u)\n\treturn string(ju)\n}", "func (u User) String() string {\n\tju, _ := json.Marshal(u)\n\treturn string(ju)\n}", "func (s *TrainingJob) String() string {\n\tb, _ := json.MarshalIndent(s, \"\", \" \")\n\treturn fmt.Sprintf(\"%s\", b)\n}", "func (c Contracts) String() string {\n\tjc, _ := json.Marshal(c)\n\treturn string(jc)\n}", "func (g Groups) String() string {\n\tjg, _ := json.Marshal(g)\n\treturn string(jg)\n}", "func (r SendAll) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (o *Updatebusinessunitrequest) String() string {\n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (e Data) String() string {\n\tj, _ := e.MarshalJSON()\n\treturn string(j)\n}", "func (o *Apiusageclientquery) String() string {\n \n \n o.Metrics = []string{\"\"} \n o.GroupBy = []string{\"\"} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func ToJsonStr(v interface{}) string {\n\tbuffer := &bytes.Buffer{}\n\tencoder := json.NewEncoder(buffer)\n\tencoder.SetEscapeHTML(false)\n\terr := encoder.Encode(v)\n\tif err != nil 
{\n\t\treturn \"\"\n\t}\n\ts := string(buffer.Bytes())\n\treturn strings.TrimRight(s, \"\\n\")\n}", "func (f BodyField) String() string {\n\treturn toJSONDot(f)\n}", "func (o *Addshifttraderequest) String() string {\n \n \n \n \n o.AcceptableIntervals = []string{\"\"} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (r Resiliency) String() string {\n\tb, _ := json.Marshal(r)\n\treturn string(b)\n}", "func getRoarsAsJson() string {\n return roarList.toJson()\n}", "func (s ModelDashboardModel) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Smsavailablephonenumberentitylisting) String() string {\n o.Entities = []Smsavailablephonenumber{{}} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (t Things) String() string {\n\tjt, _ := json.Marshal(t)\n\treturn string(jt)\n}" ]
[ "0.7602305", "0.6521702", "0.6492924", "0.6464296", "0.6367784", "0.6342571", "0.63273394", "0.62768203", "0.6276228", "0.6276228", "0.62744915", "0.6235233", "0.6219029", "0.61787146", "0.6173133", "0.61425954", "0.61332303", "0.6116796", "0.6115479", "0.6108615", "0.61083996", "0.6100038", "0.60991347", "0.60991347", "0.60991347", "0.60991347", "0.60966957", "0.6093213", "0.6091564", "0.6086869", "0.60849756", "0.6084647", "0.60824347", "0.6080396", "0.6072454", "0.6067901", "0.6044965", "0.6031801", "0.60298806", "0.60203975", "0.6017558", "0.6001314", "0.6000053", "0.6000053", "0.6000053", "0.6000053", "0.6000053", "0.6000053", "0.5998184", "0.59968114", "0.5996285", "0.5996093", "0.59912616", "0.5985088", "0.59845644", "0.598279", "0.5976483", "0.59735936", "0.59709334", "0.59709334", "0.59709334", "0.5968139", "0.5965026", "0.59642845", "0.59636605", "0.59560406", "0.5954005", "0.5946861", "0.59461856", "0.59419125", "0.5940926", "0.593401", "0.5930851", "0.5927813", "0.59271365", "0.5926516", "0.59202254", "0.59172976", "0.59140325", "0.591267", "0.59058434", "0.59058434", "0.59058434", "0.59058434", "0.59058434", "0.59058434", "0.5904628", "0.590282", "0.59019893", "0.5898997", "0.5895401", "0.5890311", "0.5883264", "0.5882092", "0.5879223", "0.5878209", "0.58758694", "0.5870415", "0.58690256", "0.58688974", "0.5866819" ]
0.0
-1
/ Outgoing connections Using an outgoing connection is a snap. A `net.Conn` satisfies the io.Reader and `io.Writer` interfaces, so we can treat a TCP connection just like any other `Reader` or `Writer`. Open connects to a TCP Address. It returns a TCP connection armed with a timeout and wrapped into a buffered ReadWriter.
func Open(addr string) (*bufio.ReadWriter, error) { // Dial the remote process. // Note that the local port is chosen on the fly. If the local port // must be a specific one, use DialTCP() instead. log.Println("Dial " + addr) conn, err := net.Dial("tcp", addr) if err != nil { return nil, errors.Wrap(err, "Dialing "+addr+" failed") } return bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)), nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Open(addr string) (*bufio.ReadWriter, error) {\n\tlog.Println(\"Dial \" + addr)\n\tconn, err := net.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Dialing \"+addr+\" failed\")\n\t}\n\treturn bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)), nil\n}", "func NewConn(conn net.Conn, timeout time.Duration, maxReadBuffer int64) *Conn {\n\treturn &Conn{conn, timeout, maxReadBuffer}\n}", "func NewConn(conn net.Conn, timeout time.Duration) *Conn {\n\tpConn := &Conn{\n\t\tbufReader: bufio.NewReader(conn),\n\t\tconn: conn,\n\t\tproxyHeaderTimeout: timeout,\n\t}\n\treturn pConn\n}", "func (tc *TimeoutConn) NetConn() net.Conn {\n\treturn tc.Conn\n}", "func New(rawConn net.Conn) *Conn {\n\tif _, ok := rawConn.(*Conn); ok {\n\t\treturn rawConn.(*Conn)\n\t}\n\n\treturn &Conn{\n\t\tConn: rawConn,\n\t\tisClosed: atomic.NewBool(false),\n\t\tcreatedAt: time.Now(),\n\t}\n}", "func New(nc net.Conn) (*Conn, error) {\n\t// XXX: setting deadline to now + 10 min\n\tif err := nc.SetDeadline(time.Now().Add(10 * time.Minute)); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Conn{c: nc, r: bufio.NewReader(nc), w: bufio.NewWriter(nc)}, nil\n}", "func WrapConn(c net.Conn) net.Conn {\n\tif c == nil {\n\t\treturn nil\n\t}\n\n\twg.Add(1)\n\treturn &conn{\n\t\tConn: c,\n\t\tid: atomic.AddUint64(&idleSet.id, 1),\n\t}\n}", "func NewConn(conn net.Conn) Conn {\n\treturn &connWrap{\n\t\tConn: conn,\n\t\tbrw: bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)),\n\t}\n}", "func NewConn(conn net.Conn, headerTimeout time.Duration, maxProxyHeaderBytes int64) *Conn {\n\tif headerTimeout <= 0 {\n\t\theaderTimeout = defaultProxyHeaderTimeout\n\t}\n\tif maxProxyHeaderBytes <= 0 {\n\t\tmaxProxyHeaderBytes = defaultMaxProxyHeaderBytes\n\t}\n\n\tpConn := new(Conn)\n\tpConn.headerTimeout = headerTimeout\n\tpConn.headerLimit = maxProxyHeaderBytes\n\tpConn.conn = conn\n\tpConn.lmtReader = io.LimitReader(conn, 
pConn.headerLimit).(*io.LimitedReader)\n\tpConn.bufReader = bufio.NewReader(pConn.lmtReader)\n\treturn pConn\n}", "func NewWriterConn(protocol string, host string, timeout time.Duration) (net.Conn, error) {\n\tif protocol == \"tcp\" || protocol == \"udp\" || protocol == \"unix\" {\n\t\tconn, err := net.DialTimeout(protocol, host, timeout)\n\t\treturn conn, err\n\t} else if protocol == \"http\" || protocol == \"https\" {\n\t\treturn NewWriterHttpConn(protocol, host, timeout)\n\t}\n\treturn nil, fmt.Errorf(\"Invalid connection protocol\")\n}", "func (dsf *testConnSource) newConn() (net.Conn, error) {\n\t// If we get called again after already opening a connection\n\t// (during the event subscription test for instance)\n\t// then make sure to close the existing connection (if there\n\t// is one) so we don't leak connections\n\tif server != nil {\n\t\tserver.Close()\n\t}\n\n\tserver, client = net.Pipe()\n\treturn client, nil\n}", "func NewConn() *Conn {\n\treturn &Conn{\n\t\tReceiveNow: make(chan bool),\n\t\tstats: make(map[cmdHash]int),\n\t}\n}", "func newConnection(address string, timeout time.Duration) (*Connection, error) {\n\tnewConn := &Connection{dataBuffer: bufPool.Get().([]byte)}\n\truntime.SetFinalizer(newConn, connectionFinalizer)\n\n\t// don't wait indefinitely\n\tif timeout == 0 {\n\t\ttimeout = 5 * time.Second\n\t}\n\n\tconn, err := net.DialTimeout(\"tcp\", address, timeout)\n\tif err != nil {\n\t\tlogger.Logger.Debug(\"Connection to address `%s` failed to establish with error: %s\", address, err.Error())\n\t\treturn nil, errToTimeoutErr(nil, err)\n\t}\n\tnewConn.conn = conn\n\tnewConn.limitReader = &io.LimitedReader{R: conn, N: 0}\n\n\t// set timeout at the last possible moment\n\tif err := newConn.SetTimeout(time.Now().Add(timeout), timeout); err != nil {\n\t\tnewConn.Close()\n\t\treturn nil, err\n\t}\n\n\treturn newConn, nil\n}", "func newConnTCP(conn net.Conn, stat systree.BytesMetric) (conn, error) {\n\tc := &connTCP{\n\t\tconn: conn,\n\t\tstat: 
stat,\n\t}\n\n\treturn c, nil\n}", "func (t *Transport) GetConn(cm *ConnectMethod, opt *RequestOptions) (*PersistConn, error) {\n\tif pc := t.getIdleConn(cm); pc != nil {\n\t\tpc.useCount++\n\t\tif opt != nil && opt.Stat != nil {\n\t\t\topt.Stat.RemoteAddr = pc.conn.RemoteAddr()\n\t\t\topt.Stat.ConnectionAge = time.Now().Sub(pc.started)\n\t\t\topt.Stat.ConnectionUse = pc.useCount\n\t\t}\n\t\treturn pc, nil\n\t}\n\n\tconn, err := t.dial(\"tcp\", cm.addr(), opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpconn := &PersistConn{\n\t\tcacheKey: cm.String(),\n\t\tconn: conn,\n\t\treqch: make(chan requestAndOptions, 50),\n\t\trech: make(chan responseAndError, 1),\n\t\tstarted: time.Now(),\n\t\tuseCount: 1,\n\t\tidleTimeout: 120 * time.Second,\n\t}\n\tif opt != nil && opt.KeepaliveTimeout != 0 {\n\t\tpconn.idleTimeout = opt.KeepaliveTimeout\n\t}\n\n\tif cm.targetScheme == \"https\" {\n\t\t// Initiate TLS and check remote host name against certificate.\n\t\tconn = tls.Client(conn, t.TLSClientConfig)\n\t\tif err = conn.(*tls.Conn).Handshake(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif t.TLSClientConfig == nil || !t.TLSClientConfig.InsecureSkipVerify {\n\t\t\tif err = conn.(*tls.Conn).VerifyHostname(cm.tlsHost()); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tpconn.conn = conn\n\t}\n\n\tpconn.bw = bufio.NewWriter(pconn.conn)\n\tgo pconn.readLoop(func(pc *PersistConn) bool { return t.putIdleConn(pc) })\n\treturn pconn, nil\n}", "func NewConn(protocol string, addr string) (net.Conn, error) {\n\tnewConn, err := net.Dial(protocol, addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newConn, nil\n}", "func TCPDial(address string) (io.ReadWriteCloser, error) {\n\treturn net.Dial(\"tcp\", address)\n}", "func NewKeepAliveConn(conn PacketConn, readBufferSize int, outgoing chan Packet) *KeepAliveConn {\n\treturn &KeepAliveConn{\n\t\tconn: conn,\n\t\tread: make(chan Packet, readBufferSize),\n\t\twrite: outgoing,\n\t\tping: make(chan 
Packet),\n\t\tpong: make(chan struct{}),\n\t\topen: make(chan Packet),\n\t\topened: make(chan OpenData),\n\t\tpongOnce: &sync.Once{},\n\t\topenOnce: &sync.Once{},\n\t\tonce: &sync.Once{},\n\t}\n}", "func NewConn(conn net.Conn) Conn {\n\treturn Conn{\n\t\tConn: conn,\n\t}\n}", "func NewNetConn(errOnWrite error) *NetConn {\n\treturn &NetConn{errOnWrite: errOnWrite}\n}", "func (c *connPool) NewConn() (net.Conn, error) {\n\tserver, client := net.Pipe()\n\tselect {\n\tcase c.p <- server:\n\t\treturn client, nil\n\tcase <-c.closed:\n\t\treturn nil, errClosed\n\t}\n}", "func DialTo(addr string) (*Conn, error) {\n\n\tconst network = \"tcp\"\n\n\tif \"\" == addr {\n\t\taddr = \"127.0.0.1:telnet\"\n\t}\n\n\tconn, err := net.Dial(network, addr)\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\n\tdataReader := newDataReader(conn)\n\tdataWriter := newDataWriter(conn)\n\n\tclientConn := Conn{\n\t\tconn: conn,\n\t\tdataReader: dataReader,\n\t\tdataWriter: dataWriter,\n\t}\n\n\treturn &clientConn, nil\n}", "func connect(addr string) (net.Conn, error) {\n\tConnecting(addr)\n\tdialConn, err := DialTCP(addr, timeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tConnected(addr)\n\treturn dialConn, nil\n}", "func WrapConn(conn net.Conn, options *ConnOptions) (*Conn, error) {\n\tif options == nil {\n\t\toptions = new(ConnOptions)\n\t}\n\tnow := time.Now()\n\tsizeR := options.ReadBufferSize\n\tif sizeR < minBufferSize {\n\t\tsizeR = minBufferSize\n\t}\n\tsizeW := options.WriteBufferSize\n\tif sizeW < minBufferSize {\n\t\tsizeW = minBufferSize\n\t}\n\tw := timeoutWriter(conn, options.WriteTimeout)\n\tc := Conn{\n\t\tconn: conn,\n\t\toptions: *options,\n\t\tr: *resp.NewStreamSize(conn, sizeR),\n\t\tw: PipelineWriter{\n\t\t\tKeyPrefix: options.KeyPrefix,\n\t\t\tdest: bufio.NewWriterSize(w, sizeW),\n\t\t},\n\t\tcreatedAt: now,\n\t\tlastUsedAt: now,\n\t\tscripts: make(map[Arg]string),\n\t}\n\n\tif pass := options.Auth; pass != \"\" {\n\t\tif err := c.Auth(pass); err != nil 
{\n\t\t\tconn.Close()\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif db := options.DB; DBIndexValid(db) {\n\t\tif err := c.injectCommand(\"SELECT\", Int(db)); err != nil {\n\t\t\tconn.Close()\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif options.WriteOnly {\n\t\ttype closeReader interface {\n\t\t\tCloseRead() error\n\t\t}\n\t\tif cr, ok := conn.(closeReader); ok {\n\t\t\tif err := cr.CloseRead(); err != nil {\n\t\t\t\tconn.Close()\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tif err := c.WriteCommand(\"CLIENT\", String(\"REPLY\"), String(\"OFF\")); err != nil {\n\t\t\tconn.Close()\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn &c, nil\n}", "func (c *Conn) NetConn() net.Conn {\n\treturn c.nconn\n}", "func DialToTimeout(addr string, timeout time.Duration) (*Conn, error) {\n\n\tconst network = \"tcp\"\n\n\tif \"\" == addr {\n\t\taddr = \"127.0.0.1:telnet\"\n\t}\n\n\tconn, err := net.DialTimeout(network, addr, timeout)\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\n\tdataReader := newDataReader(conn)\n\tdataWriter := newDataWriter(conn)\n\n\tclientConn := Conn{\n\t\tconn: conn,\n\t\tdataReader: dataReader,\n\t\tdataWriter: dataWriter,\n\t}\n\n\treturn &clientConn, nil\n}", "func NewConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn {\n\treturn newConn(conn, isServer, readBufferSize, writeBufferSize)\n}", "func NewConn(server Server, tc TCPConn) Conn {\n\tc := &conn{}\n\tc.inbox = make(chan Request, MAX_CONN_CHAN_BACKLOG)\n\tc.outboxQueue = NewBoundedQueue(int32(server.Config().Limits.Pending))\n\tc.commands = make(chan ClientCmd, MAX_CONN_CHAN_BACKLOG)\n\tc.subscribedMessages = make(chan *SubscribedMessage, MAX_CONN_CHAN_BACKLOG)\n\n\tc.server = server\n\tc.subcriptions = make(map[int]*Subscription)\n\tc.options = &ConnOptions{Pedantic: true, Verbose: true}\n\n\tc.tc = tc\n\tc.reader = bufio.NewReaderSize(tc, BUF_IO_SIZE)\n\tc.writer = bufio.NewWriterSize(tc, BUF_IO_SIZE)\n\n\tc.fatalError = make(chan *NATSError, 1)\n\tc.writerDone = make(chan bool, 
1)\n\n\tc.heartbeatHelper = NewHeartbeatHelper(c, server.Config().Ping.IntervalDuration,\n\t\tserver.Config().Ping.MaxOutstanding)\n\tc.authHelper = NewAuthHelper(c, server.Config().Auth.Users,\n\t\tserver.Config().Auth.TimeoutDuration)\n\treturn c\n}", "func NetConn(network, address string) Conn {\n\tnc := &netConn{\n\t\tlock: new(sync.Mutex),\n\t\tnetwork: network,\n\t\taddress: address,\n\t}\n\n\treturn nc\n}", "func newConnection(c net.Conn) *connection {\n\treturn &connection{\n\t\tc: c,\n\t}\n}", "func newConn(ctx context.Context, t *Transport, remoteMa ma.Multiaddr,\n\tremotePID peer.ID, inbound bool) (tpt.CapableConn, error) {\n\t// Creates a manet.Conn\n\tpr, pw := io.Pipe()\n\tconnCtx, cancel := context.WithCancel(gListener.ctx)\n\n\tmaconn := &Conn{\n\t\treadIn: pw,\n\t\treadOut: pr,\n\t\tlocalMa: gListener.localMa,\n\t\tremoteMa: remoteMa,\n\t\tctx: connCtx,\n\t\tcancel: cancel,\n\t}\n\n\t// Unlock gListener locked from discovery.go (HandlePeerFound)\n\tgListener.inUse.Done()\n\n\t// Stores the conn in connMap, will be deleted during conn.Close()\n\tconnMap.Store(maconn.RemoteAddr().String(), maconn)\n\n\t// Returns an upgraded CapableConn (muxed, addr filtered, secured, etc...)\n\tif inbound {\n\t\treturn t.upgrader.UpgradeInbound(ctx, t, maconn)\n\t}\n\treturn t.upgrader.UpgradeOutbound(ctx, t, maconn, remotePID)\n}", "func newOutboundConn(c net.Conn, s *Server, conf *config.FeedConfig) Conn {\n\n\tsname := s.Name()\n\n\tif len(sname) == 0 {\n\t\tsname = \"nntp.anon.tld\"\n\t}\n\tstorage := s.Storage\n\tif storage == nil {\n\t\tstorage = store.NewNullStorage()\n\t}\n\treturn &v1OBConn{\n\t\tconf: conf,\n\t\tC: v1Conn{\n\t\t\thooks: s,\n\t\t\tstate: ConnState{\n\t\t\t\tFeedName: conf.Name,\n\t\t\t\tHostName: conf.Addr,\n\t\t\t\tOpen: true,\n\t\t\t},\n\t\t\tserverName: sname,\n\t\t\tstorage: storage,\n\t\t\tC: textproto.NewConn(c),\n\t\t\tconn: c,\n\t\t\thdrio: message.NewHeaderIO(),\n\t\t},\n\t}\n}", "func (srv *Server) newConn(rwc net.Conn) *conn 
{\n\treturn &conn{\n\t\tserver: srv,\n\t\trwc: rwc,\n\t}\n\n}", "func Dial(ctx context.Context, network, addr string, timeout time.Duration, connTimeout time.Duration) (*ClientConn, error) {\n\tc := &ClientConn{\n\t\taddr: addr,\n\t\tnetwork: network,\n\t\tTimeout: timeout,\n\t\tDialTimeout: connTimeout,\n\t}\n\tconn, err := net.DialTimeout(c.network, c.addr, c.DialTimeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.rwc = conn\n\treturn c, err\n}", "func NewConn(conn net.Conn, bufrw *bufio.ReadWriter, request *http.Request) (c Conn, err error) {\n\tvar mask [4]byte\n\tmaskSlice := make([]byte, 4)\n\tn, err := rand.Read(maskSlice)\n\n\tcopy(mask[:], maskSlice[0:4])\n\n\tif err != nil {\n\t\treturn c, err\n\t}\n\n\tif n != 4 {\n\t\treturn c, errors.New(\"Expected 4 random bytes for the mask\")\n\t}\n\n\tresult := Conn{\n\t\trwc: conn,\n\t\trequest: request,\n\t\tisServer: request != nil,\n\t\treceivedClose: false,\n\t\tsentClose: false,\n\t\tmask: mask,\n\t\tbrw: bufrw,\n\t}\n\n\tresult.Handler = NewFrameSpecHandler(&result)\n\n\treturn result, nil\n}", "func (DirectTCPClient) Request(ctx context.Context, addr Address) (\n\tio.ReadWriteCloser, Address, *ProxyError) {\n\tvar reqAddr string\n\tswitch a := addr.(type) {\n\tcase *TCP4Addr:\n\t\treqAddr = a.String()\n\tcase *TCP6Addr:\n\t\treqAddr = a.String()\n\tcase *DomainNameAddr:\n\t\treqAddr = a.String()\n\tdefault:\n\t\treturn nil, nil, wrapAsProxyError(\n\t\t\terrors.Errorf(\"unsupported address for DirectTCPClient: %s\", addr),\n\t\t\tProxyAddrUnsupported)\n\t}\n\n\tconn, err := TCPTransport{}.Dial(ctx, reqAddr)\n\tvar boundAddr Address\n\tif err == nil {\n\t\tboundAddr, err = FromNetAddr(conn.LocalAddr())\n\t}\n\tpErr := wrapAsProxyError(errors.WithStack(err), ProxyConnectFailed)\n\treturn conn, boundAddr, pErr\n}", "func newConnection(address string, timeout time.Duration) (*Connection, Error) {\n\tnewConn := &Connection{dataBuffer: bufPool.Get().([]byte)}\n\truntime.SetFinalizer(newConn, 
connectionFinalizer)\n\n\t// don't wait indefinitely\n\tif timeout == 0 {\n\t\ttimeout = 5 * time.Second\n\t}\n\n\tconn, err := net.DialTimeout(\"tcp\", address, timeout)\n\tif err != nil {\n\t\tlogger.Logger.Debug(\"Connection to address `%s` failed to establish with error: %s\", address, err.Error())\n\t\treturn nil, errToAerospikeErr(nil, err)\n\t}\n\tnewConn.conn = conn\n\tnewConn.limitReader = &io.LimitedReader{R: conn, N: 0}\n\n\t// set timeout at the last possible moment\n\tif err := newConn.SetTimeout(time.Now().Add(timeout), timeout); err != nil {\n\t\tnewConn.Close()\n\t\treturn nil, err\n\t}\n\n\treturn newConn, nil\n}", "func _createConn(data []byte) net.Conn {\n\tmconn := &mockConn{\n\t\taddr: \"127.0.0.1:12345\",\n\t\tbuf: bytes.NewBuffer(data),\n\t}\n\treturn mconn\n}", "func (s *Server) newConn(rwc net.Conn) *conn {\n\treturn &conn{\n\t\tserver: s,\n\t\trwc: rwc,\n\t\tbrc: bufio.NewReader(rwc),\n\t}\n}", "func NewConn(conn net.Conn) Conn {\n\treturn newConn(conn)\n}", "func (c *Conn) open(ctx context.Context) (conn *Conn, err error) {\n\tc.mu.Lock()\n\tc.refCount++\n\tc.lastOpened = time.Now()\n\tc.mu.Unlock()\n\n\tdefer func() {\n\t\t// If we return an error, the caller is not expected to call Close().\n\t\t// We need to decrement the refCount ourselves.\n\t\tif err != nil {\n\t\t\tc.mu.Lock()\n\t\t\tc.refCount--\n\t\t\tc.mu.Unlock()\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-c.connectDone:\n\t\tif c.connectErr != nil {\n\t\t\treturn nil, c.connectErr\n\t\t}\n\t\treturn c, nil\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\t}\n}", "func Dial(addr string, options *ConnOptions) (*Conn, error) {\n\tconn, err := net.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn WrapConn(conn, options)\n}", "func Dial(addr string) (*Conn, error) {\n\tconn, err := net.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newConn(conn)\n}", "func (c *localConn) Connect() (WriteCloser, error) {\n\tif c.address != \"\" && 
c.network != \"\" {\n\t\treturn c.localWriteCloser(net.DialUnix(c.network, nil, &net.UnixAddr{Name: c.address, Net: c.network}))\n\t}\n\n\treturn c.localWriteCloser(c.osGuessConnnector())\n}", "func Connect(cli Client) *net.TCPConn {\n\tsock, err := net.DialTCP(Net, nil, *cli.TargetIP())\n\tutil.Hand(cli.ILog(), err, \"Error opening connection to target\", \"Opened connection to target\")\n\treturn sock\n}", "func JoinConn(local *net.TCPConn, remote *net.TCPConn) {\n\tdefer local.Close()\n\tdefer remote.Close()\n\t_, err := io.Copy(local, remote)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tlog.Println(\"Copy local remote failed.\")\n\t\treturn\n\t}\n}", "func newConn(dial func(string) (net.Conn, error)) func(string) (net.Conn, error) {\n\treturn func(addr string) (net.Conn, error) {\n\t\t// get the proxy url\n\t\tproxyURL, err := getURL(addr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// set to addr\n\t\tcallAddr := addr\n\n\t\t// got proxy\n\t\tif proxyURL != nil {\n\t\t\tcallAddr = proxyURL.Host\n\t\t}\n\n\t\t// dial the addr\n\t\tc, err := dial(callAddr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// do proxy connect if we have proxy url\n\t\tif proxyURL != nil {\n\t\t\tc, err = proxyDial(c, addr, proxyURL)\n\t\t}\n\n\t\treturn c, err\n\t}\n}", "func (c *ClientConn) NetConn() net.Conn {\n\treturn c.nconn\n}", "func newConn(conn net.Conn) *basicConn {\n\tbc := &basicConn{Conn: conn}\n\tbc.activeOpsCond = sync.NewCond(&bc.activeOpsMut)\n\n\treturn bc\n}", "func (srv *Server) newConn(rwc net.Conn) *conn {\n\treturn &conn{\n\t\tserver: srv,\n\t\tmuc: newMuxConn(rwc),\n\t}\n}", "func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {\n\tsysConn, ok := rawConn.(syscall.Conn)\n\tif !ok {\n\t\treturn newConn\n\t}\n\treturn &syscallConn{\n\t\tConn: newConn,\n\t\tsysConn: sysConn,\n\t}\n}", "func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {\n\tsysConn, ok := rawConn.(syscall.Conn)\n\tif !ok {\n\t\treturn newConn\n\t}\n\treturn 
&syscallConn{\n\t\tConn: newConn,\n\t\tsysConn: sysConn,\n\t}\n}", "func (s *Service) newConn(t net.Conn, readRate int) *Conn {\n\tc := &Conn{\n\t\tservice: s,\n\t\tsocket: t,\n\t\thandler: s.handler,\n\t}\n\n\tif readRate == 0 {\n\t\treadRate = defaultReadRate\n\t}\n\n\tc.limit = rate.New(readRate, time.Second)\n\n\t// Increment the connection counter\n\tatomic.AddInt64(&s.connections, 1)\n\treturn c\n}", "func WrapConn(conn net.Conn, hs ws.Handshake, state ws.State) *Conn {\n\tc := new(Conn)\n\tc.establish(conn, hs, state)\n\treturn c\n}", "func (p *TSocket) Conn() net.Conn {\n\treturn p.conn\n}", "func NewReader(conn net.Conn) *Reader {\n\treturn &Reader{conn: conn, timeout: ReadNoTimeout}\n}", "func (m *cMux) newConn(rwc net.Conn) *conn {\n\treturn &conn{\n\t\tserver: m,\n\t\tmuc: newMuxConn(rwc),\n\t}\n}", "func NewMockConn(localAddr, remoteAddr net.Addr, closed bool) *MockConn {\n\treturn &MockConn{\n\t\tlocalAddr: localAddr,\n\t\tremoteAddr: remoteAddr,\n\t\tsendChan: make(chan []byte),\n\t\treceiveChan: make(chan []byte),\n\t\tdone: make(chan struct{}),\n\t\tclosed: closed,\n\t}\n}", "func Tunnel() net.Conn {\n\tconn, err := net.Dial(tcpPort, address)\n\tif err != nil {\n\t\tlog.Println(\"[TUNNEL-ERROR] : Unable to connect to port.\")\n\t}\n\treturn conn\n}", "func (pool *Pool) newConn(conn net.Conn) (c *Conn) {\n\tnow := time.Now()\n\tx := connPool.Get()\n\tif x == nil {\n\t\tc = new(Conn)\n\t\tsize := pool.ReadBufferSize\n\t\tif size < minBufferSize {\n\t\t\tsize = minBufferSize\n\t\t}\n\t\tc.r = bufio.NewReaderSize(c, size)\n\t} else {\n\t\tc = x.(*Conn)\n\t\tc.r.Reset(c)\n\t}\n\tc.options = &ConnOptions{\n\t\tReadBufferSize: pool.ReadBufferSize,\n\t\tReadTimeout: pool.ReadTimeout,\n\t\tWriteTimeout: pool.WriteTimeout,\n\t}\n\tc.conn = conn\n\tc.createdAt = now\n\tc.lastUsedAt = now\n\tc.Select(int64(pool.DB))\n\treturn\n}", "func (p *channelPool) wrapConn(conn net.Conn) *Conn {\n\tc := &Conn{c: p, Conn: conn, t: time.Now()}\n\treturn c\n}", "func 
Connect(destination mynet.Address) mynet.Connection {\n\tconn, err := net.Dial(\"tcp\", destination.(string))\n\t// FIXME: error handling\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn newConnection(destination, conn)\n}", "func (c *channelPool) wrapConn(conn net.Conn) net.Conn {\n\treturn &PoolConn{c: c, Conn: conn}\n}", "func Open(ctx context.Context, params planetscalev2.VitessLockserverParams) (*Conn, error) {\n\tstartTime := time.Now()\n\tdefer func() {\n\t\topenLatency.Observe(time.Since(startTime).Seconds())\n\t}()\n\n\t// Hold the openMu RLock for as long as we're trying to get a connection,\n\t// to prevent the connection GC from closing connections.\n\t// Other Open attempts can happen concurrently, however.\n\tpool.openMu.RLock()\n\tdefer pool.openMu.RUnlock()\n\n\t// Get or start a connection attempt.\n\tconn := pool.get(params)\n\n\t// Wait for the connection attempt to finish.\n\tctx, cancel := context.WithTimeout(ctx, connectTimeout)\n\tdefer cancel()\n\treturn conn.open(ctx)\n}", "func sendConnectRequest(proxyConn net.Conn, dstAddr, authHeader string) (net.Conn, error) {\n\treq := \"CONNECT \" + dstAddr + \" HTTP/1.1\\r\\nHost: \" + dstAddr + \"\\r\\n\" + authHeader + \"\\r\\n\"\n\tif _, err := proxyConn.Write([]byte(req)); err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot send CONNECT request for dstAddr=%q: %w\", dstAddr, err)\n\t}\n\tvar res fasthttp.Response\n\tres.SkipBody = true\n\tconn := &bufferedReaderConn{\n\t\tbr: bufio.NewReader(proxyConn),\n\t\tConn: proxyConn,\n\t}\n\tif err := res.Read(conn.br); err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot read CONNECT response for dstAddr=%q: %w\", dstAddr, err)\n\t}\n\tif statusCode := res.Header.StatusCode(); statusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"unexpected status code received: %d; want: 200\", statusCode)\n\t}\n\treturn conn, nil\n}", "func (nd *Node) GetConnection(timeout time.Duration) (conn *Connection, err error) {\n\ttBegin := time.Now()\n\tpollTries := 0\nL:\n\tfor 
timeout == 0 || time.Now().Sub(tBegin) <= timeout {\n\t\tif t := nd.connections.Poll(); t != nil {\n\t\t\tconn = t.(*Connection)\n\t\t\tif conn.IsConnected() && !conn.isIdle() {\n\t\t\t\tif err := conn.SetTimeout(timeout); err == nil {\n\t\t\t\t\treturn conn, nil\n\t\t\t\t}\n\t\t\t}\n\t\t\tnd.InvalidateConnection(conn)\n\t\t}\n\n\t\t// if connection count is limited and enough connections are already created, don't create a new one\n\t\tif nd.cluster.clientPolicy.LimitConnectionsToQueueSize && nd.connectionCount.Get() >= nd.cluster.clientPolicy.ConnectionQueueSize {\n\t\t\t// will avoid an infinite loop\n\t\t\tif timeout != 0 || pollTries < 10 {\n\t\t\t\t// 10 reteies, each waits for 100us for a total of 1 milliseconds\n\t\t\t\ttime.Sleep(time.Microsecond * 100)\n\t\t\t\tpollTries++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak L\n\t\t}\n\n\t\tif conn, err = NewConnection(nd.address, nd.cluster.clientPolicy.Timeout); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// need to authenticate\n\t\tif err = conn.Authenticate(nd.cluster.user, nd.cluster.Password()); err != nil {\n\t\t\t// Socket not authenticated. Do not put back into pool.\n\t\t\tconn.Close()\n\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err = conn.SetTimeout(timeout); err != nil {\n\t\t\t// Socket not authenticated. 
Do not put back into pool.\n\t\t\tconn.Close()\n\t\t\treturn nil, err\n\t\t}\n\n\t\tconn.setIdleTimeout(nd.cluster.clientPolicy.IdleTimeout)\n\t\tconn.refresh()\n\n\t\tnd.connectionCount.IncrementAndGet()\n\t\treturn conn, nil\n\t}\n\treturn nil, NewAerospikeError(NO_AVAILABLE_CONNECTIONS_TO_NODE)\n}", "func Connect(conn net.Conn) (*Connection, error) {\n\tc := &Connection{}\n\terr := c.Open(conn)\n\treturn c, err\n}", "func NewTSocketConnTimeout(connection net.Conn, nsecTimeout int64) (*TSocket, TTransportException) {\n\taddress := connection.RemoteAddr()\n\tif address == nil {\n\t\taddress = connection.LocalAddr()\n\t}\n\tp := &TSocket{conn: connection, addr: address, nsecTimeout: nsecTimeout, writeBuffer: bytes.NewBuffer(make([]byte, 0, 4096))}\n\treturn p, nil\n}", "func newClient(t *TCP, conn net.Conn) *client {\n\tipAddress := conn.RemoteAddr().String()\n\tt.Event(\"newClient\", \"IPAddress[%s]\", ipAddress)\n\n\t// Ask the user to bind the reader and writer they want to\n\t// use for this connection.\n\tr, w := t.ConnHandler.Bind(conn)\n\n\tc := client{\n\t\tt: t,\n\t\tconn: conn,\n\t\tipAddress: ipAddress,\n\t\treader: r,\n\t\twriter: w,\n\t}\n\n\t// Check to see if this connection is ipv6.\n\tif raddr := conn.RemoteAddr().(*net.TCPAddr); raddr.IP.To4() == nil {\n\t\tc.isIPv6 = true\n\t}\n\n\t// Launch a goroutine for this connection.\n\tc.wg.Add(1)\n\tgo c.read()\n\n\treturn &c\n}", "func Dial(laddr, raddr *net.TCPAddr, ttl uint8, md5Secret string, noRoute bool) (*Conn, error) {\n\tif raddr == nil {\n\t\treturn nil, fmt.Errorf(\"raddr is mandatory\")\n\t}\n\n\tafi := uint16(syscall.AF_INET)\n\tif raddr.IP.To4() == nil {\n\t\tafi = syscall.AF_INET6\n\t}\n\n\tc, err := dialTCP(afi, laddr, raddr, ttl, md5Secret, noRoute)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Dialing failed\")\n\t}\n\n\tc.laddr = laddr\n\tif c.laddr == nil || c.laddr.IP == nil {\n\t\tsa, err := syscall.Getsockname(c.fd)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, 
\"getsockname() failed\")\n\t\t}\n\n\t\tsa4 := sa.(*syscall.SockaddrInet4)\n\t\tc.laddr.IP = net.IP(sa4.Addr[:])\n\t\tc.laddr.Port = sa4.Port\n\t}\n\tc.raddr = raddr\n\treturn c, nil\n}", "func NewConnection(conn net.Conn, server bool) (*Connection, error) {\n\tframer, framerErr := spdy.NewFramer(conn, conn)\n\tif framerErr != nil {\n\t\treturn nil, framerErr\n\t}\n\tidleAwareFramer := newIdleAwareFramer(framer)\n\tvar sid spdy.StreamId\n\tvar rid spdy.StreamId\n\tvar pid uint32\n\tif server {\n\t\tsid = 2\n\t\trid = 1\n\t\tpid = 2\n\t} else {\n\t\tsid = 1\n\t\trid = 2\n\t\tpid = 1\n\t}\n\n\tstreamLock := new(sync.RWMutex)\n\tstreamCond := sync.NewCond(streamLock)\n\n\tsession := &Connection{\n\t\tconn: conn,\n\t\tframer: idleAwareFramer,\n\n\t\tcloseChan: make(chan bool),\n\t\tgoAwayTimeout: time.Duration(0),\n\t\tcloseTimeout: time.Duration(0),\n\n\t\tstreamLock: streamLock,\n\t\tstreamCond: streamCond,\n\t\tstreams: make(map[spdy.StreamId]*Stream),\n\t\tnextStreamId: sid,\n\t\treceivedStreamId: rid,\n\n\t\tpingId: pid,\n\t\tpingChans: make(map[uint32]chan error),\n\n\t\tshutdownChan: make(chan error),\n\t}\n\tsession.dataFrameHandler = session.handleDataFrame\n\tidleAwareFramer.conn = session\n\tgo idleAwareFramer.monitor()\n\n\treturn session, nil\n}", "func NewTSocketConn(connection net.Conn) (*TSocket, TTransportException) {\n\treturn NewTSocketConnTimeout(connection, 0)\n}", "func newWaitConn(ctx context.Context, conn net.Conn) *waitConn {\n\tctx, cancel := context.WithCancel(ctx)\n\treturn &waitConn{\n\t\tConn: conn,\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t}\n}", "func (p *ConnProvider) Get(addr string) (net.Conn, error) {\n\tclosed := atomic.LoadInt32(&p.closed)\n\tif closed == 1 {\n\t\treturn nil, errors.New(\"pool is closed\")\n\t}\n\n\tp.mu.Lock()\n\tif _, ok := p.idleConnMap[addr]; !ok {\n\t\tp.mu.Unlock()\n\t\treturn nil, errors.New(\"no idle conn\")\n\t}\n\tp.mu.Unlock()\n\nRETRY:\n\tselect {\n\tcase conn := <-p.idleConnMap[addr]:\n\t\t// Getting a 
net.Conn requires verifying that the net.Conn is valid\n\t\t_, err := conn.Read([]byte{})\n\t\tif err != nil || err == io.EOF {\n\t\t\t// conn is close Or timeout\n\t\t\t_ = conn.Close()\n\t\t\tgoto RETRY\n\t\t}\n\t\treturn conn, nil\n\tdefault:\n\t\treturn nil, errors.New(\"no idle conn\")\n\t}\n}", "func NewConn(options ...ConnOption) (*Conn, error) {\n\tc := Conn{\n\t\tclock: clock.New(),\n\t\tdialF: defaultDialF,\n\t\tcloseChan: make(chan chan struct{}),\n\t}\n\tfor _, o := range options {\n\t\tif err := o(&c); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif c.addr == \"\" {\n\t\tc.addr = defaultAddr\n\t}\n\tif c.pingInterval.Nanoseconds() == 0 {\n\t\tc.pingInterval = defaultPingInterval\n\t}\n\tif c.applicationID == \"\" {\n\t\treturn nil, errMissingApplicationID\n\t}\n\tif c.installationID == \"\" {\n\t\treturn nil, errMissingInstallationID\n\t}\n\tif c.pushHandler == nil {\n\t\treturn nil, errMissingPushHandler\n\t}\n\tif c.dialer == nil {\n\t\tc.dialer = &net.Dialer{\n\t\t\tTimeout: defaultDialerTimeout,\n\t\t}\n\t}\n\tif c.retry == nil {\n\t\tc.retry = defaultRetry\n\t}\n\tgo c.do()\n\treturn &c, nil\n}", "func (c *Control) Conn() net.Conn {\n\treturn c.conn\n}", "func establishConnection(addr string) (net.Conn, error) {\n\treturn net.Dial(\"tcp\", addr)\n}", "func ToConn(thing interface{}) (net.Conn, error) {\n\tif conn, ok := thing.(net.Conn); ok {\n\t\treturn conn, nil\n\t}\n\tfile, err := ToFile(thing)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconn, err := net.FileConn(file)\n\tif err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\treturn conn, nil\n}", "func (c *boundedPool) wrapConn(conn net.Conn) net.Conn {\n\tp := &pooledConn{c: c}\n\tp.Conn = conn\n\treturn p\n}", "func PipeConn() (io.ReadWriteCloser, io.ReadWriteCloser) {\n\tclient, server := new(pipeConn), new(pipeConn)\n\t{\n\t\tr, w := io.Pipe()\n\t\tclient.WriteCloser, server.Reader = w, r\n\t}\n\t{\n\t\tr, w := io.Pipe()\n\t\tserver.WriteCloser, client.Reader = w, 
r\n\t}\n\treturn client, server\n}", "func newConn(t *testing.T) net.Conn {\n\tconn, err := net.Dial(\"tcp\", \"127.0.0.1:52525\")\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to connect to test server: %v\", err)\n\t}\n\tbanner, readErr := bufio.NewReader(conn).ReadString('\\n')\n\tif readErr != nil {\n\t\tt.Fatalf(\"Failed to read banner from test server: %v\", readErr)\n\t}\n\tif banner[0:3] != \"220\" {\n\t\tt.Fatalf(\"Read incorrect banner from test server: %v\", banner)\n\t}\n\treturn conn\n}", "func (p *TSocket) Open() error {\n\tif p.IsOpen() {\n\t\treturn NewTTransportException(ALREADY_OPEN, \"Socket already connected.\")\n\t}\n\tif p.addr == nil {\n\t\treturn NewTTransportException(NOT_OPEN, \"Cannot open nil address.\")\n\t}\n\tif len(p.addr.Network()) == 0 {\n\t\treturn NewTTransportException(NOT_OPEN, \"Cannot open bad network name.\")\n\t}\n\tif len(p.addr.String()) == 0 {\n\t\treturn NewTTransportException(NOT_OPEN, \"Cannot open bad address.\")\n\t}\n\tvar err error\n\tif p.nsecTimeout > 0 {\n\t\tif p.conn, err = net.DialTimeout(p.addr.Network(), p.addr.String(), time.Duration(p.nsecTimeout)); err != nil {\n\t\t\tLOGGER.Print(\"Could not open socket\", err.Error())\n\t\t\treturn NewTTransportException(NOT_OPEN, err.Error())\n\t\t}\n\t} else {\n\t\tif p.conn, err = net.Dial(p.addr.Network(), p.addr.String()); err != nil {\n\t\t\tLOGGER.Print(\"Could not open socket\", err.Error())\n\t\t\treturn NewTTransportException(NOT_OPEN, err.Error())\n\t\t}\n\t}\n\treturn nil\n}", "func Dial(addr string) (io.ReadWriteCloser, error) {\n\tpub, priv, err := box.GenerateKey(rand.Reader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trawConn, err := net.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Conn{rawConn: rawConn, priv: priv, pub: pub}, nil\n}", "func NewConn(addr string, sess quic.Session, st quic.Stream, conf *WorkflowConfig) *Conn {\n\tlogger.Debug(\"[zipper] inits a new connection.\")\n\tc := &Conn{\n\t\tConn: 
quic.NewConn(\"\", core.ConnTypeNone),\n\t}\n\n\tc.Addr = addr\n\tc.Session = sess\n\tc.Conn.Signal = core.NewFrameStream(st)\n\tc.handleSignal(conf)\n\tc.Conn.OnClosed = c.Close\n\tc.Conn.OnHeartbeatReceived = func() {\n\t\tlogger.Debug(\"Received Ping from client, will send Pong to client.\", \"name\", c.Conn.Name, \"addr\", c.Addr)\n\t\t// when the zipper received Ping from client, send Pong to client.\n\t\tc.Conn.SendSignal(frame.NewPongFrame())\n\t}\n\n\tc.Conn.OnHeartbeatExpired = func() {\n\t\tlogger.Printf(\"❌ The client %s was offline, addr: %s\", c.Conn.Name, c.Addr)\n\t\tst.Close()\n\t\tc.Conn.Close()\n\t}\n\n\treturn c\n}", "func NewConnection(txCh chan<- *Message, rxCh <-chan *Message, addr Address, options ...ConnectionOption) (Connection, error) {\n\tconn := &connection{\n\t\tMutex: &sync.Mutex{},\n\t\tmsgListeners: &msgListeners{listeners: make(map[<-chan *Message]*msgListener)},\n\n\t\taddr: addr,\n\t\ttimeout: 3 * time.Second,\n\n\t\ttxCh: txCh,\n\t\trxCh: rxCh,\n\t\tmsgCh: make(chan *Message),\n\t\tcloseCh: make(chan chan error),\n\t}\n\n\tfor _, option := range options {\n\t\terr := option(conn)\n\t\tif err != nil {\n\t\t\tLog.Infof(\"error setting connection option: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tgo conn.readLoop()\n\treturn conn, nil\n}", "func create_connection(c net.Conn) * Connection {\n file_name := fmt.Sprintf(\"w_stream %d\", time.Now().UnixNano())\n f, _ := os.Create(file_name)\n return &Connection{\n client_ids.Next(),\n \"\",\n c,\n true,\n f,\n make(map[int]*Room),\n }\n}", "func DialToTLSTimeout(addr string, timeout time.Duration, tlsConfig *tls.Config) (*Conn, error) {\n\n\tconst network = \"tcp\"\n\n\tif \"\" == addr {\n\t\taddr = \"127.0.0.1:telnets\"\n\t}\n\n\td := net.Dialer{Timeout: timeout}\n\tconn, err := tls.DialWithDialer(&d, network, addr, tlsConfig)\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\n\tdataReader := newDataReader(conn)\n\tdataWriter := newDataWriter(conn)\n\n\tclientConn := 
Conn{\n\t\tconn: conn,\n\t\tdataReader: dataReader,\n\t\tdataWriter: dataWriter,\n\t}\n\n\treturn &clientConn, nil\n}", "func newConn(pc *ipv6.PacketConn, src netip.Addr, ifi *net.Interface) (*Conn, netip.Addr, error) {\n\tc := &Conn{\n\t\tpc: pc,\n\n\t\t// The default control message used when none is specified.\n\t\tcm: &ipv6.ControlMessage{\n\t\t\tHopLimit: HopLimit,\n\t\t\tSrc: src.AsSlice(),\n\t\t\tIfIndex: ifi.Index,\n\t\t},\n\n\t\tifi: ifi,\n\t\taddr: src,\n\t}\n\n\treturn c, src, nil\n}", "func NewSafeConn(conn net.Conn, timeout time.Duration) *SafeConn {\n\tsConn := &SafeConn{\n\t\tConn: conn,\n\t\tIdleTimeout: timeout,\n\t\tMaxReadBuffer: 1024, // TODO: remove hardcode\n\t}\n\tsConn.SetDeadline(time.Now().Add(sConn.IdleTimeout))\n\treturn sConn\n}", "func establishConnection(ptp protocol.PointToPoint, timeout ...time.Duration) (*net.Conn, error) {\n\tvar conn net.Conn\n\tvar err error\n\n\taddr := ptp.GetAddress()\n\n\thost, _, err := net.SplitHostPort(addr)\n\n\tproto := \"tcp4\"\n\n\tips, err := net.LookupIP(host)\n\n\tif err == nil {\n\t\t// Test if we are connecting locally to avoid TCP port issue.\n\t\tlocalhost := false\n\t\tfor _, a := range ips {\n\t\t\tlocalhost = localhost || a.IsLoopback()\n\t\t}\n\n\t\tconnc := ptp.GetConnectionChannel()\n\n\t\tif localhost && connc != nil {\n\t\t\tr, w := net.Pipe()\n\t\t\tconn = r\n\t\t\t(*connc) <- &w\n\t\t} else {\n\t\t\tif len(timeout) == 0 {\n\t\t\t\tconn, err = net.Dial(proto, addr)\n\t\t\t} else {\n\t\t\t\tconn, err = net.DialTimeout(proto, addr, timeout[0])\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &conn, err\n}", "func NewConnection(port int, bufferSize int) (*connection, <-chan []byte, chan<- []byte, error) {\n\tconn := new(connection)\n\tconn.in, conn.out = make(chan []byte, 1000), make(chan []byte, 1000)\n\tconn.port = port\n\tconn.peers = make([]*peer, 1)\n\tconn.running = true\n\tconn.group = new(sync.WaitGroup)\n\t\n\tlistener, err := net.Listen(\"tcp\", fmt.Sprint(\":\", port))\n\tif err != nil 
{\n\t\treturn nil, nil, nil, err\n\t}\n\tconn.listener = listener.(*net.TCPListener)\n\tgo conn.listen()\n\tgo conn.sendLoop()\n\treturn conn, conn.in, conn.out, nil\n}", "func NewConnection(transport Transport, nc net.Conn, accepted bool) (Connection, error) {\n\tconn := &connection{\n\t\ttran: transport,\n\t\tConn: nc,\n\t}\n\tif accepted {\n\t\tconn.laddr = fmt.Sprintf(\"%s://%s\", conn.tran.Scheme(), conn.Conn.LocalAddr().String())\n\t\tconn.raddr = fmt.Sprintf(\"%s://%s\", conn.Conn.RemoteAddr().Network(), conn.Conn.RemoteAddr().String())\n\t} else {\n\t\tconn.laddr = fmt.Sprintf(\"%s://%s\", conn.Conn.LocalAddr().Network(), conn.Conn.LocalAddr().String())\n\t\tconn.raddr = fmt.Sprintf(\"%s://%s\", conn.tran.Scheme(), conn.Conn.RemoteAddr().String())\n\t}\n\n\treturn conn, nil\n}", "func fdToConn(fd int) net.Conn {\n\tf := os.NewFile(uintptr(fd), \"\")\n\tc, err := net.FileConn(f)\n\tif err != nil {\n\t\tlog.Fatal(\"Unable to create net.Conn from file descriptor: \", err)\n\t}\n\treturn c\n}", "func NewConnection(packet *layers.TCP) Connection {\n\treturn Connection{src: packet.SrcPort, dst: packet.DstPort}\n}", "func NewThreadSafeConn(c net.Conn) net.Conn {\n\ttype threadSafeConn struct {\n\t\tnet.Conn\n\t\tsync.Locker\n\t}\n\n\treturn &threadSafeConn{\n\t\tConn: c,\n\t\tLocker: &sync.Mutex{},\n\t}\n}", "func (d *Driver) Open(name string) (driver.Conn, error) {\n\tc, err := Open(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.BusyTimeout(500)\n\treturn &connImpl{c}, nil\n}", "func (g *NOOPTransport) GetConnection(ctx context.Context,\n\ttarget string) (raftio.IConnection, error) {\n\tatomic.AddUint64(&g.tryConnect, 1)\n\tif g.connReq.Fail() {\n\t\treturn nil, ErrRequestedToFail\n\t}\n\tatomic.AddUint64(&g.connected, 1)\n\treturn &NOOPConnection{req: g.req}, nil\n}", "func Dial(network, address string, timeout time.Duration) (net.Conn, error) {\n\tvar conn net.Conn\n\tvar err error\n\tif timeout > 0 {\n\t\tconn, err = net.DialTimeout(network, address, 
timeout)\n\t} else {\n\t\tconn, err = net.Dial(network, address)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn conn, nil\n}", "func Dial(network, address string, timeout time.Duration) (net.Conn, error) {\n\tvar conn net.Conn\n\tvar err error\n\tif timeout > 0 {\n\t\tconn, err = net.DialTimeout(network, address, timeout)\n\t} else {\n\t\tconn, err = net.Dial(network, address)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn conn, nil\n}", "func Dial(addr string) (io.ReadWriteCloser, error) {\n\tconn, err := net.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\tlog.Printf(\"error connecting at %v:%v\\n\", addr, err)\n\t\treturn nil, nil\n\t}\n\n\tpub, priv, err := GenerateKey()\n\n\tif err != nil {\n\t\tlog.Printf(\"error generating key pair:%v\\n\", err)\n\t\treturn nil, err\n\t}\n\n\tsrvKey := new([32]byte)\n\terr = keyExchange(conn, pub, srvKey)\n\tif err != nil {\n\t\tlog.Printf(\"handshake error:%v\\n\", err)\n\t\treturn nil, err\n\t}\n\n\tsrw := &SecureReadWriter{NewSecureReader(conn, priv, srvKey), NewSecureWriter(conn, priv, srvKey), conn}\n\n\treturn srw, nil\n}" ]
[ "0.69614923", "0.6535082", "0.6439303", "0.63323253", "0.6207398", "0.6090019", "0.6024686", "0.5997635", "0.59855705", "0.5922736", "0.5920382", "0.5903573", "0.5891919", "0.5875956", "0.5862222", "0.5819222", "0.5783388", "0.5781409", "0.57801217", "0.57777804", "0.57632226", "0.57340366", "0.57334995", "0.572141", "0.5717823", "0.5712838", "0.57045275", "0.56643033", "0.56406695", "0.56305945", "0.5616885", "0.5597917", "0.558965", "0.55756587", "0.55717784", "0.55643636", "0.55476797", "0.55388755", "0.55367357", "0.55347407", "0.55330956", "0.5525665", "0.55162907", "0.55110306", "0.54953164", "0.5494197", "0.54860467", "0.5481358", "0.5474232", "0.54424155", "0.5435706", "0.5435706", "0.5433947", "0.542566", "0.54225254", "0.54179305", "0.5413412", "0.5411377", "0.54101026", "0.5405412", "0.5402473", "0.5397495", "0.5390922", "0.53906584", "0.53900886", "0.5386511", "0.53798777", "0.5378347", "0.53780407", "0.5370979", "0.537068", "0.5363705", "0.5362577", "0.5354037", "0.53489983", "0.5345658", "0.53388995", "0.53364646", "0.5329461", "0.53196454", "0.5318286", "0.5315872", "0.5315234", "0.5304485", "0.53001195", "0.5299326", "0.52938503", "0.52917665", "0.52832174", "0.5279362", "0.52780426", "0.52722657", "0.52706724", "0.52667236", "0.52655405", "0.52600735", "0.5255091", "0.5236856", "0.5236856", "0.52337" ]
0.6163392
5
NewEndpoint creates a new endpoint. To keep things simple, the endpoint listens on a fixed port number.
func NewEndpoint() *Endpoint { // Create a new Endpoint with an empty list of handler funcs. return &Endpoint{ handler: map[string]HandleFunc{}, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*protocol) NewEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber,\n\twaiterQueue *waiter.Queue) (tcpip.Endpoint, *tcpip.Error) {\n\treturn newEndpoint(stack, netProto, waiterQueue), nil\n}", "func NewEndpoint(service health.Service) *Endpoint {\n\treturn &Endpoint{\n\t\tservice: service,\n\t}\n}", "func NewEndpoint(resource, httpMethod, route string) *Endpoint {\n\treturn &Endpoint{\n\t\tResource: resource,\n\t\tHTTPMethod: httpMethod,\n\t\tRoute: route,\n\t\tBodyParameters: []*Parameter{},\n\t\tRequests: []*Request{},\n\t}\n}", "func newEndpoint() *testSocket {\n\tp := fmt.Sprintf(\"@%s.sock\", uuid.NewUUID())\n\n\treturn &testSocket{\n\t\tpath: p,\n\t\tendpoint: fmt.Sprintf(\"unix:///%s\", p),\n\t}\n}", "func NewEndpoint(config *config.Configs, result *config.ReturnResult) Endpoint {\n\treturn &endpoint{\n\t\tconfig: config,\n\t\tresult: result,\n\t\tservice: NewService(config, result),\n\t}\n}", "func NewEndpoint(ctx *pulumi.Context,\n\tname string, args *EndpointArgs, opts ...pulumi.ResourceOption) (*Endpoint, error) {\n\tif args == nil || args.EndpointId == nil {\n\t\treturn nil, errors.New(\"missing required argument 'EndpointId'\")\n\t}\n\tif args == nil || args.Service == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Service'\")\n\t}\n\tif args == nil {\n\t\targs = &EndpointArgs{}\n\t}\n\tvar resource Endpoint\n\terr := ctx.RegisterResource(\"gcp:servicedirectory/endpoint:Endpoint\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewEndpoint(conn *websocket.Conn, registry *Registry) *Endpoint {\n\tif registry == nil {\n\t\tregistry = dummyRegistry\n\t}\n\te := &Endpoint{}\n\te.conn = conn\n\te.server.registry = registry\n\te.client.pending = make(map[uint64]*rpc.Call)\n\treturn e\n}", "func NewEndpoint(network, address string, options ...Option) Endpoint {\n\treturn &endpoint{\n\t\tnetwork: network,\n\t\taddress: address,\n\t\toptions: 
options,\n\t}\n}", "func NewEndpoint(ws *websocket.Conn) *Endpoint {\n\tep := &Endpoint{WebSocket: ws, MessageChannel: make(chan EndpointMessage)}\n\tep.State = \"INITIAL\"\n\treturn ep\n}", "func NewEndpoint(dnsName, recordType string, targets ...string) *Endpoint {\n\treturn NewEndpointWithTTL(dnsName, recordType, TTL(0), targets...)\n}", "func New(bc component.Core) *Endpoint {\n\treturn &Endpoint{\n\t\tCore: bc,\n\t}\n}", "func newEndpoints() *Endpoints {\n\treturn &Endpoints{\n\t\tBackends: map[string]service.PortConfiguration{},\n\t}\n}", "func newServerEndpoint(impl *implementation, role role, args []string) (*endpoint, error) {\n\tusock, err := net.ListenUDP(\"udp\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, port, err := net.SplitHostPort(usock.LocalAddr().String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\targs = append(args, []string{\"-addr\", fmt.Sprintf(\"localhost:%s\", port)}...)\n\n\tcmd := exec.Command(impl.Path, append(impl.Args, args...)...)\n\n\tep := &endpoint{\n\t\t\"server\",\n\t\trole,\n\t\tusock,\n\t\tnil,\n\t\tcmd,\n\t\tnil,\n\t\tnil,\n\t\tnil,\n\t}\n\n\terr = ep.getOutputs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsport, err := ep.out.ReadString('\\n')\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdebug(\"Read server port=%v\", sport)\n\tsport = strings.TrimSpace(sport)\n\tep.addr, err = net.ResolveUDPAddr(\"udp\", fmt.Sprintf(\"localhost:%s\", sport))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ep, nil\n}", "func NewEndpoint(uuid, key string) *Endpoint {\n\treturn &Endpoint{Uuid: uuid, Key: key}\n}", "func NewEndpoint(name, url string, config EndpointConfig) *Endpoint {\n\tvar endpoint Endpoint\n\tendpoint.name = name\n\tendpoint.url = url\n\tendpoint.EndpointConfig = config\n\tendpoint.defaults()\n\tendpoint.metrics = newSafeMetrics(name)\n\n\t// Configures the inmemory queue, retry, http pipeline.\n\tendpoint.Sink = 
newHTTPSink(\n\t\tendpoint.url, endpoint.Timeout, endpoint.Headers,\n\t\tendpoint.Transport, endpoint.metrics.httpStatusListener())\n\tendpoint.Sink = events.NewRetryingSink(endpoint.Sink, events.NewBreaker(endpoint.Threshold, endpoint.Backoff))\n\tendpoint.Sink = newEventQueue(endpoint.Sink, endpoint.metrics.eventQueueListener())\n\tmediaTypes := append(config.Ignore.MediaTypes, config.IgnoredMediaTypes...)\n\tendpoint.Sink = newIgnoredSink(endpoint.Sink, mediaTypes, config.Ignore.Actions)\n\n\tregister(&endpoint)\n\treturn &endpoint\n}", "func newRESTEndpointService(hostPortStr string) endpointService {\n\treturn endpointService(\n\t\tnewRESTDiscoveryService(fmt.Sprintf(edsRestEndpointTemplate, hostPortStr)),\n\t)\n}", "func NewVirtualEndpoint()(*VirtualEndpoint) {\n m := &VirtualEndpoint{\n Entity: *NewEntity(),\n }\n return m\n}", "func New(lower tcpip.LinkEndpointID) tcpip.LinkEndpointID {\n\treturn stack.RegisterLinkEndpoint(&endpoint{\n\t\tlower: stack.FindLinkEndpoint(lower),\n\t})\n}", "func NewAddEndpoint(s Service) goa.Endpoint {\n\treturn func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\tp := req.(*AddPayload)\n\t\treturn s.Add(ctx, p)\n\t}\n}", "func NewLocalEndpoint() (*Endpoint, error) {\n\taddrs, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ip []string\n\tfor _, addr := range addrs {\n\t\tipnet, ok := addr.(*net.IPNet)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif ipnet.IP.IsLoopback() {\n\t\t\tcontinue\n\t\t}\n\t\tif ipnet.IP.To4() != nil {\n\t\t\tip = append(ip, ipnet.IP.String())\n\t\t}\n\t}\n\n\treturn &Endpoint{\n\t\tIP: ip,\n\t\tPort: make(map[string]int),\n\t}, nil\n}", "func NewAddEndpoint(s Service) goa.Endpoint {\n\treturn func(ctx context.Context, req any) (any, error) {\n\t\tep := req.(*AddEndpointInput)\n\t\treturn nil, s.Add(ctx, ep.Payload, ep.Stream)\n\t}\n}", "func newClientEndpoint(impl *implementation, role role, args []string) (*endpoint, error) {\n\tusock, err := 
net.ListenUDP(\"udp\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, port, err := net.SplitHostPort(usock.LocalAddr().String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\targs = append(args, []string{\"-addr\", fmt.Sprintf(\"localhost:%s\", port)}...)\n\n\tcmd := exec.Command(impl.Path, append(impl.Args, args...)...)\n\tep := &endpoint{\n\t\t\"client\",\n\t\trole,\n\t\tusock,\n\t\tnil,\n\t\tcmd,\n\t\tnil,\n\t\tnil,\n\t\tnil,\n\t}\n\n\terr = ep.getOutputs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ep, nil\n}", "func (t *Application_Application_Application) NewEndpoint(Name string) (*Application_Application_Application_Endpoint, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.Endpoint == nil {\n\t\tt.Endpoint = make(map[string]*Application_Application_Application_Endpoint)\n\t}\n\n\tkey := Name\n\n\t// Ensure that this key has not already been used in the\n\t// list. 
Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.Endpoint[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list Endpoint\", key)\n\t}\n\n\tt.Endpoint[key] = &Application_Application_Application_Endpoint{\n\t\tName: &Name,\n\t}\n\n\treturn t.Endpoint[key], nil\n}", "func NewEndpoints() Endpoints {\n\treturn Endpoints{\n\t\tendpoints: make([]*Endpoint, 0),\n\t\tmapUUID: make(map[string]int),\n\t}\n}", "func NewEndPoint(uid uint32, host string) *pb.EndPoint {\n\treturn &pb.EndPoint{\n\t\tUid: uid,\n\t\tHost: host,\n\t\tPortMap: make(map[string]int32),\n\t}\n}", "func makeEndpoint(hostport, serviceName string) *zipkincore.Endpoint {\n\thost, port, err := net.SplitHostPort(hostport)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tportInt, err := strconv.ParseInt(port, 10, 16)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\taddrs, err := net.LookupIP(host)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tvar addr4, addr16 net.IP\n\tfor i := range addrs {\n\t\tif addr := addrs[i].To4(); addr == nil {\n\t\t\tif addr16 == nil {\n\t\t\t\taddr16 = addrs[i].To16() // IPv6 - 16 bytes\n\t\t\t}\n\t\t} else {\n\t\t\tif addr4 == nil {\n\t\t\t\taddr4 = addr // IPv4 - 4 bytes\n\t\t\t}\n\t\t}\n\t\tif addr16 != nil && addr4 != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tif addr4 == nil {\n\t\tif addr16 == nil {\n\t\t\treturn nil\n\t\t}\n\t\t// we have an IPv6 but no IPv4, code IPv4 as 0 (none found)\n\t\taddr4 = []byte(\"\\x00\\x00\\x00\\x00\")\n\t}\n\n\tendpoint := zipkincore.NewEndpoint()\n\tendpoint.Ipv4 = (int32)(binary.BigEndian.Uint32(addr4))\n\tendpoint.Ipv6 = []byte(addr16)\n\tendpoint.Port = int16(portInt)\n\tendpoint.ServiceName = serviceName\n\n\treturn endpoint\n}", "func New(e *calc.Endpoints, uh goagrpc.UnaryHandler) *Server {\n\treturn &Server{\n\t\tAddH: NewAddHandler(e.Add, uh),\n\t}\n}", "func New(nftOutPath string) Endpoint {\n\treturn config{\n\t\tnftOutPath: nftOutPath,\n\t}\n}", "func NewEndpointFactory(cluster string, nvbs int) 
c.RouterEndpointFactory {\n\n\treturn func(topic, endpointType, addr string, config c.Config) (c.RouterEndpoint, error) {\n\t\tswitch endpointType {\n\t\tcase \"dataport\":\n\t\t\treturn dataport.NewRouterEndpoint(cluster, topic, addr, nvbs, config)\n\t\tdefault:\n\t\t\tlog.Fatal(\"Unknown endpoint type\")\n\t\t}\n\t\treturn nil, nil\n\t}\n}", "func (proxier *Proxier) newEndpointInfo(baseInfo *proxy.BaseEndpointInfo) proxy.Endpoint {\n\n\tportNumber, err := baseInfo.Port()\n\n\tif err != nil {\n\t\tportNumber = 0\n\t}\n\n\tinfo := &endpointsInfo{\n\t\tip: baseInfo.IP(),\n\t\tport: uint16(portNumber),\n\t\tisLocal: baseInfo.GetIsLocal(),\n\t\tmacAddress: conjureMac(\"02-11\", netutils.ParseIPSloppy(baseInfo.IP())),\n\t\trefCount: new(uint16),\n\t\thnsID: \"\",\n\t\thns: proxier.hns,\n\n\t\tready: baseInfo.Ready,\n\t\tserving: baseInfo.Serving,\n\t\tterminating: baseInfo.Terminating,\n\t}\n\n\treturn info\n}", "func New(endpoint api.Endpoint, authenticator Authenticator) api.Endpoint {\n\tif authenticator == nil {\n\t\tauthenticator = &noOpAuthenticator{}\n\t}\n\n\treturn &proxyEndpoint{\n\t\tendpoint: endpoint,\n\t\tauthenticator: authenticator,\n\t\tlogger: logging.GetLogger(\"ias/proxy\"),\n\t}\n}", "func newAgentEndpoint(id string, conn *websocket.Conn, user string) *agentEndpoint {\n\treturn &agentEndpoint{id, conn, &sync.RWMutex{}, user, false}\n}", "func (c *Client) New() goa.Endpoint {\n\tvar (\n\t\tdecodeResponse = DecodeNewResponse(c.decoder, c.RestoreResponseBody)\n\t)\n\treturn func(ctx context.Context, v interface{}) (interface{}, error) {\n\t\treq, err := c.BuildNewRequest(ctx, v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresp, err := c.NewDoer.Do(req)\n\t\tif err != nil {\n\t\t\treturn nil, goahttp.ErrRequestError(\"spin-broker\", \"new\", err)\n\t\t}\n\t\treturn decodeResponse(resp)\n\t}\n}", "func CreateEndpoint(ctx iris.Context) {\n\t// Add logic to check if given ports exits\n\turi := ctx.Request().RequestURI\n\tfabricID := 
ctx.Params().Get(\"id\")\n\tfabricData, ok := capdata.FabricDataStore.Data[fabricID]\n\tif !ok {\n\t\terrMsg := fmt.Sprintf(\"Fabric data for uri %s not found\", uri)\n\t\tlog.Error(errMsg)\n\t\tresp := updateErrorResponse(response.ResourceNotFound, errMsg, []interface{}{\"Fabric\", fabricID})\n\t\tctx.StatusCode(http.StatusNotFound)\n\t\tctx.JSON(resp)\n\t\treturn\n\t}\n\n\tvar endpoint model.Endpoint\n\terr := ctx.ReadJSON(&endpoint)\n\tif err != nil {\n\t\terrorMessage := \"error while trying to get JSON body from the request: \" + err.Error()\n\t\tlog.Error(errorMessage)\n\t\tresp := updateErrorResponse(response.MalformedJSON, errorMessage, nil)\n\t\tctx.StatusCode(http.StatusBadRequest)\n\t\tctx.JSON(resp)\n\t\treturn\n\t}\n\tif len(endpoint.Redundancy) < 1 {\n\t\terrMsg := fmt.Sprintf(\"Endpoint cannot be created, Redudancy in the request is missing: \" + err.Error())\n\t\tresp := updateErrorResponse(response.PropertyMissing, errMsg, []interface{}{\"Redundancy\"})\n\t\tctx.StatusCode(http.StatusBadRequest)\n\t\tctx.JSON(resp)\n\t\treturn\n\t}\n\tif len(endpoint.Redundancy[0].RedundancySet) == 0 {\n\t\terrMsg := fmt.Sprintf(\"Endpoint cannot be created, RedudancySet in the request is missing: \" + err.Error())\n\t\tresp := updateErrorResponse(response.PropertyMissing, errMsg, []interface{}{\"RedudancySet\"})\n\t\tctx.StatusCode(http.StatusBadRequest)\n\t\tctx.JSON(resp)\n\t\treturn\n\t}\n\t// get all existing endpoints under fabric check for the name\n\tfor _, endpointData := range capdata.EndpointDataStore {\n\t\tif endpoint.Name == endpointData.Endpoint.Name {\n\t\t\terrMsg := \"Endpoint name is already assigned to other endpoint:\" + endpointData.Endpoint.Name\n\t\t\tresp := updateErrorResponse(response.ResourceAlreadyExists, errMsg, []interface{}{\"Endpoint\", endpointData.Endpoint.Name, endpoint.Name})\n\t\t\tctx.StatusCode(http.StatusConflict)\n\t\t\tctx.JSON(resp)\n\t\t\treturn\n\t\t}\n\t}\n\tvar switchURI = \"\"\n\tvar portPattern = \"\"\n\tportList := 
make(map[string]bool)\n\t// check if given ports are present in plugin database\n\tfor i := 0; i < len(endpoint.Redundancy[0].RedundancySet); i++ {\n\t\tportURI := endpoint.Redundancy[0].RedundancySet[i].Oid\n\t\tif _, ok := portList[endpoint.Redundancy[0].RedundancySet[i].Oid]; ok {\n\t\t\terrMsg := \"Duplicate port passed in the request\"\n\t\t\tresp := updateErrorResponse(response.PropertyValueConflict, errMsg, []interface{}{endpoint.Redundancy[0].RedundancySet[i].Oid, endpoint.Redundancy[0].RedundancySet[i].Oid})\n\t\t\tctx.StatusCode(http.StatusBadRequest)\n\t\t\tctx.JSON(resp)\n\t\t\treturn\n\n\t\t}\n\t\tportList[endpoint.Redundancy[0].RedundancySet[i].Oid] = true\n\n\t\t_, statusCode, resp := getPortData(portURI)\n\t\tif statusCode != http.StatusOK {\n\t\t\tctx.StatusCode(statusCode)\n\t\t\tctx.JSON(resp)\n\t\t\treturn\n\t\t}\n\t\tstatusCode, resp = checkEndpointPortMapping(endpoint.Redundancy[0].RedundancySet[i].Oid)\n\t\tif statusCode != http.StatusOK {\n\t\t\tctx.StatusCode(statusCode)\n\t\t\tctx.JSON(resp)\n\t\t\treturn\n\t\t}\n\t\tportURIData := strings.Split(portURI, \"/\")\n\t\tswitchID := portURIData[6]\n\t\tswitchIDData := strings.Split(switchID, \":\")\n\t\tswitchURI = switchURI + \"-\" + switchIDData[1]\n\t\tportIDData := strings.Split(portURIData[8], \":\")\n\t\ttmpPortPattern := strings.Replace(portIDData[1], \"eth\", \"\", -1)\n\t\ttmpPortPattern = strings.Replace(tmpPortPattern, \"-\", \"-ports-\", -1)\n\t\tportPattern = tmpPortPattern\n\t}\n\n\tportPolicyGroupList, err := caputilities.GetPortPolicyGroup(fabricData.PodID, switchURI)\n\tif err != nil || len(portPolicyGroupList) == 0 {\n\t\terrMsg := \"Port policy group not found for given ports\"\n\t\tlog.Error(errMsg)\n\t\tresp := updateErrorResponse(response.ResourceNotFound, errMsg, []interface{}{\"protpaths\" + switchURI, \"PolicyGroup\"})\n\t\tctx.StatusCode(http.StatusNotFound)\n\t\tctx.JSON(resp)\n\t\treturn\n\n\t}\n\tpolicyGroupDN := \"\"\n\tfor i := 0; i < len(portPolicyGroupList); i++ 
{\n\t\tif strings.Contains(portPolicyGroupList[i].BaseAttributes.DistinguishedName, portPattern) {\n\t\t\tpolicyGroupDN = portPolicyGroupList[i].BaseAttributes.DistinguishedName\n\t\t}\n\t}\n\tif policyGroupDN == \"\" {\n\t\terrMsg := \"Port policy group not found for given ports\"\n\t\tlog.Error(errMsg)\n\t\tresp := updateErrorResponse(response.ResourceNotFound, errMsg, []interface{}{portPattern, \"PolicyGroup\"})\n\t\tctx.StatusCode(http.StatusNotFound)\n\t\tctx.JSON(resp)\n\t\treturn\n\t}\n\tlog.Info(\"Dn of Policy group:\" + policyGroupDN)\n\tsaveEndpointData(uri, fabricID, policyGroupDN, &endpoint)\n\tcommon.SetResponseHeader(ctx, map[string]string{\n\t\t\"Location\": endpoint.ODataID,\n\t})\n\tctx.StatusCode(http.StatusCreated)\n\tctx.JSON(endpoint)\n}", "func NewEndpoint(githubReporter Reporter, intercomReporter Reporter, storage Uploader, rateLimiter *infra.RateLimiter) *Endpoint {\n\treturn &Endpoint{githubReporter: githubReporter, storage: storage, rateLimiter: rateLimiter, intercomReporter: intercomReporter}\n}", "func New(e *goastarter.Endpoints, uh goagrpc.UnaryHandler) *Server {\n\treturn &Server{\n\t\tAddH: NewAddHandler(e.Add, uh),\n\t}\n}", "func NewEndpointCreated() filters.Spec {\n\tvar ec endpointCreated\n\treturn ec\n}", "func NewListenEndpoint(s Service) goa.Endpoint {\n\treturn func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\tep := req.(*ListenEndpointInput)\n\t\treturn nil, s.Listen(ctx, ep.Stream)\n\t}\n}", "func MakeAddNodeEndpoint(s registry.Service) endpoint.Endpoint {\n\treturn func(ctx context.Context, request interface{}) (interface{}, error) {\n\t\treq := request.(AddNodeRequest)\n\t\terr := s.AddNode(ctx, req.Token, req.Node)\n\t\treturn AddNodeResponse{Err: err}, nil\n\t}\n}", "func NewEndpointNode(options ...Option) *EndpointNode {\n\tn := &EndpointNode{\n\t\tBasicNode: &BasicNode{},\n\t}\n\tapply(n, options...)\n\treturn n\n}", "func New(config Config) (*Endpoint, error) {\n\tvar err error\n\n\tvar 
searcherEndpoint *searcher.Endpoint\n\t{\n\t\tsearcherConfig := searcher.Config{\n\t\t\tLogger: config.Logger,\n\t\t\tMiddleware: config.Middleware,\n\t\t\tService: config.Service,\n\t\t}\n\t\tsearcherEndpoint, err = searcher.New(searcherConfig)\n\t\tif err != nil {\n\t\t\treturn nil, microerror.Mask(err)\n\t\t}\n\t}\n\n\tnewEndpoint := &Endpoint{\n\t\tSearcher: searcherEndpoint,\n\t}\n\n\treturn newEndpoint, nil\n}", "func NewEndpointResource(e Endpointer) EndpointResource {\n\treturn EndpointResource{\n\t\tResource: \"endpoint\",\n\t\tName: e.GetName(),\n\t\tPath: e.GetPath(),\n\t\tMethodsList: GetMethodsList(e),\n\t\tMethods: GetMethods(e),\n\t\tMediaTypesList: GetContentTypesList(hAPI, e),\n\t\tMediaTypes: GetContentTypes(hAPI, e),\n\t\tDesc: e.GetDesc(),\n\t\tParams: createEndpointResourceParams(e),\n\t}\n}", "func NewHelloEndpoint(s Service) goa.Endpoint {\n\treturn func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn s.Hello(ctx)\n\t}\n}", "func NewEndpoints() *Endpoints {\n\treturn &Endpoints{}\n}", "func NewEndpoints(s Service) *Endpoints {\n\treturn &Endpoints{\n\t\tHello: NewHelloEndpoint(s),\n\t}\n}", "func (d *Driver) CreateEndpoint(r *sdk.CreateEndpointRequest) (*sdk.CreateEndpointResponse, error) {\n\tendID := r.EndpointID\n\tnetID := r.NetworkID\n\teInfo := r.Interface\n\tlog.Debugf(\"CreateEndpoint called :%v\", r)\n\t// Get the network handler and make sure it exists\n\td.Lock()\n\tnetwork, ok := d.networks[r.NetworkID]\n\td.Unlock()\n\n\tif !ok {\n\t\treturn nil, types.NotFoundErrorf(\"network %s does not exist\", netID)\n\t}\n\n\t// Try to convert the options to endpoint configuration\n\tepConfig, err := parseEndpointOptions(r.Options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create and add the endpoint\n\tnetwork.Lock()\n\tendpoint := &bridgeEndpoint{id: endID, nid: netID, config: epConfig}\n\tnetwork.endpoints[endID] = endpoint\n\tnetwork.Unlock()\n\n\t// On failure make sure to remove the endpoint\n\tdefer 
func() {\n\t\tif err != nil {\n\t\t\tnetwork.Lock()\n\t\t\tdelete(network.endpoints, endID)\n\t\t\tnetwork.Unlock()\n\t\t}\n\t}()\n\n\t// Generate a name for what will be the host side pipe interface\n\thostIfName, err := netutils.GenerateIfaceName(d.nlh, vethPrefix, vethLen)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Generate a name for what will be the sandbox side pipe interface\n\tcontainerIfName := network.config.ContainerIfName\n\n\t// Generate and add the interface pipe host <-> sandbox\n\tveth := &netlink.Veth{\n\t\tLinkAttrs: netlink.LinkAttrs{Name: hostIfName, TxQLen: 0},\n\t\tPeerName: containerIfName}\n\tif err = d.nlh.LinkAdd(veth); err != nil {\n\t\treturn nil, types.InternalErrorf(\"failed to add the host (%s) <=> sandbox (%s) pair interfaces: %v\", hostIfName, containerIfName, err)\n\t}\n\n\t// Get the host side pipe interface handler\n\thost, err := d.nlh.LinkByName(hostIfName)\n\tif err != nil {\n\t\treturn nil, types.InternalErrorf(\"failed to find host side interface %s: %v\", hostIfName, err)\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\td.nlh.LinkDel(host)\n\t\t}\n\t}()\n\n\t// Get the sandbox side pipe interface handler\n\tsbox, err := d.nlh.LinkByName(containerIfName)\n\tif err != nil {\n\t\treturn nil, types.InternalErrorf(\"failed to find sandbox side interface %s: %v\", containerIfName, err)\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\td.nlh.LinkDel(sbox)\n\t\t}\n\t}()\n\n\tnetwork.Lock()\n\tconfig := network.config\n\tnetwork.Unlock()\n\n\t// Add bridge inherited attributes to pipe interfaces\n\tif config.Mtu != 0 {\n\t\terr = d.nlh.LinkSetMTU(host, config.Mtu)\n\t\tif err != nil {\n\t\t\treturn nil, types.InternalErrorf(\"failed to set MTU on host interface %s: %v\", hostIfName, err)\n\t\t}\n\t\terr = d.nlh.LinkSetMTU(sbox, config.Mtu)\n\t\tif err != nil {\n\t\t\treturn nil, types.InternalErrorf(\"failed to set MTU on sandbox interface %s: %v\", containerIfName, err)\n\t\t}\n\t}\n\n\t// Attach host side pipe 
interface into the bridge\n\tif err = addToBridge(d.nlh, hostIfName, config.BridgeName); err != nil {\n\t\treturn nil, fmt.Errorf(\"adding interface %s to bridge %s failed: %v\", hostIfName, config.BridgeName, err)\n\t}\n\n\t// Store the sandbox side pipe interface parameters\n\tendpoint.srcName = containerIfName\n\tendpoint.macAddress = eInfo.MacAddress\n\tendpoint.addr = eInfo.Address\n\tendpoint.addrv6 = eInfo.AddressIPv6\n\n\t// Up the host interface after finishing all netlink configuration\n\tif err = d.nlh.LinkSetUp(host); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not set link up for host interface %s: %v\", hostIfName, err)\n\t}\n\n\tres := &sdk.CreateEndpointResponse{\n\t\tInterface: &sdk.EndpointInterface{\n\t\t\tAddress: endpoint.addr,\n\t\t\tMacAddress: endpoint.macAddress,\n\t\t},\n\t}\n\n\tlog.Debugf(\"Create endpoint response: %+v\", res)\n\tlog.Debugf(\"Create endpoint %s %+v\", endID, res)\n\treturn res, nil\n}", "func New(port int, version string, requestsCh chan<- []byte) (*Service, error) {\n\tif !(1 <= port && port <= 65535) {\n\t\treturn nil, errors.New(\"ws.new: wrong port\")\n\t}\n\n\tvc, err := versionToConstr(version)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tws := &Service{\n\t\tverConstr: vc,\n\t\tconns: make(map[string]conn, 10),\n\t\tdone: make(chan struct{}),\n\t\trequestsCh: requestsCh,\n\t}\n\n\tws.upgrader.CheckOrigin = func(r *http.Request) bool {\n\t\treturn true // allow all origins\n\t}\n\n\tws.upgrader.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {\n\t\terr := writeError(w, reason, status)\n\t\tif err != nil {\n\t\t\tlogger := r.Context().Value(loggerKey).(*log.Entry)\n\t\t\tlogger.WithError(err).Error(\"write error (ws handler)\")\n\t\t}\n\t}\n\n\tsrv := &http.Server{\n\t\tAddr: \"localhost:\" + strconv.Itoa(port),\n\t\tHandler: requestsWrapper(ws.handler),\n\t\tReadTimeout: httpTimeout,\n\t\tWriteTimeout: httpTimeout,\n\t\tMaxHeaderBytes: httpMaxHeaderBytes,\n\t}\n\n\tws.srv = 
srv\n\n\treturn ws, nil\n}", "func NewServiceEndpoint(\n\tname string,\n\tprotocolName ProtoID,\n\te endpoint.Endpoint,\n\tdec DecodeRequestFunc,\n\tenc EncodeResponseFunc,\n) (*ServiceEndpoint, error) {\n\tif name == \"\" {\n\t\treturn nil, ErrNoName\n\t}\n\n\tif len(protocolName) < 3 || (protocolName[0] == 0 && protocolName[1] == 0 && protocolName[2] == 0) {\n\t\treturn nil, ErrInvalidProtoID\n\t}\n\n\tif e == nil {\n\t\treturn nil, ErrNoEndpoint\n\t}\n\n\tif dec == nil {\n\t\tdec = StdDencode\n\t}\n\n\tif enc == nil {\n\t\tenc = StdDencode\n\t}\n\n\treturn &ServiceEndpoint{\n\t\tName: name,\n\t\tProtocolName: protocolName,\n\t\tE: e,\n\t\tDec: dec,\n\t\tEnc: enc,\n\t}, nil\n}", "func New(endpoint string) *Client {\n\treturn &Client{\n\t\tendpoint: endpoint,\n\t}\n}", "func newGRPCEndpointService(hostPortStr string) (clusterService, error) {\n\tconn, err := mkConnection(hostPortStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\teds := envoyapi.NewEndpointDiscoveryServiceClient(conn)\n\n\treturn endpointService(\n\t\tfnDiscoveryService{\n\t\t\tfetchFn: func(req *envoyapi.DiscoveryRequest) (*envoyapi.DiscoveryResponse, error) {\n\t\t\t\treturn eds.FetchEndpoints(context.Background(), req)\n\t\t\t},\n\t\t\tcloseFn: conn.Close,\n\t\t},\n\t), nil\n}", "func NewEndpoints(endpoint string) (*Endpoints, error) {\n\tif endpoint == \"\" {\n\t\treturn nil, fmt.Errorf(\"endpoint is required\")\n\t}\n\tep, err := url.Parse(endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\troles, err := urlJoin(ep, \"roles\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tusers, err := urlJoin(ep, \"users\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinputs, err := urlJoin(ep, \"system/inputs\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tindexSets, err := urlJoin(ep, \"system/indices/index_sets\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tindexSetStats, err := urlJoin(indexSets, \"stats\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstreams, err := urlJoin(ep, 
\"streams\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tenabledStreams, err := urlJoin(streams, \"enabled\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\talertConditions, err := urlJoin(ep, \"alerts/conditions\")\n\treturn &Endpoints{\n\t\troles: roles,\n\t\tusers: users,\n\t\tinputs: inputs,\n\t\tindexSets: indexSets,\n\t\tindexSetStats: indexSetStats,\n\t\tstreams: streams,\n\t\tenabledStreams: enabledStreams,\n\t\talertConditions: alertConditions,\n\t}, nil\n}", "func New(addr string, port int) *Server {\n\tctx, cancel := context.WithCancel(context.Background())\n\treturn &Server{\n\t\taddr: addr,\n\t\tport: port,\n\t\tctx: ctx,\n\t\tctxCancel: cancel,\n\t}\n}", "func NewDevEndpoint(ctx *pulumi.Context,\n\tname string, args *DevEndpointArgs, opts ...pulumi.ResourceOption) (*DevEndpoint, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.RoleArn == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'RoleArn'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource DevEndpoint\n\terr := ctx.RegisterResource(\"aws-native:glue:DevEndpoint\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (a *DefaultApiService) CreateEndpoint(ctx _context.Context) ApiCreateEndpointRequest {\n\treturn ApiCreateEndpointRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func New(endpoint string) *EcomClient {\n\ttr := &http.Transport{\n\t\tMaxIdleConnsPerHost: 10,\n\t}\n\tclient := &http.Client{\n\t\tTransport: tr,\n\t\tTimeout: timeout,\n\t}\n\n\turl, err := url.Parse(endpoint)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%+v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\treturn &EcomClient{\n\t\tendpoint: endpoint,\n\t\tscheme: url.Scheme,\n\t\thostname: url.Host,\n\t\tport: url.Port(),\n\t\tclient: client,\n\t}\n}", "func New(network, endpoint string) (h *Handler, err error) {\n\tconn, err := net.Dial(network, 
endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Handler{\n\t\tconn: conn,\n\t\tenc: json.NewEncoder(conn),\n\t}, nil\n}", "func (r *ForwarderRequest) CreateEndpoint(queue *waiter.Queue) (tcpip.Endpoint, tcpip.Error) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tif r.segment == nil {\n\t\treturn nil, &tcpip.ErrInvalidEndpointState{}\n\t}\n\n\tf := r.forwarder\n\tep, err := f.listen.performHandshake(r.segment, header.TCPSynOptions{\n\t\tMSS: r.synOptions.MSS,\n\t\tWS: r.synOptions.WS,\n\t\tTS: r.synOptions.TS,\n\t\tTSVal: r.synOptions.TSVal,\n\t\tTSEcr: r.synOptions.TSEcr,\n\t\tSACKPermitted: r.synOptions.SACKPermitted,\n\t}, queue, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ep, nil\n}", "func NewEndpointWithTTL(dnsName, recordType string, ttl TTL, targets ...string) *Endpoint {\n\tcleanTargets := make([]string, len(targets))\n\tfor idx, target := range targets {\n\t\tcleanTargets[idx] = strings.TrimSuffix(target, \".\")\n\t}\n\n\treturn &Endpoint{\n\t\tDNSName: strings.TrimSuffix(dnsName, \".\"),\n\t\tTargets: cleanTargets,\n\t\tRecordType: recordType,\n\t\tLabels: NewLabels(),\n\t\tRecordTTL: ttl,\n\t}\n}", "func NewEndpointRegistry(\n\tid wire.Account,\n\tonNewEndpoint func(wire.Address) wire.Consumer,\n\tdialer Dialer,\n\tser wire.EnvelopeSerializer,\n) *EndpointRegistry {\n\treturn &EndpointRegistry{\n\t\tid: id,\n\t\tonNewEndpoint: onNewEndpoint,\n\t\tdialer: dialer,\n\t\tser: ser,\n\n\t\tendpoints: make(map[wire.AddrKey]*fullEndpoint),\n\t\tdialing: make(map[wire.AddrKey]*dialingEndpoint),\n\n\t\tEmbedding: log.MakeEmbedding(log.WithField(\"id\", id.Address())),\n\t}\n}", "func NewEndpoints(s Service) *Endpoints {\n\treturn &Endpoints{\n\t\tAdd: NewAddEndpoint(s),\n\t}\n}", "func New(e *step.Endpoints, uh goagrpc.UnaryHandler) *Server {\n\treturn &Server{\n\t\tListH: NewListHandler(e.List, uh),\n\t\tAddH: NewAddHandler(e.Add, uh),\n\t\tRemoveH: NewRemoveHandler(e.Remove, uh),\n\t\tUpdateH: NewUpdateHandler(e.Update, 
uh),\n\t}\n}", "func New(mtu, bufferSize uint32, addr tcpip.LinkAddress, tx, rx QueueConfig) (stack.LinkEndpoint, error) {\n\te := &endpoint{\n\t\tmtu: mtu,\n\t\tbufferSize: bufferSize,\n\t\taddr: addr,\n\t}\n\n\tif err := e.tx.init(bufferSize, &tx); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := e.rx.init(bufferSize, &rx); err != nil {\n\t\te.tx.cleanup()\n\t\treturn nil, err\n\t}\n\n\treturn e, nil\n}", "func NewEndpoints(s Service) Endpoints {\n\treturn endpoints{\n\t\tservice: s,\n\t}\n}", "func CreateEndpoint(projectProvider provider.ProjectProvider, privilegedProjectProvider provider.PrivilegedProjectProvider, serviceAccountProvider provider.ServiceAccountProvider, privilegedServiceAccount provider.PrivilegedServiceAccountProvider, userInfoGetter provider.UserInfoGetter) endpoint.Endpoint {\n\treturn func(ctx context.Context, request interface{}) (interface{}, error) {\n\t\treq := request.(addReq)\n\t\terr := req.Validate()\n\t\tif err != nil {\n\t\t\treturn nil, errors.NewBadRequest(err.Error())\n\t\t}\n\t\tsaFromRequest := req.Body\n\t\tproject, err := common.GetProject(ctx, userInfoGetter, projectProvider, privilegedProjectProvider, req.ProjectID)\n\t\tif err != nil {\n\t\t\treturn nil, common.KubernetesErrorToHTTPError(err)\n\t\t}\n\n\t\t// check if service account name is already reserved in the project\n\t\texistingSAList, err := listSA(ctx, serviceAccountProvider, privilegedServiceAccount, userInfoGetter, project, &saFromRequest)\n\t\tif err != nil {\n\t\t\treturn nil, common.KubernetesErrorToHTTPError(err)\n\t\t}\n\n\t\tif len(existingSAList) > 0 {\n\t\t\treturn nil, errors.NewAlreadyExists(\"service account\", saFromRequest.Name)\n\t\t}\n\n\t\tsa, err := createSA(ctx, serviceAccountProvider, privilegedServiceAccount, userInfoGetter, project, saFromRequest)\n\t\tif err != nil {\n\t\t\treturn nil, common.KubernetesErrorToHTTPError(err)\n\t\t}\n\n\t\treturn convertInternalServiceAccountToExternal(sa), nil\n\t}\n}", "func NewExposedPort(port 
string) (*ExposedPort, error) {\n\tparts := strings.SplitN(port, \"/\", 2)\n\tif len(parts) != 2 {\n\t\treturn nil, errors.New(\"invalid port: \" + port)\n\t}\n\tportInt, err := strconv.Atoi(parts[0])\n\tif err != nil {\n\t\treturn nil, errors.New(\"invalid port: \" + port)\n\t}\n\treturn &ExposedPort{\n\t\tPort: portInt,\n\t\tProtocol: strings.ToUpper(parts[1]),\n\t}, nil\n}", "func New(port int, backend string) (*Proxy, error) {\n\tu, err := url.Parse(backend)\n\n\tif err != nil {\n\t\treturn new(Proxy), err\n\t}\n\n\tl, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", port))\n\n\tif err != nil {\n\t\treturn new(Proxy), err\n\t}\n\n\treturn &Proxy{l, u}, nil\n}", "func newService(namespace, name string) *v1.Service {\n\treturn &v1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t\tLabels: labelMap(),\n\t\t},\n\t\tSpec: v1.ServiceSpec{\n\t\t\tSelector: labelMap(),\n\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t{Name: \"port-1338\", Port: 1338, Protocol: \"TCP\", TargetPort: intstr.FromInt(1338)},\n\t\t\t\t{Name: \"port-1337\", Port: 1337, Protocol: \"TCP\", TargetPort: intstr.FromInt(1337)},\n\t\t\t},\n\t\t},\n\t}\n\n}", "func CreateEndpoint(w http.ResponseWriter, req *http.Request) {\n\tvar url ShortURL\n\t_ = json.NewDecoder(req.Body).Decode($url)\n\tvar n1q1Params []interface{}\n\tn1q1Params = append(n1q1Params, url.LongURL)\n\tquery := gocb.NewN1qlQuery(\"SELECT `\" + bucketName + \"`.* FROM `\" + bucketName + \"` WHERE longUrl = $1\")\n\trows, err := bucket.ExecuteN1qlQuery(query, n1qlParams)\n\tif err != nil {\n w.WriteHeader(401)\n w.Write([]byte(err.Error()))\n return\n\t}\n\tvar row ShortURL\n rows.One(&row)\n if row == (ShortURL{}) {\n hd := hashids.NewData()\n h := hashids.NewWithData(hd)\n now := time.Now()\n url.ID, _ = h.Encode([]int{int(now.Unix())})\n url.ShortUrl = \"http://localhost:12345/\" + url.ID\n bucket.Insert(url.ID, url, 0)\n } else {\n url = row\n }\n json.NewEncoder(w).Encode(url)\n}", "func 
New() Port {\n\treturn &port{}\n}", "func New() HelloServer {\n\thttp.DefaultServeMux = new(http.ServeMux)\n\treturn HelloServer{\n\t\t&http.Server{\n\t\t\tAddr: \":7100\",\n\t\t},\n\t}\n}", "func NewEndpointMiddleware() endpoint.Middleware {\n\treturn func(next endpoint.Endpoint) endpoint.Endpoint {\n\t\treturn func(ctx context.Context, req interface{}) (resp interface{}, err error) {\n\t\t\tctx = New().WithCtx(ctx)\n\t\t\treturn next(ctx, req)\n\t\t}\n\t}\n}", "func NewEndpoints(s service.Service) Endpoints {\n\treturn Endpoints{\n\t\tGenerateEndpoint: MakeGenerateEndpoint(s),\n\t\tVerifyEndpoint: MakeVerifyEndpoint(s),\n\t}\n}", "func NewEndpoints(c Configuration, alternate func() (Endpoints, error)) (Endpoints, error) {\n\tif endpoints := c.endpoints(); len(endpoints) > 0 {\n\t\treturn ParseURLs(endpoints...)\n\t}\n\n\tif alternate != nil {\n\t\treturn alternate()\n\t}\n\n\treturn nil, errNoConfiguredEndpoints\n}", "func (r *EndpointRegistry) addEndpoint(addr wire.Address, conn Conn, dialer bool) *Endpoint {\n\tr.Log().WithField(\"peer\", addr).Trace(\"EndpointRegistry.addEndpoint\")\n\n\te := newEndpoint(addr, conn)\n\tfe, created := r.fullEndpoint(addr, e)\n\tif !created {\n\t\tif e, closed := fe.replace(e, r.id.Address(), dialer); closed {\n\t\t\treturn e\n\t\t}\n\t}\n\n\tconsumer := r.onNewEndpoint(addr)\n\t// Start receiving messages.\n\tgo func() {\n\t\tif err := e.recvLoop(consumer); err != nil {\n\t\t\tr.Log().WithError(err).Error(\"recvLoop finished unexpectedly\")\n\t\t}\n\t\tfe.delete(e)\n\t}()\n\n\treturn e\n}", "func NewEndpointCore(id string, name string, discoveredBy string, dims map[string]string) *EndpointCore {\n\tif id == \"\" {\n\t\t// Observers must provide an ID or else they are majorly broken\n\t\tpanic(\"EndpointCore cannot be created without an id\")\n\t}\n\n\tec := &EndpointCore{\n\t\tID: ID(id),\n\t\tName: name,\n\t\tDiscoveredBy: discoveredBy,\n\t\textraDimensions: dims,\n\t\textraFields: map[string]interface{}{},\n\t}\n\n\treturn 
ec\n}", "func EndpointFactory(args *endpoint.Arg, stats *stats.Stats, workerCount uint) (endpoint.EndPoint, error) {\n\tif FailSetup {\n\t\treturn nil, errors.New(\"Forced Error\")\n\t}\n\treturn &fakeEndpoint{}, nil\n}", "func New(endpoint string) *Client {\n\treturn &Client{endpoint, &http.Client{}, \"\"}\n}", "func MakeNewSiteEndpoint(svc service.Service) endpoint.Endpoint {\n\treturn func(ctx context.Context, request interface{}) (response interface{}, err error) {\n\t\treq := request.(NewSiteRequest)\n\t\tid, err := svc.NewSite(ctx, req.SiteName)\n\t\treturn NewSiteResponse{SiteID: id, Err: err}, nil\n\t}\n}", "func New(instance string, options ...httptransport.ClientOption) (pb.CustomerServer, error) {\n\n\tif !strings.HasPrefix(instance, \"http\") {\n\t\tinstance = \"http://\" + instance\n\t}\n\tu, err := url.Parse(instance)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_ = u\n\n\tpanic(\"No HTTP Endpoints, this client will not work, define bindings in your proto definition\")\n\n\treturn svc.Endpoints{}, nil\n}", "func newHTTPServer(appConfig config.AppConfig, logger services.Logger) services.HTTPServer {\n\treturn services.NewDefaultHTTPServer(appConfig.Port, logger)\n}", "func Create(port string) (net.Listener, *grpc.Server) {\n\t//TODO: Find a better way to pass \"127.0.0.1:\"\n\tlis, err := net.Listen(\"tcp\", \"127.0.0.1:\"+port)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"port\": port,\n\t\t}).WithError(err).Fatal(\"Failed to bind port !\")\n\t\tlog.Print(\"Trying to bind onto another port !\")\n\t}\n\treturn lis, grpc.NewServer()\n}", "func NewEndpoints(s Service) *Endpoints {\n\treturn &Endpoints{\n\t\tList: NewListEndpoint(s),\n\t\tGet: NewGetEndpoint(s),\n\t\tRandomFacts: NewRandomFactsEndpoint(s),\n\t}\n}", "func CreateEP(t *testing.T, ns string, Name string, multiPort bool, multiAddress bool, addressPrefix string, multiProtocol ...corev1.Protocol) {\n\tif addressPrefix == \"\" {\n\t\taddressPrefix = \"1.1.1\"\n\t}\n\tvar 
endpointSubsets []corev1.EndpointSubset\n\tnumPorts, numAddresses, addressStart := 1, 1, 0\n\tif multiPort {\n\t\tnumPorts = 3\n\t}\n\tif len(multiProtocol) != 0 {\n\t\tnumPorts = len(multiProtocol)\n\t}\n\tif multiAddress {\n\t\tnumAddresses, addressStart = 3, 0\n\t}\n\n\tfor i := 0; i < numPorts; i++ {\n\t\tprotocol := corev1.ProtocolTCP\n\t\tif len(multiProtocol) != 0 {\n\t\t\tprotocol = multiProtocol[i]\n\t\t}\n\t\tmPort := 8080 + i\n\n\t\tvar addressStartIndex int\n\t\tif !multiPort && !multiAddress {\n\t\t\tnumAddresses, addressStart = 1, 0\n\t\t} else {\n\t\t\taddressStartIndex = addressStart + i\n\t\t}\n\t\tvar epAddresses []corev1.EndpointAddress\n\t\tfor j := 0; j < numAddresses; j++ {\n\t\t\tepAddresses = append(epAddresses, corev1.EndpointAddress{IP: fmt.Sprintf(\"%s.%d\", addressPrefix, addressStartIndex+j+1)})\n\t\t}\n\t\tnumAddresses = numAddresses - 1\n\t\taddressStart = addressStart + numAddresses\n\t\tendpointSubsets = append(endpointSubsets, corev1.EndpointSubset{\n\t\t\tAddresses: epAddresses,\n\t\t\tPorts: []corev1.EndpointPort{{\n\t\t\t\tName: fmt.Sprintf(\"foo%d\", i),\n\t\t\t\tPort: int32(mPort),\n\t\t\t\tProtocol: protocol,\n\t\t\t}},\n\t\t})\n\t}\n\n\tepExample := &corev1.Endpoints{\n\t\tObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: Name},\n\t\tSubsets: endpointSubsets,\n\t}\n\t_, err := KubeClient.CoreV1().Endpoints(ns).Create(context.TODO(), epExample, metav1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"error in creating Endpoint: %v\", err)\n\t}\n\ttime.Sleep(2 * time.Second)\n}", "func New(e *calc.Endpoints, uh goagrpc.UnaryHandler) *Server {\n\treturn &Server{\n\t\tDivideH: NewDivideHandler(e.Divide, uh),\n\t}\n}", "func NewServer(endpoint string) (*Server, error) {\n\n\tret := &Server{}\n\tvar err error\n\tret.Listener, err = net.Listen(\"tcp\", endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tret.mux = http.NewServeMux()\n\tret.mux.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) 
{\n\t\tw.Write([]byte(\"This is the monitoring endpoint\"))\n\t})\n\tret.mux.Handle(\"/mon/varz\", expvar.Handler())\n\n\tret.mux.HandleFunc(\"/mon/pprof/\", pprof.Index)\n\tret.mux.HandleFunc(\"/mon/pprof/cmdline\", pprof.Cmdline)\n\tret.mux.HandleFunc(\"/mon/pprof/profile\", pprof.Profile)\n\tret.mux.HandleFunc(\"/mon/pprof/symbol\", pprof.Symbol)\n\tEnableTracing()\n\tret.mux.HandleFunc(\"/mon/trace\", TraceHandler())\n\tret.srv = &http.Server{}\n\treturn ret, nil\n}", "func New(opts Options) (stack.LinkEndpoint, error) {\n\te := &endpoint{\n\t\tmtu: opts.MTU,\n\t\tbufferSize: opts.BufferSize,\n\t\taddr: opts.LinkAddress,\n\t\tpeerFD: opts.PeerFD,\n\t\tonClosed: opts.OnClosed,\n\t\tvirtioNetHeaderRequired: opts.VirtioNetHeaderRequired,\n\t\tgsoMaxSize: opts.GSOMaxSize,\n\t}\n\n\tif err := e.tx.init(opts.BufferSize, &opts.TX); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := e.rx.init(opts.BufferSize, &opts.RX); err != nil {\n\t\te.tx.cleanup()\n\t\treturn nil, err\n\t}\n\n\te.caps = stack.LinkEndpointCapabilities(0)\n\tif opts.RXChecksumOffload {\n\t\te.caps |= stack.CapabilityRXChecksumOffload\n\t}\n\n\tif opts.TXChecksumOffload {\n\t\te.caps |= stack.CapabilityTXChecksumOffload\n\t}\n\n\tif opts.LinkAddress != \"\" {\n\t\te.hdrSize = header.EthernetMinimumSize\n\t\te.caps |= stack.CapabilityResolutionRequired\n\t}\n\n\tif opts.VirtioNetHeaderRequired {\n\t\te.hdrSize += header.VirtioNetHeaderSize\n\t}\n\n\treturn e, nil\n}", "func NewEndpointDescription(endpointUrl PascalString, server ExtensionObjectDefinition, serverCertificate PascalByteString, securityMode MessageSecurityMode, securityPolicyUri PascalString, noOfUserIdentityTokens int32, userIdentityTokens []ExtensionObjectDefinition, transportProfileUri PascalString, securityLevel uint8) *_EndpointDescription {\n\t_result := &_EndpointDescription{\n\t\tEndpointUrl: endpointUrl,\n\t\tServer: server,\n\t\tServerCertificate: serverCertificate,\n\t\tSecurityMode: securityMode,\n\t\tSecurityPolicyUri: 
securityPolicyUri,\n\t\tNoOfUserIdentityTokens: noOfUserIdentityTokens,\n\t\tUserIdentityTokens: userIdentityTokens,\n\t\tTransportProfileUri: transportProfileUri,\n\t\tSecurityLevel: securityLevel,\n\t\t_ExtensionObjectDefinition: NewExtensionObjectDefinition(),\n\t}\n\t_result._ExtensionObjectDefinition._ExtensionObjectDefinitionChildRequirements = _result\n\treturn _result\n}", "func New(host, port string, h http.Handler) *WebServer {\n\tvar ws WebServer\n\n\tws.Addr = net.JoinHostPort(host, port)\n\tws.Handler = h\n\n\treturn &ws\n}", "func New(addr string, host app.HostService, collector *metrics.Collector) app.Server {\n\treturn &server{\n\t\tsrv: telnet.Server{Addr: addr, Handler: nil},\n\t\thost: host,\n\t\tcollector: collector,\n\t}\n}", "func NewEndpoints(s service.Service) Endpoints {\n\treturn Endpoints{\n\t\tAllEndpoint: MakeAllEndpoint(s),\n\t\tGetEndpoint: MakeGetEndpoint(s),\n\t}\n}", "func MakeEndpointNodeID(hostID, address, port string) string {\n\treturn lookupID(hostID, address, port, func() string {\n\t\treturn MakeAddressNodeID(hostID, address) + ScopeDelim + port\n\t})\n}", "func New(addr string) *Server {\n if addr == \"\" {\n addr = DefaultAddr\n }\n return &Server{\n addr: DefaultAddr,\n ds: newDataStore(),\n done: make(chan struct{}),\n }\n}", "func createEndpoint(paths ...string) string {\n\tendpoint, err := utils.ConstructURL(paths...)\n\tif err != nil {\n\t\tlog.HandleErrorAndExit(\"cannot construct endpoint\", err)\n\t}\n\treturn endpoint\n}", "func NewEndpoints(s Service) *Endpoints {\n\treturn &Endpoints{\n\t\tAdd: NewAddEndpoint(s),\n\t\tResta: NewRestaEndpoint(s),\n\t\tMultiplicacion: NewMultiplicacionEndpoint(s),\n\t\tDivision: NewDivisionEndpoint(s),\n\t}\n}", "func NewEndpoints(s Service) *Endpoints {\n\treturn &Endpoints{\n\t\tCreateSession: NewCreateSessionEndpoint(s),\n\t\tUseSession: NewUseSessionEndpoint(s),\n\t}\n}", "func NewDeleteEndpoint(s Service) goa.Endpoint {\n\treturn func(ctx context.Context, req interface{}) 
(interface{}, error) {\n\t\tp := req.(*DeletePayload)\n\t\treturn nil, s.Delete(ctx, p)\n\t}\n}", "func New(port string) *Server {\n\treturn &Server{\n\t\tport: port,\n\t\tmanager: endly.New(),\n\t}\n}", "func New(e *todo.Endpoints, uh goagrpc.UnaryHandler) *Server {\n\treturn &Server{\n\t\tGetH: NewGetHandler(e.Get, uh),\n\t\tListH: NewListHandler(e.List, uh),\n\t\tAddH: NewAddHandler(e.Add, uh),\n\t\tRemoveH: NewRemoveHandler(e.Remove, uh),\n\t}\n}", "func generateEndpoint(command_name string, args_hash string) string {\n\treturn fmt.Sprintf(\"http://localhost:3001/%s\", path.Join(\"commands\",command_name, args_hash))\n}", "func New(endpoint string) BaseClient {\n\treturn NewWithoutDefaults(endpoint)\n}" ]
[ "0.74292636", "0.72935945", "0.72410816", "0.7215659", "0.71920615", "0.711459", "0.70928013", "0.705746", "0.69879735", "0.6974851", "0.69721305", "0.68303615", "0.6723265", "0.6712752", "0.6605164", "0.6602713", "0.6558774", "0.65187037", "0.64022964", "0.6365016", "0.63610584", "0.6209596", "0.6203358", "0.6123653", "0.6108437", "0.60979474", "0.60764503", "0.6075556", "0.59888357", "0.59879845", "0.5980729", "0.5905845", "0.58581066", "0.5843042", "0.5838468", "0.5834476", "0.5823453", "0.5822517", "0.58159256", "0.5814215", "0.5793094", "0.5780002", "0.57655656", "0.57516503", "0.57405627", "0.5737683", "0.57363343", "0.57352555", "0.57346916", "0.5728066", "0.5726761", "0.57263607", "0.5693105", "0.5684099", "0.56610405", "0.56539375", "0.5641001", "0.5629674", "0.56220806", "0.56123734", "0.5592099", "0.55898917", "0.5583745", "0.55637497", "0.5561949", "0.55585724", "0.55447054", "0.5542989", "0.5535745", "0.552908", "0.5524277", "0.5519087", "0.5475816", "0.5474678", "0.5474238", "0.54732114", "0.5467853", "0.54596967", "0.5459371", "0.54546916", "0.54546463", "0.54491895", "0.5447852", "0.54413813", "0.54342437", "0.54234684", "0.5419775", "0.5418819", "0.5410803", "0.54066557", "0.538818", "0.5377203", "0.5366244", "0.5363872", "0.53628534", "0.535963", "0.5357422", "0.53544366", "0.53506035", "0.53285164" ]
0.74718064
0
AddHandleFunc adds a new function for handling incoming data.
func (e *Endpoint) AddHandleFunc(name string, f HandleFunc) { e.mutex.Lock() e.handler[name] = f e.mutex.Unlock() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (l Listener) AddHandler(cmd string, handleFunc func()) {\n\tl[cmd] = handleFunc\n}", "func (l *logPipe) HandleFunc(hf func(string) error) {\n\tl.handleFunc = hf\n}", "func HandleFunc(name string, handlerFunc func(Response)) {\n\thandlers[name] = toFunction(handlerFunc)\n}", "func HandleFunc(h HandlerFunc) {\n\tapex.Handle(h)\n}", "func HandleFunc(h HandlerFunc) {\n\tapex.Handle(h)\n}", "func HandleFunc(h HandlerFunc) {\n\tapex.Handle(h)\n}", "func (h *MxHandler) HandleFunc(pattern *checkSelection, handler func(http.ResponseWriter, *http.Request)) {\n\th.routes = append(h.routes, &route{pattern, http.HandlerFunc(handler)})\n}", "func (h *RegexpHandler) HandleFunc(pattern *regexp.Regexp, handler func(http.ResponseWriter, *http.Request)) {\n\th.routes = append(h.routes, &Route{pattern, http.HandlerFunc(handler)})\n}", "func (c *CmdRunner) HandleFunc(cmdId string, handler func(cmdMessage CmdMessage)) {\n\tc.Handlers[cmdId] = handler\n}", "func (s *Server) HandleFunc(path string, fn http.HandlerFunc) {\n\ts.Handle(path, http.HandlerFunc(fn))\n}", "func (mx *Mux) HandleFunc(pattern string, handler interface{}) {\n\tmx.handle(ALL, pattern, handler)\n}", "func HandleFunc(c Checker, pattern string, h http.HandlerFunc) {\n\thttp.HandleFunc(pattern, HandlerFunc(c, h))\n}", "func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request))", "func (mux *Mux) HandleFunc(pattern string, handler HandlerFunc) {\n\tmux.Handle(pattern, handler)\n}", "func (mx *Mux) HandleFunc(pattern string, handlerFn http.HandlerFunc) {\n\tmx.handle(mALL, pattern, handlerFn)\n}", "func (app *App) HandleFunc(pattern string, handlerFunc http.HandlerFunc) {\n\tif app.Server() != nil {\n\t\tapp.Server().Handler.(*http.ServeMux).HandleFunc(pattern, handlerFunc)\n\t}\n}", "func (m *ServeMux) HandleFunc(method string, path string, h interface{}) {\n\tm.Handle(method, path, &handlerContainerImpl{\n\t\thandler: h,\n\t\tContext: background,\n\t})\n}", "func (m *Transport) 
AddHandle(handle interface{}, args ...string) error {\n\th := handle.(gin.HandlerFunc)\n\tif len(args) == 0 {\n\t\tm.engine.Use(h)\n\t} else if len(args) == 2 {\n\t\tm.engine.Handle(args[0], args[1], h)\n\t} else {\n\t\treturn errors.New(\"invalid args\")\n\t}\n\treturn nil\n}", "func (mux *ServeMux) HandleFunc(m Matcher, h HandlerFunc) {\n\tmux.Handle(m, h)\n}", "func (r *Router) HandleFunc(method, path string, handler func(context.Context, taxi.Decoder) (interface{}, error)) {\n\tr.router.HandleFunc(method, path, handler)\n}", "func (r *Mux) HandleFunc(method, path string, handler http.HandlerFunc) {\n\tr.Handle(method, path, http.HandlerFunc(handler))\n}", "func HandleFunc(method string, path string, h interface{}) {\n\tDefaultMux.HandleFunc(method, path, h)\n}", "func (s *server) HandleFunc(path string, handlerFunc http.HandlerFunc) {\n\ts.mux.HandleFunc(path, handlerFunc)\n}", "func (s *Server) HandleFunc(path string, h http.HandlerFunc) {\n\ts.router.HandleFunc(path, h)\n}", "func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {\n\tDefaultServeMux.HandleFunc(pattern, handler)\n}", "func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {\n\tDefaultServeMux.HandleFunc(pattern, handler)\n}", "func (e *Exporter) HandleFunc(url string, f func(w http.ResponseWriter, r *http.Request)) {\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\tif e.name == \"\" {\n\t\tHTTPHandleFunc(url, f)\n\t\treturn\n\t}\n\n\tif hf, ok := e.handleFuncs[url]; ok {\n\t\thf.Set(f)\n\t\treturn\n\t}\n\thf := &handleFunc{f: f}\n\te.handleFuncs[url] = hf\n\n\tHTTPHandleFunc(e.URLPrefix()+url, func(w http.ResponseWriter, r *http.Request) {\n\t\tif f := hf.Get(); f != nil {\n\t\t\tf(w, r)\n\t\t}\n\t})\n}", "func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {\n\tDefaultServeMux.HandleFunc(pattern, handler)\n}", "func (r *Router) HandleFunc(method, pattern string, fn http.HandlerFunc) Route {\n\treturn r.Handle(method, pattern, 
http.HandlerFunc(fn))\n}", "func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {\n\tmux.Handle(pattern, HandlerFunc(handler))\n}", "func (s *Server) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\r\n\ts.router.HandleFunc(pattern, handler)\r\n}", "func (self *ServeMux) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\tself.Handle(pattern, http.HandlerFunc(handler))\n}", "func (k *Kite) HandleHTTPFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\tk.muxer.HandleFunc(pattern, handler)\n}", "func (s *Server) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\ts.router.HandleFunc(pattern, handler)\n}", "func (r *Router) HandleFunc(pattern, method string, f func(Context) error) {\n\tr.Handle(pattern, method, HandlerFunc(f))\n}", "func (a *App) HandleFunc(mount string, handler http.HandlerFunc) *route {\n\troute := a.newRoute(mount, handler)\n\troute.buildPatterns(\"\")\n\treturn route\n}", "func (s *Stub) HandleFunc(fn func(http.ResponseWriter, *http.Request)) {\n\ts.response.handler = fn\n}", "func (m *ServeMux) HandleFunc(command string, handler func(conn Conn, cmd Command)) {\n\tif handler == nil {\n\t\tpanic(\"redcon: nil handler\")\n\t}\n\tm.Handle(command, HandlerFunc(handler))\n}", "func (p *spaDevProxy) HandleFunc(w http.ResponseWriter, r *http.Request) {\n\tp.proxy.ServeHTTP(w, r)\n}", "func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {\n\tif handler == nil {\n\t\tpanic(\"http: nil handler\")\n\t}\n\tmux.Handle(pattern, HandlerFunc(handler))\n}", "func (a *Asock) AddHandler(name string, argmode string, df DispatchFunc) error {\n\tif _, ok := a.d[name]; ok {\n\t\treturn fmt.Errorf(\"handler '%v' already exists\", name)\n\t}\n\tif argmode != \"split\" && argmode != \"nosplit\" {\n\t\treturn fmt.Errorf(\"invalid argmode '%v'\", argmode)\n\t}\n\ta.d[name] = &dispatchFunc{df, 
argmode}\n\ta.help = \"\"\n\tfor cmd := range a.d {\n\t\ta.help = a.help + cmd + \" \"\n\t}\n\treturn nil\n}", "func (mux *ServeMux) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) error {\n\treturn mux.Handle(pattern, http.HandlerFunc(handler))\n}", "func (s *Subrouter) HandleFunc(m, p string, hfunc http.HandlerFunc) {\n\tk := s.prefix + resolvedPath(p)\n\n\ts.initEndp(k)\n\n\ts.endps[k][m] = []interface{}{hfunc}\n}", "func HandleFunc(code int, data interface{}) func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tReply(w, code, data)\n\t}\n}", "func (m *ServeMux) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\tif handler == nil {\n\t\tpanic(\"http: nil handler\")\n\t}\n\n\tm.Handle(pattern, http.HandlerFunc(handler))\n}", "func (m *RegExpMux) HandleFunc(pattern *regexp.Regexp, handler func(http.ResponseWriter, *http.Request)) {\n\tm.Handle(pattern, http.HandlerFunc(handler))\n}", "func (c *Cluster) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\tc.router.HandleFunc(pattern, handler)\n}", "func (mux *XMux) HandleFunc(uriHandler *UriHandler) {\n\tmux.Handle(uriHandler)\n}", "func (r *Router) HandleFunc(path string, f func(context.Context, http.ResponseWriter, *http.Request) error) *Route {\n\treturn r.Handle(path, HandlerFunc(f))\n}", "func (c *RestClient) HandleFunc(h func(http.ResponseWriter, *http.Request)) http.Handler {\n\treturn c.Handle(http.HandlerFunc(h))\n}", "func (fn AddContactHandlerFunc) Handle(params AddContactParams, principal *app.Auth) AddContactResponder {\n\treturn fn(params, principal)\n}", "func HandleFunc(n xml.Name, h xmpp.HandlerFunc) Option {\n\treturn Handle(n, h)\n}", "func HandleFunc(n xml.Name, h xmpp.HandlerFunc) Option {\n\treturn Handle(n, h)\n}", "func (h *Handler) EventHandleFunc(etype string, name string, eventHandler func(Event)) {\n\th.EventHandle(etype, name, 
EventHandlerFunc(eventHandler))\n}", "func (h *Handler) HandleFunc(path string, f func(w http.ResponseWriter, r *http.Request)) *mux.Route {\n\treturn h.Router.HandleFunc(path, f)\n}", "func (a *App) HandleFunc(p string, l UserLevel, f func(w http.ResponseWriter, r *Request)) {\n\ta.Handle(p, l, HandleFunc(f))\n}", "func (i *Client) Handle(fs []Filter, hf HandlerFunc) {\n\th := &Handler{\n\t\tFilters: fs,\n\t\tHandler: hf,\n\t}\n\n\ti.handlers = append(i.handlers, h)\n}", "func (hf HandlerFunc) Handle(metadata Metadata) error {\n\treturn hf(metadata)\n}", "func (mux *TypeMux) HandleFunc(t Type, f HandlerFunc) {\n\tmux.Handle(t, f)\n}", "func (s *Session) AddMessageHandler(fn func(Message, *Session)) {\n\ts.handlers.msgHandler = fn\n}", "func (m *MuxTracer) HandleFunc(router *mux.Router, pattern string, handler http.HandlerFunc) *mux.Route {\n\treturn router.HandleFunc(pattern, m.TraceHandleFunc(handler))\n}", "func (auth *Mux) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request), role models.Role) {\n\tauth.Handle(pattern, http.HandlerFunc(handler), role)\n}", "func (h *Handler) Add(pattern string, handler HandlerFunc, opts ...RouteOption) *Route {\n\tfn := func(w http.ResponseWriter, req *http.Request) {\n\t\terr := handler(w, req)\n\t\tif err != nil {\n\t\t\th.Abort(w, req, err)\n\t\t}\n\t}\n\treturn h.Handle(pattern, http.Handler(http.HandlerFunc(fn)), opts...)\n}", "func (rm *RouterMux) HandleFunc(path string, handler HandlerFunction, description ...string) {\n\tif path == RouteRoot {\n\t\trm.root.handler = NewHandler(handler, path, description...)\n\t\treturn\n\t}\n\tparts := strings.Split(path, \"/\")\n\tnode := rm.root\n\tfor _, part := range parts {\n\t\tif part == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif node.children[part] == nil {\n\t\t\tnode.children[part] = NewNode(node, part, NewHandler(handler, path, description...))\n\t\t}\n\t\tnode = node.children[part]\n\t}\n}", "func (r *Router) HandleFunc(w http.ResponseWriter, req 
*http.Request) {\n\th, params, err := r.Find(req.Method, req.URL.Path)\n\tif err != nil {\n\t\tif err.Error() == \"no matching patterns\" {\n\t\t\tr.NotFoundHandelr(w, req)\n\t\t\treturn\n\t\t}\n\t\tr.ErrorHandler(w, req)\n\t\treturn\n\t}\n\treq = req.WithContext(setParsedParamsToCtx(req.Context(), params))\n\th(w, req)\n}", "func (m *mDNS) AddHandler(f func(net.Interface, net.Addr, Packet)) {\n\tm.pHandlers = append(m.pHandlers, f)\n}", "func HandleFunc(t Type, f func(ResponseWriter, *Request)) {\n\tDefaultMux.HandleFunc(t, f)\n}", "func (m *mDNS) AddHandler(f func(net.Addr, Packet)) {\n\tm.pHandlers = append(m.pHandlers, f)\n}", "func (rhf HandlerFunc) Handle(req *Request) (resp *Response, err error) {\n\treturn rhf(req)\n}", "func (service *HealthService) HealthCheckHandleFunc() func(w http.ResponseWriter, r *http.Request) {\n\treturn fthealth.Handler(service)\n}", "func (ps *PS) AddHandle(handle htypes.Handle) {\n\tps.Handles = append(ps.Handles, handle)\n}", "func (h *Handler) Add(cmd int32, hf HandlerFunc) {\n\th.router[cmd] = hf\n}", "func EventHandleFunc(etype string, name string, eventHandler func(Event)) {\n\tDefaultHandler.EventHandleFunc(etype, name, eventHandler)\n}", "func (router *Routes) AddHandler(definition string, givenHandler Handler) {\n\telements := strings.Split(definition, \" \")\n\trouter.handlers = append(router.handlers, handler{elements, givenHandler})\n}", "func HandleFunc(aq querier.AgentQuerier) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvar err error\n\t\tvar resps []Response\n\t\tpodName := r.URL.Query().Get(\"pod\")\n\t\tnamespace := r.URL.Query().Get(\"namespace\")\n\n\t\tif podName == \"\" && namespace == \"\" {\n\t\t\tresps, err = getAllFlows(aq)\n\t\t} else if podName != \"\" && namespace != \"\" {\n\t\t\t// Pod Namespace must be provided to dump flows of a Pod.\n\t\t\tresps, err = getPodFlows(aq, podName, namespace)\n\t\t} else {\n\t\t\t// Not 
supported.\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tif resps == nil {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\n\t\terr = json.NewEncoder(w).Encode(resps)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t}\n\t}\n}", "func (h HandlerFunc) Handle(e Event) { h(e) }", "func (dumbRouter *DumbRouter) AddFunctionMapping(funcURL string, function func(req *http.Request, res http.ResponseWriter)) {\n\tdumbRouter.routes[funcURL] = function\n}", "func HandleFunc(h http.Handler, enabledApps map[string]string) httprouter.Handle {\n\tlog.Println(\"in handleFunc\")\n\treturn func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {\n\t\tctx := r.Context()\n\t\tctx = context.WithValue(ctx, \"enabledApps\", enabledApps)\n\t\tctx = context.WithValue(ctx, \"params\", params)\n\t\tr = r.WithContext(ctx)\n\t\th.ServeHTTP(w, r)\n\t}\n}", "func (handler *WebsocketHandler) AddHandler(commandName string, fn func(context.Context, interfaces.WebsocketClient, interfaces.WebsocketCommand)) {\n\thandler.Handlers[commandName] = fn\n}", "func (ph *Handler) SetHandlerFunc(newHandler HandlerFunc) {\n\tph.mu.Lock()\n\tdefer ph.mu.Unlock()\n\tph.handle = newHandler\n}", "func (hm *Mux) AddHandler(op Op, h Func) {\n\thm.l.Lock()\n\thm.handlers[op] = h\n\thm.l.Unlock()\n}", "func NewHandleFunc(h func(*Client)) http.HandlerFunc {\n\n\tupgrader := &websocket.Upgrader{\n\t\tReadBufferSize: 1024,\n\t\tWriteBufferSize: 1024,\n\t\tCheckOrigin: func(r *http.Request) bool {\n\t\t\treturn true\n\t\t},\n\t}\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tserveWs(h, upgrader, w, r)\n\t}\n}", "func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) {\n\tif r.AddFunc != nil {\n\t\tr.AddFunc(obj)\n\t}\n}", "func HandleAdd(c *cron.Cron, event jobs.JobCron) http.HandlerFunc {\n\treturn func(w 
http.ResponseWriter, r *http.Request) {\n\t\tvar job jobs.Job\n\t\tdata, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\treq.InternalError(w, err)\n\t\t\tlogger.FromRequest(r).WithError(err).Debugln(\"无法解析body的内容\")\n\t\t\treturn\n\t\t}\n\n\t\terr = json.Unmarshal(data, &job)\n\t\tif err != nil {\n\t\t\tlogger.FromRequest(r).WithError(err).Debugln(\"Json数据格式或者参数错误\")\n\t\t\treq.BadRequest(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tif job.Name == \"\" || job.Scheduler == \"\" || job.Action == \"\" {\n\t\t\tlogger.FromRequest(r).WithError(err).Debugln(\"Json数据格式或者参数错误\")\n\t\t\treq.BadRequestf(w, \"Json数据格式或者参数错误\")\n\t\t\treturn\n\t\t}\n\n\t\terr = event.Add(c, job)\n\t\tif err != nil {\n\t\t\tlogger.FromRequest(r).WithError(err).Debugln(\"任务添加失败\")\n\t\t\treq.InternalError(w, err)\n\t\t\treturn\n\t\t}\n\t\treq.JSON(w, \"任务添加成功\", 200)\n\t}\n}", "func AddHandler(handler HandlerFunc, evts ...string) *HandlerFunc {\n\tfor _, evt := range evts {\n\t\tif evt == \"*\" && len(evts) > 1 {\n\t\t\tlogrus.Warn(AddMultipleWildcardWarning)\n\t\t\treturn AddHandler(handler, \"*\")\n\t\t}\n\t}\n\n\thPtr := &handler\n\n\teventHandlersLock.Lock()\n\tdefer eventHandlersLock.Unlock()\n\n\tfor _, evt := range evts {\n\t\teventHandlers[evt] = append(eventHandlers[evt], hPtr)\n\t}\n\n\treturn hPtr\n}", "func (e *Engine) AddFunc(name string, fn interface{}) *Engine {\n\te.Mutex.Lock()\n\te.Funcmap[name] = fn\n\te.Mutex.Unlock()\n\treturn e\n}", "func (r *Routers) Add(url string, handler func(http.ResponseWriter, *http.Request)) {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\tif r.urls == nil {\n\t\tr.urls = make(map[string]*func(http.ResponseWriter, *http.Request))\n\t}\n\tr.urls[url] = &handler\n\thttp.HandleFunc(url, handler)\n}", "func RegisterAdminHandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\tapiHandlerStore[pattern] = NewAPIHandler(handler)\n\tlog.StartLogger.Infof(\"[admin server] [register api] register a new api %s\", pattern)\n}", "func 
(fn DepositNewFileHandlerFunc) Handle(params DepositNewFileParams) middleware.Responder {\n\treturn fn(params)\n}", "func (f RouteHandlerFunc) RouteHandle(rm *RouteMatch) { f(rm) }", "func (socket *MockSocket) AddEventHandler(\n\thandler socket.EventHandler,\n) {\n}", "func HandleFuncWithCode(code int) func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tReplyWithCode(w, code)\n\t}\n}", "func (fn EchoHandlerFunc) Handle(params EchoParams) middleware.Responder {\n\treturn fn(params)\n}", "func (e *EventHandlerFuncs) OnAdd(table string, model Model) {\n\tif e.AddFunc != nil {\n\t\te.AddFunc(table, model)\n\t}\n}", "func (fn AddClaimHandlerFunc) Handle(params AddClaimParams) middleware.Responder {\n\treturn fn(params)\n}", "func AddUserHandle(service iface.Service) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tpayload := struct {\n\t\t\tName string `json:\"name\"`\n\t\t}{}\n\n\t\terr := json.NewDecoder(r.Body).Decode(&payload)\n\t\tif err != nil {\n\t\t\tFail(w, r, http.StatusBadRequest, \"could not parse payload\")\n\t\t\treturn\n\t\t}\n\n\t\tpayload.Name = strings.TrimSpace(payload.Name)\n\t\tif len(payload.Name) == 0 {\n\t\t\tFail(w, r, http.StatusBadRequest, \"empty name\")\n\t\t\treturn\n\t\t}\n\n\t\tuserID, err := service.AddUser(r.Context(), payload.Name)\n\t\tif err != nil {\n\t\t\tlog.Log(err)\n\t\t\tFail(w, r, http.StatusInternalServerError, \"service failed\")\n\t\t\treturn\n\t\t}\n\n\t\tJSON(w, r, map[string]interface{}{\n\t\t\t\"user_id\": userID,\n\t\t})\n\t}\n}", "func (s *Server) AddHandler(route string, handler http.Handler) {\n\ts.router.Handle(route, handler)\n}", "func (router *Router) PostFunc(path string, handler http.HandlerFunc) {\n\trouter.Handle(\"POST\", path, handler)\n}", "func (h HandlerFunc) Handle(w irc.Writer, ev *irc.Event) {\n\th(w, ev)\n}", "func HandlerFunc(log func(message string, time time.Time, level LogLevel, call CallStack, context 
ContextMap) error) LogHandler {\n\treturn remoteHandler(log)\n}" ]
[ "0.6896334", "0.6849562", "0.67962635", "0.6719276", "0.6719276", "0.6719276", "0.6665553", "0.66522294", "0.66437876", "0.64656377", "0.6382179", "0.63733864", "0.6351293", "0.634102", "0.6330453", "0.6318307", "0.62964743", "0.6292983", "0.6267896", "0.62676644", "0.6267022", "0.62607217", "0.62477607", "0.6232308", "0.62158805", "0.62158805", "0.62029934", "0.61907643", "0.61870885", "0.6182279", "0.6175737", "0.61754864", "0.6174431", "0.6170246", "0.6168716", "0.6159183", "0.6147757", "0.60715795", "0.6070811", "0.6056701", "0.6049589", "0.6047232", "0.5979215", "0.5968772", "0.59487724", "0.5946904", "0.592694", "0.5925211", "0.59250164", "0.59128237", "0.59004617", "0.59004056", "0.59004056", "0.58984596", "0.58356166", "0.5801194", "0.5786414", "0.5781139", "0.5777651", "0.5723876", "0.5697638", "0.5696652", "0.56909126", "0.5667802", "0.56542003", "0.5617993", "0.560046", "0.55976826", "0.5548152", "0.5538665", "0.55303246", "0.5529652", "0.5523013", "0.550802", "0.5501738", "0.5501003", "0.5494699", "0.5494681", "0.54768336", "0.5459503", "0.54441345", "0.5438022", "0.5430188", "0.5411539", "0.5403524", "0.5395093", "0.53928393", "0.5392592", "0.5384636", "0.53841853", "0.537467", "0.5356929", "0.5334893", "0.5313913", "0.5306438", "0.53024465", "0.5287039", "0.5286194", "0.5282241", "0.5278717" ]
0.781103
0
Listen starts listening on the endpoint port on all interfaces. At least one handler function must have been added through AddHandleFunc() before.
func (e *Endpoint) Listen() error { var err error e.listener, err = net.Listen("tcp", Port) if err != nil { return errors.Wrapf(err, "Unable to listen on port %s\n", Port) } log.Println("Listen on", e.listener.Addr().String()) for { log.Println("Accept a connection request.") conn, err := e.listener.Accept() if err != nil { log.Println("Failed accepting a connection request:", err) continue } log.Println("Handle incoming messages.") go e.handleMessages(conn) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (handler *Handler) Listen() error {\n\treturn handler.engine.Start(g.GetConfig().ListenAddr)\n}", "func (r *EndpointRegistry) Listen(listener Listener) {\n\tif !r.OnCloseAlways(func() {\n\t\tif err := listener.Close(); err != nil {\n\t\t\tr.Log().Debugf(\"EndpointRegistry.Listen: closing listener OnClose: %v\", err)\n\t\t}\n\t}) {\n\t\treturn\n\t}\n\n\t// Start listener and accept all incoming peer connections, writing them to\n\t// the registry.\n\tfor {\n\t\tconn, err := listener.Accept(r.ser)\n\t\tif err != nil {\n\t\t\tr.Log().Debugf(\"EndpointRegistry.Listen: Accept() loop: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tr.Log().Debug(\"EndpointRegistry.Listen: setting up incoming connection\")\n\t\t// setup connection in a separate routine so that new incoming\n\t\t// connections can immediately be handled.\n\t\tgo func() {\n\t\t\tif err := r.setupConn(conn); err != nil {\n\t\t\t\tlog.WithError(err).Error(\"EndpointRegistry could not setup wire/net.Conn\")\n\t\t\t}\n\t\t}()\n\t}\n}", "func (l *Listener) Listen() {\n\tfor {\n\t\tvar client net.Conn\n\t\tvar err error\n\t\tif client, err = util.Accept(l); err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Serve the first Handler which is attached to this listener\n\t\tif len(l.HandlerConfigs) > 0 {\n\t\t\toptions := plugin_v1.HandlerOptions{\n\t\t\t\tClientConnection: client,\n\t\t\t\tHandlerConfig: l.HandlerConfigs[0],\n\t\t\t\tEventNotifier: l.EventNotifier,\n\t\t\t\tResolver: l.Resolver,\n\t\t\t\tShutdownNotifier: func(handler plugin_v1.Handler) {},\n\t\t\t}\n\n\t\t\tl.RunHandlerFunc(\"example-handler\", options)\n\t\t} else {\n\t\t\tclient.Write([]byte(\"Error - no handlers were defined!\"))\n\t\t}\n\t}\n}", "func Listen(endpoint string, rcvBufSize uint32) (*Listener, error) {\n\tnetwork, laddr, err := utils.ResolveEndpoint(endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlis := &Listener{\n\t\tendpoint: endpoint,\n\t\trcvBufSize: rcvBufSize,\n\t\tsndBufSize: 0xffff,\n\t}\n\tlis.lowerListener, err = 
net.Listen(network, laddr.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn lis, nil\n}", "func (s *Server) Listen(protoFunc ...socket.ProtoFunc) error {\n\treturn s.peer.Listen(protoFunc...)\n}", "func (b *Bootstrapper) Listen(cfgs ...iris.Configurator) {\n\t// Type asserting host address and applying default if not provided\n\taddr := b.Application.ConfigurationReadOnly().GetOther()[\"host\"].(string)\n\n\tb.Run(iris.Addr(addr), cfgs...)\n}", "func startListen() {\n\tconfig, err := config.Config()\n\tif err != nil {\n\t\tlog.Log().Error.Println(err)\n\t\tpanic(err)\n\t}\n\tstorage, err := storage.Instance()\n\tif err != nil {\n\t\tlog.Log().Error.Println(err)\n\t}\n\tprefix := config.Prefix\n\tconverter := convert.NewConverter(config.CodeLength, storage)\n\tinternalPort := strconv.Itoa(config.Port)\n\tinternalAddress := \":\" + internalPort\n\tln, err := net.Listen(\"tcp\", internalAddress)\n\tif err != nil {\n\t\tlog.Log().Error.Println(err)\n\t\tpanic(err)\n\t}\n\tdefer ln.Close()\n\tfor {\n\t\tconn, err := ln.Accept()\n\t\tif err != nil {\n\t\t\tlog.Log().Error.Println(err)\n\t\t\treturn\n\t\t}\n\t\tgo handleConnection(conn, converter, prefix)\n\t}\n}", "func (trans *HTTPTransport) Listen() error {\n\ttrans.mux.HandleFunc(RPCPath, trans.handle)\n\tip, err := utils.GetExternalIP()\n\tif err != nil {\n\t\treturn err\n\t}\n\tlistner, err := net.Listen(\"tcp\", fmt.Sprintf(\"%s:0\", ip))\n\tif err != nil {\n\t\treturn err\n\t}\n\ttrans.addr = fmt.Sprintf(\"http://%s\", listner.Addr().String())\n\tlog.Info(\"Listening on \", trans.addr)\n\tgo http.Serve(listner, trans.mux)\n\ttrans.listening = true\n\treturn nil\n}", "func Listen(addr string, handler HandlerFunc) error {\n\tif addr == \"\" {\n\t\taddr = \"localhost:4573\"\n\t}\n\n\tl, err := net.Listen(\"tcp\", addr)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to bind server\")\n\t}\n\tdefer l.Close() // nolint: errcheck\n\n\tfor {\n\t\tconn, err := l.Accept()\n\t\tif err != nil 
{\n\t\t\treturn errors.Wrap(err, \"failed to accept TCP connection\")\n\t\t}\n\n\t\tgo handler(NewConn(conn))\n\t}\n}", "func (h *Handler) Listen() error {\n\treturn h.engine.Run(util.GetConfig().ListenAddr)\n}", "func (bs *BackendServer) Listen() (err error) {\n\t//var tempDelay time.Duration // how long to sleep on accept failure\n\n\taddr, err := ParseAddr(bs.Addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tutils.Logger.Info(\"net is %s and ipport %s\", addr.Network, addr.IPPort)\n\n\tif addr.Network != \"unix\" {\n\t\tutils.Logger.Error(\"Error Inner Address, Should be unix://\")\n\t\terr = ErrAddr\n\t\treturn\n\t}\n\tos.Remove(addr.IPPort)\n\tl, err := net.Listen(\"unix\", addr.IPPort)\n\tif err != nil {\n\t\tutils.Logger.Error(err.Error())\n\t\treturn\n\t}\n\tln := l.(*net.UnixListener)\n\tgo bs.AcceptAndServe(ln)\n\treturn\n}", "func (l *LoadBalancer) Listen() error {\n\tvar err error\n\tl.listener, err = net.Listen(l.frontend.AddrNetwork, l.frontend.Addr)\n\tif err != nil {\n\t\treturn trace.ConvertSystemError(err)\n\t}\n\tl.Debugf(\"created listening socket\")\n\treturn nil\n}", "func (d *Daemon) Listen() {\n\tdefer d.Socket.Close()\n\trpc.RegisterHandler()\n\n\tfor {\n\t\tconn, err := d.Socket.Accept()\n\t\tif err != nil {\n\t\t\tutils.LogFatalf(\"Socket connection error: %+v\\n\", err)\n\t\t}\n\n\t\tutils.LogInfof(\"New socket connection from %s\\n\", conn.RemoteAddr().String())\n\t\tgo rpc.HandleConnection(conn)\n\t}\n}", "func (m *middlewares) Listen(addr string) error {\n\treturn http.ListenAndServe(addr, m)\n}", "func main() {\n\tlisten_fds := ListenFds()\n\n\tfor _, fd := range listen_fds {\n\t\tl, err := net.FileListener(fd)\n\t\tif err != nil {\n\t\t\t// handle error\n\t\t\tfmt.Println(\"got err\", err)\n\t\t}\n\n\t\thttp.HandleFunc(\"/\", handler)\n\t\thttp.Serve(l, nil)\n\t}\n}", "func (h *Handler) Listen(port string) error {\n\th.Router.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{\n\t\tFormat: \"HTTP ${method} ${uri} 
Response=${status} ${latency_human}\\n\",\n\t}))\n\t//h.Router.Use(middleware.Logger())\n\n\tif err := h.Router.Start(port); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (o *GrpcServer) Listen() (err error) {\n\turi := fmt.Sprintf(\"%s:%d\", o.host, o.port)\n\to.listener, err = net.Listen(\"tcp\", uri)\n\tif err != nil {\n\t\to.State = Error\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\to.State = Listen\n\tlog.Printf(\"[GRPC] services started, listen on %s\\n\", uri)\n\treturn err\n}", "func Serve(eventHandler EventHandler, addr ...string) error {\n\tvar lns []*listener\n\tdefer func() {\n\t\tfor _, ln := range lns {\n\t\t\tln.close()\n\t\t}\n\t}()\n\tvar stdlib bool\n\tfor _, addr := range addr {\n\t\tvar ln listener\n\t\tvar stdlibt bool\n\t\tln.network, ln.addr, ln.reuseport, stdlibt = parseAddr(addr)\n\t\tif stdlibt {\n\t\t\tstdlib = true\n\t\t}\n\t\tif ln.network == \"unix\" {\n\t\t\tos.RemoveAll(ln.addr)\t//remove existed socket file for sockets' communication\n\t\t}\n\t\tvar err error\n\t\tif ln.network == \"udp\" {\n\t\t\tif ln.reuseport {\n\t\t\t\t//ln.pconn, err = reuse\n\t\t\t} else {\n\t\t\t\tln.pconn, err = net.ListenPacket(ln.network, ln.addr)\n\t\t\t}\n\t\t} else {\n\t\t\tif ln.reuseport {\n\t\t\t\t//operation for reuseport\n\t\t\t} else {\n\t\t\t\tln.ln, err = net.Listen(ln.network, ln.addr)\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif ln.pconn != nil {\n\t\t\tln.lnaddr = ln.pconn.LocalAddr()\n\t\t} else {\n\t\t\tln.lnaddr = ln.ln.Addr()\n\t\t}\n\t\tif !stdlib {\n\t\t\tif err := ln.system(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tlns = append(lns, &ln)\n\t}\n\treturn serve(eventHandler, lns)\n}", "func (wsv *web) Listen() Interface {\n\tvar ltn net.Listener\n\n\tif wsv.isRun.Load().(bool) {\n\t\twsv.err = ErrAlreadyRunning()\n\t\treturn wsv\n\t}\n\tif wsv.conf.Mode == \"unix\" || wsv.conf.Mode == \"unixpacket\" {\n\t\t_ = os.Remove(wsv.conf.Socket)\n\t\tltn, wsv.err = 
net.Listen(wsv.conf.Mode, wsv.conf.Socket)\n\t\t_ = os.Chmod(wsv.conf.Socket, os.FileMode(0666))\n\t} else {\n\t\tltn, wsv.err = net.Listen(wsv.conf.Mode, wsv.conf.HostPort)\n\t}\n\tif wsv.err != nil {\n\t\treturn wsv\n\t}\n\treturn wsv.Serve(ltn)\n}", "func StartListen(request *restful.Request, response *restful.Response) {\n\tportstring := request.PathParameter(\"port-id\")\n\tglog.Info(\"get the port number\", portstring)\n\tportint, err := strconv.Atoi(portstring)\n\tif err != nil {\n\t\tresponse.WriteError(500, err)\n\t\treturn\n\t}\n\tpid, _, err := lib.Getinfofromport(portint)\n\n\tif pid == -1 {\n\t\tresponse.WriteError(500, errors.New(\"the port is not be listend in this machine ( /proc/net/tcp and /proc/net/tcp6)\"))\n\t\treturn\n\t}\n\tif err != nil {\n\t\tresponse.WriteError(500, err)\n\t\treturn\n\t}\n\t//start listen to specific ip:port for 60s and send the data to es\n\ttimesignal := time.After(time.Second * Defaulttime)\n\t//start collect and check the timesignal every one minutes\n\tif !lib.Activeflag {\n\t\tgo lib.Startcollect(portint, Device, timesignal)\n\t\tlib.Flagmutex.Lock()\n\t\tlib.Activeflag = true\n\t\tresponse.Write([]byte(\"activated\"))\n\t\tlib.Flagmutex.Unlock()\n\t} else {\n\t\tresponse.Write([]byte(\"the server is already been activatied\"))\n\t}\n}", "func (g *Goer) listen() {\n\tif g.socketName == \"\" {\n\t\treturn\n\t}\n\n\tif g.mainSocket == nil {\n\t\tswitch g.Transport {\n\t\tcase \"tcp\", \"tcp4\", \"tcp6\", \"unix\", \"unixpacket\", \"ssl\":\n\t\t\tif len(os.Args) > 2 && os.Args[2] == \"graceful\" {\n\t\t\t\tfile := os.NewFile(3, \"\")\n\t\t\t\tlistener, err := net.FileListener(file)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlib.Fatal(\"Fail to listen tcp: %v\", err)\n\t\t\t\t}\n\t\t\t\tg.mainSocket = listener.(*net.TCPListener)\n\t\t\t} else {\n\t\t\t\taddr, err := net.ResolveTCPAddr(g.Transport, g.socketName)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlib.Fatal(\"fail to resolve addr: %v\", err)\n\t\t\t\t}\n\t\t\t\tlistener, err := 
net.ListenTCP(\"tcp\", addr)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlib.Fatal(\"fail to listen tcp: %v\", err)\n\t\t\t\t}\n\t\t\t\tg.mainSocket = listener\n\t\t\t}\n\t\tcase \"udp\", \"upd4\", \"udp6\", \"unixgram\":\n\t\t\tlistener, err := net.ListenPacket(g.Transport, g.socketName)\n\t\t\tif err != nil {\n\t\t\t\tlib.Fatal(err.Error())\n\t\t\t}\n\t\t\tg.mainSocket = listener\n\t\tdefault:\n\t\t\tlib.Fatal(\"unknown transport layer protocol\")\n\t\t}\n\n\t\tlib.Info(\"server start success...\")\n\t\tg.status = StatusRunning\n\n\t\tgo g.resumeAccept()\n\t}\n}", "func (node *Node) Listen() (err error) {\n err = node.server.Listen()\n return\n}", "func listen() {\n\tnodePortSuffix := fmt.Sprintf(\":%v\", *nodePort)\n\tlistener, err := net.Listen(\"tcp\", nodePortSuffix)\n\tutils.CheckForError(err)\n\tlog.Println(\"Listening for incoming messages...\")\n\tfor {\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\t// handle error\n\t\t}\n\t\tgo handleIncomingMessage(conn)\n\t}\n}", "func Listen(proto, addr string) (l net.Listener, err error) {\n\treturn NewReusablePortListener(proto, addr)\n}", "func Listen(addr string, handler Handler) error {\n\tlisten, err := net.Listen(\"tcp\", addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer listen.Close()\n\n\tfor {\n\t\tconn, err := listen.Accept()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbuf := make([]byte, types.MaxBufferSize)\n\t\tn, err := conn.Read(buf)\n\t\tif err != nil {\n\t\t\tlog.Logger.Tracef(\"Unable to read buf: %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tgo handler(conn, buf[:n])\n\t}\n}", "func (s *Server) Listen(address string, port int) {\n\tif port == 0 {\n\t\tport = 8067\n\t}\n\tif address == \"\" {\n\t\taddress = \"0.0.0.0\"\n\t}\n\tserverAddr := fmt.Sprintf(\"%s:%d\", address, port)\n\tfor _, m := range Metrics {\n\t\tprometheus.MustRegister(m)\n\t}\n\thttp.Handle(\"/metrics\", prometheus.Handler())\n\thttp.ListenAndServe(serverAddr, nil)\n}", "func (r *bitroute) Listen(hostPort string) 
error {\n\treturn http.ListenAndServe(hostPort, r)\n}", "func Listen(outputDirectory string) error {\n\t// Create listener\n\taddrStr := fmt.Sprintf(\":%v\", port)\n\tlistener, listenErr := net.Listen(\"tcp\", addrStr)\n\tif listenErr != nil {\n\t\treturn listenErr\n\t}\n\n\t// Prepare output directory path\n\toutputDirectory = strings.TrimRight(outputDirectory, \"/\")\n\n\t// Listen forever\n\tfor {\n\t\t// Accept connection\n\t\tconn, connErr := listener.Accept()\n\t\tif connErr != nil {\n\t\t\tlog.Printf(\"Encountered error while accepting from %s: %s\", conn.RemoteAddr().String(), connErr)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Check if we should restrict connections from peers\n\t\thandleConnection := true\n\t\tif restrictToPeers {\n\t\t\tfound := false\n\t\t\t// Loop over peers\n\t\t\tfor _, p := range peers {\n\t\t\t\t// Check if we found the remote address in our peers list\n\t\t\t\tif p.Address == conn.RemoteAddr().String() {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Handle connection only if its a peer\n\t\t\thandleConnection = found\n\t\t}\n\n\t\tif handleConnection {\n\t\t\t// Handle in a separate thread\n\t\t\tgo handle(conn, outputDirectory)\n\t\t}\n\t}\n}", "func StartListen(request *restful.Request, response *restful.Response) {\n\tportstring := request.PathParameter(\"port-id\")\n\tglog.Info(\"get the port number\", portstring)\n\tportint, err := strconv.Atoi(portstring)\n\tif err != nil {\n\t\tresponse.WriteError(500, err)\n\t\treturn\n\t}\n\tpid, pname, err := lib.Getinfofromport(portint)\n\n\tif pid == -1 {\n\t\tresponse.WriteError(500, errors.New(\"the port is not be listend in this machine ( /proc/net/tcp and /proc/net/tcp6)\"))\n\t\treturn\n\t}\n\n\tif err != nil {\n\t\tresponse.WriteError(500, err)\n\t\treturn\n\n\t}\n\tglog.Info(pname, pid)\n\n\t//create the process instance and get the detail info of specified pid\n\tPdetail := &model.ProcessDetail{\n\t\tProcess: &process.Process{Pid: 22637},\n\t}\n\tcmd, err := 
Pdetail.Cmdinfo()\n\tif err != nil {\n\t\tglog.Info(err)\n\t}\n\tglog.Info(cmd)\n\t//TODO get more info of this instance\n\n\t//start listen to specific ip:port for 60s and send the data to es\n\ttimesignal := time.After(time.Second * 30)\n\n\t//start collect and check the timesignal every one minutes\n\tgo lib.Startcollect(portint, device, timesignal)\n\n\tresponse.Write([]byte(\"activated\"))\n\n}", "func (bs *NpvizServer) Listen(port uint) {\n\thttp.Handle(\"/\", bs.router)\n\tserverAddr := fmt.Sprintf(\"0.0.0.0:%d\", port)\n\tlogrus.Info(fmt.Sprintf(\"Starting HTTP server at %s\", serverAddr))\n\tlogrus.Info(http.ListenAndServe(serverAddr, bs.router))\n}", "func (z *Zipkin) Listen(ln net.Listener, acc telegraf.Accumulator) {\n\tif err := z.server.Serve(ln); err != nil {\n\t\t// Because of the clean shutdown in `(*Zipkin).Stop()`\n\t\t// We're expecting a server closed error at some point\n\t\t// So we don't want to display it as an error.\n\t\t// This interferes with telegraf's internal data collection,\n\t\t// by making it appear as if a serious error occurred.\n\t\tif err != http.ErrServerClosed {\n\t\t\tacc.AddError(fmt.Errorf(\"error listening: %w\", err))\n\t\t}\n\t}\n}", "func (s *Server) listen(listener net.Listener) {\n\tfor {\n\t\t// Accept a connection\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tif s.shutdown {\n\t\t\t\treturn\n\t\t\t}\n\t\t\ts.logger.Printf(\"[ERR] consul.rpc: failed to accept RPC conn: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tgo s.handleConn(conn, false)\n\t\tmetrics.IncrCounter([]string{\"rpc\", \"accept_conn\"}, 1)\n\t}\n}", "func (s *Service) Listen() (err error) {\n\t// Initialize error channel\n\terrC := make(chan error, 2)\n\t// Listen to HTTP (if needed)\n\tgo s.listenHTTP(errC)\n\t// Listen to HTTPS (if needed)\n\tgo s.listenHTTPS(errC)\n\t// Return any error which may come down the error channel\n\treturn <-errC\n}", "func (m *manager) Listen(addr string) error {\n\tlog.Printf(\"Listening on %s\", 
addr)\n\treturn http.ListenAndServe(addr, m.router)\n}", "func StartListening() {\n\thttp.HandleFunc(\"/health\", GenerateHandler(\"^/health$\", HealthHandler))\n\thttp.HandleFunc(\"/static/\", GenerateHandler(\"^/(static/(js/|css/|media/)[a-zA-Z0-9._]*)$\", FileHandler))\n\thttp.HandleFunc(\"/audits/\", GenerateHandler(\"^/(static/[a-zA-Z0-9._-]*)$\", FileHandler))\n\thttp.HandleFunc(\"/api/\", GenerateHandler(\"^/api/(get/(all|inventory|host))$\", APIHandler))\n\thttp.HandleFunc(\"/\", GenerateHandler(\"^/(.*)$\", FileHandler))\n\ta := fmt.Sprintf(\"%s:%s\", config.Host, config.Port)\n\tlogger.Infof(\"Start listening \\\"%s\\\"...\", a)\n\tlogger.Fatale(http.ListenAndServe(a, nil), \"Server crashed !\")\n}", "func Listen(connectionService *ConnectionService) {\n\tinbound := make(chan UDPPacket)\n\tpacketConn, err := net.ListenPacket(\"udp\", \"127.0.0.1:69\")\n\tif err != nil {\n\t\tlog.Fatal(\"Error listening:\", err)\n\t\treturn\n\t}\n\tdefer packetConn.Close()\n\n\tgo read(packetConn, inbound)\n\tgo process(packetConn, inbound, connectionService)\n\tlog.Println(\"Server listening on port 69\")\n\tfor {\n\t}\n}", "func (s *Server) listen() {\n\tdefer (Track(\"listen\", s.log))()\n\tvar err error\n\ts.listener, err = net.Listen(\"tcp\", s.port)\n\tif err != nil {\n\t\ts.log(\"listen() failed to start per error: %+v\", err)\n\t\tpanic(err)\n\t}\n\ts.log(\"listening at %s\", s.port)\n\tfor {\n\t\tconn, err := s.listener.Accept()\n\t\tif err != nil {\n\t\t\ts.log(\"failed to accept client per error: %+v\", err)\n\t\t} else {\n\t\t\tclient := NewClient(conn, s.clientInput)\n\t\t\ts.log(\"accepted %s\", client.ID)\n\t\t\ts.newClients <- client\n\t\t}\n\t}\n}", "func (observer *Observer) Listen() {\n\tobserver.connector.Listen(int(define.ObserverPort))\n}", "func Listen(network, laddr string, config *tls.Config,) (net.Listener, error)", "func Listen(addr string, handler ConnectionHandler, done chan struct{}) (err error) {\n\tln, err := net.Listen(\"tcp\", 
addr)\n\n\tif err != nil {\n\t\tlog.Printf(\"Error listening on %s, %v \", addr, err)\n\t\tclose(done)\n\t\treturn\n\t}\n\n\tgo listen(ln, handler, done)\n\treturn\n}", "func Listen() {\n\tgo listenForIncomingSessions()\n}", "func Listen(pattern string, f func(*Response)) Handler {\n\treturn Handler{\n\t\tMethod: PublicMsg,\n\t\tPattern: pattern,\n\t\tRun: f,\n\t}\n}", "func (s *SyslogService) Bind() (err error) {\n\ts.ln, err = net.Listen(string(s.ConType), \":\"+s.Port)\n\tif err != nil {\n\t\treturn\n\t}\n\tfor {\n\t\ts.conn, err = s.ln.Accept()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t}\n\treturn\n}", "func ListenAny(handler ConnectionHandler, done chan struct{}) (addr *net.TCPAddr, err error) {\n\tln, err := net.Listen(\"tcp\", \":0\")\n\tif err != nil {\n\t\tlog.Printf(\"Couldn't listen on any ip address, %v\", err)\n\t\tclose(done)\n\t\treturn\n\t}\n\n\taddr, err = net.ResolveTCPAddr(ln.Addr().Network(), ln.Addr().String())\n\tif err != nil {\n\t\tlog.Printf(\"Couldn't resolve %v, %v\", ln.Addr(), err)\n\t\tclose(done)\n\t\treturn\n\t}\n\n\tgo listen(ln, handler, done)\n\treturn\n}", "func listen() error {\n\t// define https variable\n\taddress := config.Get(\"https\", \"address\")\n\tcertificate := config.Get(\"https\", \"certificate\")\n\tkey := config.Get(\"https\", \"key\")\n\n\t// return error\n\treturn http.ListenAndServeTLS(address, certificate, key, nil)\n}", "func Listen(addr, cpanelHostname string, targetCollection *targets.TargetCollection, pool *Pool) error {\n\tif addr == \"\" {\n\t\treturn errors.New(\"addr must be provided\")\n\t}\n\n\tif cpanelHostname == \"\" {\n\t\treturn errors.New(\"cpanelHostname must be provided\")\n\t}\n\n\tif targetCollection == nil {\n\t\treturn errors.New(\"targetCollection must be provided\")\n\t}\n\n\tif addr == \"\" {\n\t\treturn errors.New(\"addr is not defined\")\n\t}\n\n\tif cpanelHostname == \"\" {\n\t\treturn errors.New(\"cpanelHostname is not defined\")\n\t}\n\n\tr := mux.NewRouter()\n\n\t// CPANEL 
stuff\n\tr.Host(cpanelHostname).Path(\"/\").HandlerFunc(handlers.HandleHTMLRequest)\n\tr.Host(cpanelHostname).PathPrefix(\"/api\").PathPrefix(\"/api\").Handler(handlers.NewAPIRequestRouter())\n\tr.Host(cpanelHostname).PathPrefix(\"/intercom\").HandlerFunc(handlers.HandleSocketRequest)\n\n\t// proxy stuff\n\tr.HandleFunc(\"/\", createProxyHandlerFunc(targetCollection, pool))\n\n\tserver := &http.Server{\n\t\tHandler: r,\n\t\tAddr: addr,\n\t\tReadTimeout: 15 * time.Second,\n\t\tWriteTimeout: 15 * time.Second,\n\t}\n\n\tlog.Println(\"SERVER: listening @\", addr)\n\n\treturn server.ListenAndServe()\n}", "func (s *Server) Listen() error {\n\tif s.hasListeners { // already done this\n\t\treturn nil\n\t}\n\n\tlistener, err := net.Listen(\"tcp\", fmt.Sprintf(\"%s:%d\", s.Host, s.Port))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\th, p, err := swag.SplitHostPort(listener.Addr().String())\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.Host = h\n\ts.Port = p\n\ts.httpServerL = listener\n\n\ts.hasListeners = true\n\treturn nil\n}", "func (net *Network) Listen() {\n\tfor addr, vm := range net.vms {\n\t\tgo net.listenAndRun(addr, vm)\n\t}\n\n\tgo net.routePackets()\n}", "func (server *Server) Listen() {\n\thttp.Handle(\"/\", server.Router)\n\tlog.Println(\"Listening...\")\n\thttp.ListenAndServe(\":3000\", nil)\n}", "func (g *Gateway) listen() {\n\tfor {\n\t\tconn, err := g.listener.Accept()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tgo g.acceptConn(conn)\n\t}\n}", "func (b *broker) listen(addr string) (err error) {\n\tlistener, err := net.Listen(\"tcp\", addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"Listening on %s\\n\", listener.Addr())\n\tdefer listener.Close()\n\tfor {\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc, err := b.container.NewConnection(conn)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Make this a server connection. 
Must be done before Open()\n\t\tc.Server() // Server-side protocol negotiation.\n\t\tc.Listen() // Enable remotely-opened endpoints.\n\t\tif err := c.Open(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tutil.Debugf(\"accept %s\\n\", c)\n\t\t// Accept remotely-opened endpoints on the connection\n\t\tgo b.accept(c)\n\t}\n}", "func (e *EventRoll) Listen(f ...Callabut) {\n\tfor _, v := range f {\n\t\te.Handlers.Add(v, nil)\n\t}\n}", "func (v *vine) Listen(ctx *context.T, protocol, address string) (flow.Listener, error) {\n\tn, a, baseProtocol, err := parseListeningAddress(ctx, address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tl, err := baseProtocol.Listen(ctx, n, a)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tladdr := l.Addr()\n\tlocalTag := getLocalTag(ctx)\n\treturn &listener{\n\t\tbase: l,\n\t\taddr: addr(createDialingAddress(laddr.Network(), laddr.String(), localTag)),\n\t\tvine: v,\n\t\tlocalTag: localTag,\n\t}, nil\n}", "func listen(addr string) (net.Listener, error) {\n\treturn winio.ListenPipe(windowsPipeName(addr), nil)\n}", "func (c *Controller) Listen(port string) {\n\taddr, _ := net.ResolveTCPAddr(\"tcp\", port)\n\n\tvar err error\n\tc.listener, err = net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdefer c.listener.Close()\n\n\tlog.Println(\"Listening for connections on\", addr)\n\tfor {\n\t\tconn, err := c.listener.AcceptTCP()\n\t\tif err != nil {\n\t\t\tif strings.Contains(err.Error(), \"use of closed network connection\") {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tgo c.handleConnection(conn)\n\t}\n\n}", "func (notifee *Notifee) Listen(network.Network, multiaddr.Multiaddr) {}", "func Listen(laddr net.Addr, raddr string, config *Config) (net.Listener, <-chan error, error) {\n\treturn ListenContext(context.Background(), laddr, raddr, config)\n}", "func (bn *BasicNotifiee) Listen(n net.Network, addr ma.Multiaddr) {\n\tglog.V(4).Infof(\"Notifiee - Listen: %v\", addr)\n}", "func (_m *PeerResolver) 
Listen(ctx context.Context, addr string) {\n\t_m.Called(ctx, addr)\n}", "func (*endpoint) Listen(int) *tcpip.Error {\n\treturn tcpip.ErrNotSupported\n}", "func (app *Goa) Listen(addr string) error {\n\treturn http.ListenAndServe(addr, app)\n}", "func StartListening() {\n\tlistenAddress := config.Config.HTTP.Address + \":\" + config.Config.HTTP.Port\n\tgo func() {\n\t\tE.Start(listenAddress)\n\t}()\n\n\tlog.Info().Msgf(\"Starting to listening for HTTP requests on %s\", listenAddress)\n}", "func (s *Service) Listen() {\n\n}", "func Listen(port int) Probes {\n\tif port <= 0 {\n\t\treturn new(nullListener)\n\t}\n\n\tp := &listener{\n\t\talive: new(atomic.Value),\n\t\tready: new(atomic.Value),\n\t\tserver: &http.Server{\n\t\t\tAddr: fmt.Sprintf(\":%d\", port),\n\t\t},\n\t}\n\tp.alive.Store(true)\n\tp.ready.Store(false)\n\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"/healthz\", newHandler(p.alive))\n\tmux.HandleFunc(\"/readyz\", newHandler(p.ready))\n\tp.server.Handler = mux\n\n\tp.wg.Add(1)\n\tgo func() {\n\t\tdefer p.wg.Done()\n\t\tp.server.ListenAndServe()\n\t}()\n\n\treturn p\n}", "func (g *Gatekeeper) Listen() error {\n\tmux := bone.New()\n\n\tmux.Post(\"/v0/machine/aws\", routes.AWSBootstrapRoute(g.defaults.Org, g.defaults.Team, g.api))\n\n\tg.s.Handler = loggingHandler(mux)\n\th := httpdown.HTTP{}\n\n\tvar err error\n\tg.hd, err = h.ListenAndServe(g.s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn g.hd.Wait()\n}", "func Listen(appTitle, addr string, webfiles *fs.FS, hostname string) error {\n\tAPPTITLE = appTitle\n\tHOSTNAME = hostname\n\n\tif HOSTNAME == \"\" {\n\t\thn, err := os.Hostname()\n\t\tif err == nil {\n\t\t\tHOSTNAME = hn\n\t\t} else {\n\t\t\tHOSTNAME = \"localhost\"\n\t\t}\n\t}\n\n\t//webfiles := getFileSystem()\n\tt, err := template.ParseFS(*webfiles, \"*.html\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tstaticfiles, err := fs.Sub(*webfiles, \"static\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\trouter := 
gin.Default()\n\trouter.Use(gzip.Gzip(gzip.DefaultCompression))\n\trouter.SetHTMLTemplate(t)\n\trouter.StaticFS(\"/img\", http.FS(staticfiles))\n\trouter.StaticFS(\"/js\", http.FS(staticfiles))\n\trouter.NoRoute(notFoundHandler)\n\trouter.GET(\"/\", viewHandler)\n\trouter.GET(\"/view/:page\", viewHandler)\n\n\tv1 := router.Group(\"/api/v1\")\n\tapiv1.V1Routes(v1)\n\n\treturn router.Run(addr)\n}", "func Listen(settings args.NetworkConfig) {\n\tfmt.Printf(\"Server Listening on %s\\n\", settings.Port)\n\tport := \":\" + settings.Port\n\tserver, err := net.Listen(\"tcp\", port)\n\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn\n\t}\n\n\tdefer server.Close()\n\n\tfor {\n\t\tclient, err := server.Accept()\n\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t\treturn\n\t\t}\n\n\t\t// TODO limit number of goroutines\n\t\tgo onClient(client)\n\t}\n}", "func (s *Server) Listen() {\n\tif s.logs != nil {\n\t\ts.logs.Info(fmt.Sprintf(\"Listening at %s\", s.addr))\n\t}\n\n\ts.httpServer.ListenAndServe()\n}", "func (s Server) Start(port int, cb func(Message) error) {\n\tlistenTo := fmt.Sprintf(\":%d\", port)\n\tlog.Printf(\"Launching server on port %d\", port)\n\tlistener, err := net.Listen(\"tcp\", listenTo)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor {\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tc := Client{conn: conn, Id: NewClientId(), raw: s.Raw, Index: s.nextIndex}\n\t\ts.nextIndex++\n\t\tgo handleClient(c, s.Raw, cb)\n\t}\n}", "func (s *Service) Listen(ctx context.Context, address string, timeout time.Duration) error {\n\tvar wg sync.WaitGroup\n\tdefer func() { s.teardown(); wg.Wait() }()\n\n\terr := s.Bind(ctx, address)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.mutex.Lock()\n\ts.running = true\n\tl := s.listener\n\ts.mutex.Unlock()\n\n\tfor s.running {\n\t\tif timeout != 0 {\n\t\t\tif err := s.refreshTimeout(timeout); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tconn, err := 
l.Accept()\n\t\tif err != nil {\n\t\t\tif err.(net.Error).Timeout() {\n\t\t\t\ts.mutex.Lock()\n\t\t\t\tif s.conncounter == 0 {\n\t\t\t\t\ts.mutex.Unlock()\n\t\t\t\t\treturn ServiceTimeoutError{}\n\t\t\t\t}\n\t\t\t\ts.mutex.Unlock()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !s.running {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\ts.mutex.Lock()\n\t\ts.conncounter++\n\t\ts.mutex.Unlock()\n\t\twg.Add(1)\n\t\tgo s.handleConnection(ctx, conn, &wg)\n\t}\n\n\treturn nil\n}", "func (l *HostIPListener) Listen(cancel <-chan interface{}, conn client.Connection) {\n\tzzk.Listen2(cancel, conn, l)\n}", "func (s *Server) Listen() error {\n\treturn s.hs.Serve(s.ln)\n}", "func listen(listenAddress string, tlsConfig *tls.Config) (listener net.Listener, listenedIP string, port string, err error) {\n\tif tlsConfig == nil {\n\t\tlistener, err = net.Listen(\"tcp4\", listenAddress)\n\t} else {\n\t\tlistener, err = tls.Listen(\"tcp4\", listenAddress, tlsConfig)\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\trealAddr := listener.Addr().String()\n\tlistenedIP, port, err = net.SplitHostPort(realAddr)\n\tif err != nil {\n\t\treturn\n\t}\n\tip := net.ParseIP(listenedIP)\n\tif ip.IsUnspecified() {\n\t\tif iputil.IsIPv6Address(ip) {\n\t\t\tlistenedIP = iputil.GetLocalIPv6()\n\t\t\tif listenedIP == \"\" {\n\t\t\t\tlistenedIP = iputil.GetLocalIP()\n\t\t\t}\n\t\t} else {\n\t\t\tlistenedIP = iputil.GetLocalIP()\n\t\t}\n\t}\n\treturn\n}", "func Bind(port int) {\r\n\tlisten, err := net.Listen(\"tcp\", \"0.0.0.0:\"+strconv.Itoa(port))\r\n\tExitOnError(err)\r\n\tdefer listen.Close()\r\n\r\n\tfor {\r\n\t\tconn, err := listen.Accept()\r\n\t\tif err != nil {\r\n\t\t\tPrintError(\"Cannot bind to selected port\")\r\n\t\t}\r\n\t\thandleBind(conn)\r\n\t}\r\n}", "func ServerListen(handler http.Handler) {\n\n\tfmt.Println(fmt.Sprintf(tmpl,\n\t\tconfig.GetString(\"app.name\"),\n\t\tconfig.GetString(\"app.port\"),\n\t))\n\n\tserver := &http.Server{\n\t\tAddr: \":\" + 
config.GetString(\"app.port\"),\n\t\tHandler: handler,\n\t\tReadTimeout: config.GetDuration(\"app.read_timeout\") * time.Second,\n\t\tWriteTimeout: config.GetDuration(\"app.write_timeout\") * time.Second,\n\t\tMaxHeaderBytes: 1 << 20,\n\t}\n\n\terr := server.ListenAndServe()\n\tlog.Error(err)\n}", "func (Transport) Listen(ctx context.Context, addr string) (net.Listener, error) {\n\treturn Listen(addr)\n}", "func (bot *Bot) Listen() error {\n\tfmt.Println(\"Listening for messages...\")\n\n\tfor {\n\t\tm, err := bot.receive()\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t\tbot.reply(m, err.Error())\n\t\t\tcontinue\n\t\t}\n\n\t\tif m == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\thandler, ok := bot.handlers[m.Command]\n\t\tif !ok {\n\t\t\tbot.reply(m, fmt.Sprintf(\"No handler for %s\", m.Command))\n\t\t\tcontinue\n\t\t}\n\n\t\t// TODO: Don't let this blow up\n\t\tresp := handler.Fn(m)\n\n\t\tbot.reply(m, resp)\n\t}\n}", "func Listen(sc ServerConfig) {\n\tlistenSocket, er := zmq.NewSocket(zmq.DEALER)\n\tif er != nil {\n\t\tpanic (\"Unable to open socket for listening\")\n\t}\n\tdefer listenSocket.Close()\n\tlistenSocket.Bind(\"tcp://\" + sc.Url)\n\tlistenSocket.SetRcvtimeo(2*time.Second)\n\tfor {\n\t\tmsg, err := listenSocket.Recv(0)\n\t\tif err != nil {\n\t\t\tclose(sc.Input)\n\t\t\treturn\n\t\t} else {\n\t\t\tsc.N_msgRcvd++\n\t\t}\n\t\t\n\t\tmessage := new(Envelope)\n\t\tjson.Unmarshal([]byte(msg), message)\n\t\tsc.Input <- message\n\t}\n}", "func server() error {\r\n\tendpoint := NewEndpoint()\r\n\r\n\t// Add the handle funcs.\r\n\tendpoint.AddHandleFunc(\"STRING\", handleStrings)\r\n\tendpoint.AddHandleFunc(\"GOB\", handleGob)\r\n\r\n\t// Start listening.\r\n\treturn endpoint.Listen()\r\n}", "func ListenAndServe(endpoint string, logger *zerolog.Logger, handler http.Handler, writeTimeout time.Duration, readTimeout time.Duration) {\n\tlog := logger.With().Str(\"SERVER\", \"Listen and Serve\").Logger()\n\tsrv := &http.Server{\n\t\tHandler: handler,\n\t\tAddr: 
endpoint,\n\t\tWriteTimeout: writeTimeout,\n\t\tReadTimeout: readTimeout,\n\t}\n\n\tc := make(chan os.Signal, 1)\n\tidleConnections := make(chan struct{})\n\tsignal.Notify(c, os.Interrupt, syscall.SIGINT)\n\tgo func() {\n\t\t<-c\n\t\t// create context with timeout\n\t\tctx, cancel := context.WithTimeout(context.Background(), writeTimeout)\n\t\tdefer cancel()\n\n\t\t// start http shutdown\n\t\tif err := srv.Shutdown(ctx); err != nil {\n\t\t\tlog.Error().AnErr(\"shutdown\", err)\n\t\t}\n\n\t\tclose(idleConnections)\n\t}()\n\n\tlog.Info().Msg(\"listening at \" + endpoint)\n\tif err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {\n\t\tlog.Fatal().Msg(\"listen and Serve fail \" + err.Error())\n\t}\n\n\tlog.Info().Msg(\"waiting idle connections...\")\n\t<-idleConnections\n\tlog.Info().Msg(\"bye bye\")\n}", "func ListenAndServe(ctx context.Context, bin, address, port string) {\n\tfmt.Println(`\n\n███████╗███████╗██╗ ███████╗ ███████╗███████╗████████╗███████╗███████╗███╗ ███╗\n██╔════╝██╔════╝██║ ██╔════╝ ██╔════╝██╔════╝╚══██╔══╝██╔════╝██╔════╝████╗ ████║\n███████╗█████╗ ██║ █████╗█████╗█████╗ ███████╗ ██║ █████╗ █████╗ ██╔████╔██║\n╚════██║██╔══╝ ██║ ██╔══╝╚════╝██╔══╝ ╚════██║ ██║ ██╔══╝ ██╔══╝ ██║╚██╔╝██║\n███████║███████╗███████╗██║ ███████╗███████║ ██║ ███████╗███████╗██║ ╚═╝ ██║\n╚══════╝╚══════╝╚══════╝╚═╝ ╚══════╝╚══════╝ ╚═╝ ╚══════╝╚══════╝╚═╝ ╚═╝`)\n\tlog.Info(ctx, \"server listening\", \"bin\", bin, \"address\", address, \"port\", port)\n\thttp.ListenAndServe(fmt.Sprintf(\"%s:%s\", address, port), mux)\n}", "func Listen() {\n\tlog.Println(\"Configuring router\")\n\trouter := mux.NewRouter()\n\n\tlog.Println(\"Mapping end-point [GET]/note/{id}\")\n\trouter.HandleFunc(\"/note/{id}\", HandleFindNoteByID).Methods(\"GET\")\n\n\tlog.Println(\"Mapping end-point [POST]/note/{id}\")\n\trouter.HandleFunc(\"/note\", HandleCreateNote).Methods(\"POST\")\n\n\tlog.Println(\"Mapping end-point [PUT]/note\")\n\trouter.HandleFunc(\"/note\", 
HandleUpdateNote).Methods(\"PUT\")\n\n\tlog.Println(\"Mapping end-point [DELETE]/note/{id}\")\n\trouter.HandleFunc(\"/note/{id}\", HandleDeleteNoteByID).Methods(\"DELETE\")\n\n\tlog.Println(\"Listening on port 9101\")\n\tlog.Fatal(http.ListenAndServe(\":9101\", router))\n}", "func main() {\n\tlistener, err := net.Listen(\"tcp\", \"localhost:21\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor {\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t\tcontinue\n\t\t}\n\t\tgo handleConn(conn)\n\t}\n}", "func (a *Available) Listen(fn func(e events.Event) error) error {\n\treturn a.Add(\"events\", buildListen(fn))\n}", "func Listen(port int) {\n\tgo func() {\n\t\tsrv := &dns.Server{Addr: \":\" + strconv.Itoa(port), Net: \"udp\"}\n\t\tlog.Printf(\"Listening on udp port %d\", port)\n\t\tif err := srv.ListenAndServe(); err != nil {\n\t\t\tlog.Fatalf(\"Failed to set udp listener: %s\\n\", err.Error())\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tsrv := &dns.Server{Addr: \":\" + strconv.Itoa(port), Net: \"tcp\"}\n\t\tlog.Printf(\"Listening on tcp port %d\", port)\n\t\tif err := srv.ListenAndServe(); err != nil {\n\t\t\tlog.Fatalf(\"Failed to set tcp listener: %s\\n\", err.Error())\n\t\t}\n\t}()\n}", "func (srv *Server) StartListen() {\n\tvar err error\n\tsrv.Listener, err = net.Listen(\"tcp\", fmt.Sprintf(\"%s:%s\", srv.Host, srv.Port))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Printf(\"🔓 Listening on %s:%s\", srv.Host, srv.Port)\n\thttp.Serve(srv.Listener, nil)\n}", "func (pipe *slimPipe) Listen() error {\n\tpipe.infoLogger.Println(\"Listening on Stdin\")\n\treturn nil\n}", "func Run(p uint, s HandlerSpawner) {\n\t//Start listening for connections\n\tl, e := net.Listen(\"tcp\", \":\"+strconv.Itoa(int(p)))\n\tif e != nil {\n\t\tutil.Log(fmt.Errorf(\"error %q listening on port %d\", e, p), LOG_SERVER)\n\t\treturn\n\t}\n\tdefer l.Close()\n\tfor {\n\t\tif c, e := l.Accept(); e != nil {\n\t\t\tutil.Log(fmt.Errorf(\"error %q accepting 
connection\", e), LOG_SERVER)\n\t\t} else {\n\t\t\t//Spawn a handler for each new connection.\n\t\t\tgo func(cn net.Conn) {\n\t\t\t\th := s.Spawn()\n\t\t\t\th.Start(cn)\n\t\t\t}(c)\n\t\t}\n\t}\n}", "func Listen(path string) (net.Listener, error) {\n\treturn winio.ListenPipe(path, nil)\n}", "func Listen(path string) (net.Listener, error) {\n\treturn defaultListener.Listen(path)\n}", "func Listen(rawurl string) (listener net.Listener, err error) {\n\tu, err := url.Parse(rawurl)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tswitch u.Scheme {\n\tcase \"fd\":\n\t\tvar fd uint64\n\t\tfd, err = strconv.ParseUint(u.Host, 10, 8)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\t// NOTE: The name argument doesn't really matter apparently\n\t\tsockfile := os.NewFile(uintptr(fd), fmt.Sprintf(\"fd://%d\", fd))\n\t\tlistener, err = net.FileListener(sockfile)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\tdefault:\n\t\tvar laddr *net.TCPAddr\n\t\tladdr, err = net.ResolveTCPAddr(u.Scheme, u.Host)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tlistener, err = net.ListenTCP(\"tcp\", laddr)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}", "func (o *Opts) StartListening() {\n\tif _networkAvailable() {\n\t\tfor {\n\t\t\te := <-*o.eventChan\n\t\t\tfor _, event := range o.EventsToListen {\n\t\t\t\tif event != \"All\" && event != \"all\" {\n\t\t\t\t\tif strings.Compare(e[\"Event\"], event) == 0 {\n\t\t\t\t\t\t// e represents filtered events\n\t\t\t\t\t\to.EventHandler(e)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// e represents all events\n\t\t\t\t\to.EventHandler(e)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func Listen(addr net.Addr, publishAddr SignedETHAddr) (net.Conn, error) {\n\treturn ListenWithLog(addr, publishAddr, zap.NewNop())\n}", "func listen(buf []byte) error {\n\tn, addr, err := socket.ReadFromUDP(buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif n > 0 {\n\t\thandle(buf[0:n], addr)\n\t}\n\n\treturn nil\n}", "func (s *Server) Start() error {\n\ts.quit = make(chan 
interface{})\n\tc := winio.PipeConfig{\n\t\t//\n\t\t// SDDL encoded.\n\t\t//\n\t\t// (system = SECURITY_NT_AUTHORITY | SECURITY_LOCAL_SYSTEM_RID)\n\t\t// owner: system\n\t\t// ACE Type: (A) Access Allowed\n\t\t// grant: (GA) GENERIC_ALL to (WD) Everyone\n\t\t//\n\t\tSecurityDescriptor: \"O:SYD:(A;;GA;;;WD)\",\n\t}\n\tl, err := winio.ListenPipe(npipeEndpoint[len(protocol):], &c)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"port server listen error: %w\", err)\n\t}\n\ts.listener = l\n\tfor {\n\t\tconn, err := s.listener.Accept()\n\t\tif err != nil {\n\t\t\tselect {\n\t\t\tcase <-s.quit:\n\t\t\t\ts.eventLogger.Info(uint32(windows.NO_ERROR), \"port server received a stop signal\")\n\t\t\t\treturn nil\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"port server connection accept error: %w\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tgo s.handleEvent(conn)\n\t\t}\n\t}\n}", "func (ts *TCPServer) Listen() error {\n\tfor {\n\t\tconn, err := ts.listener.Accept()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cannot accept: %v\", err)\n\t\t}\n\t\tts.connections = append(ts.connections, conn)\n\t\tgo ts.handleConnection(conn)\n\t}\n}", "func (r *MasterRouter) Listen(net, laddr string) (net.Listener, error) {\n\treturn nil, fmt.Errorf(\"not implemented\")\n}", "func (server *TcpBridgeServer) Listen(callbacks ...func(manager *ConnectionManager)) error {\n\n\tif server.Port <= 0 || server.Port > 65535 {\n\t\treturn errors.New(\"invalid port range: \" + strconv.Itoa(server.Port))\n\t}\n\n\tlistener, e1 := net.Listen(\"tcp\", \":\"+strconv.Itoa(server.Port))\n\tif e1 != nil {\n\t\tpanic(e1)\n\t\treturn nil\n\t}\n\tlogger.Info(\"server listening on port:\", server.Port)\n\t// keep accept connections.\n\tfor {\n\t\tconn, e1 := listener.Accept()\n\t\tmanager := &ConnectionManager{\n\t\t\tConn: conn,\n\t\t\tSide: ServerSide,\n\t\t}\n\t\tif e1 != nil {\n\t\t\tlogger.Error(\"accept new conn error:\", e1)\n\t\t\tmanager.Destroy()\n\t\t} else {\n\t\t\tlogger.Debug(\"accept a new connection from 
remote addr:\", conn.RemoteAddr().String())\n\t\t\tconnectionPool.Exec(func() {\n\t\t\t\tmanager.State = StateConnected\n\t\t\t\tcommon.Try(func() {\n\t\t\t\t\tServe(manager, callbacks...)\n\t\t\t\t}, func(i interface{}) {\n\t\t\t\t})\n\t\t\t})\n\t\t}\n\t}\n\treturn nil\n}", "func (h *RouterHandle) Listen(in *api.ListenRequest, stream api.Router_ListenServer) error {\n\tvar (\n\t\ttoken = (*token)(in.GetToken())\n\t)\n\n\tdesc, ok := token.getDesc()\n\tif !ok {\n\t\tlog.Warn(errInvTkn)\n\t\treturn nil\n\t}\n\n\tlogger := log.WithFields(log.Fields{\n\t\t\"token\": ((*api.Token)(token)).ToShort(),\n\t\t\"user\": desc.userID,\n\t\t\"image\": desc.image,\n\t})\n\n\tlogger.Info(\"Listen is requested\")\n\n\tbody, ok := desc.getBody()\n\tif !ok {\n\t\tlogger.Fatal(errNExists)\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-stream.Context().Done():\n\t\t\t\treturn\n\t\t\tcase sig := <-body.transmit:\n\t\t\t\tstream.Send(&sig)\n\t\t\t}\n\t\t}\n\t}()\n\n\t<-stream.Context().Done()\n\tlogger.Info(\"stops listening\")\n\treturn nil\n}", "func (socket *MockSocket) Listen() {\n}" ]
[ "0.66203743", "0.66116935", "0.6564344", "0.6537847", "0.65230536", "0.64692837", "0.6373706", "0.6306666", "0.6273131", "0.6236138", "0.6213746", "0.6199536", "0.61531854", "0.6143046", "0.61272234", "0.6126517", "0.6125706", "0.60635346", "0.6059511", "0.60571146", "0.6048463", "0.603517", "0.60350597", "0.60242194", "0.6021994", "0.60017455", "0.5997511", "0.5986991", "0.59779716", "0.59722114", "0.5966631", "0.59564453", "0.59562474", "0.5954543", "0.59449613", "0.59447104", "0.59274", "0.5926183", "0.5923238", "0.5916032", "0.59151626", "0.59040654", "0.5897863", "0.58957785", "0.5887205", "0.5881628", "0.58806306", "0.5874953", "0.5861262", "0.58484375", "0.5847465", "0.584044", "0.58333504", "0.58296216", "0.5822413", "0.5812097", "0.5795287", "0.57915384", "0.5784554", "0.57831407", "0.5778073", "0.5777078", "0.5773523", "0.57645655", "0.5761478", "0.5756868", "0.5755545", "0.57542497", "0.5747637", "0.57470775", "0.57402825", "0.5739478", "0.57354504", "0.5732654", "0.573099", "0.57272583", "0.57212204", "0.5719829", "0.57166153", "0.5706171", "0.5703536", "0.5688491", "0.56851256", "0.5679659", "0.5678202", "0.5674294", "0.56572837", "0.5656448", "0.56563526", "0.5655004", "0.565453", "0.5651912", "0.5648913", "0.563626", "0.5636089", "0.5633597", "0.56328315", "0.56327426", "0.5625518", "0.56154114" ]
0.6941947
0
handleMessages reads the connection up to the first newline. Based on this string, it calls the appropriate HandleFunc.
func (e *Endpoint) handleMessages(conn net.Conn) { // Wrap the connection into a buffered reader for easier reading. rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)) defer conn.Close() // Read from the connection until EOF. Expect a command name as the // next input. Call the handler that is registered for this command. for { log.Print("Receive command '") cmd, err := rw.ReadString('\n') switch { case err == io.EOF: log.Println("Reached EOF - close this connection.\n ---") return case err != nil: log.Println("\nError reading command. Got: '"+cmd+"'\n", err) return } // Trim the request string - ReadString does not strip any newlines. cmd = strings.Trim(cmd, "\n ") log.Println(cmd + "'") // Fetch the appropriate handler function from the 'handler' map and call it. e.mutex.RLock() handleCommand, ok := e.handler[cmd] e.mutex.RUnlock() if !ok { log.Println("Command '" + cmd + "' is not registered.") return } handleCommand(rw) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *MetricReceiver) handleMessage(addr net.Addr, msg []byte) {\n\tbuf := bytes.NewBuffer(msg)\n\tfor {\n\t\tline, readerr := buf.ReadBytes('\\n')\n\n\t\t// protocol does not require line to end in \\n, if EOF use received line if valid\n\t\tif readerr != nil && readerr != io.EOF {\n\t\t\tr.handleError(fmt.Errorf(\"error reading message from %s: %s\", addr, readerr))\n\t\t\treturn\n\t\t} else if readerr != io.EOF {\n\t\t\t// remove newline, only if not EOF\n\t\t\tif len(line) > 0 {\n\t\t\t\tline = line[:len(line)-1]\n\t\t\t}\n\t\t}\n\n\t\t// Only process lines with more than one character\n\t\tif len(line) > 1 {\n\t\t\tmetric, err := parseLine(line)\n\t\t\tif err != nil {\n\t\t\t\tr.handleError(fmt.Errorf(\"error parsing line %q from %s: %s\", line, addr, err))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgo r.Handler.HandleMetric(metric)\n\t\t}\n\n\t\tif readerr == io.EOF {\n\t\t\t// if was EOF, finished handling\n\t\t\treturn\n\t\t}\n\t}\n}", "func HandleMessages(body *string) error {\n\tvar m Message\n\n\terr := json.Unmarshal([]byte(*body), &m)\n\tif err != nil {\n\t\tlog.Printf(\"Error unmarshal sqs message body into %v error:%v\", m, err)\n\t\treturn err\n\t}\n\n\tswitch m.Category {\n\tcase CategoryConsumer:\n\t\treturn unmarshalConsumer(m)\n\tcase CategoryBusiness:\n\t\treturn unmarshalBusiness(m)\n\tcase CategoryAccount:\n\t\treturn unmarshalAccount(m)\n\tcase CategoryCard:\n\t\treturn unmarshalCard(m)\n\t}\n\n\treturn nil\n\n}", "func (c *Conn) handleMessages() {\n\tfor {\n\t\tselect {\n\t\tcase msg := <-c.outputChan:\n\t\t\t_, err := io.WriteString(c.c, msg)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error writing to conn %d: %s\\n\", c.id, err)\n\t\t\t}\n\t\tcase <-c.closeChan:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (M *ConnectionManager) handleMessages() {\n\tfor {\n\t\tselect {\n\t\tcase msg := <-M.Messages:\n\t\t\tterr.VerbPrint(outputTo, 5, verb, M.ID, \"RECIEVED:\", msg)\n\t\t}\n\t}\n}", "func (u *Input) goHandleMessages(ctx context.Context) 
{\n\tu.wg.Add(1)\n\n\tgo func() {\n\t\tdefer u.wg.Done()\n\n\t\tdec := decoder.New(u.encoding)\n\t\tbuf := make([]byte, 0, MaxUDPSize)\n\t\tfor {\n\t\t\tmessage, remoteAddr, err := u.readMessage()\n\t\t\tif err != nil {\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t\tu.Errorw(\"Failed reading messages\", zap.Error(err))\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif u.OneLogPerPacket {\n\t\t\t\tlog := truncateMaxLog(message)\n\t\t\t\tu.handleMessage(ctx, remoteAddr, dec, log)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tscanner := bufio.NewScanner(bytes.NewReader(message))\n\t\t\tscanner.Buffer(buf, MaxUDPSize)\n\n\t\t\tscanner.Split(u.splitFunc)\n\n\t\t\tfor scanner.Scan() {\n\t\t\t\tu.handleMessage(ctx, remoteAddr, dec, scanner.Bytes())\n\t\t\t}\n\t\t\tif err := scanner.Err(); err != nil {\n\t\t\t\tu.Errorw(\"Scanner error\", zap.Error(err))\n\t\t\t}\n\t\t}\n\t}()\n}", "func (svc ChatService) HandleConnection(session session.Session, conn session.Connection) {\n\tb, _ := ioutil.ReadAll(conn)\n\treceivedChatMessages = receivedChatMessages + \"<br>\" + string(b[:])\n}", "func (bot *Hitbot) MessageHandler() {\n\tfor {\n\t\t_, p, err := bot.conn.ReadMessage()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\t//log.Printf(\"Message: %v\", string(p)) //debug info\n\t\tif string(p[:3]) == \"2::\" {\n\t\t\tbot.conn.WriteMessage(websocket.TextMessage, []byte(\"2::\"))\n\t\t\t//log.Print(\"Ping!\")\n\t\t\tcontinue\n\t\t} else if string(p[:3]) == \"1::\" {\n\t\t\tlog.Print(\"Connection successful!\")\n\t\t\tfor _, channel := range bot.channels {\n\t\t\t\tbot.joinChannel(channel)\n\t\t\t}\n\t\t\tcontinue\n\t\t} else if string(p[:4]) == \"5:::\" {\n\t\t\tbot.parseMessage(p[4:])\n\t\t}\n\t}\n}", "func HandleReceiver(conn net.Conn) {\n\tdefer conn.Close()\n\tvar buf [1024]byte\n\tfor {\n\t\t// read upto 1024 bytes\n\t\tn, err := conn.Read(buf[0:])\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tmsg := 
string(buf[0:n])\n\t\tfmt.Println(\"Messaged received from client: \", msg)\n\t\tmessage := ParseMessage(msg)\n\t\tmessage.ExecuteCommand(conn)\n\t\tbreak\n\t}\n\tfmt.Println(\"Done handle Receiver\", Users)\n\n}", "func (irc *IrcCon) handleIncomingMessages() {\n\tscan := bufio.NewScanner(irc.con)\n\tfor scan.Scan() {\n\t\tmes := ParseMessage(scan.Text())\n\t\tconsumed := false\n\t\tfor _,t := range irc.tr {\n\t\t\tif t.Condition(mes) {\n\t\t\t\tconsumed = t.Action(irc,mes)\n\t\t\t}\n\t\t\tif consumed {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !consumed {\n\t\t\tirc.Incoming <- mes\n\t\t}\n\t}\n}", "func (s *Socket) handleMessagesIn() {\n\tfor {\n\t\tm := <-s.messagesIn\n\t\tfmt.Printf(\"Receiving message: %v\", m)\n\t\tswitch m.MessageType {\n\t\tcase PLACE_ORDER:\n\t\t\ts.placeOrder(m.Payload)\n\t\tcase CANCEL_ORDER:\n\t\t\ts.cancelOrder(m.Payload)\n\t\tcase SIGNED_DATA:\n\t\t\ts.executeOrder(m.Payload)\n\t\tcase DONE:\n\t\tdefault:\n\t\t\tpanic(\"Unknown message type\")\n\t\t}\n\t}\n}", "func (handler *BotHandler) handleMessages() {\n\thandler.McRunner.WaitGroup.Add(1)\n\tdefer handler.McRunner.WaitGroup.Done()\n\tfor {\n\t\tselect {\n\t\tcase msg := <-handler.McRunner.MessageChannel:\n\t\t\tmessage := message{Timestamp: time.Now().Format(time.RFC3339), Message: msg}\n\t\t\tmessageJSON, _ := json.Marshal(message)\n\t\t\theader := header{Type: \"msg\", Data: messageJSON}\n\t\t\thandler.sock.WriteJSON(header)\n\t\tcase <-handler.killChannel:\n\t\t\treturn\n\t\t}\n\t}\n}", "func receiveMessages(conn net.Conn) {\n\tvar data []byte\n\tbuffer := make([]byte, bufferSize)\n\n\tfor {\n\t\tfor {\n\t\t\tn, err := conn.Read(buffer)\n\t\t\tif err != nil && err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tbuffer = buffer[:n]\n\t\t\tdata = append(data, buffer...)\n\t\t\tif data[len(data)-1] == endLine {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tfmt.Printf(\"%s\\n\", data[:len(data)-1])\n\t\tdata = make([]byte, 0)\n\t}\n}", "func (s *SlaveNode) handleMessages(featurePipe *os.File) 
{\n\treader := bufio.NewReader(featurePipe)\n\tfor {\n\t\tif msg, err := reader.ReadString('\\n'); err != nil {\n\t\t\treturn\n\t\t} else {\n\t\t\tmsg = strings.TrimRight(msg, \"\\n\")\n\t\t\ts.featureL.Lock()\n\t\t\ts.features[msg] = true\n\t\t\ts.featureL.Unlock()\n\t\t\ts.fileMonitor.Add(msg)\n\t\t}\n\t}\n}", "func (em *EventManager) handleMessages(c chan interface{}) {\n\tlog.Printf(\"Starting message handler routine\")\n\n\tfor {\n\t\t// Load events\n\t\tm, open := <-c\n\t\tif open {\n\t\t\t// Call message handler\n\t\t\tlog.Printf(\"Received message generic\")\n\t\t\tem.handleMessage(m)\n\t\t} else {\n\t\t\t// Exit message handling go-routine\n\t\t\tlog.Println(\"Exiting client routine\")\n\t\t\tbreak\n\t\t}\n\t}\n\tlog.Printf(\"Exiting message handler routine\")\n\tem.wg.Done()\n}", "func (tv *TV) MessageHandler() (err error) {\n\tdefer func() {\n\t\ttv.resMutex.Lock()\n\t\tfor _, ch := range tv.res {\n\t\t\tclose(ch)\n\t\t}\n\t\ttv.res = nil\n\t\ttv.resMutex.Unlock()\n\t}()\n\n\tfor {\n\t\tmt, p, err := tv.ws.ReadMessage()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif mt != websocket.TextMessage {\n\t\t\tcontinue\n\t\t}\n\n\t\tmsg := Message{}\n\n\t\terr = json.Unmarshal(p, &msg)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\ttv.resMutex.Lock()\n\t\tch := tv.res[msg.ID]\n\t\ttv.resMutex.Unlock()\n\n\t\tch <- msg\n\t}\n}", "func (a *Adapter) handleMessage() {\n\tfor {\n\t\t_, input, err := a.Conn.ReadMessage()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Could not read message! %s\", err)\n\t\t\tbreak\n\t\t}\n\n\t\t// Decodes input message\n\t\terr, meta, data := lib.DecodeMessage(&input)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Could not decode incoming message! %s\", err)\n\t\t}\n\n\t\tif glob.V_LOG_IO_MSG {\n\t\t\tlog.Infof(\"Message received! 
\\nMeta: %s \\nData: %s\", meta, data)\n\t\t}\n\n\t\tgo a.TraverseCBs(meta, data)\n\t}\n}", "func (g *Gossiper) HandleClientMessages() {\n\tg.ConnectToClient()\n\n\tpacketBytes := make([]byte, buffsize)\n\tmsg := &message.Message{}\n\n\tfor {\n\t\tnRead, _, err := g.clientConn.ReadFromUDP(packetBytes)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error: read from buffer failed.\")\n\t\t\tos.Exit(-1)\n\t\t}\n\n\t\tif nRead > 0 {\n\t\t\tprotobuf.Decode(packetBytes, msg)\n\t\t\tprintClientMessage(*msg)\n\t\t\tg.PrintPeers()\n\n\t\t\trumorMsg := message.RumorMessage{Origin: g.name, ID: messageID, Text: msg.Text}\n\t\t\tg.rumorMsgs.AddMessage(g.name, rumorMsg)\n\t\t\tg.newMsgs = append(g.newMsgs, rumorMsg)\n\t\t\tmessageID++\n\t\t\tg.myStatus.SetStatus(g.name, messageID)\n\n\t\t\tpacket := &gossippacket.GossipPacket{Rumor: &rumorMsg}\n\t\t\tgo g.rumorMonger(*packet, nil)\n\t\t}\n\t}\n}", "func (b *Builder) HandleMessages() {\n\tfor {\n\t\tm := <-b.incoming\n\t\tswitch message := m.(type) {\n\t\tcase *rmake.RequiredFileMessage:\n\t\t\tslog.Info(\"Received required file.\")\n\t\t\t//Get a file from another node\n\t\t\tb.newfiles <- message\n\n\t\tcase *rmake.BuilderRequest:\n\t\t\tslog.Info(\"Received builder request.\")\n\t\t\tb.RequestQueue.Push(message)\n\n\t\tcase *rmake.BuilderResult:\n\t\t\tslog.Info(\"Received builder result.\")\n\t\t\tb.HandleBuilderResult(message)\n\n\t\tdefault:\n\t\t\tslog.Warnf(\"Received invalid message type. '%s'\", reflect.TypeOf(message))\n\t\t}\n\t}\n}", "func handle_conn(conn * Connection) {\n for conn.connected {\n messages := conn.Receive()\n if conn.connected && messages != nil {\n for _, message := range messages {\n fmt.Println(\"Received message\", string(message.Serialize()))\n handle_message(conn, message)\n }\n }\n }\n}", "func HandleMessage(msg *WeechatMessage, handler HandleWeechatMessage) error {\n\t// Got an empty message, simply don't process it for now. 
We can figure\n\t// out how to handle this.\n\tif msg == nil {\n\t\tfmt.Printf(\"Got Nil message to handle.\\n\")\n\t\treturn nil\n\t}\n\tswitch msg.Msgid {\n\tcase \"listbuffers\", \"_buffer_opened\":\n\t\t// parse out the list of buffers which are Hda objects.\n\t\tbufffers := msg.Object.Value.(WeechatHdaValue)\n\t\tbuflist := make(map[string]*WeechatBuffer, len(bufffers.Value))\n\n\t\tfor _, each := range bufffers.Value {\n\t\t\tbuf := &WeechatBuffer{\n\t\t\t\tShortName: each[\"short_name\"].Value.(string),\n\t\t\t\tFullName: each[\"full_name\"].Value.(string),\n\t\t\t\tTitle: each[\"title\"].Value.(string),\n\t\t\t\tNumber: each[\"number\"].Value.(int32),\n\t\t\t\tLocalVars: each[\"local_variables\"].Value.(map[WeechatObject]WeechatObject),\n\t\t\t\tLines: make([]*WeechatLine, 0),\n\t\t\t\t// this is essentially a list of strings, pointers,\n\t\t\t\t// the first pointer of which is the buffer' pointer.\n\t\t\t\tPath: each[\"__path\"].Value.([]string)[1],\n\t\t\t}\n\t\t\tbuflist[buf.Path] = buf\n\t\t}\n\n\t\thandler.HandleListBuffers(buflist)\n\n\tcase \"_buffer_line_added\":\n\t\tfor _, each := range msg.Object.Value.(WeechatHdaValue).Value {\n\t\t\taddLine(handler, each)\n\t\t}\n\tcase \"listlines\":\n\t\tlines := msg.Object.Value.(WeechatHdaValue).Value\n\t\tfor i := len(lines) - 1; i >= 0; i-- {\n\t\t\taddLine(handler, lines[i])\n\t\t}\n\tcase \"nicklist\", \"_nicklist\":\n\t\t// handle list of nicks.\n\t\tvar nicks []*WeechatNick\n\t\tnickValues := msg.Object.Value.(WeechatHdaValue).Value\n\t\tvar buffer = \"default\"\n\t\tfor _, val := range nickValues {\n\n\t\t\titem := &WeechatNick{\n\t\t\t\tName: val[\"name\"].as_string(),\n\t\t\t\tColor: val[\"color\"].as_string(),\n\t\t\t\tLevel: val[\"level\"].as_int(),\n\t\t\t\tPrefix: val[\"prefix\"].as_string(),\n\t\t\t\tPrefixColor: val[\"prefix_color\"].as_string(),\n\t\t\t\tGroup: val[\"group\"].as_bool(),\n\t\t\t\tVisible: val[\"visible\"].as_bool(),\n\t\t\t}\n\n\t\t\tnicks = append(nicks, item)\n\t\t\tbuffer 
= val[\"__path\"].Value.([]string)[2]\n\t\t}\n\t\thandler.HandleNickList(buffer, nicks)\n\tcase \"error\":\n\t\thandler.Default(msg)\n\tdefault:\n\t\thandler.Default(msg)\n\t}\n\treturn nil\n}", "func receiveMessages(conn *websocket.Conn) {\n\tdefer disconnect(conn)\n\tfor {\n\t\tvar demarshaled struct {\n\t\t\tCommand string\n\t\t\tBody string\n\t\t\tClient models.Client\n\t\t}\n\t\terr := conn.ReadJSON(&demarshaled)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error: Unable to read message from client\")\n\t\t\tlog.Println(\"Disconnecting client...\")\n\t\t\tbreak\n\t\t}\n\t\tmessage := &models.Message{\n\t\t\tCommand: demarshaled.Command,\n\t\t\tBody: demarshaled.Body,\n\t\t\tClient: &demarshaled.Client,\n\t\t}\n\t\trequest := serverRequest{\n\t\t\tMessage: message,\n\t\t\tClient: models.CloneClient(clients[conn]),\n\t\t}\n\n\t\tswitch command := message.GetCommand(); command {\n\t\tcase \"login\":\n\t\t\tloginRequests <- request\n\t\tcase \"newuser\":\n\t\t\tnewUserRequests <- request\n\t\tcase \"send\":\n\t\t\tsendRequests <- request\n\t\tcase \"logout\":\n\t\t\tlogoutRequests <- request\n\t\tcase \"help\":\n\t\t\thelpRequests <- request\n\t\tdefault:\n\t\t\tlog.Println(\"Received unrecognized command -\", command, \"- from client\")\n\t\t}\n\t}\n}", "func (c *Client) Read(data string) (err error) {\n\tmsg, err := parseMessage(data)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tm, ok := c.events[Unknown]\n\tif ok {\n\t\tfor _, f := range m {\n\t\t\tf(c, msg)\n\t\t}\n\t}\n\n\tif msg.Command == Unknown {\n\t\treturn // Already called these handlers.\n\t}\n\n\tif m, ok = c.events[msg.Command]; !ok {\n\t\treturn\n\t}\n\n\tfor _, f := range m {\n\t\tf(c, msg)\n\t}\n\n\treturn\n}", "func (c *Conn) handleConnection() {\n\tdefer c.Close()\n\n\tfmt.Fprintf(c.c, welcomeText, c.username)\n\n\tgo c.handleMessages()\n\n\tscanner := bufio.NewScanner(c.c)\n\tfor scanner.Scan() {\n\t\tinput := scanner.Text()\n\n\t\tif input == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif input[0] == '/' 
{\n\t\t\tif !c.handleCommand(input) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tc.Announce(input)\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tlog.Print(\"error scanning lines:\", err)\n\t}\n}", "func HandleNewMessages(conn io.ReadWriteCloser, msgs chan<- *messages.Message, welcomes chan<- *messages.ArborMessage) {\n\treadMessages := messages.MakeMessageReader(conn)\n\tdefer close(msgs)\n\tfor fromServer := range readMessages {\n\t\tswitch fromServer.Type {\n\t\tcase messages.WELCOME:\n\t\t\twelcomes <- fromServer\n\t\t\tclose(welcomes)\n\t\t\twelcomes = nil\n\t\tcase messages.NEW_MESSAGE:\n\t\t\t// add the new message\n\t\t\tmsgs <- fromServer.Message\n\t\tdefault:\n\t\t\tlog.Println(\"Unknown message type: \", fromServer.String)\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func readLoop(conn Connection, finish *sync.WaitGroup) {\n defer func() {\n if p := recover(); p != nil {\n holmes.Error(\"panics: %v\", p)\n }\n finish.Done()\n conn.Close()\n }()\n\n for conn.IsRunning() {\n select {\n case <-conn.GetCloseChannel():\n return\n\n default:\n msg, err := conn.GetMessageCodec().Decode(conn)\n if err != nil {\n holmes.Error(\"Error decoding message %v\", err)\n if _, ok := err.(ErrorUndefined); ok {\n // update heart beat timestamp\n conn.SetHeartBeat(time.Now().UnixNano())\n continue\n }\n return\n }\n\n // update heart beat timestamp\n conn.SetHeartBeat(time.Now().UnixNano())\n handler := GetHandler(msg.MessageNumber())\n if handler == nil {\n if conn.GetOnMessageCallback() != nil {\n holmes.Info(\"Message %d call onMessage()\", msg.MessageNumber())\n conn.GetOnMessageCallback()(msg, conn)\n } else {\n holmes.Warn(\"No handler or onMessage() found for message %d\", msg.MessageNumber())\n }\n continue\n }\n\n // send handler to handleLoop\n conn.GetMessageHandlerChannel()<- MessageHandler{msg, handler}\n }\n }\n}", "func MessagesHandler(w http.ResponseWriter, r *http.Request) {\n\tclient := context.Get(r, \"redis.Client\").(*redis.Client)\n\tu := 
url.Values{}\n\n\tu.Set(\"a\", \"sf-muni\")\n\n\trouteTags := r.URL.Query().Get(\"route_tags\")\n\tif routeTags != \"\" {\n\t\troutes := strings.Split(routeTags, \",\")\n\t\tfor n := range routes {\n\t\t\tu.Add(\"r\", routes[n])\n\t\t}\n\t}\n\n\tbody, err := getFromCache(client, r.URL.String())\n\tif err == nil {\n\t\tfmt.Fprintf(w, \"%s\", body)\n\t\treturn\n\t}\n\n\tbody, err = fetch(\"messages\", u)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\twriteToCache(client, r.URL.String(), body)\n\n\tfmt.Fprintf(w, \"%s\", body)\n}", "func handleStrings(rw *bufio.ReadWriter) {\r\n\t// Receive a string.\r\n\tlog.Print(\"Receive STRING message:\")\r\n\ts, err := rw.ReadString('\\n')\r\n\tif err != nil {\r\n\t\tlog.Println(\"Cannot read from connection.\\n\", err)\r\n\t}\r\n\ts = strings.Trim(s, \"\\n \")\r\n\tlog.Println(s)\r\n\t_, err = rw.WriteString(\"Thank you.\\n\")\r\n\tif err != nil {\r\n\t\tlog.Println(\"Cannot write to connection.\\n\", err)\r\n\t}\r\n\terr = rw.Flush()\r\n\tif err != nil {\r\n\t\tlog.Println(\"Flush failed.\", err)\r\n\t}\r\n}", "func processMessages(conn net.Conn, c chan *packetInfo, t *Tracker) {\n\tdefer t.remove(conn)\n\n\tscanner := bufio.NewScanner(conn)\n\tscanner.Split(bufio.ScanBytes)\n\n\tvar validMagic bool\n\tvar numReadBytes int\n\tneedFrame := true\n\n\tvar b bytes.Buffer\n\tvar mb bytes.Buffer\n\n\tfor {\n\t\tfor scanner.Scan() {\n\t\t\tscanBytes := scanner.Bytes()\n\t\t\tb.Write(scanBytes)\n\n\t\t\tif needFrame {\n\t\t\t\tif b.Len() >= headerLength {\n\t\t\t\t\tvalidMagic, numReadBytes = processFrame(&b)\n\n\t\t\t\t\tif !validMagic {\n\t\t\t\t\t\tfmt.Fprintf(os.Stderr, \"[Error] %s\\n\", \"invalid magic\")\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tif numReadBytes <= 0 || numReadBytes > maxMessageLength {\n\t\t\t\t\t\tfmt.Fprintf(os.Stderr, \"[Error] %s\\n\", \"invalid message 
length\")\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tneedFrame = false\n\t\t\t\t}\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif b.Len() >= numReadBytes {\n\t\t\t\tmsg := make([]byte, numReadBytes)\n\t\t\t\tb.Read(msg)\n\n\t\t\t\tmb.Write(msg)\n\n\t\t\t\tneedFrame = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif err := scanner.Err(); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"[Error] %s\\n\", err.Error())\n\t\t\tbreak\n\t\t}\n\n\t\tif mb.Len() == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\t// Extend the deadline, we got a valid full message.\n\t\tconn.SetDeadline(time.Now().Add(deadline))\n\n\t\tif err := sendToPacketInfoChan(&mb, conn, c, t); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"[Error] %s\\n\", err.Error())\n\t\t\tbreak\n\t\t}\n\t}\n}", "func (p *MultiLineParser) Handle(input *DecodedInput) {\n\tp.inputChan <- input\n}", "func (serv *BusServer) handleConnection(conn net.Conn) {\n\n\t// create a buffered reader.\n\treader := bufio.NewReader(conn)\n\n\t// attempt to read a line.\n\tfor {\n\t\tline, err := reader.ReadString('\\n')\n\n\t\t// error occured before reaching delimiter.\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t// no error. 
handle the line.\n\t\tserv.readHandler(serv, conn, line)\n\n\t}\n}", "func reader(conn *websocket.Conn ) {\n\tfor {\n\t\t// read in a message\n\t\tmessageType, p, err := conn.ReadMessage()\n\t\tif err != nil && len(p) != 0 {\n\t\t\talltime <- (string)(p)\n\t\t\tif websocket.IsCloseError(err, websocket.CloseNormalClosure) {\n\t\t\t\tlog.Printf(\"error: %v\", err)\n\t\t\t}\n\t\t\tlog.Printf(\"error: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tprocessMessage(messageType,string(p))\n\n\t}\n}", "func (nc *NetClient) handleMessage(m *arbor.ProtocolMessage) {\n\tswitch m.Type {\n\tcase arbor.NewMessageType:\n\t\tif !nc.Archive.Has(m.UUID) {\n\t\t\tif nc.receiveHandler != nil {\n\t\t\t\tnc.receiveHandler(m.ChatMessage)\n\t\t\t\t// ask Notifier to handle the message\n\t\t\t\tnc.Notifier.Handle(nc, m.ChatMessage)\n\t\t\t}\n\t\t\tif m.Parent != \"\" && !nc.Archive.Has(m.Parent) {\n\t\t\t\tnc.Query(m.Parent)\n\t\t\t}\n\t\t}\n\tcase arbor.WelcomeType:\n\t\tif !nc.Has(m.Root) {\n\t\t\tnc.Query(m.Root)\n\t\t}\n\t\tfor _, recent := range m.Recent {\n\t\t\tif !nc.Has(recent) {\n\t\t\t\tnc.Query(recent)\n\t\t\t}\n\t\t}\n\tcase arbor.MetaType:\n\t\tnc.HandleMeta(m.Meta)\n\t}\n}", "func handleConnection(conn *net.Conn, inputChannel chan string, KeyChannel chan string) {\n\treader := bufio.NewReader(*conn)\n\tfor {\n\t\tmsg, er := reader.ReadString('\\n')\n\t\tif er != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif msg[0] == 'k' {\n\t\t\t//Receive key\n\t\t\tKeyChannel <- msg\n\t\t} else {\n\t\t\tinputChannel <- msg\n\t\t}\n\t}\n}", "func (s *Serve) HandleMessage(msg *nsq.Message) (err error) {\n\tif string(msg.Body) == \"TOBEFAILED\" {\n\t\treturn errors.New(\"fail this message\")\n\t}\n\n\tvar m url.Values\n\terr = json.Unmarshal(msg.Body, &m)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tresC <- m\n\treturn\n}", "func ReadMessage(conn *websocket.Conn, stopChan chan<- bool) {\n\tfor {\n\t\treader := bufio.NewReader(os.Stdin)\n\t\ttext, err := reader.ReadString('\\n')\n\t\tif err != 
nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tstopChan <- true\n\t\t\treturn\n\t\t}\n\t\terr = conn.WriteMessage(websocket.TextMessage, []byte(text))\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (self *OFSwitch) handleMessages(dpid net.HardwareAddr, msg util.Message) {\n\tlog.Debugf(\"Received message: %+v, on switch: %s\", msg, dpid.String())\n\n\tswitch t := msg.(type) {\n\tcase *common.Header:\n\t\tswitch t.Header().Type {\n\t\tcase openflow13.Type_Hello:\n\t\t\t// Send Hello response\n\t\t\th, err := common.NewHello(4)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Error creating hello message\")\n\t\t\t}\n\t\t\tself.Send(h)\n\n\t\tcase openflow13.Type_EchoRequest:\n\t\t\t// Send echo reply\n\t\t\tres := openflow13.NewEchoReply()\n\t\t\tself.Send(res)\n\n\t\tcase openflow13.Type_EchoReply:\n\t\t\tself.lastUpdate = time.Now()\n\n\t\tcase openflow13.Type_FeaturesRequest:\n\n\t\tcase openflow13.Type_GetConfigRequest:\n\n\t\tcase openflow13.Type_BarrierRequest:\n\n\t\tcase openflow13.Type_BarrierReply:\n\n\t\t}\n\tcase *openflow13.ErrorMsg:\n\t\terrMsg := GetErrorMessage(t.Type, t.Code, 0)\n\t\tmsgType := GetErrorMessageType(t.Data)\n\t\tlog.Errorf(\"Received OpenFlow1.3 error: %s on message %s\", errMsg, msgType)\n\t\tresult := MessageResult{\n\t\t\tsucceed: false,\n\t\t\terrType: t.Type,\n\t\t\terrCode: t.Code,\n\t\t\txID: t.Xid,\n\t\t\tmsgType: UnknownMessage,\n\t\t}\n\t\tself.publishMessage(t.Xid, result)\n\n\tcase *openflow13.VendorHeader:\n\t\tlog.Debugf(\"Received Experimenter message, VendorType: %d, ExperimenterType: %d, VendorData: %+v\", t.Vendor, t.ExperimenterType, t.VendorData)\n\t\tswitch t.ExperimenterType {\n\t\tcase openflow13.Type_TlvTableReply:\n\t\t\treply := t.VendorData.(*openflow13.TLVTableReply)\n\t\t\tstatus := TLVTableStatus(*reply)\n\t\t\tself.tlvMgr.TLVMapReplyRcvd(self, &status)\n\t\tcase openflow13.Type_BundleCtrl:\n\t\t\tresult := MessageResult{\n\t\t\t\txID: t.Header.Xid,\n\t\t\t\tsucceed: 
true,\n\t\t\t\tmsgType: BundleControlMessage,\n\t\t\t}\n\t\t\treply := t.VendorData.(*openflow13.BundleControl)\n\t\t\tself.publishMessage(reply.BundleID, result)\n\t\t}\n\n\tcase *openflow13.SwitchFeatures:\n\t\tswitch t.Header.Type {\n\t\tcase openflow13.Type_FeaturesReply:\n\t\t\tgo func() {\n\t\t\t\tswConfig := openflow13.NewSetConfig()\n\t\t\t\tswConfig.MissSendLen = 128\n\t\t\t\tself.Send(swConfig)\n\t\t\t\tself.Send(openflow13.NewSetControllerID(self.ctrlID))\n\t\t\t}()\n\t\t}\n\n\tcase *openflow13.SwitchConfig:\n\t\tswitch t.Header.Type {\n\t\tcase openflow13.Type_GetConfigReply:\n\n\t\tcase openflow13.Type_SetConfig:\n\n\t\t}\n\tcase *openflow13.PacketIn:\n\t\tlog.Debugf(\"Received packet(ofctrl): %+v\", t)\n\t\t// send packet rcvd callback\n\t\tself.app.PacketRcvd(self, (*PacketIn)(t))\n\n\tcase *openflow13.FlowRemoved:\n\n\tcase *openflow13.PortStatus:\n\t\t// FIXME: This needs to propagated to the app.\n\tcase *openflow13.PacketOut:\n\n\tcase *openflow13.FlowMod:\n\n\tcase *openflow13.PortMod:\n\n\tcase *openflow13.MultipartRequest:\n\n\tcase *openflow13.MultipartReply:\n\t\tlog.Debugf(\"Received MultipartReply\")\n\t\trep := (*openflow13.MultipartReply)(t)\n\t\tif self.monitorEnabled {\n\t\t\tkey := fmt.Sprintf(\"%d\", rep.Xid)\n\t\t\tch, found := monitoredFlows.Get(key)\n\t\t\tif found {\n\t\t\t\treplyChan := ch.(chan *openflow13.MultipartReply)\n\t\t\t\treplyChan <- rep\n\t\t\t}\n\t\t}\n\t\t// send packet rcvd callback\n\t\tself.app.MultipartReply(self, rep)\n\tcase *openflow13.VendorError:\n\t\terrData := t.ErrorMsg.Data.Bytes()\n\t\tresult := MessageResult{\n\t\t\tsucceed: false,\n\t\t\terrType: t.Type,\n\t\t\terrCode: t.Code,\n\t\t\texperimenter: int32(t.ExperimenterID),\n\t\t\txID: t.Xid,\n\t\t}\n\t\texperimenterID := binary.BigEndian.Uint32(errData[8:12])\n\t\terrMsg := GetErrorMessage(t.Type, t.Code, experimenterID)\n\t\texperimenterType := binary.BigEndian.Uint32(errData[12:16])\n\t\tswitch experimenterID {\n\t\tcase 
openflow13.ONF_EXPERIMENTER_ID:\n\t\t\tswitch experimenterType {\n\t\t\tcase openflow13.Type_BundleCtrl:\n\t\t\t\tbundleID := binary.BigEndian.Uint32(errData[16:20])\n\t\t\t\tresult.msgType = BundleControlMessage\n\t\t\t\tself.publishMessage(bundleID, result)\n\t\t\t\tlog.Errorf(\"Received Vendor error: %s on ONFT_BUNDLE_CONTROL message\", errMsg)\n\t\t\tcase openflow13.Type_BundleAdd:\n\t\t\t\tbundleID := binary.BigEndian.Uint32(errData[16:20])\n\t\t\t\tresult.msgType = BundleAddMessage\n\t\t\t\tself.publishMessage(bundleID, result)\n\t\t\t\tlog.Errorf(\"Received Vendor error: %s on ONFT_BUNDLE_ADD_MESSAGE message\", errMsg)\n\t\t\t}\n\t\tdefault:\n\t\t\tlog.Errorf(\"Received Vendor error: %s\", errMsg)\n\t\t}\n\t}\n}", "func (o *handler) handle(client mqtt.Client, msg mqtt.Message) {\r\n\t// We extract the count and write that out first to simplify checking for missing values\r\n\tvar m Message\r\n\tvar resp Session\r\n\tif err := json.Unmarshal(msg.Payload(), &resp); err != nil {\r\n\t\tfmt.Printf(\"Message could not be parsed (%s): %s\", msg.Payload(), err)\r\n\t\treturn\r\n\t}\r\n\tfmt.Println(resp)\r\n\tswitch resp.Type {\r\n\tcase CMDMSG_OFFER:\r\n\t\tenc.Decode(resp.Data, &m)\r\n\t\tNotice(m)\r\n\tcase CMDMSG_DISC:\r\n\t\tvar devcmd DiscoveryCmd\r\n\t\tenc.Decode(resp.Data, &devcmd)\r\n\t\tDiscoveryDev(&devcmd)\r\n\tcase CMDMSG_WAKE:\r\n\t\tvar fing Fing\r\n\t\tenc.Decode(resp.Data, &fing)\r\n\t\twakemac(fing)\r\n\tcase CMDMSG_UPDATE:\r\n\t\tvar newver *versionUpdate\r\n\t\tGetUpdateMyself(newver)\r\n\tcase CMDMSG_MR2:\r\n\t\tvar mr2info Mr2Msg\r\n\t\tenc.Decode(resp.Data, &mr2info)\r\n\t\tMr2HostPort(&mr2info)\r\n\t}\r\n}", "func (srv *Server) handleMessage(msg *Message) error {\n\tswitch msg.msgType {\n\tcase MsgSignalBinary:\n\t\tfallthrough\n\tcase MsgSignalUtf8:\n\t\tfallthrough\n\tcase MsgSignalUtf16:\n\t\tsrv.handleSignal(msg)\n\n\tcase MsgRequestBinary:\n\t\tfallthrough\n\tcase MsgRequestUtf8:\n\t\tfallthrough\n\tcase 
MsgRequestUtf16:\n\t\tsrv.handleRequest(msg)\n\n\tcase MsgRestoreSession:\n\t\treturn srv.handleSessionRestore(msg)\n\tcase MsgCloseSession:\n\t\treturn srv.handleSessionClosure(msg)\n\t}\n\treturn nil\n}", "func (srv *Server) handleMessage(msg *Message) error {\n\tswitch msg.msgType {\n\tcase MsgSignalBinary:\n\t\tfallthrough\n\tcase MsgSignalUtf8:\n\t\tfallthrough\n\tcase MsgSignalUtf16:\n\t\tsrv.handleSignal(msg)\n\n\tcase MsgRequestBinary:\n\t\tfallthrough\n\tcase MsgRequestUtf8:\n\t\tfallthrough\n\tcase MsgRequestUtf16:\n\t\tsrv.handleRequest(msg)\n\n\tcase MsgRestoreSession:\n\t\treturn srv.handleSessionRestore(msg)\n\tcase MsgCloseSession:\n\t\treturn srv.handleSessionClosure(msg)\n\t}\n\treturn nil\n}", "func Handler(conn net.Conn, pubsub *PubSub) {\n\n\treader := bufio.NewReader(conn)\n\n\tdata, err := reader.ReadString('\\n')\n\n\tif err != nil {\n\t\t//log.Fatal(err)\n\t\treturn\n\t}\n\n\tcommand := strings.Split(strings.TrimSuffix(data, \"\\n\"), \" \")\n\n\tswitch command[0] {\n\n\tcase \"PUBLISH\":\n\t\tgo Publish(conn, command, pubsub)\n\n\tcase \"SUBSCRIBE\":\n\t\tgo Subscribe(conn, command, pubsub)\n\t}\n\n}", "func (s *Server) handleRead(ctx context.Context, username string, c *websocket.Conn) error {\n\tvar data Message\n\n\tif err := wsjson.Read(ctx, c, &data); err != nil {\n\t\ts.userMu.Lock()\n\t\ts.unsubscribe(username)\n\t\ts.userMu.Unlock()\n\n\t\treturn err\n\t}\n\n\tdata.Username = username\n\n\ts.log.Info().Msgf(\"received %s message: %s\", data.Username, data.Text)\n\n\ts.writeCh <- data\n\n\treturn nil\n}", "func handleConnection(ms *TLVServ, conn net.Conn) {\n\tvar msgHandler *mtypeInfo\n\tvar ok bool\n\n\tdefer conn.Close()\n\n\tlog.Printf(\"%s connected\\n\", conn.RemoteAddr())\n\n\tfor {\n\t\t// Block till there is data to read.\n\t\tmtype, datalen, readErr := readHeader(conn)\n\t\tif readErr != nil {\n\t\t\tif readErr == io.EOF {\n\t\t\t\tlog.Printf(\"%s disconected\\n\", conn.RemoteAddr())\n\t\t\t\tbreak\n\t\t\t} else 
{\n\t\t\t\tlog.Println(readErr)\n\t\t\t}\n\t\t}\n\n\t\t// datalen might be 0, but that's ok, we'll get back and empty array\n\t\tdata, err := readData(conn, datalen, time.Duration(ms.readTimeout))\n\t\tif err == nil {\n\t\t\t// Get the handler function for this message type (mtype)\n\t\t\tms.mtypeHandlersLock.RLock()\n\t\t\tmsgHandler, ok = ms.mtypeHandlers[mtype]\n\t\t\tms.mtypeHandlersLock.RUnlock()\n\t\t\tif ok == true {\n\t\t\t\tgo handlerRunner(msgHandler, conn, data)\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"missing handler for mtype %v\\n\", mtype)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n}", "func (u *UnityServer) readMessage() {\n\tfor {\n\t\tdata := make([]byte, 8192)\n\t\tn, err := u.conn.Read(data)\n\t\tif err != nil {\n\t\t\tu.Logger.Errorf(\"Error: Reading socket - %v\", err)\n\t\t\tu.stop <- true\n\t\t\tbreak\n\t\t}\n\t\tu.incoming <- string(data[:n])\n\t}\n}", "func (i *Irc) handleMsg(msg irc.Msg) {\n\tbotMsg := i.buildMessage(msg)\n\n\tswitch msg.Cmd {\n\tcase irc.ERROR:\n\t\tlog.Info().Msgf(\"Received error: \" + msg.Raw)\n\n\tcase irc.PING:\n\t\ti.Client.Out <- irc.Msg{Cmd: irc.PONG}\n\n\tcase irc.PONG:\n\t\t// OK, ignore\n\n\tcase irc.ERR_NOSUCHNICK:\n\t\tfallthrough\n\n\tcase irc.ERR_NOSUCHCHANNEL:\n\t\tfallthrough\n\n\tcase irc.RPL_MOTD:\n\t\tfallthrough\n\n\tcase irc.RPL_NAMREPLY:\n\t\tfallthrough\n\n\tcase irc.RPL_TOPIC:\n\t\tfallthrough\n\n\tcase irc.KICK:\n\t\tfallthrough\n\n\tcase irc.TOPIC:\n\t\tfallthrough\n\n\tcase irc.MODE:\n\t\tfallthrough\n\n\tcase irc.JOIN:\n\t\tfallthrough\n\n\tcase irc.PART:\n\t\tfallthrough\n\n\tcase irc.NOTICE:\n\t\tfallthrough\n\n\tcase irc.NICK:\n\t\tfallthrough\n\n\tcase irc.RPL_WHOREPLY:\n\t\tfallthrough\n\n\tcase irc.RPL_ENDOFWHO:\n\t\tbotMsg.Kind = bot.Event\n\t\ti.event(i, bot.Event, botMsg)\n\n\tcase irc.PRIVMSG:\n\t\tbotMsg.Kind = bot.Message\n\t\ti.event(i, bot.Message, botMsg)\n\n\tcase irc.QUIT:\n\t\tos.Exit(1)\n\n\tdefault:\n\t\tcmd := 
irc.CmdNames[msg.Cmd]\n\t\tlog.Debug().Msgf(\"(%s) %s\", cmd, msg.Raw)\n\t}\n}", "func (b *unixBus) HandleMessage(uuid uuid.UUID, msg interface{}) (<-chan interface{}, error) {\n\tsockAddr := fmt.Sprintf(\"%s/%v.sock\", endpointsDir, uuid)\n\n\tc, err := net.Dial(\"unix\", sockAddr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tenc := gob.NewEncoder(c)\n\terr = enc.Encode(message{Type: messageTypeSend, Message: msg})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tch := make(chan interface{})\n\n\tgo func() {\n\t\tdefer close(ch)\n\t\tdefer c.Close()\n\n\t\tdec := gob.NewDecoder(c)\n\t\tvar r message\n\t\tfor {\n\t\t\terr := dec.Decode(&r)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tswitch r.Type {\n\t\t\tcase messageTypeResult:\n\t\t\t\tch <- r.Message\n\n\t\t\tdefault:\n\t\t\t\tlog.Printf(\"Invalid response so ignoring\")\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn ch, nil\n}", "func handlerMsg(msg []byte) {\n\tchats <- string(msg)\n}", "func handleMessage(s *Server, p *TCPPeer) {\n\t// Disconnect the peer when we break out of the loop.\n\tdefer func() {\n\t\ts.unregister <- p\n\t}()\n\n\tfor {\n\t\tmsg := <-p.receive\n\t\tcommand := msg.commandType()\n\n\t\ts.logger.Printf(\"IN :: %d :: %s :: %v\", p.id(), command, msg)\n\n\t\tswitch command {\n\t\tcase cmdVersion:\n\t\t\tresp := s.handleVersionCmd(msg, p)\n\t\t\tp.nonce = msg.Payload.(*payload.Version).Nonce\n\t\t\tp.send <- resp\n\t\tcase cmdAddr:\n\t\t\ts.handleAddrCmd(msg, p)\n\t\tcase cmdGetAddr:\n\t\t\ts.handleGetaddrCmd(msg, p)\n\t\tcase cmdInv:\n\t\t\tresp := s.handleInvCmd(msg, p)\n\t\t\tp.send <- resp\n\t\tcase cmdBlock:\n\t\t\ts.handleBlockCmd(msg, p)\n\t\tcase cmdConsensus:\n\t\tcase cmdTX:\n\t\tcase cmdVerack:\n\t\t\tgo s.sendLoop(p)\n\t\tcase cmdGetHeaders:\n\t\tcase cmdGetBlocks:\n\t\tcase cmdGetData:\n\t\tcase cmdHeaders:\n\t\t}\n\t}\n}", "func (g *Gossiper) SimpleHandleClientMessages() {\n\tg.ConnectToClient()\n\n\tpacketBytes := make([]byte, buffsize)\n\tmsg := 
&message.Message{}\n\n\tfor {\n\t\tnRead, _, err := g.clientConn.ReadFromUDP(packetBytes)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error: read from buffer failed.\")\n\t\t\tos.Exit(-1)\n\t\t}\n\n\t\tif nRead > 0 {\n\t\t\tprotobuf.Decode(packetBytes, msg)\n\t\t\tprintClientMessage(*msg)\n\t\t\tg.PrintPeers()\n\t\t\tsimpleMsg := &message.SimpleMessage{OriginalName: g.name, RelayPeerAddr: g.udpAddr.String(), Contents: msg.Text}\n\t\t\t(*g).PrintPeers()\n\t\t\tpacket := &gossippacket.GossipPacket{Simple: simpleMsg}\n\t\t\tg.broadcastSimpleMessage(packet, nil)\n\t\t}\n\t}\n}", "func (mp *MessageProcessor) Handle(event *irc.Event) {\n\tmessage := event.Message()\n\tmessage = mp.StripInput(message)\n\n\tpaintCommand, err := mp.Parse(message)\n\tif err == nil {\n\t\tmp.Chan <- *paintCommand\n\t} else {\n\t\tfmt.Printf(\"ERROR: command %q encountered the following error: %q\", message, err)\n\t}\n\n}", "func HandleConsoleMessage(ctx *context.LegscContext, msgBytes []byte) {\n\tmsg := &message.Console{}\n\terr := message.Unmarshal(msgBytes, msg)\n\tif err != nil {\n\t\tlog.Error(\"failed in parsing message: \", err)\n\t\tsendConsoleError(err, msg, ctx)\n\t\treturn\n\t}\n\n\tswitch msg.State {\n\tcase message.ConsoleStartState:\n\t\thandleStartMessage(ctx, msg)\n\tcase message.ConsoleCloseState:\n\t\thandleCloseMessage(ctx, msg)\n\tcase message.ConsoleInputState:\n\t\thandleInputMessage(ctx, msg)\n\t}\n}", "func HandleMessage(w http.ResponseWriter, r *http.Request) {\n\tif token == \"\" {\n\t\tlog.Printf(\"no bot token available\")\n\t\treturn\n\t}\n\n\tbot, err := tgbotapi.NewBotAPI(token)\n\tif err != nil {\n\t\tlog.Printf(\"could not create telegram bot: %v\\n\", err)\n\t\treturn\n\t}\n\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Printf(\"could not read request: %v\\n\", err)\n\t\treturn\n\t}\n\n\tvar update tgbotapi.Update\n\n\tif err := json.Unmarshal(data, &update); err != nil {\n\t\tlog.Printf(\"could not parse request: %v\\n\", 
err)\n\t\treturn\n\t}\n\n\tcid := update.Message.Chat.ID\n\n\tswitch {\n\tcase update.Message.Voice != nil:\n\t\thandleVoiceMessage(bot, cid, update.Message.Voice)\n\tcase update.Message.Text != \"\":\n\t\thandleTextMessage(bot, cid, update.Message.Text)\n\t}\n}", "func (th *TailHandler) HandleMessage(m *nsq.Message) error {\n\tth.messagesShown++\n\tif err := th.printMessage(th.writer, m); err != nil {\n\t\tfmt.Printf(\"err %v\\n\", err)\n\t\treturn err\n\t}\n\tif th.totalMessages > 0 && th.totalMessages < th.messagesShown {\n\t\tos.Exit(0)\n\t}\n\treturn nil\n}", "func handleConnection(conn net.Conn) {\n\tdefer conn.Close()\n\n\tmsg := make([]byte, 4096)\n\tn, _ := conn.Read(msg)\n\n\tif !HandleMessage(msg[:n], conn) {\n\t\tfmt.Println(\"Error with connection: \" + conn.RemoteAddr().String())\n\t}\n\n}", "func readClientMessages(ws *websocket.Conn, msgChan chan Message) {\n\tfor {\n\t\tvar message Message\n\n\t\t// check to see if there's anything shouldAllowPrompt from the server and if so\n\t\t// send to our channel\n\t\terr := websocket.JSON.Receive(ws, &message)\n\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error on receiving json on web socket connection %s\\n\", err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tmsgChan <- message\n\t}\n}", "func (s *Server) handleConnection(sock net.Conn) {\n\tmsg := make([]byte, 4096)\n\n\tn, _ := sock.Read(msg)\n\n\tlog.Printf(\"Incoming msg: %s\\n\", string(msg[:n]))\n\n\ts.SIPMessageReaction(sock, string(msg[:n]))\n}", "func (messenger *TCPMessenger) handleConn(c *net.TCPConn, channel chan Message) {\r\n\tmsg, err := messenger.recvMessage(c)\r\n\tif err != nil {\r\n\t\tlog.Println(\"[ERROR] Failed to rcvmessage: \" + err.Error())\r\n\t}\r\n\tif err == io.EOF {\r\n\t\tlog.Println(\"[DEBUG] Closing connection.\")\r\n\t\tc.Close()\r\n\t\treturn\r\n\t}\r\n\t// log.Println(\"[DEBUGMessage recieved \", msg)\r\n\t// Quesues messages for processing in the channel\r\n\tchannel <- msg\r\n\r\n}", "func (s *Server) handleRead(pubKey 
credentials.StaticSizedPublicKey, done <-chan struct{}) {\n\ttr, err := s.connMgr.getTransport(pubKey)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase in := <-tr.Read():\n\t\t\t// Unmarshal the message\n\t\t\tmsg := &message.Message{}\n\t\t\tif err := UnmarshalProtoMessage(in, msg); err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Handle the message request or response\n\t\t\tswitch ex := msg.Exchange.(type) {\n\t\t\tcase *message.Message_Request:\n\t\t\t\ts.handleMessageRequest(pubKey, ex.Request)\n\t\t\tcase *message.Message_Response:\n\t\t\t\ts.handleMessageResponse(ex.Response)\n\t\t\tdefault:\n\t\t\t\t// log.Println(\"Invalid message type\")\n\t\t\t}\n\t\tcase <-done:\n\t\t\treturn\n\t\t}\n\t}\n}", "func handleMessage(msg *game.InMessage, ws *websocket.Conn, board *game.Board) {\n\tfmt.Println(\"Message Got: \", msg)\n\n}", "func handleMessages() {\n\tfor {\n\t\tmsg := <-broadcaster\n\n\t\tstoreInRedis(msg)\n\t\tmessageClients(msg)\n\t}\n}", "func (api *APIv1) MessagesHandler(w http.ResponseWriter, r *http.Request) {\n\twr := &HTTPResponder{w, r, \"\"}\n\n\tconn, err := api.upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\twr.Error(HTTPErr, err)\n\t\treturn\n\t}\n\tid, messages := api.messages.Subscribe()\n\n\tfor {\n\t\tm, ok := <-messages // Blocks until we have a message.\n\t\tif !ok {\n\t\t\t// Channel is now closed.\n\t\t\tbreak\n\t\t}\n\t\tam, _ := json.Marshal(&APIv1Message{m.(*Message).typ, m.(*Message).text})\n\n\t\terr = conn.WriteMessage(websocket.TextMessage, am)\n\t\tif err != nil {\n\t\t\t// Silently unsubscribe, the client has gone away.\n\t\t\tbreak\n\t\t}\n\t}\n\tapi.messages.Unsubscribe(id)\n\tconn.Close()\n}", "func handleConnection(connection net.Conn, log *logs.MultipleLog) {\n\n\tlog.Infof(\"%s just Connected. 
\\n\", connection.RemoteAddr().String())\n\n\tfor {\n\t\tnetData, err := bufio.NewReader(connection).ReadString('\\n')\n\t\tif err != nil {\n\t\t\tlog.Error(\"Listening of:\" + connection.RemoteAddr().String() + \" stopped.\")\n\t\t\treturn\n\t\t}\n\n\t\tlog.Info(connection.RemoteAddr().String() + \" Says : \" + strings.TrimSpace(string(netData)))\n\n\t\tconnection.Write([]byte(string(\"Server: Message recived \\n\")))\n\n\t\tdefer connection.Close()\n\t}\n\n}", "func listenForMessages(in io.Reader, out chan *deviceResponse, closed chan struct{}) {\n\tfor {\n\t\t// stream the reply back in 64 byte chunks\n\t\tchunk := make([]byte, 64)\n\t\tvar reply []byte\n\t\tvar kind uint16\n\t\tfor {\n\t\t\t// Read next chunk\n\t\t\tif _, err := io.ReadFull(in, chunk); err != nil {\n\n\t\t\t\t// Abort if this keepkey has closed its connections\n\t\t\t\tselect {\n\t\t\t\tcase <-closed:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\tfmt.Println(color.RedString(\"Unable to read chunk from device:\", err)) // TODO: move to device specific log\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t//TODO: check transport header\n\n\t\t\t//if it is the first chunk, retreive the reply message type and total message length\n\t\t\tvar payload []byte\n\n\t\t\tif len(reply) == 0 {\n\t\t\t\tkind = binary.BigEndian.Uint16(chunk[3:5])\n\t\t\t\treply = make([]byte, 0, int(binary.BigEndian.Uint32(chunk[5:9])))\n\t\t\t\tpayload = chunk[9:]\n\t\t\t} else {\n\t\t\t\tpayload = chunk[1:]\n\t\t\t}\n\t\t\t// Append to the reply and stop when filled up\n\t\t\tif left := cap(reply) - len(reply); left > len(payload) {\n\t\t\t\treply = append(reply, payload...)\n\t\t\t} else {\n\t\t\t\treply = append(reply, payload[:left]...)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tout <- &deviceResponse{reply, kind}\n\t}\n}", "func (conn *Conn) recv() {\n\tfor {\n\t\ts, err := conn.io.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tlogging.Error(\"irc.recv(): %s\", err.Error())\n\t\t\tconn.shutdown()\n\t\t\treturn\n\t\t}\n\t\ts = 
strings.Trim(s, \"\\r\\n\")\n\t\tlogging.Debug(\"<- %s\", s)\n\n\t\tif line := parseLine(s); line != nil {\n\t\t\tline.Time = time.Now()\n\t\t\tconn.in <- line\n\t\t} else {\n\t\t\tlogging.Warn(\"irc.recv(): problems parsing line:\\n %s\", s)\n\t\t}\n\t}\n}", "func (c *Common) ListenForMessages() {\n\n defer func() {\n c.rp.Stop()\n }()\n\n for {\n for message := range c.receivedMessages {\n c.HandleReceivedMessage(message);\n }\n }\n}", "func (conn *Conn) recv() {\n\tfor {\n\t\ts, err := conn.io.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tlogging.Error(\"irc.recv(): %s\", err.Error())\n\t\t\t}\n\t\t\t// We can't defer this, because Close() waits for it.\n\t\t\tconn.wg.Done()\n\t\t\tconn.Close()\n\t\t\treturn\n\t\t}\n\t\ts = strings.Trim(s, \"\\r\\n\")\n\t\tlogging.Debug(\"<- %s\", s)\n\n\t\tif line := ParseLine(s); line != nil {\n\t\t\tline.Time = time.Now()\n\t\t\tconn.in <- line\n\t\t} else {\n\t\t\tlogging.Warn(\"irc.recv(): problems parsing line:\\n %s\", s)\n\t\t}\n\t}\n}", "func readFromServer(conn net.Conn) {\n for {\n message, err := bufio.NewReader(conn).ReadString('\\n')\n \n if err != nil {\n fmt.Println(\"Server down:\", err)\n return\n }\n \n message = strings.Trim(string(message), \"\\n\")\n \n fmt.Println(\"\\r<<\", string(message))\n }\n}", "func handle(conn net.Conn) {\n\tscan := bufio.NewReader(conn)\n\tfor {\n\t\tmsg, err := scan.ReadString('\\n') // recv\n\t\tmsg = encrypt(msg)\n\t\tif err != nil {\n\t\t\tprint(err)\n\t\t\tbreak\n\t\t} else if msg == \"Server is now down...\\n\" {\n\t\t\tfmt.Printf(\"\\rServer> %s\\n\", msg)\n\t\t\tconn.Close()\n\t\t\tos.Exit(0)\n\t\t}\n\t\tfmt.Print(\"\\r\" + msg)\n\t\tfmt.Print(\"> \")\n\t}\n}", "func messageListen(c net.Conn) {\n\tfor {\n\t\tbuf := make([]byte, 4096)\n\t\tlength, err := c.Read(buf)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Fail to read data, %s\\n\", err)\n\t\t\treturn\n\t\t}\n\n\t\tnetMsg := message.NetMessage{}\n\t\terr = json.Unmarshal(buf[:length], 
&netMsg)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Decode message error: \", err)\n\t\t\treturn\n\t\t}\n\n\t\troutes(netMsg.MsgName, []byte(netMsg.Data))\n\t}\n}", "func Parse(message string) (l *msg.Line, err error) {\n\tl, err = lexServerMsg(message)\n\tif err != nil {\n\t\treturn\n\t}\n\tvar output string\n\tvar context string\n\tswitch l.Cmd() {\n\tcase \"NOTICE\":\n\t\ttrail := l.Args()[len(l.Args())-1]\n\t\tif strings.HasPrefix(trail, \"\\001\") &&\n\t\t\tstrings.HasSuffix(trail, \"\\001\") {\n\t\t\tvar query string\n\t\t\toutput, context, query = ctcp(l.Nick(), l.Args())\n\n\t\t\t// create a new argument list to send to the handler\n\t\t\t// the first argument describes what kind of query is\n\t\t\t// being made\n\t\t\told := l.Args()\n\t\t\ttmp := make([]string, len(old)+1)\n\t\t\ttmp[0] = query\n\t\t\tfor i := range old {\n\t\t\t\ttmp[i+1] = old[i]\n\t\t\t}\n\n\t\t\tl.SetArgs(tmp)\n\t\t\tl.SetCmd(\"CTCP\")\n\t\t\tbreak\n\t\t}\n\t\toutput, context = notice(l.Nick(), l.Args())\n\tcase \"NICK\":\n\t\toutput, context = nick(l.Nick(), l.Args())\n\tcase \"MODE\":\n\t\toutput, context = mode(l.Nick(), l.Args())\n\tcase \"PRIVMSG\":\n\t\ttrail := l.Args()[len(l.Args())-1]\n\t\tif strings.HasPrefix(trail, \"\\001\") &&\n\t\t\tstrings.HasSuffix(trail, \"\\001\") {\n\t\t\tvar query string\n\t\t\toutput, context, query = ctcp(l.Nick(), l.Args())\n\n\t\t\t// create a new argument list to send to the handler\n\t\t\t// the first argument describes what kind of query is\n\t\t\t// being made\n\t\t\told := l.Args()\n\t\t\ttmp := make([]string, len(old)+1)\n\t\t\ttmp[0] = query\n\t\t\tfor i := range old {\n\t\t\t\ttmp[i+1] = old[i]\n\t\t\t}\n\n\t\t\tl.SetArgs(tmp)\n\t\t\tl.SetCmd(\"CTCP\")\n\t\t\tbreak\n\t\t}\n\n\t\toutput, context = privMsg(l.Nick(), l.Args())\n\t\tr := \"^\\\\W\"\n\t\tregex := regexp.MustCompile(r)\n\t\tif !regex.MatchString(context) {\n\t\t\tl.SetCmd(\"P2PMSG\")\n\t\t}\n\tcase \"PART\":\n\t\toutput, context = part(l.Nick(), l.Args())\n\tcase 
\"PING\":\n\t\toutput, context = ping(l.Args())\n\tcase \"PONG\":\n\t\t// TODO: Handle so that pongs from the server doesn't\n\t\t// print, but pongs from other users do\n\t\toutput, context = \"\", \"\"\n\tcase \"JOIN\":\n\t\toutput, context = join(l.Nick(), l.Args())\n\tcase \"QUIT\":\n\t\toutput, context = quit(l.Nick(), l.Args())\n\tcase \"328\":\n\t\toutput, context, err = chanUrl(l.Args())\n\tcase \"329\":\n\t\toutput, context, err = chanCreated(l.Args())\n\tcase \"332\":\n\t\toutput, context, err = topic(l.Args())\n\tcase \"333\":\n\t\toutput, context, err = topicSetBy(l.Args())\n\tcase \"353\":\n\t\toutput, context = nickList(l.Args())\n\tcase \"366\":\n\t\toutput, context = nickListEnd(l.Args())\n\tcase \"401\":\n\t\toutput, context = noSuchTarget(l.Args())\n\tcase \"470\":\n\t\toutput, context = forward(l.Args())\n\tdefault:\n\t\t// check for numeric commands\n\t\tr := regexp.MustCompile(\"^\\\\d+$\")\n\t\tif r.MatchString(l.Cmd()) {\n\t\t\toutput, context = numeric(l.Nick(), l.Args())\n\t\t} else {\n\t\t\terr = errors.New(\"Unknown command.\")\n\t\t\treturn\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\n\tl.SetOutput(output)\n\tl.SetContext(context)\n\treturn\n}", "func (p *SingleLineParser) Handle(input *DecodedInput) {\n\tp.inputChan <- input\n}", "func handleMessages(messages <-chan amqp.Delivery, handler AmqpHandler, waitGroup *sync.WaitGroup) {\n\tdefer waitGroup.Done()\n\tfor msg := range messages {\n\t\twaitGroup.Add(1)\n\t\tgo handleDeliveryWithRetry(msg, handler, waitGroup)\n\t}\n}", "func (svc *Client) ProcessMessages(ctx context.Context, queue string, processFunc func(*stomp.Message) error) error {\n\tstompConn, err := svc.getConnection()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsubQueue, err := stompConn.Subscribe(\n\t\tfmt.Sprintf(\"/queue/%s\", queue),\n\t\tstomp.AckAuto,\n\t\tstomp.SubscribeOpt.Header(\"durable-subscription-name\", 
hostname),\n\t\tstomp.SubscribeOpt.Header(\"subscription-type\", \"MULTICAST\"))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn subQueue.Unsubscribe()\n\t\tcase msg, ok := <-subQueue.C:\n\t\t\tif !ok {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tif msg.Err != nil {\n\t\t\t\treturn msg.Err\n\t\t\t}\n\n\t\t\terr := processFunc(msg)\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\t// No message recv'd\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func (b *Board) HandleIncomingMessages() {\n\tHandleIncomingMessages(b.cfg.ConnectionConfig.MulticastAddress, b.handlingMessage)\n}", "func (a *Agent) handleTCPMessages(c net.Conn) {\n\tdefer func() {\n\t\tif err := c.Close(); err != nil {\n\t\t\tlogger.Debug(err)\n\t\t}\n\t}()\n\tvar buf []byte\n\tmessageBuffer := bytes.NewBuffer(buf)\n\tconnReader := bufio.NewReader(c)\n\n\t// Read incoming tcp messages from client until we hit a valid JSON message.\n\t// If we don't get valid JSON or a ping request after 500ms, close the\n\t// connection (timeout).\n\treadDeadline := time.Now().Add(TCPSocketReadDeadline)\n\n\t// Only allow 500ms of IO. After this time, all IO calls on the connection\n\t// will fail.\n\tif err := c.SetReadDeadline(readDeadline); err != nil {\n\t\tlogger.WithError(err).Error(\"error setting read deadline\")\n\t\treturn\n\t}\n\n\t// It is possible that our buffered readers/writers will cause us\n\t// to iterate.\n\tfor time.Now().Before(readDeadline) {\n\t\t_, err := connReader.WriteTo(messageBuffer)\n\t\t// Check error condition. If it's a timeout error, continue so we can read\n\t\t// any remaining partial packets. 
Any other error type returns.\n\t\tif err != nil {\n\t\t\tif opError, ok := err.(*net.OpError); ok && !opError.Timeout() {\n\t\t\t\tlogger.Debugf(\"error reading message from tcp socket %s\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tif match := pingRe.Match(messageBuffer.Bytes()); match {\n\t\t\tlogger.Debug(\"tcp socket received ping\")\n\t\t\t_, err = c.Write([]byte(\"pong\"))\n\t\t\tif err != nil {\n\t\t\t\tlogger.WithError(err).Error(\"could not write response to tcp socket\")\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\t// Check our received data for valid JSON. If we get invalid JSON at this point,\n\t\t// read again from client, add any new message to the buffer, and parse\n\t\t// again.\n\t\tvar event types.Event\n\t\tvar result v1.CheckResult\n\t\tif err = json.Unmarshal(messageBuffer.Bytes(), &result); err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err = translateToEvent(a, result, &event); err != nil {\n\t\t\tlogger.WithError(err).Error(\"1.x returns \\\"invalid\\\"\")\n\t\t\treturn\n\t\t}\n\n\t\t// Prepare the event by mutating it as required so it passes validation\n\t\tif err = prepareEvent(a, &event); err != nil {\n\t\t\tlogger.WithError(err).Error(\"invalid event\")\n\t\t\treturn\n\t\t}\n\n\t\t// At this point, should receive valid JSON, so send it along to the\n\t\t// message sender.\n\t\tpayload, err := json.Marshal(event)\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Error(\"could not marshal json payload\")\n\t\t\treturn\n\t\t}\n\n\t\ta.sendMessage(transport.MessageTypeEvent, payload)\n\t\t_, _ = c.Write([]byte(\"ok\"))\n\t\treturn\n\t}\n\t_, _ = c.Write([]byte(\"invalid\"))\n}", "func makeMessageStreamHandler(handler api.ProtocolHandler, logger *logging.Logger) messageStreamHandler {\n\treturn func(in <-chan []byte, reply chan<- []byte) {\n\t\tfor msgBytes := range in {\n\n\t\t\tmsg, msgStr, err := handler.Unwrap(msgBytes)\n\t\t\tif err != nil {\n\n\t\t\t\tlogger.Warningf(\"Failed to unmarshal message: %s\", 
err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlogger.Debugf(\"Received %s\", msgStr)\n\n\t\t\tif replyChan, new, err := handler.Handle(msg); err != nil {\n\t\t\t\tlogger.Warningf(\"Failed to handle %s: %s\", msgStr, err)\n\t\t\t} else if replyChan != nil {\n\t\t\t\tm, more := <-replyChan\n\t\t\t\tif !more {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\treplyBytes, err := handler.Wrap(m)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\treply <- replyBytes\n\t\t\t} else if !new {\n\t\t\t\tlogger.Infof(\"Dropped %s\", msgStr)\n\t\t\t} else {\n\t\t\t\tlogger.Debugf(\"Handled %s\", msgStr)\n\t\t\t}\n\t\t}\n\t}\n}", "func handleMessages() {\n\tvar msg node.Message\n\tvar jsonMessage Message\n\tfor {\n\t\tmsg = <-uiChannel\n\t\tjsonMessage.Message = msg.Content\n\t\tjsonMessage.Peer = msg.Peer\n\t\tfor client := range clients {\n\t\t\t// Send message to web application\n\t\t\terr := client.WriteJSON(jsonMessage)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error: %v\", err)\n\t\t\t\tclient.Close()\n\t\t\t\tdelete(clients, client)\n\t\t\t}\n\t\t}\n\t}\n}", "func (g *GCMMessageHandler) HandleMessages(msg interfaces.KafkaMessage) {\n\tg.sendMessage(msg)\n}", "func (client *Client) Read() {\n\tvar message Message\n\tfor {\n\t\tif err := client.socket.ReadJSON(&message); err != nil {\n\t\t\tbreak\n\t\t}\n\t\t// Call findHandler to know which handler to call. 
If handler in router map value matches key then call it.\n\t\tif handler, found := client.findHandler(message.Name); found {\n\t\t\thandler(client, message.Data)\n\t\t}\n\t}\n\t// close connection once finished.\n\tclient.socket.Close()\n}", "func MessageReceiveHandler(s *discordgo.Session, m *discordgo.MessageCreate) {\n\t// Ignore bot messages\n\tif m.Author.Bot {\n\t\treturn\n\t}\n\n\t// Check for command\n\tif strings.HasPrefix(m.Content, config.Get().Prefix) {\n\t\t// Get Server object\n\t\tc, err := s.Channel(m.ChannelID)\n\t\tif err != nil {\n\t\t\tlogger.Log.Error(err.Error())\n\t\t\treturn\n\t\t}\n\t\tserver, err := db.GetServer(c.GuildID)\n\t\tif err != nil {\n\t\t\tlogger.Log.Error(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// Execute command\n\t\ttrigger := strings.Split(m.Content, config.Get().Prefix)[1]\n\t\tfor _, cmd := range command.MessageCommands {\n\t\t\tif cmd.Trigger() == trigger {\n\t\t\t\tcmd.Execute(server, s, m)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// Fallback for wrong command\n\t\t// Create response message\n\t\tmessage := &structure.Message{\n\t\t\tTitle: \"Unknown command. 
Try using `!kod-help` for a list of all game commands.\",\n\t\t\tType: \"system\",\n\t\t\tIcon: \"https://cdn.discordapp.com/attachments/512302843437252611/512302951814004752/ac6918be09a389876ee5663d6b08b55a.png\",\n\t\t\tFooter: \"Command execution feedback.\",\n\t\t}\n\n\t\t// Build Embed\n\t\tembed := builder.BuildEmbed(message)\n\n\t\t// Send response\n\t\t_, err = s.ChannelMessageSendEmbed(m.ChannelID, embed)\n\t\tif err != nil {\n\t\t\tlogger.Log.Error(err.Error())\n\t\t}\n\t}\n}", "func (w Wrapper) OnReadInMessages(f ReadInMessagesHandler) {\n\tw.longpoll.EventNew(6, func(i []interface{}) error {\n\t\tvar event ReadInMessages\n\t\tif err := event.parse(i); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tf(event)\n\n\t\treturn nil\n\t})\n}", "func readMessages(in io.Reader) ([]*message, error) {\n\tvar msgs []*message\n\tinput := bufio.NewScanner(in)\n\tfor {\n\t\tif msg, err := scanMessage(input); err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tmsgs = append(msgs, msg)\n\t\t}\n\t}\n\treturn msgs, input.Err()\n}", "func (h *myMessageHandler) HandleMessage(m *nsq.Message) error {\n\tif len(m.Body) == 0 {\n\t\treturn nil\n\t}\n\n\terr := h.processMessage(m.Body)\n\treturn err\n}", "func (c *Controller) handleMessages() {\n\tfor {\n\t\tmsg := c.chatroom.Receive()\n\t\tif msg.Info.Action == Broadcast {\n\t\t\tc.window.MsgQ <- &iMessage{\n\t\t\t\tSenderID: msg.SenderID,\n\t\t\t\tSenderName: msg.SenderName,\n\t\t\t\tType: Public,\n\t\t\t\tBody: msg.Info.Body,\n\t\t\t}\n\t\t}\n\t}\n}", "func (h *messageHandler) HandleMessage(m *nsq.Message) error {\n\t//Process the Message\n\tvar request Message\n\tif err := json.Unmarshal(m.Body, &request); err != nil {\n\t\tlog.Println(\"Error when Unmarshaling the message body, Err : \", err)\n\t\t// Returning a non-nil error will automatically send a REQ command to NSQ to re-queue the message.\n\t\treturn err\n\t}\n\t//Print the 
Message\n\tlog.Println(\"Message\")\n\tfmt.Println(request)\n\tlog.Println(\"--------------------\")\n\tlog.Println(\"Name : \", request.Name)\n\tlog.Println(\"Content : \", request.Content)\n\tlog.Println(\"Timestamp : \", request.Timestamp)\n\tlog.Println(\"--------------------\")\n\tlog.Println(\"\")\n\t// Will automatically set the message as finish\n\treturn nil\n}", "func (h *Handler) HandleMessage(body []byte) ([]byte, error) {\n\tt := struct {\n\t\tType string `json:\"type\"`\n\t}{}\n\tif err := json.Unmarshal(body, &t); err != nil {\n\t\treturn nil, err\n\t}\n\n\tmessageMapping := map[string]handlerFunc{\n\t\t\"url_verification\": h.handleURLVerification,\n\t\t\"event_callback\": h.handleEvent,\n\t}\n\n\tfn, ok := messageMapping[t.Type]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unknown event type %q\", t.Type)\n\t}\n\toutput, err := fn(body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: %v\", t.Type, err)\n\t}\n\treturn output, nil\n}", "func newMessageHandler(url string) (*messageHandler, error) {\n\tc, err := net.Dial(\"tcp\", url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconn := &messageHandler{\n\t\tc,\n\t\tjson.NewEncoder(c),\n\t\tmap[string][]interface{}{},\n\t\tmake(chan struct{}),\n\t}\n\n\t// start listening for json messages\n\tgo func() {\n\t\tr := bufio.NewReader(c)\n\t\tfor {\n\t\t\t// messages are separated by a newline\n\t\t\tline, err := r.ReadBytes('\\n')\n\t\t\tif err != nil {\n\t\t\t\tclose(conn.closer)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// sometimes we'll receive empty lines, ignore those\n\t\t\tif len(line) <= 1 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// extract the 'msg' field\n\t\t\tvar m struct{ Msg string }\n\t\t\tdeny(json.Unmarshal(line, &m))\n\n\t\t\t// callbacks are cleared when called\n\t\t\tcallbacks := conn.callbacks[m.Msg]\n\t\t\tconn.callbacks[m.Msg] = []interface{}{}\n\n\t\t\tif len(callbacks) == 0 {\n\t\t\t\tfmt.Println(\"unhandled message\", m.Msg)\n\t\t\t}\n\n\t\t\tfor _, f := range callbacks {\n\t\t\t\tm 
:= reflect.New(reflect.TypeOf(f).In(0))\n\t\t\t\tdeny(json.Unmarshal(line, m.Interface()))\n\t\t\t\treflect.ValueOf(f).Call([]reflect.Value{m.Elem()})\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn conn, nil\n}", "func (r *Ricochet) ProcessMessages(service RicochetService) {\n\tfor {\n\t\toc := <-r.newconns\n\t\tif oc == nil {\n\t\t\treturn\n\t\t}\n\t\tgo r.processConnection(oc, service)\n\t}\n}", "func LoginHandle(msg []byte, c echo.Context) (recv []byte, err error) {\n\tdefer util.Stack()\n\n\tabsMessage := &pf.AbsMessage{}\n\terr = absMessage.Unmarshal(msg)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tmsgID := absMessage.GetMsgID()\n\tmsgBody := absMessage.GetMsgBody()\n\n\tswitch msgID {\n\tcase int32(pf.Login):\n\t\tloginSend := &pf.LoginSend{}\n\t\terr = loginSend.Unmarshal(msgBody)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\ttoken, id, loginRecv := handleLogin(loginSend, c)\n\t\trecv, err = loginRecv.Marshal()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tabsMessage.Token = token\n\t\tutil.LogSend(msgID, id, 0, loginSend, \"Login\")\n\t\tutil.LogRecv(msgID, id, 0, loginRecv, \"Login\")\n\tdefault:\n\t\terr = def.ErrHandleLogin\n\t\treturn\n\t}\n\n\tabsMessage.MsgBody = recv\n\trecv, err = absMessage.Marshal()\n\treturn\n}", "func Handler(conn *websocket.Conn) {\n\t// handle connected\n\tvar userId string\n\tvar err error\n\tif userId, err = doConnected(conn); err != nil {\n\t\tfmt.Println(\"Client connect error: \", err)\n\t\treturn\n\t}\n\n\tfmt.Println(\"Client connected, userId: \", userId)\n\n\tfor {\n\t\tmsg := new(Message)\n\n\t\tif err := websocket.JSON.Receive(conn, msg); err != nil {\n\t\t\tfmt.Println(\"Can't receive, error: \", err)\n\t\t\tbreak\n\t\t}\n\n\t\tmsg.UpdateAt = Timestamp()\n\n\t\tfmt.Println(\"Received from client: \", msg)\n\n\t\t// handle received message\n\t\tif err := doReceived(conn, msg); err != nil {\n\t\t\tfmt.Println(\"Received message error: \", err)\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// handle disConnected\n\tif err := 
doDisConnected(userId, conn); err != nil {\n\t\tfmt.Println(\"Client disconnected error: \", err)\n\t\treturn\n\t}\n\n\tfmt.Println(\"Client disconnected, userId: \", userId)\n}", "func (s *Socket) listenToMessagesIn() {\n\tfor {\n\t\tmessage := new(Message)\n\t\terr := s.connection.ReadJSON(&message)\n\t\tif err != nil {\n\t\t\tif websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {\n\t\t\t\tlog.Printf(\"Error: %v\", err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\ts.messagesIn <- message\n\t}\n}", "func (irc *IrcCon) handleOutgoingMessages() {\n\tfor s := range irc.outgoing {\n\t\t_,err := fmt.Fprint(irc.con, s + \"\\r\\n\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\ttime.Sleep(time.Millisecond * 200)\n\t}\n}", "func (s *Server) HandleRecv(w http.ResponseWriter, r *http.Request) {\n\tif s.listing {\n\t\tp := strings.TrimPrefix(strings.Replace(r.URL.Path, \"recv/\", \"\", 1), \"/\")\n\t\tif p != \"\" {\n\t\t\tfmt.Fprintln(w, \"I recieved a push from:\",\n\t\t\t\tr.Header.Get(\"X-I2p-Destb32\"),\n\t\t\t\t\"And for now, I did nothing with it because I am dumb)\")\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprintln(w, \"FALSE\")\n\t\treturn\n\t}\n\tfmt.Fprintln(w, \"Listings disabled for this server\")\n\treturn\n}", "func cmdHandler(c *Client, msg []byte) {\n\tlog.Infof(\"Received Message: %s ...\", msg[:int(math.Min(float64(len(msg)), 128))])\n\tlog.Debugf(\"Count of goroutines=%d\", runtime.NumGoroutine())\n\n\t// Decode JSON message\n\tvar cmd manager.PlayerCommand\n\terr := json.Unmarshal(msg, &cmd)\n\tif err != nil {\n\t\tsendStatusError(c, \"Message could not be decoded as JSON\", err.Error())\n\t\treturn\n\t}\n\n\tswitch cmd.Cmd {\n\tcase \"status\":\n\t\tsendStatusOKMsg(c, \"\") //, statusString[gp_daemon_status])\n\tcase \"pre_start\":\n\t\tpreStartCommand(c)\n\tcase \"start\":\n\t\tstartCommand(c)\n\tcase \"stop\":\n\t\tstopCommand(c)\n\tcase \"script\":\n\t\tscriptCommand(c, &cmd)\n\tcase \"getmd5\":\n\t\thandleGetMD5(c, 
&cmd)\n\tcase \"datafile\":\n\t\thandleDataFile(c, &cmd)\n\tcase \"nextchunk\":\n\t\thandleDataChunk(c, &cmd)\n\tcase \"get_results\":\n\t\tgetResultsCommand(c, &cmd)\n\tdefault:\n\t\tsendStatusError(c, fmt.Sprintf(\"Message not supported: %s ...\", msg[:int(math.Min(float64(len(msg)), 128))]), \"\")\n\t}\n\tlog.Debug(\"Message handled\")\n}", "func (p *Paradise) HandleCommands() {\n\tfmt.Println(\"Got client on: \", p.ip)\n\tp.writeMessage(220, \"Welcome to Paradise\")\n\tfor {\n\t\tline, err := p.reader.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\t//continue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tcommand, param := parseLine(line)\n\t\tp.command = command\n\t\tp.param = param\n\n\t\tfn := CommandMap[command]\n\t\tif fn == nil {\n\t\t\tp.writeMessage(550, \"not allowed\")\n\t\t} else {\n\t\t\tfn(p)\n\t\t}\n\t}\n}", "func process_messages() {\n\tvar content string\n\n\tfor {\n\t\t//Get available messages (if any)\n\t\tmsg := messenger.Msnger.Receive_message()\n\n\t\t//No message is available\n\t\tif msg.Type == mylib.NONE {\n\t\t\t//Wait ~1sec and try again\n\t\t\ttime.Sleep(1000000000)\n\t\t\tcontinue\n\t\t}\n\n\t\tcontent = msg.Content\n\t\t//Chat message: print to chat window\n\t\tif msg.Type == mylib.CHAT_MESSAGE {\n\t\t\tmesge := msg.Content\n\n\t\t\tif strings.HasPrefix(mesge, \"L \") {\n\t\t\t\tcontent = fmt.Sprintf(\"%v: %v\", msg.Orig_source, strings.TrimLeft(mesge, \"L \"))\n\t\t\t\tchatting.Msg += content + \"\\n\"\n\t\t\t\tqml.Changed(chatting, &chatting.Msg)\n\n\t\t\t} else if strings.HasPrefix(mesge, \"G \") {\n\t\t\t\tcontent = fmt.Sprintf(\"%v: %v\", msg.Orig_source, strings.TrimLeft(mesge, \"G \"))\n\t\t\t\tglobalchatting.Msg += content + \"\\n\"\n\t\t\t\tqml.Changed(globalchatting, &globalchatting.Msg)\n\t\t\t}\n\n\t\t} else if msg.Type == mylib.CREATE_ROOM {\n\t\t\t//A room was created: add room to room list\n\t\t\tdecoded := strings.Split(content, \":\")\n\t\t\trooms[decoded[0]] = fmt.Sprintf(\"%v:%v:%v\", decoded[1], 
decoded[2], decoded[3])\n\t\t} else if msg.Type == mylib.JOIN_ROOM {\n\t\t\t//A node joined a room: add node to members list if this node is in the same room\n\t\t\tdecoded := strings.Split(content, \":\")\n\t\t\tif my_room == decoded[0] {\n\t\t\t\troom_members = append(room_members, fmt.Sprintf(\"%v:%v:%v\", decoded[1], decoded[2], decoded[3]))\n\t\t\t}\n\t\t} else if msg.Type == mylib.START_GAME {\n\t\t\t//A game was started\n\t\t\tdecoded := strings.Split(content, \":\")\n\t\t\tdelete(rooms, decoded[0])\n\t\t\tif my_room == decoded[0] {\n\t\t\t\tin_game = true\n\t\t\t}\n\t\t} else if msg.Type == mylib.DELETE_ROOM {\n\t\t\t//A room was deleted: remove room from room list\n\t\t\tdecoded := strings.Split(content, \":\")\n\t\t\tdelete(rooms, decoded[0])\n\t\t\tif my_room == decoded[0] {\n\t\t\t\tmy_room = \"\"\n\t\t\t\tin_room = false\n\t\t\t\troom_members = make([]string, 0, 0)\n\t\t\t}\n\t\t} else if msg.Type == mylib.LEAVE_ROOM {\n\t\t\t//A node left a room: remove node from members list if this node is in the same room\n\t\t\tcontent = msg.Content\n\t\t\tdecoded := strings.Split(content, \":\")\n\t\t\tif my_room == decoded[0] {\n\t\t\t\tfor i := range room_members {\n\t\t\t\t\tif room_members[i] == fmt.Sprintf(\"%v:%v:%v\", decoded[1], decoded[2], decoded[3]) {\n\t\t\t\t\t\troom_members = append(room_members[:i], room_members[i+1:]...)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else if msg.Type == mylib.MOVE {\n\t\t\tdecoded := strings.Split(content, \":\")\n\t\t\t// Should be board_num, player_team, player_color, turn, origLoc, newLoc, capture_pieceString\n\t\t\tboard_num, _ := strconv.Atoi(decoded[0])\n\t\t\tteam, _ := strconv.Atoi(decoded[1])\n\t\t\tcolor, _ := strconv.Atoi(decoded[2])\n\t\t\tturn, _ := strconv.Atoi(decoded[3])\n\t\t\torigLoc, _ := strconv.Atoi(decoded[4])\n\t\t\tnewLoc, _ := strconv.Atoi(decoded[5])\n\t\t\tcapturedI := decoded[6]\n\t\t\tcapturedT := decoded[7]\n\t\t\tcapturedU, _ := 
strconv.Atoi(decoded[8])\n\t\t\tUpdateFromOpponent(board_num, team, color, turn, origLoc, newLoc, capturedI, capturedT, capturedU)\n\t\t} else if msg.Type == mylib.PLACE {\n\t\t\tdecoded := strings.Split(content, \":\")\n\t\t\t// Should be board_num, player_team, player_color, turn, origLoc, newLoc, capture_pieceString\n\t\t\tboard_num, _ := strconv.Atoi(decoded[0])\n\t\t\tteam, _ := strconv.Atoi(decoded[1])\n\t\t\tcolor, _ := strconv.Atoi(decoded[2])\n\t\t\tturn, _ := strconv.Atoi(decoded[3])\n\t\t\tloc, _ := strconv.Atoi(decoded[4])\n\t\t\tpiece, _ := strconv.Atoi(decoded[5])\n\t\t\tpieceImage := decoded[6]\n\t\t\tpieceType := decoded[7]\n\t\t\tpieceTeam, _ := strconv.Atoi(decoded[8])\n\t\t\tUpdatePlace(board_num, team, color, turn, loc, piece, pieceImage, pieceType, pieceTeam)\n\t\t} else if msg.Type == mylib.GAMEOVER {\n\t\t\tdecoded := strings.Split(content, \":\")\n\t\t\tboard_num, _ := strconv.Atoi(decoded[0])\n\t\t\tteam, _ := strconv.Atoi(decoded[1])\n\t\t\tcause := decoded[2]\n\t\t\tEndGame(board_num, team, cause)\n\t\t\tif cause == \"King\" {\n\t\t\t\t// The king was captured\n\t\t\t} else {\n\t\t\t\t// Somebody ran out of time\n\t\t\t}\n\t\t}\n\t\tmsg.Type = mylib.NONE\n\t}\n}", "func (c *Common) HandleReceivedMessage(message messages.Messager) {\n\n switch message.(type) {\n case *messages.LocationMessage:\n c.rp.HandleLocationMessage(message.(*messages.LocationMessage))\n case *messages.MotorSpeedMessage:\n c.rp.HandleMotorSpeedMessage(message.(*messages.MotorSpeedMessage))\n }\n\n}", "func ListenForMessages(socket *websocket.Conn, onMessage func(structs.Message) error, onError func(error)) {\n\tfor {\n\t\t_, bytes, err := socket.ReadMessage()\n\t\tif err != nil {\n\t\t\tif isCloseError(err) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tonError(errors.Wrap(err, \"Error reading from socket\"))\n\t\t\tcontinue\n\t\t}\n\t\tmessage, err := MessageFromJSON(bytes)\n\t\tif err != nil {\n\t\t\tonError(errors.Wrap(err, fmt.Sprintf(\"Error unmarshaling message: '%s'\", 
bytes)))\n\t\t}\n\t\terr = onMessage(message)\n\t\tif err != nil {\n\t\t\tonError(errors.Wrap(err, \"Error returned by on message handler\"))\n\t\t}\n\t}\n}", "func (cg *CandlesGroup) parseMessage(msg []byte) {\n\tt := bytes.TrimLeftFunc(msg, unicode.IsSpace)\n\tvar err error\n\t// either a channel data array or an event object, raw json encoding\n\tif bytes.HasPrefix(t, []byte(\"[\")) {\n\t\tcg.handleMessage(msg)\n\t} else if bytes.HasPrefix(t, []byte(\"{\")) {\n\t\tif err = cg.handleEvent(msg); err != nil {\n\t\t\tlog.Println(\"[BITFINEX] Error handling event: \", err)\n\t\t}\n\t} else {\n\t\terr = fmt.Errorf(\"[BITFINEX] unexpected message: %s\", msg)\n\t}\n\tif err != nil {\n\t\tfmt.Println(\"[BITFINEX] Error handleMessage: \", err, string(msg))\n\t}\n}", "func (mgmt *Management) handleFrontendMessage(received []byte) {\n\tvar cm collaborationMessage\n\terr := json.Unmarshal(received, &cm)\n\tif err != nil {\n\t\tlog.Println(\"Error while unmarshalling collaborationMessage:\", err)\n\t}\n\n\tswitch cm.Event {\n\tcase \"ABTU\":\n\t\tmgmt.doc.FrontendToABTU <- cm.Content\n\tcase \"AccessControl\":\n\t//\tTODO Handle access control messages\n\tcase \"Cursor\":\n\t//\tTODO Handle cursor messages\n\t}\n}", "func (c *Conn) OnMessage(messageType int, p []byte) {\n\tfor _, str := range strings.Split(string(p), \"\\r\\n\") {\n\t\tif str != \"\" {\n\t\t\tdata, _ := utils.GbkToUtf8([]byte(str))\n\t\t\tdoc := xml.NewDecoder(bytes.NewReader(data))\n\t\t\tnode := c.respParseAttr(doc)\n\t\t\tnode.Raw = string(data)\n\t\t\tswitch node.getAttr(\"id\") {\n\t\t\tcase \"1\":\n\t\t\t\tstatus := node.getElem(\"result\").getAttr(\"status\")\n\t\t\t\tif status == \"ok\" {\n\t\t\t\t\tc.key1 = node.getElem(\"key\").getAttr(\"key1\")\n\t\t\t\t\t// 初始化心跳\n\t\t\t\t\tc.loginChatServerSuccess()\n\t\t\t\t} else {\n\t\t\t\t\tEventChan <- EventMessage{Type: \"error\", Msg: fmt.Sprintf(\"%v\", \"进入直播间失败\")}\n\t\t\t\t}\n\t\t\tcase \"2\":\n\t\t\tcase 
\"3\":\n\t\t\tdefault:\n\t\t\t\tc.socketData(node)\n\t\t\t}\n\t\t\tc.pushNode(node)\n\t\t}\n\t}\n}" ]
[ "0.6561378", "0.6528215", "0.64411", "0.64160395", "0.6366687", "0.6283122", "0.6273615", "0.6269291", "0.6257093", "0.6256277", "0.61980337", "0.6155845", "0.6121732", "0.61168164", "0.601344", "0.6010404", "0.59898597", "0.5966513", "0.59574014", "0.5955264", "0.58938617", "0.5886083", "0.58251786", "0.58229846", "0.5765552", "0.57468873", "0.57331735", "0.5732407", "0.5658851", "0.56189036", "0.5618875", "0.55945134", "0.5594153", "0.5589223", "0.5542452", "0.55244976", "0.5524469", "0.5517586", "0.5517586", "0.5513526", "0.5509128", "0.5503757", "0.54992527", "0.5495385", "0.5493428", "0.5487539", "0.54849243", "0.5484654", "0.54842305", "0.5484129", "0.5474079", "0.5474019", "0.5465442", "0.5453857", "0.54402745", "0.54353", "0.543004", "0.54296136", "0.54280156", "0.5420371", "0.5418044", "0.5417457", "0.541648", "0.54151595", "0.5414307", "0.54119277", "0.54083675", "0.5405065", "0.54020584", "0.5393194", "0.5391204", "0.53851795", "0.5384205", "0.5359276", "0.53577554", "0.5348933", "0.53401935", "0.53360623", "0.53127044", "0.53103966", "0.5307052", "0.5306911", "0.53047556", "0.52935183", "0.52796584", "0.5277324", "0.52768785", "0.52745837", "0.52745", "0.52657014", "0.52649117", "0.5263485", "0.5260125", "0.5254791", "0.52530473", "0.5251399", "0.5250213", "0.5247093", "0.52410215", "0.5237266" ]
0.71781325
0
/ Now let's create two handler functions. The easiest case is where our adhoc protocol only sends string data. The second handler receives and processes a struct that was sent as GOB data. handleStrings handles the "STRING" request.
func handleStrings(rw *bufio.ReadWriter) { // Receive a string. log.Print("Receive STRING message:") s, err := rw.ReadString('\n') if err != nil { log.Println("Cannot read from connection.\n", err) } s = strings.Trim(s, "\n ") log.Println(s) _, err = rw.WriteString("Thank you.\n") if err != nil { log.Println("Cannot write to connection.\n", err) } err = rw.Flush() if err != nil { log.Println("Flush failed.", err) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (serv *Server) handleText(conn int, payload []byte) {\n\tvar (\n\t\tlogp = `handleText`\n\n\t\thandler RouteHandler\n\t\terr error\n\t\tctx context.Context\n\t\treq *Request\n\t\tres *Response\n\t\tok bool\n\t)\n\n\tres = _resPool.Get().(*Response)\n\tres.reset()\n\n\tctx, ok = serv.Clients.Context(conn)\n\tif !ok {\n\t\terr = errors.New(\"client context not found\")\n\t\tres.Code = http.StatusInternalServerError\n\t\tres.Message = err.Error()\n\t\tgoto out\n\t}\n\n\treq = _reqPool.Get().(*Request)\n\treq.reset()\n\n\terr = json.Unmarshal(payload, req)\n\tif err != nil {\n\t\tres.Code = http.StatusBadRequest\n\t\tres.Message = err.Error()\n\t\tgoto out\n\t}\n\n\thandler, err = req.unpack(serv.routes)\n\tif err != nil {\n\t\tres.Code = http.StatusBadRequest\n\t\tres.Message = req.Target\n\t\tgoto out\n\t}\n\tif handler == nil {\n\t\tres.Code = http.StatusNotFound\n\t\tres.Message = req.Method + \" \" + req.Target\n\t\tgoto out\n\t}\n\n\treq.Conn = conn\n\n\t*res = handler(ctx, req)\n\nout:\n\tif req != nil {\n\t\tres.ID = req.ID\n\t\t_reqPool.Put(req)\n\t}\n\n\terr = serv.sendResponse(conn, res)\n\tif err != nil {\n\t\tlog.Printf(`%s: %s`, logp, err)\n\t\tserv.ClientRemove(conn)\n\t}\n\n\t_resPool.Put(res)\n}", "func serveStringHandler(str string) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\t\tw.Write([]byte(str))\n\t})\n}", "func (serv *Server) handleBin(conn int, payload []byte) {}", "func Handle(req []byte) string {\n\tlog.SetOutput(os.Stderr)\n\tvar n interface{}\n\terr := json.Unmarshal(req, &n)\n\tif err != nil {\n\t\tlog.Printf(\"unable to Unmarshal request. 
%v\", err)\n\t\treturn \"\"\n\t}\n\n\tdata := n.(map[string]interface{})\n\n\tlog.Println(data[\"Type\"])\n\tif data[\"Type\"].(string) == confirmation {\n\t\tsubscribeURL := data[\"SubscribeURL\"].(string)\n\t\tlog.Printf(\"SubscribeURL %v\", subscribeURL)\n\t\tconfirmSubscription(subscribeURL)\n\t\treturn \"just subscribed to \" + subscribeURL\n\t} else if data[\"Type\"].(string) == notification {\n\t\tmessage := data[\"Message\"].(string)\n\t\tlog.Println(\"Received this message : \", message)\n\t\treturn message\n\t}\n\n\tlog.Printf(\"Unknown data type %v\", data[\"Type\"])\n\treturn fmt.Sprintf(\"Unknown data type %v\", data[\"Type\"])\n}", "func handleRequest(req string) string {\n\tresponse := \"\"\n\n\tif len(req) > 0 {\n\t\ts := strings.Split(req, \":\")\n\t\tif len(s) < 2 {\n\t\t\tresponse = \"0001:Invalid request\"\n\t\t} else {\n\t\t\tresponse = processRequest(s[0], s[1])\n\t\t}\n\t} else {\n\t\tresponse = \"0000:Empty request\"\n\t}\n\n\treturn response\n}", "func Handle(bytes []byte) string {\n\treq := &Request{}\n\tif err := json.Unmarshal(bytes, req); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn fmt.Sprintf(\"Hello, Go. 
You said: %s\", string(bytes))\n}", "func (c *Conn) handle(cmd string, arg string) {\n\t// If panic happens during command handling - send 421 response\n\t// and close connection.\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tc.writeResponse(421, EnhancedCode{4, 0, 0}, \"Internal server error\")\n\t\t\tc.Close()\n\n\t\t\tstack := debug.Stack()\n\t\t\tc.server.ErrorLog.Printf(\"panic serving %v: %v\\n%s\", c.conn.RemoteAddr(), err, stack)\n\t\t}\n\t}()\n\n\tif cmd == \"\" {\n\t\tc.protocolError(500, EnhancedCode{5, 5, 2}, \"Error: bad syntax\")\n\t\treturn\n\t}\n\n\tcmd = strings.ToUpper(cmd)\n\tswitch cmd {\n\tcase \"SEND\", \"SOML\", \"SAML\", \"EXPN\", \"HELP\", \"TURN\":\n\t\t// These commands are not implemented in any state\n\t\tc.writeResponse(502, EnhancedCode{5, 5, 1}, fmt.Sprintf(\"%v command not implemented\", cmd))\n\tcase \"HELO\", \"EHLO\", \"LHLO\":\n\t\tlmtp := cmd == \"LHLO\"\n\t\tenhanced := lmtp || cmd == \"EHLO\"\n\t\tif c.server.LMTP && !lmtp {\n\t\t\tc.writeResponse(500, EnhancedCode{5, 5, 1}, \"This is a LMTP server, use LHLO\")\n\t\t\treturn\n\t\t}\n\t\tif !c.server.LMTP && lmtp {\n\t\t\tc.writeResponse(500, EnhancedCode{5, 5, 1}, \"This is not a LMTP server\")\n\t\t\treturn\n\t\t}\n\t\tc.handleGreet(enhanced, arg)\n\tcase \"MAIL\":\n\t\tc.handleMail(arg)\n\tcase \"RCPT\":\n\t\tc.handleRcpt(arg)\n\tcase \"VRFY\":\n\t\tc.writeResponse(252, EnhancedCode{2, 5, 0}, \"Cannot VRFY user, but will accept message\")\n\tcase \"NOOP\":\n\t\tc.writeResponse(250, EnhancedCode{2, 0, 0}, \"I have sucessfully done nothing\")\n\tcase \"RSET\": // Reset session\n\t\tc.reset()\n\t\tc.writeResponse(250, EnhancedCode{2, 0, 0}, \"Session reset\")\n\tcase \"BDAT\":\n\t\tc.handleBdat(arg)\n\tcase \"DATA\":\n\t\tc.handleData(arg)\n\tcase \"QUIT\":\n\t\tc.writeResponse(221, EnhancedCode{2, 0, 0}, \"Bye\")\n\t\tc.Close()\n\tcase \"AUTH\":\n\t\tif c.server.AuthDisabled {\n\t\t\tc.protocolError(500, EnhancedCode{5, 5, 2}, \"Syntax error, AUTH command 
unrecognized\")\n\t\t} else {\n\t\t\tc.handleAuth(arg)\n\t\t}\n\tcase \"STARTTLS\":\n\t\tc.handleStartTLS()\n\tdefault:\n\t\tmsg := fmt.Sprintf(\"Syntax errors, %v command unrecognized\", cmd)\n\t\tc.protocolError(500, EnhancedCode{5, 5, 2}, msg)\n\t}\n}", "func (s *Server) SendString(ctx context.Context, in *RequestString) (*Response, error) {\n\ta := in.GetMess()\n\tfmt.Println(a)\n\treturn &Response{Sent: true}, nil\n}", "func (s *Server) handleWhatever() {}", "func (s *Server) handlerConn(c net.Conn) {\n\tdefer c.Close()\n\tbuf := make([]byte, 2048)\n\trcvPacketSize, err := c.Read(buf)\n\tif err != nil && err != io.EOF {\n\t\tlog.Println(\"Read error: \", err)\n\t\treturn\n\t}\n\tdata := buf[:rcvPacketSize]\n\n\trec := strings.Split(string(data), \" \")\n\tlog.Println(\"Received data: \", rec)\n\n\t// rec must have 3 field (as at form)\n\tif len(rec) <= 3 {\n\t\tif err := s.db.Insert(rec); err != nil {\n\t\t\tlog.Printf(\"Insert error: %v\\n\", err)\n\t\t}\n\t\tlog.Printf(\"Save record in DB: %v\\n\", rec)\n\n\t\tif _, err = c.Write([]byte(\"OK\")); err != nil {\n\t\t\tlog.Printf(\"Response send error: %v\\n\", err)\n\t\t}\n\t}\n}", "func (network *Network) HTTPhandler(w http.ResponseWriter, r *http.Request){\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tbody, error := ioutil.ReadAll(r.Body) // Read Request\n\t\tdefer r.Body.Close() // Always CLOSE.\n\t\t// Check for errors or if body is empty.\n\t\tif error != nil || removeQuotationMarks(string(body)) == \"\" {\n\t\t\thttp.Error(w, \"ERROR\", http.StatusBadRequest)\n\t\t\tfmt.Println(\"Error when POST\")\n\t\t} else{\n\t\t\t// Same as in Cli.go Store\n\t\t\thashedFileString := NewKademliaIDFromData(string(body))\n\t\t\tnetwork.Store([]byte(body),hashedFileString)\n\t\t\thashSuffix := hashedFileString.String()\n\n\t\t\tmessage := map[string]string{ hashSuffix: string(body)} // JSON DATA FORMAT\n\t\t\tjsonValue,_ := json.Marshal(message)\n\n\t\t\tw.Header().Set(\"Location\", 
URLprefix+hashSuffix)\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t\t\tw.WriteHeader(http.StatusCreated)\t// Status 201 as detailed\n\n\t\t\tw.Write(jsonValue)\n\t\t\tfmt.Println(\"HTTP Data Written. Hash = \", hashSuffix )\n\t\t}\n\tcase \"GET\":\n\t\t// Checks if there is something after the prefix. /objects/XXXXXXXXXXXXXX\n\t\tURLcomponents := strings.Split(r.URL.Path, \"/\")\t// [ \"\", \"objects\", \"hash\" ]\n\t\thashValue := URLcomponents[2]\n\t\t// Check if there is a hashvalue of correct size.\n\t\tif(len(hashValue) != 40){\n\t\t\thttp.Error(w, \"ERROR\", http.StatusLengthRequired)\n\t\t\tfmt.Println(\"Error when GET \", hashValue, \" is not of correct length. (40)\")\n\t\t}else{\n\t\t\t\t// Same as in Cli.go Get\n\t\t\t\thash := NewKademliaID(hashValue)\n\t\t\t\tdata, nodes := network.DataLookup(hash)\n\t\t\t\tif data != nil {\n\t\t\t\t\t// If data is not nil, send OK status and write.\n\t\t\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t\t\tw.Write(data)\n\t\t\t\t\tfmt.Println(\"HTTP Data Read. Input was = \", string(data) )\n\t\t\t\t} else if len(nodes) > 0{\n\t\t\t\t\thttp.Error(w, \"ERROR\", http.StatusNotFound)\n\t\t\t\t\tfmt.Println(\"Error when GET - DataLookUP (Length)\")\n\t\t\t\t} else {\n\t\t\t\t\thttp.Error(w, \"ERROR\", http.StatusNoContent)\n\t\t\t\t\tfmt.Println(\"Error when GET - DataLookUP\")\n\t\t\t\t}\n\t\t}\n\tdefault:\n\t\thttp.Error(w, \"Wrong. Use POST or GET\", http.StatusMethodNotAllowed)\n\t}\n}", "func handleHello(args[]interface{}){\n\tm := args[0].(*msg.Hello)\n\ta := args[1].(gate.Agent)\n\tlog.Debug(\"Received <%v>\", m.Name) //~ Print all. 
(Not just m.Name)\n\tswitch randNo.Intn(2){\n\tcase 0:\n\t\tlog.Debug(\"msg.Hello sent\")\n\t\ta.WriteMsg(&msg.Hello{\n\t\t\tName:\"God\",\n\t\t})\n\tdefault:\n\t\tlog.Debug(\"msg.Gate sent\")\n\t\ta.WriteMsg(&msg.Gate{\n\t\t\tHost:\"www.blizzard.com\",\n\t\t})\n\t}\n}", "func (u AnonUser) sendHandler() {\n\t//namespace := fmt.Sprintf(\"/%s\", u.socket.Id())\n\tfor {\n\t\tselect {\n\t\t\tcase data := <-u.Send:\n\t\t\tswitch dataType := data.(type) {\n\t\t\t\tcase Joined:\n\t\t\t\t/*msg, err := json.Marshal(dataType)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Panic(\"unable to marshal message\")\n\t\t\t\t}\n\t\t\t\t*/\n\t\t\t\tu.socket.Emit(\"joined\", dataType)\n\t\t\t\tcase Command:\n\t\t\t\tswitch dataType.Cmd {\n\t\t\t\t\tcase C_Leave:\n\t\t\t\t\tu.socket.Emit(\"disconnect\")\n\t\t\t\t\treturn\n\t\t\t\t\tcase C_Kick:\n\t\t\t\t\tu.socket.Emit(\"kick\", dataType.Data.(string))\n\t\t\t\t\tu.socket.Emit(\"disconnect\")\n\t\t\t\t\treturn\n\t\t\t\t\tcase C_End:\n\t\t\t\t\treturn\n\t\t\t\t\tdefault:\n\t\t\t\t\tlog.Print(\"AnonUser sendHandler: unknown command\")\n\t\t\t\t}\n\t\t\t\tcase GameMessage:\n\t\t\t\t/*msg, err := json.Marshal(dataType.Msg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Panic(\"unable to marshal message\")\n\t\t\t\t}\n\t\t\t\t*/\n\t\t\t\tu.socket.Emit(\"msgplayer\", dataType.Msg)\n\t\t\t\tdefault:\n\t\t\t\t\tlog.Print(\"AnonUser sendHandler: unknown type received\")\n\t\t\t}\n\t\t}\n\t}\n}", "func (o *handler) handle(client mqtt.Client, msg mqtt.Message) {\r\n\t// We extract the count and write that out first to simplify checking for missing values\r\n\tvar m Message\r\n\tvar resp Session\r\n\tif err := json.Unmarshal(msg.Payload(), &resp); err != nil {\r\n\t\tfmt.Printf(\"Message could not be parsed (%s): %s\", msg.Payload(), err)\r\n\t\treturn\r\n\t}\r\n\tfmt.Println(resp)\r\n\tswitch resp.Type {\r\n\tcase CMDMSG_OFFER:\r\n\t\tenc.Decode(resp.Data, &m)\r\n\t\tNotice(m)\r\n\tcase CMDMSG_DISC:\r\n\t\tvar devcmd DiscoveryCmd\r\n\t\tenc.Decode(resp.Data, 
&devcmd)\r\n\t\tDiscoveryDev(&devcmd)\r\n\tcase CMDMSG_WAKE:\r\n\t\tvar fing Fing\r\n\t\tenc.Decode(resp.Data, &fing)\r\n\t\twakemac(fing)\r\n\tcase CMDMSG_UPDATE:\r\n\t\tvar newver *versionUpdate\r\n\t\tGetUpdateMyself(newver)\r\n\tcase CMDMSG_MR2:\r\n\t\tvar mr2info Mr2Msg\r\n\t\tenc.Decode(resp.Data, &mr2info)\r\n\t\tMr2HostPort(&mr2info)\r\n\t}\r\n}", "func GeneralConvHandler(req, name string, res http.ResponseWriter) string {\n\n\tfmt.Println(\"General conversation...\")\n\trand.Seed(time.Now().UnixNano())\n\tusername = name\n\tmessage := filterForMessagesComparision(req)\n\tmatch := false\n\n\t// determine type of message\n\tif !match {\n\t\tisGreetingPlain := func(s string) bool {\n\t\t\tfor i := 0; i < len(messagesParser.InitialGreetingsPlain); i++ {\n\t\t\t\tif strings.ToLower(s) == messagesParser.InitialGreetingsPlain[i] {\n\t\t\t\t\tmatch = true\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}(message)\n\n\t\tif isGreetingPlain {\n\t\t\ttemp := greetingPlainController(message)\n\t\t\tresp = jsonResponse{true, temp, true, nil}\n\t\t\tspeak = temp\n\t\t\tmarshalled, _ := json.Marshal(resp)\n\t\t\tres.Write(marshalled)\n\t\t}\n\t}\n\n\tif !match {\n\t\tisGreetingName := func(s string) bool {\n\t\t\tfor i := 0; i < len(messagesParser.InitialGreetingsName); i++ {\n\t\t\t\tif strings.ToLower(s) == messagesParser.InitialGreetingsName[i] {\n\t\t\t\t\tfmt.Println(\"contains \", strings.ToLower(s), \" \", messagesParser.InitialGreetingsName[i])\n\t\t\t\t\tmatch = true\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}(message)\n\n\t\tif isGreetingName {\n\t\t\ttemp := greetingNameController(message)\n\t\t\tresp = jsonResponse{true, temp, true, nil}\n\t\t\tspeak = temp\n\t\t\tmarshalled, _ := json.Marshal(resp)\n\t\t\tres.Write(marshalled)\n\t\t}\n\t}\n\n\tif !match {\n\t\tisHelp := func(s string) bool {\n\t\t\tfor i := 0; i < len(messagesParser.Help); i++ {\n\t\t\t\tif strings.ToLower(s) == messagesParser.Help[i] 
{\n\t\t\t\t\tmatch = true\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}(message)\n\n\t\tif isHelp {\n\t\t\ttemp := helpController(message)\n\t\t\tresp = jsonResponse{true, temp, true, nil}\n\t\t\tspeak = temp\n\t\t\tmarshalled, _ := json.Marshal(resp)\n\t\t\tres.Write(marshalled)\n\t\t}\n\t}\n\n\tif !match {\n\t\tisAbout := func(s string) bool {\n\t\t\tfor i := 0; i < len(messagesParser.About); i++ {\n\t\t\t\tif strings.ToLower(s) == messagesParser.About[i] {\n\t\t\t\t\tmatch = true\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}(message)\n\n\t\tif isAbout {\n\t\t\ttemp := aboutController(message)\n\t\t\tresp = jsonResponse{true, temp, true, nil}\n\t\t\tspeak = temp\n\t\t\tmarshalled, _ := json.Marshal(resp)\n\t\t\tres.Write(marshalled)\n\t\t}\n\t}\n\n\tif !match {\n\t\tfmt.Println(\"inside\")\n\t\tisAge := func(s string) bool {\n\t\t\tfor i := 0; i < len(messagesParser.Age); i++ {\n\t\t\t\tif strings.ToLower(s) == messagesParser.Age[i] {\n\t\t\t\t\tmatch = true\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}(message)\n\n\t\tif isAge {\n\t\t\ttemp := ageController(message)\n\t\t\tfmt.Println(\"temp age is \", temp)\n\t\t\tresp = jsonResponse{true, temp, true, nil}\n\t\t\tspeak = temp\n\t\t\tmarshalled, _ := json.Marshal(resp)\n\t\t\tres.Write(marshalled)\n\t\t}\n\t}\n\n\tif !match {\n\t\tisBirthday := func(s string) bool {\n\t\t\tfor i := 0; i < len(messagesParser.Birthday); i++ {\n\t\t\t\tif strings.ToLower(s) == messagesParser.Birthday[i] {\n\t\t\t\t\tmatch = true\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}(message)\n\n\t\tif isBirthday {\n\t\t\ttemp := birthdayController(message)\n\t\t\tresp = jsonResponse{true, temp, true, nil}\n\t\t\tspeak = temp\n\t\t\tmarshalled, _ := json.Marshal(resp)\n\t\t\tres.Write(marshalled)\n\t\t}\n\t}\n\n\treturn speak\n\n}", "func Handle(req []byte) string {\n\treturn fmt.Sprintf(\"Hello, %s!\", sub.Name())\n}", "func client(ip string) 
error {\r\n\t// Some test data. Note how GOB even handles maps, slices, and\r\n\t// recursive data structures without problems.\r\n\ttestStruct := complexData{\r\n\t\tN: 23,\r\n\t\tS: \"string data\",\r\n\t\tM: map[string]int{\"one\": 1, \"two\": 2, \"three\": 3},\r\n\t\tP: []byte(\"abc\"),\r\n\t\tC: &complexData{\r\n\t\t\tN: 256,\r\n\t\t\tS: \"Recursive structs? Piece of cake!\",\r\n\t\t\tM: map[string]int{\"01\": 1, \"10\": 2, \"11\": 3},\r\n\t\t},\r\n\t}\r\n\r\n\t// Open a connection to the server.\r\n\trw, err := Open(ip + Port)\r\n\tif err != nil {\r\n\t\treturn errors.Wrap(err, \"Client: Failed to open connection to \"+ip+Port)\r\n\t}\r\n\r\n\t// Send a STRING request.\r\n\t// Send the request name.\r\n\t// Send the data.\r\n\tlog.Println(\"Send the string request.\")\r\n\tn, err := rw.WriteString(\"STRING\\n\")\r\n\tif err != nil {\r\n\t\treturn errors.Wrap(err, \"Could not send the STRING request (\"+strconv.Itoa(n)+\" bytes written)\")\r\n\t}\r\n\tn, err = rw.WriteString(\"Additional data.\\n\")\r\n\tif err != nil {\r\n\t\treturn errors.Wrap(err, \"Could not send additional STRING data (\"+strconv.Itoa(n)+\" bytes written)\")\r\n\t}\r\n\tlog.Println(\"Flush the buffer.\")\r\n\terr = rw.Flush()\r\n\tif err != nil {\r\n\t\treturn errors.Wrap(err, \"Flush failed.\")\r\n\t}\r\n\r\n\t// Read the reply.\r\n\tlog.Println(\"Read the reply.\")\r\n\tresponse, err := rw.ReadString('\\n')\r\n\tif err != nil {\r\n\t\treturn errors.Wrap(err, \"Client: Failed to read the reply: '\"+response+\"'\")\r\n\t}\r\n\r\n\tlog.Println(\"STRING request: got a response:\", response)\r\n\r\n\t// Send a GOB request.\r\n\t// Create an encoder that directly transmits to `rw`.\r\n\t// Send the request name.\r\n\t// Send the GOB.\r\n\tlog.Println(\"Send a struct as GOB:\")\r\n\tlog.Printf(\"Outer complexData struct: \\n%#v\\n\", testStruct)\r\n\tlog.Printf(\"Inner complexData struct: \\n%#v\\n\", testStruct.C)\r\n\tenc := gob.NewEncoder(rw)\r\n\tn, err = rw.WriteString(\"GOB\\n\")\r\n\tif 
err != nil {\r\n\t\treturn errors.Wrap(err, \"Could not write GOB data (\"+strconv.Itoa(n)+\" bytes written)\")\r\n\t}\r\n\terr = enc.Encode(testStruct)\r\n\tif err != nil {\r\n\t\treturn errors.Wrapf(err, \"Encode failed for struct: %#v\", testStruct)\r\n\t}\r\n\terr = rw.Flush()\r\n\tif err != nil {\r\n\t\treturn errors.Wrap(err, \"Flush failed.\")\r\n\t}\r\n\treturn nil\r\n}", "func (echo *echoImpl) SendString(_ fidl.Context, inValue string) error {\n\treturn nil\n}", "func handler() {\n\n\t//get mime type\n\tif pMimeType != \"\" {\n\t\tv := Formatters[\"MIME-TYPE\"]\n\t\t_, resmsg := v.Format(v.Mode, pMimeType)\n\t\tfmt.Println(resmsg)\n\t\treturn\n\t}\n\t//get mime list\n\tif pMimeList {\n\t\tv := Formatters[\"MIME-LIST\"]\n\t\t_, resmsg := v.Format(v.Mode, pMimeType)\n\t\tfmt.Println(resmsg)\n\t\treturn\n\t}\n\t//encode\n\tif pEncData != \"\" {\n\t\tv := Formatters[\"ENC-DATA\"]\n\t\trescode, resmsg := v.Format(v.Mode, pEncData)\n\t\tshowStatus(rescode, resmsg)\n\t\treturn\n\t}\n\t//encode-url\n\tif pEncUrl != \"\" {\n\t\tv := Formatters[\"ENC-URL\"]\n\t\trescode, resmsg := v.Format(v.Mode, pEncUrl)\n\t\tshowStatus(rescode, resmsg)\n\t\treturn\n\t}\n\t//decode\n\tif pDecData != \"\" {\n\t\tv := Formatters[\"DEC-DATA\"]\n\t\trescode, resmsg := v.Format(v.Mode, pDecData)\n\t\tshowStatus(rescode, resmsg)\n\t\treturn\n\t}\n\t//decode-url\n\tif pDecUrl != \"\" {\n\t\tv := Formatters[\"DEC-URL\"]\n\t\trescode, resmsg := v.Format(v.Mode, pDecUrl)\n\t\tshowStatus(rescode, resmsg)\n\t\treturn\n\t}\n\t//encode:b64\n\tif pBase64Enc != \"\" {\n\t\tv := Formatters[\"B64-ENC-DATA\"]\n\t\trescode, resmsg := v.Format(v.Mode, pBase64Enc)\n\t\tshowStatus(rescode, resmsg)\n\t\treturn\n\t}\n\t//encode-url:b64\n\tif pBase64UrlEnc != \"\" {\n\t\tv := Formatters[\"B64-ENC-URL\"]\n\t\trescode, resmsg := v.Format(v.Mode, pBase64UrlEnc)\n\t\tshowStatus(rescode, resmsg)\n\t\treturn\n\t}\n\t//decode:b64\n\tif pBase64Dec != \"\" {\n\t\tv := Formatters[\"B64-DEC-DATA\"]\n\t\trescode, 
resmsg := v.Format(v.Mode, pBase64Dec)\n\t\tshowStatus(rescode, resmsg)\n\t\treturn\n\t}\n\t//decode-url:b64\n\tif pBase64UrlDec != \"\" {\n\t\tv := Formatters[\"B64-DEC-URL\"]\n\t\trescode, resmsg := v.Format(v.Mode, pBase64UrlDec)\n\t\tshowStatus(rescode, resmsg)\n\t\treturn\n\t}\n\t//html-esc\n\tif pHtmlEsc != \"\" {\n\t\tv := Formatters[\"HTML-ENC-DATA\"]\n\t\trescode, resmsg := v.Format(v.Mode, pHtmlEsc)\n\t\tshowStatus(rescode, resmsg)\n\t\treturn\n\t}\n\t//html-esc-url\n\tif pHtmlUrlEsc != \"\" {\n\t\tv := Formatters[\"HTML-ENC-URL\"]\n\t\trescode, resmsg := v.Format(v.Mode, pHtmlUrlEsc)\n\t\tif !pHttpServe {\n\t\t\tshowStatus(rescode, resmsg)\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n\t//qrcode\n\tif pQRCodeGen != \"\" {\n\t\tv := Formatters[\"QR-CODE-GEN\"]\n\t\trescode, resmsg := v.Format(v.Mode, pQRCodeGen)\n\t\tif !pHttpServe {\n\t\t\tshowStatus(rescode, resmsg)\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\t//serve http\n\tif pHttpServe {\n\t\tinitHttpRouters()\n\t\treturn\n\t}\n\n}", "func (h *CorgeEchoStringHandler) Handle(\n\tctx context.Context,\n\treqHeaders map[string]string,\n\twireValue *wire.Value,\n) (bool, zanzibar.RWTStruct, map[string]string, error) {\n\tvar req clientsCorgeCorge.Corge_EchoString_Args\n\tvar res clientsCorgeCorge.Corge_EchoString_Result\n\n\tif err := req.FromWire(*wireValue); err != nil {\n\t\treturn false, nil, nil, err\n\t}\n\tr, respHeaders, err := h.echostring(ctx, reqHeaders, &req)\n\n\tif err != nil {\n\t\treturn false, nil, nil, err\n\t}\n\tres.Success = &r\n\n\treturn err == nil, &res, respHeaders, nil\n}", "func (h *Handler) handle(method string, params *json.RawMessage) (res interface{}, err error) {\n\tstart := time.Now()\n\tlog.Debug(\"Received %s message\", method)\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Error(\"Panic in handler for %s: %s\", method, r)\n\t\t\tlog.Debug(\"%s\\n%v\", r, string(debug.Stack()))\n\t\t\terr = &jsonrpc2.Error{\n\t\t\t\tCode: 
jsonrpc2.CodeInternalError,\n\t\t\t\tMessage: fmt.Sprintf(\"%s\", r),\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Debug(\"Handled %s message in %s\", method, time.Since(start))\n\t\t}\n\t}()\n\n\tswitch method {\n\tcase \"initialize\":\n\t\tinitializeParams := &lsp.InitializeParams{}\n\t\tif err := json.Unmarshal(*params, initializeParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn h.initialize(initializeParams)\n\tcase \"initialized\":\n\t\t// Not doing anything here. Unsure right now what this is really for.\n\t\treturn nil, nil\n\tcase \"shutdown\":\n\t\treturn nil, nil\n\tcase \"exit\":\n\t\t// exit is a request to terminate the process. We do this preferably by shutting\n\t\t// down the RPC connection but if we can't we just die.\n\t\tif h.Conn != nil {\n\t\t\tif err := h.Conn.Close(); err != nil {\n\t\t\t\tlog.Fatalf(\"Failed to close connection: %s\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Fatalf(\"No active connection to shut down\")\n\t\t}\n\t\treturn nil, nil\n\tcase \"textDocument/didOpen\":\n\t\tdidOpenParams := &lsp.DidOpenTextDocumentParams{}\n\t\tif err := json.Unmarshal(*params, didOpenParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn nil, h.didOpen(didOpenParams)\n\tcase \"textDocument/didChange\":\n\t\tdidChangeParams := &lsp.DidChangeTextDocumentParams{}\n\t\tif err := json.Unmarshal(*params, didChangeParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn nil, h.didChange(didChangeParams)\n\tcase \"textDocument/didSave\":\n\t\tdidSaveParams := &lsp.DidSaveTextDocumentParams{}\n\t\tif err := json.Unmarshal(*params, didSaveParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn nil, h.didSave(didSaveParams)\n\tcase \"textDocument/didClose\":\n\t\tdidCloseParams := &lsp.DidCloseTextDocumentParams{}\n\t\tif err := 
json.Unmarshal(*params, didCloseParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn nil, h.didClose(didCloseParams)\n\tcase \"textDocument/formatting\":\n\t\tformattingParams := &lsp.DocumentFormattingParams{}\n\t\tif err := json.Unmarshal(*params, formattingParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn h.formatting(formattingParams)\n\tcase \"textDocument/completion\":\n\t\tcompletionParams := &lsp.CompletionParams{}\n\t\tif err := json.Unmarshal(*params, completionParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn h.completion(completionParams)\n\tcase \"textDocument/documentSymbol\":\n\t\tsymbolParams := &lsp.DocumentSymbolParams{}\n\t\tif err := json.Unmarshal(*params, symbolParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn h.symbols(symbolParams)\n\tcase \"textDocument/declaration\":\n\t\tfallthrough\n\tcase \"textDocument/definition\":\n\t\tpositionParams := &lsp.TextDocumentPositionParams{}\n\t\tif err := json.Unmarshal(*params, positionParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn h.definition(positionParams)\n\tdefault:\n\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeMethodNotFound}\n\t}\n}", "func SimpleMsgHandler(d Message) {\n\tdata,_ := d.(MSGStruct)\n\n\tidentifier := data.GetId() + strconv.Itoa(data.GetRound())\n\tif _, seen := MessageReceiveSet[identifier]; !seen {\n\t\tMessageReceiveSet[identifier] = true\n\n\t\thashStr := ConvertBytesToString(Hash([]byte(data.GetData())))\n\n\t\t//include the data with key the original data and val its hashvalue\n\n\t\tif recData,exist := DataSet[data.GetData()]; exist {\n\t\t\trecData.update(d, hashStr)\n\t\t} else {\n\t\t\treceiveData := receiveData{}\n\t\t\treceiveData.update(d, 
hashStr)\n\t\t\tDataSet[data.GetData()] = &receiveData\n\t\t}\n\n\t\t//Main logic\n\t\tm := ECHOStruct{Header:ECHO, Id:data.GetId(), HashData:hashStr, Round: data.GetRound(), SenderId:MyID}\n\t\t//fmt.Printf(\"HeyMsg: %+v\\n\",m)\n\n\t\t//if l, ok := simpleEchoRecCountSet[m]; ok {\n\t\t//\tl = append(l, d.GetSenderId())\n\t\t//\tsimpleEchoRecCountSet[m] = l\n\t\t//} else {\n\t\t//\tsimpleEchoRecCountSet[m] = []string{d.GetSenderId()}\n\t\t//}\n\n\t\t//ToDo: Send Echo to all servers\n\t\tif _, sent := EchoSentSet[identifier]; !sent {\n\t\t\tEchoSentSet[identifier] = true\n\t\t\tsendReq := PrepareSend{M: m, SendTo:\"all\"}\n\t\t\tSendReqChan <- sendReq\n\t\t}\n\t\tSimpleCheck(d)\n\t}\n\n}", "func Tribes_Interpreter(mypayload TribePayload) {\n\n\tmycommand := GetJSONCommand(mypayload.TPbuffer)\n\n\tswitch mycommand {\n\n\tcase \"NOOP\":\n\t\tbreak // doing nothing\n\t\t//\n\t\t// Implementation of single post exchange\n\tcase \"HEREPOST\":\n\t\t// herepost just returns the requested post\n\t\terr := Tribes_BE_POST(mypayload.TPbuffer[0:mypayload.TPsize])\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[UDP-INT] Cannot execute HEREPOST: %s \", err.Error())\n\t\t}\n\t\t// each function should have the full buffer when starting\n\t\t// the ones with BE are saving something.\n\t\t// the ones with FE are answeing back (so they need to know who to answer\n\t\t// all FE functions will return a []byte to shoot with Shoot_JSON\n\tcase \"GIMMEPOST\":\n\t\t// gimmepost just requires to send a post back\n\t\t// giving the messageID as argument\n\t\t// those functions starting with GIMME are asked to reply to the peer\n\t\t//\n\t\t// Implementation of PEERS exchange\n\tcase \"HEREPEERS\":\n\t\terr := Tribes_BE_PEERS(mypayload.TPbuffer[0:mypayload.TPsize])\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[UDP-INT] Cannot execute HEREPEERS: %s \", err.Error())\n\t\t}\n\t\t// herepeers gives a list of known peers\n\tcase \"GIMMEPEERS\":\n\t\t// asks for a list of known peers\n\t\t//\n\t\t// 
Implementation of GROUPS exchange\n\tcase \"HEREGROUPS\":\n\t\terr := Tribes_BE_Groups(mypayload.TPbuffer[0:mypayload.TPsize])\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[UDP-GRP] Cannot execute HEREGROUPS: %s \", err.Error())\n\t\t}\n\t\t// Receives the list of active groups\n\tcase \"GIMMEGROUPS\":\n\t\t// Asks for the list of active groups\n\t\t//\n\t\t// Implementation of group index: to have a list of messageIDs for a group\n\tcase \"HEREINDEX\":\n\t\t// Gives a list of MessageIDs on a specified group\n\tcase \"GIMMEINDEX\":\n\t\t// Asks for a list of posts in a specified group\n\t\t//\n\n\t// whatever else is lost\n\tdefault:\n\t\tbreak\n\n\t}\n\n}", "func Handler(conn net.Conn, pubsub *PubSub) {\n\n\treader := bufio.NewReader(conn)\n\n\tdata, err := reader.ReadString('\\n')\n\n\tif err != nil {\n\t\t//log.Fatal(err)\n\t\treturn\n\t}\n\n\tcommand := strings.Split(strings.TrimSuffix(data, \"\\n\"), \" \")\n\n\tswitch command[0] {\n\n\tcase \"PUBLISH\":\n\t\tgo Publish(conn, command, pubsub)\n\n\tcase \"SUBSCRIBE\":\n\t\tgo Subscribe(conn, command, pubsub)\n\t}\n\n}", "func HandlerMessage(aResponseWriter http.ResponseWriter, aRequest *http.Request) {\n\taRequest.ParseForm()\n\n\tbody := aRequest.Form\n\tlog.Printf(\"aRequest.Form=%s\", body)\n\tbytesBody, err := ioutil.ReadAll(aRequest.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Error reading body, err=%s\", err.Error())\n\t}\n\t//\tlog.Printf(\"bytesBody=%s\", string(bytesBody))\n\n\t//check Header Token\n\t//\theaderAuthentication := aRequest.Header.Get(STR_Authorization)\n\t//\tisValid, userId := DbIsTokenValid(headerAuthentication, nil)\n\t//\tlog.Printf(\"HandlerMessage, headerAuthentication=%s, isValid=%t, userId=%d\", headerAuthentication, isValid, userId)\n\t//\tif !isValid {\n\t//\t\tresult := new(objects.Result)\n\t//\t\tresult.ErrorMessage = STR_MSG_login\n\t//\t\tresult.ResultCode = http.StatusOK\n\t//\t\tServeResult(aResponseWriter, result, STR_template_result)\n\t//\t\treturn\n\t//\t}\n\n\treport := 
new(objects.Report)\n\tjson.Unmarshal(bytesBody, report)\n\tlog.Printf(\"HandlerMessage, report.ApiKey=%s, report.ClientId=%s, report.Message=%s, report.Sequence=%d, report.Time=%d\",\n\t\treport.ApiKey, report.ClientId, report.Message, report.Sequence, report.Time)\n\tvar isApiKeyValid = false\n\tif report.ApiKey != STR_EMPTY {\n\t\tisApiKeyValid, _ = IsApiKeyValid(report.ApiKey)\n\t}\n\tif !isApiKeyValid {\n\t\tresult := new(objects.Result)\n\t\tresult.ErrorMessage = STR_MSG_invalidapikey\n\t\tresult.ResultCode = http.StatusOK\n\t\tServeResult(aResponseWriter, result, STR_template_result)\n\t\treturn\n\t}\n\n\tDbAddReport(report.ApiKey, report.ClientId, report.Time, report.Sequence, report.Message, report.FilePath, nil)\n\n\tresult := new(objects.Result)\n\tresult.ErrorMessage = STR_EMPTY\n\tresult.ResultCode = http.StatusOK\n\tServeResult(aResponseWriter, result, STR_template_result)\n}", "func (o *Okcoin) WsHandleData(respRaw []byte) error {\n\tif bytes.Equal(respRaw, []byte(pongBytes)) {\n\t\treturn nil\n\t}\n\tvar dataResponse WebsocketDataResponse\n\terr := json.Unmarshal(respRaw, &dataResponse)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif dataResponse.ID != \"\" {\n\t\tif !o.Websocket.Match.IncomingWithData(dataResponse.ID, respRaw) {\n\t\t\treturn fmt.Errorf(\"couldn't match incoming message with id: %s and operation: %s\", dataResponse.ID, dataResponse.Operation)\n\t\t}\n\t\treturn nil\n\t}\n\tif len(dataResponse.Data) > 0 {\n\t\tswitch dataResponse.Arguments.Channel {\n\t\tcase wsInstruments:\n\t\t\treturn o.wsProcessInstruments(respRaw)\n\t\tcase wsTickers:\n\t\t\treturn o.wsProcessTickers(respRaw)\n\t\tcase wsCandle3M, wsCandle1M, wsCandle1W, wsCandle1D, wsCandle2D, wsCandle3D, wsCandle5D,\n\t\t\twsCandle12H, wsCandle6H, wsCandle4H, wsCandle2H, wsCandle1H, wsCandle30m, wsCandle15m,\n\t\t\twsCandle5m, wsCandle3m, wsCandle1m, wsCandle3Mutc, wsCandle1Mutc, wsCandle1Wutc, wsCandle1Dutc,\n\t\t\twsCandle2Dutc, wsCandle3Dutc, wsCandle5Dutc, wsCandle12Hutc, 
wsCandle6Hutc:\n\t\t\treturn o.wsProcessCandles(respRaw)\n\t\tcase wsTrades:\n\t\t\treturn o.wsProcessTrades(respRaw)\n\t\tcase wsOrderbooks,\n\t\t\twsOrderbooksL5,\n\t\t\twsOrderbookL1,\n\t\t\twsOrderbookTickByTickL400,\n\t\t\twsOrderbookTickByTickL50:\n\t\t\treturn o.wsProcessOrderbook(respRaw, dataResponse.Arguments.Channel)\n\t\tcase wsStatus:\n\t\t\tvar resp WebsocketStatus\n\t\t\terr = json.Unmarshal(respRaw, &resp)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfor x := range resp.Data {\n\t\t\t\tsystemStatus := fmt.Sprintf(\"%s %s on system %s %s service type From %s To %s\", systemStateString(resp.Data[x].State), resp.Data[x].Title, resp.Data[x].System, systemStatusServiceTypeString(resp.Data[x].ServiceType), resp.Data[x].Begin.Time().String(), resp.Data[x].End.Time().String())\n\t\t\t\tif resp.Data[x].Href != \"\" {\n\t\t\t\t\tsystemStatus = fmt.Sprintf(\"%s Href: %s\\n\", systemStatus, resp.Data[x].Href)\n\t\t\t\t}\n\t\t\t\tif resp.Data[x].RescheduleDescription != \"\" {\n\t\t\t\t\tsystemStatus = fmt.Sprintf(\"%s Rescheduled Description: %s\", systemStatus, resp.Data[x].RescheduleDescription)\n\t\t\t\t}\n\t\t\t\tlog.Warnf(log.ExchangeSys, systemStatus)\n\t\t\t}\n\t\t\to.Websocket.DataHandler <- resp\n\t\t\treturn nil\n\t\tcase wsAccount:\n\t\t\treturn o.wsProcessAccount(respRaw)\n\t\tcase wsOrder:\n\t\t\treturn o.wsProcessOrders(respRaw)\n\t\tcase wsOrdersAlgo:\n\t\t\treturn o.wsProcessAlgoOrder(respRaw)\n\t\tcase wsAlgoAdvance:\n\t\t\treturn o.wsProcessAdvancedAlgoOrder(respRaw)\n\t\t}\n\t\to.Websocket.DataHandler <- stream.UnhandledMessageWarning{\n\t\t\tMessage: o.Name + stream.UnhandledMessage + string(respRaw),\n\t\t}\n\t\treturn nil\n\t}\n\n\tvar errorResponse WebsocketErrorResponse\n\terr = json.Unmarshal(respRaw, &errorResponse)\n\tif err == nil && errorResponse.ErrorCode > 0 {\n\t\treturn fmt.Errorf(\"%v error - %v message: %s \",\n\t\t\to.Name,\n\t\t\terrorResponse.ErrorCode,\n\t\t\terrorResponse.Message)\n\t}\n\tvar eventResponse 
WebsocketEventResponse\n\terr = json.Unmarshal(respRaw, &eventResponse)\n\tif err == nil && eventResponse.Event != \"\" {\n\t\tswitch eventResponse.Event {\n\t\tcase \"login\":\n\t\t\tif o.Websocket.Match.IncomingWithData(\"login\", respRaw) {\n\t\t\t\to.Websocket.SetCanUseAuthenticatedEndpoints(eventResponse.Code == \"0\")\n\t\t\t}\n\t\tcase \"subscribe\", \"unsubscribe\":\n\t\t\to.Websocket.DataHandler <- eventResponse\n\t\tcase \"error\":\n\t\t\tif o.Verbose {\n\t\t\t\tlog.Debugf(log.ExchangeSys,\n\t\t\t\t\to.Name+\" - \"+eventResponse.Event+\" on channel: \"+eventResponse.Channel)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func handlePostMessage(w http.ResponseWriter, r *http.Request) {\n\t//TODO do some sanity checks on the body or message passed in.\n\t//TODO use this as part of validation if r.Header.Get(\"Content-Type\") == \"application/x-www-form-urlencoded\" {\n\tfmt.Printf(\"Got input new method.\\n\")\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tmessage := string(body)\n\tfmt.Printf(\"Message sent in \" + message + \"\\n\")\n\t//Add message to the map....\n\tvar messageid string = addToMessageMap(message)\n\tfmt.Printf(\"Message ID \" + messageid + \"\\n\")\n\n\t//return json object with message id\n\n\tmis := messageIdStruct{messageid, message}\n\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tif err := json.NewEncoder(w).Encode(mis); err != nil {\n\t\tpanic(err)\n\t}\n}", "func handlerFunc(w http.ResponseWriter, r *http.Request) {\n\n\t// //get the string from the input box\n\t//userSent := r.Header.Get(\"userAskEliza\")\n\t userSent := r.URL.Query().Get(\"value\")\n\n\t//send the answer to the user\n\tfmt.Fprintf(w, \"\\n%s\\n\", util.ReplyQuestion(userSent))\n\n}", "func TestBasicMethodChannelStringCodecHandle(t *testing.T) {\n\tcodec := StringCodec{}\n\tmessenger := NewTestingBinaryMessenger()\n\tchannel := NewBasicMessageChannel(messenger, \"ch\", 
codec)\n\tchannel.HandleFunc(func(message interface{}) (reply interface{}, err error) {\n\t\tmessageString, ok := message.(string)\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"message is invalid type, expected string\")\n\t\t}\n\t\treply = messageString + \" world\"\n\t\treturn reply, nil\n\t})\n\tencodedMessage, err := codec.EncodeMessage(\"hello\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to encode message: %v\", err)\n\t}\n\tencodedReply, err := messenger.MockSend(\"ch\", encodedMessage)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treply, err := codec.DecodeMessage(encodedReply)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to decode reply: %v\", err)\n\t}\n\tt.Log(spew.Sdump(reply))\n\treplyString, ok := reply.(string)\n\tif !ok {\n\t\tt.Fatal(\"reply is invalid type, expected string\")\n\t}\n\tEqual(t, \"hello world\", replyString)\n}", "func handleGetData(request []byte, bc *Blockchain) {\n\tvar buff bytes.Buffer\n\tvar payload getdata\n\n\tbuff.Write(request[commandLength:])\n\tdec := gob.NewDecoder(&buff)\n\terr := dec.Decode(&payload)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tif payload.Type == \"block\" {\n\t\tblock, err := bc.GetBlock([]byte(payload.ID))\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tsendBlock(payload.AddrFrom, &block)\n\t}\n\n\tif payload.Type == \"tx\" {\n\t\ttxID := hex.EncodeToString(payload.ID)\n\t\ttx := mempool[txID]\n\n\t\tsendTx(payload.AddrFrom, &tx)\n\t\t// delete(mempool, txID)\n\t}\n}", "func Handler(w http.ResponseWriter, r *http.Request) {\n\tlog.Printf(\"%s: start\", serviceName)\n\tdefer log.Printf(\"%s: stop\", serviceName)\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\t defer r.Body.Close()\n\n\n\tlog.Printf(\"%s: input data %s\", serviceName, body)\n\tstrings := inputString{}\n\terr = json.Unmarshal(body, &strings)\n\tif err != nil {\n\t\tlogError(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\toutputData, err := 
palindrome2.FindSubPalindromes(strings.InputString)\n\tif err != nil {\n\t\tlogError(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tlog.Printf(\"%s: output data %v\", serviceName, outputData)\n\n\toutputStruct := outputStrings{\n\t\tSubPalindromes: outputData,\n\t}\n\n\toutputJSON, err := json.Marshal(outputStruct)\n\tif err != nil {\n\t\tlogError(err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\n\t_, err = w.Write(outputJSON)\n\tif err != nil {\n\t\tlogError(err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n}", "func handleData(pc net.PacketConn, addr net.Addr, pd *PacketData, connectionSvc *ConnectionService) {\n\tconnectionSvc.writeData(addr.String(), pd.BlockNum, pd.Data)\n\tackPacket := &PacketAck{pd.BlockNum}\n\tsendResponse(pc, addr, ackPacket)\n}", "func HandlerClientInfoSend(responseWriter http.ResponseWriter, request *http.Request) {\n\trequest.ParseForm()\n\n\tbody := request.Form\n\tlog.Printf(\"aRequest.Form=%s\", body)\n\tbytesBody, err := ioutil.ReadAll(request.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Error reading body, err=%s\", err.Error())\n\t}\n\t//\tlog.Printf(\"bytesBody=%s\", string(bytesBody))\n\n\tclientInfo := new(objects.ClientInfo)\n\tjson.Unmarshal(bytesBody, clientInfo)\n\tlog.Printf(\"HandlerClientInfo, clientInfo.ApiKey=%s, clientInfo.ClientId=%s, clientInfo.Name=%s, clientInfo.Manufacturer=%s, clientInfo.Model=%s, clientInfo.DeviceId=%s\",\n\t\tclientInfo.ApiKey, clientInfo.ClientId, clientInfo.Name, clientInfo.Manufacturer, clientInfo.Model, clientInfo.DeviceId)\n\tvar isApiKeyValid = false\n\tif clientInfo.ApiKey != STR_EMPTY {\n\t\tisApiKeyValid, _ = IsApiKeyValid(clientInfo.ApiKey)\n\t}\n\tif !isApiKeyValid {\n\t\tresult := new(objects.Result)\n\t\tresult.ErrorMessage = STR_MSG_invalidapikey\n\t\tresult.ResultCode = http.StatusOK\n\t\tServeResult(responseWriter, result, 
STR_template_result)\n\t\treturn\n\t}\n\n\terrorAdd := DbAddClientInfo(clientInfo.ApiKey, clientInfo.ClientId, clientInfo.Name, clientInfo.Manufacturer,\n\t\tclientInfo.Model, clientInfo.DeviceId, nil)\n\tif errorAdd != nil {\n\t\tlog.Printf(\"HandlerClientInfo, errorAdd=%s\", errorAdd.Error())\n\t\tServeError(responseWriter, errorAdd.Error(), STR_template_page_error_html)\n\t}\n}", "func handleSend(conn *userConn, content []byte) {\n\tvar msg cliproto_up.Send\n\tif err := proto.Unmarshal(content, &msg); err != nil {\n\t\tconn.conn.Close()\n\t\treturn\n\t}\n\n\tsessionsLock.Lock()\n\n\t// Authentication check.\n\tif conn.session == 0 {\n\t\tconn.conn.Close()\n\t\tsessionsLock.Unlock()\n\t\treturn\n\t}\n\n\tsessionsLock.Unlock()\n\n\tuserMsg := new(relay.UserMessage)\n\tuserMsg.Sender = conn.session\n\tuserMsg.Recipient = *msg.Recipient\n\tuserMsg.Tag = *msg.Tag\n\tuserMsg.Content = *msg.Content\n\tuserMsg.Ttl = 3\n\n\t// Deliver this message.\n\tdeliver(userMsg)\n}", "func (_obj *Apilangpack) Dispatch(tarsCtx context.Context, _val interface{}, tarsReq *requestf.RequestPacket, tarsResp *requestf.ResponsePacket, _withContext bool) (err error) {\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_is := codec.NewReader(tools.Int8ToByte(tarsReq.SBuffer))\n\t_os := codec.NewBuffer()\n\tswitch tarsReq.SFuncName {\n\tcase \"langpack_getStrings\":\n\t\tvar params TLlangpack_getStrings\n\n\t\tif tarsReq.IVersion == basef.TARSVERSION {\n\n\t\t\terr = params.ReadBlock(_is, 1, true)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t} else if tarsReq.IVersion == basef.TUPVERSION {\n\t\t\t_reqTup_ := tup.NewUniAttribute()\n\t\t\t_reqTup_.Decode(_is)\n\n\t\t\tvar _tupBuffer_ []byte\n\n\t\t\t_reqTup_.GetBuffer(\"params\", &_tupBuffer_)\n\t\t\t_is.Reset(_tupBuffer_)\n\t\t\terr = params.ReadBlock(_is, 0, true)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t} else if tarsReq.IVersion == basef.JSONVERSION {\n\t\t\tvar _jsonDat_ map[string]interface{}\n\t\t\terr 
= json.Unmarshal(_is.ToBytes(), &_jsonDat_)\n\t\t\t{\n\t\t\t\t_jsonStr_, _ := json.Marshal(_jsonDat_[\"params\"])\n\t\t\t\tif err = json.Unmarshal([]byte(_jsonStr_), &params); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"Decode reqpacket fail, error version:\", tarsReq.IVersion)\n\t\t\treturn err\n\t\t}\n\n\t\tvar _funRet_ []LangPackString\n\t\tif _withContext == false {\n\t\t\t_imp := _val.(_impApilangpack)\n\t\t\t_funRet_, err = _imp.Langpack_getStrings(&params)\n\t\t} else {\n\t\t\t_imp := _val.(_impApilangpackWithContext)\n\t\t\t_funRet_, err = _imp.Langpack_getStrings(tarsCtx, &params)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif tarsReq.IVersion == basef.TARSVERSION {\n\t\t\t_os.Reset()\n\n\t\t\terr = _os.WriteHead(codec.LIST, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\terr = _os.Write_int32(int32(len(_funRet_)), 0)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfor _, v := range _funRet_ {\n\n\t\t\t\terr = v.WriteBlock(_os, 0)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t} else if tarsReq.IVersion == basef.TUPVERSION {\n\t\t\t_tupRsp_ := tup.NewUniAttribute()\n\n\t\t\terr = _os.WriteHead(codec.LIST, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\terr = _os.Write_int32(int32(len(_funRet_)), 0)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfor _, v := range _funRet_ {\n\n\t\t\t\terr = v.WriteBlock(_os, 0)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t_tupRsp_.PutBuffer(\"\", _os.ToBytes())\n\t\t\t_tupRsp_.PutBuffer(\"tars_ret\", _os.ToBytes())\n\n\t\t\t_os.Reset()\n\t\t\terr = _tupRsp_.Encode(_os)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if tarsReq.IVersion == basef.JSONVERSION {\n\t\t\t_rspJson_ := map[string]interface{}{}\n\t\t\t_rspJson_[\"tars_ret\"] = _funRet_\n\n\t\t\tvar _rspByte_ []byte\n\t\t\tif _rspByte_, err = 
json.Marshal(_rspJson_); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t_os.Reset()\n\t\t\terr = _os.Write_slice_uint8(_rspByte_)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\tcase \"langpack_getLanguage\":\n\t\tvar params TLlangpack_getLanguage\n\n\t\tif tarsReq.IVersion == basef.TARSVERSION {\n\n\t\t\terr = params.ReadBlock(_is, 1, true)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t} else if tarsReq.IVersion == basef.TUPVERSION {\n\t\t\t_reqTup_ := tup.NewUniAttribute()\n\t\t\t_reqTup_.Decode(_is)\n\n\t\t\tvar _tupBuffer_ []byte\n\n\t\t\t_reqTup_.GetBuffer(\"params\", &_tupBuffer_)\n\t\t\t_is.Reset(_tupBuffer_)\n\t\t\terr = params.ReadBlock(_is, 0, true)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t} else if tarsReq.IVersion == basef.JSONVERSION {\n\t\t\tvar _jsonDat_ map[string]interface{}\n\t\t\terr = json.Unmarshal(_is.ToBytes(), &_jsonDat_)\n\t\t\t{\n\t\t\t\t_jsonStr_, _ := json.Marshal(_jsonDat_[\"params\"])\n\t\t\t\tif err = json.Unmarshal([]byte(_jsonStr_), &params); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"Decode reqpacket fail, error version:\", tarsReq.IVersion)\n\t\t\treturn err\n\t\t}\n\n\t\tvar _funRet_ LangPackLanguage\n\t\tif _withContext == false {\n\t\t\t_imp := _val.(_impApilangpack)\n\t\t\t_funRet_, err = _imp.Langpack_getLanguage(&params)\n\t\t} else {\n\t\t\t_imp := _val.(_impApilangpackWithContext)\n\t\t\t_funRet_, err = _imp.Langpack_getLanguage(tarsCtx, &params)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif tarsReq.IVersion == basef.TARSVERSION {\n\t\t\t_os.Reset()\n\n\t\t\terr = _funRet_.WriteBlock(_os, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t} else if tarsReq.IVersion == basef.TUPVERSION {\n\t\t\t_tupRsp_ := tup.NewUniAttribute()\n\n\t\t\terr = _funRet_.WriteBlock(_os, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t_tupRsp_.PutBuffer(\"\", 
_os.ToBytes())\n\t\t\t_tupRsp_.PutBuffer(\"tars_ret\", _os.ToBytes())\n\n\t\t\t_os.Reset()\n\t\t\terr = _tupRsp_.Encode(_os)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if tarsReq.IVersion == basef.JSONVERSION {\n\t\t\t_rspJson_ := map[string]interface{}{}\n\t\t\t_rspJson_[\"tars_ret\"] = _funRet_\n\n\t\t\tvar _rspByte_ []byte\n\t\t\tif _rspByte_, err = json.Marshal(_rspJson_); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t_os.Reset()\n\t\t\terr = _os.Write_slice_uint8(_rspByte_)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\tcase \"langpack_getLangPack\":\n\t\tvar params TLlangpack_getLangPack\n\n\t\tif tarsReq.IVersion == basef.TARSVERSION {\n\n\t\t\terr = params.ReadBlock(_is, 1, true)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t} else if tarsReq.IVersion == basef.TUPVERSION {\n\t\t\t_reqTup_ := tup.NewUniAttribute()\n\t\t\t_reqTup_.Decode(_is)\n\n\t\t\tvar _tupBuffer_ []byte\n\n\t\t\t_reqTup_.GetBuffer(\"params\", &_tupBuffer_)\n\t\t\t_is.Reset(_tupBuffer_)\n\t\t\terr = params.ReadBlock(_is, 0, true)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t} else if tarsReq.IVersion == basef.JSONVERSION {\n\t\t\tvar _jsonDat_ map[string]interface{}\n\t\t\terr = json.Unmarshal(_is.ToBytes(), &_jsonDat_)\n\t\t\t{\n\t\t\t\t_jsonStr_, _ := json.Marshal(_jsonDat_[\"params\"])\n\t\t\t\tif err = json.Unmarshal([]byte(_jsonStr_), &params); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"Decode reqpacket fail, error version:\", tarsReq.IVersion)\n\t\t\treturn err\n\t\t}\n\n\t\tvar _funRet_ LangPackDifference\n\t\tif _withContext == false {\n\t\t\t_imp := _val.(_impApilangpack)\n\t\t\t_funRet_, err = _imp.Langpack_getLangPack(&params)\n\t\t} else {\n\t\t\t_imp := _val.(_impApilangpackWithContext)\n\t\t\t_funRet_, err = _imp.Langpack_getLangPack(tarsCtx, &params)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif tarsReq.IVersion == basef.TARSVERSION 
{\n\t\t\t_os.Reset()\n\n\t\t\terr = _funRet_.WriteBlock(_os, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t} else if tarsReq.IVersion == basef.TUPVERSION {\n\t\t\t_tupRsp_ := tup.NewUniAttribute()\n\n\t\t\terr = _funRet_.WriteBlock(_os, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t_tupRsp_.PutBuffer(\"\", _os.ToBytes())\n\t\t\t_tupRsp_.PutBuffer(\"tars_ret\", _os.ToBytes())\n\n\t\t\t_os.Reset()\n\t\t\terr = _tupRsp_.Encode(_os)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if tarsReq.IVersion == basef.JSONVERSION {\n\t\t\t_rspJson_ := map[string]interface{}{}\n\t\t\t_rspJson_[\"tars_ret\"] = _funRet_\n\n\t\t\tvar _rspByte_ []byte\n\t\t\tif _rspByte_, err = json.Marshal(_rspJson_); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t_os.Reset()\n\t\t\terr = _os.Write_slice_uint8(_rspByte_)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\tcase \"langpack_getDifference\":\n\t\tvar params TLlangpack_getDifference\n\n\t\tif tarsReq.IVersion == basef.TARSVERSION {\n\n\t\t\terr = params.ReadBlock(_is, 1, true)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t} else if tarsReq.IVersion == basef.TUPVERSION {\n\t\t\t_reqTup_ := tup.NewUniAttribute()\n\t\t\t_reqTup_.Decode(_is)\n\n\t\t\tvar _tupBuffer_ []byte\n\n\t\t\t_reqTup_.GetBuffer(\"params\", &_tupBuffer_)\n\t\t\t_is.Reset(_tupBuffer_)\n\t\t\terr = params.ReadBlock(_is, 0, true)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t} else if tarsReq.IVersion == basef.JSONVERSION {\n\t\t\tvar _jsonDat_ map[string]interface{}\n\t\t\terr = json.Unmarshal(_is.ToBytes(), &_jsonDat_)\n\t\t\t{\n\t\t\t\t_jsonStr_, _ := json.Marshal(_jsonDat_[\"params\"])\n\t\t\t\tif err = json.Unmarshal([]byte(_jsonStr_), &params); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"Decode reqpacket fail, error version:\", tarsReq.IVersion)\n\t\t\treturn err\n\t\t}\n\n\t\tvar _funRet_ LangPackDifference\n\t\tif 
_withContext == false {\n\t\t\t_imp := _val.(_impApilangpack)\n\t\t\t_funRet_, err = _imp.Langpack_getDifference(&params)\n\t\t} else {\n\t\t\t_imp := _val.(_impApilangpackWithContext)\n\t\t\t_funRet_, err = _imp.Langpack_getDifference(tarsCtx, &params)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif tarsReq.IVersion == basef.TARSVERSION {\n\t\t\t_os.Reset()\n\n\t\t\terr = _funRet_.WriteBlock(_os, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t} else if tarsReq.IVersion == basef.TUPVERSION {\n\t\t\t_tupRsp_ := tup.NewUniAttribute()\n\n\t\t\terr = _funRet_.WriteBlock(_os, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t_tupRsp_.PutBuffer(\"\", _os.ToBytes())\n\t\t\t_tupRsp_.PutBuffer(\"tars_ret\", _os.ToBytes())\n\n\t\t\t_os.Reset()\n\t\t\terr = _tupRsp_.Encode(_os)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if tarsReq.IVersion == basef.JSONVERSION {\n\t\t\t_rspJson_ := map[string]interface{}{}\n\t\t\t_rspJson_[\"tars_ret\"] = _funRet_\n\n\t\t\tvar _rspByte_ []byte\n\t\t\tif _rspByte_, err = json.Marshal(_rspJson_); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t_os.Reset()\n\t\t\terr = _os.Write_slice_uint8(_rspByte_)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\tcase \"langpack_getLanguages\":\n\t\tvar params TLlangpack_getLanguages\n\n\t\tif tarsReq.IVersion == basef.TARSVERSION {\n\n\t\t\terr = params.ReadBlock(_is, 1, true)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t} else if tarsReq.IVersion == basef.TUPVERSION {\n\t\t\t_reqTup_ := tup.NewUniAttribute()\n\t\t\t_reqTup_.Decode(_is)\n\n\t\t\tvar _tupBuffer_ []byte\n\n\t\t\t_reqTup_.GetBuffer(\"params\", &_tupBuffer_)\n\t\t\t_is.Reset(_tupBuffer_)\n\t\t\terr = params.ReadBlock(_is, 0, true)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t} else if tarsReq.IVersion == basef.JSONVERSION {\n\t\t\tvar _jsonDat_ map[string]interface{}\n\t\t\terr = json.Unmarshal(_is.ToBytes(), 
&_jsonDat_)\n\t\t\t{\n\t\t\t\t_jsonStr_, _ := json.Marshal(_jsonDat_[\"params\"])\n\t\t\t\tif err = json.Unmarshal([]byte(_jsonStr_), &params); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"Decode reqpacket fail, error version:\", tarsReq.IVersion)\n\t\t\treturn err\n\t\t}\n\n\t\tvar _funRet_ []LangPackLanguage\n\t\tif _withContext == false {\n\t\t\t_imp := _val.(_impApilangpack)\n\t\t\t_funRet_, err = _imp.Langpack_getLanguages(&params)\n\t\t} else {\n\t\t\t_imp := _val.(_impApilangpackWithContext)\n\t\t\t_funRet_, err = _imp.Langpack_getLanguages(tarsCtx, &params)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif tarsReq.IVersion == basef.TARSVERSION {\n\t\t\t_os.Reset()\n\n\t\t\terr = _os.WriteHead(codec.LIST, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\terr = _os.Write_int32(int32(len(_funRet_)), 0)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfor _, v := range _funRet_ {\n\n\t\t\t\terr = v.WriteBlock(_os, 0)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t} else if tarsReq.IVersion == basef.TUPVERSION {\n\t\t\t_tupRsp_ := tup.NewUniAttribute()\n\n\t\t\terr = _os.WriteHead(codec.LIST, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\terr = _os.Write_int32(int32(len(_funRet_)), 0)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfor _, v := range _funRet_ {\n\n\t\t\t\terr = v.WriteBlock(_os, 0)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t_tupRsp_.PutBuffer(\"\", _os.ToBytes())\n\t\t\t_tupRsp_.PutBuffer(\"tars_ret\", _os.ToBytes())\n\n\t\t\t_os.Reset()\n\t\t\terr = _tupRsp_.Encode(_os)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if tarsReq.IVersion == basef.JSONVERSION {\n\t\t\t_rspJson_ := map[string]interface{}{}\n\t\t\t_rspJson_[\"tars_ret\"] = _funRet_\n\n\t\t\tvar _rspByte_ []byte\n\t\t\tif _rspByte_, err = json.Marshal(_rspJson_); err != nil 
{\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t_os.Reset()\n\t\t\terr = _os.Write_slice_uint8(_rspByte_)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\tdefault:\n\t\treturn fmt.Errorf(\"func mismatch\")\n\t}\n\tvar _status map[string]string\n\ts, ok := current.GetResponseStatus(tarsCtx)\n\tif ok && s != nil {\n\t\t_status = s\n\t}\n\tvar _context map[string]string\n\tc, ok := current.GetResponseContext(tarsCtx)\n\tif ok && c != nil {\n\t\t_context = c\n\t}\n\t*tarsResp = requestf.ResponsePacket{\n\t\tIVersion: tarsReq.IVersion,\n\t\tCPacketType: 0,\n\t\tIRequestId: tarsReq.IRequestId,\n\t\tIMessageType: 0,\n\t\tIRet: 0,\n\t\tSBuffer: tools.ByteToInt8(_os.ToBytes()),\n\t\tStatus: _status,\n\t\tSResultDesc: \"\",\n\t\tContext: _context,\n\t}\n\n\t_ = _is\n\t_ = _os\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn nil\n}", "func Handle(req []byte) string {\n\tcurrentTweet := tweet{}\n\n\tif err := json.Unmarshal(req, &currentTweet); err != nil {\n\t\treturn fmt.Sprintf(\"Unable to unmarshal event: %s\", err.Error())\n\t}\n\n\tif strings.Contains(currentTweet.Text, \"RT\") ||\n\t\tcurrentTweet.Text == \"alexellisuk_bot\" ||\n\t\tcurrentTweet.Username == \"colorisebot\" ||\n\t\tcurrentTweet.Username == \"scmsFaAS\" ||\n\t\tcurrentTweet.Username == \"openfaas\" {\n\t\treturn \"filtered the tweet out\"\n\t}\n\n\tdiscordURL := readSecret(\"twitter-discord-webhook-url\")\n\tdiscordMsg := discordMessage{\n\t\tContent: \"@\" + currentTweet.Username + \": \" + currentTweet.Text + \" (via \" + currentTweet.Link + \")\",\n\t\tUsername: \"@\" + currentTweet.Username,\n\t}\n\n\tbodyBytes, _ := json.Marshal(discordMsg)\n\thttpReq, err := http.NewRequest(http.MethodPost, discordURL, bytes.NewReader(bodyBytes))\n\tif err != nil {\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"resErr: %s\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\thttpReq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tres, err := http.DefaultClient.Do(httpReq)\n\tif err != nil 
{\n\t\tfmt.Fprintf(os.Stderr, \"resErr: %s\", err)\n\t\tos.Exit(1)\n\t}\n\n\tif res.Body != nil {\n\t\tdefer res.Body.Close()\n\t}\n\n\tbodyRes, _ := ioutil.ReadAll(res.Body)\n\n\tif res.StatusCode != http.StatusAccepted &&\n\t\tres.StatusCode != http.StatusOK &&\n\t\tres.StatusCode != http.StatusNoContent {\n\t\tfmt.Fprintf(os.Stderr, \"unexpected status code: %d, body: %s\", res.StatusCode, string(bodyRes))\n\t\tos.Exit(1)\n\t}\n\n\treturn fmt.Sprintf(\"tweet forwarded [%d]\", res.StatusCode)\n}", "func HandleRpcs(cmd PB_CommandToServer, params RPC_UserParam, rpcHandler RPC_AllHandlersInteract, responseHandler RPC_ResponseHandlerInterface) {\n\n\tsplits := strings.Split(cmd.Command, \".\")\n\n\tif len(splits) != 2 {\n\t\tnoDevErr(errors.New(\"HandleRpcs: splic is not 2 parts\"))\n\t\treturn\n\t}\n\n\tswitch splits[0] {\n\n\tcase \"RPC_Auth\":\n\n\t\t//rpc,ok := rpcHandler.RPC_Auth\n\t\trpc := rpcHandler.RPC_Auth\n\t\t/*if !ok {\n\t\t e:=errors.New(\"rpcHandler could not be cast to : RPC_Auth\")\n\t\t noDevErr(e)\n\t\t RPC_ResponseHandler.HandelError(e)\n\t\t return\n\t\t}*/\n\n\t\tswitch splits[1] {\n\t\tcase \"CheckPhone\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName2{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.CheckPhone(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Auth.CheckPhone\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName2\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",\"RPC_Auth.CheckPhone\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else 
{\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SendCode\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName2{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SendCode(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Auth.SendCode\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName2\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",\"RPC_Auth.SendCode\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SendCodeToSms\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName2{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SendCodeToSms(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Auth.SendCodeToSms\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName2\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",\"RPC_Auth.SendCodeToSms\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else 
{\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SendCodeToTelgram\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName2{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SendCodeToTelgram(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Auth.SendCodeToTelgram\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName2\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",\"RPC_Auth.SendCodeToTelgram\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SingUp\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName2{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SingUp(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Auth.SingUp\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName2\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",\"RPC_Auth.SingUp\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else 
{\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SingIn\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName2{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SingIn(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Auth.SingIn\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName2\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",\"RPC_Auth.SingIn\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"LogOut\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName2{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.LogOut(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Auth.LogOut\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName2\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",\"RPC_Auth.LogOut\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else 
{\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tdefault:\n\t\t\tnoDevErr(errors.New(\"rpc method is does not exist: \" + cmd.Command))\n\t\t}\n\tcase \"RPC_Chat\":\n\n\t\t//rpc,ok := rpcHandler.RPC_Chat\n\t\trpc := rpcHandler.RPC_Chat\n\t\t/*if !ok {\n\t\t e:=errors.New(\"rpcHandler could not be cast to : RPC_Chat\")\n\t\t noDevErr(e)\n\t\t RPC_ResponseHandler.HandelError(e)\n\t\t return\n\t\t}*/\n\n\t\tswitch splits[1] {\n\t\tcase \"AddNewMessage\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_AddNewMessage{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.AddNewMessage(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.AddNewMessage\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_AddNewMessage\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_AddNewMessage\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_AddNewMessage\",\"RPC_Chat.AddNewMessage\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SetRoomActionDoing\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_SetRoomActionDoing{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SetRoomActionDoing(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.SetRoomActionDoing\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_SetRoomActionDoing\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: 
load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_SetRoomActionDoing\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_SetRoomActionDoing\",\"RPC_Chat.SetRoomActionDoing\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SetMessagesRangeAsSeen\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_SetChatMessagesRangeAsSeen{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SetMessagesRangeAsSeen(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.SetMessagesRangeAsSeen\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_SetChatMessagesRangeAsSeen\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_SetChatMessagesRangeAsSeen\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_SetChatMessagesRangeAsSeen\",\"RPC_Chat.SetMessagesRangeAsSeen\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"DeleteChatHistory\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_DeleteChatHistory{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.DeleteChatHistory(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.DeleteChatHistory\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: 
\"PB_ChatResponse_DeleteChatHistory\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_DeleteChatHistory\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_DeleteChatHistory\",\"RPC_Chat.DeleteChatHistory\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"DeleteMessagesByIds\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_DeleteMessagesByIds{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.DeleteMessagesByIds(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.DeleteMessagesByIds\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_DeleteMessagesByIds\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_DeleteMessagesByIds\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_DeleteMessagesByIds\",\"RPC_Chat.DeleteMessagesByIds\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SetMessagesAsReceived\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_SetMessagesAsReceived{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SetMessagesAsReceived(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.SetMessagesAsReceived\",\n\t\t\t\t\t\tUserParam: 
params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_SetMessagesAsReceived\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_SetMessagesAsReceived\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_SetMessagesAsReceived\",\"RPC_Chat.SetMessagesAsReceived\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"EditMessage\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_EditMessage{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.EditMessage(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.EditMessage\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_EditMessage\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_EditMessage\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_EditMessage\",\"RPC_Chat.EditMessage\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"GetChatList\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_GetChatList{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.GetChatList(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.GetChatList\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: 
cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_GetChatList\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_GetChatList\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_GetChatList\",\"RPC_Chat.GetChatList\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"GetChatHistoryToOlder\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_GetChatHistoryToOlder{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.GetChatHistoryToOlder(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.GetChatHistoryToOlder\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_GetChatHistoryToOlder\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_GetChatHistoryToOlder\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_GetChatHistoryToOlder\",\"RPC_Chat.GetChatHistoryToOlder\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"GetFreshAllDirectMessagesList\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_GetFreshAllDirectMessagesList{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.GetFreshAllDirectMessagesList(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: 
\"RPC_Chat.GetFreshAllDirectMessagesList\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_GetFreshAllDirectMessagesList\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_GetFreshAllDirectMessagesList\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_GetFreshAllDirectMessagesList\",\"RPC_Chat.GetFreshAllDirectMessagesList\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tdefault:\n\t\t\tnoDevErr(errors.New(\"rpc method is does not exist: \" + cmd.Command))\n\t\t}\n\tcase \"RPC_Other\":\n\n\t\t//rpc,ok := rpcHandler.RPC_Other\n\t\trpc := rpcHandler.RPC_Other\n\t\t/*if !ok {\n\t\t e:=errors.New(\"rpcHandler could not be cast to : RPC_Other\")\n\t\t noDevErr(e)\n\t\t RPC_ResponseHandler.HandelError(e)\n\t\t return\n\t\t}*/\n\n\t\tswitch splits[1] {\n\t\tcase \"Echo\": //each pb_service_method\n\t\t\tload := &PB_OtherParam_Echo{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.Echo(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Other.Echo\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_OtherResponse_Echo\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_OtherResponse_Echo\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_OtherResponse_Echo\",\"RPC_Other.Echo\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else 
{\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tdefault:\n\t\t\tnoDevErr(errors.New(\"rpc method is does not exist: \" + cmd.Command))\n\t\t}\n\tcase \"RPC_Sync\":\n\n\t\t//rpc,ok := rpcHandler.RPC_Sync\n\t\trpc := rpcHandler.RPC_Sync\n\t\t/*if !ok {\n\t\t e:=errors.New(\"rpcHandler could not be cast to : RPC_Sync\")\n\t\t noDevErr(e)\n\t\t RPC_ResponseHandler.HandelError(e)\n\t\t return\n\t\t}*/\n\n\t\tswitch splits[1] {\n\t\tcase \"GetGeneralUpdates\": //each pb_service_method\n\t\t\tload := &PB_SyncParam_GetGeneralUpdates{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.GetGeneralUpdates(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Sync.GetGeneralUpdates\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_SyncResponse_GetGeneralUpdates\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_GetGeneralUpdates\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_GetGeneralUpdates\",\"RPC_Sync.GetGeneralUpdates\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"GetNotifyUpdates\": //each pb_service_method\n\t\t\tload := &PB_SyncParam_GetNotifyUpdates{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.GetNotifyUpdates(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Sync.GetNotifyUpdates\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_SyncResponse_GetNotifyUpdates\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: 
load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_GetNotifyUpdates\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_GetNotifyUpdates\",\"RPC_Sync.GetNotifyUpdates\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SetLastSyncDirectUpdateId\": //each pb_service_method\n\t\t\tload := &PB_SyncParam_SetLastSyncDirectUpdateId{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SetLastSyncDirectUpdateId(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Sync.SetLastSyncDirectUpdateId\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_SyncResponse_SetLastSyncDirectUpdateId\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_SetLastSyncDirectUpdateId\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_SetLastSyncDirectUpdateId\",\"RPC_Sync.SetLastSyncDirectUpdateId\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SetLastSyncGeneralUpdateId\": //each pb_service_method\n\t\t\tload := &PB_SyncParam_SetLastSyncGeneralUpdateId{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SetLastSyncGeneralUpdateId(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Sync.SetLastSyncGeneralUpdateId\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: 
cmd,\n\t\t\t\t\t\tPBClassName: \"PB_SyncResponse_SetLastSyncGeneralUpdateId\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_SetLastSyncGeneralUpdateId\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_SetLastSyncGeneralUpdateId\",\"RPC_Sync.SetLastSyncGeneralUpdateId\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SetLastSyncNotifyUpdateId\": //each pb_service_method\n\t\t\tload := &PB_SyncParam_SetLastSyncNotifyUpdateId{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SetLastSyncNotifyUpdateId(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Sync.SetLastSyncNotifyUpdateId\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_SyncResponse_SetLastSyncNotifyUpdateId\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_SetLastSyncNotifyUpdateId\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_SetLastSyncNotifyUpdateId\",\"RPC_Sync.SetLastSyncNotifyUpdateId\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tdefault:\n\t\t\tnoDevErr(errors.New(\"rpc method is does not exist: \" + cmd.Command))\n\t\t}\n\tcase \"RPC_UserOffline\":\n\n\t\t//rpc,ok := rpcHandler.RPC_UserOffline\n\t\trpc := rpcHandler.RPC_UserOffline\n\t\t/*if !ok {\n\t\t e:=errors.New(\"rpcHandler could not be cast to : 
RPC_UserOffline\")\n\t\t noDevErr(e)\n\t\t RPC_ResponseHandler.HandelError(e)\n\t\t return\n\t\t}*/\n\n\t\tswitch splits[1] {\n\t\tcase \"BlockUser\": //each pb_service_method\n\t\t\tload := &PB_UserParam_BlockUser{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.BlockUser(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_UserOffline.BlockUser\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserOfflineResponse_BlockUser\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_BlockUser\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_BlockUser\",\"RPC_UserOffline.BlockUser\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"UnBlockUser\": //each pb_service_method\n\t\t\tload := &PB_UserParam_UnBlockUser{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.UnBlockUser(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_UserOffline.UnBlockUser\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserOfflineResponse_UnBlockUser\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_UnBlockUser\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_UnBlockUser\",\"RPC_UserOffline.UnBlockUser\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else 
{\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"UpdateAbout\": //each pb_service_method\n\t\t\tload := &PB_UserParam_UpdateAbout{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.UpdateAbout(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_UserOffline.UpdateAbout\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserOfflineResponse_UpdateAbout\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_UpdateAbout\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_UpdateAbout\",\"RPC_UserOffline.UpdateAbout\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"UpdateUserName\": //each pb_service_method\n\t\t\tload := &PB_UserParam_UpdateUserName{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.UpdateUserName(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_UserOffline.UpdateUserName\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserOfflineResponse_UpdateUserName\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_UpdateUserName\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_UpdateUserName\",\"RPC_UserOffline.UpdateUserName\",cmd, params , 
load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"ChangePrivacy\": //each pb_service_method\n\t\t\tload := &PB_UserParam_ChangePrivacy{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.ChangePrivacy(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_UserOffline.ChangePrivacy\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponseOffline_ChangePrivacy\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponseOffline_ChangePrivacy\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponseOffline_ChangePrivacy\",\"RPC_UserOffline.ChangePrivacy\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"ChangeAvatar\": //each pb_service_method\n\t\t\tload := &PB_UserParam_ChangeAvatar{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.ChangeAvatar(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_UserOffline.ChangeAvatar\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserOfflineResponse_ChangeAvatar\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_ChangeAvatar\",cmd, 
params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_ChangeAvatar\",\"RPC_UserOffline.ChangeAvatar\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tdefault:\n\t\t\tnoDevErr(errors.New(\"rpc method is does not exist: \" + cmd.Command))\n\t\t}\n\tcase \"RPC_User\":\n\n\t\t//rpc,ok := rpcHandler.RPC_User\n\t\trpc := rpcHandler.RPC_User\n\t\t/*if !ok {\n\t\t e:=errors.New(\"rpcHandler could not be cast to : RPC_User\")\n\t\t noDevErr(e)\n\t\t RPC_ResponseHandler.HandelError(e)\n\t\t return\n\t\t}*/\n\n\t\tswitch splits[1] {\n\t\tcase \"CheckUserName\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.CheckUserName(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_User.CheckUserName\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName\",\"RPC_User.CheckUserName\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"GetBlockedList\": //each pb_service_method\n\t\t\tload := &PB_UserParam_BlockedList{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.GetBlockedList(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := 
RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_User.GetBlockedList\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_BlockedList\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_BlockedList\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_BlockedList\",\"RPC_User.GetBlockedList\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tdefault:\n\t\t\tnoDevErr(errors.New(\"rpc method is does not exist: \" + cmd.Command))\n\t\t}\n\tdefault:\n\t\tnoDevErr(errors.New(\"rpc dosent exisit for: \" + cmd.Command))\n\t}\n}", "func Handler(res http.ResponseWriter, req *http.Request) {\n\t// First, decode the JSON response body\n\tbody := &models.TelegramRequest{}\n\tif err := json.NewDecoder(req.Body).Decode(body); err != nil {\n\t\tfmt.Println(\"could not decode request body\", err)\n\t\treturn\n\t}\n\n\tfmt.Println(\"Received text :- \" + body.Message.Text + \" , from :- \" + body.Message.From.FirstName)\n\n\tif val, err := strconv.Atoi(body.Message.Text); err == nil {\n\t\tdataToSend := \"\"\n\t\tif len(body.Message.Text) == 6 {\n\t\t\tdataToSend = cowin.FetchDataByPinCode(val)\n\t\t} else if len(body.Message.Text) <= 3 {\n\t\t\tdataToSend = cowin.FetchDataByDistrictId(val)\n\t\t} else {\n\t\t\tsendMessage.SendTelegramUsingWebhook(body.Message.Chat.Id,\n\t\t\t\t\"Please enter valid pincode or district id\")\n\t\t\treturn\n\t\t}\n\t\tif dataToSend == \"\" {\n\t\t\tfmt.Println(\"Error empty string, so not sending data\")\n\t\t\tsendMessage.SendTelegramUsingWebhook(body.Message.Chat.Id,\n\t\t\t\t\"Unable to fetch data, please try again after sometime\")\n\t\t} else 
{\n\t\t\t//fmt.Println(dataToSend)\n\t\t\tsendMessage.SendTelegramUsingWebhook(body.Message.Chat.Id, dataToSend)\n\t\t\tfmt.Println(\"Data sent\")\n\t\t}\n\t} else {\n\t\tsendMessage.SendTelegramUsingWebhook(body.Message.Chat.Id,\n\t\t\t\"Please enter either valid pincode or district id\")\n\t}\n}", "func DoStringWithPayload(L *lua.LState) int {\n\tbody := L.CheckString(1)\n\tpayload := L.CheckString(2)\n\tp := NewLuaPlugin(L, 2)\n\tp.body = &body\n\tp.jobPayload = &payload\n\tud := L.NewUserData()\n\tud.Value = p\n\tL.SetMetatable(ud, L.GetTypeMetatable(`plugin_ud`))\n\tL.Push(ud)\n\treturn 1\n}", "func helloHandler() string { return \"Hello\" }", "func HandleData(ct string, data []byte) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", ct)\n\t\t_, _ = w.Write(data)\n\t}\n}", "func (client *Client) Do(funcname string, data []byte, flag byte) (handle string, err error) {\n var datatype uint32\n if flag & JOB_LOW == JOB_LOW {\n if flag & JOB_BG == JOB_BG {\n datatype = common.SUBMIT_JOB_LOW_BG\n } else {\n datatype = common.SUBMIT_JOB_LOW\n }\n } else if flag & JOB_HIGH == JOB_HIGH {\n if flag & JOB_BG == JOB_BG {\n datatype = common.SUBMIT_JOB_HIGH_BG\n } else {\n datatype = common.SUBMIT_JOB_HIGH\n }\n } else if flag & JOB_BG == JOB_BG {\n datatype = common.SUBMIT_JOB_BG\n } else {\n datatype = common.SUBMIT_JOB\n }\n\n uid := strconv.Itoa(int(client.ai.Id()))\n l := len(funcname) + len(uid) + len(data) + 2\n rel := make([]byte, 0, l)\n rel = append(rel, []byte(funcname)...) // len(funcname)\n rel = append(rel, '\\x00') // 1 Byte\n rel = append(rel, []byte(uid)...) // len(uid)\n rel = append(rel, '\\x00') // 1 Byte\n rel = append(rel, data...) 
// len(data)\n client.writeJob(newJob(common.REQ, datatype, rel))\n // Waiting for JOB_CREATED\n select {\n case job := <-client.jobCreated:\n return string(job.Data), nil\n case <-time.After(client.TimeOut):\n return \"\", common.ErrJobTimeOut\n }\n return\n}", "func HandleMessage(msg *WeechatMessage, handler HandleWeechatMessage) error {\n\t// Got an empty message, simply don't process it for now. We can figure\n\t// out how to handle this.\n\tif msg == nil {\n\t\tfmt.Printf(\"Got Nil message to handle.\\n\")\n\t\treturn nil\n\t}\n\tswitch msg.Msgid {\n\tcase \"listbuffers\", \"_buffer_opened\":\n\t\t// parse out the list of buffers which are Hda objects.\n\t\tbufffers := msg.Object.Value.(WeechatHdaValue)\n\t\tbuflist := make(map[string]*WeechatBuffer, len(bufffers.Value))\n\n\t\tfor _, each := range bufffers.Value {\n\t\t\tbuf := &WeechatBuffer{\n\t\t\t\tShortName: each[\"short_name\"].Value.(string),\n\t\t\t\tFullName: each[\"full_name\"].Value.(string),\n\t\t\t\tTitle: each[\"title\"].Value.(string),\n\t\t\t\tNumber: each[\"number\"].Value.(int32),\n\t\t\t\tLocalVars: each[\"local_variables\"].Value.(map[WeechatObject]WeechatObject),\n\t\t\t\tLines: make([]*WeechatLine, 0),\n\t\t\t\t// this is essentially a list of strings, pointers,\n\t\t\t\t// the first pointer of which is the buffer' pointer.\n\t\t\t\tPath: each[\"__path\"].Value.([]string)[1],\n\t\t\t}\n\t\t\tbuflist[buf.Path] = buf\n\t\t}\n\n\t\thandler.HandleListBuffers(buflist)\n\n\tcase \"_buffer_line_added\":\n\t\tfor _, each := range msg.Object.Value.(WeechatHdaValue).Value {\n\t\t\taddLine(handler, each)\n\t\t}\n\tcase \"listlines\":\n\t\tlines := msg.Object.Value.(WeechatHdaValue).Value\n\t\tfor i := len(lines) - 1; i >= 0; i-- {\n\t\t\taddLine(handler, lines[i])\n\t\t}\n\tcase \"nicklist\", \"_nicklist\":\n\t\t// handle list of nicks.\n\t\tvar nicks []*WeechatNick\n\t\tnickValues := msg.Object.Value.(WeechatHdaValue).Value\n\t\tvar buffer = \"default\"\n\t\tfor _, val := range nickValues 
{\n\n\t\t\titem := &WeechatNick{\n\t\t\t\tName: val[\"name\"].as_string(),\n\t\t\t\tColor: val[\"color\"].as_string(),\n\t\t\t\tLevel: val[\"level\"].as_int(),\n\t\t\t\tPrefix: val[\"prefix\"].as_string(),\n\t\t\t\tPrefixColor: val[\"prefix_color\"].as_string(),\n\t\t\t\tGroup: val[\"group\"].as_bool(),\n\t\t\t\tVisible: val[\"visible\"].as_bool(),\n\t\t\t}\n\n\t\t\tnicks = append(nicks, item)\n\t\t\tbuffer = val[\"__path\"].Value.([]string)[2]\n\t\t}\n\t\thandler.HandleNickList(buffer, nicks)\n\tcase \"error\":\n\t\thandler.Default(msg)\n\tdefault:\n\t\thandler.Default(msg)\n\t}\n\treturn nil\n}", "func Handle(req []byte) string {\n\n\tvar ip Input\n\n\terr := json.Unmarshal(req, &ip)\n\tif err != nil {\n\t\treturn fmt.Sprintf(\"Error: Failed to parse input : %v \\n %s\", err, help)\n\t}\n\n\tr, rerr := regexp.Compile(ip.Regex)\n\tif rerr != nil {\n\t\treturn fmt.Sprintf(\"Error: Failed to compile regex %s : %v\", ip.Regex, rerr)\n\t}\n\n\tvar op Output\n\top.Match = true\n\top.Matches = r.FindAllString(ip.Data, -1)\n\tif len(op.Matches) == 0 {\n\t\top.Match = false\n\t}\n\n\tmdata, merr := json.Marshal(op)\n\tif merr != nil {\n\t\treturn fmt.Sprintf(\"Error: Failed to marshal output %v : %v\", op, merr)\n\t}\n\treturn string(mdata)\n}", "func (s *server) handlePacket(p packet, c *conn) error {\n\tglog.Infof(\"handling packet type: %c, data: %s, upgraded: %t\", p.typ, p.data, c.upgraded())\n\tvar encode func(packet) error\n\tif c.upgraded() {\n\t\tencode = newPacketEncoder(c).encode\n\t} else {\n\t\tencode = func(pkt packet) error {\n\t\t\treturn newPayloadEncoder(c).encode([]packet{pkt})\n\t\t}\n\t}\n\tswitch p.typ {\n\tcase packetTypePing:\n\t\treturn encode(packet{typ: packetTypePong, data: p.data})\n\tcase packetTypeMessage:\n\t\tif c.pubConn != nil {\n\t\t\tc.pubConn.onMessage(p.data)\n\t\t}\n\tcase packetTypeClose:\n\t\tc.Close()\n\t}\n\treturn nil\n}", "func GenericHandler(response http.ResponseWriter, request *http.Request){\n\n\t// Set cookie and MIME 
type in the HTTP headers.\n\tSetMyCookie(response)\n\tresponse.Header().Set(\"Content-type\", \"text/plain\")\n\n\t// Parse URL and POST data into the request.Form\n\terr := request.ParseForm()\n\tif err != nil {\n\t\thttp.Error(response, fmt.Sprintf(\"error parsing url %v\", err), 500)\n\t}\n\n\t// Send the text diagnostics to the client.\n\tfmt.Fprint(response, \"FooWebHandler says ... \\n\")\n\tfmt.Fprintf(response, \" request.Method '%v'\\n\", request.Method)\n\tfmt.Fprintf(response, \" request.RequestURI '%v'\\n\", request.RequestURI)\n\tfmt.Fprintf(response, \" request.URL.Path '%v'\\n\", request.URL.Path)\n\tfmt.Fprintf(response, \" request.Form '%v'\\n\", request.Form)\n\tfmt.Fprintf(response, \" request.Cookies() '%v'\\n\", request.Cookies())\n}", "func handler(p gopacket.Packet) []*osc.Message {\n\n transport := p.TransportLayer()\n application := p.ApplicationLayer()\n\n var msgs []*osc.Message\n\n if application != nil {\n if application.LayerType() == layers.LayerTypeDNS {\n msg := osc.NewMessage(\"/dns\")\n msg.Append(\"hello\")\n msgs = append(msgs, msg)\n }\n }\n\n if transport != nil {\n if transport.LayerType() == layers.LayerTypeTCP {\n msg := osc.NewMessage(\"/tcp\")\n msg.Append(\"hello\")\n msgs = append(msgs, msg)\n } else if transport.LayerType() == layers.LayerTypeUDP {\n msg := osc.NewMessage(\"/udp\")\n msg.Append(\"hello\")\n msgs = append(msgs, msg)\n }\n }\n\n return msgs\n}", "func HandleList(words []string, orMes string) (string, error) {\r\n\tmessage := \"\"\r\n\tif len(words) < 2 {\r\n\t\treturn \"short\", nil\r\n\t}\r\n\tswitch words[1] {\r\n\tcase \"hops\":\r\n\t\tfor _, hop := range data.HopVarieties {\r\n\t\t\tline := fmt.Sprintf(\"%s | alpha=%.2f\\n\", hop.Name, hop.AlphaAcidUnits)\r\n\t\t\tmessage += line\r\n\t\t}\r\n\tcase \"styles\", \"style\":\r\n\t\tfor _, style := range data.Styles {\r\n\t\t\tline := fmt.Sprintf(\"%s\\n\", style.Name)\r\n\t\t\tmessage += line\r\n\t\t}\r\n\tcase \"fermentables\", \"ferm\":\r\n\t\tfor _, 
ferm := range data.Fermentables {\r\n\t\t\tline := fmt.Sprintf(\"%s | type=%s\\n\", ferm.Name, ferm.Type)\r\n\t\t\tmessage += line\r\n\t\t}\r\n\tcase \"yeast\", \"cultures\":\r\n\t\tfor _, yeast := range data.Cultures {\r\n\t\t\tline := fmt.Sprintf(\"%s | att=%.2f\\n\", yeast.Name, yeast.Attenuation)\r\n\t\t\tmessage += line\r\n\t\t}\r\n\t}\r\n\treturn message, nil\r\n}", "func t1Handler(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"ONE\\n\"))\n}", "func Handler(res http.ResponseWriter, req *http.Request) {\n\t// First, decode the JSON response body\n\tfullBody := &types.Update{}\n\n\tif err := json.NewDecoder(req.Body).Decode(fullBody); err != nil {\n\t\tfmt.Println(\"could not decode request full body\", err)\n\t\treturn\n\t}\n\tfmt.Println(\"PRINTING REQUEST FULL BODY\")\n\tfmt.Printf(\"%+v\\n\", fullBody)\n\tif fullBody.Message == nil {\n\t\treturn\n\t}\n\tfmt.Printf(\"%+v\\n\", fullBody.Message.Text)\n\t// Check if the message contains the word \"marco\"\n\t// if not, return without doing anything\n\tif strings.Contains(fullBody.Message.Text,\"testDB\"){\n\t\tdb.Add(fullBody.Message.Text)\n\t}\n\tif strings.Contains(fullBody.Message.Text,\"testTag\"){\n\t\terr := say(fullBody.Message.Chat,fullBody.Message.From, fmt.Sprintf(\"[inline mention of a user](tg://user?id=%d)\",fullBody.Message.From.ID))\n\t\tif err != nil {\n\t\t\tfmt.Println(\"error in sending reply:\", err)\n\t\t\treturn\n\t\t}\n\t}\n\tif !strings.Contains(strings.ToLower(fullBody.Message.Text), \"marco\") {\n\t\treturn\n\t}\n\n\t// If the text contains marco, call the `sayPolo` function, which\n\t// is defined below\n\tif err := sayPolo(fullBody.Message.Chat.ID); err != nil {\n\t\tfmt.Println(\"error in sending reply:\", err)\n\t\treturn\n\t}\n\n\n\t// log a confirmation message if the message is sent successfully\n\tfmt.Println(\"reply sent\")\n}", "func MainHandler(resp http.ResponseWriter, _ *http.Request) {\r\n resp.Write([]byte(\"Hi there! 
I'm PoGoBot!\"))\r\n}", "func Handle(req []byte) string {\n\twebhook, err := ioutil.ReadFile(\"/var/openfaas/secrets/webhook-url\")\n\twebhookURL := string(webhook)\n\tif err != nil {\n\t\treturn fmt.Sprintf(\"Unable to read secret file\")\n\t}\n\n\tpayload := Payload{Text: string(req)}\n\treqPayload, err := json.Marshal(payload)\n\tif err != nil {\n\t\treturn fmt.Sprintf(\"Unable to marshal\")\n\t}\n\treader := bytes.NewReader(reqPayload)\n\n\tclient := http.Client{}\n\trequest, _ := http.NewRequest(http.MethodPost, webhookURL, reader)\n\t_, err = client.Do(request)\n\tif err != nil {\n\t\treturn fmt.Sprintf(\"Unable to send message\")\n\t}\n\n\treturn fmt.Sprintf(\"Message sent successfully\")\n}", "func handle(rw io.ReadWriter, y, x, p interface{}, todo <-chan int, dsts <-chan interface{}) error {\n\t// Read request.\n\treq := new(request)\n\tif err := json.NewDecoder(rw).Decode(req); err != nil {\n\t\treturn fmt.Errorf(\"receive request: %v\", err)\n\t}\n\n\tswitch req.Type {\n\tdefault:\n\t\t// Error occurred in protocol, not user code.\n\t\treturn fmt.Errorf(`unknown request type: \"%s\"`, req.Type)\n\n\tcase recvType:\n\t\ti := <-todo\n\t\txi := reflect.ValueOf(x).Index(i).Interface()\n\t\tresp := &inputResp{i, xi, p}\n\t\tif err := json.NewEncoder(rw).Encode(resp); err != nil {\n\t\t\treturn fmt.Errorf(\"send input: %v\", err)\n\t\t}\n\t\treturn nil\n\n\tcase sendType:\n\t\tbody := &outputReq{Y: <-dsts}\n\t\tif err := json.Unmarshal(req.Body, body); err != nil {\n\t\t\treturn fmt.Errorf(\"receive output: %v\", err)\n\t\t}\n\t\t// Send the error if one occurred, nil otherwise.\n\t\tif body.Err != nil {\n\t\t\treturn fmt.Errorf(\"slave error: %s\", *body.Err)\n\t\t}\n\t\t// Assign value to output slice.\n\t\treflect.ValueOf(y).Index(body.Index).Set(reflect.ValueOf(body.Y).Elem())\n\t\treturn nil\n\t}\n}", "func handler(w http.ResponseWriter, r *http.Request) {\n\tvar f interface{}\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil 
{\n\t\tfmt.Printf(\"Unable to Parse Body\")\n\t}\n\n\terr = json.Unmarshal(body, &f)\n\tif err != nil{\n\t\tfmt.Printf(\"Unable to Unmarshal request\")\n\t}\n\n\tdata := f.(map[string]interface{})\n\tfmt.Println(data[\"Type\"].(string))\n\n\tif data[\"Type\"].(string) == subConfrmType {\n\t\tsubcribeURL := data[\"SubscribeURL\"].(string)\n\t\tgo confirmSubscription(subcribeURL)\n\t} else if data[\"Type\"].(string) == notificationType {\n\t\tfmt.Println(\"Recieved this message : \", data[\"Message\"].(string))\n\t}\n\n\tfmt.Fprintf(w, \"Sucess\")\n}", "func handleTextImpl(c context.Context, w http.ResponseWriter, r *http.Request, tag string) error {\n\tvar id int64\n\tif x := r.FormValue(\"x\"); x != \"\" {\n\t\txid, err := strconv.ParseUint(x, 16, 64)\n\t\tif err != nil || xid == 0 {\n\t\t\treturn fmt.Errorf(\"%w: failed to parse text id: %v\", ErrClientBadRequest, err)\n\t\t}\n\t\tid = int64(xid)\n\t} else {\n\t\t// Old link support, don't remove.\n\t\txid, err := strconv.ParseInt(r.FormValue(\"id\"), 10, 64)\n\t\tif err != nil || xid == 0 {\n\t\t\treturn fmt.Errorf(\"%w: failed to parse text id: %v\", ErrClientBadRequest, err)\n\t\t}\n\t\tid = xid\n\t}\n\tbug, crash, err := checkTextAccess(c, r, tag, id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdata, ns, err := getText(c, tag, id)\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"datastore: no such entity\") {\n\t\t\terr = fmt.Errorf(\"%w: %v\", ErrClientBadRequest, err)\n\t\t}\n\t\treturn err\n\t}\n\tif err := checkAccessLevel(c, r, config.Namespaces[ns].AccessLevel); err != nil {\n\t\treturn err\n\t}\n\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t// Unfortunately filename does not work in chrome on linux due to:\n\t// https://bugs.chromium.org/p/chromium/issues/detail?id=608342\n\tw.Header().Set(\"Content-Disposition\", \"inline; filename=\"+textFilename(tag))\n\taugmentRepro(c, w, tag, bug, crash)\n\tw.Write(data)\n\treturn nil\n}", "func (p *Plugin) ServeHTTP(c 
*plugin.Context, w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodPost {\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\n\tvar dfr IncomingRequest\n\tif err := json.NewDecoder(r.Body).Decode(&dfr); err != nil {\n\t\tp.API.LogError(\"Cannot decode\", \"err\", err.Error())\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\tp.API.LogInfo(\"user\", \"t\", dfr.Intent.Params.Username)\n\tif dfr.Intent.Params.Username == nil || *dfr.Intent.Params.Username.Resolved == \"\" {\n\n\t}\n\n\tvar response *OutgoingResponse\n\tvalidateUser := func() string {\n\t\tif dfr.User.Params.UserName == nil || *dfr.User.Params.UserName == \"\" {\n\t\t\tresponse = getResponseWithText(\"Sorry, you didn't set your mattermost username!\")\n\t\t\treturn \"\"\n\t\t}\n\t\tidB, err := p.API.KVGet(*dfr.User.Params.UserName)\n\t\tif err != nil || idB == nil {\n\t\t\tresponse = getResponseWithText(\"Sorry, you didn't enable google assistant integration!\")\n\t\t\treturn \"\"\n\t\t}\n\t\tu, _ := p.API.GetUserByUsername(*dfr.User.Params.UserName)\n\n\t\treturn u.Id\n\t}\n\n\thandler := *dfr.Handler.Name\n\t// handler = \"set_username\"\n\tswitch handler {\n\tcase \"get_status\":\n\t\t{\n\t\t\tuserId := validateUser()\n\t\t\tif userId == \"\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tvar nErr error\n\t\t\tresponse, nErr = p.handleGetStatus(userId)\n\t\t\tif nErr != nil {\n\t\t\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\tcase \"read_direct_messages\":\n\t\t{\n\t\t\tuserId := validateUser()\n\t\t\tif userId == \"\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tvar nErr error\n\t\t\tresponse, nErr = p.handleReadMessages(userId)\n\t\t\tif nErr != nil {\n\t\t\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\tcase 
\"change_status\":\n\t\t{\n\t\t\tuserId := validateUser()\n\t\t\tif userId == \"\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tvar nErr error\n\t\t\tresponse, nErr = p.handleStatusChange(*dfr.Intent.Params.Status.Resolved, userId)\n\t\t\tif nErr != nil {\n\t\t\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\tcase \"set_username\":\n\t\t{\n\t\t\tresponse = &OutgoingResponse{\n\t\t\t\tUser: &gUser{\n\t\t\t\t\tParams: gUserParams{\n\t\t\t\t\t\tUserName: dfr.Intent.Params.Username.Resolved, //model.NewString(\"sysadmin\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tPrompt: &gPrompt{},\n\t\t\t}\n\t\t}\n\tcase \"send_message\":\n\t\t{\n\t\t\tuserId := validateUser()\n\t\t\tif userId == \"\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tvar nErr error\n\t\t\tresponse, nErr = p.handleSendDM(userId, dfr.Scene.Slots.Username.Value, *dfr.Intent.Params.Message.Resolved)\n\t\t\tif nErr != nil {\n\t\t\t\tresponse = getResponseWithText(\"Sorry, can't find that user!\")\n\t\t\t}\n\t\t}\n\tdefault:\n\t\t{\n\t\t\tresponse = getResponseWithText(\"Sorry, don't know what to do!\")\n\t\t}\n\t}\n\tsuggestions := []gSuggestions{\n\t\t{Title: \"Change status to away\"},\n\t\t{Title: \"Status Report\"},\n\t\t{Title: \"Read messages\"},\n\t\t{Title: \"Write message\"},\n\t}\n\tresponse.Prompt.Suggestions = &suggestions\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tjson.NewEncoder(w).Encode(response)\n\n}", "func handleData(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(r.Method, r.URL.Path)\n\tif r.Method == \"GET\" {\n\t\t// Handle GET /data/ by getting all. 
Ignore single-item GET requests \n\t\t// for now\n\t\tif r.URL.Path == \"/data/\" {\n\t\t\tvar data []Element\n\t\t\tdatabase.RLock()\n\t\t\tdefer database.RUnlock()\n\t\t\tfor k, v := range database.elements {\n\t\t\t\tdata = append(data, Element{k, v})\n\t\t\t}\n\t\t\tpayload, err := json.Marshal(data)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"JSON error\", err.Error())\n\t\t\t\thttp.Error(w, \"Internal Server Error\", http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.Write([]byte(string(payload)))\n\t\t} else {\n\t\t\tw.Write([]byte(\"Individual GETs are not yet implemented\"))\n\t\t}\n\t} else if r.Method == \"POST\" {\n\t\t// Handle POST /data/ by adding a new element to the database...\n\t\tdatabase.Lock()\n\t\tdefer database.Unlock()\n\t\tdatabase.counter += 1\n\t\tx := database.counter\n\t\tdatabase.elements[x] = strconv.FormatInt(x, 10)\n\t\tw.Write([]byte(\"dummy data added\"))\n\n\t\t// NB: here's a helpful code snippet for reading JSON from the request:\n\t\t//\n\t\t// contents, err := ioutil.ReadAll(r.Body)\n\t\t// if err != nil {\n\t\t// ...\n\t\t// }\n\t\t// var d DataRow // NB: you'll need to define the DataRow type\n\t\t// err = json.Unmarshal(contents, &d)\n\t} else {\n\t\tw.Write([]byte(\"Method not yet supported\"))\n\t}\n}", "func (self *OFSwitch) handleMessages(dpid net.HardwareAddr, msg util.Message) {\n\tlog.Debugf(\"Received message: %+v, on switch: %s\", msg, dpid.String())\n\n\tswitch t := msg.(type) {\n\tcase *common.Header:\n\t\tswitch t.Header().Type {\n\t\tcase openflow13.Type_Hello:\n\t\t\t// Send Hello response\n\t\t\th, err := common.NewHello(4)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Error creating hello message\")\n\t\t\t}\n\t\t\tself.Send(h)\n\n\t\tcase openflow13.Type_EchoRequest:\n\t\t\t// Send echo reply\n\t\t\tres := openflow13.NewEchoReply()\n\t\t\tself.Send(res)\n\n\t\tcase openflow13.Type_EchoReply:\n\t\t\tself.lastUpdate = time.Now()\n\n\t\tcase openflow13.Type_FeaturesRequest:\n\n\t\tcase 
openflow13.Type_GetConfigRequest:\n\n\t\tcase openflow13.Type_BarrierRequest:\n\n\t\tcase openflow13.Type_BarrierReply:\n\n\t\t}\n\tcase *openflow13.ErrorMsg:\n\t\terrMsg := GetErrorMessage(t.Type, t.Code, 0)\n\t\tmsgType := GetErrorMessageType(t.Data)\n\t\tlog.Errorf(\"Received OpenFlow1.3 error: %s on message %s\", errMsg, msgType)\n\t\tresult := MessageResult{\n\t\t\tsucceed: false,\n\t\t\terrType: t.Type,\n\t\t\terrCode: t.Code,\n\t\t\txID: t.Xid,\n\t\t\tmsgType: UnknownMessage,\n\t\t}\n\t\tself.publishMessage(t.Xid, result)\n\n\tcase *openflow13.VendorHeader:\n\t\tlog.Debugf(\"Received Experimenter message, VendorType: %d, ExperimenterType: %d, VendorData: %+v\", t.Vendor, t.ExperimenterType, t.VendorData)\n\t\tswitch t.ExperimenterType {\n\t\tcase openflow13.Type_TlvTableReply:\n\t\t\treply := t.VendorData.(*openflow13.TLVTableReply)\n\t\t\tstatus := TLVTableStatus(*reply)\n\t\t\tself.tlvMgr.TLVMapReplyRcvd(self, &status)\n\t\tcase openflow13.Type_BundleCtrl:\n\t\t\tresult := MessageResult{\n\t\t\t\txID: t.Header.Xid,\n\t\t\t\tsucceed: true,\n\t\t\t\tmsgType: BundleControlMessage,\n\t\t\t}\n\t\t\treply := t.VendorData.(*openflow13.BundleControl)\n\t\t\tself.publishMessage(reply.BundleID, result)\n\t\t}\n\n\tcase *openflow13.SwitchFeatures:\n\t\tswitch t.Header.Type {\n\t\tcase openflow13.Type_FeaturesReply:\n\t\t\tgo func() {\n\t\t\t\tswConfig := openflow13.NewSetConfig()\n\t\t\t\tswConfig.MissSendLen = 128\n\t\t\t\tself.Send(swConfig)\n\t\t\t\tself.Send(openflow13.NewSetControllerID(self.ctrlID))\n\t\t\t}()\n\t\t}\n\n\tcase *openflow13.SwitchConfig:\n\t\tswitch t.Header.Type {\n\t\tcase openflow13.Type_GetConfigReply:\n\n\t\tcase openflow13.Type_SetConfig:\n\n\t\t}\n\tcase *openflow13.PacketIn:\n\t\tlog.Debugf(\"Received packet(ofctrl): %+v\", t)\n\t\t// send packet rcvd callback\n\t\tself.app.PacketRcvd(self, (*PacketIn)(t))\n\n\tcase *openflow13.FlowRemoved:\n\n\tcase *openflow13.PortStatus:\n\t\t// FIXME: This needs to propagated to the app.\n\tcase 
*openflow13.PacketOut:\n\n\tcase *openflow13.FlowMod:\n\n\tcase *openflow13.PortMod:\n\n\tcase *openflow13.MultipartRequest:\n\n\tcase *openflow13.MultipartReply:\n\t\tlog.Debugf(\"Received MultipartReply\")\n\t\trep := (*openflow13.MultipartReply)(t)\n\t\tif self.monitorEnabled {\n\t\t\tkey := fmt.Sprintf(\"%d\", rep.Xid)\n\t\t\tch, found := monitoredFlows.Get(key)\n\t\t\tif found {\n\t\t\t\treplyChan := ch.(chan *openflow13.MultipartReply)\n\t\t\t\treplyChan <- rep\n\t\t\t}\n\t\t}\n\t\t// send packet rcvd callback\n\t\tself.app.MultipartReply(self, rep)\n\tcase *openflow13.VendorError:\n\t\terrData := t.ErrorMsg.Data.Bytes()\n\t\tresult := MessageResult{\n\t\t\tsucceed: false,\n\t\t\terrType: t.Type,\n\t\t\terrCode: t.Code,\n\t\t\texperimenter: int32(t.ExperimenterID),\n\t\t\txID: t.Xid,\n\t\t}\n\t\texperimenterID := binary.BigEndian.Uint32(errData[8:12])\n\t\terrMsg := GetErrorMessage(t.Type, t.Code, experimenterID)\n\t\texperimenterType := binary.BigEndian.Uint32(errData[12:16])\n\t\tswitch experimenterID {\n\t\tcase openflow13.ONF_EXPERIMENTER_ID:\n\t\t\tswitch experimenterType {\n\t\t\tcase openflow13.Type_BundleCtrl:\n\t\t\t\tbundleID := binary.BigEndian.Uint32(errData[16:20])\n\t\t\t\tresult.msgType = BundleControlMessage\n\t\t\t\tself.publishMessage(bundleID, result)\n\t\t\t\tlog.Errorf(\"Received Vendor error: %s on ONFT_BUNDLE_CONTROL message\", errMsg)\n\t\t\tcase openflow13.Type_BundleAdd:\n\t\t\t\tbundleID := binary.BigEndian.Uint32(errData[16:20])\n\t\t\t\tresult.msgType = BundleAddMessage\n\t\t\t\tself.publishMessage(bundleID, result)\n\t\t\t\tlog.Errorf(\"Received Vendor error: %s on ONFT_BUNDLE_ADD_MESSAGE message\", errMsg)\n\t\t\t}\n\t\tdefault:\n\t\t\tlog.Errorf(\"Received Vendor error: %s\", errMsg)\n\t\t}\n\t}\n}", "func DoString(L *lua.LState) int {\n\tbody := L.CheckString(1)\n\tp := NewLuaPlugin(L, 2)\n\tp.body = &body\n\tud := L.NewUserData()\n\tud.Value = p\n\tL.SetMetatable(ud, L.GetTypeMetatable(`plugin_ud`))\n\tL.Push(ud)\n\treturn 
1\n}", "func (h DefaultHandler) Handle(client *hub.Client, data []byte) {\n\tclient.SendJSON(\"error\", \"unknown message type\")\n}", "func WriteString(data []byte, str string, stype string_t, pos *int, l int) {\n switch stype {\n case NULLSTR:\n checkSize(len(data[*pos:]), len(str))\n // Write the string and then terminate with 0x00 byte.\n copy(data[*pos:], str)\n checkSize(len(data[*pos:]), len(str) + 1)\n *pos += len(str)\n data[*pos] = 0x00\n *pos++\n\n case LENENCSTR:\n // Write the encoded length.\n WriteLenEncInt(data, uint64(len(str)), pos)\n // Then write the string as a FIXEDSTR.\n WriteString(data, str, FIXEDSTR, pos, l)\n\n case FIXEDSTR:\n\n checkSize(len(data[*pos:]), l)\n // Pads the string with 0's to fill the specified length l.\n copy(data[*pos:*pos+l], str)\n *pos += l\n\n case EOFSTR:\n\n checkSize(len(data[*pos:]), len(str))\n // Copies the string into the data.\n *pos += copy(data[*pos:], str)\n }\n}", "func RetriveFirstLine(str string,resp *Value){\n str=str[2:len(str)-1]\n var temp string\n for i:=0;i!=len(str);i++{\n if int(str[i])!=32{\n temp=temp+str[i:i+1]\n }\n }\n mode:=strings.Split(temp,\":\")[0]\n infor:=strings.Split(temp,\":\")[1]\n //fmt.Printf(\"mode is %s,infor is %s\\n\",mode,infor)\n attributes:=strings.Split(infor,\",\")\n io:=strings.Split(attributes[0],\"=\")[1]\n bw:=strings.Split(attributes[1],\"=\")[1]\n iops:=strings.Split(attributes[2],\"=\")[1]\n runt:=strings.Split(attributes[3],\"=\")[1]\n //fmt.Printf(\"io=%s,bw=%s,iops=%s,runt=%s\\n\",io,bw,iops,runt)\n if mode==\"read\"{\n resp.Read_io=io\n resp.Read_bw=bw\n resp.Read_iops=iops\n resp.Read_runt=runt\n }else if mode==\"write\"{\n resp.Write_io=io\n resp.Write_bw=bw\n resp.Write_iops=iops\n resp.Write_runt=runt\n }\n}", "func handle(req typhon.Request, service, path string) typhon.Response {\n\turl := fmt.Sprintf(requestFormat, service, path)\n\n\tslog.Trace(req, \"Handling parsed URL: %v\", url)\n\n\tconn, err := net.Dial(\"tcp\", fmt.Sprintf(\"%s:80\", 
service))\n\tif err != nil {\n\t\tslog.Error(req, \"Unable to connect to %s: %v\", service, err)\n\t\treturn typhon.Response{Error: terrors.NotFound(\"service\", fmt.Sprintf(\"Unable to connect to %v\", service), nil)}\n\t}\n\tdefer conn.Close()\n\n\treq.Host = service\n\treq.URL.Scheme = \"http\"\n\treq.URL.Path = \"/\" + strings.TrimPrefix(path, \"/\")\n\treq.URL.Host = service\n\n\treturn req.Send().Response()\n}", "func handle(ctx p2p.HandlerContext) error {\n\tif ctx.IsRequest() {\n\t\tctx.Logger().Debug(\"node_service/handle : Information \",\n\t\t\tzap.String(\"address\", ctx.ID().Address),\n\t\t\tzap.String(\"public key\", ctx.ID().PubKey.String()[:PrintedLength]),\n\t\t\tzap.String(\"handler context\", \"is request\"),\n\t\t)\n\t\treturn nil\n\t}\n\n\tobj, err := ctx.DecodeMessage()\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tmsg, ok := obj.(*messageOverP2P)\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tif len(msg.contents) == 0 {\n\t\treturn nil\n\t}\n\n\tatomic.AddUint32(&receivedMessageOverP2P, 1)\n\n\tctx.Logger().Debug(\"node_service/handle : Information \",\n\t\tzap.String(\"address\", ctx.ID().Address),\n\t\tzap.String(\"Public Key\", ctx.ID().PubKey.String()[:PrintedLength]),\n\t\tzap.String(\"Content Size\", humanize.Bytes(uint64(len(msg.contents)))),\n\t)\n\n\treturn nil\n}", "func Handler(ctx context.Context, input Input) (Response, error) {\n\tvar buf bytes.Buffer\n\tToken := os.Getenv(\"BOT_KEY\")\n\tdg, err := discordgo.New(\"Bot \" + Token)\n\tif err != nil {\n\t\tfmt.Println(\"Error creating bot reason: \", err)\n\t}\n\n\tfmt.Println(input.ChannelID)\n\n\tclient := dg.Open()\n\tif client != nil {\n\t\tfmt.Println(\"Error opening client session. 
Reason: \", client)\n\t}\n\n\trandom, err := dg.ChannelMessageSend(input.ChannelID, input.Text)\n\tif err != nil {\n\t\tfmt.Println(\"Message send failed, readin: \", err)\n\t}\n\tfmt.Println(random)\n\tbody, err := json.Marshal(map[string]interface{}{\n\t\t\"message\": input.Text,\n\t})\n\tif err != nil {\n\t\treturn Response{StatusCode: 404}, err\n\t}\n\tjson.HTMLEscape(&buf, body)\n\n\tresp := Response{\n\t\tStatusCode: 200,\n\t\tIsBase64Encoded: false,\n\t\tBody: buf.String(),\n\t\tHeaders: map[string]string{\n\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t\"X-MyCompany-Func-Reply\": \"hello-handler\",\n\t\t},\n\t}\n\n\treturn resp, nil\n}", "func handleHTTPResponse(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(r.URL.Path)\n\tif r.Method == \"GET\" {\n\t\tif r.URL.Path == \"/getpaths\" {\n\t\t\tquery := r.URL.Query()\n\t\t\tdestination := query[\"dest\"][0]\n\t\t\tsource := query[\"source\"][0]\n\t\t\t// sessionID := query[\"sid\"][0]\n\n\t\t\tformPath(source, destination)\n\t\t\tGetLatencyForPaths()\n\n\t\t\tmessage := \"\"\n\t\t\tfor i := 0; i < len(latencyArray); i++ {\n\t\t\t\tmessage += strconv.FormatFloat(latencyArray[i], 'f', -1, 32)\n\t\t\t\tmessage += \" \"\n\t\t\t}\n\n\t\t\tw.Write([]byte(message))\n\t\t}\n\t} else if r.Method == \"POST\" {\n\t\tif r.URL.Path == \"/setpath\" {\n\t\t\tpath := r.URL.Query()[\"path\"][0]\n\t\t\tpathNumber, err := strconv.Atoi(path)\n\t\t\tCheckError(err)\n\t\t\tsessionID := \"0001000100010001\"\n\t\t\tfmt.Println(pathsArray)\n\t\t\tselectedPath := []string{pathsArray[pathNumber][0], pathsArray[pathNumber][1], pathsArray[pathNumber][2]}\n\t\t\tsetPath(selectedPath, sessionID)\n\t\t\ttellClientToStart(pathsArray[pathNumber][0])\n\t\t\tw.Write([]byte(selectedPath[0] + \" \" + selectedPath[1] + \" \" + selectedPath[2]))\n\t\t} else if r.URL.Path == \"/resettest\" {\n\t\t\t// wipeDatabase()\n\t\t\tw.Write([]byte(\"success\"))\n\t\t}\n\t}\n}", "func GenerateStringHandler(dconfigKey string, setter SetterString, 
getter GetterString) Handler {\n\thandlerFunc := func(newConfig interface{}) {\n\n\t\toldValue := getter()\n\t\tnewValue, ok := newConfig.(string)\n\t\tif !ok {\n\t\t\tlog.WithField(\"dconfigKey\", dconfigKey).Error(\"Cannot get key from dconfig; just using the old value.\")\n\t\t} else if oldValue == newValue {\n\t\t\tlog.Info(\"oldValue is equal to newValue, do not need to update\")\n\t\t} else {\n\t\t\tsetter(string(newValue))\n\t\t}\n\t\tlog.WithFields(log.Fields{fmt.Sprintf(\"old%s\", dconfigKey): oldValue, fmt.Sprintf(\"new%s\", dconfigKey): getter()}).\n\t\t\tInfo(\"new value vs old value for the dconfig key\")\n\t}\n\treturn handlerFunc\n}", "func (handler ShortURLForwardingHandler) handleAddingNewShortURL(w http.ResponseWriter, r *http.Request) {\n\t// Read the body of the request and parse the new URL to be added.\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif checkAndHandleError(err, w, r) != nil {\n\t\treturn\n\t}\n\tvar newURL NewURLSubmission\n\tif err := json.Unmarshal(data, &newURL); err != nil {\n\t\tlog.Printf(\"Failed to unmarshal json: %v\", err.Error())\n\t}\n\n\tkey := handler.Storage.AddNewURL(newURL.URL)\n\n\t// Prepare a JSON response to let the other end know the new key for the URL.\n\tresponse := ShortenedURL{Key: key}\n\tjsonResponse, err := json.Marshal(response)\n\tif checkAndHandleError(err, w, r) != nil {\n\t\treturn\n\t}\n\n\t// Write response.\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(jsonResponse)\n\tlog.Printf(\"Added new short URL: %v as key: %v\", newURL.URL, key)\n}", "func Handle(message interface{}, ws *websocket.Conn) {\n\tvar err error\n\n\tswitch message.(type) {\n\tcase *twitter.Tweet:\n\n\t\tt := message.(*twitter.Tweet)\n\n\t\tword := WordCount(t.Text)\n\n\t\tif err = websocket.JSON.Send(ws, word); err != nil {\n\t\t\tfmt.Println(\"Can't send\")\n\t\t\tbreak\n\t\t}\n\n\t\t// fmt.Println(WordCount(t.Text))\n\t}\n}", "func (recv *Value) TakeString(vString string) {\n\tc_v_string := C.CString(vString)\n\tdefer 
C.free(unsafe.Pointer(c_v_string))\n\n\tC.g_value_take_string((*C.GValue)(recv.native), c_v_string)\n\n\treturn\n}", "func (s *Server) handleConn(conn io.ReadWriteCloser) {\n\tvar resps_bs []byte\n\n\t// receive\n\tdata, err := bufio.NewReader(conn).ReadBytes('\\n')\n\tif err != nil {\n\t\terrmsg := \"reciving connection get an error:\" + err.Error()\n\t\tfmt.Println(errmsg)\n\t\tresps_bs = encodeResponse(\n\t\t\tNewResponse(\"\", nil,\n\t\t\t\tNewJsonrpcErr(ParseErr, errmsg, nil),\n\t\t\t),\n\t\t)\n\t\tconn.Write(resps_bs)\n\t\treturn\n\t}\n\n\t// parse request, must support multi request\n\treqs, err := parseRequest(data)\n\tif err != nil {\n\t\tresps_bs = encodeResponse(\n\t\t\tNewResponse(\"\", nil,\n\t\t\t\tNewJsonrpcErr(ParseErr, err.Error(), nil),\n\t\t\t),\n\t\t)\n\t\tconn.Write(resps_bs)\n\t\treturn\n\t}\n\n\tresps := s.handleWithRequests(reqs)\n\tif len(resps) > 1 {\n\t\tresps_bs = encodeMultiResponse(resps)\n\t} else {\n\t\tresps_bs = encodeResponse(resps[0])\n\t}\n\n\tprintln(\"response:\", string(resps_bs))\n\tresps_bs = append(resps_bs, byte('\\n'))\n\tconn.Write(resps_bs)\n}", "func handleCase1(data []byte, option map[string][]string) ([]byte, error) {\n\tresult := fmt.Sprintf(\"(Executing case 1 with data (%s))\", string(data))\n\tfmt.Println(result)\n\treturn []byte(result), nil\n}", "func (c *app) handle(msg message) {\n\tswitch msg := msg.(type) {\n\n\tcase *challenge:\n\t\tgo c.handleChallenge(msg)\n\t\treturn\n\n\tcase *event:\n\t\tfor _, x := range c.domains {\n\t\t\tx.subLock.RLock()\n\t\t\tif binding, ok := x.subscriptions[msg.Subscription]; ok {\n\t\t\t\tx.subLock.RUnlock()\n\t\t\t\tDebug(\"Event %s (%d)\", binding.endpoint, binding.callback)\n\t\t\t\tgo x.handlePublish(msg, binding)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tx.subLock.RUnlock()\n\t\t\t}\n\t\t}\n\n\t\t// We can't be delivered to a sub we don't have... 
right?\n\t\tWarn(\"No handler registered for subscription:\", msg.Subscription)\n\n\tcase *invocation:\n\t\tfor _, x := range c.domains {\n\t\t\tx.regLock.RLock()\n\t\t\tif binding, ok := x.registrations[msg.Registration]; ok {\n\t\t\t\tx.regLock.RUnlock()\n\t\t\t\tDebug(\"Invoking %s (%d)\", binding.endpoint, binding.callback)\n\t\t\t\tgo x.handleInvocation(msg, binding)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tx.regLock.RUnlock()\n\t\t\t}\n\t\t}\n\n\t\ts := fmt.Sprintf(\"no handler for registration: %v\", msg.Registration)\n\t\tWarn(s)\n\n\t\tm := &errorMessage{\n\t\t\tType: iNVOCATION,\n\t\t\tRequest: msg.Request,\n\t\t\tDetails: make(map[string]interface{}),\n\t\t\tError: ErrNoSuchRegistration,\n\t\t}\n\n\t\tc.Queue(m)\n\n\t// Handle call results seperately to account for progressive calls\n\tcase *result:\n\t\t// If this is a progress call call the handler, do not alert the listener\n\t\t// Listener is only updated once the call completes\n\t\tif p, ok := msg.Details[\"progress\"]; ok {\n\t\t\tx := p.(bool)\n\t\t\tif x {\n\t\t\t\tfor _, x := range c.domains {\n\t\t\t\t\tif binding, ok := x.handlers[msg.Request]; ok {\n\t\t\t\t\t\tDebug(\"Result %s (%d)\", binding.endpoint, binding.callback)\n\t\t\t\t\t\tgo x.handleResult(msg, binding)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tc.findListener(msg)\n\t\t}\n\n\tcase *welcome:\n\t\tDebug(\"Received WELCOME, reestablishing state with the fabric\")\n\t\tc.open = true\n\t\tc.SetState(Ready)\n\n\t\t// Reset retry delay after successful connection.\n\t\tc.retryDelay = initialRetryDelay\n\n\t\tgo c.replayRegistrations()\n\t\tgo c.replaySubscriptions()\n\n\tcase *goodbye:\n\t\tc.Connection.Close(\"Fabric said goodbye. 
Closing connection\")\n\n\tdefault:\n\t\tc.findListener(msg)\n\t}\n}", "func (s *Server) handleGetData(request []byte) {\n\tvar payload serverutil.MsgGetData\n\tif err := getPayload(request, &payload); err != nil {\n\t\tlog.Panic(err)\n\t}\n\taddr := payload.AddrSender.String()\n\tp, _ := s.GetPeer(addr)\n\tp.IncreaseBytesReceived(uint64(len(request)))\n\ts.AddPeer(p)\n\ts.Log(true, fmt.Sprintf(\"GetData kind: %s, with ID:%s received from %s\", payload.Kind, hex.EncodeToString(payload.ID), addr))\n\n\tif payload.Kind == \"block\" {\n\t\t//block\n\t\t//on recupère le block si il existe\n\t\tblock, _ := s.chain.GetBlockByHash(payload.ID)\n\t\tif block != nil {\n\t\t\t//envoie le block au noeud créateur de la requete\n\t\t\ts.sendBlock(payload.AddrSender, block)\n\t\t} else {\n\t\t\tfmt.Println(\"block is nil :( handleGetData\")\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\ttime.Sleep(time.Millisecond * 50)\n\t\t\t\t\tblock, _ := s.chain.GetBlockByHash(payload.ID)\n\t\t\t\t\tif block != nil {\n\t\t\t\t\t\ts.sendBlock(payload.AddrSender, block)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t} else {\n\t\ttx := mempool.Mempool.GetTx(hex.EncodeToString(payload.ID))\n\t\tif tx != nil {\n\t\t\ts.SendTx(payload.AddrSender, tx)\n\t\t}\n\t}\n}", "func handleEvent(e *event) {\n\tswitch e.eType {\n\tcase Follow:\n\t\tfollow(e)\n\tcase Unfollow:\n\t\tunFollow(e)\n\tcase Broadcast:\n\t\tclients.Range(func(h *ClientHandler) {\n\t\t\th.Write(e)\n\t\t})\n\tcase PrivateMessage:\n\t\tif h, ok := clients.Get(e.to); ok {\n\t\t\th.Write(e)\n\t\t}\n\tcase StatusUpdate:\n\t\tupdateStatus(e)\n\tdefault:\n\t\tlog.Panicf(\"Could not recognize event type %v\", e.eType)\n\t}\n}", "func (c app) handle(msg message) {\n\tswitch msg := msg.(type) {\n\n\tcase *event:\n\t\tfor _, x := range c.domains {\n\t\t\tif binding, ok := x.subscriptions[msg.Subscription]; ok {\n\t\t\t\tgo x.handlePublish(msg, binding)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tWarn(\"No handler registered for subscription:\", 
msg.Subscription)\n\n\tcase *invocation:\n\t\tfor _, x := range c.domains {\n\t\t\tif binding, ok := x.registrations[msg.Registration]; ok {\n\t\t\t\tgo x.handleInvocation(msg, binding)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tWarn(\"No handler registered for registration:\", msg.Registration)\n\t\ts := fmt.Sprintf(\"no handler for registration: %v\", msg.Registration)\n\t\tm := &errorMessage{Type: iNVOCATION, Request: msg.Request, Details: make(map[string]interface{}), Error: s}\n\n\t\tif err := c.Send(m); err != nil {\n\t\t\tWarn(\"error sending message:\", err)\n\t\t}\n\n\tcase *goodbye:\n\t\tc.Close(\"Fabric said goodbye. Closing connection\")\n\n\tdefault:\n\t\tid, ok := requestID(msg)\n\n\t\t// Catch control messages here and replace getMessageTimeout\n\n\t\tif ok {\n\t\t\tif l, found := c.listeners[id]; found {\n\t\t\t\tl <- msg\n\t\t\t} else {\n\t\t\t\tlog.Println(\"no listener for message\", msg)\n\t\t\t\tInfo(\"Listeners: \", c.listeners)\n\t\t\t\tpanic(\"Unhandled message!\")\n\t\t\t}\n\t\t} else {\n\t\t\tpanic(\"Bad handler picking up requestID!\")\n\t\t}\n\t}\n}", "func handleConnection(conn net.Conn) {\n\tencoder := json.NewEncoder(conn)\n\tdecoder := json.NewDecoder(conn)\n\n\tvar incomingMsg BackendPayload\n\t// recieveing the response from the backend through the json decoder\n\terr := decoder.Decode(&incomingMsg)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tswitch incomingMsg.Mode { // choose function based on the mode sent by front end server\n\tcase \"getTasks\":\n\t\tgetTasks(encoder)\n\tcase \"createTask\":\n\t\tcreateTask(incomingMsg)\n\tcase \"updateTask\":\n\t\tupdateTask(incomingMsg)\n\tcase \"deleteTask\":\n\t\tdeleteTask(incomingMsg)\n\t}\n}", "func (m *WebsocketRoutineManager) websocketDataHandler(exchName string, data interface{}) error {\n\tswitch d := data.(type) {\n\tcase string:\n\t\tlog.Infoln(log.WebsocketMgr, d)\n\tcase error:\n\t\treturn fmt.Errorf(\"exchange %s websocket error - %s\", exchName, data)\n\tcase 
stream.FundingData:\n\t\tif m.verbose {\n\t\t\tlog.Infof(log.WebsocketMgr, \"%s websocket %s %s funding updated %+v\",\n\t\t\t\texchName,\n\t\t\t\tm.FormatCurrency(d.CurrencyPair),\n\t\t\t\td.AssetType,\n\t\t\t\td)\n\t\t}\n\tcase *ticker.Price:\n\t\tif m.syncer.IsRunning() {\n\t\t\terr := m.syncer.WebsocketUpdate(exchName,\n\t\t\t\td.Pair,\n\t\t\t\td.AssetType,\n\t\t\t\tSyncItemTicker,\n\t\t\t\tnil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\terr := ticker.ProcessTicker(d)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tm.syncer.PrintTickerSummary(d, \"websocket\", err)\n\tcase []ticker.Price:\n\t\tfor x := range d {\n\t\t\tif m.syncer.IsRunning() {\n\t\t\t\terr := m.syncer.WebsocketUpdate(exchName,\n\t\t\t\t\td[x].Pair,\n\t\t\t\t\td[x].AssetType,\n\t\t\t\t\tSyncItemTicker,\n\t\t\t\t\tnil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\terr := ticker.ProcessTicker(&d[x])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tm.syncer.PrintTickerSummary(&d[x], \"websocket\", err)\n\t\t}\n\tcase stream.KlineData:\n\t\tif m.verbose {\n\t\t\tlog.Infof(log.WebsocketMgr, \"%s websocket %s %s kline updated %+v\",\n\t\t\t\texchName,\n\t\t\t\tm.FormatCurrency(d.Pair),\n\t\t\t\td.AssetType,\n\t\t\t\td)\n\t\t}\n\tcase []stream.KlineData:\n\t\tfor x := range d {\n\t\t\tif m.verbose {\n\t\t\t\tlog.Infof(log.WebsocketMgr, \"%s websocket %s %s kline updated %+v\",\n\t\t\t\t\texchName,\n\t\t\t\t\tm.FormatCurrency(d[x].Pair),\n\t\t\t\t\td[x].AssetType,\n\t\t\t\t\td)\n\t\t\t}\n\t\t}\n\tcase *orderbook.Depth:\n\t\tbase, err := d.Retrieve()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif m.syncer.IsRunning() {\n\t\t\terr := m.syncer.WebsocketUpdate(exchName,\n\t\t\t\tbase.Pair,\n\t\t\t\tbase.Asset,\n\t\t\t\tSyncItemOrderbook,\n\t\t\t\tnil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tm.syncer.PrintOrderbookSummary(base, \"websocket\", nil)\n\tcase *order.Detail:\n\t\tif !m.orderManager.Exists(d) {\n\t\t\terr 
:= m.orderManager.Add(d)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tm.printOrderSummary(d, false)\n\t\t} else {\n\t\t\tod, err := m.orderManager.GetByExchangeAndID(d.Exchange, d.OrderID)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = od.UpdateOrderFromDetail(d)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\terr = m.orderManager.UpdateExistingOrder(od)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tm.printOrderSummary(d, true)\n\t\t}\n\tcase []order.Detail:\n\t\tfor x := range d {\n\t\t\tif !m.orderManager.Exists(&d[x]) {\n\t\t\t\terr := m.orderManager.Add(&d[x])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tm.printOrderSummary(&d[x], false)\n\t\t\t} else {\n\t\t\t\tod, err := m.orderManager.GetByExchangeAndID(d[x].Exchange, d[x].OrderID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\terr = od.UpdateOrderFromDetail(&d[x])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\terr = m.orderManager.UpdateExistingOrder(od)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tm.printOrderSummary(&d[x], true)\n\t\t\t}\n\t\t}\n\tcase order.ClassificationError:\n\t\treturn fmt.Errorf(\"%w %s\", d.Err, d.Error())\n\tcase stream.UnhandledMessageWarning:\n\t\tlog.Warnln(log.WebsocketMgr, d.Message)\n\tcase account.Change:\n\t\tif m.verbose {\n\t\t\tm.printAccountHoldingsChangeSummary(d)\n\t\t}\n\tcase []account.Change:\n\t\tif m.verbose {\n\t\t\tfor x := range d {\n\t\t\t\tm.printAccountHoldingsChangeSummary(d[x])\n\t\t\t}\n\t\t}\n\tcase []trade.Data:\n\t\tif m.verbose {\n\t\t\tlog.Infof(log.Trade, \"%+v\", d)\n\t\t}\n\tcase []fill.Data:\n\t\tif m.verbose {\n\t\t\tlog.Infof(log.Fill, \"%+v\", d)\n\t\t}\n\tdefault:\n\t\tif m.verbose {\n\t\t\tlog.Warnf(log.WebsocketMgr,\n\t\t\t\t\"%s websocket Unknown type: %+v\",\n\t\t\t\texchName,\n\t\t\t\td)\n\t\t}\n\t}\n\treturn nil\n}", "func handler(conn net.Conn) {\n\t//\twe ensure that the connection will be 
closed after this function finished it's execution\n\tdefer conn.Close()\n\n\t//\twe capture curent timestamp\n\tnow := time.Now()\n\n\t//\twe read data from client\n\tbuf := make([]byte, 512, 512)\n\tconn.Read(buf)\n\tdata := string(buf)\n\tfmt.Printf(\"Data: %s\\n\", data)\n\n\t//\twe translate the data into time format\n\tresult := \"\"\n\tswitch data {\n\tcase \"RFC1123\":\n\t\tresult = now.Format(time.RFC1123)\n\tcase \"RFC3339\":\n\t\tresult = now.Format(time.RFC1123)\n\tdefault:\n\t\t//\tcustom format\n\t\tresult = now.Format(\"2006-01-02 15:04:05.999999999\")\n\t}\n\n\t//\twe send the tresponse\n\tconn.Write([]byte(result))\n}", "func handleGet(f func(name, addr string) (string, string, error)) http.Handler {\n\treturn http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {\n\t\trw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t\tenc := json.NewEncoder(rw)\n\n\t\tvars := mux.Vars(req)\n\t\tname, addr, err := f(vars[\"name\"], vars[\"addr\"])\n\t\tif err != nil {\n\t\t\trw.WriteHeader(http.StatusNotFound)\n\t\t\tenc.Encode(Error{Error: err.Error()})\n\t\t\treturn\n\t\t}\n\t\tif addr != \"\" {\n\t\t\taddr = fmt.Sprintf(\"0x%v\", addr)\n\t\t}\n\n\t\tenc.Encode(Response{\n\t\t\tName: name,\n\t\t\tAddr: addr,\n\t\t})\n\t})\n}", "func processRequest(req *CustomProtocol.Request) {\n\n\tpayload := CustomProtocol.ParsePayload(req.Payload)\n\tswitch req.OpCode {\n\tcase CustomProtocol.ActivateGPS:\n\t\tflagStolen(\"gps\", payload[0])\n\t\tres := make([]byte, 2)\n\t\tres[0] = 1\n\t\treq.Response <- res\n\tcase CustomProtocol.FlagStolen:\n\t\tflagStolen(\"laptop\", payload[0])\n\t\tres := make([]byte, 2)\n\t\tres[0] = 1\n\t\treq.Response <- res\n\tcase CustomProtocol.FlagNotStolen:\n\t\t//TODO: temp fix < 12\n\t\tif len(payload[0]) < 12 {\n\t\t\tflagNotStolen(\"gps\", payload[0])\n\t\t} else {\n\t\t\tflagNotStolen(\"laptop\", payload[0])\n\t\t}\n\t\tres := make([]byte, 2)\n\t\tres[0] = 1 //TO DO CHANGE\n\t\treq.Response <- res\n\tcase 
CustomProtocol.NewAccount:\n\t\tSignUp(payload[0], payload[1], payload[2], payload[3], payload[4])\n\t\tres := make([]byte, 2)\n\t\tres[0] = 1\n\t\treq.Response <- res\n\tcase CustomProtocol.NewDevice:\n\t\tregisterNewDevice(payload[0], payload[1], payload[2], payload[3])\n\t\tres := make([]byte, 2)\n\t\tres[0] = 1\n\t\treq.Response <- res\n\tcase CustomProtocol.UpdateDeviceGPS:\n\t\tupdated := updateDeviceGps(payload[0], payload[1], payload[2])\n\t\tres := make([]byte, 2)\n\t\tif updated == true {\n\t\t\tres[0] = 1\n\t\t} else {\n\t\t\tres[0] = 0\n\t\t}\n\t\treq.Response <- res\n\tcase CustomProtocol.VerifyLoginCredentials:\n\t\taccountValid, passwordValid := VerifyAccountInfo(payload[0], payload[1])\n\t\tres := make([]byte, 2)\n\t\tif accountValid {\n\t\t\tres[0] = 1\n\t\t\tif passwordValid {\n\t\t\t\tres[1] = 1\n\t\t\t} else {\n\t\t\t\tres[0] = 0\n\t\t\t}\n\t\t} else {\n\t\t\tres[0] = 0\n\t\t\tres[1] = 0\n\t\t}\n\t\treq.Response <- res\n\tcase CustomProtocol.SetAccount:\n\t\taccSet := updateAccountInfo(payload[0], payload[1], payload[2])\n\t\tres := make([]byte, 1)\n\t\tif accSet == true {\n\t\t\tres[0] = 1\n\t\t} else {\n\t\t\tres[0] = 0\n\t\t}\n\t\treq.Response <- res\n\tcase CustomProtocol.GetDevice:\n\t\tres := make([]byte, 5)\n\n\t\tif payload[0] == \"gps\" {\n\t\t\tres = getGpsDevices(payload[1])\n\t\t} else if payload[0] == \"laptop\" {\n\t\t\tres = getLaptopDevices(payload[1])\n\t\t} else {\n\t\t\tfmt.Println(\"CustomProtocol.GetDevice payload[0] must be either gps or laptop\")\n\t\t}\n\t\treq.Response <- res\n\tcase CustomProtocol.SetDevice:\n\tcase CustomProtocol.GetDeviceList:\n\t\tres := []byte{}\n\t\tres = append(res, getLaptopDevices(payload[0])...)\n\t\tres = append(res, 0x1B)\n\t\tres = append(res, getGpsDevices(payload[0])...)\n\t\treq.Response <- res\n\tcase CustomProtocol.CheckDeviceStolen:\n\t\tisStolen := IsDeviceStolen(payload[0])\n\t\tres := make([]byte, 1)\n\t\tif isStolen == true {\n\t\t\tres[0] = 1\n\t\t} else {\n\t\t\tres[0] = 
0\n\t\t}\n\t\treq.Response <- res\n\tcase CustomProtocol.UpdateUserKeylogData:\n\t\tboolResult := UpdateKeylog(payload[0], payload[1])\n\t\tres := make([]byte, 1)\n\t\tif boolResult == true {\n\t\t\tres[0] = 1\n\t\t} else {\n\t\t\tres[0] = 0\n\t\t}\n\t\treq.Response <- res\n\tcase CustomProtocol.UpdateUserIPTraceData:\n\t\tboolResult := UpdateTraceRoute(payload[0], payload[1])\n\t\tres := make([]byte, 1)\n\t\tif boolResult == true {\n\t\t\tres[0] = 1\n\t\t} else {\n\t\t\tres[0] = 0\n\t\t}\n\t\treq.Response <- res\n\tdefault:\n\t}\n}", "func Handle(req []byte) string {\n\tlog.Println(\"Request with \", req)\n\tapi = CreateAPI()\n\tresult, err := api.All()\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\tjsonBytes, err := json.Marshal(result)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\treturn string(jsonBytes)\n}", "func main() {\n\n fmt.Println(\"Launching server...\")\n conn, err := net.Dial(\"tcp\", \"127.0.0.1:3001\")\n if err != nil {\n\t fmt.Println(\"error\")\n }\n // listen on all interfaces\n// ln, _ := net.Listen(\"tcp\", \":3001\")\n\n // accept connection on port\n //conn, _ := ln.Accept()\n // buf := make([]byte, 0, 8192) \n //tmp := make([]byte, 8192);\n // run loop forever (or until ctrl-c)\n for {\n // will listen for message to process ending in newline (\\n)\n // message, _ := bufio.NewReader(conn).ReadString('\\n')\n mess, err1 := ioutil.ReadAll(conn)\n if err1 != nil {\n fmt.Println(\"error\")\n }\n\n \tfmt.Println(reflect.TypeOf(mess))\n defer conn.Close()\n domain := \"test\"\n fmt.Fprintf(conn, \"%s\", domain);\n // return ioutil.ReadAll(conn)\n // buf = append(buf, tmp[:conn])\n // fmt.Println(\"total size:\", len(buf))\n break\n // output message received\n //fmt.Print(\"Message Received:\", string(message))\n // sample process for string received\n //newmessage := strings.ToUpper(message)\n // send new string back to client\n // conn.Write([]byte(newmessage + \"\\n\"))\n }\n}", "func handlePacket(c *Conn, data []byte, addr 
Addr,\n\trh Handler) {\n\n\tmsg, err := ParseDgramMessage(data)\n\tif err != nil {\n\t\tlog.Printf(\"Error parsing %v\", err)\n\t\treturn\n\t}\n\n\trv := rh.ServeCOAP(c, msg)\n\tif rv != nil {\n\t\tTransmit(c, addr, rv)\n\t}\n}", "func onStringRoutineQuit(s *String) {\n\tstringID := s.ID\n\ts.destroyString() // delete the string on local server\n\tdispatcher_client.SendStringDelReq(stringID)\n}", "func (reqParams *ReqParams) doReqStr(out *string) (int, error) {\n\tresp, err := reqParams.do()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\terr = reqParams.readStr(resp, out)\n\tcos.DrainReader(resp.Body)\n\tresp.Body.Close()\n\treturn resp.StatusCode, err\n}", "func AddressBookHandler(w http.ResponseWriter, r *http.Request) {\n switch r.Method {\n case \"GET\":\n fetchData(r)\n case \"POST\":\n postData(r)\n default:\n fmt.Println(\"No match to route\")\n }\n\n}", "func dResponseWriter(w http.ResponseWriter, data interface{}, HStat int) error {\n\tdataType := reflect.TypeOf(data)\n\tif dataType.Kind() == reflect.String {\n\t\tw.WriteHeader(HStat)\n\t\tw.Header().Set(\"Content-Type\", \"application/text\")\n\n\t\t_, err := w.Write([]byte(data.(string)))\n\t\treturn err\n\t} else if reflect.PtrTo(dataType).Kind() == dataType.Kind() {\n\t\tw.WriteHeader(HStat)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t\toutData, err := json.MarshalIndent(data, \"\", \"\\t\")\n\t\tif err != nil {\n\t\t\tzerolog.Error().Msg(err.Error())\n\t\t\tw.Write([]byte(err.Error()))\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = w.Write(outData)\n\t\treturn err\n\t} else if reflect.Struct == dataType.Kind() {\n\t\tw.WriteHeader(HStat)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t\toutData, err := json.MarshalIndent(data, \"\", \"\\t\")\n\t\tif err != nil {\n\t\t\tzerolog.Error().Msg(err.Error())\n\t\t\tw.Write([]byte(err.Error()))\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = w.Write(outData)\n\t\treturn err\n\t} else if reflect.Slice == dataType.Kind() 
{\n\t\tw.WriteHeader(HStat)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t\toutData, err := json.MarshalIndent(data, \"\", \"\\t\")\n\t\tif err != nil {\n\t\t\tzerolog.Error().Msg(err.Error())\n\t\t\tw.Write([]byte(err.Error()))\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = w.Write(outData)\n\t\treturn err\n\t}\n\n\treturn errors.New(\"we could not be able to support data type that you passed\")\n}", "func (p *Plain_node) handleRequest(conn net.Conn) error {\n\t// Make a buffer to hold incoming data.\n\tdefer conn.Close()\n\n\tbuf := make([]byte, 1024)\n\n\t// Read the incoming connection into the buffer.\n\t_ , err := conn.Read(buf)\n\n\tif err != nil {\n\t\teprint(err)\n\t\treturn err\n\t}\n\n\tmsg := strings.Trim(string(buf), \"\\x00\")\n\tmsg = strings.Trim(msg, \"\\n\")\n\n\tswitch msg[0] {\n\t\tcase 'J':\n\t\t\tfmt.Println(DHT_PREFIX+\"Joinging Request Received.\")\n\t\t\tp.handle_join(msg, conn)\n\t\tcase 'A':\n\t\t\tfmt.Println(DHT_PREFIX+\"Join Ack Received.\")\n\t\t\tp.handle_join_ack(msg)\n\t\tcase 'B':\n\t\t\tfmt.Println(DHT_PREFIX+\"Newbie joined.\")\n\t\t\tp.add_newbie(msg)\n\t\tcase APP_PREFIX:\n\t\t\tfmt.Println(DHT_PREFIX+\"Application Data Received.\")\n\t\t\tforward_to_app(msg)\n\t\tdefault:\n\t\t\tfmt.Println(DHT_PREFIX+\"Unknown msg format\")\n\t\t//\tconn.Write([]byte(\"Don't Know What You Mean by\"+msg))\n\t}\n\n\treturn nil\n}", "func (_obj *LacService) Dispatch(ctx context.Context, _val interface{}, req *requestf.RequestPacket, resp *requestf.ResponsePacket, withContext bool) (err error) {\n\t_is := codec.NewReader(tools.Int8ToByte(req.SBuffer))\n\t_os := codec.NewBuffer()\n\tswitch req.SFuncName {\n\tcase \"test\":\n\t\terr := test(ctx, _val, _os, _is, withContext)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase \"lacTag\":\n\t\terr := lacTag(ctx, _val, _os, _is, withContext)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\tdefault:\n\t\treturn fmt.Errorf(\"func mismatch\")\n\t}\n\tvar _status map[string]string\n\ts, 
ok := current.GetResponseStatus(ctx)\n\tif ok && s != nil {\n\t\t_status = s\n\t}\n\tvar _context map[string]string\n\tc, ok := current.GetResponseContext(ctx)\n\tif ok && c != nil {\n\t\t_context = c\n\t}\n\t*resp = requestf.ResponsePacket{\n\t\tIVersion: 1,\n\t\tCPacketType: 0,\n\t\tIRequestId: req.IRequestId,\n\t\tIMessageType: 0,\n\t\tIRet: 0,\n\t\tSBuffer: tools.ByteToInt8(_os.ToBytes()),\n\t\tStatus: _status,\n\t\tSResultDesc: \"\",\n\t\tContext: _context,\n\t}\n\treturn nil\n}", "func (s *SWIM) handle(msg pb.Message) {\n\n\ts.handlePbk(msg.PiggyBack)\n\n\tswitch p := msg.Payload.(type) {\n\tcase *pb.Message_Ping:\n\t\ts.handlePing(msg)\n\tcase *pb.Message_Ack:\n\t\t// handle ack\n\tcase *pb.Message_IndirectPing:\n\t\ts.handleIndirectPing(msg)\n\tcase *pb.Message_Membership:\n\t\ts.handleMembership(p.Membership, msg.Address)\n\tdefault:\n\n\t}\n}", "func (sock *Server) parse(line string) {\n\tsplit := strings.SplitN(line, \" \", 4)\n\tsplit = append(split, make([]string, 4-len(split), 4-len(split))...)\n\n\tswitch true {\n\tcase split[0] == \"PING\":\n\t\tsock.pong(split[1]) //Ping e.g.: PING :B97B6379\n\tcase split[1] == \"JOIN\":\n\t\teventOnJoin(sock, split[2][1:], getNick(split[0]))\n\tcase split[1] == \"PART\":\n\t\tif len(split[3]) == 0 {\n\t\t\tsplit[3] = \" \"\n\t\t}\n\t\teventOnPart(sock, split[2], getNick(split[0]), split[3][1:])\n\tcase split[1] == \"QUIT\":\n\t\tif split[3] != \"\" {\n\t\t\tsplit[2] += \" \" + split[3]\n\t\t}\n\t\tif len(split[2]) == 0 {\n\t\t\tsplit[2] = \" \"\n\t\t}\t\n\t\teventOnQuit(sock, getNick(split[0]), split[2][1:])\n\tcase split[1] == \"PRIVMSG\":\n\t\tnick := getNick(split[0])\n\t\tchannel := split[2]\n\t\tif channel == sock.Nickname {\n\t\t\tchannel = nick\n\t\t}\n\t\tif len(split[3]) == 0 {\n\t\t\tsplit[3] = \" \"\n\t\t}\n\t\teventOnPrivmsg(sock, channel, nick, split[3][1:])\n\tcase split[1] == \"NOTICE\":\n\t\tnick := getNick(split[0])\n\t\tchannel := split[2]\n\t\tif channel == sock.Nickname {\n\t\t\tchannel = 
nick\n\t\t}\n\t\tif len(split[3]) == 0 {\n\t\t\tsplit[3] = \" \"\n\t\t}\t\n\t\teventOnNotice(sock, channel, nick, split[3][1:])\n\tcase isNum(split[1]):\n\t\teventOnReply(sock, split[1], split[2], split[3])\n\t}\n}", "func Handler(m *tb.Message) {\n\n\tfmt.Printf(\"%s, by %s %d\\n\", m.Text, m.Sender.FirstName, m.Sender.ID) // Terminal output\n\n\t// BadWordsParser treats every badword senteces\n\tBadwordsParser(m, m.Text)\n}", "func (msq *MockSend) handler() {\nout:\n\tfor {\n\t\tselect {\n\t\tcase <-msq.quit:\n\t\t\tbreak out\n\t\tcase inv := <-msq.requestQueue:\n\t\t\tmsq.conn.RequestData(inv)\n\t\tcase msg := <-msq.msgQueue:\n\t\t\tmsq.conn.WriteMessage(msg)\n\t\t}\n\t}\n}", "func (f *Handler) dispatch(msg *provisioners.Message) error {\n\tf.logger.Info(\"Message to be sent = \", zap.Any(\"msg\", msg))\n\t f.logger.Info(\"Message Payload\", zap.Any(\"payload\", string(msg.Payload[:])))\n\t\n\t jStr := string(msg.Payload[:])\n\t fmt.Println(jStr)\n \n\t type Payload struct {\n\t\t Id string `json:\"id\"`\n\t\t Data string `json:\"data\"`\n\t }\n\t \n\t var payload Payload\n \n\t json.Unmarshal([]byte(jStr), &payload)\n\t fmt.Printf(\"%+v\\n\", payload.Data)\n\t dataDec, _ := b64.StdEncoding.DecodeString(payload.Data)\n\t fmt.Printf(\"%+v\\n\", string(dataDec))\n\t \n\t type Data struct {\n\t\t Source string `json:\"source\"`\n\t\t Type string `json:\"type\"`\n\t }\n\t \n\t var data Data\n\t \n\t \n\t json.Unmarshal([]byte(string(dataDec)), &data)\n\t fmt.Printf(\"%+v\\n\", data)\n\t var destination = f.destination\n\t if data.Source == \"GITHUB\" {\n\t\tdestination = getRequiredEnv(\"GITHUB_CHANNEL_URL\")\n\t } else if data.Source == \"FRESHBOOKS\" {\n\t\t destination = getRequiredEnv(\"FRESHBOOKS_CHANNEL_URL\")\n\t } else {\n\t\t destination = getRequiredEnv(\"COMMON_CHANNEL_URL\")\n\t }\n\t fmt.Printf(\"%+v\\n\", destination)\n\n\t err := f.dispatcher.DispatchMessage(msg, destination, \"\", provisioners.DispatchDefaults{})\n\t if err != nil {\n\t\t 
f.logger.Error(\"Error dispatching message\", zap.String(\"destination\", destination))\n\t\t f.logger.Error(\"Error received\", zap.Error(err))\n\t }\n\t return err\n }", "func EventHandler(w http.ResponseWriter, r *http.Request) {\n\tr.ParseForm()\n\tvar remote string\n\tif tmp := r.Header.Get(\"X-Forwarded-For\"); tmp != \"\" {\n\t\tremote = tmp\n\t} else {\n\t\tremote = r.RemoteAddr\n\t}\n\tlog.WithFields(logrus.Fields{\n\t\t\"module\": \"adwo\",\n\t}).Debugln(\"Incomming event from:\", remote, \"With Header:\", r.Header)\n\tlog.WithFields(logrus.Fields{\n\t\t\"module\": \"adwo\",\n\t}).Debugln(\"Request params:\", r.Form)\n\t// required fields\n\tif len(r.Form[\"appid\"]) < 1 {\n\t\tErrorAndReturnCode(w, \"Missing Required field: No appid\", 400)\n\t\treturn\n\t}\n\tif len(r.Form[\"adname\"]) < 1 {\n\t\tErrorAndReturnCode(w, \"Missing Required field: No adname\", 400)\n\t\treturn\n\t}\n\tif len(r.Form[\"adid\"]) < 1 {\n\t\tErrorAndReturnCode(w, \"Missing Required field: No adid\", 400)\n\t\treturn\n\t}\n\tif len(r.Form[\"device\"]) < 1 {\n\t\tErrorAndReturnCode(w, \"Missing Required field: No device\", 400)\n\t\treturn\n\t}\n\tif len(r.Form[\"idfa\"]) < 1 {\n\t\tErrorAndReturnCode(w, \"Missing Required field: No idfa\", 400)\n\t\treturn\n\t}\n\tif len(r.Form[\"point\"]) < 1 {\n\t\tErrorAndReturnCode(w, \"Missing Required field: No point\", 400)\n\t\treturn\n\t}\n\tif len(r.Form[\"ts\"]) < 1 {\n\t\tErrorAndReturnCode(w, \"Missing Required field: No ts\", 400)\n\t\treturn\n\t}\n\tif len(r.Form[\"sign\"]) < 1 {\n\t\tErrorAndReturnCode(w, \"Missing Required field: No sign\", 400)\n\t\treturn\n\t}\n\tif len(r.Form[\"keyword\"]) < 1 {\n\t\tErrorAndReturnCode(w, \"Missing Required field: No keyword\", 400)\n\t\treturn\n\t}\n\t// set a new avro record\n\tstr := fmt.Sprintf(\"adid=%sadname=%sappid=%sdevice=%sidfa=%spoint=%sts=%skey=%s\", r.Form[\"adid\"][0], r.Form[\"adname\"][0], r.Form[\"appid\"][0], r.Form[\"device\"][0], r.Form[\"idfa\"][0], r.Form[\"point\"][0], 
r.Form[\"ts\"][0], conf.Extension.Anwo.Key)\n\tcrypted := md5.Sum([]byte(str))\n\tif fmt.Sprintf(\"%x\", crypted) != strings.Split(r.Form[\"sign\"][0], \",\")[0] {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"module\": \"adwo\",\n\t\t}).Warnf(\"Sign not matched!: %x :%s\\n, bypass sign check? %s\", crypted, r.Form[\"sign\"][0], *sign_check)\n\t\tif !*sign_check {\n\t\t\tErrorAndReturnCode(w, \"Sign mismatched!\", 400)\n\t\t\treturn\n\t\t}\n\t}\n\trecord, err := avro.NewRecord()\n\tif err != nil {\n\t\tErrorAndReturnCode(w, \"Failed to set a new avro record:\"+err.Error(), 500)\n\t\treturn\n\t}\n\t// optional fields\n\tif len(r.Form[\"ip\"]) > 0 {\n\t\trecord.Set(\"ip\", r.Form[\"ip\"][0])\n\t}\n\t// set required fields\n\trecord.Set(\"did\", r.Form[\"idfa\"][0])\n\tnsec, err := strconv.ParseInt(r.Form[\"ts\"][0], 10, 64)\n\tif err != nil {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"module\": \"adwo\",\n\t\t}).Errorln(\"Failed to parse ts to int:\", err)\n\t\tErrorAndReturnCode(w, \"Failed to parse ts:\"+err.Error(), 500)\n\t\treturn\n\t}\n\tt := time.Unix(0, nsec*1000000)\n\trecord.Set(\"timestamp\", t.Format(time.RFC3339))\n\trecord.Set(\"id\", r.Form[\"keyword\"][0])\n\trecord.Set(\"event\", \"anwo_postback\")\n\trecord.Set(\"os\", \"ios\")\n\t// extensions fields\n\textension := map[string](interface{}){}\n\tfor k, v := range r.Form {\n\t\tif k != \"ip\" && k != \"aid\" && k != \"idfa\" && k != \"timestamp\" && k != \"keyword\" && k != \"sign\" && k != \"ts\" {\n\t\t\textension[k] = v[0]\n\t\t}\n\t}\n\tif len(extension) != 0 {\n\t\trecord.Set(\"extension\", extension)\n\t}\n\tlog.WithFields(logrus.Fields{\n\t\t\"module\": \"adwo\",\n\t\t\"record\": record.String(),\n\t}).Infoln(\"Recieved post back.\")\n\t// encode avro\n\tbuf := new(bytes.Buffer)\n\tif err = avro.Encode(buf, record); err != nil {\n\t\tErrorAndReturnCode(w, \"Failed to encode avro record:\"+err.Error(), 500)\n\t\treturn\n\t}\n\turl := fmt.Sprintf(\"%s?params=%s\", 
conf.Extension.Anwo.Td_postback_url, r.Form[\"keyword\"][0])\n\tgo func(url string) {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"module\": \"adwo\",\n\t\t\t\"url\": url,\n\t\t}).Infoln(\"Send postback to adserver with request url.\")\n\n\t\trequest, err := http.NewRequest(\"GET\", url, nil)\n\t\tif err != nil {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"module\": \"adwo\",\n\t\t\t}).Errorln(\"Failed to create request:\", err)\n\t\t\treturn\n\t\t}\n\t\trequest.Header.Add(\"Connection\", \"keep-alive\")\n\t\tresp, err := client.Do(request)\n\t\tif err != nil {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"module\": \"adwo\",\n\t\t\t}).Errorln(\"Failed to send clk to remote server:\", err)\n\t\t\treturn\n\t\t}\n\t\tif resp.StatusCode != 200 {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"module\": \"adwo\",\n\t\t\t}).Errorln(\"Error when send td_postback:\", resp.Status)\n\t\t\tstr, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"module\": \"adwo\",\n\t\t\t}).Debugln(\"Resp body:\", string(str))\n\t\t\tresp.Body.Close()\n\t\t\treturn\n\t\t}\n\t\tio.Copy(ioutil.Discard, resp.Body)\n\t\tresp.Body.Close()\n\t}(url)\n\n\t// send to kafka\n\tpart, offset, err := kafka.SendByteMessage(buf.Bytes(), \"default\")\n\tif err != nil {\n\t\tfail_safe.Println(\"error:\", err)\n\t\tfail_safe.Println(\"record:\", record)\n\t\tfail_safe.Println(\"data:\", buf.Bytes())\n\t\tErrorAndReturnCode(w, \"Failed to send message to kafka:\"+err.Error()+\"Data has been writen to a backup file. 
Please contact us.\", 200)\n\t\treturn\n\t}\n\t// done\n\tlog.WithFields(logrus.Fields{\n\t\t\"module\": \"adwo\",\n\t}).Debugf(\"New record partition=%d\\toffset=%d\\n\", part, offset)\n\tw.WriteHeader(200)\n\tfmt.Fprintf(w, \"1 messages have been writen.\")\n}", "func (w *RESPWriter) writeStr(s string) {\n\tw.buf.WriteRune(respSimpleString)\n\tw.buf.WriteString(s)\n\tw.buf.Write(DELIMS)\n}", "func FwdHandler (data Message) (bool, bool){\n\tidentifier := data.GetId() + strconv.Itoa(data.GetRound()) + data.GetSenderId();\n\t/*\n\t\thave seen echo + 1\n\t*/\n\t//data type check\n\thashStr := ConvertBytesToString(Hash([]byte(data.GetData())))\n\tfmt.Printf(\"Fwd: %+v\\n\",data)\n\tm := REQStruct{Header:REQ, Id:data.GetId(), HashData:hashStr, Round: data.GetRound()}\n\tif hasSent(ReqSentSet[m], data.GetSenderId()) {\n\t\tif _,seen := FwdReceiveSet[identifier]; !seen {\n\t\t\tFwdReceiveSet[identifier] = true\n\t\t\tDataSet[data.GetData()] = hashStr\n\t\t\t//check\n\t\t\treturn true, true\n\t\t}\n\t\treturn true, false\n\t}\n\treturn false, false\n\n}", "func packetFunc_client(socketid int, buff []byte, nlen int) error {\n\t//llog.Debugf(\"packetFunc: socketid=%d, bufferlen=%d\", socketid, nlen)\n\ttarget, name, buffbody, err := message.UnPackHead(buff, nlen)\n\t//llog.Debugf(\"packetFunc_client %d %s %d\", target, name, nlen)\n\tif nil != err {\n\t\treturn fmt.Errorf(\"packetFunc_client Decode error: %s\", err.Error())\n\t\t//This.closeClient(socketid)\n\t} else {\n\t\tty, ok := This.netMap.Load(name)\n\t\tif !ok { //msg to me,client使用的是server type\n\t\t\thandler, ok := handlerMap[name]\n\t\t\tif ok {\n\t\t\t\tnm := &gorpc.M{Id: socketid, Name: name, Data: buffbody}\n\t\t\t\tgorpc.MGR.Send(handler, \"ServiceHandler\", nm)\n\t\t\t} else {\n\t\t\t\tllog.Errorf(\"packetFunc_client handler is nil, drop it[%s]\", name)\n\t\t\t}\n\t\t} else { //msg to other server\n\t\t\ttarget = ty.(int)\n\t\t\trecvPackMsgClient(socketid, target, buff)\n\t\t}\n\t}\n\treturn nil\n}", "func 
jsonHandler(w http.ResponseWriter, r *http.Request) {\r\n w.Header().Set(\"Content-Type\", \"application/json\")\r\n json.NewEncoder(w).Encode(&Message{helloWorldString})\r\n}" ]
[ "0.6016482", "0.5864663", "0.58399796", "0.5835402", "0.5817867", "0.57098216", "0.57005876", "0.5564361", "0.5549767", "0.5534213", "0.55311394", "0.55142814", "0.55067146", "0.55020815", "0.55018497", "0.5486916", "0.5477967", "0.5446462", "0.54205203", "0.5399795", "0.5399771", "0.5349209", "0.5348368", "0.5284797", "0.527199", "0.5247757", "0.5218177", "0.5216902", "0.51996976", "0.517995", "0.5164879", "0.5161571", "0.51570237", "0.51424086", "0.51348066", "0.513256", "0.5115899", "0.51131994", "0.5098841", "0.5092392", "0.50729895", "0.5066939", "0.50627583", "0.5039263", "0.50316626", "0.5029464", "0.50285023", "0.50280946", "0.5008716", "0.49921098", "0.49913904", "0.49911198", "0.4985988", "0.498529", "0.4981996", "0.49695635", "0.49663702", "0.49567568", "0.49562904", "0.49513865", "0.49493065", "0.49406213", "0.49340162", "0.4927172", "0.49155903", "0.49148983", "0.4914885", "0.49066415", "0.49046743", "0.49031684", "0.4900536", "0.4899137", "0.48969698", "0.48935637", "0.48932892", "0.48854372", "0.48729306", "0.48709944", "0.48692775", "0.48560435", "0.4855074", "0.48524755", "0.4847279", "0.48466232", "0.48464787", "0.484585", "0.48441002", "0.48436895", "0.48414657", "0.48378026", "0.4831639", "0.48297724", "0.48267126", "0.48223317", "0.48221615", "0.4821536", "0.4821255", "0.48186234", "0.48185423", "0.48179117" ]
0.677268
0
handleGob handles the "GOB" request. It decodes the received GOB data into a struct.
func handleGob(rw *bufio.ReadWriter) { log.Print("Receive GOB data:") var data complexData // Create a decoder that decodes directly into a struct variable. dec := gob.NewDecoder(rw) err := dec.Decode(&data) if err != nil { log.Println("Error decoding GOB data:", err) return } // Print the complexData struct and the nested one, too, to prove // that both travelled across the wire. log.Printf("Outer complexData struct: \n%#v\n", data) log.Printf("Inner complexData struct: \n%#v\n", data.C) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (d *Decoder) GOB(val interface{}) {\n\tgobd := gob.NewDecoder(d.buf)\n\tif err := gobd.Decode(val); err != nil {\n\t\tlog.Panicf(\"gob: failed to decode: %v\", err)\n\t}\n}", "func (z *Rat) GobDecode(buf []byte) error {}", "func GobDecode(ctx context.Context, data []byte, obj interface{}) error {\n\treturn gob.NewDecoder(bytes.NewBuffer(data)).Decode(obj)\n}", "func DecodeGob(data []byte, v interface{}) error {\n\tb := bytes.NewBuffer(data)\n\treturn gob.NewDecoder(b).Decode(v)\n}", "func GobDecode(buffer []byte, value interface{}) error {\n buf := bytes.NewBuffer(buffer)\n decoder := gob.NewDecoder(buf)\n err := decoder.Decode(value)\n if err != nil {\n return gobDebug.Error(err)\n }\n return nil\n}", "func GobDecode(data []byte, obj interface{}) error {\n\treturn gob.NewDecoder(bytes.NewBuffer(data)).Decode(obj)\n}", "func gobDecode(buf []byte, into interface{}) error {\n\tif buf == nil {\n\t\treturn nil\n\t}\n\tdec := gob.NewDecoder(bytes.NewReader(buf))\n\treturn dec.Decode(into)\n}", "func (z *Int) GobDecode(buf []byte) error {}", "func GobDecode(b []byte) (interface{}, error) {\n\tvar result interface{}\n\terr := gob.NewDecoder(bytes.NewBuffer(b)).Decode(&result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result, nil\n}", "func GobUnmarshal(i interface{}, b []byte) error {\n\tbuf := bytes.NewBuffer(b)\n\tdecoder := gob.NewDecoder(buf)\n\treturn decoder.Decode(i)\n}", "func (s *Store) GobDecode(data []byte) error {\n\ts.access.Lock()\n\tdefer s.access.Unlock()\n\n\tbuf := bytes.NewBuffer(data)\n\n\tdecoder := gob.NewDecoder(buf)\n\tvar version uint8\n\terr := decoder.Decode(&version)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = decoder.Decode(&s.data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor key, _ := range s.data {\n\t\ts.doKeyChanged(key)\n\t}\n\n\treturn nil\n}", "func (z *Float) GobDecode(buf []byte) error {}", "func ReadGob(path string, object interface{}) error {\n\tfile, err := os.Open(path)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\tdecoder := gob.NewDecoder(file)\n\tif err = decoder.Decode(object); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (g *Gammas) GobDecode(data []byte) error {\n\tvar err error\n\tfor len(data) > 0 {\n\t\tg2 := new(bn256.G2)\n\t\tdata, err = g2.Unmarshal(data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*g = append(*g, g2)\n\t}\n\treturn nil\n}", "func FromGob(data []byte, dst interface{}) error {\n\treturn NewGobber().From(data, dst)\n}", "func (loc *LogOddsCell) GobDecode(buf []byte) error {\n\tr := bytes.NewBuffer(buf)\n\tdecoder := gob.NewDecoder(r)\n\n\terr := decoder.Decode(&loc.logOddsVal)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func GOB() (ret httprpc.Codec) {\n\treturn Danger(\n\t\tfunc(w io.Writer) DangerEncoder {\n\t\t\treturn gob.NewEncoder(w)\n\t\t},\n\t\tfunc(r io.Reader) DangerDecoder {\n\t\t\treturn gob.NewDecoder(r)\n\t\t},\n\t)\n}", "func (s *CountMinSketch) GobDecode(data []byte) error {\n\tbuf := bytes.NewBuffer(data)\n\t_, err := s.ReadFrom(buf)\n\treturn err\n}", "func (t *Time) GobDecode(data []byte) error {}", "func gobInfoDecode(gobBytes []byte) (*storage.GobInfo, error) {\n\tgobInfo := &storage.GobInfo{}\n\tbuf := bytes.NewReader(gobBytes)\n\tgobDec := realgob.NewDecoder(buf)\n\terr := gobDec.Decode(gobInfo)\n\treturn gobInfo, err\n}", "func (d *DFA) GobDecode(bs []byte) error {\n\tbuffer := bytes.NewBuffer(bs)\n\tdecoder := gob.NewDecoder(buffer)\n\tvar initial State\n\tvar table []Cell\n\tif err := decoder.Decode(&initial); err != nil {\n\t\treturn errors.Wrapf(err, \"could not GOB decode initial state\")\n\t}\n\tif err := decoder.Decode(&table); err != nil {\n\t\treturn errors.Wrapf(err, \"could not GOB decode sparse table\")\n\t}\n\td.initial = initial\n\td.table = table\n\treturn nil\n}", "func (info *ImageInfoType) GobDecode(buf []byte) (err error) {\n\tfields := []interface{}{&info.data, &info.smask, &info.n, &info.w, &info.h,\n\t\t&info.cs, 
&info.pal, &info.bpc, &info.f, &info.dp, &info.trns, &info.scale, &info.dpi}\n\tr := bytes.NewBuffer(buf)\n\tdecoder := gob.NewDecoder(r)\n\tfor j := 0; j < len(fields) && err == nil; j++ {\n\t\terr = decoder.Decode(fields[j])\n\t}\n\n\tinfo.i, err = generateImageID(info)\n\treturn\n}", "func (k *Key) GobDecode(buf []byte) error {\n\tnk, err := NewKeyEncoded(string(buf))\n\tif err != nil {\n\t\treturn err\n\t}\n\t*k = *nk\n\treturn nil\n}", "func NewGobCode(conn io.ReadWriteCloser) Codec {\n\tbuf := bufio.NewWriter(conn)\n\treturn &GobCodec{conn: conn, buf: buf, dec: gob.NewDecoder(conn), enc: gob.NewEncoder(buf)}\n}", "func client(ip string) error {\r\n\t// Some test data. Note how GOB even handles maps, slices, and\r\n\t// recursive data structures without problems.\r\n\ttestStruct := complexData{\r\n\t\tN: 23,\r\n\t\tS: \"string data\",\r\n\t\tM: map[string]int{\"one\": 1, \"two\": 2, \"three\": 3},\r\n\t\tP: []byte(\"abc\"),\r\n\t\tC: &complexData{\r\n\t\t\tN: 256,\r\n\t\t\tS: \"Recursive structs? 
Piece of cake!\",\r\n\t\t\tM: map[string]int{\"01\": 1, \"10\": 2, \"11\": 3},\r\n\t\t},\r\n\t}\r\n\r\n\t// Open a connection to the server.\r\n\trw, err := Open(ip + Port)\r\n\tif err != nil {\r\n\t\treturn errors.Wrap(err, \"Client: Failed to open connection to \"+ip+Port)\r\n\t}\r\n\r\n\t// Send a STRING request.\r\n\t// Send the request name.\r\n\t// Send the data.\r\n\tlog.Println(\"Send the string request.\")\r\n\tn, err := rw.WriteString(\"STRING\\n\")\r\n\tif err != nil {\r\n\t\treturn errors.Wrap(err, \"Could not send the STRING request (\"+strconv.Itoa(n)+\" bytes written)\")\r\n\t}\r\n\tn, err = rw.WriteString(\"Additional data.\\n\")\r\n\tif err != nil {\r\n\t\treturn errors.Wrap(err, \"Could not send additional STRING data (\"+strconv.Itoa(n)+\" bytes written)\")\r\n\t}\r\n\tlog.Println(\"Flush the buffer.\")\r\n\terr = rw.Flush()\r\n\tif err != nil {\r\n\t\treturn errors.Wrap(err, \"Flush failed.\")\r\n\t}\r\n\r\n\t// Read the reply.\r\n\tlog.Println(\"Read the reply.\")\r\n\tresponse, err := rw.ReadString('\\n')\r\n\tif err != nil {\r\n\t\treturn errors.Wrap(err, \"Client: Failed to read the reply: '\"+response+\"'\")\r\n\t}\r\n\r\n\tlog.Println(\"STRING request: got a response:\", response)\r\n\r\n\t// Send a GOB request.\r\n\t// Create an encoder that directly transmits to `rw`.\r\n\t// Send the request name.\r\n\t// Send the GOB.\r\n\tlog.Println(\"Send a struct as GOB:\")\r\n\tlog.Printf(\"Outer complexData struct: \\n%#v\\n\", testStruct)\r\n\tlog.Printf(\"Inner complexData struct: \\n%#v\\n\", testStruct.C)\r\n\tenc := gob.NewEncoder(rw)\r\n\tn, err = rw.WriteString(\"GOB\\n\")\r\n\tif err != nil {\r\n\t\treturn errors.Wrap(err, \"Could not write GOB data (\"+strconv.Itoa(n)+\" bytes written)\")\r\n\t}\r\n\terr = enc.Encode(testStruct)\r\n\tif err != nil {\r\n\t\treturn errors.Wrapf(err, \"Encode failed for struct: %#v\", testStruct)\r\n\t}\r\n\terr = rw.Flush()\r\n\tif err != nil {\r\n\t\treturn errors.Wrap(err, \"Flush 
failed.\")\r\n\t}\r\n\treturn nil\r\n}", "func (val *Value) GobDecode(buf []byte) error {\n\tr := bytes.NewReader(buf)\n\tdec := gob.NewDecoder(r)\n\n\tvar gv gobValue\n\terr := dec.Decode(&gv)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error decoding cty.Value: %s\", err)\n\t}\n\tif gv.Version != 0 {\n\t\treturn fmt.Errorf(\"unsupported cty.Value encoding version %d; only 0 is supported\", gv.Version)\n\t}\n\n\t// big.Float seems to, for some reason, lose its \"pointerness\" when we\n\t// round-trip it, so we'll fix that here.\n\tif bf, ok := gv.V.(big.Float); ok {\n\t\tgv.V = &bf\n\t}\n\n\tval.ty = gv.Ty\n\tval.v = gv.V\n\n\treturn nil\n}", "func (b *Binance) ReadGob(file string) error {\n\tf, _ := os.Open(file)\n\tgob.Register(&stg.KeepStrategy{})\n\tdecode := gob.NewDecoder(f)\n\tif err := decode.Decode(b); err != nil {\n\t\treturn err\n\t}\n\tos.Remove(file)\n\treturn nil\n}", "func (t *Type) GobDecode(buf []byte) error {\n\tr := bytes.NewReader(buf)\n\tdec := gob.NewDecoder(r)\n\n\tvar gt gobType\n\terr := dec.Decode(&gt)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error decoding cty.Type: %s\", err)\n\t}\n\tif gt.Version != 0 {\n\t\treturn fmt.Errorf(\"unsupported cty.Type encoding version %d; only 0 is supported\", gt.Version)\n\t}\n\n\tt.typeImpl = gt.Impl\n\n\treturn nil\n}", "func MustGobDecode(b []byte) interface{} {\n\tbDecoded, err := GobDecode(b)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn bDecoded\n}", "func (t *Timestamp) GobDecode(data []byte) error {\n\tvar tm time.Time\n\n\tif err := tm.UnmarshalBinary(data); err != nil {\n\t\treturn err\n\t}\n\n\t*t = Timestamp(tm)\n\n\treturn nil\n}", "func (set *AppleSet) GobDecode(b []byte) error {\n\tset.s.Lock()\n\tdefer set.s.Unlock()\n\n\tbuf := bytes.NewBuffer(b)\n\treturn gob.NewDecoder(buf).Decode(&set.m)\n}", "func (g *Graph) GobDecode(b []byte) (err error) {\n\t// decode into graphGob\n\tgGob := &graphGob{}\n\tbuf := bytes.NewBuffer(b)\n\tdec := gob.NewDecoder(buf)\n\n\terr = 
dec.Decode(gGob)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// add the vertexes\n\tfor _, key := range gGob.Vertexes {\n\t\tg.Add(key)\n\t}\n\n\t// connect the vertexes\n\tfor key, neighbors := range gGob.Edges {\n\t\tfor otherKey, weight := range neighbors {\n\t\t\tif ok := g.Connect(key, otherKey, weight); !ok {\n\t\t\t\treturn errors.New(\"invalid edge endpoints\")\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}", "func RatGobDecode(z *big.Rat, buf []byte) error", "func (d *Decimal) GobDecode(data []byte) error {\n\treturn d.UnmarshalBinary(data)\n}", "func (r *Record) GetGobField(d *Db, number uint16, e interface{}) error {\n\tif r.GetFieldType(d, number) != BLOBTYPE {\n\t\treturn WDBError(\"Not an blob valued field\")\n\t}\n\n\tenc := C.wg_get_field(d.db, r.rec, C.wg_int(number))\n\tslen := int(C.wg_decode_blob_len(d.db, enc))\n\tsval := C.wg_decode_blob(d.db, enc)\n\n\tvar goSlice []byte\n\tsliceHeader := (*reflect.SliceHeader)((unsafe.Pointer(&goSlice)))\n\tsliceHeader.Cap = slen\n\tsliceHeader.Len = slen\n\tsliceHeader.Data = uintptr(unsafe.Pointer(sval))\n\n\tbuffer := bytes.NewBuffer(goSlice)\n\tdecoder := gob.NewDecoder(buffer)\n\n\tif err := decoder.Decode(e); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func NewGobSerializer() gbus.Serializer {\n\treturn &Gob{\n\t\tlock: &sync.Mutex{},\n\t\tregisteredSchemas: make(map[string]reflect.Type),\n\t}\n}", "func IntGobDecode(z *big.Int, buf []byte) error", "func (bo *BytesObj) GJSONParse() (res gjson.Result) {\n\tif bo != nil && bo.IsObject() && len(bo.data) > 0 {\n\t\tunsafeStr := *(*string)(unsafe.Pointer(&bo.data))\n\t\tres = gjson.Parse(unsafeStr)\n\t}\n\treturn\n}", "func ToGob(src interface{}) ([]byte, error) {\n\treturn NewGobber().To(src)\n}", "func (e *Encoder) PutGOB(val interface{}) {\n\tgobe := gob.NewEncoder(e)\n\tif err := gobe.Encode(val); err != nil {\n\t\tlog.Panicf(\"gob: failed to encode %v: %v\", val, err)\n\t}\n}", "func GobGenerateDecoder(r io.Reader) Decoder {\n\treturn 
gob.NewDecoder(r)\n}", "func EncodeGob(data interface{}) ([]byte, error) {\n\tb := new(bytes.Buffer)\n\terr := gob.NewEncoder(b).Encode(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn b.Bytes(), nil\n}", "func (a *Array) GobDecode(data []byte) error {\n\tbuf := bytes.NewReader(data)\n\tdec := gob.NewDecoder(buf)\n\n\terr := checkErr(\n\t\tdec.Decode(&a.bits),\n\t\tdec.Decode(&a.length),\n\t)\n\n\tif err != nil {\n\t\terr = fmt.Errorf(\"bit: decode failed (%v)\", err)\n\t}\n\n\treturn err\n}", "func (t *Tensor) GobDecode(b []byte) error {\n\tr := bytes.NewReader(b)\n\tdec := gob.NewDecoder(r)\n\n\tvar dt tf.DataType\n\terr := dec.Decode(&dt)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar shape []int64\n\terr = dec.Decode(&shape)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar tensor *tf.Tensor\n\tswitch dt {\n\tcase tf.String:\n\t\t// TensorFlow Go package currently does not support\n\t\t// string serialization. Let's do it ourselves.\n\t\tvar str string\n\t\terr = dec.Decode(&str)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttensor, err = tf.NewTensor(str)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\ttensor, err = tf.ReadTensor(dt, shape, r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tt.Tensor = tensor\n\treturn nil\n}", "func GobEncode(data interface{}) []byte {\n\tvar buff bytes.Buffer\n\n\tencoder := gob.NewEncoder(&buff)\n\tif err := encoder.Encode(data); err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\treturn buff.Bytes()\n}", "func EncodeGob(p interface{}) (data []byte, err error) {\n\tb := bytes.Buffer{}\n\tencoder := gob.NewEncoder(&b)\n\terr = encoder.Encode(p)\n\tif err != nil {\n\t\treturn\n\t}\n\tdata = b.Bytes()\n\treturn\n}", "func handleGetData(request []byte, bc *Blockchain) {\n\tvar buff bytes.Buffer\n\tvar payload getdata\n\n\tbuff.Write(request[commandLength:])\n\tdec := gob.NewDecoder(&buff)\n\terr := dec.Decode(&payload)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tif payload.Type == \"block\" 
{\n\t\tblock, err := bc.GetBlock([]byte(payload.ID))\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tsendBlock(payload.AddrFrom, &block)\n\t}\n\n\tif payload.Type == \"tx\" {\n\t\ttxID := hex.EncodeToString(payload.ID)\n\t\ttx := mempool[txID]\n\n\t\tsendTx(payload.AddrFrom, &tx)\n\t\t// delete(mempool, txID)\n\t}\n}", "func FloatGobDecode(z *big.Float, buf []byte) error", "func decodeLeaseRequestGob(hdrBytes []byte, gobBytes []byte) (leaseReq *LeaseRequest, jreq *jsonRequest) {\n\tvar err error\n\n\tjreq = &jsonRequest{}\n\tioReq := &ioRequestRetryRpc{}\n\n\thdrBuf := bytes.NewBuffer(hdrBytes)\n\n\t// Unmarshal the IoRequest header (always binary)\n\terr = binary.Read(hdrBuf, binary.LittleEndian, &ioReq.Hdr.Len)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Len\")\n\t}\n\terr = binary.Read(hdrBuf, binary.LittleEndian, &ioReq.Hdr.Protocol)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Protocol\")\n\t}\n\terr = binary.Read(hdrBuf, binary.LittleEndian, &ioReq.Hdr.Version)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Version\")\n\t}\n\terr = binary.Read(hdrBuf, binary.LittleEndian, &ioReq.Hdr.Type)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Type\")\n\t}\n\terr = binary.Read(hdrBuf, binary.LittleEndian, &ioReq.Hdr.Magic)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Magic\")\n\t}\n\n\t// now unmarshal the jsonRequest fields using gob (can't fail)\n\t_, _ = decodeLeaseRequestGobBuffer.Write(gobBytes)\n\terr = decodeLeaseRequestGobDecoder.Decode(jreq)\n\tif err != nil {\n\t\tpanic(\"decodeLeaseRequestGobDecoder.Decode\")\n\t}\n\tleaseReq = jreq.Params[0].(*LeaseRequest)\n\n\treturn\n}", "func gobEncode(data interface{}) []byte {\n\tvar buff bytes.Buffer\n\n\tenc := gob.NewEncoder(&buff)\n\terr := enc.Encode(data)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\treturn buff.Bytes()\n}", "func NewGobTranscoder() *GobTranscoder {\n\tret := &GobTranscoder{\n\t\tinBytes: &bytes.Buffer{},\n\t\toutBytes: &bytes.Buffer{},\n\t\tencoderMut: &sync.Mutex{},\n\t\tdecoderMut: 
&sync.Mutex{},\n\t}\n\tret.encoder = gob.NewEncoder(ret.outBytes)\n\tret.decoder = gob.NewDecoder(ret.inBytes)\n\treturn ret\n}", "func (x *Rat) GobEncode() ([]byte, error) {}", "func (t *capsuleType) GobEncode() ([]byte, error) {\n\treturn nil, fmt.Errorf(\"cannot gob-encode capsule type %q\", t.FriendlyName(friendlyTypeName))\n}", "func (b *BoatHandle) Recv(e interface{}) error {\n\treturn b.stdoutGob.Decode(e)\n}", "func GobDecodeFromFile(filename string, object interface{}) error {\n file, err := os.Open(filename)\n if err != nil {\n // Might be caused by file does not exist\n return gobDebug.Error(err)\n }\n defer file.Close()\n decoder := gob.NewDecoder(file)\n if err := decoder.Decode(object); err != nil {\n return gobDebug.Error(err)\n }\n return nil\n}", "func (server *Server) handleRequestBlob(client *Client, message *Message) {\n\trequestBlob := &protocol.RequestBlob{}\n\terr := protobuf.Unmarshal(message.buffer, requestBlob)\n\tif err != nil {\n\t\tclient.Panic(err)\n\t\treturn\n\t}\n\n\t//userState := &protocol.UserState{}\n\n\t// Request for user textures\n\t// TODO: Why count if you only want to know 1 count?\n\tif len(requestBlob.SessionTexture) > 0 {\n\t\tfor _, sid := range requestBlob.SessionTexture {\n\t\t\tif target, ok := server.clients[sid]; ok {\n\t\t\t\t// TODO: NOT OK, use errors, don't leave everyone including yourself in the fucking dark\n\t\t\t\t// TODO: No, and its a validation!!!!!\n\t\t\t\tif target.user == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif target.user.HasTexture() {\n\t\t\t\t\t// TODO: Replace this shit alter, just get the first major structure changes in\n\t\t\t\t\t//buffer, err := blobStore.Get(target.user.TextureBlob)\n\t\t\t\t\t//if err != nil {\n\t\t\t\t\t//\tserver.Panic(err)\n\t\t\t\t\t//\treturn\n\t\t\t\t\t//}\n\t\t\t\t\t//userState.Reset()\n\t\t\t\t\t//userState.Session = protobuf.Uint32(uint32(target.Session()))\n\t\t\t\t\t//// TODO: What is a texture????? 
BETTER NAMES\n\t\t\t\t\t//userState.Texture = buffer\n\t\t\t\t\t//if err := client.sendMessage(userState); err != nil {\n\t\t\t\t\t//\tclient.Panic(err)\n\t\t\t\t\t//\treturn\n\t\t\t\t\t//}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Request for user comments\n\t// TODO: Stop counting os high!\n\tif len(requestBlob.SessionComment) > 0 {\n\t\tfor _, sid := range requestBlob.SessionComment {\n\t\t\t// TODO: Err not ok!\n\t\t\tif target, ok := server.clients[sid]; ok {\n\t\t\t\t// TODO: REPEATED VALIDATION!!!!!\n\t\t\t\tif target.user == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif target.user.HasComment() {\n\t\t\t\t\t// TODO: Ughh just comment blob shit out now for the first major structure changes to work and tackle this after\n\t\t\t\t\t//buffer, err := requestBlob.Get(target.user.CommentBlob)\n\t\t\t\t\t//if err != nil {\n\t\t\t\t\t//\t// TODO: There is no reason to repeat these fucntions for each class, its just bad\n\t\t\t\t\t//\tserver.Panic(err)\n\t\t\t\t\t//\treturn\n\t\t\t\t\t//}\n\t\t\t\t\t//userState.Reset()\n\t\t\t\t\t//userState.Session = protobuf.Uint32(uint32(target.Session()))\n\t\t\t\t\t//userState.Comment = protobuf.String(string(buffer))\n\t\t\t\t\t//if err := client.sendMessage(userState); err != nil {\n\t\t\t\t\t//\tclient.Panic(err)\n\t\t\t\t\t//\treturn\n\t\t\t\t\t//}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tchannelState := &protocol.ChannelState{}\n\n\t// Request for channel descriptions\n\t// TODO: Added up, there is SO MUCH WASTE. 
THESE ARE PER MESSAGE!\n\tif len(requestBlob.ChannelDescription) > 0 {\n\t\tfor _, cid := range requestBlob.ChannelDescription {\n\t\t\tif channel, ok := server.Channels[cid]; ok {\n\t\t\t\tif channel.HasDescription() {\n\t\t\t\t\tchannelState.Reset()\n\t\t\t\t\t//buffer, err := requestBlob.Get(channel.DescriptionBlob)\n\t\t\t\t\t//if err != nil {\n\t\t\t\t\t//\tserver.Panic(err)\n\t\t\t\t\t//\treturn\n\t\t\t\t\t//}\n\t\t\t\t\t//// TODO: you should be asking yourself, if you are doing a conversion everytime you use a variable, is there something majorly wrong? the answer is yes\n\t\t\t\t\t//channelState.ChannelID = protobuf.Uint32(channel.ID)\n\t\t\t\t\t//channelState.Description = protobuf.String(string(buffer))\n\t\t\t\t\t//if err := client.sendMessage(channelState); err != nil {\n\t\t\t\t\t//\tclient.Panic(err)\n\t\t\t\t\t//\treturn\n\t\t\t\t\t//}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func Unmarshal(data []byte, typ DataFormat, target interface{}) {\n\tswitch typ {\n\tcase GOB:\n\t\tbuf := bytes.NewReader(data)\n\t\tgob.NewDecoder(buf).Decode(target)\n\n\tdefault:\n\t\tif err := json.Unmarshal(data, target); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func gobHisto(vs ...*Histo) (err error) {\n\tfile, err := os.Create(\"r-g-b.gob\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\tenc := gob.NewEncoder(file)\n\tfor _, v := range vs {\n\t\terr = enc.Encode(v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (b *Binance) SaveGob(file string) error {\n\tf, _ := os.OpenFile(file, os.O_RDWR|os.O_CREATE, 0777)\n\tgob.Register(&stg.KeepStrategy{})\n\tencode := gob.NewEncoder(f)\n\tif err := encode.Encode(b); err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\treturn nil\n}", "func NewGOBCodec() *GOBCodec {\n\tr := GOBCodec(0)\n\treturn &r\n}", "func BenchmarkDecodingGobTweet(b *testing.B) {\n\tb.StartTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\ttw := Tweet{}\n\t\tdec := gob.NewDecoder(&gobTw)\n\t\terr := 
dec.Decode(&tw)\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"Error unmarshaling json: %v\", err)\n\t\t}\n\t}\n}", "func (m *Model) GOB() ([]byte, error) {\n\tif m.Emm != nil {\n\t\treturn m.Emm.GOB()\n\t}\n\tif m.Snow != nil {\n\t\treturn m.Snow.GOB()\n\t}\n\treturn nil, nil\n}", "func SaveGob(path string, object interface{}) error {\n\tfile, err := os.Create(path)\n\tdefer file.Close()\n\tif err == nil {\n\t\tencoder := gob.NewEncoder(file)\n\t\tencoder.Encode(object)\n\t}\n\treturn err\n}", "func TestPutGOB(t *testing.T) {\n\tassert := audit.NewTestingAssertion(t, true)\n\t// Setup the test server.\n\tmux := newMultiplexer(assert)\n\tts := restaudit.StartServer(mux, assert)\n\tdefer ts.Close()\n\terr := mux.Register(\"test\", \"gob\", NewTestHandler(\"putgob\", assert))\n\tassert.Nil(err)\n\t// Perform test requests.\n\treq := restaudit.NewRequest(\"POST\", \"/base/test/gob\")\n\treqData := TestCounterData{\"test\", 4711}\n\treq.MarshalBody(assert, restaudit.ApplicationGOB, reqData)\n\tresp := ts.DoRequest(req)\n\tresp.AssertStatusEquals(200)\n\trespData := TestCounterData{}\n\tresp.AssertUnmarshalledBody(&respData)\n\tassert.Equal(respData, reqData)\n}", "func (serv *Server) handleBin(conn int, payload []byte) {}", "func (proxy *HuobiProxy) decode(message interface{}) error {\n\tgzip, err := gzip.NewReader(proxy.conn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdecoder := json.NewDecoder(gzip)\n\terr = decoder.Decode(message)\n\n\treturn err\n}", "func TestEncodeDecodeGob(t *testing.T) {\n\ttestEncodeDecodeFunctions(t,\n\t\tencodeLeaseRequestGob, encodeLeaseReplyGob,\n\t\tdecodeLeaseRequestGob, decodeLeaseReplyGob)\n}", "func encodeLeaseRequestGob(leaseReq *LeaseRequest, jreq *jsonRequest) (hdrBytes []byte, gobBytes []byte) {\n\tvar err error\n\n\thdrBuf := &bytes.Buffer{}\n\n\t// the Lease Request is part of the gob request\n\tjreq.Params[0] = leaseReq\n\n\t// marshal jreq (and lease request)\n\terr = encodeLeaseRequestGobEncoder.Encode(jreq)\n\tif err != nil 
{\n\t\tpanic(\"encodeLeaseRequestGobEncoder\")\n\t}\n\n\t// consume the results encoded in the (global) buffer\n\tgobBytes = make([]byte, encodeLeaseRequestGobBuffer.Len())\n\tn, err := encodeLeaseRequestGobBuffer.Read(gobBytes)\n\tif n != cap(gobBytes) {\n\t\tpanic(\"didn't read enough bytes\")\n\t}\n\n\t// now create the IoRequest header and Marshal it\n\t// (this is always binary)\n\tioReq := ioRequestRetryRpc{\n\t\tHdr: ioHeader{\n\t\t\tLen: uint32(len(gobBytes)),\n\t\t\tProtocol: uint16(1),\n\t\t\tVersion: 1,\n\t\t\tType: 1,\n\t\t\tMagic: 0xCAFEFEED,\n\t\t},\n\t}\n\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReq.Hdr.Len)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Len\")\n\t}\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReq.Hdr.Protocol)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Protocol\")\n\t}\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReq.Hdr.Version)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Version\")\n\t}\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReq.Hdr.Type)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Type\")\n\t}\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReq.Hdr.Magic)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Magic\")\n\t}\n\n\thdrBytes = hdrBuf.Bytes()\n\treturn\n}", "func GobMarshal(i interface{}) ([]byte, error) {\n\tbuf := bytes.NewBuffer(nil)\n\tencoder := gob.NewEncoder(buf)\n\terr := encoder.Encode(i)\n\treturn buf.Bytes(), err\n}", "func handleEscreveBloco(writer http.ResponseWriter, req *http.Request) {\n\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\tvar m Mensagem\n\n\tdecoder := json.NewDecoder(req.Body)\n\tif err := decoder.Decode(&m); err != nil {\n\t\trespondWithJSON(writer, req, http.StatusBadRequest, req.Body)\n\t\treturn\n\t}\n\tdefer req.Body.Close()\n\n\t//garante atomicidade ao criar o bloco\n\tmutex.Lock()\n\n\tnovoBloco := geraBloco(Blockchain[len(Blockchain)-1], m.Dados, m.Dificuldade)\n\n\t//desfaz o lock\n\tmutex.Unlock()\n\n\tif blocoValido(novoBloco, 
Blockchain[len(Blockchain)-1]) {\n\t\tBlockchain = append(Blockchain, novoBloco)\n\t\tspew.Dump(novoBloco)\n\t}\n\n\trespondWithJSON(writer, req, http.StatusCreated, novoBloco)\n}", "func decodeLeaseReplyGob(hdrBytes []byte, gobBytes []byte) (leaseReply *LeaseReply, jreply *jsonReply) {\n\tvar err error\n\n\tleaseReply = &LeaseReply{}\n\tjreply = &jsonReply{}\n\tjreply.Result = leaseReply\n\tioReply := &ioReplyRetryRpc{}\n\n\thdrBuf := bytes.NewBuffer(hdrBytes)\n\n\t// Unmarshal the IoReply header (always binary)\n\terr = binary.Read(hdrBuf, binary.LittleEndian, &ioReply.Hdr.Len)\n\tif err != nil {\n\t\tpanic(\"ioReply.Hdr.Len\")\n\t}\n\terr = binary.Read(hdrBuf, binary.LittleEndian, &ioReply.Hdr.Protocol)\n\tif err != nil {\n\t\tpanic(\"ioReply.Hdr.Protocol\")\n\t}\n\terr = binary.Read(hdrBuf, binary.LittleEndian, &ioReply.Hdr.Version)\n\tif err != nil {\n\t\tpanic(\"ioReply.Hdr.Version\")\n\t}\n\terr = binary.Read(hdrBuf, binary.LittleEndian, &ioReply.Hdr.Type)\n\tif err != nil {\n\t\tpanic(\"ioReply.Hdr.Type\")\n\t}\n\terr = binary.Read(hdrBuf, binary.LittleEndian, &ioReply.Hdr.Magic)\n\tif err != nil {\n\t\tpanic(\"ioReply.Hdr.Magic\")\n\t}\n\n\t// now unmarshal the jsonReply fields using gob (can't fail)\n\t_, _ = decodeLeaseReplyGobBuffer.Write(gobBytes)\n\terr = decodeLeaseReplyGobDecoder.Decode(jreply)\n\tif err != nil {\n\t\tpanic(\"decodeLeaseReplyGobDecoder.Decode\")\n\t}\n\tleaseReply = jreply.Result.(*LeaseReply)\n\n\treturn\n}", "func BenchmarkRpcLeaseDecodeGob(b *testing.B) {\n\tbenchmarkRpcLeaseDecode(b,\n\t\tencodeLeaseRequestGob, encodeLeaseReplyGob,\n\t\tdecodeLeaseRequestGob, decodeLeaseReplyGob)\n}", "func (server *Server) handleRequestBlob(client *Client, msg *Message) {\n\tblobreq := &mumbleproto.RequestBlob{}\n\terr := proto.Unmarshal(msg.buf, blobreq)\n\tif err != nil {\n\t\tclient.Panic(err)\n\t\treturn\n\t}\n\n\tuserstate := &mumbleproto.UserState{}\n\n\t// Request for user textures\n\tif len(blobreq.SessionTexture) > 0 {\n\t\tfor _, sid 
:= range blobreq.SessionTexture {\n\t\t\tif target, ok := server.clients[sid]; ok {\n\t\t\t\tif target.user == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif target.user.HasTexture() {\n\t\t\t\t\tbuf, err := BlobStore.Get(target.user.TextureBlob)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tserver.Panicf(\"Blobstore error: %v\", err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tuserstate.Reset()\n\t\t\t\t\tuserstate.Session = proto.Uint32(uint32(target.Session()))\n\t\t\t\t\tuserstate.Texture = buf\n\t\t\t\t\tif err := client.sendMessage(userstate); err != nil {\n\t\t\t\t\t\tclient.Panic(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Request for user comments\n\tif len(blobreq.SessionComment) > 0 {\n\t\tfor _, sid := range blobreq.SessionComment {\n\t\t\tif target, ok := server.clients[sid]; ok {\n\t\t\t\tif target.user == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif target.user.HasComment() {\n\t\t\t\t\tbuf, err := BlobStore.Get(target.user.CommentBlob)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tserver.Panicf(\"Blobstore error: %v\", err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tuserstate.Reset()\n\t\t\t\t\tuserstate.Session = proto.Uint32(uint32(target.Session()))\n\t\t\t\t\tuserstate.Comment = proto.String(string(buf))\n\t\t\t\t\tif err := client.sendMessage(userstate); err != nil {\n\t\t\t\t\t\tclient.Panic(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tchanstate := &mumbleproto.ChannelState{}\n\n\t// Request for channel descriptions\n\tif len(blobreq.ChannelDescription) > 0 {\n\t\tfor _, cid := range blobreq.ChannelDescription {\n\t\t\tif channel, ok := server.Channels[int(cid)]; ok {\n\t\t\t\tif channel.HasDescription() {\n\t\t\t\t\tchanstate.Reset()\n\t\t\t\t\tbuf, err := BlobStore.Get(channel.DescriptionBlob)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tserver.Panicf(\"Blobstore error: %v\", err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tchanstate.ChannelId = 
proto.Uint32(uint32(channel.Id))\n\t\t\t\t\tchanstate.Description = proto.String(string(buf))\n\t\t\t\t\tif err := client.sendMessage(chanstate); err != nil {\n\t\t\t\t\t\tclient.Panic(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func GobEncode(v interface{}) ([]byte, error) {\n\tvar buf bytes.Buffer\n\terr := gob.NewEncoder(&buf).Encode(&v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}", "func (s *BasePlSqlParserListener) EnterLob_segname(ctx *Lob_segnameContext) {}", "func GobEncode(value interface{}) []byte {\n buf := bytes.NewBuffer(make([]byte, 0, 1024))\n encoder := gob.NewEncoder(buf)\n // encode unknown type might cause some error\n err := encoder.Encode(value)\n if err != nil {\n gobDebug.Panicf(\"Failed to encode a value: %+v\\n%v\\n\", value, err)\n }\n return buf.Bytes()\n}", "func (info *ImageInfoType) GobEncode() (buf []byte, err error) {\n\tfields := []interface{}{info.data, info.smask, info.n, info.w, info.h, info.cs,\n\t\tinfo.pal, info.bpc, info.f, info.dp, info.trns, info.scale, info.dpi}\n\tw := new(bytes.Buffer)\n\tencoder := gob.NewEncoder(w)\n\tfor j := 0; j < len(fields) && err == nil; j++ {\n\t\terr = encoder.Encode(fields[j])\n\t}\n\tif err == nil {\n\t\tbuf = w.Bytes()\n\t}\n\treturn\n}", "func gobEncode(value interface{}) ([]byte, error) {\n\tvar buf bytes.Buffer\n\tenc := gob.NewEncoder(&buf)\n\terr := enc.Encode(value)\n\treturn buf.Bytes(), err\n}", "func (s *DataAPI) handleOrderBook(thing interface{}) (interface{}, error) {\n\treq, ok := thing.(*msgjson.OrderBookSubscription)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"orderbook request unparseable\")\n\t}\n\n\tmkt, err := dex.MarketName(req.Base, req.Quote)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't parse requested market\")\n\t}\n\treturn s.bookSource.Book(mkt)\n}", "func FacebookCallbackGETHandler(w http.ResponseWriter, r *http.Request) {\r\n\r\n\tc := appengine.NewContext(r)\r\n\tlog.Debugf(c, \">>>> 
FacebookCallbackGETHandler\")\r\n\r\n\tmode := r.FormValue(\"hub.mode\")\r\n\tlog.Debugf(c, \"Hub Mode: %v\", mode)\r\n\r\n\tchallenge := r.FormValue(\"hub.challenge\")\r\n\tlog.Debugf(c, \"Hub Challenge: %v\", challenge)\r\n\r\n\tverify_token := r.FormValue(\"hub.verify_token\")\r\n\tlog.Debugf(c, \"Hub Verify Token: %v\", verify_token)\r\n\r\n\tif verify_token != VERIFY_TOKEN {\r\n\t\tlog.Errorf(c, \"Error, bad verification token: %v\", verify_token)\r\n\t\thttp.Error(w, \"Bad Request\", http.StatusBadRequest)\r\n\t\treturn\r\n\t}\r\n\r\n\tif mode != \"subscribe\" {\r\n\t\tlog.Errorf(c, \"Error, bad mode: %v\", mode)\r\n\t\thttp.Error(w, \"Bad Request\", http.StatusBadRequest)\r\n\t\treturn\r\n\t}\r\n\r\n\tfmt.Fprintf(w, \"%v\", challenge)\r\n}", "func (client *GremlinResourcesClient) getGremlinGraphHandleResponse(resp *http.Response) (GremlinResourcesClientGetGremlinGraphResponse, error) {\n\tresult := GremlinResourcesClientGetGremlinGraphResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GremlinGraphGetResults); err != nil {\n\t\treturn GremlinResourcesClientGetGremlinGraphResponse{}, err\n\t}\n\treturn result, nil\n}", "func (s *BasePlSqlParserListener) EnterLob_item(ctx *Lob_itemContext) {}", "func (g *Gammas) GobEncode() ([]byte, error) {\n\tbuff := bytes.Buffer{}\n\tif g != nil {\n\t\tfor _, g2 := range *g {\n\t\t\tbuff.Write(g2.Marshal())\n\t\t}\n\t}\n\treturn buff.Bytes(), nil\n}", "func writeLob(wr *bufio.Writer) error {\n\n\tif err := wr.WriteByte(0); err != nil {\n\t\treturn err\n\t}\n\tif err := wr.WriteInt32(0); err != nil {\n\t\treturn err\n\t}\n\tif err := wr.WriteInt32(0); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (m *Messenger) handle(w http.ResponseWriter, r *http.Request) {\n\tif r.Method == \"GET\" {\n\t\tm.verifyHandler(w, r)\n\t\treturn\n\t}\n\n\tvar rec Receive\n\n\t// consume a *copy* of the request body\n\tbody, _ := ioutil.ReadAll(r.Body)\n\tr.Body = 
ioutil.NopCloser(bytes.NewBuffer(body))\n\n\terr := json.Unmarshal(body, &rec)\n\tif err != nil {\n\t\terr = xerrors.Errorf(\"could not decode response: %w\", err)\n\t\tfmt.Println(err)\n\t\tfmt.Println(\"could not decode response:\", err)\n\t\trespond(w, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif rec.Object != \"page\" {\n\t\tfmt.Println(\"Object is not page, undefined behaviour. Got\", rec.Object)\n\t\trespond(w, http.StatusUnprocessableEntity)\n\t\treturn\n\t}\n\n\tif m.verify {\n\t\tif err := m.checkIntegrity(r); err != nil {\n\t\t\tfmt.Println(\"could not verify request:\", err)\n\t\t\trespond(w, http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t}\n\n\tm.dispatch(rec)\n\n\trespond(w, http.StatusAccepted) // We do not return any meaningful response immediately so it should be 202\n}", "func encodeLeaseReplyGob(leaseReply *LeaseReply, jreply *jsonReply) (hdrBytes []byte, gobBytes []byte) {\n\tvar err error\n\n\thdrBuf := &bytes.Buffer{}\n\n\t// the Lease Reply is part of the JSON reply\n\tjreply.Result = leaseReply\n\n\t// marshal jreq (and lease request)\n\terr = encodeLeaseReplyGobEncoder.Encode(jreply)\n\tif err != nil {\n\t\tpanic(\"encodeLeaseReplyGobEncoder\")\n\t}\n\n\t// consume the results encoded in the (global) buffer\n\tgobBytes = make([]byte, encodeLeaseReplyGobBuffer.Len())\n\tn, err := encodeLeaseReplyGobBuffer.Read(gobBytes)\n\tif n != cap(gobBytes) {\n\t\tpanic(\"didn't read enough bytes\")\n\t}\n\n\t// now create the IoReply header and Marshal it\n\t// (this is always binary)\n\tioReply := ioReplyRetryRpc{\n\t\tHdr: ioHeader{\n\t\t\tLen: uint32(len(gobBytes)),\n\t\t\tProtocol: uint16(1),\n\t\t\tVersion: 1,\n\t\t\tType: 1,\n\t\t\tMagic: 0xCAFEFEED,\n\t\t},\n\t}\n\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReply.Hdr.Len)\n\tif err != nil {\n\t\tpanic(\"ioReply.Hdr.Len\")\n\t}\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReply.Hdr.Protocol)\n\tif err != nil {\n\t\tpanic(\"ioReply.Hdr.Protocol\")\n\t}\n\terr = 
binary.Write(hdrBuf, binary.LittleEndian, ioReply.Hdr.Version)\n\tif err != nil {\n\t\tpanic(\"ioReply.Hdr.Version\")\n\t}\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReply.Hdr.Type)\n\tif err != nil {\n\t\tpanic(\"ioReply.Hdr.Type\")\n\t}\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReply.Hdr.Magic)\n\tif err != nil {\n\t\tpanic(\"ioReply.Hdr.Magic\")\n\t}\n\n\thdrBytes = hdrBuf.Bytes()\n\treturn\n}", "func (recv *Value) GetObject() Object {\n\tretC := C.g_value_get_object((*C.GValue)(recv.native))\n\tretGo := *ObjectNewFromC(unsafe.Pointer(retC))\n\n\treturn retGo\n}", "func NewGobDecoderLight() *GobDecoderLight {\n\tret := &GobDecoderLight{\n\t\tbytes: &bytes.Buffer{},\n\t}\n\tret.decoder = gob.NewDecoder(ret.bytes)\n\treturn ret\n}", "func (s *CountMinSketch) GobEncode() ([]byte, error) {\n\tvar buf bytes.Buffer\n\t_, err := s.WriteTo(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}", "func (d *Person) GobEncode() ([]byte, error) {\n\tw := new(bytes.Buffer)\n\tencoder := gob.NewEncoder(w)\n\terr := encoder.Encode(d.Id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = encoder.Encode(d.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn w.Bytes(), nil\n}", "func HandleDecompression(r *retryablehttp.Request, bodyOrig []byte) (bodyDec []byte, err error) {\n\tencodingHeader := strings.ToLower(r.Header.Get(\"Accept-Encoding\"))\n\tif encodingHeader == \"gzip\" {\n\t\tgzipreader, err := gzip.NewReader(bytes.NewReader(bodyOrig))\n\t\tif err != nil {\n\t\t\treturn bodyDec, err\n\t\t}\n\t\tdefer gzipreader.Close()\n\n\t\tbodyDec, err = ioutil.ReadAll(gzipreader)\n\t\tif err != nil {\n\t\t\treturn bodyDec, err\n\t\t}\n\n\t\treturn bodyDec, nil\n\t}\n\n\treturn bodyOrig, nil\n}", "func EncodeGobZlib(p interface{}) (data []byte, err error) {\n\tb := bytes.Buffer{}\n\tcompressor, err := zlib.NewWriterLevel(&b, zlib.BestCompression)\n\tif err != nil {\n\t\treturn\n\t}\n\tencoder := 
gob.NewEncoder(compressor)\n\terr = encoder.Encode(p)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = compressor.Close()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdata = b.Bytes()\n\treturn\n}", "func DecodeGMMessage(ws *websocket.Conn) (*GMMessage, error) {\n\n\t// gob decoding\n\tvar m GMMessage\n\tvar msg = make([]byte, 2048)\n\tl, err := ws.Read(msg)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"DecodeGMMessage() ws.Read() error: %s\", err)\n\t}\n\traw := msg[0:l]\n\tdecBuf := bytes.NewBuffer(raw)\n\terr = gob.NewDecoder(decBuf).Decode(&m)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"DecodeGMMessage() gob.Decode() error: %s\", err)\n\t}\n\treturn &m, nil\n}", "func (b BoatAPI) Recv(e interface{}) error {\n\treturn b.stdin.Decode(e)\n}", "func ParseBinReader(r io.Reader, path string) (*GLTF, error) {\n\n\t// Read header\n\tvar header GLBHeader\n\terr := binary.Read(r, binary.LittleEndian, &header)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check magic and version\n\tif header.Magic != GLBMagic {\n\t\treturn nil, fmt.Errorf(\"invalid GLB Magic field\")\n\t}\n\tif header.Version < 2 {\n\t\treturn nil, fmt.Errorf(\"GLB version:%v not supported\", header.Version)\n\t}\n\n\t// Read first chunk (JSON)\n\tbuf, err := readChunk(r, GLBJson)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse JSON into gltf object\n\tbb := bytes.NewBuffer(buf)\n\tgltf, err := ParseJSONReader(bb, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check for and read second chunk (binary, optional)\n\tdata, err := readChunk(r, GLBBin)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgltf.data = data\n\n\treturn gltf, nil\n}", "func (s *BasePlSqlParserListener) EnterLob_parameters(ctx *Lob_parametersContext) {}", "func BobPurchaseDataAPIHandler(w http.ResponseWriter, r *http.Request) {\n\tLog := Logger.NewSessionLogger()\n\n\tLog.Infof(\"start purchase data...\")\n\tvar plog PodLog\n\tplog.Result = LOG_RESULT_FAILED\n\tplog.Operation = 
LOG_OPERATION_TYPE_BOB_TX\n\tdefer func() {\n\t\terr := insertLogToDB(plog)\n\t\tif err != nil {\n\t\t\tLog.Warnf(\"insert log error! %v\", err)\n\t\t\treturn\n\t\t}\n\t\tnodeRecovery(w, Log)\n\t}()\n\n\trequestData := r.FormValue(\"request_data\")\n\tvar data RequestData\n\terr := json.Unmarshal([]byte(requestData), &data)\n\tif err != nil {\n\t\tLog.Warnf(\"invalid parameter. data=%v, err=%v\", requestData, err)\n\t\tfmt.Fprintf(w, RESPONSE_INCOMPLETE_PARAM)\n\t\treturn\n\t}\n\tLog.Debugf(\"success to parse request data. data=%v\", requestData)\n\n\tif data.MerkleRoot == \"\" || data.AliceIP == \"\" || data.AliceAddr == \"\" || data.BulletinFile == \"\" || data.PubPath == \"\" {\n\t\tLog.Warnf(\"invalid parameter. merkleRoot=%v, AliceIP=%v, AliceAddr=%v, bulletinFile=%v, PubPath=%v\",\n\t\t\tdata.MerkleRoot, data.AliceIP, data.AliceAddr, data.BulletinFile, data.PubPath)\n\t\tfmt.Fprintf(w, RESPONSE_INCOMPLETE_PARAM)\n\t\treturn\n\t}\n\tLog.Debugf(\"read parameters. merkleRoot=%v, AliceIP=%v, AliceAddr=%v, bulletinFile=%v, PubPath=%v\",\n\t\tdata.MerkleRoot, data.AliceIP, data.AliceAddr, data.BulletinFile, data.PubPath)\n\n\tplog.Detail = fmt.Sprintf(\"merkleRoot=%v, AliceIP=%v, AliceAddr=%v, bulletinFile=%v, PubPath=%v\",\n\t\tdata.MerkleRoot, data.AliceIP, data.AliceAddr, data.BulletinFile, data.PubPath)\n\n\tbulletin, err := readBulletinFile(data.BulletinFile, Log)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to read bulletin File. err=%v\", err)\n\t\tfmt.Fprintf(w, RESPONSE_PURCHASE_FAILED)\n\t\treturn\n\t}\n\tplog.Detail = fmt.Sprintf(\"%v, merkle root=%v,\", plog.Detail, bulletin.SigmaMKLRoot)\n\n\tLog.Debugf(\"step0: prepare for transaction...\")\n\tvar params = BobConnParam{data.AliceIP, data.AliceAddr, bulletin.Mode, data.SubMode, data.OT, data.UnitPrice, \"\", bulletin.SigmaMKLRoot}\n\tnode, conn, params, err := preBobConn(params, ETHKey, Log)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to prepare net for transaction. 
err=%v\", err)\n\t\tfmt.Fprintf(w, RESPONSE_PURCHASE_FAILED)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif err := node.Close(); err != nil {\n\t\t\tfmt.Errorf(\"failed to close client node: %v\", err)\n\t\t}\n\t\tif err := conn.Close(); err != nil {\n\t\t\tLog.Errorf(\"failed to close connection on client side: %v\", err)\n\t\t}\n\t}()\n\tLog.Debugf(\"[%v]step0: success to establish connecting session with Alice. Alice IP=%v, Alice address=%v\", params.SessionID, params.AliceIPAddr, params.AliceAddr)\n\tplog.Detail = fmt.Sprintf(\"%v, sessionID=%v,\", plog.Detail, params.SessionID)\n\tplog.SessionId = params.SessionID\n\n\tvar tx BobTransaction\n\ttx.SessionID = params.SessionID\n\ttx.Status = TRANSACTION_STATUS_START\n\ttx.Bulletin = bulletin\n\ttx.AliceIP = params.AliceIPAddr\n\ttx.AliceAddr = params.AliceAddr\n\ttx.Mode = params.Mode\n\ttx.SubMode = params.SubMode\n\ttx.OT = params.OT\n\ttx.UnitPrice = params.UnitPrice\n\ttx.BobAddr = fmt.Sprintf(\"%v\", ETHKey.Address.Hex())\n\n\tLog.Debugf(\"[%v]step0: success to prepare for transaction...\", params.SessionID)\n\ttx.Status = TRANSACTION_STATUS_START\n\terr = insertBobTxToDB(tx)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to save transaction to db for Bob. 
err=%v\", err)\n\t\tfmt.Fprintf(w, fmt.Sprintf(RESPONSE_TRANSACTION_FAILED, \"failed to save transaction to db for Bob.\"))\n\t\treturn\n\t}\n\n\tvar response string\n\tif tx.Mode == TRANSACTION_MODE_PLAIN_POD {\n\t\tswitch tx.SubMode {\n\t\tcase TRANSACTION_SUB_MODE_COMPLAINT:\n\t\t\tif tx.OT {\n\t\t\t\tresponse = BobTxForPOC(node, ETHKey, tx, data.Demands, data.Phantoms, data.BulletinFile, data.PubPath, Log)\n\t\t\t} else {\n\t\t\t\tresponse = BobTxForPC(node, ETHKey, tx, data.Demands, data.BulletinFile, data.PubPath, Log)\n\t\t\t}\n\t\tcase TRANSACTION_SUB_MODE_ATOMIC_SWAP:\n\t\t\tresponse = BobTxForPAS(node, ETHKey, tx, data.Demands, data.BulletinFile, data.PubPath, Log)\n\t\t}\n\t} else if tx.Mode == TRANSACTION_MODE_TABLE_POD {\n\t\tswitch tx.SubMode {\n\t\tcase TRANSACTION_SUB_MODE_COMPLAINT:\n\t\t\tif tx.OT {\n\t\t\t\tresponse = BobTxForTOC(node, ETHKey, tx, data.Demands, data.Phantoms, data.BulletinFile, data.PubPath, Log)\n\t\t\t} else {\n\t\t\t\tresponse = BobTxForTC(node, ETHKey, tx, data.Demands, data.BulletinFile, data.PubPath, Log)\n\t\t\t}\n\t\tcase TRANSACTION_SUB_MODE_ATOMIC_SWAP:\n\t\t\tresponse = BobTxForTAS(node, ETHKey, tx, data.Demands, data.BulletinFile, data.PubPath, Log)\n\t\tcase TRANSACTION_SUB_MODE_VRF:\n\t\t\tif tx.OT {\n\t\t\t\tresponse = BobTxForTOQ(node, ETHKey, tx, data.KeyName, data.KeyValue, data.PhantomKeyValue, data.BulletinFile, data.PubPath, Log)\n\t\t\t} else {\n\t\t\t\tresponse = BobTxForTQ(node, ETHKey, tx, data.KeyName, data.KeyValue, data.BulletinFile, data.PubPath, Log)\n\t\t\t}\n\t\t}\n\t}\n\tvar resp Response\n\terr = json.Unmarshal([]byte(response), &resp)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to parse response. response=%v, err=%v\", response, err)\n\t\tfmt.Fprintf(w, RESPONSE_FAILED_TO_RESPONSE)\n\t\treturn\n\t}\n\tif resp.Code == \"0\" {\n\t\tplog.Result = LOG_RESULT_SUCCESS\n\t}\n\tLog.Debugf(\"[%v]the transaction finish. 
merkel root=%v, response=%v\", params.SessionID, bulletin.SigmaMKLRoot, response)\n\tfmt.Fprintf(w, response)\n\treturn\n}", "func (tg *TradesGroup) handleMessage(msg []byte) {\n\tvar resp []interface{}\n\tvar e event\n\tvar err error\n\n\tif err := json.Unmarshal(msg, &resp); err != nil {\n\t\treturn\n\t}\n\tchanID := int64Value(resp[0])\n\tif chanID > 0 {\n\t\te, err = tg.get(chanID)\n\t\tif err != nil {\n\t\t\tlog.Println(\"[BITFINEX] Error getting subscriptions: \", chanID, err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\treturn\n\t}\n\n\tif ut, ok := resp[1].(string); ok {\n\t\tif ut == \"hb\" {\n\t\t\treturn\n\t\t}\n\t\tif ut == \"tu\" {\n\t\t\t// handling update\n\t\t\tdataType := \"u\"\n\t\t\tif d, ok := resp[2].([]interface{}); ok {\n\t\t\t\ttrade := tg.mapTrade(e.Symbol, d)\n\t\t\t\tgo tg.publish([]schemas.Trade{trade}, dataType, nil)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\tif snap, ok := resp[1].([]interface{}); ok {\n\t\t// handling snapshot\n\t\tvar trades []schemas.Trade\n\t\tdataType := \"s\"\n\t\tfor _, trade := range snap {\n\t\t\tif d, ok := trade.([]interface{}); ok {\n\t\t\t\ttrades = append(trades, tg.mapTrade(e.Symbol, d))\n\t\t\t}\n\t\t}\n\t\tgo tg.publish(trades, dataType, nil)\n\t\treturn\n\t}\n\treturn\n}", "func (serv *Server) handleBadRequest(conn int) {\n\tvar (\n\t\tlogp = `handleBadRequest`\n\t\tframeClose []byte = NewFrameClose(false, StatusBadRequest, nil)\n\n\t\terr error\n\t)\n\n\terr = Send(conn, frameClose, serv.Options.ReadWriteTimeout)\n\tif err != nil {\n\t\tlog.Printf(`%s: %s`, logp, err)\n\t\tgoto out\n\t}\n\n\t_, err = Recv(conn, serv.Options.ReadWriteTimeout)\n\tif err != nil {\n\t\tlog.Printf(`%s: %s`, logp, err)\n\t}\nout:\n\tserv.ClientRemove(conn)\n}", "func Unmarshal(data []byte, o interface{}) error {\n\tbuf := bytes.NewBuffer(data)\n\tdecoder := gob.NewDecoder(buf)\n\n\terr := decoder.Decode(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}" ]
[ "0.6926054", "0.66555226", "0.6592844", "0.6570786", "0.6549313", "0.64678264", "0.6391482", "0.62011355", "0.6196608", "0.61929303", "0.61607766", "0.59881604", "0.5935874", "0.5881003", "0.58700323", "0.58210737", "0.58197033", "0.57850504", "0.5690908", "0.5690442", "0.5686748", "0.56638813", "0.56572914", "0.5608017", "0.55713826", "0.55372554", "0.5523141", "0.54800683", "0.5429624", "0.5420902", "0.54192984", "0.53919137", "0.53774", "0.5356768", "0.5334358", "0.532737", "0.5301362", "0.5293168", "0.5288366", "0.5273113", "0.52659565", "0.5260154", "0.52507067", "0.5224778", "0.5180355", "0.51612175", "0.5159923", "0.51460814", "0.5133972", "0.5119151", "0.51149905", "0.50819886", "0.50699764", "0.50220436", "0.4986858", "0.49383414", "0.49376664", "0.4926881", "0.48956898", "0.48747617", "0.48611993", "0.4855154", "0.48474643", "0.4822705", "0.48036206", "0.4782992", "0.4777453", "0.4766349", "0.47632957", "0.47583956", "0.47030598", "0.46997923", "0.4695346", "0.46656644", "0.46491122", "0.46442923", "0.46354702", "0.46353072", "0.46335134", "0.46263716", "0.46195844", "0.4612684", "0.46069264", "0.4592263", "0.45920724", "0.45889163", "0.45848116", "0.45788744", "0.45721906", "0.4567531", "0.45659015", "0.45588705", "0.45518538", "0.45466805", "0.45461625", "0.45418277", "0.45380887", "0.45158634", "0.45107564", "0.45102805" ]
0.8036416
0
/ The client and server functions With all this in place, we can now set up client and server functions. The client function connects to the server and sends STRING and GOB requests. The server starts listening for requests and triggers the appropriate handlers. client is called if the app is called with connect=`ip addr`.
func client(ip string) error { // Some test data. Note how GOB even handles maps, slices, and // recursive data structures without problems. testStruct := complexData{ N: 23, S: "string data", M: map[string]int{"one": 1, "two": 2, "three": 3}, P: []byte("abc"), C: &complexData{ N: 256, S: "Recursive structs? Piece of cake!", M: map[string]int{"01": 1, "10": 2, "11": 3}, }, } // Open a connection to the server. rw, err := Open(ip + Port) if err != nil { return errors.Wrap(err, "Client: Failed to open connection to "+ip+Port) } // Send a STRING request. // Send the request name. // Send the data. log.Println("Send the string request.") n, err := rw.WriteString("STRING\n") if err != nil { return errors.Wrap(err, "Could not send the STRING request ("+strconv.Itoa(n)+" bytes written)") } n, err = rw.WriteString("Additional data.\n") if err != nil { return errors.Wrap(err, "Could not send additional STRING data ("+strconv.Itoa(n)+" bytes written)") } log.Println("Flush the buffer.") err = rw.Flush() if err != nil { return errors.Wrap(err, "Flush failed.") } // Read the reply. log.Println("Read the reply.") response, err := rw.ReadString('\n') if err != nil { return errors.Wrap(err, "Client: Failed to read the reply: '"+response+"'") } log.Println("STRING request: got a response:", response) // Send a GOB request. // Create an encoder that directly transmits to `rw`. // Send the request name. // Send the GOB. log.Println("Send a struct as GOB:") log.Printf("Outer complexData struct: \n%#v\n", testStruct) log.Printf("Inner complexData struct: \n%#v\n", testStruct.C) enc := gob.NewEncoder(rw) n, err = rw.WriteString("GOB\n") if err != nil { return errors.Wrap(err, "Could not write GOB data ("+strconv.Itoa(n)+" bytes written)") } err = enc.Encode(testStruct) if err != nil { return errors.Wrapf(err, "Encode failed for struct: %#v", testStruct) } err = rw.Flush() if err != nil { return errors.Wrap(err, "Flush failed.") } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func client(){\n // Connect to the server through tcp/IP.\n\tconnection, err := net.Dial(\"tcp\", (\"127.0.0.1\" + \":\" + \"9090\"))\n\tupdateListener , listErr := net.Dial(\"tcp\", (\"127.0.0.1\" + \":\" + \"9091\"))\n\t// If connection failed crash.\n\tcheck(err)\n\tcheck(listErr)\n\t//Create separate thread for updating client.\n\tgo update(updateListener)\n\t//Configure the language.\n\tvalidateLang()\n\t//Time to log in to the account.\n\tloginSetUp(connection)\n\t//handling requests from usr\n\thandlingRequests(connection)\n}", "func server() error {\r\n\tendpoint := NewEndpoint()\r\n\r\n\t// Add the handle funcs.\r\n\tendpoint.AddHandleFunc(\"STRING\", handleStrings)\r\n\tendpoint.AddHandleFunc(\"GOB\", handleGob)\r\n\r\n\t// Start listening.\r\n\treturn endpoint.Listen()\r\n}", "func (s *server) handleClient(client *client) {\n\tdefer client.closeConn()\n\tsc := s.configStore.Load().(ServerConfig)\n\ts.log().Debugf(\"Handle client [%s], id: %d\", client.RemoteIP, client.ID)\n\n\t// Initial greeting\n\tgreeting := fmt.Sprintf(\"220 %s UMBO SMTP #%d (%d) %s\",\n\t\tsc.Hostname, client.ID,\n\t\ts.clientPool.GetActiveClientsCount(), time.Now().Format(time.RFC3339))\n\n\thelo := fmt.Sprintf(\"250 %s Hello\", sc.Hostname)\n\t// ehlo is a multi-line reply and need additional \\r\\n at the end\n\tehlo := fmt.Sprintf(\"250-%s Hello\\r\\n\", sc.Hostname)\n\n\t// Extended feature advertisements\n\tmessageSize := fmt.Sprintf(\"250-SIZE %d\\r\\n\", sc.MaxSize)\n\tadvertiseAuth := \"250-AUTH LOGIN\\r\\n\"\n\tpipelining := \"250-PIPELINING\\r\\n\"\n\tadvertiseTLS := \"250-STARTTLS\\r\\n\"\n\tadvertiseEnhancedStatusCodes := \"250-ENHANCEDSTATUSCODES\\r\\n\"\n\t// The last line doesn't need \\r\\n since string will be printed as a new line.\n\t// Also, Last line has no dash -\n\thelp := \"250 HELP\"\n\n\tif sc.TLS.AlwaysOn {\n\t\ttlsConfig, ok := s.tlsConfigStore.Load().(*tls.Config)\n\t\tif !ok {\n\t\t\ts.mainlog().Error(\"Failed to load *tls.Config\")\n\t\t} else if 
err := client.upgradeToTLS(tlsConfig); err == nil {\n\t\t\tadvertiseTLS = \"\"\n\t\t} else {\n\t\t\t// server requires TLS, but can't handshake\n\t\t\ts.log().WithError(err).Debugf(\"[%s] Failed TLS handshake\", client.RemoteIP)\n\t\t\tclient.kill()\n\t\t}\n\t}\n\tif !sc.TLS.StartTLSOn {\n\t\t// STARTTLS turned off, don't advertise it\n\t\tadvertiseTLS = \"\"\n\t}\n\tr := response.Canned\n\tloginInfo := LoginInfo{\n\t\tstatus: false,\n\t}\n\tif client.isAlive() {\n\t\terr := client.sendResponse(s.timeout.Load().(time.Duration), greeting)\n\t\tif err != nil {\n\t\t\ts.log().WithError(err).Debug(\"error with response\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tvar firstMessage = true\n\tvar cmdLogs []string\n\n\tfor client.isAlive() {\n\t\tclient.bufin.setLimit(CommandLineMaxLength)\n\t\tinput, err := s.readCommand(client)\n\t\ts.log().Debugf(\"Client sent: %s\", input)\n\t\tif err == io.EOF {\n\t\t\ts.log().WithError(err).Debugf(\"Client closed the connection: %s\", client.RemoteIP)\n\t\t\treturn\n\t\t} else if netErr, ok := err.(net.Error); ok && netErr.Timeout() {\n\t\t\ts.log().WithError(err).Warnf(\"Timeout: %s\", client.RemoteIP)\n\t\t\treturn\n\t\t} else if err == LineLimitExceeded {\n\t\t\terr := client.sendResponse(s.timeout.Load().(time.Duration), r.FailLineTooLong)\n\t\t\tif err != nil {\n\t\t\t\ts.log().WithError(err).Debug(\"error writing response\")\n\t\t\t}\n\t\t\tclient.kill()\n\t\t\treturn\n\t\t} else if err != nil {\n\t\t\ts.log().WithError(err).Debugf(\"Read error: %s\", client.RemoteIP)\n\t\t\tclient.kill()\n\t\t\treturn\n\t\t}\n\t\tif s.isShuttingDown() {\n\t\t\ts.handleShotdown(client, r)\n\t\t\treturn\n\t\t}\n\n\t\tcmdLen := len(input)\n\t\tif cmdLen > CommandVerbMaxLength {\n\t\t\tcmdLen = CommandVerbMaxLength\n\t\t}\n\t\tcmd := bytes.ToUpper(input[:cmdLen])\n\n\t\t// keep SMTP command logs for debug purpose\n\t\tcmdLogs = append(cmdLogs, strings.Split(string(cmd), \" \")[0])\n\n\t\tswitch {\n\t\tcase cmdHELO.match(cmd):\n\t\t\tclient.Helo = 
string(bytes.Trim(input[4:], \" \"))\n\t\t\tclient.resetTransaction()\n\t\t\tclient.Envelope.HelloBeginAt = time.Now().UTC()\n\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), helo)\n\t\tcase cmdEHLO.match(cmd):\n\t\t\tclient.Helo = string(bytes.Trim(input[4:], \" \"))\n\t\t\tclient.resetTransaction()\n\t\t\tclient.Envelope.HelloBeginAt = time.Now().UTC()\n\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), ehlo,\n\t\t\t\tmessageSize,\n\t\t\t\tadvertiseAuth,\n\t\t\t\tpipelining,\n\t\t\t\tadvertiseTLS,\n\t\t\t\tadvertiseEnhancedStatusCodes,\n\t\t\t\thelp,\n\t\t\t)\n\t\tcase cmdHELP.match(cmd):\n\t\t\tquote := response.GetQuote()\n\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), \"214-OK\\r\\n\", quote)\n\t\tcase sc.XClientOn && cmdXCLIENT.match(cmd):\n\t\t\tif toks := bytes.Split(input[8:], []byte{' '}); len(toks) > 0 {\n\t\t\t\tfor i := range toks {\n\t\t\t\t\tif vals := bytes.Split(toks[i], []byte{'='}); len(vals) == 2 {\n\t\t\t\t\t\tif bytes.Compare(vals[1], []byte(\"[UNAVAILABLE]\")) == 0 {\n\t\t\t\t\t\t\t// skip\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif bytes.Compare(vals[0], []byte(\"ADDR\")) == 0 {\n\t\t\t\t\t\t\tclient.RemoteIP = string(vals[1])\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif bytes.Compare(vals[0], []byte(\"HELO\")) == 0 {\n\t\t\t\t\t\t\tclient.Helo = string(vals[1])\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.SuccessMailCmd)\n\t\tcase cmdMAIL.match(cmd):\n\t\t\tif !s.isAuthentication(sc.AuthenticationRequired, loginInfo.status) {\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.FailAuthRequired)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif client.isInTransaction() {\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.FailNestedMailCmd)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tclient.Envelope.MailBeginAt = time.Now().UTC()\n\t\t\tclient.MailFrom, err = client.parsePath([]byte(input[10:]), 
client.parser.MailFrom)\n\t\t\tif err != nil {\n\t\t\t\ts.log().WithError(err).Error(\"MAIL parse error\", \"[\"+string(input[10:])+\"]\")\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), err)\n\t\t\t\tbreak\n\t\t\t} else if client.parser.NullPath {\n\t\t\t\t// bounce has empty from address\n\t\t\t\tclient.MailFrom = mail.Address{}\n\t\t\t}\n\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.SuccessMailCmd)\n\t\tcase cmdRCPT.match(cmd):\n\t\t\tif !s.isAuthentication(sc.AuthenticationRequired, loginInfo.status) {\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.FailAuthRequired)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif len(client.RcptTo) > rfc5321.LimitRecipients {\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.ErrorTooManyRecipients)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tto, err := client.parsePath([]byte(input[8:]), client.parser.RcptTo)\n\t\t\tif err != nil {\n\t\t\t\ts.log().WithError(err).Error(\"RCPT parse error\", \"[\"+string(input[8:])+\"]\")\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), err.Error())\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif !s.allowsHost(to.Host) {\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.ErrorRelayDenied, \" \", to.Host)\n\t\t\t} else {\n\t\t\t\tclient.PushRcpt(to)\n\t\t\t\trcptError := s.backend().ValidateRcpt(client.Envelope)\n\t\t\t\tif rcptError != nil {\n\t\t\t\t\tclient.PopRcpt()\n\t\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.FailRcptCmd, \" \", rcptError.Error())\n\t\t\t\t} else {\n\t\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.SuccessRcptCmd)\n\t\t\t\t}\n\t\t\t}\n\t\tcase cmdRSET.match(cmd):\n\t\t\tclient.resetTransaction()\n\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.SuccessResetCmd)\n\t\tcase cmdVRFY.match(cmd):\n\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.SuccessVerifyCmd)\n\t\tcase cmdNOOP.match(cmd):\n\t\t\terr = 
client.sendResponse(s.timeout.Load().(time.Duration), r.SuccessNoopCmd)\n\t\tcase cmdQUIT.match(cmd):\n\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.SuccessQuitCmd)\n\t\t\tclient.kill()\n\t\tcase cmdDATA.match(cmd):\n\t\t\tif !s.isAuthentication(sc.AuthenticationRequired, loginInfo.status) {\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.FailAuthRequired)\n\t\t\t\tif err != nil {\n\t\t\t\t\ts.log().WithError(err).Debug(\"error writing response\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif len(client.RcptTo) == 0 {\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.FailNoRecipientsDataCmd)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.SuccessDataCmd)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tclient.CmdLogs = cmdLogs\n\t\t\tif firstMessage {\n\t\t\t\tclient.Envelope.ConnectBeginAt = client.ConnectedAt\n\t\t\t}\n\n\t\t\terr = s.handleData(client, sc, r)\n\t\t\tfirstMessage = false\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\tcase cmdAuth.match(cmd):\n\t\t\tif loginInfo.status == true {\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.FailNoIdentityChangesPermitted)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tclient.Envelope.AuthBeginAt = time.Now().UTC()\n\t\t\tcmds := strings.Split(string(input), \" \")\n\t\t\tif len(cmds) > 2 {\n\t\t\t\tl, err := s.handleAuthWithUsername(client, cmds[2], r)\n\t\t\t\tif err != nil {\n\t\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.FailAuthNotAccepted)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tloginInfo = l\n\t\t\t} else {\n\t\t\t\tl, err := s.handleAuth(client, r)\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tloginInfo = l\n\t\t\t}\n\n\t\tcase sc.TLS.StartTLSOn && cmdSTARTTLS.match(cmd):\n\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.SuccessStartTLSCmd)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif 
s.handleStartTLS(client, sc) {\n\t\t\t\tadvertiseTLS = \"\"\n\t\t\t}\n\t\tdefault:\n\t\t\tclient.errors++\n\t\t\tif client.errors >= MaxUnrecognizedCommands {\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.FailMaxUnrecognizedCmd)\n\t\t\t\tclient.kill()\n\t\t\t} else {\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.FailUnrecognizedCmd)\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\ts.log().WithError(err).Debug(\"error with response\")\n\t\t\treturn\n\t\t}\n\t}\n}", "func main() {\n\tlog.Println(\"Starting\")\n\n\t// Client pool\n\tclientPool = sync.Pool{\n\t\tNew: func() interface{} {\n\t\t\tlog.Println(\"New HTTP client\")\n\t\t\tptr := &http.Transport{}\n\t\t\tpclient := &http.Client{Transport: ptr}\n\t\t\treturn pclient\n\t\t},\n\t}\n\n\t// Webserver\n\tif debug {\n\t\thttp.Handle(\"/\", http.FileServer(http.Dir(\".\"))) // Testing only\n\t}\n\thttp.Handle(\"/proxy\", websocket.Handler(socketHandler)) // Handler\n\n\t/*http.HandleFunc(\"/echo\", func(w http.ResponseWriter, req *http.Request) {\n\t\ts := websocket.Server{Handler: websocket.Handler(EchoServer)}\n\t\ts.ServeHTTP(w, req)\n\t})*/\n\n\terr := http.ListenAndServe(fmt.Sprintf(\":%d\", port), nil)\n\tif err != nil {\n\t\tpanic(\"ListenAndServe: \" + err.Error())\n\t}\n\tlog.Println(\"Shutting down\")\n}", "func client(server_ip string, server_port string) {\n\n //Leitura da mensagem (arquivo) do terminal\n //ioutil.ReadAll lê até chegar a um EOF (end of file)\n str, err := ioutil.ReadAll(os.Stdin)\n checkErrorClient(err)\n\n //Tradução das strings recebidas (server_ip e server_port) para um endereço TCP\n addr, err := net.ResolveTCPAddr(\"tcp\", fmt.Sprintf(\"%s:%s\", server_ip, server_port))\n checkErrorClient(err)\n\n //Criação do socket e estabelecimento de conexão com o servidor\n conn, err := net.DialTCP(\"tcp\", nil, addr)\n checkErrorClient(err)\n\n //Fecha a conexão após a função finalizar sua execução\n defer conn.Close()\n\n //Enviando a mensagem do 
cliente para o servidor através da conexão estabelecida\n conn.Write(str)\n\n os.Exit(0)\n}", "func ClientListen(cnnl chan string) {\n\thttp.HandleFunc(\"/\", HttpHandler)\n\thttp.ListenAndServe(\":8080\", nil)\n}", "func main() {\n\tdefer gock.Off()\n\n\tgock.New(\"http://httpbin.org\").\n\t\tGet(\"/*\").\n\t\tReply(204).\n\t\tSetHeader(\"Server\", \"gock\")\n\n\tcli := gentleman.New()\n\n\tcli.UseHandler(\"before dial\", func(ctx *context.Context, h context.Handler) {\n\t\tgock.InterceptClient(ctx.Client)\n\t\th.Next(ctx)\n\t})\n\n\tres, err := cli.Request().URL(\"http://httpbin.org/get\").Send()\n\tif err != nil {\n\t\tfmt.Errorf(\"Error: %s\", err)\n\t}\n\n\tfmt.Printf(\"Status: %d\\n\", res.StatusCode)\n\tfmt.Printf(\"Server header: %s\\n\", res.Header.Get(\"Server\"))\n}", "func main() {\n\tregisterHandlers()\n\tappChatroom.Run() // run the chatroom app\n\t// start the server\n\tch := make(chan bool) // a channel used to get errors\n\tdefer close(ch)\n\tgo startHTTPServer(ch)\n\tgo startHTTPSServer(ch)\n\t<-ch\n\t<-ch\n\tlog.Fatal(\"Servers stopped with errors.\")\n}", "func http_server(w http.ResponseWriter, r *http.Request) {\n\tinstance_name := os.Getenv(\"NAME\")\n\tfmt.Println(\">> CLIENT: Manejando peticion HTTP CLIENTE: \", instance_name)\n\t// Comprobamos que el path sea exactamente '/' sin parámetros\n\tif r.URL.Path != \"/\" {\n\t\thttp.Error(w, \"404 not found.\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\t// Comprobamos el tipo de peticion HTTP\n\tswitch r.Method {\n\t// Devolver una página sencilla con una forma html para enviar un mensaje\n\tcase \"GET\":\n\t\tfmt.Println(\">> CLIENT: Devolviendo form.html\")\n\t\t// Leer y devolver el archivo form.html contenido en la carpeta del proyecto\n\t\thttp.ServeFile(w, r, \"form.html\")\n\n\t// Publicar un mensaje a Google PubSub\n\tcase \"POST\":\n\t\tfmt.Println(\">> CLIENT: Iniciando envio de mensajes\")\n\t\t// Si existe un error con la forma enviada entonces no seguir\n\t\tif err := r.ParseForm(); 
err != nil {\n\t\t\tfmt.Fprintf(w, \"ParseForm() err: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\t// Obtener el nombre enviado desde la forma\n\t\tname := r.FormValue(\"name\")\n\t\t// Obtener el mensaje enviado desde la forma\n\t\tmsg := r.FormValue(\"msg\") + \"desde \" + instance_name\n\n\t\t// Publicar el mensaje, convertimos el objeto JSON a String\n\t\tsendMessage(name, msg)\n\n\t\t// Enviamos informacion de vuelta, indicando que fue generada la peticion\n\t\tfmt.Fprintf(w, \"¡Mensaje Publicado!\\n\")\n\t\tfmt.Fprintf(w, \"Name = %s\\n\", name)\n\t\tfmt.Fprintf(w, \"Message = %s\\n\", msg)\n\n\t// Cualquier otro metodo no sera soportado\n\tdefault:\n\t\tfmt.Fprintf(w, \"Metodo %s no soportado \\n\", r.Method)\n\t\treturn\n\t}\n}", "func SocketClient(message []string, addr string) {\n\t//addr := strings.Join([]string{IP, strconv.Itoa(port)}, \":\")\n\tconn, err := net.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\tlog.Printf(\"Router %s: Ip address could not be resolved. The message has reached the last node in the chain\", selectedRouter.Name)\n\t\tlog.Printf(\"Router %s: The message is :%s\", selectedRouter.Name, addr)\n\t\t//log.Fatalln(err)\n\t\t//os.Exit(1)\n\t\treturn\n\t}\n\n\tdefer conn.Close()\n\tencoder := gob.NewEncoder(conn)\n\t// a := []byte(\"Hell\")\n\t// b := []byte(\"add\")\n\n\tp := &onions.Message{message}\n\tencoder.Encode(p)\n\n\t//conn.Write([]byte(message))\n\t//conn.Write([]byte(StopCharacter))\n\t//log.Printf(\"Send: %s\", message)\n\n\t// buff := make([]byte, 1024)\n\t// n, _ := conn.Read(buff)\n\t// log.Printf(\"Receive: %s\", buff[:n])\n\n}", "func main() {\n\tlog.SetFlags(log.LstdFlags | log.Lmicroseconds)\n\tlog.Printf(\"My peers are %v\", os.Getenv(\"PEERS\"))\n\tlog.Printf(\"traffic is %v\", os.Getenv(\"TRAFFIC\"))\n\tpeers := []*node.Peer{}\n\tfor _, s := range strings.Split(os.Getenv(\"PEERS\"), \" \") {\n\t\tp := &node.Peer{\n\t\t\tHost: fmt.Sprintf(\"node-%s\", s),\n\t\t\tPort: s}\n\t\tpeers = append(peers, p)\n\t}\n\n\n\tvar 
traffic = false\n\tif os.Getenv(\"TRAFFIC\") == \"1\" {\n\t\ttraffic = true\n\t}\n\n\tclientNode = client.NewClient(fmt.Sprintf(\"node-%s\", os.Getenv(\"PORT\")), os.Getenv(\"PORT\"), peers, uiChannel, nodeChannel, traffic)\n\n\terr := clientNode.SetupRPC()\n\tif err != nil {\n\t\tlog.Fatal(\"RPC setup error:\", err)\n\t}\n\terr = clientNode.Peer()\n\tif err != nil {\n\t\tlog.Fatal(\"Peering error:\", err)\n\t}\n\n\tfs := http.FileServer(http.Dir(\"../public\"))\n\thttp.Handle(\"/\", fs)\n\n\thttp.HandleFunc(\"/ws\", handleConnections)\n\thttp.HandleFunc(\"/disconnect\", handleDisconnect)\n\thttp.HandleFunc(\"/connect\", handleConnect)\n\thttp.HandleFunc(\"/getID\", handleGetID)\n\tgo handleMessages()\n\n\tgo func() {\n\t\terr := http.ListenAndServe(HttpPort, nil)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"ListenAndServe: \", err)\n\t\t}\n\t}()\n\n\tif traffic == true{\n\t\tclientNode.Start()\n\t}\n\n\tfor {\n\t\ttime.Sleep(time.Hour)\n\t}\n}", "func ClientHandler( conn net.Conn, server *MUDServer ) {\n addr := conn.RemoteAddr()\n fmt.Printf( \"New client connected from %s\\n\", addr )\n newClient := NewClient( conn, server, server.clientList )\n go ClientSender( newClient )\n go ClientReader( newClient, server )\n server.clientList.PushBack( newClient )\n}", "func client(wsUri string) {\n\n\ttlsConfig := tls.Config{}\n\ttlsConfig.InsecureSkipVerify = true\n\tdialer := websocket.Dialer{TLSClientConfig: &tlsConfig}\n\trequestHeader := http.Header{}\n\trequestHeader.Set(\"origin\", \"http://localhost/\")\n\tconn, _, err := dialer.Dial(wsUri, requestHeader)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Printf(\"The gowebsock client is connected to %s\\n\", wsUri)\n\n\treaderResultChan := make(chan readerResult)\n\tgo reader(conn, readerResultChan)\n\n\twriterCommandChan := make(chan writerCommand)\n\tgo writer(conn, writerCommandChan)\n\n\tstdinReaderChan := make(chan string)\n\tgo stdinReader(stdinReaderChan)\n\n\tfor {\n\t\tselect {\n\t\tcase stdinMessage := 
<-stdinReaderChan:\n\t\t\tvar messageType int\n\t\t\tdata := \"\"\n\t\t\tswitch stdinMessage {\n\t\t\tcase \"close\":\n\t\t\t\tmessageType = 8\n\t\t\tcase \"ping\":\n\t\t\t\tmessageType = 9\n\t\t\tcase \"pong\":\n\t\t\t\tmessageType = 10\n\t\t\tdefault:\n\t\t\t\tmessageType = 1\n\t\t\t\tdata = stdinMessage\n\t\t\t}\n\t\t\twriterCommandChan <- writerCommand{false, messageType, []byte(data)}\n\t\tcase readerResult := <-readerResultChan:\n\t\t\tif readerResult.err == nil {\n\t\t\t\toutput := \"Server: type = \" + messageTypeString(readerResult.messageType) + \", data = \" + string(readerResult.data) + \"\\n\"\n\t\t\t\tfmt.Printf(output)\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"%s\\n\", readerResult.err)\n\t\t\t\tos.Exit(0)\n\t\t\t}\n\t\t}\n\t}\n}", "func main() {\n\t\t//initialize the roomlist and start the nickname service\n\t\tInitialize_irc() //block until everything is initialized\n\t\tgo Nick_service()\n\t\t\n\t\t//start listening over the network\n\t\tservice := \"0.0.0.0:6667\"\n tcpAddr, err := net.ResolveTCPAddr(\"tcp\", service)\n checkError(err)\n listener, err := net.ListenTCP(\"tcp\", tcpAddr)\n checkError(err)\n fmt.Println(\"Server listerning\")\n //For each new connection, create a handler and set a new, higher ID\n for id:=0; true; id++ {\n conn, err := listener.Accept()\n if err == nil {\n fmt.Println(\"Connection OK!\");\n go Handle_conn(conn, id);\n } else {\n fmt.Println(\"Listener error. 
Sad day :-(\\n\");\n conn.Close()\n break\n }\n }\n}", "func listenClientRPCs() {\n\tkvServer := rpc.NewServer()\n\tkv := new(KVServer)\n\tkvServer.Register(kv)\n\tl, err := net.Listen(\"tcp\", listenClientIpPort)\n\tcheckError(\"Error in listenClientRPCs(), net.Listen()\", err, true)\n\tfmt.Println(\"Listening for client RPC calls on:\", listenClientIpPort)\n\tfor {\n\t\tconn, err := l.Accept()\n\t\tcheckError(\"Error in listenClientRPCs(), l.Accept()\", err, true)\n\t\tkvServer.ServeConn(conn)\n\t}\n}", "func (server *Server) httpHandler(w http.ResponseWriter, r *http.Request) {\n\tvar err error\n\n\t// First validate the HTTP request.\n\tif r.Method != \"GET\" {\n\t\terr = errors.New(\"Method Not Allowed\")\n\t\thttp.Error(w, err.Error(), 405)\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tif r.Header.Get(\"Origin\") != \"http://\"+r.Host {\n\t\terr = errors.New(\"Origin Not Allowed\")\n\t\thttp.Error(w, err.Error(), 403)\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\t// Everything checks out; upgrade to WebSocket protocol.\n\tvar ws *websocket.Conn\n\tws, err = websocket.Upgrade(w, r.Header, nil, 1024, 1024)\n\n\t// Handle a handshake error or a general error.\n\tif _, ok := err.(websocket.HandshakeError); ok {\n\t\terr = errors.New(\"Not a websocket handshake\")\n\t\thttp.Error(w, err.Error(), 400)\n\t\tlog.Println(err)\n\t\treturn\n\t} else if err != nil {\n\t\terr = errors.New(\"Internal Server Error\")\n\t\thttp.Error(w, err.Error(), 500)\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\t// No errors, register a new client.\n\tclient := &Client{}\n\tclient.RemoteAddr = r.RemoteAddr\n\tclient.Header = r.Header\n\tclient.Websocket = ws\n\n\tserver.Clients = append(server.Clients, client)\n\tserver.NumClients++\n\n\t// Create a World for the client and register it with the Server too.\n\tworld := life.New(50, 50)\n\tclient.World = world\n\n\tserver.Worlds = append(server.Worlds, world)\n\tserver.NumWorlds++\n\n\terr = ws.WriteMessage(websocket.OpText, []byte(\"Welcome to 
game of life!\"))\n\n\t// Start the session listener.\n\tgo server.ListenToClient(client)\n\n\t// TODO: This should be moved into methods that modify NumClient and NumWorlds.\n\tlog.Println(server.NumClients, \"clients and\", server.NumWorlds, \"worlds\")\n}", "func mainClient(ctx *cli.Context) error {\n\tcheckClientSyntax(ctx)\n\taddr := \":\" + strconv.Itoa(warpServerDefaultPort)\n\tswitch ctx.NArg() {\n\tcase 1:\n\t\taddr = ctx.Args()[0]\n\t\tif !strings.Contains(addr, \":\") {\n\t\t\taddr += \":\" + strconv.Itoa(warpServerDefaultPort)\n\t\t}\n\tcase 0:\n\tdefault:\n\t\tfatal(errInvalidArgument(), \"Too many parameters\")\n\t}\n\thttp.HandleFunc(\"/ws\", serveWs)\n\tconsole.Infoln(\"Listening on\", addr)\n\tfatalIf(probe.NewError(http.ListenAndServe(addr, nil)), \"Unable to start client\")\n\treturn nil\n}", "func main() {\n\n fmt.Println(\"Launching server...\")\n conn, err := net.Dial(\"tcp\", \"127.0.0.1:3001\")\n if err != nil {\n\t fmt.Println(\"error\")\n }\n // listen on all interfaces\n// ln, _ := net.Listen(\"tcp\", \":3001\")\n\n // accept connection on port\n //conn, _ := ln.Accept()\n // buf := make([]byte, 0, 8192) \n //tmp := make([]byte, 8192);\n // run loop forever (or until ctrl-c)\n for {\n // will listen for message to process ending in newline (\\n)\n // message, _ := bufio.NewReader(conn).ReadString('\\n')\n mess, err1 := ioutil.ReadAll(conn)\n if err1 != nil {\n fmt.Println(\"error\")\n }\n\n \tfmt.Println(reflect.TypeOf(mess))\n defer conn.Close()\n domain := \"test\"\n fmt.Fprintf(conn, \"%s\", domain);\n // return ioutil.ReadAll(conn)\n // buf = append(buf, tmp[:conn])\n // fmt.Println(\"total size:\", len(buf))\n break\n // output message received\n //fmt.Print(\"Message Received:\", string(message))\n // sample process for string received\n //newmessage := strings.ToUpper(message)\n // send new string back to client\n // conn.Write([]byte(newmessage + \"\\n\"))\n }\n}", "func MainClient(mon []MonContent) {\n\tfmt.Printf(\"time-init==%s 
= %s\\n\", strconv.FormatInt(time.Now().UnixNano()/int64(time.Millisecond), 10), time.RFC3339)\n\tcheckLocal, checkLocalPort := GetOutboundIP()\n\tfmt.Printf(\"LOCAL::%s\\n\", checkLocal.String()+\":\"+strconv.Itoa(checkLocalPort))\n\tif len(mon) > 0 {\n\t\tfor i := range mon {\n\t\t\tconn := Conn{IP: mon[i].IP, Port: mon[i].Port, Sq: mon[i].Send, Rq: mon[i].Recv}\n\t\t\t// for {\n\t\t\t// \tmess := <-sq\n\t\t\t// \tif len(mess) > 0 {\n\t\t\t// \t\tfmt.Printf(\"mosters::%s\\n\", mess)\n\t\t\t// \t\trq <- \"verywell\"\n\t\t\t// \t}\n\t\t\t// \ttime.Sleep(time.Millisecond * 300)\n\t\t\t// }\n\t\t\tconnection(conn)\n\t\t}\n\t}\n\n}", "func main() {\n\tif runtime.GOOS != \"linux\" {\n\t\tfmt.Println(aurora.Red(\"Sorry mate, this is a Linux app\"))\n\t\treturn\n\t}\n\n\te := echo.New()\n\te.HideBanner = true\n\te.Debug = true\n\te.Server.ReadTimeout = 1 * time.Minute\n\te.Server.WriteTimeout = 1 * time.Minute\n\n\tport := \"10591\"\n\tif os.Getenv(\"PORT\") != \"\" {\n\t\tport = os.Getenv(\"PORT\")\n\t}\n\n\t// Middleware\n\te.Use(middleware.Logger())\n\te.Use(middleware.Recover())\n\te.Use(middleware.CORS())\n\n\te.GET(\"info\", getInfo)\n\te.POST(\"upgrade\", doUpgrade)\n\n\t// Start the service\n\te.Logger.Fatal(e.Start(\":\" + port))\n}", "func runClient(address string, port int) {\n\t// connect via smc\n\tconn, err := smcDial(address, port)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer conn.Close()\n\tfmt.Printf(\"Connected to server\\n\")\n\n\t// sent text, read reply an\n\ttext := \"Hello, world\\n\"\n\tfmt.Fprintf(conn, text)\n\tfmt.Printf(\"Sent %d bytes to server: %s\", len(text), text)\n\treply, err := bufio.NewReader(conn).ReadString('\\n')\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"Read %d bytes from server: %s\", len(reply), reply)\n}", "func main() {\n\terr := clientMain()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func initClientConn(netType string, addr string, port string) {\n\tConn, err := net.Dial(\"tcp\", 
addr+\":\"+port)\n\tif err != nil {\n\t\tmisc.Err(\"cannot connect to server\")\n\t}\n\tmisc.Info(\"Client Connected\")\n\tgo handleClientInput(Conn)\n\thandleClientConn(Conn)\n}", "func main() {\n\n\tvar logger *simple.Logger\n\n\tif os.Getenv(\"LOG_LEVEL\") == \"\" {\n\t\tlogger = &simple.Logger{Level: \"info\"}\n\t} else {\n\t\tlogger = &simple.Logger{Level: os.Getenv(\"LOG_LEVEL\")}\n\t}\n\terr := validator.ValidateEnvars(logger)\n\tif err != nil {\n\t\tos.Exit(-1)\n\t}\n\n\t// setup our client connectors (message producer)\n\tconn := connectors.NewClientConnectors(logger)\n\n\t// call the start server function\n\tlogger.Info(\"Starting server on port \" + os.Getenv(\"SERVER_PORT\"))\n\tstartHttpServer(conn)\n}", "func ClientInit(conf config.AppConfig, parentDriver *driver.DB, rep *handlers.DBRepo) {\n\t// conf is the application config, from goBlender\n\tapp = conf\n\trepo = rep\n\n\t// If we have additional databases (external to this application) we set the connection here.\n\t// The connection is specified in goBlender preferences.\n\t//conn := app.AlternateConnection\n\n\t// loggers\n\tinfoLog = app.InfoLog\n\terrorLog = app.ErrorLog\n\n\t// In case we need it, we get the database connection from goBlender and save it,\n\tparentDB = parentDriver\n\n\t// We can access handlers from goBlender, but need to initialize them first.\n\tif app.Database == \"postgresql\" {\n\t\thandlers.NewPostgresqlHandlers(parentDB, app.ServerName, app.InProduction, &app)\n\t} else {\n\t\thandlers.NewMysqlHandlers(parentDB, app.ServerName, app.InProduction, &app)\n\t}\n\n\t// Set a different template for home page, if needed.\n\t//repo.SetHomePageTemplate(\"client-sample.page.tmpl\")\n\n\t// Set a different template for inside pages, if needed.\n\t//repo.SetDefaultPageTemplate(\"client-sample.page.tmpl\")\n\n\t// Create client middleware\n\tNewClientMiddleware(app)\n}", "func InitializeServer(host string) (server *network.WebServer) 
{\n\trand.Seed(time.Now().UTC().UnixNano())\n\t// Make sure folders exist that we want:\n\tif err := ensureBindDirs(); err != nil {\n\t\tLog.Error(\"Failed to have home working dir to put the files into at ~/Desktop/bind, err: \", err)\n\t} else {\n\t\tLog.Info(\"bind dirs ensured!\")\n\t}\n\tif os.Args[0] != \"d\" { //development mode\n\t\tgin.SetMode(gin.ReleaseMode)\n\t}\n\tr := gin.New()\n\tr.LoadHTMLGlob(\"public/tmpl/*.html\")\n\tr.StaticFS(\"/videos\", http.Dir(basePath+\"/videos\"))\n\tr.StaticFS(\"/frames\", http.Dir(basePath+\"/frames\"))\n\tr.Static(\"/public\", \"./public\")\n\tr.GET(\"/\", getIndex)\n\tr.POST(\"/g\", postIndex)\n\tr.GET(\"/g\", getIndex)\n\tr.GET(\"/about\", getAbout)\n\tr.GET(\"/jobs\", getJobs)\n\tr.GET(\"/code\", getCode)\n\tmel = melody.New() // melody middleware\n\n\t// websocket route\n\tr.GET(\"/ws\",func(ctx *gin.Context){\n\t\t// handle request with Melody\n\t\tmel.HandleRequest(ctx.Writer,ctx.Request)\n\t})\n\n\t// Melody message handler\n\tmel.HandleMessage(func(ses *melody.Session,msg []byte){\n\t\t// broadcast message to connected sockets\n\t\tmel.Broadcast(msg)\n\t})\n\n\n\tr.GET(\"/openframes\", func(c *gin.Context) {\n\t\topen.Run(basePath + \"/frames\")\n\t})\n\tr.GET(\"/openvideos\", func(c *gin.Context) {\n\t\topen.Run(basePath + \"/videos\")\n\t})\n\tr.GET(\"/openlogs\", func(c *gin.Context) {\n\t\topen.Run(basePath + \"/logs\")\n\t})\n\tr.GET(\"/toggleClipYt\", func(c *gin.Context) {\n\t\topen.Run(basePath + \"/logs\")\n\t})\n\t// go requests(mel)\n\t// go jobUpdates(mel)\n\n\treturn network.InitializeWebServer(r, host)\n}", "func main() {\n\t// Construct a new \"server\"; its methods are HTTP endpoints.\n\tserver, err := newServer()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Construct a router which binds URLs + HTTP verbs to methods of server.\n\trouter := httprouter.New()\n\trouter.POST(\"/v1/events\", server.createEvent)\n\trouter.GET(\"/v1/ltv\", server.getLTV)\n\n\t// Listen and serve HTTP traffic on 
port 3000.\n\tif err := http.ListenAndServe(\":3000\", router); err != nil {\n\t\tpanic(err)\n\t}\n}", "func main() {\n\tvar addr string\n\tflag.StringVar(&addr, \"e\", \":4040\", \"service address endpoint\")\n\tflag.Parse()\n\n\t// create local addr for socket\n\tladdr, err := net.ResolveTCPAddr(\"tcp\", addr)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\t// announce service using ListenTCP\n\t// which a TCPListener.\n\tl, err := net.ListenTCP(\"tcp\", laddr)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\tdefer l.Close()\n\tfmt.Println(\"listening at (tcp)\", laddr.String())\n\n\t// req/response loop\n\tfor {\n\t\t// use TCPListener to block and wait for TCP\n\t\t// connection request using AcceptTCP which creates a TCPConn\n\t\tconn, err := l.AcceptTCP()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"failed to accept conn:\", err)\n\t\t\tconn.Close()\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Println(\"connected to: \", conn.RemoteAddr())\n\n\t\tgo handleConnection(conn)\n\t}\n}", "func main() {\n\n\tconst apiName = \"handle1\"\n\ttStr := `_` + I.ToS(time.Now().UnixNano())\n\tif len(os.Args) > 1 {\n\t\tapp := fiber.New()\n\n\t\tmode := os.Args[1]\n\t\tswitch mode {\n\t\tcase `apiserver`:\n\t\t\tapp.Get(\"/\", func(c *fiber.Ctx) error {\n\t\t\t\treturn c.SendString(I.ToS(rand.Int63()) + tStr)\n\t\t\t})\n\n\t\tcase `apiproxy`:\n\t\t\t// connect as request on request-reply\n\n\t\t\tconst N = 8\n\t\t\tcounter := uint32(0)\n\t\t\tncs := [N]*nats.Conn{}\n\t\t\tmutex := sync.Mutex{}\n\t\t\tconn := func() *nats.Conn {\n\t\t\t\tidx := atomic.AddUint32(&counter, 1) % N\n\t\t\t\tnc := ncs[idx]\n\t\t\t\tif nc != nil {\n\t\t\t\t\treturn nc\n\t\t\t\t}\n\t\t\t\tmutex.Lock()\n\t\t\t\tdefer mutex.Unlock()\n\t\t\t\tif ncs[idx] != nil {\n\t\t\t\t\treturn ncs[idx]\n\t\t\t\t}\n\t\t\t\tnc, err := nats.Connect(\"127.0.0.1\")\n\t\t\t\tL.PanicIf(err, `nats.Connect`)\n\t\t\t\tncs[idx] = nc\n\t\t\t\treturn nc\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tfor _, nc := 
range ncs {\n\t\t\t\t\tif nc != nil {\n\t\t\t\t\t\tnc.Close()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\t// handler\n\t\t\tapp.Get(\"/\", func(c *fiber.Ctx) error {\n\t\t\t\tmsg, err := conn().Request(apiName, []byte(I.ToS(rand.Int63())), time.Second)\n\t\t\t\tif L.IsError(err, `nc.Request`) {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t// Use the response\n\t\t\t\treturn c.SendString(string(msg.Data))\n\t\t\t})\n\t\tdefault:\n\t\t}\n\n\t\tlog.Println(mode + ` started ` + tStr)\n\t\tlog.Fatal(app.Listen(\":3000\"))\n\n\t} else {\n\t\t// worker\n\t\tlog.Println(`worker started ` + tStr)\n\n\t\tnc, err := nats.Connect(\"127.0.0.1\")\n\t\tL.PanicIf(err, `nats.Connect`)\n\t\tdefer nc.Close()\n\n\t\tconst queueName = `myqueue`\n\n\t\t//// connect as reply on request-reply (sync)\n\t\t//sub, err := nc.QueueSubscribeSync(apiName, queueName)\n\t\t//L.PanicIf(err, `nc.SubscribeSync`)\n\t\t//\n\t\t////Wait for a message\n\t\t//for {\n\t\t//\tmsg, err := sub.NextMsgWithContext(context.Background())\n\t\t//\tL.PanicIf(err, `sub.NextMsgWithContext`)\n\t\t//\n\t\t//\terr = msg.Respond([]byte(string(msg.Data) + tStr))\n\t\t//\tL.PanicIf(err, `msg.Respond`)\n\t\t//}\n\n\t\t//// channel (async) -- error slow consumer\n\t\t//ch := make(chan *nats.Msg, 1)\n\t\t//_, err = nc.ChanQueueSubscribe(apiName, queueName, ch)\n\t\t//L.PanicIf(err, `nc.ChanSubscribe`)\n\t\t//for {\n\t\t//\tselect {\n\t\t//\tcase msg := <-ch:\n\t\t//\t\tL.PanicIf(msg.Respond([]byte(string(msg.Data)+tStr)), `msg.Respond`)\n\t\t//\t}\n\t\t//}\n\n\t\t// callback (async)\n\t\t_, err = nc.QueueSubscribe(apiName, queueName, func(msg *nats.Msg) {\n\t\t\tres := string(msg.Data) + tStr\n\t\t\tL.PanicIf(msg.Respond([]byte(res)), `msg.Respond`)\n\t\t})\n\n\t\tvar line string\n\t\tfmt.Scanln(&line) // wait for input so not exit\n\t}\n}", "func client(serverIp string, serverPort string) {\n //TCPAddr\n tcpAddr, err := net.ResolveTCPAddr(\"tcp\", serverIp + serverPort)\n checkErrorClient(err)\n\n //TCPConn\n conn, err := 
net.DialTCP(\"tcp\", nil, tcpAddr)\n checkErrorClient(err)\n reader := bufio.NewReader(os.Stdin)\n buf := make([]byte, SendBufferSize)\n\n for {\n readTotal, err := reader.Read(buf)\n if err != nil {\n if err != io.EOF {\n checkErrorClient(err)\n }\n break\n }\n _, err = conn.Write(buf[:readTotal])\n checkErrorClient(err)\n }\n\n checkErrorClient(err)\n os.Exit(0)\n}", "func (s *Server) HandleClient() {\n\tdefer s.Close()\n\n\tdb := DB.NewDataStore()\n\tdefer db.Close()\n\n\tvar msg message.Message\n\ts.r.Decode(&msg)\n\n\tif msg.CMD == command.Reserved {\n\t\treturn\n\t}\n\n\tswitch msg.CMD {\n\tcase command.Register:\n\t\tif msg.ULID != \"\" {\n\t\t\tlog.Infof(\"[%s] Processing Register command\", msg.ULID)\n\t\t} else {\n\t\t\tlog.Infof(\"[%s] Processing Register command\", s.conn.RemoteAddr())\n\t\t}\n\t\ts.processRegister(msg, db)\n\tcase command.Ping:\n\t\tif msg.ULID != \"\" {\n\t\t\tlog.Infof(\"[%s] Processing Ping command\", msg.ULID)\n\t\t} else {\n\t\t\tlog.Infof(\"[%s] Processing Ping command\", s.conn.RemoteAddr())\n\t\t}\n\t\ts.processPing(msg, db)\n\n\tcase command.ScanFile:\n\t\tif msg.ULID != \"\" {\n\t\t\tlog.Infof(\"[%s] Processing ScanFile command\", msg.ULID)\n\t\t} else {\n\t\t\tlog.Infof(\"[%s] Processing ScanFile command\", s.conn.RemoteAddr())\n\t\t}\n\t\ts.processScanFile(msg, db)\n\n\tcase command.ScanDir:\n\t\tif msg.ULID != \"\" {\n\t\t\tlog.Infof(\"[%s] Processing ScanDir command\", msg.ULID)\n\t\t} else {\n\t\t\tlog.Infof(\"[%s] Processing ScanDir command\", s.conn.RemoteAddr())\n\t\t}\n\t\ts.processScanDir(msg, db)\n\n\tcase command.ScanPID:\n\t\tif msg.ULID != \"\" {\n\t\t\tlog.Infof(\"[%s] Processing ScanPID command\", msg.ULID)\n\t\t} else {\n\t\t\tlog.Infof(\"[%s] Processing ScanPID command\", s.conn.RemoteAddr())\n\t\t}\n\t\ts.processScanPID(msg, db)\n\t}\n}", "func main() {\n\n kingpin.MustParse(app.Parse(os.Args[1:]))\n\n\tvar port int = *appPort\n\tvar kfilepath string = *appPrivatekeyfile\n var kinfopath string = 
*appInfo\n\n fmt.Printf(\"Sigserv3 - listening on port %d.\\n\", port)\n\n suite := ed25519.NewAES128SHA256Ed25519(true) \n kv, err := crypto.SchnorrLoadKeypair(kfilepath, suite)\n if err != nil {\n \tfmt.Println(\"Error \" + err.Error())\n \treturn\n }\n\n info, err := LoadInfo(kinfopath)\n if err != nil {\n fmt.Println(\"Error \" + err.Error())\n return\n }\n\n // I don't know if there's a way to \n // do std::bind-like behaviour in GO.\n // for C++ what I'd do is pretty simple: \n // newfunc := std::bind(&func, args to bind)\n var signBlindImpl connectionhandler = func(conn net.Conn) {\n signBlindlySchnorr(conn, suite, kv, info)\n }\n serve(port, signBlindImpl)\n}", "func mainClient() {\n\tfmt.Println(\"Starting Tramservice client\")\n\tclient := new(tramservice.Client)\n\terr := client.Init(ServerAddress)\n\n\tif err != nil {\n\t\tfmt.Println(\"Error connecting to server.\")\n\t}\n}", "func main() {\n\tflag.Parse()\n\tfmt.Println(\"start the program\")\n\t// fmt.Println(*serverAddr)\n\n\tfor {\n\t\t// start the app\n\t\twaitc := make(chan struct{}) // a wait lock\n\n\t\t// start the server thread\n\t\tgo func() {\n\t\t\tfmt.Println(\"start the server\")\n\t\t\tserver.InitFileServer()\n\t\t\tdefer close(waitc)\n\t\t}()\n\n\t\t// start the client thread\n\t\t// go func() {\n\t\t// \tfor {\n\t\t// \t\tmsg := <-msgc // a message to send\n\t\t// \t\tclient.InitChatClient(*myTitle, serverAddr)\n\n\t\t// \t\terr := client.Chat(msg)\n\t\t// \t\tif err != nil {\n\t\t// \t\t\t// restart the client\n\t\t// \t\t\tfmt.Printf(\"send Err: %v\", err)\n\t\t// \t\t}\n\t\t// \t}\n\t\t// }()\n\n\t\t// start the input thread\n\t\t// go input()\n\n\t\t<-waitc\n\t\t// finished in this round restart the app\n\t\tfmt.Println(\"restart the app\")\n\t}\n}", "func main() {\n\n\tgo EdgeMapper()\n\tgo mapClients()\n\tgo handleDb()\n\trouter := mux.NewRouter()\n\trouter.HandleFunc(\"/\", simpleHandler)\n\trouter.HandleFunc(\"/webSocket\", handleClientSocket)\n\trouter.HandleFunc(\"/ws\", 
handleEdgeSocket)\n\trouter.PathPrefix(\"/\").Handler(http.FileServer(http.Dir(\"./\")))\n\n\terr := http.ListenAndServe(\":4000\", router)\n\n\t//\terr := http.ListenAndServe(\":\"+os.Getenv(\"PORT\"), router)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func main() {\n\ttrn := terrain.New()\n\n\tctx := message.NewClientContext()\n\tctx.Terrain = trn\n\tctx.Clock = clock.NewService()\n\n\thdlr := handler.New(ctx)\n\n\ttlsConfig, err := crypto.GetClientTlsConfig()\n\tif err != nil {\n\t\tlog.Println(\"WARN: could not load TLS config\")\n\t}\n\n\tvar c net.Conn\n\tif tlsConfig != nil {\n\t\tc, err = tls.Dial(\"tcp\", \"127.0.0.1:9999\", tlsConfig)\n\t} else {\n\t\tc, err = net.Dial(\"tcp\", \"127.0.0.1:9999\")\n\t}\n\n\tif err != nil {\n\t\tpanic(\"Dial: \" + err.Error())\n\t}\n\n\treader := bufio.NewReader(c)\n\tclientIO := &message.IO{\n\t\tReader: reader,\n\t\tWriter: c,\n\t}\n\n\tgo hdlr.Listen(clientIO)\n\n\t/*err = builder.SendPing(clientIO.Writer)\n\tif err != nil {\n\t\tpanic(\"SendPing: \" + err.Error())\n\t}*/\n\n\terr = builder.SendLogin(clientIO.Writer, \"root\", \"root\")\n\tif err != nil {\n\t\tpanic(\"SendLogin: \" + err.Error())\n\t}\n\n\tfor {\n\t}\n\n\t/*err = builder.SendChatSend(clientIO.Writer, \"Hello World!\")\n\tif err != nil {\n\t\tpanic(\"SendChatSend: \" + err.Error())\n\t}*/\n\n\t/*err = c.Close()\n\tif err != nil {\n\t\tpanic(\"Close: \" + err.Error())\n\t}*/\n}", "func main() {\n\t// load config and construct the server shared environment\n\tcfg := common.LoadConfig()\n\tlog := services.NewLogger(cfg)\n\n\t// create repository\n\trepo, err := repository.NewRepository(cfg, log)\n\tif err != nil {\n\t\tlog.Fatalf(\"Can not create application data repository. 
Terminating!\")\n\t}\n\n\t// setup GraphQL API handler\n\thttp.Handle(\"/api\", handlers.ApiHandler(cfg, repo, log))\n\n\t// show the server opening info and start the server with DefaultServeMux\n\tlog.Infof(\"Welcome to Fantom Rocks API server on [%s]\", cfg.BindAddr)\n\tlog.Fatal(http.ListenAndServe(cfg.BindAddr, nil))\n}", "func main() {\n\tvar (\n\t\thostname string\n\t\tid string\n name string\n client string\n\t\tcache *infra.Cache\n\t\tserver *infra.Server\n\t\tconsole *infra.Console\n\t)\n\n\tflag.Parse()\n\n\tcache = infra.NewCache()\n\n\thostname = *localAddress + \":\" + *localPort\n client = *clientAddress\n\n\t// If an id isn't provided, we use the hostname instead\n\tif *instanceId != \"\" {\n\t\tid = *instanceId\n\t} else {\n\t\tid = hostname\n\t}\n \n if *carrierName != \"\" {\n name = *carrierName\n } else if *ringAddress != \"\" {\n name = *ringAddress\n } else {\n name = hostname\n }\n \n server = infra.NewServer(id, name, hostname, client, cache)\n\tconsole = infra.NewConsole(cache, server)\n\n\t// Spawn goroutines to handle both interfaces\n\tgo server.Run(*ringAddress)\n\tgo console.Run()\n\n\t// Wait fo the server to finish\n\t<-server.Done()\n}", "func main() {\n\t//establish connection to the primary replica\n\t//connect to server\n\tconn_main_replica, err := net.Dial(\"tcp\", \"localhost:8084\")\n\tdefer conn_main_replica.Close()\n\tif err != nil {\n\t\tpanic(\"Failed connect to conn_main_replica\\n\")\n\t}\n\n\t//load user list for faster access to a list of current users\n\tload_user_list()\n\thandle_requests(conn_main_replica)\n}", "func Client(domain string, port int, username, password string) {\n\tr := newRtspConn(domain, port, username, password)\n\tport = r.c.RemoteAddr().(*net.TCPAddr).Port\n\tcommands := []int{1, 2, 3, 4, 6}\n\tfor i := 0; i < len(commands); i++ {\n\t\tr = getCommand(r, commands[i])\n\t\tconnectionWrite(r)\n\t\tr = connectionRead(r)\n\t}\n\t//connectionClose(r)\n}", "func setUpClientAndServer(handler Handler) 
(*client4.Client, *Server) {\n\t// strong assumption, I know\n\tloAddr := net.ParseIP(\"127.0.0.1\")\n\tladdr := net.UDPAddr{\n\t\tIP: loAddr,\n\t\tPort: randPort(),\n\t}\n\ts := NewServer(laddr, handler)\n\tgo s.ActivateAndServe()\n\n\tc := client4.NewClient()\n\t// FIXME this doesn't deal well with raw sockets, the actual 0 will be used\n\t// in the UDP header as source port\n\tc.LocalAddr = &net.UDPAddr{IP: loAddr, Port: randPort()}\n\tfor {\n\t\tif s.LocalAddr() != nil {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(10 * time.Millisecond)\n\t\tlog.Printf(\"Waiting for server to run...\")\n\t}\n\tc.RemoteAddr = s.LocalAddr()\n\tlog.Printf(\"Client.RemoteAddr: %s\", c.RemoteAddr)\n\n\treturn c, s\n}", "func (m *Master) server() {\n\trpc.Register(m)\n\trpc.HandleHTTP()\n\tos.Create(\"mr-socket\")\n\n\tl, e := net.Listen(\"tcp\", \"0.0.0.0:8080\")\n\tif e != nil {\n\t\tlog.Fatal(\"listen error:\", e)\n\t}\n\tlog.Printf(\"Server is running at %s\\n\", l.Addr().String())\n\tgo http.Serve(l, nil)\n}", "func init() {\n\t// In this example, we will hard code the port. 
Later the environment\n\t// will dictate.\n\tport = 7718\n\t// Set up the heartbeat ticker.\n\theartbeat = time.NewTicker(60 * time.Second)\n\n\t// Setup the service router.\n\tgin.SetMode(gin.ReleaseMode)\n\trouter = gin.New()\n\n\t// Make sure we are still alive.\n\trouter.GET(\"/stats/:pgm\", GetCircuitStats)\n\n\t// These are the services we will be listening for.\n\trouter.POST(\"/add/:word\", ReceiveWrapper)\n\t// Get the number of heartbeats put out by the application (also in real-time).\n\trouter.GET(\"/beats\", GetHeartbeatCount)\n\t// Make sure we are still alive.\n\trouter.GET(\"/ping\", PingTheAPI)\n}", "func ClientMain(player Player) {\n\taddr := DefaultServerAddress\n\tif len(os.Args) > 1 {\n\t\tport, err := strconv.Atoi(os.Args[1])\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"invalid value for port: %q\", os.Args[1])\n\t\t}\n\t\taddr = &net.TCPAddr{\n\t\t\tIP: net.IPv4(127, 0, 0, 1),\n\t\t\tPort: port,\n\t\t}\n\t}\n\tvar state BasicState\n\tclient, err := OpenClient(addr, player, &state)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"cannot connect to server: %s\", err)\n\t\tos.Exit(1)\n\t}\n\tclient.DebugTo = os.Stderr\n\terr = client.Run()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error while running: %s\", err)\n\t\tos.Exit(2)\n\t}\n}", "func main() {\n\tch := make(chan string)\n\tgo IOHandler(ch)\n\tfmt.Println(\"Server started\")\n\tservice := \"localhost:3000\"\n\ttcpAddr, err := net.ResolveTCPAddr(\"tcp\", service)\n\tif err != nil {\n\t\tfmt.Println(\"Could not resolve: \", service)\n\t\tos.Exit(1)\n\t} else {\n\t\tlistener, err := net.Listen(tcpAddr.Network(), tcpAddr.String())\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Could not listen on: \", tcpAddr)\n\t\t\tos.Exit(1)\n\t\t} else {\n\t\t\tdefer listener.Close()\n\t\t\tfor {\n\t\t\t\tfmt.Println(\"Listening for clients\")\n\t\t\t\tconn, err := listener.Accept()\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"Client error: \", err)\n\t\t\t\t} else {\n\t\t\t\t\t//Create 
routine for each connected client\n\t\t\t\t\tgo ConnHandler(conn, ch)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func main() {\n // Create a TCP socket\n socket, _ := net.Listen(\"tcp\", \":8080\")\n fmt.Print(\"Server started. Listening on port 8080\\r\\n\\r\\n\")\n\n\n // Loop forever, listening for connections\n for {\n // Pause until a client connects\n connection, _ := socket.Accept()\n\n // Handle each incoming connection as a GO routine\n // Server can handle an arbitrary amount of connections\n go handleClient(connection)\n }\n}", "func main() {\n\terr := runClient()\n\tif err != nil {\n\t\tlog.Fatalln(err.Error())\n\t}\n\n\treturn\n}", "func ClientServer(w http.ResponseWriter, r *http.Request) {\n\t//client := r.URL.Path[len(\"/clients/\"):]\n\t//fmt.Fprint(w, \"success\")\n}", "func main() {\n\n\t//init api\n\tserver.Init()\n}", "func main() {\n\t// Spin off the hub\n\thub := newHub()\n\tgo hub.run()\n\n\thttp.Handle(\"/frontend/dist/\", http.StripPrefix(\"/frontend/dist/\", http.FileServer(http.Dir(\"./frontend/dist/\"))))\n\thttp.Handle(\"/assets/\", http.StripPrefix(\"/assets/\", http.FileServer(http.Dir(\"./assets/\"))))\n\t// Serve index.html specifically\n\thttp.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(w, r, \"index.html\")\n\t})\n\thttp.HandleFunc(\"/api/socket\", func(w http.ResponseWriter, r *http.Request) {\n\t\tserveWs(hub, w, r)\n\t})\n\n\tport := \":4567\"\n\tlog.Println(\"Server listening at localhost\" + port)\n\thttp.ListenAndServe(port, nil)\n\n}", "func main() {\n\n\tfmt.Println(\"Launching server...\")\n\n\tconnMap = make(map[string]net.Conn) // Allocate and initialise a map with no given size\n\tuserMap = make(map[net.Conn]string) // Allocate and initialise a map with no given size\n\n\targs := os.Args\n\n\tvar connPort = \"\"\n\n\tif len(args) == 2 && checkServerPort(args[1]) { // Verify a port number is given and check it\n\t\tconnPort = args[1]\n\t} else { // Else use port 8081 by 
default\n\t\tconnPort = \"8081\"\n\t}\n\n\tfmt.Print(\"IP address: \")\n\tgetPreferredIPAddress() // Prints out the preferred IP address of the specific computer\n\tfmt.Println(\"Port number: \" + connPort)\n\n\t// Listens for connection requests\n\tln, err := net.Listen(\"tcp\", \":\"+connPort)\n\n\t// Error check\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\t// Defer (wait till surrounding functions have finished) the execution of ln.Close()\n\tdefer ln.Close()\n\n\t// Semi-infinite loop that accepts connections, checks for errors and executes a goroutine\n\tfor {\n\t\tconn, err := ln.Accept()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Accept error: \", err)\n\t\t\treturn\n\t\t}\n\t\tgo connection(conn) // goroutine execution of the connection function concurrently\n\t}\n}", "func main() {\n\n\tlog.Printf(\"Server started\")\n\n\trouter := sw.NewRouter()\n\n\tport := os.Getenv(\"PORT\")\n\tif port == \"\" {\n\t\tport = \"5000\"\n\t}\n\n\theadersOk := handlers.AllowedHeaders([]string{\"X-Requested-With\", \"Content-Type\"})\n\toriginsOk := handlers.AllowedOrigins([]string{\"*\"})\n\tmethodsOk := handlers.AllowedMethods([]string{\"GET\", \"HEAD\", \"POST\", \"PUT\", \"OPTIONS\"})\n\n\tlog.Fatal(http.ListenAndServe(\":\"+port, handlers.CORS(originsOk, headersOk, methodsOk)(router)))\n}", "func main() {\n\tctx := context.Background()\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\t// New server multiplexer\n\tmux := runtime.NewServeMux()\n\topts := []grpc.DialOption{grpc.WithInsecure()}\n\n\t// Our gRPC host address\n\tconn := os.Getenv(\"SERVICE_ADDRESS\")\n\tapiAddress := os.Getenv(\"API_ADDRESS\")\n\n\tlog.Printf(\"Connecting to gRPC server on: %s\\n\", conn)\n\tlog.Printf(\"Starting API on: %s\\n\", apiAddress)\n\n\t// Register the handler to an endpoint\n\terr := gw.RegisterUserServiceHandlerFromEndpoint(ctx, mux, conn, opts)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Return a server 
instance\n\thttp.ListenAndServe(apiAddress, mux)\n}", "func main() {\n\n\t// Process args.\n\n\t// the TCP address on which the fserver listens to RPC connections from the aserver\n\tfserverTcp := os.Args[1]\n\tfserverTcpG = fserverTcp\n\n\t// the UDP address on which the fserver receives client connections\n\tfserver := os.Args[2]\n\tfserverUdpAddr, err := net.ResolveUDPAddr(\"udp\", fserver)\n\thandleError(err)\n\n\tmsg := make([]byte, 1024)\n\n\t// Global fserver ip:port info\n\tfserverIpPort = fserver\n\n\t// Read the rest of the args as a fortune message\n\tfortune := strings.Join(os.Args[3:], \" \")\n\tfortuneG = fortune\n\n\t// Debug to see input from command line args\n\tfmt.Printf(\"fserver Listening on %s\\nFortune: %s\\n\", fserverIpPort, fortune)\n\n\t// concurrent running of rcp connection\n\n\tconn, err := net.ListenUDP(\"udp\", fserverUdpAddr)\n\thandleError(err)\n\n\tgo handleRpcConnection()\n\tdefer conn.Close()\n\n\t// refactor to global variable\n\tconndp = conn\n\t// udp client concurrency\n\tfor {\n\t\tn, clientAddr, err := conn.ReadFromUDP(msg)\n\t\thandleError(err)\n\t\tgo handleClientConnection(msg[:], n, clientAddr.String())\n\t}\n}", "func main() {\n\tflag.Parse()\n\t// TODO: verify if this is actually beneficial\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\t// load up config.json\n\tconf := loadConfig()\n\n\t// redis client\n\tclient := redis.NewTCPClient(&redis.Options{\n\t\tAddr: conf.RedisHost,\n\t\tPassword: conf.RedisPass,\n\t\tDB: conf.RedisDB,\n\t})\n\n\tif ping := client.Ping(); ping.Err() != nil {\n\t\tlog.Fatal(ping.Err())\n\t}\n\n\t// redis pubsub connection\n\tps := client.PubSub()\n\n\t// prepare a socketmap\n\tsm := NewSocketMap(ps)\n\n\t// loop for receiving messages from Redis pubsub, and forwarding them on to relevant ws connection\n\tgo redisPump(ps, sm)\n\n\tdefer func() {\n\t\tps.Close()\n\t\tclient.Close()\n\t}()\n\n\t// prepare server\n\thttp.Handle(\"/\", NewGeobinServer(conf, NewRedisWrapper(client), ps, 
sm))\n\n\t// Start up HTTP server\n\tlog.Println(\"Starting server at\", conf.Host, conf.Port)\n\terr := http.ListenAndServe(fmt.Sprintf(\"%v:%d\", conf.Host, conf.Port), nil)\n\tif err != nil {\n\t\tlog.Fatal(\"ListenAndServe: \", err)\n\t}\n}", "func socketHandler(ws *websocket.Conn) {\n\tvar err error\n\tfor {\n\t\tvar reply string\n\n\t\tif err = websocket.Message.Receive(ws, &reply); err != nil {\n\t\t\tfmt.Println(\"Can't receive\")\n\t\t\tbreak\n\t\t}\n\n\t\tif debug {\n\t\t\tfmt.Println(\"Received from client: \" + reply)\n\t\t}\n\n\t\t// Json decode\n\t\tvar req = AOWRequest{}\n\t\tif err := json.Unmarshal([]byte(reply), &req); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\t// Get client from pool\n\t\tpclient := clientPool.Get().(*http.Client)\n\n\t\t// Prepare request\n\t\tpreq, perr := http.NewRequest(req.Method, fmt.Sprintf(\"%s\", req.URI), nil)\n\t\tfor k, v := range req.Headers {\n\t\t\tpreq.Header.Add(k, v)\n\t\t}\n\t\tpresp, perr := pclient.Do(preq)\n\t\tif perr != nil {\n\t\t\tlog.Printf(\"%s\\n\", perr)\n\n\t\t\t// Return client to pool\n\t\t\tclientPool.Put(pclient)\n\t\t\treturn\n\t\t}\n\t\tdefer presp.Body.Close()\n\t\tbody, readErr := ioutil.ReadAll(presp.Body)\n\t\tif readErr != nil {\n\t\t\tlog.Printf(\"%s\\n\", readErr)\n\t\t}\n\n\t\t// Response wrapper\n\t\tvar resp = AOWResponse{}\n\t\tresp.Id = req.Id\n\t\tresp.Text = string(body)\n\t\tresp.Status = presp.StatusCode\n\t\tresp.Headers = presp.Header\n\t\trespBytes, jErr := json.Marshal(resp)\n\t\tif jErr != nil {\n\t\t\tpanic(jErr)\n\t\t}\n\n\t\t// Return client to pool\n\t\tclientPool.Put(pclient)\n\n\t\t// Msg\n\t\tmsg := string(respBytes)\n\n\t\t// Send to client\n\t\tif debug {\n\t\t\tfmt.Println(\"Sending to client: \" + msg)\n\t\t}\n\t\tif err = websocket.Message.Send(ws, msg); err != nil {\n\t\t\tfmt.Println(\"Can't send\")\n\t\t\tbreak\n\t\t}\n\t}\n}", "func main() {\n\tserver.New().Start()\n}", "func startServerMode() {\n\t// Create or open log directory\n\tf, err := 
os.OpenFile(WORKDIR+`/server.log`, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\tl(err.Error(), true, true)\n\t}\n\tdefer f.Close()\n\tlog.SetOutput(f)\n\tl(\"Starting server...\", false, true)\n\tvar listener net.Listener\n\tif appConfig.Tls {\n\t\tcert, err := tls.LoadX509KeyPair(WORKDIR+\"/cert.pem\", WORKDIR+\"/key.pem\")\n\t\tcheckErr(\"Unable to import TLS certificates\", err, true)\n\t\tconfig := tls.Config{Certificates: []tls.Certificate{cert}}\n\t\tnow := time.Now()\n\t\tconfig.Time = func() time.Time { return now }\n\t\tconfig.Rand = rand.Reader\n\t\tlistener, err = tls.Listen(\"tcp\", appConfig.Server.Address+\":\"+appConfig.Server.Port, &config)\n\t\tcheckErr(\"Unable to create TLS listener\", err, false)\n\t} else {\n\t\tvar err error\n\t\tlistener, err = net.Listen(\"tcp\", appConfig.Server.Address+\":\"+appConfig.Server.Port)\n\t\tcheckErr(\"Unable to create listener\", err, true)\n\t}\n\tgo server.start()\n\tif len(appConfig.Api.Port) > 0 {\n\t\tgo startHttpServer()\n\t}\n\tfor {\n\t\tconnection, err := listener.Accept()\n\t\tcheckErr(\"Unable to accept incoming connection\", err, true)\n\t\tclient := &Client{socket: connection, data: make(chan Job)}\n\t\tserver.register <- client\n\t\tgo server.receive(client)\n\t\tgo server.send(client)\n\t}\n}", "func main() {\n\tflag.Parse()\n\tfmt.Println(\"start the program\")\n\n\t// go myServer()\n\t// go myClient()\n\n\tfor {\n\t\t// start the app\n\t\twaitc := make(chan struct{}) // a wait lock\n\n\t\t// start the server thread\n\t\tgo func() {\n\t\t\tfmt.Println(\"start the server\")\n\t\t\tserver.InitFileServer()\n\t\t\tdefer close(waitc)\n\t\t}()\n\n\t\t// start the client thread\n\t\t// go func() {\n\t\t// \t// for {\n\t\t// \tserverAddr, server := filesource.SearchAddressForThefile(\"Liben.jpg\")\n\t\t// \tfmt.Println(*serverAddr)\n\t\t// \tfmt.Println(*server)\n\t\t// \tclient.InitFileClient(serverAddr, server)\n\t\t// \tclient.DownloadFile(\"Liben.jpg\")\n\t\t// \t// }\n\t\t// 
}()\n\n\t\t// start the input thread\n\t\t// go input()\n\n\t\t<-waitc\n\t\t// finished in this round restart the app\n\t\tfmt.Println(\"restart the app\")\n\t}\n}", "func (srv *Server) ServeHTTP(\n\tresp http.ResponseWriter,\n\treq *http.Request,\n) {\n\tswitch req.Method {\n\tcase \"OPTIONS\":\n\t\tsrv.hooks.OnOptions(resp)\n\t\treturn\n\tcase \"WEBWIRE\":\n\t\tsrv.handleMetadata(resp)\n\t\treturn\n\t}\n\n\t// Establish connection\n\tconn, err := srv.upgrader.Upgrade(resp, req, nil)\n\tif err != nil {\n\t\tsrv.errorLog.Print(\"Upgrade failed:\", err)\n\t\treturn\n\t}\n\tdefer conn.Close()\n\n\t// Register connected client\n\tnewClient := &Client{\n\t\tsrv,\n\t\t&sync.Mutex{},\n\t\tconn,\n\t\ttime.Now(),\n\t\treq.Header.Get(\"User-Agent\"),\n\t\tnil,\n\t}\n\n\tsrv.clientsLock.Lock()\n\tsrv.clients = append(srv.clients, newClient)\n\tsrv.clientsLock.Unlock()\n\n\t// Call hook on successful connection\n\tsrv.hooks.OnClientConnected(newClient)\n\n\tfor {\n\t\t// Await message\n\t\t_, message, err := conn.ReadMessage()\n\t\tif err != nil {\n\t\t\tif newClient.Session != nil {\n\t\t\t\t// Mark session as inactive\n\t\t\t\tsrv.SessionRegistry.deregister(newClient)\n\t\t\t}\n\n\t\t\tif websocket.IsUnexpectedCloseError(\n\t\t\t\terr,\n\t\t\t\twebsocket.CloseGoingAway,\n\t\t\t\twebsocket.CloseAbnormalClosure,\n\t\t\t) {\n\t\t\t\tsrv.warnLog.Printf(\"Reading failed: %s\", err)\n\t\t\t}\n\n\t\t\tsrv.hooks.OnClientDisconnected(newClient)\n\t\t\treturn\n\t\t}\n\n\t\t// Parse message\n\t\tvar msg Message\n\t\tif err := msg.Parse(message); err != nil {\n\t\t\tsrv.errorLog.Println(\"Failed parsing message:\", err)\n\t\t\tbreak\n\t\t}\n\n\t\t// Prepare message\n\t\t// Reference the client associated with this message\n\t\tmsg.Client = newClient\n\n\t\tmsg.createReplyCallback(newClient, srv)\n\t\tmsg.createFailCallback(newClient, srv)\n\n\t\t// Handle message\n\t\tif err := srv.handleMessage(&msg); err != nil {\n\t\t\tsrv.errorLog.Printf(\"CRITICAL FAILURE: %s\", 
err)\n\t\t\tbreak\n\t\t}\n\t}\n}", "func (m *Master) server() {\n\trpc.Register(m)\n\trpc.HandleHTTP()\n\tl, e := net.Listen(\"tcp\", \":1234\")\n\t//l, e := net.Listen(\"unix\", sockname)\n\tif e != nil {\n\t\tlog.Fatal(\"listen error:\", e)\n\t}\n\tgo http.Serve(l, nil)\n}", "func main() {\n\tapp := iris.New()\n\tapp.Logger().SetLevel(\"debug\")\n\n\t// Just a test route which reads some data and responds back with json.\n\tapp.Post(\"/read-write\", readWriteHandler)\n\n\tapp.Get(\"/get\", getHandler)\n\n\t// The target ip:port.\n\tapp.Listen(\":9090\")\n}", "func server(ctx *cli.Context) {\n\t//configInit()\n\tinitLogLevel()\n\tlog.Fatal(http.ListenAndServe(Config().GetString(\"addr\"), handler)\n}", "func handleRequests() {\n\taddress := \"localhost\"\n\taddress += \":\"\n\taddress += \"8000\"\n\tfmt.Printf(\"Server at address %v is up\\n\", \"http://\"+address)\n\n\thttp.HandleFunc(\"/\", homePage)\n\thttp.HandleFunc(\"/author\", author)\n\thttp.HandleFunc(\"/articles\", returnAllArticles)\n\tlog.Fatal(http.ListenAndServe(address, nil)) // nill: use DefaultServerMux\n}", "func main() {\n\tflag.StringVar(&MODE, \"mode\", MODE, \"server/client\")\n\tflag.StringVar(&SERVER_ADDR, \"server\", SERVER_ADDR, \"mode: server => listen, mode: client => connect to\")\n\tflag.StringVar(&PayLoad, \"pl\", PayLoad, \"PayLoad\")\n\tflag.BoolVar(&PrintDump, \"d\", PrintDump, \"Print dump\")\n\tflag.PrintDefaults()\n\tflag.Parse()\n\n\tswitch strings.ToUpper(MODE) {\n\tcase \"S\":\n\t\tserver(SERVER_ADDR)\n\tdefault:\n\t\tclient(SERVER_ADDR)\n\t}\n}", "func main() {\n\tfmt.Println(\"app start.\")\n\tgo loginserver.StartListen()\n\tgo gateserver.StartListen()\n\t<-make(chan int)\n}", "func main() {\r\n\tbind := fmt.Sprintf(\"%s:%s\", getIP(), getPort())\r\n\tlog.Println(\"Listening on\", bind)\r\n\r\n\terr := http.ListenAndServe(bind, http.HandlerFunc(mainHandle))\r\n\tif err != nil {\r\n\t\tpanic(\"ListenAndServe: \" + err.Error())\r\n\t}\r\n}", "func server() {\n\tmux := 
http.NewServeMux()\n\tmux.HandleFunc(\"/\", respond.HandleRequest)\n\tlog.Fatal(http.ListenAndServe(\":\"+*port, mux))\n}", "func Client(scc node.ControllerClient, ln net.Listener, auth *protoAuth.BlimpAuth,\n\tname string, port uint32) error {\n\n\tfields := log.Fields{\n\t\t\"listen\": ln.Addr().String(),\n\t\t\"name\": name,\n\t\t\"port\": port,\n\t}\n\n\tfor {\n\t\tstream, err := ln.Accept()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlog.WithFields(fields).Trace(\"new connection\")\n\t\tgo func() {\n\t\t\tconnect(scc, stream, auth, name, port)\n\t\t\tlog.WithFields(fields).Trace(\"finish connection\")\n\t\t}()\n\t}\n}", "func (cs *ChatService) startHTTPServer() {\n\tr := mux.NewRouter()\n\thangoutsHandler := &hangoutsHTTPHandler{\n\t\tbroker: cs.broker,\n\t}\n\n\tr.Handle(\"/\", hangoutsHandler)\n\n\thttp.Handle(\"/\", r)\n\n\tcs.quitError <- http.ListenAndServe(fmt.Sprintf(\":%d\", cs.httpPort), nil)\n}", "func StartServer() {\n\t// read config\n\tserverConfig = CreateConfig()\n\t// create database\n\tbasketsDb = createBasketsDatabase()\n\tif basketsDb == nil {\n\t\tlog.Print(\"[error] failed to create basket database\")\n\t\treturn\n\t}\n\n\t// HTTP clients\n\thttpClient = new(http.Client)\n\tinsecureTransport := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}\n\thttpInsecureClient = &http.Client{Transport: insecureTransport}\n\n\t// configure service HTTP router\n\trouter := httprouter.New()\n\n\t// basket names\n\trouter.GET(\"/\"+BASKETS_ROOT, GetBaskets)\n\n\t// basket management\n\trouter.GET(\"/\"+BASKETS_ROOT+\"/:basket\", GetBasket)\n\trouter.POST(\"/\"+BASKETS_ROOT+\"/:basket\", CreateBasket)\n\trouter.PUT(\"/\"+BASKETS_ROOT+\"/:basket\", UpdateBasket)\n\trouter.DELETE(\"/\"+BASKETS_ROOT+\"/:basket\", DeleteBasket)\n\n\trouter.GET(\"/\"+BASKETS_ROOT+\"/:basket/responses/:method\", GetBasketResponse)\n\trouter.PUT(\"/\"+BASKETS_ROOT+\"/:basket/responses/:method\", UpdateBasketResponse)\n\n\t// requests 
management\n\trouter.GET(\"/\"+BASKETS_ROOT+\"/:basket/requests\", GetBasketRequests)\n\trouter.DELETE(\"/\"+BASKETS_ROOT+\"/:basket/requests\", ClearBasket)\n\n\t// web pages\n\trouter.GET(\"/\", ForwardToWeb)\n\trouter.GET(\"/\"+WEB_ROOT, WebIndexPage)\n\trouter.GET(\"/\"+WEB_ROOT+\"/:basket\", WebBasketPage)\n\t//router.ServeFiles(\"/\"+WEB_ROOT+\"/*filepath\", http.Dir(\"./src/github.com/darklynx/request-baskets/web\"))\n\n\t// basket requests\n\trouter.NotFound = http.HandlerFunc(AcceptBasketRequests)\n\n\tgo shutdownHook()\n\n\tlog.Printf(\"[info] starting HTTP server on port: %d\", serverConfig.ServerPort)\n\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%d\", serverConfig.ServerPort), router))\n}", "func main() {\n\tvar (\n\t\tscript = flag.String(\"s\", \"/mnt/htdocs/royal/public/index.php\", \"server script absolute file path\")\n\t\turi = flag.String(\"uri\", \"/\", \"request uri\")\n\t\treqData = flag.String(\"d\", \"\", \"send data in a POST request in form of k=v&k1=v1\")\n\t\thost = flag.String(\"h\", \"127.0.0.1\", \"fastcgi server host\")\n\t\tport = flag.Int(\"p\", 9000, \"fastcgi server port\")\n\t\tversion = flag.Bool(\"version\", false, \"show version\")\n\t)\n\tflag.Parse()\n\n\tif *version {\n\t\tshowVersion()\n\t\tos.Exit(0)\n\t}\n\n\trequestMethod := \"GET\"\n\tif *reqData != \"\" {\n\t\trequestMethod = \"POST\"\n\t}\n\n\tenv := make(map[string]string)\n\tenv[\"REQUEST_METHOD\"] = requestMethod\n\tenv[\"SCRIPT_FILENAME\"] = *script\n\tenv[\"REQUEST_URI\"] = *uri\n\tenv[\"SERVER_SOFTWARE\"] = \"go / fcgiclient \"\n\tenv[\"REMOTE_ADDR\"] = \"127.0.0.1\"\n\tenv[\"SERVER_PROTOCOL\"] = \"HTTP/1.1\"\n\tenv[\"QUERY_STRING\"] = *reqData\n\n\tfcgi, err := fcgiclient.New(*host, *port)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tresponse, err := fcgi.Request(env, *reqData)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(response)\n}", "func main() {\n\thttp.ListenAndServe(\"127.0.0.1:8080\", NewServer())\n}", "func runClient(dialString 
string) {\n\tclient, err := client.New(dialString)\n\tif err != nil {\n\t\tlog.Fatalf(\"Cannot create gRPC client to %s. %v\", dialString, err)\n\t}\n\tfor {\n\t\ttime.Sleep(10 * time.Second)\n\t\t_, err = client.Hello(context.Background(), &emptypb.Empty{})\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error saying hello. %+v\", err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(\"Hello was successful\")\n\t}\n}", "func main() {\n\tgo func() { log.Fatal(echoServer()) }()\n\n\terr := clientMain()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func main() {\n\tname := flag.String(\"name\", \"echo\", \"server name\")\n\tport := flag.String(\"port\", \"3000\", \"server port\")\n\tflag.Parse()\n\n\t// Echo instance\n\te := echo.New()\n\n\t// Middleware\n\te.Use(middleware.Logger())\n\te.Use(middleware.Recover())\n\n\t// Route => handler\n\te.GET(\"/\", func(c echo.Context) error {\n\t\treturn c.HTML(http.StatusOK, fmt.Sprintf(\"<div style='font-size: 8em;'>Hello from upstream server %s!</div>\", *name))\n\t})\n\te.GET(\"/alive\", func(c echo.Context) error {\n\t\tdata := map[string]interface{}{\n\t\t\t\"alive\": true,\n\t\t\t\"hostname\": \"localhost:\" + *port,\n\t\t\t\"serviceName\": *name,\n\t\t\t\"num_cpu\": runtime.NumCPU(),\n\t\t\t\"num_goroutine\": runtime.NumGoroutine(),\n\t\t\t\"go_version\": runtime.Version(),\n\t\t\t\"build_date\": Buildstamp,\n\t\t\t\"commit\": Commit,\n\t\t\t\"startup_time\": startupTime,\n\t\t}\n\t\treturn c.JSON(http.StatusOK, data)\n\t})\n\n\t// Start server\n\te.Logger.Fatal(e.Start(fmt.Sprintf(\":%s\", *port)))\n}", "func (s *Server) handleClient(ctx context.Context, conn net.Conn) {\n\treader := bufio.NewReader(conn)\n\treq := new(request)\n\n\tif err := s.read(reader, req); err != nil {\n\t\tlog.Println(\"ERROR: error reading request, \", err)\n\t\treturn\n\t}\n\n\tready := make(chan bool, 1)\n\tclientKey := s.addClient(req.ClientName, conn.RemoteAddr().String(), s.getTime(), ready)\n\tdefer s.cleanClient(conn, 
clientKey)\n\n\ts.processClient(ctx, conn, ready)\n}", "func main() {\n\thttpPort := 8080\n\tserver := api.NewServer()\n\tlog.Printf(\"Server running on Port: %v\\n\", httpPort)\n\terr := http.ListenAndServe(fmt.Sprintf(\":%d\", httpPort), logRequest(server))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func main() {\n\tconst port = 8090\n\tfmt.Printf(\"Listening on port: %d\\n\", port)\n\thttp.HandleFunc(\"/\", requestHandler)\n\thttp.ListenAndServe(fmt.Sprintf(\":%d\", port), nil)\n}", "func main() {\n\tfmt.Println(\"Go Demo with net/http server\")\n\n\t// initialize empty itemStore\n\titemStore := store.InitializeStore()\n\tserver.StartRouter(itemStore)\n}", "func main() {\n\thttp.HandleFunc(\"/test\", TestServer)\n\thttp.Handle(\"/\", http.FileServer(http.Dir(\".\")))\n\thttp.Handle(\"/echo\", websocket.Handler(Echo))\n\thttp.Handle(\"/echo2\", websocket.Handler(EchoServer))\n\terr := http.ListenAndServe(\":12345\", nil)\n\tif err != nil {\n\t\tpanic(\"ListenAndServe: \" + err.Error())\n\t}\n}", "func main() {\n\tgwMux := runtime.NewServeMux()\n\tendPoint := \"localhost:8081\"\n\topt := []grpc.DialOption{grpc.WithTransportCredentials(helper.GetClientCreds())}\n\t// prod\n\tif err := pbfiles.RegisterProdServiceHandlerFromEndpoint(context.Background(), gwMux, endPoint, opt); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// order\n\tif err := pbfiles.RegisterOrderServiceHandlerFromEndpoint(context.Background(), gwMux, endPoint, opt); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\thttpServer := &http.Server{\n\t\tAddr: \":8080\",\n\t\tHandler: gwMux,\n\t}\n\n\tif err := httpServer.ListenAndServe(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func launchClientInterface(bc *Blockchain) {\n\tbuf := bufio.NewReader(os.Stdin)\n\tfor {\n\t\tfmt.Print(\"> \")\n\t\tsentence, err := buf.ReadBytes('\\n')\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tline := strings.TrimSuffix(string(sentence), \"\\n\")\n\t\targs := strings.Split(line, \" 
\")\n\n\t\t//TODO: error checking on args\n\n\t\tswitch args[0] {\n\t\tcase \"add\":\n\t\t\tif len(args) != 4 {\n\t\t\t\tcolor.Red(\"Usage: add [recipient addr] [data] [salt]\\n\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tto := args[1]\n\t\t\tdata := args[2]\n\t\t\tsalt := args[3]\n\t\t\tclientAddHandler(to, data, salt, bc)\n\t\t\t\n\t\tcase \"get\":\n\t\t\tif len(args) != 3 {\n\t\t\t\tcolor.Red(\"Usage: get [data] [salt]\\n\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tdata := args[1]\n\t\t\tsalt := args[2]\n\t\t\tclientGetHandler(data, salt, bc)\n\t\tcase \"send\":\n\t\t\tif len(args) != 5 {\n\t\t\t\tcolor.Red(\"Usage: send [sender addr] [recipient addr] [data] [salt]\\n\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfrom := args[1]\n\t\t\tto := args[2]\n\t\t\tdata := args[3]\n\t\t\tsalt := args[4]\n\t\t\tclientSendHandler(from, to, data, salt, bc)\n\t\tcase \"print\":\n\t\t\tclientPrintHandler(bc)\n\t\tdefault:\n\t\t\tif args[0] != \"\" {\n\t\t\t\tcolor.Red(\"Unknown command!\")\n\t\t\t}\n\t\t}\n\t}\n}", "func Server(resolveNet, listenNet, port, topic string) {\n\tudpAddr, err := net.ResolveUDPAddr(resolveNet, com.StringJoin(\":\", port))\n\tif err != nil {\n\t\tbeego.Info(fmt.Sprintf(\"%v Fatal error %v\", os.Stderr, err.Error()))\n\t\tos.Exit(1)\n\t}\n\tconn, err := net.ListenUDP(listenNet, udpAddr)\n\tif err != nil {\n\t\tbeego.Error(fmt.Sprintf(\"%v Fatal error %v\", os.Stderr, err.Error()))\n\t\tos.Exit(1)\n\t}\n\tdefer conn.Close()\n\tfor {\n\t\thandleClient(conn, topic)\n\t}\n}", "func main() {\r\n\tconnect := flag.String(\"connect\", \"\", \"IP address of process to join. 
If empty, go into listen mode.\")\r\n\tflag.Parse()\r\n\r\n\t// If the connect flag is set, go into client mode.\r\n\tif *connect != \"\" {\r\n\t\terr := client(*connect)\r\n\t\tif err != nil {\r\n\t\t\tlog.Println(\"Error:\", errors.WithStack(err))\r\n\t\t}\r\n\t\tlog.Println(\"Client done.\")\r\n\t\treturn\r\n\t}\r\n\r\n\t// Else go into server mode.\r\n\terr := server()\r\n\tif err != nil {\r\n\t\tlog.Println(\"Error:\", errors.WithStack(err))\r\n\t}\r\n\r\n\tlog.Println(\"Server done.\")\r\n}", "func StartClient(port string) {\n\tfmt.Println(\"StartClient\")\n}", "func Server(l net.Listener) {\n\tcont := electron.NewContainer(\"server\")\n\tc, err := cont.Accept(l)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tl.Close() // This server only accepts one connection\n\t// Process incoming endpoints till we get a Receiver link\n\tvar r electron.Receiver\n\tfor r == nil {\n\t\tin := <-c.Incoming()\n\t\tswitch in := in.(type) {\n\t\tcase *electron.IncomingSession, *electron.IncomingConnection:\n\t\t\tin.Accept() // Accept the incoming connection and session for the receiver\n\t\tcase *electron.IncomingReceiver:\n\t\t\tin.SetCapacity(10)\n\t\t\tin.SetPrefetch(true) // Automatic flow control for a buffer of 10 messages.\n\t\t\tr = in.Accept().(electron.Receiver)\n\t\tcase nil:\n\t\t\treturn // Connection is closed\n\t\tdefault:\n\t\t\tin.Reject(amqp.Errorf(\"example-server\", \"unexpected endpoint %v\", in))\n\t\t}\n\t}\n\tgo func() { // Reject any further incoming endpoints\n\t\tfor in := range c.Incoming() {\n\t\t\tin.Reject(amqp.Errorf(\"example-server\", \"unexpected endpoint %v\", in))\n\t\t}\n\t}()\n\t// Receive messages till the Receiver closes\n\trm, err := r.Receive()\n\tfor ; err == nil; rm, err = r.Receive() {\n\t\tfmt.Printf(\"server received: %q\\n\", rm.Message.Body())\n\t\trm.Accept() // Signal to the client that the message was accepted\n\t}\n\tfmt.Printf(\"server receiver closed: %v\\n\", err)\n}", "func main() {\n\tvar addr = flag.String(\"addr\", 
\":\"+GetPort(), \"Server http address\")\n\tflag.Parse()\n\tinitOAuth2()\n\tr := newRoom()\n\tr.tracer = New(os.Stdout)\n\thttp.Handle(\"/chat\", MustAuth(&templateHandler{filename: \"chat.html\"}))\n\thttp.Handle(\"/login\", &templateHandler{filename: \"login.html\"})\n\thttp.HandleFunc(\"/auth/\", loginHandler)\n\thttp.Handle(\"/room\", r)\n\thttp.HandleFunc(\"/logout\", logoutHandler)\n\thttp.Handle(\"/upload\", &templateHandler{filename: \"upload.html\"})\n\thttp.HandleFunc(\"/uploader\", uploaderHandler)\n\thttp.Handle(\"/avatars/\", http.StripPrefix(\"/avatars/\", http.FileServer(http.Dir(\"./avatars\"))))\n\tgo r.run()\n\tlog.Println(\"Running the server at\", *addr)\n\tif err := http.ListenAndServe(*addr, nil); err != nil {\n\t\tlog.Fatal(\"ListenAndServe:\", err)\n\t}\n}", "func (s *Server) server() {\n\trpc.Register(s)\n\trpc.HandleHTTP()\n\t//l, e := net.Listen(\"tcp\", \":1234\")\n\tsockname := masterSock()\n\tos.Remove(sockname)\n\tl, e := net.Listen(\"unix\", sockname)\n\tif e != nil {\n\t\tlog.Fatal(\"listen error:\", e)\n\t}\n\tgo http.Serve(l, nil)\n}", "func Example_clientServer() {\n\tl, err := net.Listen(\"tcp\", \"127.0.0.1:0\") // tcp4 so example will work on ipv6-disabled platforms\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// SERVER: start the server running in a separate goroutine\n\tvar waitServer sync.WaitGroup // We will wait for the server goroutine to finish before exiting\n\twaitServer.Add(1)\n\tgo func() { // Run the server in the background\n\t\tdefer waitServer.Done()\n\t\tServer(l)\n\t}()\n\n\t// CLIENT: Send messages to the server\n\taddr := l.Addr()\n\tc, err := electron.Dial(addr.Network(), addr.String())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\ts, err := c.Sender()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor i := 0; i < 3; i++ {\n\t\tmsg := fmt.Sprintf(\"hello %v\", i)\n\t\t// Send and wait for the Outcome from the server.\n\t\t// Note: For higher throughput, use SendAsync() to send a stream of 
messages\n\t\t// and process the returning stream of Outcomes concurrently.\n\t\ts.SendSync(amqp.NewMessageWith(msg))\n\t}\n\tc.Close(nil) // Closing the connection will stop the server\n\n\twaitServer.Wait() // Let the server finish\n\n\t// Output:\n\t// server received: \"hello 0\"\n\t// server received: \"hello 1\"\n\t// server received: \"hello 2\"\n\t// server receiver closed: EOF\n}", "func main() {\n\tzerr := os.Unsetenv(\"lastUserName\")\n\taerr := os.Unsetenv(\"lastUserID\")\n\tberr := os.Unsetenv(\"lastUserHandle\")\n\tif zerr != nil {\n\t\tfmt.Println(zerr)\n\t}\n\tif aerr != nil {\n\t\tfmt.Println(aerr)\n\t}\n\tif berr != nil {\n\t\tfmt.Println(berr)\n\t}\n\n\t//Load env\n\terr := godotenv.Load()\n\tif err != nil {\n\t\tlog.Fatal(\"Error loading .env file\")\n\t\tfmt.Println(\"Error loading .env file\")\n\t}\n\n\tfmt.Println(\"Starting Server\")\n\n\tif args := os.Args; len(args) > 1 && args[1] == \"-register\" {\n\t\tgo client.RegisterWebhook()\n\t} else {\n\t\tfmt.Println(\"No registration\")\n\t}\n\n\t//Create a new Mux Handler\n\tm := mux.NewRouter()\n\t//Listen to the base url and send a response\n\tm.HandleFunc(\"/\", func(writer http.ResponseWriter, _ *http.Request) {\n\t\twriter.WriteHeader(200)\n\t\tfmt.Fprintf(writer, \"Server is up and running\")\n\t})\n\t//Listen to crc check and handle\n\tm.HandleFunc(\"/webhook/twitter\", CrcCheck).Methods(\"GET\")\n\tm.HandleFunc(\"/webhook/twitter\", WebhookHandler).Methods(\"POST\")\n\tm.HandleFunc(\"/webhook/twilio\", SMSHandler).Methods(\"POST\")\n\n\t//Start Server\n\tserver := &http.Server{\n\t\tHandler: m,\n\t}\n\tserver.Addr = \":9090\"\n\tserver.ListenAndServe()\n}", "func startClient(t *testing.T, port uint) *Conn {\n\tconn := NewConn()\n\tgo monitor(conn.Err, t)\n\tconn.AddHandler(receiveReply)\n\n\taddr := fmt.Sprintf(\"127.0.0.1:%d\", port)\n\tif err := conn.Dial(addr); err != nil {\n\t\tt.Fatalf(\"Cannot connect to server: %q\", err)\n\t\treturn nil\n\t}\n\n\tmsg := 
[]byte(expectedRequest)\n\tconn.Unicast(msg)\n\n\treturn conn\n}", "func (clientservice *ClientService) ServeHTTP(response http.ResponseWriter, request *http.Request) {\n\t// print(\" I Am Called ... \")\n\tconn, er := upgrader.Upgrade(response, request, nil)\n\tif er != nil {\n\t\t// print(\"Error Upgrading the request \")\n\t\treturn\n\t}\n\tsession := clientservice.SessionHandler.GetSession(request)\n\tif session == nil {\n\t\t// print(\"He Doesn't Have a session ... \")\n\t\treturn\n\t}\n\tuser := clientservice.UserSer.GetUserByID(session.UserID)\n\tif user == nil {\n\t\t// print(\"There is no User by this id \")\n\t\treturn\n\t}\n\tclient := &Client{\n\t\tClientService: clientservice,\n\t\tConns: map[string]*entity.ClientConn{\n\t\t\tGetClientIPFromRequest(request): &entity.ClientConn{Conn: conn, Message: make(chan []byte), IP: GetClientIPFromRequest(request)},\n\t\t},\n\t\tID: user.ID,\n\t\tMessage: make(chan entity.EEMBinary),\n\t\tSessionHandler: clientservice.SessionHandler,\n\t\tUser: user,\n\t\tRequest: request,\n\t\tMainService: clientservice.MainService,\n\t\tMessageSer: clientservice.MessageSer,\n\t\tAlieSer: clientservice.AlieSer,\n\t\tSeenConfirmMsg: make(chan entity.SeenConfirmMessage),\n\t}\n\t// println(\"Sent to the register channel .. 
\")\n\tclientservice.MainService.Register <- client\n}", "func main() {\n\tapp.InitData()\n\n\tutils.SetLogLevel(utils.Level(app.Data.Config.LogLevel))\n\n\tapp.Data.DB.AutoMigrate(&models.Application{})\n\tapp.Data.DB.AutoMigrate(&models.Feedback{})\n\n\t// Get the mux router object\n\trouter := routers.InitRoutes()\n\n\t// Handle CORS\n\tc := cors.New(cors.Options{\n\t\tAllowedOrigins: app.Data.Config.AllowedOrigins,\n\t})\n\n\t// Create a negroni instance\n\tn := negroni.Classic()\n\tn.Use(c)\n\tn.UseHandler(router)\n\n\t// Create the Server\n\tserver := &http.Server{\n\t\tAddr: app.Data.Config.Server,\n\t\tHandler: n,\n\t}\n\n\tutils.Info.Printf(\"Listening on http://%s ...\", app.Data.Config.Server)\n\t// Running the HTTP Server\n\tserver.ListenAndServe()\n}", "func (srv *Server) ServeClient(conn net.Conn) (err error) {\n\n\tc := newConn(conn, srv.timeout)\n\n\tdefer func() {\n\t\tif msg := recover(); msg != nil {\n\t\t\tlog.Error(string(debug.Stack()))\n\t\t\tlog.Error(c.summ, \" panic:\", msg)\n\t\t\tsrv.removeConn(c)\n\t\t\tc.Close()\n\t\t}\n\t}()\n\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tlog.Error(c.summ, err)\n\t\t}\n\t\tsrv.removeConn(c)\n\t\tc.Close()\n\t}()\n\tsrv.addConn(c)\n\n\t//clientChan := make(chan struct{})\n\t//// Read on `conn` in order to detect client disconnect\n\t//go func() {\n\t//\t// Close chan in order to trigger eventual selects\n\t//\tdefer close(clientChan)\n\t//\tdefer Debugf(\"Client disconnected\")\n\t//\t// FIXME: move conn within the request.\n\t//\tif false {\n\t//\t\tio.Copy(ioutil.Discard,c.nc )\n\t//\t}\n\t//}()\n\n\tvar clientAddr string\n\n\tclientAddr = c.nc.RemoteAddr().String()\n\tvar session session.Session\n\t//session.Conntime = time.Now()\n\tfor {\n\t\tif c.timeout > 0 {\n\t\t\tdeadline := time.Now().Add(c.timeout)\n\t\t\tif err := c.nc.SetReadDeadline(deadline); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\trequest, err := parseRequest(c)\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\t\t//session.Lasttime = time.Now()\n\t\trequest.Host = clientAddr\n\t\t//request.ClientChan = clientChan\n\t\treply, err := srv.Apply(request, &session)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t} else if strings.ToLower(request.Name) == \"auth\" {\n\t\t\tsession.Authed = true\n\t\t}\n\t\t//if _, err = reply.WriteTo(c.w); err != nil {\n\t\t//\treturn err\n\t\t//}\n\t\tif c.timeout > 0 {\n\t\t\tdeadline := time.Now().Add(c.timeout)\n\t\t\tif err := c.nc.SetWriteDeadline(deadline); err != nil {\n\t\t\t\treturn errors.Trace(err)\n\t\t\t}\n\t\t}\n\t\tif err = c.writeRESP(reply); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\treturn nil\n}", "func createAndStartServer() {\n\thttp.HandleFunc(\"/\", HomeHandler)\n\thttp.HandleFunc(\"/getShortLink\", onGetShortLink)\n\thttp.HandleFunc(\"/getRedirectLink\", onGetRedirectLink)\n\thttp.HandleFunc(\"/getVisits\", onGetVisits)\n\thttp.HandleFunc(\"/registerNewKey\", onRegisterNewKey)\n\thttp.ListenAndServe(os.Getenv(\"APP_URL\"), nil) // getting env var for port\n}", "func main() {\n\t// conect db\n\tconnect_db()\n\t// connect router untuk url\n\troutes()\n\n\tdefer db.Close()\n\t//server aktif pada port :8000\n\tfmt.Println(\"Server is Actived in port :8000\")\n\thttp.ListenAndServe(\":8000\", nil)\n}", "func main() {\n\tcalculix := serverCalculix.NewCalculix()\n\terr := rpc.Register(calculix)\n\tif err != nil {\n\t\tfmt.Println(\"Cannot register the calculix\")\n\t\treturn\n\t}\n\trpc.HandleHTTP()\n\tl, e := net.Listen(\"tcp\", \":1234\")\n\tif e != nil {\n\t\tlog.Fatal(\"listen error:\", e)\n\t}\n\terr = http.Serve(l, nil)\n\tif err != nil {\n\t\tfmt.Println(\"Cannot serve the calculix\")\n\t\treturn\n\t}\n}", "func createclient(conn net.Conn) {\n\n\tlog.Printf(\"createclient: remote connection from: %v\", conn.RemoteAddr())\n\n\tname, err := readInput(conn, \"Please Enter Name: \")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\twriteFormattedMsg(conn, \"Welcome \"+name)\n\n\t//initialize client 
struct\n\tclient := &client{\n\t\tMessage: make(chan string),\n\t\tConn: conn,\n\t\tName: name,\n\t\tRoom: \"\",\n\t}\n\n\tlog.Printf(\"new client created: %v %v\", client.Conn.RemoteAddr(), client.Name)\n\n\t//spin off separate send, receive\n\tgo client.send()\n\tgo client.receive()\n\n\t//print help\n\twriteFormattedMsg(conn, help)\n}", "func main() {\n\n\tlog.Println(\"launching tcp server...\")\n\n\t// start tcp listener on all interfaces\n\t// note that each connection consumes a file descriptor\n\t// you may need to increase your fd limits if you have many concurrent clients\n\tln, err := net.Listen(\"tcp\", \":8081\")\n\tif err != nil {\n\t\tlog.Fatalf(\"could not listen: %s\", err)\n\t}\n\tdefer ln.Close()\n\n\tfor {\n\t\tlog.Println(\"waiting for incoming TCP connections...\")\n\t\t// Accept blocks until there is an incoming TCP connection\n\t\tincoming, err := ln.Accept()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"couldn't accept %s\", err)\n\t\t}\n\n\t\tincomingConn, err := yamux.Client(incoming, yamux.DefaultConfig())\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"couldn't create yamux %s\", err)\n\t\t}\n\n\t\tlog.Println(\"starting a gRPC server over incoming TCP connection\")\n\n\t\tvar conn *grpc.ClientConn\n\t\t// gRPC dial over incoming net.Conn\n\t\tconn, err = grpc.Dial(\":7777\", grpc.WithInsecure(),\n\t\t\tgrpc.WithDialer(func(target string, timeout time.Duration) (net.Conn, error) {\n\t\t\t\treturn incomingConn.Open()\n\t\t\t}),\n\t\t)\n\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"did not connect: %s\", err)\n\t\t}\n\n\t\t// handle connection in goroutine so we can accept new TCP connections\n\t\tgo handleConn(conn)\n\t}\n}" ]
[ "0.6618485", "0.65211356", "0.646171", "0.638421", "0.6304717", "0.6287283", "0.6227207", "0.6171384", "0.6169162", "0.60877883", "0.6082746", "0.6051843", "0.6039413", "0.6016558", "0.6005998", "0.599888", "0.59954786", "0.5988336", "0.5975594", "0.59596217", "0.59543747", "0.5952127", "0.5949981", "0.5927621", "0.5925937", "0.5912194", "0.59091496", "0.5906462", "0.58927876", "0.5892293", "0.58847135", "0.58747095", "0.5867609", "0.58420324", "0.58409727", "0.58318657", "0.58289784", "0.58192223", "0.58142304", "0.58090085", "0.58037466", "0.57959557", "0.5788047", "0.57819134", "0.57712126", "0.577002", "0.576508", "0.57625616", "0.57451457", "0.574292", "0.57416", "0.57364875", "0.5735231", "0.5726571", "0.5717673", "0.5715473", "0.57133436", "0.5707281", "0.57059646", "0.5703471", "0.5703297", "0.57000387", "0.5697043", "0.56926674", "0.5687085", "0.5681718", "0.56813097", "0.56728905", "0.56695795", "0.566167", "0.56582403", "0.56572574", "0.5655163", "0.5654936", "0.56485766", "0.5642845", "0.5641944", "0.56298816", "0.56276196", "0.562279", "0.56154794", "0.5615054", "0.560743", "0.5607085", "0.5603428", "0.5592665", "0.55918926", "0.5591424", "0.5589109", "0.55844593", "0.5582184", "0.557987", "0.5579836", "0.5579633", "0.5577563", "0.5570567", "0.5565663", "0.5563316", "0.55599624", "0.5558854" ]
0.6478236
2
server listens for incoming requests and dispatches them to registered handler functions.
func server() error { endpoint := NewEndpoint() // Add the handle funcs. endpoint.AddHandleFunc("STRING", handleStrings) endpoint.AddHandleFunc("GOB", handleGob) // Start listening. return endpoint.Listen() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func server() {\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"/\", respond.HandleRequest)\n\tlog.Fatal(http.ListenAndServe(\":\"+*port, mux))\n}", "func handleRequests() {\n\taddress := \"localhost\"\n\taddress += \":\"\n\taddress += \"8000\"\n\tfmt.Printf(\"Server at address %v is up\\n\", \"http://\"+address)\n\n\thttp.HandleFunc(\"/\", homePage)\n\thttp.HandleFunc(\"/author\", author)\n\thttp.HandleFunc(\"/articles\", returnAllArticles)\n\tlog.Fatal(http.ListenAndServe(address, nil)) // nill: use DefaultServerMux\n}", "func handlerServer(w http.ResponseWriter, r *http.Request) {\n\tsetHeader(w, r)\n\treadCookies(r)\n\tserver := r.URL.Query().Get(\"server\")\n\taction := r.URL.Query().Get(\"action\")\n\tswitch action {\n\tcase \"reloader\":\n\t\t_, _ = io.WriteString(w, getServerTubes(server))\n\t\treturn\n\tcase \"clearTubes\":\n\t\t_ = r.ParseForm()\n\t\tclearTubes(server, r.Form)\n\t\t_, _ = io.WriteString(w, `{\"result\":true}`)\n\t\treturn\n\t}\n\t_, _ = io.WriteString(w, tplServer(getServerTubes(server), server))\n}", "func (h *Handler) serveServers(w http.ResponseWriter, r *http.Request) {}", "func handleRequests() {\n\thttp.HandleFunc(\"/\", homePage)\n\thttp.HandleFunc(\"/movies\", getAllMoviesSortedDetails)\n\tlog.Fatal(http.ListenAndServe(\":8081\", nil))\n}", "func (s *Server) handleWhatever() {}", "func server(serCh serverChannelsT) {\n\tmux := goji.NewMux()\n\tmux.HandleFunc(pat.Get(\"/files\"), makeClientRequestHandler(serCh))\n\tmux.HandleFunc(pat.Post(\"/\"), makeWatcherInputHandler(serCh))\n\thttp.ListenAndServe(\"localhost:3000\", mux)\n}", "func handleRequests() {\n\trouter := mux.NewRouter().StrictSlash( true )\n\trouter.HandleFunc(\"/\", homeLink)\n\trouter.HandleFunc(\"/server\", createServer).Methods(\"POST\")\n\trouter.HandleFunc(\"/servers/{id}\", getOneServer).Methods(\"GET\")\n\trouter.HandleFunc(\"/servers/{id}\", updateServer).Methods(\"PATCH\")\n\trouter.HandleFunc(\"/servers/{id}\", 
deleteServer).Methods(\"DELETE\")\n\trouter.HandleFunc(\"/servers\", getAllServers).Methods(\"GET\")\n\trouter.HandleFunc(\"/servers\", findAddress).Methods(\"POST\")\n\tlog.Fatal(http.ListenAndServe(\":8888\", router))\n}", "func handleServerRequests(t *testing.T, ctx context.Context, wg *sync.WaitGroup, sshCn ssh.Conn, reqs <-chan *ssh.Request) {\n\tdefer wg.Done()\n\tfor r := range reqs {\n\t\tif !r.WantReply {\n\t\t\tcontinue\n\t\t}\n\t\tif r.Type != \"tcpip-forward\" {\n\t\t\tr.Reply(false, nil)\n\t\t\tcontinue\n\t\t}\n\t\tvar args struct {\n\t\t\tHost string\n\t\t\tPort uint32\n\t\t}\n\t\tif !unmarshalData(r.Payload, &args) {\n\t\t\tr.Reply(false, nil)\n\t\t\tcontinue\n\t\t}\n\t\tln, err := net.Listen(\"tcp\", net.JoinHostPort(args.Host, strconv.Itoa(int(args.Port))))\n\t\tif err != nil {\n\t\t\tr.Reply(false, nil)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar resp struct{ Port uint32 }\n\t\t_, resp.Port = splitHostPort(ln.Addr().String())\n\t\tif err := r.Reply(true, marshalData(resp)); err != nil {\n\t\t\tt.Errorf(\"request reply error: %v\", err)\n\t\t\tln.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\twg.Add(1)\n\t\tgo handleLocalListener(t, ctx, wg, sshCn, ln, args.Host)\n\n\t}\n}", "func server(ctx *cli.Context) {\n\t//configInit()\n\tinitLogLevel()\n\tlog.Fatal(http.ListenAndServe(Config().GetString(\"addr\"), handler)\n}", "func main() {\n\tlisten_fds := ListenFds()\n\n\tfor _, fd := range listen_fds {\n\t\tl, err := net.FileListener(fd)\n\t\tif err != nil {\n\t\t\t// handle error\n\t\t\tfmt.Println(\"got err\", err)\n\t\t}\n\n\t\thttp.HandleFunc(\"/\", handler)\n\t\thttp.Serve(l, nil)\n\t}\n}", "func main() {\n\tch := make(chan string)\n\tgo IOHandler(ch)\n\tfmt.Println(\"Server started\")\n\tservice := \"localhost:3000\"\n\ttcpAddr, err := net.ResolveTCPAddr(\"tcp\", service)\n\tif err != nil {\n\t\tfmt.Println(\"Could not resolve: \", service)\n\t\tos.Exit(1)\n\t} else {\n\t\tlistener, err := net.Listen(tcpAddr.Network(), tcpAddr.String())\n\t\tif err != nil 
{\n\t\t\tfmt.Println(\"Could not listen on: \", tcpAddr)\n\t\t\tos.Exit(1)\n\t\t} else {\n\t\t\tdefer listener.Close()\n\t\t\tfor {\n\t\t\t\tfmt.Println(\"Listening for clients\")\n\t\t\t\tconn, err := listener.Accept()\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"Client error: \", err)\n\t\t\t\t} else {\n\t\t\t\t\t//Create routine for each connected client\n\t\t\t\t\tgo ConnHandler(conn, ch)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func main() {\n\tserver := http.Server{\n\t\tAddr: \":5000\",\n\t\tHandler: myHandler(),\n\t}\n\n\tserver.ListenAndServe()\n}", "func (c *Connection) listenServer() {\n\tin := bufio.NewScanner(c.Conn)\n\tfor in.Scan() {\n\t\tif p, err := Parse(in.Text()); err != nil {\n\t\t\tlogger.Print(\"parse error:\", err)\n\t\t} else {\n\t\t\tc.ServerChan <- p\n\t\t}\n\t}\n\tclose(done)\n}", "func main() {\n\tln, err := net.Listen(\"tcp\", \":8888\")\n\tif err != nil {\n\t\t// handle the error, e.g. `log.Fatal(err)`\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(\"Listening on \", ln.Addr())\n\tfor {\n\t\tc, err := ln.Accept()\n\t\tif err == nil {\n\t\t\t// do something with `c`\n\t\t\tfmt.Println(\"Connection: \", c)\n\t\t\t// Start goroutines by prepending the `go` keyword to call the serve function\n\t\t\tgo serve(c)\n\t\t}\n\t}\n}", "func handleRequests() {\n\n\thttp.HandleFunc(\"/\", home)\n\thttp.HandleFunc(\"/greet-me/\", greetMe)\n\thttp.HandleFunc(\"/books\", getBooks)\n\thttp.HandleFunc(\"/book\", createBook)\n\thttp.HandleFunc(\"/book/\", getBookById)\n\thttp.HandleFunc(\"/books/title/\", getBookByTitle)\n\n\tlog.Fatal(http.ListenAndServe(\":10000\", nil))\n}", "func serve(svr *http.Server) {\n\tlog.Info(\"accepting connections\", zap.String(\"addr\", config.Bind))\n\tif err := svr.ListenAndServe(); err != nil {\n\t\tlog.Fatal(\"error serving requests\", zap.Error(err))\n\t}\n}", "func main() {\n\thttp.HandleFunc(\"/api/backend\", handler.HandleBackendCall)\n\thttp.HandleFunc(\"/api/schema\", 
handler.HandleSchemaCall)\n\thttp.HandleFunc(\"/api/redirect\", handler.HandleRedirectCall)\n\thttp.HandleFunc(\"/api/add\", handler.HandleAddCall)\n\tfmt.Println(\"Waiting...\")\n\thttp.ListenAndServe(\":8080\", nil)\n\n}", "func Server(handler *handler, host string) error {\n\t//run server\n\thttp.HandleFunc(\"/books\", handler.booksHandler)\n\thttp.HandleFunc(\"/books/\", handler.booksHandlerByID)\n\treturn http.ListenAndServe(host, nil)\n}", "func handleRequests() {\n\trouter := mux.NewRouter().StrictSlash(true)\n\tsubRouter := router.PathPrefix(\"/nobel/winners\").Subrouter()\n\n\t// Routes consist of a path and a handler function.\n\tsubRouter.HandleFunc(\"/fetch/all\", getNobelWinnersList).Methods(\"GET\")\n\tsubRouter.HandleFunc(\"/fetch/{id}\", getNobelWinnersByID).Methods(\"GET\")\n\n\tlog.Print(\"Listening port 8081...\")\n\t// Bind to a port and pass our router in\n\tlog.Fatal(http.ListenAndServe(\":8081\", router))\n}", "func server(server_port string) {\n\tl, err := net.Listen(\"tcp\", \"127.0.0.1:\"+server_port)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer l.Close()\n\n\tfor {\n\t\tconn, err := l.Accept()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\t// 注意这里不能起goroutine去执行,不然test10和test15过不了\n\t\thandleConn(conn)\n\t}\n\n}", "func (srv *server) Serve() {\n\tfor {\n\t\tcli, err := srv.l.Accept()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Closing server:\", err)\n\t\t\tbreak // on srv.l.Close\n\t\t}\n\t\tgo srv.handle(newConn(cli))\n\t}\n}", "func handleRequests() {\n r := mux.NewRouter()\n\n // Paths\n r.HandleFunc(\"/mine\", mineBlockHandler).Methods(\"POST\")\n r.HandleFunc(\"/{index}\", getBlockHandler)\n r.HandleFunc(\"/\", getBlockchainHandler)\n\n // Run the server\n port := \":10000\"\n fmt.Println(\"\\nListening on port \" + port[1:])\n log.Fatal(http.ListenAndServe(port, r))\n}", "func server(port string) error {\n\tlistener, err := net.Listen(\"tcp\", \":\"+port)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer 
listener.Close()\n\tlog.Println(\"Started server at\", listener.Addr())\n\n\tfor {\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Connection failed:\", err)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Println(\"Accepted connection from\", conn.RemoteAddr())\n\t\t// Handle multiple client connections concurrently\n\t\tgo handleConnection(conn)\n\t}\n}", "func (s *Server) ListenAndServe() error {\n\tr := mux.NewRouter()\n\tr.HandleFunc(\"/postreceive\", s.GHEventHandler)\n\n\t// add any handlers\n\tloggedHandlers := handlers.LoggingHandler(os.Stdout, r)\n\n\tsrv := &http.Server{\n\t\tHandler: loggedHandlers,\n\t\tAddr: s.Addr,\n\t\tWriteTimeout: 15 * time.Second,\n\t\tReadTimeout: 15 * time.Second,\n\t}\n\n\tfmt.Println(fmt.Sprintf(\"Listening on %s; Path for event is %s\", s.Addr, \"/postreceive\"))\n\n\treturn srv.ListenAndServe()\n}", "func main() {\n\tHandleRequests( )\n}", "func ListenAndServe() {\n\tparseflags()\n\n\tport := \":\" + GetOption(portflag, \"PORT\", \"8080\")\n\n\tserverMux := http.NewServeMux()\n\tserver := http.Server{Addr: port, Handler: serverMux}\n\n\t// Use a channel to signal server closure\n\tserverClosed := make(chan struct{})\n\n\tlog.Printf(\"Server version: %v\", version)\n\n\t// Call init functions\n\tif initfuncs != nil {\n\t\tlog.Println(\"Initializing modules...\")\n\t\tfor _, initfunction := range initfuncs {\n\t\t\tinitfunction()\n\t\t}\n\t}\n\n\t// Set up handlers\n\tfor path, handler := range handlers {\n\t\tserverMux.HandleFunc(path, handler)\n\t}\n\n\t// Set up home route, if specified and valid\n\tif roothandlerpath != \"\" {\n\t\troothandler, ok := handlers[roothandlerpath]\n\t\tif ok {\n\t\t\tserverMux.HandleFunc(\"/\", roothandler)\n\t\t}\n\t}\n\n\tgo func() {\n\t\tsignalReceived := make(chan os.Signal, 1)\n\n\t\t// Handle SIGINT\n\t\tsignal.Notify(signalReceived, os.Interrupt)\n\t\t// Handle SIGTERM\n\t\tsignal.Notify(signalReceived, syscall.SIGTERM)\n\n\t\t// Wait for 
signal\n\t\t<-signalReceived\n\n\t\tlog.Println(\"Server shutting down...\")\n\t\tif err := server.Shutdown(context.Background()); err != nil {\n\t\t\t// Error from closing listeners, or context timeout:\n\t\t\tlog.Fatalf(\"Error during HTTP server shutdown: %v.\", err)\n\t\t}\n\n\t\tclose(serverClosed)\n\t}()\n\n\t// Start listening using the server\n\tlog.Printf(\"Server starting on port %v...\\n\", port)\n\tif err := server.ListenAndServe(); err != http.ErrServerClosed {\n\t\tlog.Fatalf(\"The server failed with the following error: %v.\\n\", err)\n\t}\n\n\t<-serverClosed\n\n\t// Call shutdown functions\n\tif shutdownfuncs != nil {\n\t\tlog.Println(\"Shutting down modules...\")\n\t\tfor _, shutdownfunction := range shutdownfuncs {\n\t\t\tshutdownfunction()\n\t\t}\n\t}\n\n\tlog.Println(\"Server shut down.\")\n}", "func (s *Server) httpServer(log *logrus.Entry) *http.Server {\n\taddr := fmt.Sprintf(\"%s:%d\", s.config.HTTPServer.Host, s.config.HTTPServer.Port)\n\tlog.Debugf(\"http server will listen on: %s\", addr)\n\n\tmux := mux.NewRouter()\n\tfor _, route := range []struct {\n\t\t// name of the route\n\t\tname string\n\t\t// path of the route\n\t\tpath string\n\t\t// allowed methods for this route\n\t\tmethods string\n\t\t// handler is the http handler to run if the route matches\n\t\thandler func(http.ResponseWriter, *http.Request)\n\t\t// excluded tells if the route should be added to the router,\n\t\t// it's in the negative form so that the default behaviour is to add\n\t\t// the route to the router\n\t\texcluded bool\n\t}{\n\t\t{\n\t\t\tname: \"GetMovies\",\n\t\t\tpath: \"/movies\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: s.movieIndex,\n\t\t},\n\t\t{\n\t\t\tname: \"GetMovie\",\n\t\t\tpath: \"/movies/{id}\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: s.getMovieDetails,\n\t\t},\n\t\t{\n\t\t\tname: \"DeleteMovie\",\n\t\t\tpath: \"/movies/{id}\",\n\t\t\tmethods: \"DELETE\",\n\t\t\thandler: s.deleteMovie,\n\t\t},\n\t\t{\n\t\t\tname: \"DownloadMovie\",\n\t\t\tpath: 
\"/movies/{id}/download\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: s.serveMovie,\n\t\t\texcluded: !s.config.HTTPServer.ServeFiles,\n\t\t},\n\t\t{\n\t\t\tname: \"DownloadMovieSubtitle\",\n\t\t\tpath: \"/movies/{id}/subtitles/{lang}/download\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: s.serveMovieSubtitle,\n\t\t\texcluded: !s.config.HTTPServer.ServeFiles,\n\t\t},\n\t\t{\n\t\t\tname: \"UpdateMovieSubtitle\",\n\t\t\tpath: \"/movies/{id}/subtitles/{lang}\",\n\t\t\tmethods: \"POST\",\n\t\t\thandler: s.updateMovieSubtitle,\n\t\t},\n\t\t{\n\t\t\tname: \"GetShows\",\n\t\t\tpath: \"/shows\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: s.showIds,\n\t\t},\n\t\t{\n\t\t\tname: \"GetShow\",\n\t\t\tpath: \"/shows/{id}\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: s.getShowDetails,\n\t\t},\n\t\t{\n\t\t\tname: \"DeleteShow\",\n\t\t\tpath: \"/shows/{id}\",\n\t\t\tmethods: \"DELETE\",\n\t\t\thandler: s.deleteShow,\n\t\t},\n\t\t{\n\t\t\tname: \"GetSeason\",\n\t\t\tpath: \"/shows/{id}/seasons/{season:[0-9]+}\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: s.getSeasonDetails,\n\t\t},\n\t\t{\n\t\t\tname: \"DeleteSeason\",\n\t\t\tpath: \"/shows/{id}/seasons/{season:[0-9]+}\",\n\t\t\tmethods: \"DELETE\",\n\t\t\thandler: s.deleteSeason,\n\t\t},\n\t\t{\n\t\t\tname: \"GetEpisode\",\n\t\t\tpath: \"/shows/{id}/seasons/{season:[0-9]+}/episodes/{episode:[0-9]+}\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: s.getShowEpisodeIDDetails,\n\t\t},\n\t\t{\n\t\t\tname: \"DeleteEpisode\",\n\t\t\tpath: \"/shows/{id}/seasons/{season:[0-9]+}/episodes/{episode:[0-9]+}\",\n\t\t\tmethods: \"DELETE\",\n\t\t\thandler: s.deleteEpisode,\n\t\t},\n\t\t{\n\t\t\tname: \"UpdateEpisodeSubtitle\",\n\t\t\tpath: \"/shows/{id}/seasons/{season:[0-9]+}/episodes/{episode:[0-9]+}/subtitles/{lang}\",\n\t\t\tmethods: \"POST\",\n\t\t\thandler: s.updateEpisodeSubtitle,\n\t\t},\n\t\t{\n\t\t\tname: \"DownloadEpisode\",\n\t\t\tpath: \"/shows/{id}/seasons/{season:[0-9]+}/episodes/{episode:[0-9]+}/download\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: 
s.serveEpisode,\n\t\t\texcluded: !s.config.HTTPServer.ServeFiles,\n\t\t},\n\t\t{\n\t\t\tname: \"DownloadEpisodeSubtitle\",\n\t\t\tpath: \"/shows/{id}/seasons/{season:[0-9]+}/episodes/{episode:[0-9]+}/subtitles/{lang}/download\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: s.serveEpisodeSubtitle,\n\t\t\texcluded: !s.config.HTTPServer.ServeFiles,\n\t\t},\n\t\t{\n\t\t\tname: \"Wishlist\",\n\t\t\tpath: \"/wishlist\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: s.wishlist,\n\t\t},\n\t\t{\n\t\t\tname: \"AddTorrent\",\n\t\t\tpath: \"/torrents\",\n\t\t\tmethods: \"POST\",\n\t\t\thandler: s.addTorrent,\n\t\t},\n\t\t{\n\t\t\tname: \"ListTorrents\",\n\t\t\tpath: \"/torrents\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: s.getTorrents,\n\t\t},\n\t\t{\n\t\t\tname: \"RemoveTorrent\",\n\t\t\tpath: \"/torrents/{id}\",\n\t\t\tmethods: \"DELETE\",\n\t\t\thandler: s.removeTorrent,\n\t\t},\n\t\t{\n\t\t\tname: \"GetModulesStatus\",\n\t\t\tpath: \"/modules/status\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: s.getModulesStatus,\n\t\t},\n\t\t{\n\t\t\tname: \"PprofIndex\",\n\t\t\tpath: \"/debug/pprof/\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: pprof.Index,\n\t\t},\n\t\t{\n\t\t\tname: \"PprofBlock\",\n\t\t\tpath: \"/debug/pprof/block\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: pprof.Index,\n\t\t},\n\t\t{\n\t\t\tname: \"PprofGoroutine\",\n\t\t\tpath: \"/debug/pprof/goroutine\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: pprof.Index,\n\t\t},\n\t\t{\n\t\t\tname: \"PprofHeap\",\n\t\t\tpath: \"/debug/pprof/heap\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: pprof.Index,\n\t\t},\n\t\t{\n\t\t\tname: \"PprofMutex\",\n\t\t\tpath: \"/debug/pprof/mutex\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: pprof.Index,\n\t\t},\n\t\t{\n\t\t\tname: \"PprofCmdline\",\n\t\t\tpath: \"/debug/pprof/cmdline\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: pprof.Cmdline,\n\t\t},\n\t\t{\n\t\t\tname: \"PprofProfile\",\n\t\t\tpath: \"/debug/pprof/profile\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: pprof.Profile,\n\t\t},\n\t\t{\n\t\t\tname: 
\"PprofSymbol\",\n\t\t\tpath: \"/debug/pprof/symbol\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: pprof.Symbol,\n\t\t},\n\t\t{\n\t\t\tname: \"PprofTrace\",\n\t\t\tpath: \"/debug/pprof/trace\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: pprof.Trace,\n\t\t},\n\t\t{\n\t\t\tname: \"Metrics\",\n\t\t\tpath: \"/metrics\",\n\t\t\tmethods: \"GET\",\n\t\t\thandler: promhttp.Handler().ServeHTTP,\n\t\t},\n\t} {\n\t\tif route.excluded {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Register the route\n\t\tmux.HandleFunc(route.path, route.handler).Name(route.name).Methods(route.methods)\n\t}\n\n\tn := negroni.New()\n\n\t// Panic recovery\n\tn.Use(negroni.NewRecovery())\n\n\t// Use logrus as logger\n\tn.Use(newLogrusMiddleware(s.log.Logger, s.config.HTTPServer.LogExcludePaths))\n\n\t// Add basic auth if configured\n\tif s.config.HTTPServer.BasicAuth {\n\t\tlog.Info(\"server will require basic authentication\")\n\t\tn.Use(NewBasicAuthMiddleware(s.config.HTTPServer.BasicAuthUser, s.config.HTTPServer.BasicAuthPassword))\n\t}\n\n\t// Add token auth middleware if token configuration file specified\n\tif s.authManager != nil {\n\t\tn.Use(auth.NewMiddleware(s.authManager, mux))\n\t\tmux.HandleFunc(\"/tokens/allowed\", s.tokenGetAllowed).Name(\"TokenGetAllowed\")\n\t}\n\n\t// Wrap the router\n\tn.UseHandler(mux)\n\n\treturn &http.Server{Addr: addr, Handler: n}\n}", "func serve(port int, handler connectionhandler) {\n \n if port < 1024 || port > 65535 {\n // todo: how does go handle errors.\n }\n\n portspec := fmt.Sprintf(\":%d\", port)\n\n sock, err := net.Listen(\"tcp\", portspec)\n if err != nil {\n // error\n fmt.Printf(\"%d\", err)\n }\n\n for {\n conn, err := sock.Accept()\n if err != nil {\n fmt.Printf(\"%d\", err) \n }\n go handler(conn) \n }\n}", "func ServerAnswerHandler() {\n\tfor {\n\t\tmsg := make([]byte, 1024)\n\t\tif _, err := ws.Read(msg); err != nil {\n\t\t\tlog.Print(\"Connection closed...\")\n\t\t\tbreak\n\t\t} else {\n\t\t\tlog.Printf(\"Received: %s\", msg)\n\t\t}\n\t}\n}", "func 
serve(config *config.Config) {\n\trouter := gin.Default()\n\n\t// Set the config in our handlers to give them access to server configuration\n\thandlers.SetConfig(config)\n\n\t// Initialize our routes to point to our handlers\n\tapi := router.Group(config.Server.APIPrefix)\n\tapi.GET(\"/ping\", handlers.PingHandler)\n\tapi.GET(\"/posts\", handlers.PostsHandler)\n\n\t// Configure the HTTP server\n\tserver := &http.Server {\n\t\tAddr: config.Server.Address,\n\t\tHandler: router,\n\t}\n\n\t// Start the HTTP server\n\tlog.Println(\"Starting HatchWays API Server\")\n\tif err := server.ListenAndServe(); err != nil {\n\t\tlog.Fatal(\"Error starting HatchWays API Server: \" + err.Error())\n\t}\n\n}", "func RunHTTPServer(addr string) error {\n\tr := mux.NewRouter()\n\tr.HandleFunc(\"/method/{method}\", methodsHandler)\n\tr.HandleFunc(\"/\", staticHandler)\n\treturn http.ListenAndServe(addr, r)\n}", "func main() {\n\tserver := http.Server{\n\t\tAddr: \"127.0.0.1:8080\",\n\t}\n\thttp.HandleFunc(\"/write\", writeExample)\n\thttp.HandleFunc(\"/writeheader\", writeHeaderExample)\n\thttp.HandleFunc(\"/redirect\", headerExample)\n\thttp.HandleFunc(\"/json\", jsonExample)\n\tserver.ListenAndServe()\n}", "func ListenAndServe(module *js.Object) {\n\tmodule.Get(\"exports\").Set(\n\t\t\"handler\",\n\t\tfunc(event, context, callback *js.Object) {\n\t\t\t_alexa := js.Global.Call(\"require\", \"alexa-sdk\")\n\n\t\t\tvar alexaResponse = _alexa.Call(\n\t\t\t\t\"handler\", event, context)\n\t\t\talexaResponse.Call(\"registerHandlers\", handlers)\n\t\t\talexaResponse.Call(\"execute\")\n\t\t})\n}", "func main() {\n\tif err := http.ListenAndServe(port, handler()); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func main() {\n\n\tvar dog dogHandler\n\tvar cat catHandler\n\n\tmux := http.NewServeMux()\n\n\tmux.Handle(\"/dog/\", dog)\n\tmux.Handle(\"/cat/\", cat)\n\n\t// The server listens and serves all requests coming to to locahost:9000\n\t// using mux as the 
multiplexer\n\thttp.ListenAndServe(\":9000\", mux)\n\n}", "func (h *Handler) handleRequests() {\n\thttp.HandleFunc(\"/\", homePage)\n\thttp.HandleFunc(\"/customers\", h.returnAllCustomers)\n\tlog.Fatal(http.ListenAndServe(frontendPort, nil))\n}", "func startEventsHandler(h *HTTP, tasks *sync.WaitGroup) {\n\tfor {\n\t\tif event := <-h.events; event != nil {\n\t\t\teventMessage := event.Error()\n\t\t\tfmt.Println(eventMessage)\n\t\t\tswitch {\n\t\t\tcase strings.Contains(eventMessage, \"http: Server closed\"):\n\t\t\t\th.Server.ErrorLog.Printf(\"server was closed\")\n\t\t\t\ttasks.Done()\n\t\t\t\treturn\n\t\t\tcase strings.Contains(eventMessage, \"bind: address already in use\"):\n\t\t\t\th.Server.ErrorLog.Printf(\"failed to start server: '%s' is already in use\", h.Options.Addr.String())\n\t\t\t\thandleShutdown(h, event)\n\t\t\t\ttasks.Done()\n\t\t\t\treturn\n\t\t\tcase strings.Contains(eventMessage, \"received signal: \"):\n\t\t\t\th.Server.ErrorLog.Printf(\"server %s\", eventMessage)\n\t\t\t\thandleShutdown(h, event)\n\t\t\t\th.Server.Close()\n\t\t\tdefault:\n\t\t\t\th.Server.ErrorLog.Printf(\"unknown event: %s\", event)\n\t\t\t}\n\t\t}\n\t}\n}", "func registerHandlers(s *server) error {\n\terr := s.AddHandler(\"set\", cmdSet)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = s.AddHandler(\"get\", cmdGet)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = s.AddHandler(\"delete\", cmdDelete)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = s.AddHandler(\"stats\", cmdStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = s.AddHandler(\"quit\", cmdQuit)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func main() {\n\n\tlog.Printf(\"Server started\")\n\n\trouter := sw.NewRouter()\n\n\tport := os.Getenv(\"PORT\")\n\tif port == \"\" {\n\t\tport = \"5000\"\n\t}\n\n\theadersOk := handlers.AllowedHeaders([]string{\"X-Requested-With\", \"Content-Type\"})\n\toriginsOk := handlers.AllowedOrigins([]string{\"*\"})\n\tmethodsOk := 
handlers.AllowedMethods([]string{\"GET\", \"HEAD\", \"POST\", \"PUT\", \"OPTIONS\"})\n\n\tlog.Fatal(http.ListenAndServe(\":\"+port, handlers.CORS(originsOk, headersOk, methodsOk)(router)))\n}", "func RunServer(server *ophttp.Server) {\n\thttp.Handle(\"/greeting\", http.HandlerFunc(GreetingHandler))\n\tserver.Start()\n}", "func main() {\n\tlog.Printf(\"listening on %s and serving files from %s\\n\", port, dir)\n\thttp.ListenAndServe(port, server.Handler(dir))\n}", "func main() {\n\t// Construct a new \"server\"; its methods are HTTP endpoints.\n\tserver, err := newServer()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Construct a router which binds URLs + HTTP verbs to methods of server.\n\trouter := httprouter.New()\n\trouter.POST(\"/v1/events\", server.createEvent)\n\trouter.GET(\"/v1/ltv\", server.getLTV)\n\n\t// Listen and serve HTTP traffic on port 3000.\n\tif err := http.ListenAndServe(\":3000\", router); err != nil {\n\t\tpanic(err)\n\t}\n}", "func handleRequests() {\n\trouter := mux.NewRouter()\n\n\trouter.HandleFunc(\"/\", index).Methods(\"GET\")\n\n\t// GET, POST, PUT, DELETE\n\trouter.HandleFunc(\"/todos\", ListTodos).Methods(\"GET\")\n\trouter.HandleFunc(\"/todos\", AddTodo).Methods(\"POST\")\n\trouter.HandleFunc(\"/todos/{id:[0-9]+}\", UpdateTodo).Methods(\"PUT\")\n\trouter.HandleFunc(\"/todos/{id:[0-9]+}\", DeleteTodo).Methods(\"DELETE\")\n\n\tlog.Fatal(http.ListenAndServe(\"localhost:3000\", router))\n}", "func (s *Server) listenAndServe(connString string) {\n\ts.listener, _ = net.Listen(\"tcp\", connString)\n\tcommon.LogI(\"Server %p is running on: %s\\n\", s, connString)\n\n\tfor {\n\t\tconn, err := s.listener.Accept()\n\n\t\tif err != nil {\n\t\t\tcommon.LogE(\"Failed to accept the connection:\", err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tcommon.LogI(\"A new connection was accepted:%s\\n\", conn.RemoteAddr().String())\n\t\ts.pending <- conn\n\t}\n}", "func (s *Server) serve(lis net.Listener) {\n\ts.wg.Add(1)\n\tgo func() 
{\n\t\tlog.Infof(\"Listening on %s\", lis.Addr())\n\t\terr := s.httpServer.Serve(lis)\n\t\tlog.Tracef(\"Finished serving RPC: %v\", err)\n\t\ts.wg.Done()\n\t}()\n}", "func (r *router) ListenAndServe() {\n\tsigCh := make(chan os.Signal, 1)\n\tsignal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)\n\n\tgo func() {\n\t\tif err := r.server.ListenAndServeTLS(\"\", \"\"); err != nil {\n\t\t\tlog.WithError(err).Fatal(\"Failed to listen and serve webhook server\")\n\t\t}\n\t}()\n\tlog.Info(\"NSE webhook injector has started\")\n\n\t<-sigCh\n}", "func (s *Server) ListenAndServe(h Handler) (err error) {\n\t// Listen given port.\n\ts.listener, err = net.Listen(\"tcp\", s.addr)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\t_ = s.listener.Close()\n\t}()\n\n\ts.Log(fmt.Sprintf(\"waiting for requests on %s\", s.addr))\n\n\t// Waiting for new connections.\n\tfor {\n\t\tif s.stop {\n\t\t\tbreak\n\t\t}\n\n\t\t// Accept new connection.\n\t\tconnRaw, err := s.listener.Accept()\n\t\tif err != nil {\n\t\t\ts.Log(err)\n\t\t\tcontinue\n\t\t}\n\t\ts.Log(fmt.Sprintf(\"connection accepted from %s\", connRaw.RemoteAddr()))\n\t\tconn := NewConn(&connRaw, s.idleTimeout, s.bytesLimit)\n\t\ts.addConn(conn)\n\n\t\t// Process each connection concurrently.\n\t\tgo func(conn *Conn) {\n\t\t\tdefer func(conn *Conn) {\n\t\t\t\ts.closeConn(conn)\n\t\t\t\ts.Log(\"connection closed\")\n\t\t\t}(conn)\n\n\t\t\tr := bufio.NewReader(conn)\n\t\t\tw := bufio.NewWriter(conn)\n\t\t\tcbuf := make(chan []byte)\n\t\t\ttimeout := time.After(s.idleTimeout)\n\t\t\tfor {\n\t\t\t\t// Waiting for reading from the connection.\n\t\t\t\tgo func(cbuf chan []byte, r *bufio.Reader) {\n\t\t\t\t\tbuf := make([]byte, BufSize)\n\t\t\t\t\t_, err := r.Read(buf)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ts.Log(err)\n\t\t\t\t\t}\n\t\t\t\t\tbuf = bytes.Trim(buf, \"\\x00\")\n\t\t\t\t\tcbuf <- buf\n\t\t\t\t}(cbuf, r)\n\n\t\t\t\tselect {\n\t\t\t\tcase <-timeout:\n\t\t\t\t\t// Oops, we caught a timeout.\n\t\t\t\t\terr = 
io.EOF\n\t\t\t\t\treturn\n\t\t\t\tcase buf := <-cbuf:\n\t\t\t\t\t// Red buffer isn't empty, process the data.\n\t\t\t\t\tout, err := h.Handle(buf)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ts.Log(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\t// Write response to connection.\n\t\t\t\t\t_, err = w.Write(out)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ts.Log(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\t// Flush the connection.\n\t\t\t\t\terr = w.Flush()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ts.Log(err)\n\t\t\t\t\t}\n\t\t\t\t\t// Update timeout,\n\t\t\t\t\ttimeout = time.After(s.idleTimeout)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(conn)\n\t}\n\n\treturn\n}", "func main() {\n\thttp.HandleFunc(\"/\", handler)\n\n\thttp.HandleFunc(\"/count\", counter)\n\tlog.Fatal(http.ListenAndServe(\"localhost:8181\", nil))\n}", "func main() {\n\tln, err := net.Listen(\"tcp\", \":8080\")\n\tif err != nil {\n\t\t// handle error\n\t\tlog.Println(err)\n\t}\n\tfor {\n\t\tconn, err := ln.Accept()\n\t\tif err != nil {\n\t\t\t// handle error\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\tgo handleConnection(conn)\n\t}\n\n}", "func (mds *metadataService) listen() {\n\thandler := http.NewServeMux()\n\thandler.HandleFunc(\"/latest\", makeSecure(mds.getServices, mds))\n\thandler.HandleFunc(\"/latest/api/token\", makeSecure(mds.getv2Token, mds))\n\thandler.HandleFunc(\"/latest/meta-data/iam/security-credentials/\", makeSecure(mds.enumerateRoles, mds))\n\thandler.HandleFunc(\"/latest/meta-data/iam/security-credentials\", makeSecure(mds.enumerateRoles, mds))\n\thandler.HandleFunc(\"/latest/meta-data/iam/security-credentials/hologram-access\", makeSecure(mds.getCredentials, mds))\n\thandler.HandleFunc(\"/latest/meta-data/instance-id\", makeSecure(mds.getInstanceID, mds))\n\thandler.HandleFunc(\"/latest/meta-data/placement/availability-zone\", makeSecure(mds.getAvailabilityZone, mds))\n\thandler.HandleFunc(\"/latest/meta-data/public-hostname\", makeSecure(mds.getPublicDNS, mds))\n\n\terr := 
http.Serve(mds.listener, handler)\n\n\tif err != nil {\n\t\tif strings.HasSuffix(err.Error(), \"use of closed network connection\") {\n\t\t\t// this happens when Close() is called, and it's normal\n\t\t\treturn\n\t\t}\n\t\tpanic(err)\n\t}\n}", "func Server(t string, klogmax int, ktaskmax int, ratelimit int) {\n\tInitConfig(t, klogmax, ktaskmax, ratelimit)\n\thandleRequests()\n}", "func init() {\n\tmux = http.NewServeMux()\n\tmux.Handle(\"/\", handlerFn(\"Hello World!\"))\n\tmux.Handle(\"/foo\", handlerFn(\"foo\"))\n\tmux.Handle(\"/bar\", handlerFn(\"bar\"))\n\tmux.Handle(\"/baz\", handlerFn(\"baz\"))\n}", "func requestListener() {\n\tfor {\n\t\tconnId, payLoad, err := lspServer.Read()\n\t\tif err != nil {\n\t\t\tlogger.Println(\"Failure handler with clientId\", connId)\n\t\t\tfailureHandler(connId)\n\t\t} else {\n\t\t\tincomingMsgHanler(connId, payLoad)\n\t\t}\n\t}\n}", "func (s *Server) Run() {\n\tfor {\n\t\tconn, err := s.l.Accept()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Accept error: %v\\n\", err)\n\t\t} else {\n\t\t\tgo s.handlerConn(conn)\n\t\t}\n\t}\n}", "func startAnonServerListener(listener *net.TCPListener) {\n\tfmt.Println(\"[debug] AnonServer Listener started...\");\n\tbuf := new(bytes.Buffer)\n\tfor {\n\t\tbuf.Reset()\n\t\tconn, err := listener.AcceptTCP()\n\t\tutil.CheckErr(err)\n\t\t_, err = io.Copy(buf, conn)\n\t\tutil.CheckErr(err)\n\t\tHandle(buf.Bytes(), anonServer)\n\t}\n}", "func EchoServer() {\n\tl, err := net.Listen(\"tcp\", \"127.0.0.1:8053\")\n\tif err != nil {\n\t\tfmt.Printf(\"Failure to listen: %s\\n\", err.Error())\n\t\treturn\n\t}\n\tfor {\n\t\tif c, err := l.Accept(); err == nil {\n\t\t\tgo Echo(c)\n\t\t}\n\t}\n}", "func initServer() {\n\trouter := mux.NewRouter()\n\trouter.HandleFunc(\"/consumables\", consumablesListHandler).Methods(\"GET\")\n\trouter.HandleFunc(\"/consumables\", optionsHandler).Methods(\"OPTIONS\")\n\trouter.HandleFunc(\"/consumables\", consumablesCreateHandler).Methods(\"POST\")\n\trouter.HandleFunc(\"/ingest\", 
optionsHandler).Methods(\"OPTIONS\")\n\trouter.HandleFunc(\"/ingest\", ingestHandler).Methods(\"POST\")\n\trouter.HandleFunc(\"/status/now\", statusHandler).Methods(\"GET\")\n\trouter.HandleFunc(\"/status/time\", statusTimeHandler).Methods(\"GET\")\n\thttp.Handle(\"/\", router)\n\thttp.ListenAndServe(fmt.Sprintf(\":%s\", os.Getenv(\"CAFFEINE_PORT\")), nil)\n}", "func serve() error {\n\n\trouter := configureRoutes()\n\n\thttp.Handle(\"/\", router)\n\n\t// Define port and set to default if environment variable is not set\n\tport := PORT\n\tif len(os.Getenv(\"GO_PORT\")) > 0 {\n\t\tport = os.Getenv(\"GO_PORT\")\n\t}\n\n\tlogger.Info(\"Initiating HTTP Server on Port %v\", port)\n\treturn (http.ListenAndServe(port, router))\n}", "func RunHTTPServer(ctx context.Context, createHandler func(router chi.Router) http.Handler) {\n\tapiRouter := chi.NewRouter()\n\tsetMiddlewares(apiRouter)\n\n\trootRouter := chi.NewRouter()\n\trootRouter.Mount(\"/api\", createHandler(apiRouter))\n\n\tsrv := &http.Server{\n\t\tAddr: \":\" + os.Getenv(\"PORT\"),\n\t\tHandler: rootRouter,\n\t}\n\n\tvar wg sync.WaitGroup\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\t_ = srv.ListenAndServe()\n\t}()\n\n\tsigs := make(chan os.Signal, 1)\n\tsignal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)\n\n\tgo func() {\n\t\t_ = <-sigs\n\t\t_ = srv.Shutdown(ctx)\n\t}()\n\n\twg.Wait()\n}", "func main() {\n\tregisterHandlers()\n\tappChatroom.Run() // run the chatroom app\n\t// start the server\n\tch := make(chan bool) // a channel used to get errors\n\tdefer close(ch)\n\tgo startHTTPServer(ch)\n\tgo startHTTPSServer(ch)\n\t<-ch\n\t<-ch\n\tlog.Fatal(\"Servers stopped with errors.\")\n}", "func WebhookServer(w http.ResponseWriter, req *http.Request) {\n\tif req.Method == \"POST\" && req.URL.Path == \"/webhook-endpoint\" {\n\t\tvar delivery Delivery\n\t\tdecoder := json.NewDecoder(req.Body)\n\t\terr := decoder.Decode(&delivery)\n\t\tdefer req.Body.Close()\n\t\tif err != nil {\n\t\t\thttp.Error(w, `400 bad 
request`, http.StatusBadRequest)\n\t\t}\n\t\tlog.Printf(\"received %d event(s).\", len(delivery.Messages))\n\t\tInsertEvents(delivery.Messages...)\n\t\tio.WriteString(w, \"OK\")\n\n\t} else if req.Method == \"GET\" && req.URL.Path == \"/health\" {\n\t\tio.WriteString(w, \"OK\")\n\n\t} else {\n\t\thttp.Error(w, \"404 not found\", http.StatusNotFound)\n\t\treturn\n\t}\n}", "func main() {\n\tfmt.Println(\"Rest API v2.0 - Mux Routers\")\n\tHandle.HandleRequests()\n}", "func main() {\n\n\thandleRequests()\n}", "func (s *Server) Serve(ctx context.Context, handler Handler) error {\n\ts.cancelMx.Lock()\n\ts.ctx, s.cancel = context.WithCancel(ctx)\n\ts.cancelMx.Unlock()\n\n\tfor {\n\t\tconn, err := s.listener.Accept()\n\t\tif err != nil {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn ctx.Err()\n\n\t\t\tcase <-s.ctx.Done():\n\t\t\t\t// If parent context is not done, so\n\t\t\t\t// server context is canceled by Close.\n\t\t\t\treturn nil\n\t\t\tdefault:\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tgo func() {\n\t\t\t_ = s.serveConn(s.ctx, handler, conn)\n\t\t}()\n\t}\n}", "func HandleRequests() {\n\thttp.HandleFunc(\"/\", HomePage)\n\thttp.HandleFunc(\"/articles\", AllArticles)\n\tlog.Fatal(http.ListenAndServe(\":8081\", nil))\n}", "func runServer() {\n\t// listen and serve on 0.0.0.0:8080 (for windows \"localhost:8080\")\n\tlog.Fatalln(router.Run(fmt.Sprintf(\":%s\", env.AppPort)))\n}", "func RunServer(server client.Service) {\n\tr := mux.NewRouter()\n\tr.HandleFunc(server.HealthCheck, func(w http.ResponseWriter, r *http.Request) {\n\t\t// an example API handler\n\t\tjson.NewEncoder(w).Encode(map[string]bool{\"ok\": true})\n\t}).Methods(\"GET\")\n\tsrv := &http.Server{\n\t\tHandler: r,\n\t\tAddr: fmt.Sprintf(\"%s:%d\", server.URL, server.Port),\n\t\tWriteTimeout: 15 * time.Second,\n\t\tReadTimeout: 15 * time.Second,\n\t}\n\tlog.Fatal(srv.ListenAndServe())\n}", "func Server(l net.Listener) {\n\tcont := electron.NewContainer(\"server\")\n\tc, err := cont.Accept(l)\n\tif 
err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tl.Close() // This server only accepts one connection\n\t// Process incoming endpoints till we get a Receiver link\n\tvar r electron.Receiver\n\tfor r == nil {\n\t\tin := <-c.Incoming()\n\t\tswitch in := in.(type) {\n\t\tcase *electron.IncomingSession, *electron.IncomingConnection:\n\t\t\tin.Accept() // Accept the incoming connection and session for the receiver\n\t\tcase *electron.IncomingReceiver:\n\t\t\tin.SetCapacity(10)\n\t\t\tin.SetPrefetch(true) // Automatic flow control for a buffer of 10 messages.\n\t\t\tr = in.Accept().(electron.Receiver)\n\t\tcase nil:\n\t\t\treturn // Connection is closed\n\t\tdefault:\n\t\t\tin.Reject(amqp.Errorf(\"example-server\", \"unexpected endpoint %v\", in))\n\t\t}\n\t}\n\tgo func() { // Reject any further incoming endpoints\n\t\tfor in := range c.Incoming() {\n\t\t\tin.Reject(amqp.Errorf(\"example-server\", \"unexpected endpoint %v\", in))\n\t\t}\n\t}()\n\t// Receive messages till the Receiver closes\n\trm, err := r.Receive()\n\tfor ; err == nil; rm, err = r.Receive() {\n\t\tfmt.Printf(\"server received: %q\\n\", rm.Message.Body())\n\t\trm.Accept() // Signal to the client that the message was accepted\n\t}\n\tfmt.Printf(\"server receiver closed: %v\\n\", err)\n}", "func startServer(port string, handler http.Handler) {\n\terr := http.ListenAndServe(port, handler)\n\tif err != nil {\n\t\tlogger.Fatal(\"ListenAndServe: \", err)\n\t}\n}", "func main() {\n handleRequests()\n}", "func RunServer() {\n\tapp := applicationContext{\n\t\tconfig: config.LoadConfig(),\n\t\ttrackerLevel: RATIOLESS,\n\t}\n\n\tmux := http.NewServeMux()\n\n\tmux.HandleFunc(\"/announce\", app.requestHandler)\n\tmux.HandleFunc(\"/scrape\", scrapeHandler)\n\thttp.ListenAndServe(\":3000\", mux)\n}", "func InitRequestHandler() {\n\tr := gin.Default()\n\n\tv1 := r.Group(\"/api/v1\")\n\t{\n\n\t\tv1.GET(\"/healthz\", func(c *gin.Context) {\n\t\t\tc.String(200, \"OK\")\n\t\t})\n\n\t\tv1.POST(\"/payments\", func(c *gin.Context) 
{\n\t\t\tc.JSON(200, gin.H{\"message\": \"pong\"})\n\t\t})\n\n\t}\n\n\tr.Run()\n}", "func serve(ctx context.Context, server *ttrpc.Server, signals chan os.Signal, shutdown func()) error {\n\tdump := make(chan os.Signal, 32)\n\tsetupDumpStacks(dump)\n\n\tpath, err := os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tl, err := serveListener(socketFlag)\n\tif err != nil { //nolint:nolintlint,staticcheck // Ignore SA4023 as some platforms always return error\n\t\treturn err\n\t}\n\tgo func() {\n\t\tdefer l.Close()\n\t\tif err := server.Serve(ctx, l); err != nil &&\n\t\t\t!strings.Contains(err.Error(), \"use of closed network connection\") {\n\t\t\tlog.G(ctx).WithError(err).Fatal(\"containerd-shim: ttrpc server failure\")\n\t\t}\n\t}()\n\tlogger := log.G(ctx).WithFields(logrus.Fields{\n\t\t\"pid\": os.Getpid(),\n\t\t\"path\": path,\n\t\t\"namespace\": namespaceFlag,\n\t})\n\tgo func() {\n\t\tfor range dump {\n\t\t\tdumpStacks(logger)\n\t\t}\n\t}()\n\n\tgo handleExitSignals(ctx, logger, shutdown)\n\treturn reap(ctx, logger, signals)\n}", "func main() {\n // Create a TCP socket\n socket, _ := net.Listen(\"tcp\", \":8080\")\n fmt.Print(\"Server started. 
Listening on port 8080\\r\\n\\r\\n\")\n\n\n // Loop forever, listening for connections\n for {\n // Pause until a client connects\n connection, _ := socket.Accept()\n\n // Handle each incoming connection as a GO routine\n // Server can handle an arbitrary amount of connections\n go handleClient(connection)\n }\n}", "func echoServer(addr string) error {\n\tlistener, err := quic.ListenAddr(addr, generateTLSConfig(), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Println(\"start listen:\", addr)\n\n\tfor {\n\t\tsess, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tlog.Println(\"listener error \", err.Error())\n\t\t\treturn err\n\t\t}\n\t\tlog.Printf(\"accept new session %v\\n\", sess)\n\t\tgo serveSession(sess)\n\n\t}\n}", "func (srv *Server) ApplyHandlers() {\n\tsrv.router.Handle(\"/*\", http.FileServer(http.Dir(\"./web\")))\n\tsrv.router.Get(\"/socket\", srv.socketHandler)\n}", "func main(){\n\thttp.HandleFunc(\"/data\",index_handle)\n\thttp.HandleFunc(\"/save\",post_handle)\n\tfmt.Println(http.ListenAndServe(\":4040\",nil))\n\tfmt.Println(\"Server started at port 4041\")\n}", "func (d *Dispatcher) Serve(l net.Listener) {\n\tfor {\n\t\tconn, err := l.Accept()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Accept error: %v\", err)\n\t\t}\n\t\tgo d.handleConn(conn)\n\t}\n}", "func startServer() {\n\t// index file\n\thttp.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.Redirect(w, r, \"/static/\", http.StatusFound)\n\t}) //设置访问的路由\n\n\t// static file\n\thttp.HandleFunc(\"/static/\", func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(w, r, r.URL.Path[1:])\n\t})\n\n\t// other logic handlers\n\thttp.HandleFunc(\"/rank\", rank)\n\thttp.HandleFunc(\"/top\", top)\n\t//\thttp.HandleFunc(\"/update\", update)\n\n\terr := http.ListenAndServe(\":9090\", nil) //设置监听的端口\n\tif err != nil {\n\t\tlog.Fatal(\"ListenAndServe: \", err)\n\t}\n}", "func ListenAndServe(endpoint string, logger *zerolog.Logger, handler http.Handler, 
writeTimeout time.Duration, readTimeout time.Duration) {\n\tlog := logger.With().Str(\"SERVER\", \"Listen and Serve\").Logger()\n\tsrv := &http.Server{\n\t\tHandler: handler,\n\t\tAddr: endpoint,\n\t\tWriteTimeout: writeTimeout,\n\t\tReadTimeout: readTimeout,\n\t}\n\n\tc := make(chan os.Signal, 1)\n\tidleConnections := make(chan struct{})\n\tsignal.Notify(c, os.Interrupt, syscall.SIGINT)\n\tgo func() {\n\t\t<-c\n\t\t// create context with timeout\n\t\tctx, cancel := context.WithTimeout(context.Background(), writeTimeout)\n\t\tdefer cancel()\n\n\t\t// start http shutdown\n\t\tif err := srv.Shutdown(ctx); err != nil {\n\t\t\tlog.Error().AnErr(\"shutdown\", err)\n\t\t}\n\n\t\tclose(idleConnections)\n\t}()\n\n\tlog.Info().Msg(\"listening at \" + endpoint)\n\tif err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {\n\t\tlog.Fatal().Msg(\"listen and Serve fail \" + err.Error())\n\t}\n\n\tlog.Info().Msg(\"waiting idle connections...\")\n\t<-idleConnections\n\tlog.Info().Msg(\"bye bye\")\n}", "func main() {\n\thttp.HandleFunc(\"/echo\", routes.Echo)\n\thttp.HandleFunc(\"/invert\", routes.Invert)\n\thttp.HandleFunc(\"/flatten\", routes.Flatten)\n\thttp.HandleFunc(\"/sum\", routes.Sum)\n\thttp.HandleFunc(\"/multiply\", routes.Multiply)\n\thttp.HandleFunc(\"/health\", routes.Health)\n\thttp.ListenAndServe(\":8080\", nil)\n}", "func main() {\n\thttp.HandleFunc(\"/view/\", viewHandler)\t// Handle any requests under the path /view/\n\thttp.HandleFunc(\"/edit/\", editHandler)\t// Handle any requests under the path /edit/\n\thttp.HandleFunc(\"/save/\", saveHandler)\t// Handle any requests under the path /save/\n\thttp.ListenAndServe(\":8080\", nil)\n}", "func (ls *LowStock) ListenAndServe(ctx context.Context) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\tmsgUpdates, err := ls.messenger.Updates(ls.lastUpdateID + 1)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Failed getting messenger msgUpdates: %s\", 
err)\n\t\t\t\ttime.Sleep(fallbackTimeout)\n\t\t\t}\n\t\t\tls.handleUpdates(ctx, msgUpdates)\n\t\t}\n\t}\n}", "func (state *InMemoryState) runServer() {\n\n\tlog.WithFields(log.Fields{\n\t\t\"port\": state.opts.APIServerPort,\n\t}).Info(\"[runServer] starting HTTP server\")\n\tdefer log.Info(\"[runServer] stopping HTTP server\")\n\n\tgo func() {\n\t\t// for debugging:\n\t\t// http://stackoverflow.com/questions/19094099/how-to-dump-goroutine-stacktraces\n\t\tlog.Println(http.ListenAndServe(\":6060\", nil))\n\t}()\n\n\tr := rpc.NewServer()\n\tr.RegisterCodec(rpcjson.NewCodec(), \"application/json\")\n\tr.RegisterCodec(rpcjson.NewCodec(), \"application/json;charset=UTF-8\")\n\tr.RegisterInterceptFunc(rpcInterceptFunc)\n\tr.RegisterAfterFunc(rpcAfterFunc)\n\td := NewDotmeshRPC(state, state.userManager)\n\terr := r.RegisterService(d, \"\") // deduces name from type name\n\tif err != nil {\n\t\tlog.Printf(\"Error while registering services %s\", err)\n\t}\n\n\trouter := mux.NewRouter()\n\n\trouter.Handle(\"/rpc\", Instrument(state)(NewAuthHandler(r, state.userManager)))\n\n\trouter.Handle(\n\t\t\"/filesystems/{filesystem}/{fromSnap}/{toSnap}\",\n\t\tInstrument(state)(NewAuthHandler(state.NewZFSSendingServer(), state.userManager)),\n\t).Methods(\"GET\")\n\n\trouter.Handle(\n\t\t\"/filesystems/{filesystem}/{fromSnap}/{toSnap}\",\n\t\tInstrument(state)(NewAuthHandler(state.NewZFSReceivingServer(), state.userManager)),\n\t).Methods(\"POST\")\n\n\t// display diff since the last commit\n\trouter.Handle(\"/diff/{namespace}:{name}\", Instrument(state)(NewAuthHandler(NewDiffHandler(state), state.userManager))).Methods(\"GET\")\n\trouter.Handle(\"/diff/{namespace}:{name}/{snapshotID}\", Instrument(state)(NewAuthHandler(NewDiffHandler(state), state.userManager))).Methods(\"GET\")\n\n\t// list files in the latest snapshot\n\trouter.Handle(\"/s3/{namespace}:{name}\", Instrument(state)(NewAuthHandler(NewS3Handler(state), state.userManager))).Methods(\"GET\")\n\t// list files in a 
specific snapshot\n\trouter.Handle(\"/s3/{namespace}:{name}/snapshot/{snapshotId}\", Instrument(state)(NewAuthHandler(NewS3Handler(state), state.userManager))).Methods(\"GET\")\n\t// download a file from a specific snapshot, or just get its size\n\trouter.Handle(\"/s3/{namespace}:{name}/snapshot/{snapshotId}/{key:.*}\", Instrument(state)(NewAuthHandler(NewS3Handler(state), state.userManager))).Methods(\"GET\", \"HEAD\")\n\t// put file into master\n\trouter.Handle(\"/s3/{namespace}:{name}/{key:.*}\", Instrument(state)(NewAuthHandler(NewS3Handler(state), state.userManager))).Methods(\"PUT\")\n\t// put file into other branch\n\trouter.Handle(\"/s3/{namespace}:{name}@{branch}/{key:.*}\", Instrument(state)(NewAuthHandler(NewS3Handler(state), state.userManager))).Methods(\"PUT\")\n\n\t// delete file on master\n\trouter.Handle(\"/s3/{namespace}:{name}/{key:.*}\", Instrument(state)(NewAuthHandler(NewS3Handler(state), state.userManager))).Methods(\"DELETE\")\n\t// delete file on another branch\n\trouter.Handle(\"/s3/{namespace}:{name}@{branch}/{key:.*}\", Instrument(state)(NewAuthHandler(NewS3Handler(state), state.userManager))).Methods(\"DELETE\")\n\n\trouter.HandleFunc(\"/check\",\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tfmt.Fprintf(w, \"OK\")\n\t\t},\n\t)\n\n\trouter.Handle(\"/metrics\", promhttp.Handler())\n\n\tif os.Getenv(\"PRINT_HTTP_LOGS\") != \"\" {\n\t\tloggingRouter := handlers.LoggingHandler(getLogfile(\"requests\"), router)\n\t\t// TODO: take server port from the config\n\t\terr = http.ListenAndServe(fmt.Sprintf(\":%s\", state.opts.APIServerPort), loggingRouter)\n\t} else {\n\t\terr = http.ListenAndServe(fmt.Sprintf(\":%s\", state.opts.APIServerPort), router)\n\t}\n\n\tif err != nil {\n\t\tutils.Out(fmt.Sprintf(\"Unable to listen on port %s: '%s'\\n\", state.opts.APIServerPort, err))\n\t\tlog.Fatalf(\"Unable to listen on port %s: '%s'\", state.opts.APIServerPort, err)\n\t}\n}", "func main() {\n\thttp.HandleFunc(\"/test\", 
TestServer)\n\thttp.Handle(\"/\", http.FileServer(http.Dir(\".\")))\n\thttp.Handle(\"/echo\", websocket.Handler(Echo))\n\thttp.Handle(\"/echo2\", websocket.Handler(EchoServer))\n\terr := http.ListenAndServe(\":12345\", nil)\n\tif err != nil {\n\t\tpanic(\"ListenAndServe: \" + err.Error())\n\t}\n}", "func metricsServerListenAndServe() {\n\taulogging.Logger.NoCtx().Info().Print(\"starting metrics server on \" + configuration.MetricsPort())\n\tmetricsServeMux := http.NewServeMux()\n\tmetricsServeMux.Handle(\"/metrics\", promhttp.Handler())\n\terr := http.ListenAndServe(configuration.MetricsPort(), metricsServeMux)\n\tif err != nil {\n\t\taulogging.Logger.NoCtx().Fatal().WithErr(err).Print(\"failed to start metrics http server: \" + err.Error())\n\t}\n}", "func init() {\n\thttp.HandleFunc(\"/notify\", errorAdapter(notifyHandler))\n\thttp.HandleFunc(\"/processnotification\", notifyProcessorHandler)\n}", "func (l *Listener) Listen() {\n\tfor {\n\t\tvar client net.Conn\n\t\tvar err error\n\t\tif client, err = util.Accept(l); err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Serve the first Handler which is attached to this listener\n\t\tif len(l.HandlerConfigs) > 0 {\n\t\t\toptions := plugin_v1.HandlerOptions{\n\t\t\t\tClientConnection: client,\n\t\t\t\tHandlerConfig: l.HandlerConfigs[0],\n\t\t\t\tEventNotifier: l.EventNotifier,\n\t\t\t\tResolver: l.Resolver,\n\t\t\t\tShutdownNotifier: func(handler plugin_v1.Handler) {},\n\t\t\t}\n\n\t\t\tl.RunHandlerFunc(\"example-handler\", options)\n\t\t} else {\n\t\t\tclient.Write([]byte(\"Error - no handlers were defined!\"))\n\t\t}\n\t}\n}", "func (s *testDoQServer) Serve() {\n\tfor {\n\t\tconn, err := s.listener.Accept(context.Background())\n\t\tif err == quic.ErrServerClosed {\n\t\t\t// Finish serving on ErrServerClosed error.\n\t\t\treturn\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Debug(\"error while accepting a new connection: %v\", err)\n\t\t}\n\n\t\tgo s.handleQUICConnection(conn)\n\t}\n}", "func (s *Server) server() 
{\n\trpc.Register(s)\n\trpc.HandleHTTP()\n\t//l, e := net.Listen(\"tcp\", \":1234\")\n\tsockname := masterSock()\n\tos.Remove(sockname)\n\tl, e := net.Listen(\"unix\", sockname)\n\tif e != nil {\n\t\tlog.Fatal(\"listen error:\", e)\n\t}\n\tgo http.Serve(l, nil)\n}", "func (s *Server) listen(listener net.Listener) {\n\tfor {\n\t\t// Accept a connection\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tif s.shutdown {\n\t\t\t\treturn\n\t\t\t}\n\t\t\ts.logger.Printf(\"[ERR] consul.rpc: failed to accept RPC conn: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tgo s.handleConn(conn, false)\n\t\tmetrics.IncrCounter([]string{\"rpc\", \"accept_conn\"}, 1)\n\t}\n}", "func (hs *HttpServer) Start() (err error) {\n\t//panic(\"todo - StartServer\")\n\n\t// Start listening to the server port\n\n\t// Accept connection from client\n\n\t// Spawn a go routine to handle request\n\tport := hs.ServerPort\n\thost := \"0.0.0.0\"\n\t//delim := \"/r/n\"\n\tln, err := net.Listen(\"tcp\", host+port)\n\tdefer ln.Close()\n\tif err != nil {\n\t\tlog.Panicln(err)\n\t}\n\tlog.Println(\"Listening to connections at '\"+host+\"' on port\", port)\n\n\tfor {\n\t\tconn, err := ln.Accept()\n\t\tif err != nil{\n\t\t\tlog.Panicln(err)\n\t\t}\n\t\t\n\t\tgo hs.handleConnection(conn)\n\t}\n\n\treturn err\n\n\n}", "func Serve(l net.Listener) error {\n\tfor {\n\t\tconn, err := l.Accept()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tgo handleConn(conn)\n\t}\n}", "func (serv *Server) GET(url string, handlers ...Handler) {\n\tserv.Handle(\"GET\", url, handlers...)\n}", "func main() {\n\thttp.HandleFunc(\"/\", foo)\n\thttp.HandleFunc(\"/dog/\", bar)\n\thttp.HandleFunc(\"/me/\", myName)\n\thttp.ListenAndServe(\":8080\", nil)\n\n}", "func (cs *ChatService) startHTTPServer() {\n\tr := mux.NewRouter()\n\thangoutsHandler := &hangoutsHTTPHandler{\n\t\tbroker: cs.broker,\n\t}\n\n\tr.Handle(\"/\", hangoutsHandler)\n\n\thttp.Handle(\"/\", r)\n\n\tcs.quitError <- http.ListenAndServe(fmt.Sprintf(\":%d\", 
cs.httpPort), nil)\n}", "func Server(\n\tctx context.Context,\n\tcfg *config.ServerConfig,\n\tdb *database.Database,\n\tauthProvider auth.Provider,\n\tcacher cache.Cacher,\n\tcertificateSigner keys.KeyManager,\n\tsmsSigner keys.KeyManager,\n\tlimiterStore limiter.Store,\n) (http.Handler, error) {\n\t// Setup sessions\n\tsessionOpts := &sessions.Options{\n\t\tDomain: cfg.CookieDomain,\n\t\tMaxAge: int(cfg.SessionDuration.Seconds()),\n\t\tSecure: !cfg.DevMode,\n\t\tSameSite: http.SameSiteStrictMode,\n\t\tHttpOnly: true,\n\t}\n\tsessions := cookiestore.New(func() ([][]byte, error) {\n\t\treturn db.GetCookieHashAndEncryptionKeys()\n\t}, sessionOpts)\n\n\t// Create the router\n\tr := mux.NewRouter()\n\n\tr.Use(middleware.GzipResponse())\n\n\t// Install common security headers\n\tr.Use(middleware.SecureHeaders(cfg.DevMode, \"html\"))\n\n\t// Mount and register static assets before any middleware.\n\t{\n\t\tsub := r.PathPrefix(\"\").Subrouter()\n\t\tsub.Use(middleware.ConfigureStaticAssets(cfg.DevMode))\n\n\t\tstaticFS := assets.ServerStaticFS()\n\t\tfileServer := http.FileServer(http.FS(staticFS))\n\t\tsub.PathPrefix(\"/static/\").Handler(http.StripPrefix(\"/static/\", fileServer))\n\n\t\t// Browers and devices seem to always hit this - serve it to keep our logs\n\t\t// cleaner.\n\t\tsub.Path(\"/favicon.ico\").Handler(fileServer)\n\t}\n\n\tsub := r.PathPrefix(\"\").Subrouter()\n\n\t// Create the renderer\n\th, err := render.New(ctx, assets.ServerFS(), cfg.DevMode)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create renderer: %w\", err)\n\t}\n\n\t// Include the current URI\n\tcurrentPath := middleware.InjectCurrentPath()\n\tsub.Use(currentPath)\n\n\t// Request ID injection\n\tpopulateRequestID := middleware.PopulateRequestID(h)\n\tsub.Use(populateRequestID)\n\n\t// Trace ID injection\n\tpopulateTraceID := middleware.PopulateTraceID()\n\tr.Use(populateTraceID)\n\n\t// Logger injection\n\tpopulateLogger := 
middleware.PopulateLogger(logging.FromContext(ctx))\n\tsub.Use(populateLogger)\n\n\t// Recovery injection\n\trecovery := middleware.Recovery(h)\n\tsub.Use(recovery)\n\n\t// Common observability context\n\tctx, obs := middleware.WithObservability(ctx)\n\tsub.Use(obs)\n\n\t// Inject template middleware - this needs to be first because other\n\t// middlewares may add data to the template map.\n\tpopulateTemplateVariables := middleware.PopulateTemplateVariables(cfg)\n\tsub.Use(populateTemplateVariables)\n\n\t// Load localization\n\tlocales, err := i18n.Load(i18n.WithReloading(cfg.DevMode))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to setup i18n: %w\", err)\n\t}\n\n\t// Process localization parameters.\n\tprocessLocale := middleware.ProcessLocale(locales)\n\tsub.Use(processLocale)\n\n\thttplimiter, err := limitware.NewMiddleware(ctx, limiterStore,\n\t\tlimitware.UserIDKeyFunc(ctx, \"server:ratelimit:\", cfg.RateLimit.HMACKey),\n\t\tlimitware.AllowOnError(false))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create limiter middleware: %w\", err)\n\t}\n\n\t// Enable debug headers\n\tprocessDebug := middleware.ProcessDebug()\n\tsub.Use(processDebug)\n\n\t// Sessions\n\trequireSession := middleware.RequireSession(sessions, []interface{}{auth.SessionKeyFirebaseCookie}, h)\n\tsub.Use(requireSession)\n\n\t// Install the CSRF protection middleware.\n\thandleCSRF := middleware.HandleCSRF(h)\n\tsub.Use(handleCSRF)\n\n\t// Create common middleware\n\trequireAuth := middleware.RequireAuth(cacher, authProvider, db, h, cfg.SessionIdleTimeout, cfg.SessionDuration)\n\tcheckIdleNoAuth := middleware.CheckSessionIdleNoAuth(h, cfg.SessionIdleTimeout)\n\trequireEmailVerified := middleware.RequireEmailVerified(authProvider, h)\n\tloadCurrentMembership := middleware.LoadCurrentMembership(h)\n\trequireMembership := middleware.RequireMembership(h)\n\trequireSystemAdmin := middleware.RequireSystemAdmin(h)\n\trequireMFA := middleware.RequireMFA(authProvider, 
h)\n\tprocessFirewall := middleware.ProcessFirewall(h, \"server\")\n\trateLimit := httplimiter.Handle\n\n\t// health\n\t{\n\t\t// We don't need locales or template parsing, minimize middleware stack by\n\t\t// forking from r instead of sub.\n\t\tsub := r.PathPrefix(\"\").Subrouter()\n\t\tsub.Use(populateRequestID)\n\t\tsub.Use(populateLogger)\n\t\tsub.Use(recovery)\n\t\tsub.Use(obs)\n\t\tsub.Handle(\"/health\", controller.HandleHealthz(db, h, cfg.IsMaintenanceMode())).Methods(http.MethodGet)\n\t}\n\n\t{\n\t\tloginController := login.New(authProvider, cacher, cfg, db, h)\n\t\t{\n\t\t\tsub := sub.PathPrefix(\"\").Subrouter()\n\t\t\tsub.Use(rateLimit)\n\t\t\tsub.Handle(\"/session\", loginController.HandleCreateSession()).Methods(http.MethodPost)\n\t\t\tsub.Handle(\"/signout\", loginController.HandleSignOut()).Methods(http.MethodGet)\n\n\t\t\tsub = sub.PathPrefix(\"\").Subrouter()\n\t\t\tsub.Use(rateLimit)\n\t\t\tsub.Use(checkIdleNoAuth)\n\n\t\t\tsub.Handle(\"/\", loginController.HandleLogin()).Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login/reset-password\", loginController.HandleShowResetPassword()).Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login/reset-password\", loginController.HandleSubmitResetPassword()).Methods(http.MethodPost)\n\t\t\tsub.Handle(\"/login/manage-account\", loginController.HandleShowSelectNewPassword()).\n\t\t\t\tQueries(\"oobCode\", \"\", \"mode\", \"resetPassword\").Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login/manage-account\", loginController.HandleSubmitNewPassword()).\n\t\t\t\tQueries(\"oobCode\", \"\", \"mode\", \"resetPassword\").Methods(http.MethodPost)\n\t\t\tsub.Handle(\"/login/manage-account\", loginController.HandleReceiveVerifyEmail()).\n\t\t\t\tQueries(\"oobCode\", \"{oobCode:.+}\", \"mode\", \"{mode:(?:verifyEmail|recoverEmail)}\").Methods(http.MethodGet)\n\n\t\t\t// Realm selection & account settings\n\t\t\tsub = 
sub.PathPrefix(\"\").Subrouter()\n\t\t\tsub.Use(requireAuth)\n\t\t\tsub.Use(rateLimit)\n\t\t\tsub.Use(loadCurrentMembership)\n\t\t\tsub.Handle(\"/login\", loginController.HandleReauth()).Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login\", loginController.HandleReauth()).Queries(\"redir\", \"\").Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login/post-authenticate\", loginController.HandlePostAuthenticate()).Methods(http.MethodGet, http.MethodPost, http.MethodPut, http.MethodPatch)\n\t\t\tsub.Handle(\"/login/select-realm\", loginController.HandleSelectRealm()).Methods(http.MethodGet, http.MethodPost)\n\t\t\tsub.Handle(\"/login/change-password\", loginController.HandleShowChangePassword()).Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login/change-password\", loginController.HandleSubmitChangePassword()).Methods(http.MethodPost)\n\t\t\tsub.Handle(\"/account\", loginController.HandleAccountSettings()).Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login/manage-account\", loginController.HandleShowVerifyEmail()).\n\t\t\t\tQueries(\"mode\", \"verifyEmail\").Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login/manage-account\", loginController.HandleSubmitVerifyEmail()).\n\t\t\t\tQueries(\"mode\", \"verifyEmail\").Methods(http.MethodPost)\n\t\t\tsub.Handle(\"/login/register-phone\", loginController.HandleRegisterPhone()).Methods(http.MethodGet)\n\t\t}\n\t}\n\n\t// codes\n\t{\n\t\tsub := sub.PathPrefix(\"/codes\").Subrouter()\n\t\tsub.Use(requireAuth)\n\t\tsub.Use(loadCurrentMembership)\n\t\tsub.Use(requireMembership)\n\t\tsub.Use(processFirewall)\n\t\tsub.Use(requireEmailVerified)\n\t\tsub.Use(requireMFA)\n\t\tsub.Use(rateLimit)\n\n\t\tsub.Handle(\"\", http.RedirectHandler(\"/codes/issue\", http.StatusSeeOther)).Methods(http.MethodGet)\n\t\tsub.Handle(\"/\", http.RedirectHandler(\"/codes/issue\", http.StatusSeeOther)).Methods(http.MethodGet)\n\n\t\t// API for creating new verification codes. 
Called via AJAX.\n\t\tissueapiController := issueapi.New(cfg, db, limiterStore, smsSigner, h)\n\t\tsub.Handle(\"/issue\", issueapiController.HandleIssueUI()).Methods(http.MethodPost)\n\t\tsub.Handle(\"/batch-issue\", issueapiController.HandleBatchIssueUI()).Methods(http.MethodPost)\n\n\t\tcodesController := codes.NewServer(cfg, db, h)\n\t\tcodesRoutes(sub, codesController)\n\t}\n\n\t// mobileapp\n\t{\n\t\tsub := sub.PathPrefix(\"/realm/mobile-apps\").Subrouter()\n\t\tsub.Use(requireAuth)\n\t\tsub.Use(loadCurrentMembership)\n\t\tsub.Use(requireMembership)\n\t\tsub.Use(processFirewall)\n\t\tsub.Use(requireEmailVerified)\n\t\tsub.Use(requireMFA)\n\t\tsub.Use(rateLimit)\n\n\t\tmobileappsController := mobileapps.New(db, h)\n\t\tmobileappsRoutes(sub, mobileappsController)\n\t}\n\n\t// apikeys\n\t{\n\t\tsub := sub.PathPrefix(\"/realm/apikeys\").Subrouter()\n\t\tsub.Use(requireAuth)\n\t\tsub.Use(loadCurrentMembership)\n\t\tsub.Use(requireMembership)\n\t\tsub.Use(processFirewall)\n\t\tsub.Use(requireEmailVerified)\n\t\tsub.Use(requireMFA)\n\t\tsub.Use(rateLimit)\n\n\t\tapikeyController := apikey.New(cacher, db, h)\n\t\tapikeyRoutes(sub, apikeyController)\n\t}\n\n\t// users\n\t{\n\t\tsub := sub.PathPrefix(\"/realm/users\").Subrouter()\n\t\tsub.Use(requireAuth)\n\t\tsub.Use(loadCurrentMembership)\n\t\tsub.Use(requireMembership)\n\t\tsub.Use(processFirewall)\n\t\tsub.Use(requireEmailVerified)\n\t\tsub.Use(requireMFA)\n\t\tsub.Use(rateLimit)\n\n\t\tuserController := user.New(authProvider, cacher, db, h)\n\t\tuserRoutes(sub, userController)\n\t}\n\n\t// stats\n\t{\n\t\tsub := sub.PathPrefix(\"/stats\").Subrouter()\n\t\tsub.Use(requireAuth)\n\t\tsub.Use(loadCurrentMembership)\n\t\tsub.Use(requireMembership)\n\t\tsub.Use(processFirewall)\n\t\tsub.Use(requireEmailVerified)\n\t\tsub.Use(requireMFA)\n\t\tsub.Use(rateLimit)\n\n\t\tstatsController := stats.New(cacher, db, h)\n\t\tstatsRoutes(sub, statsController)\n\t}\n\n\t// realms\n\t{\n\t\tsub := 
sub.PathPrefix(\"/realm\").Subrouter()\n\t\tsub.Use(requireAuth)\n\t\tsub.Use(loadCurrentMembership)\n\t\tsub.Use(requireMembership)\n\t\tsub.Use(processFirewall)\n\t\tsub.Use(requireEmailVerified)\n\t\tsub.Use(requireMFA)\n\t\tsub.Use(rateLimit)\n\n\t\trealmadminController := realmadmin.New(cfg, db, limiterStore, h, cacher)\n\t\trealmadminRoutes(sub, realmadminController)\n\n\t\tpublicKeyCache, err := keyutils.NewPublicKeyCache(ctx, cacher, cfg.CertificateSigning.PublicKeyCacheDuration)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\trealmkeysController := realmkeys.New(cfg, db, certificateSigner, publicKeyCache, h)\n\t\trealmkeysRoutes(sub, realmkeysController)\n\n\t\trealmSMSKeysController := smskeys.New(cfg, db, publicKeyCache, h)\n\t\trealmSMSkeysRoutes(sub, realmSMSKeysController)\n\t}\n\n\t// webhooks\n\t{\n\t\t// We don't need locales or template parsing, minimize middleware stack by\n\t\t// forking from r instead of sub.\n\t\tsub := r.PathPrefix(\"/webhooks\").Subrouter()\n\t\tsub.Use(populateRequestID)\n\t\tsub.Use(populateLogger)\n\t\tsub.Use(recovery)\n\t\tsub.Use(obs)\n\n\t\twebhooksController := webhooks.New(cacher, db, h)\n\t\twebhooksRoutes(sub, webhooksController)\n\t}\n\n\t// JWKs\n\t{\n\t\tsub := sub.PathPrefix(\"/jwks\").Subrouter()\n\t\tsub.Use(rateLimit)\n\n\t\tjwksController, err := jwks.New(ctx, db, cacher, h)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to create jwks controller: %w\", err)\n\t\t}\n\t\tjwksRoutes(sub, jwksController)\n\t}\n\n\t// System admin\n\t{\n\t\tsub := sub.PathPrefix(\"/admin\").Subrouter()\n\t\tsub.Use(requireAuth)\n\t\tsub.Use(loadCurrentMembership)\n\t\tsub.Use(requireSystemAdmin)\n\t\tsub.Use(rateLimit)\n\n\t\tadminController := admin.New(cfg, cacher, db, authProvider, limiterStore, h)\n\t\tsystemAdminRoutes(sub, adminController)\n\t}\n\n\t// Blanket handle any missing routes.\n\tr.NotFoundHandler = populateTemplateVariables(processLocale(http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) {\n\t\tcontroller.NotFound(w, r, h)\n\t\treturn\n\t})))\n\n\t// Wrap the main router in the mutating middleware method. This cannot be\n\t// inserted as middleware because gorilla processes the method before\n\t// middleware.\n\tmux := http.NewServeMux()\n\tmux.Handle(\"/\", middleware.MutateMethod()(r))\n\treturn mux, nil\n}", "func Handler(s *Server) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ts.handler(w, r)\n\t})\n}", "func (s *Server) Dispatch() error {\n\thandler := cors.Default().Handler(s.router)\n\tlistener, err := net.Listen(\"tcp\", s.listenAddress)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.log.Info(\"API server listening on %q\", s.listenAddress)\n\treturn http.Serve(listener, handler)\n}" ]
[ "0.68494654", "0.6800467", "0.67156327", "0.6608716", "0.65980685", "0.6566974", "0.6520821", "0.64973724", "0.6458118", "0.6454185", "0.64205277", "0.64116293", "0.6407764", "0.6395274", "0.6377654", "0.636549", "0.6357758", "0.63493574", "0.6325145", "0.6282549", "0.62823975", "0.6274351", "0.62525034", "0.6212859", "0.6208683", "0.6206311", "0.61994344", "0.61951673", "0.61714906", "0.6170709", "0.61638653", "0.6157021", "0.6156773", "0.61488175", "0.6148351", "0.61440784", "0.6139796", "0.6133975", "0.613003", "0.61178565", "0.61140054", "0.6109569", "0.61079735", "0.6107112", "0.60991204", "0.6095845", "0.6079731", "0.60659456", "0.60639644", "0.60620624", "0.6061621", "0.60561466", "0.60559565", "0.605223", "0.6051027", "0.60482556", "0.60482067", "0.60401636", "0.6039902", "0.6036444", "0.60310876", "0.6025925", "0.60239863", "0.6019304", "0.60146433", "0.6012012", "0.60116106", "0.600485", "0.5998155", "0.5989078", "0.598108", "0.59779716", "0.59766644", "0.5976627", "0.59637797", "0.59620476", "0.59611285", "0.59596336", "0.5954243", "0.59490025", "0.59429353", "0.59427834", "0.59400004", "0.5936086", "0.59293085", "0.592693", "0.5923643", "0.5923313", "0.59199536", "0.5918622", "0.59165406", "0.5916", "0.5915277", "0.5913436", "0.59114105", "0.5910411", "0.5909741", "0.5905998", "0.5905589", "0.5904639" ]
0.6814072
1
/ Main Main starts either a client or a server, depending on whether the `connect` flag is set. Without the flag, the process starts as a server, listening for incoming requests. With the flag the process starts as a client and connects to the host specified by the flag value. Try "localhost" or "127.0.0.1" when running both processes on the same machine. main
func main() { connect := flag.String("connect", "", "IP address of process to join. If empty, go into listen mode.") flag.Parse() // If the connect flag is set, go into client mode. if *connect != "" { err := client(*connect) if err != nil { log.Println("Error:", errors.WithStack(err)) } log.Println("Client done.") return } // Else go into server mode. err := server() if err != nil { log.Println("Error:", errors.WithStack(err)) } log.Println("Server done.") }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func main() {\n\tflag.StringVar(&MODE, \"mode\", MODE, \"server/client\")\n\tflag.StringVar(&SERVER_ADDR, \"server\", SERVER_ADDR, \"mode: server => listen, mode: client => connect to\")\n\tflag.StringVar(&PayLoad, \"pl\", PayLoad, \"PayLoad\")\n\tflag.BoolVar(&PrintDump, \"d\", PrintDump, \"Print dump\")\n\tflag.PrintDefaults()\n\tflag.Parse()\n\n\tswitch strings.ToUpper(MODE) {\n\tcase \"S\":\n\t\tserver(SERVER_ADDR)\n\tdefault:\n\t\tclient(SERVER_ADDR)\n\t}\n}", "func main() {\n\tserver.New().Start()\n}", "func main() {\n\tportNo := os.Args[1]\n\tstartServerMode(portNo)\n}", "func main() {\n\n\tfmt.Println(\"Launching server...\")\n\n\tconnMap = make(map[string]net.Conn) // Allocate and initialise a map with no given size\n\tuserMap = make(map[net.Conn]string) // Allocate and initialise a map with no given size\n\n\targs := os.Args\n\n\tvar connPort = \"\"\n\n\tif len(args) == 2 && checkServerPort(args[1]) { // Verify a port number is given and check it\n\t\tconnPort = args[1]\n\t} else { // Else use port 8081 by default\n\t\tconnPort = \"8081\"\n\t}\n\n\tfmt.Print(\"IP address: \")\n\tgetPreferredIPAddress() // Prints out the preferred IP address of the specific computer\n\tfmt.Println(\"Port number: \" + connPort)\n\n\t// Listens for connection requests\n\tln, err := net.Listen(\"tcp\", \":\"+connPort)\n\n\t// Error check\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\t// Defer (wait till surrounding functions have finished) the execution of ln.Close()\n\tdefer ln.Close()\n\n\t// Semi-infinite loop that accepts connections, checks for errors and executes a goroutine\n\tfor {\n\t\tconn, err := ln.Accept()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Accept error: \", err)\n\t\t\treturn\n\t\t}\n\t\tgo connection(conn) // goroutine execution of the connection function concurrently\n\t}\n}", "func main() {\n\n\tvar logger *simple.Logger\n\n\tif os.Getenv(\"LOG_LEVEL\") == \"\" {\n\t\tlogger = &simple.Logger{Level: \"info\"}\n\t} else {\n\t\tlogger = 
&simple.Logger{Level: os.Getenv(\"LOG_LEVEL\")}\n\t}\n\terr := validator.ValidateEnvars(logger)\n\tif err != nil {\n\t\tos.Exit(-1)\n\t}\n\n\t// setup our client connectors (message producer)\n\tconn := connectors.NewClientConnectors(logger)\n\n\t// call the start server function\n\tlogger.Info(\"Starting server on port \" + os.Getenv(\"SERVER_PORT\"))\n\tstartHttpServer(conn)\n}", "func main() {\n\tserver.StartUp(false)\n}", "func main() {\n\tif len(os.Args) != 2 {\n\t\tlog.Fatal(\"Usage: ./server-go [server port]\")\n\t}\n\tserver_port := os.Args[1]\n\tserver(server_port)\n}", "func ClientMain(player Player) {\n\taddr := DefaultServerAddress\n\tif len(os.Args) > 1 {\n\t\tport, err := strconv.Atoi(os.Args[1])\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"invalid value for port: %q\", os.Args[1])\n\t\t}\n\t\taddr = &net.TCPAddr{\n\t\t\tIP: net.IPv4(127, 0, 0, 1),\n\t\t\tPort: port,\n\t\t}\n\t}\n\tvar state BasicState\n\tclient, err := OpenClient(addr, player, &state)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"cannot connect to server: %s\", err)\n\t\tos.Exit(1)\n\t}\n\tclient.DebugTo = os.Stderr\n\terr = client.Run()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error while running: %s\", err)\n\t\tos.Exit(2)\n\t}\n}", "func main() {\n args := args.Parse(os.Args)\n fmt.Println(\"[MAIN] App starting\")\n\n switch args.Mode {\n case \"agent\":\n go agent.Run(args.Source, args.ServerAddress, args.ServerPort)\n case \"server\":\n go server.Run(args.BindAddress, args.BindPort)\n case \"mixed\":\n go server.Run(args.BindAddress, args.BindPort)\n go agent.Run(args.Source, args.ServerAddress, args.ServerPort)\n default:\n fmt.Println(\"[MAIN] No agent, no server running\")\n }\n\n for {\n time.Sleep(100 * time.Millisecond)\n }\n}", "func main() {\r\n\tbind := fmt.Sprintf(\"%s:%s\", getIP(), getPort())\r\n\tlog.Println(\"Listening on\", bind)\r\n\r\n\terr := http.ListenAndServe(bind, http.HandlerFunc(mainHandle))\r\n\tif err != nil 
{\r\n\t\tpanic(\"ListenAndServe: \" + err.Error())\r\n\t}\r\n}", "func main() {\n\terr := clientMain()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func main() {\n\tbindTo := flag.String(\n\t\t\"l\", \"0.0.0.0:999\", \"interface and port to listen at\")\n\tflag.Parse()\n\trunServer(*bindTo)\n}", "func main() {\n\ta := App{}\n\ta.Initialize()\n\ta.Run(\":8000\")\n}", "func main() {\n\ts := master.New()\n\tif err := s.Run(port); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func main() {\n if len(os.Args) != 2 {\n log.Panic(\"args:\", \"<port>\")\n }\n port := os.Args[1]\n startServer(port)\n}", "func main() {\n\terr := runClient()\n\tif err != nil {\n\t\tlog.Fatalln(err.Error())\n\t}\n\n\treturn\n}", "func main() {\n\tflag.Parse()\n\n\tproxy := launcher.NewProxy()\n\tif !*quiet {\n\t\tproxy.Logger = os.Stdout\n\t}\n\n\tl, err := net.Listen(\"tcp\", *addr)\n\tif err != nil {\n\t\tutils.E(err)\n\t}\n\n\tfmt.Println(\"Remote control url is\", \"ws://\"+l.Addr().String())\n\n\tsrv := &http.Server{Handler: proxy}\n\tutils.E(srv.Serve(l))\n}", "func mainClient(ctx *cli.Context) error {\n\tcheckClientSyntax(ctx)\n\taddr := \":\" + strconv.Itoa(warpServerDefaultPort)\n\tswitch ctx.NArg() {\n\tcase 1:\n\t\taddr = ctx.Args()[0]\n\t\tif !strings.Contains(addr, \":\") {\n\t\t\taddr += \":\" + strconv.Itoa(warpServerDefaultPort)\n\t\t}\n\tcase 0:\n\tdefault:\n\t\tfatal(errInvalidArgument(), \"Too many parameters\")\n\t}\n\thttp.HandleFunc(\"/ws\", serveWs)\n\tconsole.Infoln(\"Listening on\", addr)\n\tfatalIf(probe.NewError(http.ListenAndServe(addr, nil)), \"Unable to start client\")\n\treturn nil\n}", "func main() {\n\targs := os.Args[1:]\n\tswitch len(args) {\n\tcase 0:\n\t\tstartServerFromFile(configFile)\n\t\treturn\n\tcase 1:\n\t\ta := args[0]\n\t\tif a == createFile {\n\t\t\twriteConfigurationToFile()\n\t\t\treturn\n\t\t} else if a == help || a == help2 {\n\t\t\tprintHelp()\n\t\t\treturn\n\t\t}\n\tcase 2:\n\t\tif args[0] == useConfig && args[1] != \"\" 
{\n\t\t\tstartServerFromFile(args[1])\n\t\t\treturn\n\t\t}\n\t}\n\tprintHelp()\n}", "func main() {\n\tfmt.Printf(\"%sBuilding SQL Connection%s\\n\", GREEN, NC)\n\tMySQL.BuildConnection()\n\t/* How to print to stdout */\n\tfmt.Printf(\"Main starting\\n\")\n\t/* This is how you call functions in Go */\n\tserve()\n}", "func main() {\n\n\t// Ref. https://gobyexample.com/command-line-arguments\n\targsWithProg := os.Args\n\n\tif len(argsWithProg) > 1 {\n\t\t// os.Args[0] will be \"smi-main\"\n\t\tswitch os.Args[1] {\n\t\tcase \"s\":\n\t\t\tsmi.Server()\n\t\tcase \"c\":\n\t\t\tsmi.Client()\n\t\t}\n\n\t} else {\n\t\tfmt.Println(\"Please specify the mode: s for server, c for client: smi-main s or smi-main c\")\n\t}\n}", "func main() {\n\tconfig.SetVersion(\"0.1.0\")\n\tconfig.Load()\n\tsync.StartProcessing()\n\tserver.StartServer()\n}", "func main() {\n\tserver := server.NewHTTPServer()\n\tserver.Start(3000)\n}", "func TestMain(m *testing.M) {\n\tflag.Parse()\n\tvar err error\n\ts, err := NewServer(TESTDB, 10, 2, ioutil.Discard , \":9123\")\n\tif err!=nil {\n\t\tpanic(err)\n\t}\n\tts=s\n\ts.Start()\n\tos.Exit(m.Run())\n}", "func main() {\n\tif err := cmd.RunServer(); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}", "func main() {\n\tvar (\n\t\thostname string\n\t\tid string\n name string\n client string\n\t\tcache *infra.Cache\n\t\tserver *infra.Server\n\t\tconsole *infra.Console\n\t)\n\n\tflag.Parse()\n\n\tcache = infra.NewCache()\n\n\thostname = *localAddress + \":\" + *localPort\n client = *clientAddress\n\n\t// If an id isn't provided, we use the hostname instead\n\tif *instanceId != \"\" {\n\t\tid = *instanceId\n\t} else {\n\t\tid = hostname\n\t}\n \n if *carrierName != \"\" {\n name = *carrierName\n } else if *ringAddress != \"\" {\n name = *ringAddress\n } else {\n name = hostname\n }\n \n server = infra.NewServer(id, name, hostname, client, cache)\n\tconsole = infra.NewConsole(cache, server)\n\n\t// Spawn goroutines to handle both 
interfaces\n\tgo server.Run(*ringAddress)\n\tgo console.Run()\n\n\t// Wait fo the server to finish\n\t<-server.Done()\n}", "func StartMainServer(mainHost string, workerCount int) {\n\tserver := &fasthttp.Server{\n\t\tHandler: anyHTTPHandler,\n\t}\n\n\tpreforkServer := prefork.New(server, workerCount)\n\n\tif !prefork.IsChild() {\n\t\tfmt.Printf(\"Server started server on http://%s\\n\", mainHost)\n\t}\n\n\tif err := preforkServer.ListenAndServe(mainHost); err != nil {\n\t\tpanic(err)\n\t}\n}", "func main() {\n\t//init()\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\thostname = \"MISSING\"\n\t}\n\n\tinnerPort := os.Getenv(\"BACKEND_PORT\")\n\tif innerPort == \"\" {\n\t\tlog.Printf(\"Running on %s:5001\", hostname)\n\t\tlog.Fatal(http.ListenAndServe(\":5001\", nil))\n\t} else {\n\t\tlog.Printf(\"Running on %s:%s\", hostname, innerPort)\n\t\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%s\", innerPort), nil))\n\t}\n}", "func main() {\n\n\t// Process args.\n\n\t// the TCP address on which the fserver listens to RPC connections from the aserver\n\tfserverTcp := os.Args[1]\n\tfserverTcpG = fserverTcp\n\n\t// the UDP address on which the fserver receives client connections\n\tfserver := os.Args[2]\n\tfserverUdpAddr, err := net.ResolveUDPAddr(\"udp\", fserver)\n\thandleError(err)\n\n\tmsg := make([]byte, 1024)\n\n\t// Global fserver ip:port info\n\tfserverIpPort = fserver\n\n\t// Read the rest of the args as a fortune message\n\tfortune := strings.Join(os.Args[3:], \" \")\n\tfortuneG = fortune\n\n\t// Debug to see input from command line args\n\tfmt.Printf(\"fserver Listening on %s\\nFortune: %s\\n\", fserverIpPort, fortune)\n\n\t// concurrent running of rcp connection\n\n\tconn, err := net.ListenUDP(\"udp\", fserverUdpAddr)\n\thandleError(err)\n\n\tgo handleRpcConnection()\n\tdefer conn.Close()\n\n\t// refactor to global variable\n\tconndp = conn\n\t// udp client concurrency\n\tfor {\n\t\tn, clientAddr, err := conn.ReadFromUDP(msg)\n\t\thandleError(err)\n\t\tgo 
handleClientConnection(msg[:], n, clientAddr.String())\n\t}\n}", "func main() {\n\tfmt.Println(\"app start.\")\n\tgo loginserver.StartListen()\n\tgo gateserver.StartListen()\n\t<-make(chan int)\n}", "func main() {\n\n\t//init api\n\tserver.Init()\n}", "func mainExample() {\n\tfmt.Printf(\"webserv main running.\\n\")\n\tw := NewWebServer(\"127.0.0.1:7708\", nil)\n\tw.Start()\n\tselect {}\n\t// ...\n\tw.Stop()\n}", "func main() {\n\tregisterHandlers()\n\tappChatroom.Run() // run the chatroom app\n\t// start the server\n\tch := make(chan bool) // a channel used to get errors\n\tdefer close(ch)\n\tgo startHTTPServer(ch)\n\tgo startHTTPSServer(ch)\n\t<-ch\n\t<-ch\n\tlog.Fatal(\"Servers stopped with errors.\")\n}", "func main() {\n\thttp.ListenAndServe(\"127.0.0.1:8080\", NewServer())\n}", "func main() {\n\t// get environment variables\n\tport := os.Getenv(portEnv)\n\t// default for port\n\tif port == \"\" {\n\t\tport = \"8080\"\n\t}\n\tlog.Print(\"[Info][Main] Creating server...\")\n\ts, err := sessions.NewServer(\":\"+port, os.Getenv(redisAddressEnv),\n\t\tos.Getenv(gameServerImageEnv), deserialiseEnvMap(os.Getenv(gameNodeSelectorEnv)),\n\t\tos.Getenv(cpuLimitEnv))\n\n\tif err != nil {\n\t\tlog.Fatalf(\"[Error][Main] %+v\", err)\n\t}\n\n\tif err := s.Start(); err != nil {\n\t\tlog.Fatalf(\"[Error][Main] %+v\", err)\n\t}\n}", "func main() {\n\tvar port int\n\tvar version bool\n\n\t// parse the flags\n\tflag.IntVar(&port, \"port\", 8080, \"used port\")\n\tflag.BoolVar(&version, \"V\", false, \"version of the program\")\n\tflag.Parse()\n\n\t// if user type -V, the V flag is set up to true\n\tif version {\n\t\t// display the information about the version\n\t\tfmt.Println(\"version 1.0_a\")\n\t\t// otherwise run the server\n\t} else {\n\t\tportNr := strconv.Itoa(port)\n\t\thttp.HandleFunc(\"/time\", getTime)\n\t\thttp.HandleFunc(\"/\", unknownRoute)\n\t\terr := http.ListenAndServe(\":\"+portNr, nil)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"ListenAndServe: \", 
err)\n\t\t}\n\t}\n}", "func main() {\n\tserverIP := os.Args[1]\n\tdataPath := os.Args[2]\n\n\tPublicIp = node.GeneratePublicIP()\n\tfmt.Println(\"The public IP is: [%s], DataPath is: %s\", ERR_COL+PublicIp+ERR_END, ERR_COL+dataPath+ERR_END)\n\t// Listener for clients -> cluster\n\tln1, _ := net.Listen(\"tcp\", PublicIp+\"0\")\n\n\t// Listener for server and other nodes\n\tln2, _ := net.Listen(\"tcp\", PublicIp+\"0\")\n\n\tInitializeDataStructs()\n\t// Open Filesystem on Disk\n\tnode.MountFiles(dataPath, WriteIdCh)\n\t// Open Peer to Peer RPC\n\tListenPeerRpc(ln2)\n\t// Connect to the Server\n\tnode.InitiateServerConnection(serverIP, PeerRpcAddr)\n\t// Open Cluster to App RPC\n\tListenClusterRpc(ln1)\n}", "func main() {\n\tname := flag.String(\"name\", \"echo\", \"server name\")\n\tport := flag.String(\"port\", \"3000\", \"server port\")\n\tflag.Parse()\n\n\t// Echo instance\n\te := echo.New()\n\n\t// Middleware\n\te.Use(middleware.Logger())\n\te.Use(middleware.Recover())\n\n\t// Route => handler\n\te.GET(\"/\", func(c echo.Context) error {\n\t\treturn c.HTML(http.StatusOK, fmt.Sprintf(\"<div style='font-size: 8em;'>Hello from upstream server %s!</div>\", *name))\n\t})\n\te.GET(\"/alive\", func(c echo.Context) error {\n\t\tdata := map[string]interface{}{\n\t\t\t\"alive\": true,\n\t\t\t\"hostname\": \"localhost:\" + *port,\n\t\t\t\"serviceName\": *name,\n\t\t\t\"num_cpu\": runtime.NumCPU(),\n\t\t\t\"num_goroutine\": runtime.NumGoroutine(),\n\t\t\t\"go_version\": runtime.Version(),\n\t\t\t\"build_date\": Buildstamp,\n\t\t\t\"commit\": Commit,\n\t\t\t\"startup_time\": startupTime,\n\t\t}\n\t\treturn c.JSON(http.StatusOK, data)\n\t})\n\n\t// Start server\n\te.Logger.Fatal(e.Start(fmt.Sprintf(\":%s\", *port)))\n}", "func main() {\r\n\targuments := os.Args\r\n\tif len(arguments) == 1 {\r\n\t\tfmt.Println(\"Please provide a port number!\")\r\n\t\treturn\r\n\t}\r\n\r\n\tPORT := \":\" + arguments[1]\r\n\tl, err := net.Listen(\"tcp4\", PORT)\r\n\tif err != nil 
{\r\n\t\tfmt.Println(err)\r\n\t\treturn\r\n\t}\r\n\tdefer l.Close()\r\n\trand.Seed(time.Now().Unix())\r\n\r\n\tfor {\r\n\t\tc, err := l.Accept()\r\n\t\tif err != nil {\r\n\t\t\tfmt.Println(err)\r\n\t\t\treturn\r\n\t\t}\r\n\t\tgo handleConnection_server(c)\r\n\t}\r\n}", "func MainServer(server string) {\n\n\t// Build core, and start goroutine\n\tcore := NewCore()\n\tgo core.main()\n\n\t// Build TCP listener and start goroutine\n\tlis := &Listener{core: core}\n\tgo lis.Listen(\"tcp\", server)\n\n\t// Register monitoring server\n\tgo monitoringServer(core)\n\n\t// Setup SIGINT signal handler, and wait\n\tchannel := make(chan os.Signal)\n\tsignal.Notify(channel, os.Interrupt)\n\t<-channel\n\tlog.Println(\"Stop\")\n}", "func main() {\n\tvar configurationFile string\n\tvar isMaster bool\n\tvar isScribe bool\n\tvar isAddama bool\n\n\tflag.BoolVar(&isMaster, \"m\", false, \"Start as master node.\")\n\tflag.BoolVar(&isScribe, \"s\", false, \"Start as scribe node.\")\n\tflag.BoolVar(&isAddama, \"a\", false, \"Start as addama node.\")\n\tflag.StringVar(&configurationFile, \"config\", \"golem.config\", \"A configuration file for golem services\")\n\tflag.Parse()\n\n\tconfigFile, err := goconf.ReadConfigFile(configurationFile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tGlobalLogger(configFile)\n\tGlobalTls(configFile)\n\tSubIOBufferSize(\"default\", configFile)\n\tGoMaxProc(\"default\", configFile)\n\tConBufferSize(\"default\", configFile)\n\tStartHtmlHandler(configFile)\n\n\tif isMaster {\n\t\tStartMaster(configFile)\n\t} else if isScribe {\n\t\tStartScribe(configFile)\n\t} else if isAddama {\n\t\tStartAddama(configFile)\n\t} else {\n\t\tStartWorker(configFile)\n\t}\n}", "func main() {\n\t// starting server\n\tcuxs.StartServer(engine.Router())\n}", "func (d *Daemon) Main(serve func(string, string)) error {\n\tsetUmask()\n\tserve(d.SockPath, d.DbPath)\n\treturn nil\n}", "func main() {\n\tport, exists := os.LookupEnv(\"PORT\")\n\tif !exists {\n\t\tport = 
\"2001\"\n\t}\n\tfmt.Println(\"Running on port \" + port)\n\thttp.HandleFunc(\"/\", RelayServer)\n\thttp.ListenAndServe(\":\"+port, nil)\n}", "func main() {\n\tcfg := drc.NewConfig()\n\terr := cfg.Parse(os.Args[1:])\n\tif cfg.Version {\n\t\tutils.PrintRawInfo(appName)\n\t\tos.Exit(0)\n\t}\n\tswitch errors.Cause(err) {\n\tcase nil:\n\tcase flag.ErrHelp:\n\t\tos.Exit(0)\n\tdefault:\n\t\tlog.Fatalf(\"parse cmd flags errors: %s\", err)\n\t}\n\n\terr = logutil.InitLogger(&cfg.Log)\n\tif err != nil {\n\t\tlog.Fatalf(\"initialize log error: %s\", err)\n\t}\n\tutils.LogRawInfo(appName)\n\n\tsvr := drc.NewServer(cfg)\n\n\tsc := make(chan os.Signal, 1)\n\tsignal.Notify(sc,\n\t\tsyscall.SIGHUP,\n\t\tsyscall.SIGINT,\n\t\tsyscall.SIGTERM,\n\t\tsyscall.SIGQUIT)\n\n\tgo func() {\n\t\tsig := <-sc\n\t\tlog.Infof(\"got signal [%d], exit\", sig)\n\t\tsvr.Close()\n\t}()\n\n\tif err = svr.Start(); err != nil {\n\t\tlog.Fatalf(\"run server failed: %v\", err)\n\t}\n\tsvr.Close()\n}", "func main() {\n\tflag.Parse()\n\tfmt.Println(\"start the program\")\n\n\t// go myServer()\n\t// go myClient()\n\n\tfor {\n\t\t// start the app\n\t\twaitc := make(chan struct{}) // a wait lock\n\n\t\t// start the server thread\n\t\tgo func() {\n\t\t\tfmt.Println(\"start the server\")\n\t\t\tserver.InitFileServer()\n\t\t\tdefer close(waitc)\n\t\t}()\n\n\t\t// start the client thread\n\t\t// go func() {\n\t\t// \t// for {\n\t\t// \tserverAddr, server := filesource.SearchAddressForThefile(\"Liben.jpg\")\n\t\t// \tfmt.Println(*serverAddr)\n\t\t// \tfmt.Println(*server)\n\t\t// \tclient.InitFileClient(serverAddr, server)\n\t\t// \tclient.DownloadFile(\"Liben.jpg\")\n\t\t// \t// }\n\t\t// }()\n\n\t\t// start the input thread\n\t\t// go input()\n\n\t\t<-waitc\n\t\t// finished in this round restart the app\n\t\tfmt.Println(\"restart the app\")\n\t}\n}", "func main() {\n\tlogrus.SetFormatter(&logrus.TextFormatter{\n\t\tForceColors: true,\n\t\tFullTimestamp: true,\n\t})\n\tmlog := 
logrus.WithFields(logrus.Fields{\n\t\t\"component\": componentName,\n\t\t\"version\": env.Version(),\n\t})\n\n\tgrpc_logrus.ReplaceGrpcLogger(mlog.WithField(\"component\", componentName+\"_grpc\"))\n\tmlog.Infof(\"Starting %s\", componentName)\n\n\tgrpcServer, err := createGRPCServer(mlog)\n\tif err != nil {\n\t\tmlog.WithError(err).Fatal(\"failed to create grpc server\")\n\t}\n\t// Start go routines\n\tgo handleExitSignals(grpcServer, mlog)\n\tserveGRPC(env.ServiceAddr(), grpcServer, mlog)\n}", "func main() {\n\t// The ccid is assigned to the chaincode on install (using the “peer lifecycle chaincode install <package>” command) for instance\n\n\tif len(os.Args) < 3 {\n\t\tfmt.Println(\"Please supply:\\n- installed chaincodeID (using the “peer lifecycle chaincode install <package>” command)\\n- chaincode address (host:port)\")\n\t\treturn\n\t}\n\n\tccid := os.Args[1]\n\taddress := os.Args[2]\n\n\tserver := &shim.ChaincodeServer{\n\t\tCCID: ccid,\n\t\tAddress: address,\n\t\tCC: new(SimpleChaincode),\n\t\tTLSProps: shim.TLSProperties{\n\t\t\tDisabled: true,\n\t\t},\n\t}\n\n\tfmt.Println(\"Start Chaincode server on \" + address)\n\terr := server.Start()\n\tif err != nil {\n\t\tfmt.Printf(\"Error starting Simple chaincode: %s\", err)\n\t\treturn\n\t}\n}", "func main() {\n initApplicationConfiguration()\n runtime.GOMAXPROCS(2) // in order for the rpc and http servers to work in parallel\n\n go servers.StartRPCServer()\n servers.StartHTTPServer()\n}", "func main() {\n\tif len(os.Args) < 3 {\n\t\tfmt.Printf(\"wrong parameters\\nUsage: %s host port\\n\", os.Args[0])\n\t\tos.Exit(2)\n\t}\n\n\tmsg, err := client.Dial(os.Args[1], os.Args[2])\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(2)\n\t}\n\tfmt.Println(*msg)\n}", "func main() {\n\tcore.Start()\n}", "func main() {\n\t//\n\t// Load startup flags\n\t//\n\tflags := cmd.LoadFlags()\n\n\t//\n\t// Load env.\n\t//\n\tif flags.EnvFile != \"\" {\n\t\terr := env.LoadEnvFile(flags.EnvFile)\n\t\tif err != nil 
{\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\t//\n\t// Select service\n\t//\n\treg := registry.NewRegistryContainer()\n\n\treg.Add(portGateway.ServiceName, portGateway.FactoryMethod)\n\treg.Add(portService.ServiceName, portService.FactoryMethod)\n\n\tserviceFactory, err := reg.Get(flags.Kind)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t//\n\t// Create service\n\t//\n\tservice, err := serviceFactory()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t//\n\t// Run till the death comes\n\t//\n\tlog.Printf(\"[%s] started serving on '%s'\", flags.Kind, flags.Address)\n\tlog.Fatal(service.Serve(flags.Address))\n}", "func main() {\n\tvar addr string\n\tflag.StringVar(&addr, \"e\", \":4040\", \"service address endpoint\")\n\tflag.Parse()\n\n\t// create local addr for socket\n\tladdr, err := net.ResolveTCPAddr(\"tcp\", addr)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\t// announce service using ListenTCP\n\t// which a TCPListener.\n\tl, err := net.ListenTCP(\"tcp\", laddr)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\tdefer l.Close()\n\tfmt.Println(\"listening at (tcp)\", laddr.String())\n\n\t// req/response loop\n\tfor {\n\t\t// use TCPListener to block and wait for TCP\n\t\t// connection request using AcceptTCP which creates a TCPConn\n\t\tconn, err := l.AcceptTCP()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"failed to accept conn:\", err)\n\t\t\tconn.Close()\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Println(\"connected to: \", conn.RemoteAddr())\n\n\t\tgo handleConnection(conn)\n\t}\n}", "func main() {\n\t// Make websocket\n\tlog.Println(\"Starting sync server\")\n\n\t// TODO: Use command line flag credentials.\n\tclient, err := db.NewClient(\"localhost:28015\")\n\tif err != nil {\n\t\tlog.Fatal(\"Couldn't initialize database: \", err.Error())\n\t}\n\tdefer client.Close()\n\n\trouter := sync.NewServer(client)\n\n\t// Make web server\n\tn := negroni.Classic()\n\tn.UseHandler(router)\n\tn.Run(\":8000\")\n}", "func main() 
{\n\twebserver.ServerStart()\n\twebserver.ServerRequest()\n}", "func main() {\n\tlog.SetLevel(log.DebugLevel)\n\n\tif err := setupDB(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tserver, err := setupServer()\n\tif err != nil {\n\t\tlog.Fatal(\"error setting up server: \", err)\n\t}\n\n\tlog.Println(\"--- listening on \", server.Addr)\n\terr = server.ListenAndServe()\n\tif err != nil {\n\t\tlog.Fatal(\"error starting server: \", err)\n\t}\n}", "func main() {\n\t\n\tvar config Config\n\tReadConfig(&config)\n\n\tvar inputScanner *bufio.Scanner\n\n\tif config.Server.Enable {\n\t\t// communicate with TCP/IP server\n\t\tfmt.Printf(\"server mode\\n\")\n\t\t// TODO need to set inputScanner\n\n\t} else if config.Engines.Enable {\n\t\t// play games with multiple engines used\n\t\t// In this mode, we need to hold a full state of the game because no one send the game state.\n\t\tfmt.Printf(\"multi-engine mode\\n\")\n\t\tpanic(\"not implemented now. Can you send pull request?\")\n\t} else {\n\t\t// CLI mode\n\t\tfmt.Printf(\"cli mode\\n\")\n\t\tinputScanner = bufio.NewScanner(os.Stdin)\n\t}\n\n\tConnectEngine(inputScanner, config.Cli.Path)\n}", "func main() {\n\tif len(os.Args) < 3 {\n\t\tfmt.Printf(\"argument is Invalid :%v\\n\", os.Args)\n\t\treturn\n\t}\n\tswitch os.Args[1] {\n\tcase \"master\":\n\t\tstartReq, err := json.Marshal(common.Request{\n\t\t\tUrl: os.Args[2],\n\t\t\tFlag: 1,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Error(\"err:%v\", err)\n\t\t}\n\t\tdistribute.NewMaster().Run(startReq)\n\tcase \"slave\":\n\t\tdistribute.NewSlave(os.Args[2]).Run()\n\t}\n}", "func main() {\n\tfmt.Println(\"Go Demo with net/http server\")\n\n\t// initialize empty itemStore\n\titemStore := store.InitializeStore()\n\tserver.StartRouter(itemStore)\n}", "func main() {\n\n\tlog.Printf(\"Server started\")\n\n\trouter := sw.NewRouter()\n\n\tport := os.Getenv(\"PORT\")\n\tif port == \"\" {\n\t\tport = \"5000\"\n\t}\n\n\theadersOk := handlers.AllowedHeaders([]string{\"X-Requested-With\", 
\"Content-Type\"})\n\toriginsOk := handlers.AllowedOrigins([]string{\"*\"})\n\tmethodsOk := handlers.AllowedMethods([]string{\"GET\", \"HEAD\", \"POST\", \"PUT\", \"OPTIONS\"})\n\n\tlog.Fatal(http.ListenAndServe(\":\"+port, handlers.CORS(originsOk, headersOk, methodsOk)(router)))\n}", "func init() {\n\t// use all cpus in the system for concurrency\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tlog.SetOutput(os.Stdout)\n\n\tflag.Parse()\n\n\tif *showUsage {\n\t\tflag.Usage()\n\t\tos.Exit(0)\n\t}\n\n\tvar err error\n\tClient, err = as.NewClient(*Host, *Port)\n\tif err != nil {\n\t\tPanicOnError(err)\n\t}\n}", "func main(){\n\tr := ctrl.HttpRouter()\n\tr.Run(\":1234\")\n}", "func main(){\n\n\tname := \"localhost\"\n\tport := 0\n\tfmt.Scanf(\"%d\\n\", &port)\n\t//lanzo a los dos servidores de arriba\n\tgo servRegister(name,port)\t\n\n\tfriendPort := 0\n\t//solicto a este port que me responda de alguan forma si port es diferente de friendport\n\tfmt.Scanf(\"%d\\n\",&friendPort)\n\tif port != friendPort{\n\t\t//agrego a la libreta al friendport, \n\t\tlib[friendPort] = name\n\t\tcliRegister(name, friendPort, port)\n\t}\n\t//lo lanzo sin go para que bloquee\n\tservAdder(name, port)\n\t\n}", "func main() {\n\t// load config and construct the server shared environment\n\tcfg := common.LoadConfig()\n\tlog := services.NewLogger(cfg)\n\n\t// create repository\n\trepo, err := repository.NewRepository(cfg, log)\n\tif err != nil {\n\t\tlog.Fatalf(\"Can not create application data repository. 
Terminating!\")\n\t}\n\n\t// setup GraphQL API handler\n\thttp.Handle(\"/api\", handlers.ApiHandler(cfg, repo, log))\n\n\t// show the server opening info and start the server with DefaultServeMux\n\tlog.Infof(\"Welcome to Fantom Rocks API server on [%s]\", cfg.BindAddr)\n\tlog.Fatal(http.ListenAndServe(cfg.BindAddr, nil))\n}", "func main() {\n\tservice.StartWebServer(\"8081\")\n}", "func main() {\n\n\t// Calls startup logic\n\tcommon.StartUp()\n\t// Get the mux router object\n\trouter := routers.InitRoutes()\n\n\tserver := &http.Server{\n\t\tAddr: common.AppConfig.Server,\n\t\tHandler: router,\n\t}\n\tlog.Println(\"Listening [products]...\")\n\tserver.ListenAndServe()\n}", "func main() {\n\tconst port = 8090\n\tfmt.Printf(\"Listening on port: %d\\n\", port)\n\thttp.HandleFunc(\"/\", requestHandler)\n\thttp.ListenAndServe(fmt.Sprintf(\":%d\", port), nil)\n}", "func Main(c *config.Config) error {\n\tif c.IsDebug() {\n\t\tutils.PrettyPrint(c)\n\t}\n\tsignalsToCatch := []os.Signal{\n\t\tos.Interrupt,\n\t\tos.Kill,\n\t\tsyscall.SIGHUP,\n\t\tsyscall.SIGINT,\n\t\tsyscall.SIGABRT,\n\t\tsyscall.SIGTERM,\n\t\tsyscall.SIGQUIT,\n\t}\n\tname := fmt.Sprintf(\"%s (%s)\", c.Name, utils.ExecutableName())\n\tstopCh := make(chan os.Signal, 1)\n\tsignal.Notify(stopCh, signalsToCatch...)\n\ta, err := core.NewAPI(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err = a.Shutdown(); err != nil {\n\t\t\tc.Log().Error(err)\n\t\t\treturn\n\t\t}\n\t\tc.Log().Infof(\"%s shut down\", name)\n\t}()\n\tc.Log().Infof(\"starting %s...\", name)\n\terr = hosts.Start(a, c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err = hosts.Shutdown(); err != nil {\n\t\t\tc.Log().Error(err)\n\t\t\treturn\n\t\t}\n\t\tc.Log().Infof(\"%s shut down\", name)\n\t}()\n\tc.Log().Infof(\"%s %s started\", name, c.Version())\n\t<-stopCh\n\tc.Log().Infof(\"%s shutting down\", name)\n\treturn nil\n}", "func main() {\n\tfmt.Println(\"server is up and 
running!!\")\n\truntime.GOMAXPROCS(4)\n\n\tapp := gin.Default()\n\n\tsearch.RouterMain(app)\n\n\terr := app.Run(\"0.0.0.0:5000\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(\"server got fired!!!!\")\n}", "func main() {\n\thelloWorld()\n\tfunctions()\n\tmathFunction()\n\tserver()\n}", "func main() {\n\targs := os.Args[1:]\n\tcentralSystem := ocpp16.NewCentralSystem(nil, nil)\n\thandler := &CentralSystemHandler{chargePoints: map[string]*ChargePointState{}}\n\tcentralSystem.SetNewChargePointHandler(func(chargePointId string) {\n\t\thandler.chargePoints[chargePointId] = &ChargePointState{connectors: map[int]*ConnectorInfo{}, transactions: map[int]*TransactionInfo{}}\n\t\tlog.WithField(\"client\", chargePointId).Info(\"new charge point connected\")\n\t})\n\tcentralSystem.SetChargePointDisconnectedHandler(func(chargePointId string) {\n\t\tlog.WithField(\"client\", chargePointId).Info(\"charge point disconnected\")\n\t\tdelete(handler.chargePoints, chargePointId)\n\t})\n\tcentralSystem.SetCentralSystemCoreListener(handler)\n\tvar listenPort = defaultListenPort\n\tif len(args) > 0 {\n\t\tport, err := strconv.Atoi(args[0])\n\t\tif err != nil {\n\t\t\tlistenPort = port\n\t\t}\n\t}\n\tlog.Infof(\"starting central system on port %v\", listenPort)\n\tcentralSystem.Start(listenPort, \"/{ws}\")\n\tlog.Info(\"stopped central system\")\n}", "func main() {\n\te := godotenv.Load()\n\tif e != nil {\n\t\tfmt.Print(e)\n\t}\n\n\tr := routers.SetupRouter()\n\trouters.MirrorRouter(r)\n\trouters.ProxyRouter(r)\n\n\tport := os.Getenv(\"port\")\n\n\t// For run on requested port\n\tif len(os.Args) > 1 {\n\t\treqPort := os.Args[1]\n\t\tif reqPort != \"\" {\n\t\t\tport = reqPort\n\t\t}\n\t}\n\n\tif port == \"\" {\n\t\tport = \"8080\" //localhost\n\t}\n\ttype Job interface {\n\t\tRun()\n\t}\n\n\tr.Run(\":\" + port)\n}", "func main() {\n\t// create a listener on TCP port 7777\n\tlis, err := net.Listen(\"tcp\", \":7777\")\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", 
err)\n\t}\n\n\t// create a server instance\n\ts := api.Server{}\n\n\t// create the TLS creds\n\tcreds, err := credentials.NewServerTLSFromFile(\"cert/server.crt\", \"cert/server.key\")\n\tif err != nil {\n\t\tlog.Fatalf(\"could not load TLS keys: %s\", err)\n\t}\n\n\t// add credentials to the gRPC options\n\topts := []grpc.ServerOption{grpc.Creds(creds)}\n\n\t// create a gRPC server object\n\tgrpcServer := grpc.NewServer(opts...)\n\n\t// attach the Ping service to the server\n\tapi.RegisterPingServer(grpcServer, &s)\n\n\t// start the server\n\tif err := grpcServer.Serve(lis); err != nil {\n\t\tlog.Fatalf(\"failed to serve: %v\", err)\n\t}\n}", "func main() {\n\tflag.BoolVar(&debug, \"debug\", false, \"Enable debug output\")\n\tflag.StringVar(&host, \"host\", \"127.0.0.1\", \"Host to listen on\")\n\tflag.StringVar(&port, \"port\", \"5000\", \"Port to listen on\")\n\tflag.Parse()\n\n\t// create the host:port string for use\n\tlistenAddress := fmt.Sprintf(\"%s:%s\", host, port)\n\tif debug {\n\t\tlog.Printf(\"Listening on %s\", listenAddress)\n\t}\n\n\t// Map /config to our configHandler and wrap it in the log middleware\n\thttp.Handle(\"/config/\", logMiddleware(http.HandlerFunc(configHandler)))\n\n\t// Run forever on all interfaces on port 5000\n\tlog.Fatal(http.ListenAndServe(listenAddress, nil))\n}", "func main() {\n\tgo func() { log.Fatal(echoServer()) }()\n\n\terr := clientMain()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func main() {\n\tconfig, err := config.New()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdb, err := db.New(config)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdb.Migrate()\n\n\trouter := routes.New(db)\n\n\tsrv := server.New(router, config)\n\tif err := srv.Start(); err != nil {\n\t\tlog.Fatalf(\"Error on starting the server %s\", err)\n\t\tos.Exit(1)\n\t}\n\n\tdefer db.Close()\n}", "func main() {\n\tflag.Parse()\n\tfmt.Println(\"start the program\")\n\t// fmt.Println(*serverAddr)\n\n\tfor {\n\t\t// start the app\n\t\twaitc := make(chan 
struct{}) // a wait lock\n\n\t\t// start the server thread\n\t\tgo func() {\n\t\t\tfmt.Println(\"start the server\")\n\t\t\tserver.InitFileServer()\n\t\t\tdefer close(waitc)\n\t\t}()\n\n\t\t// start the client thread\n\t\t// go func() {\n\t\t// \tfor {\n\t\t// \t\tmsg := <-msgc // a message to send\n\t\t// \t\tclient.InitChatClient(*myTitle, serverAddr)\n\n\t\t// \t\terr := client.Chat(msg)\n\t\t// \t\tif err != nil {\n\t\t// \t\t\t// restart the client\n\t\t// \t\t\tfmt.Printf(\"send Err: %v\", err)\n\t\t// \t\t}\n\t\t// \t}\n\t\t// }()\n\n\t\t// start the input thread\n\t\t// go input()\n\n\t\t<-waitc\n\t\t// finished in this round restart the app\n\t\tfmt.Println(\"restart the app\")\n\t}\n}", "func main() {\n\n\tlog.Println(\"launching tcp server...\")\n\n\t// start tcp listener on all interfaces\n\t// note that each connection consumes a file descriptor\n\t// you may need to increase your fd limits if you have many concurrent clients\n\tln, err := net.Listen(\"tcp\", \":8081\")\n\tif err != nil {\n\t\tlog.Fatalf(\"could not listen: %s\", err)\n\t}\n\tdefer ln.Close()\n\n\tfor {\n\t\tlog.Println(\"waiting for incoming TCP connections...\")\n\t\t// Accept blocks until there is an incoming TCP connection\n\t\tincoming, err := ln.Accept()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"couldn't accept %s\", err)\n\t\t}\n\n\t\tincomingConn, err := yamux.Client(incoming, yamux.DefaultConfig())\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"couldn't create yamux %s\", err)\n\t\t}\n\n\t\tlog.Println(\"starting a gRPC server over incoming TCP connection\")\n\n\t\tvar conn *grpc.ClientConn\n\t\t// gRPC dial over incoming net.Conn\n\t\tconn, err = grpc.Dial(\":7777\", grpc.WithInsecure(),\n\t\t\tgrpc.WithDialer(func(target string, timeout time.Duration) (net.Conn, error) {\n\t\t\t\treturn incomingConn.Open()\n\t\t\t}),\n\t\t)\n\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"did not connect: %s\", err)\n\t\t}\n\n\t\t// handle connection in goroutine so we can accept new TCP 
connections\n\t\tgo handleConn(conn)\n\t}\n}", "func Main() {\n\n\tcheckSupportArch()\n\n\tif len(os.Args) > 1 {\n\t\tcmd := os.Args[1]\n\t\tfmt.Println(cmd)\n\t}\n\n\tstartEtcdOrProxyV2()\n}", "func main() {\n\ta := App{}\n\t//\ta.Initialize(\"user\", \"password\", \"db\", \"db_mysql\", 3306)\n\ta.Initialize(\n\t\tos.Getenv(\"DB_USER\"),\n\t\tos.Getenv(\"DB_PASSWORD\"),\n\t\tos.Getenv(\"DB_NAME\"),\n\t\tos.Getenv(\"DB_HOST\"),\n\t\t3306)\n\n\ta.Run(\":8081\")\n}", "func Main() {\n\tusage := `iOS client v 0.01\n\nUsage:\n sim listen [<sock>]\n sim ls\n\n The commands work as following:\n sim ls will dump a list of currently active testmanagerd simulator sockets. Copy paste a path out of there to use with listen\n sim listen will either take the first available simulator that is running or you can pass it a socket path for a specific sim if you want. once it is running, start a xcuitest in xcode and watch the files with DTX dump being created\n`\n\targuments, err := docopt.ParseDoc(usage)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tls, _ := arguments.Bool(\"ls\")\n\tif ls {\n\t\tlist, err := fu.ListSockets()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Could not get sockets because: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tlog.Println(list)\n\t\treturn\n\t}\n\n\tsock, _ := arguments.String(\"<sock>\")\n\tif sock == \"\" {\n\t\tlog.Print(\"No socket specified, trying to find active sockets..\")\n\t\tsock, err = fu.FirstSocket()\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"could not find socket\")\n\t\t}\n\t\tlog.Printf(\"Using socket:%s\", sock)\n\t}\n\tnewSocket, _ := fu.MoveSock(sock)\n\thandle := proxy.Launch(sock, newSocket)\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\t<-c\n\tlog.Print(\"CTRL+C detected, shutting down\")\n\thandle.Stop()\n\tfu.MoveBack(sock)\n}", "func main() {\n\tconfig := types.SetupConfig()\n\tlog.Printf(\"main.SetupConfig: %#v\\n\", config)\n\n\t/***** Start three GreeterServers(with one of them to be the slowServer). 
*****/\n\tgrpcAddress := strings.Split(config.GrpcAddress, \",\")\n\tfor i := 0; i < 3; i++ {\n\t\tlis, err := net.Listen(\"tcp\", grpcAddress[i])\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"main.Listen: %v\", err)\n\t\t}\n\t\tdefer lis.Close()\n\t\ts := grpc.NewServer()\n\t\tpb.RegisterPortsDbServer(s, &server{})\n\t\tgo s.Serve(lis)\n\t}\n\n\t/***** Wait for user exiting the program *****/\n\tselect {}\n}", "func main() {\n\thttp.HandleFunc(\"/\", handlers.Home)\n\thttp.HandleFunc(\"/about\", handlers.About)\n\n\tfmt.Printf(\"Staring application on port %s\", portNumber)\n\t_ = http.ListenAndServe(portNumber, nil)\n}", "func main() {\n\n\tlog.SetVerbose(log.DEBUG)\n\n\tdefer func() {\n\t\tif r := recover(); nil != r {\n\t\t\tlog.Error(\"%v\", r)\n\t\t}\n\t}()\n\n\t// parse command line args\n\tvar configFile = flag.String(\"conf\", \"conf.json\", \"configuration file\")\n\tflag.Parse()\n\n\tlog.Info(\"Initializing broker with options from %s.\", *configFile)\n\n\t// init configuration\n\tconfig, err := config.Init(*configFile)\n\tcheckError(err)\n\n\tlog.Info(\"Options read were: %v\", config)\n\n\tport, err := strconv.Atoi(config.Get(\"port\", PORT))\n\tcheckError(err)\n\n\tlog.SetPrefix(fmt.Sprintf(\"broker@%d: \", port))\n\n\tbroker, err = brokerimpl.New(config)\n\tcheckError(err)\n\n\tlistenHttp(port)\n\n}", "func main() {\n\n\targuments := os.Args\n\tif len(arguments) == 1 {\n\t\tfmt.Println(\"Please provide a socket file\")\n\n\t\t// make a sys call to exit the process\n\t\tos.Exit(100)\n\t}\n\n\tsocketFile := arguments[1]\n\n\tlistener, err := net.Listen(\"unix\", socketFile)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(100)\n\t}\n\n\tfor {\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(100)\n\t\t}\n\n\t\t// run the server in another goroutine, thread or\n\t\t// in a way process but child process\n\t\t// because the server.go and main.go belong to same\n\t\t// package, we can access the function 
without importing it\n\t\tgo echoServer(conn)\n\t}\n\n}", "func Main() {\n\tflag.Parse()\n\n\tif err := run(); err != nil {\n\t\tlog.Warningf(\"%v\", err)\n\t\tos.Exit(1)\n\t}\n}", "func main() {\n\tfmt.Println(\"################################\")\n\tfmt.Println(\"#### Hello from MyAppStatus ####\")\n\tfmt.Println(\"################################\")\n\n\tapp.StartServer()\n}", "func main() {\n\n\tgo EdgeMapper()\n\tgo mapClients()\n\tgo handleDb()\n\trouter := mux.NewRouter()\n\trouter.HandleFunc(\"/\", simpleHandler)\n\trouter.HandleFunc(\"/webSocket\", handleClientSocket)\n\trouter.HandleFunc(\"/ws\", handleEdgeSocket)\n\trouter.PathPrefix(\"/\").Handler(http.FileServer(http.Dir(\"./\")))\n\n\terr := http.ListenAndServe(\":4000\", router)\n\n\t//\terr := http.ListenAndServe(\":\"+os.Getenv(\"PORT\"), router)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func main() {\n\n\tc.InitConfig()\n\n\tdb.Connect()\n\n\te := echo.New()\n\n\tr.InitRoutes(e)\n\n\te.Use(middleware.CORSWithConfig(middleware.CORSConfig{\n\t\tAllowOrigins: []string{\"http://localhost:3000\"},\n\t\tAllowHeaders: []string{echo.HeaderOrigin, echo.HeaderContentType, echo.HeaderAccept},\n\t}))\n\n\te.Use(middleware.RequestID())\n\te.Pre(middleware.RemoveTrailingSlash())\n\te.Use(middleware.Recover())\n\n\te.Logger.Fatal(e.Start(\":80\"))\n}", "func main() {\n\n\t// Loads env variables\n\t//err := godotenv.Load()\n\t//if err != nil {\n\t//\tlog.Fatal(\"Error loading .env file\")\n\t//\treturn\n\t//}\n\n\t//http.HandleFunc(\"/\", handler)\n\t//log.Fatal(http.ListenAndServe(fmt.Sprintf(\":%s\", \"8080\"), nil))\n\tgodotenv.Load()\n\n\trouter := entry.Initialize()\n\trouter.Run(\":3000\")\n}", "func init() {\n\thostPtr := flag.String(\"host\", \"localhost\", \"ip of host\")\n\tportPtr := flag.String(\"port\", \"12345\", \"port on which to run server\")\n\tflag.Parse()\n\thost = *hostPtr\n\tport = *portPtr\n}", "func main() {\n\n\tif rpcR := checkRunning(); rpcR != nil {\n\t\t// R is running, send stdin 
to it\n\t\tsendToR(rpcR)\n\t} else {\n\t\t// R is not running, start\n\t\tfmt.Println(\"Starting R\")\n\t\tif r := startR(os.Args[1:]...); r != nil {\n\t\t\t<-r.wait\n\t\t}\n\t}\n}", "func main() {\n\t//establish connection to the primary replica\n\t//connect to server\n\tconn_main_replica, err := net.Dial(\"tcp\", \"localhost:8084\")\n\tdefer conn_main_replica.Close()\n\tif err != nil {\n\t\tpanic(\"Failed connect to conn_main_replica\\n\")\n\t}\n\n\t//load user list for faster access to a list of current users\n\tload_user_list()\n\thandle_requests(conn_main_replica)\n}", "func main() {\n\tfmt.Println(\"Client.go\");\n}", "func main() {\n\n\tuseTLS, err := strconv.ParseBool(os.Args[1])\n\tif err != nil {\n\t\tlogrus.Errorf(\"invalid argument: %s\", err.Error())\n\t}\n\n\tlogrus.Infof(\"Starting HTTP server with tls: %v\", useTLS)\n\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"/nginx_status\", serveNginx)\n\tmux.HandleFunc(\"/json\", serveJSON)\n\n\tif useTLS {\n\t\tstartHTTPS(mux)\n\t} else {\n\t\tstartHTTP(mux)\n\t}\n}", "func main() {\n\n\trouter := NewRouter()\n\tlog.Fatal(http.ListenAndServe(\":8080\", router))\n}", "func main() {\n\t// create a background context (i.e. 
one that never cancels)\n\tctx := context.Background()\n\n\t// start a libp2p node that listens on a random local TCP port,\n\t// but without running the built-in ping protocol\n\tnode, err := libp2p.New(ctx,\n\t\tlibp2p.ListenAddrStrings(\"/ip4/127.0.0.1/tcp/1234\"),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// configure our own ping protocol\n\tpingService := &ping.PingService{Host: node}\n\tnode.SetStreamHandler(ping.ID, pingService.PingHandler)\n\n\t// print the node's PeerInfo in multiaddr format\n\tpeerInfo := peerstore.AddrInfo{\n\t\tID: node.ID(),\n\t\tAddrs: node.Addrs(),\n\t}\n\taddrs, err := peerstore.AddrInfoToP2pAddrs(&peerInfo)\n\tfmt.Println(\"libp2p node address:\", addrs[0])\n\n\t// print the node's listening addresses\n\tfmt.Println(\"Listen addresses:\", node.Addrs())\n\n\t// shut the node down\n\tif err := node.Close(); err != nil {\n\t\tpanic(err)\n\t}\n}", "func main() {\n\t// load config\n\tconfig.Init()\n\n\t// services\n\tservices.Init()\n\n\t// start gin server\n\trouter.RunGin()\n}", "func main() {\n\t/**\n\t * 先调用ParseCommand()函数解析命令行参数\n\t */\n\tcmd := parseCmd()\n\tif cmd.versionFlag {\n\t\t/**\n\t\t * 。如果用户输入了-version选项,则输出版本信息\n\t\t */\n\t\tfmt.Println(\"version 1.8.0\")\n\t} else if cmd.helpFlag || cmd.class == \"\" {\n\t\t/**\n\t\t * 如果解析出现错误,或者用户输入了-help选项,则调用PrintUsage()函数打印出帮助信息\n\t\t */\n\t\tprintUsage()\n\t} else {\n\t\t/**\n\t\t * 如果一切正常,则调用startJVM()函数启动Java虚拟机\n\t\t * 因为我们还没有真正开始编写Java虚拟机,所以startJVM()函数暂时只是打印一些信息而已,\n\t\t */\n\t\tstartJVM(cmd)\n\t}\n}", "func main() {\n\n\tconfigFile := flag.String(\"config\", \"config/config.yaml\", \"configuration file\")\n\t// debug := flag.Bool(\"debug\", false, \"enable debug mode\")\n\tflag.Parse()\n\n\t// Load global configuration from file\n\terr := cfg.loadConfig(*configFile)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"ConfigFile\": *configFile,\n\t\t\t\"Error\": err,\n\t\t}).Fatal(\"Unable to read configuration file\")\n\t}\n\n\tr := setupRouter()\n\n\t// 
Check for PORT environment variable config override.\n\tp := cfg.Server.Port\n\tif s := os.Getenv(\"PORT\"); len(s) > 0 {\n\t\tp, _ = strconv.Atoi(s)\n\t}\n\n\t// TODO fix path for logrus. Using fmt temporarily\n\t// log.WithFields(log.Fields{\n\t// \t\"Address\": cfg.Server.Address,\n\t// \t\"Port\": p,\n\t// }).Debug(\"Starting HTTP server\")\n\n\tfmt.Println(\"Starting HTTP Server\")\n\n\t// Listen and Serve at server address and port specified in config file\n\tr.Run(fmt.Sprintf(\"%s:%d\", cfg.Server.Address, p))\n}" ]
[ "0.70549303", "0.6946936", "0.670047", "0.6645153", "0.6628611", "0.66251695", "0.6601195", "0.6575071", "0.6574852", "0.6552101", "0.6540078", "0.64823025", "0.64497507", "0.64278376", "0.6411183", "0.63908523", "0.6383223", "0.6341063", "0.6328398", "0.63155854", "0.6315046", "0.63040376", "0.62960756", "0.6285047", "0.625506", "0.62469316", "0.6225739", "0.6212643", "0.62098104", "0.6194935", "0.6186158", "0.6175976", "0.61572874", "0.6149057", "0.6145319", "0.61401826", "0.6131459", "0.6125525", "0.6095188", "0.6080257", "0.6071658", "0.6070961", "0.60655737", "0.60630846", "0.60548675", "0.6052583", "0.60355014", "0.6025194", "0.6019952", "0.6009825", "0.6006614", "0.5997414", "0.59962827", "0.5994021", "0.5991499", "0.5990538", "0.5986967", "0.59782046", "0.5962863", "0.5958024", "0.5955935", "0.59520894", "0.5945274", "0.5944044", "0.5924219", "0.59207493", "0.5917511", "0.5909673", "0.59022266", "0.59022194", "0.5901981", "0.59002024", "0.5899645", "0.5895471", "0.58930135", "0.58910096", "0.5888845", "0.5883545", "0.58810115", "0.58778125", "0.58730674", "0.58691895", "0.58622324", "0.5854472", "0.5853803", "0.58467233", "0.58392555", "0.58231455", "0.58216596", "0.58212006", "0.5819386", "0.58121663", "0.58102846", "0.5804677", "0.5802714", "0.5794712", "0.5794383", "0.5791641", "0.57891554", "0.578806" ]
0.80571264
0
The Lshortfile flag includes file name and line number in log messages.
func init() { log.SetFlags(log.Lshortfile) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func StatusShort(c *Client, files []File, untracked StatusUntrackedMode, lineprefix, lineending string) (string, error) {\n\tvar lsfiles []File\n\tif len(files) == 0 {\n\t\tlsfiles = []File{File(c.WorkDir)}\n\t} else {\n\t\tlsfiles = files\n\t}\n\n\tcfiles, err := LsFiles(c, LsFilesOptions{Cached: true}, lsfiles)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ttree := make(map[IndexPath]*IndexEntry)\n\t// It's not an error to use \"git status\" before the first commit,\n\t// so discard the error\n\tif head, err := c.GetHeadCommit(); err == nil {\n\t\ti, err := LsTree(c, LsTreeOptions{FullTree: true, Recurse: true}, head, files)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\t// this should probably be an LsTreeMap library function, it would be\n\t\t// useful other places..\n\t\tfor _, e := range i {\n\t\t\ttree[e.PathName] = e\n\t\t}\n\t}\n\tvar ret string\n\tvar wtst, ist rune\n\tfor i, f := range cfiles {\n\t\twtst = ' '\n\t\tist = ' '\n\t\tfname, err := f.PathName.FilePath(c)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tswitch f.Stage() {\n\t\tcase Stage0:\n\t\t\tif head, ok := tree[f.PathName]; !ok {\n\t\t\t\tist = 'A'\n\t\t\t} else {\n\t\t\t\tif head.Sha1 == f.Sha1 {\n\t\t\t\t\tist = ' '\n\t\t\t\t} else {\n\t\t\t\t\tist = 'M'\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tstat, err := fname.Stat()\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\twtst = 'D'\n\t\t\t} else {\n\t\t\t\tmtime, err := fname.MTime()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\t\t\t\tif mtime != f.Mtime || stat.Size() != int64(f.Fsize) {\n\t\t\t\t\twtst = 'M'\n\t\t\t\t} else {\n\t\t\t\t\twtst = ' '\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ist != ' ' || wtst != ' ' {\n\t\t\t\tret += fmt.Sprintf(\"%c%c %v%v\", ist, wtst, fname, lineending)\n\t\t\t}\n\t\tcase Stage1:\n\t\t\tswitch cfiles[i+1].Stage() {\n\t\t\tcase Stage2:\n\t\t\t\tif i >= len(cfiles)-2 {\n\t\t\t\t\t// Stage3 is missing, we've reached the end of the index.\n\t\t\t\t\tret += fmt.Sprintf(\"MD %v%v\", fname, 
lineending)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tswitch cfiles[i+2].Stage() {\n\t\t\t\tcase Stage3:\n\t\t\t\t\t// There's a stage1, stage2, and stage3. If they weren't all different, read-tree would\n\t\t\t\t\t// have resolved it as a trivial stage0 merge.\n\t\t\t\t\tret += fmt.Sprintf(\"UU %v%v\", fname, lineending)\n\t\t\t\tdefault:\n\t\t\t\t\t// Stage3 is missing, but we haven't reached the end of the index.\n\t\t\t\t\tret += fmt.Sprintf(\"MD%v%v\", fname, lineending)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\tcase Stage3:\n\t\t\t\t// Stage2 is missing\n\t\t\t\tret += fmt.Sprintf(\"DM %v%v\", fname, lineending)\n\t\t\t\tcontinue\n\t\t\tdefault:\n\t\t\t\tpanic(\"Unhandled index\")\n\t\t\t}\n\t\tcase Stage2:\n\t\t\tif i == 0 || cfiles[i-1].Stage() != Stage1 {\n\t\t\t\t// If this is a Stage2, and the previous wasn't Stage1,\n\t\t\t\t// then we know the next one must be Stage3 or read-tree\n\t\t\t\t// would have handled it as a trivial merge.\n\t\t\t\tret += fmt.Sprintf(\"AA %v%v\", fname, lineending)\n\t\t\t}\n\t\t\t// If the previous was Stage1, it was handled by the previous\n\t\t\t// loop iteration.\n\t\t\tcontinue\n\t\tcase Stage3:\n\t\t\t// There can't be just a Stage3 or read-tree would\n\t\t\t// have resolved it as Stage0. All cases were handled\n\t\t\t// by Stage1 or Stage2\n\t\t\tcontinue\n\t\t}\n\t}\n\tif untracked != StatusUntrackedNo {\n\t\tlsfilesopts := LsFilesOptions{\n\t\t\tOthers: true,\n\t\t}\n\t\tif untracked == StatusUntrackedNormal {\n\t\t\tlsfilesopts.Directory = true\n\t\t}\n\n\t\tuntracked, err := LsFiles(c, lsfilesopts, lsfiles)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tfor _, f := range untracked {\n\t\t\tfname, err := f.PathName.FilePath(c)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\tif name := fname.String(); name == \".\" {\n\t\t\t\tret += \"?? ./\" + lineending\n\t\t\t} else {\n\t\t\t\tret += \"?? 
\" + name + lineending\n\t\t\t}\n\t\t}\n\t}\n\treturn ret, nil\n\n}", "func LogFileName() {\n\tlFileLength = log.Lshortfile\n}", "func Short() string {\n\treturn fmt.Sprintf(\"%s-%s\", _buildVersion, _buildGitRevision)\n}", "func shortFileName(file string) string {\n\treturn filepath.Base(file)\n}", "func (l Level) NameShort() string {\n\tswitch l {\n\tcase TraceLevel:\n\t\treturn \"TRC\"\n\tcase DebugLevel:\n\t\treturn \"DBG\"\n\tcase InfoLevel:\n\t\treturn \"INF\"\n\tcase WarnLevel:\n\t\treturn \"WRN\"\n\tcase ErrorLevel:\n\t\treturn \"ERR\"\n\tcase FatalLevel:\n\t\treturn \"FTL\"\n\tdefault:\n\t\treturn \"\"\n\t}\n}", "func (o *ListIssueGroupOfProjectVersionParams) SetShowshortfilenames(showshortfilenames *bool) {\n\to.Showshortfilenames = showshortfilenames\n}", "func callerShortfile(file string, lastsep_ ...rune) string {\n\tlastsep := '/'\n\tif len(lastsep_) > 0 {\n\t\tlastsep = lastsep_[0]\n\t}\n\tshort := file\n\tfor i := len(file) - 1; i > 0; i-- {\n\t\tif file[i] == byte(lastsep) {\n\t\t\tshort = file[i+1:]\n\t\t\tbreak\n\t\t}\n\t}\n\treturn short\n}", "func ShortFlag(name string) FlagOption {\n\treturn func(f *Flag) {\n\t\tf.alias = name\n\t}\n}", "func LogFilePath() {\n\tlFileLength = log.Llongfile\n}", "func generateStdflagShortFile() string {\n\tvar s string\n\tvar sf string\n\n\t_, fn, ln, ok := runtime.Caller(2)\n\tif ok {\n\t\tsf = fn + \":\" + strconv.Itoa(ln)\n\t\tindex := strings.LastIndex(sf, \"/\")\n\t\tsf = sf[index+1:]\n\t}\n\ts = time.Now().Format(time.RFC3339) + \" \" + sf + \": \"\n\treturn s\n}", "func (f *Formatter) Short() string {\n\tdays, hours, mins, secs := resolve(f.duration)\n\treturn fmt.Sprintf(\"%dd%dh%dm%ds\\n\", days, hours, mins, secs)\n}", "func (f File) VerboseString() string {\n\treturn fmt.Sprintf(\"%6s [%s] [%s] %s\", strconv.Itoa(f.Index)+\":\", f.kind(), f.result(), f.name())\n}", "func (v *Version) ShortVersion() string {\n\treturn fmt.Sprintf(\"%d.%d.%d\", v.Release, v.Fixpack, v.Hotfix)\n}", "func Short() string 
{\n\treturn version\n}", "func (v Version) ShortString() string {\n\treturn fmt.Sprintf(\"%d.%d\", v.Major, v.Minor)\n}", "func DebugfFile(p token.Position, format string, args ...interface{}) {\n\tlogger.Printf(levelDebug.format(p)+format, args...)\n}", "func InitDetailedLogger(f *os.File) {\n\n\tlog.SetReportCaller(true)\n\tlog.SetLevel(logrus.DebugLevel)\n\n\tlog.SetFormatter(&logrus.JSONFormatter{\n\t\tTimestampFormat: \"\",\n\t\tPrettyPrint: true,\n\t\tCallerPrettyfier: func(f *runtime.Frame) (string, string) {\n\t\t\ts := strings.Split(f.Function, \".\")\n\t\t\tfuncname := s[len(s)-1]\n\t\t\t_, filename := path.Split(f.File)\n\t\t\treturn funcname, filename\n\t\t},\n\t})\n\n\t// Set output of logs to Stdout\n\t// Change to f for redirecting to file\n\tlog.SetOutput(os.Stdout)\n\n}", "func addFileLinePrefix(msg string) string {\n\tvar file string\n\n\t// Using runtime.Caller() with calldepth == 2 is enough for getting the\n\t// logger function callers\n\t_, filePath, line, ok := runtime.Caller(2)\n\tif ok {\n\t\tfileParts := strings.Split(filePath, \"/\")\n\t\tfile = fileParts[len(fileParts)-1]\n\t} else {\n\t\t// Not sure if there's a better name or line number for an unknown caller\n\t\tfile = \"???\"\n\t\tline = 0\n\t}\n\n\tprefix := []string{file, \":\", strconv.Itoa(line), \":\"}\n\t// When called from Error, Warn, Info or Debug(), the Print() used\n\t// doesn't know about this additional prefix we're adding, so we\n\t// need to add the space between it and the msg ourselves.\n\tif len(strings.TrimSpace(msg)) > 0 {\n\t\tprefix = append(prefix, \" \")\n\t}\n\n\tprefixedMsg := append(prefix, msg)\n\treturn strings.Join(prefixedMsg, \"\")\n}", "func LogFileFlag(k *kingpin.Application) *string {\n\treturn k.Flag(logger.FileFlag, logger.FileFlagHelp).String()\n}", "func (ln *localen) FmtTimeShort(t time.Time) string {\n\treturn ln.fnFmtTimeShort(ln, t)\n}", "func ShortVersion() string {\n\tidx := strings.LastIndex(Version, \".\")\n\treturn Version[:idx]\n}", 
"func init() {\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n}", "func init() {\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n}", "func init() {\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n}", "func Infof(format string, v ...interface{}) {\n\tvar s string\n\tjl.stdlog.SetPrefix(\"[INFO] \")\n\tif jl.flag == LstdFlags|Lshortfile {\n\t\ts = generateStdflagShortFile()\n\t}\n\n\tjl.stdlog.Printf(s+format, v...)\n}", "func (s *Instruction) shortDebugString(prefix ...string) string {\n\treturn fmt.Sprintf(\"%s%s type: %s\", strings.Join(prefix, \"\"), s.Name, s.Type.String())\n}", "func (spr *StakingPriceRecord) LogFieldsShort() log.Fields {\n\treturn log.Fields{\n\t\t\"spr_hash\": hex.EncodeToString(spr.SPRHash),\n\t}\n}", "func (l *Logger) formatHeader(buf *[]byte, t time.Time, file string, line int) {\n\t*buf = append(*buf, l.prefix...)\n\tif l.flag&(Ldate|Ltime|Lmicroseconds) != 0 {\n\t\tif l.flag&LUTC != 0 {\n\t\t\tt = t.UTC()\n\t\t}\n\t\tif l.flag&Ldate != 0 {\n\t\t\tyear, month, day := t.Date()\n\t\t\titoa(buf, year, 4)\n\t\t\t*buf = append(*buf, '/')\n\t\t\titoa(buf, int(month), 2)\n\t\t\t*buf = append(*buf, '/')\n\t\t\titoa(buf, day, 2)\n\t\t\t*buf = append(*buf, ' ')\n\t\t}\n\t\tif l.flag&(Ltime|Lmicroseconds) != 0 {\n\t\t\thour, min, sec := t.Clock()\n\t\t\titoa(buf, hour, 2)\n\t\t\t*buf = append(*buf, ':')\n\t\t\titoa(buf, min, 2)\n\t\t\t*buf = append(*buf, ':')\n\t\t\titoa(buf, sec, 2)\n\t\t\tif l.flag&Lmicroseconds != 0 {\n\t\t\t\t*buf = append(*buf, '.')\n\t\t\t\titoa(buf, t.Nanosecond()/1e3, 6)\n\t\t\t}\n\t\t\t*buf = append(*buf, ' ')\n\t\t}\n\t}\n\tif l.flag&(Lshortfile|Llongfile) != 0 {\n\t\tif l.flag&Lshortfile != 0 {\n\t\t\tshort := file\n\t\t\tfor i := len(file) - 1; i > 0; i-- {\n\t\t\t\tif file[i] == '/' {\n\t\t\t\t\tshort = file[i+1:]\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tfile = short\n\t\t}\n\t\t*buf = append(*buf, file...)\n\t\t*buf = append(*buf, ':')\n\t\titoa(buf, line, -1)\n\t\t*buf = append(*buf, \": \"...)\n\t}\n}", "func 
(m *Message) ShortDescription() string {\n\treturn fmt.Sprintf(\"%s\", fmt.Sprintf(m.definition, m.args...))\n}", "func LfileLabel(fpath string) (string, error) {\n\treturn lFileLabel(fpath)\n}", "func Debugf(format string, v ...interface{}) {\n\tif Debug == true {\n\t\torigin := log.Flags()\n\t\tlog.SetFlags(log.LstdFlags | log.Llongfile)\n\t\tlog.Printf(format, v...)\n\t\tlog.SetFlags(origin)\n\t}\n}", "func fileSource(filename string, i int) string {\n\treturn fmt.Sprintf(\"%s:%d\", filename, i)\n}", "func SetupLoggingLong(lvl string) {\n\tSetLogger(log.New(os.Stderr, \"\", log.LstdFlags|log.Llongfile|log.Lmicroseconds), strings.ToLower(lvl))\n}", "func Debugln(v ...interface{}) {\n\tif Debug == true {\n\t\torigin := log.Flags()\n\t\tlog.SetFlags(log.LstdFlags | log.Llongfile)\n\t\tlog.Println(v...)\n\t\tlog.SetFlags(origin)\n\t}\n}", "func (o *ListIssueGroupOfProjectVersionParams) WithShowshortfilenames(showshortfilenames *bool) *ListIssueGroupOfProjectVersionParams {\n\to.SetShowshortfilenames(showshortfilenames)\n\treturn o\n}", "func (ln *localen) FmtDateShort(t time.Time) string {\n\treturn ln.fnFmtDateShort(ln, t)\n}", "func (ts TimeStamp) FormatShort() string {\n\treturn ts.Format(\"Jan 02, 2006\")\n}", "func (a *ASTNode) showTail(fn string) error {\n\tr, err := os.Open(fn)\n\tif err != nil {\n\t\treturn err\n\t}\n\trb := bufio.NewReader(r)\n\tlast := \"\"\n\tfor {\n\t\tln, err := rb.ReadString('\\n')\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif ln != \"\" {\n\t\t\tlast = ln\n\t\t}\n\t}\n\tif last != \"\" {\n\t\tfmt.Fprint(os.Stderr, last)\n\t}\n\treturn nil\n}", "func printUsage(f *os.File, kind HelpType) {\n\tconst usageTempl = `\nUSAGE:\n{{.Sp3}}{{.AppName}} --mount=<directory> --shadow=<directory> [--out=<file>]\n{{.Sp3}}{{.AppNameFiller}} [(--csv | --json)] [--ro]\n{{.Sp3}}{{.AppName}} --help\n{{.Sp3}}{{.AppName}} --version\n{{if eq .UsageVersion \"short\"}}\nUse '{{.AppName}} --help' to get 
detailed information about options and\nexamples of usage.{{else}}\n\nDESCRIPTION:\n{{.Sp3}}{{.AppName}} mounts a synthesized file system which purpose is to generate\n{{.Sp3}}trace events for each low level file I/O operation executed on any file\n{{.Sp3}}or directory under its control. Examples of such operations are open(2),\n{{.Sp3}}read(2), write(2), close(2), access(2), etc.\n\n{{.Sp3}}{{.AppName}} exposes the contents of the directory specified by the\n{{.Sp3}}option '--shadow' via the path specified by the option '--mount'. {{.AppName}}\n{{.Sp3}}generates a trace event for each I/O operation and forwards the operation\n{{.Sp3}}to the target file system, that is, the one which actually hosts the shadow\n{{.Sp3}}directory. See the EXAMPLES section below.\n\n{{.Sp3}}Individual trace events generated by {{.AppName}} are written to the specified\n{{.Sp3}}output file (option --out) in the specified format.\n\n\nOPTIONS:\n{{.Sp3}}--mount=<directory>\n{{.Tab1}}This is the top directory through which the files and directories residing\n{{.Tab1}}under the shadow directory will be exposed. See the EXAMPLES section below.\n{{.Tab1}}The specified directory must exist and must be empty.\n\n{{.Sp3}}--shadow=<directory>\n{{.Tab1}}This is a directory where the files and directories you want to trace\n{{.Tab1}}actually reside.\n{{.Tab1}}The specified directory must exist but may be empty.\n\n{{.Sp3}}--out=<file>\n{{.Tab1}}Path of the text file to write the trace events to. 
If this file\n{{.Tab1}}does not exist it will be created, otherwise new events will be appended.\n{{.Tab1}}Note that this file cannot be located under the shadow directory.\n{{.Tab1}}Use '-' (dash) to write the trace events to the standard output.\n{{.Tab1}}In addition, you can specify a file name with extension '.csv' or '.json'\n{{.Tab1}}to instruct {{.AppName}} to emit records in the corresponding format,\n{{.Tab1}}as if you had used the '--csv' or '--json' options (see below).\n{{.Tab1}}Default: write trace records to standard output.\n\n{{.Sp3}}--csv\n{{.Tab1}}Format each individual trace event generated by {{.AppName}} as a set of\n{{.Tab1}}comma-separated values in a single line.\n{{.Tab1}}Note that not all events contain the same information since each\n{{.Tab1}}low level I/O operation requires specific arguments. Please refer to\n{{.Tab1}}the documentation at 'https://github.com/airnandez/{{.AppName}}' for\n{{.Tab1}}details on the format of each event.\n{{.Tab1}}CSV is the default output format unless the output file name (see option\n{{.Tab1}}'--out' above) has a '.json' extension.\n\n{{.Sp3}}--json\n{{.Tab1}}Format each individual trace event generated by {{.AppName}} as\n{{.Tab1}}a JSON object. Events in this format are self-described but not all\n{{.Tab1}}events contain the same information since each low level I/O operation\n{{.Tab1}}requires specific arguments. 
Please refer to the documentation\n{{.Tab1}}at 'https://github.com/airnandez/{{.AppName}}' for details on the format\n{{.Tab1}}of each event.\n\n{{.Sp3}}--ro\n{{.Tab1}}Expose the shadow file system as a read-only file system.\n{{.Tab1}}Default: if this option is not specified, the file system is mounted in\n{{.Tab1}}read-write mode.\n\n{{.Sp3}}--allowother\n{{.Tab1}}Allow other users to access the file system.\n\n{{.Sp3}}--help\n{{.Tab1}}Show this help\n\n{{.Sp3}}--version\n{{.Tab1}}Show version information and source repository location\n\nEXAMPLES:\n{{.Sp3}}To trace file I/O operations on files under $HOME/data use:\n\n{{.Tab1}}{{.AppName}} --mount=/tmp/trace --shadow=$HOME/data\n\n{{.Sp3}}After a successfull mount, the contents under $HOME/data are also\n{{.Sp3}}accessible by using the path /tmp/trace. For instance, if the\n{{.Sp3}}file $HOME/data/hello.txt exists, {{.AppName}} traces all the file I/O\n{{.Sp3}}operations induced by the command:\n\n{{.Tab1}}cat /tmp/trace/hello.txt\n\n{{.Sp3}}Trace events for each one of the low level operations induced by the\n{{.Sp3}}'cat' command above will be written to the output file, the standard\n{{.Sp3}}output in this particular example.\n\n{{.Sp3}}You can also create new files under /tmp/trace. For instance, the file\n{{.Sp3}}I/O operations induced by the shell command:\n\n{{.Tab1}}echo \"This is a new file\" > /tmp/trace/newfile.txt\n\n{{.Sp3}}will be traced and the file will actually be created in\n{{.Sp3}}$HOME/data/newfile.txt. 
This file will persist even after unmounting\n{{.Sp3}}{{.AppName}} (see below on how to unmount the synthetized file system).\n\n{{.Sp3}}Please note that any destructive action, such as removing or modifying\n{{.Sp3}}the contents of a file or directory using the path /tmp/trace will\n{{.Sp3}}affect the corresponding file or directory under $HOME/data.\n{{.Sp3}}For example, the command:\n\n{{.Tab1}}rm /tmp/trace/notes.txt\n\n{{.Sp3}}will have the same destructive effect as if you had executed\n\n{{.Tab1}}rm $HOME/data/notes.txt\n\n{{.Sp3}}To unmount the file system exposed by {{.AppName}} use:\n\n{{.Tab1}}umount /tmp/trace\n\n{{.Sp3}}Alternatively, on MacOS X you can also use the diskutil(8) command:\n\n{{.Tab1}}/usr/sbin/diskutil unmount /tmp/trace\n{{end}}\n`\n\n\tfields := map[string]string{\n\t\t\"AppName\": programName,\n\t\t\"AppNameFiller\": strings.Repeat(\" \", len(programName)),\n\t\t\"Sp2\": \" \",\n\t\t\"Sp3\": \" \",\n\t\t\"Sp4\": \" \",\n\t\t\"Sp5\": \" \",\n\t\t\"Sp6\": \" \",\n\t\t\"Tab1\": \"\\t\",\n\t\t\"Tab2\": \"\\t\\t\",\n\t\t\"Tab3\": \"\\t\\t\\t\",\n\t\t\"Tab4\": \"\\t\\t\\t\\t\",\n\t\t\"Tab5\": \"\\t\\t\\t\\t\\t\",\n\t\t\"Tab6\": \"\\t\\t\\t\\t\\t\\t\",\n\t\t\"UsageVersion\": \"short\",\n\t}\n\tif kind == HelpLong {\n\t\tfields[\"UsageVersion\"] = \"long\"\n\t}\n\tminWidth, tabWidth, padding := 8, 4, 0\n\ttabwriter := tabwriter.NewWriter(f, minWidth, tabWidth, padding, byte(' '), 0)\n\ttempl := template.Must(template.New(\"\").Parse(usageTempl))\n\ttempl.Execute(tabwriter, fields)\n\ttabwriter.Flush()\n}", "func WarnfFile(p token.Position, format string, args ...interface{}) {\n\tlogger.Printf(levelWarn.format(p)+format, args...)\n}", "func (s Status) ShortPrint() string {\n\tsflag := fmt.Sprintf(\"Status: 0x%x\\n\", s.Header)\n\tsflag += fmt.Sprintf(\" session:%d\\n\", s.Session)\n\tif sarflags.GetStr(s.Header, \"reqtstamp\") == \"yes\" {\n\t\tsflag += fmt.Sprintf(\" timestamp:%s\\n\", s.Tstamp.Print())\n\t}\n\tsflag += fmt.Sprintf(\" 
errcode:%s\\n\", sarflags.GetStr(s.Header, \"errcode\"))\n\tsflag += fmt.Sprintf(\" progress:%d\\n\", s.Progress)\n\tsflag += fmt.Sprintf(\" inrespto:%d\\n\", s.Inrespto)\n\tsflag += fmt.Sprintf(\" numb holes:%d\", len(s.Holes))\n\treturn sflag\n}", "func (s Segment) ShortString() string {\n\tmib, _, objs := s.SpeedPerSec()\n\tspeed := \"\"\n\tif mib > 0 {\n\t\tspeed = fmt.Sprintf(\"%.02f MiB/s, \", mib)\n\t}\n\treturn fmt.Sprintf(\"%s%.02f obj/s (%v)\",\n\t\tspeed, objs, s.EndsBefore.Sub(s.Start).Round(time.Millisecond))\n}", "func (p LeclercParser) GetShortURL() string {\n\treturn p.shortURL\n}", "func ShortName(n string) Field {\n\treturn func(p *Packet) error {\n\t\treturn p.append(shortName, []byte(n))\n\t}\n}", "func (v *Video) LogFullFileInfo() error {\n\tres, err := shell.ExecuteCommand(v.l, \"ffprobe\", v.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv.l.Println(string(res))\n\treturn nil\n}", "func (l *Logger) FormatHeader(t time.Time, funcName string, file string, line int, levelName string) {\n\tl.buf = l.buf[:0]\n\tbuf := &l.buf\n\t*buf = append(*buf, l.prefix...)\n\tif len(l.prefix) > 0 {\n\t\t*buf = append(*buf, \" | \"...)\n\t}\n\tif l.flag&(Ldate|Ltime|Lmicroseconds) != 0 {\n\t\tif l.flag&LUTC != 0 {\n\t\t\tt = t.UTC()\n\t\t}\n\t\tif l.flag&Ldate != 0 {\n\t\t\tyear, month, day := t.Date()\n\t\t\titoa(buf, year, 4)\n\t\t\t*buf = append(*buf, '/')\n\t\t\titoa(buf, int(month), 2)\n\t\t\t*buf = append(*buf, '/')\n\t\t\titoa(buf, day, 2)\n\t\t\t*buf = append(*buf, ' ')\n\t\t}\n\t\tif l.flag&(Ltime|Lmicroseconds) != 0 {\n\t\t\thour, min, sec := t.Clock()\n\t\t\titoa(buf, hour, 2)\n\t\t\t*buf = append(*buf, ':')\n\t\t\titoa(buf, min, 2)\n\t\t\t*buf = append(*buf, ':')\n\t\t\titoa(buf, sec, 2)\n\t\t\tif l.flag&Lmicroseconds != 0 {\n\t\t\t\t*buf = append(*buf, '.')\n\t\t\t\titoa(buf, t.Nanosecond()/1e3, 6)\n\t\t\t}\n\t\t\t//*buf = append(*buf, ' ')\n\t\t}\n\t}\n\t*buf = append(*buf, \" | \"...)\n\tif l.flag&(Lshortfile|Llongfile) != 0 {\n\t\tif 
l.flag&Lshortfile != 0 {\n\t\t\tshort := file\n\t\t\tfor i := len(file) - 1; i > 0; i-- {\n\t\t\t\tif file[i] == '/' {\n\t\t\t\t\tshort = file[i+1:]\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tfile = short\n\t\t}\n\n\t\tl.cache = l.cache[:0]\n\t\tl.cache = append(l.cache, funcName...)\n\t\tl.cache = append(l.cache, ':')\n\t\tl.cache = append(l.cache, file...)\n\t\tl.cache = append(l.cache, ':')\n\t\titoa(&l.cache, line, -1)\n\n\t\t*buf = append(*buf, fmt.Sprintf(\"%-60s\", string(l.cache))...)\n\t}\n\n\tid := l.GetGID()\n\tif len(id) > 0 {\n\t\t*buf = append(*buf, \" | \"...)\n\t\t*buf = append(*buf, fmt.Sprintf(\"%-3s\", id)...)\n\t}\n\n\tif len(levelName) > 0 {\n\t\t*buf = append(*buf, \" | \"...)\n\t\t*buf = append(*buf, fmt.Sprintf(\"%-5s\", levelName)...)\n\t}\n\n}", "func ShortUsageFunc(fn func()) Option {\n\treturn Option{short_usage: fn}\n}", "func usage() { // nolint\n\tfmt.Fprint(os.Stderr, \"Usage: sherlock --options log-file\\n\") // nolint:gas\n\tflag.PrintDefaults()\n}", "func (c *Command) ShortDescription() string {\n\treturn strings.Split(c.Description, \"\\n\")[0]\n}", "func (d Data) ShortPrint() string {\n\tsflag := fmt.Sprintf(\"Data: 0x%x\\n\", d.Header)\n\tif sarflags.GetStr(d.Header, \"reqtstamp\") == \"yes\" {\n\t\tsflag += fmt.Sprintf(\" tstamp:%s\\n\", d.Tstamp.Print())\n\t}\n\tsflag += fmt.Sprintf(\" session:%d,\", d.Session)\n\tsflag += fmt.Sprintf(\" offset:%d,\", d.Offset)\n\tsflag += fmt.Sprintf(\" paylen:%d\", len(d.Payload))\n\treturn sflag\n}", "func ShortPrint(f Frame) string {\n\treturn f.ShortPrint()\n}", "func shortPath(path string) string {\n\tif rel, err := filepath.Rel(cwd, path); err == nil && len(rel) < len(path) {\n\t\treturn rel\n\t}\n\treturn path\n}", "func (*Short) Descriptor() ([]byte, []int) {\n\treturn file_pb_shortener_proto_rawDescGZIP(), []int{2}\n}", "func (l *Logger) Verbosefln(format string, v ...interface{}) {\n\tif l.Verbose {\n\t\tlog.Printf(format+\"\\n\", v...)\n\t}\n}", "func (s *Service) FullShort(c 
context.Context, pn, ps int64, source string) (res []*webmdl.Mi, err error) {\n\tvar (\n\t\taids []int64\n\t\tip = metadata.String(c, metadata.RemoteIP)\n\t\tm = make(map[int64]string)\n\t)\n\tif aids, err = s.aids(c, pn, ps); err != nil {\n\t\treturn\n\t}\n\tif res, err = s.archiveWithTag(c, aids, ip, m, source); err != nil {\n\t\tlog.Error(\"s.archiveWithTag error(%v)\", err)\n\t}\n\treturn\n}", "func (entry *Entry) Debugf(format string, args ...interface{}) {\n\tif logger.Level >= logrus.DebugLevel {\n\t\tentry.e.Data[\"file\"] = fileInfo(2)\n\t\tentry.e.Debugf(format, args...)\n\t}\n}", "func (*Shorten) Descriptor() ([]byte, []int) {\n\treturn file_shortener_proto_rawDescGZIP(), []int{0}\n}", "func TestSetLogLevelPrefixFlags(t *testing.T) {\n\tvar buf bytes.Buffer\n\ttst := New(LogDebug, Full, &buf, \"\", Lshortfile)\n\ttst.SetFlags(0)\n\tf := tst.Flags()\n\tif f != 0 {\n\t\tt.Errorf(\"flags: got %d; want 0\", f)\n\t}\n\ttst.SetLevel(LogDebug)\n\ttst.Error(\"error\")\n\tif buf.String() != \"ERROR: error\\n\" {\n\t\tt.Errorf(\"write error line: got %q; want \\\"ERROR: error\\n\\\"\", buf.String())\n\t}\n\tbuf.Reset()\n\ttst.Errorf(\"errorf: %d %s\", 42, \"eleven\")\n\tif buf.String() != \"ERROR: errorf: 42 eleven\\n\" {\n\t\tt.Errorf(\"write errorf line: got %q; want \\\"ERROR: errorf: 42 eleven\\n\\\"\", buf.String())\n\t}\n\tbuf.Reset()\n\ttst.Errorln(\"errorln:\", 42, \"eleven\")\n\tif buf.String() != \"ERROR: errorln: 42 eleven\\n\" {\n\t\tt.Errorf(\"write errorln line: got %q; want \\\"ERROR: errorln: 42 eleven\\n\\\"\", buf.String())\n\t}\n\tbuf.Reset()\n\ttst.Info(\"info\")\n\tif buf.String() != \"INFO: info\\n\" {\n\t\tt.Errorf(\"write info line: got %q; want \\\"INFO: info\\n\\\"\", buf.String())\n\t}\n\tbuf.Reset()\n\ttst.Infof(\"infof: %d\", 42)\n\tif buf.String() != \"INFO: infof: 42\\n\" {\n\t\tt.Errorf(\"write infof line: got %q; want \\\"INFO: infof: 42\\n\\\"\", buf.String())\n\t}\n\tbuf.Reset()\n\ttst.Infoln(\"infoln:\", 42)\n\tif buf.String() 
!= \"INFO: infoln: 42\\n\" {\n\t\tt.Errorf(\"write infoln line: got %q; want \\\"INFO: infoln: 42\\n\\\"\", buf.String())\n\t}\n\tbuf.Reset()\n\ttst.SetPrefix(\"abc\")\n\tp := tst.Prefix()\n\tif p != \"abc\" {\n\t\tt.Errorf(\"prefix: got %q; want \\\"abc\\\"\", p)\n\t}\n\ttst.SetLevelStringType(Char)\n\ttst.Debug(\"debug\")\n\tif buf.String() != \"abcD: debug\\n\" {\n\t\tt.Errorf(\"write debug line: %q; want \\\"abcD: debug\\n\\\"\", buf.String())\n\t}\n\tbuf.Reset()\n\ttst.Debugf(\"debugf: %d\", 42)\n\tif buf.String() != \"abcD: debugf: 42\\n\" {\n\t\tt.Errorf(\"write debugf line: %q; want \\\"abcD: debugf: 42\\n\\\"\", buf.String())\n\t}\n\tbuf.Reset()\n\ttst.Debugln(\"debugln:\", 42)\n\tif buf.String() != \"abcD: debugln: 42\\n\" {\n\t\tt.Errorf(\"write debugln line: %q; want \\\"abcD: debugln: 42\\n\\\"\", buf.String())\n\t}\n\tbuf.Reset()\n\ttst.SetLevel(LogNone)\n\tif tst.GetLevel() != LogNone {\n\t\tt.Errorf(\"logger severity level: got %s, want %s\", tst.GetLevel(), LogNone)\n\t}\n\ttst.Error(\"error\")\n\tif buf.Len() > 0 {\n\t\tt.Errorf(\"write error line: expected no bytes to be written, %d were\", buf.Len())\n\t}\n\ttst.Info(\"info\")\n\tif buf.Len() > 0 {\n\t\tt.Errorf(\"write info line: expected no bytes to be written, %d were\", buf.Len())\n\t}\n\ttst.Debug(\"debug\")\n\tif buf.Len() > 0 {\n\t\tt.Errorf(\"write debug line: expected no bytes to be written, %d were\", buf.Len())\n\t}\n}", "func SetFileShortName(hFile HANDLE, lpShortName string) bool {\n\tlpShortNameStr := unicode16FromString(lpShortName)\n\tret1 := syscall3(setFileShortName, 2,\n\t\tuintptr(hFile),\n\t\tuintptr(unsafe.Pointer(&lpShortNameStr[0])),\n\t\t0)\n\treturn ret1 != 0\n}", "func (p BaseLogCopy) Explain() string {\n\texplain := \"Collect New Relic log files (has overrides)\"\n\tif config.Flags.ShowOverrideHelp {\n\t\texplain += fmt.Sprintf(\"\\n%37s %s\", \" \", \"Override: logpath => set the path of the log file to collect (defaults to finding all logs)\")\n\t\texplain += 
fmt.Sprintf(\"\\n%37s %s\", \" \", \"Override: lastModifiedDate => in epochseconds, gathers logs newer than last modified date (defaults to now - 7 days)\")\n\t}\n\treturn explain\n}", "func (field *Field) SetShort() {\n\tfield.Short = field.Get(\"short\")\n}", "func usage() {\r\n\r\n\r\n\t// get only the file name from the absolute path in os.Args[0]\r\n\t_, file := filepath.Split(os.Args[0])\r\n\r\n fmt.Fprintf(os.Stderr, \"\\nBasic Usage: %s IP_Adderess:TCP_Port\\n\\n\", file )\r\n fmt.Fprintf(os.Stderr, \"Advanced Flag Usage: %s Flags:\\n\", file )\r\n flag.PrintDefaults()\r\n fmt.Fprintf(os.Stderr, \"\\n\")\r\n os.Exit(2)\r\n}", "func (l *LevelLog) Verbosef(format string, v ...interface{}) {\n\tif l.verbose {\n\t\tl.logger.SetPrefix(\"DEBUG: \")\n\t\tl.logger.Printf(format, v...)\n\t}\n}", "func WithMinLevel(lv Level) OptFunc {\n\treturn func(l *Logger) {\n\t\tl.SetMinLevel(lv)\n\t}\n}", "func ShortFuncName(i interface{}) string {\n\treturn strings.TrimPrefix(runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name(), \"main.(*View).\")\n}", "func Infof(msg string, args ...interface{}) {\n\tif !rtLogConf.showFileInfo {\n\t\tlogrus.Infof(msg, args...)\n\t\treturn\n\t}\n\n\tif pc, file, line, ok := runtime.Caller(1); ok {\n\t\tfileName, funcName := getBaseName(file, runtime.FuncForPC(pc).Name())\n\t\tlogrus.WithField(fileTag, fileName).WithField(lineTag, line).WithField(funcTag, funcName).Infof(msg, args...)\n\t} else {\n\t\tlogrus.Infof(msg, args...)\n\t}\n}", "func callerFileLog15Handler(callDepth int, h log.Handler) log.Handler {\n\treturn log.FuncHandler(func(r *log.Record) error {\n\t\tr.Call = stack.Caller(callDepth)\n\t\tr.Ctx = append(r.Ctx, \"caller\", fmt.Sprint(r.Call))\n\t\treturn h.Log(r)\n\t})\n}", "func StatusLong(c *Client, files []File, untracked StatusUntrackedMode, lineprefix string) (string, error) {\n\t// If no head commit: \"no changes yet\", else branch info\n\t// Changes to be committed: dgit diff-index --cached HEAD\n\t// Unmerged: git 
ls-files -u\n\t// Changes not staged: dgit diff-files\n\t// Untracked: dgit ls-files -o\n\tvar ret string\n\tindex, _ := c.GitDir.ReadIndex()\n\thasStaged := false\n\n\tvar lsfiles []File\n\tif len(files) == 0 {\n\t\tlsfiles = []File{File(c.WorkDir)}\n\t} else {\n\t\tlsfiles = files\n\t}\n\t// Start by getting a list of unmerged and keeping them in a map, so\n\t// that we can exclude them from the non-\"unmerged\"\n\tunmergedMap := make(map[File]bool)\n\tunmerged, err := LsFiles(c, LsFilesOptions{Unmerged: true}, lsfiles)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor _, f := range unmerged {\n\t\tfname, err := f.PathName.FilePath(c)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tunmergedMap[fname] = true\n\t}\n\n\tvar staged []HashDiff\n\thasCommit := false\n\tif head, err := c.GetHeadCommit(); err != nil {\n\t\t// There is no head commit to compare against, so just say\n\t\t// everything in the cache (which isn't unmerged) is new\n\t\tstaged, err := LsFiles(c, LsFilesOptions{Cached: true}, lsfiles)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tvar stagedMsg string\n\t\tif len(staged) > 0 {\n\t\t\thasStaged = true\n\t\t\tfor _, f := range staged {\n\t\t\t\tfname, err := f.PathName.FilePath(c)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\tif _, ok := unmergedMap[fname]; ok {\n\t\t\t\t\t// There's a merge conflict, it'l show up in \"Unmerged\"\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tstagedMsg += fmt.Sprintf(\"%v\\tnew file:\\t%v\\n\", lineprefix, fname)\n\t\t\t}\n\t\t}\n\n\t\tif stagedMsg != \"\" {\n\t\t\tret += fmt.Sprintf(\"%vChanges to be committed:\\n\", lineprefix)\n\t\t\tret += fmt.Sprintf(\"%v (use \\\"git rm --cached <file>...\\\" to unstage)\\n\", lineprefix)\n\t\t\tret += fmt.Sprintf(\"%v\\n\", lineprefix)\n\t\t\tret += stagedMsg\n\t\t\tret += fmt.Sprintf(\"%v\\n\", lineprefix)\n\t\t}\n\t} else {\n\t\thasCommit = true\n\t\tstaged, err = DiffIndex(c, DiffIndexOptions{Cached: true}, index, head, 
files)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\t// Staged\n\tif len(staged) > 0 {\n\t\thasStaged = true\n\n\t\tstagedMsg := \"\"\n\t\tfor _, f := range staged {\n\t\t\tfname, err := f.Name.FilePath(c)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\tif _, ok := unmergedMap[fname]; ok {\n\t\t\t\t// There's a merge conflict, it'l show up in \"Unmerged\"\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif f.Src == (TreeEntry{}) {\n\t\t\t\tstagedMsg += fmt.Sprintf(\"%v\\tnew file:\\t%v\\n\", lineprefix, fname)\n\t\t\t} else if f.Dst == (TreeEntry{}) {\n\t\t\t\tstagedMsg += fmt.Sprintf(\"%v\\tdeleted:\\t%v\\n\", lineprefix, fname)\n\t\t\t} else {\n\t\t\t\tstagedMsg += fmt.Sprintf(\"%v\\tmodified:\\t%v\\n\", lineprefix, fname)\n\t\t\t}\n\t\t}\n\t\tif stagedMsg != \"\" {\n\t\t\tret += fmt.Sprintf(\"%vChanges to be committed:\\n\", lineprefix)\n\t\t\tret += fmt.Sprintf(\"%v (use \\\"git reset HEAD <file>...\\\" to unstage)\\n\", lineprefix)\n\t\t\tret += fmt.Sprintf(\"%v\\n\", lineprefix)\n\t\t\tret += stagedMsg\n\t\t\tret += fmt.Sprintf(\"%v\\n\", lineprefix)\n\t\t}\n\t}\n\n\t// We already did the LsFiles for the unmerged, so just iterate over\n\t// them.\n\tif len(unmerged) > 0 {\n\t\tret += fmt.Sprintf(\"%vUnmerged paths:\\n\", lineprefix)\n\t\tret += fmt.Sprintf(\"%v (use \\\"git reset HEAD <file>...\\\" to unstage)\\n\", lineprefix)\n\t\tret += fmt.Sprintf(\"%v (use \\\"git add <file>...\\\" to mark resolution)\\n\", lineprefix)\n\t\tret += fmt.Sprintf(\"%v\\n\", lineprefix)\n\n\t\tfor i, f := range unmerged {\n\t\t\tfname, err := f.PathName.FilePath(c)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\tswitch f.Stage() {\n\t\t\tcase Stage1:\n\t\t\t\tswitch unmerged[i+1].Stage() {\n\t\t\t\tcase Stage2:\n\t\t\t\t\tif i >= len(unmerged)-2 {\n\t\t\t\t\t\t// Stage3 is missing, we've reached the end of the index.\n\t\t\t\t\t\tret += fmt.Sprintf(\"%v\\tdeleted by them:\\t%v\\n\", lineprefix, 
fname)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tswitch unmerged[i+2].Stage() {\n\t\t\t\t\tcase Stage3:\n\t\t\t\t\t\t// There's a stage1, stage2, and stage3. If they weren't all different, read-tree would\n\t\t\t\t\t\t// have resolved it as a trivial stage0 merge.\n\t\t\t\t\t\tret += fmt.Sprintf(\"%v\\tboth modified:\\t%v\\n\", lineprefix, fname)\n\t\t\t\t\tdefault:\n\t\t\t\t\t\t// Stage3 is missing, but we haven't reached the end of the index.\n\t\t\t\t\t\tret += fmt.Sprintf(\"%v\\tdeleted by them:\\t%v\\n\", lineprefix, fname)\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\tcase Stage3:\n\t\t\t\t\t// Stage2 is missing\n\t\t\t\t\tret += fmt.Sprintf(\"%v\\tdeleted by us:\\t%v\\n\", lineprefix, fname)\n\t\t\t\t\tcontinue\n\t\t\t\tdefault:\n\t\t\t\t\tpanic(\"Unhandled index\")\n\t\t\t\t}\n\t\t\tcase Stage2:\n\t\t\t\tif i == 0 || unmerged[i-1].Stage() != Stage1 {\n\t\t\t\t\t// If this is a Stage2, and the previous wasn't Stage1,\n\t\t\t\t\t// then we know the next one must be Stage3 or read-tree\n\t\t\t\t\t// would have handled it as a trivial merge.\n\t\t\t\t\tret += fmt.Sprintf(\"%v\\tboth added:\\t%v\\n\", lineprefix, fname)\n\t\t\t\t}\n\t\t\t\t// If the previous was Stage1, it was handled by the previous\n\t\t\t\t// loop iteration.\n\t\t\t\tcontinue\n\t\t\tcase Stage3:\n\t\t\t\t// There can't be just a Stage3 or read-tree would\n\t\t\t\t// have resolved it as Stage0. 
All cases were handled\n\t\t\t\t// by Stage1 or Stage2\n\t\t\t\tcontinue\n\t\t\tdefault:\n\t\t\t\t// If ls-files -u returned something other than\n\t\t\t\t// Stage1-3, there's an unrelated bug somewhere.\n\t\t\t\tpanic(\"Invalid unmerged stage\")\n\t\t\t}\n\t\t}\n\t\tret += fmt.Sprintf(\"%v\\n\", lineprefix)\n\t}\n\t// Not staged changes\n\tnotstaged, err := DiffFiles(c, DiffFilesOptions{}, lsfiles)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\thasUnstaged := false\n\tif len(notstaged) > 0 {\n\t\thasUnstaged = true\n\t\tnotStagedMsg := \"\"\n\t\tfor _, f := range notstaged {\n\t\t\tfname, err := f.Name.FilePath(c)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\tif _, ok := unmergedMap[fname]; ok {\n\t\t\t\t// There's a merge conflict, it'l show up in \"Unmerged\"\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif f.Src == (TreeEntry{}) {\n\t\t\t\tnotStagedMsg += fmt.Sprintf(\"%v\\tnew file:\\t%v\\n\", lineprefix, fname)\n\t\t\t} else if f.Dst == (TreeEntry{}) {\n\t\t\t\tnotStagedMsg += fmt.Sprintf(\"%v\\tdeleted:\\t%v\\n\", lineprefix, fname)\n\t\t\t} else {\n\t\t\t\tnotStagedMsg += fmt.Sprintf(\"%v\\tmodified:\\t%v\\n\", lineprefix, fname)\n\t\t\t}\n\t\t}\n\t\tif notStagedMsg != \"\" {\n\t\t\tret += fmt.Sprintf(\"%vChanges not staged for commit:\\n\", lineprefix)\n\t\t\tret += fmt.Sprintf(\"%v (use \\\"git add <file>...\\\" to update what will be committed)\\n\", lineprefix)\n\t\t\tret += fmt.Sprintf(\"%v (use \\\"git checkout -- <file>...\\\" to discard changes in working directory)\\n\", lineprefix)\n\t\t\tret += fmt.Sprintf(\"%v\\n\", lineprefix)\n\t\t\tret += notStagedMsg\n\t\t\tret += fmt.Sprintf(\"%v\\n\", lineprefix)\n\t\t}\n\t}\n\n\thasUntracked := false\n\tif untracked != StatusUntrackedNo {\n\t\tlsfilesopts := LsFilesOptions{\n\t\t\tOthers: true,\n\t\t\tExcludeStandard: true, // Configurable some day\n\t\t}\n\t\tif untracked == StatusUntrackedNormal {\n\t\t\tlsfilesopts.Directory = true\n\t\t}\n\n\t\tuntracked, err := LsFiles(c, lsfilesopts, 
lsfiles)\n\t\tif len(untracked) > 0 {\n\t\t\thasUntracked = true\n\t\t}\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif len(untracked) > 0 {\n\t\t\tret += fmt.Sprintf(\"%vUntracked files:\\n\", lineprefix)\n\t\t\tret += fmt.Sprintf(\"%v (use \\\"git add <file>...\\\" to include in what will be committed)\\n\", lineprefix)\n\t\t\tret += fmt.Sprintf(\"%v\\n\", lineprefix)\n\n\t\t\tfor _, f := range untracked {\n\t\t\t\tfname, err := f.PathName.FilePath(c)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\t\t\t\tif fname.IsDir() {\n\t\t\t\t\tret += fmt.Sprintf(\"%v\\t%v/\\n\", lineprefix, fname)\n\t\t\t\t} else {\n\t\t\t\t\tret += fmt.Sprintf(\"%v\\t%v\\n\", lineprefix, fname)\n\t\t\t\t}\n\t\t\t}\n\t\t\tret += fmt.Sprintf(\"%v\\n\", lineprefix)\n\t\t}\n\t} else {\n\t\tif hasUnstaged {\n\t\t\tret += fmt.Sprintf(\"%vUntracked files not listed (use -u option to show untracked files)\\n\", lineprefix)\n\t\t}\n\t}\n\tvar summary string\n\tswitch {\n\tcase hasStaged && hasUntracked && hasCommit:\n\tcase hasStaged && hasUntracked && !hasCommit:\n\tcase hasStaged && !hasUntracked && hasCommit && !hasUnstaged:\n\tcase hasStaged && !hasUntracked && hasCommit && hasUnstaged:\n\t\tif untracked != StatusUntrackedNo {\n\t\t\tsummary = `no changes added to commit (use \"git add\" and/or \"git commit -a\")`\n\t\t}\n\tcase hasStaged && !hasUntracked && !hasCommit:\n\tcase !hasStaged && hasUntracked && hasCommit:\n\t\tfallthrough\n\tcase !hasStaged && hasUntracked && !hasCommit:\n\t\tsummary = `nothing added to commit but untracked files present (use \"git add\" to track)`\n\tcase !hasStaged && !hasUntracked && hasCommit && !hasUnstaged:\n\t\tsummary = \"nothing to commit, working tree clean\"\n\tcase !hasStaged && !hasUntracked && hasCommit && hasUnstaged:\n\t\tsummary = `no changes added to commit (use \"git add\" and/or \"git commit -a\")`\n\tcase !hasStaged && !hasUntracked && !hasCommit:\n\t\tsummary = `nothing to commit (create/copy files and use \"git 
add\" to track)`\n\tdefault:\n\t}\n\tif summary != \"\" {\n\t\tret += lineprefix + summary + \"\\n\"\n\t}\n\treturn ret, nil\n}", "func Short() (shaPre string, ver string) {\n\treturn short(Sha, Version)\n}", "func (t *Type) ShortString(pstate *PackageState) string {\n\treturn pstate.Tconv(t, pstate.FmtLeft, pstate.FErr, 0)\n}", "func (m *Msg) PrintFileLine(w io.Writer) (n int, err error) {\n\treturn fmt.Fprintf(w, \"%s:%d\", m.File, m.Line)\n}", "func (game *Game) ShortName() string {\n\tcs := C.GameLongName(game.game)\n\tstr := C.GoString(cs)\n\tC.free(unsafe.Pointer(cs))\n\treturn str\n}", "func (u *User) ShortName(length int) string {\n\treturn strutil.Ellipsis(u.Name, length)\n}", "func MinLevel(level zap.Level) Option {\n\treturn func(sh *Hook) {\n\t\tsh.minLevel = level\n\t}\n}", "func Startf(format string, args ...interface{}) { logRaw(LevelStart, 2, format, args...) }", "func logWithFilename() *log.Entry {\n\t_, file, line, ok := runtime.Caller(2)\n\tif !ok {\n\t\tfile = \"<???>\"\n\t\tline = 0\n\t} else {\n\t\tpath := strings.Split(file, \"/\")\n\t\tif len(path) > 1 {\n\t\t\tfile = path[len(path)-2] + \"/\" + path[len(path)-1]\n\t\t}\n\n\t}\n\treturn log.WithField(\"file\", file).WithField(\"line\", line)\n}", "func (r ReferenceName) Short() string {\n\ts := string(r)\n\tres := s\n\tfor _, format := range RefRevParseRules {\n\t\t_, err := fmt.Sscanf(s, format, &res)\n\t\tif err == nil {\n\t\t\tcontinue\n\t\t}\n\t}\n\n\treturn res\n}", "func Debugf(format string, v ...interface{}) {\n\tif jl.level != INFO {\n\t\tvar s string\n\t\tjl.stdlog.SetPrefix(\"[DEBUG] \")\n\t\tif jl.flag == LstdFlags|Lshortfile {\n\t\t\ts = generateStdflagShortFile()\n\t\t}\n\n\t\tjl.stdlog.Printf(s+format, v...)\n\t}\n}", "func Debugf(format string, args ...interface{}) {\n\tif logger.Level >= logrus.DebugLevel {\n\t\tentry := logger.WithFields(logrus.Fields{\"file\": fileInfo(2)})\n\t\tentry.Debugf(format, args...)\n\t}\n}", "func (o CustomLayerOutput) ShortName() 
pulumi.StringOutput {\n\treturn o.ApplyT(func(v *CustomLayer) pulumi.StringOutput { return v.ShortName }).(pulumi.StringOutput)\n}", "func (configMapTemplateFactory) ShortName(definition client.ResourceDefinition) string {\n\tif definition.ConfigMap == nil {\n\t\treturn \"\"\n\t}\n\treturn definition.ConfigMap.Name\n}", "func fileAndLine(lvl ...int) Fields {\n\tlevel := 2\n\tif len(lvl) == 1 {\n\t\tlevel = lvl[0]\n\t}\n\t_, file, line, _ := runtime.Caller(level)\n\t_, fileName := path.Split(file)\n\n\treturn Fields{\n\t\t\"file\": fileName,\n\t\t\"line\": line,\n\t}\n}", "func (p FnacParser) GetShortURL() string {\n\treturn p.shortURL\n}", "func Verbosef(s string) {\n\tif debug != false || verbose != false {\n\t\tlog.Print(fmt.Sprint(s))\n\t}\n}", "func (exec ExecGitFn) showFile(result io.Writer, refspec, file string) error {\r\n\tfileRef := fmt.Sprintf(`%s:%s`, refspec, file)\r\n\treturn exec(result, `show`, fileRef)\r\n}", "func (fs *MediaScan) Verbose(v bool)\t{\n\tfs.verbose = v\n}", "func Debugf(format string, a ...interface{}) {\n\tif os.Getenv(\"DEBUG\") != \"\" {\n\t\t_, file, line, ok := runtime.Caller(1)\n\t\tif !ok {\n\t\t\tfile = \"<unknown>\"\n\t\t\tline = -1\n\t\t} else {\n\t\t\tfile = filepath.Base(file)\n\t\t}\n\t\tfmt.Fprintf(os.Stderr, fmt.Sprintf(\"[debug] %s:%d %s\\n\", file, line, format), a...)\n\t}\n}", "func ShowShortVersionBanner() {\n\toutput := colors.NewColorWriter(os.Stdout)\n\tInitBanner(output, bytes.NewBufferString(colors.MagentaBold(shortVersionBanner)))\n}", "func ShowShortVersionBanner() {\n\toutput := colors.NewColorWriter(os.Stdout)\n\tInitBanner(output, bytes.NewBufferString(colors.MagentaBold(shortVersionBanner)))\n}", "func Verbosef(format string, args ...interface{}) {\n\tif globalOptions.verbosity >= 1 {\n\t\tPrintf(format, args...)\n\t}\n}", "func (l *LevelLog) Verboseln(v ...interface{}) {\n\tif l.verbose {\n\t\tl.logger.SetPrefix(\"DEBUG: \")\n\t\tl.logger.Println(v...)\n\t}\n}", "func (ref *Reference) ShortName() string 
{\n\treturn RefName(ref.Name).ShortName()\n}", "func (l LocalLinker) Link(file string, line int) string {\n\treturn localFileFunc(file) + \":\" + strconv.Itoa(line)\n}", "func FmtTimeShort(t time.Time) string {\n\tif t == (time.Time{}) {\n\t\treturn \"\"\n\t}\n\treturn t.Format(\"2006-01-02\")\n}", "func SourceLine(e error) string {\n\tfile, line := Location(e)\n\tif line != 0 {\n\t\treturn fmt.Sprintf(\"%s:%d\", file, line)\n\t}\n\treturn \"\"\n}", "func VerboseLogFilter(hdr *tar.Header) bool {\n\tlog.Println(hdr.Name)\n\treturn true\n}", "func cutShort(h *Handler, c echo.Context) error {\n\tvar u string\n\tvar e error\n\tcustom := c.FormValue(\"custom\")\n\t// TODO: move this in CreateByLongURL\n\tif u, e = algo.NormalizeURL(c.FormValue(\"url\")); e != nil {\n\t\terrorMessage := fmt.Sprintf(\"Error in URL for %s: %s\", c.FormValue(\"url\"), e)\n\t\tlog.Error(errorMessage)\n\t\treturn c.String(http.StatusUnprocessableEntity, errorMessage)\n\t}\n\tresult, e := h.Store.CreateByLongURL(u, custom)\n\tif e != nil {\n\t\terrorMessage := fmt.Sprintf(\"Error in shortening for %s: %s\", c.FormValue(\"url\"), e)\n\t\tlog.Error(errorMessage)\n\t\treturn c.String(http.StatusUnprocessableEntity, errorMessage)\n\t}\n\treturn c.String(http.StatusCreated, result.ShortURL())\n}", "func ServeNameShort(w http.ResponseWriter, r *http.Request) {\n\thttp.Error(w, \"Full name needs to be at least 5 letters long.\", http.StatusBadRequest)\n}", "func Short() string {\n\tresult := Version\n\n\tif GitBranch != \"\" {\n\t\tif result != \"\" {\n\t\t\tresult += \" \"\n\t\t}\n\n\t\tresult += GitBranch\n\t\tif GitCommit != \"\" {\n\t\t\tresult += fmt.Sprintf(\"#%s\", GitCommit)\n\t\t}\n\t}\n\n\tif result == \"\" {\n\t\treturn \"unknown\"\n\t}\n\n\treturn result\n}", "func ShortLT(v string) predicate.Ethnicity {\n\treturn predicate.Ethnicity(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldShort), v))\n\t})\n}" ]
[ "0.5979167", "0.5713366", "0.56390464", "0.5635513", "0.5625114", "0.5615503", "0.5562804", "0.55347633", "0.5523255", "0.541619", "0.54031026", "0.536245", "0.53480095", "0.53017974", "0.52917963", "0.5189681", "0.51692307", "0.50795686", "0.50761473", "0.5056755", "0.5005947", "0.49762866", "0.49762866", "0.49762866", "0.4966531", "0.4950201", "0.49480635", "0.49374518", "0.4934864", "0.4901911", "0.4901396", "0.4900209", "0.4898501", "0.4898383", "0.48964882", "0.48829725", "0.4863313", "0.4862721", "0.48447633", "0.4844491", "0.483955", "0.48383534", "0.4819057", "0.4814047", "0.48077452", "0.48064315", "0.4794802", "0.47869495", "0.47759074", "0.4766785", "0.476124", "0.47607172", "0.47598425", "0.47583324", "0.47579223", "0.47568667", "0.47499084", "0.4743712", "0.47418725", "0.4738399", "0.47374615", "0.47355703", "0.4726394", "0.47244066", "0.4717098", "0.47074685", "0.47030553", "0.46972963", "0.46936852", "0.4686778", "0.46761918", "0.4671518", "0.46705922", "0.4665834", "0.46555787", "0.46555096", "0.465476", "0.46453923", "0.46180847", "0.46144027", "0.46121657", "0.45751643", "0.457161", "0.45673013", "0.4558", "0.4556972", "0.45544046", "0.45538795", "0.45538795", "0.4545195", "0.4543087", "0.45398962", "0.45380935", "0.45363912", "0.45345607", "0.45310533", "0.45255747", "0.45243582", "0.45226225", "0.452239" ]
0.60543543
0
NewHealthController creates a health controller.
func NewHealthController(service *goa.Service) *HealthController { return &HealthController{Controller: service.NewController("HealthController")} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewHealthController(router *mux.Router, r *render.Render) *HealthController {\n\tctrl := &HealthController{router, r}\n\tctrl.Register()\n\treturn ctrl\n}", "func NewHealthController() *HealthController {\n\treturn &HealthController{}\n}", "func NewHealthController() *HealthController {\n\treturn new(HealthController)\n}", "func NewHealthController(service *goa.Service, zapi_list ZAPIStructure) *HealthController {\n\treturn &HealthController{\n\t\tController: service.NewController(\"HealthController\"),\n\t\tzapi_list: zapi_list,\n\t}\n}", "func NewController() *Controller {\n\treturn &Controller{}\n}", "func NewController() Controller {\n\treturn &controller{}\n}", "func NewController(name string) *Controller {\n\treturn &Controller{\n\t\tRoutes: NewRoutes(name),\n\t}\n}", "func NewController() *Controller {\n\treturn &Controller{wrapper: NewWrapper()}\n}", "func NewController() *Controller {\n\treturn &Controller{Logger: logger.NewLogger()}\n}", "func NewController() controller.Controller {\n\treturn &Controller{}\n}", "func NewHealthCheckController(\n\tlogger logging.LoggerInterface,\n\tappMonitor application.MonitorIterface,\n\tdependenciesMonitor services.MonitorIterface,\n) *HealthCheckController {\n\treturn &HealthCheckController{\n\t\tlogger: logger,\n\t\tappMonitor: appMonitor,\n\t\tdependenciesMonitor: dependenciesMonitor,\n\t}\n}", "func NewController() *Controller {\n\treturn &Controller{\n\t\tClouds: make(map[string]CloudProvider),\n\t\t// WorkerOptions: NewWorkerOptions(),\n\t\tprovisionErr: NewErrCloudProvision(),\n\t}\n}", "func NewController() Controller {\n\treturn &controller{\n\t\tClient: client.NewClient(),\n\t}\n}", "func NewController() *Controller {\n controller := Controller{}\n\n return &controller\n}", "func NewController(cfg *config.Config) (*Controller, error) {\n\tsrv, err := service.NewService(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Controller{\n\t\tService: srv,\n\t}, nil\n}", "func New() *Controller 
{\n\treturn &Controller{}\n}", "func NewController() Controller {\n\treturn &controller{\n\t\tprojectCtl: project.Ctl,\n\t}\n}", "func NewController(client kubernetes.Interface) *Controller {\n\tshared := informers.NewSharedInformerFactory(client, time.Second*30)\n\tinform := shared.Apps().V1().Deployments()\n\tcontrl := &Controller{\n\t\tclient: client,\n\t\tinformer: inform.Informer(),\n\t\tlister: inform.Lister(),\n\t\tlogger: logrus.New(),\n\t\tqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), \"regitseel\"),\n\t}\n\n\tinform.Informer().AddEventHandler(\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: contrl.enqueue,\n\t\t\tUpdateFunc: func(old, new interface{}) {\n\t\t\t\tcontrl.enqueue(new)\n\t\t\t},\n\t\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t\td := obj.(*appsv1.Deployment)\n\t\t\t\tif err := contrl.delete(d); err != nil {\n\t\t\t\t\tcontrl.logger.Errorf(\"failed to delete from api: %v\", d.Name)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t)\n\n\treturn contrl\n}", "func NewHospitalController(hospitalService service.HospitalService) HospitalController {\r\n\treturn &hospitalController{\r\n\t\thospitalService: hospitalService,\r\n\t}\r\n}", "func New(b *base.Controller, moduleID string, cu categoryUsecases.Usecase) *Controller {\n\treturn &Controller{\n\t\tb,\n\t\tmoduleID,\n\t\tcu,\n\t}\n}", "func NewController() Controller {\n\treturn &controller{\n\t\tiManager: instance.Mgr,\n\t\tpManager: policy.Mgr,\n\t\tscheduler: scheduler.Sched,\n\t\texecutionMgr: task.NewExecutionManager(),\n\t}\n}", "func (app *Application) NewController(resource *Resource) *Controller {\n\tc := &Controller{\n\t\tresource: resource,\n\t\tcustomHandlers: make(map[route]handlerChain),\n\t}\n\n\tapp.controllers[c.resource] = c\n\treturn c\n}", "func NewController(\n\topt controller.Options,\n\tnotifications chan struct{},\n\tserviceInformer servinginformers.ServiceInformer,\n) *Controller {\n\tlogger, _ := zap.NewProduction()\n\topt.Logger = 
logger.Sugar()\n\tc := &Controller{\n\t\tBase: controller.NewBase(opt, controllerAgentName, \"Services\"),\n\t}\n\n\tc.Logger.Info(\"Setting up event handlers\")\n\tserviceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: c.Enqueue,\n\t\tUpdateFunc: controller.PassNew(c.Enqueue),\n\t\tDeleteFunc: c.Enqueue,\n\t})\n\n\treturn c\n}", "func NewController(address string) controller.Controller {\n\treturn &Controller{address, nil}\n}", "func NewController(exec boil.ContextExecutor) Controller {\n\trepo := &personRepository{executor: exec}\n\tsvc := &personService{repo: repo}\n\tpc := &personController{service: svc}\n\treturn pc\n}", "func NewController(brigade brigade.Service) Controller {\n\treturn &controller{\n\t\tbrigade: brigade,\n\t}\n}", "func NewController(service *service.PanicService) *PanicController {\n\tvar pc PanicController\n\tpc.Service = service\n\treturn &pc\n}", "func NewController(repository Repository) Controller {\n\treturn controller{repository: repository}\n}", "func NewController() *Controller {\n\treturn &Controller{\n\t\tstats: tabletenv.NewStats(servenv.NewExporter(\"MockController\", \"Tablet\")),\n\t\tqueryServiceEnabled: false,\n\t\tBroadcastData: make(chan *BroadcastData, 10),\n\t\tStateChanges: make(chan *StateChange, 10),\n\t\tqueryRulesMap: make(map[string]*rules.Rules),\n\t}\n}", "func New() *Controller {\n\treturn &Controller{\n\t\tValidatePayload: ValidatePayload,\n\t}\n}", "func NewController(m driver.StackAnalysisInterface) *Controller {\n\treturn &Controller{\n\t\tm: m,\n\t}\n}", "func NewController(params ControllerParams) (*Controller, error) {\n\t// If the BGP control plane is disabled, just return nil. This way the hive dependency graph is always static\n\t// regardless of config. 
The lifecycle has not been appended so no work will be done.\n\tif !params.DaemonConfig.BGPControlPlaneEnabled() {\n\t\treturn nil, nil\n\t}\n\n\tc := Controller{\n\t\tSig: params.Sig,\n\t\tBGPMgr: params.RouteMgr,\n\t\tPolicyResource: params.PolicyResource,\n\t\tNodeSpec: params.NodeSpec,\n\t}\n\n\tparams.Lifecycle.Append(&c)\n\n\treturn &c, nil\n}", "func NewController(params ControllerParams) (*Controller, error) {\n\t// If the BGP control plane is disabled, just return nil. This way the hive dependency graph is always static\n\t// regardless of config. The lifecycle has not been appended so no work will be done.\n\tif !params.DaemonConfig.BGPControlPlaneEnabled() {\n\t\treturn nil, nil\n\t}\n\n\tc := Controller{\n\t\tSig: params.Sig,\n\t\tBGPMgr: params.RouteMgr,\n\t\tPolicyResource: params.PolicyResource,\n\t\tLocalNodeStore: params.LocalNodeStore,\n\t}\n\n\tparams.Lifecycle.Append(&c)\n\n\treturn &c, nil\n}", "func NewController() Controller {\n\treturn &controller{\n\t\tprojectMgr: project.Mgr,\n\t\tmetaMgr: metamgr.NewDefaultProjectMetadataManager(),\n\t\tallowlistMgr: allowlist.NewDefaultManager(),\n\t}\n}", "func NewController(d *CSIDriver) csi.ControllerServer {\n\treturn &controller{\n\t\tdriver: d,\n\t\tcapabilities: newControllerCapabilities(),\n\t}\n}", "func NewController(commandBus command.Bus) Controller {\n\treturn &controllerImplement{commandBus}\n}", "func NewController(betValidator BetValidator, betService BetService) *Controller {\n\treturn &Controller{\n\t\tbetValidator: betValidator,\n\t\tbetService: betService,\n\t}\n}", "func NewController(betValidator BetValidator, betService BetService) *Controller {\n\treturn &Controller{\n\t\tbetValidator: betValidator,\n\t\tbetService: betService,\n\t}\n}", "func NewController(betService BetService) *Controller {\n\treturn &Controller{\n\t\tbetService: betService,\n\t}\n}", "func NewController(runner pitr.Runner, cluster cluster.Controller) Controller {\n\treturn Controller{\n\t\trunner: 
runner,\n\t\tcluster: cluster,\n\t}\n}", "func NewController(backendPool pool.Interface) *Controller {\n\treturn &Controller{\n\t\tbackendPool: backendPool,\n\t}\n}", "func NewController(t mockConstructorTestingTNewController) *Controller {\n\tmock := &Controller{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewController(t mockConstructorTestingTNewController) *Controller {\n\tmock := &Controller{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (c *Config) NewController(e *env.Env) *Controller {\n\tctl := NewController(e)\n\tctl.DeviceIndex = c.DeviceIndex\n\tctl.Verbose = c.Verbose\n\treturn ctl\n}", "func NewController(logger *log.Logger, storageApiURL string, config resources.UbiquityPluginConfig) (*Controller, error) {\n\n\tremoteClient, err := remote.NewRemoteClient(logger, storageApiURL, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Controller{logger: logger, Client: remoteClient, exec: utils.NewExecutor()}, nil\n}", "func NewController() Controller {\n\treturn &controller{\n\t\treservedExpiration: defaultReservedExpiration,\n\t\tquotaMgr: quota.Mgr,\n\t}\n}", "func New(b *base.Controller, moduleID string, uu userUsecases.Usecase) *Controller {\n\treturn &Controller{\n\t\tb,\n\t\tmoduleID,\n\t\tuu,\n\t}\n}", "func NewController(cfg *rest.Config) *Controller {\n\tclient := appsv1client.NewForConfigOrDie(cfg)\n\tkubeClient := kubernetes.NewForConfigOrDie(cfg)\n\tqueue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())\n\tstopCh := make(chan struct{}) // TODO: hook this up to SIGTERM/SIGINT\n\n\tcsif := externalversions.NewSharedInformerFactoryWithOptions(clusterclient.NewForConfigOrDie(cfg), resyncPeriod)\n\n\tc := &Controller{\n\t\tqueue: queue,\n\t\tclient: client,\n\t\tclusterLister: csif.Cluster().V1alpha1().Clusters().Lister(),\n\t\tkubeClient: kubeClient,\n\t\tstopCh: 
stopCh,\n\t}\n\tcsif.WaitForCacheSync(stopCh)\n\tcsif.Start(stopCh)\n\n\tsif := informers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod)\n\tsif.Apps().V1().Deployments().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) { c.enqueue(obj) },\n\t\tUpdateFunc: func(_, obj interface{}) { c.enqueue(obj) },\n\t})\n\tsif.WaitForCacheSync(stopCh)\n\tsif.Start(stopCh)\n\n\tc.indexer = sif.Apps().V1().Deployments().Informer().GetIndexer()\n\tc.lister = sif.Apps().V1().Deployments().Lister()\n\n\treturn c\n}", "func NewController(db *sql.DB) *Controller {\n\treturn &Controller{db: db}\n}", "func NewController(app AppInterface) *Controller {\n\tc := new(Controller)\n\n\t// for debug logs\n\t// log.SetLevel(log.DebugLevel)\n\n\t// Save the handler\n\tc.app = app\n\treturn c\n}", "func NewController(cxn *connection.Connection, dryRun bool) *Controller {\n\tctl := &Controller{\n\t\tcxn: cxn,\n\t\tprogress: progressbars.New(),\n\t\tdryRun: dryRun,\n\t}\n\tctl.progress.RefreshRate = 3 * time.Second\n\treturn ctl\n}", "func NewController(dao Dao) *Controller {\n\treturn &Controller{Dao: dao}\n}", "func NewController(ctx context.Context, cmw configmap.Watcher) *controller.Impl {\n\tsubscriptionInformer := subscriptioninformersv1alpha1.Get(ctx)\n\teventActivationInformer := eventactivationinformersv1alpha1.Get(ctx)\n\tknativeLib, err := util.NewKnativeLib()\n\tif err != nil {\n\t\tpanic(\"Failed to initialize knative lib\")\n\t}\n\tStatsReporter, err := NewStatsReporter()\n\tif err != nil {\n\t\tpanic(\"Failed to Kyma Subscription Controller stats reporter\")\n\t}\n\n\tr := &Reconciler{\n\t\tBase: reconciler.NewBase(ctx, controllerAgentName, cmw),\n\t\tsubscriptionLister: subscriptionInformer.Lister(),\n\t\teventActivationLister: eventActivationInformer.Lister(),\n\t\tkymaEventingClient: eventbusclient.Get(ctx).EventingV1alpha1(),\n\t\tknativeLib: knativeLib,\n\t\topts: opts.DefaultOptions(),\n\t\ttime: 
util.NewDefaultCurrentTime(),\n\t\tStatsReporter: StatsReporter,\n\t}\n\timpl := controller.NewImpl(r, r.Logger, reconcilerName)\n\n\tsubscriptionInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue))\n\n\tregisterMetrics()\n\n\treturn impl\n}", "func newHelloController(helloService HelloService) *helloController {\n\treturn &helloController{\n\t\thelloService: helloService,\n\t}\n}", "func NewController(todoService todo.UseCase) *Controller {\n\treturn &Controller{\n\t\ttodoService: todoService,\n\t}\n}", "func NewController(t *testing.T) (*gomock.Controller, context.Context) {\n\tctx := context.Background()\n\treturn gomock.WithContext(ctx, t)\n}", "func NewController(customer customer.Service) *Controller {\n\treturn &Controller{\n\t\tcustomer: customer,\n\t}\n}", "func NewController(context *clusterd.Context, containerImage string) *Controller {\n\treturn &Controller{\n\t\tcontext: context,\n\t\tcontainerImage: containerImage,\n\t}\n}", "func NewController(namespace string) (*Controller, error) {\n\tconfig, err := clientcmd.BuildConfigFromFlags(*ClusterURL, \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclientset, err := typedv1.NewForConfig(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ns *v1.Namespace\n\tif namespace != \"\" {\n\t\tns, err = clientset.Namespaces().Get(namespace, metav1.GetOptions{})\n\t} else {\n\t\tns, err = createNamespace(clientset)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Controller{\n\t\tclient: clientset,\n\t\tnamespace: ns,\n\t\trestConfig: config,\n\t\tfixedNs: namespace != \"\",\n\t}, nil\n}", "func NewController(\n\texperiment core.ExperimentStore,\n\tevent core.EventStore,\n\tttlc *TTLconfig,\n) *Controller {\n\treturn &Controller{\n\t\texperiment: experiment,\n\t\tevent: event,\n\t\tttlconfig: ttlc,\n\t}\n}", "func NewHealth(logger *log.Logger) health.Service {\n\treturn &healthsrvc{logger}\n}", "func NewController(\n\topts *reconciler.Options,\n\trevisionInformer 
servinginformers.RevisionInformer,\n\trevSynch RevisionSynchronizer,\n\tinformerResyncInterval time.Duration,\n) *controller.Impl {\n\n\tc := &Reconciler{\n\t\tBase: reconciler.NewBase(*opts, controllerAgentName),\n\t\trevisionLister: revisionInformer.Lister(),\n\t\trevSynch: revSynch,\n\t}\n\timpl := controller.NewImpl(c, c.Logger, \"Autoscaling\")\n\n\tc.Logger.Info(\"Setting up event handlers\")\n\trevisionInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: impl.Enqueue,\n\t\tUpdateFunc: controller.PassNew(impl.Enqueue),\n\t\tDeleteFunc: impl.Enqueue,\n\t})\n\n\treturn impl\n}", "func NewController(productService contract.ProductService) *Controller {\n\tonce.Do(func() {\n\t\tinstance = &Controller{\n\t\t\tproductService: productService,\n\t\t}\n\t})\n\treturn instance\n}", "func NewController(repository storage.Repository, resourceBaseURL string, objectType types.ObjectType, objectBlueprint func() types.Object) *BaseController {\n\treturn &BaseController{\n\t\trepository: repository,\n\t\tresourceBaseURL: resourceBaseURL,\n\t\tobjectBlueprint: objectBlueprint,\n\t\tobjectType: objectType,\n\t}\n}", "func New(ctx context.Context, config *config.CleanupConfig, db *database.Database, h *render.Renderer) *Controller {\n\tlogger := logging.FromContext(ctx)\n\treturn &Controller{\n\t\tconfig: config,\n\t\tdb: db,\n\t\th: h,\n\t\tlogger: logger,\n\t}\n}", "func NewCreateGoalController(cgtRepos *persistence.Services, logger *log.Logger, authorizationService authorization.JwtService) Controller {\n\tcreateGoalUsecase := usecase.NewCreateGoalUsecase(&cgtRepos.Achiever, &cgtRepos.Goal, authorizationService)\n\n\tctrl := &createGoalController{\n\t\tUsecase: createGoalUsecase,\n\t\tLogger: logger,\n\t\tAuthorization: authorizationService,\n\t}\n\treturn ctrl\n}", "func NewController(\n\tctx context.Context,\n\tcmw configmap.Watcher,\n) *controller.Impl {\n\tlogger := logging.FromContext(ctx)\n\n\tbrokercellInformer := 
brokercell.Get(ctx)\n\tbrokerLister := brokerinformer.Get(ctx).Lister()\n\tdeploymentLister := deploymentinformer.Get(ctx).Lister()\n\tsvcLister := serviceinformer.Get(ctx).Lister()\n\tepLister := endpointsinformer.Get(ctx).Lister()\n\thpaLister := hpainformer.Get(ctx).Lister()\n\n\tbase := reconciler.NewBase(ctx, controllerAgentName, cmw)\n\tr, err := NewReconciler(base, brokerLister, svcLister, epLister, deploymentLister)\n\tif err != nil {\n\t\tlogger.Fatal(\"Failed to create BrokerCell reconciler\", zap.Error(err))\n\t}\n\tr.hpaLister = hpaLister\n\timpl := v1alpha1brokercell.NewImpl(ctx, r)\n\n\tlogger.Info(\"Setting up event handlers.\")\n\n\t// TODO(https://github.com/google/knative-gcp/issues/912) Change period back to 5 min once controller\n\t// watches for data plane components.\n\tbrokercellInformer.Informer().AddEventHandlerWithResyncPeriod(controller.HandleAll(impl.Enqueue), 30*time.Second)\n\n\t// Watch data plane components created by brokercell so we can update brokercell status immediately.\n\t// 1. Watch deployments for ingress, fanout and retry\n\tdeploymentinformer.Get(ctx).Informer().AddEventHandler(handleResourceUpdate(impl))\n\t// 2. Watch ingress endpoints\n\tendpointsinformer.Get(ctx).Informer().AddEventHandler(handleResourceUpdate(impl))\n\t// 3. 
Watch hpa for ingress, fanout and retry deployments\n\thpainformer.Get(ctx).Informer().AddEventHandler(handleResourceUpdate(impl))\n\n\treturn impl\n}", "func New(ctx context.Context, config *config.ServerConfig, h *render.Renderer) *Controller {\n\tlogger := logging.FromContext(ctx)\n\n\treturn &Controller{\n\t\tconfig: config,\n\t\th: h,\n\t\tlogger: logger,\n\t}\n}", "func NewController(client CopilotClient, store model.ConfigStore, logger logger, timeout, checkInterval time.Duration) *Controller {\n\treturn &Controller{\n\t\tclient: client,\n\t\tstore: store,\n\t\tlogger: logger,\n\t\ttimeout: timeout,\n\t\tcheckInterval: checkInterval,\n\t\tstorage: storage{\n\t\t\tvirtualServices: make(map[string]*model.Config),\n\t\t\tdestinationRules: make(map[string]*model.Config),\n\t\t},\n\t}\n}", "func NewController() node.Initializer {\n\treturn controller{}\n}", "func NewController(region string, networkManager *NetworkManager, playerManager *PlayerManager, firebase *triebwerk.Firebase, masterServer MasterServerClient) *Controller {\n\treturn &Controller{\n\t\tnetworkManager: networkManager,\n\t\tplayerManager: playerManager,\n\t\tstate: model.NewGameState(region),\n\t\tfirebase: firebase,\n\t\tmasterServer: masterServer,\n\t}\n}", "func New(client vpnkit.Client, services corev1client.ServicesGetter) *Controller {\n\treturn &Controller{\n\t\tservices: services,\n\t\tclient: client,\n\t}\n}", "func New(s *service.Service) *Controller {\n\tlogger.Println(\"New controller instance was initialized\")\n\treturn &Controller{\n\t\tservice: s,\n\t}\n}", "func NewPatientController() *PatientController {\n\treturn &PatientController{}\n}", "func NewController(kubeClient kubernetes.Interface, nodesInformer informers.NodeInformer, nodeLabel *NodeLabel) *Controller {\n\tklog.V(4).Info(\"Creating event broadcaster\")\n\teventBroadcaster := record.NewBroadcaster()\n\teventBroadcaster.StartLogging(klog.Infof)\n\teventBroadcaster.StartRecordingToSink(&typed.EventSinkImpl{Interface: 
kubeClient.CoreV1().Events(\"\")})\n\n\tc := &Controller{\n\t\tkubeClient: kubeClient,\n\t\tnodesLister: nodesInformer.Lister(),\n\t\tnodesSynced: nodesInformer.Informer().HasSynced,\n\t\tworkqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerName),\n\t\trecorder: eventBroadcaster.NewRecorder(scheme.Scheme, api.EventSource{Component: controllerName}),\n\t\tnodeLabel: nodeLabel,\n\t}\n\n\tnodesInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: c.handleNode,\n\t\tUpdateFunc: func(old, new interface{}) {\n\t\t\tc.handleNode(new)\n\t\t},\n\t})\n\n\treturn c\n}", "func NewController(ctx context.Context) *Controller {\n\treturn &Controller{\n\t\tctx: ctx,\n\t\texplorerClient: newClientConn(1000, 10000),\n\t\trecordClient: newClientConn(1000, 10000),\n\t\tsubTree: make(chan *URLEntry, 1000),\n\t\trecord: make(chan *URLEntry, 1000),\n\t\turlCache: make(map[string]*URLEntry),\n\t\texplorerStat: newRoutineStat(0),\n\t\trecordStat: newRoutineStat(0),\n\t}\n}", "func New(tl TemplateLoader, td TemplateDecoder, logger log.Logger, configuration *EnvironmentServiceConfigController, statusPublisher StatusPublisher) *Controller {\n\treturn &Controller{\n\t\ttemplateLoader: tl,\n\t\tTemplateDecoder: td,\n\t\tLogger: logger,\n\t\tConfigurationController: configuration,\n\t\tstatusPublisher: statusPublisher,\n\t}\n}", "func NewController(le *logrus.Entry, bus bus.Bus, conf *Config) (*Controller, error) {\n\tdir := path.Clean(conf.GetDir())\n\tif _, err := os.Stat(dir); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"stat %s\", dir)\n\t}\n\treturn &Controller{\n\t\tle: le,\n\t\tbus: bus,\n\t\tdir: dir,\n\n\t\twatch: conf.GetWatch(),\n\t}, nil\n}", "func NewHealth(r router) *Health {\n\treturn &Health{\n\t\trouter: r,\n\t}\n}", "func NewHelloController(e *echo.Echo) {\n\thandler := &HelloController{}\n\n\te.GET(\"/hello\", handler.Hello)\n}", "func NewController(userService user.Service) chi.Router {\n\tc := 
Controller{userService}\n\tr := chi.NewRouter()\n\n\tr.Post(\"/\", c.AddUser)\n\tr.Get(\"/{userID}\", c.GetUser)\n\tr.Put(\"/{userID}/name\", c.UpdateName)\n\n\treturn r\n}", "func NewController(\n\tctx context.Context,\n\tcmw configmap.Watcher,\n) *controller.Impl {\n\tlogger := logging.FromContext(ctx)\n\n\tprovisionedserviceInformer := provisionedservice.Get(ctx)\n\n\t// TODO: setup additional informers here.\n\n\tr := &Reconciler{}\n\timpl := v1alpha1provisionedservice.NewImpl(ctx, r)\n\n\tlogger.Info(\"Setting up event handlers.\")\n\n\tprovisionedserviceInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue))\n\n\t// TODO: add additional informer event handlers here.\n\n\treturn impl\n}", "func NewController(client *k8s.KubeClient, nodeID string, serviceClient api.DriveServiceClient, eventRecorder *events.Recorder, log *logrus.Logger) *Controller {\n\treturn &Controller{\n\t\tclient: client,\n\t\tcrHelper: k8s.NewCRHelper(client, log),\n\t\tnodeID: nodeID,\n\t\tdriveMgrClient: serviceClient,\n\t\teventRecorder: eventRecorder,\n\t\tlog: log.WithField(\"component\", \"Controller\"),\n\t}\n}", "func NewController(cfg configuration.Controller, extractor interfaces.JetDropsExtractor, storage interfaces.Storage, pv int) (*Controller, error) {\n\tc := &Controller{\n\t\tcfg: cfg,\n\t\textractor: extractor,\n\t\tstorage: storage,\n\t\tjetDropRegister: make(map[types.Pulse]map[string]struct{}),\n\t\tmissedDataManager: NewMissedDataManager(time.Second*time.Duration(cfg.ReloadPeriod), time.Second*time.Duration(cfg.ReloadCleanPeriod)),\n\t\tplatformVersion: pv,\n\t}\n\treturn c, nil\n}", "func NewHealth() *Health {\n\treturn &Health{\n\t\tcomponents: map[interface{}]*HealthComponentStatus{},\n\t}\n}", "func NewController(\n\tcontrollerLogger *log.Logger,\n\trenderer *render.Render,\n\tauthPublisher *Publisher,\n\tuserRedis *user.RedisManager,\n) *Controller {\n\treturn &Controller{\n\t\tlogger: controllerLogger,\n\t\trender: renderer,\n\t\tauthPublisher: 
authPublisher,\n\t\tuserRedis: userRedis,\n\t}\n}", "func NewController(\n\tcontrollerLogger *log.Logger,\n\trenderer *render.Render,\n\tuserRedisManager *user.RedisManager,\n\tcategoryRedisManager *RedisManager,\n\tcategoryPublisher *Publisher,\n) *Controller {\n\treturn &Controller{\n\t\tlogger: controllerLogger,\n\t\trender: renderer,\n\t\tuserRedis: userRedisManager,\n\t\tcategoryRedis: categoryRedisManager,\n\t\tcategoryPublisher: categoryPublisher,\n\t}\n}", "func NewController(ctx context.Context, keypfx string, cli state.Repository) *Controller {\n\tctx, cancel := context.WithCancel(ctx)\n\tc := &Controller{\n\t\tkeypfx: fmt.Sprintf(\"%s/task-coordinator/%s\", keypfx, version),\n\t\tcli: cli,\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tdonec: make(chan struct{}),\n\t}\n\tgo c.run()\n\treturn c\n}", "func NewController(ctx context.Context, clientMap clientmap.ClientMap) (*Controller, error) {\n\tgardenClient, err := clientMap.GetClient(ctx, keys.ForGarden())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbackupBucketInformer, err := gardenClient.Cache().GetInformer(ctx, &gardencorev1beta1.BackupBucket{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get BackupBucket Informer: %w\", err)\n\t}\n\tbackupEntryInformer, err := gardenClient.Cache().GetInformer(ctx, &gardencorev1beta1.BackupEntry{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get BackupEntry Informer: %w\", err)\n\t}\n\tcontrollerDeploymentInformer, err := gardenClient.Cache().GetInformer(ctx, &gardencorev1beta1.ControllerDeployment{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get ControllerDeployment Informer: %w\", err)\n\t}\n\tcontrollerInstallationInformer, err := gardenClient.Cache().GetInformer(ctx, &gardencorev1beta1.ControllerInstallation{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get ControllerInstallation Informer: %w\", err)\n\t}\n\tcontrollerRegistrationInformer, err := gardenClient.Cache().GetInformer(ctx, 
&gardencorev1beta1.ControllerRegistration{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get ControllerRegistration Informer: %w\", err)\n\t}\n\tseedInformer, err := gardenClient.Cache().GetInformer(ctx, &gardencorev1beta1.Seed{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get Seed Informer: %w\", err)\n\t}\n\tshootInformer, err := gardenClient.Cache().GetInformer(ctx, &gardencorev1beta1.Shoot{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get Shoot Informer: %w\", err)\n\t}\n\n\tcontroller := &Controller{\n\t\tgardenClient: gardenClient.Client(),\n\n\t\tcontrollerRegistrationReconciler: NewControllerRegistrationReconciler(logger.Logger, gardenClient.Client()),\n\t\tcontrollerRegistrationSeedReconciler: NewControllerRegistrationSeedReconciler(logger.Logger, gardenClient),\n\t\tseedReconciler: NewSeedReconciler(logger.Logger, gardenClient.Client()),\n\n\t\tcontrollerRegistrationQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), \"controllerregistration\"),\n\t\tcontrollerRegistrationSeedQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), \"controllerregistration-seed\"),\n\t\tseedQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), \"seed\"),\n\t\tworkerCh: make(chan int),\n\t}\n\n\tbackupBucketInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: controller.backupBucketAdd,\n\t\tUpdateFunc: controller.backupBucketUpdate,\n\t\tDeleteFunc: controller.backupBucketDelete,\n\t})\n\n\tbackupEntryInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: controller.backupEntryAdd,\n\t\tUpdateFunc: controller.backupEntryUpdate,\n\t\tDeleteFunc: controller.backupEntryDelete,\n\t})\n\n\tcontrollerRegistrationInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) { controller.controllerRegistrationAdd(ctx, obj) },\n\t\tUpdateFunc: func(oldObj, newObj interface{}) { 
controller.controllerRegistrationUpdate(ctx, oldObj, newObj) },\n\t\tDeleteFunc: controller.controllerRegistrationDelete,\n\t})\n\n\tcontrollerDeploymentInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) { controller.controllerDeploymentAdd(ctx, obj) },\n\t\tUpdateFunc: func(oldObj, newObj interface{}) { controller.controllerDeploymentUpdate(ctx, oldObj, newObj) },\n\t})\n\n\tcontrollerInstallationInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: controller.controllerInstallationAdd,\n\t\tUpdateFunc: controller.controllerInstallationUpdate,\n\t})\n\n\tseedInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) { controller.seedAdd(obj, true) },\n\t\tUpdateFunc: controller.seedUpdate,\n\t\tDeleteFunc: controller.seedDelete,\n\t})\n\n\tshootInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: controller.shootAdd,\n\t\tUpdateFunc: controller.shootUpdate,\n\t\tDeleteFunc: controller.shootDelete,\n\t})\n\n\tcontroller.hasSyncedFuncs = append(controller.hasSyncedFuncs,\n\t\tbackupBucketInformer.HasSynced,\n\t\tbackupEntryInformer.HasSynced,\n\t\tcontrollerRegistrationInformer.HasSynced,\n\t\tcontrollerDeploymentInformer.HasSynced,\n\t\tcontrollerInstallationInformer.HasSynced,\n\t\tseedInformer.HasSynced,\n\t\tshootInformer.HasSynced,\n\t)\n\n\treturn controller, nil\n}", "func NewController(\n\tchopClient chopClientSet.Interface,\n\textClient apiExtensions.Interface,\n\tkubeClient kube.Interface,\n\tchopInformerFactory chopInformers.SharedInformerFactory,\n\tkubeInformerFactory kubeInformers.SharedInformerFactory,\n) *Controller {\n\n\t// Initializations\n\t_ = chopClientSetScheme.AddToScheme(scheme.Scheme)\n\n\t// Setup events\n\teventBroadcaster := record.NewBroadcaster()\n\teventBroadcaster.StartLogging(log.Info)\n\teventBroadcaster.StartRecordingToSink(\n\t\t&typedCoreV1.EventSinkImpl{\n\t\t\tInterface: 
kubeClient.CoreV1().Events(\"\"),\n\t\t},\n\t)\n\trecorder := eventBroadcaster.NewRecorder(\n\t\tscheme.Scheme,\n\t\tcoreV1.EventSource{\n\t\t\tComponent: componentName,\n\t\t},\n\t)\n\n\t// Create Controller instance\n\tcontroller := &Controller{\n\t\tkubeClient: kubeClient,\n\t\textClient: extClient,\n\t\tchopClient: chopClient,\n\t\tchiLister: chopInformerFactory.Clickhouse().V1().ClickHouseInstallations().Lister(),\n\t\tchiListerSynced: chopInformerFactory.Clickhouse().V1().ClickHouseInstallations().Informer().HasSynced,\n\t\tchitLister: chopInformerFactory.Clickhouse().V1().ClickHouseInstallationTemplates().Lister(),\n\t\tchitListerSynced: chopInformerFactory.Clickhouse().V1().ClickHouseInstallationTemplates().Informer().HasSynced,\n\t\tserviceLister: kubeInformerFactory.Core().V1().Services().Lister(),\n\t\tserviceListerSynced: kubeInformerFactory.Core().V1().Services().Informer().HasSynced,\n\t\tendpointsLister: kubeInformerFactory.Core().V1().Endpoints().Lister(),\n\t\tendpointsListerSynced: kubeInformerFactory.Core().V1().Endpoints().Informer().HasSynced,\n\t\tconfigMapLister: kubeInformerFactory.Core().V1().ConfigMaps().Lister(),\n\t\tconfigMapListerSynced: kubeInformerFactory.Core().V1().ConfigMaps().Informer().HasSynced,\n\t\tstatefulSetLister: kubeInformerFactory.Apps().V1().StatefulSets().Lister(),\n\t\tstatefulSetListerSynced: kubeInformerFactory.Apps().V1().StatefulSets().Informer().HasSynced,\n\t\tpodLister: kubeInformerFactory.Core().V1().Pods().Lister(),\n\t\tpodListerSynced: kubeInformerFactory.Core().V1().Pods().Informer().HasSynced,\n\t\trecorder: recorder,\n\t}\n\tcontroller.initQueues()\n\tcontroller.addEventHandlers(chopInformerFactory, kubeInformerFactory)\n\n\treturn controller\n}", "func NewController(c *config.Config) (*Controller, error) {\n\tipt, err := iptables.New()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tctr := &Controller{\n\t\tshutdown: make(chan bool),\n\t\tconfig: c,\n\t\tipt: ipt,\n\t}\n\tctr.bridgeAddr, ctr.subnet, 
err = net.ParseCIDR(c.Network.Subnet)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctr.bridgeInterface, err = CreateNetBridge(\"br\"+c.Network.InterfaceIdent, ctr.bridgeAddr, &net.IPNet{Mask: ctr.subnet.Mask})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tctr.wlanAddr = dhcp4.IPAdd(ctr.bridgeAddr, 1)\n\tif c.Network.Wireless.Interface != \"\" {\n\t\tif err := SetInterfaceAddr(c.Network.Wireless.Interface, &net.IPNet{IP: ctr.wlanAddr, Mask: ctr.subnet.Mask}); err != nil {\n\t\t\tDeleteNetBridge(ctr.bridgeInterface.Name)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif err := ctr.setupFirewall(); err != nil {\n\t\tDeleteNetBridge(ctr.bridgeInterface.Name)\n\t\treturn nil, err\n\t}\n\n\tif err := ctr.startHostapd(); err != nil {\n\t\tDeleteNetBridge(ctr.bridgeInterface.Name)\n\t\treturn nil, err\n\t}\n\n\tctr.wg.Add(1)\n\tgo ctr.circuitBreakerRoutine()\n\tctr.wg.Add(1)\n\tgo ctr.hostapdStatusRoutine()\n\tgo ctr.dhcpDNSRoutine()\n\treturn ctr, nil\n}", "func NewHealth(\n\tstate Status,\n\tmessage string,\n) Health {\n\treturn Health{\n\t\tStatus: state,\n\t\tUrgency: UNKNOWN, // set by the owning Monitor\n\t\tTime: time.Now(),\n\t\tMessage: Message(message),\n\t\tDuration: 0,\n\t}\n}", "func NewController(bookmarkservice *services.BookmarkService, bookmarkcategoryservice *services.BookmarkCategoryService, auth *AuthController) *Controller {\n\treturn &Controller{\n\t\tbmsrv: bookmarkservice,\n\t\tauth: auth,\n\t\tbmcsrv: bookmarkcategoryservice,\n\t}\n}", "func NewController(\n\tctx context.Context,\n\tcmw configmap.Watcher,\n) *controller.Impl {\n\tbrokerInformer := brokerinformer.Get(ctx)\n\teventTypeInformer := eventtypeinformer.Get(ctx)\n\n\tr := &Reconciler{\n\t\teventTypeLister: eventTypeInformer.Lister(),\n\t\tbrokerLister: brokerInformer.Lister(),\n\t}\n\timpl := eventtypereconciler.NewImpl(ctx, r)\n\n\teventTypeInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue))\n\n\t// Tracker is used to notify us that a EventType's Broker has changed so 
that\n\t// we can reconcile.\n\tr.tracker = impl.Tracker\n\tbrokerInformer.Informer().AddEventHandler(controller.HandleAll(\n\t\tcontroller.EnsureTypeMeta(\n\t\t\tr.tracker.OnChanged,\n\t\t\tv1.SchemeGroupVersion.WithKind(\"Broker\"),\n\t\t),\n\t))\n\n\treturn impl\n}", "func NewController() Controller {\n\treturn &controller{\n\t\tblobMgr: blob.Mgr,\n\t\tblobSizeExpiration: time.Hour * 24, // keep the size of blob in redis with 24 hours\n\t}\n}", "func NewController(informer cache.SharedIndexInformer, conf *config.Config, defaultClient client.ValiClient,\n\tlogger log.Logger) (Controller, error) {\n\tcontroller := &controller{\n\t\tclients: make(map[string]ControllerClient, expectedActiveClusters),\n\t\tconf: conf,\n\t\tdefaultClient: defaultClient,\n\t\tlogger: logger,\n\t}\n\n\tinformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: controller.addFunc,\n\t\tDeleteFunc: controller.delFunc,\n\t\tUpdateFunc: controller.updateFunc,\n\t})\n\n\tstopChan := make(chan struct{})\n\ttime.AfterFunc(conf.ControllerConfig.CtlSyncTimeout, func() {\n\t\tclose(stopChan)\n\t})\n\n\tif !cache.WaitForCacheSync(stopChan, informer.HasSynced) {\n\t\treturn nil, fmt.Errorf(\"failed to wait for caches to sync\")\n\t}\n\n\treturn controller, nil\n}", "func New(config Config) (*Controller, error) {\n\tif reflect.DeepEqual(config.Cluster, v1alpha1.KVMConfig{}) {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"%T.Cluster must not be empty\", config)\n\t}\n\tif config.Logger == nil {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"%T.Logger must not be empty\", config)\n\t}\n\tif config.ManagementK8sClient == nil {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"%T.ManagementK8sClient must not be empty\", config)\n\t}\n\tif config.Name == \"\" {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"%T.Name must not be empty\", config)\n\t}\n\tif config.Selector == nil {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"%T.Selector must not be 
empty\", config)\n\t}\n\tif config.WorkloadK8sClient == nil {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"%T.WorkloadK8sClient must not be empty\", config)\n\t}\n\n\tc := &Controller{\n\t\tmanagementK8sClient: config.ManagementK8sClient,\n\t\tworkloadK8sClient: config.WorkloadK8sClient,\n\t\tlogger: config.Logger,\n\n\t\tstopped: make(chan struct{}),\n\t\tlastReconciled: time.Time{},\n\t\tname: config.Name,\n\t\tselector: config.Selector,\n\t\tcluster: config.Cluster,\n\t}\n\n\treturn c, nil\n}", "func NewGreetController(svc *service.Greeting) *GreetController {\n\treturn &GreetController{\n\t\tsvc: svc,\n\t}\n}", "func NewController(s *SessionInfo, timeout time.Duration) *Controller {\n\tif timeout == 0 {\n\t\ttimeout = DefaultTimeout\n\t}\n\trng := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63()))\n\treturn &Controller{\n\t\tsessionInfo: s,\n\t\ttimeout: timeout,\n\n\t\tswitches: map[string][]uint32{},\n\t\tswitchIndices: map[string]int{},\n\n\t\tseqID: uint16(rng.Int63()),\n\t}\n}", "func NewBookController() *BookController {\n\treturn new(BookController)\n}" ]
[ "0.81887347", "0.80589396", "0.7891792", "0.77446157", "0.68483025", "0.677226", "0.67417073", "0.6698271", "0.6693059", "0.66468036", "0.6549667", "0.6535941", "0.6533674", "0.6522994", "0.64922637", "0.6490057", "0.64858145", "0.64842665", "0.642976", "0.64031225", "0.6390199", "0.63852227", "0.6361572", "0.63380086", "0.63208735", "0.6317767", "0.63157684", "0.63122624", "0.6305745", "0.62379426", "0.6236916", "0.6225162", "0.6225049", "0.6203832", "0.6203049", "0.6187288", "0.61872786", "0.61872786", "0.61818963", "0.6181528", "0.61587036", "0.6157305", "0.6157305", "0.6147944", "0.6144085", "0.61321914", "0.6112596", "0.6106345", "0.61010355", "0.60879666", "0.6072976", "0.60720396", "0.60719144", "0.60671", "0.60664916", "0.60508156", "0.6044085", "0.6042744", "0.6010775", "0.6010587", "0.59864944", "0.59607387", "0.5947828", "0.5941752", "0.5929732", "0.59183747", "0.5908792", "0.5895651", "0.5891816", "0.5889743", "0.5876393", "0.5873848", "0.587323", "0.587168", "0.5858415", "0.58545655", "0.5850157", "0.5829847", "0.5825317", "0.5822145", "0.5818497", "0.5814762", "0.58144987", "0.57673144", "0.5758873", "0.5748169", "0.5743816", "0.57343644", "0.5719282", "0.570239", "0.5702168", "0.56968933", "0.5676256", "0.566698", "0.56619215", "0.5659986", "0.56570715", "0.565604", "0.5653676", "0.5633562" ]
0.80731326
1
Health runs the health action.
func (c *HealthController) Health(ctx *app.HealthHealthContext) error { // HealthController_Health: start_implement ver := "unknown" semVer, err := semver.Make(MajorMinorPatch + "-" + ReleaseType + "+git.sha." + GitCommit) if err == nil { ver = semVer.String() } return ctx.OK([]byte("Health OK: " + time.Now().String() + ", semVer: " + ver + "\n")) // HealthController_Health: end_implement }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (cmd *HealthHealthCommand) Run(c *client.Client, args []string) error {\n\tvar path string\n\tif len(args) > 0 {\n\t\tpath = args[0]\n\t} else {\n\t\tpath = \"/api/_ah/health\"\n\t}\n\tlogger := goa.NewLogger(log.New(os.Stderr, \"\", log.LstdFlags))\n\tctx := goa.WithLogger(context.Background(), logger)\n\tresp, err := c.HealthHealth(ctx, path)\n\tif err != nil {\n\t\tgoa.LogError(ctx, \"failed\", \"err\", err)\n\t\treturn err\n\t}\n\n\tgoaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)\n\treturn nil\n}", "func (h HealthHandler) Health(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\t_, _ = w.Write([]byte(\"ok\"))\n}", "func (h *handler) health(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprint(w, \"OK\")\n}", "func Health() (err error) {\n\treturn // yeah, we're good :)\n}", "func (c *HealthController) Health(ctx *app.HealthHealthContext) error {\n\t// HealthController_Health: start_implement\n\n\tfmt.Printf(\"DC: [%s]\\n\", ctx.Dc)\n\tfmt.Printf(\"Host Group: [%s]\\n\", ctx.Hgroup)\n\tfmt.Printf(\"Host Name: [%s]\\n\", ctx.Hostname)\n\n\tfor index, element := range c.zapi_list {\n\t\tfmt.Printf(\"zapi_alias: [%s]\\n\", index)\n\t\tfmt.Printf(\"zapi_url: [%s]\\n\", element.zapi_url)\n\t\tfmt.Printf(\"zapi_username: [%s]\\n\", element.zapi_username)\n\t\tfmt.Printf(\"zapi_password: [%s]\\n\", element.zapi_password)\n\t\tfmt.Printf(\"zapi_version: [%s]\\n\\n\", element.zapi_version)\n\t}\n\n\tresult, err := GetDCStatus(c.zapi_list[ctx.Dc])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Erro communicating with ZAPI: %v\\n\", err)\n\t}\n\tretval, err := json.Marshal(result)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to UnMarshall JSON object\\n\", err)\n\t}\n\n\t// HealthController_Health: end_implement\n\treturn ctx.OK(retval)\n}", "func doHealth(sdk *sdk.SDK) {\n\ttick := time.Tick(2 * time.Second)\n\tfor {\n\t\terr := sdk.Health()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"[wrapper] Could not send health ping, 
%v\", err)\n\t\t}\n\t\t<-tick\n\t}\n}", "func doHealth(s *gosdk.SDK, stop <-chan struct{}) {\n\ttick := time.Tick(2 * time.Second)\n\tfor {\n\t\terr := s.Health()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Could not send health ping, %v\", err)\n\t\t}\n\t\tselect {\n\t\tcase <-stop:\n\t\t\tlog.Print(\"Stopped health pings\")\n\t\t\treturn\n\t\tcase <-tick:\n\t\t}\n\t}\n}", "func (c *Check) Health(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\tctx, span := trace.StartSpan(ctx, \"handlers.Check.Health\")\n\tdefer span.End()\n\n\tvar health struct {\n\t\tStatus string `json:\"status\"`\n\t}\n\n\t// Check if the database is ready.\n\tif err := database.StatusCheck(ctx, c.db); err != nil {\n\n\t\t// If the database is not ready we will tell the client and use a 500\n\t\t// status. Do not respond by just returning an error because further up in\n\t\t// the call stack will interpret that as an unhandled error.\n\t\thealth.Status = \"db not ready\"\n\t\treturn web.Respond(ctx, w, health, http.StatusInternalServerError)\n\t}\n\n\thealth.Status = \"ok\"\n\treturn web.Respond(ctx, w, health, http.StatusOK)\n}", "func doHealth(sdk *sdk.SDK, stop <-chan struct{}) {\n\ttick := time.Tick(2 * time.Second)\n\tfor {\n\t\terr := sdk.Health()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Could not send health ping, %v\", err)\n\t\t}\n\t\tselect {\n\t\tcase <-stop:\n\t\t\tlog.Print(\"Stopped health pings\")\n\t\t\treturn\n\t\tcase <-tick:\n\t\t}\n\t}\n}", "func (v *View) health(c echo.Context) error {\n\tif err := v.core.DB.Ping(); err != nil {\n\t\tc.Logger().Error(err)\n\t\treturn c.String(http.StatusInternalServerError, \"unhealthy\")\n\t}\n\treturn c.String(http.StatusOK, \"healthy\")\n}", "func (hh *HealthCheckHandler) Execute(w http.ResponseWriter, r *http.Request) {\n\tuuid := utils.ExtractUUID(r.URL.String())\n\tif uuid == \"\" {\n\t\thttp.Error(w, marshalError(\"invalid uuid\"), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tqueryParams := 
r.URL.Query()\n\ttimeout, err := time.ParseDuration(queryParams[\"timeout\"][0])\n\tif err != nil {\n\t\thttp.Error(w, marshalError(err.Error()), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\thc, err := hh.db.Get(uuid)\n\tif err != nil {\n\t\thttp.Error(w, marshalError(err.Error()), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// make a copy and run the healthcheck\n\ttry := &models.HealthCheck{\n\t\tID: hc.ID,\n\t\tEndpoint: hc.Endpoint,\n\t}\n\n\ttry = service.Run(try, timeout)\n\n\tb, err := json.Marshal(try)\n\tif err != nil {\n\t\thttp.Error(w, marshalError(err.Error()), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Write(b)\n}", "func (r *GoMetricsRegistry) RunHealthchecks() {}", "func health(c echo.Context) error {\n\th := &Health{\"Fluffy Radio Api\", \"1.0.0\", \"Just Keep Fluffing!\"}\n\treturn c.JSON(http.StatusOK, h)\n}", "func HealthHandler(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"Health called\")\n\tlog.Println(\"Request:\", r)\n\n\tw.WriteHeader(http.StatusOK)\n\n}", "func (c *MockController) Health() error {\n\tc.HealthFuncCalled++\n\n\treturn c.HealthFunc()\n}", "func (es *Eventstore) Health(ctx context.Context) error {\n\treturn es.repo.Health(ctx)\n}", "func (s *server) Health(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tw.WriteHeader(http.StatusOK)\n}", "func (s *server) Health(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tw.WriteHeader(http.StatusOK)\n}", "func (api *API) health(w http.ResponseWriter, req *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"OK\"))\n}", "func (c *Client) Health(ctx context.Context) (err error) {\n\t_, err = c.HealthEndpoint(ctx, nil)\n\treturn\n}", "func healthHandler(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"Passed\")\n}", "func Health(w http.ResponseWriter, r *http.Request) {\n\twriteJSON(w, http.StatusOK, healthResponse{\"alive\"})\n}", "func (h *Handler) Health(w http.ResponseWriter, r *http.Request) 
{\n\twriteResponse(r, w, http.StatusOK, &SimpleResponse{\n\t\tTraceID: tracing.FromContext(r.Context()),\n\t\tMessage: \"OK\",\n\t})\n}", "func Health(c *gin.Context) {\n\tfmt.Println(\"Service healthy\")\n\tc.JSON(http.StatusOK, gin.H{\"status\": http.StatusOK, \"message\": \"Loan service is healthy!\"})\n}", "func (a *API) health(c *gin.Context) (*Health, error) {\n\treturn &Health{\n\t\tStatus: \"OK\",\n\t}, nil\n}", "func (d *WindowsDataplane) reportHealth() {\n\tif d.config.HealthAggregator != nil {\n\t\td.config.HealthAggregator.Report(\n\t\t\thealthName,\n\t\t\t&health.HealthReport{Live: true, Ready: d.doneFirstApply},\n\t\t)\n\t}\n}", "func (c *controller) health(w http.ResponseWriter, r *http.Request) {\n\tlogger.Trace(\"health check\")\n\tw.WriteHeader(http.StatusNoContent)\n}", "func (t ThriftHandler) Health(ctx context.Context) (response *health.HealthStatus, err error) {\n\tresponse, err = t.h.Health(ctx)\n\treturn response, thrift.FromError(err)\n}", "func health(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {\n\tw.WriteHeader(http.StatusOK)\n}", "func healthHandler(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"Alive!\"))\n}", "func (a *API) health(w http.ResponseWriter, r *http.Request) {\n\tinfo := buildinfo.Get()\n\ta.writeJSON(w, r, http.StatusOK, httputil.HealthCheckResponse{\n\t\tBuildInfo: info,\n\t\tStartedAt: a.startedAt,\n\t})\n}", "func healthHandler(w http.ResponseWriter, r *http.Request) {\n\tbeeline.AddField(r.Context(), \"alive\", true)\n\tw.Write([]byte(`{\"alive\": \"yes\"}`))\n}", "func HealthHandler(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tw.WriteHeader(http.StatusOK)\n\treturn\n}", "func Health(ctx *gin.Context) {\n\tctx.JSON(http.StatusOK, gin.H{\n\t\t\"status\": 200,\n\t})\n}", "func (h *handler) Health(ctx context.Context) error {\n\treturn h.dbClient.Ping()\n}", "func HealthHandler(w http.ResponseWriter, r *http.Request) 
{\n\tw.WriteHeader(http.StatusOK)\n\tio.WriteString(w, `OK`)\n}", "func HealthHandler(w http.ResponseWriter, _ *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(healthMsg) //nolint:errcheck\n}", "func HealthHandler(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\treturn\n}", "func (fwdclient *Client) HealthCheck() error {\n\tlog.Debugf(\"%s: url=%s\", fwdclient.AppName, fwdclient.ActionUrls.Health)\n\treq, err := http.NewRequest(\"GET\", fwdclient.ActionUrls.Health, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Splunk %s\", fwdclient.Token))\n\tresp, err := fwdclient.httpclient.Do(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\" Please check splunk authorization token. %s: Health check failed: %s\", fwdclient.AppName, err)\n\t}\n\tdefer resp.Body.Close()\n\tlog.Debugf(\"%s: status=%d %s\", fwdclient.AppName, resp.StatusCode, http.StatusText(resp.StatusCode))\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"%s: Failed during Health check : %d %s\", fwdclient.AppName, resp.StatusCode, http.StatusText(resp.StatusCode))\n\t}\n\trespBody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%s: Failed while reading health response body: %s\", fwdclient.AppName, err)\n\t}\n\thealthCheckResponse := new(HealthCheckResponse)\n\tif err := json.Unmarshal(respBody, healthCheckResponse); err != nil {\n\t\treturn fmt.Errorf(\"%s: health check failed: the response is not JSON but: %s\", fwdclient.AppName, respBody)\n\t}\n\tlog.Debugf(\"%s: code=%d, text=%s\", fwdclient.AppName, healthCheckResponse.Code, healthCheckResponse.Text)\n\treturn nil\n}", "func Health(c *gin.Context) {\n\tresponse := types.APIResponse{Msg: true, Success: true}\n\tc.JSON(http.StatusOK, response)\n}", "func (p *PodcastDownloadService) Health(r *http.Request, args *podcast.PodcastDownloadArgs, reply *podcast.PodcastDownloadReply) error {\n\tnumHealthCalls.Add(1)\n\treply.Success = 
true\n\treply.Message = \"HI, I'm a podcast downloading service!\"\n\treturn nil\n}", "func (h *ProxyHealth) run() {\n\tcheckHealth := func() {\n\t\th.mu.Lock()\n\t\tdefer h.mu.Unlock()\n\t\tisAvailable := h.check(h.origin)\n\t\th.isAvailable = isAvailable\n\t}\n\n\tgo func() {\n\t\tt := time.NewTicker(h.period)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-t.C:\n\t\t\t\tcheckHealth()\n\t\t\tcase <-h.cancel:\n\t\t\t\tt.Stop()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func HealthHandler(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tfmt.Fprint(w, http.StatusText(http.StatusOK))\n}", "func (checker *Checker) Run() {\n\tgo gohcmd.GracefulStop(checker.cancel)\n\n\tduration, _ := time.ParseDuration(\"15s\")\n\tticker := time.NewTicker(duration)\n\tdefer ticker.Stop()\n\n\tlogrus.Infof(\"Started checker agent\")\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tresponse, err := checker.CheckHealth()\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"Error checking health: %v\", err)\n\t\t\t\tchecker.RegisterProblem(err)\n\t\t\t} else {\n\t\t\t\tchecker.RegisterResponse(response)\n\t\t\t}\n\n\t\tcase <-checker.ctx.Done():\n\t\t\tlogrus.Info(\"Health checker agent stopped\")\n\t\t\treturn\n\t\t}\n\t}\n\n}", "func HealthCheck(c echo.Context) error {\n\treturn c.String(http.StatusOK, \"WORKING!\")\n}", "func (h *Healthz) Handler(w http.ResponseWriter, r *http.Request) {\n\th.Lock()\n\tdefer h.Unlock()\n\th.healthzCount++\n\tfor name, handler := range h.checks {\n\t\tif err := handler.Check(r); err != nil {\n\t\t\tw.WriteHeader(500)\n\t\t\terrMsg := fmt.Sprintf(\"check %v failed, err: %v\", name, err)\n\t\t\t_, _ = w.Write([]byte(errMsg))\n\t\t\tklog.Infof(\"/healthz %v\", errMsg)\n\t\t\treturn\n\t\t}\n\t}\n\n\tw.WriteHeader(200)\n\t_, _ = w.Write([]byte(\"ok\"))\n\tif h.healthzCount%10 == 0 {\n\t\tklog.V(5).Infof(\"/healthz ok %v\", h.info)\n\t}\n}", "func (h *Handler) HealthHandler(w http.ResponseWriter, r *http.Request) {\n\tif err := 
h.DB.Ping(); err != nil {\n\t\tlog.Printf(\"health check failed: %s\\n\", err)\n\t\twriteHTTPResponse(w, http.StatusInternalServerError, map[string]string{\"message\": \"I'm unhealthy\"})\n\t} else {\n\t\twriteHTTPResponse(w, http.StatusOK, map[string]string{\"message\": \"I'm healthy\"})\n\t}\n}", "func (c *RESTClient) Health(ctx context.Context) (*model.OverallHealthStatus, error) {\n\tresp, err := c.Client.Products.GetHealth(&products.GetHealthParams{\n\t\tContext: ctx,\n\t}, c.AuthInfo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp.Payload, nil\n}", "func (s *server) serveHealth(w http.ResponseWriter, r *http.Request) {\n\tif s.isShuttingDown {\n\t\thttp.Error(w, \"Shutting Down\", 503)\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n}", "func healthHandler(w http.ResponseWriter, r *http.Request) {\n\tlogrus.Info(\"Check Health Status\")\n\n\tw.WriteHeader(http.StatusOK)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tresp := make(map[string]string)\n\tresp[\"status\"] = \"200 OK\"\n\tjsonResp, err := json.Marshal(resp)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error happened in JSON marshal. 
Err: %s\", err)\n\t}\n\n\t//return http code 200\n\tw.WriteHeader(http.StatusOK)\n\t// output\n\tw.Write(jsonResp)\n}", "func (h *healthCheckHandler) HealthCheck(c *gin.Context) {\n\n\terr := h.u.HealthCheck()\n\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"result\": statusUnhealty, \"metadata\": nil})\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\"result\": statusHealthy, \"metadata\": nil})\n}", "func (r *oauthProxy) healthHandler(w http.ResponseWriter, req *http.Request) {\n\tw.Header().Set(\"Content-Type\", jsonMime)\n\tw.Header().Set(versionHeader, version.GetVersion())\n\tw.WriteHeader(http.StatusOK)\n\t_, _ = w.Write([]byte(`{\"status\":\"OK\"}`))\n}", "func healthHandler(response http.ResponseWriter, req *http.Request, rcvr *receiver.Receiver) {\n\tdefer req.Body.Close()\n\tresponse.Header().Set(\"Content-Type\", \"application/json\")\n\n\trcvr.StateLock.Lock()\n\tdefer rcvr.StateLock.Unlock()\n\n\tvar lastChanged time.Time\n\tif rcvr.CurrentState != nil {\n\t\tlastChanged = rcvr.CurrentState.LastChanged\n\t}\n\n\tmessage, _ := json.Marshal(ApiStatus{\n\t\tMessage: \"Healthy!\",\n\t\tLastChanged: lastChanged,\n\t\tServiceChanged: rcvr.LastSvcChanged,\n\t})\n\n\tresponse.Write(message)\n}", "func Healthcheck(context *gin.Context) {\n\tcontext.JSON(http.StatusOK, gin.H{\"message\": \"App up and running\"})\n}", "func (a *infrastructureHandlers) healthCheck(c *gin.Context) {\n\tresponse := HealthCheckResponseSuccess{}\n\tresponse.BuildSha = os.Getenv(\"APP_BUILD_HASH\")\n\tresponse.Name = os.Getenv(\"APP_NAME\")\n\tresponse.Version = os.Getenv(\"APP_VERSION\")\n\tc.JSON(200, response)\n}", "func (h *healthChecker) runHealthCheck() {\n\tt := time.Now()\n\terr := checkStorageHealth(h.s.store)\n\tpassed := time.Since(t)\n\tif err != nil {\n\t\tlog.Errorf(\"server: storage health check failed: %s\", err)\n\t}\n\n\t// Make sure to only hold the mutex to access the fields, and not while\n\t// we're querying the storage 
object.\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\th.err = err\n\th.passed = passed\n}", "func HealthHandler(res http.ResponseWriter, req *http.Request) {\n\thc := Healthcheck{Status: \"OK\"}\n\tif err := json.NewEncoder(res).Encode(hc); err != nil {\n\t\tlog.Panic(err)\n\t}\n}", "func HealthHandler(res http.ResponseWriter, req *http.Request) {\n\thc := Healthcheck{Status: \"OK\"}\n\tif err := json.NewEncoder(res).Encode(hc); err != nil {\n\t\tlog.Panic(err)\n\t}\n}", "func GetHealth(c echo.Context) error {\n\tu := int(time.Since(helpers.StartTime).Seconds())\n\tuptime := &Health{\n\t\tStatus: \"ok\",\n\t\tUptime: u,\n\t}\n\n\treturn c.JSON(http.StatusOK, uptime)\n}", "func TestHealth(t *testing.T) {\n\n\tapi := API{}\n\n\trequest, err := http.NewRequest(\"GET\", \"/health\", nil)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\trequestRecorder := httptest.NewRecorder()\n\thandler := http.HandlerFunc(api.Health)\n\thandler.ServeHTTP(requestRecorder, request)\n\n\tbody, err := ioutil.ReadAll(requestRecorder.Body)\n\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\tvar healthResponse model.HealthResponse\n\tjson.Unmarshal(body, &healthResponse)\n\n\tassert.Equal(t, http.StatusOK, requestRecorder.Code)\n\tassert.Equal(t, \"healthy\", healthResponse.Status)\n}", "func Health(h func(http.ResponseWriter, *http.Request)) Option {\n\treturn func(o *Options) {\n\t\to.Health = h\n\t}\n}", "func (e *CityService) Healthcheck(ctx context.Context, req *cityservice.EmptyRequest, rsp *cityservice.Response) error {\n\tutil.WriteLogMain(\"Call Healthcheck Service\")\n\trsp.Msg = \"Success\"\n\treturn nil\n}", "func (c *PackHealth) Health() *util.HealthStatus {\n\tesstatus := dao.IsRunning()\n\tvar esdb *util.DBHealth\n\tif esstatus != nil {\n\t\tesdb = util.NewDBHealth(\"packmongo\", false, esstatus.Error())\n\t} else {\n\t\tesdb = util.NewDBHealth(\"packmongo\", true, \"ok\")\n\t}\n\tservicename := \"pack\"\n\tservicestatus := true\n\thealth := 
util.NewHealthStatus(servicename, servicestatus, nil)\n\thealth.AddDBToHealthStatus(esdb)\n\treturn health\n}", "func HandleHealth(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write([]byte(`{\"status\": \"ok\"}`))\n}", "func serveHealthStatus(w http.ResponseWriter, req *http.Request) {\n\tfmt.Fprintf(w, \"OK\")\n}", "func healthTaskWork(args []string) error {\n\tif len(args) < 1 {\n\t\treturn errors.New(\"Task Did Not Receive Game ID!\")\n\t}\n\n\tgameTime, err := data.GetRoomHealth(args[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif gameTime.Add(policy.StaleGameDuration).Before(time.Now().UTC()) {\n\t\tsuperUserRequest, err := policy.RequestWithSuperUser(true, policy.CmdGameDelete, data.SelectGameArgs{GameID: args[0]})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tresp := data.DeleteGame(superUserRequest.Header, superUserRequest.BodyFactories, superUserRequest.IsSecureConnection)\n\t\tif resp.ServerError != nil {\n\t\t\treturn resp.ServerError\n\t\t}\n\n\t} else {\n\t\tevent.SubmitGameForHealthCheck(args[0])\n\t}\n\n\treturn nil\n}", "func healthCheck(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"Status OK.\\n\")\n}", "func healthcheck(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n}", "func main() {\n\n\t// Create a new Checker\n\tchecker := health.NewChecker(\n\t\t// A simple successFunc to see if a fake file system up.\n\t\thealth.WithCheck(health.Check{\n\t\t\tName: \"filesystem\",\n\t\t\tTimeout: 2 * time.Second, // A successFunc specific timeout.\n\t\t\tInterceptors: []health.Interceptor{createCheckLogger, logCheck},\n\t\t\tCheck: func(ctx context.Context) error {\n\t\t\t\treturn fmt.Errorf(\"this is a check error\") // example error\n\t\t\t},\n\t\t}),\n\t)\n\n\thandler := health.NewHandler(checker, health.WithMiddleware(createRequestLogger, logRequest))\n\n\t// We Create a new http.Handler that provides health 
successFunc information\n\t// serialized as a JSON string via HTTP.\n\thttp.Handle(\"/health\", handler)\n\thttp.ListenAndServe(\":3000\", nil)\n}", "func (o ServerEndpointCloudTieringStatusResponseOutput) Health() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ServerEndpointCloudTieringStatusResponse) string { return v.Health }).(pulumi.StringOutput)\n}", "func PostHealth(w http.ResponseWriter, r *http.Request, db *sqlx.DB) {\n\tparams := mux.Vars(r)\n\treturnMessages := make(map[string][]string)\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tsession, err := store.Get(r, \"auth\")\n\tif err != nil {\n\t\treturnMessages[\"message\"] = append(returnMessages[\"message\"], err.Error())\n\t\treturnMessages[\"status\"] = append(returnMessages[\"status\"], \"error\")\n\t\t// http.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Convert our session data into an instance of User\n\tuser := User{}\n\tuser, _ = session.Values[\"user\"].(User)\n\n\tif user.Username != \"\" && user.AccessLevel == \"admin\" {\n\t\tvar health []Health\n\n\t\tif err := json.Unmarshal(body, &health); err != nil {\n\t\t\treturnMessages[\"message\"] = append(returnMessages[\"message\"], err.Error())\n\t\t\treturnMessages[\"status\"] = append(returnMessages[\"status\"], \"error\")\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\t\tlog.Println(returnMessages)\n\t\t\tif err := json.NewEncoder(w).Encode(returnMessages); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tif len(health) == 0 {\n\t\t\treturnMessages[\"message\"] = append(returnMessages[\"message\"], \"Warning: no items in health\")\n\t\t\treturnMessages[\"status\"] = append(returnMessages[\"status\"], \"warning\")\n\t\t} else {\n\t\t\tif params[\"keys\"] == \"u\" {\n\t\t\t\tfor _, item := range health {\n\t\t\t\t\t_, err := db.Exec(\"INSERT INTO public.health 
(\"+\n\t\t\t\t\t\t\"username, \"+\n\t\t\t\t\t\t\"ts, \"+\n\t\t\t\t\t\t\"variable, \"+\n\t\t\t\t\t\t\"value\"+\n\t\t\t\t\t\t\") VALUES (\"+\n\t\t\t\t\t\t\" $1,\"+\n\t\t\t\t\t\t\" $2,\"+\n\t\t\t\t\t\t\" $3,\"+\n\t\t\t\t\t\t\" $4\"+\n\t\t\t\t\t\t\") \"+\"ON CONFLICT (\"+\n\t\t\t\t\t\t\"ts, \"+\n\t\t\t\t\t\t\"variable \"+\n\t\t\t\t\t\t\") DO \"+\n\t\t\t\t\t\t\"UPDATE SET \"+\n\t\t\t\t\t\t\" username = $1,\"+\n\t\t\t\t\t\t\" value = $4\",\n\t\t\t\t\t\tuser.Username,\n\t\t\t\t\t\titem.Ts,\n\t\t\t\t\t\titem.Variable,\n\t\t\t\t\t\titem.Value)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturnMessages[\"message\"] = append(returnMessages[\"message\"], err.Error())\n\t\t\t\t\t\treturnMessages[\"status\"] = append(returnMessages[\"status\"], \"error\")\n\t\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturnMessages[\"message\"] = append(returnMessages[\"message\"], \"inserted item into health\")\n\t\t\t\t\t\treturnMessages[\"status\"] = append(returnMessages[\"status\"], \"info\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif params[\"keys\"] == \"p\" {\n\t\t\t\tfor _, item := range health {\n\t\t\t\t\t_, err := db.Exec(\"INSERT INTO public.health (\"+\n\t\t\t\t\t\t\"id, \"+\n\t\t\t\t\t\t\"username, \"+\n\t\t\t\t\t\t\"ts, \"+\n\t\t\t\t\t\t\"variable, \"+\n\t\t\t\t\t\t\"value\"+\n\t\t\t\t\t\t\") VALUES (\"+\n\t\t\t\t\t\t\" $1,\"+\n\t\t\t\t\t\t\" $2,\"+\n\t\t\t\t\t\t\" $3,\"+\n\t\t\t\t\t\t\" $4,\"+\n\t\t\t\t\t\t\" $5\"+\n\t\t\t\t\t\t\") \"+\n\t\t\t\t\t\t\"ON CONFLICT (\"+\n\t\t\t\t\t\t\"id \"+\n\t\t\t\t\t\t\") DO \"+\n\t\t\t\t\t\t\"UPDATE SET \"+\n\t\t\t\t\t\t\" username = $2,\"+\n\t\t\t\t\t\t\" ts = $3,\"+\n\t\t\t\t\t\t\" variable = $4,\"+\n\t\t\t\t\t\t\" value = $5\",\n\t\t\t\t\t\titem.ID,\n\t\t\t\t\t\tuser.Username,\n\t\t\t\t\t\titem.Ts,\n\t\t\t\t\t\titem.Variable,\n\t\t\t\t\t\titem.Value)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturnMessages[\"message\"] = append(returnMessages[\"message\"], err.Error())\n\t\t\t\t\t\treturnMessages[\"status\"] = 
append(returnMessages[\"status\"], \"error\")\n\t\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturnMessages[\"message\"] = append(returnMessages[\"message\"], \"inserted item into health\")\n\t\t\t\t\t\treturnMessages[\"status\"] = append(returnMessages[\"status\"], \"info\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t\tw.WriteHeader(http.StatusOK)\n\n\t\tlog.Println(returnMessages)\n\t\tif err := json.NewEncoder(w).Encode(returnMessages); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else {\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t\tw.WriteHeader(http.StatusOK)\n\n\t\tif err := json.NewEncoder(w).Encode(\"access denied\"); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tlogRequest(r)\n}", "func doHealthcheck(ctx context.Context) {\n\t_, err := State(ctx)\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n\n\tos.Exit(0)\n}", "func (hc *HealthController) Index(c echo.Context) error {\n\treturn c.JSON(http.StatusOK, newResponse(\n\t\thttp.StatusOK,\n\t\thttp.StatusText(http.StatusOK),\n\t\t\"OK\",\n\t))\n}", "func GetHealth() Health {\n\treturn Health{\n\t\tsuccess: true,\n\t}\n}", "func healthFunc(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tbody, err := json.Marshal(HealthcheckBody{\n\t\tDescription: \"Web API for ANZ\",\n\t\tCommit: CommitSHA,\n\t\tVersion: Version,\n\t})\n\tif err != nil {\n\t\t// TODO: How to test this scenario?\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\tw.Write(body)\n}", "func GetHealth(c *gin.Context) {\n\tservicer := c.MustGet(registry.ServiceKey).(registry.Servicer)\n\thealthCheckSearvice := servicer.NewHealthCheck()\n\n\tvar input model.HealthCheckSearchInput\n\n\terr := c.ShouldBindQuery(&input)\n\tif err != nil {\n\t\tc.AbortWithStatusJSON(http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\n\toutput, 
err := healthCheckSearvice.GetHealth(input.ID)\n\tif err != nil {\n\t\tc.AbortWithStatusJSON(http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, output)\n}", "func (s *Server) HealthCheckHandler(w http.ResponseWriter, r *http.Request) {\n\ttime.Sleep(time.Duration(s.Config.Delay) * time.Second)\n\tstatus := 200\n\tif !s.Config.Healthy {\n\t\tstatus = 500\n\t}\n\tw.WriteHeader(status)\n\tlog.Info(\"host: \", r.Host, \" uri: \", r.RequestURI, \" status: \", status)\n\n}", "func healthCheck(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\treturn\n}", "func healthHandler(site site.API) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif site == nil || !site.Healthy() {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tw.WriteHeader(http.StatusOK)\n\t\tfmt.Fprintln(w, \"OK\")\n\t}\n}", "func (c *Client) Health() error {\n\t_, err := c.Get(c.Endpoint(\"ping\"), url.Values{})\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\treturn nil\n}", "func TestHealth(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\ts, _, _ := serverutils.StartServer(t, base.TestServerArgs{})\n\tdefer s.Stopper().Stop()\n\n\tu := s.AdminURL() + healthPath\n\thttpClient, err := s.GetHTTPClient()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tresp, err := httpClient.Get(u)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\tvar data serverpb.HealthResponse\n\tif err := jsonpb.Unmarshal(resp.Body, &data); err != nil {\n\t\tt.Error(err)\n\t}\n}", "func (c *CmdHandle) CmdHealthMonitor(){\n// log.Println(\"CmdHealthMonitor: Start\")\n\n //Check if any of the commands have been running for ever avg timout time 20s\n// log.Println(\"Monitored Cmd Processing Health\")\n\n // Check the system load if less send more commands for processing\n c.CmdProcessPendingCommands()\n\n// log.Println(\"CmdHealthMonitor: Complete\")\n}", "func (c HTTPGetHealthcheck) Execute() 
Result {\n\tinput := struct {\n\t\tURL string `json:\"url\"`\n\t}{\n\t\thttp.CleanURL(c.URL),\n\t}\n\n\tclient := http.NewClient(c.URL)\n\n\tstart := time.Now()\n\tresp, err := client.Get(\"\")\n\tend := time.Now()\n\n\tif err != nil {\n\t\treturn FailWithInput(err.Error(), input)\n\t}\n\n\tcontext := HTTPExpectationContext{\n\t\tResponse: resp,\n\t\tResponseTime: end.Sub(start),\n\t}\n\n\treturn c.VerifyExpectation(input, func(assertion interface{}) []*AssertionGroup {\n\t\treturn assertion.(HTTPResponseExpectation).Verify(context)\n\t})\n}", "func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) {\n\tvar (\n\t\tnewCommand []string\n\t\treturnCode int\n\t\tcapture bytes.Buffer\n\t\tinStartPeriod bool\n\t)\n\thcCommand := c.HealthCheckConfig().Test\n\tif len(hcCommand) < 1 {\n\t\treturn define.HealthCheckNotDefined, errors.Errorf(\"container %s has no defined healthcheck\", c.ID())\n\t}\n\tswitch hcCommand[0] {\n\tcase \"\", \"NONE\":\n\t\treturn define.HealthCheckNotDefined, errors.Errorf(\"container %s has no defined healthcheck\", c.ID())\n\tcase \"CMD\":\n\t\tnewCommand = hcCommand[1:]\n\tcase \"CMD-SHELL\":\n\t\t// TODO: SHELL command from image not available in Container - use Docker default\n\t\tnewCommand = []string{\"/bin/sh\", \"-c\", strings.Join(hcCommand[1:], \" \")}\n\tdefault:\n\t\t// command supplied on command line - pass as-is\n\t\tnewCommand = hcCommand\n\t}\n\tif len(newCommand) < 1 || newCommand[0] == \"\" {\n\t\treturn define.HealthCheckNotDefined, errors.Errorf(\"container %s has no defined healthcheck\", c.ID())\n\t}\n\tcaptureBuffer := bufio.NewWriter(&capture)\n\thcw := hcWriteCloser{\n\t\tcaptureBuffer,\n\t}\n\tstreams := new(define.AttachStreams)\n\tstreams.OutputStream = hcw\n\tstreams.ErrorStream = hcw\n\n\tstreams.InputStream = bufio.NewReader(os.Stdin)\n\n\tstreams.AttachOutput = true\n\tstreams.AttachError = true\n\tstreams.AttachInput = true\n\n\tlogrus.Debugf(\"executing health check command %s for %s\", 
strings.Join(newCommand, \" \"), c.ID())\n\ttimeStart := time.Now()\n\thcResult := define.HealthCheckSuccess\n\tconfig := new(ExecConfig)\n\tconfig.Command = newCommand\n\texitCode, hcErr := c.Exec(config, streams, nil)\n\tif hcErr != nil {\n\t\terrCause := errors.Cause(hcErr)\n\t\thcResult = define.HealthCheckFailure\n\t\tif errCause == define.ErrOCIRuntimeNotFound ||\n\t\t\terrCause == define.ErrOCIRuntimePermissionDenied ||\n\t\t\terrCause == define.ErrOCIRuntime {\n\t\t\treturnCode = 1\n\t\t\thcErr = nil\n\t\t} else {\n\t\t\treturnCode = 125\n\t\t}\n\t} else if exitCode != 0 {\n\t\thcResult = define.HealthCheckFailure\n\t\treturnCode = 1\n\t}\n\ttimeEnd := time.Now()\n\tif c.HealthCheckConfig().StartPeriod > 0 {\n\t\t// there is a start-period we need to honor; we add startPeriod to container start time\n\t\tstartPeriodTime := c.state.StartedTime.Add(c.HealthCheckConfig().StartPeriod)\n\t\tif timeStart.Before(startPeriodTime) {\n\t\t\t// we are still in the start period, flip the inStartPeriod bool\n\t\t\tinStartPeriod = true\n\t\t\tlogrus.Debugf(\"healthcheck for %s being run in start-period\", c.ID())\n\t\t}\n\t}\n\n\teventLog := capture.String()\n\tif len(eventLog) > MaxHealthCheckLogLength {\n\t\teventLog = eventLog[:MaxHealthCheckLogLength]\n\t}\n\n\tif timeEnd.Sub(timeStart) > c.HealthCheckConfig().Timeout {\n\t\treturnCode = -1\n\t\thcResult = define.HealthCheckFailure\n\t\thcErr = errors.Errorf(\"healthcheck command exceeded timeout of %s\", c.HealthCheckConfig().Timeout.String())\n\t}\n\thcl := newHealthCheckLog(timeStart, timeEnd, returnCode, eventLog)\n\tif err := c.updateHealthCheckLog(hcl, inStartPeriod); err != nil {\n\t\treturn hcResult, errors.Wrapf(err, \"unable to update health check log %s for %s\", c.healthCheckLogPath(), c.ID())\n\t}\n\treturn hcResult, hcErr\n}", "func (h HealthController) Status(c *gin.Context) {\n\tc.String(http.StatusOK, \"Working!\")\n}", "func (r *queryImpl) Health(p graphql.ResolveParams) (interface{}, error) 
{\n\treturn struct{}{}, nil\n}", "func HealthCheck(w http.ResponseWriter, req *http.Request) {\n\tlog.Println(\"🚑 healthcheck ok!\")\n\tw.WriteHeader(http.StatusOK)\n}", "func health(c echo.Context) error {\n\tvar errResp ErrorResponseData\n\terr := storage.Health()\n\tif err != nil {\n\t\terrResp.Data.Code = \"database_connection_error\"\n\t\terrResp.Data.Description = err.Error()\n\t\terrResp.Data.Status = strconv.Itoa(http.StatusInternalServerError)\n\t\treturn c.JSON(http.StatusInternalServerError, errResp)\n\t}\n\treturn c.String(http.StatusOK, \"Healthy\")\n}", "func healthCheckHandler(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintln(w, \"OK\")\n}", "func healthCheck(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"Ready\"))\n}", "func (_e *MockDataCoord_Expecter) CheckHealth(ctx interface{}, req interface{}) *MockDataCoord_CheckHealth_Call {\n\treturn &MockDataCoord_CheckHealth_Call{Call: _e.mock.On(\"CheckHealth\", ctx, req)}\n}", "func (c *ControllerImpl) HealthCheck(ctx context.Context) error {\n\treturn nil\n}", "func healthHandler(w http.ResponseWriter, r *http.Request) {\n\t// expect only GET requests\n\tif r.Method != http.MethodGet {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"unexpected http method\"))\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"OK\"))\n}", "func Health(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tio.WriteString(w, \"ok\")\n}", "func (manager *Manager) HealthHandler(healthCheck *healthcheck.Manager) {\n\tlog := logging.For(\"core/healthcheck/handler\").WithField(\"func\", \"healthcheck\")\n\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, syscall.SIGUSR1)\n\n\tfor {\n\t\tselect {\n\t\tcase _ = <-signalChan:\n\t\t\tlog.Debug(\"HealthHandler Debug triggered\")\n\t\t\thealthCheck.Debug()\n\t\tcase checkresult := <-healthCheck.Incoming:\n\t\t\t// Status change entity\n\t\t\t// pool + backend + 
node = node check Changed\n\t\t\t// pool + backend = backend check changed - applies to nodes\n\t\t\t// pool = pool check changed - applies to vip\n\t\t\tlog.WithField(\"pool\", checkresult.PoolName).WithField(\"backend\", checkresult.BackendName).WithField(\"node\", checkresult.NodeName).WithField(\"actualstatus\", checkresult.ActualStatus.String()).WithField(\"reportedstatus\", checkresult.ReportedStatus.String()).WithField(\"errormsg\", checkresult.ErrorMsg).WithField(\"check\", checkresult.Description).Info(\"Received health update from worker\")\n\n\t\t\t// Set status in healh pool\n\t\t\thealthCheck.SetCheckStatus(checkresult.WorkerUUID, checkresult.ReportedStatus, checkresult.ErrorMsg)\n\n\t\t\t// Get all nodes using the check\n\t\t\tnodeUUIDs := healthCheck.GetPools(checkresult.WorkerUUID)\n\t\t\tlog.WithField(\"nodeuuids\", nodeUUIDs).WithField(\"workeruuid\", checkresult.WorkerUUID).Debug(\"Pools to update\")\n\n\t\t\t// and check each individual node using the above check, to see if status changes\n\t\t\tfor _, nodeUUID := range nodeUUIDs {\n\t\t\t\tactualStatus, poolName, backendName, nodeName, errors := healthCheck.GetNodeStatus(nodeUUID)\n\t\t\t\tcheckresult.ReportedStatus = actualStatus\n\t\t\t\tcheckresult.ErrorMsg = errors\n\t\t\t\tcheckresult.NodeUUID = nodeUUID\n\t\t\t\tif poolName != \"\" {\n\t\t\t\t\tcheckresult.PoolName = poolName\n\t\t\t\t}\n\n\t\t\t\tif nodeName != \"\" {\n\t\t\t\t\tcheckresult.NodeName = nodeName\n\t\t\t\t}\n\n\t\t\t\tif backendName != \"\" {\n\t\t\t\t\tcheckresult.BackendName = backendName\n\t\t\t\t}\n\n\t\t\t\tlog.WithField(\"pool\", checkresult.PoolName).WithField(\"backend\", checkresult.BackendName).WithField(\"node\", checkresult.NodeName).WithField(\"reportedstatus\", checkresult.ReportedStatus.String()).WithField(\"error\", checkresult.ErrorMsg).Info(\"Sending status update to cluster\")\n\t\t\t\tmanager.healthchecks <- checkresult // do not send pointers, since pointer will change data\n\t\t\t}\n\n\t\t}\n\t}\n}", 
"func GetHealth(w http.ResponseWriter, r *http.Request) {\n\tjson.NewEncoder(w).Encode(health)\n}", "func (h *Health) Get(w http.ResponseWriter, r *http.Request) {\n\th.router.WriteHeader(r.Context(), w, http.StatusOK)\n}", "func (a *HealthApiService) GetHealthExecute(r ApiGetHealthRequest) (HealthCheck, *_nethttp.Response, GenericOpenAPIError) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\texecutionError GenericOpenAPIError\n\t\tlocalVarReturnValue HealthCheck\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"HealthApiService.GetHealth\")\n\tif err != nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, nil, executionError\n\t}\n\n\tlocalVarPath := localBasePath + \"/health\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.zapTraceSpan != nil {\n\t\tlocalVarHeaderParams[\"Zap-Trace-Span\"] = parameterToString(*r.zapTraceSpan, \"\")\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err 
!= nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, nil, executionError\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, localVarHTTPResponse, executionError\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, localVarHTTPResponse, executionError\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 503 {\n\t\t\tvar v HealthCheck\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, executionError\n}", "func (_e *MockQueryCoord_Expecter) CheckHealth(ctx interface{}, req 
interface{}) *MockQueryCoord_CheckHealth_Call {\n\treturn &MockQueryCoord_CheckHealth_Call{Call: _e.mock.On(\"CheckHealth\", ctx, req)}\n}", "func HealthCheck(c buffalo.Context) error {\n\tr := render.New(render.Options{})\n\treturn c.Render(200, r.String(\"OK\"))\n}" ]
[ "0.6713238", "0.6543138", "0.6531063", "0.64671564", "0.638338", "0.6375378", "0.63475406", "0.63455915", "0.6333624", "0.6295893", "0.6284236", "0.62841773", "0.628139", "0.6255509", "0.6253855", "0.62408435", "0.6238117", "0.6238117", "0.6195274", "0.61200297", "0.61177963", "0.61132544", "0.6068528", "0.6060152", "0.60412604", "0.59941214", "0.5992696", "0.59643704", "0.5963396", "0.5949841", "0.5927492", "0.5925869", "0.5924146", "0.59003276", "0.5871188", "0.5868008", "0.58343565", "0.58116645", "0.58031493", "0.57873464", "0.57790947", "0.5726053", "0.57023597", "0.5696592", "0.5695704", "0.56942415", "0.5667665", "0.5661655", "0.56596845", "0.56554735", "0.5643426", "0.56421787", "0.5619971", "0.5571197", "0.5566762", "0.55614907", "0.55474", "0.55474", "0.55339223", "0.5532707", "0.55219245", "0.5491302", "0.5487899", "0.5475059", "0.5474116", "0.5468109", "0.5464853", "0.5464514", "0.5463175", "0.5459359", "0.54556084", "0.54495555", "0.54449105", "0.54229605", "0.54155374", "0.5398042", "0.5379235", "0.5374467", "0.5363369", "0.5358365", "0.5355566", "0.5353586", "0.5353037", "0.53502536", "0.53491694", "0.53458697", "0.5336142", "0.53226954", "0.53213316", "0.53205764", "0.5317805", "0.5316842", "0.5310125", "0.53086257", "0.53034", "0.5303002", "0.529127", "0.5284126", "0.52755696", "0.52716696" ]
0.67453545
0
NewGenerator starts foreground goroutine which generates sequence of unsigned ints and puts them in input channel, also it returnes stop channel which need to be triggered when generator need to be stopped
func NewGenerator(input chan<- uint) chan<- bool { stop := make(chan bool) go func() { var current uint = 1 for { select { case input <- current: current++ case <-stop: close(input) return } } }() return stop }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Generator(done chan struct{}) <-chan int {\n\tout := make(chan int)\n\n\tgo func() {\n\t\tdefer close(out)\n\n\t\tfor i, j := 0, 1; ; i, j = j, i+j {\n\t\t\tselect {\n\t\t\tcase out <- i:\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn out\n}", "func generate() chan int {\n\tch := make(chan int)\n\tgo func() {\n\t\tfor i := 2; ; i++ {\n\t\t\tch <- i\n\t\t}\n\t}()\n\treturn ch\n}", "func generate(source chan<- int) {\n\tfor i := 2; ; i++ {\n\t\tsource <- i // Send 'i' to channel 'source'.\n\t}\n}", "func gen(num int) <-chan int {\n\t// Run Step 1\n\t// Unbufferred Channel\n\tout := make(chan int, 1) //capacity\n\t// Run Step 2\n\tgo func() {\n\t\tfor i := 1; i < num; i++ {\n\t\t\t// Chi day dc vao channel khi ma len < capacity\n\t\t\tout <- i\n\t\t\t// In ra o day co nghia la da push dc\n\t\t\tfmt.Printf(\"\\n[GEN] channel: %d\", i)\n\t\t}\n\t\tclose(out)\n\t}()\n\t// Run Step\n\treturn out\n}", "func generator(nums ...int) <-chan int {\n\tout := make(chan int)\n\n\tgo func() {\n\t\tdefer close(out)\n\t\tfor _, val := range nums {\n\t\t\tout <- val\n\t\t}\n\t}()\n\n\treturn out\n}", "func generate(nums ...int) <-chan int {\n\t// create channel with results\n\tout := make(chan int)\n\n\t// run new gouroutine\n\tgo func() {\n\t\tfor _, n := range nums {\n\t\t\tfmt.Printf(\"Generating value %d \\n\", n)\n\t\t\tout <- n\n\t\t}\n\t\tclose(out)\n\t}()\n\n\t// return result channel immediately (it's empty at that time)\n\treturn out\n}", "func Generate(ch chan<- int) {\n\tfor i := 2; ; i++ {\n\t\tch <- i // send i to ch\n\t}\n}", "func generator2(nums ...int) <-chan int {\n\tres := make(chan int, buffSize)\n\tgo func() {\n\t\tfor _, n := range nums {\n\t\t\tres <- n\n\t\t}\n\t\tclose(res)\n\t}()\n\treturn res\n}", "func generate(a int) (out chan int) {\n\tout = make(chan int)\n\n\tgo func() {\n\t\tdefer close(out)\n\t\tfor i := 1; i <= a; i++ {\n\t\t\tout <- i\n\t\t}\n\t}()\n\n\treturn\n}", "func gen(integers ...int) <-chan int {\n\tout := 
make(chan int)\n\tgo func() {\n\t\tfor _, i := range integers {\n\t\t\tout <- i\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func generate(ch chan int){\n\tfor i:=2;;i++{\n\t\t//Send a number\n\t\tch <-i\n\t}\n}", "func Generate(ch chan<- int) {\n\tfor i := 2; ; i++ {\n\t\tch <- i // Send 'i' to channel 'ch'.\n\t}\n}", "func Generate(ch chan<- int) {\n\tfor i := 2; ; i++ {\n\t\tch <- i // Send 'i' to channel 'ch'.\n\t}\n}", "func gen() <-chan int {\n\tout := make(chan int)\n\tgo func() {\n\t\tfor i := 0; i < 100; i++ {\n\t\t\tfor j := 3; j < 13; j++ {\n\t\t\t\tout <- j\n\t\t\t}\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func gen(nums ...int)<-chan int{\n\tout := make(chan int, len(nums)) //buffered\n\tgo func(){\n\t\tfor _, n := range nums{\n\t\t\tout <- n\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func gen(nums ...int) <-chan int {\n\tout := make(chan int)\n\tgo func() {\n\t\tfor _, n := range nums {\n\t\t\tout <- n\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func gen(nums ...int) <-chan int {\n\tout := make(chan int)\n\tgo func() {\n\t\tfor _, n := range nums {\n\t\t\tout <- n\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func gen(nums ...int) <-chan int {\n\tout := make(chan int)\n\tgo func() {\n\t\tfor _, n := range nums {\n\t\t\tout <- n\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func gen2() <-chan int {\n\tout := make(chan int)\n\tgo func() {\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tfor j := 3; j < 13; j++ {\n\t\t\t\tout <- j\n\t\t\t}\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func NewGenerator(iterations int, concurrency int) *Generator {\n\treturn &Generator{\n\t\tconcurrency: uint32(concurrency),\n\t\tinputChan: make(chan bool),\n\t\tlock: &sync.Mutex{},\n\t\tmaxItems: uint32(iterations),\n\t\tpatterns: []Pattern{},\n\t}\n}", "func gen(done <-chan struct{}, nums ...int) <-chan int {\n\tout := make(chan int, len(nums))\n\tgo func() {\n\t\tfor _, n := range nums {\n\t\t\tselect {\n\t\t\tcase out <- n:\n\t\t\tcase 
<-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func rand_generator() chan int {\n\tout:=make(chan int)\n\tgo func() {\n\t\tr:=rand.New(rand.NewSource(time.Now().UnixNano()))\n\t\tfor {\n\t\t\tout <- r.Intn(100)\n\t\t}\n\t}()\n\treturn out\n}", "func GenerateSerialIntsStream(ctx context.Context) <-chan interface{} {\n\ts := make(chan interface{})\n\tgo func() {\n\t\tdefer close(s)\n\t\tfor i := 0; ; i++ {\n\t\t\tselect {\n\t\t\tcase s <- i:\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn s\n}", "func NewNumberRangeGenerator(i interface{}) (outputChan chan stream.Record, controlChan chan ControlAction) {\n\tcfg := i.(*NumberRangeGeneratorConfig)\n\toutputChan = make(chan stream.Record, c.ChanSize)\n\tcontrolChan = make(chan ControlAction, 1)\n\tif cfg.IntervalSize == 0 {\n\t\tcfg.Log.Panic(cfg.Name, \" aborting due to interval size 0 which causes infinite loop\")\n\t}\n\tgo func() {\n\t\tif cfg.PanicHandlerFn != nil {\n\t\t\tdefer cfg.PanicHandlerFn()\n\t\t}\n\t\tcfg.Log.Info(cfg.Name, \" is running\")\n\t\tif cfg.WaitCounter != nil {\n\t\t\tcfg.WaitCounter.Add()\n\t\t\tdefer cfg.WaitCounter.Done()\n\t\t}\n\t\trowCount := int64(0)\n\t\tif cfg.StepWatcher != nil { // if we have been given a StepWatcher struct that can watch our rowCount and output channel length...\n\t\t\tcfg.StepWatcher.StartWatching(&rowCount, &outputChan)\n\t\t\tdefer cfg.StepWatcher.StopWatching()\n\n\t\t}\n\t\t// Iterate over the input records.\n\t\tsendRow := func(inputRec stream.Record, fromNum *float64, toNum *float64) (rowSentOK bool) {\n\t\t\t// Emit low date and hi date record.\n\t\t\trec := stream.NewRecord()\n\t\t\tif cfg.PassInputFieldsToOutput {\n\t\t\t\tinputRec.CopyTo(rec) // ensure the output record contains the input fields.\n\t\t\t}\n\t\t\tif cfg.OutputLeftPaddedNumZeros > 0 { // if we should output strings with leading zeros...\n\t\t\t\trec.SetData(cfg.OutputChanFieldName4LowNum, fmt.Sprintf(\"%0*.0f\", 
cfg.OutputLeftPaddedNumZeros, *fromNum))\n\t\t\t\trec.SetData(cfg.OutputChanFieldName4HighNum, fmt.Sprintf(\"%0*.0f\", cfg.OutputLeftPaddedNumZeros, *toNum))\n\t\t\t} else {\n\t\t\t\trec.SetData(cfg.OutputChanFieldName4LowNum, *fromNum)\n\t\t\t\trec.SetData(cfg.OutputChanFieldName4HighNum, *toNum)\n\t\t\t}\n\t\t\trowSentOK = safeSend(rec, outputChan, controlChan, sendNilControlResponse) // forward the record\n\t\t\tif rowSentOK {\n\t\t\t\tcfg.Log.Debug(cfg.Name, \" generated: lowNum=\", *fromNum, \"; highNum=\", *toNum)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase controlAction := <-controlChan: // if we have been asked to shutdown...\n\t\t\tcontrolAction.ResponseChan <- nil // respond that we're done with a nil error.\n\t\t\tcfg.Log.Info(cfg.Name, \" shutdown\")\n\t\t\treturn\n\t\tcase rec, ok := <-cfg.InputChan: // for each FromDate record...\n\t\t\tif !ok { // if the input chan was closed...\n\t\t\t\tcfg.InputChan = nil // disable this case.\n\t\t\t} else {\n\t\t\t\tcfg.Log.Info(cfg.Name, \" splitting number range \", rec.GetData(cfg.InputChanFieldName4LowNum), \" to \", rec.GetData(cfg.InputChanFieldName4HighNum), \" using interval value \", cfg.IntervalSize)\n\t\t\t\t// Get the FromDate and ToDate as strings.\n\t\t\t\tfromNumStr := rec.GetDataAsStringPreserveTimeZone(cfg.Log, cfg.InputChanFieldName4LowNum)\n\t\t\t\ttoNumStr := rec.GetDataAsStringPreserveTimeZone(cfg.Log, cfg.InputChanFieldName4HighNum)\n\t\t\t\t// Convert to float(64)\n\t\t\t\tfromNum, err := strconv.ParseFloat(fromNumStr, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcfg.Log.Panic(cfg.Name, \" error parsing input field for low number: \", err)\n\t\t\t\t}\n\t\t\t\ttoNum, err := strconv.ParseFloat(toNumStr, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcfg.Log.Panic(cfg.Name, \" error parsing input field for high number: \", err)\n\t\t\t\t}\n\n\t\t\t\t// Richard 20191011 - old extract field values direct to float:\n\t\t\t\t// fromNum, err := 
getFloat64FromInterface(rec.GetData(cfg.InputChanFieldName4LowNum))\n\t\t\t\t// toNum, err := getFloat64FromInterface(rec.GetData(cfg.InputChanFieldName4HighNum))\n\n\t\t\t\t// Add the increment and emit rows until it is greater than the ToDate.\n\t\t\t\tfor { // while we are outputting less than ToDate...\n\t\t\t\t\tto := fromNum + cfg.IntervalSize\n\t\t\t\t\tif to > toNum { // if this increment overruns the high number...\n\t\t\t\t\t\tbreak // don't output a row!\n\t\t\t\t\t}\n\t\t\t\t\tif rowSentOK := sendRow(rec, &fromNum, &to); !rowSentOK {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tatomic.AddInt64(&rowCount, 1) // increment the row count bearing in mind someone else is reporting on its values.\n\t\t\t\t\tfromNum = to // save FromDate with increment added.\n\t\t\t\t}\n\t\t\t\tif fromNum < toNum || atomic.AddInt64(&rowCount, 0) == 0 {\n\t\t\t\t\t// if we have a final portion of number to output a row for;\n\t\t\t\t\t// or we have not output a row (i.e. when min value = max value)...\n\t\t\t\t\tif rowSentOK := sendRow(rec, &fromNum, &toNum); !rowSentOK { // emit the final gap.\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tatomic.AddInt64(&rowCount, 1) // add a row count.\n\t\t\t\t}\n\t\t\t}\n\t\t\tif cfg.InputChan == nil { // if we processed all data...\n\t\t\t\tbreak // end gracefully.\n\t\t\t}\n\t\t}\n\t\t// Calculate output.\n\t\tclose(outputChan)\n\t\tcfg.Log.Info(cfg.Name, \" complete\")\n\t}()\n\treturn\n}", "func generate(ch chan<- int) {\n\tfor i := 2; i < maxPrimeNum+1; i++ {\n\t\tch <- i\n\t}\n}", "func Start(workerPoolSize int) {\n\tconsumer := Consumer{\n\t\tinputChan: make(chan int, workerPoolSize*10),\n\t\tjobsChan: make(chan int, workerPoolSize),\n\t}\n\n\t//generator := Generator{callbackFunc: consumer.callbackFunc}\n\tgenerator := FiniteGenerator{consumer}\n\tctx, cancelFunc := context.WithCancel(context.Background())\n\twg := &sync.WaitGroup{}\n\n\tgenerator.start()\n\t//go generator.start(ctx)\n\tgo consumer.startConsumer(ctx, 
cancelFunc)\n\n\twg.Add(workerPoolSize)\n\tfor i := 0; i < workerPoolSize; i++ {\n\t\tgo consumer.workerFunc(wg, i)\n\t}\n\n\t// chan for terminated signals\n\ttermChan := make(chan os.Signal, 1)\n\tsignal.Notify(termChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGTSTP)\n\n\tselect {\n\tcase <-termChan:\n\t\t// if terminated\n\t\tfmt.Println(\"=========Shutdown Signal=========\")\n\t\tcancelFunc()\n\tcase <-ctx.Done():\n\t\t// if normally exited\n\t\tfmt.Println(\"=========Normally exited==========\")\n\t}\n\t// Wait until all workers gracefully interupted\n\twg.Wait()\n\n\tfmt.Println(\"==============All workers done!========\")\n}", "func generateIntegers(ctx context.Context, integers chan int) {\n\tn := 0\n\tfor {\n\t\tselect {\n\t\tcase integers <- n:\n\t\t\tn++\n\t\tcase <-ctx.Done(): // React to Context being done. ctx.Done is a channel.\n\t\t\t// We can investigate ctx.Err() to check what has actually happened and act accordingly.\n\t\t\tif ctx.Err() == context.DeadlineExceeded {\n\t\t\t\tfmt.Println(\"Context timed out.\")\n\t\t\t} else if ctx.Err() == context.Canceled {\n\t\t\t\tfmt.Println(\"Context cancelled.\")\n\t\t\t}\n\n\t\t\t// Remember to close the channel to which we will no longer produce data.\n\t\t\tclose(integers) // Try commenting it out and see what happens.\n\t\t\treturn\n\t\tdefault:\n\t\t\tfmt.Println(\"Waiting for something to happen.\")\n\t\t\ttime.Sleep(100 * time.Millisecond) // Always sleep a while in the default case, otherwise you waste CPU cycles.\n\t\t}\n\t}\n}", "func Generator() <-chan int {\n\n\tout := make(chan int, 1)\n\n\tgo func() {\n\n\t\tout <- 2\n\t\tnum := 3\n\t\tcomposites := make(map[int][]int)\n\n\t\tfor {\n\t\t\tif _, ok := composites[num]; !ok {\n\t\t\t\tout <- num\n\t\t\t\tcomposites[num*num] = []int{num}\n\t\t\t} else {\n\t\t\t\tfor _, prime := range composites[num] {\n\t\t\t\t\tnext := num + prime\n\t\t\t\t\tfor next%2 == 0 {\n\t\t\t\t\t\tnext += prime\n\t\t\t\t\t}\n\t\t\t\t\tif _, ok := composites[next]; ok 
{\n\t\t\t\t\t\tcomposites[next] = append(composites[next], prime)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcomposites[next] = []int{prime}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tdelete(composites, num)\n\t\t\t}\n\t\t\tnum += 2\n\t\t}\n\t}()\n\n\treturn out\n\n}", "func NewGenerator(s []string) chan string {\n\tc := make(chan string)\n\tl := len(s)\n\tgo func() {\n\t\tfor {\n\t\t\tif l < 1 {\n\t\t\t\tc <- \"\"\n\t\t\t}\n\t\t\tfor _, ss := range s {\n\t\t\t\tc <- ss\n\t\t\t}\n\t\t}\n\t}()\n\treturn c\n}", "func generator(msg string) <-chan string { // return read-only channel\n\tc := make(chan string)\n\tgo func() {\n\t\tfor i := 0; ; i++ {\n\t\t\tc <- fmt.Sprintf(\"%s %d\", msg, i)\n\t\t\ttime.Sleep(5 * time.Millisecond)\n\t\t}\n\t}()\n\treturn c\n}", "func primeGenerator() chan int64 {\n\tc := make(chan int64)\n\n\tgo func() {\n\n\t\tc <- 2\n\n\t\tfor i := int64(3); ; i += 2 {\n\t\t\tif isPrime(i) {\n\t\t\t\tc <- i\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn c\n}", "func startFibonacci(length int) <-chan int {\n\t// make buffered channel\n\tc := make(chan int, length)\n\t\n\t// run generation concurrently\n\tgo generateFibonacciSequence(c, length)\n\t\n\t// return channel\n\treturn c\n}", "func GenerateRandIntsStream(ctx context.Context) <-chan interface{} {\n\ts := make(chan interface{})\n\tgo func() {\n\t\tdefer close(s)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase s <- rand.Int():\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn s\n}", "func GenerateConcurrent(bitsize int, stop chan struct{}) (<-chan *big.Int, <-chan error) {\n\tcount := runtime.GOMAXPROCS(0)\n\tints := make(chan *big.Int, count)\n\terrs := make(chan error, count)\n\n\t// In order to successfully close all goroutines below when the caller wants them to, they require\n\t// a channel that is close()d: just sending a struct{}{} would stop one but not all goroutines.\n\t// Instead of requiring the caller to close() the stop chan parameter we use our own chan for\n\t// this, so that we always stop all 
goroutines independent of whether the caller close()s stop\n\t// or sends a struct{}{} to it.\n\tstopped := make(chan struct{})\n\tgo func() {\n\t\tselect {\n\t\tcase <-stop:\n\t\t\tclose(stopped)\n\t\tcase <-stopped: // stopped can also be closed by a goroutine that encountered an error\n\t\t}\n\t}()\n\n\t// Start safe prime generation goroutines\n\tfor i := 0; i < count; i++ {\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\t// Pass stopped chan along; if closed, Generate() returns nil, nil\n\t\t\t\tx, err := Generate(bitsize, stopped)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs <- err\n\t\t\t\t\tclose(stopped)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// Only send result and continue generating if we have not been told to stop\n\t\t\t\tselect {\n\t\t\t\tcase <-stopped:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t\tints <- x\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\treturn ints, errs\n}", "func CreateCounter(cxt context.Context) chan int {\n\tdestination := make(chan int)\n\n\tgo func() { //! go routine\n\t\tdefer close(destination)\n\t\tcounter := 1\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <- cxt.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tdestination <- counter\n\t\t\t\tcounter ++\n\t\t\t\ttime.Sleep(1 * time.Second) //! 
simulasi slow\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn destination\n}", "func NewGenerator(options *Options) *Generator {\n\tvar g = &Generator{}\n\t// first we validate the flags\n\tif err := options.Validate(); err != nil {\n\t\tpanic(err)\n\t}\n\tg.options = options\n\t// we initiate the values on the generator\n\tg.init()\n\treturn g\n}", "func main() {\n\tch := make(chan int) // Create a new channel.\n\tgo Generate(ch) // Launch Generate goroutine.\n\tfor i := 0; i < 10; i++ {\n\t\tprime := <-ch\n\t\tfmt.Println(prime)\n\t\tch1 := make(chan int)\n\t\tgo Filter(ch, ch1, prime)\n\t\tch = ch1\n\t}\n}", "func generateGoroutines(done chan bool, numGoroutines int) {\n\tfor i := 0; i < numGoroutines; i++ {\n\t\tgo func(done chan bool) {\n\t\t\t<-done\n\t\t}(done)\n\t}\n}", "func genpipe2(nums ...int) <-chan int {\n\tout := make(chan int, len(nums))\n\tfor _, n := range nums {\n\t\tout <- n\n\t}\n\tclose(out)\n\treturn out\n}", "func genStreams() <-chan <-chan int {\n\tout := make(chan (<-chan int))\n\tgo func() {\n\t\tdefer close(out)\n\t\tfor i := 1; i <= 10; i++ {\n\t\t\tstream := make(chan int, 3)\n\t\t\tstream <- i\n\t\t\tstream <- i + 1\n\t\t\tstream <- i + 2\n\t\t\tclose(stream)\n\t\t\tout <- stream\n\t\t}\n\t}()\n\treturn out\n}", "func mux_rand_generator() chan int {\n\t// create two rand generators\n\tgenerator_1:=rand_generator()\n\tgenerator_2:=rand_generator()\n\n\t// create a mux channel\n\tout:=make(chan int)\n\t// read from generator_1 and integrate to channel out\n\tgo func(){\n\t\tfor{\n\t\t\tfmt.Println(\"read from generator 1\")\n\t\t\tout<-<-generator_1\n\t\t}\n\t}()\n\t// read from generator_2 and integrate to channel out\n\tgo func(){\n\t\tfor{\n\t\t\tfmt.Println(\"read from generator 2\")\n\t\t\tout<-<-generator_2\n\t\t}\n\t}()\n\treturn out\n}", "func makeLoopingOutputDevice(loop chan<- int, output chan<- int) func(int) {\n\treturn func(n int) {\n\t\tloop <- n\n\t\toutput <- n\n\t}\n}", "func gen_points(wg *sync.WaitGroup) (<-chan point) 
{\n\t//counter := 0\n\tout := make(chan point)\n\tgo func(wg *sync.WaitGroup) {\n\t\tfor y := -maxY/2; y < maxY; y++ {\n\t\t\tfor x := -maxX/2; x < maxX; x++ {\n\t\t\t\txx, yy := scale_pixel(x, y)\n\t\t\t\t//fmt.Println(counter)\n\t\t\t\t//counter++\n\t\t\t\tout <- point{xf: xx, yf: yy, xi: x, yi: y}\t\n\t\t\t}//end for\n\t\t}//end for\n\t\tclose(out)\t\n\t}(wg)//end go func\n\twg.Done()\t//This thread is finished\n\treturn out\n}", "func NewGenerator() Generator {\n\treturn Generator{\n\t\tcurrentState: rand.Intn(30),\n\t}\n}", "func mainSieve() {\n\tch := make(chan int) // Create a new channel.\n\tgo Generate(ch) // Launch Generate goroutine.\n\tfor i := 0; i < 10; i++ {\n\t\tprime := <-ch\n\t\tfmt.Println(prime)\n\t\tch1 := make(chan int)\n\t\tgo Filter(ch, ch1, prime)\n\t\tch = ch1\n\t}\n}", "func NewGenerator(hash hash.Hash, f NewCipher) (*Generator, error) {\n\tg := &Generator{\n\t\tkey: make([]byte, keySize, keySize),\n\t\thash: hash,\n\t\tnewCipher: f,\n\t}\n\n\tif err := g.updateCipher(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tctr, err := counter.New(uint(g.cipher.BlockSize()) * byteSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tg.counter = ctr\n\n\tg.buffer = make([]byte, len(g.counter), len(g.counter))\n\n\treturn g, nil\n}", "func generateInputMessages(wg *sync.WaitGroup, termChan chan bool) {\n\tdefer wg.Done()\n\n\tdoTerm := false\n\tticker := time.NewTicker(100 * time.Millisecond)\n\n\tconfig := &kafka.ConfigMap{\n\t\t\"client.id\": \"generator\",\n\t\t\"bootstrap.servers\": bootstrapServers,\n\t\t\"enable.idempotence\": true,\n\t\t\"go.logs.channel.enable\": true,\n\t\t\"go.logs.channel\": logsChan,\n\t}\n\n\tproducer, err := kafka.NewProducer(config)\n\tif err != nil {\n\t\tfatal(err)\n\t}\n\n\ttoppar := kafka.TopicPartition{Topic: &inputTopic, Partition: kafka.PartitionAny}\n\n\taddLog(fmt.Sprintf(\"Generator: producing events to topic %s\", inputTopic))\n\n\tfor !doTerm {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\t// Randomize the 
rate of cars by skipping 20% of ticks.\n\t\t\tif rand.Intn(5) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tsendIngressCarEvent(producer, toppar)\n\n\t\tcase e := <-producer.Events():\n\t\t\t// Handle delivery reports\n\t\t\tm, ok := e.(*kafka.Message)\n\t\t\tif !ok {\n\t\t\t\taddLog(fmt.Sprintf(\"Generator: Ignoring producer event %v\", e))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif m.TopicPartition.Error != nil {\n\t\t\t\taddLog(fmt.Sprintf(\"Generator: Message delivery failed: %v: ignoring\", m.TopicPartition))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\tcase <-termChan:\n\t\t\tdoTerm = true\n\t\t}\n\n\t}\n\n\taddLog(fmt.Sprintf(\"Generator: shutting down\"))\n\tproducer.Close()\n}", "func NewGenerator(length int, elements []int) *Generator {\n\t// Calculate number of possible outcomes.\n\tt := int(math.Pow(float64(len(elements)), float64(length)))\n\n\treturn &Generator{\n\t\telements: elements,\n\t\ttotal: t,\n\t\tposition: make([]int, length),\n\t}\n}", "func SourceRandom(length int) <-chan int {\n\tout := make(chan int, 80000000)\n\tgo func() {\n\t\tfor i := 0; i < length; i++ {\n\t\t\tout <- rand.Int()\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func NewDateRangeGenerator(i interface{}) (outputChan chan stream.Record, controlChan chan ControlAction) {\n\tcfg := i.(*DateRangeGeneratorConfig)\n\toutputChan = make(chan stream.Record, c.ChanSize)\n\tcontrolChan = make(chan ControlAction, 1)\n\tif cfg.IntervalSizeSeconds == 0 {\n\t\tcfg.Log.Panic(cfg.Name, \" aborting due to interval size 0 which causes infinite loop\")\n\t}\n\tgo func() {\n\t\tif cfg.PanicHandlerFn != nil {\n\t\t\tdefer cfg.PanicHandlerFn()\n\t\t}\n\t\tcfg.Log.Info(cfg.Name, \" is running\")\n\t\tif cfg.WaitCounter != nil {\n\t\t\tcfg.WaitCounter.Add()\n\t\t\tdefer cfg.WaitCounter.Done()\n\t\t}\n\t\trowCount := int64(0)\n\t\tif cfg.StepWatcher != nil { // if we have been given a StepWatcher struct that can watch our rowCount and output channel length...\n\t\t\tcfg.StepWatcher.StartWatching(&rowCount, 
&outputChan)\n\t\t\tdefer cfg.StepWatcher.StopWatching()\n\n\t\t}\n\t\t// Build the ToDate.\n\t\tvar toDate time.Time\n\t\tvar err error\n\t\tif cfg.InputChanFieldName4ToDate == \"\" && cfg.ToDateRFC3339orNow == \"\" { // if both possible toDates are missing...\n\t\t\tcfg.Log.Panic(cfg.Name, \" set one of InputChanFieldName4ToDate or a literal in ToDateRFC3339orNow\")\n\t\t}\n\t\tif cfg.InputChanFieldName4ToDate == \"\" { // if we have NOT been given a field name to fetch the toDate from...\n\t\t\t// Now we expect a literal in ToDateRFC3339orNow.\n\t\t\tif regexp.MustCompile(`(?i)(now)`).MatchString(cfg.ToDateRFC3339orNow) { // if we need to fetch Now for the ToDate...\n\t\t\t\ttoDate = time.Now().Truncate(time.Second) // now to the nearest second\n\t\t\t} else { // else we need to parse the ToDate received...\n\t\t\t\ttoDate, err = time.Parse(time.RFC3339, cfg.ToDateRFC3339orNow)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcfg.Log.Panic(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif cfg.UseUTC {\n\t\t\ttoDate = toDate.UTC()\n\t\t}\n\t\t// Iterate over the input records.\n\t\tsendRow := func(inputRec stream.Record, fromDate *time.Time, toDate *time.Time) (rowSentOK bool) {\n\t\t\t// Emit low date and hi date record.\n\t\t\trec := stream.NewRecord()\n\t\t\tif cfg.PassInputFieldsToOutput {\n\t\t\t\tinputRec.CopyTo(rec) // ensure the output record contains the input fields.\n\t\t\t}\n\t\t\trec.SetData(cfg.OutputChanFieldName4LowDate, *fromDate)\n\t\t\trec.SetData(cfg.OutputChanFieldName4HiDate, *toDate)\n\t\t\trowSentOK = safeSend(rec, outputChan, controlChan, sendNilControlResponse) // forward the record\n\t\t\tif rowSentOK {\n\t\t\t\tcfg.Log.Debug(cfg.Name, \" generated: lowDate=\", *fromDate, \"; highDate=\", *toDate)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase controlAction := <-controlChan: // if we have been asked to shutdown...\n\t\t\tcontrolAction.ResponseChan <- nil // respond that we're done with a nil error.\n\t\t\tcfg.Log.Info(cfg.Name, \" 
shutdown\")\n\t\t\treturn\n\t\tcase rec, ok := <-cfg.InputChan: // for each FromDate record...\n\t\t\tif !ok { // if the input chan was closed...\n\t\t\t\tcfg.InputChan = nil // disable this case.\n\t\t\t} else {\n\t\t\t\t// Get the toDate.\n\t\t\t\tif cfg.InputChanFieldName4ToDate != \"\" {\n\t\t\t\t\tvar castOK bool\n\t\t\t\t\ttoDate, castOK = rec.GetData(cfg.InputChanFieldName4ToDate).(time.Time)\n\t\t\t\t\tif !castOK {\n\t\t\t\t\t\tcfg.Log.Panic(cfg.Name, \" unexpected datatype for input field name \", cfg.InputChanFieldName4ToDate, \", expected time.Time\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcfg.Log.Info(cfg.Name, \" splitting date range \", rec.GetData(cfg.InputChanFieldName4FromDate), \" to \", toDate, \" using interval \", cfg.IntervalSizeSeconds, \" seconds\")\n\t\t\t\t// Get the FromDate.\n\t\t\t\tfromDate, err := getTimeFromInterface(rec.GetData(cfg.InputChanFieldName4FromDate))\n\t\t\t\tif err != nil {\n\t\t\t\t\tcfg.Log.Panic(cfg.Name, \" error - \", err)\n\t\t\t\t}\n\t\t\t\t// Add the increment and emit rows until it is greater than the ToDate.\n\t\t\t\tfor { // while we are outputting less than ToDate...\n\t\t\t\t\tto := fromDate.Add(time.Second * time.Duration(cfg.IntervalSizeSeconds))\n\t\t\t\t\tif to.After(toDate) { // if this increment overruns the max date...\n\t\t\t\t\t\tbreak // don't output a row!\n\t\t\t\t\t}\n\t\t\t\t\tif rowSentOK := sendRow(rec, &fromDate, &to); !rowSentOK {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tatomic.AddInt64(&rowCount, 1) // increment the row count bearing in mind someone else is reporting on its values.\n\t\t\t\t\tfromDate = to // save FromDate with increment added.\n\t\t\t\t}\n\t\t\t\tif fromDate.Before(toDate) || atomic.AddInt64(&rowCount, 0) == 0 {\n\t\t\t\t\t// if we have a final portion of time time to output a row for;\n\t\t\t\t\t// or we have not output a row (i.e. 
when min value = max value)...\n\t\t\t\t\tif rowSentOK := sendRow(rec, &fromDate, &toDate); !rowSentOK { // emit the final gap.\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tatomic.AddInt64(&rowCount, 1) // add a row count.\n\t\t\t\t}\n\t\t\t}\n\t\t\tif cfg.InputChan == nil { // if we processed all data...\n\t\t\t\tbreak // end gracefully.\n\t\t\t}\n\t\t}\n\t\t// Calculate output.\n\t\tclose(outputChan)\n\t\tcfg.Log.Info(cfg.Name, \" complete\")\n\t}()\n\treturn\n}", "func main() {\n // a channel is created with the make() helper function\n ch := make(chan int)\n cj := make(chan int)\n xt := make(chan bool) // a semaphore channel\n var wg sync.WaitGroup\n\n go func() {\n defer close(ch)\n for i := 0; i < 10; i++ {\n /*\n putting a value in a channel blocks execution until it is removed.\n To put a value IN, use the arrow pointing towards the channel.\n */\n ch <- i\n }\n }()\n\n /*\n fan-out pattern: single generator, multiple receivers\n using syng.WaitGroup to orchestrate processes\n */\n\n wg.Add(1)\n go func() {\n defer wg.Done()\n for v := range ch {\n // To take a value OUT, use the arrow with its back pointint to the channel.\n fmt.Println(\"foo:\", v)\n }\n }()\n\n wg.Add(1)\n go func() {\n defer wg.Done()\n for v := range ch {\n fmt.Println(\"bar:\", v)\n }\n }()\n\n /*\n fan-in pattern: multiple generator, single receiver.\n The channel must be closed after the generators are done.\n Using a semaphore channel to synchronize processes\n */\n go func() { // first generator: even numbers\n for i := 0; i < 10; i++ {\n if i%2 == 0 {\n cj <- i // blocks execution\n }\n }\n xt <- true // blocks execution\n }()\n\n go func() { // second generator: odd numbers\n for i := 0; i < 10; i++ {\n if i%2 != 0 {\n cj <- i // blocks execution\n }\n }\n xt <- true // blocks execution\n }()\n\n go func() {\n // this go function will terminate once the generators are done and close the channels, without blocking the execution of the receiver.\n <-xt // discards value from 
channel\n <-xt\n close(xt)\n close(cj)\n }()\n\n // receiver\n // range loops until the channel is closed (blocks execution)\n for v := range cj {\n fmt.Println(\"foobar:\", v)\n }\n\n // wait for all go routines to finish\n wg.Wait()\n}", "func startPipelineFunction(numbers chan<- int) {\n\tfor i := 1; i <= 10; i++ {\n\t\tnumbers <- i\n\t}\n\tclose(numbers)\n}", "func NewGenerator(opts Options) Generator {\n\tg := &generator{\n\t\topts: opts,\n\t\tlogger: opts.InstrumentOptions().Logger(),\n\t\tr: rand.New(opts.RandSource()),\n\t\tnumPoints: unifStats{\n\t\t\tmin: opts.MinNumPointsPerID(),\n\t\t\tmax: opts.MaxNumPointsPerID(),\n\t\t},\n\t\tidLength: normStats{\n\t\t\tmean: opts.IDLengthMean(),\n\t\t\tstddev: opts.IDLengthStddev(),\n\t\t},\n\t}\n\n\tfor i := 0; i < opts.NumIDs(); i++ {\n\t\tidLen := g.idLength.sample(g.r)\n\t\tg.ids = append(g.ids, randStringBytesMaskImprSrc(idLen, opts.RandSource()))\n\t}\n\treturn g\n}", "func NewGenerator(opts Options) Generator {\n\tg := &generator{\n\t\topts: opts,\n\t\tlogger: opts.InstrumentOptions().Logger(),\n\t\tr: rand.New(opts.RandSource()),\n\t\tnumPoints: unifStats{\n\t\t\tmin: opts.MinNumPointsPerID(),\n\t\t\tmax: opts.MaxNumPointsPerID(),\n\t\t},\n\t\tidLength: normStats{\n\t\t\tmean: opts.IDLengthMean(),\n\t\t\tstddev: opts.IDLengthStddev(),\n\t\t},\n\t}\n\n\tfor i := 0; i < opts.NumIDs(); i++ {\n\t\tidLen := g.idLength.sample(g.r)\n\t\tg.ids = append(g.ids, randStringBytesMaskImprSrc(idLen, opts.RandSource()))\n\t}\n\treturn g\n}", "func input_job(size int, query_chan chan<- []byte, scope int){\n var random_value int = -1\n for i := 1; i <= size; i++ {\n //get random num between 1 and size\n random_value = rand.Intn(scope) + 1\n //convert the int into []byte\n query_chan <- []byte(strconv.Itoa(random_value))\n fmt.Printf(\"输入随机数: %d\\n\", random_value)\n }\n //the last finished signal \"0\"\n query_chan <- []byte(strconv.Itoa(0))\n fmt.Println(\"随机数输入完成\")\n}", "func newGenerator(h hash.Hash, seed []byte) 
generator {\n\tif h == nil {\n\t\th = sha256.New()\n\t}\n\tb := h.Size()\n\tg := generator{\n\t\tkey: make([]byte, b),\n\t\tcounter: make([]byte, 16),\n\t\tmaxBytesPerRequest: (1 << 15) * b,\n\t\ttemp: make([]byte, b),\n\t\th: h,\n\t}\n\tif len(seed) != 0 {\n\t\t_, _ = g.Write(seed)\n\t}\n\treturn g\n}", "func TestGenerator_NewID(t *testing.T) {\n\tt.Run(\"It generates IDs of the proper length\", func(t *testing.T) {\n\t\twant := 14\n\n\t\tgen, _ := id.NewGenerator(id.WithLength(want))\n\t\tid, err := gen.NewID()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%v\", err)\n\t\t}\n\n\t\tif got := len(id); got != want {\n\t\t\tt.Fatalf(\"got: %d want: %d\", got, want)\n\t\t}\n\t})\n\n\tt.Run(\"It generates only unique ids\", func(t *testing.T) {\n\t\twant := 10000\n\t\tres := make(chan string, 10000)\n\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(10000)\n\t\tgo func() {\n\t\t\twg.Wait()\n\t\t\tclose(res)\n\t\t}()\n\n\t\t// 100 goroutines each generating 100 id's for a total of 10000\n\t\tgen, _ := id.NewGenerator()\n\t\tvar i int\n\t\tfor i < 100 {\n\t\t\tgo func() {\n\t\t\t\tvar n int\n\t\t\t\tfor n < 100 {\n\t\t\t\t\tid, err := gen.NewID()\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Fatalf(\"%v\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\tres <- id\n\t\t\t\t\tn++\n\t\t\t\t\twg.Done()\n\t\t\t\t}\n\t\t\t}()\n\t\t\ti++\n\t\t}\n\n\t\tm := map[string]struct{}{}\n\t\tfor id := range res {\n\t\t\tm[id] = struct{}{}\n\t\t}\n\n\t\tif got := len(m); got != want {\n\t\t\tt.Fatalf(\"got: %d want: %d\", got, want)\n\t\t}\n\t})\n\n}", "func main(){\n\trand.Seed(time.Now().UnixNano())\n\n\tlog.Println(\"START\")\n\tstartTime := time.Now()\n\tc := make(chan int, 5)\n\tfor i:= 0; i < cap(c); i++ {\n\t\tgo source(c)\n\t}\n\trnd := <-c\n\tfmt.Println(time.Since(startTime))\n\tfmt.Println(\"result\", rnd)\n\ttime.Sleep(20 * time.Second)\n\tlog.Println(\"END\")\n}", "func NewGenerator(seed int64) *Generator {\n\tg := &Generator{\n\t\tseed: seed,\n\t\tnoise: opensimplex.New(seed),\n\t}\n\n\treturn g\n}", "func 
generator() {\r\n\r\n flagGo = true\r\n i := 10\r\n for i>1 { //Infinite loop\r\n var sample [samp_len] int //Array to save vaules\r\n sample = getValues() //Getting sample values\r\n writeFile(sample) //Writing samples to file\r\n time.Sleep(time.Second) //Waiting one second\r\n }\r\n}", "func Run(upTo int, printPrime bool) {\n\tsource := make(chan int) // Create a new channel.\n\tgo generate(source) // Launch Generate goroutine.\n\tfor i := 0; i < upTo; i++ {\n\t\tprime := <-source\n\t\tif printPrime {\n\t\t\tfmt.Println(i+1, prime)\n\t\t}\n\t\tdestination := make(chan int)\n\t\tgo filter(source, destination, prime) // launch filter in its own gorouting\n\t\tsource = destination\n\t}\n}", "func GetIntChan()<-chan int{\r\n\tnum:=5\r\n\tch :=make(chan int,num)\r\n\tfor i:=0;i<num;i++{\r\n\t\tch <- i\r\n\t}\r\n\tclose(ch)\r\n\treturn ch\r\n}", "func NewGenerator(i *GeneratorInput) (*Generator, error) {\n\tif i == nil {\n\t\ti = new(GeneratorInput)\n\t}\n\n\tg := &Generator{\n\t\tlowerLetters: i.LowerLetters,\n\t\tupperLetters: i.UpperLetters,\n\t\tdigits: i.Digits,\n\t\tsymbols: i.Symbols,\n\t}\n\n\tif g.lowerLetters == \"\" {\n\t\tg.lowerLetters = LowerLetters\n\t}\n\n\tif g.upperLetters == \"\" {\n\t\tg.upperLetters = UpperLetters\n\t}\n\n\tif g.digits == \"\" {\n\t\tg.digits = Digits\n\t}\n\n\tif g.symbols == \"\" {\n\t\tg.symbols = Symbols\n\t}\n\n\treturn g, nil\n}", "func GenerateRepeatStream(ctx context.Context, list ...interface{}) <-chan interface{} {\n\ts := make(chan interface{})\n\tgo func() {\n\t\tdefer close(s)\n\t\tfor {\n\t\t\tfor i := 0; i < len(list); i++ {\n\t\t\t\tselect {\n\t\t\t\tcase s <- list[i]:\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn s\n}", "func Generate(ctx context.Context, fn MsgGenFunc, msgPeriod time.Duration) <-chan *osc.Message {\n\tch := make(chan *osc.Message)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tclose(ch)\n\t\t\t\treturn\n\t\t\tcase 
<-time.After(msgPeriod):\n\t\t\t\tch <- fn()\n\t\t\t}\n\t\t}\n\t}()\n\treturn ch\n}", "func FibonacciGen() chan int {\n\tc := make(chan int)\n\tgo func() {\n\t\tfor i, j := 0, 1; ; i, j = i+j, i {\n\t\t\tc <- i\n\t\t}\n\t}()\n\treturn c\n}", "func generator4(msg string) <-chan string {\n\tc := make(chan string)\n\tgo func() {\n\t\tfor i := 0; i < 5; i++ {\n\t\t\tc <- fmt.Sprintf(\"calling %s %d\", msg, i)\n\t\t\ttime.Sleep(time.Duration(rand.Intn(500)) * time.Millisecond)\n\t\t}\n\t}()\n\treturn c\n}", "func gen(accounts ...string) <-chan string {\n\tout := make(chan string)\n\tgo func() {\n\t\tfor _, account := range accounts {\n\t\t\tout <- account\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func NewGenerator() GeneratorFunc {\n\t// Generate security rules\n\t// * cannot be a repeated digit\n\t// * cannot be a suite of following digits (ascending and descending)\n\t// * TODO cannot be in the restricted codes\n\t// So basicaly i randomly pick one number as the first digit and next digit\n\t// cannot be the same, the previous or the next\n\tfilter := compose(\n\t\tnocurrent,\n\t\tnoprevious,\n\t\tnonext,\n\t)\n\treturn func() string {\n\t\tvar charset = []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}\n\t\tpin := make([]int, 4)\n\t\tvar current = charset[random.Intn(len(charset))]\n\t\tpin[0] = current\n\n\t\tfor i := 0; i < 3; i++ {\n\t\t\t// TODO i do not like that, need another way to do that\n\t\t\tcharset = []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}\n\t\t\tcurrent = in(filter(charset, current))\n\t\t\tpin[i+1] = current\n\t\t}\n\t\treturn strings.Trim(strings.Join(strings.Split(fmt.Sprint(pin), \" \"), \"\"), \"[]\")\n\t}\n}", "func EvenGenerator(done chan struct{}) <-chan int {\n\tout := make(chan int)\n\n\tgo func() {\n\t\tdefer close(out)\n\n\t\tfor i, j := 2, 8; ; i, j = j, 4*j+i {\n\t\t\tselect {\n\t\t\tcase out <- i:\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn out\n}", "func GenerateServiceRequest() <-chan Sig { // returns a receive only 
channel of string\n\tchannel := make(chan Sig)\n\n\tgo func() {\n\t\ttime.Sleep(2 * time.Second) //wait a while before starting\n\t\tIdentity := 0\n\t\t//msisdn := rand.Int()\n\n\t\t// Send CCR every x*rand seconds\n\t\tfor {\n\t\t\t// create random fake MSISDN\n\t\t\tmsisdn := random(1000000, 4999999)\n\t\t\tmsisdn = msisdn + 46702000000\n\n\t\t\tIdentity = Identity + 1\n\n\t\t\tinfoElem := Sig{\n\t\t\t\tCode: \"CCR\",\n\t\t\t\tmsisdn: msisdn,\n\t\t\t\tIdentity: Identity,\n\t\t\t}\n\n\t\t\tsleeptime := time.Second * time.Duration(rand.Intn(10))\n\t\t\tlog.Printf(\"Time until next CCR: %s\", sleeptime)\n\t\t\ttime.Sleep(sleeptime)\n\t\t\tchannel <- infoElem\n\t\t}\n\t}()\n\treturn channel\n}", "func (r *Runner) generate(output chan Result, wg *sizedwaitgroup.SizedWaitGroup) {\n\tif r.options.TargetUrl != \"\" {\n\t\tlog.Info(fmt.Sprintf(\"single target: %s\", r.options.TargetUrl))\n\t\twg.Add()\n\t\tgo r.process(output, r.options.TargetUrl, wg)\n\t} else {\n\t\turls, err := ReadFile(r.options.UrlFile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Cann't read url file\")\n\t\t} else {\n\t\t\tlog.Info(fmt.Sprintf(\"Read %d's url totaly\", len(urls)))\n\t\t\tfor _, u := range urls {\n\t\t\t\twg.Add()\n\t\t\t\tgo r.process(output, u, wg)\n\t\t\t}\n\t\t}\n\t}\n}", "func createSignalGenerator() Unit {\n\n\t/*\n\t * Create effects unit.\n\t */\n\tu := signalGenerator{\n\t\tunitStruct: unitStruct{\n\t\t\tunitType: UNIT_SIGNALGENERATOR,\n\t\t\tparams: []Parameter{\n\t\t\t\tParameter{\n\t\t\t\t\tName: \"input_amplitude\",\n\t\t\t\t\tType: PARAMETER_TYPE_NUMERIC,\n\t\t\t\t\tPhysicalUnit: \"%\",\n\t\t\t\t\tMinimum: 0,\n\t\t\t\t\tMaximum: 100,\n\t\t\t\t\tNumericValue: 100,\n\t\t\t\t\tDiscreteValueIndex: -1,\n\t\t\t\t\tDiscreteValues: nil,\n\t\t\t\t},\n\t\t\t\tParameter{\n\t\t\t\t\tName: \"input_gain\",\n\t\t\t\t\tType: PARAMETER_TYPE_NUMERIC,\n\t\t\t\t\tPhysicalUnit: \"dB\",\n\t\t\t\t\tMinimum: -60,\n\t\t\t\t\tMaximum: 0,\n\t\t\t\t\tNumericValue: 0,\n\t\t\t\t\tDiscreteValueIndex: 
-1,\n\t\t\t\t\tDiscreteValues: nil,\n\t\t\t\t},\n\t\t\t\tParameter{\n\t\t\t\t\tName: \"signal_type\",\n\t\t\t\t\tType: PARAMETER_TYPE_DISCRETE,\n\t\t\t\t\tPhysicalUnit: \"\",\n\t\t\t\t\tMinimum: -1,\n\t\t\t\t\tMaximum: -1,\n\t\t\t\t\tNumericValue: -1,\n\t\t\t\t\tDiscreteValueIndex: 0,\n\t\t\t\t\tDiscreteValues: []string{\n\t\t\t\t\t\t\"sine\",\n\t\t\t\t\t\t\"triangle\",\n\t\t\t\t\t\t\"square\",\n\t\t\t\t\t\t\"sawtooth\",\n\t\t\t\t\t\t\"noise\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tParameter{\n\t\t\t\t\tName: \"signal_frequency\",\n\t\t\t\t\tType: PARAMETER_TYPE_NUMERIC,\n\t\t\t\t\tPhysicalUnit: \"Hz\",\n\t\t\t\t\tMinimum: 1,\n\t\t\t\t\tMaximum: 20000,\n\t\t\t\t\tNumericValue: 440,\n\t\t\t\t\tDiscreteValueIndex: -1,\n\t\t\t\t\tDiscreteValues: nil,\n\t\t\t\t},\n\t\t\t\tParameter{\n\t\t\t\t\tName: \"signal_amplitude\",\n\t\t\t\t\tType: PARAMETER_TYPE_NUMERIC,\n\t\t\t\t\tPhysicalUnit: \"%\",\n\t\t\t\t\tMinimum: 0,\n\t\t\t\t\tMaximum: 100,\n\t\t\t\t\tNumericValue: 100,\n\t\t\t\t\tDiscreteValueIndex: -1,\n\t\t\t\t\tDiscreteValues: nil,\n\t\t\t\t},\n\t\t\t\tParameter{\n\t\t\t\t\tName: \"signal_gain\",\n\t\t\t\t\tType: PARAMETER_TYPE_NUMERIC,\n\t\t\t\t\tPhysicalUnit: \"dB\",\n\t\t\t\t\tMinimum: -60,\n\t\t\t\t\tMaximum: 0,\n\t\t\t\t\tNumericValue: 0,\n\t\t\t\t\tDiscreteValueIndex: -1,\n\t\t\t\t\tDiscreteValues: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn &u\n}", "func NewGeneratorBTC() Generator { return generatorBTC{} }", "func (p *password) Gen() <-chan []byte {\n\tch := make(chan []byte, p.options.Generate)\n\tgo p.gen(ch)\n\treturn ch\n}", "func main() {\n\tgo produce()\n\tgo consume()\n\t<-done\n}", "func SourceData(data ...int) <-chan int {\n\tfmt.Println(\"num:\", len(data))\n\tch := make(chan int, 80000000)\n\tgo func() {\n\t\tfor _, v := range data {\n\t\t\tch <- v\n\t\t}\n\t\tclose(ch)\n\t}()\n\treturn ch\n}", "func NewClockGenerator(clock Clicker, d time.Duration, bufferSize ...uint16) (clockGenerator *ClockGenerator) {\n\tvar (\n\t\tnextAt = 
clock.Now().Truncate(d)\n\t)\n\tclockGenerator = &ClockGenerator{active: true}\n\tif len(bufferSize) < 1 {\n\t\tclockGenerator.ch = make(chan time.Time)\n\t} else {\n\t\tclockGenerator.ch = make(chan time.Time, bufferSize[0])\n\t}\n\tgo func() {\n\t\tfor {\n\t\t\tvar now = clock.Now()\n\t\t\tif nextAt.After(now) {\n\t\t\t\tvar sleepDuration = nextAt.Sub(now)\n\t\t\t\ttime.Sleep(sleepDuration)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar lastClockedAt = nextAt\n\t\t\tnextAt = nextAt.Add(d)\n\t\t\tif !clockGenerator.active {\n\t\t\t\tclose(clockGenerator.ch)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tclockGenerator.ch <- lastClockedAt\n\t\t}\n\t}()\n\treturn\n}", "func NewGenerator() *Generator {\n\treturn &Generator{\n\t\ttpls: []string{},\n\t\tfuncs: map[string]interface{}{},\n\t\tdata: map[string]interface{}{},\n\t}\n}", "func NewGenerator(opts ...Option) *Generator {\n\tg := &Generator{}\n\n\tfor _, opt := range opts {\n\t\topt.apply(g)\n\t}\n\n\t// Default time source\n\tif g.clock == nil {\n\t\tg.clock = &systemClock{}\n\t}\n\n\t// Default entropy source\n\tif g.entropy == nil {\n\t\tg.entropy = ulid.Monotonic(rand.New(rand.NewSource(g.clock.Now().UnixNano())), 0)\n\t}\n\n\treturn g\n}", "func produceRandomNumber(c chan int) {\n\tfmt.Printf(\"About to send a random number to the channel.\\n\\n\")\n\t// Let's send something to the channel. The arrow operator shows the direction of data flow.\n\t// The call blocks until the receiver part is ready in case of non-buffered channels.\n\t// For buffered channels, they block only when buffer is full.\n\tc <- rand.Int() % 100 // Side note: every time we run the program we receive the same numbers - why?\n\tfmt.Println(\"! 
Successfully sent the random number to the channel.\")\n}", "func MockGen(c *gin.Context) {\n\tlog.Info(\"Mock Generator started\")\n\tvar id = \"3b-6cfc0958d2fb\"\n\tdevice := c.Param(\"device\")\n\tchannel := c.Param(\"channel\")\n\ttopic := \"/\" + device + \"/\" + channel\n\tlog.Info(\"Sending messages to topic: \", topic)\n\tticker := time.NewTicker(1 * time.Second)\n\tvar datum = make(map[string]interface{}, 2)\n\t//var data = make(map[string]interface{}, 1)\n\tvar temps = make(map[string]interface{}, 3)\n\n\tclientGone := c.Writer.CloseNotify()\n\tbuffer := make(chan string, 100)\n\tgo func() {\n\t\tfor range ticker.C {\n\t\t\trand.Seed(time.Now().UnixNano())\n\t\t\tdatum[\"timestamp\"] = time.Now().UnixNano() / int64(time.Millisecond)\n\t\t\ttemps[\"id\"] = id\n\t\t\ttemps[\"f\"] = rand.Intn(300-50) + 50\n\t\t\ttemps[\"c\"] = rand.Intn(150-20) + 20\n\t\t\tdatum[\"data\"] = temps\n\t\t\tjsondata, err := json.Marshal(datum)\n\t\t\tlog.Info(\"Generated message\", string(jsondata))\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase buffer <- string(jsondata):\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t}()\n\tc.Stream(func(w io.Writer) bool {\n\t\tselect {\n\t\tcase <-clientGone:\n\t\t\tlog.Info(\"Stopping generator\")\n\t\t\tticker.Stop()\n\t\t\treturn true\n\t\tcase message := <-buffer:\n\t\t\tc.JSON(200, message)\n\t\t\tc.String(200, \"\\n\")\n\t\t\t//c.SSEvent(\"\", message)\n\t\t\treturn true\n\t\t}\n\t})\n}", "func GenerateCSeq() int {\n\treturn rand.Int() % 50000\n}", "func epochRangeGen(epochs string) chan uint64 {\n\tc := make(chan uint64)\n\n\t// string parser for extracting epoch range\n\tgo func(input string) {\n\t\tfor _, part := range strings.Split(input, \",\") {\n\t\t\tif i := strings.Index(part[1:], \"-\"); i == -1 {\n\t\t\t\tn, err := strconv.ParseUint(part, 10, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tc <- n\n\t\t\t} else {\n\t\t\t\tn1, err := 
strconv.ParseUint(part[:i+1], 10, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tn2, err := strconv.ParseUint(part[i+2:], 10, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif n2 < n1 {\n\t\t\t\t\tfmt.Printf(\"Invalid range %d-%d\\n\", n1, n2)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tfor ii := n1; ii <= n2; ii++ {\n\t\t\t\t\tc <- ii\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tclose(c)\n\t}(epochs)\n\treturn c\n}", "func New(workerID uint64) (Snowflake, error) {\n\tif workerID < 0 || workerID > maxWorkerID {\n\t\treturn nil, ErrInvalidWorkerID\n\t}\n\n\tsf := make(chan int64)\n\tgo generator(workerID, sf)\n\treturn sf, nil\n}", "func New(packetChannel chan string) *PacketGenerator {\n\tpacketGenerator := new(PacketGenerator)\n\treturn packetGenerator\n}", "func New(w io.Writer) *Generator {\n return &Generator{w}\n}", "func UnknownThenGoldGenerator(player *Player, actionPipe chan Action, needMovesSignal chan bool) {\n moveGeneratorTemplate(player, actionPipe, needMovesSignal, unknownThenGoldGenerator)\n}", "func CreateEvenGenerator() func() uint {\n\ti := uint(0)\n\treturn func() (retVal uint) {\n\t\tretVal = i\n\t\ti += 2\n\t\treturn\n\t}\n}", "func (gs *GenServer) ProcessLoop(pcs procChannels, pd Process, args ...interface{}) {\n\tstate := pd.(GenServerInt).Init(args...)\n\tgs.state = state\n\tpcs.init <- true\n\tvar chstop chan int\n\tchstop = make(chan int)\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Printf(\"GenServerInt recovered: %#v\", r)\n\t\t}\n\t}()\n\tfor {\n\t\tvar message etf.Term\n\t\tvar fromPid etf.Pid\n\t\tselect {\n\t\tcase reason := <-chstop:\n\t\t\tpd.(GenServerInt).Terminate(reason, gs.state)\n\t\tcase msg := <-pcs.in:\n\t\t\tmessage = msg\n\t\tcase msgFrom := <-pcs.inFrom:\n\t\t\tmessage = msgFrom[1]\n\t\t\tfromPid = msgFrom[0].(etf.Pid)\n\n\t\t}\n\t\tlib.Log(\"[%#v]. 
Message from %#v\\n\", gs.Self, fromPid)\n\t\tswitch m := message.(type) {\n\t\tcase etf.Tuple:\n\t\t\tswitch mtag := m[0].(type) {\n\t\t\tcase etf.Atom:\n\t\t\t\tgs.lock.Lock()\n\t\t\t\tswitch mtag {\n\t\t\t\tcase etf.Atom(\"$gen_call\"):\n\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tfromTuple := m[1].(etf.Tuple)\n\t\t\t\t\t\tcode, reply, state1 := pd.(GenServerInt).HandleCall(&fromTuple, &m[2], gs.state)\n\n\t\t\t\t\t\tgs.state = state1\n\t\t\t\t\t\tgs.lock.Unlock()\n\t\t\t\t\t\tif code < 0 {\n\t\t\t\t\t\t\tchstop <- code\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif reply != nil && code == 1 {\n\t\t\t\t\t\t\tpid := fromTuple[0].(etf.Pid)\n\t\t\t\t\t\t\tref := fromTuple[1]\n\t\t\t\t\t\t\trep := etf.Term(etf.Tuple{ref, *reply})\n\t\t\t\t\t\t\tgs.Send(pid, &rep)\n\t\t\t\t\t\t}\n\t\t\t\t\t}()\n\t\t\t\tcase etf.Atom(\"$gen_cast\"):\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tcode, state1 := pd.(GenServerInt).HandleCast(&m[1], gs.state)\n\t\t\t\t\t\tgs.state = state1\n\t\t\t\t\t\tgs.lock.Unlock()\n\t\t\t\t\t\tif code < 0 {\n\t\t\t\t\t\t\tchstop <- code\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}()\n\t\t\t\tdefault:\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tcode, state1 := pd.(GenServerInt).HandleInfo(&message, gs.state)\n\t\t\t\t\t\tgs.state = state1\n\t\t\t\t\t\tgs.lock.Unlock()\n\t\t\t\t\t\tif code < 0 {\n\t\t\t\t\t\t\tchstop <- code\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}()\n\t\t\t\t}\n\t\t\tcase etf.Ref:\n\t\t\t\tlib.Log(\"got reply: %#v\\n%#v\", mtag, message)\n\t\t\t\tgs.chreply <- &m\n\t\t\tdefault:\n\t\t\t\tlib.Log(\"mtag: %#v\", mtag)\n\t\t\t\tgs.lock.Lock()\n\t\t\t\tgo func() {\n\t\t\t\t\tcode, state1 := pd.(GenServerInt).HandleInfo(&message, gs.state)\n\t\t\t\t\tgs.state = state1\n\t\t\t\t\tgs.lock.Unlock()\n\t\t\t\t\tif code < 0 {\n\t\t\t\t\t\tchstop <- code\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t}\n\t\tdefault:\n\t\t\tlib.Log(\"m: %#v\", m)\n\t\t\tgs.lock.Lock()\n\t\t\tgo func() {\n\t\t\t\tcode, state1 := 
pd.(GenServerInt).HandleInfo(&message, gs.state)\n\t\t\t\tgs.state = state1\n\t\t\t\tgs.lock.Unlock()\n\t\t\t\tif code < 0 {\n\t\t\t\t\tchstop <- code\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n}", "func asChan(vs ...int) <-chan int {\n\tc := make(chan int)\n\tgo func() {\n\t\tdefer close(c)\n\t\tfor _, v := range vs {\n\t\t\tc <- v\n\t\t\ttime.Sleep(time.Duration(rand.Intn(1000)) * time.Millisecond)\n\t\t}\n\t\t//close(c)\n\t}()\n\treturn c\n}", "func fibonacciGen(n int, c chan int) {\n\tx, y := 0, 1\n\tfor i := 0; i < n; i++ {\n\t\tc <- x\n\t\tx, y = y, x+y\n\t}\n\tclose(c)\n}", "func fibonacciGen(n int, c chan int) {\n\tx, y := 0, 1\n\tfor i := 0; i < n; i++ {\n\t\tc <- x\n\t\tx, y = y, x+y\n\t}\n\tclose(c)\n}", "func GoldThenUnknownGenerator(player *Player, actionPipe chan Action, needMovesSignal chan bool) {\n moveGeneratorTemplate(player, actionPipe, needMovesSignal, goldThenUnknownGenerator)\n}", "func getNumberWithReturnChan() <-chan int {\n\t// create the channel\n\tc := make(chan int)\n\t// 3.0\n\tgo func() {\n\t\t// push the result into the channel\n\t\tc <- 5\n\t}()\n\n\t// 3.1\n\t// go func() {\n\t// \tfor i := 0; i < 3; i++ {\n\t// \t\tc <- i\n\t// \t}\n\t// \tclose(c)\n\t// }()\n\t// immediately return the channel\n\treturn c\n}", "func newLoop() *loop {\n\treturn &loop{\n\t\tinputCh: make(chan event, inputChSize),\n\t\thandleCb: dummyHandleCb,\n\t\tredrawCb: dummyRedrawCb,\n\n\t\tredrawCh: make(chan struct{}, 1),\n\t\tredrawFull: false,\n\t\tredrawMutex: new(sync.Mutex),\n\n\t\treturnCh: make(chan loopReturn, 1),\n\t}\n}", "func main() {\n c := make(chan int)\n // use go routine\n go func() {\n for i := 0; i < 10; i++ {\n c <- i\n }\n close(c)\n }()\n\n for n := range c {\n fmt.Println(n)\n }\n}", "func NewGenerator(h hash.Hash, seed []byte) io.ReadWriter {\n\tg := newGenerator(h, seed)\n\treturn &g\n}", "func newClockChan(d time.Duration) <-chan chan struct{} {\n\tch := make(chan chan struct{}, 1)\n\tgo func() { time.Sleep(d); ch <- 
make(chan struct{}) }()\n\treturn ch\n}", "func (a API) Generate(cmd *None) (e error) {\n\tRPCHandlers[\"generate\"].Call <-API{a.Ch, cmd, nil}\n\treturn\n}" ]
[ "0.6641966", "0.6627279", "0.65606636", "0.6544673", "0.6534481", "0.646908", "0.63416237", "0.6290985", "0.62610847", "0.6223949", "0.6180005", "0.6167735", "0.6167735", "0.616628", "0.6149821", "0.61059165", "0.61059165", "0.61059165", "0.6101464", "0.6064095", "0.6044233", "0.6017054", "0.5865704", "0.58400893", "0.5803619", "0.578187", "0.5760828", "0.5754095", "0.5753378", "0.56718373", "0.5655918", "0.5632857", "0.5616397", "0.56016785", "0.5582741", "0.5567577", "0.55657136", "0.5511396", "0.54784095", "0.5455717", "0.5391998", "0.5332831", "0.53315556", "0.5327547", "0.5302465", "0.52739376", "0.52546537", "0.52531594", "0.5238203", "0.5227085", "0.52116483", "0.5189294", "0.5186698", "0.5186698", "0.51843405", "0.5133452", "0.5095635", "0.50842565", "0.5074171", "0.5070068", "0.506947", "0.50529903", "0.5034915", "0.5028603", "0.5027213", "0.5018824", "0.5016938", "0.50026834", "0.49923384", "0.49823302", "0.4977874", "0.49776137", "0.49761683", "0.49640006", "0.49590576", "0.4952177", "0.49441683", "0.49360406", "0.4916259", "0.49099314", "0.4907861", "0.49014357", "0.48985213", "0.4897294", "0.4895451", "0.48948127", "0.48747456", "0.48690814", "0.48582467", "0.485305", "0.4849425", "0.4839249", "0.4839249", "0.48257735", "0.48240194", "0.48218974", "0.48006067", "0.48002237", "0.4784323", "0.47837338" ]
0.7869876
0
Asset loads and returns the asset for the given name. It returns an error if the asset could not be found or could not be loaded.
func Asset(name string) ([]byte, error) { cannonicalName := strings.Replace(name, "\\", "/", -1) if f, ok := _bindata[cannonicalName]; ok { a, err := f() if err != nil { return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) } return a.bytes, nil } return nil, fmt.Errorf("Asset %s not found", name) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Asset(name string) ([]byte, error) {\n cannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n if f, ok := _bindata[cannonicalName]; ok {\n a, err := f()\n if err != nil {\n return nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n }\n return a.bytes, nil\n }\n return nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func (model *GrogModel) GetAsset(name string) (*Asset, error) {\n\tvar foundAsset *Asset\n\tvar mimeType string\n\tvar content = make([]byte, 0)\n\tvar serveExternal int64\n\tvar rendered int64\n\tvar added int64\n\tvar modified int64\n\tvar err error\n\n\trow := model.db.DB.QueryRow(`select mimeType, content, serve_external, rendered,\n\t\tadded, modified from Assets where name = ?`, name)\n\tif row.Scan(&mimeType, &content, &serveExternal, &rendered, &added, &modified) != sql.ErrNoRows {\n\t\tfoundAsset = model.NewAsset(name, mimeType)\n\t\tfoundAsset.Content = content\n\t\tif serveExternal == 1 {\n\t\t\tfoundAsset.ServeExternal = true\n\t\t} else {\n\t\t\tfoundAsset.ServeExternal = false\n\t\t}\n\n\t\tif rendered == 1 {\n\t\t\tfoundAsset.Rendered = true\n\t\t} else {\n\t\t\tfoundAsset.Rendered = false\n\t\t}\n\n\t\tfoundAsset.Added.Set(time.Unix(added, 0))\n\t\tfoundAsset.Modified.Set(time.Unix(modified, 0))\n\t} else {\n\t\terr = fmt.Errorf(\"No asset with name %s\", name)\n\t}\n\n\treturn foundAsset, err\n}", "func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}", "func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, 
fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}", "func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}", "func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}", "func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}", "func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}", "func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", 
name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}", "func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}", "func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}", "func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}", "func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}", "func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, 
nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}", "func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}", "func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}", "func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}", "func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}", "func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: 
\"open\", Path: name, Err: os.ErrNotExist}\n}", "func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, 
\"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read 
by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, 
error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil 
{\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not 
found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := 
_bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, 
err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) 
{\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil 
{\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not 
found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}", "func Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}" ]
[ "0.7347324", "0.7267579", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763", "0.70949763" ]
0.0
-1
MustAsset is like Asset but panics when Asset would return an error. It simplifies safe initialization of global variables.
func MustAsset(name string) []byte { a, err := Asset(name) if err != nil { panic("asset: Asset(" + name + "): " + err.Error()) } return a }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + 
err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: 
Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}" ]
[ "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818" ]
0.0
-1
AssetInfo loads and returns the asset info for the given name. It returns an error if the asset could not be found or could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) { cannonicalName := strings.Replace(name, "\\", "/", -1) if f, ok := _bindata[cannonicalName]; ok { a, err := f() if err != nil { return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) } return a.info, nil } return nil, fmt.Errorf("AssetInfo %s not found", name) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := 
strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := 
_bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil 
{\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, 
err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo 
%s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) 
{\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := 
_bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil 
{\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, 
err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo 
%s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}", "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}" ]
[ "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746" ]
0.0
-1
AssetNames returns the names of the assets.
func AssetNames() []string { names := make([]string, 0, len(_bindata)) for name := range _bindata { names = append(names, name) } return names }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[]
[]
0.0
-1
RestoreAsset restores an asset under the given directory
func RestoreAsset(dir, name string) error { data, err := Asset(name) if err != nil { return err } info, err := AssetInfo(name) if err != nil { return err } err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) if err != nil { return err } err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) if err != nil { return err } err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) if err != nil { return err } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, path.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, path.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, path.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, path.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil 
{\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}" ]
[ "0.80596304", "0.80596304", "0.80596304", "0.80596304" ]
0.0
-1
RestoreAssets restores an asset under the given directory recursively
func RestoreAssets(dir, name string) error { children, err := AssetDir(name) // File if err != nil { return RestoreAsset(dir, name) } // Dir for _, child := range children { err = RestoreAssets(dir, filepath.Join(name, child)) if err != nil { return err } } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n // File\n if err != nil {\n return RestoreAsset(dir, name)\n }\n // Dir\n for _, child := range children {\n err = RestoreAssets(dir, path.Join(name, child))\n if err != nil {\n return err\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n // File\n if err != nil {\n return RestoreAsset(dir, name)\n }\n // Dir\n for _, child := range children {\n err = RestoreAssets(dir, path.Join(name, child))\n if err != nil {\n return err\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n // File\n if err != nil {\n return RestoreAsset(dir, name)\n }\n // Dir\n for _, child := range children {\n err = RestoreAssets(dir, path.Join(name, child))\n if err != nil {\n return err\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n // File\n if err != nil {\n return RestoreAsset(dir, name)\n }\n // Dir\n for _, child := range children {\n err = RestoreAssets(dir, path.Join(name, child))\n if err != nil {\n return err\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n // File\n if err != nil {\n return RestoreAsset(dir, name)\n }\n // Dir\n for _, child := range children {\n err = RestoreAssets(dir, path.Join(name, child))\n if err != nil {\n return err\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n if err != nil { // File\n return RestoreAsset(dir, name)\n } else { // Dir\n for _, child := range children {\n err = RestoreAssets(dir, path.Join(name, child))\n if err != nil {\n return err\n }\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n if err != nil { // File\n return RestoreAsset(dir, name)\n } else { // Dir\n for _, child := range children {\n err = 
RestoreAssets(dir, path.Join(name, child))\n if err != nil {\n return err\n }\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n if err != nil { // File\n return RestoreAsset(dir, name)\n } else { // Dir\n for _, child := range children {\n err = RestoreAssets(dir, path.Join(name, child))\n if err != nil {\n return err\n }\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n if err != nil { // File\n return RestoreAsset(dir, name)\n } else { // Dir\n for _, child := range children {\n err = RestoreAssets(dir, path.Join(name, child))\n if err != nil {\n return err\n }\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n if err != nil { // File\n return RestoreAsset(dir, name)\n } else { // Dir\n for _, child := range children {\n err = RestoreAssets(dir, path.Join(name, child))\n if err != nil {\n return err\n }\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n if err != nil { // File\n return RestoreAsset(dir, name)\n } else { // Dir\n for _, child := range children {\n err = RestoreAssets(dir, path.Join(name, child))\n if err != nil {\n return err\n }\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n if err != nil { // File\n return RestoreAsset(dir, name)\n } else { // Dir\n for _, child := range children {\n err = RestoreAssets(dir, path.Join(name, child))\n if err != nil {\n return err\n }\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n if err != nil { // File\n return RestoreAsset(dir, name)\n } else { // Dir\n for _, child := range children {\n err = RestoreAssets(dir, path.Join(name, child))\n if err != nil {\n return err\n }\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := 
AssetDir(name)\n if err != nil { // File\n return RestoreAsset(dir, name)\n } else { // Dir\n for _, child := range children {\n err = RestoreAssets(dir, path.Join(name, child))\n if err != nil {\n return err\n }\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n if err != nil { // File\n return RestoreAsset(dir, name)\n } else { // Dir\n for _, child := range children {\n err = RestoreAssets(dir, path.Join(name, child))\n if err != nil {\n return err\n }\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n if err != nil { // File\n return RestoreAsset(dir, name)\n } else { // Dir\n for _, child := range children {\n err = RestoreAssets(dir, path.Join(name, child))\n if err != nil {\n return err\n }\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n if err != nil { // File\n return RestoreAsset(dir, name)\n } else { // Dir\n for _, child := range children {\n err = RestoreAssets(dir, path.Join(name, child))\n if err != nil {\n return err\n }\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n if err != nil { // File\n return RestoreAsset(dir, name)\n } else { // Dir\n for _, child := range children {\n err = RestoreAssets(dir, path.Join(name, child))\n if err != nil {\n return err\n }\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n // File\n if err != nil {\n return RestoreAsset(dir, name)\n }\n // Dir\n for _, child := range children {\n err = RestoreAssets(dir, filepath.Join(name, child))\n if err != nil {\n return err\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n // File\n if err != nil {\n return RestoreAsset(dir, name)\n }\n // Dir\n for _, child := range children {\n err = RestoreAssets(dir, filepath.Join(name, 
child))\n if err != nil {\n return err\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n // File\n if err != nil {\n return RestoreAsset(dir, name)\n }\n // Dir\n for _, child := range children {\n err = RestoreAssets(dir, filepath.Join(name, child))\n if err != nil {\n return err\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n // File\n if err != nil {\n return RestoreAsset(dir, name)\n }\n // Dir\n for _, child := range children {\n err = RestoreAssets(dir, filepath.Join(name, child))\n if err != nil {\n return err\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n // File\n if err != nil {\n return RestoreAsset(dir, name)\n }\n // Dir\n for _, child := range children {\n err = RestoreAssets(dir, filepath.Join(name, child))\n if err != nil {\n return err\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n // File\n if err != nil {\n return RestoreAsset(dir, name)\n }\n // Dir\n for _, child := range children {\n err = RestoreAssets(dir, filepath.Join(name, child))\n if err != nil {\n return err\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n // File\n if err != nil {\n return RestoreAsset(dir, name)\n }\n // Dir\n for _, child := range children {\n err = RestoreAssets(dir, filepath.Join(name, child))\n if err != nil {\n return err\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n // File\n if err != nil {\n return RestoreAsset(dir, name)\n }\n // Dir\n for _, child := range children {\n err = RestoreAssets(dir, filepath.Join(name, child))\n if err != nil {\n return err\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n // File\n if err != nil {\n return 
RestoreAsset(dir, name)\n }\n // Dir\n for _, child := range children {\n err = RestoreAssets(dir, filepath.Join(name, child))\n if err != nil {\n return err\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n // File\n if err != nil {\n return RestoreAsset(dir, name)\n }\n // Dir\n for _, child := range children {\n err = RestoreAssets(dir, filepath.Join(name, child))\n if err != nil {\n return err\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n // File\n if err != nil {\n return RestoreAsset(dir, name)\n }\n // Dir\n for _, child := range children {\n err = RestoreAssets(dir, filepath.Join(name, child))\n if err != nil {\n return err\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n // File\n if err != nil {\n return RestoreAsset(dir, name)\n }\n // Dir\n for _, child := range children {\n err = RestoreAssets(dir, filepath.Join(name, child))\n if err != nil {\n return err\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n // File\n if err != nil {\n return RestoreAsset(dir, name)\n }\n // Dir\n for _, child := range children {\n err = RestoreAssets(dir, filepath.Join(name, child))\n if err != nil {\n return err\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n // File\n if err != nil {\n return RestoreAsset(dir, name)\n }\n // Dir\n for _, child := range children {\n err = RestoreAssets(dir, filepath.Join(name, child))\n if err != nil {\n return err\n }\n }\n return nil\n}", "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n // File\n if err != nil {\n return RestoreAsset(dir, name)\n }\n // Dir\n for _, child := range children {\n err = RestoreAssets(dir, filepath.Join(name, child))\n if err != nil {\n return err\n }\n }\n return nil\n}", "func 
RestoreAssets(dir, name string) error {\n\tchildren, err := AssetDir(name)\n\tif err != nil { // File\n\t\treturn RestoreAsset(dir, name)\n\t} else { // Dir\n\t\tfor _, child := range children {\n\t\t\terr = RestoreAssets(dir, path.Join(name, child))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func RestoreAssets(dir, name string) error {\n\tchildren, err := AssetDir(name)\n\tif err != nil { // File\n\t\treturn RestoreAsset(dir, name)\n\t} else { // Dir\n\t\tfor _, child := range children {\n\t\t\terr = RestoreAssets(dir, path.Join(name, child))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func RestoreAssets(dir, name string) error {\n\tchildren, err := AssetDir(name)\n\tif err != nil { // File\n\t\treturn RestoreAsset(dir, name)\n\t} else { // Dir\n\t\tfor _, child := range children {\n\t\t\terr = RestoreAssets(dir, path.Join(name, child))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func RestoreAssets(dir, name string) error {\n\tchildren, err := AssetDir(name)\n\tif err != nil { // File\n\t\treturn RestoreAsset(dir, name)\n\t} else { // Dir\n\t\tfor _, child := range children {\n\t\t\terr = RestoreAssets(dir, path.Join(name, child))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}" ]
[ "0.798857", "0.798857", "0.798857", "0.798857", "0.798857", "0.7979806", "0.7979806", "0.7979806", "0.7979806", "0.7979806", "0.7979806", "0.7979806", "0.7979806", "0.7979806", "0.7979806", "0.7979806", "0.7979806", "0.7979806", "0.7973515", "0.7973515", "0.7973515", "0.7973515", "0.7973515", "0.7973515", "0.7973515", "0.7973515", "0.7973515", "0.7973515", "0.7973515", "0.7973515", "0.7973515", "0.7973515", "0.7973515", "0.7839553", "0.7839553", "0.7839553", "0.7839553" ]
0.0
-1
Copyright 2018 Information Trust Institute Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. import "syscall"
func (s *service) Start() error { log.Debug("Starting Proc") s.pipe, _ = s.Proc.StdoutPipe() e := s.Proc.Start() if e != nil { return e } s.isRunning = true log.Debug("Waiting") e = s.Proc.Wait() return e }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func SYSCALL() { ctx.SYSCALL() }", "func PrintSyscall(sc int32) string {\n\tvar syscalls = map[int32]string{\n\t\t0: \"read\",\n\t\t1: \"write\",\n\t\t2: \"open\",\n\t\t3: \"close\",\n\t\t4: \"newstat\",\n\t\t5: \"fstat\",\n\t\t6: \"newlstat\",\n\t\t7: \"poll\",\n\t\t8: \"lseek\",\n\t\t9: \"mmap\",\n\t\t10: \"mprotect\",\n\t\t11: \"munmap\",\n\t\t12: \"brk\",\n\t\t13: \"rt_sigaction\",\n\t\t14: \"rt_sigprocmask\",\n\t\t15: \"rt_sigreturn\",\n\t\t16: \"ioctl\",\n\t\t17: \"pread64\",\n\t\t18: \"pwrite64\",\n\t\t19: \"readv\",\n\t\t20: \"writev\",\n\t\t21: \"access\",\n\t\t22: \"pipe\",\n\t\t23: \"select\",\n\t\t24: \"sched_yield\",\n\t\t25: \"mremap\",\n\t\t26: \"msync\",\n\t\t27: \"mincore\",\n\t\t28: \"madvise\",\n\t\t29: \"shmget\",\n\t\t30: \"shmat\",\n\t\t31: \"shmctl\",\n\t\t32: \"dup\",\n\t\t33: \"dup2\",\n\t\t34: \"pause\",\n\t\t35: \"nanosleep\",\n\t\t36: \"getitimer\",\n\t\t37: \"alarm\",\n\t\t38: \"setitimer\",\n\t\t39: \"getpid\",\n\t\t40: \"sendfile\",\n\t\t41: \"socket\",\n\t\t42: \"connect\",\n\t\t43: \"accept\",\n\t\t44: \"sendto\",\n\t\t45: \"recvfrom\",\n\t\t46: \"sendmsg\",\n\t\t47: \"recvmsg\",\n\t\t48: \"shutdown\",\n\t\t49: \"bind\",\n\t\t50: \"listen\",\n\t\t51: \"getsockname\",\n\t\t52: \"getpeername\",\n\t\t53: \"socketpair\",\n\t\t54: \"setsockopt\",\n\t\t55: \"getsockopt\",\n\t\t56: \"clone\",\n\t\t57: \"fork\",\n\t\t58: \"vfork\",\n\t\t59: \"execve\",\n\t\t60: \"exit\",\n\t\t61: \"wait4\",\n\t\t62: \"kill\",\n\t\t63: \"uname\",\n\t\t64: \"semget\",\n\t\t65: \"semop\",\n\t\t66: \"semctl\",\n\t\t67: \"shmdt\",\n\t\t68: \"msgget\",\n\t\t69: \"msgsnd\",\n\t\t70: \"msgrcv\",\n\t\t71: \"msgctl\",\n\t\t72: \"fcntl\",\n\t\t73: \"flock\",\n\t\t74: \"fsync\",\n\t\t75: \"fdatasync\",\n\t\t76: \"truncate\",\n\t\t77: \"ftruncate\",\n\t\t78: \"getdents\",\n\t\t79: \"getcwd\",\n\t\t80: \"chdir\",\n\t\t81: \"fchdir\",\n\t\t82: \"rename\",\n\t\t83: \"mkdir\",\n\t\t84: \"rmdir\",\n\t\t85: \"creat\",\n\t\t86: \"link\",\n\t\t87: \"unlink\",\n\t\t88: 
\"symlink\",\n\t\t89: \"readlink\",\n\t\t90: \"chmod\",\n\t\t91: \"fchmod\",\n\t\t92: \"chown\",\n\t\t93: \"fchown\",\n\t\t94: \"lchown\",\n\t\t95: \"umask\",\n\t\t96: \"gettimeofday\",\n\t\t97: \"getrlimit\",\n\t\t98: \"getrusage\",\n\t\t99: \"sysinfo\",\n\t\t100: \"times\",\n\t\t101: \"ptrace\",\n\t\t102: \"getuid\",\n\t\t103: \"syslog\",\n\t\t104: \"getgid\",\n\t\t105: \"setuid\",\n\t\t106: \"setgid\",\n\t\t107: \"geteuid\",\n\t\t108: \"getegid\",\n\t\t109: \"setpgid\",\n\t\t110: \"getppid\",\n\t\t111: \"getpgrp\",\n\t\t112: \"setsid\",\n\t\t113: \"setreuid\",\n\t\t114: \"setregid\",\n\t\t115: \"getgroups\",\n\t\t116: \"setgroups\",\n\t\t117: \"setresuid\",\n\t\t118: \"getresuid\",\n\t\t119: \"setresgid\",\n\t\t120: \"getresgid\",\n\t\t121: \"getpgid\",\n\t\t122: \"setfsuid\",\n\t\t123: \"setfsgid\",\n\t\t124: \"getsid\",\n\t\t125: \"capget\",\n\t\t126: \"capset\",\n\t\t127: \"rt_sigpending\",\n\t\t128: \"rt_sigtimedwait\",\n\t\t129: \"rt_sigqueueinfo\",\n\t\t130: \"rt_sigsuspend\",\n\t\t131: \"sigaltstack\",\n\t\t132: \"utime\",\n\t\t133: \"mknod\",\n\t\t134: \"uselib\",\n\t\t135: \"personality\",\n\t\t136: \"ustat\",\n\t\t137: \"statfs\",\n\t\t138: \"fstatfs\",\n\t\t139: \"sysfs\",\n\t\t140: \"getpriority\",\n\t\t141: \"setpriority\",\n\t\t142: \"sched_setparam\",\n\t\t143: \"sched_getparam\",\n\t\t144: \"sched_setscheduler\",\n\t\t145: \"sched_getscheduler\",\n\t\t146: \"sched_get_priority_max\",\n\t\t147: \"sched_get_priority_min\",\n\t\t148: \"sched_rr_get_interval\",\n\t\t149: \"mlock\",\n\t\t150: \"munlock\",\n\t\t151: \"mlockall\",\n\t\t152: \"munlockall\",\n\t\t153: \"vhangup\",\n\t\t154: \"modify_ldt\",\n\t\t155: \"pivot_root\",\n\t\t156: \"sysctl\",\n\t\t157: \"prctl\",\n\t\t158: \"arch_prctl\",\n\t\t159: \"adjtimex\",\n\t\t160: \"setrlimit\",\n\t\t161: \"chroot\",\n\t\t162: \"sync\",\n\t\t163: \"acct\",\n\t\t164: \"settimeofday\",\n\t\t165: \"mount\",\n\t\t166: \"umount\",\n\t\t167: \"swapon\",\n\t\t168: \"swapoff\",\n\t\t169: \"reboot\",\n\t\t170: 
\"sethostname\",\n\t\t171: \"setdomainname\",\n\t\t172: \"iopl\",\n\t\t173: \"ioperm\",\n\t\t174: \"create_module\",\n\t\t175: \"init_module\",\n\t\t176: \"delete_module\",\n\t\t177: \"get_kernel_syms\",\n\t\t178: \"query_module\",\n\t\t179: \"quotactl\",\n\t\t180: \"nfsservctl\",\n\t\t181: \"getpmsg\",\n\t\t182: \"putpmsg\",\n\t\t183: \"afs\",\n\t\t184: \"tuxcall\",\n\t\t185: \"security\",\n\t\t186: \"gettid\",\n\t\t187: \"readahead\",\n\t\t188: \"setxattr\",\n\t\t189: \"lsetxattr\",\n\t\t190: \"fsetxattr\",\n\t\t191: \"getxattr\",\n\t\t192: \"lgetxattr\",\n\t\t193: \"fgetxattr\",\n\t\t194: \"listxattr\",\n\t\t195: \"llistxattr\",\n\t\t196: \"flistxattr\",\n\t\t197: \"removexattr\",\n\t\t198: \"lremovexattr\",\n\t\t199: \"fremovexattr\",\n\t\t200: \"tkill\",\n\t\t201: \"time\",\n\t\t202: \"futex\",\n\t\t203: \"sched_setaffinity\",\n\t\t204: \"sched_getaffinity\",\n\t\t205: \"set_thread_area\",\n\t\t206: \"io_setup\",\n\t\t207: \"io_destroy\",\n\t\t208: \"io_getevents\",\n\t\t209: \"io_submit\",\n\t\t210: \"io_cancel\",\n\t\t211: \"get_thread_area\",\n\t\t212: \"lookup_dcookie\",\n\t\t213: \"epoll_create\",\n\t\t214: \"epoll_ctl_old\",\n\t\t215: \"epoll_wait_old\",\n\t\t216: \"remap_file_pages\",\n\t\t217: \"getdents64\",\n\t\t218: \"set_tid_address\",\n\t\t219: \"restart_syscall\",\n\t\t220: \"semtimedop\",\n\t\t221: \"fadvise64\",\n\t\t222: \"timer_create\",\n\t\t223: \"timer_settime\",\n\t\t224: \"timer_gettime\",\n\t\t225: \"timer_getoverrun\",\n\t\t226: \"timer_delete\",\n\t\t227: \"clock_settime\",\n\t\t228: \"clock_gettime\",\n\t\t229: \"clock_getres\",\n\t\t230: \"clock_nanosleep\",\n\t\t231: \"exit_group\",\n\t\t232: \"epoll_wait\",\n\t\t233: \"epoll_ctl\",\n\t\t234: \"tgkill\",\n\t\t235: \"utimes\",\n\t\t236: \"vserver\",\n\t\t237: \"mbind\",\n\t\t238: \"set_mempolicy\",\n\t\t239: \"get_mempolicy\",\n\t\t240: \"mq_open\",\n\t\t241: \"mq_unlink\",\n\t\t242: \"mq_timedsend\",\n\t\t243: \"mq_timedreceive\",\n\t\t244: \"mq_notify\",\n\t\t245: 
\"mq_getsetattr\",\n\t\t246: \"kexec_load\",\n\t\t247: \"waitid\",\n\t\t248: \"add_key\",\n\t\t249: \"request_key\",\n\t\t250: \"keyctl\",\n\t\t251: \"ioprio_set\",\n\t\t252: \"ioprio_get\",\n\t\t253: \"inotify_init\",\n\t\t254: \"inotify_add_watch\",\n\t\t255: \"inotify_rm_watch\",\n\t\t256: \"migrate_pages\",\n\t\t257: \"openat\",\n\t\t258: \"mkdirat\",\n\t\t259: \"mknodat\",\n\t\t260: \"fchownat\",\n\t\t261: \"futimesat\",\n\t\t262: \"newfstatat\",\n\t\t263: \"unlinkat\",\n\t\t264: \"renameat\",\n\t\t265: \"linkat\",\n\t\t266: \"symlinkat\",\n\t\t267: \"readlinkat\",\n\t\t268: \"fchmodat\",\n\t\t269: \"faccessat\",\n\t\t270: \"pselect6\",\n\t\t271: \"ppoll\",\n\t\t272: \"unshare\",\n\t\t273: \"set_robust_list\",\n\t\t274: \"get_robust_list\",\n\t\t275: \"splice\",\n\t\t276: \"tee\",\n\t\t277: \"sync_file_range\",\n\t\t278: \"vmsplice\",\n\t\t279: \"move_pages\",\n\t\t280: \"utimensat\",\n\t\t281: \"epoll_pwait\",\n\t\t282: \"signalfd\",\n\t\t283: \"timerfd_create\",\n\t\t284: \"eventfd\",\n\t\t285: \"fallocate\",\n\t\t286: \"timerfd_settime\",\n\t\t287: \"timerfd_gettime\",\n\t\t288: \"accept4\",\n\t\t289: \"signalfd4\",\n\t\t290: \"eventfd2\",\n\t\t291: \"epoll_create1\",\n\t\t292: \"dup3\",\n\t\t293: \"pipe2\",\n\t\t294: \"ionotify_init1\",\n\t\t295: \"preadv\",\n\t\t296: \"pwritev\",\n\t\t297: \"rt_tgsigqueueinfo\",\n\t\t298: \"perf_event_open\",\n\t\t299: \"recvmmsg\",\n\t\t300: \"fanotify_init\",\n\t\t301: \"fanotify_mark\",\n\t\t302: \"prlimit64\",\n\t\t303: \"name_tohandle_at\",\n\t\t304: \"open_by_handle_at\",\n\t\t305: \"clock_adjtime\",\n\t\t306: \"sycnfs\",\n\t\t307: \"sendmmsg\",\n\t\t308: \"setns\",\n\t\t309: \"getcpu\",\n\t\t310: \"process_vm_readv\",\n\t\t311: \"process_vm_writev\",\n\t\t312: \"kcmp\",\n\t\t313: \"finit_module\",\n\t\t314: \"sched_setattr\",\n\t\t315: \"sched_getattr\",\n\t\t316: \"renameat2\",\n\t\t317: \"seccomp\",\n\t\t318: \"getrandom\",\n\t\t319: \"memfd_create\",\n\t\t320: \"kexec_file_load\",\n\t\t321: \"bpf\",\n\t\t322: 
\"execveat\",\n\t\t323: \"userfaultfd\",\n\t\t324: \"membarrier\",\n\t\t325: \"mlock2\",\n\t\t326: \"copy_file_range\",\n\t\t327: \"preadv2\",\n\t\t328: \"pwritev2\",\n\t\t329: \"pkey_mprotect\",\n\t\t330: \"pkey_alloc\",\n\t\t331: \"pkey_free\",\n\t\t332: \"statx\",\n\t\t333: \"io_pgetevents\",\n\t\t334: \"rseq\",\n\t}\n\tvar res string\n\tif scName, ok := syscalls[sc]; ok {\n\t\tres = scName\n\t} else {\n\t\tres = strconv.Itoa(int(sc))\n\t}\n\treturn res\n}", "func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {\n\tentersyscall()\n\tlibcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn))\n\texitsyscall()\n\treturn\n}", "func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {\n\tlibcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn))\n\treturn\n}", "func syscall_syscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {\n\tentersyscall()\n\tlibcCall(unsafe.Pointer(abi.FuncPCABI0(syscallX)), unsafe.Pointer(&fn))\n\texitsyscall()\n\treturn\n}", "func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err unix.Errno)", "func sysMount(device, target, mType string, flag uintptr, data string) error {\n\tif err := syscall.Mount(device, target, mType, flag, data); err != nil {\n\t\treturn err\n\t}\n\n\t// If we have a bind mount or remount, remount...\n\tif flag&syscall.MS_BIND == syscall.MS_BIND &&\n\t\tflag&syscall.MS_RDONLY == syscall.MS_RDONLY {\n\t\treturn syscall.Mount(\n\t\t\tdevice, target, mType, flag|syscall.MS_REMOUNT, data)\n\t}\n\treturn nil\n}", "func (c *Compiler) emitSyscall(frame *Frame, call *ssa.CallCommon) (llvm.Value, error) {\n\tnum := c.getValue(frame, call.Args[0])\n\tvar syscallResult llvm.Value\n\tswitch {\n\tcase c.GOARCH() == \"amd64\":\n\t\tif c.GOOS() == \"darwin\" {\n\t\t\t// Darwin adds this magic number to system call numbers:\n\t\t\t//\n\t\t\t// > Syscall classes for 64-bit system call entry.\n\t\t\t// > For 64-bit users, the 32-bit syscall number is 
partitioned\n\t\t\t// > with the high-order bits representing the class and low-order\n\t\t\t// > bits being the syscall number within that class.\n\t\t\t// > The high-order 32-bits of the 64-bit syscall number are unused.\n\t\t\t// > All system classes enter the kernel via the syscall instruction.\n\t\t\t//\n\t\t\t// Source: https://opensource.apple.com/source/xnu/xnu-792.13.8/osfmk/mach/i386/syscall_sw.h\n\t\t\tnum = c.builder.CreateOr(num, llvm.ConstInt(c.uintptrType, 0x2000000, false), \"\")\n\t\t}\n\t\t// Sources:\n\t\t// https://stackoverflow.com/a/2538212\n\t\t// https://en.wikibooks.org/wiki/X86_Assembly/Interfacing_with_Linux#syscall\n\t\targs := []llvm.Value{num}\n\t\targTypes := []llvm.Type{c.uintptrType}\n\t\t// Constraints will look something like:\n\t\t// \"={rax},0,{rdi},{rsi},{rdx},{r10},{r8},{r9},~{rcx},~{r11}\"\n\t\tconstraints := \"={rax},0\"\n\t\tfor i, arg := range call.Args[1:] {\n\t\t\tconstraints += \",\" + [...]string{\n\t\t\t\t\"{rdi}\",\n\t\t\t\t\"{rsi}\",\n\t\t\t\t\"{rdx}\",\n\t\t\t\t\"{r10}\",\n\t\t\t\t\"{r8}\",\n\t\t\t\t\"{r9}\",\n\t\t\t\t\"{r11}\",\n\t\t\t\t\"{r12}\",\n\t\t\t\t\"{r13}\",\n\t\t\t}[i]\n\t\t\tllvmValue := c.getValue(frame, arg)\n\t\t\targs = append(args, llvmValue)\n\t\t\targTypes = append(argTypes, llvmValue.Type())\n\t\t}\n\t\tconstraints += \",~{rcx},~{r11}\"\n\t\tfnType := llvm.FunctionType(c.uintptrType, argTypes, false)\n\t\ttarget := llvm.InlineAsm(fnType, \"syscall\", constraints, true, false, llvm.InlineAsmDialectIntel)\n\t\tsyscallResult = c.builder.CreateCall(target, args, \"\")\n\tcase c.GOARCH() == \"386\" && c.GOOS() == \"linux\":\n\t\t// Sources:\n\t\t// syscall(2) man page\n\t\t// https://stackoverflow.com/a/2538212\n\t\t// https://en.wikibooks.org/wiki/X86_Assembly/Interfacing_with_Linux#int_0x80\n\t\targs := []llvm.Value{num}\n\t\targTypes := []llvm.Type{c.uintptrType}\n\t\t// Constraints will look something like:\n\t\t// \"={eax},0,{ebx},{ecx},{edx},{esi},{edi},{ebp}\"\n\t\tconstraints := 
\"={eax},0\"\n\t\tfor i, arg := range call.Args[1:] {\n\t\t\tconstraints += \",\" + [...]string{\n\t\t\t\t\"{ebx}\",\n\t\t\t\t\"{ecx}\",\n\t\t\t\t\"{edx}\",\n\t\t\t\t\"{esi}\",\n\t\t\t\t\"{edi}\",\n\t\t\t\t\"{ebp}\",\n\t\t\t}[i]\n\t\t\tllvmValue := c.getValue(frame, arg)\n\t\t\targs = append(args, llvmValue)\n\t\t\targTypes = append(argTypes, llvmValue.Type())\n\t\t}\n\t\tfnType := llvm.FunctionType(c.uintptrType, argTypes, false)\n\t\ttarget := llvm.InlineAsm(fnType, \"int 0x80\", constraints, true, false, llvm.InlineAsmDialectIntel)\n\t\tsyscallResult = c.builder.CreateCall(target, args, \"\")\n\tcase c.GOARCH() == \"arm\" && c.GOOS() == \"linux\":\n\t\t// Implement the EABI system call convention for Linux.\n\t\t// Source: syscall(2) man page.\n\t\targs := []llvm.Value{}\n\t\targTypes := []llvm.Type{}\n\t\t// Constraints will look something like:\n\t\t// ={r0},0,{r1},{r2},{r7},~{r3}\n\t\tconstraints := \"={r0}\"\n\t\tfor i, arg := range call.Args[1:] {\n\t\t\tconstraints += \",\" + [...]string{\n\t\t\t\t\"0\", // tie to output\n\t\t\t\t\"{r1}\",\n\t\t\t\t\"{r2}\",\n\t\t\t\t\"{r3}\",\n\t\t\t\t\"{r4}\",\n\t\t\t\t\"{r5}\",\n\t\t\t\t\"{r6}\",\n\t\t\t}[i]\n\t\t\tllvmValue := c.getValue(frame, arg)\n\t\t\targs = append(args, llvmValue)\n\t\t\targTypes = append(argTypes, llvmValue.Type())\n\t\t}\n\t\targs = append(args, num)\n\t\targTypes = append(argTypes, c.uintptrType)\n\t\tconstraints += \",{r7}\" // syscall number\n\t\tfor i := len(call.Args) - 1; i < 4; i++ {\n\t\t\t// r0-r3 get clobbered after the syscall returns\n\t\t\tconstraints += \",~{r\" + strconv.Itoa(i) + \"}\"\n\t\t}\n\t\tfnType := llvm.FunctionType(c.uintptrType, argTypes, false)\n\t\ttarget := llvm.InlineAsm(fnType, \"svc #0\", constraints, true, false, 0)\n\t\tsyscallResult = c.builder.CreateCall(target, args, \"\")\n\tcase c.GOARCH() == \"arm64\" && c.GOOS() == \"linux\":\n\t\t// Source: syscall(2) man page.\n\t\targs := []llvm.Value{}\n\t\targTypes := []llvm.Type{}\n\t\t// Constraints will look 
something like:\n\t\t// ={x0},0,{x1},{x2},{x8},~{x3},~{x4},~{x5},~{x6},~{x7},~{x16},~{x17}\n\t\tconstraints := \"={x0}\"\n\t\tfor i, arg := range call.Args[1:] {\n\t\t\tconstraints += \",\" + [...]string{\n\t\t\t\t\"0\", // tie to output\n\t\t\t\t\"{x1}\",\n\t\t\t\t\"{x2}\",\n\t\t\t\t\"{x3}\",\n\t\t\t\t\"{x4}\",\n\t\t\t\t\"{x5}\",\n\t\t\t}[i]\n\t\t\tllvmValue := c.getValue(frame, arg)\n\t\t\targs = append(args, llvmValue)\n\t\t\targTypes = append(argTypes, llvmValue.Type())\n\t\t}\n\t\targs = append(args, num)\n\t\targTypes = append(argTypes, c.uintptrType)\n\t\tconstraints += \",{x8}\" // syscall number\n\t\tfor i := len(call.Args) - 1; i < 8; i++ {\n\t\t\t// x0-x7 may get clobbered during the syscall following the aarch64\n\t\t\t// calling convention.\n\t\t\tconstraints += \",~{x\" + strconv.Itoa(i) + \"}\"\n\t\t}\n\t\tconstraints += \",~{x16},~{x17}\" // scratch registers\n\t\tfnType := llvm.FunctionType(c.uintptrType, argTypes, false)\n\t\ttarget := llvm.InlineAsm(fnType, \"svc #0\", constraints, true, false, 0)\n\t\tsyscallResult = c.builder.CreateCall(target, args, \"\")\n\tdefault:\n\t\treturn llvm.Value{}, c.makeError(call.Pos(), \"unknown GOOS/GOARCH for syscall: \"+c.GOOS()+\"/\"+c.GOARCH())\n\t}\n\tswitch c.GOOS() {\n\tcase \"linux\", \"freebsd\":\n\t\t// Return values: r0, r1 uintptr, err Errno\n\t\t// Pseudocode:\n\t\t// var err uintptr\n\t\t// if syscallResult < 0 && syscallResult > -4096 {\n\t\t// err = -syscallResult\n\t\t// }\n\t\t// return syscallResult, 0, err\n\t\tzero := llvm.ConstInt(c.uintptrType, 0, false)\n\t\tinrange1 := c.builder.CreateICmp(llvm.IntSLT, syscallResult, llvm.ConstInt(c.uintptrType, 0, false), \"\")\n\t\tinrange2 := c.builder.CreateICmp(llvm.IntSGT, syscallResult, llvm.ConstInt(c.uintptrType, 0xfffffffffffff000, true), \"\") // -4096\n\t\thasError := c.builder.CreateAnd(inrange1, inrange2, \"\")\n\t\terrResult := c.builder.CreateSelect(hasError, c.builder.CreateSub(zero, syscallResult, \"\"), zero, 
\"syscallError\")\n\t\tretval := llvm.Undef(c.ctx.StructType([]llvm.Type{c.uintptrType, c.uintptrType, c.uintptrType}, false))\n\t\tretval = c.builder.CreateInsertValue(retval, syscallResult, 0, \"\")\n\t\tretval = c.builder.CreateInsertValue(retval, zero, 1, \"\")\n\t\tretval = c.builder.CreateInsertValue(retval, errResult, 2, \"\")\n\t\treturn retval, nil\n\tcase \"darwin\":\n\t\t// Return values: r0, r1 uintptr, err Errno\n\t\t// Pseudocode:\n\t\t// var err uintptr\n\t\t// if syscallResult != 0 {\n\t\t// err = syscallResult\n\t\t// }\n\t\t// return syscallResult, 0, err\n\t\tzero := llvm.ConstInt(c.uintptrType, 0, false)\n\t\thasError := c.builder.CreateICmp(llvm.IntNE, syscallResult, llvm.ConstInt(c.uintptrType, 0, false), \"\")\n\t\terrResult := c.builder.CreateSelect(hasError, syscallResult, zero, \"syscallError\")\n\t\tretval := llvm.Undef(c.ctx.StructType([]llvm.Type{c.uintptrType, c.uintptrType, c.uintptrType}, false))\n\t\tretval = c.builder.CreateInsertValue(retval, syscallResult, 0, \"\")\n\t\tretval = c.builder.CreateInsertValue(retval, zero, 1, \"\")\n\t\tretval = c.builder.CreateInsertValue(retval, errResult, 2, \"\")\n\t\treturn retval, nil\n\tdefault:\n\t\treturn llvm.Value{}, c.makeError(call.Pos(), \"unknown GOOS/GOARCH for syscall: \"+c.GOOS()+\"/\"+c.GOARCH())\n\t}\n}", "func syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) {\n\tentersyscall()\n\tlibcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10)), unsafe.Pointer(&fn))\n\texitsyscall()\n\treturn\n}", "func printSyscall(childPid int, regs syscall.PtraceRegs) error {\n\t// Syscall-related data is stored in a number of registers, namely:\n\t// - rax (system call number)\n\t// - rdi (first arg)\n\t// - rsi (second arg)\n\t// - rdx (third arg)\n\t// - r10 (fourth arg)\n\t// - r8 (fifth arg)\n\t// - r9 (sixth arg)\n\t// For each syscall, we need to hardcode which arguments are of interest\n\t// Hence, for educational purposes, only selected syscalls have 
been\n\t// defined (i.e. are recognized) below. The list can grow over time.\n\tsyscallName := syscallNameByRax[regs.Orig_rax]\n\n\tswitch syscallName {\n\n\tcase \"openat\":\n\t\tdirfd := regs.Rdi\n\t\tpath, err := peekText(childPid, uintptr(regs.Rsi))\n\t\tflags := regs.Rdx\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error while reading path text for child pid %d: %w\", childPid, err)\n\t\t}\n\t\tfmt.Printf(\"openat(%d, '%s', %d)\\n\", dirfd, string(path), flags)\n\tcase \"open\":\n\t\tpath, err := peekText(childPid, uintptr(regs.Rdi))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error while reading path text for child pid %d: %w\", childPid, err)\n\t\t}\n\t\tflags := regs.Rsi\n\t\tfmt.Printf(\"open('%s', %d)\\n\", path, flags)\n\tcase \"read\":\n\t\tfd := regs.Rdi\n\t\tbuf := regs.Rsi\n\t\tcount := regs.Rdx\n\t\tfmt.Printf(\"read(%d, %d, %d)\\n\", fd, buf, count)\n\tcase \"write\":\n\t\tfd := regs.Rdi\n\t\ttext, err := peekText(childPid, uintptr(regs.Rsi))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error while reading path text for child pid %d: %w\", childPid, err)\n\t\t}\n\t\tnbytes := regs.Rdx\n\t\tfmt.Printf(\"write(%d, '%s', %d)\\n\", fd, text, nbytes)\n\tcase \"close\":\n\t\tfd := regs.Rdi\n\t\tfmt.Printf(\"close(%d)\\n\", fd)\n\tdefault:\n\t\t// Print out syscall(n/a) as indication that we cannot parse parameters\n\t\tfmt.Printf(\"%s(n/a)\\n\", syscallName)\n\t}\n\n\treturn nil\n}", "func main() {\n\n // Go requires an absolute path to the binary we want to execute, so we’ll use exec.LookPath to find it (probably /bin/ls).\n // Exec requires arguments in slice form (as apposed to one big string).\n binary, lookErr := exec.LookPath(\"ls\")\n if lookErr != nil {\n panic(lookErr)\n }\n\n args := []string{\"ls\", \"-a\", \"-l\", \"-h\"} //Exec requires arguments in slice form (as apposed to one big string). first argument should be the program name\n\n //Exec also needs a set of environment variables to use. 
Here we just provide our current environment.\n env := os.Environ()\n\n execErr := syscall.Exec(binary, args, env) //Here’s the actual syscall.Exec call.\n //If this call is successful, the execution of our process will end here and be replaced by the /bin/ls -a -l -h process.\n if execErr != nil {// If there is an error we’ll get a return value.\n panic(execErr)\n }\n}", "func ioctl(fd, cmd, ptr uintptr) error {\n\t_, _, errno := unix.Syscall(unix.SYS_IOCTL, fd, cmd, ptr)\n\tif errno != 0 {\n\t\treturn errno\n\t}\n\treturn nil\n}", "func syscallMode(p PermissionBits) (o uint32) {\n\to |= uint32(p)\n\n\tif p.Setuid() {\n\t\to |= syscall.S_ISUID\n\t}\n\tif p.Setgid() {\n\t\to |= syscall.S_ISGID\n\t}\n\tif p.Sticky() {\n\t\to |= syscall.S_ISVTX\n\t}\n\treturn\n}", "func syscall_syscall10X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) {\n\tentersyscall()\n\tlibcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10X)), unsafe.Pointer(&fn))\n\texitsyscall()\n\treturn\n}", "func (c *Context) SYSCALL() {\n\tc.addinstruction(x86.SYSCALL())\n}", "func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {\n\tentersyscall()\n\tlibcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn))\n\texitsyscall()\n\treturn\n}", "func syscall_rawSyscall10X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) {\n\tlibcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10X)), unsafe.Pointer(&fn))\n\treturn\n}", "func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {\n\tentersyscall()\n\tlibcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&fn))\n\texitsyscall()\n\treturn\n}", "func fcntl(fd int, cmd int, arg int) (val int, errno int) {\n r0, _, e1 := syscall.Syscall(syscall.SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))\n val = int(r0)\n errno = int(e1)\n return\n}", "func Sysctl(name string) (string, error) {\n\tpath := filepath.Clean(filepath.Join(\"/proc\", \"sys\", 
strings.Replace(name, \".\", \"/\", -1)))\n\tdata, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn \"\", trace.ConvertSystemError(err)\n\t}\n\tif len(data) == 0 {\n\t\treturn \"\", trace.BadParameter(\"empty output from sysctl\")\n\t}\n\treturn string(data[:len(data)-1]), nil\n}", "func Example() *specs.Spec {\n\treturn &specs.Spec{\n\t\tVersion: specs.Version,\n\t\tRoot: &specs.Root{\n\t\t\tPath: \"rootfs\",\n\t\t\tReadonly: true,\n\t\t},\n\t\tProcess: &specs.Process{\n\t\t\tTerminal: true,\n\t\t\tUser: specs.User{},\n\t\t\tArgs: []string{\n\t\t\t\t\"sh\",\n\t\t\t},\n\t\t\tEnv: []string{\n\t\t\t\t\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\n\t\t\t\t\"TERM=xterm\",\n\t\t\t},\n\t\t\tCwd: \"/\",\n\t\t\tNoNewPrivileges: true,\n\t\t\tCapabilities: &specs.LinuxCapabilities{\n\t\t\t\tBounding: []string{\n\t\t\t\t\t\"CAP_AUDIT_WRITE\",\n\t\t\t\t\t\"CAP_KILL\",\n\t\t\t\t\t\"CAP_NET_BIND_SERVICE\",\n\t\t\t\t},\n\t\t\t\tPermitted: []string{\n\t\t\t\t\t\"CAP_AUDIT_WRITE\",\n\t\t\t\t\t\"CAP_KILL\",\n\t\t\t\t\t\"CAP_NET_BIND_SERVICE\",\n\t\t\t\t},\n\t\t\t\tInheritable: []string{\n\t\t\t\t\t\"CAP_AUDIT_WRITE\",\n\t\t\t\t\t\"CAP_KILL\",\n\t\t\t\t\t\"CAP_NET_BIND_SERVICE\",\n\t\t\t\t},\n\t\t\t\tAmbient: []string{\n\t\t\t\t\t\"CAP_AUDIT_WRITE\",\n\t\t\t\t\t\"CAP_KILL\",\n\t\t\t\t\t\"CAP_NET_BIND_SERVICE\",\n\t\t\t\t},\n\t\t\t\tEffective: []string{\n\t\t\t\t\t\"CAP_AUDIT_WRITE\",\n\t\t\t\t\t\"CAP_KILL\",\n\t\t\t\t\t\"CAP_NET_BIND_SERVICE\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tRlimits: []specs.POSIXRlimit{\n\t\t\t\t{\n\t\t\t\t\tType: \"RLIMIT_NOFILE\",\n\t\t\t\t\tHard: uint64(1024),\n\t\t\t\t\tSoft: uint64(1024),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tHostname: \"runc\",\n\t\tMounts: []specs.Mount{\n\t\t\t{\n\t\t\t\tDestination: \"/proc\",\n\t\t\t\tType: \"proc\",\n\t\t\t\tSource: \"proc\",\n\t\t\t\tOptions: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev\",\n\t\t\t\tType: \"tmpfs\",\n\t\t\t\tSource: \"tmpfs\",\n\t\t\t\tOptions: []string{\"nosuid\", 
\"strictatime\", \"mode=755\", \"size=65536k\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev/pts\",\n\t\t\t\tType: \"devpts\",\n\t\t\t\tSource: \"devpts\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"newinstance\", \"ptmxmode=0666\", \"mode=0620\", \"gid=5\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev/shm\",\n\t\t\t\tType: \"tmpfs\",\n\t\t\t\tSource: \"shm\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\", \"mode=1777\", \"size=65536k\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev/mqueue\",\n\t\t\t\tType: \"mqueue\",\n\t\t\t\tSource: \"mqueue\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/sys\",\n\t\t\t\tType: \"sysfs\",\n\t\t\t\tSource: \"sysfs\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\", \"ro\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/sys/fs/cgroup\",\n\t\t\t\tType: \"cgroup\",\n\t\t\t\tSource: \"cgroup\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\", \"relatime\", \"ro\"},\n\t\t\t},\n\t\t},\n\t\tLinux: &specs.Linux{\n\t\t\tMaskedPaths: []string{\n\t\t\t\t\"/proc/kcore\",\n\t\t\t\t\"/proc/latency_stats\",\n\t\t\t\t\"/proc/timer_list\",\n\t\t\t\t\"/proc/timer_stats\",\n\t\t\t\t\"/proc/sched_debug\",\n\t\t\t\t\"/sys/firmware\",\n\t\t\t\t\"/proc/scsi\",\n\t\t\t},\n\t\t\tReadonlyPaths: []string{\n\t\t\t\t\"/proc/asound\",\n\t\t\t\t\"/proc/bus\",\n\t\t\t\t\"/proc/fs\",\n\t\t\t\t\"/proc/irq\",\n\t\t\t\t\"/proc/sys\",\n\t\t\t\t\"/proc/sysrq-trigger\",\n\t\t\t},\n\t\t\tResources: &specs.LinuxResources{\n\t\t\t\tDevices: []specs.LinuxDeviceCgroup{\n\t\t\t\t\t{\n\t\t\t\t\t\tAllow: false,\n\t\t\t\t\t\tAccess: \"rwm\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tNamespaces: []specs.LinuxNamespace{\n\t\t\t\t{\n\t\t\t\t\tType: \"pid\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"network\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"ipc\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"uts\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: 
\"mount\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {\n\tlibcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn))\n\treturn\n}", "func syscallMode(i os.FileMode) (o uint32) {\n\to |= uint32(i.Perm())\n\tif i&os.ModeSetuid != 0 {\n\t\to |= unix.S_ISUID\n\t}\n\tif i&os.ModeSetgid != 0 {\n\t\to |= unix.S_ISGID\n\t}\n\tif i&os.ModeSticky != 0 {\n\t\to |= unix.S_ISVTX\n\t}\n\tif i&os.ModeNamedPipe != 0 {\n\t\to |= unix.S_IFIFO\n\t}\n\tif i&os.ModeDevice != 0 {\n\t\tswitch i & os.ModeCharDevice {\n\t\tcase 0:\n\t\t\to |= unix.S_IFBLK\n\t\tdefault:\n\t\t\to |= unix.S_IFCHR\n\t\t}\n\t}\n\treturn\n}", "func main() {\n\tvar connList []uintptr\n\tisUpgrade := os.Getenv(\"fork\") != \"\"\n\tvar err error\n\tvar ln net.Listener\n\tif isUpgrade {\n\t\tln, err = net.FileListener(os.NewFile(3, \"\"))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else {\n\t\tln, _ = net.Listen(\"tcp\", \"localhost:9000\")\n\t}\n\n\tif isUpgrade {\n\t\tconn, err := net.FileConn(os.NewFile(4, \"\"))\n\t\tPanic(err)\n\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tconn.Write([]byte(\"hello\" + os.Getenv(\"fork\")))\n\t\t\t\ttime.Sleep(time.Millisecond * 500)\n\t\t\t\tvar data = make([]byte, 1024)\n\t\t\t\tconn.Read(data)\n\t\t\t\tfmt.Println(string(data))\n\t\t\t}\n\t\t}()\n\t}\n\n\t//defer ln.Close()\n\n\trawConn, ok := ln.(syscall.Conn)\n\tif !ok {\n\t\tpanic(\"not raw\")\n\t}\n\n\traw, err := rawConn.SyscallConn()\n\tPanic(err)\n\n\tvar dupfd uintptr\n\tPanic(raw.Control(func(fd uintptr) {\n\t\tdupfd, err = dupFd(fd)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tPanic(err)\n\t}))\n\n\tconnList = append(connList, dupfd)\n\n\tgo func() {\n\t\tfor i := 0; ; i++ {\n\t\t\tconn, err := ln.Accept()\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\trawConn, ok := conn.(syscall.Conn)\n\t\t\tif !ok {\n\t\t\t\tpanic(\"not raw\")\n\t\t\t}\n\n\t\t\traw, err := 
rawConn.SyscallConn()\n\t\t\tPanic(err)\n\n\t\t\tvar dupfd uintptr\n\t\t\tPanic(raw.Control(func(fd uintptr) {\n\t\t\t\tdupfd, err = dupFd(fd)\n\t\t\t\tPanic(err)\n\t\t\t}))\n\n\t\t\tconnList = append(connList, dupfd)\n\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\tconn.Write([]byte(\"hello\"))\n\t\t\t\t\ttime.Sleep(time.Millisecond * 500)\n\t\t\t\t\tvar data = make([]byte, 1024)\n\t\t\t\t\tconn.Read(data)\n\t\t\t\t\tfmt.Println(string(data))\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}()\n\n\tch := make(chan os.Signal, 1)\n\tsignal.Notify(ch, syscall.SIGTERM, syscall.SIGINT, syscall.SIGQUIT, syscall.SIGKILL, syscall.SIGHUP)\n\t<-ch\n\n\tos.Setenv(\"fork\", \"true\")\n\n\tpid, err := forkExec(os.Args[0], os.Args, connList...)\n\tPanic(err)\n\n\tgo func() {\n\t\t// 防止子进程变成僵尸进程\n\t\tfor {\n\t\t\t_, _ = syscall.Wait4(pid, nil, syscall.WNOWAIT, nil)\n\t\t\ttime.Sleep(time.Second * 5)\n\t\t\treturn\n\t\t}\n\t}()\n\n\ttime.Sleep(time.Second * 5)\n}", "func (c *cpu) socket() {\n\tsp, protocol := popI32(c.sp)\n\tsp, typ := popI32(sp)\n\tdomain := readI32(sp)\n\tfd, err := syscall.Socket(int(domain), int(typ), int(protocol))\n\tif strace {\n\t\tfmt.Fprintf(os.Stderr, \"socket(%s, %s, %#x) %v %v\\t; %s\\n\", socketAF(domain), socketType(typ), protocol, fd, err, c.pos())\n\t}\n\tif err != nil {\n\t\tc.setErrno(err)\n\t\twriteI32(c.rp, -1)\n\t\treturn\n\t}\n\n\twriteI32(c.rp, int32(fd))\n}", "func fakeSyscall(duration time.Duration) {\n\truntime.Entersyscall()\n\tfor start := runtime.Nanotime(); runtime.Nanotime()-start < int64(duration); {\n\t}\n\truntime.Exitsyscall()\n}", "func (*Root) Sys() interface{} { return nil }", "func runtime_procPin()", "func defaultSolarisIfNameCmd() []string {\n\treturn []string{\"/usr/sbin/route\", \"-n\", \"get\", \"default\"}\n}", "func main() {\n\tfile, err := os.Open(\"./temp\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer file.Close()\n\tfdnum := file.Fd()\n\tfmt.Printf(\"fd: %b %b %b %b\\n\", byte(fdnum), byte(fdnum>>8), byte(fdnum>>16), 
byte(fdnum>>24))\n\tfmt.Printf(\"ready to send fd: %d\\n\", fdnum)\n\t// 编码fd编译传送给其他进程\n\tdata := syscall.UnixRights(int(fdnum))\n\traddr, err := net.ResolveUnixAddr(\"unix\", socksPath)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t// 连接UnixSock\n\tconn, err := net.DialUnix(\"unix\", nil, raddr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Printf(\"has dial, time:%v\\n\", time.Now())\n\ttime.Sleep(10 * time.Second)\n\t// 发送msg\n\tn, oobn, err := conn.WriteMsgUnix(nil, data, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Printf(\"has write, time:%v\\n\", time.Now())\n\ttime.Sleep(10 * time.Second)\n\tfmt.Printf(\"WriteMsgUnix = n:%d, oobn:%d; want 1, %d\\n\", n, oobn, len(data))\n\tfmt.Printf(\"write %d data success\\n\", n)\n}", "func (w *wrapper) Statfs(path string, stat *fuse.Statfs_t) int {\n\treturn -fuse.ENOSYS\n}", "func Ioctl(fd, cmd, ptr uintptr) error {\n\t_, _, e := syscall.Syscall(syscall.SYS_IOCTL, fd, cmd, ptr)\n\tif e != 0 {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\tlogrus.ErrorKey: e,\n\t\t\t\"errno\": int(e),\n\t\t}).Error(\"ioctl failed\")\n\t\treturn e\n\t}\n\treturn nil\n}", "func main() {\n\ttype SysProcIDMap struct {\n\t\tContainerID int\n\t\tHostID int\n\t\tSize int\n\t}\n\tvar rootfsPath string\n\n\tcmd := reexec.Command(\"nsInitialisation\", rootfsPath)\n\tcmd = exec.Command(\"/bin/bash\")\n\tcmd.Stdout = os.Stdout\n\tcmd.Stdin = os.Stdin\n\tcmd.Stderr = os.Stderr\n\n\tcmd.Env = []string{\"PS1=-[ns-process]- # \"}\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tCloneflags: syscall.CLONE_NEWUTS |\n\t\t\tsyscall.CLONE_NEWNS |\n\t\t\tsyscall.CLONE_NEWIPC |\n\t\t\tsyscall.CLONE_NEWNET |\n\t\t\tsyscall.CLONE_NEWPID |\n\t\t\tsyscall.CLONE_NEWUSER,\n\t\tUidMappings: []syscall.SysProcIDMap{\n\t\t\t{\n\t\t\t\tContainerID: 0,\n\t\t\t\tHostID: os.Getuid(),\n\t\t\t\tSize: 1,\n\t\t\t},\n\t\t},\n\t\tGidMappings: []syscall.SysProcIDMap{\n\t\t\t{\n\t\t\t\tContainerID: 0,\n\t\t\t\tHostID: os.Getgid(),\n\t\t\t\tSize: 1,\n\t\t\t},\n\t\t},\n\t}\n\n\tif err 
:= cmd.Run(); err != nil {\n\t\tfmt.Printf(\"Error running the /bin/bash command %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n}", "func syscall_rawSyscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {\n\tlibcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&fn))\n\treturn\n}", "func Xaccess(tls *TLS, path uintptr, amode int32) int32 {\n\tr, _, err := syscall.Syscall(syscall.SYS_ACCESS, path, uintptr(amode), 0)\n\tif strace {\n\t\tfmt.Fprintf(os.Stderr, \"access(%q) %v %v\\n\", GoString(path), r, err)\n\t}\n\tif err != 0 {\n\t\ttls.setErrno(err)\n\t}\n\treturn int32(r)\n}", "func PrintPtraceRequest(req int32) string {\n\tvar ptraceRequest = map[int32]string{\n\t\t0: \"PTRACE_TRACEME\",\n\t\t1: \"PTRACE_PEEKTEXT\",\n\t\t2: \"PTRACE_PEEKDATA\",\n\t\t3: \"PTRACE_PEEKUSER\",\n\t\t4: \"PTRACE_POKETEXT\",\n\t\t5: \"PTRACE_POKEDATA\",\n\t\t6: \"PTRACE_POKEUSER\",\n\t\t7: \"PTRACE_CONT\",\n\t\t8: \"PTRACE_KILL\",\n\t\t9: \"PTRACE_SINGLESTEP\",\n\t\t12: \"PTRACE_GETREGS\",\n\t\t13: \"PTRACE_SETREGS\",\n\t\t14: \"PTRACE_GETFPREGS\",\n\t\t15: \"PTRACE_SETFPREGS\",\n\t\t16: \"PTRACE_ATTACH\",\n\t\t17: \"PTRACE_DETACH\",\n\t\t18: \"PTRACE_GETFPXREGS\",\n\t\t19: \"PTRACE_SETFPXREGS\",\n\t\t24: \"PTRACE_SYSCALL\",\n\t\t0x4200: \"PTRACE_SETOPTIONS\",\n\t\t0x4201: \"PTRACE_GETEVENTMSG\",\n\t\t0x4202: \"PTRACE_GETSIGINFO\",\n\t\t0x4203: \"PTRACE_SETSIGINFO\",\n\t\t0x4204: \"PTRACE_GETREGSET\",\n\t\t0x4205: \"PTRACE_SETREGSET\",\n\t\t0x4206: \"PTRACE_SEIZE\",\n\t\t0x4207: \"PTRACE_INTERRUPT\",\n\t\t0x4208: \"PTRACE_LISTEN\",\n\t\t0x4209: \"PTRACE_PEEKSIGINFO\",\n\t\t0x420a: \"PTRACE_GETSIGMASK\",\n\t\t0x420b: \"PTRACE_SETSIGMASK\",\n\t\t0x420c: \"PTRACE_SECCOMP_GET_FILTER\",\n\t\t0x420d: \"PTRACE_SECCOMP_GET_METADATA\",\n\t}\n\n\tvar res string\n\tif reqName, ok := ptraceRequest[req]; ok {\n\t\tres = reqName\n\t} else {\n\t\tres = strconv.Itoa(int(req))\n\t}\n\treturn res\n}", "func usage() {\n\tfmt.Fprintf(os.Stderr, \"usage: go run mksyscall_aix_ppc64.go [-b32 | 
-l32] [-tags x,y] [file ...]\\n\")\n\tos.Exit(1)\n}", "func runLinux(path, parameters string, seconds int) (string, bool) {\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Duration(seconds)*time.Second)\n\tdefer cancel()\n\n\t// Runs as nobody\n\tcommand := exec.CommandContext(ctx, `setpriv`, `--no-new-privs`, `--reuid=nobody`, path, `-PARAMETERS=`+parameters)\n\n\toutputBytes, err := command.CombinedOutput()\n\toutput := strings.TrimSuffix(string(outputBytes), \"\\n\")\n\n\tif ctx.Err() == context.DeadlineExceeded {\n\t\treturn errors.Render(errors.ExecutionTimeout, path), false\n\t}\n\n\tif err != nil {\n\t\treturn err.Error() + \"\\n\" + output, false\n\t}\n\n\treturn output, true\n}", "func (*FileSystemBase) Statfs(path string, stat *Statfs_t) int {\n\treturn -ENOSYS\n}", "func (this *lirc) lirc_ioctl(fd uintptr, name uintptr, data unsafe.Pointer) syscall.Errno {\n\tthis.lock.Lock()\n\tdefer this.lock.Unlock()\n\t_, _, err := syscall.RawSyscall(syscall.SYS_IOCTL, fd, name, uintptr(data))\n\treturn err\n}", "func puts(fd int, s string) int {\n\tn, err := syscall.Write(fd, []byte(s))\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"puts error %s\\n\", err))\n\t}\n\treturn n\n}", "func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {\n\tpanic(\"unimplemented\")\n}", "func ioctl(fd, name uintptr, data interface{}) error {\n\tvar v uintptr\n\n\tswitch dd := data.(type) {\n\tcase unsafe.Pointer:\n\t\tv = uintptr(dd)\n\n\tcase int:\n\t\tv = uintptr(dd)\n\n\tcase uintptr:\n\t\tv = dd\n\n\tdefault:\n\t\treturn fmt.Errorf(\"ioctl: data has invalid type %T\", data)\n\t}\n\n\t_, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, fd, name, v)\n\tif errno != 0 {\n\t\treturn errno\n\t}\n\treturn nil\n}", "func cmdLine() string {\n\treturn \"go run mksyscall_aix_ppc64.go \" + strings.Join(os.Args[1:], \" \")\n}", "func (w *wrapper) Access(path string, mask uint32) int {\n\treturn -fuse.ENOSYS\n}", "func system_program(code []byte, fileName 
string, num *int, args []string) []byte {\n\n\tvar output []byte\n\n\tif num != nil {\n\t\targs = append([]string{strconv.Itoa(*num)}, args...)\n\t}\n\n\toutput = run(fileName, args...)\n\n\treturn output\n}", "func runtime_procPin() int", "func runtime_procPin() int", "func fixSyscallName(prefix string, name KProbeName) string {\n\t// see get_syscall_fname in bcc\n\n\tparts := strings.Split(string(name), \"/\")\n\tprobeType := parts[0]\n\trawName := strings.TrimPrefix(parts[1], \"sys_\")\n\n\tout := probeType + \"/\" + prefix + rawName\n\n\treturn out\n}", "func checkErrno(err error) error {\n\te, ok := err.(syscall.Errno)\n\tif !ok {\n\t\treturn err\n\t}\n\n\tif e == 0 {\n\t\treturn nil\n\t}\n\n\treturn err\n}", "func switchRoot() error {\n\t// mount devtmpfs on /mnt/dev so that distri init has /dev/null\n\tif err := mount(\"dev\", \"/mnt/dev\", \"devtmpfs\"); err != nil {\n\t\treturn err\n\t}\n\n\t// mount proc on /mnt/proc so that distri init can increase RLIMIT_NOFILE\n\tif err := mount(\"proc\", \"/mnt/proc\", \"proc\"); err != nil {\n\t\treturn err\n\t}\n\n\t// TODO(later): remove files from initramfs to free up RAM\n\n\tif err := os.Chdir(\"/mnt\"); err != nil {\n\t\treturn err\n\t}\n\n\tif err := syscall.Mount(\".\", \"/\", \"\", syscall.MS_MOVE, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"mount . 
/: %v\", err)\n\t}\n\n\tif err := syscall.Chroot(\".\"); err != nil {\n\t\treturn fmt.Errorf(\"chroot .: %v\", err)\n\t}\n\n\tif err := os.Chdir(\"/\"); err != nil {\n\t\treturn err\n\t}\n\n\treturn syscall.Exec(\"/init\", []string{\"/init\"}, os.Environ())\n}", "func getSyscallPrefix() (string, error) {\n\n\tsyscallPrefixes := []string{\n\t\t\"__sys_\",\n\t\t\"sys_\",\n\t\t\"__x64_sys_\",\n\t\t\"__x32_compat_sys_\",\n\t\t\"__ia32_compat_sys_\",\n\t\t\"__arm64_sys_\",\n\t\t\"__s390x_sys_\",\n\t\t\"__s390_sys_\",\n\t}\n\n\tkallsyms := path.Join(util.GetProcRoot(), \"kallsyms\")\n\tfile, err := os.Open(kallsyms)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tscanner := bufio.NewScanner(file)\n\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tfor _, prefix := range syscallPrefixes {\n\t\t\tif strings.HasSuffix(line, \" \"+prefix+\"socket\") {\n\t\t\t\treturn prefix, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn \"\", fmt.Errorf(\"could not get syscall prefix\")\n}", "func Statfs(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n\taddr := args[0].Pointer()\n\tstatfsAddr := args[1].Pointer()\n\n\tpath, _, err := copyInPath(t, addr, false /* allowEmpty */)\n\tif err != nil {\n\t\treturn 0, nil, err\n\t}\n\n\treturn 0, nil, fileOpOn(t, linux.AT_FDCWD, path, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent, _ uint) error {\n\t\treturn statfsImpl(t, d, statfsAddr)\n\t})\n}", "func CreateSystemInfoFile2() {\n\tf, err := os.OpenFile(\"info.log\", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\tlog.SetOutput(f)\n\t/*\n\t\tonly doign this section becase its would be able to be ported to other GOOS hopefully\n\t*/\n\tsystemInfoLoc := os.ExpandEnv(\"$HOME/systemvar.txt\")\n\texec.Command(\"rm\", systemInfoLoc).Run() //file removed to avoid redundancy when appending\n\t//file is opened here to write to the information. 
if doesnt exist it will be created\n\tfile, err := os.OpenFile(systemInfoLoc, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer file.Close()\n\t//gets the system user\n\tgetSystemUser, err := exec.Command(\"echo\", os.Getenv(\"USER\")).CombinedOutput()\n\tif err != nil {\n\t\t// Show error and output\n\t\tlog.Fatalf(\"%s: %s\", err, getSystemUser)\n\t}\n\tif _, err := file.WriteString(string(getSystemUser)); err != nil {\n\t\tlog.Println(err)\n\t}\n\n\t// gets system kernel\n\tgetSystemKernel, err := exec.Command(\"uname\", \"-s\").CombinedOutput()\n\tif err != nil {\n\t\t// Show error and output\n\t\tlog.Fatalf(\"%s: %s\", err, getSystemKernel)\n\t}\n\tif _, err := file.WriteString(string(getSystemKernel)); err != nil {\n\t\tlog.Println(err)\n\t}\n\n\t//gets system kernel release\n\tgetSystemKernelRelease, err := exec.Command(\"uname\", \"-r\").CombinedOutput()\n\tif err != nil {\n\t\t// Show error and output\n\t\tlog.Fatalf(\"%s: %s\", err, getSystemKernelRelease)\n\t}\n\tif _, err := file.WriteString(string(getSystemKernelRelease)); err != nil {\n\t\tlog.Println(err)\n\t}\n\t//getSystemKernelVersion\n\tgetSystemKernelVersion, err := exec.Command(\"uname\", \"-v\").CombinedOutput()\n\tif err != nil {\n\t\t// Show error and output\n\t\tlog.Fatalf(\"%s: %s\", err, getSystemKernelVersion)\n\t}\n\tif _, err := file.WriteString(string(getSystemKernelVersion)); err != nil {\n\t\tlog.Println(err)\n\t}\n\t//getSystemArch\n\tgetSystemArch, err := exec.Command(\"uname\", \"--m\").CombinedOutput()\n\tif err != nil {\n\t\t// Show error and output\n\t\tlog.Fatalf(\"%s: %s\", err, getSystemArch)\n\t}\n\tif _, err := file.WriteString(string(getSystemArch)); err != nil {\n\t\tlog.Println(err)\n\t}\n\t//getSystemProcessor\n\tgetSystemProcessor, err := exec.Command(\"uname\", \"-p\").CombinedOutput()\n\tif err != nil {\n\t\t// Show error and output\n\t\tlog.Fatalf(\"%s: %s\", err, getSystemProcessor)\n\t}\n\tif _, err := 
file.WriteString(string(getSystemProcessor)); err != nil {\n\t\tlog.Println(err)\n\t}\n\t//getSystemHardwarePlatform\n\tgetSystemHardwarePlatform, err := exec.Command(\"uname\", \"-i\").CombinedOutput()\n\tif err != nil {\n\t\t// Show error and output\n\t\tlog.Fatalf(\"%s: %s\", err, getSystemHardwarePlatform)\n\t}\n\tif _, err := file.WriteString(string(getSystemHardwarePlatform)); err != nil {\n\t\tlog.Println(err)\n\t}\n\t//getSystemOS\n\tgetSystemOS, err := exec.Command(\"uname\", \"-o\").CombinedOutput()\n\tif err != nil {\n\t\t// Show error and output\n\t\tlog.Fatalf(\"%s: %s\", err, getSystemOS)\n\t}\n\tif _, err := file.WriteString(string(getSystemOS)); err != nil {\n\t\tlog.Println(err)\n\t}\n}", "func seccomp(op, flags uint32, ptr unsafe.Pointer) (uintptr, unix.Errno) {\n\tn, _, errno := unix.RawSyscall(SYS_SECCOMP, uintptr(op), uintptr(flags), uintptr(ptr))\n\treturn n, errno\n}", "func run() {\n\tfmt.Printf(\"Running from main %v\\n\", os.Args[2:])\n\tcmd := exec.Command(\"/proc/self/exe\", append([]string{\"child\"}, os.Args[2:]...)...)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tCloneflags: syscall.CLONE_NEWUTS | syscall.CLONE_NEWPID | syscall.CLONE_NEWNS,\n\t\t// use user ns enable you to do something with root privilege inside container\n\t\t// notice that at this moment you can not use cgroup along with NEWUSER flag\n\t\t// | syscall.CLONE_NEWUSER,\n\t\t// Credential: &syscall.Credential{Uid: 0, Gid 0},\n\t\t// UidMappings: []syscall.SysProcIDMap {\n\t\t// \t{ContainerID: 0, HostID: os.Getpid(), Size: 1}\n\t\t// },\n\t\t// GidMappings: []syscall.SysProcIDMap {\n\t\t// \t{ContainerID: 0, HostID: os.Getpid(), Size: 1}\n\t\t// },\n\t}\n\n\tcmd.Run()\n}", "func (this *hardware) sysinfo_() *syscall.Sysinfo_t {\n\tinfo := syscall.Sysinfo_t{}\n\tif err := syscall.Sysinfo(&info); err != nil {\n\t\tthis.log.Error(\"<hw.linux>sysinfo: %v\", err)\n\t\treturn nil\n\t} else 
{\n\t\treturn &info\n\t}\n}", "func launchSystem() string {\n\t_, err := execWhich(\"systemctl\")\n\tif err == nil {\n\t\treturn \"systemd\"\n\t}\n\n\t_, err = execWhich(\"initctl\")\n\tif err == nil {\n\t\treturn \"upstart\"\n\t}\n\n\treturn \"\"\n}", "func realSyscallNoError(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r uintptr)", "func IsSysErrNoSys(err error) bool {\n\tif err == syscall.ENOSYS {\n\t\treturn true\n\t}\n\tpathErr, ok := err.(*os.PathError)\n\treturn ok && pathErr.Err == syscall.ENOSYS\n}", "func sysMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (p unsafe.Pointer, err int)", "func (d *Driver) ioctl(cmd int, parent, id string) error {\n\tvar op, arg uintptr\n\tvar name string\n\tvar plen int\n\n\tlogrus.Debugf(\"lcfs ioctl cmd %d parent %s id %s\", cmd, parent, id)\n\n\t// Create a name string which includes both parent and id\n\tif parent != \"\" {\n\t\tname = path.Join(parent, id)\n\t\tplen = len(parent)\n\t} else {\n\t\tname = id\n\t}\n\tif name == \"\" {\n\t\top = uintptr(cmd)\n\t} else {\n\t\top = uintptr((1 << 30) | (len(name) << 16) | (plen << 8) | cmd);\n\t\targ = uintptr(unsafe.Pointer(&[]byte(name)[0]))\n\t}\n\t_, _, ep := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), op, arg);\n\tif ep != 0 {\n\t\tlogrus.Errorf(\"err %v\\n\", syscall.Errno(ep))\n\t\treturn syscall.Errno(ep)\n\t}\n\treturn nil\n}", "func sysctl(mib []C.int, old *byte, oldlen *uintptr,\n\tnew *byte, newlen uintptr) (err error) {\n\tvar p0 unsafe.Pointer\n\tp0 = unsafe.Pointer(&mib[0])\n\t_, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p0),\n\t\tuintptr(len(mib)),\n\t\tuintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)),\n\t\tuintptr(unsafe.Pointer(new)), uintptr(newlen))\n\tif e1 != 0 {\n\t\terr = e1\n\t}\n\treturn\n}", "func createSandboxWithSysctls(rc internalapi.RuntimeService, sysctls map[string]string) (string, *runtimeapi.PodSandboxConfig) {\n\tBy(\"create a PodSandbox with sysctls\")\n\tpodSandboxName := 
\"pod-sandbox-with-sysctls-\" + framework.NewUUID()\n\tuid := framework.DefaultUIDPrefix + framework.NewUUID()\n\tnamespace := framework.DefaultNamespacePrefix + framework.NewUUID()\n\n\tpodConfig := &runtimeapi.PodSandboxConfig{\n\t\tMetadata: framework.BuildPodSandboxMetadata(podSandboxName, uid, namespace, framework.DefaultAttempt),\n\t\tLinux: &runtimeapi.LinuxPodSandboxConfig{\n\t\t\tSysctls: sysctls,\n\t\t},\n\t}\n\treturn framework.RunPodSandbox(rc, podConfig), podConfig\n}", "func getfsstat(buf []syscall.Statfs_t, flags int) (n int, err error) {\n\tvar ptr uintptr\n\tvar size uintptr\n\n\tif len(buf) > 0 {\n\t\tptr = uintptr(unsafe.Pointer(&buf[0]))\n\t\tsize = unsafe.Sizeof(buf[0]) * uintptr(len(buf))\n\t} else {\n\t\tptr = uintptr(0)\n\t\tsize = uintptr(0)\n\t}\n\n\ttrap := uintptr(syscall.SYS_GETFSSTAT64)\n\tret, _, errno := syscall.Syscall(trap, ptr, size, uintptr(flags))\n\n\tn = int(ret)\n\tif errno != 0 {\n\t\terr = errno\n\t}\n\n\treturn\n}", "func mount(mountPoint string, opts *MountOptions, ready chan<- error) (fd int, err error) {\n\tuser, err := user.Current()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tf, err := os.OpenFile(\"/dev/fuse\", os.O_RDWR, 0666)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tfd = int(f.Fd())\n\n\tm := cmount.Mount{\n\t\tType: fmt.Sprintf(\"fuse.%s\", opts.Name),\n\t\tSource: opts.FsName,\n\t\tOptions: []string{\n\t\t\t\"nosuid\",\n\t\t\t\"nodev\",\n\t\t\tfmt.Sprintf(\"fd=%d\", fd),\n\t\t\tfmt.Sprintf(\"rootmode=%#o\", syscall.S_IFDIR),\n\t\t\tfmt.Sprintf(\"user_id=%s\", user.Uid),\n\t\t\tfmt.Sprintf(\"group_id=%s\", user.Gid),\n\t\t},\n\t}\n\n\tif opts.AllowOther {\n\t\tm.Options = append(m.Options, \"allow_other\")\n\t}\n\n\tm.Options = append(m.Options, opts.Options...)\n\n\terr = m.Mount(mountPoint)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tclose(ready)\n\treturn fd, err\n}", "func tcgetattr(fd int, termios *syscall.Termios) (err error) {\n\tr, _, errno := 
syscall.Syscall(uintptr(syscall.SYS_IOCTL),\n\t\tuintptr(fd), uintptr(syscall.TCGETS), uintptr(unsafe.Pointer(termios)))\n\tif errno != 0 {\n\t\terr = errno\n\t\treturn\n\t}\n\tif r != 0 {\n\t\terr = fmt.Errorf(\"tcgetattr failed %v\", r)\n\t\treturn\n\t}\n\treturn\n}", "func SystemInfo() string {\n\n\tvar si SysInfo\n\tvar m runtime.MemStats\n\truntime.ReadMemStats(&m)\n\tsi.AllocMemory = m.Alloc\n\tsi.AllocMemoryMB = btomb(m.Alloc)\n\tsi.TotalAllocMemory = m.TotalAlloc\n\tsi.TotalAllocMemoryMB = btomb(m.TotalAlloc)\n\tsi.TotalSystemMemory = m.Sys\n\tsi.TotalSystemMemoryMB = btomb(m.Sys)\n\tc, _ := cpu.Info()\n\n\tsi.CPUs = c[0].Cores\n\n\tsi.GolangVersion = runtime.Version()\n\tsi.ContainerHostName, _ = os.Hostname()\n\tsi.CurrentUTC = time.Now().UTC()\n\n\tsi.CurrentLocalTime = time.Now().Local()\n\n\tconst (\n\t\tB = 1\n\t\tKB = 1024 * B\n\t\tMB = 1024 * KB\n\t\tGB = 1024 * MB\n\t)\n\n\tv, _ := mem.VirtualMemory()\n\tfmt.Printf(\"Total: %v, Free:%v, UsedPercent:%f%%\\n\", v.Total, v.Free, v.UsedPercent)\n\n\ttype InfoStat struct {\n\t\tHostname string `json:\"hostname\"`\n\t\tUptime uint64 `json:\"uptime\"`\n\t\tBootTime uint64 `json:\"bootTime\"`\n\t\tProcs uint64 `json:\"procs\"` // number of processes\n\t\tOS string `json:\"os\"` // ex: freebsd, linux\n\t\tPlatform string `json:\"platform\"` // ex: ubuntu, linuxmint\n\t\tPlatformFamily string `json:\"platformFamily\"` // ex: debian, rhel\n\t\tPlatformVersion string `json:\"platformVersion\"` // version of the complete OS\n\t\tKernelVersion string `json:\"kernelVersion\"` // version of the OS kernel (if available)\n\t\tVirtualizationSystem string `json:\"virtualizationSystem\"`\n\t\tVirtualizationRole string `json:\"virtualizationRole\"` // guest or host\n\t\tHostID string `json:\"hostid\"` // ex: uuid\n\t}\n\n\tvar his *host.InfoStat\n\this, _ = host.Info()\n\n\tsi.Uptime = his.Uptime\n\n\tsi.OperatingSystem = his.OS\n\tsi.Platform = his.Platform\n\tsi.PlatformFamily = 
his.PlatformFamily\n\tsi.PlatformVersion = his.PlatformVersion\n\tsi.VirtualSystem = his.VirtualizationSystem\n\tsi.VirtualRole = his.VirtualizationRole\n\tsi.HostID = his.HostID\n\tsi.HostName = his.Hostname\n\tsi.BootTime = strconv.FormatUint(his.BootTime, 10)\n\tsi.KernelVersion = his.KernelVersion\n\n\tsi.UptimeDays = si.Uptime / (60 * 60 * 24)\n\tsi.UptimeHours = (si.Uptime - (si.UptimeDays * 60 * 60 * 24)) / (60 * 60)\n\tsi.UptimeMinutes = ((si.Uptime - (si.UptimeDays * 60 * 60 * 24)) - (si.UptimeHours * 60 * 60)) / 60\n\tinterfaces, err := net.Interfaces()\n\n\tif err == nil {\n\t\tfor i, interfac := range interfaces {\n\t\t\tif interfac.Name == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\taddrs := interfac.Addrs\n\t\t\tsi.NetworkInterfaces[i].Name = interfac.Name\n\t\t\tsi.NetworkInterfaces[i].HardwareAddress = string(interfac.HardwareAddr)\n\t\t\tfor x, addr := range addrs {\n\t\t\t\tif addr.String() != \"\" {\n\t\t\t\t\tsi.NetworkInterfaces[i].IPAddresses[x].IPAddress = addr.String()\n\t\t\t\t} else {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tvar paths [10]string\n\tpaths[0] = \"/\"\n\n\tfor i, path := range paths {\n\t\tdisk := DiskUsage(path)\n\t\tsi.Disk[i].Path = path\n\t\tsi.Disk[i].All = float64(disk.All) / float64(GB)\n\t\tsi.Disk[i].Used = float64(disk.Used) / float64(GB)\n\t\tsi.Disk[i].Free = float64(disk.Free) / float64(GB)\n\t}\n\n\tstrJSON, err := json.Marshal(si)\n\tcheckErr(err)\n\n\treturn string(strJSON)\n}", "func Statx(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n\tfd := args[0].Int()\n\tpathAddr := args[1].Pointer()\n\tflags := args[2].Int()\n\tmask := args[3].Uint()\n\tstatxAddr := args[4].Pointer()\n\n\tif mask&linux.STATX__RESERVED > 0 {\n\t\treturn 0, nil, syserror.EINVAL\n\t}\n\tif flags&linux.AT_STATX_SYNC_TYPE == linux.AT_STATX_SYNC_TYPE {\n\t\treturn 0, nil, syserror.EINVAL\n\t}\n\n\tpath, dirPath, err := copyInPath(t, pathAddr, flags&linux.AT_EMPTY_PATH != 0)\n\tif err != nil 
{\n\t\treturn 0, nil, err\n\t}\n\n\tif path == \"\" {\n\t\tfile := t.GetFile(fd)\n\t\tif file == nil {\n\t\t\treturn 0, nil, syserror.EBADF\n\t\t}\n\t\tdefer file.DecRef()\n\t\tuattr, err := file.UnstableAttr(t)\n\t\tif err != nil {\n\t\t\treturn 0, nil, err\n\t\t}\n\t\treturn 0, nil, statx(t, file.Dirent.Inode.StableAttr, uattr, statxAddr)\n\t}\n\n\tresolve := dirPath || flags&linux.AT_SYMLINK_NOFOLLOW == 0\n\n\treturn 0, nil, fileOpOn(t, fd, path, resolve, func(root *fs.Dirent, d *fs.Dirent, _ uint) error {\n\t\tif dirPath && !fs.IsDir(d.Inode.StableAttr) {\n\t\t\treturn syserror.ENOTDIR\n\t\t}\n\t\tuattr, err := d.Inode.UnstableAttr(t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn statx(t, d.Inode.StableAttr, uattr, statxAddr)\n\t})\n}", "func checkSetSysctls(rc internalapi.RuntimeService, containerID, sysctlPath, expected string) {\n\tcmd := []string{\"cat\", sysctlPath}\n\tstdout, _, err := rc.ExecSync(containerID, cmd, time.Duration(defaultExecSyncTimeout)*time.Second)\n\tExpect(err).NotTo(HaveOccurred())\n\tExpect(strings.TrimSpace(string(stdout))).To(Equal(expected))\n}", "func execInSystem(execPath string, params []string, logsBuffer *bytes.Buffer, print bool) error {\n\tvar lock sync.Mutex\n\tvar c string\n\tvar cmdName string\n\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\t\tc = \"-c\"\n\t\tcmdName = \"sh\"\n\tcase \"windows\":\n\t\tc = \"/c\"\n\t\tcmdName = \"cmd\"\n\tdefault:\n\t\tlog.Panicf(\"System type error, got <%s>, but expect linux/windowns!\", runtime.GOOS)\n\t}\n\n\tcmd := exec.Command(cmdName, append(params, c)...)\n\tcmd.Dir = execPath\n\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// print log\n\toutReader := bufio.NewReader(stdout)\n\terrReader := bufio.NewReader(stderr)\n\tprintLog := func(reader *bufio.Reader, typex string) {\n\t\tfor {\n\t\t\tline, err := reader.ReadString('\\n')\n\t\t\tif print 
{\n\t\t\t\tlog.Printf(\"%s: %s\", typex, line)\n\t\t\t}\n\t\t\tif logsBuffer != nil {\n\t\t\t\tlock.Lock()\n\t\t\t\tlogsBuffer.WriteString(line)\n\t\t\t\tlock.Unlock()\n\t\t\t}\n\t\t\tif err != nil || err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tprintLog(outReader, \"Stdout\")\n\t}()\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tprintLog(errReader, \"Stderr\")\n\t}()\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\twg.Wait()\n\treturn cmd.Wait()\n}", "func (comp *compiler) assignSyscallNumbers(consts map[string]uint64) {\n\tfor _, decl := range comp.desc.Nodes {\n\t\tc, ok := decl.(*ast.Call)\n\t\tif !ok || strings.HasPrefix(c.CallName, \"syz_\") {\n\t\t\tcontinue\n\t\t}\n\t\tstr := comp.target.SyscallPrefix + c.CallName\n\t\tnr, ok := consts[str]\n\t\tif ok {\n\t\t\tc.NR = nr\n\t\t\tcontinue\n\t\t}\n\t\tc.NR = ^uint64(0) // mark as unused to not generate it\n\t\tname := \"syscall \" + c.CallName\n\t\tif !comp.unsupported[name] {\n\t\t\tcomp.unsupported[name] = true\n\t\t\tcomp.warning(c.Pos, \"unsupported syscall: %v due to missing const %v\",\n\t\t\t\tc.CallName, str)\n\t\t}\n\t}\n}", "func statfsImpl(t *kernel.Task, d *fs.Dirent, addr usermem.Addr) error {\n\tinfo, err := d.Inode.StatFS(t)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Construct the statfs structure and copy it out.\n\tstatfs := linux.Statfs{\n\t\tType: info.Type,\n\t\t// Treat block size and fragment size as the same, as\n\t\t// most consumers of this structure will expect one\n\t\t// or the other to be filled in.\n\t\tBlockSize: d.Inode.StableAttr.BlockSize,\n\t\tBlocks: info.TotalBlocks,\n\t\t// We don't have the concept of reserved blocks, so\n\t\t// report blocks free the same as available blocks.\n\t\t// This is a normal thing for filesystems, to do, see\n\t\t// udf, hugetlbfs, tmpfs, among others.\n\t\tBlocksFree: info.FreeBlocks,\n\t\tBlocksAvailable: 
info.FreeBlocks,\n\t\tFiles: info.TotalFiles,\n\t\tFilesFree: info.FreeFiles,\n\t\t// Same as Linux for simple_statfs, see fs/libfs.c.\n\t\tNameLength: linux.NAME_MAX,\n\t\tFragmentSize: d.Inode.StableAttr.BlockSize,\n\t\t// Leave other fields 0 like simple_statfs does.\n\t}\n\t_, err = t.CopyOut(addr, &statfs)\n\treturn err\n}", "func sysExec(args ...OBJ) OBJ {\n\tif len(args) < 1 {\n\t\treturn NewError(\"`sys.exec` wanted string, got invalid argument\")\n\t}\n\n\tvar command string\n\tswitch c := args[0].(type) {\n\tcase *object.String:\n\t\tcommand = c.Value\n\tdefault:\n\t\treturn NewError(\"`sys.exec` wanted string, got invalid argument\")\n\t}\n\n\tif len(command) < 1 {\n\t\treturn NewError(\"`sys.exec` expected string, got invalid argument\")\n\t}\n\t// split the command\n\ttoExec := splitCommand(command)\n\tcmd := exec.Command(toExec[0], toExec[1:]...)\n\n\t// get the result\n\tvar outb, errb bytes.Buffer\n\tcmd.Stdout = &outb\n\tcmd.Stderr = &errb\n\terr := cmd.Run()\n\n\t// If the command exits with a non-zero exit-code it\n\t// is regarded as a failure. 
Here we test for ExitError\n\t// to regard that as a non-failure.\n\tif err != nil && err != err.(*exec.ExitError) {\n\t\tfmt.Printf(\"Failed to run '%s' -> %s\\n\", command, err.Error())\n\t\treturn &object.Error{Message: \"Failed to run command!\"}\n\t}\n\n\t// The result-objects to store in our hash.\n\tstdout := &object.String{Value: outb.String()}\n\tstderr := &object.String{Value: errb.String()}\n\n\treturn NewHash(StringObjectMap{\n\t\t\"stdout\": stdout,\n\t\t\"stderr\": stderr,\n\t})\n}", "func (*FileSystemBase) Symlink(target string, newpath string) int {\n\treturn -ENOSYS\n}", "func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {\n\tp := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)\n\tif uintptr(p) < 4096 {\n\t\tif uintptr(p) == _EACCES {\n\t\t\tprint(\"runtime: mmap: access denied\\n\")\n\t\t\texit(2)\n\t\t}\n\t\tif uintptr(p) == _EAGAIN {\n\t\t\tprint(\"runtime: mmap: too much locked memory (check 'ulimit -l').\\n\")\n\t\t\texit(2)\n\t\t}\n\t\treturn nil\n\t}\n\tmSysStatInc(sysStat, n)\n\treturn p\n}", "func servicectlhandler(ctl uint32) uintptr", "func Xpopen(tls TLS, command, typ uintptr) uintptr {\n\tpanic(\"TODO popen\")\n}", "func fcntl(fd int, cmd int, arg int) (int, error)", "func (ns *nodeServer) mount(sourcePath, targetPath string, mountOptions []string, rawBlock bool) error {\n\tnotMnt, err := ns.mounter.IsLikelyNotMountPoint(targetPath)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn fmt.Errorf(\"failed to determain if '%s' is a valid mount point: %s\", targetPath, err.Error())\n\t}\n\tif !notMnt {\n\t\treturn nil\n\t}\n\n\t// Create target path, using a file for raw block bind mounts\n\t// or a directory for filesystems. 
Might already exist from a\n\t// previous call or because Kubernetes erroneously created it\n\t// for us.\n\tif rawBlock {\n\t\tf, err := os.OpenFile(targetPath, os.O_CREATE, os.FileMode(0644))\n\t\tif err == nil {\n\t\t\tdefer f.Close()\n\t\t} else if !os.IsExist(err) {\n\t\t\treturn fmt.Errorf(\"create target device file: %w\", err)\n\t\t}\n\t} else {\n\t\tif err := os.Mkdir(targetPath, os.FileMode(0755)); err != nil && !os.IsExist(err) {\n\t\t\treturn fmt.Errorf(\"create target directory: %w\", err)\n\t\t}\n\t}\n\n\t// We supposed to use \"mount\" package - ns.mounter.Mount()\n\t// but it seems not supporting -c \"canonical\" option, so do it with exec()\n\t// added -c makes canonical mount, resulting in mounted path matching what LV thinks is lvpath.\n\targs := []string{\"-c\"}\n\tif len(mountOptions) != 0 {\n\t\targs = append(args, \"-o\", strings.Join(mountOptions, \",\"))\n\t}\n\n\targs = append(args, sourcePath, targetPath)\n\tif _, err := pmemexec.RunCommand(\"mount\", args...); err != nil {\n\t\treturn fmt.Errorf(\"mount filesystem failed: %s\", err.Error())\n\t}\n\n\treturn nil\n}", "func Mount(mountpoint string, fs string, device string, isReadOnly bool) error {\n\tlog.WithFields(log.Fields{\n\t\t\"device\": device,\n\t\t\"mountpoint\": mountpoint,\n\t}).Debug(\"Calling syscall.Mount() \")\n\n\tflags := 0\n\tif isReadOnly {\n\t\tflags = syscall.MS_RDONLY\n\t}\n\terr := syscall.Mount(device, mountpoint, fs, uintptr(flags), \"\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to mount device %s at %s: %s\", device, mountpoint, err)\n\t}\n\treturn nil\n}", "func main() {\n\tif os.Geteuid() != 0 {\n\t\tfmt.Printf(\"This command must be ran as root via sudo or osascript\\n\")\n\t\tos.Exit(1)\n\t}\n\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error: %s\\n\", err.Error())\n\t\tos.Exit(1)\n\t}\n}", "func bindMount(source, dest string) error {\n\treturn syscall.Mount(source, dest, \"\", syscall.MS_BIND, \"\")\n}", "func 
Xfstatfs(tls TLS, fd int32, buf uintptr) int32 {\n\tr, _, err := syscall.Syscall(syscall.SYS_FSTATFS, uintptr(fd), buf, 0)\n\tif strace {\n\t\tfmt.Fprintf(TraceWriter, \"fstatfs(%v, %#x) %v %v\\n\", fd, buf, r, err)\n\t}\n\tif err != 0 {\n\t\ttls.setErrno(err)\n\t}\n\treturn int32(r)\n}", "func testCmdMountFilesystem(t *testing.T) {\n\tt.Log(\"TODO\")\n}", "func Xfopen(tls TLS, path, mode uintptr) uintptr {\n\tp := GoString(path)\n\tvar u uintptr\n\tswitch p {\n\tcase os.Stderr.Name():\n\t\tu = stderr\n\tcase os.Stdin.Name():\n\t\tu = stdin\n\tcase os.Stdout.Name():\n\t\tu = stdout\n\tdefault:\n\t\tvar f *os.File\n\t\tvar err error\n\t\tswitch mode := GoString(mode); mode {\n\t\tcase \"a\":\n\t\t\tif f, err = os.OpenFile(p, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666); err != nil {\n\t\t\t\tswitch {\n\t\t\t\tcase os.IsPermission(err):\n\t\t\t\t\ttls.setErrno(errno.XEPERM)\n\t\t\t\tdefault:\n\t\t\t\t\ttls.setErrno(errno.XEACCES)\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"r\", \"rb\":\n\t\t\tif f, err = os.OpenFile(p, os.O_RDONLY, 0666); err != nil {\n\t\t\t\tswitch {\n\t\t\t\tcase os.IsNotExist(err):\n\t\t\t\t\ttls.setErrno(errno.XENOENT)\n\t\t\t\tcase os.IsPermission(err):\n\t\t\t\t\ttls.setErrno(errno.XEPERM)\n\t\t\t\tdefault:\n\t\t\t\t\ttls.setErrno(errno.XEACCES)\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"r+b\":\n\t\t\tif f, err = os.OpenFile(p, os.O_RDWR, 0666); err != nil {\n\t\t\t\tswitch {\n\t\t\t\tcase os.IsNotExist(err):\n\t\t\t\t\ttls.setErrno(errno.XENOENT)\n\t\t\t\tcase os.IsPermission(err):\n\t\t\t\t\ttls.setErrno(errno.XEPERM)\n\t\t\t\tdefault:\n\t\t\t\t\ttls.setErrno(errno.XEACCES)\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"w\", \"wb\":\n\t\t\tif f, err = os.OpenFile(p, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666); err != nil {\n\t\t\t\tswitch {\n\t\t\t\tcase os.IsPermission(err):\n\t\t\t\t\ttls.setErrno(errno.XEPERM)\n\t\t\t\tdefault:\n\t\t\t\t\ttls.setErrno(errno.XEACCES)\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(mode)\n\t\t}\n\t\tif f != nil {\n\t\t\tu = Xmalloc(tls, 
ptrSize)\n\t\t\tfiles.add(f, u)\n\t\t}\n\t}\n\treturn u\n}", "func daemonise() {\n\t// Drop privileges by switching to nobody user and group\n\tif _, _, err := syscall.Syscall(syscall.SYS_SETGID, 65534, 0, 0); err != 0 {\n\t\tos.Exit(1)\n\t}\n\tif _, _, err := syscall.Syscall(syscall.SYS_SETUID, 65534, 0, 0); err != 0 {\n\t\tos.Exit(1)\n\t}\n\n\t// Do first fork\n\tpid, _, _ := syscall.Syscall(syscall.SYS_FORK, 0, 0, 0)\n\n\t// Exit in parent process\n\tswitch pid {\n\tcase 0:\n\t\t// Child process, carry on\n\t\tbreak\n\tdefault:\n\t\t// Parent process, exit cleanly\n\t\tos.Exit(0)\n\t}\n\n\t// Call setsid\n\t_, err := syscall.Setsid()\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n\n\t// Fork again\n\tpid, _, _ = syscall.Syscall(syscall.SYS_FORK, 0, 0, 0)\n\n\t// Exit in parent again\n\tswitch pid {\n\tcase 0:\n\t\t// Child process, carry on\n\t\tbreak\n\tdefault:\n\t\t// Parent process, exit cleanly\n\t\tos.Exit(0)\n\t}\n\n\t// Clear umask\n\tsyscall.Umask(0)\n\n\t// Change working directory\n\terr = syscall.Chdir(\"/\")\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n\n\t// Duplicate /dev/null to stdin, stdout and stderr\n\tnullFile, err := os.OpenFile(\"/dev/null\", os.O_RDWR, 0)\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n\tnullFd := nullFile.Fd()\n\tsyscall.Dup2(int(nullFd), int(os.Stdin.Fd()))\n\tsyscall.Dup2(int(nullFd), int(os.Stdout.Fd()))\n\tsyscall.Dup2(int(nullFd), int(os.Stderr.Fd()))\n\n}", "func Prctl(option int, arg2, arg3, arg4, arg5 uintptr) error {\n\t_, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0)\n\tif e1 != 0 {\n\t\treturn fmt.Errorf(\"errno %d\", e1)\n\t}\n\treturn nil\n}", "func sysctlbyname(name string, data interface{}) (err error) {\n\tval, err := syscall.Sysctl(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbuf := []byte(val)\n\n\tswitch v := data.(type) {\n\tcase *uint64:\n\t\t*v = *(*uint64)(unsafe.Pointer(&buf[0]))\n\t\treturn\n\t}\n\n\tbbuf := bytes.NewBuffer([]byte(val))\n\treturn binary.Read(bbuf, 
binary.LittleEndian, data)\n}", "func WrapExec(cmd string, args []String, nArg uint32) (status syscall.Status){\n\n\n\tpath := \"/programs/\"+cmd\n\n\tif nArg == 0 {\n\n\t\tstatus = altEthos.Exec(path)\n\n\t} else if nArg == 1 {\n\n\t\tstatus = altEthos.Exec(path, &args[0])\n\n\t} else if nArg == 2 {\n\n\t\tstatus = altEthos.Exec(path, &args[0], &args[1])\n\n\t} else if nArg == 3 {\n\n\t\tstatus = altEthos.Exec(path, &args[0], &args[1], &args[2])\n\n\t} else if nArg == 4 {\n\n\t\tstatus = altEthos.Exec(path, &args[0], &args[1], &args[2], &args[3])\n\n\t}\n\n\treturn\n\n}", "func Mount(vsock transport.Transport, target, share string, port uint32, readonly bool) (err error) {\n\tactivity := \"plan9::Mount\"\n\tlog := logrus.WithFields(logrus.Fields{\n\t\t\"target\": target,\n\t\t\"share\": share,\n\t\t\"port\": port,\n\t\t\"readonly\": readonly,\n\t})\n\tlog.Debug(activity + \" - Begin Operation\")\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tlog.Data[logrus.ErrorKey] = err\n\t\t\tlog.Error(activity + \" - End Operation\")\n\t\t} else {\n\t\t\tlog.Debug(activity + \" - End Operation\")\n\t\t}\n\t}()\n\n\tif err := osMkdirAll(target, 0700); err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tosRemoveAll(target)\n\t\t}\n\t}()\n\tconn, err := vsock.Dial(port)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"could not connect to plan9 server for %s\", target)\n\t}\n\tf, err := conn.File()\n\tconn.Close()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"could not get file for plan9 connection for %s\", target)\n\t}\n\tdefer f.Close()\n\n\tvar mountOptions uintptr\n\tdata := fmt.Sprintf(\"trans=fd,rfdno=%d,wfdno=%d\", f.Fd(), f.Fd())\n\tif readonly {\n\t\tmountOptions |= unix.MS_RDONLY\n\t\tdata += \",noload\"\n\t}\n\tif share != \"\" {\n\t\tdata += \",aname=\" + share\n\t}\n\tif err := unixMount(target, target, \"9p\", mountOptions, data); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to mount directory for mapped directory %s\", 
target)\n\t}\n\treturn nil\n}", "func mntCmd(source string, target string, c *MountConfig) string {\n\toptions := map[string]string{\n\t\t\"dfltgid\": strconv.Itoa(c.GID),\n\t\t\"dfltuid\": strconv.Itoa(c.UID),\n\t}\n\tif c.Port != 0 {\n\t\toptions[\"port\"] = strconv.Itoa(c.Port)\n\t}\n\tif c.Version != \"\" {\n\t\toptions[\"version\"] = c.Version\n\t}\n\tif c.MSize != 0 {\n\t\toptions[\"msize\"] = strconv.Itoa(c.MSize)\n\t}\n\n\t// Copy in all of the user-supplied keys and values\n\tfor k, v := range c.Options {\n\t\toptions[k] = v\n\t}\n\n\t// Convert everything into a sorted list for better test results\n\topts := []string{}\n\tfor k, v := range options {\n\t\t// Mount option with no value, such as \"noextend\"\n\t\tif v == \"\" {\n\t\t\topts = append(opts, k)\n\t\t\tcontinue\n\t\t}\n\t\topts = append(opts, fmt.Sprintf(\"%s=%s\", k, v))\n\t}\n\tsort.Strings(opts)\n\treturn fmt.Sprintf(\"sudo mount -t %s -o %s %s %s\", c.Type, strings.Join(opts, \",\"), source, target)\n}", "func runShellAs(who, hname, ttype, cmd string, interactive bool, conn *xsnet.Conn, chaffing bool) (exitStatus uint32, err error) {\n\tvar wg sync.WaitGroup\n\tu, err := user.Lookup(who)\n\tif err != nil {\n\t\texitStatus = 1\n\t\treturn\n\t}\n\tvar uid, gid uint32\n\t_, _ = fmt.Sscanf(u.Uid, \"%d\", &uid) // nolint: gosec\n\t_, _ = fmt.Sscanf(u.Gid, \"%d\", &gid) // nolint: gosec\n\tlog.Println(\"uid:\", uid, \"gid:\", gid)\n\n\t// Need to clear server's env and set key vars of the\n\t// target user. 
This isn't perfect (TERM doesn't seem to\n\t// work 100%; ANSI/xterm colour isn't working even\n\t// if we set \"xterm\" or \"ansi\" here; and line count\n\t// reported by 'stty -a' defaults to 24 regardless\n\t// of client shell window used to run client.\n\t// Investigate -- rlm 2018-01-26)\n\tos.Clearenv()\n\t_ = os.Setenv(\"HOME\", u.HomeDir) // nolint: gosec\n\t_ = os.Setenv(\"TERM\", ttype) // nolint: gosec\n\t_ = os.Setenv(\"XS_SESSION\", \"1\") // nolint: gosec\n\n\tvar c *exec.Cmd\n\tif interactive {\n\t\tif useSysLogin {\n\t\t\t// Use the server's login binary (post-auth\n\t\t\t// which is still done via our own bcrypt file)\n\t\t\t// Things UNIX login does, like print the 'motd',\n\t\t\t// and use the shell specified by /etc/passwd, will be done\n\t\t\t// automagically, at the cost of another external tool\n\t\t\t// dependency.\n\t\t\t//\n\t\t\tc = exec.Command(xs.GetTool(\"login\"), \"-f\", \"-p\", who) // nolint: gosec\n\t\t} else {\n\t\t\tc = exec.Command(xs.GetTool(\"bash\"), \"-i\", \"-l\") // nolint: gosec\n\t\t}\n\t} else {\n\t\tc = exec.Command(xs.GetTool(\"bash\"), \"-c\", cmd) // nolint: gosec\n\t}\n\t//If os.Clearenv() isn't called by server above these will be seen in the\n\t//client's session env.\n\t//c.Env = []string{\"HOME=\" + u.HomeDir, \"SUDO_GID=\", \"SUDO_UID=\", \"SUDO_USER=\", \"SUDO_COMMAND=\", \"MAIL=\", \"LOGNAME=\"+who}\n\tc.Dir = u.HomeDir\n\tc.SysProcAttr = &syscall.SysProcAttr{}\n\tif useSysLogin {\n\t\t// If using server's login binary, drop to user creds\n\t\t// is taken care of by it.\n\t\tc.SysProcAttr.Credential = &syscall.Credential{}\n\t} else {\n\t\tc.SysProcAttr.Credential = &syscall.Credential{Uid: uid, Gid: gid}\n\t}\n\n\t// Start the command with a pty.\n\tptmx, err := pty.Start(c) // returns immediately with ptmx file\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn xsnet.CSEPtyExecFail, err\n\t}\n\t// Make sure to close the pty at the end.\n\t// 
#gv:s/label=\\\"runShellAs\\$1\\\"/label=\\\"deferPtmxClose\\\"/\n\tdefer func() {\n\t\t//logger.LogDebug(fmt.Sprintf(\"[Exited process was %d]\", c.Process.Pid))\n\t\t_ = ptmx.Close()\n\t}() // nolint: gosec\n\n\t// get pty info for system accounting (who, lastlog)\n\tpts, pe := ptsName(ptmx.Fd())\n\tif pe != nil {\n\t\treturn xsnet.CSEPtyGetNameFail, err\n\t}\n\tutmpx := goutmp.Put_utmp(who, pts, hname)\n\tdefer func() { goutmp.Unput_utmp(utmpx) }()\n\tgoutmp.Put_lastlog_entry(\"xs\", who, pts, hname)\n\n\tlog.Printf(\"[%s]\\n\", cmd)\n\tif err != nil {\n\t\tlog.Printf(\"Command finished with error: %v\", err)\n\t} else {\n\t\t// Watch for term resizes\n\t\t// #gv:s/label=\\\"runShellAs\\$2\\\"/label=\\\"termResizeWatcher\\\"/\n\t\tgo func() {\n\t\t\tfor sz := range conn.WinCh {\n\t\t\t\tlog.Printf(\"[Setting term size to: %v %v]\\n\", sz.Rows, sz.Cols)\n\t\t\t\tpty.Setsize(ptmx, &pty.Winsize{Rows: sz.Rows, Cols: sz.Cols}) // nolint: gosec,errcheck\n\t\t\t}\n\t\t\tlog.Println(\"*** WinCh goroutine done ***\")\n\t\t}()\n\n\t\t// Copy stdin to the pty.. 
(bgnd goroutine)\n\t\t// #gv:s/label=\\\"runShellAs\\$3\\\"/label=\\\"stdinToPtyWorker\\\"/\n\t\tgo func() {\n\t\t\t_, e := io.Copy(ptmx, conn)\n\t\t\tif e != nil {\n\t\t\t\tlog.Println(\"** stdin->pty ended **:\", e.Error())\n\t\t\t} else {\n\t\t\t\tlog.Println(\"*** stdin->pty goroutine done ***\")\n\t\t\t}\n\t\t}()\n\n\t\tif chaffing {\n\t\t\tconn.EnableChaff()\n\t\t}\n\t\t// #gv:s/label=\\\"runShellAs\\$4\\\"/label=\\\"deferChaffShutdown\\\"/\n\t\tdefer func() {\n\t\t\tconn.DisableChaff()\n\t\t\tconn.ShutdownChaff()\n\t\t}()\n\n\t\t// ..and the pty to stdout.\n\t\t// This may take some time exceeding that of the\n\t\t// actual command's lifetime, so the c.Wait() below\n\t\t// must synchronize with the completion of this goroutine\n\t\t// to ensure all stdout data gets to the client before\n\t\t// connection is closed.\n\t\twg.Add(1)\n\t\t// #gv:s/label=\\\"runShellAs\\$5\\\"/label=\\\"ptyToStdoutWorker\\\"/\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t_, e := io.Copy(conn, ptmx)\n\t\t\tif e != nil {\n\t\t\t\tlog.Println(\"** pty->stdout ended **:\", e.Error())\n\t\t\t} else {\n\t\t\t\t// The above io.Copy() will exit when the command attached\n\t\t\t\t// to the pty exits\n\t\t\t\tlog.Println(\"*** pty->stdout goroutine done ***\")\n\t\t\t}\n\t\t}()\n\n\t\tif err := c.Wait(); err != nil {\n\t\t\t//fmt.Println(\"*** c.Wait() done ***\")\n\t\t\tif exiterr, ok := err.(*exec.ExitError); ok {\n\t\t\t\t// The program has exited with an exit code != 0\n\n\t\t\t\t// This works on both Unix and Windows. 
Although package\n\t\t\t\t// syscall is generally platform dependent, WaitStatus is\n\t\t\t\t// defined for both Unix and Windows and in both cases has\n\t\t\t\t// an ExitStatus() method with the same signature.\n\t\t\t\tif status, ok := exiterr.Sys().(syscall.WaitStatus); ok {\n\t\t\t\t\texitStatus = uint32(status.ExitStatus())\n\t\t\t\t\tlog.Printf(\"Exit Status: %d\", exitStatus)\n\t\t\t\t}\n\t\t\t}\n\t\t\tconn.SetStatus(xsnet.CSOType(exitStatus))\n\t\t} else {\n\t\t\tlogger.LogDebug(\"*** Main proc has exited. ***\")\n\t\t\t// Background jobs still may be running; close the\n\t\t\t// pty anyway, so the client can return before\n\t\t\t// wg.Wait() below completes (Issue #18)\n\t\t\tif interactive {\n\t\t\t\t_ = ptmx.Close()\n\t\t\t}\n\t\t}\n\t\twg.Wait() // Wait on pty->stdout completion to client\n\t}\n\treturn\n}", "func (bp *Proc) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n\t// Switch on ioctl request.\n\tswitch uint32(args[1].Int()) {\n\tcase linux.BinderVersionIoctl:\n\t\tver := &linux.BinderVersion{\n\t\t\tProtocolVersion: currentProtocolVersion,\n\t\t}\n\t\t// Copy result to user-space.\n\t\t_, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), ver, usermem.IOOpts{\n\t\t\tAddressSpaceActive: true,\n\t\t})\n\t\treturn 0, err\n\tcase linux.BinderWriteReadIoctl:\n\t\t// TODO(b/30946773): Implement.\n\t\tfallthrough\n\tcase linux.BinderSetIdleTimeoutIoctl:\n\t\t// TODO(b/30946773): Implement.\n\t\tfallthrough\n\tcase linux.BinderSetMaxThreadsIoctl:\n\t\t// TODO(b/30946773): Implement.\n\t\tfallthrough\n\tcase linux.BinderSetIdlePriorityIoctl:\n\t\t// TODO(b/30946773): Implement.\n\t\tfallthrough\n\tcase linux.BinderSetContextMgrIoctl:\n\t\t// TODO(b/30946773): Implement.\n\t\tfallthrough\n\tcase linux.BinderThreadExitIoctl:\n\t\t// TODO(b/30946773): Implement.\n\t\treturn 0, syserror.ENOSYS\n\tdefault:\n\t\t// Ioctls irrelevant to Binder.\n\t\treturn 0, syserror.EINVAL\n\t}\n}", "func 
TestStraceParse2Basic(t *testing.T) {\n\tnopen := 0\n\tnexec := 0\n\tfor _, l := range straceout {\n\t\tif strings.Contains(l, \" open(\") {\n\t\t\tnopen++\n\t\t}\n\t\tif strings.Contains(l, \" execve(\") {\n\t\t\tnexec++\n\t\t}\n\t}\n\tsyscalls := map[string]int{}\n\tfor info := range StraceParse2(StraceParse1(ChanFromList(straceout))) {\n\t\tsyscalls[info.syscall]++\n\t}\n\tif nopen != syscalls[\"open\"] {\n\t\tt.Errorf(\"\\\"open\\\" count mismatch: %d != %d\", nopen, syscalls[\"open\"])\n\t}\n\tif nexec != syscalls[\"execve\"] {\n\t\tt.Errorf(\"\\\"execve\\\" count mismatch: %d != %d\", nexec, syscalls[\"execve\"])\n\t}\n}", "func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {\n\tp, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)\n\tif err != 0 {\n\t\tif err == _EACCES {\n\t\t\tprint(\"runtime: mmap: access denied\\n\")\n\t\t\texit(2)\n\t\t}\n\t\tif err == _EAGAIN {\n\t\t\tprint(\"runtime: mmap: too much locked memory (check 'ulimit -l').\\n\")\n\t\t\texit(2)\n\t\t}\n\t\treturn nil\n\t}\n\tsysStat.add(int64(n))\n\treturn p\n}", "func getShell() (string, error) {\n\tvar ret string\n\n\t// Start by making a new ssh session on the lighthouse.\n\tsshConfig := &ssh.ClientConfig{\n\t\tUser: lhusername,\n\t\tAuth: []ssh.AuthMethod{\n\t\t\tssh.Password(lhpassword),\n\t\t},\n\t\tHostKeyCallback: ssh.InsecureIgnoreHostKey(), // We're authing with credentials.\n\t}\n\tconnection, err := ssh.Dial(sshConn, getSSHAddr(), sshConfig)\n\tif err != nil {\n\t\treturn ret, fmt.Errorf(\"Failed to create a new shell session: %s\", err)\n\t}\n\tsession, err := connection.NewSession()\n\tif err != nil {\n\t\treturn ret, fmt.Errorf(\"Failed to create a new shell session: %s\", err)\n\t}\n\tdefer session.Close()\n\n\t// We'll need to create a PTY on the lighthouse to run the command.\n\tmodes := ssh.TerminalModes{\n\t\tssh.ECHO: 0,\n\t\tssh.TTY_OP_ISPEED: 14400,\n\t\tssh.TTY_OP_OSPEED: 14400,\n\t}\n\tif err := session.RequestPty(\"xterm\", 160, 80, 
modes); err != nil {\n\t\treturn ret, fmt.Errorf(\"Failed to create a new shell session: %s\", err)\n\t}\n\n\t// Pipe all stdout, stderr, stdin to/from the PTY.\n\tstdin, err := session.StdinPipe()\n\tif err != nil {\n\t\treturn ret, fmt.Errorf(\"Failed to create a new shell session: %s\", err)\n\t}\n\tstdout, err := session.StdoutPipe()\n\tif err != nil {\n\t\treturn ret, fmt.Errorf(\"Failed to create a new shell session: %s\", err)\n\t}\n\tstderr, err := session.StderrPipe()\n\tif err != nil {\n\t\treturn ret, fmt.Errorf(\"Failed to create a new shell session: %s\", err)\n\t}\n\n\t// Keep them synchronised.\n\tgo io.Copy(stdin, os.Stdin)\n\tgo io.Copy(os.Stdout, stdout)\n\tgo io.Copy(os.Stderr, stderr)\n\n\t// We'll need to catch SIGINT and SIGKILL to ensure the ssh session is closed.\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt, os.Kill)\n\tgo func() {\n\t\t<-c\n\t\tsession.Close()\n\t}()\n\n\t// Now we can run the pmshell command\n\tret = \"Shell session completed\\n\"\n\tsession.Run(\"pmshell\")\n\treturn ret, nil\n}", "func reflinkInternal(d, s *os.File) error {\n\tss, err := s.SyscallConn()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsd, err := d.SyscallConn()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar err2, err3 error\n\n\terr = sd.Control(func(dfd uintptr) {\n\t\terr2 = ss.Control(func(sfd uintptr) {\n\t\t\t// int ioctl(int dest_fd, FICLONE, int src_fd);\n\t\t\t_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, dfd, FICLONE, sfd)\n\t\t\tif errno != 0 {\n\t\t\t\terr3 = errno\n\t\t\t}\n\t\t})\n\t})\n\n\tif err != nil {\n\t\t// sd.Control failed\n\t\treturn err\n\t}\n\tif err2 != nil {\n\t\t// ss.Control failed\n\t\treturn err2\n\t}\n\n\t// err3 is ioctl() response\n\treturn err3\n}", "func Xreadlink(tls *TLS, pathname, buf uintptr, bufsiz size_t) ssize_t {\n\tpanic(\"TODO readlink\")\n}", "func TestSysctlOverride(t *testing.T) {\n\tskip.If(t, os.Getuid() != 0, \"skipping test that requires root\")\n\tc := 
&container.Container{\n\t\tConfig: &containertypes.Config{\n\t\t\tHostname: \"foobar\",\n\t\t\tDomainname: \"baz.cyphar.com\",\n\t\t},\n\t\tHostConfig: &containertypes.HostConfig{\n\t\t\tNetworkMode: \"bridge\",\n\t\t\tSysctls: map[string]string{},\n\t\t},\n\t}\n\td := setupFakeDaemon(t, c)\n\n\t// Ensure that the implicit sysctl is set correctly.\n\ts, err := d.createSpec(context.TODO(), &configStore{}, c)\n\tassert.NilError(t, err)\n\tassert.Equal(t, s.Hostname, \"foobar\")\n\tassert.Equal(t, s.Linux.Sysctl[\"kernel.domainname\"], c.Config.Domainname)\n\tif sysctlExists(\"net.ipv4.ip_unprivileged_port_start\") {\n\t\tassert.Equal(t, s.Linux.Sysctl[\"net.ipv4.ip_unprivileged_port_start\"], \"0\")\n\t}\n\tif sysctlExists(\"net.ipv4.ping_group_range\") {\n\t\tassert.Equal(t, s.Linux.Sysctl[\"net.ipv4.ping_group_range\"], \"0 2147483647\")\n\t}\n\n\t// Set an explicit sysctl.\n\tc.HostConfig.Sysctls[\"kernel.domainname\"] = \"foobar.net\"\n\tassert.Assert(t, c.HostConfig.Sysctls[\"kernel.domainname\"] != c.Config.Domainname)\n\tc.HostConfig.Sysctls[\"net.ipv4.ip_unprivileged_port_start\"] = \"1024\"\n\n\ts, err = d.createSpec(context.TODO(), &configStore{}, c)\n\tassert.NilError(t, err)\n\tassert.Equal(t, s.Hostname, \"foobar\")\n\tassert.Equal(t, s.Linux.Sysctl[\"kernel.domainname\"], c.HostConfig.Sysctls[\"kernel.domainname\"])\n\tassert.Equal(t, s.Linux.Sysctl[\"net.ipv4.ip_unprivileged_port_start\"], c.HostConfig.Sysctls[\"net.ipv4.ip_unprivileged_port_start\"])\n\n\t// Ensure the ping_group_range is not set on a daemon with user-namespaces enabled\n\ts, err = d.createSpec(context.TODO(), &configStore{Config: config.Config{RemappedRoot: \"dummy:dummy\"}}, c)\n\tassert.NilError(t, err)\n\t_, ok := s.Linux.Sysctl[\"net.ipv4.ping_group_range\"]\n\tassert.Assert(t, !ok)\n\n\t// Ensure the ping_group_range is set on a container in \"host\" userns mode\n\t// on a daemon with user-namespaces enabled\n\tc.HostConfig.UsernsMode = \"host\"\n\ts, err = 
d.createSpec(context.TODO(), &configStore{Config: config.Config{RemappedRoot: \"dummy:dummy\"}}, c)\n\tassert.NilError(t, err)\n\tassert.Equal(t, s.Linux.Sysctl[\"net.ipv4.ping_group_range\"], \"0 2147483647\")\n}", "func TestGetProgInfo(t *testing.T) {\n\tdisableFunc, err := enableBPFStats()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer disableFunc()\n\n\tspecs, err := newGetproginfoSpecs()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tprog, err := ebpf.NewProgram(specs.ProgramOpen)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer prog.Close()\n\n\tlink, err := link.AttachRawTracepoint(link.RawTracepointOptions{\n\t\tName: \"sys_enter\",\n\t\tProgram: prog,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif link == nil {\n\t\tt.Fatal(\"no link\")\n\t}\n\tdefer link.Close()\n\n\tf, err := os.Open(\"/etc/os-release\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tf.Close()\n\n\tstats, err := getProgramStats(prog.FD())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif stats.RunCount == 0 {\n\t\tt.Errorf(\"run count should be non-zero\")\n\t}\n\tif stats.RunTime == 0 {\n\t\tt.Errorf(\"run time should be non-zero\")\n\t}\n}" ]
[ "0.7211902", "0.6758328", "0.65226424", "0.6358648", "0.59837586", "0.5968245", "0.57826364", "0.5782253", "0.5769097", "0.56659955", "0.5599327", "0.5580922", "0.55571395", "0.55484253", "0.55367595", "0.54918444", "0.54208976", "0.5402904", "0.5364842", "0.53416246", "0.5326302", "0.5311069", "0.5296605", "0.5282", "0.5259268", "0.52441293", "0.5222352", "0.51651585", "0.5160484", "0.51585096", "0.5117193", "0.5113153", "0.51129544", "0.5107833", "0.5105476", "0.5083889", "0.50735974", "0.5029898", "0.50262165", "0.5001851", "0.49893162", "0.49846366", "0.49782273", "0.49739167", "0.49730968", "0.4973095", "0.4961002", "0.4961002", "0.49567452", "0.4953626", "0.4946029", "0.49450433", "0.49343708", "0.4913315", "0.49037305", "0.49003723", "0.4898881", "0.4894633", "0.4887017", "0.48852605", "0.48805732", "0.48575732", "0.4845376", "0.48438767", "0.4843614", "0.4828417", "0.4821687", "0.48187155", "0.4810988", "0.48108664", "0.48090285", "0.48059797", "0.4783953", "0.47803083", "0.4775851", "0.4768822", "0.4762053", "0.475342", "0.47477257", "0.47453216", "0.47400677", "0.4739694", "0.4731229", "0.47174188", "0.47133982", "0.47024974", "0.46957773", "0.46918026", "0.4690254", "0.46896434", "0.46880576", "0.4679103", "0.4678226", "0.4675663", "0.46716923", "0.46702054", "0.46472153", "0.46396106", "0.4639548", "0.46319994", "0.4624173" ]
0.0
-1
Titleizes given name to match
func toFieldName(name string) string { name = strings.Title(name) // NOTE: golint prefers method names use "ID" instead of "Id". re := regexp.MustCompile("Id([A-Z]|$)") return re.ReplaceAllString(name, "ID${1}") }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Title(name string) string {\n\t//name = strings.Replace(name, \"_\", \" \", -1)\n\t//name = strings.Replace(name, \".\", \" \", -1)\n\tname = strings.TrimSpace(name)\n\treturn strings.ToUpper(name)\n}", "func Title(name string) string {\n\tname = strings.Replace(name, \"_\", \" \", -1)\n\tname = strings.Replace(name, \".\", \" \", -1)\n\tname = strings.TrimSpace(name)\n\treturn strings.ToUpper(name)\n}", "func Name(s string) string {\n\tparts := strings.Split(s, \".\")\n\n\tfor key, value := range parts {\n\t\tparts[key] = strings.Title(value)\n\t}\n\n\treturn strings.Join(parts, \"\\\\\")\n}", "func TitleCasedName(name string) string {\n\tnewstr := make([]byte, 0, len(name))\n\tupNextChar := true\n\n\tname = strings.ToLower(name)\n\n\tfor i := 0; i < len(name); i++ {\n\t\tc := name[i]\n\t\tswitch {\n\t\tcase upNextChar:\n\t\t\tupNextChar = false\n\t\t\tif 'a' <= c && c <= 'z' {\n\t\t\t\tc -= 'a' - 'A'\n\t\t\t}\n\t\tcase c == '_':\n\t\t\tupNextChar = true\n\t\t\tcontinue\n\t\t}\n\n\t\tnewstr = append(newstr, c)\n\t}\n\n\treturn bytesconv.BytesToStr(newstr)\n}", "func formatName(n string) string {\n\tn = strings.TrimSpace(n)\n\tinput := []rune(n)\n\tvar output []rune\n\tupperit := true\n\tfor _, ch := range input {\n\t\tif unicode.IsSpace(ch) {\n\t\t\tupperit = true\n\t\t\tcontinue\n\t\t}\n\t\tif upperit {\n\t\t\tch = unicode.ToUpper(ch)\n\t\t\tupperit = false\n\t\t}\n\t\toutput = append(output, ch)\n\t}\n\treturn string(output)\n}", "func Name(name string) (string, error) {\n\tvar validName string\n\terrorInvalid := errors.New(\"invalid name format\")\n\tif len(name) > 50 || len(name) < 2 {\n\t\treturn validName, errorInvalid\n\t}\n\tnameRegexp := regexp.MustCompile(\"^[\\\\p{L}\\\\s'.-]+$\")\n\tif !nameRegexp.MatchString(name) {\n\t\treturn validName, errorInvalid\n\t}\n\tvalidName = strings.TrimSpace(name)\n\tvalidName = strings.ToUpper(validName)\n\treturn validName, nil\n}", "func Name(text string) string {\n\t// Start with lowercase string\n\tfileName 
:= strings.ToLower(text)\n\tfileName = path.Clean(path.Base(fileName))\n\tfileName = strings.Trim(fileName, \" \")\n\n\t// Replace certain joining characters with a dash\n\tseps, err := regexp.Compile(`[ &_=+:]`)\n\tif err == nil {\n\t\tfileName = seps.ReplaceAllString(fileName, \"-\")\n\t}\n\n\t// Remove all other unrecognised characters - NB we do allow any printable characters\n\tlegal, err := regexp.Compile(`[^[:alnum:]-.]`)\n\tif err == nil {\n\t\tfileName = legal.ReplaceAllString(fileName, \"\")\n\t}\n\n\t// Remove any double dashes caused by existing - in name\n\tfileName = strings.Replace(fileName, \"--\", \"-\", -1)\n\n\t// NB this may be of length 0, caller must check\n\treturn fileName\n}", "func formatName(name string) string {\n\tparts := strings.Split(name, \"_\")\n\tnewName := \"\"\n\tfor _, p := range parts {\n\t\tif len(p) < 1 {\n\t\t\tcontinue\n\t\t}\n\t\tnewName = newName + strings.Replace(p, string(p[0]), strings.ToUpper(string(p[0])), 1)\n\t}\n\treturn newName\n}", "func NormalizeName(title string) (string, error) {\n\n\tnormalizedName, err := GetRequest(config.NormalizeServer + \"?searchText=\" + url.QueryEscape(title))\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tvar response Response\n\tif json.Unmarshal([]byte(normalizedName), &response); err != nil {\n\t\treturn \"\", err\n\t}\n\tif response.Status != 200 {\n\t\treturn \"\", revel.NewErrorFromPanic(\"Server responded with\" + strconv.Itoa(response.Status))\n\t}\n\tlog.Println(\"normalized\", title, \"to\", response.Content)\n\treturn response.Content, nil\n}", "func NameTitle() (string, error) {\n\tslice, err := loader(\"name_titles\")\n\tcheckErr(err)\n\treturn random(slice), nil\n}", "func Titleize(text string) string {\n\tpascalized := Pascalize(text)\n\n\treturn titleRegex.ReplaceAllStringFunc(pascalized, func(s string) string {\n\t\treturn string(s[0]) + \" \" + string(s[1])\n\t})\n}", "func Titleize(input string) (titleized string) {\n\tisToUpper := false\n\tfor k, v := range 
input {\n\t\tif k == 0 {\n\t\t\ttitleized = strings.ToUpper(string(input[0]))\n\t\t} else {\n\t\t\tif isToUpper || unicode.IsUpper(v) {\n\t\t\t\ttitleized += \" \" + strings.ToUpper(string(v))\n\t\t\t\tisToUpper = false\n\t\t\t} else {\n\t\t\t\tif (v == '_') || (v == ' ') {\n\t\t\t\t\tisToUpper = true\n\t\t\t\t} else {\n\t\t\t\t\ttitleized += string(v)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n\n}", "func Title(in string) string {\n\n\trunes := []rune(in)\n\tlength := len(runes)\n\n\tvar out []rune\n\tfor i := 0; i < length; i++ {\n\t\tif i > 0 && unicode.IsUpper(runes[i]) && ((i+1 < length && unicode.IsLower(runes[i+1])) || unicode.IsLower(runes[i-1])) {\n\t\t\tout = append(out, ' ')\n\t\t}\n\t\tout = append(out, runes[i])\n\t}\n\n\treturn string(out)\n}", "func titleCase(s string) string {\n\tprev := '_'\n\ts = fixForInitialismCase(s)\n\ttitleCased := strings.Map(\n\t\tfunc(r rune) rune {\n\t\t\tif r == '_' {\n\t\t\t\tprev = r\n\t\t\t\treturn -1\n\t\t\t}\n\t\t\tif prev == '_' {\n\t\t\t\tprev = r\n\t\t\t\treturn unicode.ToUpper(r)\n\t\t\t}\n\t\t\tprev = r\n\t\t\treturn r\n\t\t}, s)\n\t// special cases when a struct name ends in 'Args'/'Result', leads to go name having '_' appended\n\t// https://github.com/apache/thrift/blob/master/compiler/cpp/src/thrift/generate/t_go_generator.cc#L495\n\tif (len(titleCased) >= 4 && titleCased[len(titleCased)-4:] == \"Args\") ||\n\t\t(len(titleCased) >= 6 && s[len(titleCased)-6:] == \"Result\") {\n\t\ttitleCased = titleCased + \"_\"\n\t}\n\treturn titleCased\n}", "func normaliseHeaderName(headerName string) string {\n\tsegments := strings.Split(headerName, \"-\")\n\tfor index, segment := range segments {\n\t\tsegments[index] = strings.Title(\n\t\t\tstrings.ToLower(segment),\n\t\t)\n\t}\n\n\treturn strings.Join(segments, \"-\")\n}", "func TitleCase(n ComponentName) ComponentName {\n\ts := string(n)\n\treturn ComponentName(strings.ToUpper(s[0:1]) + s[1:])\n}", "func (u Username) Name() string {\n\treturn 
strings.Title(\n\t\tfmt.Sprintf(\"%s %s\", u.Firstname(), u.Lastname()),\n\t)\n}", "func FormatDefName(name string) string {\n\tformatted := bytes.NewBuffer(nil)\n\tstart := true\n\n\tfor _, c := range name {\n\t\tif !bytes.ContainsRune(chars, c) {\n\t\t\tstart = true\n\t\t\tcontinue\n\t\t}\n\n\t\tif !start {\n\t\t\tformatted.WriteRune(c)\n\t\t\tcontinue\n\t\t}\n\n\t\tformatted.WriteRune(unicode.ToUpper(c))\n\t\tstart = false\n\t}\n\n\treturn formatted.String()\n}", "func sanitizeName(field string) string {\n\tif len(field) == 0 {\n\t\treturn \"\"\n\t}\n\n\tfieldSlice := strings.Split(field, \"\")\n\tfield = \"\"\n\n\tfieldSlice[0] = strings.ToUpper(fieldSlice[0])\n\tfor _, f := range fieldSlice {\n\t\tfield += f\n\t}\n\n\treturn field\n}", "func Name(name string) bool {\n\treturn nameMap[strings.ToLower(strings.TrimSpace(name))]\n}", "func NormalizedName(s string) string {\n\treturn strings.Map(normalizedChar, s)\n}", "func Title(operand string) string { return strings.Title(operand) }", "func (t title) fixCase() string {\n\treturn strings.Title(string(t)) // convert title to a string since type Title is based on a string.\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatementStatementSqliMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatementStatementSqliMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func filterTitle(ctx stick.Context, val stick.Value, args ...stick.Value) stick.Value {\n\treturn strings.Title(stick.CoerceString(val))\n}", "func ParseName(name string) Name {\n\tnames := Name{}\n\tvar currWord []rune\n\n\tprev := ohterRuneCase\n\tcurr := ohterRuneCase\n\n\tclear := func() {\n\t\tcurrWord = currWord[:0]\n\t}\n\n\tsplitCurr := func() {\n\t\tswitch len(currWord) {\n\t\tcase 0:\n\t\tcase 1:\n\t\t\twordKind := ohterWordCase\n\t\t\tswitch 
getKind(currWord[0]) {\n\t\t\tcase lowerRuneCase:\n\t\t\t\twordKind = lowerWordCase\n\t\t\tcase upperRuneCase:\n\t\t\t\twordKind = upperWordCase\n\t\t\t}\n\t\t\tnames = append(names, Word{\n\t\t\t\tword: string(currWord),\n\t\t\t\tkind: wordKind,\n\t\t\t})\n\t\t\tclear()\n\t\tdefault:\n\t\t\twordKind := ohterWordCase\n\t\t\tswitch getKind(currWord[0]) {\n\t\t\tcase lowerRuneCase:\n\t\t\t\twordKind = lowerWordCase\n\t\t\tcase upperRuneCase:\n\t\t\t\tswitch getKind(currWord[len(currWord)-1]) {\n\t\t\t\tcase lowerRuneCase:\n\t\t\t\t\twordKind = titleWordCase\n\t\t\t\tcase upperRuneCase:\n\t\t\t\t\twordKind = upperWordCase\n\t\t\t\t}\n\t\t\t}\n\t\t\tnames = append(names, Word{\n\t\t\t\tword: string(currWord),\n\t\t\t\tkind: wordKind,\n\t\t\t})\n\t\t\tclear()\n\t\t}\n\t}\n\tsplitPrev := func() {\n\t\tprevRune := currWord[len(currWord)-1]\n\t\tcurrWord = currWord[:len(currWord)-1]\n\t\tsplitCurr()\n\t\tcurrWord = append(currWord, prevRune)\n\t}\n\n\tstep := func(r rune) {\n\t\tcurr = getKind(r)\n\t\tswitch prev {\n\t\tcase splitRuneCase:\n\t\t\tif curr != splitRuneCase {\n\t\t\t\tclear()\n\t\t\t}\n\t\tcase ohterRuneCase:\n\t\t\tif curr != ohterRuneCase {\n\t\t\t\tsplitCurr()\n\t\t\t}\n\t\tcase lowerRuneCase:\n\t\t\tif curr != lowerRuneCase {\n\t\t\t\tsplitCurr()\n\t\t\t}\n\t\tcase upperRuneCase:\n\t\t\tswitch curr {\n\t\t\tcase lowerRuneCase:\n\t\t\t\tsplitPrev()\n\t\t\tcase ohterRuneCase, splitRuneCase, eofRuneCase:\n\t\t\t\tsplitCurr()\n\t\t\t}\n\t\t}\n\t\tcurrWord = append(currWord, r)\n\t\tprev = curr\n\t}\n\n\tfor _, r := range []rune(name) {\n\t\tstep(r)\n\t}\n\tstep(0)\n\treturn names\n}", "func ToTitle(s string) string {\n\treturn strings.ToTitle(s)\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementAndStatementStatementSqliMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v 
WebAclRuleStatementAndStatementStatementOrStatementStatementAndStatementStatementSqliMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func NormalizeName(s string) string {\n\treturn strings.ToLower(strings.TrimSpace(s))\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatementStatementRegexMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatementStatementRegexMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func Title(s string) string {\n\treturn strings.Title(s)\n}", "func sanitizeName(name string) string {\n\toutput := strings.ToLower(illegalChars.ReplaceAllString(name, \"_\"))\n\n\tif legalLabel.MatchString(output) {\n\t\treturn output\n\t}\n\t// Prefix name with _ if it begins with a number\n\treturn \"_\" + output\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatementStatementByteMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatementStatementByteMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementOrStatementStatementAndStatementStatementAndStatementStatementRegexMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementOrStatementStatementAndStatementStatementAndStatementStatementRegexMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementAndStatementStatementNotStatementStatementOrStatementStatementSqliMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v 
WebAclRuleStatementAndStatementStatementNotStatementStatementOrStatementStatementSqliMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func ToTitle(str string) string {\n\treturn strings.Title(str)\n}", "func shortenName(name, origin string) string {\n\tif name == origin {\n\t\treturn \"@\"\n\t}\n\treturn strings.TrimSuffix(name, \".\"+origin)\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementNotStatementStatementSqliMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementNotStatementStatementSqliMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementAndStatementStatementRegexMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementAndStatementStatementRegexMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementOrStatementStatementAndStatementStatementAndStatementStatementByteMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementOrStatementStatementAndStatementStatementAndStatementStatementByteMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func cleanName(name string) string {\n\tname = strings.TrimSpace(strings.ToLower(name))\n\n\tfor {\n\t\tif i := nameStripRE.FindStringIndex(name); i != nil {\n\t\t\tname = name[i[1]:]\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tname = strings.Trim(name, \"-\")\n\t// Remove dots at the beginning of names\n\tif len(name) > 1 && name[0] == '.' 
{\n\t\tname = name[1:]\n\t}\n\treturn name\n}", "func NormalizeName(name string) string {\n\tname = strings.TrimLeft(name, \"_\")\n\treturn strings.ToUpper(name[:1]) + name[1:]\n}", "func (o WebAclRuleStatementAndStatementStatementNotStatementStatementSqliMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementNotStatementStatementSqliMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementByteMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementByteMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementAndStatementStatementByteMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementAndStatementStatementByteMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementNotStatementStatementRegexMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementNotStatementStatementRegexMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func titleOrDefault(specDoc *loads.Document, name, defaultName string) string {\n\tif strings.TrimSpace(name) == \"\" {\n\t\tif specDoc.Spec().Info != nil && strings.TrimSpace(specDoc.Spec().Info.Title) != \"\" {\n\t\t\tname = specDoc.Spec().Info.Title\n\t\t} else {\n\t\t\tname = defaultName\n\t\t}\n\t}\n\treturn swag.ToGoName(name)\n}", "func (o 
WebAclRuleStatementAndStatementStatementOrStatementStatementAndStatementStatementXssMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementAndStatementStatementXssMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementNotStatementStatementOrStatementStatementOrStatementStatementSqliMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementNotStatementStatementOrStatementStatementOrStatementStatementSqliMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func AbbrevName(name string) string {\n\tvar s []string\n\n\ts = append(s, name)\n\n\tfor _, nn := range s {\n\t\tspnn := strings.Split(nn, \" \")\n\t\tfirstName := spnn[0]\n\t\tlastName := spnn[1]\n\t\tfmt.Println(strings.ToUpper(firstName[0:1]) + \".\" + strings.ToUpper(lastName[0:1]))\n\n\t}\n\n\treturn name\n}", "func (o WebAclRuleStatementAndStatementStatementNotStatementStatementRegexMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementNotStatementStatementRegexMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementNotStatementStatementByteMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementNotStatementStatementByteMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (r *Role) Title() string {\n\t// Uppercase all words, and also ensure \"MARC\" is fully capitalized\n\tvar c = cases.Title(language.AmericanEnglish)\n\treturn c.String(strings.Replace(r.Name, \"marc\", 
\"MARC\", -1))\n}", "func (this *TitleInfoImpl) SetTitleName(titleNameIn string) {\n\t this.titleName = titleNameIn;\n}", "func (o WebAclRuleStatementNotStatementStatementOrStatementStatementNotStatementStatementSqliMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementNotStatementStatementOrStatementStatementNotStatementStatementSqliMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func FindMemberName(text string) string {\n\tif reMatchMomota.MatchString(text) {\n\t\treturn \"百田夏菜子\"\n\t}\n\n\tif reMatchAriyasu.MatchString(text) {\n\t\treturn \"有安杏果\"\n\t}\n\n\tif reMatchTamai.MatchString(text) {\n\t\treturn \"玉井詩織\"\n\t}\n\n\tif reMatchSasaki.MatchString(text) {\n\t\treturn \"佐々木彩夏\"\n\t}\n\n\tif reMatchTakagi.MatchString(text) {\n\t\treturn \"高城れに\"\n\t}\n\treturn \"\"\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func ToFriendlyCase(name string) string {\n\tparts := nameParts(name)\n\tfor i := range parts {\n\t\tparts[i] = strings.ToLower(parts[i])\n\t}\n\treturn strings.Join(parts, \" \")\n}", "func (o AlphabetOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Alphabet) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementNotStatementStatementOrStatementStatementSqliMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementNotStatementStatementOrStatementStatementSqliMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o 
WebAclRuleStatementAndStatementStatementNotStatementStatementOrStatementStatementXssMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementNotStatementStatementOrStatementStatementXssMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatementStatementSizeConstraintStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatementStatementSizeConstraintStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func MakeTitle(inpath string) string {\n\treturn strings.Replace(strings.TrimSpace(inpath), \"-\", \" \", -1)\n}", "func (o WebAclRuleStatementNotStatementStatementOrStatementStatementOrStatementStatementRegexMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementNotStatementStatementOrStatementStatementOrStatementStatementRegexMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementAndStatementStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func cleanName(s string) string {\n\ts = strings.Replace(s, \" \", \"_\", -1) // Remove spaces\n\ts = strings.Replace(s, \"(\", \"\", -1) // Remove open parenthesis\n\ts = strings.Replace(s, \")\", \"\", -1) // Remove close parenthesis\n\ts = strings.Replace(s, \"/\", \"\", -1) // Remove forward slashes\n\ts = strings.ToLower(s)\n\treturn s\n}", "func (o 
WebAclRuleStatementOrStatementStatementAndStatementStatementAndStatementStatementSizeConstraintStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementOrStatementStatementAndStatementStatementAndStatementStatementSizeConstraintStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func AbbrevName1(name string) string {\n\twords := strings.Split(name, \" \")\n\treturn strings.ToUpper(string(words[0][0])) + \".\" + strings.ToUpper(string(words[1][0]))\n}", "func (o WebAclRuleStatementNotStatementStatementOrStatementStatementOrStatementStatementByteMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementNotStatementStatementOrStatementStatementOrStatementStatementByteMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementNotStatementStatementOrStatementStatementNotStatementStatementRegexMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementNotStatementStatementOrStatementStatementNotStatementStatementRegexMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func ToTitle(r rune) rune", "func (o WebAclRuleStatementNotStatementStatementOrStatementStatementRegexMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementNotStatementStatementOrStatementStatementRegexMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementNotStatementStatementSqliMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementNotStatementStatementSqliMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func 
toSubsetName(labelValue string) string {\n\tre, _ := regexp.Compile(\"[_.]\")\n\treturn re.ReplaceAllString(labelValue, \"-\")\n}", "func (o WebAclRuleStatementNotStatementStatementRegexMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementNotStatementStatementRegexMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementAndStatementStatementSizeConstraintStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementAndStatementStatementSizeConstraintStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementNotStatementStatementOrStatementStatementOrStatementStatementXssMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementNotStatementStatementOrStatementStatementOrStatementStatementXssMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func GoName(s string) string {\n\ts = strcase.ToCamel(s)\n\tfor _, c := range cases {\n\t\tif strings.HasSuffix(s, c) {\n\t\t\ts = strings.Replace(s, c, strings.ToUpper(c), 1)\n\t\t}\n\t}\n\treturn s\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatementStatementRegexPatternSetReferenceStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatementStatementRegexPatternSetReferenceStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementNotStatementStatementRegexPatternSetReferenceStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput 
{\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementNotStatementStatementRegexPatternSetReferenceStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func SpecialCaseToTitle(special unicode.SpecialCase, r rune) rune", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatementStatementSqliMatchStatementFieldToMatchSingleQueryArgumentOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatementStatementSqliMatchStatementFieldToMatchSingleQueryArgument) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementNotStatementStatementOrStatementStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementNotStatementStatementOrStatementStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func NormalizeWikiName(name string) string {\n\treturn strings.Replace(name, \"-\", \" \", -1)\n}", "func Name(field, defaultValue string, autoSelect, allowEmpty bool) (string, error) {\n\tfield = strings.Title(field)\n\n\tvalidate := func(input string) error {\n\t\tif len(input) == 0 {\n\t\t\treturn fmt.Errorf(\"Please provide a %s name\", field)\n\t\t}\n\n\t\tl := manifold.Label(input)\n\t\tif err := l.Validate(nil); err != nil {\n\t\t\treturn fmt.Errorf(\"Please provide a valid %s name\", field)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tlabel := fmt.Sprintf(\"New %s Name\", field)\n\n\tif autoSelect {\n\t\terr := validate(defaultValue)\n\t\tif err != nil {\n\t\t\tfmt.Println(templates.PromptFailure(label, defaultValue))\n\t\t} else {\n\t\t\tfmt.Println(templates.PromptSuccess(label, defaultValue))\n\t\t}\n\n\t\treturn defaultValue, err\n\t}\n\n\tp := promptui.Prompt{\n\t\tLabel: label,\n\t\tDefault: 
defaultValue,\n\t\tValidate: validate,\n\t}\n\n\treturn p.Run()\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatementStatementByteMatchStatementFieldToMatchSingleQueryArgumentOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatementStatementByteMatchStatementFieldToMatchSingleQueryArgument) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (n *Namer) CreateName() string {\n\ts, err := n.Execute(n.Words)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\treturn s\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatementStatementRegexMatchStatementFieldToMatchSingleQueryArgumentOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatementStatementRegexMatchStatementFieldToMatchSingleQueryArgument) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementOrStatementStatementAndStatementStatementAndStatementStatementRegexMatchStatementFieldToMatchSingleQueryArgumentOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementOrStatementStatementAndStatementStatementAndStatementStatementRegexMatchStatementFieldToMatchSingleQueryArgument) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementOrStatementStatementAndStatementStatementAndStatementStatementRegexPatternSetReferenceStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementOrStatementStatementAndStatementStatementAndStatementStatementRegexPatternSetReferenceStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementNotStatementStatementSizeConstraintStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v 
WebAclRuleStatementAndStatementStatementOrStatementStatementNotStatementStatementSizeConstraintStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementAndStatementStatementSqliMatchStatementFieldToMatchSingleQueryArgumentOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementAndStatementStatementSqliMatchStatementFieldToMatchSingleQueryArgument) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementOrStatementStatementAndStatementStatementAndStatementStatementByteMatchStatementFieldToMatchSingleQueryArgumentOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementOrStatementStatementAndStatementStatementAndStatementStatementByteMatchStatementFieldToMatchSingleQueryArgument) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementByteMatchStatementFieldToMatchSingleQueryArgumentOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementByteMatchStatementFieldToMatchSingleQueryArgument) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementAndStatementStatementRegexPatternSetReferenceStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementAndStatementStatementRegexPatternSetReferenceStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func (o WebAclRuleStatementAndStatementStatementNotStatementStatementRegexPatternSetReferenceStatementFieldToMatchSingleHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v 
WebAclRuleStatementAndStatementStatementNotStatementStatementRegexPatternSetReferenceStatementFieldToMatchSingleHeader) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}", "func getRtName(mediaTitle string) string {\n\treturn strings.ReplaceAll(strings.ReplaceAll(mediaTitle, \" \", \"_\"), \":\", \"\")\n}", "func formatTestName(name string) string {\n\tname = strings.Replace(name, \"_example.go\", \"\", -1)\n\tname = strings.Replace(name, \"-\", \" \", -1)\n\treturn name\n}", "func cleanName(s string) string {\n\ts = strings.Replace(s, \" \", \"_\", -1) // Remove spaces\n\ts = strings.Replace(s, \"(\", \"_\", -1) // Remove open parenthesis\n\ts = strings.Replace(s, \":\", \"_\", -1) // Remove open parenthesis\n\ts = strings.Replace(s, \")\", \"_\", -1) // Remove close parenthesis\n\ts = strings.Replace(s, \"\\\\\", \"_\", -1) // Remove backward slashes\n\ts = strings.ToLower(s)\n\treturn s\n}", "func ConstNameToAllCapsSnake(name string) string {\n\tparts := nameParts(RemoveLeadingK(name))\n\tfor i := range parts {\n\t\tparts[i] = strings.ToUpper(parts[i])\n\t}\n\treturn strings.Join(parts, \"_\")\n}", "func (o WebAclRuleStatementAndStatementStatementOrStatementStatementAndStatementStatementRegexMatchStatementFieldToMatchSingleQueryArgumentOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatementAndStatementStatementRegexMatchStatementFieldToMatchSingleQueryArgument) string {\n\t\treturn v.Name\n\t}).(pulumi.StringOutput)\n}" ]
[ "0.7358443", "0.7263768", "0.6653588", "0.66427946", "0.6610177", "0.65596616", "0.6526799", "0.6518762", "0.64714485", "0.64706373", "0.6443669", "0.6421691", "0.638007", "0.6326133", "0.62860125", "0.6278684", "0.6265051", "0.6237156", "0.62015903", "0.6178959", "0.61777234", "0.6110934", "0.6093986", "0.6028991", "0.60224324", "0.60054696", "0.6002277", "0.5998172", "0.5996574", "0.59805685", "0.59740156", "0.5970144", "0.5965215", "0.59632677", "0.59590834", "0.59588283", "0.59554374", "0.5954943", "0.59523517", "0.5941757", "0.59408545", "0.59366614", "0.59308344", "0.5929946", "0.59247756", "0.5924448", "0.5922877", "0.5910489", "0.59103113", "0.5905129", "0.59036136", "0.5898153", "0.5893146", "0.58861285", "0.58854926", "0.58843017", "0.58797526", "0.5876793", "0.58701134", "0.58697635", "0.58675283", "0.585811", "0.58575004", "0.58499587", "0.5841357", "0.58412296", "0.58339155", "0.5833157", "0.58326876", "0.5827909", "0.5819405", "0.5815109", "0.5813764", "0.5811723", "0.58102036", "0.5805048", "0.58036315", "0.5801814", "0.57949245", "0.5794044", "0.57927835", "0.5792174", "0.57872707", "0.5780486", "0.5778774", "0.5777381", "0.5775947", "0.57734555", "0.5773092", "0.5769512", "0.576887", "0.5767136", "0.57662946", "0.57644695", "0.5763988", "0.5763565", "0.57620585", "0.57575196", "0.5755927", "0.5752608", "0.5751878" ]
0.0
-1
genFields generates fields config for given AST
func genFields(fs []*ast.FieldDefinition) *jen.Statement { // // Generate config for fields // // == Example input SDL // // type Dog { // name(style: NameComponentsStyle = SHORT): String! // givenName: String @deprecated(reason: "No longer supported; please use name field.") // } // // == Example output // // graphql.Fields{ // "name": graphql.Field{ ... }, // "givenName": graphql.Field{ ... }, // } // return jen.Qual(defsPkg, "Fields").Values(jen.DictFunc(func(d jen.Dict) { for _, f := range fs { d[jen.Lit(f.Name.Value)] = genField(f) } })) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (g *Generator) generate(typeName string) {\n\tfields := make([]Field, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeName = typeName\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tfields = append(fields, file.fields...)\n\t\t}\n\t}\n\tif len(fields) == 0 {\n\t\tlog.Fatalf(\"no fields defined for type %s\", typeName)\n\t}\n\t// TODO: for now we remove Default from the start (maybe move that to an option)\n\tlogicalTypeName := \"\\\"\" + strings.TrimPrefix(typeName, \"Default\") + \"\\\"\"\n\n\t// Generate code that will fail if the constants change value.\n\tg.Printf(\"func (d *%s) Serialize() ([]byte, error) {\\n\", typeName)\n\tg.Printf(\"wb := utils.NewWriteBufferByteBased(utils.WithByteOrderForByteBasedBuffer(binary.BigEndian))\\n\")\n\tg.Printf(\"\\tif err := d.SerializeWithWriteBuffer(context.Background(), wb); err != nil {\\n\")\n\tg.Printf(\"\\t\\treturn nil, err\\n\")\n\tg.Printf(\"\\t}\\n\")\n\tg.Printf(\"\\treturn wb.GetBytes(), nil\\n\")\n\tg.Printf(\"}\\n\\n\")\n\tg.Printf(\"func (d *%s) SerializeWithWriteBuffer(ctx context.Context, writeBuffer utils.WriteBuffer) error {\\n\", typeName)\n\tg.Printf(\"\\tif err := writeBuffer.PushContext(%s); err != nil {\\n\", logicalTypeName)\n\tg.Printf(\"\\t\\treturn err\\n\")\n\tg.Printf(\"\\t}\\n\")\n\tfor _, field := range fields {\n\t\tfieldType := field.fieldType\n\t\tif field.isDelegate {\n\t\t\tg.Printf(\"\\t\\t\\tif err := d.%s.SerializeWithWriteBuffer(ctx, writeBuffer); err != nil {\\n\", fieldType.(*ast.Ident).Name)\n\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\tcontinue\n\t\t}\n\t\tfieldName := field.name\n\t\tfieldNameUntitled := \"\\\"\" + unTitle(fieldName) + \"\\\"\"\n\t\tif field.hasLocker != \"\" {\n\t\t\tg.Printf(\"if err := func()error {\\n\")\n\t\t\tg.Printf(\"\\td.\" + field.hasLocker + \".Lock()\\n\")\n\t\t\tg.Printf(\"\\tdefer d.\" + field.hasLocker + 
\".Unlock()\\n\")\n\t\t}\n\t\tneedsDereference := false\n\t\tif starFieldType, ok := fieldType.(*ast.StarExpr); ok {\n\t\t\tfieldType = starFieldType.X\n\t\t\tneedsDereference = true\n\t\t}\n\t\tif field.isStringer {\n\t\t\tif needsDereference {\n\t\t\t\tg.Printf(\"if d.%s != nil {\", field.name)\n\t\t\t}\n\t\t\tg.Printf(stringFieldSerialize, \"d.\"+field.name+\".String()\", fieldNameUntitled)\n\t\t\tif field.hasLocker != \"\" {\n\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t}\n\t\t\tif needsDereference {\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tswitch fieldType := fieldType.(type) {\n\t\tcase *ast.SelectorExpr:\n\t\t\t{\n\t\t\t\t// TODO: bit hacky but not sure how else we catch those ones\n\t\t\t\tx := fieldType.X\n\t\t\t\tsel := fieldType.Sel\n\t\t\t\txIdent, xIsIdent := x.(*ast.Ident)\n\t\t\t\tif xIsIdent {\n\t\t\t\t\tif xIdent.Name == \"atomic\" {\n\t\t\t\t\t\tif sel.Name == \"Uint32\" {\n\t\t\t\t\t\t\tg.Printf(uint32FieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Uint64\" {\n\t\t\t\t\t\t\tg.Printf(uint64FieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Int32\" {\n\t\t\t\t\t\t\tg.Printf(int32FieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif 
field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Bool\" {\n\t\t\t\t\t\t\tg.Printf(boolFieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Value\" {\n\t\t\t\t\t\t\tg.Printf(serializableFieldTemplate, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif xIdent.Name == \"sync\" {\n\t\t\t\t\t\tfmt.Printf(\"\\t skipping field %s because it is %v.%v\\n\", fieldName, x, sel)\n\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tg.Printf(serializableFieldTemplate, \"d.\"+field.name, fieldNameUntitled)\n\t\tcase *ast.IndexExpr:\n\t\t\tx := fieldType.X\n\t\t\tif fieldType, isxFieldSelector := x.(*ast.SelectorExpr); isxFieldSelector { // TODO: we need to refactor this so we can reuse...\n\t\t\t\txIdent, xIsIdent := fieldType.X.(*ast.Ident)\n\t\t\t\tsel := fieldType.Sel\n\t\t\t\tif xIsIdent && xIdent.Name == \"atomic\" && sel.Name == \"Pointer\" 
{\n\t\t\t\t\tg.Printf(atomicPointerFieldTemplate, \"d.\"+field.name, field.name, fieldNameUntitled)\n\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Printf(\"no support yet for %#q\\n\", fieldType)\n\t\t\tcontinue\n\t\tcase *ast.Ident:\n\t\t\tswitch fieldType.Name {\n\t\t\tcase \"byte\":\n\t\t\t\tg.Printf(byteFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"int\":\n\t\t\t\tg.Printf(int64FieldSerialize, \"int64(d.\"+field.name+\")\", fieldNameUntitled)\n\t\t\tcase \"int32\":\n\t\t\t\tg.Printf(int32FieldSerialize, \"int32(d.\"+field.name+\")\", fieldNameUntitled)\n\t\t\tcase \"uint32\":\n\t\t\t\tg.Printf(uint32FieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"bool\":\n\t\t\t\tg.Printf(boolFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"string\":\n\t\t\t\tg.Printf(stringFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"error\":\n\t\t\t\tg.Printf(errorFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tdefault:\n\t\t\t\tfmt.Printf(\"\\t no support implemented for Ident with type %v\\n\", fieldType)\n\t\t\t\tg.Printf(\"{\\n\")\n\t\t\t\tg.Printf(\"_value := fmt.Sprintf(\\\"%%v\\\", d.%s)\\n\", fieldName)\n\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", fieldNameUntitled)\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t}\n\t\tcase *ast.ArrayType:\n\t\t\tif eltType, ok := fieldType.Elt.(*ast.Ident); ok && eltType.Name == \"byte\" {\n\t\t\t\tg.Printf(\"if err := writeBuffer.WriteByteArray(%s, d.%s); err != nil {\\n\", fieldNameUntitled, field.name)\n\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t} else {\n\t\t\t\tg.Printf(\"if err := writeBuffer.PushContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn err\\n\\t}\\n\", 
fieldNameUntitled)\n\t\t\t\tg.Printf(\"for _, elem := range d.%s {\", field.name)\n\t\t\t\tswitch eltType := fieldType.Elt.(type) {\n\t\t\t\tcase *ast.SelectorExpr, *ast.StarExpr:\n\t\t\t\t\tg.Printf(\"\\n\\t\\tvar elem any = elem\\n\")\n\t\t\t\t\tg.Printf(serializableFieldTemplate, \"elem\", \"\\\"value\\\"\")\n\t\t\t\tcase *ast.Ident:\n\t\t\t\t\tswitch eltType.Name {\n\t\t\t\t\tcase \"int\":\n\t\t\t\t\t\tg.Printf(int64FieldSerialize, \"int64(d.\"+field.name+\")\", fieldNameUntitled)\n\t\t\t\t\tcase \"uint32\":\n\t\t\t\t\t\tg.Printf(uint32FieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\t\t\tcase \"bool\":\n\t\t\t\t\t\tg.Printf(boolFieldSerialize, \"elem\", \"\\\"\\\"\")\n\t\t\t\t\tcase \"string\":\n\t\t\t\t\t\tg.Printf(stringFieldSerialize, \"elem\", \"\\\"\\\"\")\n\t\t\t\t\tcase \"error\":\n\t\t\t\t\t\tg.Printf(errorFieldSerialize, \"elem\", \"\\\"\\\"\")\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tfmt.Printf(\"\\t no support implemented for Ident within ArrayType for %v\\n\", fieldType)\n\t\t\t\t\t\tg.Printf(\"_value := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", fieldNameUntitled)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\tg.Printf(\"if err := writeBuffer.PopContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn err\\n\\t}\\n\", fieldNameUntitled)\n\t\t\t}\n\t\tcase *ast.MapType:\n\t\t\tg.Printf(\"if err := writeBuffer.PushContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn err\\n\\t}\\n\", fieldNameUntitled)\n\t\t\t// TODO: we use serializable or strings as we don't want to over-complex this\n\t\t\tg.Printf(\"for _name, elem := range d.%s {\\n\", fieldName)\n\t\t\tswitch keyType := fieldType.Key.(type) {\n\t\t\tcase *ast.Ident:\n\t\t\t\tswitch keyType.Name {\n\t\t\t\tcase \"uint\", \"uint8\", \"uint16\", \"uint32\", \"uint64\", \"int\", \"int8\", \"int16\", \"int32\", \"int64\": // TODO: add other types\n\t\t\t\t\tg.Printf(\"\\t\\tname := 
fmt.Sprintf(\\\"%s\\\", _name)\\n\", \"%v\")\n\t\t\t\tcase \"string\":\n\t\t\t\t\tg.Printf(\"\\t\\tname := _name\\n\")\n\t\t\t\tdefault:\n\t\t\t\t\tg.Printf(\"\\t\\tname := fmt.Sprintf(\\\"%s\\\", &_name)\\n\", \"%v\")\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tg.Printf(\"\\t\\tname := fmt.Sprintf(\\\"%s\\\", &_name)\\n\", \"%v\")\n\t\t\t}\n\t\t\tswitch eltType := fieldType.Value.(type) {\n\t\t\tcase *ast.StarExpr, *ast.SelectorExpr:\n\t\t\t\tg.Printf(\"\\n\\t\\tvar elem any = elem\\n\")\n\t\t\t\tg.Printf(\"\\t\\tif serializable, ok := elem.(utils.Serializable); ok {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := writeBuffer.PushContext(name); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := serializable.SerializeWithWriteBuffer(ctx, writeBuffer); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := writeBuffer.PopContext(name); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t} else {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\telemAsString := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := writeBuffer.WriteString(name, uint32(len(elemAsString)*8), \\\"UTF-8\\\", elemAsString); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t}\\n\")\n\t\t\tcase *ast.Ident:\n\t\t\t\tswitch eltType.Name {\n\t\t\t\tcase \"bool\":\n\t\t\t\t\tg.Printf(boolFieldSerialize, \"elem\", \"name\")\n\t\t\t\tcase \"string\":\n\t\t\t\t\tg.Printf(stringFieldSerialize, \"elem\", \"name\")\n\t\t\t\tcase \"error\":\n\t\t\t\t\tg.Printf(errorFieldSerialize, \"elem\", \"name\")\n\t\t\t\tdefault:\n\t\t\t\t\tfmt.Printf(\"\\t no support implemented for Ident within MapType for %v\\n\", fieldType)\n\t\t\t\t\tg.Printf(\"\\t\\t_value := 
fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", \"name\")\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tfmt.Printf(\"\\t no support implemented within MapType %v\\n\", fieldType.Value)\n\t\t\t\tg.Printf(\"\\t\\t_value := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", \"name\")\n\t\t\t}\n\t\t\tg.Printf(\"\\t}\\n\")\n\t\t\tg.Printf(\"if err := writeBuffer.PopContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn err\\n\\t}\\n\", fieldNameUntitled)\n\t\tcase *ast.ChanType:\n\t\t\tg.Printf(chanFieldSerialize, \"d.\"+field.name, fieldNameUntitled, field.name)\n\t\tcase *ast.FuncType:\n\t\t\tg.Printf(funcFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\tdefault:\n\t\t\tfmt.Printf(\"no support implemented %#v\\n\", fieldType)\n\t\t}\n\t\tif field.hasLocker != \"\" {\n\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\tg.Printf(\"}\\n\")\n\t\t}\n\t}\n\tg.Printf(\"\\tif err := writeBuffer.PopContext(%s); err != nil {\\n\", logicalTypeName)\n\tg.Printf(\"\\t\\treturn err\\n\")\n\tg.Printf(\"\\t}\\n\")\n\tg.Printf(\"\\treturn nil\\n\")\n\tg.Printf(\"}\\n\")\n\tg.Printf(\"\\n\")\n\tg.Printf(stringerTemplate, typeName)\n}", "func genField(field *ast.FieldDefinition) *jen.Statement {\n\t//\n\t// Generate config for field\n\t//\n\t// == Example input SDL\n\t//\n\t// interface Pet {\n\t// \"name of the pet\"\n\t// name(style: NameComponentsStyle = SHORT): String!\n\t// \"\"\"\n\t// givenName of the pet ★\n\t// \"\"\"\n\t// givenName: String @deprecated(reason: \"No longer supported; please use name field.\")\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// &graphql.Field{\n\t// Name: \"name\",\n\t// Type: graphql.NonNull(graphql.String),\n\t// Description: \"name of the pet\",\n\t// DeprecationReason: \"\",\n\t// Args: FieldConfigArgument{ ... 
},\n\t// }\n\t//\n\t// &graphql.Field{\n\t// Name: \"givenName\",\n\t// Type: graphql.String,\n\t// Description: \"givenName of the pet\",\n\t// DeprecationReason: \"No longer supported; please use name field.\",\n\t// Args: FieldConfigArgument{ ... },\n\t// }\n\t//\n\treturn jen.Op(\"&\").Qual(defsPkg, \"Field\").Values(jen.Dict{\n\t\tjen.Id(\"Args\"): genArguments(field.Arguments),\n\t\tjen.Id(\"DeprecationReason\"): genDeprecationReason(field.Directives),\n\t\tjen.Id(\"Description\"): genDescription(field),\n\t\tjen.Id(\"Name\"): jen.Lit(field.Name.Value),\n\t\tjen.Id(\"Type\"): genOutputTypeReference(field.Type),\n\t})\n}", "func (g *Generator) generate(typeName string) {\n\tfields := make([]Field, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeName = typeName\n\t\tfile.fields = nil\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tg.additionalImports = append(g.additionalImports, file.additionalImports...)\n\t\t\tfields = append(fields, file.fields...)\n\t\t}\n\t}\n\n\tif len(fields) == 0 {\n\t\tlog.Fatalf(\"no values defined for type %s\", typeName)\n\t}\n\n\tg.build(fields, typeName)\n}", "func (fs *FileStat) GenerateFields() (string, error) {\n\ttb, e := fs.modTime.MarshalBinary()\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\tcb, e := fs.compressedBytes()\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\n\tformat := `\"%s\", \"%s\", %d, 0%o, binfs.MustHexDecode(\"%x\"), %t, binfs.MustHexDecode(\"%x\")`\n\treturn fmt.Sprintf(format,\n\t\tfs.path,\n\t\tfs.name,\n\t\tfs.size,\n\t\tfs.mode,\n\t\ttb,\n\t\tfs.isDir,\n\t\tcb,\n\t), nil\n}", "func (g *Generator) collectAndGenerate(typeName string, genFn GeneratorFunc) {\n\tfields := make([]Field, 0, 100)\n\timports := make([]Import, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeName = typeName\n\t\tfile.fields = nil\n\t\tif file.file != nil 
{\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tfields = append(fields, file.fields...)\n\t\t\timports = append(imports, file.imports...)\n\t\t}\n\t}\n\n\tgenFn(typeName, fields, imports)\n\n}", "func Generate(fields map[string]*indexer.Field) map[string]interface{} {\n\treturn mapFields(fields)\n}", "func GenerateBaseFields(conf CurveConfig) error {\n\tif err := goff.GenerateFF(\"fr\", \"Element\", conf.RTorsion, filepath.Join(conf.OutputDir, \"fr\"), false); err != nil {\n\t\treturn err\n\t}\n\tif err := goff.GenerateFF(\"fp\", \"Element\", conf.FpModulus, filepath.Join(conf.OutputDir, \"fp\"), false); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func TraceFieldGenerator(ctx context.Context) []zapcore.Field {\n\tspanCtx := trace.FromContext(ctx).SpanContext()\n\n\treturn []zapcore.Field{\n\t\tzap.Uint64(\"dd.trace_id\", binary.BigEndian.Uint64(spanCtx.TraceID[8:])),\n\t\tzap.Uint64(\"dd.span_id\", binary.BigEndian.Uint64(spanCtx.SpanID[:])),\n\t}\n}", "func expandFields(compiled *lang.CompiledExpr, define *lang.DefineExpr) lang.DefineFieldsExpr {\n\tvar fields lang.DefineFieldsExpr\n\tfor _, field := range define.Fields {\n\t\tif isEmbeddedField(field) {\n\t\t\tembedded := expandFields(compiled, compiled.LookupDefine(string(field.Type)))\n\t\t\tfields = append(fields, embedded...)\n\t\t} else {\n\t\t\tfields = append(fields, field)\n\t\t}\n\t}\n\treturn fields\n}", "func (n ClassNode) Codegen(scope *Scope, c *Compiler) value.Value {\n\tstructDefn := scope.FindType(n.Name).Type.(*types.StructType)\n\n\tfieldnames := make([]string, 0, len(n.Variables))\n\tfields := make([]types.Type, 0, len(n.Variables))\n\n\tnames := map[string]bool{}\n\n\tfor _, f := range n.Variables {\n\t\tt := f.Type.Name\n\t\tname := f.Name.String()\n\t\tif _, found := names[name]; found {\n\t\t\tlog.Fatal(\"Class '%s' has two fields/methods named '%s'\\n\", n.Name, f.Name)\n\t\t}\n\t\tnames[name] = true\n\t\tty := scope.FindType(t).Type\n\t\tty = 
f.Type.BuildPointerType(ty)\n\t\tfields = append(fields, ty)\n\t\tfieldnames = append(fieldnames, name)\n\t}\n\n\tthisArg := VariableDefnNode{}\n\tthisArg.Name = NewNamedReference(\"this\")\n\tthisArg.Type = GeodeTypeRef{}\n\tthisArg.Type.Array = false\n\tthisArg.Type.Name = n.Name\n\tthisArg.Type.PointerLevel = 1\n\n\tstructDefn.Fields = fields\n\tstructDefn.Names = fieldnames\n\n\tmethodBaseArgs := []VariableDefnNode{thisArg}\n\tfor _, m := range n.Methods {\n\t\tm.Name.Value = fmt.Sprintf(\"class.%s.%s\", n.Name, m.Name)\n\t\tif _, found := names[m.Name.String()]; found {\n\t\t\tlog.Fatal(\"Class '%s' has two fields/methods named '%s'\\n\", n.Name, m.Name)\n\t\t}\n\t\tnames[m.Name.String()] = true\n\t\tm.Args = append(methodBaseArgs, m.Args...)\n\t\tm.Declare(scope, c)\n\t\tm.Codegen(scope, c)\n\t}\n\n\treturn nil\n}", "func (p TreeWriter) getFields(leaf *yaml.RNode) (treeFields, error) {\n\tfieldsByName := map[string]*treeField{}\n\n\t// index nested and non-nested fields\n\tfor i := range p.Fields {\n\t\tf := p.Fields[i]\n\t\tseq, err := leaf.Pipe(&f)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif seq == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif fieldsByName[f.Name] == nil {\n\t\t\tfieldsByName[f.Name] = &treeField{name: f.Name}\n\t\t}\n\n\t\t// non-nested field -- add directly to the treeFields list\n\t\tif f.SubName == \"\" {\n\t\t\t// non-nested field -- only 1 element\n\t\t\tval, err := yaml.String(seq.Content()[0], yaml.Trim, yaml.Flow)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tfieldsByName[f.Name].value = val\n\t\t\tcontinue\n\t\t}\n\n\t\t// nested-field -- create a parent elem, and index by the 'match' value\n\t\tif fieldsByName[f.Name].subFieldByMatch == nil {\n\t\t\tfieldsByName[f.Name].subFieldByMatch = map[string]treeFields{}\n\t\t}\n\t\tindex := fieldsByName[f.Name].subFieldByMatch\n\t\tfor j := range seq.Content() {\n\t\t\telem := seq.Content()[j]\n\t\t\tmatches := f.Matches[elem]\n\t\t\tstr, err := yaml.String(elem, 
yaml.Trim, yaml.Flow)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// map the field by the name of the element\n\t\t\t// index the subfields by the matching element so we can put all the fields for the\n\t\t\t// same element under the same branch\n\t\t\tmatchKey := strings.Join(matches, \"/\")\n\t\t\tindex[matchKey] = append(index[matchKey], &treeField{name: f.SubName, value: str})\n\t\t}\n\t}\n\n\t// iterate over collection of all queried fields in the Resource\n\tfor _, field := range fieldsByName {\n\t\t// iterate over collection of elements under the field -- indexed by element name\n\t\tfor match, subFields := range field.subFieldByMatch {\n\t\t\t// create a new element for this collection of fields\n\t\t\t// note: we will convert name to an index later, but keep the match for sorting\n\t\t\telem := &treeField{name: match}\n\t\t\tfield.matchingElementsAndFields = append(field.matchingElementsAndFields, elem)\n\n\t\t\t// iterate over collection of queried fields for the element\n\t\t\tfor i := range subFields {\n\t\t\t\t// add to the list of fields for this element\n\t\t\t\telem.matchingElementsAndFields = append(elem.matchingElementsAndFields, subFields[i])\n\t\t\t}\n\t\t}\n\t\t// clear this cached data\n\t\tfield.subFieldByMatch = nil\n\t}\n\n\t// put the fields in a list so they are ordered\n\tfieldList := treeFields{}\n\tfor _, v := range fieldsByName {\n\t\tfieldList = append(fieldList, v)\n\t}\n\n\t// sort the fields\n\tsort.Sort(fieldList)\n\tfor i := range fieldList {\n\t\tfield := fieldList[i]\n\t\t// sort the elements under this field\n\t\tsort.Sort(field.matchingElementsAndFields)\n\n\t\tfor i := range field.matchingElementsAndFields {\n\t\t\telement := field.matchingElementsAndFields[i]\n\t\t\t// sort the elements under a list field by their name\n\t\t\tsort.Sort(element.matchingElementsAndFields)\n\t\t\t// set the name of the element to its index\n\t\t\telement.name = fmt.Sprintf(\"%d\", i)\n\t\t}\n\t}\n\n\treturn fieldList, 
nil\n}", "func generate(copyrights string, collector *collector, templateBuilder templateBuilder) {\n\tfor _, pkg := range collector.Packages {\n\t\tfileTemplate := fileTpl{\n\t\t\tCopyright: copyrights,\n\n\t\t\tStandardImports: []string{\n\t\t\t\t\"fmt\",\n\t\t\t\t\"unicode\",\n\t\t\t\t\"unicode/utf8\",\n\t\t\t},\n\n\t\t\tCustomImports: []string{\n\t\t\t\t\"github.com/google/uuid\",\n\t\t\t},\n\t\t}\n\t\tfor _, f := range pkg.Files {\n\t\t\tfor _, d := range f.Decls {\n\t\t\t\tg, ok := d.(*ast.GenDecl)\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tstructs := structSearch(g)\n\t\t\t\tif len(structs) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tfor _, s := range structs {\n\t\t\t\t\tatLeastOneField := false\n\n\t\t\t\t\tfor _, field := range s.Type.Fields.List {\n\n\t\t\t\t\t\tpos := collector.FileSet.Position(field.Type.Pos())\n\t\t\t\t\t\ttyp := collector.Info.TypeOf(field.Type)\n\n\t\t\t\t\t\tcomposedType := \"\"\n\t\t\t\t\t\tbaseName := getType(typ, &composedType)\n\t\t\t\t\t\tfmt.Println(\"Add validation: \", pos, \": \", baseName, \"/\", composedType)\n\n\t\t\t\t\t\tif err := templateBuilder.generateCheck(field, s.Name, baseName, composedType); err != nil {\n\t\t\t\t\t\t\tfmt.Printf(\"struct %s: %s\\n\", s.Name, err)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tatLeastOneField = true\n\t\t\t\t\t}\n\n\t\t\t\t\tif !atLeastOneField {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\terr := templateBuilder.generateMethod(s.Name)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfmt.Printf(\"struct gen %s: %s\\n\", s.Name, err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfileTemplate.Package = pkg.Name\n\t\terr := templateBuilder.generateFile(pkg.Path, fileTemplate)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Generation error\", err)\n\t\t}\n\t}\n}", "func (f *File) genDecl(node ast.Node) bool {\n\tdecl, ok := node.(*ast.GenDecl)\n\tif !ok || decl.Tok != token.TYPE {\n\t\t// We only care about type declarations.\n\t\treturn 
true\n\t}\n\tfor _, spec := range decl.Specs {\n\t\ttypeSpec := spec.(*ast.TypeSpec)\n\t\tstructDecl, ok := typeSpec.Type.(*ast.StructType)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif typeSpec.Name.Name != f.typeName {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Printf(\"Handling %s\\n\", typeSpec.Name.Name)\n\t\tfor _, field := range structDecl.Fields.List {\n\t\t\tif field.Tag != nil && field.Tag.Value == \"`ignore:\\\"true\\\"`\" {\n\t\t\t\tvar name string\n\t\t\t\tif len(field.Names) != 0 {\n\t\t\t\t\tname = field.Names[0].Name\n\t\t\t\t} else {\n\t\t\t\t\tname = \"<delegate>\"\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\"\\t ignoring field %s %v\\n\", name, field.Type)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tisStringer := false\n\t\t\tif field.Tag != nil && field.Tag.Value == \"`stringer:\\\"true\\\"`\" { // TODO: Check if we do that a bit smarter\n\t\t\t\tisStringer = true\n\t\t\t}\n\t\t\thasLocker := \"\"\n\t\t\tif field.Tag != nil && strings.HasPrefix(field.Tag.Value, \"`hasLocker:\\\"\") { // TODO: Check if we do that a bit smarter\n\t\t\t\thasLocker = strings.TrimPrefix(field.Tag.Value, \"`hasLocker:\\\"\")\n\t\t\t\thasLocker = strings.TrimSuffix(hasLocker, \"\\\"`\")\n\t\t\t}\n\t\t\tif len(field.Names) == 0 {\n\t\t\t\tfmt.Printf(\"\\t adding delegate\\n\")\n\t\t\t\tswitch ft := field.Type.(type) {\n\t\t\t\tcase *ast.Ident:\n\t\t\t\t\tf.fields = append(f.fields, Field{\n\t\t\t\t\t\tfieldType: ft,\n\t\t\t\t\t\tisDelegate: true,\n\t\t\t\t\t\tisStringer: isStringer,\n\t\t\t\t\t\thasLocker: hasLocker,\n\t\t\t\t\t})\n\t\t\t\t\tcontinue\n\t\t\t\tcase *ast.StarExpr:\n\t\t\t\t\tswitch set := ft.X.(type) {\n\t\t\t\t\tcase *ast.Ident:\n\t\t\t\t\t\tf.fields = append(f.fields, Field{\n\t\t\t\t\t\t\tfieldType: set,\n\t\t\t\t\t\t\tisDelegate: true,\n\t\t\t\t\t\t\tisStringer: isStringer,\n\t\t\t\t\t\t\thasLocker: hasLocker,\n\t\t\t\t\t\t})\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tpanic(fmt.Sprintf(\"Only pointer to struct delegates supported now. 
Type %T\", field.Type))\n\t\t\t\t\t}\n\t\t\t\tcase *ast.SelectorExpr:\n\t\t\t\t\tf.fields = append(f.fields, Field{\n\t\t\t\t\t\tfieldType: ft.Sel,\n\t\t\t\t\t\tisDelegate: true,\n\t\t\t\t\t\tisStringer: isStringer,\n\t\t\t\t\t\thasLocker: hasLocker,\n\t\t\t\t\t})\n\t\t\t\t\tcontinue\n\t\t\t\tdefault:\n\t\t\t\t\tpanic(fmt.Sprintf(\"Only struct delegates supported now. Type %T\", field.Type))\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Printf(\"\\t adding field %s %v\\n\", field.Names[0].Name, field.Type)\n\t\t\tf.fields = append(f.fields, Field{\n\t\t\t\tname: field.Names[0].Name,\n\t\t\t\tfieldType: field.Type,\n\t\t\t\tisStringer: isStringer,\n\t\t\t\thasLocker: hasLocker,\n\t\t\t})\n\t\t}\n\t}\n\treturn false\n}", "func fields(spec *ast.TypeSpec) []*ast.Field {\n\ts := make([]*ast.Field, 0)\n\tif structType, ok := spec.Type.(*ast.StructType); ok {\n\t\tfor _, field := range structType.Fields.List {\n\t\t\tif keyname(field) != \"\" {\n\t\t\t\ts = append(s, field)\n\t\t\t}\n\t\t}\n\t}\n\treturn s\n}", "func Fields() error {\n\treturn devtools.GenerateFieldsYAML()\n}", "func (g *generator) structFields(t reflect.Type) []field {\n\tvar fields []field\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tf := t.Field(i)\n\t\tif g.ignoreField(t, f) {\n\t\t\tcontinue\n\t\t}\n\t\tname, _ := parseTag(g.fieldTagKey, f.Tag)\n\t\tif name == \"\" {\n\t\t\tname = f.Name\n\t\t}\n\t\tfields = append(fields, field{\n\t\t\tName: name,\n\t\t\tType: f.Type,\n\t\t\tZero: zeroValue(f.Type),\n\t\t})\n\t}\n\treturn fields\n}", "func generateStruct(a *AnnotationDoc, packageName string, imports []string, indent string) (string, []string) {\n\tvar allAnnotationsPackages []string\n\tpossiblePackagesForA := combinePackages(imports, []string{packageName})\n\tts, foundPackageOfA, foundImportsOfA := getAnnotationStruct(a.Name, possiblePackagesForA)\n\tallAnnotationsPackages = combinePackages(allAnnotationsPackages, []string{foundPackageOfA})\n\tstr, _ := ts.Type.(*ast.StructType)\n\tvar b 
bytes.Buffer\n\tb.WriteString(indent)\n\tb.WriteString(foundPackageOfA)\n\tb.WriteString(\".\")\n\tb.WriteString(a.Name)\n\tb.WriteString(\"{\\n\")\n\tchildIndent := indent + \" \"\n\tfor _, f := range str.Fields.List {\n\t\tfieldName := getFieldName(f)\n\t\tdefValue := getDefaultValue(f)\n\t\tfieldKey := fieldName\n\t\t// consider special case when only default parameter is specified\n\t\tif len(str.Fields.List) == 1 && len(a.Content) == 1 {\n\t\t\tfor key := range a.Content {\n\t\t\t\tif key == DEFAULT_PARAM {\n\t\t\t\t\tfieldKey = DEFAULT_PARAM\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tvalue, found := a.Content[fieldKey]\n\t\tif found {\n\t\t\tswitch t := value.(type) {\n\t\t\tcase string:\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(getLiteral(f.Type, t, false))\n\t\t\t\tb.WriteString(\",\\n\")\n\t\t\tcase []string:\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(getFieldConstructor(f.Type))\n\t\t\t\tb.WriteString(\"\\n\")\n\t\t\t\tfor _, elem := range t {\n\t\t\t\t\tb.WriteString(childIndent + \" \")\n\t\t\t\t\tb.WriteString(elem)\n\t\t\t\t\tb.WriteString(\",\\n\")\n\t\t\t\t}\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(\"}\")\n\t\t\tcase []AnnotationDoc:\n\t\t\t\t// calculate array's elements\n\t\t\t\tvar bb bytes.Buffer\n\t\t\t\tfor _, sa := range t {\n\t\t\t\t\tchildCode, foundImportsOfChild := generateStruct(&sa, foundPackageOfA, foundImportsOfA, childIndent+\" \")\n\t\t\t\t\tallAnnotationsPackages = combinePackages(allAnnotationsPackages, foundImportsOfChild)\n\t\t\t\t\tbb.WriteString(childCode)\n\t\t\t\t\tbb.WriteString(\",\\n\")\n\t\t\t\t}\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\t// insert array initialzer of child annotation type\n\t\t\t\ts := writeArrayInitializer(&b, bb.String())\n\t\t\t\t// append array of child annotations\n\t\t\t\tb.WriteString(\"{\\n\")\n\t\t\t\tb.WriteString(childIndent + \" \")\n\t\t\t\tb.WriteString(s)\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(\"},\\n\")\n\t\t\tcase 
AnnotationDoc:\n\t\t\t\tchildCode, foundImportsOfChild := generateStruct(&t, foundPackageOfA, foundImportsOfA, childIndent)\n\t\t\t\tallAnnotationsPackages = combinePackages(allAnnotationsPackages, foundImportsOfChild)\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tif isOptional(f.Type) {\n\t\t\t\t\tb.WriteString(\"&\")\n\t\t\t\t}\n\t\t\t\tb.WriteString(strings.TrimLeft(childCode, \" \"))\n\t\t\t\tb.WriteString(\",\\n\")\n\t\t\tdefault:\n\t\t\t\tpanic(\"Unexpected annotation value type\")\n\t\t\t}\n\t\t} else {\n\t\t\tb.WriteString(childIndent)\n\t\t\tb.WriteString(defValue)\n\t\t\tb.WriteString(\",\\n\")\n\t\t}\n\t}\n\tb.WriteString(indent)\n\tb.WriteString(\"}\")\n\treturn b.String(), allAnnotationsPackages\n}", "func MapFieldsToTypExpr(args ...*ast.Field) []ast.Expr {\n\tr := []ast.Expr{}\n\tfor idx, f := range args {\n\t\tif len(f.Names) == 0 {\n\t\t\tf.Names = []*ast.Ident{ast.NewIdent(fmt.Sprintf(\"f%d\", idx))}\n\t\t}\n\n\t\tfor _ = range f.Names {\n\t\t\tr = append(r, f.Type)\n\t\t}\n\n\t}\n\treturn r\n}", "func GenStructFromAllOfTypes(allOf []TypeDefinition) string {\n\t// Start out with struct {\n\tobjectParts := []string{\"struct {\"}\n\tfor _, td := range allOf {\n\t\tref := td.Schema.RefType\n\t\tif ref != \"\" {\n\t\t\t// We have a referenced type, we will generate an inlined struct\n\t\t\t// member.\n\t\t\t// struct {\n\t\t\t// InlinedMember\n\t\t\t// ...\n\t\t\t// }\n\t\t\tobjectParts = append(objectParts,\n\t\t\t\tfmt.Sprintf(\" // Embedded struct due to allOf(%s)\", ref))\n\t\t\tobjectParts = append(objectParts,\n\t\t\t\tfmt.Sprintf(\" %s `yaml:\\\",inline\\\"`\", ref))\n\t\t} else {\n\t\t\t// Inline all the fields from the schema into the output struct,\n\t\t\t// just like in the simple case of generating an object.\n\t\t\tobjectParts = append(objectParts, \" // Embedded fields due to inline allOf schema\")\n\t\t\tobjectParts = append(objectParts, GenFieldsFromProperties(td.Schema.Properties)...)\n\n\t\t\tif td.Schema.HasAdditionalProperties 
{\n\t\t\t\taddPropsType := td.Schema.AdditionalPropertiesType.GoType\n\t\t\t\tif td.Schema.AdditionalPropertiesType.RefType != \"\" {\n\t\t\t\t\taddPropsType = td.Schema.AdditionalPropertiesType.RefType\n\t\t\t\t}\n\n\t\t\t\tadditionalPropertiesPart := fmt.Sprintf(\"AdditionalProperties map[string]%s `json:\\\"-\\\"`\", addPropsType)\n\t\t\t\tif !StringInArray(additionalPropertiesPart, objectParts) {\n\t\t\t\t\tobjectParts = append(objectParts, additionalPropertiesPart)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tobjectParts = append(objectParts, \"}\")\n\treturn strings.Join(objectParts, \"\\n\")\n}", "func (b *Builder) InputFields(source reflect.Value, parent reflect.Value) graphql.InputObjectConfigFieldMap {\n\tresult := make(graphql.InputObjectConfigFieldMap, 0)\n\tnodes := b.buildObject(source, parent)\n\tfor _, node := range nodes {\n\t\tif node.skip {\n\t\t\tcontinue\n\t\t}\n\t\tif !node.source.CanSet() {\n\t\t\tcontinue\n\t\t}\n\t\tif node.readOnly {\n\t\t\tcontinue\n\t\t}\n\n\t\tname := node.alias\n\t\tif name == \"\" {\n\t\t\tname = strcase.ToLowerCamel(node.name)\n\t\t}\n\t\tgType := b.mapInput(node.source, parent)\n\t\tif node.required {\n\t\t\tgType = graphql.NewNonNull(gType)\n\t\t}\n\n\t\tfield := &graphql.InputObjectFieldConfig{\n\t\t\tType: gType,\n\t\t}\n\t\tresult[name] = field\n\t}\n\treturn result\n}", "func parse(r io.Reader) ([]field, error) {\n\tinData, err := models.Unmarshal(r)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshalling models.yml: %w\", err)\n\t}\n\n\tvar fields []field\n\tfor collectionName, collection := range inData {\n\t\tfor fieldName, modelField := range collection.Fields {\n\t\t\tf := field{}\n\t\t\tf.Name = collectionName + \"/\" + fieldName\n\t\t\tf.GoName = goName(collectionName) + \"_\" + goName(fieldName)\n\t\t\tf.GoType = goType(modelField.Type)\n\t\t\tf.Collection = firstLower(goName(collectionName))\n\t\t\tf.FQField = collectionName + \"/%d/\" + fieldName\n\t\t\tf.Required = modelField.Required\n\n\t\t\tif 
modelField.Type == \"relation\" || modelField.Type == \"generic-relation\" {\n\t\t\t\tf.SingleRelation = true\n\t\t\t}\n\n\t\t\tif strings.Contains(fieldName, \"$\") {\n\t\t\t\tf.TemplateAttr = \"replacement\"\n\t\t\t\tf.TemplateAttrType = \"string\"\n\t\t\t\tf.TemplateFQField = collectionName + \"/%d/\" + strings.Replace(fieldName, \"$\", \"$%s\", 1)\n\t\t\t\tf.GoType = goType(modelField.Template.Fields.Type)\n\n\t\t\t\tif modelField.Template.Replacement != \"\" {\n\t\t\t\t\tf.TemplateAttr = modelField.Template.Replacement + \"ID\"\n\t\t\t\t\tf.TemplateAttrType = \"int\"\n\t\t\t\t\tf.TemplateFQField = collectionName + \"/%d/\" + strings.Replace(fieldName, \"$\", \"$%d\", 1)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfields = append(fields, f)\n\t\t}\n\t}\n\n\t// TODO: fix models-to-go to return fields in input order.\n\tsort.Slice(fields, func(i, j int) bool {\n\t\treturn fields[i].GoName < fields[j].GoName\n\t})\n\n\treturn fields, nil\n}", "func (f *File) genDecl(node ast.Node) bool {\n\tdecl, ok := node.(*ast.GenDecl)\n\tif !ok || decl.Tok != token.TYPE { // We only care about Type declarations.\n\t\treturn true\n\t}\n\t// The name of the type of the constants we are declaring.\n\t// Can change if this is a multi-element declaration.\n\ttyp := \"\"\n\t// Loop over the elements of the declaration. Each element is a ValueSpec:\n\t// a list of names possibly followed by a type, possibly followed by values.\n\t// If the type and value are both missing, we carry down the type (and value,\n\t// but the \"go/types\" package takes care of that).\n\tfor _, spec := range decl.Specs {\n\t\ttspec := spec.(*ast.TypeSpec) // Guaranteed to succeed as this is TYPE.\n\t\tif tspec.Type != nil {\n\t\t\t// \"X T\". We have a type. 
Remember it.\n\t\t\ttyp = tspec.Name.Name\n\t\t}\n\t\tif typ != f.typeName {\n\t\t\t// This is not the type we're looking for.\n\t\t\tcontinue\n\t\t}\n\t\t// We now have a list of names (from one line of source code) all being\n\t\t// declared with the desired type.\n\n\t\tstructType, ok := tspec.Type.(*ast.StructType)\n\t\tif !ok {\n\t\t\t//not a struct type\n\t\t\tcontinue\n\t\t}\n\n\t\ttypesObj, typeObjOk := f.pkg.defs[tspec.Name]\n\t\tif !typeObjOk {\n\t\t\tlog.Fatalf(\"no type info found for struct %s\", typ)\n\t\t}\n\n\t\tfor _, fieldLine := range structType.Fields.List {\n\t\t\tfor _, field := range fieldLine.Names {\n\t\t\t\t//skip struct padding\n\t\t\t\tif field.Name == \"_\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfieldObj, _, _ := types.LookupFieldOrMethod(typesObj.Type(), false, f.pkg.typesPkg, field.Name)\n\n\t\t\t\ttypeStr := fieldObj.Type().String()\n\t\t\t\ttags := parseFieldTags(fieldLine.Tag)\n\n\t\t\t\t//Skip here so we don't include rubbish import lines\n\t\t\t\tif tags[\"exclude_dao\"].Value == \"true\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tprocessedTypeStr, importPath := processTypeStr(typeStr)\n\t\t\t\t//log.Printf(\"processedTypeStr: %s, importPath: %s\", processedTypeStr, importPath)\n\n\t\t\t\tif importPath != \"\" && !importExists(importPath, f.imports) {\n\n\t\t\t\t\tf.imports = append(f.imports, Import{importPath})\n\n\t\t\t\t}\n\n\t\t\t\tv := Field{\n\t\t\t\t\tName: field.Name,\n\t\t\t\t\tTags: tags,\n\t\t\t\t\tTypeName: processedTypeStr,\n\t\t\t\t}\n\t\t\t\tf.fields = append(f.fields, v)\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (f *File) genDecl(node ast.Node) bool {\n\tdecl, ok := node.(*ast.GenDecl)\n\n\tif !ok || decl.Tok != token.TYPE {\n\t\t// We only care about types declarations.\n\t\treturn true\n\t}\n\n\t// Loop over the elements of the declaration. 
Each element is a ValueSpec:\n\t// a list of names possibly followed by a type, possibly followed by values.\n\t// If the type and value are both missing, we carry down the type (and value,\n\t// but the \"go/types\" package takes care of that).\n\tfor _, spec := range decl.Specs {\n\t\ttspec := spec.(*ast.TypeSpec) // Guaranteed to succeed as this is TYPE.\n\n\t\tif tspec.Name.Name != f.typeName {\n\t\t\t// Not the type we're looking for.\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Printf(\"Type spec: %v name: %s\\n\", tspec.Type, tspec.Name.Name)\n\n\t\tif structType, ok := tspec.Type.(*ast.StructType); ok {\n\t\t\tlog.Printf(\"Located the struct type: %v\\n\", structType)\n\n\t\t\tfor _, field := range structType.Fields.List {\n\t\t\t\tlog.Printf(\"Field: %v\\n\", field)\n\n\t\t\t\tif ident, ok := field.Type.(*ast.Ident); ok {\n\t\t\t\t\t// Look at list of known types and determine if we have a translation.\n\t\t\t\t\ttp := KNOWN_SOURCE_TYPES[ident.Name]\n\n\t\t\t\t\tif tp != ST_UNKNOWN {\n\t\t\t\t\t\tlog.Printf(\"Primitive or local type found: %v => %s\\n\", ident.Name, tp.String())\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// TODO: We should probably consider all of these fields as local objects and add\n\t\t\t\t\t\t// foreign key links.\n\t\t\t\t\t\tlog.Printf(\"UNRECOGNIZED LOCAL TYPE seen: %v\\n\", ident.Name)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tif len(field.Names) == 1 {\n\t\t\t\t\t\tfieldName := field.Names[0].Name\n\t\t\t\t\t\tisPK := false\n\n\t\t\t\t\t\tif strings.ToLower(fieldName) == \"id\" {\n\t\t\t\t\t\t\tisPK = true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tf.fields = append(f.fields,\n\t\t\t\t\t\t\tField{\n\t\t\t\t\t\t\t\tsrcName: fieldName,\n\t\t\t\t\t\t\t\tdbName: strings.ToLower(fieldName), // TODO: Override with annotations\n\t\t\t\t\t\t\t\tisPK: isPK,\n\t\t\t\t\t\t\t\tsrcType: ident.Name,\n\t\t\t\t\t\t\t\tdbType: \"string\",\n\t\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t} else if selector, ok := field.Type.(*ast.SelectorExpr); ok {\n\t\t\t\t\t// TODO: This likely 
means an object in another package. Foreign link?\n\t\t\t\t\tlog.Printf(\"Found selector: %s :: %s\\n\", selector.X, selector.Sel.Name)\n\t\t\t\t\ttypeName := fmt.Sprintf(\"%s.%s\", selector.X, selector.Sel.Name)\n\n\t\t\t\t\ttp := KNOWN_SOURCE_TYPES[typeName]\n\n\t\t\t\t\tif tp != ST_UNKNOWN {\n\t\t\t\t\t\tlog.Printf(\"Primitive or local type found: %v => %s\\n\", typeName, tp.String())\n\t\t\t\t\t\tf.additionalImports = append(f.additionalImports, fmt.Sprintf(\"%s\", selector.X))\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// TODO: We should probably consider all of these fields as local objects and add\n\t\t\t\t\t\t// foreign key links.\n\t\t\t\t\t\tlog.Printf(\"UNRECOGNIZED LOCAL TYPE seen: %v\\n\", typeName)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tif len(field.Names) == 1 {\n\t\t\t\t\t\tfieldName := field.Names[0].Name\n\t\t\t\t\t\tisPK := false\n\n\t\t\t\t\t\tif strings.ToLower(fieldName) == \"id\" {\n\t\t\t\t\t\t\tisPK = true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tf.fields = append(f.fields,\n\t\t\t\t\t\t\tField{\n\t\t\t\t\t\t\t\tsrcName: fieldName,\n\t\t\t\t\t\t\t\tdbName: strings.ToLower(fieldName), // TODO: Override with annotations\n\t\t\t\t\t\t\t\tisPK: isPK,\n\t\t\t\t\t\t\t\tsrcType: typeName,\n\t\t\t\t\t\t\t\tdbType: \"string\",\n\t\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// TODO: Enumerate all different possible types here.\n\t\t\t\t\tlog.Printf(\"UNKNOWN TYPE seen: %v\\n\", field.Type)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (c *TypeConverter) genStructConverter(\n\tkeyPrefix string,\n\tfromPrefix string,\n\tindent string,\n\tfromFields []*compile.FieldSpec,\n\ttoFields []*compile.FieldSpec,\n\tfieldMap map[string]FieldMapperEntry,\n\tprevKeyPrefixes []string,\n) error {\n\n\tfor i := 0; i < len(toFields); i++ {\n\t\ttoField := toFields[i]\n\n\t\t// Check for same named field\n\t\tvar fromField *compile.FieldSpec\n\t\tfor j := 0; j < len(fromFields); j++ {\n\t\t\tif fromFields[j].Name == toField.Name {\n\t\t\t\tfromField = 
fromFields[j]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\ttoSubIdentifier := keyPrefix + PascalCase(toField.Name)\n\t\ttoIdentifier := \"out.\" + toSubIdentifier\n\t\toverriddenIdentifier := \"\"\n\t\tfromIdentifier := \"\"\n\n\t\t// Check for mapped field\n\t\tvar overriddenField *compile.FieldSpec\n\n\t\t// check if this toField satisfies a fieldMap transform\n\t\ttransformFrom, ok := fieldMap[toSubIdentifier]\n\t\tif ok {\n\t\t\t// no existing direct fromField, just assign the transform\n\t\t\tif fromField == nil {\n\t\t\t\tfromField = transformFrom.Field\n\t\t\t\tif c.useRecurGen {\n\t\t\t\t\tfromIdentifier = \"inOriginal.\" + transformFrom.QualifiedName\n\t\t\t\t} else {\n\t\t\t\t\tfromIdentifier = \"in.\" + transformFrom.QualifiedName\n\t\t\t\t}\n\t\t\t\t// else there is a conflicting direct fromField\n\t\t\t} else {\n\t\t\t\t// depending on Override flag either the direct fromField or transformFrom is the OverrideField\n\t\t\t\tif transformFrom.Override {\n\t\t\t\t\t// check for required/optional setting\n\t\t\t\t\tif !transformFrom.Field.Required {\n\t\t\t\t\t\toverriddenField = fromField\n\t\t\t\t\t\toverriddenIdentifier = \"in.\" + fromPrefix +\n\t\t\t\t\t\t\tPascalCase(overriddenField.Name)\n\t\t\t\t\t}\n\t\t\t\t\t// If override is true and the new field is required,\n\t\t\t\t\t// there's a default instantiation value and will always\n\t\t\t\t\t// overwrite.\n\t\t\t\t\tfromField = transformFrom.Field\n\t\t\t\t\tif c.useRecurGen {\n\t\t\t\t\t\tfromIdentifier = \"inOriginal.\" + transformFrom.QualifiedName\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfromIdentifier = \"in.\" + transformFrom.QualifiedName\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// If override is false and the from field is required,\n\t\t\t\t\t// From is always populated and will never be overwritten.\n\t\t\t\t\tif !fromField.Required {\n\t\t\t\t\t\toverriddenField = transformFrom.Field\n\t\t\t\t\t\tif c.useRecurGen {\n\t\t\t\t\t\t\tfromIdentifier = \"inOriginal.\" + 
transformFrom.QualifiedName\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\toverriddenIdentifier = \"in.\" + transformFrom.QualifiedName\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// neither direct or transform fromField was found\n\t\tif fromField == nil {\n\t\t\t// search the fieldMap toField identifiers for matching identifier prefix\n\t\t\t// e.g. the current toField is a struct and something within it has a transform\n\t\t\t// a full match identifiers for transform non-struct types would have been caught above\n\t\t\thasStructFieldMapping := false\n\t\t\tfor toID := range fieldMap {\n\t\t\t\tif strings.HasPrefix(toID, toSubIdentifier) {\n\t\t\t\t\thasStructFieldMapping = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// if there's no fromField and no fieldMap transform that could be applied\n\t\t\tif !hasStructFieldMapping {\n\t\t\t\tvar bypass bool\n\t\t\t\t// check if required field is filled from other resources\n\t\t\t\t// it can be used to set system default (customized tracing /auth required for clients),\n\t\t\t\t// or header propagating\n\t\t\t\tif c.optionalEntries != nil {\n\t\t\t\t\tfor toID := range c.optionalEntries {\n\t\t\t\t\t\tif strings.HasPrefix(toID, toSubIdentifier) {\n\t\t\t\t\t\t\tbypass = true\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// the toField is either covered by optionalEntries, or optional and\n\t\t\t\t// there's nothing that maps to it or its sub-fields so we should skip it\n\t\t\t\tif bypass || !toField.Required {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// unrecoverable error\n\t\t\t\treturn errors.Errorf(\n\t\t\t\t\t\"required toField %s does not have a valid fromField mapping\",\n\t\t\t\t\ttoField.Name,\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\n\t\tif fromIdentifier == \"\" && fromField != nil {\n\t\t\t// should we set this if no fromField ??\n\t\t\tfromIdentifier = \"in.\" + fromPrefix + PascalCase(fromField.Name)\n\t\t}\n\n\t\tif prevKeyPrefixes == nil {\n\t\t\tprevKeyPrefixes = []string{}\n\t\t}\n\n\t\tvar 
overriddenFieldName string\n\t\tvar overriddenFieldType compile.TypeSpec\n\t\tif overriddenField != nil {\n\t\t\toverriddenFieldName = overriddenField.Name\n\t\t\toverriddenFieldType = overriddenField.Type\n\t\t}\n\n\t\t// Override thrift type names to avoid naming collisions between endpoint\n\t\t// and client types.\n\t\tswitch toFieldType := compile.RootTypeSpec(toField.Type).(type) {\n\t\tcase\n\t\t\t*compile.BoolSpec,\n\t\t\t*compile.I8Spec,\n\t\t\t*compile.I16Spec,\n\t\t\t*compile.I32Spec,\n\t\t\t*compile.EnumSpec,\n\t\t\t*compile.I64Spec,\n\t\t\t*compile.DoubleSpec,\n\t\t\t*compile.StringSpec:\n\n\t\t\terr := c.genConverterForPrimitive(\n\t\t\t\ttoField,\n\t\t\t\ttoIdentifier,\n\t\t\t\tfromField,\n\t\t\t\tfromIdentifier,\n\t\t\t\toverriddenField,\n\t\t\t\toverriddenIdentifier,\n\t\t\t\tindent,\n\t\t\t\tprevKeyPrefixes,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase *compile.BinarySpec:\n\t\t\tfor _, line := range checkOptionalNil(indent, c.uninitialized, toIdentifier, prevKeyPrefixes, c.useRecurGen) {\n\t\t\t\tc.append(line)\n\t\t\t}\n\t\t\tc.append(toIdentifier, \" = []byte(\", fromIdentifier, \")\")\n\t\tcase *compile.StructSpec:\n\t\t\tvar (\n\t\t\t\tstFromPrefix = fromPrefix\n\t\t\t\tstFromType compile.TypeSpec\n\t\t\t\tfromTypeName string\n\t\t\t)\n\t\t\tif fromField != nil {\n\t\t\t\tstFromType = fromField.Type\n\t\t\t\tstFromPrefix = fromPrefix + PascalCase(fromField.Name)\n\n\t\t\t\tfromTypeName, _ = c.getIdentifierName(stFromType)\n\t\t\t}\n\n\t\t\ttoTypeName, err := c.getIdentifierName(toFieldType)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif converterMethodName, ok := c.convStructMap[toFieldType.Name]; ok {\n\t\t\t\t// the converter for this struct has already been generated, so just use it\n\t\t\t\tc.append(indent, \"out.\", keyPrefix+PascalCase(toField.Name), \" = \", converterMethodName, \"(\", fromIdentifier, \")\")\n\t\t\t} else if c.useRecurGen && fromTypeName != \"\" {\n\t\t\t\t// generate a 
callable converter inside function literal\n\t\t\t\terr = c.genConverterForStructWrapped(\n\t\t\t\t\ttoField,\n\t\t\t\t\ttoFieldType,\n\t\t\t\t\ttoTypeName,\n\t\t\t\t\ttoSubIdentifier,\n\t\t\t\t\tfromTypeName,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t\tstFromType,\n\t\t\t\t\tfieldMap,\n\t\t\t\t\tprevKeyPrefixes,\n\t\t\t\t\tindent,\n\t\t\t\t)\n\t\t\t} else {\n\t\t\t\terr = c.genConverterForStruct(\n\t\t\t\t\ttoField.Name,\n\t\t\t\t\ttoFieldType,\n\t\t\t\t\ttoField.Required,\n\t\t\t\t\tstFromType,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t\tkeyPrefix+PascalCase(toField.Name),\n\t\t\t\t\tstFromPrefix,\n\t\t\t\t\tindent,\n\t\t\t\t\tfieldMap,\n\t\t\t\t\tprevKeyPrefixes,\n\t\t\t\t)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase *compile.ListSpec:\n\t\t\terr := c.genConverterForList(\n\t\t\t\ttoFieldParam{\n\t\t\t\t\ttoFieldType,\n\t\t\t\t\ttoField.Name,\n\t\t\t\t\ttoField.Required,\n\t\t\t\t\ttoIdentifier,\n\t\t\t\t},\n\t\t\t\tfromFieldParam{\n\t\t\t\t\tfromField.Type,\n\t\t\t\t\tfromField.Name,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t},\n\t\t\t\toverriddenFieldParam{\n\t\t\t\t\toverriddenFieldType,\n\t\t\t\t\toverriddenFieldName,\n\t\t\t\t\toverriddenIdentifier,\n\t\t\t\t},\n\t\t\t\tindent,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase *compile.MapSpec:\n\t\t\terr := c.genConverterForMap(\n\t\t\t\ttoFieldParam{\n\t\t\t\t\ttoFieldType,\n\t\t\t\t\ttoField.Name,\n\t\t\t\t\ttoField.Required,\n\t\t\t\t\ttoIdentifier,\n\t\t\t\t},\n\t\t\t\tfromFieldParam{\n\t\t\t\t\tfromField.Type,\n\t\t\t\t\tfromField.Name,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t},\n\t\t\t\toverriddenFieldParam{\n\t\t\t\t\toverriddenFieldType,\n\t\t\t\t\toverriddenFieldName,\n\t\t\t\t\toverriddenIdentifier,\n\t\t\t\t},\n\t\t\t\tindent,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\t// fmt.Printf(\"Unknown type %s for field %s \\n\",\n\t\t\t// \ttoField.Type.TypeCode().String(), 
toField.Name,\n\t\t\t// )\n\n\t\t\t// pkgName, err := h.TypePackageName(toField.Type.IDLFile())\n\t\t\t// if err != nil {\n\t\t\t// \treturn nil, err\n\t\t\t// }\n\t\t\t// typeName := pkgName + \".\" + toField.Type.ThriftName()\n\t\t\t// line := toIdentifier + \"(*\" + typeName + \")\" + postfix\n\t\t\t// c.Lines = append(c.Lines, line)\n\t\t}\n\t}\n\n\treturn nil\n}", "func JsonFieldGenerator() gopter.Gen {\n\tif jsonFieldGenerator != nil {\n\t\treturn jsonFieldGenerator\n\t}\n\n\tgenerators := make(map[string]gopter.Gen)\n\tAddIndependentPropertyGeneratorsForJsonField(generators)\n\tjsonFieldGenerator = gen.Struct(reflect.TypeOf(JsonField{}), generators)\n\n\treturn jsonFieldGenerator\n}", "func NodesFromTypedef(pkg *packages.Package, f *ast.File, typed *ast.GenDecl) ([]models.EncodedNode, []string, []string) {\n\tpf := pkg.Fset.File(f.Pos())\n\n\tkind := KindTypename\n\tnodes := []models.EncodedNode{}\n\tstructs := []string{}\n\tifaces := []string{}\n\n\tfor _, spec := range typed.Specs {\n\t\ttspec, ok := spec.(*ast.TypeSpec)\n\t\tif !ok {\n\t\t\tpanic(fmt.Errorf(\"Unknown type for processing types: %#v\", spec))\n\t\t}\n\t\tdoc := \"\"\n\t\tif tspec.Comment != nil {\n\t\t\tdoc = tspec.Comment.Text()\n\t\t}\n\t\tpublic := true\n\t\tname := tspec.Name.Name\n\t\tif 'a' <= name[0] && name[0] <= 'z' {\n\t\t\tpublic = false\n\t\t}\n\n\t\tuid := fmt.Sprintf(\"%s.%s\", pkg.PkgPath, name)\n\t\tnodes = append(nodes, models.EncodedNode{\n\t\t\tComponent: models.Component{\n\t\t\t\tUID: uid,\n\t\t\t\tDisplayName: fmt.Sprintf(\"%s.%s\", pkg.Name, name),\n\t\t\t\tDescription: doc,\n\t\t\t\tKind: kind,\n\t\t\t\t// HACK one line for definition and one for closing curly brace\n\t\t\t\tLocation: pos2loc(pf.Name(), tspec.Name.NamePos - token.Pos(pf.Base()), uint(pf.Base()), spec, uint(2)),\n\t\t\t},\n\t\t\tPublic: public,\n\t\t})\n\t\tswitch typeTyped := tspec.Type.(type) {\n\t\tcase *ast.StructType:\n\t\t\tstructs = append(structs, uid)\n\t\t\tfor _, field := range 
typeTyped.Fields.List {\n\t\t\t\tfieldDoc := \"\"\n\t\t\t\tif field.Comment != nil {\n\t\t\t\t\tfieldDoc = field.Comment.Text()\n\t\t\t\t}\n\t\t\t\tfor _, fieldName := range field.Names {\n\t\t\t\t\tnodes = append(nodes, models.EncodedNode{\n\t\t\t\t\t\tComponent: models.Component{\n\t\t\t\t\t\t\tUID: fmt.Sprintf(\"%s.%s.%s\", pkg.PkgPath, name, fieldName.Name),\n\t\t\t\t\t\t\tDisplayName: fmt.Sprintf(\"%s.%s.%s\", pkg.Name, name, fieldName.Name),\n\t\t\t\t\t\t\tDescription: fieldDoc,\n\t\t\t\t\t\t\tKind: KindField,\n\t\t\t\t\t\t\t// NOTE for multiple fields on the same line this is ambiguous\n\t\t\t\t\t\t\tLocation: pos2loc(pf.Name(), fieldName.NamePos - token.Pos(pf.Base()), uint(pf.Base()), field, 1),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tPublic: public,\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\tcase *ast.InterfaceType:\n\t\t\tifaces = append(ifaces, uid)\n\t\t\tfor _, method := range typeTyped.Methods.List {\n\t\t\t\tmethodDoc := \"\"\n\t\t\t\tif method.Comment != nil {\n\t\t\t\t\tmethodDoc = method.Comment.Text()\n\t\t\t\t}\n\t\t\t\tfor _, methodName := range method.Names {\n\t\t\t\t\tnodes = append(nodes, models.EncodedNode{\n\t\t\t\t\t\tComponent: models.Component{\n\t\t\t\t\t\t\tUID: fmt.Sprintf(\"%s.%s.%s\", pkg.PkgPath, name, methodName.Name),\n\t\t\t\t\t\t\tDisplayName: fmt.Sprintf(\"%s.%s.%s\", pkg.Name, name, methodName.Name),\n\t\t\t\t\t\t\tDescription: methodDoc,\n\t\t\t\t\t\t\tKind: KindMethod,\n\t\t\t\t\t\t\tLocation: pos2loc(pf.Name(), methodName.NamePos - token.Pos(pf.Base()), uint(pf.Base()), method, 1),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tPublic: public,\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nodes, structs, ifaces\n}", "func (m *BgpConfiguration) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"asn\"] = func (n 
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetInt32Value()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetAsn(val)\n }\n return nil\n }\n res[\"ipAddress\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetIpAddress(val)\n }\n return nil\n }\n res[\"localIpAddress\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetLocalIpAddress(val)\n }\n return nil\n }\n res[\"@odata.type\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOdataType(val)\n }\n return nil\n }\n res[\"peerIpAddress\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPeerIpAddress(val)\n }\n return nil\n }\n return res\n}", "func (m *Directory) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"administrativeUnits\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateAdministrativeUnitFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]AdministrativeUnitable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(AdministrativeUnitable)\n }\n }\n m.SetAdministrativeUnits(res)\n }\n return nil\n }\n res[\"attributeSets\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n 
val, err := n.GetCollectionOfObjectValues(CreateAttributeSetFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]AttributeSetable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(AttributeSetable)\n }\n }\n m.SetAttributeSets(res)\n }\n return nil\n }\n res[\"customSecurityAttributeDefinitions\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateCustomSecurityAttributeDefinitionFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]CustomSecurityAttributeDefinitionable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(CustomSecurityAttributeDefinitionable)\n }\n }\n m.SetCustomSecurityAttributeDefinitions(res)\n }\n return nil\n }\n res[\"deletedItems\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateDirectoryObjectFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]DirectoryObjectable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(DirectoryObjectable)\n }\n }\n m.SetDeletedItems(res)\n }\n return nil\n }\n res[\"federationConfigurations\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateIdentityProviderBaseFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]IdentityProviderBaseable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(IdentityProviderBaseable)\n }\n }\n m.SetFederationConfigurations(res)\n }\n return nil\n }\n res[\"onPremisesSynchronization\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := 
n.GetCollectionOfObjectValues(CreateOnPremisesDirectorySynchronizationFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]OnPremisesDirectorySynchronizationable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(OnPremisesDirectorySynchronizationable)\n }\n }\n m.SetOnPremisesSynchronization(res)\n }\n return nil\n }\n return res\n}", "func (m *BusinessScenarioPlanner) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"planConfiguration\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreatePlannerPlanConfigurationFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPlanConfiguration(val.(PlannerPlanConfigurationable))\n }\n return nil\n }\n res[\"taskConfiguration\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreatePlannerTaskConfigurationFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetTaskConfiguration(val.(PlannerTaskConfigurationable))\n }\n return nil\n }\n res[\"tasks\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateBusinessScenarioTaskFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]BusinessScenarioTaskable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(BusinessScenarioTaskable)\n }\n }\n m.SetTasks(res)\n }\n return nil\n }\n return res\n}", "func AddIndependentPropertyGeneratorsForJsonField(gens map[string]gopter.Gen) {\n\tgens[\"SourceField\"] = gen.PtrOf(gen.AlphaString())\n}", "func (g *mapGen) genType() {\n\tg.P(\"type \", g.typeName, \" struct {\")\n\tg.P(\"m *map[\", 
getGoType(g.GeneratedFile, g.field.Message.Fields[0]), \"]\", getGoType(g.GeneratedFile, g.field.Message.Fields[1]))\n\tg.P(\"}\")\n\tg.P()\n}", "func (c *Core) generate(tab Table) (string, error) {\n\tref := []reflect.StructField{}\n\tfor _, col := range tab.Columns {\n\t\tv := reflect.StructField{\n\t\t\tName: strings.Title(col.Name),\n\t\t}\n\t\tif col.Annotations != \"\" {\n\t\t\tv.Tag = reflect.StructTag(col.Annotations)\n\t\t}\n\t\tswitch col.Type {\n\t\tcase \"float\":\n\t\t\tv.Type = reflect.TypeOf(float64(0))\n\t\tcase \"varchar\":\n\t\t\tv.Type = reflect.TypeOf(string(\"\"))\n\t\tcase \"integer\", \"int\", \"tinyint\":\n\t\t\tv.Type = reflect.TypeOf(int(0))\n\t\tcase \"bigint\":\n\t\t\tv.Type = reflect.TypeOf(int64(0))\n\t\tcase \"timestamp\":\n\t\t\tv.Type = reflect.TypeOf(time.Time{})\n\t\t}\n\t\tref = append(ref, v)\n\t}\n\treturn fmt.Sprintf(\"type %s %s\", strings.Title(tab.Name), reflect.StructOf(ref).String()), nil\n}", "func getNodeFields() []string {\n\trt := reflect.TypeOf((*tailcfg.Node)(nil)).Elem()\n\tret := make([]string, rt.NumField())\n\tfor i := 0; i < rt.NumField(); i++ {\n\t\tret[i] = rt.Field(i).Name\n\t}\n\treturn ret\n}", "func (p *Parser) parseTypes(file *ast.File) (ret []structConfig) {\n\tast.Inspect(file, func(n ast.Node) bool {\n\t\tdecl, ok := n.(*ast.GenDecl)\n\t\tif !ok || decl.Tok != token.TYPE {\n\t\t\treturn true\n\t\t}\n\n\t\tfor _, spec := range decl.Specs {\n\t\t\tvar (\n\t\t\t\tdata structConfig\n\t\t\t)\n\t\t\ttypeSpec, _ok := spec.(*ast.TypeSpec)\n\t\t\tif !_ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// We only care about struct declaration (for now)\n\t\t\tvar structType *ast.StructType\n\t\t\tif structType, ok = typeSpec.Type.(*ast.StructType); !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdata.StructName = typeSpec.Name.Name\n\t\t\tfor _, v := range structType.Fields.List {\n\t\t\t\tvar (\n\t\t\t\t\toptionField fieldConfig\n\t\t\t\t)\n\n\t\t\t\tif t, _ok := v.Type.(*ast.Ident); _ok {\n\t\t\t\t\toptionField.FieldType = 
t.String()\n\t\t\t\t} else {\n\t\t\t\t\tif v.Tag != nil {\n\t\t\t\t\t\tif strings.Contains(v.Tag.Value, \"gorm\") && strings.Contains(v.Tag.Value, \"time\") {\n\t\t\t\t\t\t\toptionField.FieldType = \"time.Time\"\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif len(v.Names) > 0 {\n\t\t\t\t\toptionField.FieldName = v.Names[0].String()\n\t\t\t\t\toptionField.ColumnName = gorm.ToDBName(optionField.FieldName)\n\t\t\t\t\toptionField.HumpName = SQLColumnToHumpStyle(optionField.ColumnName)\n\t\t\t\t}\n\n\t\t\t\tdata.OptionFields = append(data.OptionFields, optionField)\n\t\t\t}\n\n\t\t\tret = append(ret, data)\n\t\t}\n\t\treturn true\n\t})\n\treturn\n}", "func (a *Aggregate) makeFields(parts []string) map[string]string {\n\tfields := make(map[string]string, len(parts))\n\tfor _, part := range parts {\n\t\tkv := strings.SplitN(part, protocol.AggregateKVDelimiter, 2)\n\t\tif len(kv) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\tfields[kv[0]] = kv[1]\n\t}\n\treturn fields\n}", "func TypeFields(t *Type) (fields []*Field) {\n\n\tif t == nil {\n\t\treturn\n\t}\n\n\tfor _, spec := range t.Decl.Specs {\n\n\t\ttypeSpec := spec.(*ast.TypeSpec)\n\n\t\t// struct type\n\t\tif str, ok := typeSpec.Type.(*ast.StructType); ok {\n\n\t\t\tfor _, f := range str.Fields.List {\n\t\t\t\tfields = append(fields, &Field{\n\t\t\t\t\tField: f,\n\t\t\t\t\tType: t,\n\t\t\t\t})\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\n\t\t// interface type methods\n\t\tif str, ok := typeSpec.Type.(*ast.InterfaceType); ok {\n\t\t\tfor _, field := range str.Methods.List {\n\t\t\t\tif ident, ok := field.Type.(*ast.Ident); ok && ident.Obj != nil {\n\t\t\t\t\tfield.Names = []*ast.Ident{ident}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, f := range str.Methods.List {\n\t\t\t\tfields = append(fields, &Field{\n\t\t\t\t\tField: f,\n\t\t\t\t\tType: t,\n\t\t\t\t})\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}", "func getFieldList(p *program.Program, f *ast.FunctionDecl, fieldTypes []string) (\n\t_ *goast.FieldList, err error) {\n\tdefer func() 
{\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"error in function field list. err = %v\", err)\n\t\t}\n\t}()\n\tr := []*goast.Field{}\n\tfor i := range fieldTypes {\n\t\tif len(f.Children()) <= i {\n\t\t\terr = fmt.Errorf(\"not correct type/children: %d, %d\",\n\t\t\t\tlen(f.Children()), len(fieldTypes))\n\t\t\treturn\n\t\t}\n\t\tn := f.Children()[i]\n\t\tif v, ok := n.(*ast.ParmVarDecl); ok {\n\t\t\tt, err := types.ResolveType(p, fieldTypes[i])\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"FieldList type: %s. %v\", fieldTypes[i], err)\n\t\t\t\tp.AddMessage(p.GenerateWarningMessage(err, f))\n\t\t\t\terr = nil // ignore error\n\t\t\t\tt = \"C4GO_UNDEFINE_TYPE\"\n\t\t\t}\n\n\t\t\tif t == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tr = append(r, &goast.Field{\n\t\t\t\tNames: []*goast.Ident{util.NewIdent(v.Name)},\n\t\t\t\tType: goast.NewIdent(t),\n\t\t\t})\n\t\t}\n\t}\n\n\t// for function argument: ...\n\tif strings.Contains(f.Type, \"...\") {\n\t\tr = append(r, &goast.Field{\n\t\t\tNames: []*goast.Ident{util.NewIdent(\"c4goArgs\")},\n\t\t\tType: &goast.Ellipsis{\n\t\t\t\tEllipsis: 1,\n\t\t\t\tElt: &goast.InterfaceType{\n\t\t\t\t\tInterface: 1,\n\t\t\t\t\tMethods: &goast.FieldList{\n\t\t\t\t\t\tOpening: 1,\n\t\t\t\t\t},\n\t\t\t\t\tIncomplete: false,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\n\treturn &goast.FieldList{\n\t\tList: r,\n\t}, nil\n}", "func (s VirtualNodeSpec) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.BackendDefaults != nil {\n\t\tv := s.BackendDefaults\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"backendDefaults\", v, metadata)\n\t}\n\tif s.Backends != nil {\n\t\tv := s.Backends\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"backends\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Listeners != nil {\n\t\tv := s.Listeners\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, 
\"listeners\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Logging != nil {\n\t\tv := s.Logging\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"logging\", v, metadata)\n\t}\n\tif s.ServiceDiscovery != nil {\n\t\tv := s.ServiceDiscovery\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"serviceDiscovery\", v, metadata)\n\t}\n\treturn nil\n}", "func IterFields(t *Type) (*Field, Iter)", "func (b *basic) ToGoCode(n *ecsgen.Node) (string, error) {\n\t// we can only generate a Go struct definition for an Object, verify\n\t// we're not shooting ourselves in the foot\n\tif !n.IsObject() {\n\t\treturn \"\", fmt.Errorf(\"node %s is not an object\", n.Path)\n\t}\n\n\t// Now enumerate the Node's fields and sort the keys so the resulting Go code\n\t// is deterministically generated\n\tfieldKeys := []string{}\n\n\tfor key := range n.Children {\n\t\tfieldKeys = append(fieldKeys, key)\n\t}\n\n\tsort.Strings(fieldKeys)\n\n\t// Create a new buffer to write the struct definition to\n\tbuf := new(strings.Builder)\n\n\t// comment and type definition\n\tbuf.WriteString(fmt.Sprintf(\"// %s defines the object located at ECS path %s.\", n.TypeIdent().Pascal(), n.Path))\n\tbuf.WriteString(\"\\n\")\n\tbuf.WriteString(fmt.Sprintf(\"type %s struct {\", n.TypeIdent().Pascal()))\n\tbuf.WriteString(\"\\n\")\n\n\t// Enumerate the fields and generate their field definition, adding it\n\t// to the buffer as a line item.\n\tfor _, k := range fieldKeys {\n\t\tscalarField := n.Children[k]\n\t\tbuf.WriteString(\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"\\t%s %s `json:\\\"%s,omitempty\\\" yaml:\\\"%s,omitempty\\\" ecs:\\\"%s\\\"`\",\n\t\t\t\tscalarField.FieldIdent().Pascal(),\n\t\t\t\tGoFieldType(scalarField),\n\t\t\t\tscalarField.Name,\n\t\t\t\tscalarField.Name,\n\t\t\t\tscalarField.Path,\n\t\t\t),\n\t\t)\n\t\tbuf.WriteString(\"\\n\")\n\t}\n\n\t// Close the type definition and 
return the result\n\tbuf.WriteString(\"}\")\n\tbuf.WriteString(\"\\n\")\n\n\t// if the user included the JSON operator flag, add the implementation\n\tif b.IncludeJSONMarshal {\n\t\t// Now we implement at json.Marshaler implementation for each specific type that\n\t\t// removes any nested JSON types that might exist.\n\t\t//\n\t\t// We do this by enumerating every field in the type and check to see\n\t\t// if it's got a zero value.\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\"// MarshalJSON implements the json.Marshaler interface and removes zero values from returned JSON.\")\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"func (b %s) MarshalJSON() ([]byte, error) {\",\n\t\t\t\tn.TypeIdent().Pascal(),\n\t\t\t),\n\t\t)\n\t\tbuf.WriteString(\"\\n\")\n\n\t\t// Define the result struct we will populate non-zero fields with\n\t\tbuf.WriteString(\"\\tres := map[string]interface{}{}\")\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\"\\n\")\n\n\t\t// enumerate the fields for the object fields\n\t\tfor _, fieldName := range fieldKeys {\n\t\t\tfield := n.Children[fieldName]\n\t\t\tbuf.WriteString(\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"\\tif val := reflect.ValueOf(b.%s); !val.IsZero() {\", field.FieldIdent().Pascal(),\n\t\t\t\t),\n\t\t\t)\n\t\t\tbuf.WriteString(\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"\\t\\tres[\\\"%s\\\"] = b.%s\",\n\t\t\t\t\tfield.Name,\n\t\t\t\t\tfield.FieldIdent().Pascal(),\n\t\t\t\t),\n\t\t\t)\n\t\t\tbuf.WriteString(\"\\t}\")\n\t\t\tbuf.WriteString(\"\\n\")\n\t\t\tbuf.WriteString(\"\\n\")\n\t\t}\n\n\t\t// add a line spacer and return the marshaled JSON result\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\"\\treturn json.Marshal(res)\")\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\"}\")\n\t\tbuf.WriteString(\"\\n\")\n\t}\n\n\treturn buf.String(), nil\n}", "func contextFields(lvl ...int) Fields {\n\tlevel := 2\n\tif len(lvl) == 1 {\n\t\tlevel = lvl[0]\n\t}\n\tpc, file, line, _ := 
runtime.Caller(level)\n\t_, fileName := path.Split(file)\n\tparts := strings.Split(runtime.FuncForPC(pc).Name(), \".\")\n\tpl := len(parts)\n\tpackageName := \"\"\n\tfuncName := parts[pl-1]\n\n\tif len(parts) >= 0 && pl-2 < len(parts) {\n\t\tif parts[pl-2][0] == '(' {\n\t\t\tfuncName = parts[pl-2] + \".\" + funcName\n\t\t\tpackageName = strings.Join(parts[0:pl-2], \".\")\n\t\t} else {\n\t\t\tpackageName = strings.Join(parts[0:pl-1], \".\")\n\t\t}\n\n\t\tpkgs := strings.Split(packageName, \"/sigma/\")\n\t\tif len(pkgs) > 1 {\n\t\t\tpackageName = pkgs[1]\n\t\t}\n\t}\n\n\treturn Fields{\n\t\t\"package\": packageName,\n\t\t\"file\": fileName,\n\t\t\"func\": funcName,\n\t\t\"line\": line,\n\t}\n}", "func _fields(args ...interface{}) *ast.FieldList {\n\tlist := []*ast.Field{}\n\tnames := []*ast.Ident{}\n\tlasti := interface{}(nil)\n\tmaybePop := func() {\n\t\tif len(names) > 0 {\n\t\t\tvar last ast.Expr\n\t\t\tif lastte_, ok := lasti.(string); ok {\n\t\t\t\tlast = _x(lastte_)\n\t\t\t} else {\n\t\t\t\tlast = lasti.(ast.Expr)\n\t\t\t}\n\t\t\tlist = append(list, &ast.Field{\n\t\t\t\tNames: names,\n\t\t\t\tType: last,\n\t\t\t})\n\t\t\tnames = []*ast.Ident{}\n\t\t}\n\t}\n\tfor i := 0; i < len(args); i++ {\n\t\tname, ok := args[i].(*ast.Ident)\n\t\tif !ok {\n\t\t\tname = _i(args[i].(string))\n\t\t}\n\t\tte_ := args[i+1]\n\t\ti += 1\n\t\t// NOTE: This comparison could be improved, to say, deep equality,\n\t\t// but is that the behavior we want?\n\t\tif lasti == te_ {\n\t\t\tnames = append(names, name)\n\t\t\tcontinue\n\t\t} else {\n\t\t\tmaybePop()\n\t\t\tnames = append(names, name)\n\t\t\tlasti = te_\n\t\t}\n\t}\n\tmaybePop()\n\treturn &ast.FieldList{\n\t\tList: list,\n\t}\n}", "func (m *AccessPackage) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"accessPackagesIncompatibleWith\"] = 
i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateAccessPackageFromDiscriminatorValue , m.SetAccessPackagesIncompatibleWith)\n res[\"assignmentPolicies\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateAccessPackageAssignmentPolicyFromDiscriminatorValue , m.SetAssignmentPolicies)\n res[\"catalog\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetObjectValue(CreateAccessPackageCatalogFromDiscriminatorValue , m.SetCatalog)\n res[\"createdDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetCreatedDateTime)\n res[\"description\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDescription)\n res[\"displayName\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDisplayName)\n res[\"incompatibleAccessPackages\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateAccessPackageFromDiscriminatorValue , m.SetIncompatibleAccessPackages)\n res[\"incompatibleGroups\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateGroupFromDiscriminatorValue , m.SetIncompatibleGroups)\n res[\"isHidden\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetBoolValue(m.SetIsHidden)\n res[\"modifiedDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetModifiedDateTime)\n return res\n}", "func genArguments(args []*ast.InputValueDefinition) *jen.Statement {\n\t//\n\t// Generate config for arguments\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(\n\t// \"style is stylish\"\n\t// style: NameComponentsStyle = SHORT,\n\t// ): String!\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// FieldConfigArgument{\n\t// \"style\": &ArgumentConfig{ ... 
}\n\t// },\n\t//\n\treturn jen.Qual(defsPkg, \"FieldConfigArgument\").Values(\n\t\tjen.DictFunc(func(d jen.Dict) {\n\t\t\tfor _, arg := range args {\n\t\t\t\td[jen.Lit(arg.Name.Value)] = genArgument(arg)\n\t\t\t}\n\t\t}),\n\t)\n}", "func (*Base) ObjectFields(p ASTPass, fields *ast.ObjectFields, ctx Context) {\n\tfor i := range *fields {\n\t\tp.ObjectField(p, &(*fields)[i], ctx)\n\t}\n}", "func printStructField(t *reflect.Type) {\n fieldNum := (*t).NumField()\n for i := 0; i < fieldNum; i++ {\n fmt.Printf(\"conf's field: %s\\n\", (*t).Field(i).Name)\n }\n fmt.Println(\"\")\n}", "func MapFieldsToNameExpr(args ...*ast.Field) []ast.Expr {\n\tresult := make([]ast.Expr, 0, len(args))\n\tfor _, f := range args {\n\t\tresult = append(result, MapIdentToExpr(f.Names...)...)\n\t}\n\treturn result\n}", "func (g *Generator) generate(typeInfo typeInfo) {\n\t// <key, value>\n\tvalues := make([]Value, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeInfo = typeInfo\n\t\tfile.values = nil\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tvalues = append(values, file.values...)\n\t\t}\n\t}\n\n\tif len(values) == 0 {\n\t\tlog.Fatalf(\"no values defined for type %+v\", typeInfo)\n\t}\n\tg.transformValueNames(values, transformMethod)\n\t// Generate code that will fail if the constants change value.\n\tfor _, im := range checkImportPackages {\n\t\tg.Printf(stringImport, im)\n\t}\n\n\tif useNew {\n\t\tfor _, im := range newImportPackages {\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\tif useBinary {\n\t\tfor _, im := range binaryImportPackages {\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\tif useJson {\n\t\tfor _, im := range jsonImportPackages {\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\tif useText {\n\t\tfor _, im := range textImportPackages {\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\tif useYaml {\n\t\tfor _, im := range yamlImportPackages {\n\t\t\tg.Printf(stringImport, 
im)\n\t\t}\n\t}\n\tif useSql {\n\t\tfor _, im := range sqlImportPackages {\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\n\tg.buildEnumRegenerateCheck(values)\n\n\truns := splitIntoRuns(values)\n\tthreshold := 10\n\n\tif useString {\n\t\t// The decision of which pattern to use depends on the number of\n\t\t// runs in the numbers. If there's only one, it's easy. For more than\n\t\t// one, there's a tradeoff between complexity and size of the data\n\t\t// and code vs. the simplicity of a map. A map takes more space,\n\t\t// but so does the code. The decision here (crossover at 10) is\n\t\t// arbitrary, but considers that for large numbers of runs the cost\n\t\t// of the linear scan in the switch might become important, and\n\t\t// rather than use yet another algorithm such as binary search,\n\t\t// we punt and use a map. In any case, the likelihood of a map\n\t\t// being necessary for any realistic example other than bitmasks\n\t\t// is very low. And bitmasks probably deserve their own analysis,\n\t\t// to be done some other day.\n\t\tswitch {\n\t\tcase len(runs) == 1:\n\t\t\tg.buildOneRun(runs, typeInfo)\n\t\tcase len(runs) <= threshold:\n\t\t\tg.buildMultipleRuns(runs, typeInfo)\n\t\tdefault:\n\t\t\tg.buildMap(runs, typeInfo)\n\t\t}\n\t}\n\n\tif useNew {\n\t\tg.Printf(newTemplate, typeInfo.Name)\n\t}\n\tif useBinary {\n\t\tg.buildCheck(runs, typeInfo.Name, threshold)\n\t\tg.Printf(binaryTemplate, typeInfo.Name)\n\t}\n\tif useJson {\n\t\tg.buildCheck(runs, typeInfo.Name, threshold)\n\t\tg.Printf(jsonTemplate, typeInfo.Name)\n\t}\n\tif useText {\n\t\tg.buildCheck(runs, typeInfo.Name, threshold)\n\t\tg.Printf(textTemplate, typeInfo.Name)\n\t}\n\tif useYaml {\n\t\tg.buildCheck(runs, typeInfo.Name, threshold)\n\t\tg.Printf(yamlTemplate, typeInfo.Name)\n\t}\n\tif useSql {\n\t\tg.buildCheck(runs, typeInfo.Name, threshold)\n\t\tg.Printf(sqpTemplate, typeInfo.Name)\n\t}\n\n\tif useContains {\n\t\tg.Printf(containsTemplate, typeInfo.Name)\n\t}\n}", "func compileField(sf 
reflect.StructField, name string) interface{} {\n\tf := field{sField: sf.Index[0]}\n\n\tf.name = []byte(name)\n\n\tswitch sf.Type.Kind() {\n\tcase reflect.Struct:\n\t\treturn fieldStruct{f.sField, f.name, compileStruct(sf.Type)}\n\tcase reflect.Bool:\n\t\tf.write = encodeBool\n\t\tf.read = decodeBool\n\t\tf.requiredType = 1\n\tcase reflect.Int8:\n\t\tf.write = encodeInt8\n\t\tf.read = decodeInt8\n\t\tf.requiredType = 1\n\tcase reflect.Int16:\n\t\tf.write = encodeInt16\n\t\tf.read = decodeInt16\n\t\tf.requiredType = 2\n\tcase reflect.Int32:\n\t\tf.write = encodeInt32\n\t\tf.read = decodeInt32\n\t\tf.requiredType = 3\n\tcase reflect.Int64:\n\t\tf.write = encodeInt64\n\t\tf.read = decodeInt64\n\t\tf.requiredType = 4\n\tcase reflect.String:\n\t\tf.write = encodeString\n\t\tf.read = decodeString\n\t\tf.requiredType = 8\n\tcase reflect.Map:\n\t\tf.requiredType = 10\n\t\telem := sf.Type.Elem()\n\t\tvar elemField interface{}\n\t\tname := \"map:\" + sf.Name\n\t\tif elem.Kind() != reflect.Interface {\n\t\t\telemField = compileField(reflect.StructField{Type: elem, Index: []int{0}}, name)\n\t\t}\n\t\tf.write = func(w io.Writer, en *msgEncoder, fi reflect.Value) error {\n\t\t\tkeys := fi.MapKeys()\n\t\t\tfor _, key := range keys {\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tv := fi.MapIndex(key)\n\t\t\t\t\twritePrefix(en, w, []byte(key.String()), f.requiredType)\n\t\t\t\t\terr := f.write(w, en, v)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif elemField == nil {\n\t\t\t\t\t\tv := fi.MapIndex(key).Elem()\n\t\t\t\t\t\ttemp := compileField(reflect.StructField{Type: v.Type(), Index: []int{0}}, \"\")\n\t\t\t\t\t\tif f, ok := temp.(field); ok {\n\t\t\t\t\t\t\twritePrefix(en, w, []byte(key.String()), f.requiredType)\n\t\t\t\t\t\t\terr := f.write(w, en, v)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\twritePrefix(en, w, []byte(key.String()), 
10)\n\t\t\t\t\t\t\tfs := temp.(fieldStruct)\n\t\t\t\t\t\t\terr := write(w, en, fs.m, v)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\twritePrefix(en, w, []byte(key.String()), 10)\n\t\t\t\t\t\tfs := elemField.(fieldStruct)\n\t\t\t\t\t\tv := fi.MapIndex(key)\n\t\t\t\t\t\terr := write(w, en, fs.m, v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t}\n\t\t\tbs := en.b[:1]\n\t\t\tbs[0] = 0\n\t\t\t_, err := w.Write(bs)\n\t\t\treturn err\n\t\t}\n\t\tf.read = func(r io.Reader, de *msgDecoder, fi reflect.Value) error {\n\n\t\t\tma := reflect.MakeMap(sf.Type)\n\n\t\t\tname, t, err := readPrefix(r, de)\n\t\t\tfor ; t != 0; name, t, err = readPrefix(r, de) {\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tkeyVal := reflect.ValueOf(name)\n\n\t\t\t\tvar val reflect.Value\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tval = reflect.New(elem)\n\t\t\t\t\terr := f.read(r, de, val)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif elemField == nil {\n\t\t\t\t\t\tv, err := fallbackRead(r, de)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tval = reflect.ValueOf(v)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tval = reflect.New(elem)\n\t\t\t\t\t\tfs := elemField.(fieldStruct)\n\t\t\t\t\t\terr := read(r, de, fs.m, val)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tma.SetMapIndex(keyVal, val)\n\t\t\t}\n\t\t\tfi.Set(ma)\n\t\t\treturn nil\n\t\t}\n\tcase reflect.Slice:\n\t\tf.requiredType = 9\n\t\telem := sf.Type.Elem()\n\t\tswitch elem.Kind() {\n\t\tcase reflect.Uint8: //Short-cut for byte arrays\n\t\t\tf.requiredType = 7\n\t\t\tf.write = func(w io.Writer, en *msgEncoder, fi reflect.Value) error {\n\t\t\t\tl := fi.Len()\n\t\t\t\tbs := en.b[:4]\n\t\t\t\tbinary.BigEndian.PutUint32(bs, uint32(l))\n\t\t\t\t_, err 
:= w.Write(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t_, err = w.Write(fi.Bytes())\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tf.read = func(r io.Reader, de *msgDecoder, fi reflect.Value) error {\n\t\t\t\tbs := de.b[:4]\n\t\t\t\t_, err := r.Read(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tl := binary.BigEndian.Uint32(bs)\n\t\t\t\tout := make([]byte, l)\n\t\t\t\t_, err = r.Read(out)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tfi.SetBytes(out)\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase reflect.Int32: //Short-cut for int32 arrays\n\t\t\tf.requiredType = 11\n\t\t\tf.write = func(w io.Writer, en *msgEncoder, fi reflect.Value) error {\n\t\t\t\tl := fi.Len()\n\t\t\t\tbs := en.b[:4]\n\t\t\t\tbinary.BigEndian.PutUint32(bs, uint32(l))\n\t\t\t\t_, err := w.Write(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tdata := fi.Interface().([]int32)\n\t\t\t\tfor i := range data {\n\t\t\t\t\tbinary.BigEndian.PutUint32(bs, uint32(data[i]))\n\t\t\t\t\t_, err := w.Write(bs)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tf.read = func(r io.Reader, de *msgDecoder, fi reflect.Value) error {\n\t\t\t\tbs := de.b[:4]\n\t\t\t\t_, err := r.Read(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tl := binary.BigEndian.Uint32(bs)\n\t\t\t\tout := make([]int32, l)\n\t\t\t\tfor i := range out {\n\t\t\t\t\t_, err := r.Read(bs)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tout[i] = int32(binary.BigEndian.Uint32(bs))\n\t\t\t\t}\n\t\t\t\tfi.Set(reflect.ValueOf(out))\n\t\t\t\treturn nil\n\t\t\t}\n\t\tdefault:\n\t\t\tname := \"slice:\" + sf.Name\n\t\t\telemField := compileField(reflect.StructField{Type: elem, Index: []int{0}}, name)\n\t\t\tf.write = func(w io.Writer, en *msgEncoder, fi reflect.Value) error {\n\t\t\t\tl := fi.Len()\n\t\t\t\tbs := en.b[:5]\n\t\t\t\tbinary.BigEndian.PutUint32(bs[1:], 
uint32(l))\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tbs[0] = f.requiredType\n\t\t\t\t} else {\n\t\t\t\t\tbs[0] = 10\n\t\t\t\t}\n\t\t\t\t_, err := w.Write(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tfor i := 0; i < l; i++ {\n\t\t\t\t\t\tv := fi.Index(i)\n\t\t\t\t\t\terr := f.write(w, en, v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tf := elemField.(fieldStruct)\n\t\t\t\t\tfor i := 0; i < l; i++ {\n\t\t\t\t\t\tv := fi.Index(i)\n\t\t\t\t\t\terr := write(w, en, f.m, v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tf.read = func(r io.Reader, de *msgDecoder, fi reflect.Value) error {\n\t\t\t\tbs := de.b[:5]\n\t\t\t\t_, err := r.Read(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tif bs[0] != f.requiredType {\n\t\t\t\t\t\treturn ErrorIncorrectType\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif bs[0] != 10 {\n\t\t\t\t\t\treturn ErrorIncorrectType\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tl := int(binary.BigEndian.Uint32(bs[1:]))\n\t\t\t\tval := reflect.MakeSlice(sf.Type, l, l)\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tfor i := 0; i < l; i++ {\n\t\t\t\t\t\tv := val.Index(i)\n\t\t\t\t\t\terr := f.read(r, de, v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tf := elemField.(fieldStruct)\n\t\t\t\t\tfor i := 0; i < l; i++ {\n\t\t\t\t\t\tv := val.Index(i)\n\t\t\t\t\t\terr := read(r, de, f.m, v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfi.Set(val)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\tcase reflect.Float32:\n\t\tf.requiredType = 5\n\t\tf.write = encodeFloat32\n\t\tf.read = decodeFloat32\n\tcase reflect.Float64:\n\t\tf.requiredType = 6\n\t\tf.write 
= encodeFloat64\n\t\tf.read = decodeFloat64\n\tdefault:\n\t\tpanic(fmt.Errorf(\"Unhandled type %s for %s\", sf.Type.Kind().String(), sf.Name))\n\t}\n\treturn f\n}", "func (m *EdiscoverySearch) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Search.GetFieldDeserializers()\n res[\"additionalSources\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateDataSourceFromDiscriminatorValue , m.SetAdditionalSources)\n res[\"addToReviewSetOperation\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetObjectValue(CreateEdiscoveryAddToReviewSetOperationFromDiscriminatorValue , m.SetAddToReviewSetOperation)\n res[\"custodianSources\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateDataSourceFromDiscriminatorValue , m.SetCustodianSources)\n res[\"dataSourceScopes\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetEnumValue(ParseDataSourceScopes , m.SetDataSourceScopes)\n res[\"lastEstimateStatisticsOperation\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetObjectValue(CreateEdiscoveryEstimateOperationFromDiscriminatorValue , m.SetLastEstimateStatisticsOperation)\n res[\"noncustodialSources\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateEdiscoveryNoncustodialDataSourceFromDiscriminatorValue , m.SetNoncustodialSources)\n return res\n}", "func Struct(rt reflect.Type, gens map[string]gopter.Gen) gopter.Gen {\n\tif rt.Kind() == reflect.Ptr {\n\t\trt = rt.Elem()\n\t}\n\tif rt.Kind() != reflect.Struct {\n\t\treturn Fail(rt)\n\t}\n\tfieldGens := []gopter.Gen{}\n\tfieldTypes := []reflect.Type{}\n\tassignable := reflect.New(rt).Elem()\n\tfor i := 0; i < rt.NumField(); i++ {\n\t\tfieldName := rt.Field(i).Name\n\t\tif !assignable.Field(i).CanSet() 
{\n\t\t\tcontinue\n\t\t}\n\n\t\tgen := gens[fieldName]\n\t\tif gen != nil {\n\t\t\tfieldGens = append(fieldGens, gen)\n\t\t\tfieldTypes = append(fieldTypes, rt.Field(i).Type)\n\t\t}\n\t}\n\n\tbuildStructType := reflect.FuncOf(fieldTypes, []reflect.Type{rt}, false)\n\tunbuildStructType := reflect.FuncOf([]reflect.Type{rt}, fieldTypes, false)\n\n\tbuildStructFunc := reflect.MakeFunc(buildStructType, func(args []reflect.Value) []reflect.Value {\n\t\tresult := reflect.New(rt)\n\t\tfor i := 0; i < rt.NumField(); i++ {\n\t\t\tif _, ok := gens[rt.Field(i).Name]; !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !assignable.Field(i).CanSet() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresult.Elem().Field(i).Set(args[0])\n\t\t\targs = args[1:]\n\t\t}\n\t\treturn []reflect.Value{result.Elem()}\n\t})\n\tunbuildStructFunc := reflect.MakeFunc(unbuildStructType, func(args []reflect.Value) []reflect.Value {\n\t\ts := args[0]\n\t\tresults := []reflect.Value{}\n\t\tfor i := 0; i < s.NumField(); i++ {\n\t\t\tif _, ok := gens[rt.Field(i).Name]; !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !assignable.Field(i).CanSet() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresults = append(results, s.Field(i))\n\t\t}\n\t\treturn results\n\t})\n\n\treturn gopter.DeriveGen(\n\t\tbuildStructFunc.Interface(),\n\t\tunbuildStructFunc.Interface(),\n\t\tfieldGens...,\n\t)\n}", "func typeFields(t reflect.Type) []field {\n\t// Anonymous fields to explore at the current level and the next.\n\tcurrent := []field{}\n\tnext := []field{{typ: t}}\n\n\t// Count of queued names for current level and the next.\n\tcount := map[reflect.Type]int{}\n\tnextCount := map[reflect.Type]int{}\n\n\t// Types already visited at an earlier level.\n\tvisited := map[reflect.Type]bool{}\n\n\t// Fields found.\n\tvar fields []field\n\n\tfor len(next) > 0 {\n\t\tcurrent, next = next, current[:0]\n\t\tcount, nextCount = nextCount, map[reflect.Type]int{}\n\n\t\tfor _, f := range current {\n\t\t\tif visited[f.typ] 
{\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvisited[f.typ] = true\n\n\t\t\t// Scan f.typ for fields to include.\n\t\t\tfor i := 0; i < f.typ.NumField(); i++ {\n\t\t\t\tsf := f.typ.Field(i)\n\t\t\t\tisUnexported := sf.PkgPath != \"\"\n\t\t\t\tif sf.Anonymous {\n\t\t\t\t\tt := sf.Type\n\t\t\t\t\tif isUnexported && t.Kind() != reflect.Struct {\n\t\t\t\t\t\t// Ignore embedded fields of unexported non-struct types.\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\t// Do not ignore embedded fields of unexported struct types\n\t\t\t\t\t// since they may have exported fields.\n\t\t\t\t} else if isUnexported {\n\t\t\t\t\t// Ignore unexported non-embedded fields.\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tindex := make([]int, len(f.index)+1)\n\t\t\t\tcopy(index, f.index)\n\t\t\t\tindex[len(f.index)] = i\n\n\t\t\t\tft := sf.Type\n\n\t\t\t\t// Record found field and index sequence.\n\t\t\t\tif !sf.Anonymous || ft.Kind() != reflect.Struct {\n\t\t\t\t\tfields = append(fields, field{\n\t\t\t\t\t\tname: sf.Name,\n\t\t\t\t\t\tindex: index,\n\t\t\t\t\t\ttyp: ft,\n\t\t\t\t\t})\n\t\t\t\t\tif count[f.typ] > 1 {\n\t\t\t\t\t\t// If there were multiple instances, add a second,\n\t\t\t\t\t\t// so that the annihilation code will see a duplicate.\n\t\t\t\t\t\t// It only cares about the distinction between 1 or 2,\n\t\t\t\t\t\t// so don't bother generating any more copies.\n\t\t\t\t\t\tfields = append(fields, fields[len(fields)-1])\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Record new anonymous struct to explore in next round.\n\t\t\t\tnextCount[ft]++\n\t\t\t\tif nextCount[ft] == 1 {\n\t\t\t\t\tnext = append(next, field{name: ft.Name(), index: index, typ: ft})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tsort.Sort(byIndex(fields))\n\n\treturn fields\n}", "func GenerateGoCode(preamble string, mainDefAddr string, includeDirectories []string, generate_tests bool) error {\n\n\toutDefs, version, err := XMLToFields(mainDefAddr, includeDirectories)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// merge 
enums together\n\tenums := make(map[string]*OutEnum)\n\tfor _, def := range outDefs {\n\t\tfor _, defEnum := range def.Enums {\n\t\t\tif _, ok := enums[defEnum.Name]; !ok {\n\t\t\t\tenums[defEnum.Name] = &OutEnum{\n\t\t\t\t\tName: defEnum.Name,\n\t\t\t\t\tDescription: defEnum.Description,\n\t\t\t\t}\n\t\t\t}\n\t\t\tenum := enums[defEnum.Name]\n\n\t\t\tfor _, v := range defEnum.Values {\n\t\t\t\tenum.Values = append(enum.Values, v)\n\t\t\t}\n\t\t}\n\t}\n\n\t// fill enum missing values\n\tfor _, enum := range enums {\n\t\tnextVal := 0\n\t\tfor _, v := range enum.Values {\n\t\t\tif v.Value != \"\" {\n\t\t\t\tnextVal, _ = strconv.Atoi(v.Value)\n\t\t\t\tnextVal++\n\t\t\t} else {\n\t\t\t\tv.Value = strconv.Itoa(nextVal)\n\t\t\t\tnextVal++\n\t\t\t}\n\t\t}\n\t}\n\n\t// get package name\n\t// remove underscores since they can lead to errors\n\t// (for instance, when package name ends with _test)\n\t_, inFile := filepath.Split(mainDefAddr)\n\tpkgName := strings.TrimSuffix(inFile, \".xml\")\n\n\t// dump\n\tif generate_tests {\n\t\treturn tplDialectTest.Execute(os.Stdout, map[string]interface{}{\n\t\t\t\"PkgName\": pkgName,\n\t\t\t\"Preamble\": preamble,\n\t\t\t\"Version\": func() int {\n\t\t\t\tret := int(version)\n\t\t\t\treturn ret\n\t\t\t}(),\n\t\t\t\"Defs\": outDefs,\n\t\t\t\"Enums\": enums,\n\t\t})\n\t} else {\n\t\treturn tplDialect.Execute(os.Stdout, map[string]interface{}{\n\t\t\t\"PkgName\": pkgName,\n\t\t\t\"Preamble\": preamble,\n\t\t\t\"Version\": func() int {\n\t\t\t\tret := int(version)\n\t\t\t\treturn ret\n\t\t\t}(),\n\t\t\t\"Defs\": outDefs,\n\t\t\t\"Enums\": enums,\n\t\t})\n\t}\n}", "func (s *DbRecorder) scanFields(ar Record) {\n\tv := reflect.Indirect(reflect.ValueOf(ar))\n\tt := v.Type()\n\tcount := t.NumField()\n\tkeys := make([]*field, 0, 2)\n\n\tfor i := 0; i < count; i++ {\n\t\tf := t.Field(i)\n\t\t// Skip fields with no tag.\n\t\tif len(f.Tag) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tsqtag := f.Tag.Get(\"stbl\")\n\t\tif len(sqtag) == 0 
{\n\t\t\tcontinue\n\t\t}\n\n\t\tparts := s.parseTag(f.Name, sqtag)\n\t\tfield := new(field)\n\t\tfield.name = f.Name\n\t\tfield.column = parts[0]\n\t\tfor _, part := range parts[1:] {\n\t\t\tpart = strings.TrimSpace(part)\n\t\t\tswitch part {\n\t\t\tcase \"PRIMARY_KEY\", \"PRIMARY KEY\":\n\t\t\t\tfield.isKey = true\n\t\t\t\tkeys = append(keys, field)\n\t\t\tcase \"AUTO_INCREMENT\", \"SERIAL\", \"AUTO INCREMENT\":\n\t\t\t\tfield.isAuto = true\n\t\t\t}\n\t\t}\n\t\ts.fields = append(s.fields, field)\n\t\ts.key = keys\n\t}\n}", "func (Builder) Fields() []ent.Field {\n\treturn nil\n}", "func (a *api) h_POST_orgs_orgId_fields(c *gin.Context) {\n\torgId, err := parseInt64Param(c, \"orgId\")\n\ta.logger.Debug(\"POST /orgs/\", orgId, \"/fields\")\n\tif a.errorResponse(c, err) {\n\t\treturn\n\t}\n\n\taCtx := a.getAuthContext(c)\n\tif a.errorResponse(c, aCtx.AuthZOrgAdmin(orgId)) {\n\t\treturn\n\t}\n\n\tvar mis OrgMetaInfoArr\n\tif a.errorResponse(c, bindAppJson(c, &mis)) {\n\t\treturn\n\t}\n\n\tfis := a.metaInfos2FieldInfos(mis, orgId)\n\tif a.errorResponse(c, a.Dc.InsertNewFields(orgId, fis)) {\n\t\treturn\n\t}\n\n\ta.logger.Info(\"New fields were added for orgId=\", orgId, \" \", fis)\n\tc.Status(http.StatusCreated)\n}", "func ReflectFieldsFq(\n\tt reflect.Type,\n\ttypeMap TypeMap,\n\texclude ExcludeFieldTag,\n) graphql.Fields {\n\tif t.Kind() != reflect.Struct {\n\t\tpanic(fmt.Sprintf(`ReflectFieldsFq can only work on struct types.\n\t\t\tReceived instead %s`, t.Kind()))\n\t}\n\tfields := make(graphql.Fields)\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tf := t.Field(i)\n\t\tif includeField(f, exclude) {\n\t\t\tname := GqlName(GetFieldFirstTag(f, \"json\"))\n\t\t\tfields[string(name)] = ReflectFieldFq(name, f.Type, typeMap, exclude)\n\t\t}\n\t}\n\treturn fields\n}", "func (m *ProgramControl) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n 
res[\"controlId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetControlId(val)\n }\n return nil\n }\n res[\"controlTypeId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetControlTypeId(val)\n }\n return nil\n }\n res[\"createdDateTime\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCreatedDateTime(val)\n }\n return nil\n }\n res[\"displayName\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDisplayName(val)\n }\n return nil\n }\n res[\"owner\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateUserIdentityFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOwner(val.(UserIdentityable))\n }\n return nil\n }\n res[\"program\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateProgramFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetProgram(val.(Programable))\n }\n return nil\n }\n res[\"programId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetProgramId(val)\n }\n return nil\n }\n res[\"resource\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := 
n.GetObjectValue(CreateProgramResourceFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetResource(val.(ProgramResourceable))\n }\n return nil\n }\n res[\"status\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetStatus(val)\n }\n return nil\n }\n return res\n}", "func (def *Definition) Fieldnames() []string {\n\ttypeList := make([]string, 0)\n\tt := TraverserMethods{EnterFunction: func(adaType IAdaType, parentType IAdaType, level int, x interface{}) error {\n\t\ttypeList = append(typeList, adaType.Name())\n\t\treturn nil\n\t}}\n\n\t_ = def.TraverseTypes(t, true, typeList)\n\treturn typeList\n}", "func (m *AccessPackageCatalog) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"accessPackages\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateAccessPackageFromDiscriminatorValue , m.SetAccessPackages)\n res[\"catalogType\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetEnumValue(ParseAccessPackageCatalogType , m.SetCatalogType)\n res[\"createdDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetCreatedDateTime)\n res[\"description\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDescription)\n res[\"displayName\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDisplayName)\n res[\"isExternallyVisible\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetBoolValue(m.SetIsExternallyVisible)\n res[\"modifiedDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetModifiedDateTime)\n res[\"state\"] = 
i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetEnumValue(ParseAccessPackageCatalogState , m.SetState)\n return res\n}", "func generateMembers(fields []libtypes.Field) *C.GoMembers {\n\n\tmemberArray := make([]C.GoMember, 0)\n\tfor _, field := range fields {\n\n\t\t// Create a go member\n\t\tmember := C.GoMember{}\n\n\t\t// Name of field\n\t\tmemberName := C.CString(field.Name)\n\t\tdefer C.free(unsafe.Pointer(memberName))\n\n\t\t// Array information\n\t\tmember.is_array_ = C.bool(field.IsArray)\n\t\tmember.array_size_ = C.size_t(field.ArrayLen)\n\n\t\tswitch field.Type {\n\t\tcase \"int8\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_INT8\n\t\tcase \"uint8\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_UINT8\n\t\tcase \"int16\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_INT16\n\t\tcase \"uint16\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_UINT16\n\t\tcase \"int32\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_INT32\n\t\tcase \"uint32\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_UINT32\n\t\tcase \"int64\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_INT64\n\t\tcase \"uint64\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_UINT64\n\t\tcase \"float32\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_FLOAT32\n\t\tcase \"float64\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_DOUBLE\n\t\tcase \"string\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_STRING\n\t\tcase \"bool\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_BOOL\n\t\tcase \"char\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_CHAR\n\t\tcase \"byte\":\n\t\t\tmember.type_id_ = 
C.rosidl_typesupport_introspection_c__ROS_TYPE_BYTE\n\t\t// Note: Time and Duration are builtin MESSAGE types\n\t\tdefault:\n\t\t\t// We need to generated nested fields\n\t\t\tmsgType, _ := newDynamicMessageTypeNested(field.Type, field.Package)\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_MESSAGE\n\t\t\t// Member field takes a typesupport definition\n\t\t\tmember.members_ = msgType.rosType\n\t\t}\n\n\t\tmemberArray = append(memberArray, member)\n\t}\n\n\tmembers := C.GoMembers{}\n\n\tmembers.member_array = memberArray\n\n\treturn &members\n}", "func (m *ReportRoot) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"appCredentialSignInActivities\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateAppCredentialSignInActivityFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]AppCredentialSignInActivityable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(AppCredentialSignInActivityable)\n }\n }\n m.SetAppCredentialSignInActivities(res)\n }\n return nil\n }\n res[\"applicationSignInDetailedSummary\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateApplicationSignInDetailedSummaryFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]ApplicationSignInDetailedSummaryable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(ApplicationSignInDetailedSummaryable)\n }\n }\n m.SetApplicationSignInDetailedSummary(res)\n }\n return nil\n }\n res[\"authenticationMethods\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := 
n.GetObjectValue(CreateAuthenticationMethodsRootFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetAuthenticationMethods(val.(AuthenticationMethodsRootable))\n }\n return nil\n }\n res[\"credentialUserRegistrationDetails\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateCredentialUserRegistrationDetailsFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]CredentialUserRegistrationDetailsable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(CredentialUserRegistrationDetailsable)\n }\n }\n m.SetCredentialUserRegistrationDetails(res)\n }\n return nil\n }\n res[\"dailyPrintUsage\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreatePrintUsageFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]PrintUsageable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(PrintUsageable)\n }\n }\n m.SetDailyPrintUsage(res)\n }\n return nil\n }\n res[\"dailyPrintUsageByPrinter\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreatePrintUsageByPrinterFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]PrintUsageByPrinterable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(PrintUsageByPrinterable)\n }\n }\n m.SetDailyPrintUsageByPrinter(res)\n }\n return nil\n }\n res[\"dailyPrintUsageByUser\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreatePrintUsageByUserFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]PrintUsageByUserable, len(val))\n for i, v := range val {\n 
if v != nil {\n res[i] = v.(PrintUsageByUserable)\n }\n }\n m.SetDailyPrintUsageByUser(res)\n }\n return nil\n }\n res[\"dailyPrintUsageSummariesByPrinter\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreatePrintUsageByPrinterFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]PrintUsageByPrinterable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(PrintUsageByPrinterable)\n }\n }\n m.SetDailyPrintUsageSummariesByPrinter(res)\n }\n return nil\n }\n res[\"dailyPrintUsageSummariesByUser\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreatePrintUsageByUserFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]PrintUsageByUserable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(PrintUsageByUserable)\n }\n }\n m.SetDailyPrintUsageSummariesByUser(res)\n }\n return nil\n }\n res[\"monthlyPrintUsageByPrinter\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreatePrintUsageByPrinterFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]PrintUsageByPrinterable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(PrintUsageByPrinterable)\n }\n }\n m.SetMonthlyPrintUsageByPrinter(res)\n }\n return nil\n }\n res[\"monthlyPrintUsageByUser\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreatePrintUsageByUserFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]PrintUsageByUserable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(PrintUsageByUserable)\n }\n }\n 
m.SetMonthlyPrintUsageByUser(res)\n }\n return nil\n }\n res[\"monthlyPrintUsageSummariesByPrinter\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreatePrintUsageByPrinterFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]PrintUsageByPrinterable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(PrintUsageByPrinterable)\n }\n }\n m.SetMonthlyPrintUsageSummariesByPrinter(res)\n }\n return nil\n }\n res[\"monthlyPrintUsageSummariesByUser\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreatePrintUsageByUserFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]PrintUsageByUserable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(PrintUsageByUserable)\n }\n }\n m.SetMonthlyPrintUsageSummariesByUser(res)\n }\n return nil\n }\n res[\"security\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateSecurityReportsRootFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSecurity(val.(SecurityReportsRootable))\n }\n return nil\n }\n res[\"servicePrincipalSignInActivities\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateServicePrincipalSignInActivityFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]ServicePrincipalSignInActivityable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(ServicePrincipalSignInActivityable)\n }\n }\n m.SetServicePrincipalSignInActivities(res)\n }\n return nil\n }\n res[\"sla\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := 
n.GetObjectValue(CreateServiceLevelAgreementRootFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSla(val.(ServiceLevelAgreementRootable))\n }\n return nil\n }\n res[\"userCredentialUsageDetails\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateUserCredentialUsageDetailsFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]UserCredentialUsageDetailsable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(UserCredentialUsageDetailsable)\n }\n }\n m.SetUserCredentialUsageDetails(res)\n }\n return nil\n }\n return res\n}", "func (m *Store) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"defaultLanguageTag\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDefaultLanguageTag)\n res[\"groups\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateGroupFromDiscriminatorValue , m.SetGroups)\n res[\"languageTags\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfPrimitiveValues(\"string\" , m.SetLanguageTags)\n res[\"sets\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateSetFromDiscriminatorValue , m.SetSets)\n return res\n}", "func generateGoTestBlock_Func(file *File, fe *feparser.FEFunc) []*StatementAndName {\n\t// Seed the random number generator with the hash of the\n\t// FEFunc, so that the numbers in the variable names\n\t// will stay the same as long as the FEFunc is the same.\n\t//rand.Seed(int64(MustHashAnyWithJSON(fe.CodeQL.Blocks)))\n\n\tchildren := make([]*StatementAndName, 0)\n\tfor blockIndex, block := range fe.CodeQL.Blocks {\n\t\tinps, outps, err := 
getIdentitiesByBlock_FEFunc(fe, block)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tfor inpIndex, inp := range inps {\n\t\t\tfor outpIndex, outp := range outps {\n\n\t\t\t\tchildBlock := generateGoChildBlock_Func(\n\t\t\t\t\tfile,\n\t\t\t\t\tfe,\n\t\t\t\t\tinp,\n\t\t\t\t\toutp,\n\t\t\t\t)\n\t\t\t\t{\n\t\t\t\t\tif childBlock != nil {\n\n\t\t\t\t\t\ttestFuncID := \"TaintStepTest_\" + feparser.FormatCodeQlName(fe.PkgPath+\"-\"+fe.Name) + Sf(\"_B%vI%vO%v\", blockIndex, inpIndex, outpIndex)\n\t\t\t\t\t\tenclosed := Func().Id(testFuncID).\n\t\t\t\t\t\t\tParamsFunc(\n\t\t\t\t\t\t\t\tfunc(group *Group) {\n\t\t\t\t\t\t\t\t\tgroup.Add(Id(\"sourceCQL\").Interface())\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t).\n\t\t\t\t\t\t\tInterface().\n\t\t\t\t\t\t\tAdd(childBlock)\n\n\t\t\t\t\t\tchildren = append(children, &StatementAndName{\n\t\t\t\t\t\t\tStatement: enclosed,\n\t\t\t\t\t\t\tTestFuncName: testFuncID,\n\t\t\t\t\t\t})\n\t\t\t\t\t} else {\n\t\t\t\t\t\tWarnf(Sf(\"NOTHING GENERATED; block %v, inp %v, outp %v\", blockIndex, inpIndex, outpIndex))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn children\n}", "func parseFields(criteria *measurev1.QueryRequest, metadata *commonv1.Metadata, groupByEntity bool) logical.UnresolvedPlan {\n\tprojFields := make([]*logical.Field, len(criteria.GetFieldProjection().GetNames()))\n\tfor i, fieldNameProj := range criteria.GetFieldProjection().GetNames() {\n\t\tprojFields[i] = logical.NewField(fieldNameProj)\n\t}\n\ttimeRange := criteria.GetTimeRange()\n\treturn indexScan(timeRange.GetBegin().AsTime(), timeRange.GetEnd().AsTime(), metadata,\n\t\tlogical.ToTags(criteria.GetTagProjection()), projFields, groupByEntity, criteria.GetCriteria())\n}", "func (op *metadataLookup) buildField() {\n\tlengthOfFields := len(op.fields)\n\top.executeCtx.Fields = make(field.Metas, lengthOfFields)\n\n\tidx := 0\n\tfor fieldID := range op.fields {\n\t\tf := op.fields[fieldID]\n\t\top.executeCtx.Fields[idx] = field.Meta{\n\t\t\tID: fieldID,\n\t\t\tType: 
f.DownSampling.GetFieldType(),\n\t\t\tName: f.DownSampling.FieldName(),\n\t\t}\n\t\tidx++\n\t}\n\t// first sort field by field id\n\top.executeCtx.SortFields()\n\t// after sort filed, build aggregation spec\n\top.executeCtx.DownSamplingSpecs = make(aggregation.AggregatorSpecs, lengthOfFields)\n\top.executeCtx.AggregatorSpecs = make(aggregation.AggregatorSpecs, lengthOfFields)\n\tfor fieldIdx, fieldMeta := range op.executeCtx.Fields {\n\t\tf := op.fields[fieldMeta.ID]\n\t\top.executeCtx.DownSamplingSpecs[fieldIdx] = f.DownSampling\n\t\top.executeCtx.AggregatorSpecs[fieldIdx] = f.Aggregator\n\t}\n}", "func handleSpecRecursive(module *common.Module, astFiles *AstFiles, spec interface{}, prefix, aliasPrefix, event string, iterator *common.StructField, dejavu map[string]bool) {\n\tif verbose {\n\t\tfmt.Printf(\"handleSpec spec: %+v, prefix: %s, aliasPrefix %s, event %s, iterator %+v\\n\", spec, prefix, aliasPrefix, event, iterator)\n\t}\n\n\tvar typeSpec *ast.TypeSpec\n\tvar structType *ast.StructType\n\tvar ok bool\n\tif typeSpec, ok = spec.(*ast.TypeSpec); !ok {\n\t\treturn\n\t}\n\tif structType, ok = typeSpec.Type.(*ast.StructType); !ok {\n\t\tlog.Printf(\"Don't know what to do with %s (%s)\", typeSpec.Name, spew.Sdump(typeSpec))\n\t\treturn\n\t}\n\n\tfor _, field := range structType.Fields.List {\n\t\tfieldCommentText := field.Comment.Text()\n\t\tfieldIterator := iterator\n\n\t\tvar tag reflect.StructTag\n\t\tif field.Tag != nil {\n\t\t\ttag = reflect.StructTag(field.Tag.Value[1 : len(field.Tag.Value)-1])\n\t\t}\n\n\t\tif e, ok := tag.Lookup(\"event\"); ok {\n\t\t\tevent = e\n\t\t\tif _, ok = module.EventTypes[e]; !ok {\n\t\t\t\tmodule.EventTypes[e] = common.NewEventTypeMetada()\n\t\t\t\tdejavu = make(map[string]bool) // clear dejavu map when it's a new event type\n\t\t\t}\n\t\t\tif e != \"*\" {\n\t\t\t\tmodule.EventTypes[e].Doc = fieldCommentText\n\t\t\t}\n\t\t}\n\n\t\tif isEmbedded := len(field.Names) == 0; isEmbedded {\n\t\t\tif fieldTag, found := 
tag.Lookup(\"field\"); found && fieldTag == \"-\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tident, _ := field.Type.(*ast.Ident)\n\t\t\tif ident == nil {\n\t\t\t\tif starExpr, ok := field.Type.(*ast.StarExpr); ok {\n\t\t\t\t\tident, _ = starExpr.X.(*ast.Ident)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif ident != nil {\n\t\t\t\tname := ident.Name\n\t\t\t\tif prefix != \"\" {\n\t\t\t\t\tname = prefix + \".\" + ident.Name\n\t\t\t\t}\n\n\t\t\t\tembedded := astFiles.LookupSymbol(ident.Name)\n\t\t\t\tif embedded != nil {\n\t\t\t\t\thandleEmbedded(module, ident.Name, prefix, event, field.Type)\n\t\t\t\t\thandleSpecRecursive(module, astFiles, embedded.Decl, name, aliasPrefix, event, fieldIterator, dejavu)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Printf(\"failed to resolve symbol for %+v in %s\", ident.Name, pkgname)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfieldBasename := field.Names[0].Name\n\t\t\tif !unicode.IsUpper(rune(fieldBasename[0])) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif dejavu[fieldBasename] {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar opOverrides string\n\t\t\tvar fields []seclField\n\t\t\tif tags, err := structtag.Parse(string(tag)); err == nil && len(tags.Tags()) != 0 {\n\t\t\t\topOverrides, fields = parseTags(tags, typeSpec.Name.Name)\n\n\t\t\t\tif opOverrides == \"\" && fields == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\tfields = append(fields, seclField{name: fieldBasename})\n\t\t\t}\n\n\t\t\tfieldType, isPointer, isArray := getFieldIdentName(field.Type)\n\n\t\t\tprefixedFieldName := fieldBasename\n\t\t\tif prefix != \"\" {\n\t\t\t\tprefixedFieldName = fmt.Sprintf(\"%s.%s\", prefix, fieldBasename)\n\t\t\t}\n\n\t\t\tfor _, seclField := range fields {\n\t\t\t\thandleNonEmbedded(module, seclField, prefixedFieldName, event, fieldType, isPointer, isArray)\n\n\t\t\t\tif seclFieldIterator := seclField.iterator; seclFieldIterator != \"\" {\n\t\t\t\t\tfieldIterator = handleIterator(module, seclField, fieldType, seclFieldIterator, aliasPrefix, prefixedFieldName, event, 
fieldCommentText, opOverrides, isPointer, isArray)\n\t\t\t\t}\n\n\t\t\t\tif handler := seclField.handler; handler != \"\" {\n\n\t\t\t\t\thandleFieldWithHandler(module, seclField, aliasPrefix, prefix, prefixedFieldName, fieldType, seclField.containerStructName, event, fieldCommentText, opOverrides, handler, isPointer, isArray, fieldIterator)\n\n\t\t\t\t\tdelete(dejavu, fieldBasename)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif verbose {\n\t\t\t\t\tlog.Printf(\"Don't know what to do with %s: %s\", fieldBasename, spew.Sdump(field.Type))\n\t\t\t\t}\n\n\t\t\t\tdejavu[fieldBasename] = true\n\n\t\t\t\tif len(fieldType) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\talias := seclField.name\n\t\t\t\tif isBasicType(fieldType) {\n\t\t\t\t\thandleBasic(module, seclField, fieldBasename, alias, aliasPrefix, prefix, fieldType, event, opOverrides, fieldCommentText, seclField.containerStructName, fieldIterator, isArray)\n\t\t\t\t} else {\n\t\t\t\t\tspec := astFiles.LookupSymbol(fieldType)\n\t\t\t\t\tif spec != nil {\n\t\t\t\t\t\tnewPrefix, newAliasPrefix := fieldBasename, alias\n\n\t\t\t\t\t\tif prefix != \"\" {\n\t\t\t\t\t\t\tnewPrefix = prefix + \".\" + fieldBasename\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif aliasPrefix != \"\" {\n\t\t\t\t\t\t\tnewAliasPrefix = aliasPrefix + \".\" + alias\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\thandleSpecRecursive(module, astFiles, spec.Decl, newPrefix, newAliasPrefix, event, fieldIterator, dejavu)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlog.Printf(\"failed to resolve symbol for %+v in %s\", fieldType, pkgname)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif !seclField.exposedAtEventRootOnly {\n\t\t\t\t\tdelete(dejavu, fieldBasename)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (b *Builder) QueryFields(source reflect.Value, parent reflect.Value) (graphql.Fields, error) {\n\tresult := make(graphql.Fields, 0)\n\tif source.IsValid() && source.IsZero() {\n\t\tsource = reflect.New(source.Type())\n\t}\n\tnodes := b.buildObject(source, parent)\n\tfor _, node := range nodes {\n\t\tif 
node.skip {\n\t\t\tcontinue\n\t\t}\n\t\tif !node.source.CanSet() {\n\t\t\tcontinue\n\t\t}\n\t\tif node.inputOnly {\n\t\t\tcontinue\n\t\t}\n\t\tname := node.alias\n\t\tif name == \"\" {\n\t\t\tname = strcase.ToLowerCamel(node.name)\n\t\t}\n\t\tvar gType graphql.Type\n\t\tif node.isRelay {\n\t\t\tgType = b.buildConnection(node.source, parent)\n\t\t} else {\n\t\t\tgType = b.mapOutput(node.source, parent)\n\t\t}\n\t\tif gType == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif node.required {\n\t\t\tgType = graphql.NewNonNull(gType)\n\t\t}\n\n\t\tfield := &graphql.Field{\n\t\t\tName: name,\n\t\t\tType: gType,\n\t\t\tDescription: node.description,\n\t\t\tResolve: node.resolver,\n\t\t\tArgs: node.resolverArgs,\n\t\t}\n\t\tresult[name] = field\n\t}\n\treturn result, nil\n}", "func XMLToFields(filePathXML string, includeDirectories []string) ([]*OutDefinition, uint, error) {\n\toutDefs, version, err := do(\"\", filePathXML, includeDirectories)\n\tversionInt, _ := strconv.Atoi(version)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\treturn outDefs, uint(versionInt), nil\n}", "func (m *ParentLabelDetails) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"color\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetColor(val)\n }\n return nil\n }\n res[\"description\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDescription(val)\n }\n return nil\n }\n res[\"id\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n 
return err\n }\n if val != nil {\n m.SetId(val)\n }\n return nil\n }\n res[\"isActive\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetBoolValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetIsActive(val)\n }\n return nil\n }\n res[\"name\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetName(val)\n }\n return nil\n }\n res[\"@odata.type\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOdataType(val)\n }\n return nil\n }\n res[\"parent\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateParentLabelDetailsFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetParent(val.(ParentLabelDetailsable))\n }\n return nil\n }\n res[\"sensitivity\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetInt32Value()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSensitivity(val)\n }\n return nil\n }\n res[\"tooltip\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetTooltip(val)\n }\n return nil\n }\n return res\n}", "func (m *AttackSimulationRoot) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"simulationAutomations\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateSimulationAutomationFromDiscriminatorValue , 
m.SetSimulationAutomations)\n res[\"simulations\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateSimulationFromDiscriminatorValue , m.SetSimulations)\n return res\n}", "func init() {\n\tgroupFields := schema.Group{}.Fields()\n\t_ = groupFields\n\t// groupDescTenant is the schema descriptor for tenant field.\n\tgroupDescTenant := groupFields[0].Descriptor()\n\t// group.TenantValidator is a validator for the \"tenant\" field. It is called by the builders before save.\n\tgroup.TenantValidator = groupDescTenant.Validators[0].(func(string) error)\n\t// groupDescName is the schema descriptor for name field.\n\tgroupDescName := groupFields[1].Descriptor()\n\t// group.NameValidator is a validator for the \"name\" field. It is called by the builders before save.\n\tgroup.NameValidator = groupDescName.Validators[0].(func(string) error)\n\t// groupDescType is the schema descriptor for type field.\n\tgroupDescType := groupFields[2].Descriptor()\n\t// group.TypeValidator is a validator for the \"type\" field. 
It is called by the builders before save.\n\tgroup.TypeValidator = groupDescType.Validators[0].(func(string) error)\n\t// groupDescCreatedAt is the schema descriptor for created_at field.\n\tgroupDescCreatedAt := groupFields[5].Descriptor()\n\t// group.DefaultCreatedAt holds the default value on creation for the created_at field.\n\tgroup.DefaultCreatedAt = groupDescCreatedAt.Default.(func() time.Time)\n\t// groupDescUpdatedAt is the schema descriptor for updated_at field.\n\tgroupDescUpdatedAt := groupFields[6].Descriptor()\n\t// group.DefaultUpdatedAt holds the default value on creation for the updated_at field.\n\tgroup.DefaultUpdatedAt = groupDescUpdatedAt.Default.(func() time.Time)\n\t// group.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.\n\tgroup.UpdateDefaultUpdatedAt = groupDescUpdatedAt.UpdateDefault.(func() time.Time)\n\tnodeFields := schema.Node{}.Fields()\n\t_ = nodeFields\n\t// nodeDescTenant is the schema descriptor for tenant field.\n\tnodeDescTenant := nodeFields[0].Descriptor()\n\t// node.TenantValidator is a validator for the \"tenant\" field. It is called by the builders before save.\n\tnode.TenantValidator = nodeDescTenant.Validators[0].(func(string) error)\n\t// nodeDescName is the schema descriptor for name field.\n\tnodeDescName := nodeFields[1].Descriptor()\n\t// node.NameValidator is a validator for the \"name\" field. It is called by the builders before save.\n\tnode.NameValidator = nodeDescName.Validators[0].(func(string) error)\n\t// nodeDescType is the schema descriptor for type field.\n\tnodeDescType := nodeFields[2].Descriptor()\n\t// node.TypeValidator is a validator for the \"type\" field. 
It is called by the builders before save.\n\tnode.TypeValidator = nodeDescType.Validators[0].(func(string) error)\n\t// nodeDescCreatedAt is the schema descriptor for created_at field.\n\tnodeDescCreatedAt := nodeFields[5].Descriptor()\n\t// node.DefaultCreatedAt holds the default value on creation for the created_at field.\n\tnode.DefaultCreatedAt = nodeDescCreatedAt.Default.(func() time.Time)\n\t// nodeDescUpdatedAt is the schema descriptor for updated_at field.\n\tnodeDescUpdatedAt := nodeFields[6].Descriptor()\n\t// node.DefaultUpdatedAt holds the default value on creation for the updated_at field.\n\tnode.DefaultUpdatedAt = nodeDescUpdatedAt.Default.(func() time.Time)\n\t// node.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.\n\tnode.UpdateDefaultUpdatedAt = nodeDescUpdatedAt.UpdateDefault.(func() time.Time)\n\tpermissionFields := schema.Permission{}.Fields()\n\t_ = permissionFields\n\t// permissionDescTenant is the schema descriptor for tenant field.\n\tpermissionDescTenant := permissionFields[0].Descriptor()\n\t// permission.TenantValidator is a validator for the \"tenant\" field. It is called by the builders before save.\n\tpermission.TenantValidator = permissionDescTenant.Validators[0].(func(string) error)\n\t// permissionDescName is the schema descriptor for name field.\n\tpermissionDescName := permissionFields[1].Descriptor()\n\t// permission.NameValidator is a validator for the \"name\" field. 
It is called by the builders before save.\n\tpermission.NameValidator = permissionDescName.Validators[0].(func(string) error)\n\t// permissionDescCreatedAt is the schema descriptor for created_at field.\n\tpermissionDescCreatedAt := permissionFields[3].Descriptor()\n\t// permission.DefaultCreatedAt holds the default value on creation for the created_at field.\n\tpermission.DefaultCreatedAt = permissionDescCreatedAt.Default.(func() time.Time)\n\t// permissionDescUpdatedAt is the schema descriptor for updated_at field.\n\tpermissionDescUpdatedAt := permissionFields[4].Descriptor()\n\t// permission.DefaultUpdatedAt holds the default value on creation for the updated_at field.\n\tpermission.DefaultUpdatedAt = permissionDescUpdatedAt.Default.(func() time.Time)\n\t// permission.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.\n\tpermission.UpdateDefaultUpdatedAt = permissionDescUpdatedAt.UpdateDefault.(func() time.Time)\n\trouteFields := schema.Route{}.Fields()\n\t_ = routeFields\n\t// routeDescTenant is the schema descriptor for tenant field.\n\trouteDescTenant := routeFields[0].Descriptor()\n\t// route.TenantValidator is a validator for the \"tenant\" field. It is called by the builders before save.\n\troute.TenantValidator = routeDescTenant.Validators[0].(func(string) error)\n\t// routeDescName is the schema descriptor for name field.\n\trouteDescName := routeFields[1].Descriptor()\n\t// route.NameValidator is a validator for the \"name\" field. It is called by the builders before save.\n\troute.NameValidator = routeDescName.Validators[0].(func(string) error)\n\t// routeDescURI is the schema descriptor for uri field.\n\trouteDescURI := routeFields[2].Descriptor()\n\t// route.URIValidator is a validator for the \"uri\" field. 
It is called by the builders before save.\n\troute.URIValidator = routeDescURI.Validators[0].(func(string) error)\n\t// routeDescCreatedAt is the schema descriptor for created_at field.\n\trouteDescCreatedAt := routeFields[5].Descriptor()\n\t// route.DefaultCreatedAt holds the default value on creation for the created_at field.\n\troute.DefaultCreatedAt = routeDescCreatedAt.Default.(func() time.Time)\n\t// routeDescUpdatedAt is the schema descriptor for updated_at field.\n\trouteDescUpdatedAt := routeFields[6].Descriptor()\n\t// route.DefaultUpdatedAt holds the default value on creation for the updated_at field.\n\troute.DefaultUpdatedAt = routeDescUpdatedAt.Default.(func() time.Time)\n\t// route.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.\n\troute.UpdateDefaultUpdatedAt = routeDescUpdatedAt.UpdateDefault.(func() time.Time)\n\tuserFields := schema.User{}.Fields()\n\t_ = userFields\n\t// userDescTenant is the schema descriptor for tenant field.\n\tuserDescTenant := userFields[0].Descriptor()\n\t// user.TenantValidator is a validator for the \"tenant\" field. It is called by the builders before save.\n\tuser.TenantValidator = userDescTenant.Validators[0].(func(string) error)\n\t// userDescUUID is the schema descriptor for uuid field.\n\tuserDescUUID := userFields[1].Descriptor()\n\t// user.UUIDValidator is a validator for the \"uuid\" field. 
It is called by the builders before save.\n\tuser.UUIDValidator = userDescUUID.Validators[0].(func(string) error)\n\t// userDescIsSuper is the schema descriptor for is_super field.\n\tuserDescIsSuper := userFields[3].Descriptor()\n\t// user.DefaultIsSuper holds the default value on creation for the is_super field.\n\tuser.DefaultIsSuper = userDescIsSuper.Default.(bool)\n\t// userDescCreatedAt is the schema descriptor for created_at field.\n\tuserDescCreatedAt := userFields[5].Descriptor()\n\t// user.DefaultCreatedAt holds the default value on creation for the created_at field.\n\tuser.DefaultCreatedAt = userDescCreatedAt.Default.(func() time.Time)\n\t// userDescUpdatedAt is the schema descriptor for updated_at field.\n\tuserDescUpdatedAt := userFields[6].Descriptor()\n\t// user.DefaultUpdatedAt holds the default value on creation for the updated_at field.\n\tuser.DefaultUpdatedAt = userDescUpdatedAt.Default.(func() time.Time)\n\t// user.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.\n\tuser.UpdateDefaultUpdatedAt = userDescUpdatedAt.UpdateDefault.(func() time.Time)\n}", "func (m *SolutionsRoot) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"businessScenarios\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateBusinessScenarioFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]BusinessScenarioable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(BusinessScenarioable)\n }\n }\n m.SetBusinessScenarios(res)\n }\n return nil\n }\n res[\"@odata.type\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if 
err != nil {\n return err\n }\n if val != nil {\n m.SetOdataType(val)\n }\n return nil\n }\n res[\"virtualEvents\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateVirtualEventsRootFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetVirtualEvents(val.(VirtualEventsRootable))\n }\n return nil\n }\n return res\n}", "func (s *Struct) generateEnums(dir string) error {\n\tfor _, f := range s.Fields {\n\t\tif f.Enum == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif err := f.Enum.generate(dir); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func IterFields(recurse bool, st reflect.Type, v reflect.Value, callback func(f reflect.Value, sf reflect.StructField, cmd ...string)) {\n\t// NOTE: if we're passed something that isn't a struct, then the program will\n\t// panic when we call NumField() as this is the reality of using reflection.\n\t//\n\t// we are relying on the consumer of this package to follow the instructions\n\t// given and to provide us with what we are expecting.\n\t//\n\t// so if we're not careful, then we violate the language type safety.\n\t// but we protect against this in the calling function by checking for a\n\t// struct before calling IterFields.\n\t//\n\tfor i := 0; i < v.NumField(); i++ {\n\t\tfield := v.Field(i)\n\n\t\t// we call Field() on the struct type so we can get a StructField type,\n\t\t// which we have to do in order to access the struct 'tags' on the field.\n\t\t//\n\t\t// it also gives us access to the field name so we can create the various\n\t\t// flags necessary (as well as determine the command that a user runs).\n\t\t//\n\t\tsf := st.Field(i)\n\n\t\tif field.Kind() == reflect.Struct {\n\t\t\t// when we see a struct we expect by convention for this to be a\n\t\t\t// 'command' that will have its own set of flags.\n\t\t\t//\n\t\t\tcmd := strings.ToLower(sf.Name)\n\t\t\tif _, ok := cmds[cmd]; !ok {\n\t\t\t\tcmds[cmd] = 
true\n\t\t\t}\n\n\t\t\t// we use CanInterface() because otherise if we were to call Interface()\n\t\t\t// on a field that was unexported, then the program would panic.\n\t\t\t//\n\t\t\tif recurse && field.CanInterface() {\n\t\t\t\t// we use Interface() to get the nested struct value as an interface{}.\n\t\t\t\t// this is done because if we called TypeOf on the field variable, then\n\t\t\t\t// we would end up with reflect.Value when really we need the nested\n\t\t\t\t// struct's concrete type definition (e.g. struct {...}).\n\t\t\t\t//\n\t\t\t\tst := reflect.TypeOf(field.Interface())\n\n\t\t\t\tfor i := 0; i < field.NumField(); i++ {\n\t\t\t\t\t// again, we get the field from the nested struct, as well as acquire\n\t\t\t\t\t// its StructField type for purposes already explained above.\n\t\t\t\t\t//\n\t\t\t\t\tfield := field.Field(i)\n\t\t\t\t\tst := st.Field(i)\n\n\t\t\t\t\t// because our callback function is going to attempt to set values on\n\t\t\t\t\t// these struct fields, we need to be sure they are 'settable' first.\n\t\t\t\t\t//\n\t\t\t\t\tif field.CanSet() {\n\t\t\t\t\t\tcallback(field, st, cmd)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// we check if recurse is false because we don't want our nested commands\n\t\t\t// to accidentally add the top-level fields into our command flagset and\n\t\t\t// thus -h/--help would show the top-level fields in the help output.\n\t\t\t//\n\t\t\t// also, because our callback function is going to attempt to set values\n\t\t\t// on these struct fields, we need to be sure they are 'settable' first.\n\t\t\t//\n\t\t\t//\n\t\t\tif !recurse && field.CanSet() {\n\t\t\t\tcallback(field, sf)\n\t\t\t}\n\t\t}\n\t}\n}", "func fieldsWithNames(f *ast.Field) (fields []*ast.Field) {\n\tif f == nil {\n\t\treturn nil\n\t}\n\n\tif len(f.Names) == 0 {\n\t\tfields = append(fields, &ast.Field{\n\t\t\tDoc: f.Doc,\n\t\t\tNames: []*ast.Ident{{Name: printIdentField(f)}},\n\t\t\tType: f.Type,\n\t\t\tTag: f.Tag,\n\t\t\tComment: 
f.Comment,\n\t\t})\n\t\treturn\n\t}\n\tfor _, ident := range f.Names {\n\t\tfields = append(fields, &ast.Field{\n\t\t\tDoc: f.Doc,\n\t\t\tNames: []*ast.Ident{ident},\n\t\t\tType: f.Type,\n\t\t\tTag: f.Tag,\n\t\t\tComment: f.Comment,\n\t\t})\n\t}\n\treturn\n}", "func fields(t reflect.Type) map[string]interface{} {\n\tfieldCache.RLock()\n\tfs := fieldCache.m[t]\n\tfieldCache.RUnlock()\n\n\t//Cached version exists\n\tif fs != nil {\n\t\treturn fs\n\t}\n\t//This is to prevent multiple goroutines computing the same thing\n\tfieldCache.Lock()\n\tvar sy *sync.WaitGroup\n\tif sy, ok := fieldCache.create[t]; ok {\n\t\tfieldCache.Unlock()\n\t\tsy.Wait()\n\t\treturn fields(t)\n\t}\n\tsy = &sync.WaitGroup{}\n\tfieldCache.create[t] = sy\n\tsy.Add(1)\n\tfieldCache.Unlock()\n\n\tfs = compileStruct(t)\n\n\tfieldCache.Lock()\n\tfieldCache.m[t] = fs\n\tfieldCache.Unlock()\n\tsy.Done()\n\treturn fs\n}", "func (m *Planner) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"buckets\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreatePlannerBucketFromDiscriminatorValue , m.SetBuckets)\n res[\"plans\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreatePlannerPlanFromDiscriminatorValue , m.SetPlans)\n res[\"tasks\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreatePlannerTaskFromDiscriminatorValue , m.SetTasks)\n return res\n}", "func getFieldConstructor(e ast.Expr) string {\n\tswitch t := e.(type) {\n\tcase *ast.StarExpr:\n\t\tswitch t.X.(type) {\n\t\tcase *ast.StarExpr:\n\t\t\tpanic(\"Ponter on pointers is not supported in annotation struct\")\n\t\tcase *ast.ArrayType:\n\t\t\tpanic(\"Pointer on arrays is not supported in annotation struct\")\n\t\tdefault:\n\t\t\treturn \"&\" + 
getFieldConstructor(t.X)\n\t\t}\n\tcase *ast.ArrayType:\n\t\tswitch elemType := t.Elt.(type) {\n\t\tcase *ast.StarExpr:\n\t\t\tpanic(\"Array of pointers is not supported in annotation struct\")\n\t\tcase *ast.ArrayType:\n\t\t\tpanic(\"Array of arrays is not supported in annotation struct\")\n\t\tdefault:\n\t\t\treturn \"[]\" + getFieldConstructor(elemType)\n\t\t}\n\tcase *ast.Ident:\n\t\tswitch t.Name {\n\t\tcase \"int\", \"int8\", \"int16\", \"int32\", \"int64\",\n\t\t\t\"uint\", \"uint8\", \"uint16\", \"uint32\", \"uint64\",\n\t\t\t\"float32\", \"float64\", \"byte\", \"rune\", \"string\":\n\t\t\treturn t.Name + \"{\"\n\t\tcase \"complex64\", \"complex128\", \"uintptr\":\n\t\t\tpanic(\"Type '\" + t.Name + \"' is not supported in annotation struct\")\n\t\tdefault:\n\t\t\treturn t.Name + \"{\"\n\t\t}\n\tdefault:\n\t\tpanic(\"Unsupported field type in annotation\")\n\t}\n}", "func GenORMSetup(db *gorm.DB) {\n\n\t// relative to the models package, swith to ../controlers package\n\tfilename := filepath.Join(OrmPkgGenPath, \"setup.go\")\n\n\t// we should use go generate\n\tlog.Println(\"generating orm setup file : \" + filename)\n\n\tf, err := os.Create(filename)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\t// create the list of structs\n\tvar structs []models.Struct\n\tdb.Find(&structs)\n\n\tLISTOFSTRUCT := \"\\n\"\n\n\tdeleteCalls := \"\"\n\n\tfor idx, _struct := range structs {\n\t\tif idx != 0 {\n\t\t\tLISTOFSTRUCT += \",\\n\"\n\t\t}\n\t\tLISTOFSTRUCT += fmt.Sprintf(\"\\t\\t&%sDB{}\", _struct.Name)\n\n\t\tdeleteCalls += fmt.Sprintf(\"\\tdb.Delete(&%sDB{})\\n\", _struct.Name)\n\n\t\tfmt.Printf(\"\t\torm.LoadDB%s(%ss, db)\\n\", _struct.Name, _struct.Name)\n\t}\n\tres := strings.ReplaceAll(template, \"{{LISTOFSTRUCT}}\", LISTOFSTRUCT)\n\n\tres = strings.ReplaceAll(res, \"{{Deletes}}\", deleteCalls)\n\n\tfmt.Fprintf(f, \"%s\", res)\n\n\tdefer f.Close()\n}", "func (g *CodeGenerator) Generate() error {\n\tif len(g.opts.FilePath) == 0 {\n\t\treturn errors.New(\"invalid 
file path\")\n\t}\n\n\tif len(g.opts.PackageName) == 0 {\n\t\treturn errors.New(\"invalid package name\")\n\t}\n\n\t// generate package\n\tg.P(\"package \", g.opts.PackageName)\n\tg.P()\n\n\t// generate import path\n\tg.P(\"import (\")\n\tfor _, path := range g.opts.ImportPath {\n\t\tg.P(\"\\t\\\"\", path, \"\\\"\")\n\t}\n\tg.P(\")\")\n\tg.P()\n\n\t// generate variables\n\tfor _, v := range g.opts.Variables {\n\t\tvariableLine := fmt.Sprintf(\"var\\t%-15s\\t%-15s\\t//%-15s\", v.name, v.tp, v.comment)\n\t\tg.P(variableLine)\n\t\tg.P()\n\t}\n\n\t// generate structs\n\tfor _, s := range g.opts.Structs {\n\t\t// struct comment\n\t\tif len(s.comment) > 0 {\n\t\t\tg.P(\"// \", s.comment)\n\t\t}\n\n\t\t// struct begin\n\t\tg.P(\"type \", s.name, \" struct {\")\n\n\t\t// struct fields\n\t\tfieldLines := make([]string, s.fieldRaw.Size())\n\t\tit := s.fieldRaw.Iterator()\n\t\tfor it.Next() {\n\t\t\tfieldRaw := it.Value().(*ExcelFieldRaw)\n\n\t\t\t// don't need import\n\t\t\tif !fieldRaw.imp {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfieldLine := fmt.Sprintf(\"\\t%-15s\\t%-20s\\t%-20s\\t//%-10s\", it.Key(), fieldRaw.tp, fieldRaw.tag, fieldRaw.desc)\n\t\t\tfieldLines[fieldRaw.idx] = fieldLine\n\t\t}\n\n\t\t// print struct field in sort\n\t\tfor _, v := range fieldLines {\n\t\t\tif len(v) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tg.P(v)\n\t\t}\n\n\t\t// struct end\n\t\tg.P(\"}\")\n\t\tg.P()\n\t}\n\n\t// generate functions\n\tfor _, f := range g.opts.Functions {\n\t\t// function comment\n\t\tif len(f.comment) > 0 {\n\t\t\tg.P(\"// \", f.comment)\n\t\t}\n\n\t\t// function receiver\n\t\tvar receiver string\n\t\tif len(f.receiver) > 0 {\n\t\t\treceiver = fmt.Sprintf(\"(e *%s)\", f.receiver)\n\t\t}\n\n\t\t// function parameters\n\t\tparameters := strings.Join(f.parameters, \", \")\n\n\t\t// function begin\n\t\tg.P(\"func \", receiver, \" \", f.name, \"(\", parameters, \") \", f.retType, \" {\")\n\n\t\t// function body\n\t\tg.P(\"\\t\", f.body)\n\n\t\t// function 
end\n\t\tg.P(\"}\")\n\t\tg.P()\n\t}\n\n\treturn ioutil.WriteFile(g.opts.FilePath, g.buf.Bytes(), 0666)\n}", "func (m *DeviceConfigurationAssignment) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"intent\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceConfigAssignmentIntent)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetIntent(val.(*DeviceConfigAssignmentIntent))\n }\n return nil\n }\n res[\"source\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceAndAppManagementAssignmentSource)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSource(val.(*DeviceAndAppManagementAssignmentSource))\n }\n return nil\n }\n res[\"sourceId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSourceId(val)\n }\n return nil\n }\n res[\"target\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateDeviceAndAppManagementAssignmentTargetFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetTarget(val.(DeviceAndAppManagementAssignmentTargetable))\n }\n return nil\n }\n return res\n}", "func (m *DeviceManagementConfigurationPolicy) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"assignments\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := 
n.GetCollectionOfObjectValues(CreateDeviceManagementConfigurationPolicyAssignmentFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]DeviceManagementConfigurationPolicyAssignmentable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(DeviceManagementConfigurationPolicyAssignmentable)\n }\n }\n m.SetAssignments(res)\n }\n return nil\n }\n res[\"createdDateTime\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCreatedDateTime(val)\n }\n return nil\n }\n res[\"creationSource\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCreationSource(val)\n }\n return nil\n }\n res[\"description\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDescription(val)\n }\n return nil\n }\n res[\"isAssigned\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetBoolValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetIsAssigned(val)\n }\n return nil\n }\n res[\"lastModifiedDateTime\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetLastModifiedDateTime(val)\n }\n return nil\n }\n res[\"name\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetName(val)\n }\n return nil\n }\n res[\"platforms\"] = func (n 
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceManagementConfigurationPlatforms)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPlatforms(val.(*DeviceManagementConfigurationPlatforms))\n }\n return nil\n }\n res[\"priorityMetaData\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateDeviceManagementPriorityMetaDataFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPriorityMetaData(val.(DeviceManagementPriorityMetaDataable))\n }\n return nil\n }\n res[\"roleScopeTagIds\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfPrimitiveValues(\"string\")\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]string, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = *(v.(*string))\n }\n }\n m.SetRoleScopeTagIds(res)\n }\n return nil\n }\n res[\"settingCount\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetInt32Value()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSettingCount(val)\n }\n return nil\n }\n res[\"settings\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateDeviceManagementConfigurationSettingFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]DeviceManagementConfigurationSettingable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(DeviceManagementConfigurationSettingable)\n }\n }\n m.SetSettings(res)\n }\n return nil\n }\n res[\"technologies\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceManagementConfigurationTechnologies)\n if err != 
nil {\n return err\n }\n if val != nil {\n m.SetTechnologies(val.(*DeviceManagementConfigurationTechnologies))\n }\n return nil\n }\n res[\"templateReference\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateDeviceManagementConfigurationPolicyTemplateReferenceFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetTemplateReference(val.(DeviceManagementConfigurationPolicyTemplateReferenceable))\n }\n return nil\n }\n return res\n}", "func dumpFields(documentType IDocumentBase) {\n\thandleType := reflect.TypeOf(documentType)\n\tlog.Printf(\"reflect.Kind: %s\", handleType.Kind())\n\tlog.Printf(\"reflect.Kind: %s\", handleType.Elem().Kind())\n\n\thandleStructType := reflect.TypeOf(documentType)\n\tif handleStructType.Kind() == reflect.Ptr {\n\t\thandleStructType = handleType.Elem()\n\t}\n\tlog.Printf(\"reflect.handleStructType.Kind: %s\", handleStructType.Kind())\n\n\tfor i := 0; i < handleStructType.NumField(); i++ {\n\t\tfield := handleStructType.Field(i) // Get the field, returns https://golang.org/pkg/reflect/#StructField\n\t\tlog.Printf(\"dumpFields Name: %s\", field.Name)\n\t\tlog.Printf(\"dumpFields Tags: %s\", field.Tag)\n\t}\n}", "func (s CreateRouteOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := 
*s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (n *Node) Fields() []uintptr {\n\t// we store the offsets for the fields in type properties\n\ttprops := kit.Types.Properties(n.Type(), true) // true = makeNew\n\tpnm := \"__FieldOffs\"\n\tif foff, ok := tprops[pnm]; ok {\n\t\treturn foff.([]uintptr)\n\t}\n\tfoff := make([]uintptr, 0)\n\tkitype := KiType()\n\tFlatFieldsValueFunc(n.This, func(stru interface{}, typ reflect.Type, field reflect.StructField, fieldVal reflect.Value) bool {\n\t\tif fieldVal.Kind() == reflect.Struct && kit.EmbeddedTypeImplements(field.Type, kitype) {\n\t\t\tfoff = append(foff, field.Offset)\n\t\t}\n\t\treturn true\n\t})\n\ttprops[pnm] = foff\n\treturn foff\n}", "func (s GetFunctionConfigurationOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.CodeSha256 != nil {\n\t\tv := *s.CodeSha256\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CodeSha256\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CodeSize != nil {\n\t\tv := *s.CodeSize\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CodeSize\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.DeadLetterConfig != nil {\n\t\tv := s.DeadLetterConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"DeadLetterConfig\", v, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, 
metadata)\n\t}\n\tif s.Environment != nil {\n\t\tv := s.Environment\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Environment\", v, metadata)\n\t}\n\tif s.FunctionArn != nil {\n\t\tv := *s.FunctionArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FunctionArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.FunctionName != nil {\n\t\tv := *s.FunctionName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FunctionName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Handler != nil {\n\t\tv := *s.Handler\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Handler\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.KMSKeyArn != nil {\n\t\tv := *s.KMSKeyArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"KMSKeyArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.LastModified != nil {\n\t\tv := *s.LastModified\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"LastModified\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.LastUpdateStatus) > 0 {\n\t\tv := s.LastUpdateStatus\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"LastUpdateStatus\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.LastUpdateStatusReason != nil {\n\t\tv := *s.LastUpdateStatusReason\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"LastUpdateStatusReason\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.LastUpdateStatusReasonCode) > 0 {\n\t\tv := s.LastUpdateStatusReasonCode\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"LastUpdateStatusReasonCode\", protocol.QuotedValue{ValueMarshaler: 
v}, metadata)\n\t}\n\tif s.Layers != nil {\n\t\tv := s.Layers\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Layers\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.MasterArn != nil {\n\t\tv := *s.MasterArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"MasterArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MemorySize != nil {\n\t\tv := *s.MemorySize\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"MemorySize\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.RevisionId != nil {\n\t\tv := *s.RevisionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"RevisionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Role != nil {\n\t\tv := *s.Role\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Role\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Runtime) > 0 {\n\t\tv := s.Runtime\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Runtime\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.State) > 0 {\n\t\tv := s.State\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"State\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.StateReason != nil {\n\t\tv := *s.StateReason\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"StateReason\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.StateReasonCode) > 0 {\n\t\tv := s.StateReasonCode\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"StateReasonCode\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Timeout != nil {\n\t\tv := *s.Timeout\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Timeout\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.TracingConfig != nil {\n\t\tv := s.TracingConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"TracingConfig\", v, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.VpcConfig != nil {\n\t\tv := s.VpcConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"VpcConfig\", v, metadata)\n\t}\n\treturn nil\n}", "func (c *TypeConverter) GenStructConverter(\n\tfromFields []*compile.FieldSpec,\n\ttoFields []*compile.FieldSpec,\n\tfieldMap map[string]FieldMapperEntry,\n) error {\n\t// Add compiled FieldSpecs to the FieldMapperEntry\n\tfieldMap = addSpecToMap(fieldMap, fromFields, \"\")\n\t// Check for vlaues not populated recursively by addSpecToMap\n\tfor k, v := range fieldMap {\n\t\tif fieldMap[k].Field == nil {\n\t\t\treturn errors.Errorf(\n\t\t\t\t\"Failed to find field ( %s ) for transform.\",\n\t\t\t\tv.QualifiedName,\n\t\t\t)\n\t\t}\n\t}\n\n\tc.useRecurGen = c.isRecursiveStruct(toFields) || c.isRecursiveStruct(fromFields)\n\n\tif c.useRecurGen && len(fieldMap) != 0 {\n\t\tc.append(\"inOriginal := in; _ = inOriginal\")\n\t\tc.append(\"outOriginal := out; _ = outOriginal\")\n\t}\n\n\terr := c.genStructConverter(\"\", \"\", \"\", fromFields, toFields, fieldMap, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func loadFields(fieldBucket *bbolt.Bucket) (fields []field.Meta) {\n\tcursor := fieldBucket.Cursor()\n\tfor k, v := cursor.First(); k != nil; k, v = cursor.Next() {\n\t\tfields = append(fields, field.Meta{\n\t\t\tName: field.Name(k),\n\t\t\tID: field.ID(v[0]),\n\t\t\tType: field.Type(v[1]),\n\t\t})\n\t}\n\treturn\n}", "func compileStruct(t reflect.Type) map[string]interface{} {\n\tfs := 
map[string]interface{}{}\n\tcount := t.NumField()\n\tfor i := 0; i < count; i++ {\n\t\tf := t.Field(i)\n\t\tvar name string\n\t\tif !f.Anonymous {\n\t\t\tname = f.Name\n\t\t\tif tName := f.Tag.Get(\"nbt\"); len(tName) > 0 {\n\t\t\t\tname = tName\n\t\t\t}\n\t\t\tif name == \"ignore\" || f.Tag.Get(\"ignore\") == \"true\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else {\n\t\t\tname = f.Type.Name()\n\t\t\tif tName := f.Tag.Get(\"nbt\"); len(tName) > 0 {\n\t\t\t\tname = tName\n\t\t\t}\n\t\t\tif name == \"ignore\" || f.Tag.Get(\"ignore\") == \"true\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tfs[name] = compileField(f, name)\n\t}\n\treturn fs\n}", "func (s Route) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiGatewayManaged != nil {\n\t\tv := *s.ApiGatewayManaged\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiGatewayManaged\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.AuthorizationScopes != nil {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RequestModels != nil {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RequestParameters != nil {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Route) MarshalFields(e 
protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, 
\"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func StructFields() {\n\tv := vertex{1, 2}\n\tv.X = 11\n\n\tfmt.Println(v.X, v.Y)\n}", "func (m *ManagementTemplateStep) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"acceptedVersion\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateManagementTemplateStepVersionFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetAcceptedVersion(val.(ManagementTemplateStepVersionable))\n }\n return nil\n }\n res[\"category\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseManagementCategory)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCategory(val.(*ManagementCategory))\n }\n 
return nil\n }\n res[\"createdByUserId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCreatedByUserId(val)\n }\n return nil\n }\n res[\"createdDateTime\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCreatedDateTime(val)\n }\n return nil\n }\n res[\"description\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDescription(val)\n }\n return nil\n }\n res[\"displayName\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDisplayName(val)\n }\n return nil\n }\n res[\"lastActionByUserId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetLastActionByUserId(val)\n }\n return nil\n }\n res[\"lastActionDateTime\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetLastActionDateTime(val)\n }\n return nil\n }\n res[\"managementTemplate\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateManagementTemplateFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetManagementTemplate(val.(ManagementTemplateable))\n }\n return nil\n }\n res[\"portalLink\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error 
{\n val, err := n.GetObjectValue(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.CreateActionUrlFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPortalLink(val.(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.ActionUrlable))\n }\n return nil\n }\n res[\"priority\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetInt32Value()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPriority(val)\n }\n return nil\n }\n res[\"versions\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateManagementTemplateStepVersionFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]ManagementTemplateStepVersionable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(ManagementTemplateStepVersionable)\n }\n }\n m.SetVersions(res)\n }\n return nil\n }\n return res\n}", "func (m *TemplateParameter) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"description\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDescription(val)\n }\n return nil\n }\n res[\"displayName\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDisplayName(val)\n }\n return nil\n }\n res[\"jsonAllowedValues\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n 
return err\n }\n if val != nil {\n m.SetJsonAllowedValues(val)\n }\n return nil\n }\n res[\"jsonDefaultValue\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetJsonDefaultValue(val)\n }\n return nil\n }\n res[\"@odata.type\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOdataType(val)\n }\n return nil\n }\n res[\"valueType\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseManagementParameterValueType)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetValueType(val.(*ManagementParameterValueType))\n }\n return nil\n }\n return res\n}", "func (s GetRouteOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif 
s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func StructFields(v interface{}) (vType reflect.Type, vFields []*TField) {\n\tvar (\n\t\tfield reflect.StructField\n\t)\n\tvType = reflect.Indirect(reflect.ValueOf(v)).Type()\n\tnumFields := vType.NumField()\n\tvFields = make([]*TField, 0, numFields)\n\tfor i := 0; i < numFields; i++ {\n\t\tfield = vType.Field(i)\n\t\tfieldInfo := &TField{\n\t\t\tFname: field.Name,\n\t\t\tFtype: field.Type.String(),\n\t\t\tFkind: field.Type.Kind(),\n\t\t\tFtags: field.Tag,\n\t\t}\n\t\tif field.PkgPath == \"\" {\n\t\t\tfieldInfo.Fexported = true\n\t\t}\n\t\tvFields = append(vFields, fieldInfo)\n\t}\n\treturn\n}", "func dbFields(obj interface{}, skipKey bool) (table, key, fields string) {\n\tt := reflect.TypeOf(obj)\n\tlist := make([]string, 0, t.NumField())\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tf := t.Field(i)\n\t\tif isTable := f.Tag.Get(\"table\"); len(isTable) > 0 {\n\t\t\ttable = isTable\n\t\t}\n\t\tk := f.Tag.Get(\"sql\")\n\t\tif f.Tag.Get(\"key\") == \"true\" {\n\t\t\tkey = k\n\t\t\tif skipKey {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif len(k) > 0 {\n\t\t\tlist = append(list, k)\n\t\t}\n\t}\n\tfields = strings.Join(list, \",\")\n\treturn\n}", "func init() {\n\tcodegen.RegisterPlugin(\"types\", \"gen\", nil, Generate)\n}", "func StructFields(t reflect.Type) string {\n\tfields := make([]string, 0)\n\tif t.Kind() == reflect.Struct {\n\t\tfor i := 0; i < t.NumField(); i ++ {\n\t\t\tname := t.Field(i).Name\n\t\t\tif t.Field(i).Type.Kind() == reflect.Struct {\n\t\t\t\ts := StructFields(t.Field(i).Type)\n\t\t\t\tf := strings.Split(s, \", \")\n\t\t\t\tleft := FirstLower(name)\n\t\t\t\tfor _, v := range f {\n\t\t\t\t\tfields = append(fields, fmt.Sprintf(\"%s.%s\", left, FirstLower(v)))\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfields = append(fields, FirstLower(name))\n\t\t}\n\t}\n\n\treturn strings.Join(fields, \", \")\n}" ]
[ "0.62704337", "0.6208788", "0.61161774", "0.588618", "0.5744872", "0.5581054", "0.556311", "0.55467135", "0.5487712", "0.5455513", "0.54416054", "0.54179704", "0.5410756", "0.54015136", "0.53891087", "0.5364398", "0.5253189", "0.5208028", "0.52043945", "0.5097442", "0.5096631", "0.50294745", "0.50262094", "0.5002781", "0.49794614", "0.49494854", "0.49378783", "0.4932478", "0.4906218", "0.4894362", "0.48918608", "0.48858726", "0.48758674", "0.48757905", "0.4873638", "0.48704875", "0.48592275", "0.48588952", "0.48574054", "0.484549", "0.48419362", "0.48415983", "0.48413324", "0.4841029", "0.48327285", "0.4829235", "0.48267707", "0.48250237", "0.48143035", "0.48040703", "0.48031336", "0.47986373", "0.47876844", "0.4775719", "0.47700143", "0.4756326", "0.47468916", "0.4741483", "0.4740756", "0.4738409", "0.47368366", "0.473303", "0.47219402", "0.472103", "0.4720735", "0.47144935", "0.4708526", "0.4703656", "0.46961218", "0.46927565", "0.46846217", "0.46844286", "0.4682837", "0.46777657", "0.46691662", "0.46686304", "0.46638605", "0.46600878", "0.46583408", "0.46303263", "0.46222466", "0.46187404", "0.46060646", "0.46004346", "0.45963866", "0.45916227", "0.45889625", "0.45851684", "0.45793283", "0.4576849", "0.4572388", "0.45655093", "0.4563145", "0.45609367", "0.45592597", "0.45538422", "0.45524934", "0.45482114", "0.45459345", "0.45450222" ]
0.7535413
0
genField generates field config for given AST
func genField(field *ast.FieldDefinition) *jen.Statement { // // Generate config for field // // == Example input SDL // // interface Pet { // "name of the pet" // name(style: NameComponentsStyle = SHORT): String! // """ // givenName of the pet ★ // """ // givenName: String @deprecated(reason: "No longer supported; please use name field.") // } // // == Example output // // &graphql.Field{ // Name: "name", // Type: graphql.NonNull(graphql.String), // Description: "name of the pet", // DeprecationReason: "", // Args: FieldConfigArgument{ ... }, // } // // &graphql.Field{ // Name: "givenName", // Type: graphql.String, // Description: "givenName of the pet", // DeprecationReason: "No longer supported; please use name field.", // Args: FieldConfigArgument{ ... }, // } // return jen.Op("&").Qual(defsPkg, "Field").Values(jen.Dict{ jen.Id("Args"): genArguments(field.Arguments), jen.Id("DeprecationReason"): genDeprecationReason(field.Directives), jen.Id("Description"): genDescription(field), jen.Id("Name"): jen.Lit(field.Name.Value), jen.Id("Type"): genOutputTypeReference(field.Type), }) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func genFields(fs []*ast.FieldDefinition) *jen.Statement {\n\t//\n\t// Generate config for fields\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(style: NameComponentsStyle = SHORT): String!\n\t// givenName: String @deprecated(reason: \"No longer supported; please use name field.\")\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// graphql.Fields{\n\t// \"name\": graphql.Field{ ... },\n\t// \"givenName\": graphql.Field{ ... },\n\t// }\n\t//\n\treturn jen.Qual(defsPkg, \"Fields\").Values(jen.DictFunc(func(d jen.Dict) {\n\t\tfor _, f := range fs {\n\t\t\td[jen.Lit(f.Name.Value)] = genField(f)\n\t\t}\n\t}))\n}", "func (g *Generator) generate(typeName string) {\n\tfields := make([]Field, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeName = typeName\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tfields = append(fields, file.fields...)\n\t\t}\n\t}\n\tif len(fields) == 0 {\n\t\tlog.Fatalf(\"no fields defined for type %s\", typeName)\n\t}\n\t// TODO: for now we remove Default from the start (maybe move that to an option)\n\tlogicalTypeName := \"\\\"\" + strings.TrimPrefix(typeName, \"Default\") + \"\\\"\"\n\n\t// Generate code that will fail if the constants change value.\n\tg.Printf(\"func (d *%s) Serialize() ([]byte, error) {\\n\", typeName)\n\tg.Printf(\"wb := utils.NewWriteBufferByteBased(utils.WithByteOrderForByteBasedBuffer(binary.BigEndian))\\n\")\n\tg.Printf(\"\\tif err := d.SerializeWithWriteBuffer(context.Background(), wb); err != nil {\\n\")\n\tg.Printf(\"\\t\\treturn nil, err\\n\")\n\tg.Printf(\"\\t}\\n\")\n\tg.Printf(\"\\treturn wb.GetBytes(), nil\\n\")\n\tg.Printf(\"}\\n\\n\")\n\tg.Printf(\"func (d *%s) SerializeWithWriteBuffer(ctx context.Context, writeBuffer utils.WriteBuffer) error {\\n\", typeName)\n\tg.Printf(\"\\tif err := writeBuffer.PushContext(%s); err != nil {\\n\", logicalTypeName)\n\tg.Printf(\"\\t\\treturn 
err\\n\")\n\tg.Printf(\"\\t}\\n\")\n\tfor _, field := range fields {\n\t\tfieldType := field.fieldType\n\t\tif field.isDelegate {\n\t\t\tg.Printf(\"\\t\\t\\tif err := d.%s.SerializeWithWriteBuffer(ctx, writeBuffer); err != nil {\\n\", fieldType.(*ast.Ident).Name)\n\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\tcontinue\n\t\t}\n\t\tfieldName := field.name\n\t\tfieldNameUntitled := \"\\\"\" + unTitle(fieldName) + \"\\\"\"\n\t\tif field.hasLocker != \"\" {\n\t\t\tg.Printf(\"if err := func()error {\\n\")\n\t\t\tg.Printf(\"\\td.\" + field.hasLocker + \".Lock()\\n\")\n\t\t\tg.Printf(\"\\tdefer d.\" + field.hasLocker + \".Unlock()\\n\")\n\t\t}\n\t\tneedsDereference := false\n\t\tif starFieldType, ok := fieldType.(*ast.StarExpr); ok {\n\t\t\tfieldType = starFieldType.X\n\t\t\tneedsDereference = true\n\t\t}\n\t\tif field.isStringer {\n\t\t\tif needsDereference {\n\t\t\t\tg.Printf(\"if d.%s != nil {\", field.name)\n\t\t\t}\n\t\t\tg.Printf(stringFieldSerialize, \"d.\"+field.name+\".String()\", fieldNameUntitled)\n\t\t\tif field.hasLocker != \"\" {\n\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t}\n\t\t\tif needsDereference {\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tswitch fieldType := fieldType.(type) {\n\t\tcase *ast.SelectorExpr:\n\t\t\t{\n\t\t\t\t// TODO: bit hacky but not sure how else we catch those ones\n\t\t\t\tx := fieldType.X\n\t\t\t\tsel := fieldType.Sel\n\t\t\t\txIdent, xIsIdent := x.(*ast.Ident)\n\t\t\t\tif xIsIdent {\n\t\t\t\t\tif xIdent.Name == \"atomic\" {\n\t\t\t\t\t\tif sel.Name == \"Uint32\" {\n\t\t\t\t\t\t\tg.Printf(uint32FieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn 
err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Uint64\" {\n\t\t\t\t\t\t\tg.Printf(uint64FieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Int32\" {\n\t\t\t\t\t\t\tg.Printf(int32FieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Bool\" {\n\t\t\t\t\t\t\tg.Printf(boolFieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Value\" {\n\t\t\t\t\t\t\tg.Printf(serializableFieldTemplate, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif xIdent.Name == \"sync\" {\n\t\t\t\t\t\tfmt.Printf(\"\\t skipping field %s because it is %v.%v\\n\", fieldName, x, sel)\n\t\t\t\t\t\tif field.hasLocker != \"\" 
{\n\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tg.Printf(serializableFieldTemplate, \"d.\"+field.name, fieldNameUntitled)\n\t\tcase *ast.IndexExpr:\n\t\t\tx := fieldType.X\n\t\t\tif fieldType, isxFieldSelector := x.(*ast.SelectorExpr); isxFieldSelector { // TODO: we need to refactor this so we can reuse...\n\t\t\t\txIdent, xIsIdent := fieldType.X.(*ast.Ident)\n\t\t\t\tsel := fieldType.Sel\n\t\t\t\tif xIsIdent && xIdent.Name == \"atomic\" && sel.Name == \"Pointer\" {\n\t\t\t\t\tg.Printf(atomicPointerFieldTemplate, \"d.\"+field.name, field.name, fieldNameUntitled)\n\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Printf(\"no support yet for %#q\\n\", fieldType)\n\t\t\tcontinue\n\t\tcase *ast.Ident:\n\t\t\tswitch fieldType.Name {\n\t\t\tcase \"byte\":\n\t\t\t\tg.Printf(byteFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"int\":\n\t\t\t\tg.Printf(int64FieldSerialize, \"int64(d.\"+field.name+\")\", fieldNameUntitled)\n\t\t\tcase \"int32\":\n\t\t\t\tg.Printf(int32FieldSerialize, \"int32(d.\"+field.name+\")\", fieldNameUntitled)\n\t\t\tcase \"uint32\":\n\t\t\t\tg.Printf(uint32FieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"bool\":\n\t\t\t\tg.Printf(boolFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"string\":\n\t\t\t\tg.Printf(stringFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"error\":\n\t\t\t\tg.Printf(errorFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tdefault:\n\t\t\t\tfmt.Printf(\"\\t no support implemented for Ident with type %v\\n\", 
fieldType)\n\t\t\t\tg.Printf(\"{\\n\")\n\t\t\t\tg.Printf(\"_value := fmt.Sprintf(\\\"%%v\\\", d.%s)\\n\", fieldName)\n\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", fieldNameUntitled)\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t}\n\t\tcase *ast.ArrayType:\n\t\t\tif eltType, ok := fieldType.Elt.(*ast.Ident); ok && eltType.Name == \"byte\" {\n\t\t\t\tg.Printf(\"if err := writeBuffer.WriteByteArray(%s, d.%s); err != nil {\\n\", fieldNameUntitled, field.name)\n\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t} else {\n\t\t\t\tg.Printf(\"if err := writeBuffer.PushContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn err\\n\\t}\\n\", fieldNameUntitled)\n\t\t\t\tg.Printf(\"for _, elem := range d.%s {\", field.name)\n\t\t\t\tswitch eltType := fieldType.Elt.(type) {\n\t\t\t\tcase *ast.SelectorExpr, *ast.StarExpr:\n\t\t\t\t\tg.Printf(\"\\n\\t\\tvar elem any = elem\\n\")\n\t\t\t\t\tg.Printf(serializableFieldTemplate, \"elem\", \"\\\"value\\\"\")\n\t\t\t\tcase *ast.Ident:\n\t\t\t\t\tswitch eltType.Name {\n\t\t\t\t\tcase \"int\":\n\t\t\t\t\t\tg.Printf(int64FieldSerialize, \"int64(d.\"+field.name+\")\", fieldNameUntitled)\n\t\t\t\t\tcase \"uint32\":\n\t\t\t\t\t\tg.Printf(uint32FieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\t\t\tcase \"bool\":\n\t\t\t\t\t\tg.Printf(boolFieldSerialize, \"elem\", \"\\\"\\\"\")\n\t\t\t\t\tcase \"string\":\n\t\t\t\t\t\tg.Printf(stringFieldSerialize, \"elem\", \"\\\"\\\"\")\n\t\t\t\t\tcase \"error\":\n\t\t\t\t\t\tg.Printf(errorFieldSerialize, \"elem\", \"\\\"\\\"\")\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tfmt.Printf(\"\\t no support implemented for Ident within ArrayType for %v\\n\", fieldType)\n\t\t\t\t\t\tg.Printf(\"_value := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", fieldNameUntitled)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\tg.Printf(\"if err := writeBuffer.PopContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn 
err\\n\\t}\\n\", fieldNameUntitled)\n\t\t\t}\n\t\tcase *ast.MapType:\n\t\t\tg.Printf(\"if err := writeBuffer.PushContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn err\\n\\t}\\n\", fieldNameUntitled)\n\t\t\t// TODO: we use serializable or strings as we don't want to over-complex this\n\t\t\tg.Printf(\"for _name, elem := range d.%s {\\n\", fieldName)\n\t\t\tswitch keyType := fieldType.Key.(type) {\n\t\t\tcase *ast.Ident:\n\t\t\t\tswitch keyType.Name {\n\t\t\t\tcase \"uint\", \"uint8\", \"uint16\", \"uint32\", \"uint64\", \"int\", \"int8\", \"int16\", \"int32\", \"int64\": // TODO: add other types\n\t\t\t\t\tg.Printf(\"\\t\\tname := fmt.Sprintf(\\\"%s\\\", _name)\\n\", \"%v\")\n\t\t\t\tcase \"string\":\n\t\t\t\t\tg.Printf(\"\\t\\tname := _name\\n\")\n\t\t\t\tdefault:\n\t\t\t\t\tg.Printf(\"\\t\\tname := fmt.Sprintf(\\\"%s\\\", &_name)\\n\", \"%v\")\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tg.Printf(\"\\t\\tname := fmt.Sprintf(\\\"%s\\\", &_name)\\n\", \"%v\")\n\t\t\t}\n\t\t\tswitch eltType := fieldType.Value.(type) {\n\t\t\tcase *ast.StarExpr, *ast.SelectorExpr:\n\t\t\t\tg.Printf(\"\\n\\t\\tvar elem any = elem\\n\")\n\t\t\t\tg.Printf(\"\\t\\tif serializable, ok := elem.(utils.Serializable); ok {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := writeBuffer.PushContext(name); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := serializable.SerializeWithWriteBuffer(ctx, writeBuffer); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := writeBuffer.PopContext(name); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t} else {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\telemAsString := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := writeBuffer.WriteString(name, 
uint32(len(elemAsString)*8), \\\"UTF-8\\\", elemAsString); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t}\\n\")\n\t\t\tcase *ast.Ident:\n\t\t\t\tswitch eltType.Name {\n\t\t\t\tcase \"bool\":\n\t\t\t\t\tg.Printf(boolFieldSerialize, \"elem\", \"name\")\n\t\t\t\tcase \"string\":\n\t\t\t\t\tg.Printf(stringFieldSerialize, \"elem\", \"name\")\n\t\t\t\tcase \"error\":\n\t\t\t\t\tg.Printf(errorFieldSerialize, \"elem\", \"name\")\n\t\t\t\tdefault:\n\t\t\t\t\tfmt.Printf(\"\\t no support implemented for Ident within MapType for %v\\n\", fieldType)\n\t\t\t\t\tg.Printf(\"\\t\\t_value := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", \"name\")\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tfmt.Printf(\"\\t no support implemented within MapType %v\\n\", fieldType.Value)\n\t\t\t\tg.Printf(\"\\t\\t_value := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", \"name\")\n\t\t\t}\n\t\t\tg.Printf(\"\\t}\\n\")\n\t\t\tg.Printf(\"if err := writeBuffer.PopContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn err\\n\\t}\\n\", fieldNameUntitled)\n\t\tcase *ast.ChanType:\n\t\t\tg.Printf(chanFieldSerialize, \"d.\"+field.name, fieldNameUntitled, field.name)\n\t\tcase *ast.FuncType:\n\t\t\tg.Printf(funcFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\tdefault:\n\t\t\tfmt.Printf(\"no support implemented %#v\\n\", fieldType)\n\t\t}\n\t\tif field.hasLocker != \"\" {\n\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\tg.Printf(\"}\\n\")\n\t\t}\n\t}\n\tg.Printf(\"\\tif err := writeBuffer.PopContext(%s); err != nil {\\n\", logicalTypeName)\n\tg.Printf(\"\\t\\treturn err\\n\")\n\tg.Printf(\"\\t}\\n\")\n\tg.Printf(\"\\treturn nil\\n\")\n\tg.Printf(\"}\\n\")\n\tg.Printf(\"\\n\")\n\tg.Printf(stringerTemplate, typeName)\n}", "func (n ClassNode) 
Codegen(scope *Scope, c *Compiler) value.Value {\n\tstructDefn := scope.FindType(n.Name).Type.(*types.StructType)\n\n\tfieldnames := make([]string, 0, len(n.Variables))\n\tfields := make([]types.Type, 0, len(n.Variables))\n\n\tnames := map[string]bool{}\n\n\tfor _, f := range n.Variables {\n\t\tt := f.Type.Name\n\t\tname := f.Name.String()\n\t\tif _, found := names[name]; found {\n\t\t\tlog.Fatal(\"Class '%s' has two fields/methods named '%s'\\n\", n.Name, f.Name)\n\t\t}\n\t\tnames[name] = true\n\t\tty := scope.FindType(t).Type\n\t\tty = f.Type.BuildPointerType(ty)\n\t\tfields = append(fields, ty)\n\t\tfieldnames = append(fieldnames, name)\n\t}\n\n\tthisArg := VariableDefnNode{}\n\tthisArg.Name = NewNamedReference(\"this\")\n\tthisArg.Type = GeodeTypeRef{}\n\tthisArg.Type.Array = false\n\tthisArg.Type.Name = n.Name\n\tthisArg.Type.PointerLevel = 1\n\n\tstructDefn.Fields = fields\n\tstructDefn.Names = fieldnames\n\n\tmethodBaseArgs := []VariableDefnNode{thisArg}\n\tfor _, m := range n.Methods {\n\t\tm.Name.Value = fmt.Sprintf(\"class.%s.%s\", n.Name, m.Name)\n\t\tif _, found := names[m.Name.String()]; found {\n\t\t\tlog.Fatal(\"Class '%s' has two fields/methods named '%s'\\n\", n.Name, m.Name)\n\t\t}\n\t\tnames[m.Name.String()] = true\n\t\tm.Args = append(methodBaseArgs, m.Args...)\n\t\tm.Declare(scope, c)\n\t\tm.Codegen(scope, c)\n\t}\n\n\treturn nil\n}", "func JsonFieldGenerator() gopter.Gen {\n\tif jsonFieldGenerator != nil {\n\t\treturn jsonFieldGenerator\n\t}\n\n\tgenerators := make(map[string]gopter.Gen)\n\tAddIndependentPropertyGeneratorsForJsonField(generators)\n\tjsonFieldGenerator = gen.Struct(reflect.TypeOf(JsonField{}), generators)\n\n\treturn jsonFieldGenerator\n}", "func (op *metadataLookup) buildField() {\n\tlengthOfFields := len(op.fields)\n\top.executeCtx.Fields = make(field.Metas, lengthOfFields)\n\n\tidx := 0\n\tfor fieldID := range op.fields {\n\t\tf := op.fields[fieldID]\n\t\top.executeCtx.Fields[idx] = field.Meta{\n\t\t\tID: 
fieldID,\n\t\t\tType: f.DownSampling.GetFieldType(),\n\t\t\tName: f.DownSampling.FieldName(),\n\t\t}\n\t\tidx++\n\t}\n\t// first sort field by field id\n\top.executeCtx.SortFields()\n\t// after sort filed, build aggregation spec\n\top.executeCtx.DownSamplingSpecs = make(aggregation.AggregatorSpecs, lengthOfFields)\n\top.executeCtx.AggregatorSpecs = make(aggregation.AggregatorSpecs, lengthOfFields)\n\tfor fieldIdx, fieldMeta := range op.executeCtx.Fields {\n\t\tf := op.fields[fieldMeta.ID]\n\t\top.executeCtx.DownSamplingSpecs[fieldIdx] = f.DownSampling\n\t\top.executeCtx.AggregatorSpecs[fieldIdx] = f.Aggregator\n\t}\n}", "func TraceFieldGenerator(ctx context.Context) []zapcore.Field {\n\tspanCtx := trace.FromContext(ctx).SpanContext()\n\n\treturn []zapcore.Field{\n\t\tzap.Uint64(\"dd.trace_id\", binary.BigEndian.Uint64(spanCtx.TraceID[8:])),\n\t\tzap.Uint64(\"dd.span_id\", binary.BigEndian.Uint64(spanCtx.SpanID[:])),\n\t}\n}", "func compileField(sf reflect.StructField, name string) interface{} {\n\tf := field{sField: sf.Index[0]}\n\n\tf.name = []byte(name)\n\n\tswitch sf.Type.Kind() {\n\tcase reflect.Struct:\n\t\treturn fieldStruct{f.sField, f.name, compileStruct(sf.Type)}\n\tcase reflect.Bool:\n\t\tf.write = encodeBool\n\t\tf.read = decodeBool\n\t\tf.requiredType = 1\n\tcase reflect.Int8:\n\t\tf.write = encodeInt8\n\t\tf.read = decodeInt8\n\t\tf.requiredType = 1\n\tcase reflect.Int16:\n\t\tf.write = encodeInt16\n\t\tf.read = decodeInt16\n\t\tf.requiredType = 2\n\tcase reflect.Int32:\n\t\tf.write = encodeInt32\n\t\tf.read = decodeInt32\n\t\tf.requiredType = 3\n\tcase reflect.Int64:\n\t\tf.write = encodeInt64\n\t\tf.read = decodeInt64\n\t\tf.requiredType = 4\n\tcase reflect.String:\n\t\tf.write = encodeString\n\t\tf.read = decodeString\n\t\tf.requiredType = 8\n\tcase reflect.Map:\n\t\tf.requiredType = 10\n\t\telem := sf.Type.Elem()\n\t\tvar elemField interface{}\n\t\tname := \"map:\" + sf.Name\n\t\tif elem.Kind() != reflect.Interface {\n\t\t\telemField = 
compileField(reflect.StructField{Type: elem, Index: []int{0}}, name)\n\t\t}\n\t\tf.write = func(w io.Writer, en *msgEncoder, fi reflect.Value) error {\n\t\t\tkeys := fi.MapKeys()\n\t\t\tfor _, key := range keys {\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tv := fi.MapIndex(key)\n\t\t\t\t\twritePrefix(en, w, []byte(key.String()), f.requiredType)\n\t\t\t\t\terr := f.write(w, en, v)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif elemField == nil {\n\t\t\t\t\t\tv := fi.MapIndex(key).Elem()\n\t\t\t\t\t\ttemp := compileField(reflect.StructField{Type: v.Type(), Index: []int{0}}, \"\")\n\t\t\t\t\t\tif f, ok := temp.(field); ok {\n\t\t\t\t\t\t\twritePrefix(en, w, []byte(key.String()), f.requiredType)\n\t\t\t\t\t\t\terr := f.write(w, en, v)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\twritePrefix(en, w, []byte(key.String()), 10)\n\t\t\t\t\t\t\tfs := temp.(fieldStruct)\n\t\t\t\t\t\t\terr := write(w, en, fs.m, v)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\twritePrefix(en, w, []byte(key.String()), 10)\n\t\t\t\t\t\tfs := elemField.(fieldStruct)\n\t\t\t\t\t\tv := fi.MapIndex(key)\n\t\t\t\t\t\terr := write(w, en, fs.m, v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t}\n\t\t\tbs := en.b[:1]\n\t\t\tbs[0] = 0\n\t\t\t_, err := w.Write(bs)\n\t\t\treturn err\n\t\t}\n\t\tf.read = func(r io.Reader, de *msgDecoder, fi reflect.Value) error {\n\n\t\t\tma := reflect.MakeMap(sf.Type)\n\n\t\t\tname, t, err := readPrefix(r, de)\n\t\t\tfor ; t != 0; name, t, err = readPrefix(r, de) {\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tkeyVal := reflect.ValueOf(name)\n\n\t\t\t\tvar val reflect.Value\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tval = reflect.New(elem)\n\t\t\t\t\terr := f.read(r, de, 
val)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif elemField == nil {\n\t\t\t\t\t\tv, err := fallbackRead(r, de)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tval = reflect.ValueOf(v)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tval = reflect.New(elem)\n\t\t\t\t\t\tfs := elemField.(fieldStruct)\n\t\t\t\t\t\terr := read(r, de, fs.m, val)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tma.SetMapIndex(keyVal, val)\n\t\t\t}\n\t\t\tfi.Set(ma)\n\t\t\treturn nil\n\t\t}\n\tcase reflect.Slice:\n\t\tf.requiredType = 9\n\t\telem := sf.Type.Elem()\n\t\tswitch elem.Kind() {\n\t\tcase reflect.Uint8: //Short-cut for byte arrays\n\t\t\tf.requiredType = 7\n\t\t\tf.write = func(w io.Writer, en *msgEncoder, fi reflect.Value) error {\n\t\t\t\tl := fi.Len()\n\t\t\t\tbs := en.b[:4]\n\t\t\t\tbinary.BigEndian.PutUint32(bs, uint32(l))\n\t\t\t\t_, err := w.Write(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t_, err = w.Write(fi.Bytes())\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tf.read = func(r io.Reader, de *msgDecoder, fi reflect.Value) error {\n\t\t\t\tbs := de.b[:4]\n\t\t\t\t_, err := r.Read(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tl := binary.BigEndian.Uint32(bs)\n\t\t\t\tout := make([]byte, l)\n\t\t\t\t_, err = r.Read(out)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tfi.SetBytes(out)\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase reflect.Int32: //Short-cut for int32 arrays\n\t\t\tf.requiredType = 11\n\t\t\tf.write = func(w io.Writer, en *msgEncoder, fi reflect.Value) error {\n\t\t\t\tl := fi.Len()\n\t\t\t\tbs := en.b[:4]\n\t\t\t\tbinary.BigEndian.PutUint32(bs, uint32(l))\n\t\t\t\t_, err := w.Write(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tdata := fi.Interface().([]int32)\n\t\t\t\tfor i := range data {\n\t\t\t\t\tbinary.BigEndian.PutUint32(bs, 
uint32(data[i]))\n\t\t\t\t\t_, err := w.Write(bs)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tf.read = func(r io.Reader, de *msgDecoder, fi reflect.Value) error {\n\t\t\t\tbs := de.b[:4]\n\t\t\t\t_, err := r.Read(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tl := binary.BigEndian.Uint32(bs)\n\t\t\t\tout := make([]int32, l)\n\t\t\t\tfor i := range out {\n\t\t\t\t\t_, err := r.Read(bs)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tout[i] = int32(binary.BigEndian.Uint32(bs))\n\t\t\t\t}\n\t\t\t\tfi.Set(reflect.ValueOf(out))\n\t\t\t\treturn nil\n\t\t\t}\n\t\tdefault:\n\t\t\tname := \"slice:\" + sf.Name\n\t\t\telemField := compileField(reflect.StructField{Type: elem, Index: []int{0}}, name)\n\t\t\tf.write = func(w io.Writer, en *msgEncoder, fi reflect.Value) error {\n\t\t\t\tl := fi.Len()\n\t\t\t\tbs := en.b[:5]\n\t\t\t\tbinary.BigEndian.PutUint32(bs[1:], uint32(l))\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tbs[0] = f.requiredType\n\t\t\t\t} else {\n\t\t\t\t\tbs[0] = 10\n\t\t\t\t}\n\t\t\t\t_, err := w.Write(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tfor i := 0; i < l; i++ {\n\t\t\t\t\t\tv := fi.Index(i)\n\t\t\t\t\t\terr := f.write(w, en, v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tf := elemField.(fieldStruct)\n\t\t\t\t\tfor i := 0; i < l; i++ {\n\t\t\t\t\t\tv := fi.Index(i)\n\t\t\t\t\t\terr := write(w, en, f.m, v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tf.read = func(r io.Reader, de *msgDecoder, fi reflect.Value) error {\n\t\t\t\tbs := de.b[:5]\n\t\t\t\t_, err := r.Read(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tif bs[0] != 
f.requiredType {\n\t\t\t\t\t\treturn ErrorIncorrectType\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif bs[0] != 10 {\n\t\t\t\t\t\treturn ErrorIncorrectType\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tl := int(binary.BigEndian.Uint32(bs[1:]))\n\t\t\t\tval := reflect.MakeSlice(sf.Type, l, l)\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tfor i := 0; i < l; i++ {\n\t\t\t\t\t\tv := val.Index(i)\n\t\t\t\t\t\terr := f.read(r, de, v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tf := elemField.(fieldStruct)\n\t\t\t\t\tfor i := 0; i < l; i++ {\n\t\t\t\t\t\tv := val.Index(i)\n\t\t\t\t\t\terr := read(r, de, f.m, v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfi.Set(val)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\tcase reflect.Float32:\n\t\tf.requiredType = 5\n\t\tf.write = encodeFloat32\n\t\tf.read = decodeFloat32\n\tcase reflect.Float64:\n\t\tf.requiredType = 6\n\t\tf.write = encodeFloat64\n\t\tf.read = decodeFloat64\n\tdefault:\n\t\tpanic(fmt.Errorf(\"Unhandled type %s for %s\", sf.Type.Kind().String(), sf.Name))\n\t}\n\treturn f\n}", "func (g *Generator) generate(typeName string) {\n\tfields := make([]Field, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeName = typeName\n\t\tfile.fields = nil\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tg.additionalImports = append(g.additionalImports, file.additionalImports...)\n\t\t\tfields = append(fields, file.fields...)\n\t\t}\n\t}\n\n\tif len(fields) == 0 {\n\t\tlog.Fatalf(\"no values defined for type %s\", typeName)\n\t}\n\n\tg.build(fields, typeName)\n}", "func (p *Planner) addField(ref int) {\n\tfieldName := p.visitor.Operation.FieldNameString(ref)\n\n\talias := ast.Alias{\n\t\tIsDefined: p.visitor.Operation.FieldAliasIsDefined(ref),\n\t}\n\n\tif alias.IsDefined {\n\t\taliasBytes := 
p.visitor.Operation.FieldAliasBytes(ref)\n\t\talias.Name = p.upstreamOperation.Input.AppendInputBytes(aliasBytes)\n\t}\n\n\ttypeName := p.visitor.Walker.EnclosingTypeDefinition.NameString(p.visitor.Definition)\n\tfor i := range p.visitor.Config.Fields {\n\t\tisDesiredField := p.visitor.Config.Fields[i].TypeName == typeName &&\n\t\t\tp.visitor.Config.Fields[i].FieldName == fieldName\n\n\t\t// chech that we are on a desired field and field path contains a single element - mapping is plain\n\t\tif isDesiredField && len(p.visitor.Config.Fields[i].Path) == 1 {\n\t\t\t// define alias when mapping path differs from fieldName and no alias has been defined\n\t\t\tif p.visitor.Config.Fields[i].Path[0] != fieldName && !alias.IsDefined {\n\t\t\t\talias.IsDefined = true\n\t\t\t\taliasBytes := p.visitor.Operation.FieldNameBytes(ref)\n\t\t\t\talias.Name = p.upstreamOperation.Input.AppendInputBytes(aliasBytes)\n\t\t\t}\n\n\t\t\t// override fieldName with mapping path value\n\t\t\tfieldName = p.visitor.Config.Fields[i].Path[0]\n\n\t\t\t// when provided field is a root type field save new field name\n\t\t\tif ref == p.rootFieldRef {\n\t\t\t\tp.rootFieldName = fieldName\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfield := p.upstreamOperation.AddField(ast.Field{\n\t\tName: p.upstreamOperation.Input.AppendInputString(fieldName),\n\t\tAlias: alias,\n\t})\n\n\tselection := ast.Selection{\n\t\tKind: ast.SelectionKindField,\n\t\tRef: field.Ref,\n\t}\n\n\tp.upstreamOperation.AddSelection(p.nodes[len(p.nodes)-1].Ref, selection)\n\tp.nodes = append(p.nodes, field)\n}", "func (dr *defaultRender) OnParseField(out *jen.File, methodDefinition *jen.Group, field *atool.Arg, file *atool.File) {\n}", "func (sb *schemaBuilder) buildField(field reflect.StructField) (*graphql.Field, error) {\n\tretType, err := sb.getType(field.Type)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &graphql.Field{\n\t\tResolve: func(ctx context.Context, source, args interface{}, selectionSet *graphql.SelectionSet) 
(interface{}, error) {\n\t\t\tvalue := reflect.ValueOf(source)\n\t\t\tif value.Kind() == reflect.Ptr {\n\t\t\t\tvalue = value.Elem()\n\t\t\t}\n\t\t\treturn value.FieldByIndex(field.Index).Interface(), nil\n\t\t},\n\t\tType: retType,\n\t\tParseArguments: nilParseArguments,\n\t}, nil\n}", "func (f *File) genDecl(node ast.Node) bool {\n\tdecl, ok := node.(*ast.GenDecl)\n\tif !ok || decl.Tok != token.TYPE {\n\t\t// We only care about type declarations.\n\t\treturn true\n\t}\n\tfor _, spec := range decl.Specs {\n\t\ttypeSpec := spec.(*ast.TypeSpec)\n\t\tstructDecl, ok := typeSpec.Type.(*ast.StructType)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif typeSpec.Name.Name != f.typeName {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Printf(\"Handling %s\\n\", typeSpec.Name.Name)\n\t\tfor _, field := range structDecl.Fields.List {\n\t\t\tif field.Tag != nil && field.Tag.Value == \"`ignore:\\\"true\\\"`\" {\n\t\t\t\tvar name string\n\t\t\t\tif len(field.Names) != 0 {\n\t\t\t\t\tname = field.Names[0].Name\n\t\t\t\t} else {\n\t\t\t\t\tname = \"<delegate>\"\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\"\\t ignoring field %s %v\\n\", name, field.Type)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tisStringer := false\n\t\t\tif field.Tag != nil && field.Tag.Value == \"`stringer:\\\"true\\\"`\" { // TODO: Check if we do that a bit smarter\n\t\t\t\tisStringer = true\n\t\t\t}\n\t\t\thasLocker := \"\"\n\t\t\tif field.Tag != nil && strings.HasPrefix(field.Tag.Value, \"`hasLocker:\\\"\") { // TODO: Check if we do that a bit smarter\n\t\t\t\thasLocker = strings.TrimPrefix(field.Tag.Value, \"`hasLocker:\\\"\")\n\t\t\t\thasLocker = strings.TrimSuffix(hasLocker, \"\\\"`\")\n\t\t\t}\n\t\t\tif len(field.Names) == 0 {\n\t\t\t\tfmt.Printf(\"\\t adding delegate\\n\")\n\t\t\t\tswitch ft := field.Type.(type) {\n\t\t\t\tcase *ast.Ident:\n\t\t\t\t\tf.fields = append(f.fields, Field{\n\t\t\t\t\t\tfieldType: ft,\n\t\t\t\t\t\tisDelegate: true,\n\t\t\t\t\t\tisStringer: isStringer,\n\t\t\t\t\t\thasLocker: 
hasLocker,\n\t\t\t\t\t})\n\t\t\t\t\tcontinue\n\t\t\t\tcase *ast.StarExpr:\n\t\t\t\t\tswitch set := ft.X.(type) {\n\t\t\t\t\tcase *ast.Ident:\n\t\t\t\t\t\tf.fields = append(f.fields, Field{\n\t\t\t\t\t\t\tfieldType: set,\n\t\t\t\t\t\t\tisDelegate: true,\n\t\t\t\t\t\t\tisStringer: isStringer,\n\t\t\t\t\t\t\thasLocker: hasLocker,\n\t\t\t\t\t\t})\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tpanic(fmt.Sprintf(\"Only pointer to struct delegates supported now. Type %T\", field.Type))\n\t\t\t\t\t}\n\t\t\t\tcase *ast.SelectorExpr:\n\t\t\t\t\tf.fields = append(f.fields, Field{\n\t\t\t\t\t\tfieldType: ft.Sel,\n\t\t\t\t\t\tisDelegate: true,\n\t\t\t\t\t\tisStringer: isStringer,\n\t\t\t\t\t\thasLocker: hasLocker,\n\t\t\t\t\t})\n\t\t\t\t\tcontinue\n\t\t\t\tdefault:\n\t\t\t\t\tpanic(fmt.Sprintf(\"Only struct delegates supported now. Type %T\", field.Type))\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Printf(\"\\t adding field %s %v\\n\", field.Names[0].Name, field.Type)\n\t\t\tf.fields = append(f.fields, Field{\n\t\t\t\tname: field.Names[0].Name,\n\t\t\t\tfieldType: field.Type,\n\t\t\t\tisStringer: isStringer,\n\t\t\t\thasLocker: hasLocker,\n\t\t\t})\n\t\t}\n\t}\n\treturn false\n}", "func AddIndependentPropertyGeneratorsForJsonField(gens map[string]gopter.Gen) {\n\tgens[\"SourceField\"] = gen.PtrOf(gen.AlphaString())\n}", "func (c *TypeConverter) genStructConverter(\n\tkeyPrefix string,\n\tfromPrefix string,\n\tindent string,\n\tfromFields []*compile.FieldSpec,\n\ttoFields []*compile.FieldSpec,\n\tfieldMap map[string]FieldMapperEntry,\n\tprevKeyPrefixes []string,\n) error {\n\n\tfor i := 0; i < len(toFields); i++ {\n\t\ttoField := toFields[i]\n\n\t\t// Check for same named field\n\t\tvar fromField *compile.FieldSpec\n\t\tfor j := 0; j < len(fromFields); j++ {\n\t\t\tif fromFields[j].Name == toField.Name {\n\t\t\t\tfromField = fromFields[j]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\ttoSubIdentifier := keyPrefix + PascalCase(toField.Name)\n\t\ttoIdentifier := \"out.\" + 
toSubIdentifier\n\t\toverriddenIdentifier := \"\"\n\t\tfromIdentifier := \"\"\n\n\t\t// Check for mapped field\n\t\tvar overriddenField *compile.FieldSpec\n\n\t\t// check if this toField satisfies a fieldMap transform\n\t\ttransformFrom, ok := fieldMap[toSubIdentifier]\n\t\tif ok {\n\t\t\t// no existing direct fromField, just assign the transform\n\t\t\tif fromField == nil {\n\t\t\t\tfromField = transformFrom.Field\n\t\t\t\tif c.useRecurGen {\n\t\t\t\t\tfromIdentifier = \"inOriginal.\" + transformFrom.QualifiedName\n\t\t\t\t} else {\n\t\t\t\t\tfromIdentifier = \"in.\" + transformFrom.QualifiedName\n\t\t\t\t}\n\t\t\t\t// else there is a conflicting direct fromField\n\t\t\t} else {\n\t\t\t\t// depending on Override flag either the direct fromField or transformFrom is the OverrideField\n\t\t\t\tif transformFrom.Override {\n\t\t\t\t\t// check for required/optional setting\n\t\t\t\t\tif !transformFrom.Field.Required {\n\t\t\t\t\t\toverriddenField = fromField\n\t\t\t\t\t\toverriddenIdentifier = \"in.\" + fromPrefix +\n\t\t\t\t\t\t\tPascalCase(overriddenField.Name)\n\t\t\t\t\t}\n\t\t\t\t\t// If override is true and the new field is required,\n\t\t\t\t\t// there's a default instantiation value and will always\n\t\t\t\t\t// overwrite.\n\t\t\t\t\tfromField = transformFrom.Field\n\t\t\t\t\tif c.useRecurGen {\n\t\t\t\t\t\tfromIdentifier = \"inOriginal.\" + transformFrom.QualifiedName\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfromIdentifier = \"in.\" + transformFrom.QualifiedName\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// If override is false and the from field is required,\n\t\t\t\t\t// From is always populated and will never be overwritten.\n\t\t\t\t\tif !fromField.Required {\n\t\t\t\t\t\toverriddenField = transformFrom.Field\n\t\t\t\t\t\tif c.useRecurGen {\n\t\t\t\t\t\t\tfromIdentifier = \"inOriginal.\" + transformFrom.QualifiedName\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\toverriddenIdentifier = \"in.\" + 
transformFrom.QualifiedName\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// neither direct or transform fromField was found\n\t\tif fromField == nil {\n\t\t\t// search the fieldMap toField identifiers for matching identifier prefix\n\t\t\t// e.g. the current toField is a struct and something within it has a transform\n\t\t\t// a full match identifiers for transform non-struct types would have been caught above\n\t\t\thasStructFieldMapping := false\n\t\t\tfor toID := range fieldMap {\n\t\t\t\tif strings.HasPrefix(toID, toSubIdentifier) {\n\t\t\t\t\thasStructFieldMapping = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// if there's no fromField and no fieldMap transform that could be applied\n\t\t\tif !hasStructFieldMapping {\n\t\t\t\tvar bypass bool\n\t\t\t\t// check if required field is filled from other resources\n\t\t\t\t// it can be used to set system default (customized tracing /auth required for clients),\n\t\t\t\t// or header propagating\n\t\t\t\tif c.optionalEntries != nil {\n\t\t\t\t\tfor toID := range c.optionalEntries {\n\t\t\t\t\t\tif strings.HasPrefix(toID, toSubIdentifier) {\n\t\t\t\t\t\t\tbypass = true\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// the toField is either covered by optionalEntries, or optional and\n\t\t\t\t// there's nothing that maps to it or its sub-fields so we should skip it\n\t\t\t\tif bypass || !toField.Required {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// unrecoverable error\n\t\t\t\treturn errors.Errorf(\n\t\t\t\t\t\"required toField %s does not have a valid fromField mapping\",\n\t\t\t\t\ttoField.Name,\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\n\t\tif fromIdentifier == \"\" && fromField != nil {\n\t\t\t// should we set this if no fromField ??\n\t\t\tfromIdentifier = \"in.\" + fromPrefix + PascalCase(fromField.Name)\n\t\t}\n\n\t\tif prevKeyPrefixes == nil {\n\t\t\tprevKeyPrefixes = []string{}\n\t\t}\n\n\t\tvar overriddenFieldName string\n\t\tvar overriddenFieldType compile.TypeSpec\n\t\tif overriddenField 
!= nil {\n\t\t\toverriddenFieldName = overriddenField.Name\n\t\t\toverriddenFieldType = overriddenField.Type\n\t\t}\n\n\t\t// Override thrift type names to avoid naming collisions between endpoint\n\t\t// and client types.\n\t\tswitch toFieldType := compile.RootTypeSpec(toField.Type).(type) {\n\t\tcase\n\t\t\t*compile.BoolSpec,\n\t\t\t*compile.I8Spec,\n\t\t\t*compile.I16Spec,\n\t\t\t*compile.I32Spec,\n\t\t\t*compile.EnumSpec,\n\t\t\t*compile.I64Spec,\n\t\t\t*compile.DoubleSpec,\n\t\t\t*compile.StringSpec:\n\n\t\t\terr := c.genConverterForPrimitive(\n\t\t\t\ttoField,\n\t\t\t\ttoIdentifier,\n\t\t\t\tfromField,\n\t\t\t\tfromIdentifier,\n\t\t\t\toverriddenField,\n\t\t\t\toverriddenIdentifier,\n\t\t\t\tindent,\n\t\t\t\tprevKeyPrefixes,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase *compile.BinarySpec:\n\t\t\tfor _, line := range checkOptionalNil(indent, c.uninitialized, toIdentifier, prevKeyPrefixes, c.useRecurGen) {\n\t\t\t\tc.append(line)\n\t\t\t}\n\t\t\tc.append(toIdentifier, \" = []byte(\", fromIdentifier, \")\")\n\t\tcase *compile.StructSpec:\n\t\t\tvar (\n\t\t\t\tstFromPrefix = fromPrefix\n\t\t\t\tstFromType compile.TypeSpec\n\t\t\t\tfromTypeName string\n\t\t\t)\n\t\t\tif fromField != nil {\n\t\t\t\tstFromType = fromField.Type\n\t\t\t\tstFromPrefix = fromPrefix + PascalCase(fromField.Name)\n\n\t\t\t\tfromTypeName, _ = c.getIdentifierName(stFromType)\n\t\t\t}\n\n\t\t\ttoTypeName, err := c.getIdentifierName(toFieldType)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif converterMethodName, ok := c.convStructMap[toFieldType.Name]; ok {\n\t\t\t\t// the converter for this struct has already been generated, so just use it\n\t\t\t\tc.append(indent, \"out.\", keyPrefix+PascalCase(toField.Name), \" = \", converterMethodName, \"(\", fromIdentifier, \")\")\n\t\t\t} else if c.useRecurGen && fromTypeName != \"\" {\n\t\t\t\t// generate a callable converter inside function literal\n\t\t\t\terr = 
c.genConverterForStructWrapped(\n\t\t\t\t\ttoField,\n\t\t\t\t\ttoFieldType,\n\t\t\t\t\ttoTypeName,\n\t\t\t\t\ttoSubIdentifier,\n\t\t\t\t\tfromTypeName,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t\tstFromType,\n\t\t\t\t\tfieldMap,\n\t\t\t\t\tprevKeyPrefixes,\n\t\t\t\t\tindent,\n\t\t\t\t)\n\t\t\t} else {\n\t\t\t\terr = c.genConverterForStruct(\n\t\t\t\t\ttoField.Name,\n\t\t\t\t\ttoFieldType,\n\t\t\t\t\ttoField.Required,\n\t\t\t\t\tstFromType,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t\tkeyPrefix+PascalCase(toField.Name),\n\t\t\t\t\tstFromPrefix,\n\t\t\t\t\tindent,\n\t\t\t\t\tfieldMap,\n\t\t\t\t\tprevKeyPrefixes,\n\t\t\t\t)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase *compile.ListSpec:\n\t\t\terr := c.genConverterForList(\n\t\t\t\ttoFieldParam{\n\t\t\t\t\ttoFieldType,\n\t\t\t\t\ttoField.Name,\n\t\t\t\t\ttoField.Required,\n\t\t\t\t\ttoIdentifier,\n\t\t\t\t},\n\t\t\t\tfromFieldParam{\n\t\t\t\t\tfromField.Type,\n\t\t\t\t\tfromField.Name,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t},\n\t\t\t\toverriddenFieldParam{\n\t\t\t\t\toverriddenFieldType,\n\t\t\t\t\toverriddenFieldName,\n\t\t\t\t\toverriddenIdentifier,\n\t\t\t\t},\n\t\t\t\tindent,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase *compile.MapSpec:\n\t\t\terr := c.genConverterForMap(\n\t\t\t\ttoFieldParam{\n\t\t\t\t\ttoFieldType,\n\t\t\t\t\ttoField.Name,\n\t\t\t\t\ttoField.Required,\n\t\t\t\t\ttoIdentifier,\n\t\t\t\t},\n\t\t\t\tfromFieldParam{\n\t\t\t\t\tfromField.Type,\n\t\t\t\t\tfromField.Name,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t},\n\t\t\t\toverriddenFieldParam{\n\t\t\t\t\toverriddenFieldType,\n\t\t\t\t\toverriddenFieldName,\n\t\t\t\t\toverriddenIdentifier,\n\t\t\t\t},\n\t\t\t\tindent,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\t// fmt.Printf(\"Unknown type %s for field %s \\n\",\n\t\t\t// \ttoField.Type.TypeCode().String(), toField.Name,\n\t\t\t// )\n\n\t\t\t// pkgName, err := 
h.TypePackageName(toField.Type.IDLFile())\n\t\t\t// if err != nil {\n\t\t\t// \treturn nil, err\n\t\t\t// }\n\t\t\t// typeName := pkgName + \".\" + toField.Type.ThriftName()\n\t\t\t// line := toIdentifier + \"(*\" + typeName + \")\" + postfix\n\t\t\t// c.Lines = append(c.Lines, line)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (b *Builder) Field(keypath string) *Builder {\n\tb.p.RegisterTransformation(impl.Field(keypath))\n\treturn b\n}", "func (b *basic) ToGoCode(n *ecsgen.Node) (string, error) {\n\t// we can only generate a Go struct definition for an Object, verify\n\t// we're not shooting ourselves in the foot\n\tif !n.IsObject() {\n\t\treturn \"\", fmt.Errorf(\"node %s is not an object\", n.Path)\n\t}\n\n\t// Now enumerate the Node's fields and sort the keys so the resulting Go code\n\t// is deterministically generated\n\tfieldKeys := []string{}\n\n\tfor key := range n.Children {\n\t\tfieldKeys = append(fieldKeys, key)\n\t}\n\n\tsort.Strings(fieldKeys)\n\n\t// Create a new buffer to write the struct definition to\n\tbuf := new(strings.Builder)\n\n\t// comment and type definition\n\tbuf.WriteString(fmt.Sprintf(\"// %s defines the object located at ECS path %s.\", n.TypeIdent().Pascal(), n.Path))\n\tbuf.WriteString(\"\\n\")\n\tbuf.WriteString(fmt.Sprintf(\"type %s struct {\", n.TypeIdent().Pascal()))\n\tbuf.WriteString(\"\\n\")\n\n\t// Enumerate the fields and generate their field definition, adding it\n\t// to the buffer as a line item.\n\tfor _, k := range fieldKeys {\n\t\tscalarField := n.Children[k]\n\t\tbuf.WriteString(\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"\\t%s %s `json:\\\"%s,omitempty\\\" yaml:\\\"%s,omitempty\\\" ecs:\\\"%s\\\"`\",\n\t\t\t\tscalarField.FieldIdent().Pascal(),\n\t\t\t\tGoFieldType(scalarField),\n\t\t\t\tscalarField.Name,\n\t\t\t\tscalarField.Name,\n\t\t\t\tscalarField.Path,\n\t\t\t),\n\t\t)\n\t\tbuf.WriteString(\"\\n\")\n\t}\n\n\t// Close the type definition and return the result\n\tbuf.WriteString(\"}\")\n\tbuf.WriteString(\"\\n\")\n\n\t// if the 
user included the JSON operator flag, add the implementation\n\tif b.IncludeJSONMarshal {\n\t\t// Now we implement at json.Marshaler implementation for each specific type that\n\t\t// removes any nested JSON types that might exist.\n\t\t//\n\t\t// We do this by enumerating every field in the type and check to see\n\t\t// if it's got a zero value.\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\"// MarshalJSON implements the json.Marshaler interface and removes zero values from returned JSON.\")\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"func (b %s) MarshalJSON() ([]byte, error) {\",\n\t\t\t\tn.TypeIdent().Pascal(),\n\t\t\t),\n\t\t)\n\t\tbuf.WriteString(\"\\n\")\n\n\t\t// Define the result struct we will populate non-zero fields with\n\t\tbuf.WriteString(\"\\tres := map[string]interface{}{}\")\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\"\\n\")\n\n\t\t// enumerate the fields for the object fields\n\t\tfor _, fieldName := range fieldKeys {\n\t\t\tfield := n.Children[fieldName]\n\t\t\tbuf.WriteString(\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"\\tif val := reflect.ValueOf(b.%s); !val.IsZero() {\", field.FieldIdent().Pascal(),\n\t\t\t\t),\n\t\t\t)\n\t\t\tbuf.WriteString(\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"\\t\\tres[\\\"%s\\\"] = b.%s\",\n\t\t\t\t\tfield.Name,\n\t\t\t\t\tfield.FieldIdent().Pascal(),\n\t\t\t\t),\n\t\t\t)\n\t\t\tbuf.WriteString(\"\\t}\")\n\t\t\tbuf.WriteString(\"\\n\")\n\t\t\tbuf.WriteString(\"\\n\")\n\t\t}\n\n\t\t// add a line spacer and return the marshaled JSON result\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\"\\treturn json.Marshal(res)\")\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\"}\")\n\t\tbuf.WriteString(\"\\n\")\n\t}\n\n\treturn buf.String(), nil\n}", "func (fs *FileStat) GenerateFields() (string, error) {\n\ttb, e := fs.modTime.MarshalBinary()\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\tcb, e := fs.compressedBytes()\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\n\tformat := `\"%s\", 
\"%s\", %d, 0%o, binfs.MustHexDecode(\"%x\"), %t, binfs.MustHexDecode(\"%x\")`\n\treturn fmt.Sprintf(format,\n\t\tfs.path,\n\t\tfs.name,\n\t\tfs.size,\n\t\tfs.mode,\n\t\ttb,\n\t\tfs.isDir,\n\t\tcb,\n\t), nil\n}", "func GenerateBaseFields(conf CurveConfig) error {\n\tif err := goff.GenerateFF(\"fr\", \"Element\", conf.RTorsion, filepath.Join(conf.OutputDir, \"fr\"), false); err != nil {\n\t\treturn err\n\t}\n\tif err := goff.GenerateFF(\"fp\", \"Element\", conf.FpModulus, filepath.Join(conf.OutputDir, \"fp\"), false); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (n DependencyNode) Codegen(prog *Program) (value.Value, error) { return nil, nil }", "func (g *mapGen) genType() {\n\tg.P(\"type \", g.typeName, \" struct {\")\n\tg.P(\"m *map[\", getGoType(g.GeneratedFile, g.field.Message.Fields[0]), \"]\", getGoType(g.GeneratedFile, g.field.Message.Fields[1]))\n\tg.P(\"}\")\n\tg.P()\n}", "func (ec *executionContext) ___Field(ctx context.Context, sel []query.Selection, obj *introspection.Field) graphql.Marshaler {\n\tfields := graphql.CollectFields(ec.Doc, sel, __FieldImplementors, ec.Variables)\n\n\tout := graphql.NewOrderedMap(len(fields))\n\tfor i, field := range fields {\n\t\tout.Keys[i] = field.Alias\n\n\t\tswitch field.Name {\n\t\tcase \"__typename\":\n\t\t\tout.Values[i] = graphql.MarshalString(\"__Field\")\n\t\tcase \"name\":\n\t\t\tout.Values[i] = ec.___Field_name(ctx, field, obj)\n\t\tcase \"description\":\n\t\t\tout.Values[i] = ec.___Field_description(ctx, field, obj)\n\t\tcase \"args\":\n\t\t\tout.Values[i] = ec.___Field_args(ctx, field, obj)\n\t\tcase \"type\":\n\t\t\tout.Values[i] = ec.___Field_type(ctx, field, obj)\n\t\tcase \"isDeprecated\":\n\t\t\tout.Values[i] = ec.___Field_isDeprecated(ctx, field, obj)\n\t\tcase \"deprecationReason\":\n\t\t\tout.Values[i] = ec.___Field_deprecationReason(ctx, field, obj)\n\t\tdefault:\n\t\t\tpanic(\"unknown field \" + strconv.Quote(field.Name))\n\t\t}\n\t}\n\n\treturn out\n}", "func (m *BgpConfiguration) 
GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"asn\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetInt32Value()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetAsn(val)\n }\n return nil\n }\n res[\"ipAddress\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetIpAddress(val)\n }\n return nil\n }\n res[\"localIpAddress\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetLocalIpAddress(val)\n }\n return nil\n }\n res[\"@odata.type\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOdataType(val)\n }\n return nil\n }\n res[\"peerIpAddress\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPeerIpAddress(val)\n }\n return nil\n }\n return res\n}", "func (x StructField) Generate(r *rand.Rand, size int) reflect.Value {\n\tname, _ := quick.Value(reflect.TypeOf(\"\"), r)\n\tfor {\n\t\treturn reflect.ValueOf(StructField{\n\t\t\tName: name.String(),\n\t\t\tValue: Generate(r, size, false, false).Interface().(Value),\n\t\t})\n\t}\n}", "func (g *Generator) collectAndGenerate(typeName string, genFn GeneratorFunc) {\n\tfields := make([]Field, 0, 100)\n\timports := make([]Import, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this 
run of the walker.\n\t\tfile.typeName = typeName\n\t\tfile.fields = nil\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tfields = append(fields, file.fields...)\n\t\t\timports = append(imports, file.imports...)\n\t\t}\n\t}\n\n\tgenFn(typeName, fields, imports)\n\n}", "func execNewField(_ int, p *gop.Context) {\n\targs := p.GetArgs(5)\n\tret := types.NewField(token.Pos(args[0].(int)), args[1].(*types.Package), args[2].(string), args[3].(types.Type), args[4].(bool))\n\tp.Ret(5, ret)\n}", "func CreateField(prefix string, field *typast.Field) *Field {\n\t// NOTE: mimic kelseyhightower/envconfig struct tags\n\n\tname := field.Get(\"envconfig\")\n\tif name == \"\" {\n\t\tname = strings.ToUpper(field.Names[0])\n\t}\n\n\treturn &Field{\n\t\tKey: fmt.Sprintf(\"%s_%s\", prefix, name),\n\t\tDefault: field.Get(\"default\"),\n\t\tRequired: field.Get(\"required\") == \"true\",\n\t}\n}", "func (p *Plugin) generateRedisHashFieldFunc(data *generateData) {\n\n\ttype FiledType struct {\n\t\t*generateField\n\t\t*generateData\n\t}\n\n\tfor _, field := range data.Fields {\n\n\t\tfieldData := &FiledType{}\n\t\tfieldData.generateData = data\n\t\tfieldData.generateField = field\n\n\t\tgetTemplateName := \"\"\n\t\tsetTemplateName := \"\"\n\t\ttpy := descriptor.FieldDescriptorProto_Type_value[field.Type]\n\t\tswitch descriptor.FieldDescriptorProto_Type(tpy) {\n\t\tcase 
descriptor.FieldDescriptorProto_TYPE_DOUBLE,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_FLOAT,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_INT64,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_UINT64,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_INT32,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_UINT32,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_FIXED64,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_SFIXED64,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_FIXED32,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_SFIXED32,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_BOOL,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_STRING:\n\t\t\tgetTemplateName = getBasicTypeFromRedisHashFuncTemplate\n\t\t\tsetTemplateName = setBasicTypeFromRedisHashFuncTemplate\n\t\tcase descriptor.FieldDescriptorProto_TYPE_ENUM:\n\t\t\tgetTemplateName = getBasicTypeFromRedisHashFuncTemplate\n\t\t\tfieldData.RedisType = \"Int64\"\n\t\t\tfieldData.RedisTypeReplace = true\n\t\t\tsetTemplateName = setBasicTypeFromRedisHashFuncTemplate\n\t\tcase descriptor.FieldDescriptorProto_TYPE_MESSAGE:\n\t\t\tgetTemplateName = getMessageTypeFromRedisHashFuncTemplate\n\t\t\tfieldData.RedisType = \"Bytes\"\n\t\t\tsetTemplateName = setMessageTypeFromRedisHashFuncTemplate\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\n\t\tif field.Getter {\n\t\t\tif getTemplateName != \"\" {\n\t\t\t\ttmpl, _ := template.New(\"hash-get\").Parse(getTemplateName)\n\t\t\t\tif err := tmpl.Execute(p.Buffer, fieldData); err != nil {\n\t\t\t\t\tlog.Println(getTemplateName, fieldData)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif field.Setter {\n\t\t\tif setTemplateName != \"\" {\n\t\t\t\ttmpl, _ := template.New(\"hash-set\").Parse(setTemplateName)\n\t\t\t\tif err := tmpl.Execute(p.Buffer, fieldData); err != nil {\n\t\t\t\t\tlog.Println(setTemplateName, fieldData)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (ec *executionContext) ___Field(ctx context.Context, sel ast.SelectionSet, obj *introspection.Field) graphql.Marshaler {\n\tfields := graphql.CollectFields(ctx, sel, 
__FieldImplementors)\n\n\tout := graphql.NewOrderedMap(len(fields))\n\tfor i, field := range fields {\n\t\tout.Keys[i] = field.Alias\n\n\t\tswitch field.Name {\n\t\tcase \"__typename\":\n\t\t\tout.Values[i] = graphql.MarshalString(\"__Field\")\n\t\tcase \"name\":\n\t\t\tout.Values[i] = ec.___Field_name(ctx, field, obj)\n\t\tcase \"description\":\n\t\t\tout.Values[i] = ec.___Field_description(ctx, field, obj)\n\t\tcase \"args\":\n\t\t\tout.Values[i] = ec.___Field_args(ctx, field, obj)\n\t\tcase \"type\":\n\t\t\tout.Values[i] = ec.___Field_type(ctx, field, obj)\n\t\tcase \"isDeprecated\":\n\t\t\tout.Values[i] = ec.___Field_isDeprecated(ctx, field, obj)\n\t\tcase \"deprecationReason\":\n\t\t\tout.Values[i] = ec.___Field_deprecationReason(ctx, field, obj)\n\t\tdefault:\n\t\t\tpanic(\"unknown field \" + strconv.Quote(field.Name))\n\t\t}\n\t}\n\n\treturn out\n}", "func getFieldConstructor(e ast.Expr) string {\n\tswitch t := e.(type) {\n\tcase *ast.StarExpr:\n\t\tswitch t.X.(type) {\n\t\tcase *ast.StarExpr:\n\t\t\tpanic(\"Ponter on pointers is not supported in annotation struct\")\n\t\tcase *ast.ArrayType:\n\t\t\tpanic(\"Pointer on arrays is not supported in annotation struct\")\n\t\tdefault:\n\t\t\treturn \"&\" + getFieldConstructor(t.X)\n\t\t}\n\tcase *ast.ArrayType:\n\t\tswitch elemType := t.Elt.(type) {\n\t\tcase *ast.StarExpr:\n\t\t\tpanic(\"Array of pointers is not supported in annotation struct\")\n\t\tcase *ast.ArrayType:\n\t\t\tpanic(\"Array of arrays is not supported in annotation struct\")\n\t\tdefault:\n\t\t\treturn \"[]\" + getFieldConstructor(elemType)\n\t\t}\n\tcase *ast.Ident:\n\t\tswitch t.Name {\n\t\tcase \"int\", \"int8\", \"int16\", \"int32\", \"int64\",\n\t\t\t\"uint\", \"uint8\", \"uint16\", \"uint32\", \"uint64\",\n\t\t\t\"float32\", \"float64\", \"byte\", \"rune\", \"string\":\n\t\t\treturn t.Name + \"{\"\n\t\tcase \"complex64\", \"complex128\", \"uintptr\":\n\t\t\tpanic(\"Type '\" + t.Name + \"' is not supported in annotation 
struct\")\n\t\tdefault:\n\t\t\treturn t.Name + \"{\"\n\t\t}\n\tdefault:\n\t\tpanic(\"Unsupported field type in annotation\")\n\t}\n}", "func (ec *executionContext) ___Field(sel []query.Selection, obj *introspection.Field) graphql.Marshaler {\n\tfields := graphql.CollectFields(ec.doc, sel, __FieldImplementors, ec.variables)\n\tout := graphql.NewOrderedMap(len(fields))\n\tfor i, field := range fields {\n\t\tout.Keys[i] = field.Alias\n\n\t\tswitch field.Name {\n\t\tcase \"__typename\":\n\t\t\tout.Values[i] = graphql.MarshalString(\"__Field\")\n\t\tcase \"name\":\n\t\t\tout.Values[i] = ec.___Field_name(field, obj)\n\t\tcase \"description\":\n\t\t\tout.Values[i] = ec.___Field_description(field, obj)\n\t\tcase \"args\":\n\t\t\tout.Values[i] = ec.___Field_args(field, obj)\n\t\tcase \"type\":\n\t\t\tout.Values[i] = ec.___Field_type(field, obj)\n\t\tcase \"isDeprecated\":\n\t\t\tout.Values[i] = ec.___Field_isDeprecated(field, obj)\n\t\tcase \"deprecationReason\":\n\t\t\tout.Values[i] = ec.___Field_deprecationReason(field, obj)\n\t\tdefault:\n\t\t\tpanic(\"unknown field \" + strconv.Quote(field.Name))\n\t\t}\n\t}\n\n\treturn out\n}", "func (ec *executionContext) ___Field(ctx context.Context, sel ast.SelectionSet, obj *introspection.Field) graphql.Marshaler {\n\tfields := graphql.CollectFields(ctx, sel, __FieldImplementors)\n\n\tout := graphql.NewOrderedMap(len(fields))\n\tinvalid := false\n\tfor i, field := range fields {\n\t\tout.Keys[i] = field.Alias\n\n\t\tswitch field.Name {\n\t\tcase \"__typename\":\n\t\t\tout.Values[i] = graphql.MarshalString(\"__Field\")\n\t\tcase \"name\":\n\t\t\tout.Values[i] = ec.___Field_name(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"description\":\n\t\t\tout.Values[i] = ec.___Field_description(ctx, field, obj)\n\t\tcase \"args\":\n\t\t\tout.Values[i] = ec.___Field_args(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase 
\"type\":\n\t\t\tout.Values[i] = ec.___Field_type(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"isDeprecated\":\n\t\t\tout.Values[i] = ec.___Field_isDeprecated(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"deprecationReason\":\n\t\t\tout.Values[i] = ec.___Field_deprecationReason(ctx, field, obj)\n\t\tdefault:\n\t\t\tpanic(\"unknown field \" + strconv.Quote(field.Name))\n\t\t}\n\t}\n\n\tif invalid {\n\t\treturn graphql.Null\n\t}\n\treturn out\n}", "func (ec *executionContext) ___Field(ctx context.Context, sel ast.SelectionSet, obj *introspection.Field) graphql.Marshaler {\n\tfields := graphql.CollectFields(ctx, sel, __FieldImplementors)\n\n\tout := graphql.NewOrderedMap(len(fields))\n\tinvalid := false\n\tfor i, field := range fields {\n\t\tout.Keys[i] = field.Alias\n\n\t\tswitch field.Name {\n\t\tcase \"__typename\":\n\t\t\tout.Values[i] = graphql.MarshalString(\"__Field\")\n\t\tcase \"name\":\n\t\t\tout.Values[i] = ec.___Field_name(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"description\":\n\t\t\tout.Values[i] = ec.___Field_description(ctx, field, obj)\n\t\tcase \"args\":\n\t\t\tout.Values[i] = ec.___Field_args(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"type\":\n\t\t\tout.Values[i] = ec.___Field_type(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"isDeprecated\":\n\t\t\tout.Values[i] = ec.___Field_isDeprecated(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"deprecationReason\":\n\t\t\tout.Values[i] = ec.___Field_deprecationReason(ctx, field, obj)\n\t\tdefault:\n\t\t\tpanic(\"unknown field \" + strconv.Quote(field.Name))\n\t\t}\n\t}\n\n\tif invalid {\n\t\treturn graphql.Null\n\t}\n\treturn out\n}", "func (ec 
*executionContext) ___Field(ctx context.Context, sel ast.SelectionSet, obj *introspection.Field) graphql.Marshaler {\n\tfields := graphql.CollectFields(ctx, sel, __FieldImplementors)\n\n\tout := graphql.NewOrderedMap(len(fields))\n\tinvalid := false\n\tfor i, field := range fields {\n\t\tout.Keys[i] = field.Alias\n\n\t\tswitch field.Name {\n\t\tcase \"__typename\":\n\t\t\tout.Values[i] = graphql.MarshalString(\"__Field\")\n\t\tcase \"name\":\n\t\t\tout.Values[i] = ec.___Field_name(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"description\":\n\t\t\tout.Values[i] = ec.___Field_description(ctx, field, obj)\n\t\tcase \"args\":\n\t\t\tout.Values[i] = ec.___Field_args(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"type\":\n\t\t\tout.Values[i] = ec.___Field_type(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"isDeprecated\":\n\t\t\tout.Values[i] = ec.___Field_isDeprecated(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"deprecationReason\":\n\t\t\tout.Values[i] = ec.___Field_deprecationReason(ctx, field, obj)\n\t\tdefault:\n\t\t\tpanic(\"unknown field \" + strconv.Quote(field.Name))\n\t\t}\n\t}\n\n\tif invalid {\n\t\treturn graphql.Null\n\t}\n\treturn out\n}", "func (f *File) genDecl(node ast.Node) bool {\n\tdecl, ok := node.(*ast.GenDecl)\n\tif !ok || decl.Tok != token.TYPE { // We only care about Type declarations.\n\t\treturn true\n\t}\n\t// The name of the type of the constants we are declaring.\n\t// Can change if this is a multi-element declaration.\n\ttyp := \"\"\n\t// Loop over the elements of the declaration. 
Each element is a ValueSpec:\n\t// a list of names possibly followed by a type, possibly followed by values.\n\t// If the type and value are both missing, we carry down the type (and value,\n\t// but the \"go/types\" package takes care of that).\n\tfor _, spec := range decl.Specs {\n\t\ttspec := spec.(*ast.TypeSpec) // Guaranteed to succeed as this is TYPE.\n\t\tif tspec.Type != nil {\n\t\t\t// \"X T\". We have a type. Remember it.\n\t\t\ttyp = tspec.Name.Name\n\t\t}\n\t\tif typ != f.typeName {\n\t\t\t// This is not the type we're looking for.\n\t\t\tcontinue\n\t\t}\n\t\t// We now have a list of names (from one line of source code) all being\n\t\t// declared with the desired type.\n\n\t\tstructType, ok := tspec.Type.(*ast.StructType)\n\t\tif !ok {\n\t\t\t//not a struct type\n\t\t\tcontinue\n\t\t}\n\n\t\ttypesObj, typeObjOk := f.pkg.defs[tspec.Name]\n\t\tif !typeObjOk {\n\t\t\tlog.Fatalf(\"no type info found for struct %s\", typ)\n\t\t}\n\n\t\tfor _, fieldLine := range structType.Fields.List {\n\t\t\tfor _, field := range fieldLine.Names {\n\t\t\t\t//skip struct padding\n\t\t\t\tif field.Name == \"_\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfieldObj, _, _ := types.LookupFieldOrMethod(typesObj.Type(), false, f.pkg.typesPkg, field.Name)\n\n\t\t\t\ttypeStr := fieldObj.Type().String()\n\t\t\t\ttags := parseFieldTags(fieldLine.Tag)\n\n\t\t\t\t//Skip here so we don't include rubbish import lines\n\t\t\t\tif tags[\"exclude_dao\"].Value == \"true\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tprocessedTypeStr, importPath := processTypeStr(typeStr)\n\t\t\t\t//log.Printf(\"processedTypeStr: %s, importPath: %s\", processedTypeStr, importPath)\n\n\t\t\t\tif importPath != \"\" && !importExists(importPath, f.imports) {\n\n\t\t\t\t\tf.imports = append(f.imports, Import{importPath})\n\n\t\t\t\t}\n\n\t\t\t\tv := Field{\n\t\t\t\t\tName: field.Name,\n\t\t\t\t\tTags: tags,\n\t\t\t\t\tTypeName: processedTypeStr,\n\t\t\t\t}\n\t\t\t\tf.fields = append(f.fields, 
v)\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func newField(\n\tcrd *CRD,\n\tfieldNames names.Names,\n\tshapeRef *awssdkmodel.ShapeRef,\n\tcfg *ackgenconfig.FieldConfig,\n) *Field {\n\tvar gte, gt, gtwp string\n\tvar shape *awssdkmodel.Shape\n\tif shapeRef != nil {\n\t\tshape = shapeRef.Shape\n\t}\n\tif shape != nil {\n\t\tgte, gt, gtwp = cleanGoType(crd.sdkAPI, crd.cfg, shape)\n\t} else {\n\t\tgte = \"string\"\n\t\tgt = \"*string\"\n\t\tgtwp = \"*string\"\n\t}\n\treturn &Field{\n\t\tCRD: crd,\n\t\tNames: fieldNames,\n\t\tShapeRef: shapeRef,\n\t\tGoType: gt,\n\t\tGoTypeElem: gte,\n\t\tGoTypeWithPkgName: gtwp,\n\t\tFieldConfig: cfg,\n\t}\n}", "func Generate(fields map[string]*indexer.Field) map[string]interface{} {\n\treturn mapFields(fields)\n}", "func (f *File) genDecl(node ast.Node) bool {\n\tdecl, ok := node.(*ast.GenDecl)\n\n\tif !ok || decl.Tok != token.TYPE {\n\t\t// We only care about types declarations.\n\t\treturn true\n\t}\n\n\t// Loop over the elements of the declaration. Each element is a ValueSpec:\n\t// a list of names possibly followed by a type, possibly followed by values.\n\t// If the type and value are both missing, we carry down the type (and value,\n\t// but the \"go/types\" package takes care of that).\n\tfor _, spec := range decl.Specs {\n\t\ttspec := spec.(*ast.TypeSpec) // Guaranteed to succeed as this is TYPE.\n\n\t\tif tspec.Name.Name != f.typeName {\n\t\t\t// Not the type we're looking for.\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Printf(\"Type spec: %v name: %s\\n\", tspec.Type, tspec.Name.Name)\n\n\t\tif structType, ok := tspec.Type.(*ast.StructType); ok {\n\t\t\tlog.Printf(\"Located the struct type: %v\\n\", structType)\n\n\t\t\tfor _, field := range structType.Fields.List {\n\t\t\t\tlog.Printf(\"Field: %v\\n\", field)\n\n\t\t\t\tif ident, ok := field.Type.(*ast.Ident); ok {\n\t\t\t\t\t// Look at list of known types and determine if we have a translation.\n\t\t\t\t\ttp := KNOWN_SOURCE_TYPES[ident.Name]\n\n\t\t\t\t\tif tp != ST_UNKNOWN 
{\n\t\t\t\t\t\tlog.Printf(\"Primitive or local type found: %v => %s\\n\", ident.Name, tp.String())\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// TODO: We should probably consider all of these fields as local objects and add\n\t\t\t\t\t\t// foreign key links.\n\t\t\t\t\t\tlog.Printf(\"UNRECOGNIZED LOCAL TYPE seen: %v\\n\", ident.Name)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tif len(field.Names) == 1 {\n\t\t\t\t\t\tfieldName := field.Names[0].Name\n\t\t\t\t\t\tisPK := false\n\n\t\t\t\t\t\tif strings.ToLower(fieldName) == \"id\" {\n\t\t\t\t\t\t\tisPK = true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tf.fields = append(f.fields,\n\t\t\t\t\t\t\tField{\n\t\t\t\t\t\t\t\tsrcName: fieldName,\n\t\t\t\t\t\t\t\tdbName: strings.ToLower(fieldName), // TODO: Override with annotations\n\t\t\t\t\t\t\t\tisPK: isPK,\n\t\t\t\t\t\t\t\tsrcType: ident.Name,\n\t\t\t\t\t\t\t\tdbType: \"string\",\n\t\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t} else if selector, ok := field.Type.(*ast.SelectorExpr); ok {\n\t\t\t\t\t// TODO: This likely means an object in another package. 
Foreign link?\n\t\t\t\t\tlog.Printf(\"Found selector: %s :: %s\\n\", selector.X, selector.Sel.Name)\n\t\t\t\t\ttypeName := fmt.Sprintf(\"%s.%s\", selector.X, selector.Sel.Name)\n\n\t\t\t\t\ttp := KNOWN_SOURCE_TYPES[typeName]\n\n\t\t\t\t\tif tp != ST_UNKNOWN {\n\t\t\t\t\t\tlog.Printf(\"Primitive or local type found: %v => %s\\n\", typeName, tp.String())\n\t\t\t\t\t\tf.additionalImports = append(f.additionalImports, fmt.Sprintf(\"%s\", selector.X))\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// TODO: We should probably consider all of these fields as local objects and add\n\t\t\t\t\t\t// foreign key links.\n\t\t\t\t\t\tlog.Printf(\"UNRECOGNIZED LOCAL TYPE seen: %v\\n\", typeName)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tif len(field.Names) == 1 {\n\t\t\t\t\t\tfieldName := field.Names[0].Name\n\t\t\t\t\t\tisPK := false\n\n\t\t\t\t\t\tif strings.ToLower(fieldName) == \"id\" {\n\t\t\t\t\t\t\tisPK = true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tf.fields = append(f.fields,\n\t\t\t\t\t\t\tField{\n\t\t\t\t\t\t\t\tsrcName: fieldName,\n\t\t\t\t\t\t\t\tdbName: strings.ToLower(fieldName), // TODO: Override with annotations\n\t\t\t\t\t\t\t\tisPK: isPK,\n\t\t\t\t\t\t\t\tsrcType: typeName,\n\t\t\t\t\t\t\t\tdbType: \"string\",\n\t\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// TODO: Enumerate all different possible types here.\n\t\t\t\t\tlog.Printf(\"UNKNOWN TYPE seen: %v\\n\", field.Type)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (p TreeWriter) getFields(leaf *yaml.RNode) (treeFields, error) {\n\tfieldsByName := map[string]*treeField{}\n\n\t// index nested and non-nested fields\n\tfor i := range p.Fields {\n\t\tf := p.Fields[i]\n\t\tseq, err := leaf.Pipe(&f)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif seq == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif fieldsByName[f.Name] == nil {\n\t\t\tfieldsByName[f.Name] = &treeField{name: f.Name}\n\t\t}\n\n\t\t// non-nested field -- add directly to the treeFields list\n\t\tif f.SubName == \"\" {\n\t\t\t// non-nested 
field -- only 1 element\n\t\t\tval, err := yaml.String(seq.Content()[0], yaml.Trim, yaml.Flow)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tfieldsByName[f.Name].value = val\n\t\t\tcontinue\n\t\t}\n\n\t\t// nested-field -- create a parent elem, and index by the 'match' value\n\t\tif fieldsByName[f.Name].subFieldByMatch == nil {\n\t\t\tfieldsByName[f.Name].subFieldByMatch = map[string]treeFields{}\n\t\t}\n\t\tindex := fieldsByName[f.Name].subFieldByMatch\n\t\tfor j := range seq.Content() {\n\t\t\telem := seq.Content()[j]\n\t\t\tmatches := f.Matches[elem]\n\t\t\tstr, err := yaml.String(elem, yaml.Trim, yaml.Flow)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// map the field by the name of the element\n\t\t\t// index the subfields by the matching element so we can put all the fields for the\n\t\t\t// same element under the same branch\n\t\t\tmatchKey := strings.Join(matches, \"/\")\n\t\t\tindex[matchKey] = append(index[matchKey], &treeField{name: f.SubName, value: str})\n\t\t}\n\t}\n\n\t// iterate over collection of all queried fields in the Resource\n\tfor _, field := range fieldsByName {\n\t\t// iterate over collection of elements under the field -- indexed by element name\n\t\tfor match, subFields := range field.subFieldByMatch {\n\t\t\t// create a new element for this collection of fields\n\t\t\t// note: we will convert name to an index later, but keep the match for sorting\n\t\t\telem := &treeField{name: match}\n\t\t\tfield.matchingElementsAndFields = append(field.matchingElementsAndFields, elem)\n\n\t\t\t// iterate over collection of queried fields for the element\n\t\t\tfor i := range subFields {\n\t\t\t\t// add to the list of fields for this element\n\t\t\t\telem.matchingElementsAndFields = append(elem.matchingElementsAndFields, subFields[i])\n\t\t\t}\n\t\t}\n\t\t// clear this cached data\n\t\tfield.subFieldByMatch = nil\n\t}\n\n\t// put the fields in a list so they are ordered\n\tfieldList := treeFields{}\n\tfor _, v := 
range fieldsByName {\n\t\tfieldList = append(fieldList, v)\n\t}\n\n\t// sort the fields\n\tsort.Sort(fieldList)\n\tfor i := range fieldList {\n\t\tfield := fieldList[i]\n\t\t// sort the elements under this field\n\t\tsort.Sort(field.matchingElementsAndFields)\n\n\t\tfor i := range field.matchingElementsAndFields {\n\t\t\telement := field.matchingElementsAndFields[i]\n\t\t\t// sort the elements under a list field by their name\n\t\t\tsort.Sort(element.matchingElementsAndFields)\n\t\t\t// set the name of the element to its index\n\t\t\telement.name = fmt.Sprintf(\"%d\", i)\n\t\t}\n\t}\n\n\treturn fieldList, nil\n}", "func genNodeDev(id nodes.ID, n *nodes.Node) (nodeDev, error) {\n\tr, ok := typesMap[reflect.TypeOf(n.Config).Elem()]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unknown type for %T\", n.Config)\n\t}\n\tv := reflect.New(r)\n\te := v.Elem()\n\te.Field(0).Set(reflect.ValueOf(NodeBase{id: id, name: n.Name, typ: n.Type()}))\n\te.Field(1).Set(reflect.ValueOf(n.Config))\n\t/*\n\t\tswitch v := n.Config.(type) {\n\t\tcase *nodes.Anim1D:\n\t\t\td.nodes[id] = &anim1DDev{NodeBase: b, cfg: v}\n\t\tcase *nodes.Button:\n\t\t\td.nodes[id] = &buttonDev{nodeBase: b, cfg: v}\n\t\tcase *nodes.Display:\n\t\t\td.nodes[id] = &displayDev{nodeBase: b, cfg: v}\n\t\tcase *nodes.IR:\n\t\t\td.nodes[id] = &irDev{nodeBase: b, cfg: v}\n\t\tcase *nodes.PIR:\n\t\t\td.nodes[id] = &pirDev{nodeBase: b, cfg: v}\n\t\tcase *nodes.Sound:\n\t\t\td.nodes[id] = &soundDev{nodeBase: b, cfg: v}\n\t\tdefault:\n\t\t\tpubErr(dbus, \"failed to initialize: unknown node %q: %T\", id, n)\n\t\t\treturn fmt.Errorf(\"unknown node %q: %T\", id, n)\n\t\t}\n\t*/\n\treturn v.Interface().(nodeDev), nil\n}", "func generatePerNodeConfigSnippet(pathStructName string, nodeData *ypathgen.NodeData, fakeRootTypeName, schemaStructPkgAccessor string, preferShadowPath bool) (GoPerNodeCodeSnippet, goTypeData, util.Errors) {\n\t// TODO: See if a float32 -> binary helper should be provided\n\t// for setting a float32 leaf.\n\tvar 
errs util.Errors\n\ts := struct {\n\t\tPathStructName string\n\t\tGoType goTypeData\n\t\tGoFieldName string\n\t\tGoStructTypeName string\n\t\tYANGPath string\n\t\tFakeRootTypeName string\n\t\tIsScalarField bool\n\t\tIsRoot bool\n\t\tSchemaStructPkgAccessor string\n\t\tWildcardSuffix string\n\t\tSpecialConversionFn string\n\t\tPreferShadowPath bool\n\t}{\n\t\tPathStructName: pathStructName,\n\t\tGoType: goTypeData{\n\t\t\tGoTypeName: nodeData.GoTypeName,\n\t\t\tTransformedGoTypeName: transformGoTypeName(nodeData),\n\t\t\tIsLeaf: nodeData.IsLeaf,\n\t\t\tHasDefault: nodeData.HasDefault,\n\t\t},\n\t\tGoFieldName: nodeData.GoFieldName,\n\t\tGoStructTypeName: nodeData.SubsumingGoStructName,\n\t\tYANGPath: nodeData.YANGPath,\n\t\tFakeRootTypeName: fakeRootTypeName,\n\t\tIsScalarField: nodeData.IsScalarField,\n\t\tIsRoot: nodeData.YANGPath == \"/\",\n\t\tWildcardSuffix: ypathgen.WildcardSuffix,\n\t\tSchemaStructPkgAccessor: schemaStructPkgAccessor,\n\t\tPreferShadowPath: preferShadowPath,\n\t}\n\tvar getMethod, replaceMethod, convertHelper strings.Builder\n\tif nodeData.IsLeaf {\n\t\t// Leaf types use their parent GoStruct to unmarshal, before\n\t\t// being retrieved out when returned to the user.\n\t\tif err := goLeafConvertTemplate.Execute(&convertHelper, s); err != nil {\n\t\t\tutil.AppendErr(errs, err)\n\t\t}\n\t}\n\tif err := goNodeSetTemplate.Execute(&replaceMethod, s); err != nil {\n\t\tutil.AppendErr(errs, err)\n\t}\n\tif err := goNodeGetTemplate.Execute(&getMethod, s); err != nil {\n\t\tutil.AppendErr(errs, err)\n\t}\n\n\treturn GoPerNodeCodeSnippet{\n\t\tPathStructName: pathStructName,\n\t\tGetMethod: getMethod.String(),\n\t\tConvertHelper: convertHelper.String(),\n\t\tReplaceMethod: replaceMethod.String(),\n\t}, s.GoType, errs\n}", "func fieldToSchema(prog *Program, fName, tagName string, ref Reference, f *ast.Field) (*Schema, error) {\n\tvar p Schema\n\n\tif f.Doc != nil {\n\t\tp.Description = f.Doc.Text()\n\t} else if f.Comment != nil {\n\t\tp.Description = 
f.Comment.Text()\n\t}\n\tp.Description = strings.TrimSpace(p.Description)\n\n\tvar tags []string\n\tp.Description, tags = parseTags(p.Description)\n\terr := setTags(fName, ref.File, &p, tags)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Don't need to carry on if we're loading our own schema.\n\tif p.CustomSchema != \"\" {\n\t\treturn &p, nil\n\t}\n\n\tpkg := ref.Package\n\tvar name *ast.Ident\n\n\tdbg(\"fieldToSchema: %v\", f.Names)\n\n\tsw := f.Type\nstart:\n\tswitch typ := sw.(type) {\n\n\t// Interface, only useful for its description.\n\tcase *ast.InterfaceType:\n\t\tif len(f.Names) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"field has no Names: %#v\", f)\n\t\t}\n\n\t\tfield := f.Names[0].Obj.Decl.(*ast.Field)\n\t\tswitch typ := field.Type.(type) {\n\t\tcase *ast.SelectorExpr:\n\t\t\tpkgSel, ok := typ.X.(*ast.Ident)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"typ.X is not ast.Ident: %#v\", typ.X)\n\t\t\t}\n\t\t\tpkg = pkgSel.Name\n\t\t\tname = typ.Sel\n\n\t\t\tlookup := pkg + \".\" + name.Name\n\t\t\tif _, err := GetReference(prog, ref.Context, false, lookup, ref.File); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"GetReference: %v\", err)\n\t\t\t}\n\t\tcase *ast.Ident:\n\t\t\tname = typ\n\t\t}\n\n\t// Pointer type; we don't really care about this for now, so just read over\n\t// it.\n\tcase *ast.StarExpr:\n\t\tsw = typ.X\n\t\tgoto start\n\n\t// Simple identifiers such as \"string\", \"int\", \"MyType\", etc.\n\tcase *ast.Ident:\n\t\tmappedType, mappedFormat := MapType(prog, pkg+\".\"+typ.Name)\n\t\tif mappedType == \"\" {\n\t\t\t// Only check for canonicalType if this isn't mapped.\n\t\t\tcanon, err := canonicalType(ref.File, pkg, typ)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"cannot get canonical type: %v\", err)\n\t\t\t}\n\t\t\tif canon != nil {\n\t\t\t\tsw = canon\n\t\t\t\tgoto start\n\t\t\t}\n\t\t}\n\t\tif mappedType != \"\" {\n\t\t\tp.Type = JSONSchemaType(mappedType)\n\t\t} else {\n\t\t\tp.Type = 
JSONSchemaType(typ.Name)\n\t\t}\n\t\tif mappedFormat != \"\" {\n\t\t\tp.Format = mappedFormat\n\t\t}\n\n\t\t// e.g. string, int64, etc.: don't need to look up.\n\t\tif isPrimitive(p.Type) {\n\t\t\treturn &p, nil\n\t\t}\n\n\t\tp.Type = \"\"\n\t\tname = typ\n\n\t// Anonymous struct\n\tcase *ast.StructType:\n\t\tp.Type = \"object\"\n\t\tp.Properties = map[string]*Schema{}\n\t\tfor _, f := range typ.Fields.List {\n\t\t\tpropName := goutil.TagName(f, tagName)\n\t\t\tprop, err := fieldToSchema(prog, propName, tagName, ref, f)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"anon struct: %v\", err)\n\t\t\t}\n\n\t\t\tp.Properties[propName] = prop\n\t\t}\n\n\t// An expression followed by a selector, e.g. \"pkg.foo\"\n\tcase *ast.SelectorExpr:\n\t\tpkgSel, ok := typ.X.(*ast.Ident)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"typ.X is not ast.Ident: %#v\", typ.X)\n\t\t}\n\n\t\tpkg = pkgSel.Name\n\t\tname = typ.Sel\n\n\t\tlookup := pkg + \".\" + name.Name\n\t\tt, f := MapType(prog, lookup)\n\t\tif t == \"\" {\n\t\t\t// Only check for canonicalType if this isn't mapped.\n\t\t\tcanon, err := canonicalType(ref.File, pkgSel.Name, typ.Sel)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"cannot get canonical type: %v\", err)\n\t\t\t}\n\t\t\tif canon != nil {\n\t\t\t\tsw = canon\n\t\t\t\tgoto start\n\t\t\t}\n\t\t}\n\n\t\tp.Format = f\n\t\tif t != \"\" {\n\t\t\tp.Type = JSONSchemaType(t)\n\t\t\treturn &p, nil\n\t\t}\n\n\t\t// Deal with array.\n\t\t// TODO: don't do this inline but at the end. 
Reason it doesn't work not\n\t\t// is because we always use GetReference().\n\t\tts, _, importPath, err := findType(ref.File, pkg, name.Name)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !strings.HasSuffix(importPath, pkg) { // import alias\n\t\t\tpkg = importPath\n\t\t}\n\n\t\tswitch resolvType := ts.Type.(type) {\n\t\tcase *ast.ArrayType:\n\t\t\tp.Type = \"array\"\n\t\t\terr := resolveArray(prog, ref, pkg, &p, resolvType.Elt)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\treturn &p, nil\n\t\t}\n\n\t// Maps\n\tcase *ast.MapType:\n\t\t// As far as I can find there is no obvious/elegant way to represent\n\t\t// this in JSON schema, so it's just an object.\n\t\tp.Type = \"object\"\n\t\tvtyp, vpkg, err := findTypeIdent(typ.Value, pkg)\n\t\tif err != nil {\n\t\t\t// we cannot find a mapping to a concrete type,\n\t\t\t// so we cannot define the type of the maps -> ?\n\t\t\tdbg(\"ERR FOUND MapType: %s\", err.Error())\n\t\t\treturn &p, nil\n\t\t}\n\t\tif isPrimitive(vtyp.Name) {\n\t\t\t// we are done, no need for a lookup of a custom type\n\t\t\tp.AdditionalProperties = &Schema{Type: JSONSchemaType(vtyp.Name)}\n\t\t\treturn &p, nil\n\t\t}\n\n\t\t_, lref, err := lookupTypeAndRef(ref.File, vpkg, vtyp.Name)\n\t\tif err == nil {\n\t\t\t// found additional properties\n\t\t\tp.AdditionalProperties = &Schema{Reference: lref}\n\t\t\t// Make sure the reference is added to `prog.References`:\n\t\t\t_, err := GetReference(prog, ref.Context, false, lref, ref.File)\n\t\t\tif err != nil {\n\t\t\t\tdbg(\"ERR, Could not find additionalProperties Reference: %s\", err.Error())\n\t\t\t}\n\t\t} else {\n\t\t\tdbg(\"ERR, Could not find additionalProperties: %s\", err.Error())\n\t\t}\n\t\treturn &p, nil\n\n\t// Array and slices.\n\tcase *ast.ArrayType:\n\t\tp.Type = \"array\"\n\n\t\terr := resolveArray(prog, ref, pkg, &p, typ.Elt)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn &p, nil\n\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"fieldToSchema: 
unknown type: %T\", typ)\n\t}\n\n\tif name == nil {\n\t\treturn &p, nil\n\t}\n\n\t// Check if the type resolves to a Go primitive.\n\tlookup := pkg + \".\" + name.Name\n\tt, err := getTypeInfo(prog, lookup, ref.File)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif t != \"\" {\n\t\tp.Type = t\n\t\tif isPrimitive(p.Type) {\n\t\t\treturn &p, nil\n\t\t}\n\t}\n\n\tif i := strings.LastIndex(lookup, \"/\"); i > -1 {\n\t\tlookup = pkg[i+1:] + \".\" + name.Name\n\t}\n\n\tp.Description = \"\" // SwaggerHub will complain if both Description and $ref are set.\n\tp.Reference = lookup\n\n\treturn &p, nil\n}", "func generateStruct(a *AnnotationDoc, packageName string, imports []string, indent string) (string, []string) {\n\tvar allAnnotationsPackages []string\n\tpossiblePackagesForA := combinePackages(imports, []string{packageName})\n\tts, foundPackageOfA, foundImportsOfA := getAnnotationStruct(a.Name, possiblePackagesForA)\n\tallAnnotationsPackages = combinePackages(allAnnotationsPackages, []string{foundPackageOfA})\n\tstr, _ := ts.Type.(*ast.StructType)\n\tvar b bytes.Buffer\n\tb.WriteString(indent)\n\tb.WriteString(foundPackageOfA)\n\tb.WriteString(\".\")\n\tb.WriteString(a.Name)\n\tb.WriteString(\"{\\n\")\n\tchildIndent := indent + \" \"\n\tfor _, f := range str.Fields.List {\n\t\tfieldName := getFieldName(f)\n\t\tdefValue := getDefaultValue(f)\n\t\tfieldKey := fieldName\n\t\t// consider special case when only default parameter is specified\n\t\tif len(str.Fields.List) == 1 && len(a.Content) == 1 {\n\t\t\tfor key := range a.Content {\n\t\t\t\tif key == DEFAULT_PARAM {\n\t\t\t\t\tfieldKey = DEFAULT_PARAM\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tvalue, found := a.Content[fieldKey]\n\t\tif found {\n\t\t\tswitch t := value.(type) {\n\t\t\tcase string:\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(getLiteral(f.Type, t, false))\n\t\t\t\tb.WriteString(\",\\n\")\n\t\t\tcase 
[]string:\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(getFieldConstructor(f.Type))\n\t\t\t\tb.WriteString(\"\\n\")\n\t\t\t\tfor _, elem := range t {\n\t\t\t\t\tb.WriteString(childIndent + \" \")\n\t\t\t\t\tb.WriteString(elem)\n\t\t\t\t\tb.WriteString(\",\\n\")\n\t\t\t\t}\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(\"}\")\n\t\t\tcase []AnnotationDoc:\n\t\t\t\t// calculate array's elements\n\t\t\t\tvar bb bytes.Buffer\n\t\t\t\tfor _, sa := range t {\n\t\t\t\t\tchildCode, foundImportsOfChild := generateStruct(&sa, foundPackageOfA, foundImportsOfA, childIndent+\" \")\n\t\t\t\t\tallAnnotationsPackages = combinePackages(allAnnotationsPackages, foundImportsOfChild)\n\t\t\t\t\tbb.WriteString(childCode)\n\t\t\t\t\tbb.WriteString(\",\\n\")\n\t\t\t\t}\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\t// insert array initialzer of child annotation type\n\t\t\t\ts := writeArrayInitializer(&b, bb.String())\n\t\t\t\t// append array of child annotations\n\t\t\t\tb.WriteString(\"{\\n\")\n\t\t\t\tb.WriteString(childIndent + \" \")\n\t\t\t\tb.WriteString(s)\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(\"},\\n\")\n\t\t\tcase AnnotationDoc:\n\t\t\t\tchildCode, foundImportsOfChild := generateStruct(&t, foundPackageOfA, foundImportsOfA, childIndent)\n\t\t\t\tallAnnotationsPackages = combinePackages(allAnnotationsPackages, foundImportsOfChild)\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tif isOptional(f.Type) {\n\t\t\t\t\tb.WriteString(\"&\")\n\t\t\t\t}\n\t\t\t\tb.WriteString(strings.TrimLeft(childCode, \" \"))\n\t\t\t\tb.WriteString(\",\\n\")\n\t\t\tdefault:\n\t\t\t\tpanic(\"Unexpected annotation value type\")\n\t\t\t}\n\t\t} else {\n\t\t\tb.WriteString(childIndent)\n\t\t\tb.WriteString(defValue)\n\t\t\tb.WriteString(\",\\n\")\n\t\t}\n\t}\n\tb.WriteString(indent)\n\tb.WriteString(\"}\")\n\treturn b.String(), allAnnotationsPackages\n}", "func (v *validate) traverseField(ctx context.Context, parent reflect.Value, current reflect.Value, ns []byte, 
structNs []byte, cf *cField, ct *cTag) {\n\tvar typ reflect.Type\n\tvar kind reflect.Kind\n\n\tcurrent, kind, v.fldIsPointer = v.extractTypeInternal(current, false)\n\n\tswitch kind {\n\tcase reflect.Ptr, reflect.Interface, reflect.Invalid:\n\n\t\tif ct == nil {\n\t\t\treturn\n\t\t}\n\n\t\tif ct.typeof == typeOmitEmpty || ct.typeof == typeIsDefault {\n\t\t\treturn\n\t\t}\n\n\t\tif ct.hasTag {\n\t\t\tif kind == reflect.Invalid {\n\t\t\t\tv.str1 = string(append(ns, cf.altName...))\n\t\t\t\tif v.v.hasTagNameFunc {\n\t\t\t\t\tv.str2 = string(append(structNs, cf.name...))\n\t\t\t\t} else {\n\t\t\t\t\tv.str2 = v.str1\n\t\t\t\t}\n\t\t\t\tv.errs = append(v.errs,\n\t\t\t\t\t&fieldError{\n\t\t\t\t\t\tv: v.v,\n\t\t\t\t\t\ttag: ct.aliasTag,\n\t\t\t\t\t\tactualTag: ct.tag,\n\t\t\t\t\t\tns: v.str1,\n\t\t\t\t\t\tstructNs: v.str2,\n\t\t\t\t\t\tfieldLen: uint8(len(cf.altName)),\n\t\t\t\t\t\tstructfieldLen: uint8(len(cf.name)),\n\t\t\t\t\t\tparam: ct.param,\n\t\t\t\t\t\tkind: kind,\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tv.str1 = string(append(ns, cf.altName...))\n\t\t\tif v.v.hasTagNameFunc {\n\t\t\t\tv.str2 = string(append(structNs, cf.name...))\n\t\t\t} else {\n\t\t\t\tv.str2 = v.str1\n\t\t\t}\n\t\t\tif !ct.runValidationWhenNil {\n\t\t\t\tv.errs = append(v.errs,\n\t\t\t\t\t&fieldError{\n\t\t\t\t\t\tv: v.v,\n\t\t\t\t\t\ttag: ct.aliasTag,\n\t\t\t\t\t\tactualTag: ct.tag,\n\t\t\t\t\t\tns: v.str1,\n\t\t\t\t\t\tstructNs: v.str2,\n\t\t\t\t\t\tfieldLen: uint8(len(cf.altName)),\n\t\t\t\t\t\tstructfieldLen: uint8(len(cf.name)),\n\t\t\t\t\t\tvalue: current.Interface(),\n\t\t\t\t\t\tparam: ct.param,\n\t\t\t\t\t\tkind: kind,\n\t\t\t\t\t\ttyp: current.Type(),\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\tcase reflect.Struct:\n\n\t\ttyp = current.Type()\n\n\t\tif !typ.ConvertibleTo(timeType) {\n\n\t\t\tif ct != nil {\n\n\t\t\t\tif ct.typeof == typeStructOnly {\n\t\t\t\t\tgoto CONTINUE\n\t\t\t\t} else if ct.typeof == typeIsDefault {\n\t\t\t\t\t// set Field 
Level fields\n\t\t\t\t\tv.slflParent = parent\n\t\t\t\t\tv.flField = current\n\t\t\t\t\tv.cf = cf\n\t\t\t\t\tv.ct = ct\n\n\t\t\t\t\tif !ct.fn(ctx, v) {\n\t\t\t\t\t\tv.str1 = string(append(ns, cf.altName...))\n\n\t\t\t\t\t\tif v.v.hasTagNameFunc {\n\t\t\t\t\t\t\tv.str2 = string(append(structNs, cf.name...))\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tv.str2 = v.str1\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tv.errs = append(v.errs,\n\t\t\t\t\t\t\t&fieldError{\n\t\t\t\t\t\t\t\tv: v.v,\n\t\t\t\t\t\t\t\ttag: ct.aliasTag,\n\t\t\t\t\t\t\t\tactualTag: ct.tag,\n\t\t\t\t\t\t\t\tns: v.str1,\n\t\t\t\t\t\t\t\tstructNs: v.str2,\n\t\t\t\t\t\t\t\tfieldLen: uint8(len(cf.altName)),\n\t\t\t\t\t\t\t\tstructfieldLen: uint8(len(cf.name)),\n\t\t\t\t\t\t\t\tvalue: current.Interface(),\n\t\t\t\t\t\t\t\tparam: ct.param,\n\t\t\t\t\t\t\t\tkind: kind,\n\t\t\t\t\t\t\t\ttyp: typ,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tct = ct.next\n\t\t\t}\n\n\t\t\tif ct != nil && ct.typeof == typeNoStructLevel {\n\t\t\t\treturn\n\t\t\t}\n\n\t\tCONTINUE:\n\t\t\t// if len == 0 then validating using 'Var' or 'VarWithValue'\n\t\t\t// Var - doesn't make much sense to do it that way, should call 'Struct', but no harm...\n\t\t\t// VarWithField - this allows for validating against each field within the struct against a specific value\n\t\t\t// pretty handy in certain situations\n\t\t\tif len(cf.name) > 0 {\n\t\t\t\tns = append(append(ns, cf.altName...), '.')\n\t\t\t\tstructNs = append(append(structNs, cf.name...), '.')\n\t\t\t}\n\n\t\t\tv.validateStruct(ctx, parent, current, typ, ns, structNs, ct)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif ct == nil || !ct.hasTag {\n\t\treturn\n\t}\n\n\ttyp = current.Type()\n\nOUTER:\n\tfor {\n\t\tif ct == nil {\n\t\t\treturn\n\t\t}\n\n\t\tswitch ct.typeof {\n\n\t\tcase typeOmitEmpty:\n\n\t\t\t// set Field Level fields\n\t\t\tv.slflParent = parent\n\t\t\tv.flField = current\n\t\t\tv.cf = cf\n\t\t\tv.ct = ct\n\n\t\t\tif !hasValue(v) 
{\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tct = ct.next\n\t\t\tcontinue\n\n\t\tcase typeEndKeys:\n\t\t\treturn\n\n\t\tcase typeDive:\n\n\t\t\tct = ct.next\n\n\t\t\t// traverse slice or map here\n\t\t\t// or panic ;)\n\t\t\tswitch kind {\n\t\t\tcase reflect.Slice, reflect.Array:\n\n\t\t\t\tvar i64 int64\n\t\t\t\treusableCF := &cField{}\n\n\t\t\t\tfor i := 0; i < current.Len(); i++ {\n\n\t\t\t\t\ti64 = int64(i)\n\n\t\t\t\t\tv.misc = append(v.misc[0:0], cf.name...)\n\t\t\t\t\tv.misc = append(v.misc, '[')\n\t\t\t\t\tv.misc = strconv.AppendInt(v.misc, i64, 10)\n\t\t\t\t\tv.misc = append(v.misc, ']')\n\n\t\t\t\t\treusableCF.name = string(v.misc)\n\n\t\t\t\t\tif cf.namesEqual {\n\t\t\t\t\t\treusableCF.altName = reusableCF.name\n\t\t\t\t\t} else {\n\n\t\t\t\t\t\tv.misc = append(v.misc[0:0], cf.altName...)\n\t\t\t\t\t\tv.misc = append(v.misc, '[')\n\t\t\t\t\t\tv.misc = strconv.AppendInt(v.misc, i64, 10)\n\t\t\t\t\t\tv.misc = append(v.misc, ']')\n\n\t\t\t\t\t\treusableCF.altName = string(v.misc)\n\t\t\t\t\t}\n\t\t\t\t\tv.traverseField(ctx, parent, current.Index(i), ns, structNs, reusableCF, ct)\n\t\t\t\t}\n\n\t\t\tcase reflect.Map:\n\n\t\t\t\tvar pv string\n\t\t\t\treusableCF := &cField{}\n\n\t\t\t\tfor _, key := range current.MapKeys() {\n\n\t\t\t\t\tpv = fmt.Sprintf(\"%v\", key.Interface())\n\n\t\t\t\t\tv.misc = append(v.misc[0:0], cf.name...)\n\t\t\t\t\tv.misc = append(v.misc, '[')\n\t\t\t\t\tv.misc = append(v.misc, pv...)\n\t\t\t\t\tv.misc = append(v.misc, ']')\n\n\t\t\t\t\treusableCF.name = string(v.misc)\n\n\t\t\t\t\tif cf.namesEqual {\n\t\t\t\t\t\treusableCF.altName = reusableCF.name\n\t\t\t\t\t} else {\n\t\t\t\t\t\tv.misc = append(v.misc[0:0], cf.altName...)\n\t\t\t\t\t\tv.misc = append(v.misc, '[')\n\t\t\t\t\t\tv.misc = append(v.misc, pv...)\n\t\t\t\t\t\tv.misc = append(v.misc, ']')\n\n\t\t\t\t\t\treusableCF.altName = string(v.misc)\n\t\t\t\t\t}\n\n\t\t\t\t\tif ct != nil && ct.typeof == typeKeys && ct.keys != nil {\n\t\t\t\t\t\tv.traverseField(ctx, parent, key, ns, 
structNs, reusableCF, ct.keys)\n\t\t\t\t\t\t// can be nil when just keys being validated\n\t\t\t\t\t\tif ct.next != nil {\n\t\t\t\t\t\t\tv.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct.next)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tv.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tdefault:\n\t\t\t\t// throw error, if not a slice or map then should not have gotten here\n\t\t\t\t// bad dive tag\n\t\t\t\tpanic(\"dive error! can't dive on a non slice or map\")\n\t\t\t}\n\n\t\t\treturn\n\n\t\tcase typeOr:\n\n\t\t\tv.misc = v.misc[0:0]\n\n\t\t\tfor {\n\n\t\t\t\t// set Field Level fields\n\t\t\t\tv.slflParent = parent\n\t\t\t\tv.flField = current\n\t\t\t\tv.cf = cf\n\t\t\t\tv.ct = ct\n\n\t\t\t\tif ct.fn(ctx, v) {\n\t\t\t\t\tif ct.isBlockEnd {\n\t\t\t\t\t\tct = ct.next\n\t\t\t\t\t\tcontinue OUTER\n\t\t\t\t\t}\n\n\t\t\t\t\t// drain rest of the 'or' values, then continue or leave\n\t\t\t\t\tfor {\n\n\t\t\t\t\t\tct = ct.next\n\n\t\t\t\t\t\tif ct == nil {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif ct.typeof != typeOr {\n\t\t\t\t\t\t\tcontinue OUTER\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif ct.isBlockEnd {\n\t\t\t\t\t\t\tct = ct.next\n\t\t\t\t\t\t\tcontinue OUTER\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tv.misc = append(v.misc, '|')\n\t\t\t\tv.misc = append(v.misc, ct.tag...)\n\n\t\t\t\tif ct.hasParam {\n\t\t\t\t\tv.misc = append(v.misc, '=')\n\t\t\t\t\tv.misc = append(v.misc, ct.param...)\n\t\t\t\t}\n\n\t\t\t\tif ct.isBlockEnd || ct.next == nil {\n\t\t\t\t\t// if we get here, no valid 'or' value and no more tags\n\t\t\t\t\tv.str1 = string(append(ns, cf.altName...))\n\n\t\t\t\t\tif v.v.hasTagNameFunc {\n\t\t\t\t\t\tv.str2 = string(append(structNs, cf.name...))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tv.str2 = v.str1\n\t\t\t\t\t}\n\n\t\t\t\t\tif ct.hasAlias {\n\n\t\t\t\t\t\tv.errs = append(v.errs,\n\t\t\t\t\t\t\t&fieldError{\n\t\t\t\t\t\t\t\tv: v.v,\n\t\t\t\t\t\t\t\ttag: 
ct.aliasTag,\n\t\t\t\t\t\t\t\tactualTag: ct.actualAliasTag,\n\t\t\t\t\t\t\t\tns: v.str1,\n\t\t\t\t\t\t\t\tstructNs: v.str2,\n\t\t\t\t\t\t\t\tfieldLen: uint8(len(cf.altName)),\n\t\t\t\t\t\t\t\tstructfieldLen: uint8(len(cf.name)),\n\t\t\t\t\t\t\t\tvalue: current.Interface(),\n\t\t\t\t\t\t\t\tparam: ct.param,\n\t\t\t\t\t\t\t\tkind: kind,\n\t\t\t\t\t\t\t\ttyp: typ,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t)\n\n\t\t\t\t\t} else {\n\n\t\t\t\t\t\ttVal := string(v.misc)[1:]\n\n\t\t\t\t\t\tv.errs = append(v.errs,\n\t\t\t\t\t\t\t&fieldError{\n\t\t\t\t\t\t\t\tv: v.v,\n\t\t\t\t\t\t\t\ttag: tVal,\n\t\t\t\t\t\t\t\tactualTag: tVal,\n\t\t\t\t\t\t\t\tns: v.str1,\n\t\t\t\t\t\t\t\tstructNs: v.str2,\n\t\t\t\t\t\t\t\tfieldLen: uint8(len(cf.altName)),\n\t\t\t\t\t\t\t\tstructfieldLen: uint8(len(cf.name)),\n\t\t\t\t\t\t\t\tvalue: current.Interface(),\n\t\t\t\t\t\t\t\tparam: ct.param,\n\t\t\t\t\t\t\t\tkind: kind,\n\t\t\t\t\t\t\t\ttyp: typ,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t)\n\t\t\t\t\t}\n\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tct = ct.next\n\t\t\t}\n\n\t\tdefault:\n\n\t\t\t// set Field Level fields\n\t\t\tv.slflParent = parent\n\t\t\tv.flField = current\n\t\t\tv.cf = cf\n\t\t\tv.ct = ct\n\n\t\t\tif !ct.fn(ctx, v) {\n\t\t\t\tv.str1 = string(append(ns, cf.altName...))\n\n\t\t\t\tif v.v.hasTagNameFunc {\n\t\t\t\t\tv.str2 = string(append(structNs, cf.name...))\n\t\t\t\t} else {\n\t\t\t\t\tv.str2 = v.str1\n\t\t\t\t}\n\n\t\t\t\tv.errs = append(v.errs,\n\t\t\t\t\t&fieldError{\n\t\t\t\t\t\tv: v.v,\n\t\t\t\t\t\ttag: ct.aliasTag,\n\t\t\t\t\t\tactualTag: ct.tag,\n\t\t\t\t\t\tns: v.str1,\n\t\t\t\t\t\tstructNs: v.str2,\n\t\t\t\t\t\tfieldLen: uint8(len(cf.altName)),\n\t\t\t\t\t\tstructfieldLen: uint8(len(cf.name)),\n\t\t\t\t\t\tvalue: current.Interface(),\n\t\t\t\t\t\tparam: ct.param,\n\t\t\t\t\t\tkind: kind,\n\t\t\t\t\t\ttyp: typ,\n\t\t\t\t\t},\n\t\t\t\t)\n\n\t\t\t\treturn\n\t\t\t}\n\t\t\tct = ct.next\n\t\t}\n\t}\n\n}", "func (m *ParentLabelDetails) 
GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"color\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetColor(val)\n }\n return nil\n }\n res[\"description\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDescription(val)\n }\n return nil\n }\n res[\"id\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetId(val)\n }\n return nil\n }\n res[\"isActive\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetBoolValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetIsActive(val)\n }\n return nil\n }\n res[\"name\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetName(val)\n }\n return nil\n }\n res[\"@odata.type\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOdataType(val)\n }\n return nil\n }\n res[\"parent\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateParentLabelDetailsFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetParent(val.(ParentLabelDetailsable))\n }\n return nil\n }\n 
res[\"sensitivity\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetInt32Value()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSensitivity(val)\n }\n return nil\n }\n res[\"tooltip\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetTooltip(val)\n }\n return nil\n }\n return res\n}", "func (m *EdiscoverySearch) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Search.GetFieldDeserializers()\n res[\"additionalSources\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateDataSourceFromDiscriminatorValue , m.SetAdditionalSources)\n res[\"addToReviewSetOperation\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetObjectValue(CreateEdiscoveryAddToReviewSetOperationFromDiscriminatorValue , m.SetAddToReviewSetOperation)\n res[\"custodianSources\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateDataSourceFromDiscriminatorValue , m.SetCustodianSources)\n res[\"dataSourceScopes\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetEnumValue(ParseDataSourceScopes , m.SetDataSourceScopes)\n res[\"lastEstimateStatisticsOperation\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetObjectValue(CreateEdiscoveryEstimateOperationFromDiscriminatorValue , m.SetLastEstimateStatisticsOperation)\n res[\"noncustodialSources\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateEdiscoveryNoncustodialDataSourceFromDiscriminatorValue , m.SetNoncustodialSources)\n return res\n}", "func VisitField(nodes []Node, field string, callback func(value string, negated bool, 
annotation Annotation)) {\n\tvisitor := &FieldVisitor{callback: callback, field: field}\n\tvisitor.VisitNodes(visitor, nodes)\n}", "func (w *State) GenerateFlowField(destination DestinationID) error {\n\tlog.Println(\"find shorted path\")\n\tFindShortestPath(w, destination)\n\tlog.Println(\"compute directions\")\n\tw.computeDirections(destination)\n\n\treturn nil\n\n}", "func (x *fastReflection_EventCreateBatch) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {\n\tswitch fd.FullName() {\n\tcase \"regen.ecocredit.v1alpha1.EventCreateBatch.class_id\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"regen.ecocredit.v1alpha1.EventCreateBatch.batch_denom\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"regen.ecocredit.v1alpha1.EventCreateBatch.issuer\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"regen.ecocredit.v1alpha1.EventCreateBatch.total_amount\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"regen.ecocredit.v1alpha1.EventCreateBatch.start_date\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"regen.ecocredit.v1alpha1.EventCreateBatch.end_date\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"regen.ecocredit.v1alpha1.EventCreateBatch.project_location\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tdefault:\n\t\tif fd.IsExtension() {\n\t\t\tpanic(fmt.Errorf(\"proto3 declared messages do not support extensions: regen.ecocredit.v1alpha1.EventCreateBatch\"))\n\t\t}\n\t\tpanic(fmt.Errorf(\"message regen.ecocredit.v1alpha1.EventCreateBatch does not contain field %s\", fd.FullName()))\n\t}\n}", "func (m *ProgramControl) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"controlId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n 
m.SetControlId(val)\n }\n return nil\n }\n res[\"controlTypeId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetControlTypeId(val)\n }\n return nil\n }\n res[\"createdDateTime\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCreatedDateTime(val)\n }\n return nil\n }\n res[\"displayName\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDisplayName(val)\n }\n return nil\n }\n res[\"owner\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateUserIdentityFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOwner(val.(UserIdentityable))\n }\n return nil\n }\n res[\"program\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateProgramFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetProgram(val.(Programable))\n }\n return nil\n }\n res[\"programId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetProgramId(val)\n }\n return nil\n }\n res[\"resource\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateProgramResourceFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetResource(val.(ProgramResourceable))\n }\n return nil\n }\n res[\"status\"] = func (n 
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetStatus(val)\n }\n return nil\n }\n return res\n}", "func (m *BusinessScenarioPlanner) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"planConfiguration\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreatePlannerPlanConfigurationFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPlanConfiguration(val.(PlannerPlanConfigurationable))\n }\n return nil\n }\n res[\"taskConfiguration\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreatePlannerTaskConfigurationFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetTaskConfiguration(val.(PlannerTaskConfigurationable))\n }\n return nil\n }\n res[\"tasks\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateBusinessScenarioTaskFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]BusinessScenarioTaskable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(BusinessScenarioTaskable)\n }\n }\n m.SetTasks(res)\n }\n return nil\n }\n return res\n}", "func (op *metadataLookup) field(parentFunc *stmt.CallExpr, expr stmt.Expr) {\n\tif op.err != nil {\n\t\treturn\n\t}\n\tswitch e := expr.(type) {\n\tcase *stmt.SelectItem:\n\t\top.field(nil, e.Expr)\n\tcase *stmt.CallExpr:\n\t\tif e.FuncType == function.Quantile {\n\t\t\top.planHistogramFields(e)\n\t\t\treturn\n\t\t}\n\t\tfor _, param := range e.Params {\n\t\t\top.field(e, param)\n\t\t}\n\tcase 
*stmt.ParenExpr:\n\t\top.field(nil, e.Expr)\n\tcase *stmt.BinaryExpr:\n\t\top.field(nil, e.Left)\n\t\top.field(nil, e.Right)\n\tcase *stmt.FieldExpr:\n\t\tqueryStmt := op.executeCtx.Query\n\t\tfieldMeta, err := op.metadata.GetField(queryStmt.Namespace, queryStmt.MetricName, field.Name(e.Name))\n\t\tif err != nil {\n\t\t\top.err = err\n\t\t\treturn\n\t\t}\n\n\t\top.planField(parentFunc, fieldMeta)\n\t}\n}", "func JsonFieldWithDefaultGenerator() gopter.Gen {\n\tif jsonFieldWithDefaultGenerator != nil {\n\t\treturn jsonFieldWithDefaultGenerator\n\t}\n\n\tgenerators := make(map[string]gopter.Gen)\n\tAddIndependentPropertyGeneratorsForJsonFieldWithDefault(generators)\n\tjsonFieldWithDefaultGenerator = gen.Struct(reflect.TypeOf(JsonFieldWithDefault{}), generators)\n\n\treturn jsonFieldWithDefaultGenerator\n}", "func (m *TemplateParameter) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"description\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDescription(val)\n }\n return nil\n }\n res[\"displayName\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDisplayName(val)\n }\n return nil\n }\n res[\"jsonAllowedValues\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetJsonAllowedValues(val)\n }\n return nil\n }\n res[\"jsonDefaultValue\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := 
n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetJsonDefaultValue(val)\n }\n return nil\n }\n res[\"@odata.type\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOdataType(val)\n }\n return nil\n }\n res[\"valueType\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseManagementParameterValueType)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetValueType(val.(*ManagementParameterValueType))\n }\n return nil\n }\n return res\n}", "func yangListFieldToGoType(listField *ygen.NodeDetails, listFieldName string, parent *ygen.ParsedDirectory, goStructElements map[string]*ygen.ParsedDirectory, generateOrderedMaps bool) (string, *generatedGoMultiKeyListStruct, *generatedGoListMethod, *generatedOrderedMapStruct, error) {\n\t// The list itself, since it is a container, has a struct associated with it. 
Retrieve\n\t// this from the set of Directory structs for which code (a Go struct) will be\n\t// generated such that additional details can be used in the code generation.\n\tlistElem, ok := goStructElements[listField.YANGDetails.Path]\n\tif !ok {\n\t\treturn \"\", nil, nil, nil, fmt.Errorf(\"struct for %s did not exist\", listField.YANGDetails.Path)\n\t}\n\n\tif len(listElem.ListKeys) == 0 {\n\t\t// Keyless list therefore represent this as a slice of pointers to\n\t\t// the struct that represents the list element itself.\n\t\treturn fmt.Sprintf(\"[]*%s\", listElem.Name), nil, nil, nil, nil\n\t}\n\n\tlistType, keyType, _, err := UnorderedMapTypeName(listField.YANGDetails.Path, listFieldName, parent.Name, goStructElements)\n\tif err != nil {\n\t\treturn \"\", nil, nil, nil, err\n\t}\n\tvar multiListKey *generatedGoMultiKeyListStruct\n\tvar listKeys []goStructField\n\n\tshortestPath := func(ss [][]string) [][]string {\n\t\tvar shortest []string\n\t\tfor _, s := range ss {\n\t\t\tif shortest == nil {\n\t\t\t\tshortest = s\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(s) < len(shortest) {\n\t\t\t\tshortest = s\n\t\t\t}\n\t\t}\n\t\treturn [][]string{shortest}\n\t}\n\n\tusedKeyElemNames := make(map[string]bool)\n\tfor _, keName := range listElem.ListKeyYANGNames {\n\t\tkeyType, ok := listElem.Fields[keName]\n\t\tif !ok {\n\t\t\treturn \"\", nil, nil, nil, fmt.Errorf(\"did not find type for key %s\", keName)\n\t\t}\n\n\t\tkeyField := goStructField{\n\t\t\tYANGName: keName,\n\t\t\tName: genutil.MakeNameUnique(listElem.ListKeys[keName].Name, usedKeyElemNames),\n\t\t\tType: listElem.ListKeys[keName].LangType.NativeType,\n\t\t\t// The shortest mapped path for a list key must be the path to the key.\n\t\t\tTags: mappedPathTag(shortestPath(keyType.MappedPaths), \"\"),\n\t\t}\n\t\tkeyField.IsScalarField = IsScalarField(keyType)\n\t\tlistKeys = append(listKeys, keyField)\n\t}\n\n\tswitch {\n\tcase len(listElem.ListKeys) != 1:\n\t\t// This is a list with multiple keys, so we need to 
generate a new structure\n\t\t// that represents the list key itself - this struct is described in a\n\t\t// generatedGoMultiKeyListStruct struct, which is then expanded by a template to the struct\n\t\t// definition.\n\t\tmultiListKey = &generatedGoMultiKeyListStruct{\n\t\t\tKeyStructName: keyType,\n\t\t\tParentPath: parent.Path,\n\t\t\tListName: listFieldName,\n\t\t\tKeys: listKeys,\n\t\t}\n\t}\n\n\tvar listMethodSpec *generatedGoListMethod\n\tvar orderedMapSpec *generatedOrderedMapStruct\n\n\tif listField.YANGDetails.OrderedByUser && generateOrderedMaps {\n\t\tstructName := OrderedMapTypeName(listElem.Name)\n\t\tlistType = fmt.Sprintf(\"*%s\", structName)\n\t\t// Create spec for generating ordered maps.\n\t\torderedMapSpec = &generatedOrderedMapStruct{\n\t\t\tStructName: structName,\n\t\t\tKeyName: keyType,\n\t\t\tListTypeName: listElem.Name,\n\t\t\tListFieldName: listFieldName,\n\t\t\tKeys: listKeys,\n\t\t\tParentStructName: parent.Name,\n\t\t\tYANGPath: listField.YANGDetails.Path,\n\t\t}\n\t} else {\n\t\t// Generate the specification for the methods that should be generated for this\n\t\t// list, such that this can be handed to the relevant templates to generate code.\n\t\tlistMethodSpec = &generatedGoListMethod{\n\t\t\tListName: listFieldName,\n\t\t\tListType: listElem.Name,\n\t\t\tKeys: listKeys,\n\t\t\tReceiver: parent.Name,\n\t\t}\n\t\tif multiListKey != nil {\n\t\t\tlistMethodSpec.KeyStruct = keyType\n\t\t}\n\t}\n\n\treturn listType, multiListKey, listMethodSpec, orderedMapSpec, nil\n}", "func (b *PlanBuilder) buildProjectionField(ctx context.Context, p LogicalPlan, field *ast.SelectField, expr expression.Expression) (*expression.Column, *types.FieldName, error) {\n\tvar origTblName, tblName, colName, dbName parser_model.CIStr\n\tinnerNode := getInnerFromParenthesesAndUnaryPlus(field.Expr)\n\tcol, isCol := expr.(*expression.Column)\n\t// Correlated column won't affect the final output names. 
So we can put it in any of the three logic block.\n\t// Don't put it into the first block just for simplifying the codes.\n\tif colNameField, ok := innerNode.(*ast.ColumnNameExpr); ok && isCol {\n\t\t// Field is a column reference.\n\t\tidx := p.Schema().ColumnIndex(col)\n\t\tvar name *types.FieldName\n\t\t// The column maybe the one from join's redundant part.\n\t\t// TODO: Fully support USING/NATURAL JOIN, refactor here.\n\t\tif idx != -1 {\n\t\t\tname = p.OutputNames()[idx]\n\t\t}\n\t\tcolName, _, tblName, origTblName, dbName = b.buildProjectionFieldNameFromColumns(field, colNameField, name)\n\t} else if field.AsName.L != \"\" {\n\t\t// Field has alias.\n\t\tcolName = field.AsName\n\t} else {\n\t\t// Other: field is an expression.\n\t\tvar err error\n\t\tif colName, err = b.buildProjectionFieldNameFromExpressions(ctx, field); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\tname := &types.FieldName{\n\t\tTblName: tblName,\n\t\tOrigTblName: origTblName,\n\t\tColName: colName,\n\t\tOrigColName: colName,\n\t\tDBName: dbName,\n\t}\n\tif isCol {\n\t\treturn col, name, nil\n\t}\n\tnewCol := &expression.Column{\n\t\tUniqueID: b.ctx.GetSessionVars().AllocPlanColumnID(),\n\t\tRetType: expr.GetType(),\n\t\tOrigName: colName.L,\n\t}\n\treturn newCol, name, nil\n}", "func (node *selfNode) packToStructByFieldName(st reflect.Value) (err error) {\n\n\tnodeName := node.head.String()\n\tfor _, n := range node.values {\n\t\tif _, ok := n.(*selfNode); !ok {\n\t\t\treturn n.newPackError(\"field `\" + nodeName + \"` should be only made of lists\")\n\t\t}\n\t\tvalueNode := n.(*selfNode)\n\t\tfieldName := publicName(valueNode.head.String())\n\t\ttargetField := st.FieldByName(fieldName)\n\t\tif !targetField.IsValid() {\n\t\t\treturn valueNode.newPackError(\"undefined field `\" + fieldName + \"` for node `\" + nodeName + \"`\")\n\t\t}\n\n\t\tif err = valueNode.packIntoField(fieldName, targetField); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn nil\n}", "func genArguments(args 
[]*ast.InputValueDefinition) *jen.Statement {\n\t//\n\t// Generate config for arguments\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(\n\t// \"style is stylish\"\n\t// style: NameComponentsStyle = SHORT,\n\t// ): String!\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// FieldConfigArgument{\n\t// \"style\": &ArgumentConfig{ ... }\n\t// },\n\t//\n\treturn jen.Qual(defsPkg, \"FieldConfigArgument\").Values(\n\t\tjen.DictFunc(func(d jen.Dict) {\n\t\t\tfor _, arg := range args {\n\t\t\t\td[jen.Lit(arg.Name.Value)] = genArgument(arg)\n\t\t\t}\n\t\t}),\n\t)\n}", "func (*Base) ObjectField(p ASTPass, field *ast.ObjectField, ctx Context) {\n\tswitch field.Kind {\n\tcase ast.ObjectLocal:\n\t\tp.Fodder(p, &field.Fodder1, ctx)\n\t\tp.Fodder(p, &field.Fodder2, ctx)\n\t\tp.FieldParams(p, field, ctx)\n\t\tp.Fodder(p, &field.OpFodder, ctx)\n\t\tp.Visit(p, &field.Expr2, ctx)\n\n\tcase ast.ObjectFieldID:\n\t\tp.Fodder(p, &field.Fodder1, ctx)\n\t\tp.FieldParams(p, field, ctx)\n\t\tp.Fodder(p, &field.OpFodder, ctx)\n\t\tp.Visit(p, &field.Expr2, ctx)\n\n\tcase ast.ObjectFieldStr:\n\t\tp.Visit(p, &field.Expr1, ctx)\n\t\tp.FieldParams(p, field, ctx)\n\t\tp.Fodder(p, &field.OpFodder, ctx)\n\t\tp.Visit(p, &field.Expr2, ctx)\n\n\tcase ast.ObjectFieldExpr:\n\t\tp.Fodder(p, &field.Fodder1, ctx)\n\t\tp.Visit(p, &field.Expr1, ctx)\n\t\tp.Fodder(p, &field.Fodder2, ctx)\n\t\tp.FieldParams(p, field, ctx)\n\t\tp.Fodder(p, &field.OpFodder, ctx)\n\t\tp.Visit(p, &field.Expr2, ctx)\n\n\tcase ast.ObjectAssert:\n\t\tp.Fodder(p, &field.Fodder1, ctx)\n\t\tp.Visit(p, &field.Expr2, ctx)\n\t\tif field.Expr3 != nil {\n\t\t\tp.Fodder(p, &field.OpFodder, ctx)\n\t\t\tp.Visit(p, &field.Expr3, ctx)\n\t\t}\n\t}\n\n\tp.Fodder(p, &field.CommaFodder, ctx)\n}", "func Struct(rt reflect.Type, gens map[string]gopter.Gen) gopter.Gen {\n\tif rt.Kind() == reflect.Ptr {\n\t\trt = rt.Elem()\n\t}\n\tif rt.Kind() != reflect.Struct {\n\t\treturn Fail(rt)\n\t}\n\tfieldGens := []gopter.Gen{}\n\tfieldTypes := 
[]reflect.Type{}\n\tassignable := reflect.New(rt).Elem()\n\tfor i := 0; i < rt.NumField(); i++ {\n\t\tfieldName := rt.Field(i).Name\n\t\tif !assignable.Field(i).CanSet() {\n\t\t\tcontinue\n\t\t}\n\n\t\tgen := gens[fieldName]\n\t\tif gen != nil {\n\t\t\tfieldGens = append(fieldGens, gen)\n\t\t\tfieldTypes = append(fieldTypes, rt.Field(i).Type)\n\t\t}\n\t}\n\n\tbuildStructType := reflect.FuncOf(fieldTypes, []reflect.Type{rt}, false)\n\tunbuildStructType := reflect.FuncOf([]reflect.Type{rt}, fieldTypes, false)\n\n\tbuildStructFunc := reflect.MakeFunc(buildStructType, func(args []reflect.Value) []reflect.Value {\n\t\tresult := reflect.New(rt)\n\t\tfor i := 0; i < rt.NumField(); i++ {\n\t\t\tif _, ok := gens[rt.Field(i).Name]; !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !assignable.Field(i).CanSet() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresult.Elem().Field(i).Set(args[0])\n\t\t\targs = args[1:]\n\t\t}\n\t\treturn []reflect.Value{result.Elem()}\n\t})\n\tunbuildStructFunc := reflect.MakeFunc(unbuildStructType, func(args []reflect.Value) []reflect.Value {\n\t\ts := args[0]\n\t\tresults := []reflect.Value{}\n\t\tfor i := 0; i < s.NumField(); i++ {\n\t\t\tif _, ok := gens[rt.Field(i).Name]; !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !assignable.Field(i).CanSet() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresults = append(results, s.Field(i))\n\t\t}\n\t\treturn results\n\t})\n\n\treturn gopter.DeriveGen(\n\t\tbuildStructFunc.Interface(),\n\t\tunbuildStructFunc.Interface(),\n\t\tfieldGens...,\n\t)\n}", "func (m *DeviceConfigurationAssignment) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"intent\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceConfigAssignmentIntent)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetIntent(val.(*DeviceConfigAssignmentIntent))\n 
}\n return nil\n }\n res[\"source\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceAndAppManagementAssignmentSource)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSource(val.(*DeviceAndAppManagementAssignmentSource))\n }\n return nil\n }\n res[\"sourceId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSourceId(val)\n }\n return nil\n }\n res[\"target\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateDeviceAndAppManagementAssignmentTargetFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetTarget(val.(DeviceAndAppManagementAssignmentTargetable))\n }\n return nil\n }\n return res\n}", "func generate(copyrights string, collector *collector, templateBuilder templateBuilder) {\n\tfor _, pkg := range collector.Packages {\n\t\tfileTemplate := fileTpl{\n\t\t\tCopyright: copyrights,\n\n\t\t\tStandardImports: []string{\n\t\t\t\t\"fmt\",\n\t\t\t\t\"unicode\",\n\t\t\t\t\"unicode/utf8\",\n\t\t\t},\n\n\t\t\tCustomImports: []string{\n\t\t\t\t\"github.com/google/uuid\",\n\t\t\t},\n\t\t}\n\t\tfor _, f := range pkg.Files {\n\t\t\tfor _, d := range f.Decls {\n\t\t\t\tg, ok := d.(*ast.GenDecl)\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tstructs := structSearch(g)\n\t\t\t\tif len(structs) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tfor _, s := range structs {\n\t\t\t\t\tatLeastOneField := false\n\n\t\t\t\t\tfor _, field := range s.Type.Fields.List {\n\n\t\t\t\t\t\tpos := collector.FileSet.Position(field.Type.Pos())\n\t\t\t\t\t\ttyp := collector.Info.TypeOf(field.Type)\n\n\t\t\t\t\t\tcomposedType := \"\"\n\t\t\t\t\t\tbaseName := getType(typ, &composedType)\n\t\t\t\t\t\tfmt.Println(\"Add validation: \", pos, \": 
\", baseName, \"/\", composedType)\n\n\t\t\t\t\t\tif err := templateBuilder.generateCheck(field, s.Name, baseName, composedType); err != nil {\n\t\t\t\t\t\t\tfmt.Printf(\"struct %s: %s\\n\", s.Name, err)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tatLeastOneField = true\n\t\t\t\t\t}\n\n\t\t\t\t\tif !atLeastOneField {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\terr := templateBuilder.generateMethod(s.Name)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfmt.Printf(\"struct gen %s: %s\\n\", s.Name, err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfileTemplate.Package = pkg.Name\n\t\terr := templateBuilder.generateFile(pkg.Path, fileTemplate)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Generation error\", err)\n\t\t}\n\t}\n}", "func (p *Planner) configureObjectFieldSource(upstreamFieldRef, downstreamFieldRef int, fieldConfiguration plan.FieldConfiguration, argumentConfiguration plan.ArgumentConfiguration) {\n\tif len(argumentConfiguration.SourcePath) < 1 {\n\t\treturn\n\t}\n\n\tfieldName := p.visitor.Operation.FieldNameUnsafeString(downstreamFieldRef)\n\n\tif len(fieldConfiguration.Path) == 1 {\n\t\tfieldName = fieldConfiguration.Path[0]\n\t}\n\n\tqueryTypeDefinition, exists := p.visitor.Definition.Index.FirstNodeByNameBytes(p.visitor.Definition.Index.QueryTypeName)\n\tif !exists {\n\t\treturn\n\t}\n\targumentDefinition := p.visitor.Definition.NodeFieldDefinitionArgumentDefinitionByName(queryTypeDefinition, []byte(fieldName), []byte(argumentConfiguration.Name))\n\tif argumentDefinition == -1 {\n\t\treturn\n\t}\n\n\targumentType := p.visitor.Definition.InputValueDefinitionType(argumentDefinition)\n\tvariableName := p.upstreamOperation.GenerateUnusedVariableDefinitionName(p.nodes[0].Ref)\n\tvariableValue, argument := p.upstreamOperation.AddVariableValueArgument([]byte(argumentConfiguration.Name), variableName)\n\tp.upstreamOperation.AddArgumentToField(upstreamFieldRef, argument)\n\timportedType := p.visitor.Importer.ImportType(argumentType, 
p.visitor.Definition, p.upstreamOperation)\n\tp.upstreamOperation.AddVariableDefinitionToOperationDefinition(p.nodes[0].Ref, variableValue, importedType)\n\n\tobjectVariableName, exists := p.variables.AddVariable(&resolve.ObjectVariable{\n\t\tPath: argumentConfiguration.SourcePath,\n\t\tRenderAsGraphQLValue: true,\n\t})\n\tif !exists {\n\t\tp.upstreamVariables, _ = sjson.SetRawBytes(p.upstreamVariables, string(variableName), []byte(objectVariableName))\n\t}\n}", "func (s *BasePlSqlParserListener) EnterField_spec(ctx *Field_specContext) {}", "func (c *TypeConverter) GenStructConverter(\n\tfromFields []*compile.FieldSpec,\n\ttoFields []*compile.FieldSpec,\n\tfieldMap map[string]FieldMapperEntry,\n) error {\n\t// Add compiled FieldSpecs to the FieldMapperEntry\n\tfieldMap = addSpecToMap(fieldMap, fromFields, \"\")\n\t// Check for vlaues not populated recursively by addSpecToMap\n\tfor k, v := range fieldMap {\n\t\tif fieldMap[k].Field == nil {\n\t\t\treturn errors.Errorf(\n\t\t\t\t\"Failed to find field ( %s ) for transform.\",\n\t\t\t\tv.QualifiedName,\n\t\t\t)\n\t\t}\n\t}\n\n\tc.useRecurGen = c.isRecursiveStruct(toFields) || c.isRecursiveStruct(fromFields)\n\n\tif c.useRecurGen && len(fieldMap) != 0 {\n\t\tc.append(\"inOriginal := in; _ = inOriginal\")\n\t\tc.append(\"outOriginal := out; _ = outOriginal\")\n\t}\n\n\terr := c.genStructConverter(\"\", \"\", \"\", fromFields, toFields, fieldMap, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c *Core) generate(tab Table) (string, error) {\n\tref := []reflect.StructField{}\n\tfor _, col := range tab.Columns {\n\t\tv := reflect.StructField{\n\t\t\tName: strings.Title(col.Name),\n\t\t}\n\t\tif col.Annotations != \"\" {\n\t\t\tv.Tag = reflect.StructTag(col.Annotations)\n\t\t}\n\t\tswitch col.Type {\n\t\tcase \"float\":\n\t\t\tv.Type = reflect.TypeOf(float64(0))\n\t\tcase \"varchar\":\n\t\t\tv.Type = reflect.TypeOf(string(\"\"))\n\t\tcase \"integer\", \"int\", \"tinyint\":\n\t\t\tv.Type = 
reflect.TypeOf(int(0))\n\t\tcase \"bigint\":\n\t\t\tv.Type = reflect.TypeOf(int64(0))\n\t\tcase \"timestamp\":\n\t\t\tv.Type = reflect.TypeOf(time.Time{})\n\t\t}\n\t\tref = append(ref, v)\n\t}\n\treturn fmt.Sprintf(\"type %s %s\", strings.Title(tab.Name), reflect.StructOf(ref).String()), nil\n}", "func GenerateGoCode(preamble string, mainDefAddr string, includeDirectories []string, generate_tests bool) error {\n\n\toutDefs, version, err := XMLToFields(mainDefAddr, includeDirectories)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// merge enums together\n\tenums := make(map[string]*OutEnum)\n\tfor _, def := range outDefs {\n\t\tfor _, defEnum := range def.Enums {\n\t\t\tif _, ok := enums[defEnum.Name]; !ok {\n\t\t\t\tenums[defEnum.Name] = &OutEnum{\n\t\t\t\t\tName: defEnum.Name,\n\t\t\t\t\tDescription: defEnum.Description,\n\t\t\t\t}\n\t\t\t}\n\t\t\tenum := enums[defEnum.Name]\n\n\t\t\tfor _, v := range defEnum.Values {\n\t\t\t\tenum.Values = append(enum.Values, v)\n\t\t\t}\n\t\t}\n\t}\n\n\t// fill enum missing values\n\tfor _, enum := range enums {\n\t\tnextVal := 0\n\t\tfor _, v := range enum.Values {\n\t\t\tif v.Value != \"\" {\n\t\t\t\tnextVal, _ = strconv.Atoi(v.Value)\n\t\t\t\tnextVal++\n\t\t\t} else {\n\t\t\t\tv.Value = strconv.Itoa(nextVal)\n\t\t\t\tnextVal++\n\t\t\t}\n\t\t}\n\t}\n\n\t// get package name\n\t// remove underscores since they can lead to errors\n\t// (for instance, when package name ends with _test)\n\t_, inFile := filepath.Split(mainDefAddr)\n\tpkgName := strings.TrimSuffix(inFile, \".xml\")\n\n\t// dump\n\tif generate_tests {\n\t\treturn tplDialectTest.Execute(os.Stdout, map[string]interface{}{\n\t\t\t\"PkgName\": pkgName,\n\t\t\t\"Preamble\": preamble,\n\t\t\t\"Version\": func() int {\n\t\t\t\tret := int(version)\n\t\t\t\treturn ret\n\t\t\t}(),\n\t\t\t\"Defs\": outDefs,\n\t\t\t\"Enums\": enums,\n\t\t})\n\t} else {\n\t\treturn tplDialect.Execute(os.Stdout, map[string]interface{}{\n\t\t\t\"PkgName\": pkgName,\n\t\t\t\"Preamble\": 
preamble,\n\t\t\t\"Version\": func() int {\n\t\t\t\tret := int(version)\n\t\t\t\treturn ret\n\t\t\t}(),\n\t\t\t\"Defs\": outDefs,\n\t\t\t\"Enums\": enums,\n\t\t})\n\t}\n}", "func (x *fastReflection_ModuleOptions) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {\n\tswitch fd.FullName() {\n\tcase \"cosmos.autocli.v1.ModuleOptions.tx\":\n\t\tm := new(ServiceCommandDescriptor)\n\t\treturn protoreflect.ValueOfMessage(m.ProtoReflect())\n\tcase \"cosmos.autocli.v1.ModuleOptions.query\":\n\t\tm := new(ServiceCommandDescriptor)\n\t\treturn protoreflect.ValueOfMessage(m.ProtoReflect())\n\tdefault:\n\t\tif fd.IsExtension() {\n\t\t\tpanic(fmt.Errorf(\"proto3 declared messages do not support extensions: cosmos.autocli.v1.ModuleOptions\"))\n\t\t}\n\t\tpanic(fmt.Errorf(\"message cosmos.autocli.v1.ModuleOptions does not contain field %s\", fd.FullName()))\n\t}\n}", "func (m *Directory) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"administrativeUnits\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateAdministrativeUnitFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]AdministrativeUnitable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(AdministrativeUnitable)\n }\n }\n m.SetAdministrativeUnits(res)\n }\n return nil\n }\n res[\"attributeSets\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateAttributeSetFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]AttributeSetable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(AttributeSetable)\n }\n }\n m.SetAttributeSets(res)\n }\n return nil\n }\n 
res[\"customSecurityAttributeDefinitions\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateCustomSecurityAttributeDefinitionFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]CustomSecurityAttributeDefinitionable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(CustomSecurityAttributeDefinitionable)\n }\n }\n m.SetCustomSecurityAttributeDefinitions(res)\n }\n return nil\n }\n res[\"deletedItems\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateDirectoryObjectFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]DirectoryObjectable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(DirectoryObjectable)\n }\n }\n m.SetDeletedItems(res)\n }\n return nil\n }\n res[\"federationConfigurations\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateIdentityProviderBaseFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]IdentityProviderBaseable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(IdentityProviderBaseable)\n }\n }\n m.SetFederationConfigurations(res)\n }\n return nil\n }\n res[\"onPremisesSynchronization\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateOnPremisesDirectorySynchronizationFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]OnPremisesDirectorySynchronizationable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(OnPremisesDirectorySynchronizationable)\n }\n }\n m.SetOnPremisesSynchronization(res)\n }\n return nil\n }\n return res\n}", "func 
printStructField(t *reflect.Type) {\n fieldNum := (*t).NumField()\n for i := 0; i < fieldNum; i++ {\n fmt.Printf(\"conf's field: %s\\n\", (*t).Field(i).Name)\n }\n fmt.Println(\"\")\n}", "func handleSpecRecursive(module *common.Module, astFiles *AstFiles, spec interface{}, prefix, aliasPrefix, event string, iterator *common.StructField, dejavu map[string]bool) {\n\tif verbose {\n\t\tfmt.Printf(\"handleSpec spec: %+v, prefix: %s, aliasPrefix %s, event %s, iterator %+v\\n\", spec, prefix, aliasPrefix, event, iterator)\n\t}\n\n\tvar typeSpec *ast.TypeSpec\n\tvar structType *ast.StructType\n\tvar ok bool\n\tif typeSpec, ok = spec.(*ast.TypeSpec); !ok {\n\t\treturn\n\t}\n\tif structType, ok = typeSpec.Type.(*ast.StructType); !ok {\n\t\tlog.Printf(\"Don't know what to do with %s (%s)\", typeSpec.Name, spew.Sdump(typeSpec))\n\t\treturn\n\t}\n\n\tfor _, field := range structType.Fields.List {\n\t\tfieldCommentText := field.Comment.Text()\n\t\tfieldIterator := iterator\n\n\t\tvar tag reflect.StructTag\n\t\tif field.Tag != nil {\n\t\t\ttag = reflect.StructTag(field.Tag.Value[1 : len(field.Tag.Value)-1])\n\t\t}\n\n\t\tif e, ok := tag.Lookup(\"event\"); ok {\n\t\t\tevent = e\n\t\t\tif _, ok = module.EventTypes[e]; !ok {\n\t\t\t\tmodule.EventTypes[e] = common.NewEventTypeMetada()\n\t\t\t\tdejavu = make(map[string]bool) // clear dejavu map when it's a new event type\n\t\t\t}\n\t\t\tif e != \"*\" {\n\t\t\t\tmodule.EventTypes[e].Doc = fieldCommentText\n\t\t\t}\n\t\t}\n\n\t\tif isEmbedded := len(field.Names) == 0; isEmbedded {\n\t\t\tif fieldTag, found := tag.Lookup(\"field\"); found && fieldTag == \"-\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tident, _ := field.Type.(*ast.Ident)\n\t\t\tif ident == nil {\n\t\t\t\tif starExpr, ok := field.Type.(*ast.StarExpr); ok {\n\t\t\t\t\tident, _ = starExpr.X.(*ast.Ident)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif ident != nil {\n\t\t\t\tname := ident.Name\n\t\t\t\tif prefix != \"\" {\n\t\t\t\t\tname = prefix + \".\" + 
ident.Name\n\t\t\t\t}\n\n\t\t\t\tembedded := astFiles.LookupSymbol(ident.Name)\n\t\t\t\tif embedded != nil {\n\t\t\t\t\thandleEmbedded(module, ident.Name, prefix, event, field.Type)\n\t\t\t\t\thandleSpecRecursive(module, astFiles, embedded.Decl, name, aliasPrefix, event, fieldIterator, dejavu)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Printf(\"failed to resolve symbol for %+v in %s\", ident.Name, pkgname)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfieldBasename := field.Names[0].Name\n\t\t\tif !unicode.IsUpper(rune(fieldBasename[0])) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif dejavu[fieldBasename] {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar opOverrides string\n\t\t\tvar fields []seclField\n\t\t\tif tags, err := structtag.Parse(string(tag)); err == nil && len(tags.Tags()) != 0 {\n\t\t\t\topOverrides, fields = parseTags(tags, typeSpec.Name.Name)\n\n\t\t\t\tif opOverrides == \"\" && fields == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\tfields = append(fields, seclField{name: fieldBasename})\n\t\t\t}\n\n\t\t\tfieldType, isPointer, isArray := getFieldIdentName(field.Type)\n\n\t\t\tprefixedFieldName := fieldBasename\n\t\t\tif prefix != \"\" {\n\t\t\t\tprefixedFieldName = fmt.Sprintf(\"%s.%s\", prefix, fieldBasename)\n\t\t\t}\n\n\t\t\tfor _, seclField := range fields {\n\t\t\t\thandleNonEmbedded(module, seclField, prefixedFieldName, event, fieldType, isPointer, isArray)\n\n\t\t\t\tif seclFieldIterator := seclField.iterator; seclFieldIterator != \"\" {\n\t\t\t\t\tfieldIterator = handleIterator(module, seclField, fieldType, seclFieldIterator, aliasPrefix, prefixedFieldName, event, fieldCommentText, opOverrides, isPointer, isArray)\n\t\t\t\t}\n\n\t\t\t\tif handler := seclField.handler; handler != \"\" {\n\n\t\t\t\t\thandleFieldWithHandler(module, seclField, aliasPrefix, prefix, prefixedFieldName, fieldType, seclField.containerStructName, event, fieldCommentText, opOverrides, handler, isPointer, isArray, fieldIterator)\n\n\t\t\t\t\tdelete(dejavu, 
fieldBasename)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif verbose {\n\t\t\t\t\tlog.Printf(\"Don't know what to do with %s: %s\", fieldBasename, spew.Sdump(field.Type))\n\t\t\t\t}\n\n\t\t\t\tdejavu[fieldBasename] = true\n\n\t\t\t\tif len(fieldType) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\talias := seclField.name\n\t\t\t\tif isBasicType(fieldType) {\n\t\t\t\t\thandleBasic(module, seclField, fieldBasename, alias, aliasPrefix, prefix, fieldType, event, opOverrides, fieldCommentText, seclField.containerStructName, fieldIterator, isArray)\n\t\t\t\t} else {\n\t\t\t\t\tspec := astFiles.LookupSymbol(fieldType)\n\t\t\t\t\tif spec != nil {\n\t\t\t\t\t\tnewPrefix, newAliasPrefix := fieldBasename, alias\n\n\t\t\t\t\t\tif prefix != \"\" {\n\t\t\t\t\t\t\tnewPrefix = prefix + \".\" + fieldBasename\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif aliasPrefix != \"\" {\n\t\t\t\t\t\t\tnewAliasPrefix = aliasPrefix + \".\" + alias\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\thandleSpecRecursive(module, astFiles, spec.Decl, newPrefix, newAliasPrefix, event, fieldIterator, dejavu)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlog.Printf(\"failed to resolve symbol for %+v in %s\", fieldType, pkgname)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif !seclField.exposedAtEventRootOnly {\n\t\t\t\t\tdelete(dejavu, fieldBasename)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func GenElem(in *ast.TypeSpec) gen.Elem {\n\t// handle supported types\n\tswitch in.Type.(type) {\n\n\tcase *ast.StructType:\n\t\tv := in.Type.(*ast.StructType)\n\t\tfmt.Printf(chalk.Green.Color(\"parsing %s...\"), in.Name.Name)\n\t\tp := &gen.Ptr{\n\t\t\tValue: &gen.Struct{\n\t\t\t\tName: in.Name.Name, // ast.Ident\n\t\t\t\tFields: parseFieldList(v.Fields),\n\t\t\t},\n\t\t}\n\n\t\t// mark type as processed\n\t\tglobalProcessed[in.Name.Name] = struct{}{}\n\n\t\tif len(p.Value.(*gen.Struct).Fields) == 0 {\n\t\t\tfmt.Printf(chalk.Red.Color(\" has no exported fields \\u2717\\n\")) // X\n\t\t\treturn nil\n\t\t}\n\t\tfmt.Print(chalk.Green.Color(\" \\u2713\\n\")) // 
check\n\t\treturn p\n\n\tdefault:\n\t\treturn nil\n\n\t}\n}", "func (node selfNode) packIntoField(name string, field reflect.Value) (err error) {\n\n\tfieldKind := field.Kind()\n\n\tif isScalarKind(fieldKind) {\n\t\tif len(node.values) != 1 {\n\t\t\treturn node.newPackError(\"bad number of values for scalar field `\" + name + \"`\")\n\t\t}\n\t\tif _, ok := node.values[0].(selfString); !ok {\n\t\t\treturn node.newPackError(\"expected a string element for scalar field `\" + name + \"`\")\n\t\t}\n\t\tstrValue := node.values[0].(selfString)\n\t\treturn strValue.packIntoField(name, field)\n\n\t} else if fieldKind == reflect.Struct {\n\t\treturn node.packToStruct(field)\n\n\t} else if fieldKind == reflect.Array {\n\t\treturn node.packToArray(field)\n\n\t} else if fieldKind == reflect.Slice {\n\t\treturn node.packToSlice(field)\n\n\t} else if fieldKind == reflect.Map {\n\t\tfield.Set(reflect.MakeMap(field.Type())) // Map requires initialization.\n\t\treturn node.packToMap(field)\n\n\t} else {\n\t\treturn node.newPackError(\"unsupported field kind \" + fieldKind.String())\n\t}\n\n\treturn\n}", "func (m *DeviceManagementConfigurationSettingGroupDefinition) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.DeviceManagementConfigurationSettingDefinition.GetFieldDeserializers()\n res[\"childIds\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfPrimitiveValues(\"string\")\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]string, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = *(v.(*string))\n }\n }\n m.SetChildIds(res)\n }\n return nil\n }\n res[\"dependedOnBy\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateDeviceManagementConfigurationSettingDependedOnByFromDiscriminatorValue)\n if 
err != nil {\n return err\n }\n if val != nil {\n res := make([]DeviceManagementConfigurationSettingDependedOnByable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(DeviceManagementConfigurationSettingDependedOnByable)\n }\n }\n m.SetDependedOnBy(res)\n }\n return nil\n }\n res[\"dependentOn\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateDeviceManagementConfigurationDependentOnFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]DeviceManagementConfigurationDependentOnable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(DeviceManagementConfigurationDependentOnable)\n }\n }\n m.SetDependentOn(res)\n }\n return nil\n }\n return res\n}", "func (m *LabelActionBase) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"name\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetName(val)\n }\n return nil\n }\n res[\"@odata.type\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOdataType(val)\n }\n return nil\n }\n return res\n}", "func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value {\n\tif !receiver.IsValid() {\n\t\treturn zero\n\t}\n\ttyp := receiver.Type()\n\treceiver, _ = indirect(receiver)\n\t// Unless it's an interface, need to get to a value of type *T to guarantee\n\t// we see all methods of T and *T.\n\tptr := receiver\n\tif 
ptr.Kind() != reflect.Interface && ptr.CanAddr() {\n\t\tptr = ptr.Addr()\n\t}\n\tif method := ptr.MethodByName(fieldName); method.IsValid() {\n\t\treturn s.evalCall(dot, method, node, fieldName, args, final)\n\t}\n\thasArgs := len(args) > 1 || final.IsValid()\n\t// It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil.\n\treceiver, isNil := indirect(receiver)\n\tif isNil {\n\t\ts.errorf(\"nil pointer evaluating %s.%s\", typ, fieldName)\n\t}\n\tswitch receiver.Kind() {\n\tcase reflect.Struct:\n\t\ttField, ok := receiver.Type().FieldByName(fieldName)\n\t\tif ok {\n\t\t\tfield := receiver.FieldByIndex(tField.Index)\n\t\t\tif tField.PkgPath != \"\" { // field is unexported\n\t\t\t\ts.errorf(\"%s is an unexported field of struct type %s\", fieldName, typ)\n\t\t\t}\n\t\t\t// If it's a function, we must call it.\n\t\t\tif hasArgs {\n\t\t\t\ts.errorf(\"%s has arguments but cannot be invoked as function\", fieldName)\n\t\t\t}\n\t\t\treturn field\n\t\t}\n\t\ts.errorf(\"%s is not a field of struct type %s\", fieldName, typ)\n\tcase reflect.Map:\n\t\t// If it's a map, attempt to use the field name as a key.\n\t\tnameVal := reflect.ValueOf(fieldName)\n\t\tif nameVal.Type().AssignableTo(receiver.Type().Key()) {\n\t\t\tif hasArgs {\n\t\t\t\ts.errorf(\"%s is not a method but has arguments\", fieldName)\n\t\t\t}\n\t\t\treturn receiver.MapIndex(nameVal)\n\t\t}\n\t}\n\ts.errorf(\"can't evaluate field %s in type %s\", fieldName, typ)\n\tpanic(\"not reached\")\n}", "func (m *AccessPackage) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"accessPackagesIncompatibleWith\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateAccessPackageFromDiscriminatorValue , m.SetAccessPackagesIncompatibleWith)\n res[\"assignmentPolicies\"] = 
i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateAccessPackageAssignmentPolicyFromDiscriminatorValue , m.SetAssignmentPolicies)\n res[\"catalog\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetObjectValue(CreateAccessPackageCatalogFromDiscriminatorValue , m.SetCatalog)\n res[\"createdDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetCreatedDateTime)\n res[\"description\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDescription)\n res[\"displayName\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDisplayName)\n res[\"incompatibleAccessPackages\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateAccessPackageFromDiscriminatorValue , m.SetIncompatibleAccessPackages)\n res[\"incompatibleGroups\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateGroupFromDiscriminatorValue , m.SetIncompatibleGroups)\n res[\"isHidden\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetBoolValue(m.SetIsHidden)\n res[\"modifiedDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetModifiedDateTime)\n return res\n}", "func (m *VppToken) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"appleId\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetAppleId)\n res[\"automaticallyUpdateApps\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetBoolValue(m.SetAutomaticallyUpdateApps)\n res[\"countryOrRegion\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetCountryOrRegion)\n 
res[\"expirationDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetExpirationDateTime)\n res[\"lastModifiedDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetLastModifiedDateTime)\n res[\"lastSyncDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetLastSyncDateTime)\n res[\"lastSyncStatus\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetEnumValue(ParseVppTokenSyncStatus , m.SetLastSyncStatus)\n res[\"organizationName\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetOrganizationName)\n res[\"state\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetEnumValue(ParseVppTokenState , m.SetState)\n res[\"token\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetToken)\n res[\"vppTokenAccountType\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetEnumValue(ParseVppTokenAccountType , m.SetVppTokenAccountType)\n return res\n}", "func parseStructField(cache structCache, key, sk, keytail string, values []string, target reflect.Value) {\n\tl, ok := cache[sk]\n\tif !ok {\n\t\tpanic(KeyError{\n\t\t\tFullKey: key,\n\t\t\tKey: kpath(key, keytail),\n\t\t\tType: target.Type(),\n\t\t\tField: sk,\n\t\t})\n\t}\n\tf := target.Field(l.offset)\n\n\tl.parse(key, keytail, values, f)\n}", "func getFieldMetadata(pkg *model.ModelPackage, column *model.Column) string {\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"sql:\\\"\")\n\tbuffer.WriteString(\"type:\")\n\tbuffer.WriteString(column.ColumnType)\n\tif !column.IsNullable {\n\t\tbuffer.WriteString(\";not null\")\n\t}\n\tif column.IsUnique {\n\t\tbuffer.WriteString(\";unique\")\n\t}\n\tif column.IsAutoIncrement {\n\t\tbuffer.WriteString(\";AUTO_INCREMENT\")\n\t}\n\tbuffer.WriteString(\"\\\"\")\n\treturn buffer.String()\n}", "func (p *Planner) planField(e 
*Executor, f *Field) (processor, error) {\n\treturn p.planExpr(e, f.Expr)\n}", "func Field(typ ast.Expr, names ...*ast.Ident) *ast.Field {\n\treturn &ast.Field{\n\t\tNames: names,\n\t\tType: typ,\n\t}\n}", "func generatePerNodeSnippet(pathStructName string, nodeData *ypathgen.NodeData, fakeRootTypeName, schemaStructPkgAccessor string, preferShadowPath bool) (GoPerNodeCodeSnippet, goTypeData, util.Errors) {\n\t// Special case: ieeefloat32 is represented as a 4-byte binary in YANG\n\t// and ygen, but float32 is more user-friendly.\n\tvar specialConversionFn string\n\tif nodeData.YANGTypeName == \"ieeefloat32\" {\n\t\tswitch nodeData.LocalGoTypeName {\n\t\tcase \"Binary\":\n\t\t\tnodeData.GoTypeName = \"float32\"\n\t\t\tnodeData.LocalGoTypeName = \"float32\"\n\t\t\tspecialConversionFn = \"ygot.BinaryToFloat32\"\n\t\tcase \"[]\" + \"Binary\":\n\t\t\tnodeData.GoTypeName = \"[]float32\"\n\t\t\tnodeData.LocalGoTypeName = \"[]float32\"\n\t\t\tspecialConversionFn = \"binarySliceToFloat32\"\n\t\tdefault:\n\t\t\treturn GoPerNodeCodeSnippet{}, goTypeData{}, util.NewErrs(\n\t\t\t\terrors.Errorf(\"ieeefloat32 is expected to be a binary, got %q\", nodeData.LocalGoTypeName))\n\t\t}\n\t}\n\n\tvar errs util.Errors\n\ts := struct {\n\t\tPathStructName string\n\t\tGoType goTypeData\n\t\tGoFieldName string\n\t\tGoStructTypeName string\n\t\tYANGPath string\n\t\tFakeRootTypeName string\n\t\t// IsScalarField indicates a leaf that is stored as a pointer\n\t\t// in its parent struct.\n\t\tIsScalarField bool\n\t\tIsRoot bool\n\t\tSchemaStructPkgAccessor string\n\t\t// WildcardSuffix is the suffix used to indicate that a path\n\t\t// node contains a wildcard.\n\t\tWildcardSuffix string\n\t\t// SpecialConversionFn is the special-case conversion function\n\t\t// to convert the field from the parent struct into the\n\t\t// qualified type returned to the user.\n\t\tSpecialConversionFn string\n\t\tPreferShadowPath bool\n\t}{\n\t\tPathStructName: pathStructName,\n\t\tGoType: 
goTypeData{\n\t\t\tGoTypeName: nodeData.GoTypeName,\n\t\t\tTransformedGoTypeName: transformGoTypeName(nodeData),\n\t\t\tIsLeaf: nodeData.IsLeaf,\n\t\t\tHasDefault: nodeData.HasDefault,\n\t\t},\n\t\tGoFieldName: nodeData.GoFieldName,\n\t\tGoStructTypeName: nodeData.SubsumingGoStructName,\n\t\tYANGPath: nodeData.YANGPath,\n\t\tFakeRootTypeName: fakeRootTypeName,\n\t\tIsScalarField: nodeData.IsScalarField,\n\t\tIsRoot: nodeData.YANGPath == \"/\",\n\t\tWildcardSuffix: ypathgen.WildcardSuffix,\n\t\tSpecialConversionFn: specialConversionFn,\n\t\tSchemaStructPkgAccessor: schemaStructPkgAccessor,\n\t\tPreferShadowPath: preferShadowPath,\n\t}\n\tvar getMethod, collectMethod, convertHelper strings.Builder\n\tif nodeData.IsLeaf {\n\t\t// Leaf types use their parent GoStruct to unmarshal, before\n\t\t// being retrieved out when returned to the user.\n\t\tif err := goLeafConvertTemplate.Execute(&convertHelper, s); err != nil {\n\t\t\tutil.AppendErr(errs, err)\n\t\t}\n\t\t// TODO: Collect methods for non-leaf nodes is not implemented.\n\t\tif err := goLeafCollectTemplate.Execute(&collectMethod, s); err != nil {\n\t\t\tutil.AppendErr(errs, err)\n\t\t}\n\t}\n\tif err := goNodeGetTemplate.Execute(&getMethod, s); err != nil {\n\t\tutil.AppendErr(errs, err)\n\t}\n\n\treturn GoPerNodeCodeSnippet{\n\t\tPathStructName: pathStructName,\n\t\tGetMethod: getMethod.String(),\n\t\tCollectMethod: collectMethod.String(),\n\t\tConvertHelper: convertHelper.String(),\n\t}, s.GoType, errs\n}", "func init() {\n\tgroupFields := schema.Group{}.Fields()\n\t_ = groupFields\n\t// groupDescTenant is the schema descriptor for tenant field.\n\tgroupDescTenant := groupFields[0].Descriptor()\n\t// group.TenantValidator is a validator for the \"tenant\" field. 
It is called by the builders before save.\n\tgroup.TenantValidator = groupDescTenant.Validators[0].(func(string) error)\n\t// groupDescName is the schema descriptor for name field.\n\tgroupDescName := groupFields[1].Descriptor()\n\t// group.NameValidator is a validator for the \"name\" field. It is called by the builders before save.\n\tgroup.NameValidator = groupDescName.Validators[0].(func(string) error)\n\t// groupDescType is the schema descriptor for type field.\n\tgroupDescType := groupFields[2].Descriptor()\n\t// group.TypeValidator is a validator for the \"type\" field. It is called by the builders before save.\n\tgroup.TypeValidator = groupDescType.Validators[0].(func(string) error)\n\t// groupDescCreatedAt is the schema descriptor for created_at field.\n\tgroupDescCreatedAt := groupFields[5].Descriptor()\n\t// group.DefaultCreatedAt holds the default value on creation for the created_at field.\n\tgroup.DefaultCreatedAt = groupDescCreatedAt.Default.(func() time.Time)\n\t// groupDescUpdatedAt is the schema descriptor for updated_at field.\n\tgroupDescUpdatedAt := groupFields[6].Descriptor()\n\t// group.DefaultUpdatedAt holds the default value on creation for the updated_at field.\n\tgroup.DefaultUpdatedAt = groupDescUpdatedAt.Default.(func() time.Time)\n\t// group.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.\n\tgroup.UpdateDefaultUpdatedAt = groupDescUpdatedAt.UpdateDefault.(func() time.Time)\n\tnodeFields := schema.Node{}.Fields()\n\t_ = nodeFields\n\t// nodeDescTenant is the schema descriptor for tenant field.\n\tnodeDescTenant := nodeFields[0].Descriptor()\n\t// node.TenantValidator is a validator for the \"tenant\" field. It is called by the builders before save.\n\tnode.TenantValidator = nodeDescTenant.Validators[0].(func(string) error)\n\t// nodeDescName is the schema descriptor for name field.\n\tnodeDescName := nodeFields[1].Descriptor()\n\t// node.NameValidator is a validator for the \"name\" field. 
It is called by the builders before save.\n\tnode.NameValidator = nodeDescName.Validators[0].(func(string) error)\n\t// nodeDescType is the schema descriptor for type field.\n\tnodeDescType := nodeFields[2].Descriptor()\n\t// node.TypeValidator is a validator for the \"type\" field. It is called by the builders before save.\n\tnode.TypeValidator = nodeDescType.Validators[0].(func(string) error)\n\t// nodeDescCreatedAt is the schema descriptor for created_at field.\n\tnodeDescCreatedAt := nodeFields[5].Descriptor()\n\t// node.DefaultCreatedAt holds the default value on creation for the created_at field.\n\tnode.DefaultCreatedAt = nodeDescCreatedAt.Default.(func() time.Time)\n\t// nodeDescUpdatedAt is the schema descriptor for updated_at field.\n\tnodeDescUpdatedAt := nodeFields[6].Descriptor()\n\t// node.DefaultUpdatedAt holds the default value on creation for the updated_at field.\n\tnode.DefaultUpdatedAt = nodeDescUpdatedAt.Default.(func() time.Time)\n\t// node.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.\n\tnode.UpdateDefaultUpdatedAt = nodeDescUpdatedAt.UpdateDefault.(func() time.Time)\n\tpermissionFields := schema.Permission{}.Fields()\n\t_ = permissionFields\n\t// permissionDescTenant is the schema descriptor for tenant field.\n\tpermissionDescTenant := permissionFields[0].Descriptor()\n\t// permission.TenantValidator is a validator for the \"tenant\" field. It is called by the builders before save.\n\tpermission.TenantValidator = permissionDescTenant.Validators[0].(func(string) error)\n\t// permissionDescName is the schema descriptor for name field.\n\tpermissionDescName := permissionFields[1].Descriptor()\n\t// permission.NameValidator is a validator for the \"name\" field. 
It is called by the builders before save.\n\tpermission.NameValidator = permissionDescName.Validators[0].(func(string) error)\n\t// permissionDescCreatedAt is the schema descriptor for created_at field.\n\tpermissionDescCreatedAt := permissionFields[3].Descriptor()\n\t// permission.DefaultCreatedAt holds the default value on creation for the created_at field.\n\tpermission.DefaultCreatedAt = permissionDescCreatedAt.Default.(func() time.Time)\n\t// permissionDescUpdatedAt is the schema descriptor for updated_at field.\n\tpermissionDescUpdatedAt := permissionFields[4].Descriptor()\n\t// permission.DefaultUpdatedAt holds the default value on creation for the updated_at field.\n\tpermission.DefaultUpdatedAt = permissionDescUpdatedAt.Default.(func() time.Time)\n\t// permission.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.\n\tpermission.UpdateDefaultUpdatedAt = permissionDescUpdatedAt.UpdateDefault.(func() time.Time)\n\trouteFields := schema.Route{}.Fields()\n\t_ = routeFields\n\t// routeDescTenant is the schema descriptor for tenant field.\n\trouteDescTenant := routeFields[0].Descriptor()\n\t// route.TenantValidator is a validator for the \"tenant\" field. It is called by the builders before save.\n\troute.TenantValidator = routeDescTenant.Validators[0].(func(string) error)\n\t// routeDescName is the schema descriptor for name field.\n\trouteDescName := routeFields[1].Descriptor()\n\t// route.NameValidator is a validator for the \"name\" field. It is called by the builders before save.\n\troute.NameValidator = routeDescName.Validators[0].(func(string) error)\n\t// routeDescURI is the schema descriptor for uri field.\n\trouteDescURI := routeFields[2].Descriptor()\n\t// route.URIValidator is a validator for the \"uri\" field. 
It is called by the builders before save.\n\troute.URIValidator = routeDescURI.Validators[0].(func(string) error)\n\t// routeDescCreatedAt is the schema descriptor for created_at field.\n\trouteDescCreatedAt := routeFields[5].Descriptor()\n\t// route.DefaultCreatedAt holds the default value on creation for the created_at field.\n\troute.DefaultCreatedAt = routeDescCreatedAt.Default.(func() time.Time)\n\t// routeDescUpdatedAt is the schema descriptor for updated_at field.\n\trouteDescUpdatedAt := routeFields[6].Descriptor()\n\t// route.DefaultUpdatedAt holds the default value on creation for the updated_at field.\n\troute.DefaultUpdatedAt = routeDescUpdatedAt.Default.(func() time.Time)\n\t// route.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.\n\troute.UpdateDefaultUpdatedAt = routeDescUpdatedAt.UpdateDefault.(func() time.Time)\n\tuserFields := schema.User{}.Fields()\n\t_ = userFields\n\t// userDescTenant is the schema descriptor for tenant field.\n\tuserDescTenant := userFields[0].Descriptor()\n\t// user.TenantValidator is a validator for the \"tenant\" field. It is called by the builders before save.\n\tuser.TenantValidator = userDescTenant.Validators[0].(func(string) error)\n\t// userDescUUID is the schema descriptor for uuid field.\n\tuserDescUUID := userFields[1].Descriptor()\n\t// user.UUIDValidator is a validator for the \"uuid\" field. 
It is called by the builders before save.\n\tuser.UUIDValidator = userDescUUID.Validators[0].(func(string) error)\n\t// userDescIsSuper is the schema descriptor for is_super field.\n\tuserDescIsSuper := userFields[3].Descriptor()\n\t// user.DefaultIsSuper holds the default value on creation for the is_super field.\n\tuser.DefaultIsSuper = userDescIsSuper.Default.(bool)\n\t// userDescCreatedAt is the schema descriptor for created_at field.\n\tuserDescCreatedAt := userFields[5].Descriptor()\n\t// user.DefaultCreatedAt holds the default value on creation for the created_at field.\n\tuser.DefaultCreatedAt = userDescCreatedAt.Default.(func() time.Time)\n\t// userDescUpdatedAt is the schema descriptor for updated_at field.\n\tuserDescUpdatedAt := userFields[6].Descriptor()\n\t// user.DefaultUpdatedAt holds the default value on creation for the updated_at field.\n\tuser.DefaultUpdatedAt = userDescUpdatedAt.Default.(func() time.Time)\n\t// user.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.\n\tuser.UpdateDefaultUpdatedAt = userDescUpdatedAt.UpdateDefault.(func() time.Time)\n}", "func (m *SolutionsRoot) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"businessScenarios\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateBusinessScenarioFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]BusinessScenarioable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(BusinessScenarioable)\n }\n }\n m.SetBusinessScenarios(res)\n }\n return nil\n }\n res[\"@odata.type\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if 
err != nil {\n return err\n }\n if val != nil {\n m.SetOdataType(val)\n }\n return nil\n }\n res[\"virtualEvents\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateVirtualEventsRootFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetVirtualEvents(val.(VirtualEventsRootable))\n }\n return nil\n }\n return res\n}", "func (m *DeviceManagementConfigurationSettingDefinition) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"accessTypes\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceManagementConfigurationSettingAccessTypes)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetAccessTypes(val.(*DeviceManagementConfigurationSettingAccessTypes))\n }\n return nil\n }\n res[\"applicability\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateDeviceManagementConfigurationSettingApplicabilityFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetApplicability(val.(DeviceManagementConfigurationSettingApplicabilityable))\n }\n return nil\n }\n res[\"baseUri\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetBaseUri(val)\n }\n return nil\n }\n res[\"categoryId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCategoryId(val)\n }\n return nil\n }\n res[\"description\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error 
{\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDescription(val)\n }\n return nil\n }\n res[\"displayName\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDisplayName(val)\n }\n return nil\n }\n res[\"helpText\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetHelpText(val)\n }\n return nil\n }\n res[\"infoUrls\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfPrimitiveValues(\"string\")\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]string, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = *(v.(*string))\n }\n }\n m.SetInfoUrls(res)\n }\n return nil\n }\n res[\"keywords\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfPrimitiveValues(\"string\")\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]string, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = *(v.(*string))\n }\n }\n m.SetKeywords(res)\n }\n return nil\n }\n res[\"name\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetName(val)\n }\n return nil\n }\n res[\"occurrence\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateDeviceManagementConfigurationSettingOccurrenceFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOccurrence(val.(DeviceManagementConfigurationSettingOccurrenceable))\n }\n return nil\n }\n 
res[\"offsetUri\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOffsetUri(val)\n }\n return nil\n }\n res[\"referredSettingInformationList\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateDeviceManagementConfigurationReferredSettingInformationFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]DeviceManagementConfigurationReferredSettingInformationable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(DeviceManagementConfigurationReferredSettingInformationable)\n }\n }\n m.SetReferredSettingInformationList(res)\n }\n return nil\n }\n res[\"rootDefinitionId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetRootDefinitionId(val)\n }\n return nil\n }\n res[\"settingUsage\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceManagementConfigurationSettingUsage)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSettingUsage(val.(*DeviceManagementConfigurationSettingUsage))\n }\n return nil\n }\n res[\"uxBehavior\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceManagementConfigurationControlType)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetUxBehavior(val.(*DeviceManagementConfigurationControlType))\n }\n return nil\n }\n res[\"version\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetVersion(val)\n }\n 
return nil\n }\n res[\"visibility\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceManagementConfigurationSettingVisibility)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetVisibility(val.(*DeviceManagementConfigurationSettingVisibility))\n }\n return nil\n }\n return res\n}", "func (m *CreatePostRequestBody) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"certificateSigningRequest\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetObjectValue(iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.CreatePrintCertificateSigningRequestFromDiscriminatorValue , m.SetCertificateSigningRequest)\n res[\"connectorId\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetConnectorId)\n res[\"displayName\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDisplayName)\n res[\"hasPhysicalDevice\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetBoolValue(m.SetHasPhysicalDevice)\n res[\"manufacturer\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetManufacturer)\n res[\"model\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetModel)\n res[\"physicalDeviceId\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetPhysicalDeviceId)\n return res\n}", "func (m *ManagementTemplateStep) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"acceptedVersion\"] = func (n 
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateManagementTemplateStepVersionFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetAcceptedVersion(val.(ManagementTemplateStepVersionable))\n }\n return nil\n }\n res[\"category\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseManagementCategory)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCategory(val.(*ManagementCategory))\n }\n return nil\n }\n res[\"createdByUserId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCreatedByUserId(val)\n }\n return nil\n }\n res[\"createdDateTime\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCreatedDateTime(val)\n }\n return nil\n }\n res[\"description\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDescription(val)\n }\n return nil\n }\n res[\"displayName\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDisplayName(val)\n }\n return nil\n }\n res[\"lastActionByUserId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetLastActionByUserId(val)\n }\n return nil\n }\n res[\"lastActionDateTime\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n 
val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetLastActionDateTime(val)\n }\n return nil\n }\n res[\"managementTemplate\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateManagementTemplateFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetManagementTemplate(val.(ManagementTemplateable))\n }\n return nil\n }\n res[\"portalLink\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.CreateActionUrlFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPortalLink(val.(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.ActionUrlable))\n }\n return nil\n }\n res[\"priority\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetInt32Value()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPriority(val)\n }\n return nil\n }\n res[\"versions\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateManagementTemplateStepVersionFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]ManagementTemplateStepVersionable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(ManagementTemplateStepVersionable)\n }\n }\n m.SetVersions(res)\n }\n return nil\n }\n return res\n}", "func expandFields(compiled *lang.CompiledExpr, define *lang.DefineExpr) lang.DefineFieldsExpr {\n\tvar fields lang.DefineFieldsExpr\n\tfor _, field := range define.Fields {\n\t\tif isEmbeddedField(field) {\n\t\t\tembedded := expandFields(compiled, compiled.LookupDefine(string(field.Type)))\n\t\t\tfields = append(fields, embedded...)\n\t\t} else {\n\t\t\tfields = 
append(fields, field)\n\t\t}\n\t}\n\treturn fields\n}", "func (m *Planner) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"buckets\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreatePlannerBucketFromDiscriminatorValue , m.SetBuckets)\n res[\"plans\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreatePlannerPlanFromDiscriminatorValue , m.SetPlans)\n res[\"tasks\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreatePlannerTaskFromDiscriminatorValue , m.SetTasks)\n return res\n}", "func parse(r io.Reader) ([]field, error) {\n\tinData, err := models.Unmarshal(r)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshalling models.yml: %w\", err)\n\t}\n\n\tvar fields []field\n\tfor collectionName, collection := range inData {\n\t\tfor fieldName, modelField := range collection.Fields {\n\t\t\tf := field{}\n\t\t\tf.Name = collectionName + \"/\" + fieldName\n\t\t\tf.GoName = goName(collectionName) + \"_\" + goName(fieldName)\n\t\t\tf.GoType = goType(modelField.Type)\n\t\t\tf.Collection = firstLower(goName(collectionName))\n\t\t\tf.FQField = collectionName + \"/%d/\" + fieldName\n\t\t\tf.Required = modelField.Required\n\n\t\t\tif modelField.Type == \"relation\" || modelField.Type == \"generic-relation\" {\n\t\t\t\tf.SingleRelation = true\n\t\t\t}\n\n\t\t\tif strings.Contains(fieldName, \"$\") {\n\t\t\t\tf.TemplateAttr = \"replacement\"\n\t\t\t\tf.TemplateAttrType = \"string\"\n\t\t\t\tf.TemplateFQField = collectionName + \"/%d/\" + strings.Replace(fieldName, \"$\", \"$%s\", 1)\n\t\t\t\tf.GoType = goType(modelField.Template.Fields.Type)\n\n\t\t\t\tif modelField.Template.Replacement != \"\" {\n\t\t\t\t\tf.TemplateAttr = modelField.Template.Replacement + \"ID\"\n\t\t\t\t\tf.TemplateAttrType = 
\"int\"\n\t\t\t\t\tf.TemplateFQField = collectionName + \"/%d/\" + strings.Replace(fieldName, \"$\", \"$%d\", 1)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfields = append(fields, f)\n\t\t}\n\t}\n\n\t// TODO: fix models-to-go to return fields in input order.\n\tsort.Slice(fields, func(i, j int) bool {\n\t\treturn fields[i].GoName < fields[j].GoName\n\t})\n\n\treturn fields, nil\n}", "func (m *Store) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"defaultLanguageTag\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDefaultLanguageTag)\n res[\"groups\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateGroupFromDiscriminatorValue , m.SetGroups)\n res[\"languageTags\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfPrimitiveValues(\"string\" , m.SetLanguageTags)\n res[\"sets\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateSetFromDiscriminatorValue , m.SetSets)\n return res\n}", "func (m *DeviceManagementConfigurationPolicy) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"assignments\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateDeviceManagementConfigurationPolicyAssignmentFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]DeviceManagementConfigurationPolicyAssignmentable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(DeviceManagementConfigurationPolicyAssignmentable)\n }\n }\n m.SetAssignments(res)\n }\n return nil\n }\n res[\"createdDateTime\"] = func (n 
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCreatedDateTime(val)\n }\n return nil\n }\n res[\"creationSource\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCreationSource(val)\n }\n return nil\n }\n res[\"description\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDescription(val)\n }\n return nil\n }\n res[\"isAssigned\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetBoolValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetIsAssigned(val)\n }\n return nil\n }\n res[\"lastModifiedDateTime\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetLastModifiedDateTime(val)\n }\n return nil\n }\n res[\"name\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetName(val)\n }\n return nil\n }\n res[\"platforms\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceManagementConfigurationPlatforms)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPlatforms(val.(*DeviceManagementConfigurationPlatforms))\n }\n return nil\n }\n res[\"priorityMetaData\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := 
n.GetObjectValue(CreateDeviceManagementPriorityMetaDataFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPriorityMetaData(val.(DeviceManagementPriorityMetaDataable))\n }\n return nil\n }\n res[\"roleScopeTagIds\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfPrimitiveValues(\"string\")\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]string, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = *(v.(*string))\n }\n }\n m.SetRoleScopeTagIds(res)\n }\n return nil\n }\n res[\"settingCount\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetInt32Value()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSettingCount(val)\n }\n return nil\n }\n res[\"settings\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateDeviceManagementConfigurationSettingFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]DeviceManagementConfigurationSettingable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(DeviceManagementConfigurationSettingable)\n }\n }\n m.SetSettings(res)\n }\n return nil\n }\n res[\"technologies\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceManagementConfigurationTechnologies)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetTechnologies(val.(*DeviceManagementConfigurationTechnologies))\n }\n return nil\n }\n res[\"templateReference\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateDeviceManagementConfigurationPolicyTemplateReferenceFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n 
m.SetTemplateReference(val.(DeviceManagementConfigurationPolicyTemplateReferenceable))\n }\n return nil\n }\n return res\n}", "func (x *fastReflection_Output) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {\n\tswitch fd.FullName() {\n\tcase \"cosmos.bank.v1beta1.Output.address\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"cosmos.bank.v1beta1.Output.coins\":\n\t\tlist := []*v1beta1.Coin{}\n\t\treturn protoreflect.ValueOfList(&_Output_2_list{list: &list})\n\tdefault:\n\t\tif fd.IsExtension() {\n\t\t\tpanic(fmt.Errorf(\"proto3 declared messages do not support extensions: cosmos.bank.v1beta1.Output\"))\n\t\t}\n\t\tpanic(fmt.Errorf(\"message cosmos.bank.v1beta1.Output does not contain field %s\", fd.FullName()))\n\t}\n}", "func (builder *RoomBuilder) BuildField(f *FieldProxyI) {\n\t*f = builder.field\n}", "func (g *Generator) genTypeDecoder(t reflect.Type, out string, tags fieldTags, indent int) error {\n\tws := strings.Repeat(\" \", indent)\n\n\tunmarshalerIface := reflect.TypeOf((*easyjson.Unmarshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(unmarshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"(\"+out+\").UnmarshalEasyJSON(in)\")\n\t\treturn nil\n\t}\n\n\tunmarshalerIface = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(unmarshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"if data := in.Raw(); in.Ok() {\")\n\t\tfmt.Fprintln(g.out, ws+\" in.AddError( (\"+out+\").UnmarshalJSON(data) )\")\n\t\tfmt.Fprintln(g.out, ws+\"}\")\n\t\treturn nil\n\t}\n\n\tunmarshalerIface = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(unmarshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"if data := in.UnsafeBytes(); in.Ok() {\")\n\t\tfmt.Fprintln(g.out, ws+\" in.AddError( (\"+out+\").UnmarshalText(data) )\")\n\t\tfmt.Fprintln(g.out, ws+\"}\")\n\t\treturn nil\n\t}\n\n\terr := g.genTypeDecoderNoCheck(t, out, tags, indent)\n\treturn err\n}", "func (x *fastReflection_Metadata) NewField(fd 
protoreflect.FieldDescriptor) protoreflect.Value {\n\tswitch fd.FullName() {\n\tcase \"cosmos.bank.v1beta1.Metadata.description\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"cosmos.bank.v1beta1.Metadata.denom_units\":\n\t\tlist := []*DenomUnit{}\n\t\treturn protoreflect.ValueOfList(&_Metadata_2_list{list: &list})\n\tcase \"cosmos.bank.v1beta1.Metadata.base\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"cosmos.bank.v1beta1.Metadata.display\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"cosmos.bank.v1beta1.Metadata.name\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"cosmos.bank.v1beta1.Metadata.symbol\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"cosmos.bank.v1beta1.Metadata.uri\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"cosmos.bank.v1beta1.Metadata.uri_hash\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tdefault:\n\t\tif fd.IsExtension() {\n\t\t\tpanic(fmt.Errorf(\"proto3 declared messages do not support extensions: cosmos.bank.v1beta1.Metadata\"))\n\t\t}\n\t\tpanic(fmt.Errorf(\"message cosmos.bank.v1beta1.Metadata does not contain field %s\", fd.FullName()))\n\t}\n}", "func (b *PlanBuilder) buildProjectionFieldNameFromExpressions(ctx context.Context, field *ast.SelectField) (parser_model.CIStr, error) {\n\tinnerExpr := getInnerFromParenthesesAndUnaryPlus(field.Expr)\n\tvalueExpr, isValueExpr := innerExpr.(*driver.ValueExpr)\n\n\t// Non-literal: Output as inputed, except that comments need to be removed.\n\tif !isValueExpr {\n\t\treturn parser_model.NewCIStr(parser.SpecFieldPattern.ReplaceAllStringFunc(field.Text(), parser.TrimComment)), nil\n\t}\n\n\t// Literal: Need special processing\n\tswitch valueExpr.Kind() {\n\tcase types.KindString:\n\t\tprojName := valueExpr.GetString()\n\t\tprojOffset := valueExpr.GetProjectionOffset()\n\t\tif projOffset >= 0 {\n\t\t\tprojName = projName[:projOffset]\n\t\t}\n\t\t// See #3686, #3994:\n\t\t// For string literals, string content is used as column name. 
Non-graph initial characters are trimmed.\n\t\tfieldName := strings.TrimLeftFunc(projName, func(r rune) bool {\n\t\t\treturn !unicode.IsOneOf(mysql.RangeGraph, r)\n\t\t})\n\t\treturn parser_model.NewCIStr(fieldName), nil\n\tcase types.KindNull:\n\t\t// See #4053, #3685\n\t\treturn parser_model.NewCIStr(\"NULL\"), nil\n\tcase types.KindBinaryLiteral:\n\t\t// Don't rewrite BIT literal or HEX literals\n\t\treturn parser_model.NewCIStr(field.Text()), nil\n\tcase types.KindInt64:\n\t\t// See #9683\n\t\t// TRUE or FALSE can be a int64\n\t\tif mysql.HasIsBooleanFlag(valueExpr.Type.Flag) {\n\t\t\tif i := valueExpr.GetValue().(int64); i == 0 {\n\t\t\t\treturn parser_model.NewCIStr(\"FALSE\"), nil\n\t\t\t}\n\t\t\treturn parser_model.NewCIStr(\"TRUE\"), nil\n\t\t}\n\t\tfallthrough\n\n\tdefault:\n\t\tfieldName := field.Text()\n\t\tfieldName = strings.TrimLeft(fieldName, \"\\t\\n +(\")\n\t\tfieldName = strings.TrimRight(fieldName, \"\\t\\n )\")\n\t\treturn parser_model.NewCIStr(fieldName), nil\n\t}\n}", "func mergeFieldDef(target, source *ast.FieldDefinition) {\n\tif target.Description == \"\" {\n\t\ttarget.Description = source.Description\n\t}\n\tif target.Name == \"\" {\n\t\ttarget.Name = source.Name\n\t}\n\tif target.ArgumentsDefinition == nil {\n\t\ttarget.ArgumentsDefinition = source.ArgumentsDefinition\n\t}\n\tif target.Type == nil {\n\t\ttarget.Type = source.Type\n\t}\n\tif target.Directives == nil {\n\t\ttarget.Directives = source.Directives\n\t}\n}", "func (m *AccessPackageCatalog) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"accessPackages\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateAccessPackageFromDiscriminatorValue , m.SetAccessPackages)\n res[\"catalogType\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetEnumValue(ParseAccessPackageCatalogType , 
m.SetCatalogType)\n res[\"createdDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetCreatedDateTime)\n res[\"description\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDescription)\n res[\"displayName\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDisplayName)\n res[\"isExternallyVisible\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetBoolValue(m.SetIsExternallyVisible)\n res[\"modifiedDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetModifiedDateTime)\n res[\"state\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetEnumValue(ParseAccessPackageCatalogState , m.SetState)\n return res\n}", "func (m *PaymentTerm) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"calculateDiscountOnCreditMemos\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetBoolValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCalculateDiscountOnCreditMemos(val)\n }\n return nil\n }\n res[\"code\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCode(val)\n }\n return nil\n }\n res[\"discountDateCalculation\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDiscountDateCalculation(val)\n }\n return nil\n }\n res[\"discountPercent\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) 
error {\n val, err := n.GetFloat64Value()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDiscountPercent(val)\n }\n return nil\n }\n res[\"displayName\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDisplayName(val)\n }\n return nil\n }\n res[\"dueDateCalculation\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDueDateCalculation(val)\n }\n return nil\n }\n res[\"id\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetUUIDValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetId(val)\n }\n return nil\n }\n res[\"lastModifiedDateTime\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetLastModifiedDateTime(val)\n }\n return nil\n }\n res[\"@odata.type\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOdataType(val)\n }\n return nil\n }\n return res\n}" ]
[ "0.6892996", "0.59916204", "0.57207865", "0.57188165", "0.56753683", "0.5636733", "0.5596574", "0.55853677", "0.5520538", "0.5467922", "0.5415466", "0.53430325", "0.53384626", "0.5321866", "0.52896065", "0.5278876", "0.5278473", "0.521161", "0.5197192", "0.5160622", "0.51567143", "0.51550436", "0.5147077", "0.51422423", "0.5134995", "0.5125008", "0.51133937", "0.5062007", "0.50596523", "0.50420433", "0.5037253", "0.5037253", "0.5037253", "0.5037191", "0.5032363", "0.5026841", "0.5018489", "0.49712357", "0.49637362", "0.49629217", "0.49525085", "0.494682", "0.49234253", "0.49165308", "0.48968995", "0.48819926", "0.48531297", "0.48460788", "0.4845097", "0.4843897", "0.48201433", "0.481769", "0.4807314", "0.47908702", "0.47879153", "0.47768614", "0.47643158", "0.47562444", "0.47546977", "0.4753293", "0.4748595", "0.47467503", "0.47423792", "0.47404823", "0.47370917", "0.4733257", "0.4729395", "0.47209784", "0.47155458", "0.4714776", "0.4691385", "0.46873227", "0.46867412", "0.4686594", "0.46796343", "0.4675671", "0.4673864", "0.46587044", "0.46586603", "0.46584782", "0.46528453", "0.46463683", "0.46432978", "0.4639629", "0.463718", "0.46325478", "0.46312508", "0.46269098", "0.46251613", "0.46191674", "0.46175572", "0.4611585", "0.46029124", "0.4602239", "0.45985088", "0.45904619", "0.4582061", "0.45726395", "0.45703688", "0.4566938" ]
0.757106
0
genArguments generates argument field config for given AST
func genArguments(args []*ast.InputValueDefinition) *jen.Statement { // // Generate config for arguments // // == Example input SDL // // type Dog { // name( // "style is stylish" // style: NameComponentsStyle = SHORT, // ): String! // } // // == Example output // // FieldConfigArgument{ // "style": &ArgumentConfig{ ... } // }, // return jen.Qual(defsPkg, "FieldConfigArgument").Values( jen.DictFunc(func(d jen.Dict) { for _, arg := range args { d[jen.Lit(arg.Name.Value)] = genArgument(arg) } }), ) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func genArgument(arg *ast.InputValueDefinition) *jen.Statement {\n\t//\n\t// Generate config for argument\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(\n\t// \"style is stylish\"\n\t// style: NameComponentsStyle = SHORT,\n\t// ): String!\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// &ArgumentConfig{\n\t// Type: graphql.NonNull(graphql.String),\n\t// DefaultValue: \"SHORT\", // TODO: ???\n\t// Description: \"style is stylish\",\n\t// }\n\t//\n\treturn jen.Op(\"&\").Qual(defsPkg, \"ArgumentConfig\").Values(jen.Dict{\n\t\tjen.Id(\"DefaultValue\"): genValue(arg.DefaultValue),\n\t\tjen.Id(\"Description\"): genDescription(arg),\n\t\tjen.Id(\"Type\"): genInputTypeReference(arg.Type),\n\t})\n}", "func BindArg(obj interface{}, tags ...string) FieldConfigArgument {\n\tv := reflect.Indirect(reflect.ValueOf(obj))\n\tvar config = make(FieldConfigArgument)\n\tfor i := 0; i < v.NumField(); i++ {\n\t\tfield := v.Type().Field(i)\n\n\t\tmytag := extractTag(field.Tag)\n\t\tif inArray(tags, mytag) {\n\t\t\tconfig[mytag] = &ArgumentConfig{\n\t\t\t\tType: getGraphType(field.Type),\n\t\t\t}\n\t\t}\n\t}\n\treturn config\n}", "func (*Base) Arguments(p ASTPass, l *ast.Fodder, args *ast.Arguments, r *ast.Fodder, ctx Context) {\n\tp.Fodder(p, l, ctx)\n\tfor i := range args.Positional {\n\t\targ := &args.Positional[i]\n\t\tp.Visit(p, &arg.Expr, ctx)\n\t\tp.Fodder(p, &arg.CommaFodder, ctx)\n\t}\n\tfor i := range args.Named {\n\t\targ := &args.Named[i]\n\t\tp.Fodder(p, &arg.NameFodder, ctx)\n\t\tp.Fodder(p, &arg.EqFodder, ctx)\n\t\tp.Visit(p, &arg.Arg, ctx)\n\t\tp.Fodder(p, &arg.CommaFodder, ctx)\n\t}\n\tp.Fodder(p, r, ctx)\n}", "func genArgs(optionMap map[string]string) []string {\n\toptions := []string{}\n\tfor k, v := range optionMap {\n\t\tif v != \"\" {\n\t\t\tk = fmt.Sprintf(\"%s=%s\", k, v)\n\t\t}\n\t\toptions = append(options, k)\n\t}\n\treturn options\n}", "func collectArguments() Arguments {\n\tendpoint := 
config.Config.ChooseEndpoint(flags.APIEndpoint)\n\ttoken := config.Config.ChooseToken(endpoint, flags.Token)\n\tscheme := config.Config.ChooseScheme(endpoint, flags.Token)\n\treturn Arguments{\n\t\tapiEndpoint: endpoint,\n\t\ttoken: token,\n\t\tscheme: scheme,\n\t}\n}", "func genFields(fs []*ast.FieldDefinition) *jen.Statement {\n\t//\n\t// Generate config for fields\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(style: NameComponentsStyle = SHORT): String!\n\t// givenName: String @deprecated(reason: \"No longer supported; please use name field.\")\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// graphql.Fields{\n\t// \"name\": graphql.Field{ ... },\n\t// \"givenName\": graphql.Field{ ... },\n\t// }\n\t//\n\treturn jen.Qual(defsPkg, \"Fields\").Values(jen.DictFunc(func(d jen.Dict) {\n\t\tfor _, f := range fs {\n\t\t\td[jen.Lit(f.Name.Value)] = genField(f)\n\t\t}\n\t}))\n}", "func collectArguments() Arguments {\n\tendpoint := config.Config.ChooseEndpoint(flags.APIEndpoint)\n\ttoken := config.Config.ChooseToken(endpoint, flags.Token)\n\tscheme := config.Config.ChooseScheme(endpoint, flags.Token)\n\n\treturn Arguments{\n\t\tapiEndpoint: endpoint,\n\t\tauthToken: token,\n\t\tscheme: scheme,\n\t\tclusterNameOrID: \"\",\n\t\tuserProvidedToken: flags.Token,\n\t\tverbose: flags.Verbose,\n\t}\n}", "func (p *Planner) configureFieldArgumentSource(upstreamFieldRef, downstreamFieldRef int, argumentName string, sourcePath []string) {\n\tfieldArgument, ok := p.visitor.Operation.FieldArgument(downstreamFieldRef, []byte(argumentName))\n\tif !ok {\n\t\treturn\n\t}\n\tvalue := p.visitor.Operation.ArgumentValue(fieldArgument)\n\tif value.Kind != ast.ValueKindVariable {\n\t\tp.applyInlineFieldArgument(upstreamFieldRef, downstreamFieldRef, argumentName, sourcePath)\n\t\treturn\n\t}\n\tvariableName := p.visitor.Operation.VariableValueNameBytes(value.Ref)\n\tvariableNameStr := p.visitor.Operation.VariableValueNameString(value.Ref)\n\n\tcontextVariable := 
&resolve.ContextVariable{\n\t\tPath: []string{variableNameStr},\n\t\tRenderAsGraphQLValue: true,\n\t}\n\tcontextVariable.SetJsonValueType(p.visitor.Definition, p.visitor.Definition, p.argTypeRef)\n\n\tcontextVariableName, exists := p.variables.AddVariable(contextVariable)\n\tvariableValueRef, argRef := p.upstreamOperation.AddVariableValueArgument([]byte(argumentName), variableName) // add the argument to the field, but don't redefine it\n\tp.upstreamOperation.AddArgumentToField(upstreamFieldRef, argRef)\n\n\tif exists { // if the variable exists we don't have to put it onto the variables declaration again, skip\n\t\treturn\n\t}\n\n\tfor _, i := range p.visitor.Operation.OperationDefinitions[p.visitor.Walker.Ancestors[0].Ref].VariableDefinitions.Refs {\n\t\tref := p.visitor.Operation.VariableDefinitions[i].VariableValue.Ref\n\t\tif !p.visitor.Operation.VariableValueNameBytes(ref).Equals(variableName) {\n\t\t\tcontinue\n\t\t}\n\t\timportedType := p.visitor.Importer.ImportType(p.visitor.Operation.VariableDefinitions[i].Type, p.visitor.Operation, p.upstreamOperation)\n\t\tp.upstreamOperation.AddVariableDefinitionToOperationDefinition(p.nodes[0].Ref, variableValueRef, importedType)\n\t}\n\n\tp.upstreamVariables, _ = sjson.SetRawBytes(p.upstreamVariables, variableNameStr, []byte(contextVariableName))\n}", "func (p *Parser) buildArg(argDef Value, argType reflect.Type, index int, args *[]reflect.Value) error {\n\tswitch argType.Name() {\n\tcase \"Setter\":\n\t\tfallthrough\n\tcase \"GetSetter\":\n\t\targ, err := p.pathParser(argDef.Path)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v %w\", index, err)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(arg))\n\tcase \"Getter\":\n\t\targ, err := p.newGetter(argDef)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v %w\", index, err)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(arg))\n\tcase \"Enum\":\n\t\targ, err := p.enumParser(argDef.Enum)\n\t\tif err != 
nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v must be an Enum\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(*arg))\n\tcase \"string\":\n\t\tif argDef.String == nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v, must be an string\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(*argDef.String))\n\tcase \"float64\":\n\t\tif argDef.Float == nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v, must be an float\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(*argDef.Float))\n\tcase \"int64\":\n\t\tif argDef.Int == nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v, must be an int\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(*argDef.Int))\n\tcase \"bool\":\n\t\tif argDef.Bool == nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v, must be a bool\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(bool(*argDef.Bool)))\n\t}\n\treturn nil\n}", "func genField(field *ast.FieldDefinition) *jen.Statement {\n\t//\n\t// Generate config for field\n\t//\n\t// == Example input SDL\n\t//\n\t// interface Pet {\n\t// \"name of the pet\"\n\t// name(style: NameComponentsStyle = SHORT): String!\n\t// \"\"\"\n\t// givenName of the pet ★\n\t// \"\"\"\n\t// givenName: String @deprecated(reason: \"No longer supported; please use name field.\")\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// &graphql.Field{\n\t// Name: \"name\",\n\t// Type: graphql.NonNull(graphql.String),\n\t// Description: \"name of the pet\",\n\t// DeprecationReason: \"\",\n\t// Args: FieldConfigArgument{ ... },\n\t// }\n\t//\n\t// &graphql.Field{\n\t// Name: \"givenName\",\n\t// Type: graphql.String,\n\t// Description: \"givenName of the pet\",\n\t// DeprecationReason: \"No longer supported; please use name field.\",\n\t// Args: FieldConfigArgument{ ... 
},\n\t// }\n\t//\n\treturn jen.Op(\"&\").Qual(defsPkg, \"Field\").Values(jen.Dict{\n\t\tjen.Id(\"Args\"): genArguments(field.Arguments),\n\t\tjen.Id(\"DeprecationReason\"): genDeprecationReason(field.Directives),\n\t\tjen.Id(\"Description\"): genDescription(field),\n\t\tjen.Id(\"Name\"): jen.Lit(field.Name.Value),\n\t\tjen.Id(\"Type\"): genOutputTypeReference(field.Type),\n\t})\n}", "func structargs(tl *types.Type, mustname bool) []*Node {\n\tvar args []*Node\n\tgen := 0\n\tfor _, t := range tl.Fields().Slice() {\n\t\ts := t.Sym\n\t\tif mustname && (s == nil || s.Name == \"_\") {\n\t\t\t// invent a name so that we can refer to it in the trampoline\n\t\t\ts = lookupN(\".anon\", gen)\n\t\t\tgen++\n\t\t}\n\t\ta := symfield(s, t.Type)\n\t\ta.Pos = t.Pos\n\t\ta.SetIsDDD(t.IsDDD())\n\t\targs = append(args, a)\n\t}\n\n\treturn args\n}", "func (ec *executionContext) field_Mutation_createAgent_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 models.CreateAgentInput\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\tctx := graphql.WithPathContext(ctx, graphql.NewPathWithField(\"input\"))\n\t\targ0, err = ec.unmarshalNCreateAgentInput2golangᚑmongoᚑgraphqlᚑ003ᚋinternalᚋmodelsᚐCreateAgentInput(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"input\"] = arg0\n\treturn args, nil\n}", "func (c *compileContext) makeArgumentResolver(typ schema.InputableType) (argumentResolver, error) {\n\tswitch t := typ.(type) {\n\tcase *schema.InputObjectType:\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\treturn t.Decode(ctx, v)\n\t\t}, nil\n\tcase *schema.ListType:\n\t\telementResolver, err := c.makeArgumentResolver(t.Unwrap().(schema.InputableType))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\tif v == nil {\n\t\t\t\treturn nil, 
nil\n\t\t\t}\n\n\t\t\tlistCreator := t.Unwrap().(schema.InputableType).InputListCreator()\n\n\t\t\tif av, ok := v.(schema.LiteralArray); ok {\n\t\t\t\treturn listCreator.NewList(len(av), func(i int) (interface{}, error) {\n\t\t\t\t\treturn elementResolver(ctx, av[i])\n\t\t\t\t})\n\t\t\t}\n\n\t\t\t// if we get a non-list value we have to wrap into a single element\n\t\t\t// list.\n\t\t\t// See https://facebook.github.io/graphql/June2018/#sec-Type-System.List\n\t\t\tresultElement, err := elementResolver(ctx, v)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn listCreator.NewList(1, func(i int) (interface{}, error) {\n\t\t\t\treturn resultElement, nil\n\t\t\t})\n\t\t}, nil\n\n\tcase *schema.NotNilType:\n\t\telementResolver, err := c.makeArgumentResolver(t.Unwrap().(schema.InputableType))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\tif v == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Required value was not supplied\")\n\t\t\t}\n\t\t\treturn elementResolver(ctx, v)\n\t\t}, nil\n\tcase *schema.ScalarType:\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\treturn t.Decode(ctx, v)\n\t\t}, nil\n\tcase *schema.EnumType:\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\tif v == nil {\n\t\t\t\treturn t.Decode(ctx, v)\n\t\t\t}\n\t\t\tval, ok := v.(schema.LiteralString)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"Expected string, got %v\", v)\n\t\t\t}\n\t\t\treturn t.Decode(ctx, val)\n\t\t}, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Invalid type for input argument: %v\", typ)\n\t}\n}", "func (params PostParams) Generate(args []string, argConfigs []Arg) PostParams {\n\tvar md5hash string\n\tfor index, arg := range args {\n\t\tDebugf(\"Index and args %d %s %v\", index, arg, argConfigs)\n\n\t\tDebugf(\"PostParams Setting %s to %s\", strings.Title(argConfigs[index].Name), 
arg)\n\t\tif argConfigs[index].Type == \"object\" {\n\t\t\tDebugln(\"Using object parser\")\n\t\t\tvar jsonArg map[string]interface{}\n\t\t\terr := json.Unmarshal([]byte(arg), &jsonArg)\n\t\t\tif err != nil {\n\t\t\t\tpanic(fmt.Sprintf(\"Error parsing json from %s - %s\", argConfigs[index].Name, err.Error()))\n\t\t\t}\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).Set(reflect.ValueOf(jsonArg))\n\t\t} else if argConfigs[index].Type == \"array\" {\n\t\t\tDebugln(\"Using array parser\")\n\t\t\tvar jsonArray []interface{}\n\t\t\terr := json.Unmarshal([]byte(arg), &jsonArray)\n\t\t\tif err != nil {\n\t\t\t\tpanic(fmt.Sprintf(\"Error parsing json from %s - %s\", argConfigs[index].Name, err.Error()))\n\t\t\t}\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).Set(reflect.ValueOf(jsonArray))\n\t\t} else if argConfigs[index].Type == \"bool\" {\n\t\t\tDebugf(\"Using bool parser for (%s) = (%s)\", argConfigs[index].Name, arg)\n\t\t\tif arg == \"\" {\n\t\t\t\tDebugf(\"Missing arg value (%s) using default (%s)\", argConfigs[index].Name, argConfigs[index].Value)\n\t\t\t\targ = argConfigs[index].Value\n\t\t\t}\n\t\t\tboolArg, _ := strconv.ParseBool(arg)\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).SetBool(boolArg)\n\t\t} else {\n\t\t\tif argConfigs[index].Type == \"url\" {\n\t\t\t\tDebugf(\"Handling url %s\", arg)\n\t\t\t\ta, err := ComputeMd5(arg)\n\t\t\t\tmd5hash = a\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"Failed to generate MD5 from url %s. Make sure the file exists and permissions are correct. 
(%s)\", arg, err)\n\t\t\t\t\tExit(1)\n\t\t\t\t}\n\t\t\t\targ = ConvertFileToURL(arg)\n\t\t\t}\n\t\t\tDebugf(\"Using string parser for (%s) = (%s)\", argConfigs[index].Name, arg)\n\t\t\tif arg == \"\" {\n\t\t\t\tDebugf(\"Missing arg value (%s) using default (%s)\", argConfigs[index].Name, argConfigs[index].Value)\n\t\t\t\targ = argConfigs[index].Value\n\t\t\t}\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).SetString(arg)\n\t\t}\n\n\t\tDebugf(\"Finished %s\", arg)\n\t}\n\tif len(md5hash) > 0 {\n\t\tparams.Checksum = md5hash\n\t}\n\treturn params\n}", "func ASTArgsFromPlan(plan *plannercore.LoadData) *ASTArgs {\n\treturn &ASTArgs{\n\t\tFileLocRef: plan.FileLocRef,\n\t\tColumnsAndUserVars: plan.ColumnsAndUserVars,\n\t\tColumnAssignments: plan.ColumnAssignments,\n\t\tOnDuplicate: plan.OnDuplicate,\n\t\tFieldsInfo: plan.FieldsInfo,\n\t\tLinesInfo: plan.LinesInfo,\n\t}\n}", "func (g GoStruct) ArglistFunc() string {\n\tvar builder strings.Builder\n\tfor _, f := range g.Fields {\n\t\tif !f.Type.IsList {\n\t\t\tbuilder.WriteString(fmt.Sprintf(\"args = append(args, %s)\\n\", \"r.\"+f.Name))\n\t\t} else {\n\t\t\ttpl := `for _, v := range %s {\n\targs = append(args, v)\n}\n`\n\t\t\tbuilder.WriteString(fmt.Sprintf(tpl, \"r.\"+f.Name))\n\t\t\tbuilder.WriteString(fmt.Sprintf(\"inlens = append(inlens, len(%s))\\n\", \"r.\"+f.Name))\n\t\t}\n\t}\n\treturn fmt.Sprintf(\n\t\t\"func (r *%s) arglist() (args []interface{}, inlens []int) {\\n %s return\\n}\\n\",\n\t\tg.Name, builder.String())\n}", "func (ec *executionContext) field_Mutation_createAdmin_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 map[string]interface{}\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\targ0, err = ec.unmarshalNAdminCreateInput2map(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"input\"] = arg0\n\treturn args, nil\n}", 
"func (n *CommandNode) Args() []Expr { return n.args }", "func buildArg(mt *methodType, d json.RawMessage) (reflect.Value, error) {\n\tvar argv reflect.Value\n\targIsValue := false // if true, need to indirect before calling.\n\tif mt.ArgType.Kind() == reflect.Ptr {\n\t\targv = reflect.New(mt.ArgType.Elem())\n\t} else {\n\t\targv = reflect.New(mt.ArgType)\n\t\targIsValue = true\n\t}\n\terr := json.Unmarshal(d, argv.Interface())\n\tif err != nil {\n\t\treturn reflect.Value{}, err\n\t}\n\tif argIsValue {\n\t\targv = argv.Elem()\n\t}\n\treturn argv, nil\n}", "func Marshal(data *parser.Result, document *string) (err error) {\n\n\targuments := \"\"\n\ttmp := []string{}\n\n\tfor _, node := range data.AST.Children {\n\n\t\tinstruction := strings.ToUpper(node.Value)\n\t\ttab := strings.Repeat(\" \", len(node.Value)+1)\n\n\t\tswitch instruction {\n\t\tcase \"FROM\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"LABEL\":\n\t\t\targuments = KeyValueForm(node, tab)\n\t\tcase \"MAINTAINER\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"EXPOSE\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"ADD\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"ONBUILD\":\n\t\t\tfor _, n := range node.Next.Children {\n\t\t\t\targuments = strings.ToUpper(n.Value) + \" \" + DefaultForm(n)\n\t\t\t}\n\t\tcase \"STOPSIGNAL\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"HEALTHCHECK\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"ARG\":\n\t\t\targuments = KeyValueForm(node, tab)\n\t\tcase \"COPY\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"ENV\":\n\t\t\targuments = KeyValueForm(node, tab)\n\t\tcase \"RUN\":\n\t\t\targuments = ShellForm(node)\n\t\t\t//arguments = ExecForm(node)\n\t\tcase \"CMD\":\n\t\t\targuments = ExecForm(node)\n\t\t\t//arguments = ShellForm(node)\n\t\tcase \"ENTRYPOINT\":\n\t\t\targuments = ExecForm(node)\n\t\t\t//arguments = ShellForm(node)\n\t\tcase \"SHELL\":\n\t\t\targuments = ExecForm(node)\n\t\t\t//arguments = ShellForm(node)\n\t\tcase 
\"VOLUME\":\n\t\t\t//arguments = ExecForm(node)\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"USER\":\n\t\t\targuments = DefaultForm(node)\n\n\t\tcase \"WORKDIR\":\n\t\t\targuments = DefaultForm(node)\n\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"Instruction %s not supported\", instruction)\n\t\t}\n\n\t\tif len(arguments) > 0 {\n\t\t\ttmp = append(tmp, fmt.Sprintf(\"%s %s\", instruction, arguments))\n\t\t} else {\n\t\t\ttmp = append(tmp, instruction)\n\t\t}\n\n\t}\n\n\t*document = strings.Join(tmp, \"\\n\")\n\n\treturn err\n}", "func (params GetParams) Generate(args []string, argConfigs []Arg) GetParams {\n\tfor index, arg := range args {\n\t\tif argConfigs[index].Type != \"object\" && argConfigs[index].Type != \"array\" {\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).SetString(arg)\n\t\t} else if argConfigs[index].Type == \"bool\" {\n\t\t\tboolArg, _ := strconv.ParseBool(arg)\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).SetBool(boolArg)\n\t\t}\n\t}\n\treturn params\n}", "func (g *Generator) generate(typeName string) {\n\tfields := make([]Field, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeName = typeName\n\t\tfile.fields = nil\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tg.additionalImports = append(g.additionalImports, file.additionalImports...)\n\t\t\tfields = append(fields, file.fields...)\n\t\t}\n\t}\n\n\tif len(fields) == 0 {\n\t\tlog.Fatalf(\"no values defined for type %s\", typeName)\n\t}\n\n\tg.build(fields, typeName)\n}", "func getFieldList(p *program.Program, f *ast.FunctionDecl, fieldTypes []string) (\n\t_ *goast.FieldList, err error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"error in function field list. 
err = %v\", err)\n\t\t}\n\t}()\n\tr := []*goast.Field{}\n\tfor i := range fieldTypes {\n\t\tif len(f.Children()) <= i {\n\t\t\terr = fmt.Errorf(\"not correct type/children: %d, %d\",\n\t\t\t\tlen(f.Children()), len(fieldTypes))\n\t\t\treturn\n\t\t}\n\t\tn := f.Children()[i]\n\t\tif v, ok := n.(*ast.ParmVarDecl); ok {\n\t\t\tt, err := types.ResolveType(p, fieldTypes[i])\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"FieldList type: %s. %v\", fieldTypes[i], err)\n\t\t\t\tp.AddMessage(p.GenerateWarningMessage(err, f))\n\t\t\t\terr = nil // ignore error\n\t\t\t\tt = \"C4GO_UNDEFINE_TYPE\"\n\t\t\t}\n\n\t\t\tif t == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tr = append(r, &goast.Field{\n\t\t\t\tNames: []*goast.Ident{util.NewIdent(v.Name)},\n\t\t\t\tType: goast.NewIdent(t),\n\t\t\t})\n\t\t}\n\t}\n\n\t// for function argument: ...\n\tif strings.Contains(f.Type, \"...\") {\n\t\tr = append(r, &goast.Field{\n\t\t\tNames: []*goast.Ident{util.NewIdent(\"c4goArgs\")},\n\t\t\tType: &goast.Ellipsis{\n\t\t\t\tEllipsis: 1,\n\t\t\t\tElt: &goast.InterfaceType{\n\t\t\t\t\tInterface: 1,\n\t\t\t\t\tMethods: &goast.FieldList{\n\t\t\t\t\t\tOpening: 1,\n\t\t\t\t\t},\n\t\t\t\t\tIncomplete: false,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\n\treturn &goast.FieldList{\n\t\tList: r,\n\t}, nil\n}", "func GenerationArgsFor(category, pathToExecutable, fuzzerName string, isMaster bool) GenerationArgs {\n\tf, found := fuzzers[category]\n\tif !found {\n\t\tsklog.Errorf(\"Unknown fuzz category %q\", category)\n\t\treturn nil\n\t}\n\tmasterFlag := \"-M\"\n\tif !isMaster {\n\t\tmasterFlag = \"-S\"\n\t}\n\tseedPath := filepath.Join(config.Generator.FuzzSamples, category)\n\toutputPath := filepath.Join(config.Generator.AflOutputPath, category)\n\n\tcmd := append([]string{\"-i\", seedPath, \"-o\", outputPath, \"-m\", \"5000\", masterFlag, fuzzerName, \"--\", pathToExecutable}, f.ArgsAfterExecutable...)\n\n\treturn append(cmd, \"@@\")\n}", "func transformArgs(n ir.InitNode) {\n\tvar list []ir.Node\n\tswitch n 
:= n.(type) {\n\tdefault:\n\t\tbase.Fatalf(\"transformArgs %+v\", n.Op())\n\tcase *ir.CallExpr:\n\t\tlist = n.Args\n\t\tif n.IsDDD {\n\t\t\treturn\n\t\t}\n\tcase *ir.ReturnStmt:\n\t\tlist = n.Results\n\t}\n\tif len(list) != 1 {\n\t\treturn\n\t}\n\n\tt := list[0].Type()\n\tif t == nil || !t.IsFuncArgStruct() {\n\t\treturn\n\t}\n\n\t// Save n as n.Orig for fmt.go.\n\tif ir.Orig(n) == n {\n\t\tn.(ir.OrigNode).SetOrig(ir.SepCopy(n))\n\t}\n\n\t// Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).\n\ttypecheck.RewriteMultiValueCall(n, list[0])\n}", "func (n *FnInvNode) Args() []Expr { return n.args }", "func (g *Generator) generate(typeName string) {\n\tfields := make([]Field, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeName = typeName\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tfields = append(fields, file.fields...)\n\t\t}\n\t}\n\tif len(fields) == 0 {\n\t\tlog.Fatalf(\"no fields defined for type %s\", typeName)\n\t}\n\t// TODO: for now we remove Default from the start (maybe move that to an option)\n\tlogicalTypeName := \"\\\"\" + strings.TrimPrefix(typeName, \"Default\") + \"\\\"\"\n\n\t// Generate code that will fail if the constants change value.\n\tg.Printf(\"func (d *%s) Serialize() ([]byte, error) {\\n\", typeName)\n\tg.Printf(\"wb := utils.NewWriteBufferByteBased(utils.WithByteOrderForByteBasedBuffer(binary.BigEndian))\\n\")\n\tg.Printf(\"\\tif err := d.SerializeWithWriteBuffer(context.Background(), wb); err != nil {\\n\")\n\tg.Printf(\"\\t\\treturn nil, err\\n\")\n\tg.Printf(\"\\t}\\n\")\n\tg.Printf(\"\\treturn wb.GetBytes(), nil\\n\")\n\tg.Printf(\"}\\n\\n\")\n\tg.Printf(\"func (d *%s) SerializeWithWriteBuffer(ctx context.Context, writeBuffer utils.WriteBuffer) error {\\n\", typeName)\n\tg.Printf(\"\\tif err := writeBuffer.PushContext(%s); err != nil {\\n\", logicalTypeName)\n\tg.Printf(\"\\t\\treturn err\\n\")\n\tg.Printf(\"\\t}\\n\")\n\tfor _, field := 
range fields {\n\t\tfieldType := field.fieldType\n\t\tif field.isDelegate {\n\t\t\tg.Printf(\"\\t\\t\\tif err := d.%s.SerializeWithWriteBuffer(ctx, writeBuffer); err != nil {\\n\", fieldType.(*ast.Ident).Name)\n\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\tcontinue\n\t\t}\n\t\tfieldName := field.name\n\t\tfieldNameUntitled := \"\\\"\" + unTitle(fieldName) + \"\\\"\"\n\t\tif field.hasLocker != \"\" {\n\t\t\tg.Printf(\"if err := func()error {\\n\")\n\t\t\tg.Printf(\"\\td.\" + field.hasLocker + \".Lock()\\n\")\n\t\t\tg.Printf(\"\\tdefer d.\" + field.hasLocker + \".Unlock()\\n\")\n\t\t}\n\t\tneedsDereference := false\n\t\tif starFieldType, ok := fieldType.(*ast.StarExpr); ok {\n\t\t\tfieldType = starFieldType.X\n\t\t\tneedsDereference = true\n\t\t}\n\t\tif field.isStringer {\n\t\t\tif needsDereference {\n\t\t\t\tg.Printf(\"if d.%s != nil {\", field.name)\n\t\t\t}\n\t\t\tg.Printf(stringFieldSerialize, \"d.\"+field.name+\".String()\", fieldNameUntitled)\n\t\t\tif field.hasLocker != \"\" {\n\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t}\n\t\t\tif needsDereference {\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tswitch fieldType := fieldType.(type) {\n\t\tcase *ast.SelectorExpr:\n\t\t\t{\n\t\t\t\t// TODO: bit hacky but not sure how else we catch those ones\n\t\t\t\tx := fieldType.X\n\t\t\t\tsel := fieldType.Sel\n\t\t\t\txIdent, xIsIdent := x.(*ast.Ident)\n\t\t\t\tif xIsIdent {\n\t\t\t\t\tif xIdent.Name == \"atomic\" {\n\t\t\t\t\t\tif sel.Name == \"Uint32\" {\n\t\t\t\t\t\t\tg.Printf(uint32FieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn 
err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Uint64\" {\n\t\t\t\t\t\t\tg.Printf(uint64FieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Int32\" {\n\t\t\t\t\t\t\tg.Printf(int32FieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Bool\" {\n\t\t\t\t\t\t\tg.Printf(boolFieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Value\" {\n\t\t\t\t\t\t\tg.Printf(serializableFieldTemplate, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif xIdent.Name == \"sync\" {\n\t\t\t\t\t\tfmt.Printf(\"\\t skipping field %s because it is %v.%v\\n\", fieldName, x, sel)\n\t\t\t\t\t\tif field.hasLocker != \"\" 
{\n\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tg.Printf(serializableFieldTemplate, \"d.\"+field.name, fieldNameUntitled)\n\t\tcase *ast.IndexExpr:\n\t\t\tx := fieldType.X\n\t\t\tif fieldType, isxFieldSelector := x.(*ast.SelectorExpr); isxFieldSelector { // TODO: we need to refactor this so we can reuse...\n\t\t\t\txIdent, xIsIdent := fieldType.X.(*ast.Ident)\n\t\t\t\tsel := fieldType.Sel\n\t\t\t\tif xIsIdent && xIdent.Name == \"atomic\" && sel.Name == \"Pointer\" {\n\t\t\t\t\tg.Printf(atomicPointerFieldTemplate, \"d.\"+field.name, field.name, fieldNameUntitled)\n\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Printf(\"no support yet for %#q\\n\", fieldType)\n\t\t\tcontinue\n\t\tcase *ast.Ident:\n\t\t\tswitch fieldType.Name {\n\t\t\tcase \"byte\":\n\t\t\t\tg.Printf(byteFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"int\":\n\t\t\t\tg.Printf(int64FieldSerialize, \"int64(d.\"+field.name+\")\", fieldNameUntitled)\n\t\t\tcase \"int32\":\n\t\t\t\tg.Printf(int32FieldSerialize, \"int32(d.\"+field.name+\")\", fieldNameUntitled)\n\t\t\tcase \"uint32\":\n\t\t\t\tg.Printf(uint32FieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"bool\":\n\t\t\t\tg.Printf(boolFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"string\":\n\t\t\t\tg.Printf(stringFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"error\":\n\t\t\t\tg.Printf(errorFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tdefault:\n\t\t\t\tfmt.Printf(\"\\t no support implemented for Ident with type %v\\n\", 
fieldType)\n\t\t\t\tg.Printf(\"{\\n\")\n\t\t\t\tg.Printf(\"_value := fmt.Sprintf(\\\"%%v\\\", d.%s)\\n\", fieldName)\n\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", fieldNameUntitled)\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t}\n\t\tcase *ast.ArrayType:\n\t\t\tif eltType, ok := fieldType.Elt.(*ast.Ident); ok && eltType.Name == \"byte\" {\n\t\t\t\tg.Printf(\"if err := writeBuffer.WriteByteArray(%s, d.%s); err != nil {\\n\", fieldNameUntitled, field.name)\n\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t} else {\n\t\t\t\tg.Printf(\"if err := writeBuffer.PushContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn err\\n\\t}\\n\", fieldNameUntitled)\n\t\t\t\tg.Printf(\"for _, elem := range d.%s {\", field.name)\n\t\t\t\tswitch eltType := fieldType.Elt.(type) {\n\t\t\t\tcase *ast.SelectorExpr, *ast.StarExpr:\n\t\t\t\t\tg.Printf(\"\\n\\t\\tvar elem any = elem\\n\")\n\t\t\t\t\tg.Printf(serializableFieldTemplate, \"elem\", \"\\\"value\\\"\")\n\t\t\t\tcase *ast.Ident:\n\t\t\t\t\tswitch eltType.Name {\n\t\t\t\t\tcase \"int\":\n\t\t\t\t\t\tg.Printf(int64FieldSerialize, \"int64(d.\"+field.name+\")\", fieldNameUntitled)\n\t\t\t\t\tcase \"uint32\":\n\t\t\t\t\t\tg.Printf(uint32FieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\t\t\tcase \"bool\":\n\t\t\t\t\t\tg.Printf(boolFieldSerialize, \"elem\", \"\\\"\\\"\")\n\t\t\t\t\tcase \"string\":\n\t\t\t\t\t\tg.Printf(stringFieldSerialize, \"elem\", \"\\\"\\\"\")\n\t\t\t\t\tcase \"error\":\n\t\t\t\t\t\tg.Printf(errorFieldSerialize, \"elem\", \"\\\"\\\"\")\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tfmt.Printf(\"\\t no support implemented for Ident within ArrayType for %v\\n\", fieldType)\n\t\t\t\t\t\tg.Printf(\"_value := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", fieldNameUntitled)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\tg.Printf(\"if err := writeBuffer.PopContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn 
err\\n\\t}\\n\", fieldNameUntitled)\n\t\t\t}\n\t\tcase *ast.MapType:\n\t\t\tg.Printf(\"if err := writeBuffer.PushContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn err\\n\\t}\\n\", fieldNameUntitled)\n\t\t\t// TODO: we use serializable or strings as we don't want to over-complex this\n\t\t\tg.Printf(\"for _name, elem := range d.%s {\\n\", fieldName)\n\t\t\tswitch keyType := fieldType.Key.(type) {\n\t\t\tcase *ast.Ident:\n\t\t\t\tswitch keyType.Name {\n\t\t\t\tcase \"uint\", \"uint8\", \"uint16\", \"uint32\", \"uint64\", \"int\", \"int8\", \"int16\", \"int32\", \"int64\": // TODO: add other types\n\t\t\t\t\tg.Printf(\"\\t\\tname := fmt.Sprintf(\\\"%s\\\", _name)\\n\", \"%v\")\n\t\t\t\tcase \"string\":\n\t\t\t\t\tg.Printf(\"\\t\\tname := _name\\n\")\n\t\t\t\tdefault:\n\t\t\t\t\tg.Printf(\"\\t\\tname := fmt.Sprintf(\\\"%s\\\", &_name)\\n\", \"%v\")\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tg.Printf(\"\\t\\tname := fmt.Sprintf(\\\"%s\\\", &_name)\\n\", \"%v\")\n\t\t\t}\n\t\t\tswitch eltType := fieldType.Value.(type) {\n\t\t\tcase *ast.StarExpr, *ast.SelectorExpr:\n\t\t\t\tg.Printf(\"\\n\\t\\tvar elem any = elem\\n\")\n\t\t\t\tg.Printf(\"\\t\\tif serializable, ok := elem.(utils.Serializable); ok {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := writeBuffer.PushContext(name); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := serializable.SerializeWithWriteBuffer(ctx, writeBuffer); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := writeBuffer.PopContext(name); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t} else {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\telemAsString := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := writeBuffer.WriteString(name, 
uint32(len(elemAsString)*8), \\\"UTF-8\\\", elemAsString); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t}\\n\")\n\t\t\tcase *ast.Ident:\n\t\t\t\tswitch eltType.Name {\n\t\t\t\tcase \"bool\":\n\t\t\t\t\tg.Printf(boolFieldSerialize, \"elem\", \"name\")\n\t\t\t\tcase \"string\":\n\t\t\t\t\tg.Printf(stringFieldSerialize, \"elem\", \"name\")\n\t\t\t\tcase \"error\":\n\t\t\t\t\tg.Printf(errorFieldSerialize, \"elem\", \"name\")\n\t\t\t\tdefault:\n\t\t\t\t\tfmt.Printf(\"\\t no support implemented for Ident within MapType for %v\\n\", fieldType)\n\t\t\t\t\tg.Printf(\"\\t\\t_value := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", \"name\")\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tfmt.Printf(\"\\t no support implemented within MapType %v\\n\", fieldType.Value)\n\t\t\t\tg.Printf(\"\\t\\t_value := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", \"name\")\n\t\t\t}\n\t\t\tg.Printf(\"\\t}\\n\")\n\t\t\tg.Printf(\"if err := writeBuffer.PopContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn err\\n\\t}\\n\", fieldNameUntitled)\n\t\tcase *ast.ChanType:\n\t\t\tg.Printf(chanFieldSerialize, \"d.\"+field.name, fieldNameUntitled, field.name)\n\t\tcase *ast.FuncType:\n\t\t\tg.Printf(funcFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\tdefault:\n\t\t\tfmt.Printf(\"no support implemented %#v\\n\", fieldType)\n\t\t}\n\t\tif field.hasLocker != \"\" {\n\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\tg.Printf(\"}\\n\")\n\t\t}\n\t}\n\tg.Printf(\"\\tif err := writeBuffer.PopContext(%s); err != nil {\\n\", logicalTypeName)\n\tg.Printf(\"\\t\\treturn err\\n\")\n\tg.Printf(\"\\t}\\n\")\n\tg.Printf(\"\\treturn nil\\n\")\n\tg.Printf(\"}\\n\")\n\tg.Printf(\"\\n\")\n\tg.Printf(stringerTemplate, typeName)\n}", "func mmcArgGenerator() string 
{\r\n\tmmcArgs:= make([] string,1000)\t\r\n\tfor i:=0;i<len(mmcArgs);i++{\r\n\t\tmmcArgs[i] = strconv.Itoa(i+1)\r\n\t}\r\n\treturn joinMmcArgs(mmcArgs)\r\n}", "func argInit() args {\n\n\tvar a args\n\tflag.Float64Var(&a.x1, \"x1\", -2.0, \"left position of real axis\")\n\tflag.Float64Var(&a.x2, \"x2\", 1.0, \"right position of real axis\")\n\tflag.Float64Var(&a.y1, \"y1\", -1.5, \"down position of imaginary axis\")\n\tflag.Float64Var(&a.y2, \"y2\", 1.5, \"up position of imaginary axis\")\n\tflag.Float64Var(&a.threshold, \"th\", 4.0, \"squared threshold of the function\")\n\tflag.IntVar(&a.w, \"w\", 1000, \"width in pixels of the image\")\n\tflag.IntVar(&a.h, \"h\", 1000, \"height in pixels of the image\")\n\tflag.IntVar(&a.nIter, \"ni\", 100, \"maximum number of iterations for pixel\")\n\tflag.IntVar(&a.nRoutines, \"nr\", 4, \"number of go routines to be used\")\n\tflag.StringVar(&a.path, \"p\", \"./\", \"path to the generated png image\")\n\n\tflag.Parse()\n\treturn a\n}", "func (p *preprocessorImpl) getDirectiveArguments(info TokenInfo, emptyOk bool) []TokenInfo {\n\tdir := info.Token\n\tvar ret []TokenInfo\n\tfor info = p.lexer.Peek(); info.Token != nil && !info.Newline; info = p.lexer.Peek() {\n\t\tret = append(ret, p.lexer.Next())\n\t}\n\n\tif len(ret) == 0 && !emptyOk {\n\t\tp.err.Errorf(\"%s needs an argument.\", dir)\n\t}\n\n\treturn ret\n}", "func parse(parentField string, v interface{}) ([]argument, error) {\n\n\t// Reflect on the value to get started.\n\trawValue := reflect.ValueOf(v)\n\n\t// If a parent field is provided we are recursing. We are now\n\t// processing a struct within a struct. We need the parent struct\n\t// name for namespacing.\n\tif parentField != \"\" {\n\t\tparentField = strings.ToLower(parentField) + \"_\"\n\t}\n\n\t// We need to check we have a pointer else we can't modify anything\n\t// later. 
With the pointer, get the value that the pointer points to.\n\t// With a struct, that means we are recursing and we need to assert to\n\t// get the inner struct value to process it.\n\tvar val reflect.Value\n\tswitch rawValue.Kind() {\n\tcase reflect.Ptr:\n\t\tval = rawValue.Elem()\n\t\tif val.Kind() != reflect.Struct {\n\t\t\treturn nil, fmt.Errorf(\"incompatible type `%v` looking for a pointer\", val.Kind())\n\t\t}\n\tcase reflect.Struct:\n\t\tvar ok bool\n\t\tif val, ok = v.(reflect.Value); !ok {\n\t\t\treturn nil, fmt.Errorf(\"internal recurse error\")\n\t\t}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"incompatible type `%v`\", rawValue.Kind())\n\t}\n\n\tvar args []argument\n\n\t// We need to iterate over the fields of the struct value we are processing.\n\t// If the field is a struct then recurse to process its fields. If we have\n\t// a field that is not a struct, get pull the metadata. The `field` field\n\t// is important because it is how we update things later.\n\tfor i := 0; i < val.NumField(); i++ {\n\t\tfield := val.Type().Field(i)\n\t\tif field.Type.Kind() == reflect.Struct {\n\t\t\tnewArgs, err := parse(parentField+field.Name, val.Field(i))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\targs = append(args, newArgs...)\n\t\t\tcontinue\n\t\t}\n\n\t\targ := argument{\n\t\t\tShort: field.Tag.Get(\"flag\"),\n\t\t\tLong: parentField + strings.ToLower(field.Name),\n\t\t\tType: field.Type.Name(),\n\t\t\tDefault: field.Tag.Get(\"default\"),\n\t\t\tDesc: field.Tag.Get(\"flagdesc\"),\n\t\t\tfield: val.Field(i),\n\t\t}\n\t\targs = append(args, arg)\n\t}\n\n\treturn args, nil\n}", "func (ec *executionContext) field_Mutation_createTag_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 DayTag\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\tctx := graphql.WithPathContext(ctx, graphql.NewPathWithField(\"input\"))\n\t\targ0, err = 
ec.unmarshalNDayTag2githubᚗcomᚋArtemGretsovᚋgolangᚑgqlgenᚑgormᚑpsqlᚑexampleᚋgraphᚋgeneratedᚐDayTag(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"input\"] = arg0\n\treturn args, nil\n}", "func castArg(prefix string, f field.Field, argIndex int) string {\n\tswitch f.DatatypeName {\n\tcase field.TypeString:\n\t\treturn fmt.Sprintf(\"%s%s := args[%d]\", prefix, f.Name.UpperCamel, argIndex)\n\tcase field.TypeUint, field.TypeInt, field.TypeBool:\n\t\treturn fmt.Sprintf(`%s%s, err := cast.To%sE(args[%d])\n if err != nil {\n return err\n }`,\n\t\t\tprefix, f.Name.UpperCamel, strings.Title(f.Datatype), argIndex)\n\tcase field.TypeCustom:\n\t\treturn fmt.Sprintf(`%[1]v%[2]v := new(types.%[3]v)\n\t\t\terr = json.Unmarshal([]byte(args[%[4]v]), %[1]v%[2]v)\n \t\tif err != nil {\n return err\n }`, prefix, f.Name.UpperCamel, f.Datatype, argIndex)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown type %s\", f.DatatypeName))\n\t}\n}", "func (ec *executionContext) field_Mutation_accesstoken_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 model.AccesstokenRequest\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\tctx := graphql.WithPathContext(ctx, graphql.NewPathWithField(\"input\"))\n\t\targ0, err = ec.unmarshalNAccesstokenRequest2gitlabᚗcomᚋsirinibinᚋgoᚑmysqlᚑgraphqlᚋgraphᚋmodelᚐAccesstokenRequest(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"input\"] = arg0\n\treturn args, nil\n}", "func (o RegistryTaskDockerStepOutput) Arguments() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v RegistryTaskDockerStep) map[string]string { return v.Arguments }).(pulumi.StringMapOutput)\n}", "func getArguments() {\n\t// define pointers to the arguments which will be filled up when flag.Parse() is called\n\tlangFlag := flag.String(\"l\", string(auto), \"Which language to use. 
Args are: lua | wren | moon | auto\")\n\tdirFlag := flag.String(\"d\", \".\", \"The directory containing the main file and the subfiles\")\n\toutFlag := flag.String(\"o\", \"out\", \"The output file (sans extension)\")\n\twatchFlag := flag.Bool(\"w\", false, \"Whether to enable Watch mode, which automatically recompiles if a file has changed in the directory\")\n\tdefinesFlag := flag.String(\"D\", \"\", \"Used to pass in defines before compiling. Format is -D \\\"var1=value;var2=value;var3=value\\\"\")\n\n\t// begin parsing the flags\n\tflag.Parse()\n\n\t// these setup functions have to be performed in this particular order\n\t// because they depend on certain fields of Args to be set when they are called\n\t_setDir(*dirFlag)\n\t_setLanguage(*langFlag)\n\t_setOutputFile(*outFlag)\n\t_setDefines(*definesFlag)\n\n\tArgs.watchMode = *watchFlag\n\n\t// this gives all the non-flag command line args\n\tArgs.positional = flag.Args()\n}", "func (fi *funcInfo) emitVararg(line, a, n int) {\r\n\tfi.emitABC(line, OP_VARARG, a, n+1, 0)\r\n}", "func genConfig() ([]byte, error) {\n\t// Using genflags.getConfig() instead of config.New() because\n\t// it will include any defaults we have on the command line such\n\t// as default plugin selection. 
We didn't want to wire this into\n\t// the `config` package, but it will be a default value the CLI\n\t// users expect.\n\tc := genflags.resolveConfig()\n\tb, err := json.Marshal(c)\n\treturn b, errors.Wrap(err, \"unable to marshal configuration\")\n}", "func (o RegistryTaskDockerStepPtrOutput) Arguments() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *RegistryTaskDockerStep) map[string]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Arguments\n\t}).(pulumi.StringMapOutput)\n}", "func convertBuilderFunc(fn interface{}) ParamFuncBuilder {\n\ttypFn := reflect.TypeOf(fn)\n\tif !goodParamFunc(typFn) {\n\t\t// it's not a function which returns a function,\n\t\t// it's not a a func(compileArgs) func(requestDynamicParamValue) bool\n\t\t// but it's a func(requestDynamicParamValue) bool, such as regexp.Compile.MatchString\n\t\tif typFn.NumIn() == 1 && typFn.In(0).Kind() == reflect.String && typFn.NumOut() == 1 && typFn.Out(0).Kind() == reflect.Bool {\n\t\t\tfnV := reflect.ValueOf(fn)\n\t\t\t// let's convert it to a ParamFuncBuilder which its combile route arguments are empty and not used at all.\n\t\t\t// the below return function runs on each route that this param type function is used in order to validate the function,\n\t\t\t// if that param type function is used wrongly it will be panic like the rest,\n\t\t\t// indeed the only check is the len of arguments not > 0, no types of values or conversions,\n\t\t\t// so we return it as soon as possible.\n\t\t\treturn func(args []string) reflect.Value {\n\t\t\t\tif n := len(args); n > 0 {\n\t\t\t\t\tpanic(fmt.Sprintf(\"%T does not allow any input arguments from route but got [len=%d,values=%s]\", fn, n, strings.Join(args, \", \")))\n\t\t\t\t}\n\t\t\t\treturn fnV\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tnumFields := typFn.NumIn()\n\n\tpanicIfErr := func(i int, err error) {\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"on field index: %d: %v\", i, err))\n\t\t}\n\t}\n\n\treturn func(args []string) 
reflect.Value {\n\t\tif len(args) != numFields {\n\t\t\t// no variadics support, for now.\n\t\t\tpanic(fmt.Sprintf(\"args(len=%d) should be the same len as numFields(%d) for: %s\", len(args), numFields, typFn))\n\t\t}\n\t\tvar argValues []reflect.Value\n\t\tfor i := 0; i < numFields; i++ {\n\t\t\tfield := typFn.In(i)\n\t\t\targ := args[i]\n\n\t\t\t// try to convert the string literal as we get it from the parser.\n\t\t\tvar (\n\t\t\t\tval interface{}\n\t\t\t)\n\n\t\t\t// try to get the value based on the expected type.\n\t\t\tswitch field.Kind() {\n\t\t\tcase reflect.Int:\n\t\t\t\tv, err := strconv.Atoi(arg)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = v\n\t\t\tcase reflect.Int8:\n\t\t\t\tv, err := strconv.ParseInt(arg, 10, 8)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = int8(v)\n\t\t\tcase reflect.Int16:\n\t\t\t\tv, err := strconv.ParseInt(arg, 10, 16)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = int16(v)\n\t\t\tcase reflect.Int32:\n\t\t\t\tv, err := strconv.ParseInt(arg, 10, 32)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = int32(v)\n\t\t\tcase reflect.Int64:\n\t\t\t\tv, err := strconv.ParseInt(arg, 10, 64)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = v\n\t\t\tcase reflect.Uint:\n\t\t\t\tv, err := strconv.ParseUint(arg, 10, strconv.IntSize)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = uint(v)\n\t\t\tcase reflect.Uint8:\n\t\t\t\tv, err := strconv.ParseUint(arg, 10, 8)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = uint8(v)\n\t\t\tcase reflect.Uint16:\n\t\t\t\tv, err := strconv.ParseUint(arg, 10, 16)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = uint16(v)\n\t\t\tcase reflect.Uint32:\n\t\t\t\tv, err := strconv.ParseUint(arg, 10, 32)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = uint32(v)\n\t\t\tcase reflect.Uint64:\n\t\t\t\tv, err := strconv.ParseUint(arg, 10, 64)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = v\n\t\t\tcase reflect.Float32:\n\t\t\t\tv, err := strconv.ParseFloat(arg, 32)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = float32(v)\n\t\t\tcase reflect.Float64:\n\t\t\t\tv, err := 
strconv.ParseFloat(arg, 64)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = v\n\t\t\tcase reflect.Bool:\n\t\t\t\tv, err := strconv.ParseBool(arg)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = v\n\t\t\tcase reflect.Slice:\n\t\t\t\tif len(arg) > 1 {\n\t\t\t\t\tif arg[0] == '[' && arg[len(arg)-1] == ']' {\n\t\t\t\t\t\t// it is a single argument but as slice.\n\t\t\t\t\t\tval = strings.Split(arg[1:len(arg)-1], \",\") // only string slices.\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tval = arg\n\t\t\t}\n\n\t\t\targValue := reflect.ValueOf(val)\n\t\t\tif expected, got := field.Kind(), argValue.Kind(); expected != got {\n\t\t\t\tpanic(fmt.Sprintf(\"func's input arguments should have the same type: [%d] expected %s but got %s\", i, expected, got))\n\t\t\t}\n\n\t\t\targValues = append(argValues, argValue)\n\t\t}\n\n\t\tevalFn := reflect.ValueOf(fn).Call(argValues)[0]\n\n\t\t// var evaluator EvaluatorFunc\n\t\t// // check for typed and not typed\n\t\t// if _v, ok := evalFn.(EvaluatorFunc); ok {\n\t\t// \tevaluator = _v\n\t\t// } else if _v, ok = evalFn.(func(string) bool); ok {\n\t\t// \tevaluator = _v\n\t\t// }\n\t\t// return func(paramValue interface{}) bool {\n\t\t// \treturn evaluator(paramValue)\n\t\t// }\n\t\treturn evalFn\n\t}\n}", "func (ec *executionContext) field_Mutation_addPlantToNursery_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 model.NewNurseryAddition\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\targ0, err = ec.unmarshalNNewNurseryAddition2githubᚗcomᚋwonesyᚋplantparenthoodᚋgraphᚋmodelᚐNewNurseryAddition(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"input\"] = arg0\n\treturn args, nil\n}", "func NewArgument(meta ScriptMetaData, node *node32, value Value) Argument {\n\treturn &argument{astNode: astNode{meta: meta, node: node}, value: value}\n}", "func (c *Compiler) moduleArgStmts(ident string, value *ValueExpr) []dst.Stmt 
{\n\tstmts := []dst.Stmt{}\n\tvariable := util.KebabToCamel(ident)\n\tt := value.Type()\n\tswitch t {\n\tcase ArrayType:\n\t\tstmts = append(stmts, c.compileCollectionExpansion(expandArrayFunc, ident, variable, value)...)\n\tcase ObjectType:\n\t\tstmts = append(stmts, c.compileCollectionExpansion(expandObjectFunc, ident, variable, value)...)\n\tcase FunctionType:\n\t\tassignStmt := &dst.AssignStmt{\n\t\t\tTok: token.DEFINE,\n\t\t\tLhs: []dst.Expr{&dst.Ident{Name: variable}},\n\t\t\tRhs: []dst.Expr{value.ToGoAST()},\n\t\t}\n\t\tstmts = append(stmts, assignStmt)\n\t\terrField := fmt.Sprintf(\"%s.Error\", variable)\n\t\tifErrStmt := &dst.IfStmt{\n\t\t\tCond: &dst.BinaryExpr{\n\t\t\t\tOp: token.NEQ,\n\t\t\t\tX: &dst.Ident{Name: errField},\n\t\t\t\tY: &dst.Ident{Name: nilValue},\n\t\t\t},\n\t\t\tBody: &dst.BlockStmt{\n\t\t\t\tList: []dst.Stmt{\n\t\t\t\t\t&dst.ReturnStmt{\n\t\t\t\t\t\tResults: []dst.Expr{\n\t\t\t\t\t\t\t&dst.Ident{Name: modVar},\n\t\t\t\t\t\t\t&dst.Ident{Name: errField},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tstmts = append(stmts, ifErrStmt)\n\t\tvalueField := fmt.Sprintf(\"%s.Value\", variable)\n\t\tassignFieldStmt := &dst.AssignStmt{\n\t\t\tTok: token.ASSIGN,\n\t\t\tLhs: []dst.Expr{\n\t\t\t\t&dst.Ident{Name: fmt.Sprintf(\"mod.%s\", util.KebabToPascal(ident))},\n\t\t\t},\n\t\t\tRhs: []dst.Expr{\n\t\t\t\t&dst.Ident{Name: valueField},\n\t\t\t},\n\t\t}\n\t\tstmts = append(stmts, assignFieldStmt)\n\tdefault:\n\t\tassignFieldStmt := &dst.AssignStmt{\n\t\t\tTok: token.ASSIGN,\n\t\t\tLhs: []dst.Expr{\n\t\t\t\t&dst.Ident{Name: fmt.Sprintf(\"mod.%s\", util.KebabToPascal(ident))},\n\t\t\t},\n\t\t\tRhs: []dst.Expr{\n\t\t\t\tvalue.ToGoAST(),\n\t\t\t},\n\t\t}\n\t\tstmts = append(stmts, assignFieldStmt)\n\t}\n\treturn stmts\n}", "func GenAST(program []Statement) AST {\n\tvar ast AST\n\tfor _, stmt := range program {\n\t\tv, err := ParseVerb(stmt)\n\t\tif err != nil { //TODO\n\t\t\t//panic(ParserError{stmtIndex: stmtIndex, tok: stmt[0], 
message: fmt.Sprintf(\"First token in statement must be a word, was %s\", stmt[0].tokType.toString())})\n\t\t\tpanic(err)\n\t\t}\n\t\tast = append(ast, v)\n\t}\n\treturn ast\n}", "func printInferredArguments(out *output.Output) {\n\tif out == nil {\n\t\treturn\n\t}\n\n\tblock := out.Block(output.Line(output.EmojiLightbulb, output.StyleItalic, \"Inferred arguments\"))\n\tblock.Writef(\"repo: %s\", codeintelUploadFlags.repo)\n\tblock.Writef(\"commit: %s\", codeintelUploadFlags.commit)\n\tblock.Writef(\"root: %s\", codeintelUploadFlags.root)\n\tblock.Writef(\"file: %s\", codeintelUploadFlags.file)\n\tblock.Writef(\"indexer: %s\", codeintelUploadFlags.indexer)\n\tblock.Writef(\"indexerVersion: %s\", codeintelUploadFlags.indexerVersion)\n\tblock.Close()\n}", "func generateParams(generator *Generator, params []parser.Node) (cParams []string) {\n\t// Translate each parameter\n\tfor _, param := range params {\n\t\t// Append the translated parameter in C\n\t\tcParams = append(\n\t\t\tcParams,\n\t\t\tgenerateInstruction(generator, param),\n\t\t)\n\t}\n\n\treturn\n}", "func expandArgs(s *State, rawArgs [][]argFragment, regexpArgs []int) []string {\n\targs := make([]string, 0, len(rawArgs))\n\tfor i, frags := range rawArgs {\n\t\tisRegexp := false\n\t\tfor _, j := range regexpArgs {\n\t\t\tif i == j {\n\t\t\t\tisRegexp = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tvar b strings.Builder\n\t\tfor _, frag := range frags {\n\t\t\tif frag.quoted {\n\t\t\t\tb.WriteString(frag.s)\n\t\t\t} else {\n\t\t\t\tb.WriteString(s.ExpandEnv(frag.s, isRegexp))\n\t\t\t}\n\t\t}\n\t\targs = append(args, b.String())\n\t}\n\treturn args\n}", "func buildIPArgument(parameter string, environmentVariable string, imageType FDBImageType, sampleAddresses []fdbv1beta2.ProcessAddress) []monitorapi.Argument {\n\tvar leftIPWrap string\n\tvar rightIPWrap string\n\tif imageType == FDBImageTypeUnified {\n\t\tleftIPWrap = \"[\"\n\t\trightIPWrap = \"]\"\n\t} else {\n\t\tleftIPWrap = \"\"\n\t\trightIPWrap = 
\"\"\n\t}\n\targuments := []monitorapi.Argument{{Value: fmt.Sprintf(\"--%s=%s\", parameter, leftIPWrap)}}\n\n\tfor indexOfAddress, address := range sampleAddresses {\n\t\tif indexOfAddress != 0 {\n\t\t\targuments = append(arguments, monitorapi.Argument{Value: fmt.Sprintf(\",%s\", leftIPWrap)})\n\t\t}\n\n\t\targuments = append(arguments,\n\t\t\tmonitorapi.Argument{ArgumentType: monitorapi.EnvironmentArgumentType, Source: environmentVariable},\n\t\t\tmonitorapi.Argument{Value: fmt.Sprintf(\"%s:\", rightIPWrap)},\n\t\t\tmonitorapi.Argument{ArgumentType: monitorapi.ProcessNumberArgumentType, Offset: address.Port - 2, Multiplier: 2},\n\t\t)\n\n\t\tflags := address.SortedFlags()\n\n\t\tif len(flags) > 0 {\n\t\t\targuments = append(arguments, monitorapi.Argument{Value: fmt.Sprintf(\":%s\", strings.Join(flags, \":\"))})\n\t\t}\n\t}\n\treturn arguments\n}", "func (n ClassNode) Codegen(scope *Scope, c *Compiler) value.Value {\n\tstructDefn := scope.FindType(n.Name).Type.(*types.StructType)\n\n\tfieldnames := make([]string, 0, len(n.Variables))\n\tfields := make([]types.Type, 0, len(n.Variables))\n\n\tnames := map[string]bool{}\n\n\tfor _, f := range n.Variables {\n\t\tt := f.Type.Name\n\t\tname := f.Name.String()\n\t\tif _, found := names[name]; found {\n\t\t\tlog.Fatal(\"Class '%s' has two fields/methods named '%s'\\n\", n.Name, f.Name)\n\t\t}\n\t\tnames[name] = true\n\t\tty := scope.FindType(t).Type\n\t\tty = f.Type.BuildPointerType(ty)\n\t\tfields = append(fields, ty)\n\t\tfieldnames = append(fieldnames, name)\n\t}\n\n\tthisArg := VariableDefnNode{}\n\tthisArg.Name = NewNamedReference(\"this\")\n\tthisArg.Type = GeodeTypeRef{}\n\tthisArg.Type.Array = false\n\tthisArg.Type.Name = n.Name\n\tthisArg.Type.PointerLevel = 1\n\n\tstructDefn.Fields = fields\n\tstructDefn.Names = fieldnames\n\n\tmethodBaseArgs := []VariableDefnNode{thisArg}\n\tfor _, m := range n.Methods {\n\t\tm.Name.Value = fmt.Sprintf(\"class.%s.%s\", n.Name, m.Name)\n\t\tif _, found := names[m.Name.String()]; 
found {\n\t\t\tlog.Fatal(\"Class '%s' has two fields/methods named '%s'\\n\", n.Name, m.Name)\n\t\t}\n\t\tnames[m.Name.String()] = true\n\t\tm.Args = append(methodBaseArgs, m.Args...)\n\t\tm.Declare(scope, c)\n\t\tm.Codegen(scope, c)\n\t}\n\n\treturn nil\n}", "func genVariants(arg interface{}) gopter.Gen {\n\targs := arg.([]interface{})\n\ts := args[0].(string)\n\tt := args[1].(string)\n\treturn gen.OneConstOf(s, strings.ToUpper(s), strings.Title(s),\n\t\tfmt.Sprintf(\"%s %s\", s, t),\n\t\tfmt.Sprintf(\"%s %s\", strings.ToUpper(s), t),\n\t\tfmt.Sprintf(\"%s %s\", strings.Title(s), t),\n\t)\n}", "func (ArgumentFalse) argumentNode() {}", "func (ec *executionContext) field_Mutation_createAccount_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 model.NewAccount\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\tctx := graphql.WithPathContext(ctx, graphql.NewPathWithField(\"input\"))\n\t\targ0, err = ec.unmarshalNNewAccount2githubᚗcomᚋannoyingᚑorangeᚋecpᚑapiᚋgraphᚋmodelᚐNewAccount(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"input\"] = arg0\n\treturn args, nil\n}", "func (ec *executionContext) field_Mutation_createArticle_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 models.NewArticle\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\tctx := graphql.WithPathContext(ctx, graphql.NewPathWithField(\"input\"))\n\t\targ0, err = ec.unmarshalNNewArticle2githubᚗcomᚋGlitchyGlitchᚋtypingerᚋmodelsᚐNewArticle(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"input\"] = arg0\n\treturn args, nil\n}", "func (cfg *Config) genTokenListSpec(t xsd.Builtin) ([]spec, error) {\n\tcfg.debugf(\"generating Go source for token list %q\", xsd.XMLName(t).Local)\n\ts := spec{\n\t\tname: strings.ToLower(t.String()),\n\t\texpr: 
builtinExpr(t),\n\t\txsdType: t,\n\t}\n\tmarshal, err := gen.Func(\"MarshalText\").\n\t\tReceiver(\"x \"+s.name).\n\t\tReturns(\"[]byte\", \"error\").\n\t\tBody(`\n\t\t\treturn []byte(strings.Join(x, \" \")), nil\n\t\t`).Decl()\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"MarshalText %s: %v\", s.name, err)\n\t}\n\n\tunmarshal, err := gen.Func(\"UnmarshalText\").\n\t\tReceiver(\"x \" + s.name).\n\t\tArgs(\"text []byte\").\n\t\tReturns(\"error\").\n\t\tBody(`\n\t\t\t*x = bytes.Fields(text)\n\t\t\treturn nil\n\t\t`).Decl()\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"UnmarshalText %s: %v\", s.name, err)\n\t}\n\n\ts.methods = append(s.methods, marshal, unmarshal)\n\treturn []spec{s}, nil\n}", "func BuildArgs(s Servable, argsType reflect.Type, argsValue reflect.Value, req *http.Request, buildStructArg func(s Servable, typeName string, req *http.Request) (v reflect.Value, err error)) ([]reflect.Value, error) {\n\tfieldNum := argsType.NumField()\n\tparams := make([]reflect.Value, fieldNum)\n\tfor i := 0; i < fieldNum; i++ {\n\t\tfield := argsType.Field(i)\n\t\tfieldName := field.Name\n\t\tvalueType := argsValue.FieldByName(fieldName).Type()\n\t\tif field.Type.Kind() == reflect.Ptr && valueType.Elem().Kind() == reflect.Struct {\n\t\t\tconvertor := components(req).Convertor(valueType.Elem().Name())\n\t\t\tif convertor != nil {\n\t\t\t\tparams[i] = convertor(req)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tstructName := valueType.Elem().Name()\n\t\t\tv, err := buildStructArg(s, structName, req)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.New(fmt.Sprintf(\"turbo: failed to BuildArgs, error:%s\", err))\n\t\t\t}\n\t\t\tparams[i] = v\n\t\t\tcontinue\n\t\t}\n\t\tv, _ := findValue(fieldName, req)\n\t\tvalue, err := reflectValue(field.Type, argsValue.FieldByName(fieldName), v)\n\t\tlogErrorIf(err)\n\t\tparams[i] = value\n\t}\n\treturn params, nil\n}", "func Agen(n *Node, res *Node)", "func GenerateValidArg(datatypeName string) string {\n\tswitch datatypeName {\n\tcase 
field.TypeString:\n\t\treturn \"xyz\"\n\tcase field.TypeUint, field.TypeInt:\n\t\treturn \"111\"\n\tcase field.TypeBool:\n\t\treturn valueFalse\n\tcase field.TypeCustom:\n\t\treturn valueNull\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown type %s\", datatypeName))\n\t}\n}", "func TraceFieldGenerator(ctx context.Context) []zapcore.Field {\n\tspanCtx := trace.FromContext(ctx).SpanContext()\n\n\treturn []zapcore.Field{\n\t\tzap.Uint64(\"dd.trace_id\", binary.BigEndian.Uint64(spanCtx.TraceID[8:])),\n\t\tzap.Uint64(\"dd.span_id\", binary.BigEndian.Uint64(spanCtx.SpanID[:])),\n\t}\n}", "func (o BuildStrategySpecBuildStepsOutput) Args() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v BuildStrategySpecBuildSteps) []string { return v.Args }).(pulumi.StringArrayOutput)\n}", "func (ec *executionContext) field_Mutation_activateGame_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 string\n\tif tmp, ok := rawArgs[\"testUUID\"]; ok {\n\t\targ0, err = ec.unmarshalNString2string(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"testUUID\"] = arg0\n\treturn args, nil\n}", "func (c ResolverIndexConfigurationFuncCall) Args() []interface{} {\n\treturn []interface{}{c.Arg0, c.Arg1}\n}", "func (t *Terraform) initArgs(p types.ProviderType, cfg map[string]interface{}, clusterDir string) []string {\n\targs := make([]string, 0)\n\n\tvarsFile := filepath.Join(clusterDir, tfVarsFile)\n\n\targs = append(args, fmt.Sprintf(\"-var-file=%s\", varsFile), clusterDir)\n\n\treturn args\n}", "func AddIndependentPropertyGeneratorsForJsonField(gens map[string]gopter.Gen) {\n\tgens[\"SourceField\"] = gen.PtrOf(gen.AlphaString())\n}", "func NewDynamicArgument(value Value) Argument {\n\treturn &argument{value: value}\n}", "func (ctx *argComplContext) generate(env *complEnv, ch chan<- rawCandidate) error {\n\treturn completeArg(ctx.words, env.evaler, env.argCompleter, 
ch)\n}", "func (n DependencyNode) Codegen(prog *Program) (value.Value, error) { return nil, nil }", "func ASTArgsFromStmt(stmt string) (*ASTArgs, error) {\n\tstmtNode, err := parser.New().ParseOneStmt(stmt, \"\", \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tloadDataStmt, ok := stmtNode.(*ast.LoadDataStmt)\n\tif !ok {\n\t\treturn nil, errors.Errorf(\"stmt %s is not load data stmt\", stmt)\n\t}\n\treturn &ASTArgs{\n\t\tFileLocRef: loadDataStmt.FileLocRef,\n\t\tColumnsAndUserVars: loadDataStmt.ColumnsAndUserVars,\n\t\tColumnAssignments: loadDataStmt.ColumnAssignments,\n\t\tOnDuplicate: loadDataStmt.OnDuplicate,\n\t\tFieldsInfo: loadDataStmt.FieldsInfo,\n\t\tLinesInfo: loadDataStmt.LinesInfo,\n\t}, nil\n}", "func (g *Generator) generate(typeInfo typeInfo) {\n\t// <key, value>\n\tvalues := make([]Value, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeInfo = typeInfo\n\t\tfile.values = nil\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tvalues = append(values, file.values...)\n\t\t}\n\t}\n\n\tif len(values) == 0 {\n\t\tlog.Fatalf(\"no values defined for type %+v\", typeInfo)\n\t}\n\tg.transformValueNames(values, transformMethod)\n\t// Generate code that will fail if the constants change value.\n\tfor _, im := range checkImportPackages {\n\t\tg.Printf(stringImport, im)\n\t}\n\n\tif useNew {\n\t\tfor _, im := range newImportPackages {\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\tif useBinary {\n\t\tfor _, im := range binaryImportPackages {\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\tif useJson {\n\t\tfor _, im := range jsonImportPackages {\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\tif useText {\n\t\tfor _, im := range textImportPackages {\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\tif useYaml {\n\t\tfor _, im := range yamlImportPackages {\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\tif useSql {\n\t\tfor _, im := range sqlImportPackages 
{\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\n\tg.buildEnumRegenerateCheck(values)\n\n\truns := splitIntoRuns(values)\n\tthreshold := 10\n\n\tif useString {\n\t\t// The decision of which pattern to use depends on the number of\n\t\t// runs in the numbers. If there's only one, it's easy. For more than\n\t\t// one, there's a tradeoff between complexity and size of the data\n\t\t// and code vs. the simplicity of a map. A map takes more space,\n\t\t// but so does the code. The decision here (crossover at 10) is\n\t\t// arbitrary, but considers that for large numbers of runs the cost\n\t\t// of the linear scan in the switch might become important, and\n\t\t// rather than use yet another algorithm such as binary search,\n\t\t// we punt and use a map. In any case, the likelihood of a map\n\t\t// being necessary for any realistic example other than bitmasks\n\t\t// is very low. And bitmasks probably deserve their own analysis,\n\t\t// to be done some other day.\n\t\tswitch {\n\t\tcase len(runs) == 1:\n\t\t\tg.buildOneRun(runs, typeInfo)\n\t\tcase len(runs) <= threshold:\n\t\t\tg.buildMultipleRuns(runs, typeInfo)\n\t\tdefault:\n\t\t\tg.buildMap(runs, typeInfo)\n\t\t}\n\t}\n\n\tif useNew {\n\t\tg.Printf(newTemplate, typeInfo.Name)\n\t}\n\tif useBinary {\n\t\tg.buildCheck(runs, typeInfo.Name, threshold)\n\t\tg.Printf(binaryTemplate, typeInfo.Name)\n\t}\n\tif useJson {\n\t\tg.buildCheck(runs, typeInfo.Name, threshold)\n\t\tg.Printf(jsonTemplate, typeInfo.Name)\n\t}\n\tif useText {\n\t\tg.buildCheck(runs, typeInfo.Name, threshold)\n\t\tg.Printf(textTemplate, typeInfo.Name)\n\t}\n\tif useYaml {\n\t\tg.buildCheck(runs, typeInfo.Name, threshold)\n\t\tg.Printf(yamlTemplate, typeInfo.Name)\n\t}\n\tif useSql {\n\t\tg.buildCheck(runs, typeInfo.Name, threshold)\n\t\tg.Printf(sqpTemplate, typeInfo.Name)\n\t}\n\n\tif useContains {\n\t\tg.Printf(containsTemplate, typeInfo.Name)\n\t}\n}", "func printInferredArguments(out *output.Output) {\n\tif out == nil {\n\t\treturn\n\t}\n\n\tblock 
:= out.Block(output.Line(output.EmojiLightbulb, output.StyleItalic, \"Inferred arguments\"))\n\tblock.Writef(\"repo: %s\", lsifUploadFlags.repo)\n\tblock.Writef(\"commit: %s\", lsifUploadFlags.commit)\n\tblock.Writef(\"root: %s\", lsifUploadFlags.root)\n\tblock.Writef(\"file: %s\", lsifUploadFlags.file)\n\tblock.Writef(\"indexer: %s\", lsifUploadFlags.indexer)\n\tblock.Close()\n}", "func parseArgument(p *parser) (*ast.Argument, error) {\n\tvar label string\n\tvar labelStartPos, labelEndPos ast.Position\n\n\texpr, err := parseExpression(p, lowestBindingPower)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.skipSpaceAndComments()\n\n\t// If a colon follows the expression, the expression was our label.\n\tif p.current.Is(lexer.TokenColon) {\n\t\tlabelEndPos = p.current.EndPos\n\n\t\tidentifier, ok := expr.(*ast.IdentifierExpression)\n\t\tif !ok {\n\t\t\treturn nil, p.syntaxError(\n\t\t\t\t\"expected identifier for label, got %s\",\n\t\t\t\texpr,\n\t\t\t)\n\t\t}\n\t\tlabel = identifier.Identifier.Identifier\n\t\tlabelStartPos = expr.StartPosition()\n\n\t\t// Skip the identifier\n\t\tp.nextSemanticToken()\n\n\t\texpr, err = parseExpression(p, lowestBindingPower)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif len(label) > 0 {\n\t\treturn ast.NewArgument(\n\t\t\tp.memoryGauge,\n\t\t\tlabel,\n\t\t\t&labelStartPos,\n\t\t\t&labelEndPos,\n\t\t\texpr,\n\t\t), nil\n\t}\n\treturn ast.NewUnlabeledArgument(p.memoryGauge, expr), nil\n}", "func (t *Type) ChanArgs() *Type", "func GenerateGoTestRunArgs(goFlagsConfig GoFlagsConfig) ([]string, error) {\n\tflags := GoRunFlags.WithPrefix(\"test\")\n\tbindings := map[string]interface{}{\n\t\t\"Go\": &goFlagsConfig,\n\t}\n\n\targs, err := GenerateFlagArgs(flags, bindings)\n\tif err != nil {\n\t\treturn args, err\n\t}\n\targs = append(args, \"--test.v\")\n\treturn args, nil\n}", "func GenGoCodeFromParams(parameters []StructParameter) (string, error) {\n\tvar buf bytes.Buffer\n\n\tfor _, parameter := range parameters 
{\n\t\tif parameter.Usage == \"\" {\n\t\t\tparameter.Usage = \"-\"\n\t\t}\n\t\tfmt.Fprintf(&buf, \"// %s %s\\n\", DefaultNamer.FieldName(parameter.Name), parameter.Usage)\n\t\tgenField(parameter, &buf)\n\t}\n\tsource, err := format.Source(buf.Bytes())\n\tif err != nil {\n\t\tfmt.Println(\"Failed to format source:\", err)\n\t}\n\n\treturn string(source), nil\n}", "func (p *Planner) addVariableDefinitionsRecursively(value ast.Value, sourcePath []string, fieldName []byte) {\n\tswitch value.Kind {\n\tcase ast.ValueKindObject:\n\t\tprevArgTypeRef := p.argTypeRef\n\t\tp.argTypeRef = p.resolveNestedArgumentType(fieldName)\n\t\tfor _, objectFieldRef := range p.visitor.Operation.ObjectValues[value.Ref].Refs {\n\t\t\tp.addVariableDefinitionsRecursively(p.visitor.Operation.ObjectFields[objectFieldRef].Value, sourcePath, p.visitor.Operation.ObjectFieldNameBytes(objectFieldRef))\n\t\t}\n\t\tp.argTypeRef = prevArgTypeRef\n\t\treturn\n\tcase ast.ValueKindList:\n\t\tfor _, i := range p.visitor.Operation.ListValues[value.Ref].Refs {\n\t\t\tp.addVariableDefinitionsRecursively(p.visitor.Operation.Values[i], sourcePath, nil)\n\t\t}\n\t\treturn\n\tcase ast.ValueKindVariable:\n\t\t// continue after switch\n\tdefault:\n\t\treturn\n\t}\n\n\tvariableName := p.visitor.Operation.VariableValueNameBytes(value.Ref)\n\tvariableNameStr := p.visitor.Operation.VariableValueNameString(value.Ref)\n\tvariableDefinition, exists := p.visitor.Operation.VariableDefinitionByNameAndOperation(p.visitor.Walker.Ancestors[0].Ref, variableName)\n\tif !exists {\n\t\treturn\n\t}\n\timportedVariableDefinition := p.visitor.Importer.ImportVariableDefinition(variableDefinition, p.visitor.Operation, p.upstreamOperation)\n\tp.upstreamOperation.AddImportedVariableDefinitionToOperationDefinition(p.nodes[0].Ref, importedVariableDefinition)\n\n\tfieldType := p.resolveNestedArgumentType(fieldName)\n\tcontextVariable := &resolve.ContextVariable{\n\t\tPath: append(sourcePath, variableNameStr),\n\t\tRenderAsGraphQLValue: 
true,\n\t}\n\tcontextVariable.SetJsonValueType(p.visitor.Definition, p.visitor.Definition, fieldType)\n\n\tcontextVariableName, variableExists := p.variables.AddVariable(contextVariable)\n\tif variableExists {\n\t\treturn\n\t}\n\tp.upstreamVariables, _ = sjson.SetRawBytes(p.upstreamVariables, variableNameStr, []byte(contextVariableName))\n}", "func (node *Argument) formatFast(buf *TrackedBuffer) {\n\tbuf.WriteArg(\":\", node.Name)\n\tif node.Type >= 0 {\n\t\t// For bind variables that are statically typed, emit their type as an adjacent comment.\n\t\t// This comment will be ignored by older versions of Vitess (and by MySQL) but will provide\n\t\t// type safety when using the query as a cache key.\n\t\tbuf.WriteString(\" /* \")\n\t\tbuf.WriteString(node.Type.String())\n\t\tbuf.WriteString(\" */\")\n\t}\n}", "func buildRuleToGenerateAnnotationFlags(ctx android.ModuleContext, desc string, classesJars android.Paths, stubFlagsCSV android.Path, outputPath android.WritablePath) {\n\tctx.Build(pctx, android.BuildParams{\n\t\tRule: hiddenAPIGenerateCSVRule,\n\t\tDescription: desc,\n\t\tInputs: classesJars,\n\t\tOutput: outputPath,\n\t\tImplicit: stubFlagsCSV,\n\t\tArgs: map[string]string{\n\t\t\t\"outFlag\": \"--write-flags-csv\",\n\t\t\t\"stubAPIFlags\": stubFlagsCSV.String(),\n\t\t},\n\t})\n}", "func GenLiftParams(ringQ *ring.Ring, t uint64) (deltaMont []uint64) {\n\n\tdelta := new(big.Int).Quo(ringQ.ModulusBigint, ring.NewUint(t))\n\n\tdeltaMont = make([]uint64, len(ringQ.Modulus))\n\n\ttmp := new(big.Int)\n\tbredParams := ringQ.BredParams\n\tfor i, Qi := range ringQ.Modulus {\n\t\tdeltaMont[i] = tmp.Mod(delta, ring.NewUint(Qi)).Uint64()\n\t\tdeltaMont[i] = ring.MForm(deltaMont[i], Qi, bredParams[i])\n\t}\n\n\treturn\n}", "func newArguments(arguments []string) *Arguments {\n\treturn &Arguments{\n\t\targs: arguments,\n\t\tcount: len(arguments),\n\t\tindex: 0,\n\t\trawMode: false,\n\t}\n}", "func (s *BasePlSqlParserListener) EnterArgument(ctx *ArgumentContext) {}", "func 
newFormulaArgMatrix(numMtx [][]float64) (arg [][]formulaArg) {\n\tfor r, row := range numMtx {\n\t\targ = append(arg, make([]formulaArg, len(row)))\n\t\tfor c, cell := range row {\n\t\t\targ[r][c] = newNumberFormulaArg(cell)\n\t\t}\n\t}\n\treturn\n}", "func fieldArgNamesStruct(obj any, path string, nest bool, allArgs map[string]reflect.Value) {\n\tif kit.IfaceIsNil(obj) {\n\t\treturn\n\t}\n\tov := reflect.ValueOf(obj)\n\tif ov.Kind() == reflect.Pointer && ov.IsNil() {\n\t\treturn\n\t}\n\tval := kit.NonPtrValue(ov)\n\ttyp := val.Type()\n\tfor i := 0; i < typ.NumField(); i++ {\n\t\tf := typ.Field(i)\n\t\tfv := val.Field(i)\n\t\tif kit.NonPtrType(f.Type).Kind() == reflect.Struct {\n\t\t\tnwPath := f.Name\n\t\t\tif path != \"\" {\n\t\t\t\tnwPath = path + \".\" + nwPath\n\t\t\t}\n\t\t\tnwNest := nest\n\t\t\tif !nwNest {\n\t\t\t\tneststr, ok := f.Tag.Lookup(\"nest\")\n\t\t\t\tif ok && (neststr == \"+\" || neststr == \"true\") {\n\t\t\t\t\tnwNest = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tfieldArgNamesStruct(kit.PtrValue(fv).Interface(), nwPath, nwNest, allArgs)\n\t\t\tcontinue\n\t\t}\n\t\tpval := kit.PtrValue(fv)\n\t\taddAllCases(f.Name, path, pval, allArgs)\n\t\tif f.Type.Kind() == reflect.Bool {\n\t\t\taddAllCases(\"No\"+f.Name, path, pval, allArgs)\n\t\t}\n\t\t// now process adding non-nested version of field\n\t\tif path == \"\" || nest {\n\t\t\tcontinue\n\t\t}\n\t\tneststr, ok := f.Tag.Lookup(\"nest\")\n\t\tif ok && (neststr == \"+\" || neststr == \"true\") {\n\t\t\tcontinue\n\t\t}\n\t\tif _, has := allArgs[f.Name]; has {\n\t\t\tmpi.Printf(\"econfig Field: %s.%s cannot be added as a non-nested %s arg because it has already been registered -- add 'nest:'+'' field tag to the one you want to keep only as a nested arg with path, to eliminate this message\\n\", path, f.Name, f.Name)\n\t\t\tcontinue\n\t\t}\n\t\taddAllCases(f.Name, \"\", pval, allArgs)\n\t\tif f.Type.Kind() == reflect.Bool {\n\t\t\taddAllCases(\"No\"+f.Name, \"\", pval, allArgs)\n\t\t}\n\t}\n}", "func (gen 
*jsGenerator) init(args *Arguments) error {\n\tif !args.GenClient && !args.GenModel && !args.GenServer {\n\t\treturn fmt.Errorf(\"nothing to do\")\n\t} else if len(args.Options) > 0 {\n\t\tfor k, v := range args.Options {\n\t\t\tswitch k {\n\t\t\tcase \"controller\":\n\t\t\t\tif v == \"\" {\n\t\t\t\t\treturn fmt.Errorf(\"controller cannot be empty\")\n\t\t\t\t}\n\t\t\t\tgen.baseControllerName = v\n\t\t\tcase \"output\":\n\t\t\t\tif v == \"\" {\n\t\t\t\t\treturn fmt.Errorf(\"output option cannot be empty. Valid options are 'ns-flat' and 'ns-nested'\")\n\t\t\t\t} else if v != \"ns-flat\" && v != \"ns-nested\" {\n\t\t\t\t\treturn fmt.Errorf(\"invalid output option: %s: valid options are 'ns-flat' and 'ns-nested'\", v)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"the %s option is not applicable to language js\", k)\n\t\t\t}\n\t\t}\n\t}\n\tgen.args = args\n\treturn gen.loadTempates(args.TemplateDir, \"js\", template.FuncMap{\n\t\t\"formatType\": func(t *idl.Type) string { return gen.formatType(t) },\n\t\t\"fullNameOf\": func(name string) string { return gen.fullNameOf(name) },\n\t\t\"formatValue\": func(p *idl.Pair) string { return gen.formatLiteral(p.Value, p.DataType) },\n\t\t\"filterAttrs\": func(attrs []*idl.Attribute) []*idl.Attribute { return gen.filterAttributes(attrs) },\n\t\t\"isVoid\": func(t *idl.Type) bool { return gen.isVoid(t) },\n\t\t\"isTrivialProperty\": func(t *idl.Type) bool { return gen.isTrivialProperty(t) },\n\t\t\"usings\": func() []string {\n\t\t\tpkg := gen.tplRootIdl.Namespaces[\"js\"]\n\t\t\timports := make([]string, 0)\n\t\t\tfor _, i := range gen.tplRootIdl.UniqueNamespaces(\"js\") {\n\t\t\t\tif i != pkg {\n\t\t\t\t\t//relPath := i[len(pkg)];\n\t\t\t\t\t//fmt.Println(relPath);\n\t\t\t\t\t//fmt.Println(\"pkg => \" + pkg);\n\t\t\t\t\t//fmt.Println(\"i => \" + i);\n\t\t\t\t\trelPath := \"\"\n\t\t\t\t\tfor x := 0; x < len(strings.Split(pkg, \".\")); x++ {\n\t\t\t\t\t\trelPath += \"../\"\n\t\t\t\t\t}\n\t\t\t\t\t//fmt.Println(\"relPath 
=> \" + relPath);\n\t\t\t\t\timports = append(imports, relPath+strings.Replace(i, \".\", \"/\", -1))\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn imports\n\t\t},\n\t\t\"isNotPascalCase\": func(name string) bool {\n\t\t\tif len(name) > 1 {\n\t\t\t\treturn strings.ToUpper(name[0:1]) != name[0:1]\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\t\"baseController\": func() string {\n\t\t\tif gen.baseControllerName != \"\" {\n\t\t\t\treturn gen.baseControllerName\n\t\t\t} else {\n\t\t\t\treturn \"Concur.Babel.Mvc.BabelController\"\n\t\t\t}\n\t\t},\n\t\t\"cast\": func(t *idl.Type) string {\n\t\t\tif t.Name == \"float32\" {\n\t\t\t\treturn \"(float)\"\n\t\t\t} else {\n\t\t\t\treturn \"\"\n\t\t\t}\n\t\t},\n\t\t\"constType\": func(s string) string {\n\t\t\tcs, ok := jsConstTypes[s]\n\t\t\tif ok {\n\t\t\t\treturn cs\n\t\t\t} else {\n\t\t\t\treturn \"string\"\n\t\t\t}\n\t\t},\n\t})\n}", "func ExecForm(node *parser.Node) (arguments string) {\n\ttmp := []string{}\n\n\tfor n := node.Next; n != nil; n = n.Next {\n\t\tvalue := n.Value\n\t\tif strings.HasPrefix(n.Value, `\"`) && strings.HasSuffix(n.Value, `\"`) {\n\t\t\tvalue = strings.TrimPrefix(value, `\"`)\n\t\t\tvalue = strings.TrimSuffix(value, `\"`)\n\t\t}\n\t\ttmp = append(tmp, `\"`+strings.ReplaceAll(value, `\"`, `\\\"`)+`\"`)\n\t}\n\n\targuments = `[ ` + strings.Join(tmp, `,`) + ` ]`\n\n\tif len(node.Flags) > 0 {\n\t\targuments = fmt.Sprintf(\"%s %s\", strings.Join(node.Flags, \" \"), arguments)\n\t}\n\n\treturn arguments + \"\\n\"\n}", "func (ec *executionContext) field_Mutation_createCar_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 model.CreateCarInput\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\tctx := graphql.WithPathContext(ctx, graphql.NewPathWithField(\"input\"))\n\t\targ0, err = ec.unmarshalNCreateCarInput2githubᚗcomᚋuchᚑkudukᚋnaiveᚑgraphqlᚗgitᚋgraphᚋmodelᚐCreateCarInput(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t}\n\targs[\"input\"] = arg0\n\treturn args, nil\n}", "func reflectArgs(fnType reflect.Type, args []Argument) []reflect.Value {\n\tin := make([]reflect.Value, len(args))\n\n\tfor k, arg := range args {\n\t\tif arg == nil {\n\t\t\t// Use the zero value of the function parameter type,\n\t\t\t// since \"reflect.Call\" doesn't accept \"nil\" parameters\n\t\t\tin[k] = reflect.New(fnType.In(k)).Elem()\n\t\t} else {\n\t\t\tin[k] = reflect.ValueOf(arg)\n\t\t}\n\t}\n\n\treturn in\n}", "func MapFieldsToTypExpr(args ...*ast.Field) []ast.Expr {\n\tr := []ast.Expr{}\n\tfor idx, f := range args {\n\t\tif len(f.Names) == 0 {\n\t\t\tf.Names = []*ast.Ident{ast.NewIdent(fmt.Sprintf(\"f%d\", idx))}\n\t\t}\n\n\t\tfor _ = range f.Names {\n\t\t\tr = append(r, f.Type)\n\t\t}\n\n\t}\n\treturn r\n}", "func ArgumentCustomType(name string, values ...Argument) Argument {\n\treturn Argument{name, argumentSlice(values)}\n}", "func generateCall(generator *Generator, node parser.Node) string {\n\tvar identifier string\n\n\t// Check if it is a built-in function or not\n\tif strings.Contains(node.Value, \"|\") {\n\t\t// Get the function identifier by spliting the value by the pipe\n\t\tidentifier = strings.Split(node.Value, \"|\")[1]\n\n\t\tcheckCall(generator, node)\n\n\t\t// Add import to the generator\n\t\taddCallImport(\n\t\t\tgenerator,\n\t\t\tnode.Value,\n\t\t)\n\t} else {\n\t\tidentifier = node.Value\n\t}\n\n\t// Translate the params\n\tparams := generateParams(generator, node.Params)\n\n\t// Link all the translations together\n\treturn fmt.Sprintf(\n\t\tcCall,\n\t\tidentifier,\n\t\tstrings.Join(params, \",\"),\n\t)\n}", "func (c ResolverCommitGraphFuncCall) Args() []interface{} {\n\treturn []interface{}{c.Arg0, c.Arg1}\n}", "func (s *BaselimboListener) EnterFormal_arg_list(ctx *Formal_arg_listContext) {}", "func argsFn(args ...OBJ) OBJ {\n\tl := len(os.Args[1:])\n\tresult := make([]OBJ, l)\n\tfor i, txt := range os.Args[1:] {\n\t\tresult[i] = &object.String{Value: 
txt}\n\t}\n\treturn &object.Array{Elements: result}\n}", "func decodeArg(b *hcl.Block) (*Arg, errors.Error) {\n\targ := new(Arg)\n\targ.name = b.Labels[0]\n\tbc, d := b.Body.Content(schemaArg)\n\tif err := errors.EvalDiagnostics(d); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := arg.populateArgAttributes(bc.Attributes); err != nil {\n\t\treturn nil, err\n\t}\n\treturn arg, nil\n}", "func generateStruct(a *AnnotationDoc, packageName string, imports []string, indent string) (string, []string) {\n\tvar allAnnotationsPackages []string\n\tpossiblePackagesForA := combinePackages(imports, []string{packageName})\n\tts, foundPackageOfA, foundImportsOfA := getAnnotationStruct(a.Name, possiblePackagesForA)\n\tallAnnotationsPackages = combinePackages(allAnnotationsPackages, []string{foundPackageOfA})\n\tstr, _ := ts.Type.(*ast.StructType)\n\tvar b bytes.Buffer\n\tb.WriteString(indent)\n\tb.WriteString(foundPackageOfA)\n\tb.WriteString(\".\")\n\tb.WriteString(a.Name)\n\tb.WriteString(\"{\\n\")\n\tchildIndent := indent + \" \"\n\tfor _, f := range str.Fields.List {\n\t\tfieldName := getFieldName(f)\n\t\tdefValue := getDefaultValue(f)\n\t\tfieldKey := fieldName\n\t\t// consider special case when only default parameter is specified\n\t\tif len(str.Fields.List) == 1 && len(a.Content) == 1 {\n\t\t\tfor key := range a.Content {\n\t\t\t\tif key == DEFAULT_PARAM {\n\t\t\t\t\tfieldKey = DEFAULT_PARAM\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tvalue, found := a.Content[fieldKey]\n\t\tif found {\n\t\t\tswitch t := value.(type) {\n\t\t\tcase string:\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(getLiteral(f.Type, t, false))\n\t\t\t\tb.WriteString(\",\\n\")\n\t\t\tcase []string:\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(getFieldConstructor(f.Type))\n\t\t\t\tb.WriteString(\"\\n\")\n\t\t\t\tfor _, elem := range t {\n\t\t\t\t\tb.WriteString(childIndent + \" 
\")\n\t\t\t\t\tb.WriteString(elem)\n\t\t\t\t\tb.WriteString(\",\\n\")\n\t\t\t\t}\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(\"}\")\n\t\t\tcase []AnnotationDoc:\n\t\t\t\t// calculate array's elements\n\t\t\t\tvar bb bytes.Buffer\n\t\t\t\tfor _, sa := range t {\n\t\t\t\t\tchildCode, foundImportsOfChild := generateStruct(&sa, foundPackageOfA, foundImportsOfA, childIndent+\" \")\n\t\t\t\t\tallAnnotationsPackages = combinePackages(allAnnotationsPackages, foundImportsOfChild)\n\t\t\t\t\tbb.WriteString(childCode)\n\t\t\t\t\tbb.WriteString(\",\\n\")\n\t\t\t\t}\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\t// insert array initialzer of child annotation type\n\t\t\t\ts := writeArrayInitializer(&b, bb.String())\n\t\t\t\t// append array of child annotations\n\t\t\t\tb.WriteString(\"{\\n\")\n\t\t\t\tb.WriteString(childIndent + \" \")\n\t\t\t\tb.WriteString(s)\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(\"},\\n\")\n\t\t\tcase AnnotationDoc:\n\t\t\t\tchildCode, foundImportsOfChild := generateStruct(&t, foundPackageOfA, foundImportsOfA, childIndent)\n\t\t\t\tallAnnotationsPackages = combinePackages(allAnnotationsPackages, foundImportsOfChild)\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tif isOptional(f.Type) {\n\t\t\t\t\tb.WriteString(\"&\")\n\t\t\t\t}\n\t\t\t\tb.WriteString(strings.TrimLeft(childCode, \" \"))\n\t\t\t\tb.WriteString(\",\\n\")\n\t\t\tdefault:\n\t\t\t\tpanic(\"Unexpected annotation value type\")\n\t\t\t}\n\t\t} else {\n\t\t\tb.WriteString(childIndent)\n\t\t\tb.WriteString(defValue)\n\t\t\tb.WriteString(\",\\n\")\n\t\t}\n\t}\n\tb.WriteString(indent)\n\tb.WriteString(\"}\")\n\treturn b.String(), allAnnotationsPackages\n}", "func (mi TestModuleInfo) GenerateBuildActions(blueprint.ModuleContext) {}", "func (me *TxsdArguments) Walk() (err error) {\n\tif fn := WalkHandlers.TxsdArguments; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) 
{\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err = me.XsdGoPkgHasElems_ArgumentsequenceTxsdArgumentsArgumentsschema_Argument_TxsdArgumentsSequenceArgument_.Walk(); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\treturn\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func Fields(fields ...string) Parameter {\n\treturn func(pa Parameterizable) {\n\t\tpa.SetParameter(\"fields\", fields)\n\t}\n}", "func getConfigArgs(action BuildAction, dir string, ctx Context, args []string) []string {\n\t// The next block of code verifies that the current directory is the root directory of the source\n\t// tree. It then finds the relative path of dir based on the root directory of the source tree\n\t// and verify that dir is inside of the source tree.\n\tcheckTopDir(ctx)\n\ttopDir, err := os.Getwd()\n\tif err != nil {\n\t\tctx.Fatalf(\"Error retrieving top directory: %v\", err)\n\t}\n\tdir, err = filepath.EvalSymlinks(dir)\n\tif err != nil {\n\t\tctx.Fatalf(\"Unable to evaluate symlink of %s: %v\", dir, err)\n\t}\n\tdir, err = filepath.Abs(dir)\n\tif err != nil {\n\t\tctx.Fatalf(\"Unable to find absolute path %s: %v\", dir, err)\n\t}\n\trelDir, err := filepath.Rel(topDir, dir)\n\tif err != nil {\n\t\tctx.Fatalf(\"Unable to find relative path %s of %s: %v\", relDir, topDir, err)\n\t}\n\t// If there are \"..\" in the path, it's not in the source tree.\n\tif strings.Contains(relDir, \"..\") {\n\t\tctx.Fatalf(\"Directory %s is not under the source tree %s\", dir, topDir)\n\t}\n\n\tconfigArgs := args[:]\n\n\t// If the arguments contains GET-INSTALL-PATH, change the target name prefix from MODULES-IN- to\n\t// GET-INSTALL-PATH-IN- to extract the installation path instead of building the modules.\n\ttargetNamePrefix := \"MODULES-IN-\"\n\tif inList(\"GET-INSTALL-PATH\", configArgs) {\n\t\ttargetNamePrefix = 
\"GET-INSTALL-PATH-IN-\"\n\t\tconfigArgs = removeFromList(\"GET-INSTALL-PATH\", configArgs)\n\t}\n\n\tvar targets []string\n\n\tswitch action {\n\tcase BUILD_MODULES:\n\t\t// No additional processing is required when building a list of specific modules or all modules.\n\tcase BUILD_MODULES_IN_A_DIRECTORY:\n\t\t// If dir is the root source tree, all the modules are built of the source tree are built so\n\t\t// no need to find the build file.\n\t\tif topDir == dir {\n\t\t\tbreak\n\t\t}\n\n\t\tbuildFile := findBuildFile(ctx, relDir)\n\t\tif buildFile == \"\" {\n\t\t\tctx.Fatalf(\"Build file not found for %s directory\", relDir)\n\t\t}\n\t\ttargets = []string{convertToTarget(filepath.Dir(buildFile), targetNamePrefix)}\n\tcase BUILD_MODULES_IN_DIRECTORIES:\n\t\tnewConfigArgs, dirs := splitArgs(configArgs)\n\t\tconfigArgs = newConfigArgs\n\t\ttargets = getTargetsFromDirs(ctx, relDir, dirs, targetNamePrefix)\n\t}\n\n\t// Tidy only override all other specified targets.\n\ttidyOnly := os.Getenv(\"WITH_TIDY_ONLY\")\n\tif tidyOnly == \"true\" || tidyOnly == \"1\" {\n\t\tconfigArgs = append(configArgs, \"tidy_only\")\n\t} else {\n\t\tconfigArgs = append(configArgs, targets...)\n\t}\n\n\treturn configArgs\n}", "func ProcessArgs(cfg *Config) (u []string) {\n\tflag.Usage = func() {\n\t\tfmt.Println(\"Usage:\\nkdevpije user1,user2,alias3... 
[default|week|sprint]\")\n\t\tflag.PrintDefaults()\n\t}\n\tvar debugFlag = flag.Bool(\"debug\", false, \"Print logs to stderr\")\n\tvar reloadData = flag.Bool(\"reloadData\", false, \"Download list of employees again\")\n\tflag.Parse() // Scan the arguments list\n\n\tif !*debugFlag {\n\t\tlog.SetOutput(ioutil.Discard)\n\t}\n\tlog.Println(\"Processing arguments\")\n\tcfg.ReloadData = *reloadData\n\temps := flag.Arg(0)\n\tif emps == \"\" {\n\t\tflag.PrintDefaults()\n\t\treturn\n\t}\n\tu = strings.Split(emps, \",\")\n\tu = employees.ExpandFiveTimes(u, cfg.Aliases)\n\n\ttimeframe := flag.Arg(1)\n\tif timeframe == \"\" {\n\t\ttimeframe = \"default\"\n\t}\n\ttf, ok := cfg.Intervals[timeframe]\n\tif !ok {\n\t\ttf = 1\n\t}\n\tcfg.TimeFrame = tf\n\tcfg.PDConfig.TimeFrame = cfg.TimeFrame\n\tlog.Println(\"Processed config:\", cfg)\n\treturn\n}", "func (ec *executionContext) field_Mutation_ChangeReceiver_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 string\n\tif tmp, ok := rawArgs[\"txcode\"]; ok {\n\t\tctx := graphql.WithPathContext(ctx, graphql.NewPathWithField(\"txcode\"))\n\t\targ0, err = ec.unmarshalNString2string(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"txcode\"] = arg0\n\tvar arg1 model.CustomerChanges\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\tctx := graphql.WithPathContext(ctx, graphql.NewPathWithField(\"input\"))\n\t\targ1, err = ec.unmarshalNCustomerChanges2githubᚗcomᚋbaadjisᚋtransferserviceᚋgraphᚋmodelᚐCustomerChanges(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"input\"] = arg1\n\treturn args, nil\n}", "func (g *Generator) collectAndGenerate(typeName string, genFn GeneratorFunc) {\n\tfields := make([]Field, 0, 100)\n\timports := make([]Import, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeName = typeName\n\t\tfile.fields = 
nil\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tfields = append(fields, file.fields...)\n\t\t\timports = append(imports, file.imports...)\n\t\t}\n\t}\n\n\tgenFn(typeName, fields, imports)\n\n}" ]
[ "0.6473212", "0.5893512", "0.52816784", "0.5240693", "0.5222666", "0.51920587", "0.50385666", "0.5037734", "0.50329053", "0.5010324", "0.50005144", "0.49702114", "0.4968135", "0.47936794", "0.47646612", "0.47604808", "0.47278732", "0.47187448", "0.4692279", "0.4692153", "0.46814573", "0.46635288", "0.4658278", "0.46540728", "0.46322483", "0.46279842", "0.46115977", "0.45967904", "0.45956135", "0.4594253", "0.45753404", "0.452807", "0.45275617", "0.45247382", "0.45232978", "0.45211542", "0.45009685", "0.44962022", "0.4494873", "0.44909754", "0.44702196", "0.44440544", "0.44364893", "0.44225812", "0.43959165", "0.43953192", "0.43909836", "0.43868423", "0.43827596", "0.4379306", "0.43528584", "0.4345264", "0.43422312", "0.4338586", "0.43341056", "0.43313193", "0.43285048", "0.4312617", "0.43111342", "0.43001252", "0.4299968", "0.4292014", "0.42913413", "0.42822087", "0.42758998", "0.42689538", "0.42689186", "0.4262163", "0.42609718", "0.4259967", "0.4243299", "0.4239233", "0.42385328", "0.42328787", "0.42303622", "0.42078492", "0.4206437", "0.41984665", "0.41905227", "0.41859326", "0.41836825", "0.4179301", "0.41790164", "0.41750932", "0.4172565", "0.41660464", "0.41658062", "0.41639036", "0.41604862", "0.4149038", "0.4137513", "0.41307592", "0.41253623", "0.41241714", "0.41224602", "0.41215736", "0.41208646", "0.41187602", "0.41174406", "0.4117139" ]
0.7882007
0
genArgument generates argument config for given AST
func genArgument(arg *ast.InputValueDefinition) *jen.Statement { // // Generate config for argument // // == Example input SDL // // type Dog { // name( // "style is stylish" // style: NameComponentsStyle = SHORT, // ): String! // } // // == Example output // // &ArgumentConfig{ // Type: graphql.NonNull(graphql.String), // DefaultValue: "SHORT", // TODO: ??? // Description: "style is stylish", // } // return jen.Op("&").Qual(defsPkg, "ArgumentConfig").Values(jen.Dict{ jen.Id("DefaultValue"): genValue(arg.DefaultValue), jen.Id("Description"): genDescription(arg), jen.Id("Type"): genInputTypeReference(arg.Type), }) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func genArguments(args []*ast.InputValueDefinition) *jen.Statement {\n\t//\n\t// Generate config for arguments\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(\n\t// \"style is stylish\"\n\t// style: NameComponentsStyle = SHORT,\n\t// ): String!\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// FieldConfigArgument{\n\t// \"style\": &ArgumentConfig{ ... }\n\t// },\n\t//\n\treturn jen.Qual(defsPkg, \"FieldConfigArgument\").Values(\n\t\tjen.DictFunc(func(d jen.Dict) {\n\t\t\tfor _, arg := range args {\n\t\t\t\td[jen.Lit(arg.Name.Value)] = genArgument(arg)\n\t\t\t}\n\t\t}),\n\t)\n}", "func BindArg(obj interface{}, tags ...string) FieldConfigArgument {\n\tv := reflect.Indirect(reflect.ValueOf(obj))\n\tvar config = make(FieldConfigArgument)\n\tfor i := 0; i < v.NumField(); i++ {\n\t\tfield := v.Type().Field(i)\n\n\t\tmytag := extractTag(field.Tag)\n\t\tif inArray(tags, mytag) {\n\t\t\tconfig[mytag] = &ArgumentConfig{\n\t\t\t\tType: getGraphType(field.Type),\n\t\t\t}\n\t\t}\n\t}\n\treturn config\n}", "func parseArgument(p *parser) (*ast.Argument, error) {\n\tvar label string\n\tvar labelStartPos, labelEndPos ast.Position\n\n\texpr, err := parseExpression(p, lowestBindingPower)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.skipSpaceAndComments()\n\n\t// If a colon follows the expression, the expression was our label.\n\tif p.current.Is(lexer.TokenColon) {\n\t\tlabelEndPos = p.current.EndPos\n\n\t\tidentifier, ok := expr.(*ast.IdentifierExpression)\n\t\tif !ok {\n\t\t\treturn nil, p.syntaxError(\n\t\t\t\t\"expected identifier for label, got %s\",\n\t\t\t\texpr,\n\t\t\t)\n\t\t}\n\t\tlabel = identifier.Identifier.Identifier\n\t\tlabelStartPos = expr.StartPosition()\n\n\t\t// Skip the identifier\n\t\tp.nextSemanticToken()\n\n\t\texpr, err = parseExpression(p, lowestBindingPower)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif len(label) > 0 {\n\t\treturn 
ast.NewArgument(\n\t\t\tp.memoryGauge,\n\t\t\tlabel,\n\t\t\t&labelStartPos,\n\t\t\t&labelEndPos,\n\t\t\texpr,\n\t\t), nil\n\t}\n\treturn ast.NewUnlabeledArgument(p.memoryGauge, expr), nil\n}", "func NewArgument(meta ScriptMetaData, node *node32, value Value) Argument {\n\treturn &argument{astNode: astNode{meta: meta, node: node}, value: value}\n}", "func (ArgumentFalse) argumentNode() {}", "func (p *Parser) buildArg(argDef Value, argType reflect.Type, index int, args *[]reflect.Value) error {\n\tswitch argType.Name() {\n\tcase \"Setter\":\n\t\tfallthrough\n\tcase \"GetSetter\":\n\t\targ, err := p.pathParser(argDef.Path)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v %w\", index, err)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(arg))\n\tcase \"Getter\":\n\t\targ, err := p.newGetter(argDef)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v %w\", index, err)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(arg))\n\tcase \"Enum\":\n\t\targ, err := p.enumParser(argDef.Enum)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v must be an Enum\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(*arg))\n\tcase \"string\":\n\t\tif argDef.String == nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v, must be an string\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(*argDef.String))\n\tcase \"float64\":\n\t\tif argDef.Float == nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v, must be an float\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(*argDef.Float))\n\tcase \"int64\":\n\t\tif argDef.Int == nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v, must be an int\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(*argDef.Int))\n\tcase \"bool\":\n\t\tif argDef.Bool == nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v, must be a bool\", index)\n\t\t}\n\t\t*args = append(*args, 
reflect.ValueOf(bool(*argDef.Bool)))\n\t}\n\treturn nil\n}", "func (s *BasePlSqlParserListener) EnterArgument(ctx *ArgumentContext) {}", "func (app ApplicationArguments) Argument(name, description string, shorts ...rune) *kingpin.FlagClause {\n\treturn app.add(name, description, false, shorts...)\n}", "func argInit() args {\n\n\tvar a args\n\tflag.Float64Var(&a.x1, \"x1\", -2.0, \"left position of real axis\")\n\tflag.Float64Var(&a.x2, \"x2\", 1.0, \"right position of real axis\")\n\tflag.Float64Var(&a.y1, \"y1\", -1.5, \"down position of imaginary axis\")\n\tflag.Float64Var(&a.y2, \"y2\", 1.5, \"up position of imaginary axis\")\n\tflag.Float64Var(&a.threshold, \"th\", 4.0, \"squared threshold of the function\")\n\tflag.IntVar(&a.w, \"w\", 1000, \"width in pixels of the image\")\n\tflag.IntVar(&a.h, \"h\", 1000, \"height in pixels of the image\")\n\tflag.IntVar(&a.nIter, \"ni\", 100, \"maximum number of iterations for pixel\")\n\tflag.IntVar(&a.nRoutines, \"nr\", 4, \"number of go routines to be used\")\n\tflag.StringVar(&a.path, \"p\", \"./\", \"path to the generated png image\")\n\n\tflag.Parse()\n\treturn a\n}", "func GenAST(program []Statement) AST {\n\tvar ast AST\n\tfor _, stmt := range program {\n\t\tv, err := ParseVerb(stmt)\n\t\tif err != nil { //TODO\n\t\t\t//panic(ParserError{stmtIndex: stmtIndex, tok: stmt[0], message: fmt.Sprintf(\"First token in statement must be a word, was %s\", stmt[0].tokType.toString())})\n\t\t\tpanic(err)\n\t\t}\n\t\tast = append(ast, v)\n\t}\n\treturn ast\n}", "func GenerationArgsFor(category, pathToExecutable, fuzzerName string, isMaster bool) GenerationArgs {\n\tf, found := fuzzers[category]\n\tif !found {\n\t\tsklog.Errorf(\"Unknown fuzz category %q\", category)\n\t\treturn nil\n\t}\n\tmasterFlag := \"-M\"\n\tif !isMaster {\n\t\tmasterFlag = \"-S\"\n\t}\n\tseedPath := filepath.Join(config.Generator.FuzzSamples, category)\n\toutputPath := filepath.Join(config.Generator.AflOutputPath, category)\n\n\tcmd := 
append([]string{\"-i\", seedPath, \"-o\", outputPath, \"-m\", \"5000\", masterFlag, fuzzerName, \"--\", pathToExecutable}, f.ArgsAfterExecutable...)\n\n\treturn append(cmd, \"@@\")\n}", "func (ctx *argComplContext) generate(env *complEnv, ch chan<- rawCandidate) error {\n\treturn completeArg(ctx.words, env.evaler, env.argCompleter, ch)\n}", "func buildArg(mt *methodType, d json.RawMessage) (reflect.Value, error) {\n\tvar argv reflect.Value\n\targIsValue := false // if true, need to indirect before calling.\n\tif mt.ArgType.Kind() == reflect.Ptr {\n\t\targv = reflect.New(mt.ArgType.Elem())\n\t} else {\n\t\targv = reflect.New(mt.ArgType)\n\t\targIsValue = true\n\t}\n\terr := json.Unmarshal(d, argv.Interface())\n\tif err != nil {\n\t\treturn reflect.Value{}, err\n\t}\n\tif argIsValue {\n\t\targv = argv.Elem()\n\t}\n\treturn argv, nil\n}", "func GenerateValidArg(datatypeName string) string {\n\tswitch datatypeName {\n\tcase field.TypeString:\n\t\treturn \"xyz\"\n\tcase field.TypeUint, field.TypeInt:\n\t\treturn \"111\"\n\tcase field.TypeBool:\n\t\treturn valueFalse\n\tcase field.TypeCustom:\n\t\treturn valueNull\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown type %s\", datatypeName))\n\t}\n}", "func NewDynamicArgument(value Value) Argument {\n\treturn &argument{value: value}\n}", "func Agen(n *Node, res *Node)", "func genArgs(optionMap map[string]string) []string {\n\toptions := []string{}\n\tfor k, v := range optionMap {\n\t\tif v != \"\" {\n\t\t\tk = fmt.Sprintf(\"%s=%s\", k, v)\n\t\t}\n\t\toptions = append(options, k)\n\t}\n\treturn options\n}", "func decodeArg(b *hcl.Block) (*Arg, errors.Error) {\n\targ := new(Arg)\n\targ.name = b.Labels[0]\n\tbc, d := b.Body.Content(schemaArg)\n\tif err := errors.EvalDiagnostics(d); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := arg.populateArgAttributes(bc.Attributes); err != nil {\n\t\treturn nil, err\n\t}\n\treturn arg, nil\n}", "func (s *BasePlSqlParserListener) EnterFunction_argument(ctx *Function_argumentContext) {}", 
"func (node Argument) Format(buf *TrackedBuffer) {\n\tbuf.WriteArg(string(node))\n}", "func (s *BasemumpsListener) EnterArg(ctx *ArgContext) {}", "func (c *compileContext) makeArgumentResolver(typ schema.InputableType) (argumentResolver, error) {\n\tswitch t := typ.(type) {\n\tcase *schema.InputObjectType:\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\treturn t.Decode(ctx, v)\n\t\t}, nil\n\tcase *schema.ListType:\n\t\telementResolver, err := c.makeArgumentResolver(t.Unwrap().(schema.InputableType))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\tif v == nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\n\t\t\tlistCreator := t.Unwrap().(schema.InputableType).InputListCreator()\n\n\t\t\tif av, ok := v.(schema.LiteralArray); ok {\n\t\t\t\treturn listCreator.NewList(len(av), func(i int) (interface{}, error) {\n\t\t\t\t\treturn elementResolver(ctx, av[i])\n\t\t\t\t})\n\t\t\t}\n\n\t\t\t// if we get a non-list value we have to wrap into a single element\n\t\t\t// list.\n\t\t\t// See https://facebook.github.io/graphql/June2018/#sec-Type-System.List\n\t\t\tresultElement, err := elementResolver(ctx, v)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn listCreator.NewList(1, func(i int) (interface{}, error) {\n\t\t\t\treturn resultElement, nil\n\t\t\t})\n\t\t}, nil\n\n\tcase *schema.NotNilType:\n\t\telementResolver, err := c.makeArgumentResolver(t.Unwrap().(schema.InputableType))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\tif v == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Required value was not supplied\")\n\t\t\t}\n\t\t\treturn elementResolver(ctx, v)\n\t\t}, nil\n\tcase *schema.ScalarType:\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\treturn t.Decode(ctx, v)\n\t\t}, nil\n\tcase 
*schema.EnumType:\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\tif v == nil {\n\t\t\t\treturn t.Decode(ctx, v)\n\t\t\t}\n\t\t\tval, ok := v.(schema.LiteralString)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"Expected string, got %v\", v)\n\t\t\t}\n\t\t\treturn t.Decode(ctx, val)\n\t\t}, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Invalid type for input argument: %v\", typ)\n\t}\n}", "func genConfig() ([]byte, error) {\n\t// Using genflags.getConfig() instead of config.New() because\n\t// it will include any defaults we have on the command line such\n\t// as default plugin selection. We didn't want to wire this into\n\t// the `config` package, but it will be a default value the CLI\n\t// users expect.\n\tc := genflags.resolveConfig()\n\tb, err := json.Marshal(c)\n\treturn b, errors.Wrap(err, \"unable to marshal configuration\")\n}", "func finishReadingArgument(ctx *parsingCtx) *ParseError {\n\tif ctx.scope == READING_WORD {\n\t\tctx.scope = READING_ARGUMENTS\n\t\tif statement := ctx.head.Last(); statement != nil {\n\t\t\tstatement.AddArgument(&WordArgument{ctx.word})\n\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif ctx.scope == READING_NUMBER {\n\t\tctx.scope = READING_ARGUMENTS\n\t\tif statement := ctx.head.Last(); statement != nil {\n\t\t\tnumber, _ := strconv.Atoi(ctx.number)\n\t\t\tstatement.AddArgument(&NumberArgument{number})\n\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn nil\n}", "func tokenToFormulaArg(token efp.Token) formulaArg {\n\tswitch token.TSubType {\n\tcase efp.TokenSubTypeLogical:\n\t\treturn newBoolFormulaArg(strings.EqualFold(token.TValue, \"TRUE\"))\n\tcase efp.TokenSubTypeNumber:\n\t\tnum, _ := strconv.ParseFloat(token.TValue, 64)\n\t\treturn newNumberFormulaArg(num)\n\tdefault:\n\t\treturn newStringFormulaArg(token.TValue)\n\t}\n}", "func Argument(name string, argType ArgumentType) *RequiredArgumentBuilder {\n\treturn &RequiredArgumentBuilder{Name: name, Type: argType}\n}", "func castArg(prefix string, f 
field.Field, argIndex int) string {\n\tswitch f.DatatypeName {\n\tcase field.TypeString:\n\t\treturn fmt.Sprintf(\"%s%s := args[%d]\", prefix, f.Name.UpperCamel, argIndex)\n\tcase field.TypeUint, field.TypeInt, field.TypeBool:\n\t\treturn fmt.Sprintf(`%s%s, err := cast.To%sE(args[%d])\n if err != nil {\n return err\n }`,\n\t\t\tprefix, f.Name.UpperCamel, strings.Title(f.Datatype), argIndex)\n\tcase field.TypeCustom:\n\t\treturn fmt.Sprintf(`%[1]v%[2]v := new(types.%[3]v)\n\t\t\terr = json.Unmarshal([]byte(args[%[4]v]), %[1]v%[2]v)\n \t\tif err != nil {\n return err\n }`, prefix, f.Name.UpperCamel, f.Datatype, argIndex)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown type %s\", f.DatatypeName))\n\t}\n}", "func (s *BaseSyslParserListener) EnterFunc_arg(ctx *Func_argContext) {}", "func collectArguments() Arguments {\n\tendpoint := config.Config.ChooseEndpoint(flags.APIEndpoint)\n\ttoken := config.Config.ChooseToken(endpoint, flags.Token)\n\tscheme := config.Config.ChooseScheme(endpoint, flags.Token)\n\treturn Arguments{\n\t\tapiEndpoint: endpoint,\n\t\ttoken: token,\n\t\tscheme: scheme,\n\t}\n}", "func (opts *HMACDeriveKeyOpts) Argument() []byte {\n\treturn opts.Arg\n}", "func (opts *HMACDeriveKeyOpts) Argument() []byte {\n\treturn opts.Arg\n}", "func argument(n int) string {\n\tflag.Parse()\n\treturn flag.Arg(n)\n}", "func (n *Argument) Walk(v walker.Visitor) {\n\tif !v.EnterNode(n) {\n\t\treturn\n\t}\n\n\tif n.Expr != nil {\n\t\tn.Expr.Walk(v)\n\t}\n\n\tv.LeaveNode(n)\n}", "func mmcArgGenerator() string {\r\n\tmmcArgs:= make([] string,1000)\t\r\n\tfor i:=0;i<len(mmcArgs);i++{\r\n\t\tmmcArgs[i] = strconv.Itoa(i+1)\r\n\t}\r\n\treturn joinMmcArgs(mmcArgs)\r\n}", "func (p *Parser) parseProcessArg() (AstProcessArg, error) {\n if (p.Scanner.Token == scanner.String) {\n sval, err := strconv.Unquote(p.Scanner.TokenText)\n if err != nil {\n return nil, err\n }\n\n p.Scanner.Scan()\n return &AstLiteralProcessArg{StringDatum(sval)}, nil\n } else {\n return nil, 
p.Error(\"Unreognised process argument type\")\n }\n}", "func (in *Argument) DeepCopy() *Argument {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Argument)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func starlarkValueToArg(v starlark.Value) (Arg, error) {\n\tswitch x := v.(type) {\n\tcase Arg:\n\t\treturn x, nil\n\tcase starlark.String:\n\t\treturn String(x), nil\n\tdefault:\n\t\treturn nil, errors.Errorf(\n\t\t\t\"Cannot convert %s into a target argument\",\n\t\t\tv.Type(),\n\t\t)\n\t}\n}", "func (s *BaseMySqlParserListener) EnterFunctionArg(ctx *FunctionArgContext) {}", "func Marshal(data *parser.Result, document *string) (err error) {\n\n\targuments := \"\"\n\ttmp := []string{}\n\n\tfor _, node := range data.AST.Children {\n\n\t\tinstruction := strings.ToUpper(node.Value)\n\t\ttab := strings.Repeat(\" \", len(node.Value)+1)\n\n\t\tswitch instruction {\n\t\tcase \"FROM\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"LABEL\":\n\t\t\targuments = KeyValueForm(node, tab)\n\t\tcase \"MAINTAINER\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"EXPOSE\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"ADD\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"ONBUILD\":\n\t\t\tfor _, n := range node.Next.Children {\n\t\t\t\targuments = strings.ToUpper(n.Value) + \" \" + DefaultForm(n)\n\t\t\t}\n\t\tcase \"STOPSIGNAL\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"HEALTHCHECK\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"ARG\":\n\t\t\targuments = KeyValueForm(node, tab)\n\t\tcase \"COPY\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"ENV\":\n\t\t\targuments = KeyValueForm(node, tab)\n\t\tcase \"RUN\":\n\t\t\targuments = ShellForm(node)\n\t\t\t//arguments = ExecForm(node)\n\t\tcase \"CMD\":\n\t\t\targuments = ExecForm(node)\n\t\t\t//arguments = ShellForm(node)\n\t\tcase \"ENTRYPOINT\":\n\t\t\targuments = ExecForm(node)\n\t\t\t//arguments = ShellForm(node)\n\t\tcase \"SHELL\":\n\t\t\targuments = ExecForm(node)\n\t\t\t//arguments = 
ShellForm(node)\n\t\tcase \"VOLUME\":\n\t\t\t//arguments = ExecForm(node)\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"USER\":\n\t\t\targuments = DefaultForm(node)\n\n\t\tcase \"WORKDIR\":\n\t\t\targuments = DefaultForm(node)\n\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"Instruction %s not supported\", instruction)\n\t\t}\n\n\t\tif len(arguments) > 0 {\n\t\t\ttmp = append(tmp, fmt.Sprintf(\"%s %s\", instruction, arguments))\n\t\t} else {\n\t\t\ttmp = append(tmp, instruction)\n\t\t}\n\n\t}\n\n\t*document = strings.Join(tmp, \"\\n\")\n\n\treturn err\n}", "func ToArg(name, value string) string {\n\treturn name + \"=\" + value\n}", "func buildIPArgument(parameter string, environmentVariable string, imageType FDBImageType, sampleAddresses []fdbv1beta2.ProcessAddress) []monitorapi.Argument {\n\tvar leftIPWrap string\n\tvar rightIPWrap string\n\tif imageType == FDBImageTypeUnified {\n\t\tleftIPWrap = \"[\"\n\t\trightIPWrap = \"]\"\n\t} else {\n\t\tleftIPWrap = \"\"\n\t\trightIPWrap = \"\"\n\t}\n\targuments := []monitorapi.Argument{{Value: fmt.Sprintf(\"--%s=%s\", parameter, leftIPWrap)}}\n\n\tfor indexOfAddress, address := range sampleAddresses {\n\t\tif indexOfAddress != 0 {\n\t\t\targuments = append(arguments, monitorapi.Argument{Value: fmt.Sprintf(\",%s\", leftIPWrap)})\n\t\t}\n\n\t\targuments = append(arguments,\n\t\t\tmonitorapi.Argument{ArgumentType: monitorapi.EnvironmentArgumentType, Source: environmentVariable},\n\t\t\tmonitorapi.Argument{Value: fmt.Sprintf(\"%s:\", rightIPWrap)},\n\t\t\tmonitorapi.Argument{ArgumentType: monitorapi.ProcessNumberArgumentType, Offset: address.Port - 2, Multiplier: 2},\n\t\t)\n\n\t\tflags := address.SortedFlags()\n\n\t\tif len(flags) > 0 {\n\t\t\targuments = append(arguments, monitorapi.Argument{Value: fmt.Sprintf(\":%s\", strings.Join(flags, \":\"))})\n\t\t}\n\t}\n\treturn arguments\n}", "func (a *Arguments) PutArgument(expr Expression) {\n\ta.exprs = append(a.exprs, expr)\n}", "func (n DependencyNode) Codegen(prog *Program) 
(value.Value, error) { return nil, nil }", "func initArgs(){\n\t//master -config ./master.json\n\tflag.StringVar(&confFile, \"config\", \"./master.json\", \"specify master.json as config file\")\n\tflag.Parse()\n}", "func (s *BaselimboListener) EnterFormal_arg(ctx *Formal_argContext) {}", "func NewArgumentScanner(args []string, options ...ArgOption) Scanner {\n\t// Process the options\n\topts := &argOptions{\n\t\tjoiner: \" \",\n\t\topts: []FileOption{LineEndings(NoLineStyle)},\n\t}\n\tfor _, opt := range options {\n\t\topt.argApply(opts)\n\t}\n\n\t// Construct the joiner scanner\n\tloc := ArgLocation{\n\t\tB: ArgPos{I: 0, C: 1},\n\t\tE: ArgPos{I: 0, C: 1},\n\t}\n\tjoiner := NewMemoizingScanner(NewFileScanner(bytes.NewBufferString(opts.joiner), loc, opts.opts...))\n\n\t// Construct a list of scanners\n\tstreams := []Scanner{}\n\tfor i, arg := range args {\n\t\tif i != 0 {\n\t\t\tstreams = append(streams, joiner)\n\t\t}\n\n\t\tloc := ArgLocation{\n\t\t\tB: ArgPos{I: i + 1, C: 1},\n\t\t\tE: ArgPos{I: i + 1, C: 1},\n\t\t}\n\t\tstreams = append(streams, NewFileScanner(bytes.NewBufferString(arg), loc, opts.opts...))\n\t}\n\n\treturn NewChainingScanner(streams)\n}", "func (params PostParams) Generate(args []string, argConfigs []Arg) PostParams {\n\tvar md5hash string\n\tfor index, arg := range args {\n\t\tDebugf(\"Index and args %d %s %v\", index, arg, argConfigs)\n\n\t\tDebugf(\"PostParams Setting %s to %s\", strings.Title(argConfigs[index].Name), arg)\n\t\tif argConfigs[index].Type == \"object\" {\n\t\t\tDebugln(\"Using object parser\")\n\t\t\tvar jsonArg map[string]interface{}\n\t\t\terr := json.Unmarshal([]byte(arg), &jsonArg)\n\t\t\tif err != nil {\n\t\t\t\tpanic(fmt.Sprintf(\"Error parsing json from %s - %s\", argConfigs[index].Name, err.Error()))\n\t\t\t}\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).Set(reflect.ValueOf(jsonArg))\n\t\t} else if argConfigs[index].Type == \"array\" {\n\t\t\tDebugln(\"Using array 
parser\")\n\t\t\tvar jsonArray []interface{}\n\t\t\terr := json.Unmarshal([]byte(arg), &jsonArray)\n\t\t\tif err != nil {\n\t\t\t\tpanic(fmt.Sprintf(\"Error parsing json from %s - %s\", argConfigs[index].Name, err.Error()))\n\t\t\t}\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).Set(reflect.ValueOf(jsonArray))\n\t\t} else if argConfigs[index].Type == \"bool\" {\n\t\t\tDebugf(\"Using bool parser for (%s) = (%s)\", argConfigs[index].Name, arg)\n\t\t\tif arg == \"\" {\n\t\t\t\tDebugf(\"Missing arg value (%s) using default (%s)\", argConfigs[index].Name, argConfigs[index].Value)\n\t\t\t\targ = argConfigs[index].Value\n\t\t\t}\n\t\t\tboolArg, _ := strconv.ParseBool(arg)\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).SetBool(boolArg)\n\t\t} else {\n\t\t\tif argConfigs[index].Type == \"url\" {\n\t\t\t\tDebugf(\"Handling url %s\", arg)\n\t\t\t\ta, err := ComputeMd5(arg)\n\t\t\t\tmd5hash = a\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"Failed to generate MD5 from url %s. Make sure the file exists and permissions are correct. 
(%s)\", arg, err)\n\t\t\t\t\tExit(1)\n\t\t\t\t}\n\t\t\t\targ = ConvertFileToURL(arg)\n\t\t\t}\n\t\t\tDebugf(\"Using string parser for (%s) = (%s)\", argConfigs[index].Name, arg)\n\t\t\tif arg == \"\" {\n\t\t\t\tDebugf(\"Missing arg value (%s) using default (%s)\", argConfigs[index].Name, argConfigs[index].Value)\n\t\t\t\targ = argConfigs[index].Value\n\t\t\t}\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).SetString(arg)\n\t\t}\n\n\t\tDebugf(\"Finished %s\", arg)\n\t}\n\tif len(md5hash) > 0 {\n\t\tparams.Checksum = md5hash\n\t}\n\treturn params\n}", "func Int16Arg(register Register, name string, options ...ArgOptionApplyer) *int16 {\n\tp := new(int16)\n\t_ = Int16ArgVar(register, p, name, options...)\n\treturn p\n}", "func (c *Command) addArgument(arg interface{}) {\n\tc.Arguments = append(c.Arguments, arg)\n}", "func (p *Planner) configureFieldArgumentSource(upstreamFieldRef, downstreamFieldRef int, argumentName string, sourcePath []string) {\n\tfieldArgument, ok := p.visitor.Operation.FieldArgument(downstreamFieldRef, []byte(argumentName))\n\tif !ok {\n\t\treturn\n\t}\n\tvalue := p.visitor.Operation.ArgumentValue(fieldArgument)\n\tif value.Kind != ast.ValueKindVariable {\n\t\tp.applyInlineFieldArgument(upstreamFieldRef, downstreamFieldRef, argumentName, sourcePath)\n\t\treturn\n\t}\n\tvariableName := p.visitor.Operation.VariableValueNameBytes(value.Ref)\n\tvariableNameStr := p.visitor.Operation.VariableValueNameString(value.Ref)\n\n\tcontextVariable := &resolve.ContextVariable{\n\t\tPath: []string{variableNameStr},\n\t\tRenderAsGraphQLValue: true,\n\t}\n\tcontextVariable.SetJsonValueType(p.visitor.Definition, p.visitor.Definition, p.argTypeRef)\n\n\tcontextVariableName, exists := p.variables.AddVariable(contextVariable)\n\tvariableValueRef, argRef := p.upstreamOperation.AddVariableValueArgument([]byte(argumentName), variableName) // add the argument to the field, but don't redefine 
it\n\tp.upstreamOperation.AddArgumentToField(upstreamFieldRef, argRef)\n\n\tif exists { // if the variable exists we don't have to put it onto the variables declaration again, skip\n\t\treturn\n\t}\n\n\tfor _, i := range p.visitor.Operation.OperationDefinitions[p.visitor.Walker.Ancestors[0].Ref].VariableDefinitions.Refs {\n\t\tref := p.visitor.Operation.VariableDefinitions[i].VariableValue.Ref\n\t\tif !p.visitor.Operation.VariableValueNameBytes(ref).Equals(variableName) {\n\t\t\tcontinue\n\t\t}\n\t\timportedType := p.visitor.Importer.ImportType(p.visitor.Operation.VariableDefinitions[i].Type, p.visitor.Operation, p.upstreamOperation)\n\t\tp.upstreamOperation.AddVariableDefinitionToOperationDefinition(p.nodes[0].Ref, variableValueRef, importedType)\n\t}\n\n\tp.upstreamVariables, _ = sjson.SetRawBytes(p.upstreamVariables, variableNameStr, []byte(contextVariableName))\n}", "func collectArguments() Arguments {\n\tendpoint := config.Config.ChooseEndpoint(flags.APIEndpoint)\n\ttoken := config.Config.ChooseToken(endpoint, flags.Token)\n\tscheme := config.Config.ChooseScheme(endpoint, flags.Token)\n\n\treturn Arguments{\n\t\tapiEndpoint: endpoint,\n\t\tauthToken: token,\n\t\tscheme: scheme,\n\t\tclusterNameOrID: \"\",\n\t\tuserProvidedToken: flags.Token,\n\t\tverbose: flags.Verbose,\n\t}\n}", "func (s *BaseConcertoListener) EnterFuncArg(ctx *FuncArgContext) {}", "func NewArgumentWithDots(meta ScriptMetaData, nodeBegin *node32, nodeEnd *node32, value Value) Argument {\n\treturn &argument{astNode: astNode{meta: meta, node: nodeBegin, endNode: nodeEnd}, value: value}\n}", "func (s *BaseGraffleParserListener) EnterBuilt_func_input(ctx *Built_func_inputContext) {}", "func GenerateUniqueArg(datatypeName string) string {\n\tswitch datatypeName {\n\tcase field.TypeString:\n\t\treturn \"strconv.Itoa(i)\"\n\tcase field.TypeUint:\n\t\treturn \"uint64(i)\"\n\tcase field.TypeInt:\n\t\treturn \"int32(i)\"\n\tcase field.TypeBool:\n\t\treturn valueFalse\n\tcase 
field.TypeCustom:\n\t\treturn valueNull\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown type %s\", datatypeName))\n\t}\n}", "func (e PackageExpr) Arg() rel.Expr {\n\treturn e.a\n}", "func (fi *funcInfo) emitVararg(line, a, n int) {\r\n\tfi.emitABC(line, OP_VARARG, a, n+1, 0)\r\n}", "func IntArg(register Register, name string, options ...ArgOptionApplyer) *int {\n\tp := new(int)\n\t_ = IntArgVar(register, p, name, options...)\n\treturn p\n}", "func (ec *executionContext) field_Mutation_addPlantToNursery_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 model.NewNurseryAddition\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\targ0, err = ec.unmarshalNNewNurseryAddition2githubᚗcomᚋwonesyᚋplantparenthoodᚋgraphᚋmodelᚐNewNurseryAddition(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"input\"] = arg0\n\treturn args, nil\n}", "func (ec *executionContext) field_Mutation_createAgent_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 models.CreateAgentInput\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\tctx := graphql.WithPathContext(ctx, graphql.NewPathWithField(\"input\"))\n\t\targ0, err = ec.unmarshalNCreateAgentInput2golangᚑmongoᚑgraphqlᚑ003ᚋinternalᚋmodelsᚐCreateAgentInput(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"input\"] = arg0\n\treturn args, nil\n}", "func printInferredArguments(out *output.Output) {\n\tif out == nil {\n\t\treturn\n\t}\n\n\tblock := out.Block(output.Line(output.EmojiLightbulb, output.StyleItalic, \"Inferred arguments\"))\n\tblock.Writef(\"repo: %s\", codeintelUploadFlags.repo)\n\tblock.Writef(\"commit: %s\", codeintelUploadFlags.commit)\n\tblock.Writef(\"root: %s\", codeintelUploadFlags.root)\n\tblock.Writef(\"file: %s\", codeintelUploadFlags.file)\n\tblock.Writef(\"indexer: %s\", 
codeintelUploadFlags.indexer)\n\tblock.Writef(\"indexerVersion: %s\", codeintelUploadFlags.indexerVersion)\n\tblock.Close()\n}", "func generateCall(generator *Generator, node parser.Node) string {\n\tvar identifier string\n\n\t// Check if it is a built-in function or not\n\tif strings.Contains(node.Value, \"|\") {\n\t\t// Get the function identifier by spliting the value by the pipe\n\t\tidentifier = strings.Split(node.Value, \"|\")[1]\n\n\t\tcheckCall(generator, node)\n\n\t\t// Add import to the generator\n\t\taddCallImport(\n\t\t\tgenerator,\n\t\t\tnode.Value,\n\t\t)\n\t} else {\n\t\tidentifier = node.Value\n\t}\n\n\t// Translate the params\n\tparams := generateParams(generator, node.Params)\n\n\t// Link all the translations together\n\treturn fmt.Sprintf(\n\t\tcCall,\n\t\tidentifier,\n\t\tstrings.Join(params, \",\"),\n\t)\n}", "func (opts *HMACTruncated256AESDeriveKeyOpts) Argument() []byte {\n\treturn opts.Arg\n}", "func (opts *HMACTruncated256AESDeriveKeyOpts) Argument() []byte {\n\treturn opts.Arg\n}", "func (n *CommandNode) Args() []Expr { return n.args }", "func ArgumentCustomType(name string, values ...Argument) Argument {\n\treturn Argument{name, argumentSlice(values)}\n}", "func genField(field *ast.FieldDefinition) *jen.Statement {\n\t//\n\t// Generate config for field\n\t//\n\t// == Example input SDL\n\t//\n\t// interface Pet {\n\t// \"name of the pet\"\n\t// name(style: NameComponentsStyle = SHORT): String!\n\t// \"\"\"\n\t// givenName of the pet ★\n\t// \"\"\"\n\t// givenName: String @deprecated(reason: \"No longer supported; please use name field.\")\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// &graphql.Field{\n\t// Name: \"name\",\n\t// Type: graphql.NonNull(graphql.String),\n\t// Description: \"name of the pet\",\n\t// DeprecationReason: \"\",\n\t// Args: FieldConfigArgument{ ... 
},\n\t// }\n\t//\n\t// &graphql.Field{\n\t// Name: \"givenName\",\n\t// Type: graphql.String,\n\t// Description: \"givenName of the pet\",\n\t// DeprecationReason: \"No longer supported; please use name field.\",\n\t// Args: FieldConfigArgument{ ... },\n\t// }\n\t//\n\treturn jen.Op(\"&\").Qual(defsPkg, \"Field\").Values(jen.Dict{\n\t\tjen.Id(\"Args\"): genArguments(field.Arguments),\n\t\tjen.Id(\"DeprecationReason\"): genDeprecationReason(field.Directives),\n\t\tjen.Id(\"Description\"): genDescription(field),\n\t\tjen.Id(\"Name\"): jen.Lit(field.Name.Value),\n\t\tjen.Id(\"Type\"): genOutputTypeReference(field.Type),\n\t})\n}", "func (gen declCodeGen) Generate(module ModuleDefinition, writer io.Writer) error {\n\tif module.TagDefault == TAGS_AUTOMATIC {\n\t\t// See x.680, section 12.3. It implies certain transformations to component and alternative lists that are not implemented.\n\t\treturn errors.New(\"AUTOMATIC tagged modules are not supported\")\n\t}\n\tctx := moduleContext{\n\t\textensibilityImplied: module.ExtensibilityImplied,\n\t\ttagDefault: module.TagDefault,\n\t\tlookupContext: module.ModuleBody,\n\t\tparams: gen.Params,\n\t}\n\tmoduleName := goast.NewIdent(goifyName(module.ModuleIdentifier.Reference))\n\tif len(gen.Params.Package) > 0 {\n\t\tmoduleName = goast.NewIdent(gen.Params.Package)\n\t}\n\tast := &goast.File{\n\t\tName: moduleName,\n\t\tDecls: ctx.generateDeclarations(module),\n\t}\n\tif len(ctx.errors) != 0 {\n\t\tmsg := \"errors generating Go AST from module: \\n\"\n\t\tfor _, err := range ctx.errors {\n\t\t\tmsg += \" \" + err.Error() + \"\\n\"\n\t\t}\n\t\treturn errors.New(msg)\n\t}\n\timportDecls := make([]goast.Decl, 0)\n\tfor _, moduleName := range ctx.requiredModules {\n\t\tmodulePath := &goast.BasicLit{Kind: gotoken.STRING, Value: fmt.Sprintf(\"\\\"%v\\\"\", moduleName)}\n\t\tspecs := []goast.Spec{&goast.ImportSpec{Path: modulePath}}\n\t\timportDecls = append(importDecls, &goast.GenDecl{Tok: gotoken.IMPORT, Specs: 
specs})\n\t}\n\tast.Decls = append(importDecls, ast.Decls...)\n\treturn goprint.Fprint(writer, gotoken.NewFileSet(), ast)\n}", "func (n *CommandNode) AddArg(a Expr) {\n\tn.args = append(n.args, a)\n}", "func (node *Argument) formatFast(buf *TrackedBuffer) {\n\tbuf.WriteArg(\":\", node.Name)\n\tif node.Type >= 0 {\n\t\t// For bind variables that are statically typed, emit their type as an adjacent comment.\n\t\t// This comment will be ignored by older versions of Vitess (and by MySQL) but will provide\n\t\t// type safety when using the query as a cache key.\n\t\tbuf.WriteString(\" /* \")\n\t\tbuf.WriteString(node.Type.String())\n\t\tbuf.WriteString(\" */\")\n\t}\n}", "func Struct(pkgName, strctName, argName string) *CXArgument {\n\tpkg, err := PROGRAM.GetPackage(pkgName)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tstrct, err := pkg.GetStruct(strctName)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\targ := MakeArgument(argName, \"\", -1).AddType(TypeNames[TYPE_CUSTOM])\n\targ.DeclarationSpecifiers = append(arg.DeclarationSpecifiers, DECL_STRUCT)\n\targ.Size = strct.Size\n\targ.TotalSize = strct.Size\n\targ.CustomType = strct\n\n\treturn arg\n}", "func (s *BaseSyslParserListener) EnterTransform_arg(ctx *Transform_argContext) {}", "func (w *reqResWriter) writeArg1(arg Output) error {\n\treturn w.writeArg(arg, false, reqResWriterPreArg1, reqResWriterPreArg2)\n}", "func Literal(literal string) *LiteralArgumentBuilder {\n\treturn &LiteralArgumentBuilder{Literal: literal}\n}", "func (gen *jsGenerator) init(args *Arguments) error {\n\tif !args.GenClient && !args.GenModel && !args.GenServer {\n\t\treturn fmt.Errorf(\"nothing to do\")\n\t} else if len(args.Options) > 0 {\n\t\tfor k, v := range args.Options {\n\t\t\tswitch k {\n\t\t\tcase \"controller\":\n\t\t\t\tif v == \"\" {\n\t\t\t\t\treturn fmt.Errorf(\"controller cannot be empty\")\n\t\t\t\t}\n\t\t\t\tgen.baseControllerName = v\n\t\t\tcase \"output\":\n\t\t\t\tif v == \"\" {\n\t\t\t\t\treturn fmt.Errorf(\"output option 
cannot be empty. Valid options are 'ns-flat' and 'ns-nested'\")\n\t\t\t\t} else if v != \"ns-flat\" && v != \"ns-nested\" {\n\t\t\t\t\treturn fmt.Errorf(\"invalid output option: %s: valid options are 'ns-flat' and 'ns-nested'\", v)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"the %s option is not applicable to language js\", k)\n\t\t\t}\n\t\t}\n\t}\n\tgen.args = args\n\treturn gen.loadTempates(args.TemplateDir, \"js\", template.FuncMap{\n\t\t\"formatType\": func(t *idl.Type) string { return gen.formatType(t) },\n\t\t\"fullNameOf\": func(name string) string { return gen.fullNameOf(name) },\n\t\t\"formatValue\": func(p *idl.Pair) string { return gen.formatLiteral(p.Value, p.DataType) },\n\t\t\"filterAttrs\": func(attrs []*idl.Attribute) []*idl.Attribute { return gen.filterAttributes(attrs) },\n\t\t\"isVoid\": func(t *idl.Type) bool { return gen.isVoid(t) },\n\t\t\"isTrivialProperty\": func(t *idl.Type) bool { return gen.isTrivialProperty(t) },\n\t\t\"usings\": func() []string {\n\t\t\tpkg := gen.tplRootIdl.Namespaces[\"js\"]\n\t\t\timports := make([]string, 0)\n\t\t\tfor _, i := range gen.tplRootIdl.UniqueNamespaces(\"js\") {\n\t\t\t\tif i != pkg {\n\t\t\t\t\t//relPath := i[len(pkg)];\n\t\t\t\t\t//fmt.Println(relPath);\n\t\t\t\t\t//fmt.Println(\"pkg => \" + pkg);\n\t\t\t\t\t//fmt.Println(\"i => \" + i);\n\t\t\t\t\trelPath := \"\"\n\t\t\t\t\tfor x := 0; x < len(strings.Split(pkg, \".\")); x++ {\n\t\t\t\t\t\trelPath += \"../\"\n\t\t\t\t\t}\n\t\t\t\t\t//fmt.Println(\"relPath => \" + relPath);\n\t\t\t\t\timports = append(imports, relPath+strings.Replace(i, \".\", \"/\", -1))\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn imports\n\t\t},\n\t\t\"isNotPascalCase\": func(name string) bool {\n\t\t\tif len(name) > 1 {\n\t\t\t\treturn strings.ToUpper(name[0:1]) != name[0:1]\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\t\"baseController\": func() string {\n\t\t\tif gen.baseControllerName != \"\" {\n\t\t\t\treturn gen.baseControllerName\n\t\t\t} else {\n\t\t\t\treturn 
\"Concur.Babel.Mvc.BabelController\"\n\t\t\t}\n\t\t},\n\t\t\"cast\": func(t *idl.Type) string {\n\t\t\tif t.Name == \"float32\" {\n\t\t\t\treturn \"(float)\"\n\t\t\t} else {\n\t\t\t\treturn \"\"\n\t\t\t}\n\t\t},\n\t\t\"constType\": func(s string) string {\n\t\t\tcs, ok := jsConstTypes[s]\n\t\t\tif ok {\n\t\t\t\treturn cs\n\t\t\t} else {\n\t\t\t\treturn \"string\"\n\t\t\t}\n\t\t},\n\t})\n}", "func marshalArg(arg any) any {\n\tif buf, err := json.Marshal(arg); err == nil {\n\t\targ = string(buf)\n\t}\n\treturn arg\n}", "func formulaArgToToken(arg formulaArg) efp.Token {\n\tswitch arg.Type {\n\tcase ArgNumber:\n\t\tif arg.Boolean {\n\t\t\treturn efp.Token{TValue: arg.Value(), TType: efp.TokenTypeOperand, TSubType: efp.TokenSubTypeLogical}\n\t\t}\n\t\treturn efp.Token{TValue: arg.Value(), TType: efp.TokenTypeOperand, TSubType: efp.TokenSubTypeNumber}\n\tdefault:\n\t\treturn efp.Token{TValue: arg.Value(), TType: efp.TokenTypeOperand, TSubType: efp.TokenSubTypeText}\n\t}\n}", "func ASTArgsFromStmt(stmt string) (*ASTArgs, error) {\n\tstmtNode, err := parser.New().ParseOneStmt(stmt, \"\", \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tloadDataStmt, ok := stmtNode.(*ast.LoadDataStmt)\n\tif !ok {\n\t\treturn nil, errors.Errorf(\"stmt %s is not load data stmt\", stmt)\n\t}\n\treturn &ASTArgs{\n\t\tFileLocRef: loadDataStmt.FileLocRef,\n\t\tColumnsAndUserVars: loadDataStmt.ColumnsAndUserVars,\n\t\tColumnAssignments: loadDataStmt.ColumnAssignments,\n\t\tOnDuplicate: loadDataStmt.OnDuplicate,\n\t\tFieldsInfo: loadDataStmt.FieldsInfo,\n\t\tLinesInfo: loadDataStmt.LinesInfo,\n\t}, nil\n}", "func (s *BaseSyslParserListener) EnterCall_arg(ctx *Call_argContext) {}", "func (s *BaseGShellListener) EnterNamedArgument(ctx *NamedArgumentContext) {}", "func (n *Attribute) Arg(i int) *Argument { return n.Args[i].(*Argument) }", "func (params GetParams) Generate(args []string, argConfigs []Arg) GetParams {\n\tfor index, arg := range args {\n\t\tif argConfigs[index].Type != \"object\" 
&& argConfigs[index].Type != \"array\" {\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).SetString(arg)\n\t\t} else if argConfigs[index].Type == \"bool\" {\n\t\t\tboolArg, _ := strconv.ParseBool(arg)\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).SetBool(boolArg)\n\t\t}\n\t}\n\treturn params\n}", "func (s *BasePlSqlParserListener) ExitArgument(ctx *ArgumentContext) {}", "func BundleGeneratorConfig(config sheaf.BundleConfig) BundleGeneratorOption {\n\treturn func(generator BundleGenerator) BundleGenerator {\n\t\tgenerator.config = config\n\t\treturn generator\n\t}\n}", "func getConfigArgs(action BuildAction, dir string, ctx Context, args []string) []string {\n\t// The next block of code verifies that the current directory is the root directory of the source\n\t// tree. It then finds the relative path of dir based on the root directory of the source tree\n\t// and verify that dir is inside of the source tree.\n\tcheckTopDir(ctx)\n\ttopDir, err := os.Getwd()\n\tif err != nil {\n\t\tctx.Fatalf(\"Error retrieving top directory: %v\", err)\n\t}\n\tdir, err = filepath.EvalSymlinks(dir)\n\tif err != nil {\n\t\tctx.Fatalf(\"Unable to evaluate symlink of %s: %v\", dir, err)\n\t}\n\tdir, err = filepath.Abs(dir)\n\tif err != nil {\n\t\tctx.Fatalf(\"Unable to find absolute path %s: %v\", dir, err)\n\t}\n\trelDir, err := filepath.Rel(topDir, dir)\n\tif err != nil {\n\t\tctx.Fatalf(\"Unable to find relative path %s of %s: %v\", relDir, topDir, err)\n\t}\n\t// If there are \"..\" in the path, it's not in the source tree.\n\tif strings.Contains(relDir, \"..\") {\n\t\tctx.Fatalf(\"Directory %s is not under the source tree %s\", dir, topDir)\n\t}\n\n\tconfigArgs := args[:]\n\n\t// If the arguments contains GET-INSTALL-PATH, change the target name prefix from MODULES-IN- to\n\t// GET-INSTALL-PATH-IN- to extract the installation path instead of building the 
modules.\n\ttargetNamePrefix := \"MODULES-IN-\"\n\tif inList(\"GET-INSTALL-PATH\", configArgs) {\n\t\ttargetNamePrefix = \"GET-INSTALL-PATH-IN-\"\n\t\tconfigArgs = removeFromList(\"GET-INSTALL-PATH\", configArgs)\n\t}\n\n\tvar targets []string\n\n\tswitch action {\n\tcase BUILD_MODULES:\n\t\t// No additional processing is required when building a list of specific modules or all modules.\n\tcase BUILD_MODULES_IN_A_DIRECTORY:\n\t\t// If dir is the root source tree, all the modules are built of the source tree are built so\n\t\t// no need to find the build file.\n\t\tif topDir == dir {\n\t\t\tbreak\n\t\t}\n\n\t\tbuildFile := findBuildFile(ctx, relDir)\n\t\tif buildFile == \"\" {\n\t\t\tctx.Fatalf(\"Build file not found for %s directory\", relDir)\n\t\t}\n\t\ttargets = []string{convertToTarget(filepath.Dir(buildFile), targetNamePrefix)}\n\tcase BUILD_MODULES_IN_DIRECTORIES:\n\t\tnewConfigArgs, dirs := splitArgs(configArgs)\n\t\tconfigArgs = newConfigArgs\n\t\ttargets = getTargetsFromDirs(ctx, relDir, dirs, targetNamePrefix)\n\t}\n\n\t// Tidy only override all other specified targets.\n\ttidyOnly := os.Getenv(\"WITH_TIDY_ONLY\")\n\tif tidyOnly == \"true\" || tidyOnly == \"1\" {\n\t\tconfigArgs = append(configArgs, \"tidy_only\")\n\t} else {\n\t\tconfigArgs = append(configArgs, targets...)\n\t}\n\n\treturn configArgs\n}", "func (a *arguments) Argument() string {\n\treturn a.argument\n}", "func (eeo EncodingErrorOption) argApply(o *argOptions) {\n\to.opts = append(o.opts, eeo)\n}", "func genYaml(name string) error {\n\treturn nil\n}", "func (s *BasePlSqlParserListener) EnterFunction_argument_modeling(ctx *Function_argument_modelingContext) {\n}", "func genConfigCobra(cmd *cobra.Command, args []string) {\n\ts, err := genConfig()\n\tif err != nil {\n\t\terrlog.LogError(err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Println(string(s))\n}", "func ASTArgsFromPlan(plan *plannercore.LoadData) *ASTArgs {\n\treturn &ASTArgs{\n\t\tFileLocRef: plan.FileLocRef,\n\t\tColumnsAndUserVars: 
plan.ColumnsAndUserVars,\n\t\tColumnAssignments: plan.ColumnAssignments,\n\t\tOnDuplicate: plan.OnDuplicate,\n\t\tFieldsInfo: plan.FieldsInfo,\n\t\tLinesInfo: plan.LinesInfo,\n\t}\n}", "func genVariants(arg interface{}) gopter.Gen {\n\targs := arg.([]interface{})\n\ts := args[0].(string)\n\tt := args[1].(string)\n\treturn gen.OneConstOf(s, strings.ToUpper(s), strings.Title(s),\n\t\tfmt.Sprintf(\"%s %s\", s, t),\n\t\tfmt.Sprintf(\"%s %s\", strings.ToUpper(s), t),\n\t\tfmt.Sprintf(\"%s %s\", strings.Title(s), t),\n\t)\n}", "func (c *context) ArgBytes(name string) []byte {\n\treturn c.ParamBytes(name)\n}", "func (s *BaseGShellListener) EnterScriptArgument(ctx *ScriptArgumentContext) {}", "func genConfigXML(data map[string]interface{}, section string) string {\n\tif len(data) == 0 {\n\t\treturn \"\"\n\t}\n\n\tb := &bytes.Buffer{}\n\n\t// <yandex>\n\t//\t\t<SECTION>\n\tfprintf(b, \"<%s>\\n\", xmlTagYandex)\n\tfprintf(b, \"%4s<%s>\\n\", \" \", section)\n\n\txmlbuilder.GenerateXML(b, data, 4, 4)\n\t//\t\t<SECTION>\n\t// <yandex>\n\tfprintf(b, \"%4s</%s>\\n\", \" \", section)\n\tfprintf(b, \"</%s>\\n\", xmlTagYandex)\n\n\treturn b.String()\n}", "func (v *Argument) Encode(sw stream.Writer) error {\n\tif err := sw.WriteStructBegin(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 1, Type: wire.TBinary}); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteString(v.Name); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteFieldEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tif v.Type == nil {\n\t\treturn errors.New(\"field Type of Argument is required\")\n\t}\n\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 2, Type: wire.TStruct}); err != nil {\n\t\treturn err\n\t}\n\tif err := v.Type.Encode(sw); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteFieldEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tif v.Annotations != nil {\n\t\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 3, Type: wire.TMap}); err != nil 
{\n\t\t\treturn err\n\t\t}\n\t\tif err := _Map_String_String_Encode(v.Annotations, sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn sw.WriteStructEnd()\n}", "func (n *AnonClassExpr) Arg(i int) *Argument { return n.Args[i].(*Argument) }", "func (*Base) Arguments(p ASTPass, l *ast.Fodder, args *ast.Arguments, r *ast.Fodder, ctx Context) {\n\tp.Fodder(p, l, ctx)\n\tfor i := range args.Positional {\n\t\targ := &args.Positional[i]\n\t\tp.Visit(p, &arg.Expr, ctx)\n\t\tp.Fodder(p, &arg.CommaFodder, ctx)\n\t}\n\tfor i := range args.Named {\n\t\targ := &args.Named[i]\n\t\tp.Fodder(p, &arg.NameFodder, ctx)\n\t\tp.Fodder(p, &arg.EqFodder, ctx)\n\t\tp.Visit(p, &arg.Arg, ctx)\n\t\tp.Fodder(p, &arg.CommaFodder, ctx)\n\t}\n\tp.Fodder(p, r, ctx)\n}", "func (rt *operatorRuntime) genOp(op func(interface{}, interface{}) interface{},\n\tvs parser.Scope, is map[string]interface{}, tid uint64) (interface{}, error) {\n\n\tvar ret interface{}\n\n\terrorutil.AssertTrue(len(rt.node.Children) == 2,\n\t\tfmt.Sprint(\"Operation requires 2 operands\", rt.node))\n\n\tres1, err := rt.node.Children[0].Runtime.Eval(vs, is, tid)\n\tif err == nil {\n\t\tvar res2 interface{}\n\n\t\tif res2, err = rt.node.Children[1].Runtime.Eval(vs, is, tid); err == nil {\n\t\t\tret = op(res1, res2)\n\t\t}\n\t}\n\n\treturn ret, err\n}", "func (self *ArgumentParser) statePassThrough(parser *parserState) stateFunc {\n for ; parser.pos < len(parser.args) ; parser.pos++ {\n arg := parser.args[parser.pos]\n parser.emitWithArgument(tokArgument, parser.stickyArg, parser.stickyArg.String)\n parser.emitWithValue(tokValue, arg)\n }\n return nil\n}" ]
[ "0.73820037", "0.57308537", "0.55290264", "0.5522804", "0.5506649", "0.5157852", "0.5125404", "0.5091101", "0.504007", "0.5006817", "0.49592155", "0.49067444", "0.49022472", "0.48643097", "0.48459315", "0.484178", "0.48397017", "0.48209807", "0.4767527", "0.47625056", "0.47602305", "0.47338164", "0.46945795", "0.46309894", "0.4624366", "0.46112818", "0.45811763", "0.4580689", "0.4573709", "0.45722592", "0.45722592", "0.45616448", "0.4558582", "0.45512962", "0.45456877", "0.4532968", "0.4532445", "0.45313627", "0.45305777", "0.4515654", "0.4459298", "0.4456183", "0.44486606", "0.44464204", "0.44449866", "0.44370413", "0.4434004", "0.44288403", "0.4418655", "0.4417687", "0.4396892", "0.4395933", "0.43903193", "0.43853173", "0.4381437", "0.4362715", "0.43624857", "0.43614763", "0.43610916", "0.43598354", "0.4358731", "0.43564078", "0.43513617", "0.43513617", "0.43476504", "0.4337128", "0.43327057", "0.43223402", "0.4314685", "0.43111363", "0.43079457", "0.4304672", "0.43041334", "0.43000758", "0.42928866", "0.428864", "0.42808256", "0.42805028", "0.42754608", "0.42721108", "0.42719278", "0.42713574", "0.42532334", "0.42484128", "0.42405307", "0.42388877", "0.42294285", "0.4226754", "0.4225793", "0.42224246", "0.42222396", "0.4218464", "0.4218087", "0.4214192", "0.42058864", "0.4203382", "0.41990677", "0.41987687", "0.41892397", "0.4172177" ]
0.793481
0
AddReceipt adds receipt for user.
func (client Client) AddReceipt(userId string, text string) error { addReceiptUrl := client.backendUrl + "/internal/receipt" request := addReceiptRequest{ReceiptString: text, UserId: userId} reader, err := getReader(request) if err != nil { return err } response, err := http.Post(addReceiptUrl, "text/javascript", reader) if err != nil { return err } switch response.StatusCode { case http.StatusOK: return nil default: return errors.New(response.Status) } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (puo *ProductUpdateOne) AddReceipt(r ...*Receipt) *ProductUpdateOne {\n\tids := make([]int, len(r))\n\tfor i := range r {\n\t\tids[i] = r[i].ID\n\t}\n\treturn puo.AddReceiptIDs(ids...)\n}", "func (pu *ProductUpdate) AddReceipt(r ...*Receipt) *ProductUpdate {\n\tids := make([]int, len(r))\n\tfor i := range r {\n\t\tids[i] = r[i].ID\n\t}\n\treturn pu.AddReceiptIDs(ids...)\n}", "func (service *Service) AddUser(accountId types.ID) error {\n\t// you can be delegate of a user after the user designate you as a delegate.\n\tif isDelegate, err := service.accounts.IsDelegateOf(service.addr, accountId); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to call Accounts.IsDelegateOf\")\n\t} else if !isDelegate {\n\t\treturn ErrDelegationNotAllowed\n\t}\n\tservice.accountIds = append(service.accountIds, accountId)\n\treturn nil\n}", "func (r *Receipt) AddItem(item *Item) {\n r.Items = append(r.Items, item)\n r.Taxes += float64(item.Quantity) * item.Taxes\n r.Total += float64(item.Quantity) * (item.Price + item.Taxes)\n}", "func (_UsersData *UsersDataTransactor) AddUser(opts *bind.TransactOpts, uuid [16]byte, userAddress common.Address, orgUuid [16]byte, publicKey [2][32]byte, idCartNoHash [32]byte, time *big.Int) (*types.Transaction, error) {\n\treturn _UsersData.contract.Transact(opts, \"addUser\", uuid, userAddress, orgUuid, publicKey, idCartNoHash, time)\n}", "func (_AnchorChain *AnchorChainTransactor) AddUser(opts *bind.TransactOpts, user common.Address) (*types.Transaction, error) {\n\treturn _AnchorChain.contract.Transact(opts, \"addUser\", user)\n}", "func (c *Client) AddUser(userID, phone, name, certNum string, userType string, certType string, autoSign bool) (*AddUserResponse, error) {\n\tcreateSign := \"0\"\n\tif autoSign {\n\t\tcreateSign = \"1\"\n\t}\n\tp := addUserParams{\n\t\tAppUserID: userID,\n\t\tCellNum: phone,\n\t\tUserType: userType,\n\t\tUserName: name,\n\t\tCertifyType: certType,\n\t\tCertifyNumber: certNum,\n\t\tCreateSignature: 
createSign,\n\t}\n\n\tparamMap, err := toMap(p, map[string]string{\n\t\tAppIDKey: c.config.AppID,\n\t\tPasswordKey: c.config.Password,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tret, err := httpRequest(c, p.URI(), paramMap, nil, func() interface{} {\n\t\treturn &AddUserResponse{}\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trsp := ret.(*AddUserResponse)\n\n\tif err = checkErr(rsp.Code, rsp.SubCode, rsp.Message); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn rsp, nil\n}", "func (puo *ProductUpdateOne) AddReceiptIDs(ids ...int) *ProductUpdateOne {\n\tpuo.mutation.AddReceiptIDs(ids...)\n\treturn puo\n}", "func (_UsersData *UsersDataTransactorSession) AddUser(uuid [16]byte, userAddress common.Address, orgUuid [16]byte, publicKey [2][32]byte, idCartNoHash [32]byte, time *big.Int) (*types.Transaction, error) {\n\treturn _UsersData.Contract.AddUser(&_UsersData.TransactOpts, uuid, userAddress, orgUuid, publicKey, idCartNoHash, time)\n}", "func NewReceipt() *Receipt {\n\treturn &Receipt{}\n}", "func (_UsersData *UsersDataSession) AddUser(uuid [16]byte, userAddress common.Address, orgUuid [16]byte, publicKey [2][32]byte, idCartNoHash [32]byte, time *big.Int) (*types.Transaction, error) {\n\treturn _UsersData.Contract.AddUser(&_UsersData.TransactOpts, uuid, userAddress, orgUuid, publicKey, idCartNoHash, time)\n}", "func (rm *ReceiptMaker) NewReceipt() types.MessageReceipt {\n\tseq := rm.seq\n\trm.seq++\n\treturn types.MessageReceipt{\n\t\tReturn: []byte(fmt.Sprintf(\"%d\", seq)),\n\t}\n}", "func (a *Client) AddStockReceipts(params *AddStockReceiptsParams, authInfo runtime.ClientAuthInfoWriter) (*AddStockReceiptsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewAddStockReceiptsParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"addStockReceipts\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/accounts/{koronaAccountId}/stockReceipts\",\n\t\tProducesMediaTypes: 
[]string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &AddStockReceiptsReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*AddStockReceiptsOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for addStockReceipts: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (_Pausable *PausableTransactor) AddPauser(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) {\n\treturn _Pausable.contract.Transact(opts, \"addPauser\", account)\n}", "func NewReceipt(values map[string]string, contentID uuid.UUID, userID uuid.UUID) *Receipt {\n\tif values == nil {\n\t\tvalues = make(map[string]string, 0)\n\t}\n\treturn &Receipt{\n\t\tID: uuid.NewUUID(),\n\t\tValues: values,\n\t\tSendState: READY,\n\t\tCreated: time.Now(),\n\t\tContentID: contentID,\n\t\tUserID: userID,\n\t}\n}", "func (vu *VaultUsers) AddUser(email string, publicKeyString string, masterPassphrase []byte) error {\n\tif vu.users[email] != nil {\n\t\treturn errors.New(\"User already exists in vault:\" + email)\n\t}\n\n\tuser, err := NewVaultUser(vu.path, email, publicKeyString)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := user.SetEncryptedMasterKey(masterPassphrase); err != nil {\n\t\treturn err\n\t}\n\n\tif err := user.Save(); err != nil {\n\t\treturn err\n\t}\n\tvu.users[email] = user\n\treturn nil\n}", "func (pu *ProductUpdate) AddReceiptIDs(ids ...int) *ProductUpdate {\n\tpu.mutation.AddReceiptIDs(ids...)\n\treturn pu\n}", "func (s *Service) AddUserRecord() http.HandlerFunc 
{\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// todo\n\t})\n}", "func (_ERC20Pausable *ERC20PausableTransactor) AddPauser(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) {\n\treturn _ERC20Pausable.contract.Transact(opts, \"addPauser\", account)\n}", "func (m *MemoryUserStorage) Add(user users.User) int {\n\tuser.ID = len(m.users) + 1\n\tuser.Cash = 1000.0\n\tm.users = append(m.users, user)\n\n\treturn user.ID\n}", "func (_AnchorChain *AnchorChainTransactorSession) AddUser(user common.Address) (*types.Transaction, error) {\n\treturn _AnchorChain.Contract.AddUser(&_AnchorChain.TransactOpts, user)\n}", "func (fs Fakes) AddUser(u types.User) int32 {\n\tid := fs.UserStore.lastUserID + 1\n\tfs.UserStore.lastUserID = id\n\tu.ID = id\n\tfs.UserStore.list = append(fs.UserStore.list, u)\n\treturn id\n}", "func NewReceipt() *Receipt {\n return &Receipt{}\n}", "func (session *AliceSession) OnReceipt(receiptFile, secretFile string) error {\n\tif err := utils.CheckRegularFileReadPerm(receiptFile); err != nil {\n\t\treturn err\n\t}\n\tif err := utils.CheckDirOfPathExistence(secretFile); err != nil {\n\t\treturn err\n\t}\n\n\thandle := C.handle_t(session.handle)\n\n\treceiptFileCStr := C.CString(receiptFile)\n\tdefer C.free(unsafe.Pointer(receiptFileCStr))\n\n\tsecretFileCStr := C.CString(secretFile)\n\tdefer C.free(unsafe.Pointer(secretFileCStr))\n\n\tret := bool(C.E_TableOtComplaintAliceOnReceipt(\n\t\thandle, receiptFileCStr, secretFileCStr))\n\tif !ret {\n\t\treturn fmt.Errorf(\n\t\t\t\"E_TableOtComplaintAliceOnReceipt(%v, %s, %s) failed\",\n\t\t\thandle, receiptFile, secretFile)\n\t}\n\n\treturn nil\n}", "func AddUserReview(db *sql.DB, revieweeEmail string, strengths []string, opportunities []string, cycle string) error {\n\tq := `\n INSERT INTO reviews\n (recipient_id,\n review_cycle_id,\n feedback,\n is_strength,\n is_growth_opportunity)\n VALUES ((SELECT id\n FROM users\n WHERE email =?\n LIMIT 1),\n (SELECT 
id\n FROM review_cycles\n WHERE name =? ),\n ?,\n ?,\n ?) ;\n `\n\t// could make some uber query, but it is just easier to iterate\n\tfor _, strength := range strengths {\n\t\tif _, err := db.Exec(q, revieweeEmail, cycle, strength, true, false); err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to insert strengths in reviews\")\n\t\t}\n\t}\n\tfor _, opportunity := range opportunities {\n\t\tif _, err := db.Exec(q, revieweeEmail, cycle, opportunity, false, true); err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to insert opportunity in reviews\")\n\t\t}\n\t}\n\treturn nil\n}", "func (r *PurchaseInvoicePurchaseInvoiceLinesCollectionRequest) Add(ctx context.Context, reqObj *PurchaseInvoiceLine) (resObj *PurchaseInvoiceLine, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}", "func (_AnchorChain *AnchorChainSession) AddUser(user common.Address) (*types.Transaction, error) {\n\treturn _AnchorChain.Contract.AddUser(&_AnchorChain.TransactOpts, user)\n}", "func (_ChpRegistry *ChpRegistryTransactor) AddPauser(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) {\n\treturn _ChpRegistry.contract.Transact(opts, \"addPauser\", account)\n}", "func AddNoteUsn(path, userId string, Usn int) {\n\tnote := model.NewNote(path)\n\tnote.UID = userId\n\tfiled := []string{\"Usn\"}\n\tnote.Usn = Usn\n\tbeego.Debug(\"inc Usn\")\n\tif err := note.Update(filed); err != nil {\n\t\tbeego.Error(err)\n\t}\n\treturn\n}", "func (s *Service) Add(userId, tan string) error {\n\thash, err := s.hasher.Hash(tan)\n\tif nil != err {\n\t\treturn err\n\t}\n\t_, err = s.repository.Create(userId, hash)\n\tif nil == err {\n\t\ts.subscriberRepo.AddSubscriber(userId)\n\t}\n\treturn err\n}", "func AddPayee(id bson.ObjectId, payeeID bson.ObjectId) User {\n\tsession, _ := mgo.Dial(\"127.0.0.1\")\n\tdefer session.Close()\n\tsession.SetMode(mgo.Monotonic, true)\n\tdb := session.DB(\"reimburse-me\").C(\"user\")\n\tuserID := bson.M{\"_id\": 
id}\n\tchange := bson.M{\"$addToSet\": bson.M{\n\t\t\"payees\": payeeID,\n\t}}\n\tdb.Update(userID, change)\n\tvar user User\n\tdb.Find(bson.M{\"_id\": id}).One(&user)\n\treturn user\n}", "func (r *CompanyCustomerPaymentJournalsCollectionRequest) Add(ctx context.Context, reqObj *CustomerPaymentJournal) (resObj *CustomerPaymentJournal, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}", "func (_PauserRole *PauserRoleTransactor) AddPauser(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) {\n\treturn _PauserRole.contract.Transact(opts, \"addPauser\", account)\n}", "func (r *CustomerPaymentJournalCustomerPaymentsCollectionRequest) Add(ctx context.Context, reqObj *CustomerPayment) (resObj *CustomerPayment, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}", "func (r *CustomerPaymentJournalCustomerPaymentsCollectionRequest) Add(ctx context.Context, reqObj *CustomerPayment) (resObj *CustomerPayment, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}", "func (a *Client) AddStockReceiptItems(params *AddStockReceiptItemsParams, authInfo runtime.ClientAuthInfoWriter) (*AddStockReceiptItemsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewAddStockReceiptItemsParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"addStockReceiptItems\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/accounts/{koronaAccountId}/stockReceipts/{stockReceiptId}/items\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &AddStockReceiptItemsReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := 
result.(*AddStockReceiptItemsOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for addStockReceiptItems: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (t *Thereum) TxReceipt(hash common.Hash) (*types.Receipt, error) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\treceipt, _, _, _ := rawdb.ReadReceipt(t.database, hash, t.chainConfig)\n\treturn receipt, nil\n}", "func (_ElvToken *ElvTokenTransactor) AddPauser(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) {\n\treturn _ElvToken.contract.Transact(opts, \"addPauser\", account)\n}", "func AddUser(u User) (User, error) {\n\tif u.ID != 0 {\n\t\treturn User{}, errors.New(\"new user must not include an id or it must be set to zero\")\n\t}\n\tu.ID = nextID\n\tnextID++\n\tusers = append(users, &u)\n\treturn u, nil\n}", "func (m *RepairinvoiceMutation) AddUserid(i int) {\n\tif m.adduserid != nil {\n\t\t*m.adduserid += i\n\t} else {\n\t\tm.adduserid = &i\n\t}\n}", "func ProcessNewReceipt(ctx context.Context, xbiz *XBusiness, d1, d2 *time.Time, r *Receipt) (Journal, error) {\n\tvar j Journal\n\tj.BID = xbiz.P.BID\n\tj.Amount = RoundToCent(r.Amount)\n\tj.Dt = r.Dt\n\tj.Type = JNLTYPERCPT\n\tj.ID = r.RCPTID\n\t// j.RAID = r.RAID\n\tjid, err := InsertJournal(ctx, &j)\n\tif err != nil {\n\t\tUlog(\"Error inserting Journal entry: %v\\n\", err)\n\t\treturn j, err\n\t}\n\tif jid > 0 {\n\t\t// now add the Journal allocation records...\n\t\tfor i := 0; i < len(r.RA); i++ {\n\t\t\t// // Console(\"r.RA[%d] id = %d\\n\", i, r.RA[i].RCPAID)\n\t\t\t// rntagr, _ := GetRentalAgreement(r.RA[i].RAID) // what Rental Agreements did this payment affect and the amounts for each\n\t\t\tvar ja JournalAllocation\n\t\t\tja.JID = jid\n\t\t\tja.TCID = 
r.TCID\n\t\t\tja.Amount = RoundToCent(r.RA[i].Amount)\n\t\t\tja.BID = j.BID\n\t\t\tja.ASMID = r.RA[i].ASMID\n\t\t\tja.AcctRule = r.RA[i].AcctRule\n\t\t\tif ja.ASMID > 0 { // there may not be an assessment associated, it could be unallocated funds\n\t\t\t\t// TODO(Steve): should we ignore error?\n\t\t\t\ta, _ := GetAssessment(ctx, ja.ASMID) // but if there is an associated assessment, then mark the RID and RAID\n\t\t\t\tja.RID = a.RID\n\t\t\t\tja.RAID = r.RA[i].RAID\n\t\t\t}\n\t\t\tja.TCID = r.TCID\n\t\t\tif _, err = InsertJournalAllocationEntry(ctx, &ja); err != nil {\n\t\t\t\tLogAndPrintError(\"ProcessNewReceipt\", err)\n\t\t\t\treturn j, err\n\t\t\t}\n\t\t\tj.JA = append(j.JA, ja)\n\t\t}\n\t}\n\treturn j, nil\n}", "func (_DappboxManager *DappboxManagerTransactor) AddUsers(opts *bind.TransactOpts, dAppBoxOrigin common.Address, _address common.Address, _userName string, _defaultURL string, _shortenURL string) (*types.Transaction, error) {\n\treturn _DappboxManager.contract.Transact(opts, \"addUsers\", dAppBoxOrigin, _address, _userName, _defaultURL, _shortenURL)\n}", "func AddUser(APIstub shim.ChaincodeStubInterface, args []string, txnID string, userID string) sc.Response {\n\n\texistingClaimAsBytes, _ := APIstub.GetState(args[0])\n\n\tclaim := Claim{}\n\tjson.Unmarshal(existingClaimAsBytes, &claim)\n\n\tif utils.StringInSlice(userID, claim.UserIDs) {\n\t\treturn shim.Error(\"User already in Claim\")\n\t}\n\n\tclaim.UserIDs = append(claim.UserIDs, userID)\n\n\tclaimAsBytes, _ := json.Marshal(claim)\n\n\tAPIstub.PutState(args[0], claimAsBytes)\n\n\ttimestamp, _ := APIstub.GetTxTimestamp()\n\ttimestampAsInt := timestamp.GetSeconds()\n\tisotimestamp := time.Unix(timestampAsInt, 0).Format(time.RFC3339)\n\ttxnDetails := []string{txnID, \"CEA - Claim User Addition\", isotimestamp, \"\", claim.ID}\n\ttxn.Add(APIstub, txnDetails)\n\n\treturn shim.Success(claimAsBytes)\n\n}", "func (*RegDBService) AddUser(reg *Registration) error {\n\terr := rdb.Create(&reg).Error\n\treturn 
err\n}", "func (_DappboxManager *DappboxManagerSession) AddUsers(dAppBoxOrigin common.Address, _address common.Address, _userName string, _defaultURL string, _shortenURL string) (*types.Transaction, error) {\n\treturn _DappboxManager.Contract.AddUsers(&_DappboxManager.TransactOpts, dAppBoxOrigin, _address, _userName, _defaultURL, _shortenURL)\n}", "func (u *CryptohomeClient) AddRecoveryAuthFactor(ctx context.Context, authSessionID, label, mediatorPubKeyHex, userGaiaID, deviceUserID string) error {\n\t_, err := u.binary.addRecoveryAuthFactor(ctx, authSessionID, label, mediatorPubKeyHex, userGaiaID, deviceUserID)\n\treturn err\n}", "func (_OwnerProxyRegistry *OwnerProxyRegistryTransactor) AddDelegate(opts *bind.TransactOpts, from common.Address) (*types.Transaction, error) {\n\treturn _OwnerProxyRegistry.contract.Transact(opts, \"addDelegate\", from)\n}", "func (r *Runner) AddUser(user *discordgo.User) {\n\tr.DiscordSession.Users[user.ID] = user\n}", "func TestEthTxAdapter_addReceiptToResult(t *testing.T) {\n\tt.Parallel()\n\n\tj := models.JSON{}\n\tinput := *models.NewRunInput(models.NewID(), j, models.RunStatusUnstarted)\n\n\toutput := addReceiptToResult(nil, input, j)\n\tassert.True(t, output.HasError())\n\tassert.EqualError(t, output.Error(), \"missing receipt for transaction\")\n}", "func (service *UserService) AddUser(u models.User) (models.User, error) {\n\tservice.MaxUserID = service.MaxUserID + 1\n\tu.ID = service.MaxUserID\n\tservice.UserList[service.MaxUserID] = u\n\treturn u, nil\n}", "func (ms *moviestoreImpl) AddUser(name string, age Age) UserID {\n\tuserID := ms.nextUserID\n\tms.nextUserID++\n\tuser := User{name, age, userID}\n\tms.users[userID] = user\n\treturn userID\n}", "func (_DappboxManager *DappboxManagerTransactorSession) AddUsers(dAppBoxOrigin common.Address, _address common.Address, _userName string, _defaultURL string, _shortenURL string) (*types.Transaction, error) {\n\treturn _DappboxManager.Contract.AddUsers(&_DappboxManager.TransactOpts, 
dAppBoxOrigin, _address, _userName, _defaultURL, _shortenURL)\n}", "func (_Userable *UserableTransactor) AddAuditor(opts *bind.TransactOpts, _newAuditor common.Address) (*types.Transaction, error) {\n\treturn _Userable.contract.Transact(opts, \"addAuditor\", _newAuditor)\n}", "func (e *LifecycleEvent) SetReceiptHandle(receipt string) { e.receiptHandle = receipt }", "func (r *Redis) AddUser(id, key string) (err error) {\n\terr = r.client.HMSet(id, \"timestamp\", strconv.FormatInt(time.Now().UTC().Unix(), 10), \"key\", key, \"files\", \"\").Err()\n\treturn\n}", "func (c *UsageController) Add(recipeID int64, userID int64) error {\n\tc.Usage = append(c.Usage, models.Usage{\n\t\tID: c.getNewID(),\n\t\tRecipeID: recipeID,\n\t\tDate: time.Now(),\n\t\tUserID: userID,\n\t})\n\n\treturn nil\n}", "func (pg *PGUser) Add(in *user.User) (err error) {\n\tfmt.Printf(\"\\nPGUser in: %+v\\n\", in)\n\tif err := pg.DB.Create(in).Scan(&in); err != nil {\n\t\treturn oops.Err(err.Error)\n\t}\n\treturn nil\n}", "func (s *Server) AddItemLine(ctx context.Context, in *api.ItemLine) (*api.MsgResponse, error) {\n\tlog.Printf(\"insert itemLine with %v\", *in)\n\tb, err := json.Marshal(in)\n\tif err != nil {\n\t\treturn &api.MsgResponse{\n\t\t\tResponseMsg: \"FAILED\",\n\t\t}, err\n\t}\n\tc := make(chan ConfirmationMessage)\n\tfn := func(uid string, err error) {\n\t\tif err != nil {\n\t\t\tresp := ConfirmationMessage{\n\t\t\t\tresponse: \"ERROR\",\n\t\t\t\terr: err,\n\t\t\t}\n\t\t\tc <- resp\n\t\t} else {\n\t\t\tresp := ConfirmationMessage{\n\t\t\t\tresponse: uid,\n\t\t\t\terr: nil,\n\t\t\t}\n\t\t\tc <- resp\n\t\t}\n\t}\n\ts.MsgPublisher.PublishEvent(kitemLineChannelID, string(b), fn)\n\n\tif ret := <-c; ret.err != nil {\n\t\treturn &api.MsgResponse{\n\t\t\tResponseMsg: \"Error\",\n\t\t}, ret.err\n\t}\n\treturn &api.MsgResponse{\n\t\tResponseMsg: \"Created\",\n\t}, err\n}", "func (room *Room) AddUser(user *User) error {\n\troom.rwMutex.Lock()\n\tdefer room.rwMutex.Unlock()\n\tif room.deleted 
{\n\t\treturn ErrMissingRoom\n\t}\n\troom.users[user] = nil\n\treturn nil\n}", "func (card *Card) AddUser(user string) {\n log.Printf(\"Adding user %s to card %s.\", user, card.Id)\n GenPOSTForm(card.trello, \"/cards/\" + card.Id + \"/idMembers\", nil, url.Values{ \"value\": { card.trello.userIdbyName[user] } })\n}", "func AddUser(req *router.Request) error {\n\tparams := req.Params.(*AddUserParams)\n\n\tencryptedPassword, err := auth.CryptPassword(params.Password)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tuser := &auth.User{\n\t\tName: params.Name,\n\t\tEmail: params.Email,\n\t\tPassword: encryptedPassword,\n\t}\n\n\tif err := user.Save(); err != nil {\n\t\treturn err\n\t}\n\n\treq.Created(payloads.NewFullUser(user))\n\treturn nil\n}", "func (list *UserNotifications) Add(notificationID string) error {\n\tif list.Contains(notificationID) {\n\t\treturn errors.New(\"Notification \" + notificationID + \" has already been added\")\n\t}\n\n\tlist.Items = append(list.Items, notificationID)\n\treturn nil\n}", "func (r *CompanyAgedAccountsReceivableCollectionRequest) Add(ctx context.Context, reqObj *AgedAccountsReceivable) (resObj *AgedAccountsReceivable, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}", "func (c Application) AddUser() revel.Result {\n\tif user := c.connected(); user != nil {\n\t\tc.ViewArgs[\"user\"] = user\n\t}\n\treturn nil\n}", "func PlusKudo(self *slack.UserDetails, ev *slack.ReactionAddedEvent, rtm *slack.RTM, db *SQLite) {\n\tlog.Debug(ev)\n\tdb.PlusKudo(ev.ItemUser)\n}", "func AddCartItem(service Service, userService users.Service) func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tctx := r.Context()\n\t\tlogger := loglib.GetLogger(ctx)\n\t\tusername, err := auth.GetLoggedInUsername(r)\n\t\tif err != nil {\n\t\t\thttpresponse.ErrorResponseJSON(ctx, w, http.StatusForbidden, errorcode.ErrorsInRequestData, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tuser, 
err := userService.RetrieveUserByUsername(ctx, username)\n\t\tif err != nil || user == nil {\n\t\t\thttpresponse.ErrorResponseJSON(ctx, w, http.StatusUnauthorized, errorcode.UserNotFound, \"User not found\")\n\t\t\treturn\n\t\t}\n\n\t\tlogger.Infof(\"user is %v\", user.Username)\n\t\t// unmarshal request\n\t\treq := addCartItemRequest{}\n\t\tif err := json.NewDecoder(r.Body).Decode(&req); (err != nil || req == addCartItemRequest{}) {\n\t\t\thttpresponse.ErrorResponseJSON(ctx, w, http.StatusBadRequest, errorcode.ErrorsInRequestData, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// validate request\n\t\tif err := req.Validate(); err != nil {\n\t\t\thttpresponse.ErrorResponseJSON(ctx, w, http.StatusBadRequest, errorcode.ErrorsInRequestData, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tcart, err := service.AddItemCart(ctx, user.ID, req.ProductID, req.Quantity)\n\t\tif err != nil {\n\t\t\thttpresponse.ErrorResponseJSON(ctx, w, http.StatusInternalServerError, \"internal_error\", err.Error())\n\t\t\treturn\n\t\t}\n\n\t\thttpresponse.RespondJSON(w, http.StatusOK, cart, nil)\n\t}\n}", "func (_ChpRegistry *ChpRegistryTransactorSession) AddPauser(account common.Address) (*types.Transaction, error) {\n\treturn _ChpRegistry.Contract.AddPauser(&_ChpRegistry.TransactOpts, account)\n}", "func (db *InMemoryDB) AddTransaction(userID string, transaction model.Transaction) {\n\tif transactions, ok := db.UserTransactions[userID]; ok {\n\t\tdb.UserTransactions[userID] = append(transactions, transaction)\n\t} else {\n\t\tdb.UserTransactions[userID] = []model.Transaction{transaction}\n\t}\n}", "func (t *tx) AddUser(user *model.User) error {\n\t// FIXME: handle sql constraint errors\n\terr := t.Create(user).Error\n\n\treturn errors.Wrap(err, \"create user failed\")\n}", "func (ec *ExpertiseCreate) AddExpertiseUser(u ...*User) *ExpertiseCreate {\n\tids := make([]int, len(u))\n\tfor i := range u {\n\t\tids[i] = u[i].ID\n\t}\n\treturn ec.AddExpertiseUserIDs(ids...)\n}", "func ReadReceipt(row *sql.Row, 
a *Receipt) error {\n\terr := row.Scan(&a.RCPTID, &a.PRCPTID, &a.BID, &a.TCID, &a.PMTID, &a.DEPID, &a.DID, &a.RAID, &a.Dt, &a.DocNo, &a.Amount, &a.AcctRuleReceive, &a.ARID, &a.AcctRuleApply, &a.FLAGS, &a.Comment,\n\t\t&a.OtherPayorName, &a.CreateTS, &a.CreateBy, &a.LastModTime, &a.LastModBy)\n\tSkipSQLNoRowsError(&err)\n\treturn err\n}", "func (q *Quickbooks) CreateSalesReceipt(invoice SalesReceipt) (*SalesReceiptObject, error) {\n\tendpoint := fmt.Sprintf(\"/company/%s/salesreceipt\", q.RealmID)\n\n\tres, err := q.makePostRequest(endpoint, invoice)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer res.Body.Close()\n\n\tnewSalesReceipt := SalesReceiptObject{}\n\terr = json.NewDecoder(res.Body).Decode(&newSalesReceipt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &newSalesReceipt, nil\n}", "func AddUser(w http.ResponseWriter, r *http.Request) {\n\treqBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\tvar user datastructures.UserLogin\n\terr = json.Unmarshal(reqBody, &user)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\terr = database.AddUser(user)\n\tif err != nil {\n\t\tif err.Error() == \"User already exists\" {\n\t\t\tw.WriteHeader(http.StatusConflict)\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t}\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n}", "func (u *UserProfile) AddAddress(a Address) {\n\t// TODO: consider sending a request to dominos to update the user with this address.\n\tu.Addresses = append(u.Addresses, UserAddressFromAddress(a))\n}", "func NewReceipt(blockRoot []byte, failed bool, cumulativeGasUsed *big.Int) *Receipt {\n\tr := &Receipt{PostState: bgmcommon.CopyBytes(blockRoot), CumulativeGasUsed: new(big.Int).Set(cumulativeGasUsed)}\n\tif failed {\n\t\tr.Status = ReceiptStatusFailed\n\t} else {\n\t\tr.Status = 
ReceiptStatusSuccessful\n\t}\n\treturn r\n}", "func (db userDatabase) AddPendingUser(name string, email string, template string, password string, summary string) error {\n\tcon, err := db.mysql.GetConnection()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer con.Close()\n\n\thasher := md5.New()\n\thasher.Write([]byte(password))\n\tcreds := hex.EncodeToString(hasher.Sum(nil))\n\n\t_, err = con.Exec(\"INSERT INTO users (name, email, gender, password, summary) VALUES(?, ?, ?, ?, ?)\",\n\t\tname, email, template, creds, summary)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func NewReceipt(barCodes []string) *Receipt {\n\tlineItems := map[string]*LineItem{}\n\tfor _, barCode := range barCodes {\n\t\tquantityCode := regexp.MustCompile(\"-[0-9]+$\").FindString(barCode)\n\t\tproductBarCode := barCode\n\t\tquantity := 1\n\t\tif quantityCode != \"\" {\n\t\t\tproductBarCode = strings.TrimSuffix(productBarCode, quantityCode)\n\t\t\tquantityCode = strings.TrimPrefix(quantityCode, \"-\")\n\t\t\tquantity, _ = strconv.Atoi(quantityCode)\n\t\t}\n\n\t\tif li, ok := lineItems[productBarCode]; !ok {\n\t\t\tlineItems[productBarCode] = NewLineItem(productBarCode, quantity)\n\t\t} else {\n\t\t\tli.Quantity += quantity\n\t\t}\n\t}\n\n\treturn &Receipt{LineItems: lineItems}\n}", "func (ec *Client) TransactionReceipt(ctx context.Context, txHash helper.Hash) (*types.Receipt, error) {\n\tvar r *types.Receipt\n\terr := ec.c.CallContext(ctx, &r, \"siot_getTransactionReceipt\", txHash)\n\tif err == nil && r != nil && len(r.PostState) == 0 {\n\t\treturn nil, fmt.Errorf(\"server returned receipt without post state\")\n\t}\n\treturn r, err\n}", "func (u *user) AddReputation(amount int) {\n\tu.reputation.Add(amount)\n}", "func (u UserController) AddUser(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tvar user models.User\n\tif err := json.NewDecoder(r.Body).Decode(&user); err != nil || user.IsEmpty() {\n\t\tlog.Println(err)\n\t\thttp.Error(w, \"bad request\", 
http.StatusBadRequest)\n\t\treturn\n\t}\n\tid, err := u.userRepository.Create(user)\n\tif err != nil {\n\t\thttp.Error(w, \"service unavailable\", http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\thttp.Redirect(w, r, fmt.Sprintf(\"/users/%v\", id), http.StatusSeeOther)\n}", "func (r *CompanyPurchaseInvoiceLinesCollectionRequest) Add(ctx context.Context, reqObj *PurchaseInvoiceLine) (resObj *PurchaseInvoiceLine, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}", "func (rt *RecoveryTracker) AddRecoveryRequest(partitionID int32, fromOffset int64, toOffset int64) error {\n\tlog.WithField(\"partition_id\", partitionID).WithField(\"from_offset\", fromOffset).WithField(\"to_offset\", toOffset).Warn(\"recoverytracker: requesting partition recovery\")\n\n\trt.requestLock.Lock()\n\tdefer rt.requestLock.Unlock()\n\n\trequests := rt.recoveryRequests[partitionID]\n\tif requests == nil {\n\t\trequests = &RecoveryRequests{}\n\t\trt.recoveryRequests[partitionID] = requests\n\t}\n\n\t// if this overlaps an existing request, merge them in-place\n\toverlapFound := false\n\tfor _, request := range requests.Requests {\n\t\t// test for overlaps\n\t\tif fromOffset <= request.ToOffset && request.FromOffset <= toOffset {\n\t\t\tlog.WithField(\"partition_id\", partitionID).WithField(\"from_offset\", request.FromOffset).\n\t\t\t\tWithField(\"to_offset\", request.ToOffset).Info(\"recoverytracker: merging with existing recovery request\")\n\n\t\t\trequest.FromOffset = min(fromOffset, request.FromOffset)\n\t\t\trequest.ToOffset = max(toOffset, request.ToOffset)\n\t\t\toverlapFound = true\n\t\t}\n\t}\n\n\t// otherwise create and add a new request\n\tif !overlapFound {\n\t\trequest := &RecoveryRequest{\n\t\t\tPartitionID: partitionID,\n\t\t\tFromOffset: fromOffset,\n\t\t\tToOffset: toOffset,\n\t\t\tCreated: time.Now(),\n\t\t}\n\t\trequests.Requests = append(requests.Requests, request)\n\t}\n\treturn rt.sendRecoveryRequests(partitionID, requests)\n}", "func 
(ec *Client) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {\n\tec.Send(generalCost)\n\treturn ec.c.TransactionReceipt(ctx, txHash)\n}", "func (r *CompanyJournalLinesCollectionRequest) Add(ctx context.Context, reqObj *JournalLine) (resObj *JournalLine, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}", "func (c Controller) AddUser(w http.ResponseWriter, r *http.Request) {\n\trequest := &AddUserRequest{}\n\tif err := render.Bind(r, request); err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tuser, err := c.userService.AddUser(r.Context(), user.AddUserRequest(*request))\n\tif err != nil {\n\t\thttp.Error(w, \"could not add user\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\trender.JSON(w, r, user)\n}", "func (buo *BookingUpdateOne) AddUSERNUMBER(i int) *BookingUpdateOne {\n\tbuo.mutation.AddUSERNUMBER(i)\n\treturn buo\n}", "func (ticket *Ticket) AddLines(lines TicketLines) (int, error) {\n\n\tif ticket.IsAmendable() {\n\t\tticket.Lines = append(ticket.Lines, lines...)\n\t\treturn len(ticket.Lines), nil\n\t} else {\n\t\treturn -1, errors.New(\"Ticket with id: \" + strconv.FormatInt(ticket.Id, 10) + \" is not ammendable\")\n\t}\n}", "func (_ChpRegistry *ChpRegistrySession) AddPauser(account common.Address) (*types.Transaction, error) {\n\treturn _ChpRegistry.Contract.AddPauser(&_ChpRegistry.TransactOpts, account)\n}", "func (tb *timerBuilder) AddUserTimer(ti *persistence.TimerInfo, msBuilder mutableState) {\n\tif !tb.isLoadedUserTimers {\n\t\ttb.loadUserTimers(msBuilder)\n\t}\n\tseqNum := tb.localSeqNumGen.NextSeq()\n\ttimer := &timerDetails{\n\t\tTimerSequenceID: TimerSequenceID{VisibilityTimestamp: ti.ExpiryTime, TaskID: seqNum},\n\t\tTimerID: ti.TimerID,\n\t\tTaskCreated: ti.TaskID == TimerTaskStatusCreated}\n\ttb.insertTimer(timer)\n}", "func (_m *ReceiptStore) ProcessReceipt(msgBytes []byte) 
{\n\t_m.Called(msgBytes)\n}", "func (userManager *UserManager) AddUser(user *User) {\n\t//userManager.users = append(userManager.users, user)\n\n\tub, err := json.Marshal(*user)\n\n\tlog.Println(\"ADDUSER: \" + string(ub))\n\n\tif err == nil {\n\t\tuserManager.DBHelper.Put(user.ID, ub)\n\t} else {\n\t\tlog.Println(\"Error marshalling user \" + err.Error())\n\t}\n}", "func (puo *ProductUpdateOne) RemoveReceipt(r ...*Receipt) *ProductUpdateOne {\n\tids := make([]int, len(r))\n\tfor i := range r {\n\t\tids[i] = r[i].ID\n\t}\n\treturn puo.RemoveReceiptIDs(ids...)\n}", "func (bu *BookingUpdate) AddUSERNUMBER(i int) *BookingUpdate {\n\tbu.mutation.AddUSERNUMBER(i)\n\treturn bu\n}", "func AddUser(u User) {\n\tuserData.Insert(u)\n}", "func (client ManagementClient) PostUserRequestaudittrailSender(req *http.Request) (*http.Response, error) {\n return autorest.SendWithSender(client, req)\n }", "func (c CvpRestAPI) AddUser(user *SingleUser) error {\n\tif user == nil {\n\t\treturn errors.New(\"AddUser: can not add nil user\")\n\t}\n\tresp, err := c.client.Post(\"/user/addUser.do\", nil, user)\n\tif err != nil {\n\t\treturn errors.Errorf(\"AddUser: %s\", err)\n\t}\n\tvar addedUser *SingleUser\n\tif err = json.Unmarshal(resp, &addedUser); err != nil {\n\t\treturn errors.Errorf(\"AddUser: JSON unmarshal error: \\n%v\", err)\n\t}\n\tif err = addedUser.Error(); err != nil {\n\t\tvar retErr error\n\t\tif addedUser.ErrorCode == USER_ALREADY_EXISTS ||\n\t\t\taddedUser.ErrorCode == DATA_ALREADY_EXISTS {\n\t\t\tretErr = errors.Errorf(\"AddUser: user '%s' already exists\", addedUser.UserData.UserID)\n\t\t} else {\n\t\t\tretErr = errors.Errorf(\"AddUser: %s\", addedUser.String())\n\t\t}\n\t\treturn retErr\n\t}\n\treturn nil\n}", "func AddUser(w http.ResponseWriter, r *http.Request) {\n\tuser := &data.User{}\n\tif r.Body == nil {\n\t\thttp.Error(w, \"You must send data\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\terr := user.FromJSON(r.Body)\n\n\tif err != nil {\n\t\thttp.Error(w, 
err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t//validation\n\terr = user.Validate()\n\tif err != nil {\n\t\thttp.Error(\n\t\t\tw,\n\t\t\tfmt.Sprintf(\"Error validating user: %s\", err),\n\t\t\thttp.StatusBadRequest,\n\t\t)\n\t\treturn\n\t}\n\tuser = user.Clean()\n\tif err = user.SetPassword(); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\terr = data.Create(user)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tuser.ToJSON(w)\n}", "func (m *MgoUserManager) AddUserDetail(u *auth.User) (*auth.User, error) {\n\tu.Id = bson.NewObjectId()\n\terr := m.insertUser(u)\n\treturn u, err\n}", "func AddUserToEvent(euid humus.UID, userUid humus.UID, premium int) bool {\n\tvar us User\n\tus.SetUID(userUid)\n\tus.Premium = premium\n\tvar ev = Event{\n\t\tAttending: []*User{&us},\n\t}\n\tev.SetUID(euid)\n\t_, err := db.Mutate(context.Background(), humus.CreateMutation(&ev, humus.MutateSet))\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func AddUser(c echo.Context) error {\n\n\tvar body User\n\n\terr := c.Bind(&body)\n\tif err != nil {\n\t\treturn c.JSON(http.StatusBadRequest, ResponseError{Status: http.StatusBadRequest, Message: err.Error()})\n\t}\n\n\tif body.ID == nil {\n\t\treturn c.JSON(http.StatusBadRequest, ResponseError{Status: http.StatusBadRequest, Message: \"id empty\"})\n\t}\n\n\tuser := User{\n\t\tID: body.ID,\n\t\tName: body.Name,\n\t\tUsername: body.Username,\n\t\tPassword: body.Password,\n\t}\n\n\tusers = append(users, user)\n\n\treturn c.JSON(http.StatusCreated, user)\n}" ]
[ "0.65677184", "0.6518654", "0.54841864", "0.54648083", "0.5241363", "0.5195453", "0.51696557", "0.51665854", "0.5155161", "0.514535", "0.5085661", "0.50395", "0.5033886", "0.50278527", "0.50236046", "0.49896088", "0.49473062", "0.49466264", "0.4934199", "0.4933858", "0.49277717", "0.49178162", "0.49110535", "0.48808488", "0.48524132", "0.4837199", "0.48346403", "0.4829178", "0.48195553", "0.48178372", "0.48104066", "0.48004246", "0.47804353", "0.4772494", "0.4772494", "0.47606868", "0.47415143", "0.47279352", "0.47157457", "0.47056267", "0.4658072", "0.46374932", "0.4637224", "0.46236265", "0.46196753", "0.46186438", "0.46179762", "0.4617325", "0.4608181", "0.46018898", "0.46013984", "0.45947266", "0.45932904", "0.45750543", "0.4563937", "0.45568192", "0.4556364", "0.4555953", "0.45509872", "0.4516811", "0.449668", "0.44963774", "0.44869447", "0.44837824", "0.4475507", "0.44704184", "0.4465477", "0.4463221", "0.44540873", "0.44451898", "0.44427302", "0.44425023", "0.4440662", "0.4438296", "0.4436944", "0.44321966", "0.44298542", "0.44273606", "0.44270787", "0.44234312", "0.44221914", "0.44212633", "0.4414147", "0.44128585", "0.440925", "0.439212", "0.43920794", "0.43888792", "0.43767023", "0.4376626", "0.43739158", "0.43658012", "0.43578494", "0.4357741", "0.43533626", "0.43532634", "0.43478537", "0.43447736", "0.43287632", "0.43223354" ]
0.7855252
0
Contain Returns true if slice contains string.
func Contain(list []string, str string) bool { for _, s := range list { if s == str { return true } } return false }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (slice StringSlice) Contains(str string) bool {\n\tfor _, iStr := range slice {\n\t\tif iStr == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (t *StringSlice) Contains(s string) bool {\n\treturn t.Index(s) > -1\n}", "func Contains(slice []string, s string) bool {\n\tfor _, elem := range slice {\n\t\tif elem == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func ContainsInSlice(s []string, str string) bool {\n\tfor _, val := range s {\n\t\tif val == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringSliceContains(slice []string, elem string) bool {\n\tfor _, v := range slice {\n\t\tif v == elem {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringSliceContains(slice []string, str string) bool {\n\tfor _, s := range slice {\n\t\tif s == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func ContainsString(slice []string, contains string) bool {\n\tfor _, value := range slice {\n\t\tif value == contains {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func Contains(slice []string, value string) bool {\n\treturn ArrayContains(slice, value)\n}", "func containsString(slice []string, s string) bool {\n\tfor _, item := range slice {\n\t\tif item == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func containsString(slice []string, s string) bool {\n\tfor _, item := range slice {\n\t\tif item == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func containsString(slice []string, s string) bool {\n\tfor _, item := range slice {\n\t\tif item == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func containsString(slice []string, s string) bool {\n\tfor _, item := range slice {\n\t\tif item == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func containsString(slice []string, s string) bool {\n\tfor _, item := range slice {\n\t\tif item == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func containsString(slice []string, s string) bool {\n\tfor _, item 
:= range slice {\n\t\tif item == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func containsString(slice []string, s string) bool {\n\tfor _, item := range slice {\n\t\tif item == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func containsString(slice []string, s string) bool {\n\tfor _, item := range slice {\n\t\tif item == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func containsString(slice []string, s string) bool {\n\tfor _, item := range slice {\n\t\tif item == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func containsString(slice []string, s string) bool {\n\tfor _, item := range slice {\n\t\tif item == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func containsString(slice []string, s string) bool {\n\tfor _, item := range slice {\n\t\tif item == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func containsString(slice []string, s string) bool {\n\tfor _, item := range slice {\n\t\tif item == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func containsString(slice []string, s string) bool {\n\tfor _, item := range slice {\n\t\tif item == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func containsString(slice []string, s string) bool {\n\tfor _, item := range slice {\n\t\tif item == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (ss *StringSet) Contain(s string) bool {\n\t_, exist := ss.set[s]\n\treturn exist\n}", "func sliceContainsString(s string, sl []string) bool {\n\tfor _, v := range sl {\n\t\tif v == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func IsSliceContainsString(search string, strSlice ...string) bool {\n\tfor _, str := range strSlice {\n\t\tif strings.EqualFold(str, search) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (s StringSlice) Contains(item string) bool {\n\tfor _, v := range s {\n\t\tif v == item {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringSliceContains(s []string, val string) bool 
{\n\tif s != nil && val != \"\" {\n\t\tfor _, v := range s {\n\t\t\tif val == v {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func StringSliceContains(list []string, s string) bool {\n\tfor _, v := range list {\n\t\tif v == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringSliceContains(list []string, s string) bool {\n\tfor _, v := range list {\n\t\tif v == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func ContainSlice(slice []string, value string) bool {\n\tfor _, a := range slice {\n\t\tif a == value {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func SliceContains(s []string, value string) bool {\n\tfor _, v := range s {\n\t\tif v == value {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func ContainsString(slice []string, value string) bool {\n\tfor _, v := range slice {\n\t\tif v == value {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func Contains(slice []string, element string) bool {\n\tfor _, v := range slice {\n\t\tif v == element {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func IsInStringSlice(slice []string, search string) bool {\n\tfor _, v := range slice {\n\t\tif v == search {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func containsString(slice []string, element string) bool {\n\treturn posString(slice, element) != -1\n}", "func SliceContainsString(sl []string, st string) bool {\n\tfor _, s := range sl {\n\t\tif s == st {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func Contains(slice []string, val string) bool {\n\tfor _, item := range slice {\n\t\tif item == val {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func ContainsString(slice []string, value string) bool {\n\tfor _, v := range slice {\n\t\tif value == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func contains(slice []string, el string) bool {\n\tfor _, a := range slice {\n\t\tif a == el {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringSliceContains(s 
[]string, e string) bool {\n\tfor _, a := range s {\n\t\tif a == e {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func containsString(s string, slice []string) bool {\n\tfor _, s2 := range slice {\n\t\tif s2 == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringSliceContains(slice []string, value string) bool {\n\tinterfaceSlice := make([]interface{}, len(slice))\n\tfor _, item := range slice {\n\t\tvar interfaceItem interface{} = item\n\t\tinterfaceSlice = append(interfaceSlice, interfaceItem)\n\t}\n\tvar interfaceValue interface{} = value\n\treturn InterfaceSliceContains(interfaceSlice, interfaceValue)\n}", "func stringSliceContains(ss []string, s string) bool {\n\tfor _, v := range ss {\n\t\tif v == s {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func sliceContains(slice []string, needle string) bool {\n\tfor _, str := range slice {\n\t\tif str == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func SliceContains(slice []string, needle string) bool {\n\tfor _, s := range slice {\n\t\tif s == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func ContainString(s []string, target string) bool {\n\tfor _, v := range s {\n\t\tif v == target {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func contains(slice []string, sa string) bool {\n\tfor _, sb := range slice {\n\t\tif sa == sb {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func ContainsString(slice []string, needle string) bool {\n\tfor _, s := range slice {\n\t\tif s == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func IsSliceContainsStr(sl []string, str string) bool {\n\tstr = strings.ToLower(str)\n\tfor _, s := range sl {\n\t\tif strings.ToLower(s) == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func stringSliceContains(strings []string, s string) bool {\n\treturn indexOfStringSlice(strings, s) != -1\n}", "func sliceContains(slice []string, item string) bool {\n\tfor _, sliceItem := range slice 
{\n\t\tif sliceItem == item {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func Contains(s []string, str string) bool {\n\tfor _, v := range s {\n\t\tif v == str {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func StringSliceContains(haystack []string, needle string) bool {\n\tfor _, str := range haystack {\n\t\tif str == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func str_is_in_slice(slice []string, str string) bool {\n\tfor _, s := range slice {\n\t\tif s == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (arr StringArray) Contains(v string) bool {\n\treturn arr.IndexOf(v) > -1\n}", "func sliceContains(slice []string, values ...string) bool {\n\tfor _, s := range slice {\n\t\tfor _, v := range values {\n\t\t\tif strings.EqualFold(strings.TrimSpace(s), strings.TrimSpace(v)) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func sliceContains(haystack []string, needle string) bool {\n\tfor _, s := range haystack {\n\t\tif s == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func InSlice(v string, sl []string) bool {\n\tfor _, vv := range sl {\n\t\tif vv == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func SliceContainsString(list []string, a string) bool {\n\tsort.Strings(list)\n\ti := sort.SearchStrings(list, a)\n\treturn (i < len(list) && list[i] == a)\n}", "func SliceContainsString(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func ContainsString(slice []string, s string, modifier func(s string) string) bool {\n\tfor _, item := range slice {\n\t\tif item == s {\n\t\t\treturn true\n\t\t}\n\t\tif modifier != nil && modifier(item) == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringInSlice(slice []string, element string) bool {\n\tfor _, existingElement := range slice {\n\t\tif existingElement == element {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (s 
Slice) Contains(v uuid.UUID) bool {\n\tfor _, i := range s {\n\t\tif i == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func sliceContains(haystack []string, needle string) bool {\n\tfor _, e := range haystack {\n\t\tif e == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func Contains(a []string, s string) bool {\n\tif len(a) == 0 {\n\t\treturn false\n\t}\n\treturn Index(a, s) >= 0\n}", "func containsString(slice []string, element string) bool {\n\treturn !(posString(slice, element) == -1)\n}", "func containsString(slice []string, element string) bool {\n\treturn !(posString(slice, element) == -1)\n}", "func containsString(slice []string, element string) bool {\n\treturn !(posString(slice, element) == -1)\n}", "func containsString(slice []string, element string) bool {\n\treturn !(posString(slice, element) == -1)\n}", "func containsString(slice []string, element string) bool {\n\treturn !(posString(slice, element) == -1)\n}", "func containsString(slice []string, element string) bool {\n\treturn !(posString(slice, element) == -1)\n}", "func InStringSlice(ss []string, str string) bool {\n\tfor _, s := range ss {\n\t\tif strings.EqualFold(s, str) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func Contains(ss []string, s string) bool {\n\treturn Index(ss, s) != -1\n}", "func InStringSlice(s string, arr []string) bool {\n\tfor _, v := range arr {\n\t\tif v == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func contains(s []string, str string) bool {\n\tfor _, v := range s {\n\t\tif v == str {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func substringContainedInSlice(str string, substrs []string) bool {\n\tfor _, s := range substrs {\n\t\tif strings.Contains(str, s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func Contains(slice []string, item string) bool {\n\tset := make(map[string]struct{}, len(slice))\n\tfor _, s := range slice {\n\t\tset[s] = struct{}{}\n\t}\n\n\t_, ok := set[item]\n\treturn ok\n}", 
"func StringInSlice(str string, slice []string) bool {\n\tfor _, s := range slice {\n\t\tif s == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func containsString(s []string, e string) bool {\n\treturn sliceIndex(s, e) > -1\n}", "func contains(s []string, str string) bool {\n\tfor _, v := range s {\n\t\tif v == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (ss *StringSet) Contains(aString string) bool {\n\t_, ok := ss.members[aString]\n\treturn ok\n}", "func InStringSlice(list []string, str string) bool {\n\tfor _, item := range list {\n\t\tif item == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func InStringSlice(list []string, str string) bool {\n\tfor _, item := range list {\n\t\tif item == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func Contains(slice, val interface{}) bool {\n\tfn, ok := containsOf(slice, val)\n\tif fn == nil {\n\t\tpanic(\"slice is not supported slice type\")\n\t}\n\tif !ok {\n\t\tpanic(\"val is not the same type as slice\")\n\t}\n\n\tsptr := noescape(ptrOf(slice))\n\tvptr := noescape(ptrOf(val))\n\treturn fn(sptr, vptr)\n}", "func isInStringSlice(x string, elements []string) bool {\n\tfor _, elem := range elements {\n\t\tif elem == x {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (c *Comparator) contains(s []string, str string) bool {\n\tfor _, v := range s {\n\t\tif v == str {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func InSlice(a string, slice []string) bool {\n\tfor _, b := range slice {\n\t\tif a == b {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (gdb *generalDatabase) IsStringInSlice(needle string, haystack []string) bool {\n\tfor _, s := range haystack {\n\t\tif s == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func contains(slice []string, entry string) bool {\n\tfor _, element := range slice {\n\t\tif element == entry {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func IsInSlice(slice []rune, val rune) 
bool {\n\tfor _, item := range slice {\n\t\tif item == val {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringInSlice(a string, slice []string) bool {\n\tfor _, b := range slice {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func Contain(s interface{}, v interface{}) bool {\n\tswitch slice := s.(type) {\n\tcase []string:\n\t\tfor _, val := range slice {\n\t\t\tif val == v {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\tcase []int:\n\t\tfor _, val := range slice {\n\t\t\tif val == v {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func StringSliceContains(strSlice *[]string, value string) bool {\n\tif strSlice == nil {\n\t\treturn false\n\t} else {\n\t\tfor _, v := range *strSlice {\n\t\t\tif strings.ToLower(v) == strings.ToLower(value) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\treturn false\n\t}\n}", "func InSlice(haystack []string, needle string) bool {\n\treturn SliceIndex(haystack, needle) != -1\n}", "func StringInSlice(s string, sl []string) bool {\n\tfor _, val := range sl {\n\t\tif s == val {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func contains(str string, arr []string) bool{\n\tfor _, a := range arr {\n\t\tif a == str {\n\t\t\t return true\n\t\t}\n\t}\n\treturn false\n}", "func Contains(arr []string, s string) bool {\n\tfor _, el := range arr {\n\t\tif el == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringInSlice(a string, slice []string) bool {\n\tfor _, b := range slice {\n\t\tif a == b {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func InStringSlice(str string, strSli []string) bool {\n\tfor _, v := range strSli {\n\t\tif str == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func inStringSlice(ss []string, str string) bool {\n\tfor _, s := range ss {\n\t\tif strings.EqualFold(s, str) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (s RuneSlice) Contains(value rune) bool {\n\tfor _, v := range s {\n\t\tif v == value {\n\t\t\treturn 
true\n\t\t}\n\t}\n\n\treturn false\n}" ]
[ "0.783324", "0.76756775", "0.76235545", "0.74297565", "0.7393863", "0.73772204", "0.7371275", "0.7368948", "0.72871715", "0.72871715", "0.72871715", "0.72871715", "0.72871715", "0.72871715", "0.72871715", "0.72871715", "0.72871715", "0.72871715", "0.72871715", "0.72871715", "0.72871715", "0.72871715", "0.71999377", "0.7194989", "0.71826386", "0.7165759", "0.71588486", "0.71499884", "0.71499884", "0.714888", "0.71143425", "0.71048117", "0.7103377", "0.70727813", "0.70600706", "0.70589215", "0.7051512", "0.70493525", "0.7045766", "0.7044146", "0.70335764", "0.70239013", "0.7016415", "0.7001521", "0.69740605", "0.6966919", "0.69510293", "0.69464594", "0.6943314", "0.6943087", "0.69211537", "0.69165653", "0.6901709", "0.68997145", "0.687773", "0.6848114", "0.68406504", "0.6830548", "0.6824135", "0.6823551", "0.6816652", "0.67965347", "0.6786435", "0.67853326", "0.6783258", "0.67746145", "0.67746145", "0.67746145", "0.67746145", "0.67746145", "0.67746145", "0.6773534", "0.6765866", "0.67640954", "0.6748016", "0.67459285", "0.67459047", "0.672183", "0.67193305", "0.6719183", "0.67046463", "0.6696988", "0.6696988", "0.6689679", "0.6687749", "0.66843617", "0.66777384", "0.66683346", "0.6657627", "0.66508526", "0.6644377", "0.6633669", "0.6629834", "0.66292155", "0.6628934", "0.6621393", "0.661803", "0.66126585", "0.66031295", "0.6600207", "0.65923923" ]
0.0
-1
MergeUnique Merges `source` string slice into `dest` and returns result. Inserts from `source` only when `dest` does not `Contain` given string.
func MergeUnique(dest, source []string) []string { for _, str := range source { if !Contain(dest, str) { dest = append(dest, str) } } return dest }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func MergeAndDeduplicateSlice(src []string, target []string) []string {\n\tm := make(map[string]bool)\n\tfor i := range src {\n\t\tm[src[i]] = true\n\t}\n\n\tfor i := range target {\n\t\tif _, ok := m[target[i]]; !ok {\n\t\t\tsrc = append(src, target[i])\n\t\t}\n\t}\n\n\treturn src\n}", "func StringUniqueAppend(slice []string, s string) []string {\n\treturn strings.UniqueAppend(slice, s)\n}", "func DedupStrings(src []string) []string {\n\tm := make(map[string]struct{}, len(src))\n\tdst := make([]string, 0, len(src))\n\n\tfor _, v := range src {\n\t\t// Skip empty items\n\t\tif len(v) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\t// Skip duplicates\n\t\tif _, ok := m[v]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tm[v] = struct{}{}\n\t\tdst = append(dst, v)\n\t}\n\n\treturn dst\n}", "func MergeUnique(left, right []string) []string {\n\treturn CollectVariety(left, right, GetUnique, GetUnique, GetUnique)\n}", "func MergeStringSlices(slice1 []string, slice2 []string) []string {\n\tfor _, item := range slice2 {\n\t\tif !IsStringPresent(slice1, item) {\n\t\t\tslice1 = append(slice1, item)\n\t\t}\n\t}\n\treturn slice1\n}", "func AppendUniqueSlices(a, b []string) []string {\n\tfor _, e := range a {\n\t\tif !SliceContainsString(e, b) {\n\t\t\tb = append(b, e)\n\t\t}\n\t}\n\treturn b\n}", "func appendUnique(s []string, e string) []string {\n\tif !contains(s, e) {\n\t\treturn append(s, e)\n\t}\n\treturn s\n}", "func (c StringArrayCollection) Merge(i interface{}) Collection {\n\tm := i.([]string)\n\tvar d = make([]string, len(c.value))\n\tcopy(d, c.value)\n\n\tfor i := 0; i < len(m); i++ {\n\t\texist := false\n\t\tfor j := 0; j < len(d); j++ {\n\t\t\tif d[j] == m[i] {\n\t\t\t\texist = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !exist {\n\t\t\td = append(d, m[i])\n\t\t}\n\t}\n\n\treturn StringArrayCollection{\n\t\tvalue: d,\n\t}\n}", "func AddStringIfMissing(slice []string, s string) (bool, []string) {\n\tfor _, item := range slice {\n\t\tif item == s {\n\t\t\treturn false, 
slice\n\t\t}\n\t}\n\treturn true, append(slice, s)\n}", "func appendIfMissing(slice []string, s string) []string {\n\tfor _, e := range slice {\n\t\tif e == s {\n\t\t\treturn slice\n\t\t}\n\t}\n\treturn append(slice, s)\n}", "func mergeTags(existing string, tags []string) string {\n\tif existing == \"\" {\n\t\treturn strings.Join(tags, \",\")\n\t}\n\told := strings.Split(existing, \",\")\n\tvar merged []string\n\tfor _, o := range old {\n\t\tfound := false\n\t\tfor _, tag := range tags {\n\t\t\tif tag == o {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tmerged = append(merged, o)\n\t\t}\n\t}\n\tfor _, tag := range tags {\n\t\tfound := false\n\t\tfor _, o := range merged {\n\t\t\tif tag == o {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tmerged = append(merged, tag)\n\t\t}\n\t}\n\treturn strings.Join(merged, \",\")\n}", "func CompareSliceStrU(s1, s2 []string) bool {\n\tif len(s1) != len(s2) {\n\t\treturn false\n\t}\n\n\tfor i := range s1 {\n\t\tfor j := len(s2) - 1; j >= 0; j-- {\n\t\t\tif s1[i] == s2[j] {\n\t\t\t\ts2 = append(s2[:j], s2[j+1:]...)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif len(s2) > 0 {\n\t\treturn false\n\t}\n\treturn true\n}", "func (dst *Hosts) Merge(src Hosts) {\n\tif dst == nil || len(src) == 0 {\n\t\treturn\n\t}\n\n\tcopied := *dst\n\tcopied = append(copied, src...)\n\n\tregistry := map[string]int{}\n\tfor i := len(copied); i > 0; i-- {\n\t\tregistry[copied[i-1].Name] = i - 1\n\t}\n\tunique := copied[:0]\n\tfor i, host := range copied {\n\t\torigin := registry[host.Name]\n\t\tif i == origin {\n\t\t\tunique = append(unique, host)\n\t\t\tcontinue\n\t\t}\n\t\tunique[origin].Merge(host)\n\t}\n\n\t*dst = unique\n}", "func mergeAlternately(word1 string, word2 string) string {\n\tvar buf bytes.Buffer\n\tfor i := range word1 {\n\t\tbuf.WriteByte(word1[i])\n\t\tif i < len(word2) {\n\t\t\tbuf.WriteByte(word2[i])\n\t\t}\n\t}\n\n\tif len(word1) < len(word2) 
{\n\t\tbuf.WriteString(word2[len(word1):])\n\t}\n\treturn buf.String()\n}", "func appendIfMissing(slice []string, s string) ([]string, bool) {\n\tfor _, ele := range slice {\n\t\tif ele == s {\n\t\t\treturn slice, false\n\t\t}\n\t}\n\treturn append(slice, s), true\n}", "func UniqueAppend(orig []string, add ...string) []string {\n\treturn append(orig, NewUniqueElements(orig, add...)...)\n}", "func MergeStringSlices(a []string, b []string) []string {\n\tset := sets.NewString(a...)\n\tset.Insert(b...)\n\treturn set.UnsortedList()\n}", "func mergeString(a, b string) string {\n\tif a != \"\" {\n\t\treturn a\n\t}\n\n\treturn b\n}", "func MergeSortedStrings(n ...[]string) []string {\n\tvar result []string\n\tif len(n) == 0 {\n\t\treturn nil\n\t} else if len(n) == 1 {\n\t\t// Special case. Merge single slice with a nil slice, to remove any\n\t\t// duplicates from the single slice.\n\t\treturn MergeSortedStrings(n[0], nil)\n\t}\n\n\tvar maxSize int\n\tfor _, a := range n {\n\t\tif len(a) > maxSize {\n\t\t\tmaxSize = len(a)\n\t\t}\n\t}\n\tresult = make([]string, 0, maxSize) // This will likely be too small but it's a start.\n\n\tidxs := make([]int, len(n)) // Indexes we've processed.\n\tvar j int // Index we currently think is minimum.\n\n\tfor {\n\t\tj = -1\n\n\t\t// Find the smallest minimum in all slices.\n\t\tfor i := 0; i < len(n); i++ {\n\t\t\tif idxs[i] >= len(n[i]) {\n\t\t\t\tcontinue // We have completely drained all values in this slice.\n\t\t\t} else if j == -1 {\n\t\t\t\t// We haven't picked the minimum value yet. Pick this one.\n\t\t\t\tj = i\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// It this value key is lower than the candidate.\n\n\t\t\tif n[i][idxs[i]] < n[j][idxs[j]] {\n\t\t\t\tj = i\n\t\t\t} else if n[i][idxs[i]] == n[j][idxs[j]] {\n\t\t\t\t// Duplicate value. 
Throw it away.\n\t\t\t\tidxs[i]++\n\t\t\t}\n\n\t\t}\n\n\t\t// We could have drained all of the values and be done...\n\t\tif j == -1 {\n\t\t\tbreak\n\t\t}\n\n\t\t// First value to just append it and move on.\n\t\tif len(result) == 0 {\n\t\t\tresult = append(result, n[j][idxs[j]])\n\t\t\tidxs[j]++\n\t\t\tcontinue\n\t\t}\n\n\t\t// Append the minimum value to results if it's not a duplicate of\n\t\t// the existing one.\n\n\t\tif result[len(result)-1] < n[j][idxs[j]] {\n\t\t\tresult = append(result, n[j][idxs[j]])\n\t\t} else if result[len(result)-1] == n[j][idxs[j]] {\n\t\t\t// Duplicate so drop it.\n\t\t} else {\n\t\t\tpanic(\"value being merged out of order.\")\n\t\t}\n\n\t\tidxs[j]++\n\t}\n\treturn result\n}", "func (s StringSet) Union(other StringSet) StringSet {\n\tresult := make(StringSet)\n\tfor v := range s {\n\t\tresult[v] = struct{}{}\n\t}\n\tfor v := range other {\n\t\tresult[v] = struct{}{}\n\t}\n\treturn result\n}", "func (a *StringArray) Merge(b *StringArray) {\n\tif a.Len() == 0 {\n\t\t*a = *b\n\t\treturn\n\t}\n\n\tif b.Len() == 0 {\n\t\treturn\n\t}\n\n\t// Normally, both a and b should not contain duplicates. Due to a bug in older versions, it's\n\t// possible stored blocks might contain duplicate values. 
Remove them if they exists before\n\t// merging.\n\t// a = a.Deduplicate()\n\t// b = b.Deduplicate()\n\n\tif a.MaxTime() < b.MinTime() {\n\t\ta.Timestamps = append(a.Timestamps, b.Timestamps...)\n\t\ta.Values = append(a.Values, b.Values...)\n\t\treturn\n\t}\n\n\tif b.MaxTime() < a.MinTime() {\n\t\tvar tmp StringArray\n\t\ttmp.Timestamps = append(b.Timestamps, a.Timestamps...)\n\t\ttmp.Values = append(b.Values, a.Values...)\n\t\t*a = tmp\n\t\treturn\n\t}\n\n\tout := NewStringArrayLen(a.Len() + b.Len())\n\ti, j, k := 0, 0, 0\n\tfor i < len(a.Timestamps) && j < len(b.Timestamps) {\n\t\tif a.Timestamps[i] < b.Timestamps[j] {\n\t\t\tout.Timestamps[k] = a.Timestamps[i]\n\t\t\tout.Values[k] = a.Values[i]\n\t\t\ti++\n\t\t} else if a.Timestamps[i] == b.Timestamps[j] {\n\t\t\tout.Timestamps[k] = b.Timestamps[j]\n\t\t\tout.Values[k] = b.Values[j]\n\t\t\ti++\n\t\t\tj++\n\t\t} else {\n\t\t\tout.Timestamps[k] = b.Timestamps[j]\n\t\t\tout.Values[k] = b.Values[j]\n\t\t\tj++\n\t\t}\n\t\tk++\n\t}\n\n\tif i < len(a.Timestamps) {\n\t\tn := copy(out.Timestamps[k:], a.Timestamps[i:])\n\t\tcopy(out.Values[k:], a.Values[i:])\n\t\tk += n\n\t} else if j < len(b.Timestamps) {\n\t\tn := copy(out.Timestamps[k:], b.Timestamps[j:])\n\t\tcopy(out.Values[k:], b.Values[j:])\n\t\tk += n\n\t}\n\n\ta.Timestamps = out.Timestamps[:k]\n\ta.Values = out.Values[:k]\n}", "func StringSliceExtractUnique(strSlice []string) (result []string) {\n\tif strSlice == nil {\n\t\treturn []string{}\n\t} else if len(strSlice) <= 1 {\n\t\treturn strSlice\n\t} else {\n\t\tfor _, v := range strSlice {\n\t\t\tif !StringSliceContains(&result, v) {\n\t\t\t\tresult = append(result, v)\n\t\t\t}\n\t\t}\n\n\t\treturn result\n\t}\n}", "func (us *UniqueStrings) Add(strings ...string) {\n\tfor _, s := range strings {\n\t\tif _, ok := us.values[s]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tif us.values == nil {\n\t\t\tus.values = map[string]struct{}{}\n\t\t}\n\t\tus.values[s] = struct{}{}\n\t\tus.result = append(us.result, s)\n\t}\n}", "func 
(m *mergeQuerier) mergeDistinctStringSlice(f stringSliceFunc) ([]string, storage.Warnings, error) {\n\tvar jobs = make([]interface{}, len(m.tenantIDs))\n\n\tfor pos := range m.tenantIDs {\n\t\tjobs[pos] = &stringSliceFuncJob{\n\t\t\tquerier: m.queriers[pos],\n\t\t\ttenantID: m.tenantIDs[pos],\n\t\t}\n\t}\n\n\trun := func(ctx context.Context, jobIntf interface{}) error {\n\t\tjob, ok := jobIntf.(*stringSliceFuncJob)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"unexpected type %T\", jobIntf)\n\t\t}\n\n\t\tvar err error\n\t\tjob.result, job.warnings, err = f(ctx, job.querier)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"error querying %s %s\", rewriteLabelName(defaultTenantLabel), job.tenantID)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\terr := concurrency.ForEach(m.ctx, jobs, maxConcurrency, run)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// aggregate warnings and deduplicate string results\n\tvar warnings storage.Warnings\n\tresultMap := make(map[string]struct{})\n\tfor _, jobIntf := range jobs {\n\t\tjob, ok := jobIntf.(*stringSliceFuncJob)\n\t\tif !ok {\n\t\t\treturn nil, nil, fmt.Errorf(\"unexpected type %T\", jobIntf)\n\t\t}\n\n\t\tfor _, e := range job.result {\n\t\t\tresultMap[e] = struct{}{}\n\t\t}\n\n\t\tfor _, w := range job.warnings {\n\t\t\twarnings = append(warnings, errors.Wrapf(w, \"warning querying %s %s\", rewriteLabelName(defaultTenantLabel), job.tenantID))\n\t\t}\n\t}\n\n\tvar result = make([]string, 0, len(resultMap))\n\tfor e := range resultMap {\n\t\tresult = append(result, e)\n\t}\n\tsort.Strings(result)\n\treturn result, warnings, nil\n}", "func merge(source ...[]string) []string {\n\tm := make(map[string]struct{}, len(source)*10)\n\tfor _, list := range source {\n\t\tfor _, item := range list {\n\t\t\tm[item] = struct{}{}\n\t\t}\n\t}\n\tdst := make([]string, len(m))\n\tcnt := 0\n\tfor k := range m {\n\t\tdst[cnt] = k\n\t\tcnt += 1\n\t}\n\tsort.Strings(dst)\n\treturn dst\n}", "func AppendStringIfNotPresent(s string, ss []string) 
[]string {\n\tfor _, e := range ss {\n\t\tif e == s {\n\t\t\treturn ss\n\t\t}\n\t}\n\treturn append(ss, s)\n}", "func appendHostIfMissing(slice []string, s string) []string {\n\tfor _, ele := range slice {\n\t\tif ele == s {\n\t\t\treturn slice\n\t\t}\n\t}\n\treturn append(slice, s)\n}", "func StringSliceDelete(slice1 []string, slice2 []string) []string {\n\n\tvar diff []string\n\n\tfor _, s1 := range slice1 {\n\t\tfound := false\n\t\tfor _, s2 := range slice2 {\n\t\t\tif s1 == s2 {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\t// String not found. We add it to return slice\n\t\tif !found {\n\t\t\tdiff = append(diff, s1)\n\t\t}\n\t}\n\n\treturn diff\n}", "func StringSlicesUnion(one, two []string) []string {\n\tvar union []string\n\tunion = append(union, one...)\n\tunion = append(union, two...)\n\treturn OnlyUnique(union)\n}", "func Merge(aa []string, bb ...string) []string {\n\tcheck := make(map[string]int)\n\tres := make([]string, 0)\n\tdd := append(aa, bb...)\n\tfor _, val := range dd {\n\t\tcheck[val] = 1\n\t}\n\n\tfor letter, _ := range check {\n\t\tif letter == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tres = append(res, letter)\n\t}\n\n\tsort.Strings(res)\n\n\treturn res\n}", "func AppendIfMissing(slice []string, val string) []string {\n\tfor _, ele := range slice {\n\t\tif ele == val {\n\t\t\treturn slice\n\t\t}\n\t}\n\treturn append(slice, val)\n}", "func Union(s1, s2 string) string {\n\tvar intersect strings.Builder\n\tset := make(map[rune]bool)\n\tfor _, char := range s1 {\n\t\tif _, ok := set[char]; !ok {\n\t\t\tset[char] = true\n\t\t\tintersect.WriteRune(char)\n\t\t}\n\t}\n\tfor _, char := range s2 {\n\t\tif _, ok := set[char]; !ok {\n\t\t\tset[char] = true\n\t\t\tintersect.WriteRune(char)\n\t\t}\n\t}\n\treturn intersect.String()\n}", "func dupe(src []byte) []byte {\n\td := make([]byte, len(src))\n\tcopy(d, src)\n\treturn d\n}", "func Distinct(s string) string {\n\tvar ascii [256]bool\n\tvar nonascii map[rune]bool\n\treturn strings.Map(func(r rune) 
rune {\n\t\tif r < 0x80 {\n\t\t\tb := byte(r)\n\t\t\tif ascii[b] {\n\t\t\t\treturn -1\n\t\t\t}\n\t\t\tascii[b] = true\n\t\t} else {\n\t\t\tif nonascii == nil {\n\t\t\t\tnonascii = make(map[rune]bool)\n\t\t\t}\n\t\t\tif nonascii[r] {\n\t\t\t\treturn -1\n\t\t\t}\n\t\t\tnonascii[r] = true\n\t\t}\n\t\treturn r\n\t}, s)\n}", "func stringSliceOverlaps(left []string, right []string) bool {\n\tfor _, s := range left {\n\t\tfor _, t := range right {\n\t\t\tif s == t {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func Merge(target sgmt.MutableSegment, srcs ...sgmt.MutableSegment) error {\n\tsafeClosers := []io.Closer{}\n\tdefer func() {\n\t\tfor _, c := range safeClosers {\n\t\t\tc.Close()\n\t\t}\n\t}()\n\n\t// for each src\n\tfor _, src := range srcs {\n\t\t// get reader for `src`\n\t\treader, err := src.Reader()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// ensure readers are all closed.\n\t\treaderCloser := x.NewSafeCloser(reader)\n\t\tsafeClosers = append(safeClosers, readerCloser)\n\n\t\t// retrieve all docs known to the reader\n\t\tdIter, err := reader.AllDocs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// iterate over all known docs\n\t\tfor dIter.Next() {\n\t\t\td := dIter.Current()\n\t\t\t_, err := target.Insert(d)\n\t\t\tif err == nil || err == index.ErrDuplicateID {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\t// ensure no errors while iterating\n\t\tif err := dIter.Err(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// ensure no errors while closing reader\n\t\tif err := readerCloser.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// all good\n\treturn nil\n}", "func (s StringSet) AddSet(src StringSet) {\n\tfor str := range src {\n\t\ts[str] = struct{}{}\n\t}\n}", "func ConcatSlice(sliceToConcat []byte) string {\n\tstringRep := \"\"\n\n\tfor index := 0; index < len(sliceToConcat); index++ {\n\t\tstringRep = stringRep + string(sliceToConcat[index])\n\n\t\tif index+1 != len(sliceToConcat) 
{\n\t\t\tstringRep = stringRep + \"-\"\n\t\t}\n\t}\n\n\treturn stringRep\n}", "func Merge(dest string, input ...string) error {\n\tfor _, file := range input {\n\t\tstat, err := os.Stat(file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif stat.Mode().IsRegular() {\n\t\t\ttargetFilePath := filepath.Join(dest, stat.Name())\n\t\t\tif err := EnsureFile(targetFilePath); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbody, err := ioutil.ReadFile(file)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := ioutil.WriteFile(targetFilePath, body, 0600); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if stat.Mode().IsDir() {\n\t\t\tif err := filepath.Walk(file, func(path string, info os.FileInfo, err error) error {\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tstat, err := os.Stat(path)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif stat.Mode().IsRegular() {\n\t\t\t\t\tdestDir := filepath.Join(dest, filepath.Dir(path))\n\t\t\t\t\tif err := EnsureDir(destDir); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\tif err := copy.Copy(file, destDir); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func RemoveStringDuplicates(slice []string) []string {\n\treturnSlice := make([]string, 0)\n\tseen := make(map[string]struct{})\n\n\tfor _, s := range slice {\n\t\tif _, wasThere := seen[s]; !wasThere {\n\t\t\treturnSlice = append(returnSlice, s)\n\t\t\tseen[s] = struct{}{}\n\t\t}\n\t}\n\n\treturn returnSlice\n}", "func dedupStrings(s []string) []string {\n\tp := len(s) - 1\n\tif p <= 0 {\n\t\treturn s\n\t}\n\n\tfor i := p - 1; i >= 0; i-- {\n\t\tif s[p] != s[i] {\n\t\t\tp--\n\t\t\ts[p] = s[i]\n\t\t}\n\t}\n\n\treturn s[p:]\n}", "func Unique(ss []string) []string {\n\tr := make([]string, 0)\n\tfor _, s := range ss {\n\t\tif Search(s, r) == -1 {\n\t\t\tr = append(r, 
s)\n\t\t}\n\t}\n\n\treturn r\n}", "func makeUnique(src []string, maxLength int) []string {\n\tresult := make([]string, 0, maxLength)\n\tuniqueMap := make(map[string]struct{}, maxLength)\n\tfor _, v := range src {\n\t\tif _, ok := uniqueMap[v]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tuniqueMap[v] = struct{}{}\n\n\t\tresult = append(result, v)\n\t\tif len(result) >= maxLength {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn result\n}", "func (dst *Workers) Merge(src Workers) {\n\tif dst == nil || len(src) == 0 {\n\t\treturn\n\t}\n\n\tcopied := *dst\n\tcopied = append(copied, src...)\n\n\tregistry := map[string]int{}\n\tfor i := len(copied); i > 0; i-- {\n\t\tregistry[copied[i-1].Name] = i - 1\n\t}\n\tunique := copied[:0]\n\tfor i, worker := range copied {\n\t\torigin := registry[worker.Name]\n\t\tif i == origin {\n\t\t\tunique = append(unique, worker)\n\t\t\tcontinue\n\t\t}\n\t\tunique[origin].Merge(worker)\n\t}\n\n\t*dst = unique\n}", "func ConcatSlice(sliceToConcat []byte) string {\n\tvar dummy string\n\tfor index := 0; index < len(sliceToConcat)-1; index++ {\n\t\tdummy = dummy + string(sliceToConcat[index]) + \"-\"\n\t}\n\tdummy = dummy + string(sliceToConcat[len(sliceToConcat)-1])\n\treturn dummy\n}", "func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) {\n\ts := ss[i]\n\tsuffix := s[len(s)-prefixLen:]\n\tfor _, j := range prefixes[suffix] {\n\t\t// Empty strings mean \"already used.\" Also avoid merging with self.\n\t\tif ss[j] == \"\" || i == j {\n\t\t\tcontinue\n\t\t}\n\t\tif *v {\n\t\t\tfmt.Fprintf(os.Stderr, \"%d-length overlap at (%4d,%4d): %q and %q share %q\\n\",\n\t\t\t\tprefixLen, i, j, ss[i], ss[j], suffix)\n\t\t}\n\t\tss[i] += ss[j][prefixLen:]\n\t\tss[j] = \"\"\n\t\t// ss[i] has a new suffix, so merge again if possible.\n\t\t// Note: we only have to merge again at the same prefix length. 
Shorter\n\t\t// prefix lengths will be handled in the next iteration of crush's for loop.\n\t\t// Can there be matches for longer prefix lengths, introduced by the merge?\n\t\t// I believe that any such matches would by necessity have been eliminated\n\t\t// during substring removal or merged at a higher prefix length. For\n\t\t// instance, in crush(\"abc\", \"cde\", \"bcdef\"), combining \"abc\" and \"cde\"\n\t\t// would yield \"abcde\", which could be merged with \"bcdef.\" However, in\n\t\t// practice \"cde\" would already have been elimintated by removeSubstrings.\n\t\tmergeLabel(ss, i, prefixLen, prefixes)\n\t\treturn\n\t}\n}", "func (dst *Proxies) Merge(src Proxies) {\n\tif dst == nil || len(src) == 0 {\n\t\treturn\n\t}\n\n\tcopied := *dst\n\tcopied = append(copied, src...)\n\n\tregistry := map[string]int{}\n\tfor i := len(copied); i > 0; i-- {\n\t\tregistry[copied[i-1].Name] = i - 1\n\t}\n\tunique := copied[:0]\n\tfor i, proxy := range copied {\n\t\torigin := registry[proxy.Name]\n\t\tif i == origin {\n\t\t\tunique = append(unique, proxy)\n\t\t\tcontinue\n\t\t}\n\t\tunique[origin].Merge(proxy)\n\t}\n\n\t*dst = unique\n}", "func replaceUnique(list, from, to []string) []string {\n\tlist = slices.Clone(list)\n\tfor i, f := range from {\n\t\tj := slices.Index(list, f)\n\t\tif j == -1 {\n\t\t\tpanic(\"can't rename nonexistent column: \" + f)\n\t\t}\n\t\tif slices.Contains(list, to[i]) {\n\t\t\tpanic(\"can't rename to existing column: \" + to[i])\n\t\t}\n\t\tlist[j] = to[i]\n\t}\n\treturn list\n}", "func AppendIfMissingIgnoreCase(str string, suffix string, suffixes ...string) string {\n\treturn internalAppendIfMissing(str, suffix, true, suffixes...)\n}", "func (s String) Intersection(strings ...String) (intersection String) {\n\tintersection = s.Copy()\n\tfor key := range s {\n\t\tfor _, set := range append(strings, s) {\n\t\t\tif !set.Contains(key) {\n\t\t\t\tdelete(intersection, key)\n\t\t\t}\n\t\t}\n\t}\n\treturn intersection\n}", "func (il *IntList) 
JoinUnique(other *IntList) {\n // The algorithm here is stupid. Are there better ones?\n otherLast := other.Last()\n for otherIt := other.First(); otherIt != otherLast; otherIt = otherIt.Next() {\n contained := false\n value := otherIt.Value()\n last := il.Last()\n for it := il.First(); it != last; it = it.Next() {\n if it.Value() == value {\n contained = true\n break\n }\n }\n if !contained {\n il.Append(value)\n }\n }\n}", "func (s String) Union(strings ...String) (union String) {\n\tunion = s.Copy()\n\tfor _, set := range strings {\n\t\tfor key := range set {\n\t\t\tunion[key] = yes\n\t\t}\n\t}\n\treturn union\n}", "func AppendUniq(list []string, items ...string) []string {\n\tfor _, item := range items {\n\t\tshouldAdd := true\n\t\tfor _, v := range list {\n\t\t\tif v == item {\n\t\t\t\tshouldAdd = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif shouldAdd {\n\t\t\tlist = append(list, item)\n\t\t}\n\t}\n\n\treturn list\n}", "func DeduplicateSliceStably(items []string) []string {\n\tdata := make([]string, 0, len(items))\n\tdeduplicate := map[string]struct{}{}\n\tfor _, val := range items {\n\t\tif _, exists := deduplicate[val]; !exists {\n\t\t\tdeduplicate[val] = struct{}{}\n\t\t\tdata = append(data, val)\n\t\t}\n\t}\n\treturn data\n}", "func (s StringSet) Union(other StringSet) StringSet {\n\tresultSet := make(StringSet, len(s))\n\tfor val := range s {\n\t\tresultSet[val] = true\n\t}\n\n\tfor val := range other {\n\t\tresultSet[val] = true\n\t}\n\n\treturn resultSet\n}", "func union(left, right []string) []string {\n\tu := make([]string, len(left))\n\tcopy(u, left)\noutter:\n\tfor _, r := range right {\n\t\tfor _, l := range left {\n\t\t\tif l == r {\n\t\t\t\tcontinue outter\n\t\t\t}\n\t\t}\n\t\tu = append(u, r)\n\t}\n\treturn u\n}", "func (that *StrAnyMap) Merge(other *StrAnyMap) {\n\tthat.mu.Lock()\n\tdefer that.mu.Unlock()\n\tif that.data == nil {\n\t\tthat.data = other.MapCopy()\n\t\treturn\n\t}\n\tif other != that {\n\t\tother.mu.RLock()\n\t\tdefer 
other.mu.RUnlock()\n\t}\n\tfor k, v := range other.data {\n\t\tthat.data[k] = v\n\t}\n}", "func Deduplicate(input []string) []string {\n\tresult := []string{}\n\tseen := make(map[string]struct{})\n\tfor _, val := range input {\n\t\tif _, ok := seen[val]; !ok {\n\t\t\tresult = append(result, val)\n\t\t\tseen[val] = struct{}{}\n\t\t}\n\t}\n\treturn result\n}", "func RemoveDuplicatedStrings(slice []string) []string {\n\tresult := []string{}\n\n\tcheck := make(map[string]bool)\n\tfor _, element := range slice {\n\t\tcheck[element] = true\n\t}\n\n\tfor key := range check {\n\t\tresult = append(result, key)\n\t}\n\n\treturn result\n}", "func UniqueStrings(vs []string) (r []string) {\n\tm := map[string]struct{}{}\n\tvar ok bool\n\tfor _, v := range vs {\n\t\tif _, ok = m[v]; !ok {\n\t\t\tm[v] = struct{}{}\n\t\t\tr = append(r, v)\n\t\t}\n\t}\n\n\treturn\n}", "func appendUnique(slice []Term, item Term) []Term {\n\tfor _, c := range slice {\n\t\tif c == item {\n\t\t\treturn slice\n\t\t}\n\t}\n\n\treturn append(slice, item)\n}", "func MakeUnique(str string, pool []string) string {\n\tvar nb int\n\ttested := str\n\tfor tested == \"\" || IsIn(tested, pool...) 
{\n\t\tnb++\n\t\ttested = str + strconv.Itoa(nb)\n\t}\n\treturn tested\n}", "func Unique2(input string) bool {\n\ts := strings.Split(input, \"\")\n\tsort.Strings(s)\n\treturn uniqueAux(1, s)\n}", "func dedupStr(in []string) []string {\n\tsort.Strings(in)\n\n\tj := 0\n\tfor i := 1; i < len(in); i++ {\n\t\tif in[j] == in[i] {\n\t\t\tcontinue\n\t\t}\n\t\tj++\n\t\tin[j] = in[i]\n\t}\n\n\treturn in[:j+1]\n}", "func (c StringArrayCollection) Unique() Collection {\n\tvar d = make([]string, len(c.value))\n\tcopy(d, c.value)\n\tx := make([]string, 0)\n\tfor _, i := range d {\n\t\tif len(x) == 0 {\n\t\t\tx = append(x, i)\n\t\t} else {\n\t\t\tfor k, v := range x {\n\t\t\t\tif i == v {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif k == len(x)-1 {\n\t\t\t\t\tx = append(x, i)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn StringArrayCollection{\n\t\tvalue: x,\n\t}\n}", "func StringsHas(target []string, src string) bool {\n\tfor _, t := range target {\n\t\tif strings.TrimSpace(t) == src {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (u *Utils) UniqueStrings(input []string) []string {\n\tr := make([]string, 0, len(input))\n\tm := make(map[string]bool)\n\tfor _, val := range input {\n\t\tif _, ok := m[val]; !ok {\n\t\t\tm[val] = true\n\t\t\tr = append(r, val)\n\t\t}\n\t}\n\treturn r\n}", "func (v Values) Merge(src Values) Values {\n\tfor key, srcVal := range src {\n\t\tdestVal, found := v[key]\n\n\t\tsrcType := fmt.Sprintf(\"%T\", srcVal)\n\t\tdestType := fmt.Sprintf(\"%T\", destVal)\n\t\tmatch := srcType == destType\n\t\tvalidSrc := istable(srcVal)\n\t\tvalidDest := istable(destVal)\n\n\t\tif found && match && validSrc && validDest {\n\t\t\tdestMap := destVal.(Values)\n\t\t\tsrcMap := srcVal.(Values)\n\t\t\tdestMap.Merge(srcMap)\n\t\t} else {\n\t\t\tv[key] = srcVal\n\t\t}\n\t}\n\treturn v\n}", "func SanitizeDuplicates(b []string) []string {\n\tsz := len(b) - 1\n\tfor i := 0; i < sz; i++ {\n\t\tfor j := i + 1; j <= sz; j++ {\n\t\t\tif (b)[i] == ((b)[j]) {\n\t\t\t\t(b)[j] = 
(b)[sz]\n\t\t\t\t(b) = (b)[0:sz]\n\t\t\t\tsz--\n\t\t\t\tj--\n\t\t\t}\n\t\t}\n\t}\n\treturn b\n}", "func (t *Set) Merge(other *Set, prefix []byte) *Set {\n\tif other != nil {\n\t\tadder := func(key []byte) bool {\n\t\t\tt.Add(key)\n\t\t\treturn true\n\t\t}\n\t\tother.Iter(prefix, adder)\n\t}\n\treturn t\n}", "func MergeStrings(stringArray ...string) string {\n\n\tvar buffer bytes.Buffer\n\tfor _, v := range stringArray {\n\t\tbuffer.WriteString(v)\n\t}\n\treturn buffer.String()\n\n}", "func IncludeString(l []string, s string) []string {\n\ti := sort.Search(\n\t\tlen(l),\n\t\tfunc(i int) bool {\n\t\t\treturn l[i] >= s\n\t\t},\n\t)\n\tif i < len(l) && l[i] == s {\n\t\t// string is already in slice\n\t\treturn l\n\t}\n\tl = append(l, \"\")\n\tcopy(l[i+1:], l[i:])\n\tl[i] = s\n\treturn l\n}", "func uniqueStr2(in string) bool {\n\tfor i := 0; i < len(in); i++ {\n\t\tfor j := i + 1; j < len(in[i+1:]); j++ {\n\t\t\tif in[i] == in[j] {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}", "func combine(str1, str2 string) string {\n\tvar res string\n\tlen1 := len(str1)\n\tlen2 := len(str2)\n\t//mark the number of same chars\n\tvar sameNum int = 0\n\tfor len1 > 0 && sameNum < len2 {\n\t\tif str1[len1-1] == str2[sameNum] {\n\t\t\tlen1--\n\t\t\tsameNum++\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\t//combine str1 and str2\n\tres = str1[0:len1] + str2[sameNum:len2]\n\treturn res\n\n}", "func AppendIfMissing(slice []string, i string) []string {\n\tfor _, ele := range slice {\n\t\tif ele == i {\n\t\t\treturn slice\n\t\t}\n\t}\n\treturn append(slice, i)\n}", "func AppendIfMissing(slice []string, i string) []string {\n\tfor _, ele := range slice {\n\t\tif ele == i {\n\t\t\treturn slice\n\t\t}\n\t}\n\treturn append(slice, i)\n}", "func UniqueString(a []string) []string {\n\tr := make([]string, 0, len(a))\n\n\tsort.Strings(a)\n\n\tr = append(r, a[0])\n\ti := a[0]\n\n\tfor _, v := range a {\n\t\tif v != i {\n\t\t\tr = append(r, v)\n\t\t\ti = v\n\t\t}\n\t}\n\n\treturn r\n}", "func 
mapStringStringMergeFrom(dst, src *map[string]string) {\n\tif (src == nil) || (*src == nil) {\n\t\treturn\n\t}\n\n\tif *dst == nil {\n\t\t*dst = make(map[string]string)\n\t}\n\n\tfor key, value := range *src {\n\t\tif _, ok := (*dst)[key]; ok {\n\t\t\t// Such key already exists in dst\n\t\t\tcontinue\n\t\t}\n\n\t\t// No such a key in dst\n\t\t(*dst)[key] = value\n\t}\n}", "func merge(dst, src *unstructured.Unstructured) bool {\n\tdstNS := dst.GetLabels()[resourceLabelNamespace]\n\tsrcNS := src.GetLabels()[resourceLabelNamespace]\n\tif dstNS != srcNS {\n\t\treturn false\n\t}\n\n\tif dstResults, ok, _ := unstructured.NestedSlice(dst.UnstructuredContent(), \"results\"); ok {\n\t\tif srcResults, ok, _ := unstructured.NestedSlice(src.UnstructuredContent(), \"results\"); ok {\n\t\t\tdstResults = append(dstResults, srcResults...)\n\n\t\t\tif err := unstructured.SetNestedSlice(dst.UnstructuredContent(), dstResults, \"results\"); err == nil {\n\t\t\t\taddSummary(dst, src)\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (l *StrLinked) RemoveDupes() {\n\ttmp := make(tmp)\n\n\tl.head = remove(tmp.contains, l.head)\n}", "func Merge(dest interface{}, source interface{}) error {\n\topts := make([]func(*mergo.Config), 0)\n\n\t// lists are always overridden - we don't append merged lists since it generally makes things more complicated\n\topts = append(opts, mergo.WithOverride)\n\n\terr := mergo.Merge(dest, source, opts...)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\treturn nil\n}", "func (s *UpdaterSet) Merge(set UpdaterSet) error {\n\texists := make([]string, 0, len(set.set))\n\tfor n, _ := range set.set {\n\t\tif _, ok := s.set[n]; ok {\n\t\t\texists = append(exists, n)\n\t\t}\n\t}\n\n\tif len(exists) > 0 {\n\t\treturn ErrExists{exists}\n\t}\n\n\tfor n, u := range set.set {\n\t\ts.set[n] = u\n\t}\n\treturn nil\n}", "func removeDuplicates(stringSlices ...[]string) []string {\n\tuniqueMap := map[string]bool{}\n\n\tfor _, stringSlice := range 
stringSlices {\n\t\tfor _, str := range stringSlice {\n\t\t\tuniqueMap[str] = true\n\t\t}\n\t}\n\n\t// Create a slice with the capacity of unique items\n\t// This capacity make appending flow much more efficient\n\tresult := make([]string, 0, len(uniqueMap))\n\n\tfor key := range uniqueMap {\n\t\tresult = append(result, key)\n\t}\n\n\treturn result\n}", "func (t Tags) Merge(key string, value ...string) {\n\tfor _, v := range value {\n\t\tcurrent := t.GetAll(key)\n\t\tfound := false\n\t\tfor _, cv := range current {\n\t\t\tif v == cv {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tt.Add(key, v)\n\t\t}\n\t}\n}", "func StringsUnique(s []string) bool {\n\tsort.Strings(s)\n\n\tfor i := 0; i < len(s)-1; i++ {\n\t\tif s[i] == s[i+1] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func DedupInPlace(a *[]string) (dups int) {\n\tsz := len(*a)\n\tif 2 > sz {\n\t\treturn\n\t}\n\tlast := sz - 1\n\tfor i := 0; i < last; i++ {\n\t\ts := (*a)[i]\n\t\tfor j := last; j > i; j-- {\n\t\t\tif s == (*a)[j] { // found a dup - remove it\n\t\t\t\tdups++\n\t\t\t\tif j == last {\n\t\t\t\t\t(*a) = (*a)[:last]\n\t\t\t\t} else {\n\t\t\t\t\t(*a) = append((*a)[:j], (*a)[j+1:]...)\n\t\t\t\t}\n\t\t\t\tlast--\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func AppendStr(strs []string, str string) []string {\n\tfor _, s := range strs {\n\t\tif s == str {\n\t\t\treturn strs\n\t\t}\n\t}\n\treturn append(strs, str)\n}", "func (sm StringMap) Upsert(k, v string) {\n\tif av, existing := sm.Get(k); existing {\n\t\tav.SetValue(v)\n\t} else {\n\t\t*sm.orig = append(*sm.orig, NewStringKeyValue(k, v).orig)\n\t}\n}", "func concatUnique(collections ...[]string) []string {\n\tresultSet := make(map[string]struct{})\n\tfor _, c := range collections {\n\t\tfor _, i := range c {\n\t\t\tif _, ok := resultSet[i]; !ok {\n\t\t\t\tresultSet[i] = struct{}{}\n\t\t\t}\n\t\t}\n\t}\n\tresult := make([]string, 0, len(resultSet))\n\tfor k := range resultSet {\n\t\tresult = append(result, 
k)\n\t}\n\treturn result\n}", "func (s *SliceOfString) Concat(items []string) *SliceOfString {\n\ts.items = append(s.items, items...)\n\treturn s\n}", "func merge(new, dst *Range) bool {\n\tif new.End() < dst.Pos {\n\t\treturn false\n\t}\n\tif new.End() > dst.End() {\n\t\tdst.Size = new.Size\n\t} else {\n\t\tdst.Size += dst.Pos - new.Pos\n\t}\n\tdst.Pos = new.Pos\n\treturn true\n}", "func RemoveStringSliceCopy(slice []string, start, end int) []string {\n\tresult := make([]string, len(slice)-(end-start))\n\tat := copy(result, slice[:start])\n\tcopy(result[at:], slice[end:])\n\treturn result\n\n}", "func Unique(input string) bool {\n\tseen := make(map[rune]bool)\n\n\tfor _, r := range input {\n\t\t_, found := seen[r]\n\t\tif found {\n\t\t\treturn false\n\t\t}\n\t\tseen[r] = true\n\t}\n\n\treturn true\n}", "func (queryParametersBag) uniqueStringsSlice(in []string) []string {\n\tkeys := make(map[string]bool)\n\tout := make([]string, 0)\n\n\tfor _, entry := range in {\n\t\tif _, ok := keys[entry]; !ok {\n\t\t\tkeys[entry] = true\n\t\t\tout = append(out, entry)\n\t\t}\n\t}\n\n\treturn out\n}", "func common(s, o []rune) []rune {\n\tmax, min := s, o\n\tif len(max) < len(min) {\n\t\tmax, min = min, max\n\t}\n\tvar str []rune\n\tfor i, r := range min {\n\t\tif r != max[i] {\n\t\t\tbreak\n\t\t}\n\t\tif str == nil {\n\t\t\tstr = []rune{r}\n\t\t} else {\n\t\t\tstr = append(str, r)\n\t\t}\n\t}\n\treturn str\n}", "func PrependIfMissingIgnoreCase(str string, prefix string, prefixes ...string) string {\n\treturn prependIfMissing(str, prefix, true, prefixes...)\n}", "func appendIfMissing(inputSlice []rowStore, input rowStore) []rowStore {\n\tfor _, element := range inputSlice {\n\t\tif element == input {\n\t\t\treturn inputSlice\n\t\t}\n\t}\n\treturn append(inputSlice, input)\n}", "func (r StringsSet) AddAll(other StringsSet) {\n\tfor s := range other {\n\t\tr[s] = struct{}{}\n\t}\n}", "func depSliceDeduplicate(s []Dependency) []Dependency {\n\tl := len(s)\n\tif l < 2 {\n\t\treturn 
s\n\t}\n\tif l == 2 {\n\t\tif s[0] == s[1] {\n\t\t\treturn s[0:1]\n\t\t}\n\t\treturn s\n\t}\n\n\tfound := make(map[string]bool, l)\n\tj := 0\n\tfor i, x := range s {\n\t\th := x.Hash()\n\t\tif !found[h] {\n\t\t\tfound[h] = true\n\t\t\ts[j] = s[i]\n\t\t\tj++\n\t\t}\n\t}\n\n\treturn s[:j]\n}", "func SortUnique(a []string) (rv []string) {\n\n\tsort.Strings(a)\n\n\tpos := 1\n\tlast := a[0]\n\tfor i := 1; i < len(a); i++ {\n\t\ts := a[i]\n\t\tif s != last {\n\t\t\tif pos != i {\n\t\t\t\ta[pos] = a[i]\n\t\t\t}\n\t\t\tpos++\n\t\t}\n\t\tlast = s\n\t}\n\trv = a[:pos]\n\treturn\n}" ]
[ "0.64953166", "0.5654552", "0.5652756", "0.55148536", "0.53828245", "0.53584385", "0.5342289", "0.5150319", "0.513586", "0.5078584", "0.50460446", "0.50121546", "0.5000155", "0.49774796", "0.49146158", "0.49021077", "0.48866537", "0.48661357", "0.48562434", "0.47965133", "0.4780307", "0.47801292", "0.4774901", "0.47722873", "0.4759733", "0.4744842", "0.47354126", "0.4715488", "0.4708589", "0.46616873", "0.46606046", "0.46561733", "0.45682538", "0.45668474", "0.4550267", "0.452949", "0.45249027", "0.4513581", "0.45042053", "0.4497204", "0.44971558", "0.44886267", "0.44882458", "0.44841638", "0.44706774", "0.44642594", "0.4455903", "0.4449288", "0.44251135", "0.44072983", "0.44059145", "0.44048172", "0.4402026", "0.4391726", "0.4391068", "0.43840945", "0.43787333", "0.43781415", "0.4373554", "0.43593356", "0.43465927", "0.43330926", "0.43272105", "0.43208873", "0.43179226", "0.43153548", "0.43117827", "0.43036747", "0.43021172", "0.4301195", "0.42982307", "0.42979857", "0.42941293", "0.42897224", "0.42880508", "0.42880508", "0.4277076", "0.42723846", "0.4270383", "0.42680794", "0.42665404", "0.42658705", "0.4261972", "0.42616907", "0.42537907", "0.42451707", "0.4244592", "0.42297924", "0.42213133", "0.42211708", "0.42211473", "0.42167428", "0.42070878", "0.42015666", "0.41969824", "0.41946515", "0.41935065", "0.418656", "0.41849187", "0.41829473" ]
0.7760827
0