text
stringlengths 2
1.1M
| id
stringlengths 11
117
| metadata
dict | __index_level_0__
int64 0
885
|
---|---|---|---|
// Objective-C API for talking to interfaces Go package.
// gobind -lang=objc interfaces
//
// File is generated by gobind. Do not edit.
#ifndef __GO_interfaces_H__
#define __GO_interfaces_H__
#include <stdint.h>
#include <objc/objc.h>
int32_t cproxyinterfaces_Error_Err(int32_t refnum);
int32_t cproxyinterfaces_I_Rand(int32_t refnum);
int32_t cproxyinterfaces_I3_F(int32_t refnum);
void cproxyinterfaces_Interfaces_SomeMethod(int32_t refnum);
void cproxyinterfaces_LargerI_AnotherFunc(int32_t refnum);
int32_t cproxyinterfaces_LargerI_Rand(int32_t refnum);
int32_t cproxyinterfaces_SameI_Rand(int32_t refnum);
void cproxyinterfaces_WithParam_HasParam(int32_t refnum, char p0);
#endif
| mobile/bind/testdata/interfaces.objc.go.h.golden/0 | {
"file_path": "mobile/bind/testdata/interfaces.objc.go.h.golden",
"repo_id": "mobile",
"token_count": 274
} | 600 |
// Objective-C API for talking to issue12328 Go package.
// gobind -lang=objc issue12328
//
// File is generated by gobind. Do not edit.
#ifndef __GO_issue12328_H__
#define __GO_issue12328_H__
#include <stdint.h>
#include <objc/objc.h>
#endif
| mobile/bind/testdata/issue12328.objc.go.h.golden/0 | {
"file_path": "mobile/bind/testdata/issue12328.objc.go.h.golden",
"repo_id": "mobile",
"token_count": 96
} | 601 |
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package objcpkg
import (
"ObjC/Foundation/NSDate"
"ObjC/Foundation/NSString"
"ObjC/QuartzCore/CAMediaTimingFunction"
)
func Func() {
NSDate.Date()
CAMediaTimingFunction.FunctionWithControlPoints(0, 0, 0, 0)
}
func Method() string {
d := NSDate.Date()
return d.Description()
}
func New() {
NSDate.New()
CAMediaTimingFunction.NewWithControlPoints(0, 0, 0, 0)
}
func Error() {
str, err := NSString.StringWithContentsOfFileEncodingError("<non-existent>", 0)
if err == nil {
panic("no error from stringWithContentsOfFile")
}
// Assert err is an error
err = err.(error)
if str != "" {
panic("non-empty string from stringWithContentsOfFile")
}
str, err = NSString.NewWithContentsOfFileEncodingError("<non-existent>", 0)
if err == nil {
panic("no error from stringWithContentsOfFile")
}
// Assert err is an error
err = err.(error)
if str != "" {
panic("non-empty string from initWithContentsOfFile")
}
}
| mobile/bind/testdata/testpkg/objcpkg/objc.go/0 | {
"file_path": "mobile/bind/testdata/testpkg/objcpkg/objc.go",
"repo_id": "mobile",
"token_count": 388
} | 602 |
// Code generated by gobind. DO NOT EDIT.
// JNI functions for the Go <=> Java bridge.
//
// autogenerated by gobind -lang=java underscores
#include <android/log.h>
#include <stdint.h>
#include "seq.h"
#include "_cgo_export.h"
#include "underscore_pkg.h"
jclass proxy_class_underscore_pkg_Underscore_struct;
jmethodID proxy_class_underscore_pkg_Underscore_struct_cons;
JNIEXPORT void JNICALL
Java_underscore_1pkg_Underscore_1pkg__1init(JNIEnv *env, jclass _unused) {
jclass clazz;
clazz = (*env)->FindClass(env, "underscore_pkg/Underscore_struct");
proxy_class_underscore_pkg_Underscore_struct = (*env)->NewGlobalRef(env, clazz);
proxy_class_underscore_pkg_Underscore_struct_cons = (*env)->GetMethodID(env, clazz, "<init>", "(I)V");
}
JNIEXPORT void JNICALL
Java_underscore_1pkg_Underscore_1pkg_underscore_1func(JNIEnv* env, jclass _clazz) {
proxyunderscore_pkg__Underscore_func();
}
JNIEXPORT jint JNICALL
Java_underscore_1pkg_Underscore_1struct__1_1New(JNIEnv *env, jclass clazz) {
return new_underscore_pkg_Underscore_struct();
}
JNIEXPORT void JNICALL
Java_underscore_1pkg_Underscore_1struct_setUnderscore_1field(JNIEnv *env, jobject this, jstring v) {
int32_t o = go_seq_to_refnum_go(env, this);
nstring _v = go_seq_from_java_string(env, v);
proxyunderscore_pkg_Underscore_struct_Underscore_field_Set(o, _v);
}
JNIEXPORT jstring JNICALL
Java_underscore_1pkg_Underscore_1struct_getUnderscore_1field(JNIEnv *env, jobject this) {
int32_t o = go_seq_to_refnum_go(env, this);
nstring r0 = proxyunderscore_pkg_Underscore_struct_Underscore_field_Get(o);
jstring _r0 = go_seq_to_java_string(env, r0);
return _r0;
}
JNIEXPORT void JNICALL
Java_underscore_1pkg_Underscore_1pkg_setUnderscore_1var(JNIEnv *env, jclass clazz, jlong v) {
nint _v = (nint)v;
var_setunderscore_pkg_Underscore_var(_v);
}
JNIEXPORT jlong JNICALL
Java_underscore_1pkg_Underscore_1pkg_getUnderscore_1var(JNIEnv *env, jclass clazz) {
nint r0 = var_getunderscore_pkg_Underscore_var();
jlong _r0 = (jlong)r0;
return _r0;
}
| mobile/bind/testdata/underscores.java.c.golden/0 | {
"file_path": "mobile/bind/testdata/underscores.java.c.golden",
"repo_id": "mobile",
"token_count": 882
} | 603 |
// Code generated by gobind. DO NOT EDIT.
// Java class vars.S is a proxy for talking to a Go program.
//
// autogenerated by gobind -lang=java vars
package vars;
import go.Seq;
public final class S implements Seq.Proxy, I {
static { Vars.touch(); }
private final int refnum;
@Override public final int incRefnum() {
Seq.incGoRef(refnum, this);
return refnum;
}
S(int refnum) { this.refnum = refnum; Seq.trackGoRef(refnum, this); }
public S() { this.refnum = __New(); Seq.trackGoRef(refnum, this); }
private static native int __New();
@Override public boolean equals(Object o) {
if (o == null || !(o instanceof S)) {
return false;
}
S that = (S)o;
return true;
}
@Override public int hashCode() {
return java.util.Arrays.hashCode(new Object[] {});
}
@Override public String toString() {
StringBuilder b = new StringBuilder();
b.append("S").append("{");
return b.append("}").toString();
}
}
// Code generated by gobind. DO NOT EDIT.
// Java class vars.I is a proxy for talking to a Go program.
//
// autogenerated by gobind -lang=java vars
package vars;
import go.Seq;
public interface I {
}
// Code generated by gobind. DO NOT EDIT.
// Java class vars.Vars is a proxy for talking to a Go program.
//
// autogenerated by gobind -lang=java vars
package vars;
import go.Seq;
public abstract class Vars {
static {
Seq.touch(); // for loading the native library
_init();
}
private Vars() {} // uninstantiable
// touch is called from other bound packages to initialize this package
public static void touch() {}
private static native void _init();
private static final class proxyI implements Seq.Proxy, I {
private final int refnum;
@Override public final int incRefnum() {
Seq.incGoRef(refnum, this);
return refnum;
}
proxyI(int refnum) { this.refnum = refnum; Seq.trackGoRef(refnum, this); }
}
public static native void setABool(boolean v);
public static native boolean getABool();
public static native void setAFloat(double v);
public static native double getAFloat();
public static native void setAFloat32(float v);
public static native float getAFloat32();
public static native void setAFloat64(double v);
public static native double getAFloat64();
public static native void setAString(String v);
public static native String getAString();
public static native void setAStructPtr(S v);
public static native S getAStructPtr();
public static native void setAnInt(long v);
public static native long getAnInt();
public static native void setAnInt16(short v);
public static native short getAnInt16();
public static native void setAnInt32(int v);
public static native int getAnInt32();
public static native void setAnInt64(long v);
public static native long getAnInt64();
public static native void setAnInt8(byte v);
public static native byte getAnInt8();
public static native void setAnInterface(I v);
public static native I getAnInterface();
}
| mobile/bind/testdata/vars.java.golden/0 | {
"file_path": "mobile/bind/testdata/vars.java.golden",
"repo_id": "mobile",
"token_count": 1321
} | 604 |
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"encoding/xml"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"strings"
"golang.org/x/mobile/internal/binres"
"golang.org/x/tools/go/packages"
)
// goAndroidBuild builds an Android APK for pkg: it compiles one c-shared
// library per target architecture, wraps them with the prebuilt dex and a
// (possibly generated) AndroidManifest.xml, signs the archive with the
// debug key, and writes it to buildO. It returns the set of Go packages
// linked into the first target's library (as reported by extractPkgs).
func goAndroidBuild(pkg *packages.Package, targets []targetInfo) (map[string]bool, error) {
	ndkRoot, err := ndkRoot(targets...)
	if err != nil {
		return nil, err
	}
	appName := path.Base(pkg.PkgPath)
	libName := androidPkgName(appName)
	// TODO(hajimehoshi): This works only with Go tools that assume all source files are in one directory.
	// Fix this to work with other Go tools.
	dir := filepath.Dir(pkg.GoFiles[0])
	manifestPath := filepath.Join(dir, "AndroidManifest.xml")
	manifestData, err := ioutil.ReadFile(manifestPath)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
		// No AndroidManifest.xml alongside the package: generate a
		// default one from the built-in template.
		buf := new(bytes.Buffer)
		buf.WriteString(`<?xml version="1.0" encoding="utf-8"?>`)
		err := manifestTmpl.Execute(buf, manifestTmplData{
			// TODO(crawshaw): a better package path.
			JavaPkgPath: "org.golang.todo." + libName,
			Name:        strings.Title(appName),
			LibName:     libName,
		})
		if err != nil {
			return nil, err
		}
		manifestData = buf.Bytes()
		if buildV {
			fmt.Fprintf(os.Stderr, "generated AndroidManifest.xml:\n%s\n", manifestData)
		}
	} else {
		// A manifest exists; the library name must come from it so the
		// NativeActivity loads the right .so.
		libName, err = manifestLibName(manifestData)
		if err != nil {
			return nil, fmt.Errorf("error parsing %s: %v", manifestPath, err)
		}
	}
	libFiles := []string{}
	nmpkgs := make(map[string]map[string]bool) // map: arch -> extractPkgs' output
	// Build one shared library per target architecture.
	for _, t := range targets {
		toolchain := ndk.Toolchain(t.arch)
		libPath := "lib/" + toolchain.abi + "/lib" + libName + ".so"
		libAbsPath := filepath.Join(tmpdir, libPath)
		if err := mkdir(filepath.Dir(libAbsPath)); err != nil {
			return nil, err
		}
		err = goBuild(
			pkg.PkgPath,
			androidEnv[t.arch],
			"-buildmode=c-shared",
			"-o", libAbsPath,
		)
		if err != nil {
			return nil, err
		}
		nmpkgs[t.arch], err = extractPkgs(toolchain.Path(ndkRoot, "nm"), libAbsPath)
		if err != nil {
			return nil, err
		}
		libFiles = append(libFiles, libPath)
	}
	// The APK is signed with the fixed, well-known debug key below.
	block, _ := pem.Decode([]byte(debugCert))
	if block == nil {
		return nil, errors.New("no debug cert")
	}
	privKey, err := x509.ParsePKCS1PrivateKey(block.Bytes)
	if err != nil {
		return nil, err
	}
	if buildO == "" {
		buildO = androidPkgName(path.Base(pkg.PkgPath)) + ".apk"
	}
	if !strings.HasSuffix(buildO, ".apk") {
		return nil, fmt.Errorf("output file name %q does not end in '.apk'", buildO)
	}
	var out io.Writer
	if !buildN {
		f, err := os.Create(buildO)
		if err != nil {
			return nil, err
		}
		// NOTE(review): the result parameters are unnamed, so assigning
		// cerr to this local err in the defer cannot change the value the
		// caller receives — a Close failure here is silently dropped.
		// Confirm whether the results should be named instead.
		defer func() {
			if cerr := f.Close(); err == nil {
				err = cerr
			}
		}()
		out = f
	}
	var apkw *Writer
	if !buildN {
		apkw = NewWriter(out, privKey)
	}
	// apkwCreate opens a new entry in the APK, or discards writes in -n
	// (dry-run) mode.
	apkwCreate := func(name string) (io.Writer, error) {
		if buildV {
			fmt.Fprintf(os.Stderr, "apk: %s\n", name)
		}
		if buildN {
			return ioutil.Discard, nil
		}
		return apkw.Create(name)
	}
	// apkwWriteFile copies the file at src into the APK as dst.
	apkwWriteFile := func(dst, src string) error {
		w, err := apkwCreate(dst)
		if err != nil {
			return err
		}
		if !buildN {
			f, err := os.Open(src)
			if err != nil {
				return err
			}
			defer f.Close()
			if _, err := io.Copy(w, f); err != nil {
				return err
			}
		}
		return nil
	}
	w, err := apkwCreate("classes.dex")
	if err != nil {
		return nil, err
	}
	dexData, err := base64.StdEncoding.DecodeString(dexStr)
	if err != nil {
		// dexStr is embedded at build time; a decode failure is a bug in
		// this tool, not a user error.
		log.Fatalf("internal error bad dexStr: %v", err)
	}
	if _, err := w.Write(dexData); err != nil {
		return nil, err
	}
	for _, libFile := range libFiles {
		if err := apkwWriteFile(libFile, filepath.Join(tmpdir, libFile)); err != nil {
			return nil, err
		}
	}
	// Bundle libopenal.so for any target whose library links the al package.
	for _, t := range targets {
		toolchain := ndk.Toolchain(t.arch)
		if nmpkgs[t.arch]["golang.org/x/mobile/exp/audio/al"] {
			dst := "lib/" + toolchain.abi + "/libopenal.so"
			src := filepath.Join(gomobilepath, dst)
			if _, err := os.Stat(src); err != nil {
				// NOTE(review): this user-facing message is ungrammatical
				// ("the Android requires ... libraries was not found");
				// consider rewording in a behavior-changing change.
				return nil, errors.New("the Android requires the golang.org/x/mobile/exp/audio/al, but the OpenAL libraries was not found. Please run gomobile init with the -openal flag pointing to an OpenAL source directory.")
			}
			if err := apkwWriteFile(dst, src); err != nil {
				return nil, err
			}
		}
	}
	// Add any assets.
	var arsc struct {
		iconPath string
	}
	assetsDir := filepath.Join(dir, "assets")
	assetsDirExists := true
	fi, err := os.Stat(assetsDir)
	if err != nil {
		if os.IsNotExist(err) {
			assetsDirExists = false
		} else {
			return nil, err
		}
	} else {
		assetsDirExists = fi.IsDir()
	}
	if assetsDirExists {
		// if assets is a symlink, follow the symlink.
		assetsDir, err = filepath.EvalSymlinks(assetsDir)
		if err != nil {
			return nil, err
		}
		err = filepath.Walk(assetsDir, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if name := filepath.Base(path); strings.HasPrefix(name, ".") {
				// Do not include the hidden files.
				return nil
			}
			if info.IsDir() {
				return nil
			}
			if rel, err := filepath.Rel(assetsDir, path); rel == "icon.png" && err == nil {
				arsc.iconPath = path
				// TODO returning here does not write the assets/icon.png to the final assets output,
				// making it unavailable via the assets API. Should the file be duplicated into assets
				// or should assets API be able to retrieve files from the generated resource table?
				return nil
			}
			name := "assets/" + path[len(assetsDir)+1:]
			return apkwWriteFile(name, path)
		})
		if err != nil {
			return nil, fmt.Errorf("asset %v", err)
		}
	}
	bxml, err := binres.UnmarshalXML(bytes.NewReader(manifestData), arsc.iconPath != "")
	if err != nil {
		return nil, err
	}
	// generate resources.arsc identifying single xxxhdpi icon resource.
	if arsc.iconPath != "" {
		pkgname, err := bxml.RawValueByName("manifest", xml.Name{Local: "package"})
		if err != nil {
			return nil, err
		}
		tbl, name := binres.NewMipmapTable(pkgname)
		if err := apkwWriteFile(name, arsc.iconPath); err != nil {
			return nil, err
		}
		w, err := apkwCreate("resources.arsc")
		if err != nil {
			return nil, err
		}
		bin, err := tbl.MarshalBinary()
		if err != nil {
			return nil, err
		}
		if _, err := w.Write(bin); err != nil {
			return nil, err
		}
	}
	w, err = apkwCreate("AndroidManifest.xml")
	if err != nil {
		return nil, err
	}
	bin, err := bxml.MarshalBinary()
	if err != nil {
		return nil, err
	}
	if _, err := w.Write(bin); err != nil {
		return nil, err
	}
	// TODO: add gdbserver to apk?
	if !buildN {
		if err := apkw.Close(); err != nil {
			return nil, err
		}
	}
	// TODO: return nmpkgs
	return nmpkgs[targets[0].arch], nil
}
// androidPkgName sanitizes the go package name to be acceptable as a android
// package name part. The android package name convention is similar to the
// java package name convention described in
// https://docs.oracle.com/javase/specs/jls/se8/html/jls-6.html#jls-6.5.3.1
// but not exactly same.
func androidPkgName(name string) string {
	sanitized := make([]rune, 0, len(name))
	for _, c := range name {
		isLetter := ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z')
		isDigit := '0' <= c && c <= '9'
		if isLetter || isDigit {
			sanitized = append(sanitized, c)
		} else {
			// Any other rune is replaced, not dropped, so distinct
			// names stay distinct in length.
			sanitized = append(sanitized, '_')
		}
	}
	// Android does not seem to allow the package part starting with _
	// (or a digit, or being empty), so prefix those with "go".
	if len(sanitized) == 0 || sanitized[0] == '_' || ('0' <= sanitized[0] && sanitized[0] <= '9') {
		sanitized = append([]rune("go"), sanitized...)
	}
	s := string(sanitized)
	// Look for Java keywords that are not Go keywords, and avoid using
	// them as a package name.
	//
	// This is not a problem for normal Go identifiers as we only expose
	// exported symbols. The upper case first letter saves everything
	// from accidentally matching except for the package name.
	//
	// Note that basic type names (like int) are not keywords in Go.
	switch s {
	case "abstract", "assert", "boolean", "byte", "catch", "char", "class",
		"do", "double", "enum", "extends", "final", "finally", "float",
		"implements", "instanceof", "int", "long", "native", "private",
		"protected", "public", "short", "static", "strictfp", "super",
		"synchronized", "this", "throw", "throws", "transient", "try",
		"void", "volatile", "while":
		return s + "_"
	}
	return s
}
// A random uninteresting private key.
// Must be consistent across builds so newer app versions can be installed.
const debugCert = `
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAy6ItnWZJ8DpX9R5FdWbS9Kr1U8Z7mKgqNByGU7No99JUnmyu
NQ6Uy6Nj0Gz3o3c0BXESECblOC13WdzjsH1Pi7/L9QV8jXOXX8cvkG5SJAyj6hcO
LOapjDiN89NXjXtyv206JWYvRtpexyVrmHJgRAw3fiFI+m4g4Qop1CxcIF/EgYh7
rYrqh4wbCM1OGaCleQWaOCXxZGm+J5YNKQcWpjZRrDrb35IZmlT0bK46CXUKvCqK
x7YXHgfhC8ZsXCtsScKJVHs7gEsNxz7A0XoibFw6DoxtjKzUCktnT0w3wxdY7OTj
9AR8mobFlM9W3yirX8TtwekWhDNTYEu8dwwykwIDAQABAoIBAA2hjpIhvcNR9H9Z
BmdEecydAQ0ZlT5zy1dvrWI++UDVmIp+Ve8BSd6T0mOqV61elmHi3sWsBN4M1Rdz
3N38lW2SajG9q0fAvBpSOBHgAKmfGv3Ziz5gNmtHgeEXfZ3f7J95zVGhlHqWtY95
JsmuplkHxFMyITN6WcMWrhQg4A3enKLhJLlaGLJf9PeBrvVxHR1/txrfENd2iJBH
FmxVGILL09fIIktJvoScbzVOneeWXj5vJGzWVhB17DHBbANGvVPdD5f+k/s5aooh
hWAy/yLKocr294C4J+gkO5h2zjjjSGcmVHfrhlXQoEPX+iW1TGoF8BMtl4Llc+jw
lKWKfpECgYEA9C428Z6CvAn+KJ2yhbAtuRo41kkOVoiQPtlPeRYs91Pq4+NBlfKO
2nWLkyavVrLx4YQeCeaEU2Xoieo9msfLZGTVxgRlztylOUR+zz2FzDBYGicuUD3s
EqC0Wv7tiX6dumpWyOcVVLmR9aKlOUzA9xemzIsWUwL3PpyONhKSq7kCgYEA1X2F
f2jKjoOVzglhtuX4/SP9GxS4gRf9rOQ1Q8DzZhyH2LZ6Dnb1uEQvGhiqJTU8CXxb
7odI0fgyNXq425Nlxc1Tu0G38TtJhwrx7HWHuFcbI/QpRtDYLWil8Zr7Q3BT9rdh
moo4m937hLMvqOG9pyIbyjOEPK2WBCtKW5yabqsCgYEAu9DkUBr1Qf+Jr+IEU9I8
iRkDSMeusJ6gHMd32pJVCfRRQvIlG1oTyTMKpafmzBAd/rFpjYHynFdRcutqcShm
aJUq3QG68U9EAvWNeIhA5tr0mUEz3WKTt4xGzYsyWES8u4tZr3QXMzD9dOuinJ1N
+4EEumXtSPKKDG3M8Qh+KnkCgYBUEVSTYmF5EynXc2xOCGsuy5AsrNEmzJqxDUBI
SN/P0uZPmTOhJIkIIZlmrlW5xye4GIde+1jajeC/nG7U0EsgRAV31J4pWQ5QJigz
0+g419wxIUFryGuIHhBSfpP472+w1G+T2mAGSLh1fdYDq7jx6oWE7xpghn5vb9id
EKLjdwKBgBtz9mzbzutIfAW0Y8F23T60nKvQ0gibE92rnUbjPnw8HjL3AZLU05N+
cSL5bhq0N5XHK77sscxW9vXjG0LJMXmFZPp9F6aV6ejkMIXyJ/Yz/EqeaJFwilTq
Mc6xR47qkdzu0dQ1aPm4XD7AWDtIvPo/GG2DKOucLBbQc2cOWtKS
-----END RSA PRIVATE KEY-----
`
| mobile/cmd/gomobile/build_androidapp.go/0 | {
"file_path": "mobile/cmd/gomobile/build_androidapp.go",
"repo_id": "mobile",
"token_count": 4714
} | 605 |
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"encoding/xml"
"errors"
"fmt"
"html/template"
)
// manifestXML mirrors the subset of AndroidManifest.xml this tool needs:
// the application's activity element and its meta-data children.
type manifestXML struct {
	Activity activityXML `xml:"application>activity"`
}

type activityXML struct {
	Name     string        `xml:"name,attr"`
	MetaData []metaDataXML `xml:"meta-data"`
}

type metaDataXML struct {
	Name  string `xml:"name,attr"`
	Value string `xml:"value,attr"`
}

// manifestLibName parses the AndroidManifest.xml and finds the library
// name of the NativeActivity.
func manifestLibName(data []byte) (string, error) {
	var manifest manifestXML
	if err := xml.Unmarshal(data, &manifest); err != nil {
		return "", err
	}
	if got := manifest.Activity.Name; got != "org.golang.app.GoNativeActivity" {
		return "", fmt.Errorf("can only build an .apk for GoNativeActivity, not %q", got)
	}
	// Scan the activity's meta-data entries for android.app.lib_name.
	libName := ""
	for _, md := range manifest.Activity.MetaData {
		if md.Name != "android.app.lib_name" {
			continue
		}
		libName = md.Value
		break
	}
	// An absent entry and an empty value are treated the same way.
	if libName == "" {
		return "", errors.New("AndroidManifest.xml missing meta-data android.app.lib_name")
	}
	return libName, nil
}
// manifestTmplData is the data fed to manifestTmpl when the package
// directory contains no AndroidManifest.xml of its own.
type manifestTmplData struct {
	JavaPkgPath string
	Name        string
	LibName     string
}

// manifestTmpl is the default AndroidManifest.xml. It points the
// NativeActivity at the generated shared library via the
// android.app.lib_name meta-data entry that manifestLibName reads back.
var manifestTmpl = template.Must(template.New("manifest").Parse(`
<manifest
xmlns:android="http://schemas.android.com/apk/res/android"
package="{{.JavaPkgPath}}"
android:versionCode="1"
android:versionName="1.0">
<application android:label="{{.Name}}" android:debuggable="true">
<activity android:name="org.golang.app.GoNativeActivity"
android:label="{{.Name}}"
android:configChanges="orientation|keyboardHidden">
<meta-data android:name="android.app.lib_name" android:value="{{.LibName}}" />
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
</application>
</manifest>`))
| mobile/cmd/gomobile/manifest.go/0 | {
"file_path": "mobile/cmd/gomobile/manifest.go",
"repo_id": "mobile",
"token_count": 732
} | 606 |
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleDevelopmentRegion</key>
<string>en</string>
<key>CFBundleExecutable</key>
<string>$(EXECUTABLE_NAME)</string>
<key>CFBundleIdentifier</key>
<string>org.golang.example.$(PRODUCT_NAME:rfc1034identifier)</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
<string>$(PRODUCT_NAME)</string>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleShortVersionString</key>
<string>1.0</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
<string>1</string>
<key>LSRequiresIPhoneOS</key>
<true/>
<key>UILaunchStoryboardName</key>
<string>LaunchScreen</string>
<key>UIMainStoryboardFile</key>
<string>Main</string>
<key>UIRequiredDeviceCapabilities</key>
<array>
<string>armv7</string>
</array>
<key>UISupportedInterfaceOrientations</key>
<array>
<string>UIInterfaceOrientationPortrait</string>
<string>UIInterfaceOrientationLandscapeLeft</string>
<string>UIInterfaceOrientationLandscapeRight</string>
</array>
<key>UISupportedInterfaceOrientations~ipad</key>
<array>
<string>UIInterfaceOrientationPortrait</string>
<string>UIInterfaceOrientationPortraitUpsideDown</string>
<string>UIInterfaceOrientationLandscapeLeft</string>
<string>UIInterfaceOrientationLandscapeRight</string>
</array>
</dict>
</plist>
| mobile/example/bind/ios/bind/Info.plist/0 | {
"file_path": "mobile/example/bind/ios/bind/Info.plist",
"repo_id": "mobile",
"token_count": 598
} | 607 |
<RelativeLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="fill_parent"
android:layout_height="match_parent"
tools:context="org.golang.ivy.Help"
android:background="@color/body">
<WebView
android:id="@+id/help_webview"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:background="@color/body" />
</RelativeLayout>
| mobile/example/ivy/android/app/src/main/res/layout/activity_help.xml/0 | {
"file_path": "mobile/example/ivy/android/app/src/main/res/layout/activity_help.xml",
"repo_id": "mobile",
"token_count": 197
} | 608 |
include ':app'
| mobile/example/ivy/android/settings.gradle/0 | {
"file_path": "mobile/example/ivy/android/settings.gradle",
"repo_id": "mobile",
"token_count": 6
} | 609 |
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin || linux || windows
// An app that paints green if golang.org is reachable when the app first
// starts, or red otherwise.
//
// In order to access the network from the Android app, its AndroidManifest.xml
// file must include the permission to access the network.
//
// http://developer.android.com/guide/topics/manifest/manifest-intro.html#perms
//
// The gomobile tool auto-generates a default AndroidManifest file by default
// unless the package directory contains the AndroidManifest.xml. Users can
// customize app behavior, such as permissions and app name, by providing
// the AndroidManifest file. This is irrelevant to iOS.
//
// Note: This demo is an early preview of Go 1.5. In order to build this
// program as an Android APK using the gomobile tool.
//
// See http://godoc.org/golang.org/x/mobile/cmd/gomobile to install gomobile.
//
// Get the network example and use gomobile to build or install it on your device.
//
// $ go get -d golang.org/x/mobile/example/network
// $ gomobile build golang.org/x/mobile/example/network # will build an APK
//
// # plug your Android device to your computer or start an Android emulator.
// # if you have adb installed on your machine, use gomobile install to
// # build and deploy the APK to an Android target.
// $ gomobile install golang.org/x/mobile/example/network
//
// Switch to your device or emulator to start the network application from
// the launcher.
// You can also run the application on your desktop by running the command
// below. (Note: It currently doesn't work on Windows.)
//
// $ go install golang.org/x/mobile/example/network && network
package main
import (
"net/http"
"golang.org/x/mobile/app"
"golang.org/x/mobile/event/lifecycle"
"golang.org/x/mobile/event/paint"
"golang.org/x/mobile/event/size"
"golang.org/x/mobile/gl"
)
// main starts the one-shot connectivity check, then runs the app event
// loop: it repaints when the check completes and on normal paint events.
func main() {
	// checkNetwork runs only once when the app first loads.
	go checkNetwork()
	app.Main(func(a app.App) {
		var glctx gl.Context
		det, sz := determined, size.Event{}
		for {
			select {
			case <-det:
				// The network check finished (determined was closed).
				// Request a repaint, then stop selecting on the channel
				// by nil-ing it out so this case never fires again.
				a.Send(paint.Event{})
				det = nil
			case e := <-a.Events():
				switch e := a.Filter(e).(type) {
				case lifecycle.Event:
					// The GL context comes and goes with the lifecycle;
					// it may be nil while the app is in the background.
					glctx, _ = e.DrawContext.(gl.Context)
				case size.Event:
					sz = e
				case paint.Event:
					if glctx == nil {
						// No GL context yet; skip this paint.
						continue
					}
					onDraw(glctx, sz)
					a.Publish()
				}
			}
		}
	})
}
var (
	// determined is closed once the connectivity check has finished,
	// whatever its outcome.
	determined = make(chan struct{})
	// ok reports whether golang.org was reachable. It is written only by
	// checkNetwork, before determined is closed, and read only after.
	ok = false
)

// checkNetwork probes golang.org once, records the result in ok, and
// signals completion by closing determined.
func checkNetwork() {
	defer close(determined)
	resp, err := http.Get("http://golang.org/")
	if err != nil {
		return
	}
	// Close the response body so the underlying connection is released;
	// the original code discarded the *Response and leaked it.
	resp.Body.Close()
	ok = true
}
// onDraw clears the screen: black while the connectivity check is still
// running, green if golang.org was reachable, red otherwise.
func onDraw(glctx gl.Context, sz size.Event) {
	var red, green float32
	select {
	case <-determined:
		if ok {
			green = 1
		} else {
			red = 1
		}
	default:
		// Check not finished yet; both components stay 0 (black).
	}
	glctx.ClearColor(red, green, 0, 1)
	glctx.Clear(gl.COLOR_BUFFER_BIT)
}
| mobile/example/network/main.go/0 | {
"file_path": "mobile/example/network/main.go",
"repo_id": "mobile",
"token_count": 1078
} | 610 |
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package f32
import "fmt"
// A Mat3 is a 3x3 matrix of float32 values.
// Elements are indexed first by row then column, i.e. m[row][column].
type Mat3 [3]Vec3

// String returns a multi-line textual form of m, one matrix row per
// line, with each element printed to three decimal places.
func (m Mat3) String() string {
	return fmt.Sprintf(`Mat3[% 0.3f, % 0.3f, % 0.3f,
% 0.3f, % 0.3f, % 0.3f,
% 0.3f, % 0.3f, % 0.3f]`,
		m[0][0], m[0][1], m[0][2],
		m[1][0], m[1][1], m[1][2],
		m[2][0], m[2][1], m[2][2])
}
// Identity sets m to the 3x3 identity matrix.
func (m *Mat3) Identity() {
	var id Mat3 // zero matrix
	for i := range id {
		id[i][i] = 1
	}
	*m = id
}
// Eq reports whether every element of m is within epsilon of the
// corresponding element of n.
func (m *Mat3) Eq(n *Mat3, epsilon float32) bool {
	for r, row := range m {
		for c := range row {
			if d := row[c] - n[r][c]; d < -epsilon || d > +epsilon {
				return false
			}
		}
	}
	return true
}
// Mul stores a × b in m.
func (m *Mat3) Mul(a, b *Mat3) {
	// Accumulate into a temporary so the result is correct even when
	// m aliases a or b. The inner loop adds terms left-to-right
	// (k = 0, 1, 2), matching the original expression order so the
	// float32 rounding is identical.
	var prod Mat3
	for i := 0; i < 3; i++ {
		for j := 0; j < 3; j++ {
			var sum float32
			for k := 0; k < 3; k++ {
				sum += a[i][k] * b[k][j]
			}
			prod[i][j] = sum
		}
	}
	*m = prod
}
| mobile/exp/f32/mat3.go/0 | {
"file_path": "mobile/exp/f32/mat3.go",
"repo_id": "mobile",
"token_count": 981
} | 611 |
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin || linux || windows
package glutil // import "golang.org/x/mobile/exp/gl/glutil"
import (
"fmt"
"golang.org/x/mobile/exp/f32"
"golang.org/x/mobile/gl"
)
// CreateProgram creates, compiles, and links a gl.Program.
func CreateProgram(glctx gl.Context, vertexSrc, fragmentSrc string) (gl.Program, error) {
	program := glctx.CreateProgram()
	if program.Value == 0 {
		return gl.Program{}, fmt.Errorf("glutil: no programs available")
	}
	vertexShader, err := loadShader(glctx, gl.VERTEX_SHADER, vertexSrc)
	if err != nil {
		return gl.Program{}, err
	}
	fragmentShader, err := loadShader(glctx, gl.FRAGMENT_SHADER, fragmentSrc)
	if err != nil {
		// The vertex shader compiled successfully; free it before
		// returning the fragment shader's error.
		glctx.DeleteShader(vertexShader)
		return gl.Program{}, err
	}
	glctx.AttachShader(program, vertexShader)
	glctx.AttachShader(program, fragmentShader)
	glctx.LinkProgram(program)
	// Flag shaders for deletion when program is unlinked.
	glctx.DeleteShader(vertexShader)
	glctx.DeleteShader(fragmentShader)
	if glctx.GetProgrami(program, gl.LINK_STATUS) == 0 {
		// Fetch the info log before the deferred delete runs.
		defer glctx.DeleteProgram(program)
		return gl.Program{}, fmt.Errorf("glutil: %s", glctx.GetProgramInfoLog(program))
	}
	return program, nil
}
// loadShader compiles a shader of the given type from src. On compile
// failure the shader object is deleted and the GL info log is returned
// in the error.
func loadShader(glctx gl.Context, shaderType gl.Enum, src string) (gl.Shader, error) {
	s := glctx.CreateShader(shaderType)
	if s.Value == 0 {
		return gl.Shader{}, fmt.Errorf("glutil: could not create shader (type %v)", shaderType)
	}
	glctx.ShaderSource(s, src)
	glctx.CompileShader(s)
	if glctx.GetShaderi(s, gl.COMPILE_STATUS) != 0 {
		return s, nil
	}
	// The info log is read before the deferred delete runs.
	defer glctx.DeleteShader(s)
	return gl.Shader{}, fmt.Errorf("shader compile: %s", glctx.GetShaderInfoLog(s))
}
// writeAffine writes the contents of an Affine to a 3x3 matrix GL uniform.
// The 2x3 affine is expanded to a column-major 3x3 with a [0 0 1] third row.
func writeAffine(glctx gl.Context, u gl.Uniform, a *f32.Affine) {
	m := [9]float32{
		a[0][0], a[1][0], 0, // column 0
		a[0][1], a[1][1], 0, // column 1
		a[0][2], a[1][2], 1, // column 2
	}
	glctx.UniformMatrix3fv(u, m[:])
}
| mobile/exp/gl/glutil/glutil.go/0 | {
"file_path": "mobile/exp/gl/glutil/glutil.go",
"repo_id": "mobile",
"token_count": 905
} | 612 |
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gl
import (
"archive/tar"
"compress/gzip"
"debug/pe"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path/filepath"
"runtime"
)
var debug = log.New(ioutil.Discard, "gl: ", log.LstdFlags)
// downloadDLLs fetches the prebuilt ANGLE DLLs for the current GOARCH
// and installs them, trying in order: the per-user app-data directory,
// GOPATH/pkg, and finally the system temp directory. It returns the
// directory the DLLs were written to. The named err result is required:
// the deferred Body.Close handler below assigns to it.
func downloadDLLs() (path string, err error) {
	url := "https://dl.google.com/go/mobile/angle-bd3f8780b-" + runtime.GOARCH + ".tgz"
	debug.Printf("downloading %s", url)
	resp, err := http.Get(url)
	if err != nil {
		return "", fmt.Errorf("gl: %v", err)
	}
	// Report a body-close failure only if nothing else already failed.
	defer func() {
		err2 := resp.Body.Close()
		if err == nil && err2 != nil {
			err = fmt.Errorf("gl: error reading body from %v: %v", url, err2)
		}
	}()
	if resp.StatusCode != http.StatusOK {
		err := fmt.Errorf("gl: error fetching %v, status: %v", url, resp.Status)
		return "", err
	}
	r, err := gzip.NewReader(resp.Body)
	if err != nil {
		return "", fmt.Errorf("gl: error reading gzip from %v: %v", url, err)
	}
	tr := tar.NewReader(r)
	// Collect the three required DLLs from the archive.
	var bytesGLESv2, bytesEGL, bytesD3DCompiler []byte
	for {
		header, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return "", fmt.Errorf("gl: error reading tar from %v: %v", url, err)
		}
		switch header.Name {
		case "angle-" + runtime.GOARCH + "/libglesv2.dll":
			bytesGLESv2, err = ioutil.ReadAll(tr)
		case "angle-" + runtime.GOARCH + "/libegl.dll":
			bytesEGL, err = ioutil.ReadAll(tr)
		case "angle-" + runtime.GOARCH + "/d3dcompiler_47.dll":
			bytesD3DCompiler, err = ioutil.ReadAll(tr)
		default: // skip
		}
		if err != nil {
			return "", fmt.Errorf("gl: error reading %v from %v: %v", header.Name, url, err)
		}
	}
	if len(bytesGLESv2) == 0 || len(bytesEGL) == 0 || len(bytesD3DCompiler) == 0 {
		return "", fmt.Errorf("gl: did not find all DLLs in %v", url)
	}
	// writeDLLs writes all three DLLs into path, failing on the first error.
	writeDLLs := func(path string) error {
		if err := ioutil.WriteFile(filepath.Join(path, "libglesv2.dll"), bytesGLESv2, 0755); err != nil {
			return fmt.Errorf("gl: cannot install ANGLE: %v", err)
		}
		if err := ioutil.WriteFile(filepath.Join(path, "libegl.dll"), bytesEGL, 0755); err != nil {
			return fmt.Errorf("gl: cannot install ANGLE: %v", err)
		}
		if err := ioutil.WriteFile(filepath.Join(path, "d3dcompiler_47.dll"), bytesD3DCompiler, 0755); err != nil {
			return fmt.Errorf("gl: cannot install ANGLE: %v", err)
		}
		return nil
	}
	// First, we attempt to install these DLLs in LOCALAPPDATA/Shiny.
	//
	// Traditionally we would use the system32 directory, but it is
	// no longer writable by normal programs.
	os.MkdirAll(appdataPath(), 0775)
	if err := writeDLLs(appdataPath()); err == nil {
		return appdataPath(), nil
	}
	debug.Printf("DLLs could not be written to %s", appdataPath())
	// Second, install in GOPATH/pkg if it exists.
	gopath := os.Getenv("GOPATH")
	gopathpkg := filepath.Join(gopath, "pkg")
	if _, err := os.Stat(gopathpkg); err == nil && gopath != "" {
		if err := writeDLLs(gopathpkg); err == nil {
			return gopathpkg, nil
		}
	}
	debug.Printf("DLLs could not be written to GOPATH")
	// Third, pick a temporary directory.
	tmp := os.TempDir()
	if err := writeDLLs(tmp); err != nil {
		return "", fmt.Errorf("gl: unable to install ANGLE DLLs: %v", err)
	}
	return tmp, nil
}
func appdataPath() string {
return filepath.Join(os.Getenv("LOCALAPPDATA"), "GoGL", runtime.GOARCH)
}
func containsDLLs(dir string) bool {
compatible := func(name string) bool {
file, err := pe.Open(filepath.Join(dir, name))
if err != nil {
return false
}
defer file.Close()
switch file.Machine {
case pe.IMAGE_FILE_MACHINE_AMD64:
return "amd64" == runtime.GOARCH
case pe.IMAGE_FILE_MACHINE_ARM:
return "arm" == runtime.GOARCH
case pe.IMAGE_FILE_MACHINE_I386:
return "386" == runtime.GOARCH
}
return false
}
return compatible("libglesv2.dll") && compatible("libegl.dll") && compatible("d3dcompiler_47.dll")
}
// chromePath returns the directory of an installed Chrome/Chromium version
// that ships ANGLE DLLs compatible with runtime.GOARCH, or "" if no such
// installation is found.
func chromePath() string {
	// dlls are stored in:
	//   <BASE>/<VERSION>/libglesv2.dll
	var installdirs = []string{
		// Chrome User
		filepath.Join(os.Getenv("LOCALAPPDATA"), "Google", "Chrome", "Application"),
		// Chrome System
		filepath.Join(os.Getenv("ProgramFiles(x86)"), "Google", "Chrome", "Application"),
		// Chromium
		filepath.Join(os.Getenv("LOCALAPPDATA"), "Chromium", "Application"),
		// Chrome Canary
		filepath.Join(os.Getenv("LOCALAPPDATA"), "Google", "Chrome SxS", "Application"),
	}
	for _, installdir := range installdirs {
		// Each install dir holds one subdirectory per installed version.
		versiondirs, err := ioutil.ReadDir(installdir)
		if err != nil {
			// Not installed at this location; try the next one.
			continue
		}
		for _, versiondir := range versiondirs {
			if !versiondir.IsDir() {
				continue
			}
			versionpath := filepath.Join(installdir, versiondir.Name())
			if containsDLLs(versionpath) {
				return versionpath
			}
		}
	}
	return ""
}
// findDLLs locates and loads the ANGLE DLLs (libglesv2, libegl,
// d3dcompiler_47), probing a sequence of locations and finally downloading
// them. On success the Lib* handles are loaded and their Name fields point
// into the directory that was used.
func findDLLs() (err error) {
	// load attempts to load all three DLLs from path. An empty path means
	// the default Windows DLL search order. It returns (true, nil) when all
	// three loaded, (false, nil) when libglesv2 is absent, and a non-nil
	// error when only some of the DLLs loaded (broken installation).
	load := func(path string) (bool, error) {
		if path != "" {
			// don't try to start when one of the files is missing
			if !containsDLLs(path) {
				return false, nil
			}
			LibD3DCompiler.Name = filepath.Join(path, filepath.Base(LibD3DCompiler.Name))
			LibGLESv2.Name = filepath.Join(path, filepath.Base(LibGLESv2.Name))
			LibEGL.Name = filepath.Join(path, filepath.Base(LibEGL.Name))
		}
		if err := LibGLESv2.Load(); err == nil {
			// libglesv2 loaded, so its companions must load too; a partial
			// load indicates a broken install and is reported as an error.
			if err := LibEGL.Load(); err != nil {
				return false, fmt.Errorf("gl: loaded libglesv2 but not libegl: %v", err)
			}
			if err := LibD3DCompiler.Load(); err != nil {
				return false, fmt.Errorf("gl: loaded libglesv2, libegl but not d3dcompiler: %v", err)
			}
			if path == "" {
				debug.Printf("DLLs found")
			} else {
				debug.Printf("DLLs found in: %q", path)
			}
			return true, nil
		}
		return false, nil
	}
	// Look in the system directory.
	if ok, err := load(""); ok || err != nil {
		return err
	}
	// Look in the AppData directory.
	if ok, err := load(appdataPath()); ok || err != nil {
		return err
	}
	// Look for a Chrome installation
	if dir := chromePath(); dir != "" {
		if ok, err := load(dir); ok || err != nil {
			return err
		}
	}
	// Look in GOPATH/pkg.
	if ok, err := load(filepath.Join(os.Getenv("GOPATH"), "pkg")); ok || err != nil {
		return err
	}
	// Look in temporary directory.
	if ok, err := load(os.TempDir()); ok || err != nil {
		return err
	}
	// Download the DLL binary.
	path, err := downloadDLLs()
	if err != nil {
		return err
	}
	debug.Printf("DLLs written to %s", path)
	if ok, err := load(path); !ok || err != nil {
		return fmt.Errorf("gl: unable to load ANGLE after installation: %v", err)
	}
	return nil
}
| mobile/gl/dll_windows.go/0 | {
"file_path": "mobile/gl/dll_windows.go",
"repo_id": "mobile",
"token_count": 2650
} | 613 |
module golang.org/x/mobile
go 1.18
require (
golang.org/x/exp/shiny v0.0.0-20230817173708-d852ddb80c63
golang.org/x/image v0.18.0
golang.org/x/mod v0.19.0
golang.org/x/sync v0.7.0
golang.org/x/tools v0.23.0
)
require golang.org/x/sys v0.22.0 // indirect
| mobile/go.mod/0 | {
"file_path": "mobile/go.mod",
"repo_id": "mobile",
"token_count": 145
} | 614 |
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The importers package uses go/ast to analyze Go packages or Go files
// and collect references to types whose package has a package prefix.
// It is used by the language specific importers to determine the set of
// wrapper types to be generated.
//
// # For example, in the Go file
//
// package javaprogram
//
// import "Java/java/lang"
//
// func F() {
//
// o := lang.Object.New()
// ...
//
// }
//
// the java importer uses this package to determine that the "java/lang"
// package and the wrapper interface, lang.Object, needs to be generated.
// After calling AnalyzeFile or AnalyzePackages, the References result
// contains the reference to lang.Object and the names set will contain
// "New".
package importers
import (
"errors"
"go/ast"
"go/token"
"path"
"sort"
"strconv"
"strings"
"golang.org/x/tools/go/packages"
)
// References is the result of analyzing a Go file or set of Go packages.
//
// # For example, the Go file
//
// package pkg
//
// import "Prefix/some/Package"
//
// var A = Package.Identifier
//
// Will result in a single PkgRef with the "some/Package" package and
// the Identifier name. The Names set will contain the single name,
// "Identifier".
type References struct {
// The list of references to identifiers in packages that are
// identified by a package prefix.
Refs []PkgRef
// The list of names used in at least one selector expression.
// Useful as a conservative upper bound on the set of identifiers
// referenced from a set of packages.
Names map[string]struct{}
// Embedders is a list of struct types with prefixed types
// embedded.
Embedders []Struct
}
// Struct is a representation of a struct type with embedded
// types.
type Struct struct {
Name string
Pkg string
PkgPath string
Refs []PkgRef
}
// PkgRef is a reference to an identifier in a package.
type PkgRef struct {
Name string
Pkg string
}
type refsSaver struct {
pkgPrefix string
*References
refMap map[PkgRef]struct{}
insideStruct bool
}
// AnalyzeFile scans the provided file for references to packages with the
// given package prefix. The list of unique (package, identifier) pairs is
// returned.
func AnalyzeFile(file *ast.File, pkgPrefix string) (*References, error) {
	saver := newRefsSaver(pkgPrefix)
	// ast.NewPackage reports errors for unresolved packages; those are
	// expected here and deliberately ignored.
	pkg, _ := ast.NewPackage(
		token.NewFileSet(),
		map[string]*ast.File{file.Name.Name: file},
		saver.importer(),
		nil,
	)
	ast.Walk(saver, pkg)
	saver.findEmbeddingStructs("", pkg)
	return saver.References, nil
}
// AnalyzePackages scans the provided packages for references to packages
// with the given package prefix. The list of unique (package, identifier)
// pairs is returned.
func AnalyzePackages(pkgs []*packages.Package, pkgPrefix string) (*References, error) {
	visitor := newRefsSaver(pkgPrefix)
	imp := visitor.importer()
	fset := token.NewFileSet()
	for _, pkg := range pkgs {
		// GoFiles and Syntax are parallel slices: pair each file path with
		// its already-parsed syntax tree.
		files := make(map[string]*ast.File)
		for i, name := range pkg.GoFiles {
			files[name] = pkg.Syntax[i]
		}
		// Ignore errors (from unknown packages)
		astpkg, _ := ast.NewPackage(fset, files, imp, nil)
		ast.Walk(visitor, astpkg)
		visitor.findEmbeddingStructs(pkg.PkgPath, astpkg)
	}
	return visitor.References, nil
}
// findEmbeddingStructs finds all top level declarations embedding a prefixed type.
//
// For example:
//
// import "Prefix/some/Package"
//
// type T struct {
//
// Package.Class
//
// }
func (v *refsSaver) findEmbeddingStructs(pkgpath string, pkg *ast.Package) {
	// Collect and sort exported top-level type names so Embedders is
	// deterministic (map iteration order is random).
	var names []string
	for _, obj := range pkg.Scope.Objects {
		if obj.Kind != ast.Typ || !ast.IsExported(obj.Name) {
			continue
		}
		names = append(names, obj.Name)
	}
	sort.Strings(names)
	for _, name := range names {
		obj := pkg.Scope.Objects[name]
		// Only struct type declarations can embed prefixed types.
		t, ok := obj.Decl.(*ast.TypeSpec).Type.(*ast.StructType)
		if !ok {
			continue
		}
		var refs []PkgRef
		for _, f := range t.Fields.List {
			// A field whose type is a selector (pkg.Ident) may refer to a
			// prefixed package.
			sel, ok := f.Type.(*ast.SelectorExpr)
			if !ok {
				continue
			}
			ref, ok := v.addRef(sel)
			if !ok {
				continue
			}
			// Skip unexported named fields; embedded fields have no names.
			if len(f.Names) > 0 && !f.Names[0].IsExported() {
				continue
			}
			refs = append(refs, ref)
		}
		if len(refs) > 0 {
			v.Embedders = append(v.Embedders, Struct{
				Name:    obj.Name,
				Pkg:     pkg.Name,
				PkgPath: pkgpath,
				Refs:    refs,
			})
		}
	}
}
// newRefsSaver returns a refsSaver that collects references to packages
// under pkgPrefix.
func newRefsSaver(pkgPrefix string) *refsSaver {
	refs := &References{Names: make(map[string]struct{})}
	return &refsSaver{
		pkgPrefix:  pkgPrefix,
		refMap:     make(map[PkgRef]struct{}),
		References: refs,
	}
}
// importer returns an ast.Importer that fabricates a package object for
// every import under the configured prefix and rejects all other paths.
func (v *refsSaver) importer() ast.Importer {
	return func(imports map[string]*ast.Object, pkgPath string) (*ast.Object, error) {
		if cached, ok := imports[pkgPath]; ok {
			return cached, nil
		}
		if !strings.HasPrefix(pkgPath, v.pkgPrefix) {
			// Non-prefixed packages are intentionally left unresolved.
			return nil, errors.New("ignored")
		}
		obj := ast.NewObj(ast.Pkg, path.Base(pkgPath))
		imports[pkgPath] = obj
		return obj, nil
	}
}
// addRef records a reference for the selector expression sel if its base
// identifier resolves to an import of a package under the configured prefix.
// It returns the reference (with the prefix stripped from the package path)
// and whether sel qualified.
func (v *refsSaver) addRef(sel *ast.SelectorExpr) (PkgRef, bool) {
	x, ok := sel.X.(*ast.Ident)
	if !ok || x.Obj == nil {
		return PkgRef{}, false
	}
	// Only identifiers declared by an import spec can name a package.
	imp, ok := x.Obj.Decl.(*ast.ImportSpec)
	if !ok {
		return PkgRef{}, false
	}
	pkgPath, err := strconv.Unquote(imp.Path.Value)
	if err != nil {
		return PkgRef{}, false
	}
	if !strings.HasPrefix(pkgPath, v.pkgPrefix) {
		return PkgRef{}, false
	}
	// Strip the prefix so Refs hold the bare package path.
	pkgPath = pkgPath[len(v.pkgPrefix):]
	ref := PkgRef{Pkg: pkgPath, Name: sel.Sel.Name}
	// De-duplicate via refMap; Refs keeps first-sighting order.
	if _, exists := v.refMap[ref]; !exists {
		v.refMap[ref] = struct{}{}
		v.Refs = append(v.Refs, ref)
	}
	return ref, true
}
// Visit implements ast.Visitor. It records every selector name seen and any
// reference to a prefixed package, pruning traversal where nothing more can
// be collected.
func (v *refsSaver) Visit(n ast.Node) ast.Visitor {
	switch n := n.(type) {
	case *ast.StructType:
		// Use a copy of refsSaver that only accepts exported fields. It refers
		// to the original refsSaver for collecting references.
		v2 := *v
		v2.insideStruct = true
		return &v2
	case *ast.Field:
		// Inside a struct, skip unexported named fields entirely.
		if v.insideStruct && len(n.Names) == 1 && !n.Names[0].IsExported() {
			return nil
		}
	case *ast.SelectorExpr:
		v.Names[n.Sel.Name] = struct{}{}
		if _, ok := v.addRef(n); ok {
			// Fully handled; no need to descend into the selector.
			return nil
		}
	case *ast.FuncDecl:
		if n.Recv != nil { // Methods
			v.Names[n.Name.Name] = struct{}{}
		}
	}
	return v
}
| mobile/internal/importers/ast.go/0 | {
"file_path": "mobile/internal/importers/ast.go",
"repo_id": "mobile",
"token_count": 2371
} | 615 |
# Release automation via GoReleaser (goreleaser.com)
# Requires a valid GITHUB_TOKEN envar prior to running `goreleaser`
# See https://goreleaser.com/environment/ for more info
---
release:
github:
owner: golang
name: mock
builds:
- binary: mockgen
goos:
- darwin
- windows
- linux
goarch:
- amd64
- arm64
- 386
env:
- CGO_ENABLED=0
- GO111MODULE=on
- GOPROXY=https://proxy.golang.org
- GOSUMDB=sum.golang.org
main: ./mockgen/
archives:
- format: tar.gz
wrap_in_directory: true
files:
- LICENSE
- README.md
checksum:
snapshot:
name_template: "snap-{{ .Commit }}"
changelog:
sort: asc
filters:
exclude:
- '^docs:'
- '^test:'
- 'README'
| mock/.goreleaser.yml/0 | {
"file_path": "mock/.goreleaser.yml",
"repo_id": "mock",
"token_count": 362
} | 616 |
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package gomock is a mock framework for Go.
//
// Standard usage:
// (1) Define an interface that you wish to mock.
// type MyInterface interface {
// SomeMethod(x int64, y string)
// }
// (2) Use mockgen to generate a mock from the interface.
// (3) Use the mock in a test:
// func TestMyThing(t *testing.T) {
//	mockCtrl := gomock.NewController(t)
// mockObj := something.NewMockMyInterface(mockCtrl)
// mockObj.EXPECT().SomeMethod(4, "blah")
// // pass mockObj to a real object and play with it.
// }
//
// By default, expected calls are not enforced to run in any particular order.
// Call order dependency can be enforced by use of InOrder and/or Call.After.
// Call.After can create more varied call order dependencies, but InOrder is
// often more convenient.
//
// The following examples create equivalent call order dependencies.
//
// Example of using Call.After to chain expected call order:
//
// firstCall := mockObj.EXPECT().SomeMethod(1, "first")
// secondCall := mockObj.EXPECT().SomeMethod(2, "second").After(firstCall)
// mockObj.EXPECT().SomeMethod(3, "third").After(secondCall)
//
// Example of using InOrder to declare expected call order:
//
// gomock.InOrder(
// mockObj.EXPECT().SomeMethod(1, "first"),
// mockObj.EXPECT().SomeMethod(2, "second"),
// mockObj.EXPECT().SomeMethod(3, "third"),
// )
//
// The standard TestReporter most users will pass to `NewController` is a
// `*testing.T` from the context of the test. Note that this will use the
// standard `t.Error` and `t.Fatal` methods to report what happened in the test.
// In some cases this can leave your testing package in a weird state if global
// state is used since `t.Fatal` is like calling panic in the middle of a
// function. In these cases it is recommended that you pass in your own
// `TestReporter`.
package gomock
| mock/gomock/doc.go/0 | {
"file_path": "mock/gomock/doc.go",
"repo_id": "mock",
"token_count": 813
} | 617 |
// Code generated by MockGen. DO NOT EDIT.
// Source: bugreport.go
// Package bugreport is a generated GoMock package.
package bugreport
import (
reflect "reflect"
gomock "github.com/golang/mock/gomock"
)
// MockExample is a mock of Example interface.
type MockExample struct {
ctrl *gomock.Controller
recorder *MockExampleMockRecorder
}
// MockExampleMockRecorder is the mock recorder for MockExample.
type MockExampleMockRecorder struct {
mock *MockExample
}
// NewMockExample creates a new mock instance.
func NewMockExample(ctrl *gomock.Controller) *MockExample {
mock := &MockExample{ctrl: ctrl}
mock.recorder = &MockExampleMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockExample) EXPECT() *MockExampleMockRecorder {
return m.recorder
}
// Method mocks base method.
func (m_2 *MockExample) Method(_m, _mr, m, mr int) {
m_2.ctrl.T.Helper()
m_2.ctrl.Call(m_2, "Method", _m, _mr, m, mr)
}
// Method indicates an expected call of Method.
func (mr_2 *MockExampleMockRecorder) Method(_m, _mr, m, mr interface{}) *gomock.Call {
mr_2.mock.ctrl.T.Helper()
return mr_2.mock.ctrl.RecordCallWithMethodType(mr_2.mock, "Method", reflect.TypeOf((*MockExample)(nil).Method), _m, _mr, m, mr)
}
// VarargMethod mocks base method.
func (m *MockExample) VarargMethod(_s, _x, a, ret int, varargs ...int) {
m.ctrl.T.Helper()
varargs_2 := []interface{}{_s, _x, a, ret}
for _, a_2 := range varargs {
varargs_2 = append(varargs_2, a_2)
}
m.ctrl.Call(m, "VarargMethod", varargs_2...)
}
// VarargMethod indicates an expected call of VarargMethod.
func (mr *MockExampleMockRecorder) VarargMethod(_s, _x, a, ret interface{}, varargs ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs_2 := append([]interface{}{_s, _x, a, ret}, varargs...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VarargMethod", reflect.TypeOf((*MockExample)(nil).VarargMethod), varargs_2...)
}
| mock/mockgen/internal/tests/generated_identifier_conflict/bugreport_mock.go/0 | {
"file_path": "mock/mockgen/internal/tests/generated_identifier_conflict/bugreport_mock.go",
"repo_id": "mock",
"token_count": 735
} | 618 |
// Code generated by MockGen. DO NOT EDIT.
// Source: user.go
// Package users_test is a generated GoMock package.
package users_test
import (
reflect "reflect"
gomock "github.com/golang/mock/gomock"
users "github.com/golang/mock/mockgen/internal/tests/mock_in_test_package"
)
// MockFinder is a mock of Finder interface.
type MockFinder struct {
ctrl *gomock.Controller
recorder *MockFinderMockRecorder
}
// MockFinderMockRecorder is the mock recorder for MockFinder.
type MockFinderMockRecorder struct {
mock *MockFinder
}
// NewMockFinder creates a new mock instance.
func NewMockFinder(ctrl *gomock.Controller) *MockFinder {
mock := &MockFinder{ctrl: ctrl}
mock.recorder = &MockFinderMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockFinder) EXPECT() *MockFinderMockRecorder {
return m.recorder
}
// Add mocks base method.
func (m *MockFinder) Add(u users.User) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "Add", u)
}
// Add indicates an expected call of Add.
func (mr *MockFinderMockRecorder) Add(u interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockFinder)(nil).Add), u)
}
// FindUser mocks base method.
func (m *MockFinder) FindUser(name string) users.User {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindUser", name)
ret0, _ := ret[0].(users.User)
return ret0
}
// FindUser indicates an expected call of FindUser.
func (mr *MockFinderMockRecorder) FindUser(name interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindUser", reflect.TypeOf((*MockFinder)(nil).FindUser), name)
}
| mock/mockgen/internal/tests/mock_in_test_package/mock_test.go/0 | {
"file_path": "mock/mockgen/internal/tests/mock_in_test_package/mock_test.go",
"repo_id": "mock",
"token_count": 645
} | 619 |
// Copyright 2012 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package model contains the data model necessary for generating mock implementations.
package model
import (
"encoding/gob"
"fmt"
"io"
"reflect"
"strings"
)
// pkgPath is the importable path for package model
const pkgPath = "github.com/golang/mock/mockgen/model"
// Package is a Go package. It may be a subset.
type Package struct {
Name string
PkgPath string
Interfaces []*Interface
DotImports []string
}
// Print writes the package name and its exported interfaces.
func (pkg *Package) Print(w io.Writer) {
_, _ = fmt.Fprintf(w, "package %s\n", pkg.Name)
for _, intf := range pkg.Interfaces {
intf.Print(w)
}
}
// Imports returns the imports needed by the Package as a set of import paths.
func (pkg *Package) Imports() map[string]bool {
	set := make(map[string]bool)
	for _, intf := range pkg.Interfaces {
		// Each interface contributes its method imports plus the imports
		// required by its type parameters.
		intf.addImports(set)
		for _, tp := range intf.TypeParams {
			tp.Type.addImports(set)
		}
	}
	return set
}
// Interface is a Go interface.
type Interface struct {
Name string
Methods []*Method
TypeParams []*Parameter
}
// Print writes the interface name and its methods.
func (intf *Interface) Print(w io.Writer) {
_, _ = fmt.Fprintf(w, "interface %s\n", intf.Name)
for _, m := range intf.Methods {
m.Print(w)
}
}
func (intf *Interface) addImports(im map[string]bool) {
for _, m := range intf.Methods {
m.addImports(im)
}
}
// AddMethod adds a new method, de-duplicating by method name: if a method
// with the same name already exists, the first one wins and m is dropped.
func (intf *Interface) AddMethod(m *Method) {
	for _, existing := range intf.Methods {
		if existing.Name == m.Name {
			return
		}
	}
	intf.Methods = append(intf.Methods, m)
}
// Method is a single method of an interface.
type Method struct {
Name string
In, Out []*Parameter
Variadic *Parameter // may be nil
}
// Print writes the method name and its signature.
func (m *Method) Print(w io.Writer) {
_, _ = fmt.Fprintf(w, " - method %s\n", m.Name)
if len(m.In) > 0 {
_, _ = fmt.Fprintf(w, " in:\n")
for _, p := range m.In {
p.Print(w)
}
}
if m.Variadic != nil {
_, _ = fmt.Fprintf(w, " ...:\n")
m.Variadic.Print(w)
}
if len(m.Out) > 0 {
_, _ = fmt.Fprintf(w, " out:\n")
for _, p := range m.Out {
p.Print(w)
}
}
}
func (m *Method) addImports(im map[string]bool) {
for _, p := range m.In {
p.Type.addImports(im)
}
if m.Variadic != nil {
m.Variadic.Type.addImports(im)
}
for _, p := range m.Out {
p.Type.addImports(im)
}
}
// Parameter is an argument or return parameter of a method.
type Parameter struct {
Name string // may be empty
Type Type
}
// Print writes a method parameter.
func (p *Parameter) Print(w io.Writer) {
n := p.Name
if n == "" {
n = `""`
}
_, _ = fmt.Fprintf(w, " - %v: %v\n", n, p.Type.String(nil, ""))
}
// Type is a Go type.
type Type interface {
String(pm map[string]string, pkgOverride string) string
addImports(im map[string]bool)
}
func init() {
gob.Register(&ArrayType{})
gob.Register(&ChanType{})
gob.Register(&FuncType{})
gob.Register(&MapType{})
gob.Register(&NamedType{})
gob.Register(&PointerType{})
// Call gob.RegisterName to make sure it has the consistent name registered
// for both gob decoder and encoder.
//
// For a non-pointer type, gob.Register will try to get package full path by
// calling rt.PkgPath() for a name to register. If your project has vendor
// directory, it is possible that PkgPath will get a path like this:
// ../../../vendor/github.com/golang/mock/mockgen/model
gob.RegisterName(pkgPath+".PredeclaredType", PredeclaredType(""))
}
// ArrayType is an array or slice type.
type ArrayType struct {
Len int // -1 for slices, >= 0 for arrays
Type Type
}
func (at *ArrayType) String(pm map[string]string, pkgOverride string) string {
s := "[]"
if at.Len > -1 {
s = fmt.Sprintf("[%d]", at.Len)
}
return s + at.Type.String(pm, pkgOverride)
}
func (at *ArrayType) addImports(im map[string]bool) { at.Type.addImports(im) }
// ChanType is a channel type.
type ChanType struct {
Dir ChanDir // 0, 1 or 2
Type Type
}
// String renders the channel type with its direction arrow, resolving the
// element type through the package map pm.
func (ct *ChanType) String(pm map[string]string, pkgOverride string) string {
	elem := ct.Type.String(pm, pkgOverride)
	switch ct.Dir {
	case RecvDir:
		return "<-chan " + elem
	case SendDir:
		return "chan<- " + elem
	default:
		return "chan " + elem
	}
}
func (ct *ChanType) addImports(im map[string]bool) { ct.Type.addImports(im) }
// ChanDir is a channel direction.
type ChanDir int
// Constants for channel directions.
const (
RecvDir ChanDir = 1
SendDir ChanDir = 2
)
// FuncType is a function type.
type FuncType struct {
In, Out []*Parameter
Variadic *Parameter // may be nil
}
// String renders the function type's signature, including any variadic
// parameter and parenthesizing multiple return values.
func (ft *FuncType) String(pm map[string]string, pkgOverride string) string {
	var args []string
	for _, p := range ft.In {
		args = append(args, p.Type.String(pm, pkgOverride))
	}
	if ft.Variadic != nil {
		args = append(args, "..."+ft.Variadic.Type.String(pm, pkgOverride))
	}
	var rets []string
	for _, p := range ft.Out {
		rets = append(rets, p.Type.String(pm, pkgOverride))
	}
	sig := "func(" + strings.Join(args, ", ") + ")"
	switch len(rets) {
	case 0:
		return sig
	case 1:
		return sig + " " + rets[0]
	default:
		return sig + " (" + strings.Join(rets, ", ") + ")"
	}
}
func (ft *FuncType) addImports(im map[string]bool) {
for _, p := range ft.In {
p.Type.addImports(im)
}
if ft.Variadic != nil {
ft.Variadic.Type.addImports(im)
}
for _, p := range ft.Out {
p.Type.addImports(im)
}
}
// MapType is a map type.
type MapType struct {
Key, Value Type
}
func (mt *MapType) String(pm map[string]string, pkgOverride string) string {
return "map[" + mt.Key.String(pm, pkgOverride) + "]" + mt.Value.String(pm, pkgOverride)
}
func (mt *MapType) addImports(im map[string]bool) {
mt.Key.addImports(im)
mt.Value.addImports(im)
}
// NamedType is an exported type in a package.
type NamedType struct {
Package string // may be empty
Type string
TypeParams *TypeParametersType
}
// String renders the named type, qualifying it with the package prefix from
// pm unless the type lives in pkgOverride (the package being generated into).
func (nt *NamedType) String(pm map[string]string, pkgOverride string) string {
	suffix := nt.TypeParams.String(pm, pkgOverride)
	if pkgOverride == nt.Package {
		// Same package: no qualifier needed.
		return nt.Type + suffix
	}
	if prefix := pm[nt.Package]; prefix != "" {
		return prefix + "." + nt.Type + suffix
	}
	return nt.Type + suffix
}
func (nt *NamedType) addImports(im map[string]bool) {
if nt.Package != "" {
im[nt.Package] = true
}
nt.TypeParams.addImports(im)
}
// PointerType is a pointer to another type.
type PointerType struct {
Type Type
}
func (pt *PointerType) String(pm map[string]string, pkgOverride string) string {
return "*" + pt.Type.String(pm, pkgOverride)
}
func (pt *PointerType) addImports(im map[string]bool) { pt.Type.addImports(im) }
// PredeclaredType is a predeclared type such as "int".
type PredeclaredType string
func (pt PredeclaredType) String(map[string]string, string) string { return string(pt) }
func (pt PredeclaredType) addImports(map[string]bool) {}
// TypeParametersType contains type parameters for a NamedType.
type TypeParametersType struct {
TypeParameters []Type
}
// String renders the bracketed type-parameter list, or "" when there are no
// type parameters (including on a nil receiver).
func (tp *TypeParametersType) String(pm map[string]string, pkgOverride string) string {
	if tp == nil || len(tp.TypeParameters) == 0 {
		return ""
	}
	parts := make([]string, len(tp.TypeParameters))
	for i, v := range tp.TypeParameters {
		parts[i] = v.String(pm, pkgOverride)
	}
	return "[" + strings.Join(parts, ", ") + "]"
}
func (tp *TypeParametersType) addImports(im map[string]bool) {
if tp == nil {
return
}
for _, v := range tp.TypeParameters {
v.addImports(im)
}
}
// The following code is intended to be called by the program generated by ../reflect.go.

// InterfaceFromInterfaceType returns a pointer to an interface for the
// given reflection interface type. Each method's signature is decomposed
// into model parameters; an error from any signature aborts the conversion.
func InterfaceFromInterfaceType(it reflect.Type) (*Interface, error) {
	if it.Kind() != reflect.Interface {
		return nil, fmt.Errorf("%v is not an interface", it)
	}
	intf := &Interface{}
	for i := 0; i < it.NumMethod(); i++ {
		mt := it.Method(i)
		// TODO: need to skip unexported methods? or just raise an error?
		m := &Method{
			Name: mt.Name,
		}
		var err error
		m.In, m.Variadic, m.Out, err = funcArgsFromType(mt.Type)
		if err != nil {
			return nil, err
		}
		intf.AddMethod(m)
	}
	return intf, nil
}
// funcArgsFromType decomposes a function type into its input parameters,
// its optional variadic parameter (the element type of the final slice
// argument), and its output parameters.
// t's Kind must be a reflect.Func.
func funcArgsFromType(t reflect.Type) (in []*Parameter, variadic *Parameter, out []*Parameter, err error) {
	nin := t.NumIn()
	if t.IsVariadic() {
		// The last input of a variadic func is a slice; handle it separately.
		nin--
	}
	var p *Parameter
	for i := 0; i < nin; i++ {
		p, err = parameterFromType(t.In(i))
		if err != nil {
			return
		}
		in = append(in, p)
	}
	if t.IsVariadic() {
		// Record the element type of the variadic slice, not the slice itself.
		p, err = parameterFromType(t.In(nin).Elem())
		if err != nil {
			return
		}
		variadic = p
	}
	for i := 0; i < t.NumOut(); i++ {
		p, err = parameterFromType(t.Out(i))
		if err != nil {
			return
		}
		out = append(out, p)
	}
	return
}
// parameterFromType builds an unnamed Parameter for the reflect type t.
func parameterFromType(t reflect.Type) (*Parameter, error) {
	typ, err := typeFromType(t)
	if err != nil {
		return nil, err
	}
	return &Parameter{Type: typ}, nil
}
var errorType = reflect.TypeOf((*error)(nil)).Elem()
var byteType = reflect.TypeOf(byte(0))
// typeFromType converts a reflect.Type into the model's Type representation.
// Named types become NamedType, predeclared kinds become PredeclaredType,
// and composite kinds recurse on their element types. Unsupported kinds
// (non-empty structs, unsafe pointers) return an error.
func typeFromType(t reflect.Type) (Type, error) {
	// Hack workaround for https://golang.org/issue/3853.
	// This explicit check should not be necessary.
	if t == byteType {
		return PredeclaredType("byte"), nil
	}
	if imp := t.PkgPath(); imp != "" {
		// A non-empty PkgPath means a defined (named) type.
		return &NamedType{
			Package: impPath(imp),
			Type:    t.Name(),
		}, nil
	}
	// only unnamed or predeclared types after here
	// Lots of types have element types. Let's do the parsing and error checking for all of them.
	var elemType Type
	switch t.Kind() {
	case reflect.Array, reflect.Chan, reflect.Map, reflect.Ptr, reflect.Slice:
		var err error
		elemType, err = typeFromType(t.Elem())
		if err != nil {
			return nil, err
		}
	}
	switch t.Kind() {
	case reflect.Array:
		return &ArrayType{
			Len:  t.Len(),
			Type: elemType,
		}, nil
	case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
		reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, reflect.String:
		// Predeclared scalar kinds map directly onto their kind name.
		return PredeclaredType(t.Kind().String()), nil
	case reflect.Chan:
		var dir ChanDir
		switch t.ChanDir() {
		case reflect.RecvDir:
			dir = RecvDir
		case reflect.SendDir:
			dir = SendDir
		}
		// A bidirectional channel leaves dir as the zero value.
		return &ChanType{
			Dir:  dir,
			Type: elemType,
		}, nil
	case reflect.Func:
		in, variadic, out, err := funcArgsFromType(t)
		if err != nil {
			return nil, err
		}
		return &FuncType{
			In:       in,
			Out:      out,
			Variadic: variadic,
		}, nil
	case reflect.Interface:
		// Two special interfaces.
		if t.NumMethod() == 0 {
			return PredeclaredType("interface{}"), nil
		}
		if t == errorType {
			return PredeclaredType("error"), nil
		}
	case reflect.Map:
		kt, err := typeFromType(t.Key())
		if err != nil {
			return nil, err
		}
		return &MapType{
			Key:   kt,
			Value: elemType,
		}, nil
	case reflect.Ptr:
		return &PointerType{
			Type: elemType,
		}, nil
	case reflect.Slice:
		// Slices are modeled as arrays with Len == -1.
		return &ArrayType{
			Len:  -1,
			Type: elemType,
		}, nil
	case reflect.Struct:
		if t.NumField() == 0 {
			return PredeclaredType("struct{}"), nil
		}
	}
	// TODO: Struct, UnsafePointer
	return nil, fmt.Errorf("can't yet turn %v (%v) into a model.Type", t, t.Kind())
}
// impPath sanitizes the package path returned by the `PkgPath` method of a
// reflect Type so that it is importable. PkgPath might return a path that
// includes "vendor"; such paths do not compile, so everything up to and
// including the last "/vendor/" is removed.
// See https://github.com/golang/go/issues/12019.
func impPath(imp string) string {
	if strings.HasPrefix(imp, "vendor/") {
		// Normalize a leading "vendor/" so the marker search below finds it.
		imp = "/" + imp
	}
	const marker = "/vendor/"
	if i := strings.LastIndex(imp, marker); i >= 0 {
		return imp[i+len(marker):]
	}
	return imp
}
// ErrorInterface represent built-in error interface.
var ErrorInterface = Interface{
Name: "error",
Methods: []*Method{
{
Name: "Error",
Out: []*Parameter{
{
Name: "",
Type: PredeclaredType("string"),
},
},
},
},
}
| mock/mockgen/model/model.go/0 | {
"file_path": "mock/mockgen/model/model.go",
"repo_id": "mock",
"token_count": 4950
} | 620 |
// A test that uses a mock.
package user_test
import (
"testing"
"github.com/golang/mock/gomock"
user "github.com/golang/mock/sample"
"github.com/golang/mock/sample/imp1"
)
// TestRemember exercises the generated MockIndex end to end: literal and
// matcher expectations, default nil returns, assignable concrete returns,
// variadic expectations, and Do actions (including nil argument pass-through).
func TestRemember(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	mockIndex := NewMockIndex(ctrl)
	mockIndex.EXPECT().Put("a", 1)            // literals work
	mockIndex.EXPECT().Put("b", gomock.Eq(2)) // matchers work too
	// NillableRet returns error. Not declaring it should result in a nil return.
	mockIndex.EXPECT().NillableRet()
	// Calls that returns something assignable to the return type.
	boolc := make(chan bool)
	// In this case, "chan bool" is assignable to "chan<- bool".
	mockIndex.EXPECT().ConcreteRet().Return(boolc)
	// In this case, nil is assignable to "chan<- bool".
	mockIndex.EXPECT().ConcreteRet().Return(nil)
	// Should be able to place expectations on variadic methods.
	mockIndex.EXPECT().Ellip("%d", 0, 1, 1, 2, 3) // direct args
	tri := []interface{}{1, 3, 6, 10, 15}
	mockIndex.EXPECT().Ellip("%d", tri...) // args from slice
	mockIndex.EXPECT().EllipOnly(gomock.Eq("arg"))
	user.Remember(mockIndex, []string{"a", "b"}, []interface{}{1, 2})
	// Check the ConcreteRet calls.
	if c := mockIndex.ConcreteRet(); c != boolc {
		t.Errorf("ConcreteRet: got %v, want %v", c, boolc)
	}
	if c := mockIndex.ConcreteRet(); c != nil {
		t.Errorf("ConcreteRet: got %v, want nil", c)
	}
	// Try one with an action.
	calledString := ""
	mockIndex.EXPECT().Put(gomock.Any(), gomock.Any()).Do(func(key string, _ interface{}) {
		calledString = key
	})
	mockIndex.EXPECT().NillableRet()
	user.Remember(mockIndex, []string{"blah"}, []interface{}{7})
	if calledString != "blah" {
		t.Fatalf(`Uh oh. %q != "blah"`, calledString)
	}
	// Use Do with a nil arg.
	mockIndex.EXPECT().Put("nil-key", gomock.Any()).Do(func(key string, value interface{}) {
		if value != nil {
			t.Errorf("Put did not pass through nil; got %v", value)
		}
	})
	mockIndex.EXPECT().NillableRet()
	user.Remember(mockIndex, []string{"nil-key"}, []interface{}{nil})
}
// TestVariadicFunction checks expectations on a variadic mock method:
// a fully specified argument list matches only that exact call, while
// gomock.Any() in the variadic position also matches calls with
// different (or zero) trailing arguments. Each Do callback asserts the
// sum of the variadic ints it received.
func TestVariadicFunction(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	mockIndex := NewMockIndex(ctrl)
	// Exact variadic args: only Ellip("%d", 5, 6, 7, 8) satisfies this (sum 26).
	mockIndex.EXPECT().Ellip("%d", 5, 6, 7, 8).Do(func(format string, nums ...int) {
		sum := 0
		for _, value := range nums {
			sum += value
		}
		if sum != 26 {
			t.Errorf("Expected 26, got %d", sum)
		}
	})
	// Any() in the variadic slot: satisfied by Ellip("%d", 1, 2, 3, 4) below (sum 10).
	mockIndex.EXPECT().Ellip("%d", gomock.Any()).Do(func(format string, nums ...int) {
		sum := 0
		for _, value := range nums {
			sum += value
		}
		if sum != 10 {
			t.Errorf("Expected 10, got %d", sum)
		}
	})
	// Any() again: satisfied by Ellip("%d", 0) (sum 0).
	mockIndex.EXPECT().Ellip("%d", gomock.Any()).Do(func(format string, nums ...int) {
		sum := 0
		for _, value := range nums {
			sum += value
		}
		if sum != 0 {
			t.Errorf("Expected 0, got %d", sum)
		}
	})
	// Any() also matches a call with no variadic args at all.
	mockIndex.EXPECT().Ellip("%d", gomock.Any()).Do(func(format string, nums ...int) {
		sum := 0
		for _, value := range nums {
			sum += value
		}
		if sum != 0 {
			t.Errorf("Expected 0, got %d", sum)
		}
	})
	// No variadic args in the expectation: matches the bare Ellip("%d") call.
	mockIndex.EXPECT().Ellip("%d").Do(func(format string, nums ...int) {
		sum := 0
		for _, value := range nums {
			sum += value
		}
		if sum != 0 {
			t.Errorf("Expected 0, got %d", sum)
		}
	})
	mockIndex.Ellip("%d", 1, 2, 3, 4) // Match second matcher.
	mockIndex.Ellip("%d", 5, 6, 7, 8) // Match first matcher.
	mockIndex.Ellip("%d", 0)
	mockIndex.Ellip("%d")
	mockIndex.Ellip("%d")
}
// TestGrabPointer verifies that SetArg writes a value through the
// pointer argument of a mocked call, and that user.GrabPointer then
// reads that value back.
func TestGrabPointer(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	idx := NewMockIndex(ctrl)
	// Have the mocked Ptr call store 7 into its first (pointer) argument.
	idx.EXPECT().Ptr(gomock.Any()).SetArg(0, 7)

	if i := user.GrabPointer(idx); i != 7 {
		t.Errorf("Expected 7, got %d", i)
	}
}
// TestEmbeddedInterface verifies that the generated mock implements the
// methods declared directly on the interface as well as those inherited
// from embedded interfaces, including one embedded from a foreign
// package (imp1).
func TestEmbeddedInterface(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	mockEmbed := NewMockEmbed(ctrl)
	mockEmbed.EXPECT().RegularMethod()
	mockEmbed.EXPECT().EmbeddedMethod()
	mockEmbed.EXPECT().ForeignEmbeddedMethod()
	mockEmbed.RegularMethod()
	mockEmbed.EmbeddedMethod()
	var emb imp1.ForeignEmbedded = mockEmbed // also does interface check
	emb.ForeignEmbeddedMethod()
}
// TestExpectTrueNil verifies that an expectation declared with a literal
// nil (i.e. a nil interface value) correctly matches a call made with a
// typed, concrete nil.
func TestExpectTrueNil(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	idx := NewMockIndex(ctrl)
	// The expectation sees an untyped (interface) nil...
	idx.EXPECT().Ptr(nil)
	// ...while the actual call passes a concrete nil *int.
	idx.Ptr(nil)
}
// TestDoAndReturnSignature checks how DoAndReturn reacts to callback
// functions whose signatures disagree with the mocked method.
func TestDoAndReturnSignature(t *testing.T) {
	// A callback with the wrong number of return values causes a panic
	// when the mocked method is actually invoked.
	t.Run("wrong number of return args", func(t *testing.T) {
		ctrl := gomock.NewController(t)
		defer ctrl.Finish()
		mockIndex := NewMockIndex(ctrl)
		mockIndex.EXPECT().Slice(gomock.Any(), gomock.Any()).DoAndReturn(
			func(_ []int, _ []byte) {},
		)
		defer func() {
			if r := recover(); r == nil {
				t.Error("expected panic")
			}
		}()
		mockIndex.Slice([]int{0}, []byte("meow"))
	})
	// NOTE(review): no panic is checked in this subtest, so a mismatched
	// return *type* is presumably tolerated at call time — confirm
	// against gomock's Call.DoAndReturn documentation.
	t.Run("wrong type of return arg", func(t *testing.T) {
		ctrl := gomock.NewController(t)
		defer ctrl.Finish()
		mockIndex := NewMockIndex(ctrl)
		mockIndex.EXPECT().Slice(gomock.Any(), gomock.Any()).DoAndReturn(
			func(_ []int, _ []byte) bool {
				return true
			})
		mockIndex.Slice([]int{0}, []byte("meow"))
	})
}
| mock/sample/user_test.go/0 | {
"file_path": "mock/sample/user_test.go",
"repo_id": "mock",
"token_count": 2153
} | 621 |
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bpf_test
import (
"net"
"testing"
"golang.org/x/net/bpf"
"golang.org/x/net/ipv4"
)
// TestVMLoadAbsoluteOffsetOutOfBounds checks that a LoadAbsolute whose
// offset equals the packet length (one past the last byte) terminates
// the program with a result of 0, rather than falling through to the
// RetConstant{Val: 1} instruction.
func TestVMLoadAbsoluteOffsetOutOfBounds(t *testing.T) {
	pkt := []byte{
		0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff,
		0, 1, 2, 3,
	}
	vm, done, err := testVM(t, []bpf.Instruction{
		bpf.LoadAbsolute{
			Off:  uint32(len(pkt)),
			Size: 1,
		},
		// Out of bounds should return 0, return 1 to tell if execution continued
		bpf.RetConstant{Val: 1},
	})
	if err != nil {
		t.Fatalf("failed to load BPF program: %v", err)
	}
	defer done()
	out, err := vm.Run(pkt)
	if err != nil {
		t.Fatalf("unexpected error while running program: %v", err)
	}
	if want, got := 0, out; want != got {
		t.Fatalf("unexpected result:\n- want: %d\n- got: %d",
			want, got)
	}
}
// TestVMLoadAbsoluteOffsetPlusSizeOutOfBounds checks the case where the
// offset itself is in bounds but offset+size overruns the packet: a
// 2-byte load starting at the last byte must terminate with 0 instead
// of continuing to the RetConstant{Val: 1} instruction.
func TestVMLoadAbsoluteOffsetPlusSizeOutOfBounds(t *testing.T) {
	pkt := []byte{
		0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff,
		0,
	}
	vm, done, err := testVM(t, []bpf.Instruction{
		bpf.LoadAbsolute{
			Off:  uint32(len(pkt) - 1),
			Size: 2,
		},
		// Out of bounds should return 0, return 1 to tell if execution continued
		bpf.RetConstant{Val: 1},
	})
	if err != nil {
		t.Fatalf("failed to load BPF program: %v", err)
	}
	defer done()
	out, err := vm.Run(pkt)
	if err != nil {
		t.Fatalf("unexpected error while running program: %v", err)
	}
	if want, got := 0, out; want != got {
		t.Fatalf("unexpected result:\n- want: %d\n- got: %d",
			want, got)
	}
}
// TestVMLoadAbsoluteBadInstructionSize checks that assembling a
// LoadAbsolute with an unsupported Size (only 1, 2 and 4 byte loads are
// valid in BPF) is rejected at program-load time with a descriptive
// error. errStr is a helper defined elsewhere in this package.
func TestVMLoadAbsoluteBadInstructionSize(t *testing.T) {
	_, _, err := testVM(t, []bpf.Instruction{
		bpf.LoadAbsolute{
			Size: 5,
		},
		bpf.RetA{},
	})
	if errStr(err) != "assembling instruction 1: invalid load byte length 0" {
		t.Fatalf("unexpected error: %v", err)
	}
}
// TestVMLoadConstantOK loads the constant 9 into register X, copies it
// into A with TXA, and returns A. The BPF return value (9) is the
// number of packet bytes to keep; the expected out of 1 presumably
// reflects the testVM harness (defined elsewhere in this file)
// discounting the 8-byte packet prefix — compare
// TestVMLoadMemShiftLoadIndirectOK, which expects len(in)-8.
func TestVMLoadConstantOK(t *testing.T) {
	vm, done, err := testVM(t, []bpf.Instruction{
		bpf.LoadConstant{
			Dst: bpf.RegX,
			Val: 9,
		},
		bpf.TXA{},
		bpf.RetA{},
	})
	if err != nil {
		t.Fatalf("failed to load BPF program: %v", err)
	}
	defer done()
	out, err := vm.Run([]byte{
		0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff,
		0,
	})
	if err != nil {
		t.Fatalf("unexpected error while running program: %v", err)
	}
	if want, got := 1, out; want != got {
		t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
			want, got)
	}
}
// TestVMLoadIndirectOutOfBounds checks that a LoadIndirect (X-relative
// load; X is 0 here) whose effective offset equals the packet length
// terminates the program with 0 instead of reaching the
// RetConstant{Val: 1} instruction.
func TestVMLoadIndirectOutOfBounds(t *testing.T) {
	pkt := []byte{
		0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff,
		0,
	}
	vm, done, err := testVM(t, []bpf.Instruction{
		bpf.LoadIndirect{
			Off:  uint32(len(pkt)),
			Size: 1,
		},
		// Out of bounds should return 0, return 1 to tell if execution continued
		bpf.RetConstant{Val: 1},
	})
	if err != nil {
		t.Fatalf("failed to load BPF program: %v", err)
	}
	defer done()
	out, err := vm.Run(pkt)
	if err != nil {
		t.Fatalf("unexpected error while running program: %v", err)
	}
	if want, got := 0, out; want != got {
		t.Fatalf("unexpected result:\n- want: %d\n- got: %d",
			want, got)
	}
}
// TestVMLoadMemShiftOutOfBounds checks that LoadMemShift (which loads
// an IPv4 header length from the packet) with an offset equal to the
// packet length terminates with 0 instead of reaching the
// RetConstant{Val: 1} instruction.
func TestVMLoadMemShiftOutOfBounds(t *testing.T) {
	pkt := []byte{
		0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff,
		0,
	}
	vm, done, err := testVM(t, []bpf.Instruction{
		bpf.LoadMemShift{
			Off: uint32(len(pkt)),
		},
		// Out of bounds should return 0, return 1 to tell if execution continued
		bpf.RetConstant{Val: 1},
	})
	if err != nil {
		t.Fatalf("failed to load BPF program: %v", err)
	}
	defer done()
	out, err := vm.Run(pkt)
	if err != nil {
		t.Fatalf("unexpected error while running program: %v", err)
	}
	if want, got := 0, out; want != got {
		t.Fatalf("unexpected result:\n- want: %d\n- got: %d",
			want, got)
	}
}
const (
	// dhcp4Port is the UDP destination port the test filter matches on.
	// NOTE(review): 53 is actually the DNS port (DHCPv4 uses 67/68), but
	// the exact value is irrelevant here — the BPF program and the test
	// packets only need to agree on it.
	dhcp4Port = 53
)
// TestVMLoadMemShiftLoadIndirectNoResult appends a UDP header whose
// destination port is NOT dhcp4Port to the packet built by testDHCPv4,
// and verifies the filter rejects the packet (result 0).
func TestVMLoadMemShiftLoadIndirectNoResult(t *testing.T) {
	vm, in, done := testDHCPv4(t)
	defer done()
	// Append mostly empty UDP header with incorrect DHCPv4 port
	in = append(in, []byte{
		0, 0,
		0, dhcp4Port + 1,
		0, 0,
		0, 0,
	}...)
	out, err := vm.Run(in)
	if err != nil {
		t.Fatalf("unexpected error while running program: %v", err)
	}
	if want, got := 0, out; want != got {
		t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
			want, got)
	}
}
// TestVMLoadMemShiftLoadIndirectOK appends a UDP header with the
// correct dhcp4Port destination and verifies the filter accepts the
// packet; the expected result is the packet length minus the 8-byte
// prefix that testDHCPv4 prepends.
func TestVMLoadMemShiftLoadIndirectOK(t *testing.T) {
	vm, in, done := testDHCPv4(t)
	defer done()
	// Append mostly empty UDP header with correct DHCPv4 port
	in = append(in, []byte{
		0, 0,
		0, dhcp4Port,
		0, 0,
		0, 0,
	}...)
	out, err := vm.Run(in)
	if err != nil {
		t.Fatalf("unexpected error while running program: %v", err)
	}
	if want, got := len(in)-8, out; want != got {
		t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d",
			want, got)
	}
}
// testDHCPv4 assembles a BPF program that accepts UDP-over-IPv4 packets
// whose destination port is dhcp4Port: LoadMemShift reads the variable
// IPv4 header length at offset 8 into X, LoadIndirect then reads the
// 2-byte UDP destination port relative to it, and JumpIf accepts
// (return 1500) or rejects (return 0) the packet.
//
// It returns the loaded VM, a packet prefix (8 bytes of 0xff followed
// by a minimal IPv4 header) for the caller to extend with a UDP header,
// and a cleanup function that must be invoked when done.
func testDHCPv4(t *testing.T) (virtualMachine, []byte, func()) {
	// DHCPv4 test data courtesy of David Anderson:
	// https://github.com/google/netboot/blob/master/dhcp4/conn_linux.go#L59-L70
	vm, done, err := testVM(t, []bpf.Instruction{
		// Load IPv4 packet length
		bpf.LoadMemShift{Off: 8},
		// Get UDP dport
		bpf.LoadIndirect{Off: 8 + 2, Size: 2},
		// Correct dport?
		bpf.JumpIf{Cond: bpf.JumpEqual, Val: dhcp4Port, SkipFalse: 1},
		// Accept
		bpf.RetConstant{Val: 1500},
		// Ignore
		bpf.RetConstant{Val: 0},
	})
	if err != nil {
		t.Fatalf("failed to load BPF program: %v", err)
	}
	// Minimal requirements to make a valid IPv4 header
	h := &ipv4.Header{
		Len: ipv4.HeaderLen,
		Src: net.IPv4(192, 168, 1, 1),
		Dst: net.IPv4(192, 168, 1, 2),
	}
	hb, err := h.Marshal()
	if err != nil {
		t.Fatalf("failed to marshal IPv4 header: %v", err)
	}
	hb = append([]byte{
		0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff,
	}, hb...)
	return vm, hb, done
}
| net/bpf/vm_load_test.go/0 | {
"file_path": "net/bpf/vm_load_test.go",
"repo_id": "net",
"token_count": 2526
} | 622 |
These test cases come from
http://www.w3.org/International/tests/repository/html5/the-input-byte-stream/results-basics
Distributed under both the W3C Test Suite License
(http://www.w3.org/Consortium/Legal/2008/04-testsuite-license)
and the W3C 3-clause BSD License
(http://www.w3.org/Consortium/Legal/2008/03-bsd-license).
To contribute to a W3C Test Suite, see the policies and contribution
forms (http://www.w3.org/2004/10/27-testcases).
| net/html/charset/testdata/README/0 | {
"file_path": "net/html/charset/testdata/README",
"repo_id": "net",
"token_count": 153
} | 623 |
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package html
import (
"strings"
)
// adjustAttributeNames rewrites, in place, any attribute key that has a
// canonical replacement in nameMap (e.g. the lowercased-to-camelCase
// SVG attribute tables below). Keys absent from nameMap are untouched.
func adjustAttributeNames(aa []Attribute, nameMap map[string]string) {
	for i := range aa {
		newName, ok := nameMap[aa[i].Key]
		if !ok {
			continue
		}
		aa[i].Key = newName
	}
}
// adjustForeignAttributes splits the namespaced attribute keys used in
// foreign (SVG/MathML) content — the xlink:*, xml:* and xmlns:xlink
// names — into a separate Namespace and local Key, in place. All other
// attributes are left untouched; a cheap first-byte check skips keys
// that cannot possibly match.
func adjustForeignAttributes(aa []Attribute) {
	for i := range aa {
		key := aa[i].Key
		if key == "" || key[0] != 'x' {
			continue
		}
		switch key {
		case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show",
			"xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink":
			j := strings.Index(key, ":")
			aa[i].Namespace = key[:j]
			aa[i].Key = key[j+1:]
		}
	}
}
// htmlIntegrationPoint reports whether n is an HTML integration point:
// an element inside foreign (MathML/SVG) content whose children are
// nevertheless parsed as regular HTML. These are MathML annotation-xml
// elements whose encoding attribute is "text/html" or
// "application/xhtml+xml" (compared case-insensitively), and the SVG
// desc, foreignObject and title elements.
func htmlIntegrationPoint(n *Node) bool {
	if n.Type != ElementNode {
		return false
	}
	switch n.Namespace {
	case "math":
		if n.Data == "annotation-xml" {
			for _, a := range n.Attr {
				if a.Key == "encoding" {
					val := strings.ToLower(a.Val)
					if val == "text/html" || val == "application/xhtml+xml" {
						return true
					}
				}
			}
		}
	case "svg":
		switch n.Data {
		case "desc", "foreignObject", "title":
			return true
		}
	}
	return false
}
// mathMLTextIntegrationPoint reports whether n is a MathML text
// integration point: one of the mi, mo, mn, ms or mtext elements in the
// "math" namespace.
func mathMLTextIntegrationPoint(n *Node) bool {
	if n.Namespace != "math" {
		return false
	}
	d := n.Data
	return d == "mi" || d == "mo" || d == "mn" || d == "ms" || d == "mtext"
}
// Section 12.2.6.5.
// breakout is the set of HTML start-tag names that end foreign
// (SVG/MathML) content handling; used by the parser elsewhere in this
// package.
var breakout = map[string]bool{
	"b":          true,
	"big":        true,
	"blockquote": true,
	"body":       true,
	"br":         true,
	"center":     true,
	"code":       true,
	"dd":         true,
	"div":        true,
	"dl":         true,
	"dt":         true,
	"em":         true,
	"embed":      true,
	"h1":         true,
	"h2":         true,
	"h3":         true,
	"h4":         true,
	"h5":         true,
	"h6":         true,
	"head":       true,
	"hr":         true,
	"i":          true,
	"img":        true,
	"li":         true,
	"listing":    true,
	"menu":       true,
	"meta":       true,
	"nobr":       true,
	"ol":         true,
	"p":          true,
	"pre":        true,
	"ruby":       true,
	"s":          true,
	"small":      true,
	"span":       true,
	"strong":     true,
	"strike":     true,
	"sub":        true,
	"sup":        true,
	"table":      true,
	"tt":         true,
	"u":          true,
	"ul":         true,
	"var":        true,
}

// Section 12.2.6.5.
// svgTagNameAdjustments maps lowercased SVG tag names to their
// canonical mixed-case forms.
var svgTagNameAdjustments = map[string]string{
	"altglyph":            "altGlyph",
	"altglyphdef":         "altGlyphDef",
	"altglyphitem":        "altGlyphItem",
	"animatecolor":        "animateColor",
	"animatemotion":       "animateMotion",
	"animatetransform":    "animateTransform",
	"clippath":            "clipPath",
	"feblend":             "feBlend",
	"fecolormatrix":       "feColorMatrix",
	"fecomponenttransfer": "feComponentTransfer",
	"fecomposite":         "feComposite",
	"feconvolvematrix":    "feConvolveMatrix",
	"fediffuselighting":   "feDiffuseLighting",
	"fedisplacementmap":   "feDisplacementMap",
	"fedistantlight":      "feDistantLight",
	"feflood":             "feFlood",
	"fefunca":             "feFuncA",
	"fefuncb":             "feFuncB",
	"fefuncg":             "feFuncG",
	"fefuncr":             "feFuncR",
	"fegaussianblur":      "feGaussianBlur",
	"feimage":             "feImage",
	"femerge":             "feMerge",
	"femergenode":         "feMergeNode",
	"femorphology":        "feMorphology",
	"feoffset":            "feOffset",
	"fepointlight":        "fePointLight",
	"fespecularlighting":  "feSpecularLighting",
	"fespotlight":         "feSpotLight",
	"fetile":              "feTile",
	"feturbulence":        "feTurbulence",
	"foreignobject":       "foreignObject",
	"glyphref":            "glyphRef",
	"lineargradient":      "linearGradient",
	"radialgradient":      "radialGradient",
	"textpath":            "textPath",
}

// Section 12.2.6.1
// mathMLAttributeAdjustments maps lowercased MathML attribute names to
// their canonical forms; applied via adjustAttributeNames.
var mathMLAttributeAdjustments = map[string]string{
	"definitionurl": "definitionURL",
}

// svgAttributeAdjustments maps lowercased SVG attribute names to their
// canonical camelCase forms; applied via adjustAttributeNames.
var svgAttributeAdjustments = map[string]string{
	"attributename":       "attributeName",
	"attributetype":       "attributeType",
	"basefrequency":       "baseFrequency",
	"baseprofile":         "baseProfile",
	"calcmode":            "calcMode",
	"clippathunits":       "clipPathUnits",
	"diffuseconstant":     "diffuseConstant",
	"edgemode":            "edgeMode",
	"filterunits":         "filterUnits",
	"glyphref":            "glyphRef",
	"gradienttransform":   "gradientTransform",
	"gradientunits":       "gradientUnits",
	"kernelmatrix":        "kernelMatrix",
	"kernelunitlength":    "kernelUnitLength",
	"keypoints":           "keyPoints",
	"keysplines":          "keySplines",
	"keytimes":            "keyTimes",
	"lengthadjust":        "lengthAdjust",
	"limitingconeangle":   "limitingConeAngle",
	"markerheight":        "markerHeight",
	"markerunits":         "markerUnits",
	"markerwidth":         "markerWidth",
	"maskcontentunits":    "maskContentUnits",
	"maskunits":           "maskUnits",
	"numoctaves":          "numOctaves",
	"pathlength":          "pathLength",
	"patterncontentunits": "patternContentUnits",
	"patterntransform":    "patternTransform",
	"patternunits":        "patternUnits",
	"pointsatx":           "pointsAtX",
	"pointsaty":           "pointsAtY",
	"pointsatz":           "pointsAtZ",
	"preservealpha":       "preserveAlpha",
	"preserveaspectratio": "preserveAspectRatio",
	"primitiveunits":      "primitiveUnits",
	"refx":                "refX",
	"refy":                "refY",
	"repeatcount":         "repeatCount",
	"repeatdur":           "repeatDur",
	"requiredextensions":  "requiredExtensions",
	"requiredfeatures":    "requiredFeatures",
	"specularconstant":    "specularConstant",
	"specularexponent":    "specularExponent",
	"spreadmethod":        "spreadMethod",
	"startoffset":         "startOffset",
	"stddeviation":        "stdDeviation",
	"stitchtiles":         "stitchTiles",
	"surfacescale":        "surfaceScale",
	"systemlanguage":      "systemLanguage",
	"tablevalues":         "tableValues",
	"targetx":             "targetX",
	"targety":             "targetY",
	"textlength":          "textLength",
	"viewbox":             "viewBox",
	"viewtarget":          "viewTarget",
	"xchannelselector":    "xChannelSelector",
	"ychannelselector":    "yChannelSelector",
	"zoomandpan":          "zoomAndPan",
}
| net/html/foreign.go/0 | {
"file_path": "net/html/foreign.go",
"repo_id": "net",
"token_count": 3188
} | 624 |
#data
<!doctype html><p>foo<address>bar<p>baz
#errors
(1,39): expected-closing-tag-but-got-eof
30: Unclosed element “address”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <address>
| "bar"
| <p>
| "baz"
#data
<!doctype html><address><p>foo</address>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <address>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<article>bar<p>baz
#errors
(1,39): expected-closing-tag-but-got-eof
30: Unclosed element “article”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <article>
| "bar"
| <p>
| "baz"
#data
<!doctype html><article><p>foo</article>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <article>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<aside>bar<p>baz
#errors
(1,37): expected-closing-tag-but-got-eof
28: Unclosed element “aside”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <aside>
| "bar"
| <p>
| "baz"
#data
<!doctype html><aside><p>foo</aside>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <aside>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<blockquote>bar<p>baz
#errors
(1,42): expected-closing-tag-but-got-eof
33: Unclosed element “blockquote”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <blockquote>
| "bar"
| <p>
| "baz"
#data
<!doctype html><blockquote><p>foo</blockquote>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <blockquote>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<center>bar<p>baz
#errors
(1,38): expected-closing-tag-but-got-eof
29: Unclosed element “center”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <center>
| "bar"
| <p>
| "baz"
#data
<!doctype html><center><p>foo</center>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <center>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<details>bar<p>baz
#errors
(1,39): expected-closing-tag-but-got-eof
30: Unclosed element “details”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <details>
| "bar"
| <p>
| "baz"
#data
<!doctype html><details><p>foo</details>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <details>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<dialog>bar<p>baz
#errors
(1,38): expected-closing-tag-but-got-eof
29: Unclosed element “dialog”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <dialog>
| "bar"
| <p>
| "baz"
#data
<!doctype html><dialog><p>foo</dialog>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <dialog>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<dir>bar<p>baz
#errors
(1,35): expected-closing-tag-but-got-eof
26: Unclosed element “dir”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <dir>
| "bar"
| <p>
| "baz"
#data
<!doctype html><dir><p>foo</dir>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <dir>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<div>bar<p>baz
#errors
(1,35): expected-closing-tag-but-got-eof
26: Unclosed element “div”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <div>
| "bar"
| <p>
| "baz"
#data
<!doctype html><div><p>foo</div>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <div>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<dl>bar<p>baz
#errors
(1,34): expected-closing-tag-but-got-eof
25: Unclosed element “dl”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <dl>
| "bar"
| <p>
| "baz"
#data
<!doctype html><dl><p>foo</dl>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <dl>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<fieldset>bar<p>baz
#errors
(1,40): expected-closing-tag-but-got-eof
31: Unclosed element “fieldset”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <fieldset>
| "bar"
| <p>
| "baz"
#data
<!doctype html><fieldset><p>foo</fieldset>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <fieldset>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<figcaption>bar<p>baz
#errors
(1,42): expected-closing-tag-but-got-eof
33: Unclosed element “figcaption”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <figcaption>
| "bar"
| <p>
| "baz"
#data
<!doctype html><figcaption><p>foo</figcaption>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <figcaption>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<figure>bar<p>baz
#errors
(1,38): expected-closing-tag-but-got-eof
29: Unclosed element “figure”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <figure>
| "bar"
| <p>
| "baz"
#data
<!doctype html><figure><p>foo</figure>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <figure>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<footer>bar<p>baz
#errors
(1,38): expected-closing-tag-but-got-eof
29: Unclosed element “footer”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <footer>
| "bar"
| <p>
| "baz"
#data
<!doctype html><footer><p>foo</footer>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <footer>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<header>bar<p>baz
#errors
(1,38): expected-closing-tag-but-got-eof
29: Unclosed element “header”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <header>
| "bar"
| <p>
| "baz"
#data
<!doctype html><header><p>foo</header>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <header>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<hgroup>bar<p>baz
#errors
(1,38): expected-closing-tag-but-got-eof
29: Unclosed element “hgroup”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <hgroup>
| "bar"
| <p>
| "baz"
#data
<!doctype html><hgroup><p>foo</hgroup>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <hgroup>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<listing>bar<p>baz
#errors
(1,39): expected-closing-tag-but-got-eof
30: Unclosed element “listing”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <listing>
| "bar"
| <p>
| "baz"
#data
<!doctype html><listing><p>foo</listing>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <listing>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<menu>bar<p>baz
#errors
(1,36): expected-closing-tag-but-got-eof
27: Unclosed element “menu”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <menu>
| "bar"
| <p>
| "baz"
#data
<!doctype html><menu><p>foo</menu>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <menu>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<nav>bar<p>baz
#errors
(1,35): expected-closing-tag-but-got-eof
26: Unclosed element “nav”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <nav>
| "bar"
| <p>
| "baz"
#data
<!doctype html><nav><p>foo</nav>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <nav>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<ol>bar<p>baz
#errors
(1,34): expected-closing-tag-but-got-eof
25: Unclosed element “ol”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <ol>
| "bar"
| <p>
| "baz"
#data
<!doctype html><ol><p>foo</ol>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <ol>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<pre>bar<p>baz
#errors
(1,35): expected-closing-tag-but-got-eof
26: Unclosed element “pre”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <pre>
| "bar"
| <p>
| "baz"
#data
<!doctype html><pre><p>foo</pre>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <pre>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<section>bar<p>baz
#errors
(1,39): expected-closing-tag-but-got-eof
30: Unclosed element “section”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <section>
| "bar"
| <p>
| "baz"
#data
<!doctype html><section><p>foo</section>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <section>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<summary>bar<p>baz
#errors
(1,39): expected-closing-tag-but-got-eof
30: Unclosed element “summary”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <summary>
| "bar"
| <p>
| "baz"
#data
<!doctype html><summary><p>foo</summary>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <summary>
| <p>
| "foo"
| "bar"
#data
<!doctype html><p>foo<ul>bar<p>baz
#errors
(1,34): expected-closing-tag-but-got-eof
25: Unclosed element “ul”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <ul>
| "bar"
| <p>
| "baz"
#data
<!doctype html><ul><p>foo</ul>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <ul>
| <p>
| "foo"
| "bar"
| net/html/testdata/webkit/blocks.dat/0 | {
"file_path": "net/html/testdata/webkit/blocks.dat",
"repo_id": "net",
"token_count": 6125
} | 625 |
#data
<input type="hidden"><frameset>
#errors
(1,21): expected-doctype-but-got-start-tag
(1,31): unexpected-start-tag
(1,31): eof-in-frameset
#document
| <html>
| <head>
| <frameset>
#data
<!DOCTYPE html><table><caption><svg>foo</table>bar
#errors
(1,47): unexpected-end-tag
(1,47): end-table-tag-in-caption
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <table>
| <caption>
| <svg svg>
| "foo"
| "bar"
#data
<table><tr><td><svg><desc><td></desc><circle>
#errors
(1,7): expected-doctype-but-got-start-tag
(1,30): unexpected-cell-end-tag
(1,37): unexpected-end-tag
(1,45): expected-closing-tag-but-got-eof
#document
| <html>
| <head>
| <body>
| <table>
| <tbody>
| <tr>
| <td>
| <svg svg>
| <svg desc>
| <td>
| <circle>
| net/html/testdata/webkit/pending-spec-changes.dat/0 | {
"file_path": "net/html/testdata/webkit/pending-spec-changes.dat",
"repo_id": "net",
"token_count": 463
} | 626 |
#data
<!doctype html><script>
#errors
(1,23): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| <body>
#data
<!doctype html><script>a
#errors
(1,24): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "a"
| <body>
#data
<!doctype html><script><
#errors
(1,24): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<"
| <body>
#data
<!doctype html><script></
#errors
(1,25): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "</"
| <body>
#data
<!doctype html><script></S
#errors
(1,26): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "</S"
| <body>
#data
<!doctype html><script></SC
#errors
(1,27): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "</SC"
| <body>
#data
<!doctype html><script></SCR
#errors
(1,28): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "</SCR"
| <body>
#data
<!doctype html><script></SCRI
#errors
(1,29): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "</SCRI"
| <body>
#data
<!doctype html><script></SCRIP
#errors
(1,30): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "</SCRIP"
| <body>
#data
<!doctype html><script></SCRIPT
#errors
(1,31): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "</SCRIPT"
| <body>
#data
<!doctype html><script></SCRIPT
#errors
(1,32): expected-attribute-name-but-got-eof
(1,32): expected-named-closing-tag-but-got-eof
#new-errors
(1:33) eof-in-tag
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| <body>
#data
<!doctype html><script></s
#errors
(1,26): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "</s"
| <body>
#data
<!doctype html><script></sc
#errors
(1,27): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "</sc"
| <body>
#data
<!doctype html><script></scr
#errors
(1,28): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "</scr"
| <body>
#data
<!doctype html><script></scri
#errors
(1,29): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "</scri"
| <body>
#data
<!doctype html><script></scrip
#errors
(1,30): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "</scrip"
| <body>
#data
<!doctype html><script></script
#errors
(1,31): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "</script"
| <body>
#data
<!doctype html><script></script
#errors
(1,32): expected-attribute-name-but-got-eof
(1,32): expected-named-closing-tag-but-got-eof
#new-errors
(1:33) eof-in-tag
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| <body>
#data
<!doctype html><script><!
#errors
(1,25): expected-script-data-but-got-eof
(1,25): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!"
| <body>
#data
<!doctype html><script><!a
#errors
(1,26): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!a"
| <body>
#data
<!doctype html><script><!-
#errors
(1,26): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!-"
| <body>
#data
<!doctype html><script><!-a
#errors
(1,27): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!-a"
| <body>
#data
<!doctype html><script><!--
#errors
(1,27): expected-named-closing-tag-but-got-eof
(1,27): unexpected-eof-in-text-mode
#new-errors
(1:28) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--"
| <body>
#data
<!doctype html><script><!--a
#errors
(1,28): expected-named-closing-tag-but-got-eof
(1,28): unexpected-eof-in-text-mode
#new-errors
(1:29) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--a"
| <body>
#data
<!doctype html><script><!--<
#errors
(1,28): expected-named-closing-tag-but-got-eof
(1,28): unexpected-eof-in-text-mode
#new-errors
(1:29) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<"
| <body>
#data
<!doctype html><script><!--<a
#errors
(1,29): expected-named-closing-tag-but-got-eof
(1,29): unexpected-eof-in-text-mode
#new-errors
(1:30) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<a"
| <body>
#data
<!doctype html><script><!--</
#errors
(1,29): expected-named-closing-tag-but-got-eof
(1,29): unexpected-eof-in-text-mode
#new-errors
(1:30) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--</"
| <body>
#data
<!doctype html><script><!--</script
#errors
(1,35): expected-named-closing-tag-but-got-eof
(1,35): unexpected-eof-in-text-mode
#new-errors
(1:36) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--</script"
| <body>
#data
<!doctype html><script><!--</script
#errors
(1,36): expected-attribute-name-but-got-eof
(1,36): expected-named-closing-tag-but-got-eof
#new-errors
(1:37) eof-in-tag
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--"
| <body>
#data
<!doctype html><script><!--<s
#errors
(1,29): expected-named-closing-tag-but-got-eof
(1,29): unexpected-eof-in-text-mode
#new-errors
(1:30) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<s"
| <body>
#data
<!doctype html><script><!--<script
#errors
(1,34): expected-named-closing-tag-but-got-eof
(1,34): unexpected-eof-in-text-mode
#new-errors
(1:35) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script"
| <body>
#data
<!doctype html><script><!--<script
#errors
(1,35): eof-in-script-in-script
(1,35): expected-named-closing-tag-but-got-eof
#new-errors
(1:36) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script "
| <body>
#data
<!doctype html><script><!--<script <
#errors
(1,36): eof-in-script-in-script
(1,36): expected-named-closing-tag-but-got-eof
#new-errors
(1:37) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script <"
| <body>
#data
<!doctype html><script><!--<script <a
#errors
(1,37): eof-in-script-in-script
(1,37): expected-named-closing-tag-but-got-eof
#new-errors
(1:38) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script <a"
| <body>
#data
<!doctype html><script><!--<script </
#errors
(1,37): eof-in-script-in-script
(1,37): expected-named-closing-tag-but-got-eof
#new-errors
(1:38) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script </"
| <body>
#data
<!doctype html><script><!--<script </s
#errors
(1,38): eof-in-script-in-script
(1,38): expected-named-closing-tag-but-got-eof
#new-errors
(1:39) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script </s"
| <body>
#data
<!doctype html><script><!--<script </script
#errors
(1,43): eof-in-script-in-script
(1,43): expected-named-closing-tag-but-got-eof
#new-errors
(1:44) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script </script"
| <body>
#data
<!doctype html><script><!--<script </scripta
#errors
(1,44): eof-in-script-in-script
(1,44): expected-named-closing-tag-but-got-eof
#new-errors
(1:45) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script </scripta"
| <body>
#data
<!doctype html><script><!--<script </script
#errors
(1,44): expected-named-closing-tag-but-got-eof
(1,44): unexpected-eof-in-text-mode
#new-errors
(1:45) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script </script "
| <body>
#data
<!doctype html><script><!--<script </script>
#errors
(1,44): expected-named-closing-tag-but-got-eof
(1,44): unexpected-eof-in-text-mode
#new-errors
(1:45) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script </script>"
| <body>
#data
<!doctype html><script><!--<script </script/
#errors
(1,44): expected-named-closing-tag-but-got-eof
(1,44): unexpected-eof-in-text-mode
#new-errors
(1:45) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script </script/"
| <body>
#data
<!doctype html><script><!--<script </script <
#errors
(1,45): expected-named-closing-tag-but-got-eof
(1,45): unexpected-eof-in-text-mode
#new-errors
(1:46) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script </script <"
| <body>
#data
<!doctype html><script><!--<script </script <a
#errors
(1,46): expected-named-closing-tag-but-got-eof
(1,46): unexpected-eof-in-text-mode
#new-errors
(1:47) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script </script <a"
| <body>
#data
<!doctype html><script><!--<script </script </
#errors
(1,46): expected-named-closing-tag-but-got-eof
(1,46): unexpected-eof-in-text-mode
#new-errors
(1:47) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script </script </"
| <body>
#data
<!doctype html><script><!--<script </script </script
#errors
(1,52): expected-named-closing-tag-but-got-eof
(1,52): unexpected-eof-in-text-mode
#new-errors
(1:53) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script </script </script"
| <body>
#data
<!doctype html><script><!--<script </script </script
#errors
(1,53): expected-attribute-name-but-got-eof
(1,53): expected-named-closing-tag-but-got-eof
#new-errors
(1:54) eof-in-tag
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script </script "
| <body>
#data
<!doctype html><script><!--<script </script </script/
#errors
(1,53): unexpected-EOF-after-solidus-in-tag
(1,53): expected-named-closing-tag-but-got-eof
#new-errors
(1:54) eof-in-tag
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script </script "
| <body>
#data
<!doctype html><script><!--<script </script </script>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script </script "
| <body>
#data
<!doctype html><script><!--<script -
#errors
(1,36): eof-in-script-in-script
(1,36): expected-named-closing-tag-but-got-eof
#new-errors
(1:37) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script -"
| <body>
#data
<!doctype html><script><!--<script -a
#errors
(1,37): eof-in-script-in-script
(1,37): expected-named-closing-tag-but-got-eof
#new-errors
(1:38) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script -a"
| <body>
#data
<!doctype html><script><!--<script -<
#errors
(1,37): eof-in-script-in-script
(1,37): expected-named-closing-tag-but-got-eof
#new-errors
(1:38) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script -<"
| <body>
#data
<!doctype html><script><!--<script --
#errors
(1,37): eof-in-script-in-script
(1,37): expected-named-closing-tag-but-got-eof
#new-errors
(1:38) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script --"
| <body>
#data
<!doctype html><script><!--<script --a
#errors
(1,38): eof-in-script-in-script
(1,38): expected-named-closing-tag-but-got-eof
#new-errors
(1:39) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script --a"
| <body>
#data
<!doctype html><script><!--<script --<
#errors
(1,38): eof-in-script-in-script
(1,38): expected-named-closing-tag-but-got-eof
#new-errors
(1:39) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script --<"
| <body>
#data
<!doctype html><script><!--<script -->
#errors
(1,38): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script -->"
| <body>
#data
<!doctype html><script><!--<script --><
#errors
(1,39): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script --><"
| <body>
#data
<!doctype html><script><!--<script --></
#errors
(1,40): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script --></"
| <body>
#data
<!doctype html><script><!--<script --></script
#errors
(1,46): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script --></script"
| <body>
#data
<!doctype html><script><!--<script --></script
#errors
(1,47): expected-attribute-name-but-got-eof
(1,47): expected-named-closing-tag-but-got-eof
#new-errors
(1:48) eof-in-tag
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script -->"
| <body>
#data
<!doctype html><script><!--<script --></script/
#errors
(1,47): unexpected-EOF-after-solidus-in-tag
(1,47): expected-named-closing-tag-but-got-eof
#new-errors
(1:48) eof-in-tag
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script -->"
| <body>
#data
<!doctype html><script><!--<script --></script>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script -->"
| <body>
#data
<!doctype html><script><!--<script><\/script>--></script>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script><\/script>-->"
| <body>
#data
<!doctype html><script><!--<script></scr'+'ipt>--></script>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script></scr'+'ipt>-->"
| <body>
#data
<!doctype html><script><!--<script></script><script></script></script>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script></script><script></script>"
| <body>
#data
<!doctype html><script><!--<script></script><script></script>--><!--</script>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script></script><script></script>--><!--"
| <body>
#data
<!doctype html><script><!--<script></script><script></script>-- ></script>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script></script><script></script>-- >"
| <body>
#data
<!doctype html><script><!--<script></script><script></script>- -></script>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script></script><script></script>- ->"
| <body>
#data
<!doctype html><script><!--<script></script><script></script>- - ></script>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script></script><script></script>- - >"
| <body>
#data
<!doctype html><script><!--<script></script><script></script>-></script>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script></script><script></script>->"
| <body>
#data
<!doctype html><script><!--<script>--!></script>X
#errors
(1,49): expected-named-closing-tag-but-got-eof
(1,49): unexpected-EOF-in-text-mode
#new-errors
(1:50) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script>--!></script>X"
| <body>
#data
<!doctype html><script><!--<scr'+'ipt></script>--></script>
#errors
(1,59): unexpected-end-tag
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<scr'+'ipt>"
| <body>
| "-->"
#data
<!doctype html><script><!--<script></scr'+'ipt></script>X
#errors
(1,57): expected-named-closing-tag-but-got-eof
(1,57): unexpected-eof-in-text-mode
#new-errors
(1:58) eof-in-script-html-comment-like-text
#document
| <!DOCTYPE html>
| <html>
| <head>
| <script>
| "<!--<script></scr'+'ipt></script>X"
| <body>
#data
<!doctype html><style><!--<style></style>--></style>
#errors
(1,52): unexpected-end-tag
#document
| <!DOCTYPE html>
| <html>
| <head>
| <style>
| "<!--<style>"
| <body>
| "-->"
#data
<!doctype html><style><!--</style>X
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <style>
| "<!--"
| <body>
| "X"
#data
<!doctype html><style><!--...</style>...--></style>
#errors
(1,51): unexpected-end-tag
#document
| <!DOCTYPE html>
| <html>
| <head>
| <style>
| "<!--..."
| <body>
| "...-->"
#data
<!doctype html><style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <style>
| "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>"
| <body>
| "X"
#data
<!doctype html><style><!--...<style><!--...--!></style>--></style>
#errors
(1,66): unexpected-end-tag
#document
| <!DOCTYPE html>
| <html>
| <head>
| <style>
| "<!--...<style><!--...--!>"
| <body>
| "-->"
#data
<!doctype html><style><!--...</style><!-- --><style>@import ...</style>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <style>
| "<!--..."
| <!-- -->
| <style>
| "@import ..."
| <body>
#data
<!doctype html><style>...<style><!--...</style><!-- --></style>
#errors
(1,63): unexpected-end-tag
#document
| <!DOCTYPE html>
| <html>
| <head>
| <style>
| "...<style><!--..."
| <!-- -->
| <body>
#data
<!doctype html><style>...<!--[if IE]><style>...</style>X
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <style>
| "...<!--[if IE]><style>..."
| <body>
| "X"
#data
<!doctype html><title><!--<title></title>--></title>
#errors
(1,52): unexpected-end-tag
#document
| <!DOCTYPE html>
| <html>
| <head>
| <title>
| "<!--<title>"
| <body>
| "-->"
#data
<!doctype html><title></title></title>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <title>
| "</title>"
| <body>
#data
<!doctype html><title>foo/title><link></head><body>X
#errors
(1,52): expected-named-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <title>
| "foo/title><link></head><body>X"
| <body>
#data
<!doctype html><noscript><!--<noscript></noscript>--></noscript>
#errors
(1,64): unexpected-end-tag
#script-on
#document
| <!DOCTYPE html>
| <html>
| <head>
| <noscript>
| "<!--<noscript>"
| <body>
| "-->"
#data
<!doctype html><noscript><!--<noscript></noscript>--></noscript>
#errors
#script-off
#document
| <!DOCTYPE html>
| <html>
| <head>
| <noscript>
| <!-- <noscript></noscript> -->
| <body>
#data
<!doctype html><noscript><!--</noscript>X<noscript>--></noscript>
#errors
#script-on
#document
| <!DOCTYPE html>
| <html>
| <head>
| <noscript>
| "<!--"
| <body>
| "X"
| <noscript>
| "-->"
#data
<!doctype html><noscript><!--</noscript>X<noscript>--></noscript>
#errors
#script-off
#document
| <!DOCTYPE html>
| <html>
| <head>
| <noscript>
| <!-- </noscript>X<noscript> -->
| <body>
#data
<!doctype html><noscript><iframe></noscript>X
#errors
#script-on
#document
| <!DOCTYPE html>
| <html>
| <head>
| <noscript>
| "<iframe>"
| <body>
| "X"
#data
<!doctype html><noscript><iframe></noscript>X
#errors
* (1,34) unexpected token in head noscript
* (1,46) unexpected EOF
#script-off
#document
| <!DOCTYPE html>
| <html>
| <head>
| <noscript>
| <body>
| <iframe>
| "</noscript>X"
#data
<!doctype html><noframes><!--<noframes></noframes>--></noframes>
#errors
(1,64): unexpected-end-tag
#document
| <!DOCTYPE html>
| <html>
| <head>
| <noframes>
| "<!--<noframes>"
| <body>
| "-->"
#data
<!doctype html><noframes><body><script><!--...</script></body></noframes></html>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <noframes>
| "<body><script><!--...</script></body>"
| <body>
#data
<!doctype html><textarea><!--<textarea></textarea>--></textarea>
#errors
(1,64): unexpected-end-tag
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <textarea>
| "<!--<textarea>"
| "-->"
#data
<!doctype html><textarea></textarea></textarea>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <textarea>
| "</textarea>"
#data
<!doctype html><textarea><</textarea>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <textarea>
| "<"
#data
<!doctype html><textarea>a<b</textarea>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <textarea>
| "a<b"
#data
<!doctype html><iframe><!--<iframe></iframe>--></iframe>
#errors
(1,56): unexpected-end-tag
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <iframe>
| "<!--<iframe>"
| "-->"
#data
<!doctype html><iframe>...<!--X->...<!--/X->...</iframe>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <iframe>
| "...<!--X->...<!--/X->..."
#data
<!doctype html><xmp><!--<xmp></xmp>--></xmp>
#errors
(1,44): unexpected-end-tag
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <xmp>
| "<!--<xmp>"
| "-->"
#data
<!doctype html><noembed><!--<noembed></noembed>--></noembed>
#errors
(1,60): unexpected-end-tag
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <noembed>
| "<!--<noembed>"
| "-->"
#data
<script>
#errors
(1,8): expected-doctype-but-got-start-tag
(1,8): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| <body>
#data
<script>a
#errors
(1,8): expected-doctype-but-got-start-tag
(1,9): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "a"
| <body>
#data
<script><
#errors
(1,8): expected-doctype-but-got-start-tag
(1,9): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "<"
| <body>
#data
<script></
#errors
(1,8): expected-doctype-but-got-start-tag
(1,10): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "</"
| <body>
#data
<script></S
#errors
(1,8): expected-doctype-but-got-start-tag
(1,11): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "</S"
| <body>
#data
<script></SC
#errors
(1,8): expected-doctype-but-got-start-tag
(1,12): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "</SC"
| <body>
#data
<script></SCR
#errors
(1,8): expected-doctype-but-got-start-tag
(1,13): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "</SCR"
| <body>
#data
<script></SCRI
#errors
(1,8): expected-doctype-but-got-start-tag
(1,14): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "</SCRI"
| <body>
#data
<script></SCRIP
#errors
(1,8): expected-doctype-but-got-start-tag
(1,15): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "</SCRIP"
| <body>
#data
<script></SCRIPT
#errors
(1,8): expected-doctype-but-got-start-tag
(1,16): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "</SCRIPT"
| <body>
#data
<script></SCRIPT
#errors
(1,8): expected-doctype-but-got-start-tag
(1,17): expected-attribute-name-but-got-eof
(1,17): expected-named-closing-tag-but-got-eof
#new-errors
(1:18) eof-in-tag
#document
| <html>
| <head>
| <script>
| <body>
#data
<script></s
#errors
(1,8): expected-doctype-but-got-start-tag
(1,11): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "</s"
| <body>
#data
<script></sc
#errors
(1,8): expected-doctype-but-got-start-tag
(1,12): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "</sc"
| <body>
#data
<script></scr
#errors
(1,8): expected-doctype-but-got-start-tag
(1,13): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "</scr"
| <body>
#data
<script></scri
#errors
(1,8): expected-doctype-but-got-start-tag
(1,14): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "</scri"
| <body>
#data
<script></scrip
#errors
(1,8): expected-doctype-but-got-start-tag
(1,15): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "</scrip"
| <body>
#data
<script></script
#errors
(1,8): expected-doctype-but-got-start-tag
(1,16): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "</script"
| <body>
#data
<script></script
#errors
(1,8): expected-doctype-but-got-start-tag
(1,17): expected-attribute-name-but-got-eof
(1,17): expected-named-closing-tag-but-got-eof
#new-errors
(1:18) eof-in-tag
#document
| <html>
| <head>
| <script>
| <body>
#data
<script><!
#errors
(1,8): expected-doctype-but-got-start-tag
(1,10): expected-script-data-but-got-eof
(1,10): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "<!"
| <body>
#data
<script><!a
#errors
(1,8): expected-doctype-but-got-start-tag
(1,11): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "<!a"
| <body>
#data
<script><!-
#errors
(1,8): expected-doctype-but-got-start-tag
(1,11): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "<!-"
| <body>
#data
<script><!-a
#errors
(1,8): expected-doctype-but-got-start-tag
(1,12): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "<!-a"
| <body>
#data
<script><!--
#errors
(1,8): expected-doctype-but-got-start-tag
(1,12): expected-named-closing-tag-but-got-eof
(1,12): unexpected-eof-in-text-mode
#new-errors
(1:13) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--"
| <body>
#data
<script><!--a
#errors
(1,8): expected-doctype-but-got-start-tag
(1,13): expected-named-closing-tag-but-got-eof
(1,13): unexpected-eof-in-text-mode
#new-errors
(1:14) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--a"
| <body>
#data
<script><!--<
#errors
(1,8): expected-doctype-but-got-start-tag
(1,13): expected-named-closing-tag-but-got-eof
(1,13): unexpected-eof-in-text-mode
#new-errors
(1:14) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<"
| <body>
#data
<script><!--<a
#errors
(1,8): expected-doctype-but-got-start-tag
(1,14): expected-named-closing-tag-but-got-eof
(1,14): unexpected-eof-in-text-mode
#new-errors
(1:15) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<a"
| <body>
#data
<script><!--</
#errors
(1,8): expected-doctype-but-got-start-tag
(1,14): expected-named-closing-tag-but-got-eof
(1,14): unexpected-eof-in-text-mode
#new-errors
(1:15) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--</"
| <body>
#data
<script><!--</script
#errors
(1,8): expected-doctype-but-got-start-tag
(1,20): expected-named-closing-tag-but-got-eof
(1,20): unexpected-eof-in-text-mode
#new-errors
(1:21) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--</script"
| <body>
#data
<script><!--</script
#errors
(1,8): expected-doctype-but-got-start-tag
(1,21): expected-attribute-name-but-got-eof
(1,21): expected-named-closing-tag-but-got-eof
#new-errors
(1:22) eof-in-tag
#document
| <html>
| <head>
| <script>
| "<!--"
| <body>
#data
<script><!--<s
#errors
(1,8): expected-doctype-but-got-start-tag
(1,14): expected-named-closing-tag-but-got-eof
(1,14): unexpected-eof-in-text-mode
#new-errors
(1:15) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<s"
| <body>
#data
<script><!--<script
#errors
(1,8): expected-doctype-but-got-start-tag
(1,19): expected-named-closing-tag-but-got-eof
(1,19): unexpected-eof-in-text-mode
#new-errors
(1:20) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<script"
| <body>
#data
<script><!--<script
#errors
(1,8): expected-doctype-but-got-start-tag
(1,20): eof-in-script-in-script
(1,20): expected-named-closing-tag-but-got-eof
#new-errors
(1:21) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<script "
| <body>
#data
<script><!--<script <
#errors
(1,8): expected-doctype-but-got-start-tag
(1,21): eof-in-script-in-script
(1,21): expected-named-closing-tag-but-got-eof
#new-errors
(1:22) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<script <"
| <body>
#data
<script><!--<script <a
#errors
(1,8): expected-doctype-but-got-start-tag
(1,22): eof-in-script-in-script
(1,22): expected-named-closing-tag-but-got-eof
#new-errors
(1:23) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<script <a"
| <body>
#data
<script><!--<script </
#errors
(1,8): expected-doctype-but-got-start-tag
(1,22): eof-in-script-in-script
(1,22): expected-named-closing-tag-but-got-eof
#new-errors
(1:23) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<script </"
| <body>
#data
<script><!--<script </s
#errors
(1,8): expected-doctype-but-got-start-tag
(1,23): eof-in-script-in-script
(1,23): expected-named-closing-tag-but-got-eof
#new-errors
(1:24) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<script </s"
| <body>
#data
<script><!--<script </script
#errors
(1,8): expected-doctype-but-got-start-tag
(1,28): eof-in-script-in-script
(1,28): expected-named-closing-tag-but-got-eof
#new-errors
(1:29) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<script </script"
| <body>
#data
<script><!--<script </scripta
#errors
(1,8): expected-doctype-but-got-start-tag
(1,29): eof-in-script-in-script
(1,29): expected-named-closing-tag-but-got-eof
#new-errors
(1:30) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<script </scripta"
| <body>
#data
<script><!--<script </script
#errors
(1,8): expected-doctype-but-got-start-tag
(1,29): expected-named-closing-tag-but-got-eof
(1,29): unexpected-eof-in-text-mode
#new-errors
(1:30) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<script </script "
| <body>
#data
<script><!--<script </script>
#errors
(1,8): expected-doctype-but-got-start-tag
(1,29): expected-named-closing-tag-but-got-eof
(1,29): unexpected-eof-in-text-mode
#new-errors
(1:30) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<script </script>"
| <body>
#data
<script><!--<script </script/
#errors
(1,8): expected-doctype-but-got-start-tag
(1,29): expected-named-closing-tag-but-got-eof
(1,29): unexpected-eof-in-text-mode
#new-errors
(1:30) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<script </script/"
| <body>
#data
<script><!--<script </script <
#errors
(1,8): expected-doctype-but-got-start-tag
(1,30): expected-named-closing-tag-but-got-eof
(1,30): unexpected-eof-in-text-mode
#new-errors
(1:31) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<script </script <"
| <body>
#data
<script><!--<script </script <a
#errors
(1,8): expected-doctype-but-got-start-tag
(1,31): expected-named-closing-tag-but-got-eof
(1,31): unexpected-eof-in-text-mode
#new-errors
(1:32) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<script </script <a"
| <body>
#data
<script><!--<script </script </
#errors
(1,8): expected-doctype-but-got-start-tag
(1,31): expected-named-closing-tag-but-got-eof
(1,31): unexpected-eof-in-text-mode
#new-errors
(1:32) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<script </script </"
| <body>
#data
<script><!--<script </script </script
#errors
(1,8): expected-doctype-but-got-start-tag
(1,37): expected-named-closing-tag-but-got-eof
(1,37): unexpected-eof-in-text-mode
#new-errors
(1:38) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<script </script </script"
| <body>
#data
<script><!--<script </script </script
#errors
(1,8): expected-doctype-but-got-start-tag
(1,38): expected-attribute-name-but-got-eof
(1,38): expected-named-closing-tag-but-got-eof
#new-errors
(1:39) eof-in-tag
#document
| <html>
| <head>
| <script>
| "<!--<script </script "
| <body>
#data
<script><!--<script </script </script/
#errors
(1,8): expected-doctype-but-got-start-tag
(1,38): unexpected-EOF-after-solidus-in-tag
(1,38): expected-named-closing-tag-but-got-eof
#new-errors
(1:39) eof-in-tag
#document
| <html>
| <head>
| <script>
| "<!--<script </script "
| <body>
#data
<script><!--<script </script </script>
#errors
(1,8): expected-doctype-but-got-start-tag
#document
| <html>
| <head>
| <script>
| "<!--<script </script "
| <body>
#data
<script><!--<script -
#errors
(1,8): expected-doctype-but-got-start-tag
(1,21): eof-in-script-in-script
(1,21): expected-named-closing-tag-but-got-eof
#new-errors
(1:22) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<script -"
| <body>
#data
<script><!--<script -a
#errors
(1,8): expected-doctype-but-got-start-tag
(1,22): eof-in-script-in-script
(1,22): expected-named-closing-tag-but-got-eof
#new-errors
(1:23) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<script -a"
| <body>
#data
<script><!--<script --
#errors
(1,8): expected-doctype-but-got-start-tag
(1,22): eof-in-script-in-script
(1,22): expected-named-closing-tag-but-got-eof
#new-errors
(1:23) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<script --"
| <body>
#data
<script><!--<script --a
#errors
(1,8): expected-doctype-but-got-start-tag
(1,23): eof-in-script-in-script
(1,23): expected-named-closing-tag-but-got-eof
#new-errors
(1:24) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<script --a"
| <body>
#data
<script><!--<script -->
#errors
(1,8): expected-doctype-but-got-start-tag
(1,23): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "<!--<script -->"
| <body>
#data
<script><!--<script --><
#errors
(1,8): expected-doctype-but-got-start-tag
(1,24): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "<!--<script --><"
| <body>
#data
<script><!--<script --></
#errors
(1,8): expected-doctype-but-got-start-tag
(1,25): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "<!--<script --></"
| <body>
#data
<script><!--<script --></script
#errors
(1,8): expected-doctype-but-got-start-tag
(1,31): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <script>
| "<!--<script --></script"
| <body>
#data
<script><!--<script --></script
#errors
(1,8): expected-doctype-but-got-start-tag
(1,32): expected-attribute-name-but-got-eof
(1,32): expected-named-closing-tag-but-got-eof
#new-errors
(1:33) eof-in-tag
#document
| <html>
| <head>
| <script>
| "<!--<script -->"
| <body>
#data
<script><!--<script --></script/
#errors
(1,8): expected-doctype-but-got-start-tag
(1,32): unexpected-EOF-after-solidus-in-tag
(1,32): expected-named-closing-tag-but-got-eof
#new-errors
(1:33) eof-in-tag
#document
| <html>
| <head>
| <script>
| "<!--<script -->"
| <body>
#data
<script><!--<script --></script>
#errors
(1,8): expected-doctype-but-got-start-tag
#document
| <html>
| <head>
| <script>
| "<!--<script -->"
| <body>
#data
<script><!--<script><\/script>--></script>
#errors
(1,8): expected-doctype-but-got-start-tag
#document
| <html>
| <head>
| <script>
| "<!--<script><\/script>-->"
| <body>
#data
<script><!--<script></scr'+'ipt>--></script>
#errors
(1,8): expected-doctype-but-got-start-tag
#document
| <html>
| <head>
| <script>
| "<!--<script></scr'+'ipt>-->"
| <body>
#data
<script><!--<script></script><script></script></script>
#errors
(1,8): expected-doctype-but-got-start-tag
#document
| <html>
| <head>
| <script>
| "<!--<script></script><script></script>"
| <body>
#data
<script><!--<script></script><script></script>--><!--</script>
#errors
(1,8): expected-doctype-but-got-start-tag
#document
| <html>
| <head>
| <script>
| "<!--<script></script><script></script>--><!--"
| <body>
#data
<script><!--<script></script><script></script>-- ></script>
#errors
(1,8): expected-doctype-but-got-start-tag
#document
| <html>
| <head>
| <script>
| "<!--<script></script><script></script>-- >"
| <body>
#data
<script><!--<script></script><script></script>- -></script>
#errors
(1,8): expected-doctype-but-got-start-tag
#document
| <html>
| <head>
| <script>
| "<!--<script></script><script></script>- ->"
| <body>
#data
<script><!--<script></script><script></script>- - ></script>
#errors
(1,8): expected-doctype-but-got-start-tag
#document
| <html>
| <head>
| <script>
| "<!--<script></script><script></script>- - >"
| <body>
#data
<script><!--<script></script><script></script>-></script>
#errors
(1,8): expected-doctype-but-got-start-tag
#document
| <html>
| <head>
| <script>
| "<!--<script></script><script></script>->"
| <body>
#data
<script><!--<script>--!></script>X
#errors
(1,8): expected-doctype-but-got-start-tag
(1,34): expected-named-closing-tag-but-got-eof
(1,34): unexpected-eof-in-text-mode
#new-errors
(1:35) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<script>--!></script>X"
| <body>
#data
<script><!--<scr'+'ipt></script>--></script>
#errors
(1,8): expected-doctype-but-got-start-tag
(1,44): unexpected-end-tag
#document
| <html>
| <head>
| <script>
| "<!--<scr'+'ipt>"
| <body>
| "-->"
#data
<script><!--<script></scr'+'ipt></script>X
#errors
(1,8): expected-doctype-but-got-start-tag
(1,42): expected-named-closing-tag-but-got-eof
(1,42): unexpected-eof-in-text-mode
#new-errors
(1:43) eof-in-script-html-comment-like-text
#document
| <html>
| <head>
| <script>
| "<!--<script></scr'+'ipt></script>X"
| <body>
#data
<style><!--<style></style>--></style>
#errors
(1,7): expected-doctype-but-got-start-tag
(1,37): unexpected-end-tag
#document
| <html>
| <head>
| <style>
| "<!--<style>"
| <body>
| "-->"
#data
<style><!--</style>X
#errors
(1,7): expected-doctype-but-got-start-tag
#document
| <html>
| <head>
| <style>
| "<!--"
| <body>
| "X"
#data
<style><!--...</style>...--></style>
#errors
(1,7): expected-doctype-but-got-start-tag
(1,36): unexpected-end-tag
#document
| <html>
| <head>
| <style>
| "<!--..."
| <body>
| "...-->"
#data
<style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X
#errors
(1,7): expected-doctype-but-got-start-tag
#document
| <html>
| <head>
| <style>
| "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>"
| <body>
| "X"
#data
<style><!--...<style><!--...--!></style>--></style>
#errors
(1,7): expected-doctype-but-got-start-tag
(1,51): unexpected-end-tag
#document
| <html>
| <head>
| <style>
| "<!--...<style><!--...--!>"
| <body>
| "-->"
#data
<style><!--...</style><!-- --><style>@import ...</style>
#errors
(1,7): expected-doctype-but-got-start-tag
#document
| <html>
| <head>
| <style>
| "<!--..."
| <!-- -->
| <style>
| "@import ..."
| <body>
#data
<style>...<style><!--...</style><!-- --></style>
#errors
(1,7): expected-doctype-but-got-start-tag
(1,48): unexpected-end-tag
#document
| <html>
| <head>
| <style>
| "...<style><!--..."
| <!-- -->
| <body>
#data
<style>...<!--[if IE]><style>...</style>X
#errors
(1,7): expected-doctype-but-got-start-tag
#document
| <html>
| <head>
| <style>
| "...<!--[if IE]><style>..."
| <body>
| "X"
#data
<title><!--<title></title>--></title>
#errors
(1,7): expected-doctype-but-got-start-tag
(1,37): unexpected-end-tag
#document
| <html>
| <head>
| <title>
| "<!--<title>"
| <body>
| "-->"
#data
<title></title></title>
#errors
(1,7): expected-doctype-but-got-start-tag
#document
| <html>
| <head>
| <title>
| "</title>"
| <body>
#data
<title>foo/title><link></head><body>X
#errors
(1,7): expected-doctype-but-got-start-tag
(1,37): expected-named-closing-tag-but-got-eof
#document
| <html>
| <head>
| <title>
| "foo/title><link></head><body>X"
| <body>
#data
<noscript><!--<noscript></noscript>--></noscript>
#errors
(1,10): expected-doctype-but-got-start-tag
(1,49): unexpected-end-tag
#script-on
#document
| <html>
| <head>
| <noscript>
| "<!--<noscript>"
| <body>
| "-->"
#data
<noscript><!--<noscript></noscript>--></noscript>
#errors
* (1,11) missing DOCTYPE
#script-off
#document
| <html>
| <head>
| <noscript>
| <!-- <noscript></noscript> -->
| <body>
#data
<noscript><!--</noscript>X<noscript>--></noscript>
#errors
(1,10): expected-doctype-but-got-start-tag
#script-on
#document
| <html>
| <head>
| <noscript>
| "<!--"
| <body>
| "X"
| <noscript>
| "-->"
#data
<noscript><!--</noscript>X<noscript>--></noscript>
#errors
(1,10): expected-doctype-but-got-start-tag
#script-off
#document
| <html>
| <head>
| <noscript>
| <!-- </noscript>X<noscript> -->
| <body>
#data
<noscript><iframe></noscript>X
#errors
(1,10): expected-doctype-but-got-start-tag
#script-on
#document
| <html>
| <head>
| <noscript>
| "<iframe>"
| <body>
| "X"
#data
<noscript><iframe></noscript>X
#errors
* (1,11) missing DOCTYPE
* (1,19) unexpected token in head noscript
* (1,31) unexpected EOF
#script-off
#document
| <html>
| <head>
| <noscript>
| <body>
| <iframe>
| "</noscript>X"
#data
<noframes><!--<noframes></noframes>--></noframes>
#errors
(1,10): expected-doctype-but-got-start-tag
(1,49): unexpected-end-tag
#document
| <html>
| <head>
| <noframes>
| "<!--<noframes>"
| <body>
| "-->"
#data
<noframes><body><script><!--...</script></body></noframes></html>
#errors
(1,10): expected-doctype-but-got-start-tag
#document
| <html>
| <head>
| <noframes>
| "<body><script><!--...</script></body>"
| <body>
#data
<textarea><!--<textarea></textarea>--></textarea>
#errors
(1,10): expected-doctype-but-got-start-tag
(1,49): unexpected-end-tag
#document
| <html>
| <head>
| <body>
| <textarea>
| "<!--<textarea>"
| "-->"
#data
<textarea></textarea></textarea>
#errors
(1,10): expected-doctype-but-got-start-tag
#document
| <html>
| <head>
| <body>
| <textarea>
| "</textarea>"
#data
<iframe><!--<iframe></iframe>--></iframe>
#errors
(1,8): expected-doctype-but-got-start-tag
(1,41): unexpected-end-tag
#document
| <html>
| <head>
| <body>
| <iframe>
| "<!--<iframe>"
| "-->"
#data
<iframe>...<!--X->...<!--/X->...</iframe>
#errors
(1,8): expected-doctype-but-got-start-tag
#document
| <html>
| <head>
| <body>
| <iframe>
| "...<!--X->...<!--/X->..."
#data
<xmp><!--<xmp></xmp>--></xmp>
#errors
(1,5): expected-doctype-but-got-start-tag
(1,29): unexpected-end-tag
#document
| <html>
| <head>
| <body>
| <xmp>
| "<!--<xmp>"
| "-->"
#data
<noembed><!--<noembed></noembed>--></noembed>
#errors
(1,9): expected-doctype-but-got-start-tag
(1,45): unexpected-end-tag
#document
| <html>
| <head>
| <body>
| <noembed>
| "<!--<noembed>"
| "-->"
#data
<!doctype html><table>
#errors
(2,0): eof-in-table
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <table>
| "
"
#data
<!doctype html><table><td><span><font></span><span>
#errors
(1,26): unexpected-cell-in-table-body
(1,45): unexpected-end-tag
(1,51): expected-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <table>
| <tbody>
| <tr>
| <td>
| <span>
| <font>
| <font>
| <span>
#data
<!doctype html><form><table></form><form></table></form>
#errors
(1,35): unexpected-end-tag-implies-table-voodoo
(1,35): unexpected-end-tag
(1,41): unexpected-form-in-table
(1,56): unexpected-end-tag
(1,56): expected-closing-tag-but-got-eof
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <form>
| <table>
| <form>
| net/html/testdata/webkit/tests16.dat/0 | {
"file_path": "net/html/testdata/webkit/tests16.dat",
"repo_id": "net",
"token_count": 22579
} | 627 |
#data
<!doctype html><body><title>X</title>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <title>
| "X"
#data
<!doctype html><table><title>X</title></table>
#errors
(1,29): unexpected-start-tag-implies-table-voodoo
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <title>
| "X"
| <table>
#data
<!doctype html><head></head><title>X</title>
#errors
(1,35): unexpected-start-tag-out-of-my-head
#document
| <!DOCTYPE html>
| <html>
| <head>
| <title>
| "X"
| <body>
#data
<!doctype html></head><title>X</title>
#errors
(1,29): unexpected-start-tag-out-of-my-head
#document
| <!DOCTYPE html>
| <html>
| <head>
| <title>
| "X"
| <body>
#data
<!doctype html><table><meta></table>
#errors
(1,28): unexpected-start-tag-implies-table-voodoo
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <meta>
| <table>
#data
<!doctype html><table>X<tr><td><table> <meta></table></table>
#errors
unexpected text in table
(1,45): unexpected-start-tag-implies-table-voodoo
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| "X"
| <table>
| <tbody>
| <tr>
| <td>
| <meta>
| <table>
| " "
#data
<!doctype html><html> <head>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
#data
<!doctype html> <head>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
#data
<!doctype html><table><style> <tr>x </style> </table>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <table>
| <style>
| " <tr>x "
| " "
#data
<!doctype html><table><TBODY><script> <tr>x </script> </table>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <table>
| <tbody>
| <script>
| " <tr>x "
| " "
#data
<!doctype html><p><applet><p>X</p></applet>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| <applet>
| <p>
| "X"
#data
<!doctype html><p><object type="application/x-non-existant-plugin"><p>X</p></object>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| <object>
| type="application/x-non-existant-plugin"
| <p>
| "X"
#data
<!doctype html><listing>
X</listing>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <listing>
| "X"
#data
<!doctype html><select><input>X
#errors
(1,30): unexpected-input-in-select
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <select>
| <input>
| "X"
#data
<!doctype html><select><select>X
#errors
(1,31): unexpected-select-in-select
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <select>
| "X"
#data
<!doctype html><table><input type=hidDEN></table>
#errors
(1,41): unexpected-hidden-input-in-table
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <table>
| <input>
| type="hidDEN"
#data
<!doctype html><table>X<input type=hidDEN></table>
#errors
(1,23): foster-parenting-character
(1,42): unexpected-hidden-input-in-table
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| "X"
| <table>
| <input>
| type="hidDEN"
#data
<!doctype html><table> <input type=hidDEN></table>
#errors
(1,43): unexpected-hidden-input-in-table
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <table>
| " "
| <input>
| type="hidDEN"
#data
<!doctype html><table> <input type='hidDEN'></table>
#errors
(1,45): unexpected-hidden-input-in-table
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <table>
| " "
| <input>
| type="hidDEN"
#data
<!doctype html><table><input type=" hidden"><input type=hidDEN></table>
#errors
(1,44): unexpected-start-tag-implies-table-voodoo
(1,63): unexpected-hidden-input-in-table
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <input>
| type=" hidden"
| <table>
| <input>
| type="hidDEN"
#data
<!doctype html><table><select>X<tr>
#errors
(1,30): unexpected-start-tag-implies-table-voodoo
(1,35): unexpected-table-element-start-tag-in-select-in-table
(1,35): eof-in-table
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <select>
| "X"
| <table>
| <tbody>
| <tr>
#data
<!doctype html><select>X</select>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <select>
| "X"
#data
<!DOCTYPE hTmL><html></html>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
#data
<!DOCTYPE HTML><html></html>
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
#data
<body>X</body></body>
#errors
(1,21): unexpected-end-tag-after-body
#document-fragment
html
#document
| <head>
| <body>
| "X"
#data
<div><p>a</x> b
#errors
(1,5): expected-doctype-but-got-start-tag
(1,13): unexpected-end-tag
(1,15): expected-closing-tag-but-got-eof
#document
| <html>
| <head>
| <body>
| <div>
| <p>
| "a b"
#data
<table><tr><td><code></code> </table>
#errors
(1,7): expected-doctype-but-got-start-tag
#document
| <html>
| <head>
| <body>
| <table>
| <tbody>
| <tr>
| <td>
| <code>
| " "
#data
<table><b><tr><td>aaa</td></tr>bbb</table>ccc
#errors
(1,7): expected-doctype-but-got-start-tag
(1,10): foster-parenting-start-tag
(1,32): foster-parenting-character
(1,33): foster-parenting-character
(1,34): foster-parenting-character
(1,45): expected-closing-tag-but-got-eof
#document
| <html>
| <head>
| <body>
| <b>
| <b>
| "bbb"
| <table>
| <tbody>
| <tr>
| <td>
| "aaa"
| <b>
| "ccc"
#data
A<table><tr> B</tr> B</table>
#errors
(1,1): expected-doctype-but-got-chars
(1,13): foster-parenting-character
(1,14): foster-parenting-character
(1,20): foster-parenting-character
(1,21): foster-parenting-character
#document
| <html>
| <head>
| <body>
| "A B B"
| <table>
| <tbody>
| <tr>
#data
A<table><tr> B</tr> </em>C</table>
#errors
(1,1): expected-doctype-but-got-chars
(1,13): foster-parenting-character
(1,14): foster-parenting-character
(1,20): foster-parenting-character
(1,25): unexpected-end-tag
(1,25): unexpected-end-tag-in-special-element
(1,26): foster-parenting-character
#document
| <html>
| <head>
| <body>
| "A BC"
| <table>
| <tbody>
| <tr>
| " "
#data
<select><keygen>
#errors
(1,8): expected-doctype-but-got-start-tag
(1,16): unexpected-input-in-select
#document
| <html>
| <head>
| <body>
| <select>
| <keygen>
| net/html/testdata/webkit/tests7.dat/0 | {
"file_path": "net/html/testdata/webkit/tests7.dat",
"repo_id": "net",
"token_count": 3496
} | 628 |
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import "context"
// A gate is a monitor (mutex + condition variable) with one bit of state.
//
// The condition may be either set or unset.
// Lock operations may be unconditional, or wait for the condition to be set.
// Unlock operations record the new state of the condition.
type gate struct {
	// When unlocked, exactly one of set or unset contains a value.
	// When locked, neither chan contains a value.
	set chan struct{}
	unset chan struct{}
}
// newGate returns a new, unlocked gate with the condition unset.
func newGate() gate {
	g := newLockedGate()
	// Release the freshly created (locked) gate, recording the
	// condition as unset.
	g.unlock(false)
	return g
}
// newLockedGate returns a new, locked gate.
func newLockedGate() gate {
	// Both channels are empty, which is the locked state; a later
	// unlock deposits a token into exactly one of them.
	return gate{
		set: make(chan struct{}, 1),
		unset: make(chan struct{}, 1),
	}
}
// lock acquires the gate unconditionally.
// It reports whether the condition is set.
func (g *gate) lock() (set bool) {
	// Receiving the token from either channel acquires the gate; which
	// channel held it tells us the current state of the condition.
	select {
	case <-g.set:
		set = true
	case <-g.unset:
		set = false
	}
	return set
}
// waitAndLock waits until the condition is set before acquiring the gate.
// If the context expires, waitAndLock returns an error and does not acquire the gate.
func (g *gate) waitAndLock(ctx context.Context) error {
	// Fast path: if the condition is already set, acquire the gate
	// immediately without consulting the context. This guarantees that
	// a set condition wins over an already-expired context.
	select {
	case <-g.set:
		return nil
	default:
	}
	// Slow path: block until the condition becomes set or the context
	// is canceled, whichever happens first.
	select {
	case <-g.set:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}
// lockIfSet acquires the gate if and only if the condition is set.
func (g *gate) lockIfSet() (acquired bool) {
	// Non-blocking receive: succeeds only when the "set" token is
	// currently available; otherwise the gate is left untouched.
	select {
	case <-g.set:
		acquired = true
	default:
	}
	return acquired
}
// unlock sets the condition and releases the gate.
func (g *gate) unlock(set bool) {
	// Deposit the token into the channel that records the new
	// condition state.
	ch := g.unset
	if set {
		ch = g.set
	}
	ch <- struct{}{}
}
// unlockFunc sets the condition to the result of f and releases the gate.
// Useful in defers.
func (g *gate) unlockFunc(f func() bool) {
	g.unlock(f())
}
| net/http2/gate_test.go/0 | {
"file_path": "net/http2/gate_test.go",
"repo_id": "net",
"token_count": 652
} | 629 |
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package hpack
import (
"bufio"
"regexp"
"strconv"
"strings"
"testing"
)
// TestHeaderFieldTable verifies headerFieldTable.search through both the
// static-table path (indices count from the oldest entry) and the dynamic
// path (indices count from the newest entry), before and after evicting
// the three oldest entries.
func TestHeaderFieldTable(t *testing.T) {
	table := &headerFieldTable{}
	table.init()
	table.addEntry(pair("key1", "value1-1"))
	table.addEntry(pair("key2", "value2-1"))
	table.addEntry(pair("key1", "value1-2"))
	table.addEntry(pair("key3", "value3-1"))
	table.addEntry(pair("key4", "value4-1"))
	table.addEntry(pair("key2", "value2-2"))
	// Tests will be run twice: once before evicting anything, and
	// again after evicting the three oldest entries.
	tests := []struct {
		f HeaderField
		beforeWantStaticI uint64
		beforeWantMatch bool
		afterWantStaticI uint64
		afterWantMatch bool
	}{
		{HeaderField{"key1", "value1-1", false}, 1, true, 0, false},
		{HeaderField{"key1", "value1-2", false}, 3, true, 0, false},
		{HeaderField{"key1", "value1-3", false}, 3, false, 0, false},
		{HeaderField{"key2", "value2-1", false}, 2, true, 3, false},
		{HeaderField{"key2", "value2-2", false}, 6, true, 3, true},
		{HeaderField{"key2", "value2-3", false}, 6, false, 3, false},
		{HeaderField{"key4", "value4-1", false}, 5, true, 2, true},
		// Name match only, because sensitive.
		{HeaderField{"key4", "value4-1", true}, 5, false, 2, false},
		// Key not found.
		{HeaderField{"key5", "value5-x", false}, 0, false, 0, false},
	}
	// staticToDynamic converts a static-table index into the
	// corresponding dynamic-table index (0 maps to 0, "not found").
	staticToDynamic := func(i uint64) uint64 {
		if i == 0 {
			return 0
		}
		return uint64(table.len()) - i + 1 // dynamic is the reversed table
	}
	// searchStatic temporarily installs our table as the package-level
	// staticTable so search uses static-table index semantics.
	searchStatic := func(f HeaderField) (uint64, bool) {
		old := staticTable
		staticTable = table
		defer func() { staticTable = old }()
		return staticTable.search(f)
	}
	searchDynamic := func(f HeaderField) (uint64, bool) {
		return table.search(f)
	}
	for _, test := range tests {
		gotI, gotMatch := searchStatic(test.f)
		if wantI, wantMatch := test.beforeWantStaticI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch {
			t.Errorf("before evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch)
		}
		gotI, gotMatch = searchDynamic(test.f)
		wantDynamicI := staticToDynamic(test.beforeWantStaticI)
		if wantI, wantMatch := wantDynamicI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch {
			t.Errorf("before evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch)
		}
	}
	// Evict the three oldest entries and re-check every case against
	// the "after" expectations.
	table.evictOldest(3)
	for _, test := range tests {
		gotI, gotMatch := searchStatic(test.f)
		if wantI, wantMatch := test.afterWantStaticI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch {
			t.Errorf("after evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch)
		}
		gotI, gotMatch = searchDynamic(test.f)
		wantDynamicI := staticToDynamic(test.afterWantStaticI)
		if wantI, wantMatch := wantDynamicI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch {
			t.Errorf("after evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch)
		}
	}
}
// TestHeaderFieldTable_LookupMapEviction verifies that evicting every
// entry also empties the byName and byNameValue lookup maps, so eviction
// does not leak map entries.
func TestHeaderFieldTable_LookupMapEviction(t *testing.T) {
	table := &headerFieldTable{}
	table.init()
	table.addEntry(pair("key1", "value1-1"))
	table.addEntry(pair("key2", "value2-1"))
	table.addEntry(pair("key1", "value1-2"))
	table.addEntry(pair("key3", "value3-1"))
	table.addEntry(pair("key4", "value4-1"))
	table.addEntry(pair("key2", "value2-2"))
	// evict all pairs
	table.evictOldest(table.len())
	if l := table.len(); l > 0 {
		t.Errorf("table.len() = %d, want 0", l)
	}
	if l := len(table.byName); l > 0 {
		t.Errorf("len(table.byName) = %d, want 0", l)
	}
	if l := len(table.byNameValue); l > 0 {
		t.Errorf("len(table.byNameValue) = %d, want 0", l)
	}
}
// TestStaticTable checks the package's staticTable against the table
// transcribed from the HPACK spec (RFC 7541 Appendix A): every index maps
// to the expected name/value, entries are not sensitive, and the reverse
// byNameValue lookup agrees with the forward index.
func TestStaticTable(t *testing.T) {
	fromSpec := `
	+-------+-----------------------------+---------------+
	| 1 | :authority | |
	| 2 | :method | GET |
	| 3 | :method | POST |
	| 4 | :path | / |
	| 5 | :path | /index.html |
	| 6 | :scheme | http |
	| 7 | :scheme | https |
	| 8 | :status | 200 |
	| 9 | :status | 204 |
	| 10 | :status | 206 |
	| 11 | :status | 304 |
	| 12 | :status | 400 |
	| 13 | :status | 404 |
	| 14 | :status | 500 |
	| 15 | accept-charset | |
	| 16 | accept-encoding | gzip, deflate |
	| 17 | accept-language | |
	| 18 | accept-ranges | |
	| 19 | accept | |
	| 20 | access-control-allow-origin | |
	| 21 | age | |
	| 22 | allow | |
	| 23 | authorization | |
	| 24 | cache-control | |
	| 25 | content-disposition | |
	| 26 | content-encoding | |
	| 27 | content-language | |
	| 28 | content-length | |
	| 29 | content-location | |
	| 30 | content-range | |
	| 31 | content-type | |
	| 32 | cookie | |
	| 33 | date | |
	| 34 | etag | |
	| 35 | expect | |
	| 36 | expires | |
	| 37 | from | |
	| 38 | host | |
	| 39 | if-match | |
	| 40 | if-modified-since | |
	| 41 | if-none-match | |
	| 42 | if-range | |
	| 43 | if-unmodified-since | |
	| 44 | last-modified | |
	| 45 | link | |
	| 46 | location | |
	| 47 | max-forwards | |
	| 48 | proxy-authenticate | |
	| 49 | proxy-authorization | |
	| 50 | range | |
	| 51 | referer | |
	| 52 | refresh | |
	| 53 | retry-after | |
	| 54 | server | |
	| 55 | set-cookie | |
	| 56 | strict-transport-security | |
	| 57 | transfer-encoding | |
	| 58 | user-agent | |
	| 59 | vary | |
	| 60 | via | |
	| 61 | www-authenticate | |
	+-------+-----------------------------+---------------+
`
	bs := bufio.NewScanner(strings.NewReader(fromSpec))
	// Capture groups: 1 = index, 2 = name, 3 = value (optional).
	re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`)
	for bs.Scan() {
		l := bs.Text()
		// Skip the border lines and any non-table text.
		if !strings.Contains(l, "|") {
			continue
		}
		m := re.FindStringSubmatch(l)
		if m == nil {
			continue
		}
		i, err := strconv.Atoi(m[1])
		if err != nil {
			t.Errorf("Bogus integer on line %q", l)
			continue
		}
		if i < 1 || i > staticTable.len() {
			t.Errorf("Bogus index %d on line %q", i, l)
			continue
		}
		// Spec indices are 1-based; ents is 0-based.
		if got, want := staticTable.ents[i-1].Name, m[2]; got != want {
			t.Errorf("header index %d name = %q; want %q", i, got, want)
		}
		if got, want := staticTable.ents[i-1].Value, m[3]; got != want {
			t.Errorf("header index %d value = %q; want %q", i, got, want)
		}
		if got, want := staticTable.ents[i-1].Sensitive, false; got != want {
			t.Errorf("header index %d sensitive = %t; want %t", i, got, want)
		}
		// The reverse name/value lookup must round-trip to the index.
		if got, want := strconv.Itoa(int(staticTable.byNameValue[pairNameValue{name: m[2], value: m[3]}])), m[1]; got != want {
			t.Errorf("header by name %s value %s index = %s; want %s", m[2], m[3], got, want)
		}
	}
	if err := bs.Err(); err != nil {
		t.Error(err)
	}
}
| net/http2/hpack/tables_test.go/0 | {
"file_path": "net/http2/hpack/tables_test.go",
"repo_id": "net",
"token_count": 5190
} | 630 |
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"bytes"
"fmt"
"sort"
"testing"
)
// defaultPriorityWriteScheduler returns a priority write scheduler built
// with the default (nil) configuration, exposed as its concrete type so
// tests can inspect internals such as nodes and root.
func defaultPriorityWriteScheduler() *priorityWriteScheduler {
	return NewPriorityWriteScheduler(nil).(*priorityWriteScheduler)
}
// checkPriorityWellFormed verifies structural invariants of the priority
// tree: each map key equals its node's id, a node without a parent carries
// no sibling links, and every parented node appears in its parent's kid
// list. It returns a descriptive error for the first violation found.
func checkPriorityWellFormed(ws *priorityWriteScheduler) error {
	for id, n := range ws.nodes {
		if id != n.id {
			return fmt.Errorf("bad ws.nodes: ws.nodes[%d] = %d", id, n.id)
		}
		if n.parent == nil {
			// Detached nodes must not be linked into any sibling list.
			if n.next != nil || n.prev != nil {
				return fmt.Errorf("bad node %d: nil parent but prev/next not nil", id)
			}
			continue
		}
		// Walk the parent's kid list looking for this node.
		var inKidList bool
		for kid := n.parent.kids; kid != nil; kid = kid.next {
			if kid.id == id {
				inKidList = true
				break
			}
		}
		if !inKidList {
			return fmt.Errorf("bad node %d: not found in parent %d kids list", id, n.parent.id)
		}
	}
	return nil
}
// fmtTree renders the scheduler's priority tree as a space-separated list
// of nodes, formatted by fmtNode and ordered by ascending stream id.
func fmtTree(ws *priorityWriteScheduler, fmtNode func(*priorityNode) string) string {
	// Collect and sort the ids so output is deterministic.
	ids := make([]int, 0, len(ws.nodes))
	for _, n := range ws.nodes {
		ids = append(ids, int(n.id))
	}
	sort.Ints(ids)
	var buf bytes.Buffer
	for _, id := range ids {
		// Id 0 is the root, which lives outside the nodes map.
		n := &ws.root
		if id != 0 {
			n = ws.nodes[uint32(id)]
		}
		if buf.Len() != 0 {
			buf.WriteString(" ")
		}
		buf.WriteString(fmtNode(n))
	}
	return buf.String()
}
// fmtNodeParentSkipRoot formats a node as "id{parent:pid}", rendering the
// root (id 0) as the empty string.
func fmtNodeParentSkipRoot(n *priorityNode) string {
	if n.id == 0 {
		return ""
	}
	if n.parent == nil {
		return fmt.Sprintf("%d{parent:nil}", n.id)
	}
	return fmt.Sprintf("%d{parent:%d}", n.id, n.parent.id)
}
// fmtNodeWeightParentSkipRoot formats a node as "id{weight:w,parent:pid}",
// rendering the root (id 0) as the empty string.
func fmtNodeWeightParentSkipRoot(n *priorityNode) string {
	if n.id == 0 {
		return ""
	}
	if n.parent == nil {
		return fmt.Sprintf("%d{weight:%d,parent:nil}", n.id, n.weight)
	}
	return fmt.Sprintf("%d{weight:%d,parent:%d}", n.id, n.weight, n.parent.id)
}
// TestPriorityTwoStreams checks that two freshly opened streams get
// default priorities under the root, and that AdjustStream can reparent
// one stream under the other with a new weight.
func TestPriorityTwoStreams(t *testing.T) {
	ws := defaultPriorityWriteScheduler()
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{})
	want := "1{weight:15,parent:0} 2{weight:15,parent:0}"
	if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
		t.Errorf("After open\ngot %q\nwant %q", got, want)
	}
	// Move 1's parent to 2.
	ws.AdjustStream(1, PriorityParam{
		StreamDep: 2,
		Weight: 32,
		Exclusive: false,
	})
	want = "1{weight:32,parent:2} 2{weight:15,parent:0}"
	if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
		t.Errorf("After adjust\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}
// TestPriorityAdjustExclusiveZero checks exclusive reprioritization onto
// the root stream: the remaining siblings must become children of the
// reprioritized stream.
func TestPriorityAdjustExclusiveZero(t *testing.T) {
	// 1, 2, and 3 are all children of the 0 stream.
	// Exclusive reprioritization to any of the streams should bring
	// the rest of the streams under the reprioritized stream.
	ws := defaultPriorityWriteScheduler()
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{})
	ws.OpenStream(3, OpenStreamOptions{})
	want := "1{weight:15,parent:0} 2{weight:15,parent:0} 3{weight:15,parent:0}"
	if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
		t.Errorf("After open\ngot %q\nwant %q", got, want)
	}
	ws.AdjustStream(2, PriorityParam{
		StreamDep: 0,
		Weight: 20,
		Exclusive: true,
	})
	want = "1{weight:15,parent:2} 2{weight:20,parent:0} 3{weight:15,parent:2}"
	if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
		t.Errorf("After adjust\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}
// TestPriorityAdjustOwnParent checks that a self-dependency in
// AdjustStream is ignored, leaving the tree (and weights) unchanged.
func TestPriorityAdjustOwnParent(t *testing.T) {
	// Assigning a node as its own parent should have no effect.
	ws := defaultPriorityWriteScheduler()
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{})
	ws.AdjustStream(2, PriorityParam{
		StreamDep: 2,
		Weight: 20,
		Exclusive: true,
	})
	want := "1{weight:15,parent:0} 2{weight:15,parent:0}"
	if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
		t.Errorf("After adjust\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}
// TestPriorityClosedStreams checks that closed streams are retained in the
// tree up to MaxClosedNodesInTree, so late-arriving priority information
// that references a recently closed stream still takes effect.
func TestPriorityClosedStreams(t *testing.T) {
	ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxClosedNodesInTree: 2}).(*priorityWriteScheduler)
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(3, OpenStreamOptions{PusherID: 2})
	ws.OpenStream(4, OpenStreamOptions{PusherID: 3})
	// Close the first three streams. We lose 1, but keep 2 and 3.
	ws.CloseStream(1)
	ws.CloseStream(2)
	ws.CloseStream(3)
	want := "2{weight:15,parent:0} 3{weight:15,parent:2} 4{weight:15,parent:3}"
	if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
		t.Errorf("After close\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
	// Adding a stream as an exclusive child of 1 gives it default
	// priorities, since 1 is gone.
	ws.OpenStream(5, OpenStreamOptions{})
	ws.AdjustStream(5, PriorityParam{StreamDep: 1, Weight: 15, Exclusive: true})
	// Adding a stream as an exclusive child of 2 should work, since 2 is not gone.
	ws.OpenStream(6, OpenStreamOptions{})
	ws.AdjustStream(6, PriorityParam{StreamDep: 2, Weight: 15, Exclusive: true})
	want = "2{weight:15,parent:0} 3{weight:15,parent:6} 4{weight:15,parent:3} 5{weight:15,parent:0} 6{weight:15,parent:2}"
	if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
		t.Errorf("After add streams\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}
// TestPriorityClosedStreamsDisabled checks that with the default config
// (MaxClosedNodesInTree zero), closed streams are removed from the tree
// immediately.
func TestPriorityClosedStreamsDisabled(t *testing.T) {
	ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler)
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(3, OpenStreamOptions{PusherID: 2})
	// Close the first two streams. We keep only 3.
	ws.CloseStream(1)
	ws.CloseStream(2)
	want := "3{weight:15,parent:0}"
	if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
		t.Errorf("After close\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}
// TestPriorityIdleStreams checks that priority information for streams
// that have not been opened yet ("idle" streams) is retained up to
// MaxIdleNodesInTree; here stream 1's idle record is the one evicted.
func TestPriorityIdleStreams(t *testing.T) {
	ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxIdleNodesInTree: 2}).(*priorityWriteScheduler)
	ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 15}) // idle
	ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle
	ws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle
	ws.OpenStream(4, OpenStreamOptions{})
	ws.OpenStream(5, OpenStreamOptions{})
	ws.OpenStream(6, OpenStreamOptions{})
	ws.AdjustStream(4, PriorityParam{StreamDep: 1, Weight: 15})
	ws.AdjustStream(5, PriorityParam{StreamDep: 2, Weight: 15})
	ws.AdjustStream(6, PriorityParam{StreamDep: 3, Weight: 15})
	want := "2{weight:15,parent:0} 3{weight:20,parent:2} 4{weight:15,parent:0} 5{weight:15,parent:2} 6{weight:15,parent:3}"
	if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
		t.Errorf("After open\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}
// TestPriorityIdleStreamsDisabled checks that with the default config
// (MaxIdleNodesInTree zero), priority updates for unopened streams are
// dropped entirely.
func TestPriorityIdleStreamsDisabled(t *testing.T) {
	ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler)
	ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 15}) // idle
	ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle
	ws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle
	ws.OpenStream(4, OpenStreamOptions{})
	want := "4{weight:15,parent:0}"
	if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
		t.Errorf("After open\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}
// TestPrioritySection531NonExclusive reproduces the non-exclusive
// dependency example from RFC 7540 Section 5.3.1: D becomes a sibling of
// B and C under A.
func TestPrioritySection531NonExclusive(t *testing.T) {
	// Example from RFC 7540 Section 5.3.1.
	// A,B,C,D = 1,2,3,4
	ws := defaultPriorityWriteScheduler()
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(3, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(4, OpenStreamOptions{})
	ws.AdjustStream(4, PriorityParam{
		StreamDep: 1,
		Weight: 15,
		Exclusive: false,
	})
	want := "1{parent:0} 2{parent:1} 3{parent:1} 4{parent:1}"
	if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want {
		t.Errorf("After adjust\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}
// TestPrioritySection531Exclusive reproduces the exclusive dependency
// example from RFC 7540 Section 5.3.1: D becomes A's only child, adopting
// B and C as its own children.
func TestPrioritySection531Exclusive(t *testing.T) {
	// Example from RFC 7540 Section 5.3.1.
	// A,B,C,D = 1,2,3,4
	ws := defaultPriorityWriteScheduler()
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(3, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(4, OpenStreamOptions{})
	ws.AdjustStream(4, PriorityParam{
		StreamDep: 1,
		Weight: 15,
		Exclusive: true,
	})
	want := "1{parent:0} 2{parent:4} 3{parent:4} 4{parent:1}"
	if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want {
		t.Errorf("After adjust\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}
// makeSection533Tree builds the initial six-stream dependency tree used by
// the RFC 7540 Section 5.3.3 reprioritization examples.
func makeSection533Tree() *priorityWriteScheduler {
	// Initial tree from RFC 7540 Section 5.3.3.
	// A,B,C,D,E,F = 1,2,3,4,5,6
	ws := defaultPriorityWriteScheduler()
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(3, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(4, OpenStreamOptions{PusherID: 3})
	ws.OpenStream(5, OpenStreamOptions{PusherID: 3})
	ws.OpenStream(6, OpenStreamOptions{PusherID: 4})
	return ws
}
// TestPrioritySection533NonExclusive reproduces the non-exclusive
// reprioritization example from RFC 7540 Section 5.3.3: A (1) is moved
// under D (4) without the exclusive flag.
// A,B,C,D,E,F = 1,2,3,4,5,6
func TestPrioritySection533NonExclusive(t *testing.T) {
	// Build the initial Section 5.3.3 tree via the shared helper rather
	// than duplicating the six OpenStream calls inline (keeps this test
	// consistent with TestPriorityPopFrom533Tree).
	ws := makeSection533Tree()
	ws.AdjustStream(1, PriorityParam{
		StreamDep: 4,
		Weight:    15,
		Exclusive: false,
	})
	want := "1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:4}"
	if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want {
		t.Errorf("After adjust\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}
// TestPrioritySection533Exclusive reproduces the exclusive
// reprioritization example from RFC 7540 Section 5.3.3: A (1) is moved
// under D (4) with the exclusive flag, so D's other child F (6) is
// adopted by A.
// A,B,C,D,E,F = 1,2,3,4,5,6
func TestPrioritySection533Exclusive(t *testing.T) {
	// Build the initial Section 5.3.3 tree via the shared helper rather
	// than duplicating the six OpenStream calls inline (keeps this test
	// consistent with TestPriorityPopFrom533Tree).
	ws := makeSection533Tree()
	ws.AdjustStream(1, PriorityParam{
		StreamDep: 4,
		Weight:    15,
		Exclusive: true,
	})
	want := "1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:1}"
	if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want {
		t.Errorf("After adjust\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}
// checkPopAll pops every queued frame from ws and verifies that the
// stream IDs come out in exactly the given order, and that the scheduler
// is empty afterwards.
func checkPopAll(ws WriteScheduler, order []uint32) error {
	for k := range order {
		id := order[k]
		wr, ok := ws.Pop()
		if !ok {
			return fmt.Errorf("Pop[%d]: got ok=false, want %d (order=%v)", k, id, order)
		}
		if got := wr.StreamID(); got != id {
			return fmt.Errorf("Pop[%d]: got %v, want %d (order=%v)", k, got, id, order)
		}
	}
	// Nothing further may be queued.
	if wr, ok := ws.Pop(); ok {
		return fmt.Errorf("Pop[%d]: got %v, want ok=false (order=%v)", len(order), wr.StreamID(), order)
	}
	return nil
}
// TestPriorityPopFrom533Tree checks Pop ordering on the Section 5.3.3
// tree: the non-stream frame comes first, then streams in priority
// (parent-before-child) order.
func TestPriorityPopFrom533Tree(t *testing.T) {
	ws := makeSection533Tree()
	ws.Push(makeWriteHeadersRequest(3 /*C*/))
	ws.Push(makeWriteNonStreamRequest())
	ws.Push(makeWriteHeadersRequest(5 /*E*/))
	ws.Push(makeWriteHeadersRequest(1 /*A*/))
	t.Log("tree:", fmtTree(ws, fmtNodeParentSkipRoot))
	if err := checkPopAll(ws, []uint32{0 /*NonStream*/, 1, 3, 5}); err != nil {
		t.Error(err)
	}
}
// TestPriorityRSTFrames checks that RST_STREAM frames are popped ahead of
// queued data frames even when no flow-control bytes are available.
//
// #49741 RST_STREAM and Control frames should have more priority than data
// frames to avoid blocking streams caused by clients not able to drain the
// queue.
func TestPriorityRSTFrames(t *testing.T) {
	ws := defaultPriorityWriteScheduler()
	ws.OpenStream(1, OpenStreamOptions{})
	sc := &serverConn{maxFrameSize: 16}
	st1 := &stream{id: 1, sc: sc}
	ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 16), false}, st1, nil})
	ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 16), false}, st1, nil})
	ws.Push(makeWriteRSTStream(1))
	// No flow-control bytes available.
	wr, ok := ws.Pop()
	if !ok {
		t.Fatalf("Pop should work for control frames and not be limited by flow control")
	}
	if _, ok := wr.write.(StreamError); !ok {
		t.Fatal("expected RST stream frames first", wr)
	}
}
// TestPriorityPopFromLinearTree checks Pop ordering on a linear chain
// 1 -> 2 -> 3 -> 4: non-stream frames first, then ancestors before
// descendants regardless of push order.
func TestPriorityPopFromLinearTree(t *testing.T) {
	ws := defaultPriorityWriteScheduler()
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(3, OpenStreamOptions{PusherID: 2})
	ws.OpenStream(4, OpenStreamOptions{PusherID: 3})
	ws.Push(makeWriteHeadersRequest(3))
	ws.Push(makeWriteHeadersRequest(4))
	ws.Push(makeWriteHeadersRequest(1))
	ws.Push(makeWriteHeadersRequest(2))
	ws.Push(makeWriteNonStreamRequest())
	ws.Push(makeWriteNonStreamRequest())
	t.Log("tree:", fmtTree(ws, fmtNodeParentSkipRoot))
	if err := checkPopAll(ws, []uint32{0, 0 /*NonStreams*/, 1, 2, 3, 4}); err != nil {
		t.Error(err)
	}
}
// TestPriorityFlowControl checks that Pop respects stream flow control:
// with no window nothing is popped, and once a lower-priority stream
// gains window its data is written even though a higher-priority stream
// is still blocked.
func TestPriorityFlowControl(t *testing.T) {
	ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: false})
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
	sc := &serverConn{maxFrameSize: 16}
	st1 := &stream{id: 1, sc: sc}
	st2 := &stream{id: 2, sc: sc}
	ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 16), false}, st1, nil})
	ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 16), false}, st2, nil})
	ws.AdjustStream(2, PriorityParam{StreamDep: 1})
	// No flow-control bytes available.
	if wr, ok := ws.Pop(); ok {
		t.Fatalf("Pop(limited by flow control)=%v,true, want false", wr)
	}
	// Add enough flow-control bytes to write st2 in two Pop calls.
	// Should write data from st2 even though it's lower priority than st1.
	for i := 1; i <= 2; i++ {
		st2.flow.add(8)
		wr, ok := ws.Pop()
		if !ok {
			t.Fatalf("Pop(%d)=false, want true", i)
		}
		if got, want := wr.DataSize(), 8; got != want {
			t.Fatalf("Pop(%d)=%d bytes, want %d bytes", i, got, want)
		}
	}
}
// TestPriorityThrottleOutOfOrderWrites verifies the ThrottleOutOfOrderWrites
// option: when a stream writes before its parent in the priority tree has
// written, its DATA is chunked down (to 1KB here) so a late-arriving,
// higher-priority parent stream can take over the connection quickly.
func TestPriorityThrottleOutOfOrderWrites(t *testing.T) {
	ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: true})
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
	sc := &serverConn{maxFrameSize: 4096}
	st1 := &stream{id: 1, sc: sc}
	st2 := &stream{id: 2, sc: sc}
	st1.flow.add(4096)
	st2.flow.add(4096)
	ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 4096), false}, st2, nil})
	ws.AdjustStream(2, PriorityParam{StreamDep: 1})
	// We have enough flow-control bytes to write st2 in a single Pop call.
	// However, due to out-of-order write throttling, the first call should
	// only write 1KB.
	wr, ok := ws.Pop()
	if !ok {
		t.Fatalf("Pop(st2.first)=false, want true")
	}
	if got, want := wr.StreamID(), uint32(2); got != want {
		t.Fatalf("Pop(st2.first)=stream %d, want stream %d", got, want)
	}
	if got, want := wr.DataSize(), 1024; got != want {
		t.Fatalf("Pop(st2.first)=%d bytes, want %d bytes", got, want)
	}
	// Now add data on st1. This should take precedence.
	ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 4096), false}, st1, nil})
	wr, ok = ws.Pop()
	if !ok {
		t.Fatalf("Pop(st1)=false, want true")
	}
	if got, want := wr.StreamID(), uint32(1); got != want {
		t.Fatalf("Pop(st1)=stream %d, want stream %d", got, want)
	}
	// st1 is written in full (4096 bytes): it is not out of order.
	if got, want := wr.DataSize(), 4096; got != want {
		t.Fatalf("Pop(st1)=%d bytes, want %d bytes", got, want)
	}
	// Should go back to writing 1KB from st2.
	wr, ok = ws.Pop()
	if !ok {
		t.Fatalf("Pop(st2.last)=false, want true")
	}
	if got, want := wr.StreamID(), uint32(2); got != want {
		t.Fatalf("Pop(st2.last)=stream %d, want stream %d", got, want)
	}
	if got, want := wr.DataSize(), 1024; got != want {
		t.Fatalf("Pop(st2.last)=%d bytes, want %d bytes", got, want)
	}
}
// TestPriorityWeights verifies proportional bandwidth sharing between two
// sibling streams with different RFC 7540 weights (effective weight is the
// wire value plus one).
func TestPriorityWeights(t *testing.T) {
	ws := defaultPriorityWriteScheduler()
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{})
	sc := &serverConn{maxFrameSize: 8}
	st1 := &stream{id: 1, sc: sc}
	st2 := &stream{id: 2, sc: sc}
	st1.flow.add(40)
	st2.flow.add(40)
	ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 40), false}, st1, nil})
	ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 40), false}, st2, nil})
	ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 34})
	ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 9})
	// st1 gets 3.5x the bandwidth of st2 (3.5 = (34+1)/(9+1)).
	// The maximum frame size is 8 bytes. The write sequence should be:
	// st1, total bytes so far is (st1=8, st2=0)
	// st2, total bytes so far is (st1=8, st2=8)
	// st1, total bytes so far is (st1=16, st2=8)
	// st1, total bytes so far is (st1=24, st2=8) // 3x bandwidth
	// st1, total bytes so far is (st1=32, st2=8) // 4x bandwidth
	// st2, total bytes so far is (st1=32, st2=16) // 2x bandwidth
	// st1, total bytes so far is (st1=40, st2=16)
	// st2, total bytes so far is (st1=40, st2=24)
	// st2, total bytes so far is (st1=40, st2=32)
	// st2, total bytes so far is (st1=40, st2=40)
	if err := checkPopAll(ws, []uint32{1, 2, 1, 1, 1, 2, 1, 2, 2, 2}); err != nil {
		t.Error(err)
	}
}
// TestPriorityRstStreamOnNonOpenStreams verifies that RST_STREAM frames can
// still be scheduled for streams that are closed (stream 1) or were never
// opened (stream 2), even with the scheduler configured to retain no
// closed/idle nodes in the priority tree.
func TestPriorityRstStreamOnNonOpenStreams(t *testing.T) {
	ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{
		MaxClosedNodesInTree: 0,
		MaxIdleNodesInTree:   0,
	})
	ws.OpenStream(1, OpenStreamOptions{})
	ws.CloseStream(1)
	ws.Push(FrameWriteRequest{write: streamError(1, ErrCodeProtocol)})
	ws.Push(FrameWriteRequest{write: streamError(2, ErrCodeProtocol)})
	if err := checkPopAll(ws, []uint32{1, 2}); err != nil {
		t.Error(err)
	}
}
// Regression test for https://go.dev/issue/66514: repeated AdjustStream
// calls on a growing dependency tree must never leave a node detached from
// the root (id 0).
func TestPriorityIssue66514(t *testing.T) {
	// addDep declares a non-exclusive dependency of child on parent with
	// the default weight.
	addDep := func(ws *priorityWriteScheduler, child uint32, parent uint32) {
		ws.AdjustStream(child, PriorityParam{
			StreamDep: parent,
			Exclusive: false,
			Weight:    16,
		})
	}
	// validateDepTree walks from node id up the parent chain and fails the
	// test if the chain terminates at a node other than the root (id 0).
	validateDepTree := func(ws *priorityWriteScheduler, id uint32, t *testing.T) {
		for n := ws.nodes[id]; n != nil; n = n.parent {
			if n.parent == nil {
				if n.id != uint32(0) {
					t.Errorf("detected nodes not parented to 0")
				}
			}
		}
	}
	ws := NewPriorityWriteScheduler(nil).(*priorityWriteScheduler)
	// Root entry
	addDep(ws, uint32(1), uint32(0))
	addDep(ws, uint32(3), uint32(1))
	addDep(ws, uint32(5), uint32(1))
	// Grow the tree with pairs of children and re-validate connectivity
	// after each step.
	for id := uint32(7); id < uint32(100); id += uint32(4) {
		addDep(ws, id, id-uint32(4))
		addDep(ws, id+uint32(2), id-uint32(4))
		validateDepTree(ws, id, t)
	}
}
| net/http2/writesched_priority_test.go/0 | {
"file_path": "net/http2/writesched_priority_test.go",
"repo_id": "net",
"token_count": 7645
} | 631 |
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package icmp
import (
"encoding/binary"
"net"
"reflect"
"runtime"
"testing"
"golang.org/x/net/internal/socket"
"golang.org/x/net/ipv4"
)
// TestParseIPv4Header checks that ParseIPv4Header decodes a kernel-provided
// IPv4 wire header into the expected ipv4.Header, accounting for the
// platform-specific byte order of the TotalLen field (traditional BSD
// kernels pass it in host byte order).
//
// NOTE(review): only the LittleEndian case is present, so on big-endian
// hosts this test silently does nothing — presumably intentional until a
// big-endian wire format is added; confirm before extending.
func TestParseIPv4Header(t *testing.T) {
	switch socket.NativeEndian {
	case binary.LittleEndian:
		t.Run("LittleEndian", func(t *testing.T) {
			// TODO(mikio): Add platform dependent wire
			// header formats when we support new
			// platforms.
			//
			// Standard form: TotalLen and FragOff in network byte order.
			wireHeaderFromKernel := [ipv4.HeaderLen]byte{
				0x45, 0x01, 0xbe, 0xef,
				0xca, 0xfe, 0x45, 0xdc,
				0xff, 0x01, 0xde, 0xad,
				172, 16, 254, 254,
				192, 168, 0, 1,
			}
			// Traditional BSD form: TotalLen byte-swapped to host order.
			wireHeaderFromTradBSDKernel := [ipv4.HeaderLen]byte{
				0x45, 0x01, 0xef, 0xbe,
				0xca, 0xfe, 0x45, 0xdc,
				0xff, 0x01, 0xde, 0xad,
				172, 16, 254, 254,
				192, 168, 0, 1,
			}
			// th is the expected decoded header for both wire forms.
			th := &ipv4.Header{
				Version:  ipv4.Version,
				Len:      ipv4.HeaderLen,
				TOS:      1,
				TotalLen: 0xbeef,
				ID:       0xcafe,
				Flags:    ipv4.DontFragment,
				FragOff:  1500,
				TTL:      255,
				Protocol: 1,
				Checksum: 0xdead,
				Src:      net.IPv4(172, 16, 254, 254),
				Dst:      net.IPv4(192, 168, 0, 1),
			}
			var wh []byte
			switch runtime.GOOS {
			case "darwin", "ios":
				wh = wireHeaderFromTradBSDKernel[:]
			case "freebsd":
				// FreeBSD 10 and later use the standard form.
				if freebsdVersion >= 1000000 {
					wh = wireHeaderFromKernel[:]
				} else {
					wh = wireHeaderFromTradBSDKernel[:]
				}
			default:
				wh = wireHeaderFromKernel[:]
			}
			h, err := ParseIPv4Header(wh)
			if err != nil {
				t.Fatal(err)
			}
			if !reflect.DeepEqual(h, th) {
				t.Fatalf("got %#v; want %#v", h, th)
			}
		})
	}
}
| net/icmp/ipv4_test.go/0 | {
"file_path": "net/icmp/ipv4_test.go",
"repo_id": "net",
"token_count": 891
} | 632 |
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.10
// Package idna implements IDNA2008 using the compatibility processing
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
// deal with the transition from IDNA2003.
//
// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC
// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
// UTS #46 is defined in https://www.unicode.org/reports/tr46.
// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the
// differences between these two standards.
package idna // import "golang.org/x/net/idna"
import (
"fmt"
"strings"
"unicode/utf8"
"golang.org/x/text/secure/bidirule"
"golang.org/x/text/unicode/bidi"
"golang.org/x/text/unicode/norm"
)
// NOTE: Unlike common practice in Go APIs, the functions will return a
// sanitized domain name in case of errors. Browsers sometimes use a partially
// evaluated string as lookup.
// TODO: the current error handling is, in my opinion, the least opinionated.
// Other strategies are also viable, though:
// Option 1) Return an empty string in case of error, but allow the user to
// specify explicitly which errors to ignore.
// Option 2) Return the partially evaluated string if it is itself a valid
// string, otherwise return the empty string in case of error.
// Option 3) Option 1 and 2.
// Option 4) Always return an empty string for now and implement Option 1 as
// needed, and document that the return string may not be empty in case of
// error in the future.
// I think Option 1 is best, but it is quite opinionated.
// ToASCII is a wrapper for Punycode.ToASCII.
// It uses the most permissive profile (raw punycode conversion with a
// minimum of validation); see the Lookup and Registration profiles for
// stricter processing.
func ToASCII(s string) (string, error) {
	return Punycode.process(s, true)
}
// ToUnicode is a wrapper for Punycode.ToUnicode.
func ToUnicode(s string) (string, error) {
	return Punycode.process(s, false)
}
// An Option configures a Profile at creation time.
type Option func(*options)
// Transitional sets a Profile to use the Transitional mapping as defined in UTS
// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
// transitional mapping provides a compromise between IDNA2003 and IDNA2008
// compatibility. It is used by some browsers when resolving domain names. This
// option is only meaningful if combined with MapForLookup.
func Transitional(transitional bool) Option {
	return func(o *options) { o.transitional = transitional }
}
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
// are longer than allowed by the RFC.
//
// This option corresponds to the VerifyDnsLength flag in UTS #46.
func VerifyDNSLength(verify bool) Option {
	return func(o *options) { o.verifyDNSLength = verify }
}
// RemoveLeadingDots removes leading label separators. Leading runes that map to
// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
// (Removal of runes that map to dots happens during the mapping step;
// the flag itself only controls stripping of leading '.' characters.)
func RemoveLeadingDots(remove bool) Option {
	return func(o *options) { o.removeLeadingDots = remove }
}
// ValidateLabels sets whether to check the mandatory label validation criteria
// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
// of hyphens ('-'), normalization, validity of runes, and the context rules.
// In particular, ValidateLabels also sets the CheckHyphens and CheckJoiners
// flags in UTS #46.
func ValidateLabels(enable bool) Option {
	return func(o *options) {
		if enable && o.mapping == nil {
			// No mapping was configured yet: install one that at least
			// checks normalization. An existing mapping is never replaced.
			o.mapping = normalize
		}
		o.trie = trie
		o.checkHyphens = enable
		o.checkJoiners = enable
		o.fromPuny = nil
		if enable {
			o.fromPuny = validateFromPunycode
		}
	}
}
// CheckHyphens sets whether to check for correct use of hyphens ('-') in
// labels. Most web browsers do not have this option set, since labels such as
// "r3---sn-apo3qvuoxuxbt-j5pe" are in common use.
//
// This option corresponds to the CheckHyphens flag in UTS #46.
func CheckHyphens(enable bool) Option {
	return func(o *options) { o.checkHyphens = enable }
}
// CheckJoiners sets whether to check the ContextJ rules as defined in Appendix
// A of RFC 5892, concerning the use of joiner runes.
//
// This option corresponds to the CheckJoiners flag in UTS #46.
// It also installs the mapping trie, which the joiner check requires.
func CheckJoiners(enable bool) Option {
	return func(o *options) {
		o.trie = trie
		o.checkJoiners = enable
	}
}
// StrictDomainName limits the set of permissible ASCII characters to those
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
// hyphen). This is set by default for MapForLookup and ValidateForRegistration,
// but is only useful if ValidateLabels is set.
//
// This option is useful, for instance, for browsers that allow characters
// outside this range, for example a '_' (U+005F LOW LINE). See
// http://www.rfc-editor.org/std/std3.txt for more details.
//
// This option corresponds to the UseSTD3ASCIIRules flag in UTS #46.
func StrictDomainName(use bool) Option {
	return func(o *options) { o.useSTD3Rules = use }
}
// NOTE: the following options pull in tables. The tables should not be linked
// in as long as the options are not used.
// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
// that relies on proper validation of labels should include this rule.
//
// This option corresponds to the CheckBidi flag in UTS #46.
func BidiRule() Option {
	return func(o *options) { o.bidirule = bidirule.ValidString }
}
// ValidateForRegistration sets validation options to verify that a given IDN is
// properly formatted for registration as defined by Section 4 of RFC 5891.
// It bundles strict STD3 rules, label validation, DNS length checks and the
// Bidi Rule on top of the registration mapping.
func ValidateForRegistration() Option {
	return func(o *options) {
		o.mapping = validateRegistration
		StrictDomainName(true)(o)
		ValidateLabels(true)(o)
		VerifyDNSLength(true)(o)
		BidiRule()(o)
	}
}
// MapForLookup sets validation and mapping options such that a given IDN is
// transformed for domain name lookup according to the requirements set out in
// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
// to add this check.
//
// The mappings include normalization and mapping case, width and other
// compatibility mappings.
func MapForLookup() Option {
	return func(o *options) {
		o.mapping = validateAndMap
		StrictDomainName(true)(o)
		ValidateLabels(true)(o)
	}
}
// options holds the configuration of a Profile. The boolean flags largely
// correspond to the processing flags defined in UTS #46 section 4.
type options struct {
	transitional bool // apply the Transitional (deviation) mapping
	useSTD3Rules bool // UTS #46 UseSTD3ASCIIRules flag
	checkHyphens bool // UTS #46 CheckHyphens flag
	checkJoiners bool // UTS #46 CheckJoiners (ContextJ) flag
	verifyDNSLength bool // enforce RFC label/name length limits
	removeLeadingDots bool // strip leading label separators
	trie *idnaTrie // rune property table; required by label validation
	// fromPuny calls validation rules when converting A-labels to U-labels.
	fromPuny func(p *Profile, s string) error
	// mapping implements a validation and mapping step as defined in RFC 5895
	// or UTS 46, tailored to, for example, domain registration or lookup.
	mapping func(p *Profile, s string) (mapped string, isBidi bool, err error)
	// bidirule, if specified, checks whether s conforms to the Bidi Rule
	// defined in RFC 5893.
	bidirule func(s string) bool
}
// A Profile defines the configuration of an IDNA mapper.
type Profile struct {
	options
}
// apply runs each Option against o in order; later options override the
// effects of earlier ones.
func apply(o *options, opts []Option) {
	for _, f := range opts {
		f(o)
	}
}
// New creates a new Profile.
//
// With no options, the returned Profile is the most permissive and equals the
// Punycode Profile. Options can be passed to further restrict the Profile. The
// MapForLookup and ValidateForRegistration options set a collection of options,
// for lookup and registration purposes respectively, which can be tailored by
// adding more fine-grained options, where later options override earlier
// options.
func New(o ...Option) *Profile {
	p := &Profile{}
	apply(&p.options, o)
	return p
}
// ToASCII converts a domain or domain label to its ASCII form. For example,
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
// ToASCII("golang") is "golang". If an error is encountered it will return
// an error and a (partially) processed result.
func (p *Profile) ToASCII(s string) (string, error) {
	return p.process(s, true)
}
// ToUnicode converts a domain or domain label to its Unicode form. For example,
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
// ToUnicode("golang") is "golang". If an error is encountered it will return
// an error and a (partially) processed result.
func (p *Profile) ToUnicode(s string) (string, error) {
	// Work on a copy so the receiver is not mutated: the transitional
	// mapping never applies when converting to Unicode.
	pp := *p
	pp.transitional = false
	return pp.process(s, false)
}
// String reports a string with a description of the profile for debugging
// purposes. The string format may change with different versions.
func (p *Profile) String() string {
	var b strings.Builder
	if p.transitional {
		b.WriteString("Transitional")
	} else {
		b.WriteString("NonTransitional")
	}
	if p.useSTD3Rules {
		b.WriteString(":UseSTD3Rules")
	}
	if p.checkHyphens {
		b.WriteString(":CheckHyphens")
	}
	if p.checkJoiners {
		b.WriteString(":CheckJoiners")
	}
	if p.verifyDNSLength {
		b.WriteString(":VerifyDNSLength")
	}
	return b.String()
}
var (
	// Punycode is a Profile that does raw punycode processing with a minimum
	// of validation.
	Punycode *Profile = punycode
	// Lookup is the recommended profile for looking up domain names, according
	// to Section 5 of RFC 5891. The exact configuration of this profile may
	// change over time.
	Lookup *Profile = lookup
	// Display is the recommended profile for displaying domain names.
	// The configuration of this profile may change over time.
	Display *Profile = display
	// Registration is the recommended profile for checking whether a given
	// IDN is valid for registration, according to Section 4 of RFC 5891.
	Registration *Profile = registration
	// punycode has the zero options: no mapping, no validation.
	punycode = &Profile{}
	// lookup is like display but with the transitional mapping applied
	// (if transitionalLookup is set for this build).
	lookup = &Profile{options{
		transitional: transitionalLookup,
		useSTD3Rules: true,
		checkHyphens: true,
		checkJoiners: true,
		trie: trie,
		fromPuny: validateFromPunycode,
		mapping: validateAndMap,
		bidirule: bidirule.ValidString,
	}}
	display = &Profile{options{
		useSTD3Rules: true,
		checkHyphens: true,
		checkJoiners: true,
		trie: trie,
		fromPuny: validateFromPunycode,
		mapping: validateAndMap,
		bidirule: bidirule.ValidString,
	}}
	// registration additionally enforces DNS length limits and uses the
	// non-mapping registration validator.
	registration = &Profile{options{
		useSTD3Rules: true,
		verifyDNSLength: true,
		checkHyphens: true,
		checkJoiners: true,
		trie: trie,
		fromPuny: validateFromPunycode,
		mapping: validateRegistration,
		bidirule: bidirule.ValidString,
	}}
	// TODO: profiles
	// Register: recommended for approving domain names: don't do any mappings
	// but rather reject on invalid input. Bundle or block deviation characters.
)
// labelError records a label that failed validation together with the
// UTS #46 / RFC 5891 status code of the failed check (e.g. "V1", "A4").
type labelError struct{ label, code_ string }

// code returns the status code identifying which check failed.
func (e labelError) code() string { return e.code_ }

// Error implements the error interface; the code is not part of the message.
func (e labelError) Error() string {
	msg := fmt.Sprintf("invalid label %q", e.label)
	return "idna: " + msg
}
// runeError records a single disallowed rune.
type runeError rune

// code always reports "P1": a disallowed code point was found.
func (e runeError) code() string { return "P1" }

// Error implements the error interface, reporting the rune in U+XXXX form.
func (e runeError) Error() string {
	return fmt.Sprintf("idna: disallowed rune %U", rune(e))
}
// process implements the algorithm described in section 4 of UTS #46,
// see https://www.unicode.org/reports/tr46.
//
// It runs the profile's mapping step, iterates over the labels of s,
// decodes any Punycode (ACE) labels, applies per-label validation and the
// Bidi Rule, and — when toASCII is set — re-encodes non-ASCII labels and
// enforces DNS length limits. Only the first error encountered is
// reported, but processing continues so that a (partially) sanitized
// string can be returned alongside the error.
func (p *Profile) process(s string, toASCII bool) (string, error) {
	var err error
	var isBidi bool
	if p.mapping != nil {
		s, isBidi, err = p.mapping(p, s)
	}
	// Remove leading empty labels.
	if p.removeLeadingDots {
		for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
		}
	}
	// TODO: allow for a quick check of the tables data.
	// It seems like we should only create this error on ToASCII, but the
	// UTS 46 conformance tests suggests we should always check this.
	if err == nil && p.verifyDNSLength && s == "" {
		err = &labelError{s, "A4"}
	}
	labels := labelIter{orig: s}
	for ; !labels.done(); labels.next() {
		label := labels.label()
		if label == "" {
			// Empty labels are not okay. The label iterator skips the last
			// label if it is empty.
			if err == nil && p.verifyDNSLength {
				err = &labelError{s, "A4"}
			}
			continue
		}
		if strings.HasPrefix(label, acePrefix) {
			u, err2 := decode(label[len(acePrefix):])
			if err2 != nil {
				if err == nil {
					err = err2
				}
				// Spec says keep the old label.
				continue
			}
			isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight
			labels.set(u)
			if err == nil && p.fromPuny != nil {
				err = p.fromPuny(p, u)
			}
			if err == nil {
				// This should be called on NonTransitional, according to the
				// spec, but that currently does not have any effect. Use the
				// original profile to preserve options.
				err = p.validateLabel(u)
			}
		} else if err == nil {
			err = p.validateLabel(label)
		}
	}
	// The Bidi Rule is only applied when some label contains RTL runes.
	if isBidi && p.bidirule != nil && err == nil {
		for labels.reset(); !labels.done(); labels.next() {
			if !p.bidirule(labels.label()) {
				err = &labelError{s, "B"}
				break
			}
		}
	}
	if toASCII {
		for labels.reset(); !labels.done(); labels.next() {
			label := labels.label()
			if !ascii(label) {
				a, err2 := encode(acePrefix, label)
				if err == nil {
					err = err2
				}
				label = a
				labels.set(a)
			}
			// Each encoded label is limited to 63 octets (error code A4).
			n := len(label)
			if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
				err = &labelError{label, "A4"}
			}
		}
	}
	s = labels.result()
	if toASCII && p.verifyDNSLength && err == nil {
		// Compute the length of the domain name minus the root label and its dot.
		n := len(s)
		if n > 0 && s[n-1] == '.' {
			n--
		}
		// The full name (without a trailing root dot) is limited to 253 octets.
		if len(s) < 1 || n > 253 {
			err = &labelError{s, "A4"}
		}
	}
	return s, err
}
// normalize is the minimal mapping step installed by ValidateLabels when no
// other mapping is configured: it NFC-normalizes s and reports whether the
// normalized string as a whole has right-to-left direction. It never fails.
func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) {
	// TODO: consider first doing a quick check to see if any of these checks
	// need to be done. This will make it slower in the general case, but
	// faster in the common case.
	mapped = norm.NFC.String(s)
	isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft
	return mapped, isBidi, nil
}
// validateRegistration is the mapping step of the Registration profile: it
// performs no mapping at all. s must already be in NFC and every rune must
// be valid (or a deviation character) under the profile; the first
// offending rune is reported via runeError. bidi reports whether s contains
// any RTL runes.
func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) {
	// TODO: filter need for normalization in loop below.
	if !norm.NFC.IsNormalString(s) {
		return s, false, &labelError{s, "V1"}
	}
	for i := 0; i < len(s); {
		v, sz := trie.lookupString(s[i:])
		// sz == 0 means the input is not valid UTF-8 at position i.
		if sz == 0 {
			return s, bidi, runeError(utf8.RuneError)
		}
		bidi = bidi || info(v).isBidi(s[i:])
		// Copy bytes not copied so far.
		switch p.simplify(info(v).category()) {
		// TODO: handle the NV8 defined in the Unicode idna data set to allow
		// for strict conformance to IDNA2008.
		case valid, deviation:
		case disallowed, mapped, unknown, ignored:
			r, _ := utf8.DecodeRuneInString(s[i:])
			return s, bidi, runeError(r)
		}
		i += sz
	}
	return s, bidi, nil
}
// isBidi reports whether the rune at the start of s is a right-to-left rune
// (bidi class R, AL, or AN). For unmapped runes the answer comes from the
// precomputed attribute bits in c; for mapped runes it falls back to a bidi
// table lookup on s.
func (c info) isBidi(s string) bool {
	if !c.isMapped() {
		return c&attributesMask == rtl
	}
	// TODO: also store bidi info for mapped data. This is possible, but a bit
	// cumbersome and not for the common case.
	p, _ := bidi.LookupString(s)
	switch p.Class() {
	case bidi.R, bidi.AL, bidi.AN:
		return true
	}
	return false
}
// validateAndMap is the mapping step of the lookup/display profiles. It
// rewrites mapped runes, drops ignored runes, replaces disallowed UTF-8 and
// unknown runes with U+FFFD, records only the first error, and finally
// NFC-normalizes the result when needed. The output buffer b is allocated
// lazily (copy-on-write): while no rune needs rewriting, s is left
// untouched. bidi reports whether s contains any RTL runes.
func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) {
	var (
		b []byte // lazily allocated output; nil while s is unchanged
		k int    // index of the first byte of s not yet copied to b
	)
	// combinedInfoBits contains the or-ed bits of all runes. We use this
	// to derive the mayNeedNorm bit later. This may trigger normalization
	// overeagerly, but it will not do so in the common case. The end result
	// is another 10% saving on BenchmarkProfile for the common case.
	var combinedInfoBits info
	for i := 0; i < len(s); {
		v, sz := trie.lookupString(s[i:])
		// sz == 0 means invalid UTF-8: flush, emit U+FFFD, and stop.
		if sz == 0 {
			b = append(b, s[k:i]...)
			b = append(b, "\ufffd"...)
			k = len(s)
			if err == nil {
				err = runeError(utf8.RuneError)
			}
			break
		}
		combinedInfoBits |= info(v)
		bidi = bidi || info(v).isBidi(s[i:])
		start := i
		i += sz
		// Copy bytes not copied so far.
		switch p.simplify(info(v).category()) {
		case valid:
			continue
		case disallowed:
			// Keep the rune but record the first error.
			if err == nil {
				r, _ := utf8.DecodeRuneInString(s[start:])
				err = runeError(r)
			}
			continue
		case mapped, deviation:
			b = append(b, s[k:start]...)
			b = info(v).appendMapping(b, s[start:i])
		case ignored:
			b = append(b, s[k:start]...)
			// drop the rune
		case unknown:
			b = append(b, s[k:start]...)
			b = append(b, "\ufffd"...)
		}
		k = i
	}
	if k == 0 {
		// No changes so far.
		if combinedInfoBits&mayNeedNorm != 0 {
			s = norm.NFC.String(s)
		}
	} else {
		b = append(b, s[k:]...)
		if norm.NFC.QuickSpan(b) != len(b) {
			b = norm.NFC.Bytes(b)
		}
		// TODO: the punycode converters require strings as input.
		s = string(b)
	}
	return s, bidi, err
}
// A labelIter allows iterating over domain name labels.
//
// It iterates over orig lazily: labels are only materialized into slice the
// first time set replaces one. result joins the (possibly modified) labels
// back into a domain name.
type labelIter struct {
	orig string
	slice []string
	curStart int // start of the current label in orig (string mode)
	curEnd int   // end of the current label in orig (string mode)
	i int        // index of the current label in slice (slice mode)
}
// reset rewinds the iterator to the first label.
func (l *labelIter) reset() {
	l.curStart = 0
	l.curEnd = 0
	l.i = 0
}
// done reports whether iteration has passed the last label.
func (l *labelIter) done() bool {
	return l.curStart >= len(l.orig)
}
// result returns the domain name with any labels modified via set.
func (l *labelIter) result() string {
	if l.slice != nil {
		return strings.Join(l.slice, ".")
	}
	return l.orig
}
// label returns the current label and, in string mode, records its end
// position for next.
func (l *labelIter) label() string {
	if l.slice != nil {
		return l.slice[l.i]
	}
	p := strings.IndexByte(l.orig[l.curStart:], '.')
	l.curEnd = l.curStart + p
	if p == -1 {
		l.curEnd = len(l.orig)
	}
	return l.orig[l.curStart:l.curEnd]
}
// next sets the value to the next label. It skips the last label if it is empty.
func (l *labelIter) next() {
	l.i++
	if l.slice != nil {
		if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
			l.curStart = len(l.orig)
		}
	} else {
		l.curStart = l.curEnd + 1
		// A lone trailing dot (the root label) terminates iteration.
		if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
			l.curStart = len(l.orig)
		}
	}
}
// set replaces the current label, switching the iterator to slice mode on
// first use.
func (l *labelIter) set(s string) {
	if l.slice == nil {
		l.slice = strings.Split(l.orig, ".")
	}
	l.slice[l.i] = s
}
// acePrefix is the ASCII Compatible Encoding prefix.
const acePrefix = "xn--"
// simplify collapses the fine-grained categories from the mapping table
// into the basic ones according to the profile's flags: the
// disallowedSTD3* categories resolve based on UseSTD3ASCIIRules, deviation
// resolves to valid unless the transitional mapping is in effect, and
// validNV8/validXV8 are treated as plain valid.
func (p *Profile) simplify(cat category) category {
	switch cat {
	case disallowedSTD3Mapped:
		if p.useSTD3Rules {
			cat = disallowed
		} else {
			cat = mapped
		}
	case disallowedSTD3Valid:
		if p.useSTD3Rules {
			cat = disallowed
		} else {
			cat = valid
		}
	case deviation:
		if !p.transitional {
			cat = valid
		}
	case validNV8, validXV8:
		// TODO: handle V2008
		cat = valid
	}
	return cat
}
// validateFromPunycode checks a label that was decoded from Punycode (an
// A-label converted to a U-label): it must be in NFC (error V1) and every
// rune must simplify to valid or deviation under the profile (error V6).
func validateFromPunycode(p *Profile, s string) error {
	if !norm.NFC.IsNormalString(s) {
		return &labelError{s, "V1"}
	}
	// TODO: detect whether string may have to be normalized in the following
	// loop.
	for i := 0; i < len(s); {
		v, sz := trie.lookupString(s[i:])
		// sz == 0 means the label is not valid UTF-8 at position i.
		if sz == 0 {
			return runeError(utf8.RuneError)
		}
		if c := p.simplify(info(v).category()); c != valid && c != deviation {
			return &labelError{s, "V6"}
		}
		i += sz
	}
	return nil
}
const (
	zwnj = "\u200c" // ZERO WIDTH NON-JOINER
	zwj = "\u200d"  // ZERO WIDTH JOINER
)
// joinState is a state of the ContextJ (RFC 5892 Appendix A) validation
// automaton driven by the joinStates table in validateLabel.
type joinState int8
const (
	stateStart joinState = iota // initial state; no joiner context pending
	stateVirama                 // previous rune carried a virama modifier
	stateBefore                 // seen joining type L or D; a joiner may follow
	stateBeforeVirama           // as stateBefore, with a pending virama
	stateAfter                  // seen ZWNJ; needs joining type R or D to close
	stateFAIL                   // invalid use of a joiner
)
// joinStates is the transition table of the ContextJ automaton used by
// validateLabel. Rows are indexed by the current state, columns by the join
// type of the next rune (plus the ZWJ/ZWNJ/virama pseudo-types). Entries
// not listed are the zero value, stateStart.
var joinStates = [][numJoinTypes]joinState{
	stateStart: {
		joiningL: stateBefore,
		joiningD: stateBefore,
		joinZWNJ: stateFAIL,
		joinZWJ: stateFAIL,
		joinVirama: stateVirama,
	},
	stateVirama: {
		joiningL: stateBefore,
		joiningD: stateBefore,
	},
	stateBefore: {
		joiningL: stateBefore,
		joiningD: stateBefore,
		joiningT: stateBefore,
		joinZWNJ: stateAfter,
		joinZWJ: stateFAIL,
		joinVirama: stateBeforeVirama,
	},
	stateBeforeVirama: {
		joiningL: stateBefore,
		joiningD: stateBefore,
		joiningT: stateBefore,
	},
	stateAfter: {
		joiningL: stateFAIL,
		joiningD: stateBefore,
		joiningT: stateAfter,
		joiningR: stateStart,
		joinZWNJ: stateFAIL,
		joinZWJ: stateFAIL,
		joinVirama: stateAfter, // no-op as we can't accept joiners here
	},
	stateFAIL: {
		0: stateFAIL,
		joiningL: stateFAIL,
		joiningD: stateFAIL,
		joiningT: stateFAIL,
		joiningR: stateFAIL,
		joinZWNJ: stateFAIL,
		joinZWJ: stateFAIL,
		joinVirama: stateFAIL,
	},
}
// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are
// already implicitly satisfied by the overall implementation.
//
// Concretely it checks hyphen placement (conditions V2 and V3, when
// checkHyphens is set), that the label does not start with a combining
// modifier (V5), and — when checkJoiners is set — the ContextJ rules of
// RFC 5892 Appendix A for zero-width (non) joiners, driven by the
// joinStates automaton (condition C).
func (p *Profile) validateLabel(s string) (err error) {
	if s == "" {
		if p.verifyDNSLength {
			return &labelError{s, "A4"}
		}
		return nil
	}
	if p.checkHyphens {
		// Hyphens in the third and fourth position (the ACE pattern) are
		// invalid in a U-label (V2); leading/trailing hyphens are V3.
		if len(s) > 4 && s[2] == '-' && s[3] == '-' {
			return &labelError{s, "V2"}
		}
		if s[0] == '-' || s[len(s)-1] == '-' {
			return &labelError{s, "V3"}
		}
	}
	if !p.checkJoiners {
		return nil
	}
	trie := p.trie // p.checkJoiners is only set if trie is set.
	// TODO: merge the use of this in the trie.
	v, sz := trie.lookupString(s)
	x := info(v)
	if x.isModifier() {
		return &labelError{s, "V5"}
	}
	// Quickly return in the absence of zero-width (non) joiners.
	if !strings.Contains(s, zwj) && !strings.Contains(s, zwnj) {
		return nil
	}
	// Run the ContextJ automaton over the label's runes.
	st := stateStart
	for i := 0; ; {
		jt := x.joinType()
		if s[i:i+sz] == zwj {
			jt = joinZWJ
		} else if s[i:i+sz] == zwnj {
			jt = joinZWNJ
		}
		st = joinStates[st][jt]
		if x.isViramaModifier() {
			st = joinStates[st][joinVirama]
		}
		if i += sz; i == len(s) {
			break
		}
		v, sz = trie.lookupString(s[i:])
		x = info(v)
	}
	// stateAfter means a ZWNJ was never closed by a rune of joining type R
	// or D; both it and stateFAIL violate the ContextJ rules.
	if st == stateFAIL || st == stateAfter {
		return &labelError{s, "C"}
	}
	return nil
}
// ascii reports whether s consists entirely of ASCII bytes.
func ascii(s string) bool {
	for _, c := range []byte(s) {
		if c >= utf8.RuneSelf {
			return false
		}
	}
	return true
}
| net/idna/idna10.0.0.go/0 | {
"file_path": "net/idna/idna10.0.0.go",
"repo_id": "net",
"token_count": 7975
} | 633 |
// go generate gen.go
// Code generated by the command above; DO NOT EDIT.
// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).
package iana // import "golang.org/x/net/internal/iana"
// Differentiated Services Field Codepoints (DSCP), Updated: 2018-05-04
// The DiffServ* values are DSCP codepoints already shifted into the upper
// six bits of the TOS/Traffic Class octet (e.g. CS1 is DSCP 8, stored as
// 8<<2 = 0x20); the last four values are the two-bit ECN field codepoints.
const (
	DiffServCS0 = 0x00 // CS0
	DiffServCS1 = 0x20 // CS1
	DiffServCS2 = 0x40 // CS2
	DiffServCS3 = 0x60 // CS3
	DiffServCS4 = 0x80 // CS4
	DiffServCS5 = 0xa0 // CS5
	DiffServCS6 = 0xc0 // CS6
	DiffServCS7 = 0xe0 // CS7
	DiffServAF11 = 0x28 // AF11
	DiffServAF12 = 0x30 // AF12
	DiffServAF13 = 0x38 // AF13
	DiffServAF21 = 0x48 // AF21
	DiffServAF22 = 0x50 // AF22
	DiffServAF23 = 0x58 // AF23
	DiffServAF31 = 0x68 // AF31
	DiffServAF32 = 0x70 // AF32
	DiffServAF33 = 0x78 // AF33
	DiffServAF41 = 0x88 // AF41
	DiffServAF42 = 0x90 // AF42
	DiffServAF43 = 0x98 // AF43
	DiffServEF = 0xb8 // EF
	DiffServVOICEADMIT = 0xb0 // VOICE-ADMIT
	NotECNTransport = 0x00 // Not-ECT (Not ECN-Capable Transport)
	ECNTransport1 = 0x01 // ECT(1) (ECN-Capable Transport(1))
	ECNTransport0 = 0x02 // ECT(0) (ECN-Capable Transport(0))
	CongestionExperienced = 0x03 // CE (Congestion Experienced)
)
// Protocol Numbers, Updated: 2017-10-13
const (
ProtocolIP = 0 // IPv4 encapsulation, pseudo protocol number
ProtocolHOPOPT = 0 // IPv6 Hop-by-Hop Option
ProtocolICMP = 1 // Internet Control Message
ProtocolIGMP = 2 // Internet Group Management
ProtocolGGP = 3 // Gateway-to-Gateway
ProtocolIPv4 = 4 // IPv4 encapsulation
ProtocolST = 5 // Stream
ProtocolTCP = 6 // Transmission Control
ProtocolCBT = 7 // CBT
ProtocolEGP = 8 // Exterior Gateway Protocol
ProtocolIGP = 9 // any private interior gateway (used by Cisco for their IGRP)
ProtocolBBNRCCMON = 10 // BBN RCC Monitoring
ProtocolNVPII = 11 // Network Voice Protocol
ProtocolPUP = 12 // PUP
ProtocolEMCON = 14 // EMCON
ProtocolXNET = 15 // Cross Net Debugger
ProtocolCHAOS = 16 // Chaos
ProtocolUDP = 17 // User Datagram
ProtocolMUX = 18 // Multiplexing
ProtocolDCNMEAS = 19 // DCN Measurement Subsystems
ProtocolHMP = 20 // Host Monitoring
ProtocolPRM = 21 // Packet Radio Measurement
ProtocolXNSIDP = 22 // XEROX NS IDP
ProtocolTRUNK1 = 23 // Trunk-1
ProtocolTRUNK2 = 24 // Trunk-2
ProtocolLEAF1 = 25 // Leaf-1
ProtocolLEAF2 = 26 // Leaf-2
ProtocolRDP = 27 // Reliable Data Protocol
ProtocolIRTP = 28 // Internet Reliable Transaction
ProtocolISOTP4 = 29 // ISO Transport Protocol Class 4
ProtocolNETBLT = 30 // Bulk Data Transfer Protocol
ProtocolMFENSP = 31 // MFE Network Services Protocol
ProtocolMERITINP = 32 // MERIT Internodal Protocol
ProtocolDCCP = 33 // Datagram Congestion Control Protocol
Protocol3PC = 34 // Third Party Connect Protocol
ProtocolIDPR = 35 // Inter-Domain Policy Routing Protocol
ProtocolXTP = 36 // XTP
ProtocolDDP = 37 // Datagram Delivery Protocol
ProtocolIDPRCMTP = 38 // IDPR Control Message Transport Proto
ProtocolTPPP = 39 // TP++ Transport Protocol
ProtocolIL = 40 // IL Transport Protocol
ProtocolIPv6 = 41 // IPv6 encapsulation
ProtocolSDRP = 42 // Source Demand Routing Protocol
ProtocolIPv6Route = 43 // Routing Header for IPv6
ProtocolIPv6Frag = 44 // Fragment Header for IPv6
ProtocolIDRP = 45 // Inter-Domain Routing Protocol
ProtocolRSVP = 46 // Reservation Protocol
ProtocolGRE = 47 // Generic Routing Encapsulation
ProtocolDSR = 48 // Dynamic Source Routing Protocol
ProtocolBNA = 49 // BNA
ProtocolESP = 50 // Encap Security Payload
ProtocolAH = 51 // Authentication Header
ProtocolINLSP = 52 // Integrated Net Layer Security TUBA
ProtocolNARP = 54 // NBMA Address Resolution Protocol
ProtocolMOBILE = 55 // IP Mobility
ProtocolTLSP = 56 // Transport Layer Security Protocol using Kryptonet key management
ProtocolSKIP = 57 // SKIP
ProtocolIPv6ICMP = 58 // ICMP for IPv6
ProtocolIPv6NoNxt = 59 // No Next Header for IPv6
ProtocolIPv6Opts = 60 // Destination Options for IPv6
ProtocolCFTP = 62 // CFTP
ProtocolSATEXPAK = 64 // SATNET and Backroom EXPAK
ProtocolKRYPTOLAN = 65 // Kryptolan
ProtocolRVD = 66 // MIT Remote Virtual Disk Protocol
ProtocolIPPC = 67 // Internet Pluribus Packet Core
ProtocolSATMON = 69 // SATNET Monitoring
ProtocolVISA = 70 // VISA Protocol
ProtocolIPCV = 71 // Internet Packet Core Utility
ProtocolCPNX = 72 // Computer Protocol Network Executive
ProtocolCPHB = 73 // Computer Protocol Heart Beat
ProtocolWSN = 74 // Wang Span Network
ProtocolPVP = 75 // Packet Video Protocol
ProtocolBRSATMON = 76 // Backroom SATNET Monitoring
ProtocolSUNND = 77 // SUN ND PROTOCOL-Temporary
ProtocolWBMON = 78 // WIDEBAND Monitoring
ProtocolWBEXPAK = 79 // WIDEBAND EXPAK
ProtocolISOIP = 80 // ISO Internet Protocol
ProtocolVMTP = 81 // VMTP
ProtocolSECUREVMTP = 82 // SECURE-VMTP
ProtocolVINES = 83 // VINES
ProtocolTTP = 84 // Transaction Transport Protocol
ProtocolIPTM = 84 // Internet Protocol Traffic Manager
ProtocolNSFNETIGP = 85 // NSFNET-IGP
ProtocolDGP = 86 // Dissimilar Gateway Protocol
ProtocolTCF = 87 // TCF
ProtocolEIGRP = 88 // EIGRP
ProtocolOSPFIGP = 89 // OSPFIGP
ProtocolSpriteRPC = 90 // Sprite RPC Protocol
ProtocolLARP = 91 // Locus Address Resolution Protocol
ProtocolMTP = 92 // Multicast Transport Protocol
ProtocolAX25 = 93 // AX.25 Frames
ProtocolIPIP = 94 // IP-within-IP Encapsulation Protocol
ProtocolSCCSP = 96 // Semaphore Communications Sec. Pro.
ProtocolETHERIP = 97 // Ethernet-within-IP Encapsulation
ProtocolENCAP = 98 // Encapsulation Header
ProtocolGMTP = 100 // GMTP
ProtocolIFMP = 101 // Ipsilon Flow Management Protocol
ProtocolPNNI = 102 // PNNI over IP
ProtocolPIM = 103 // Protocol Independent Multicast
ProtocolARIS = 104 // ARIS
ProtocolSCPS = 105 // SCPS
ProtocolQNX = 106 // QNX
ProtocolAN = 107 // Active Networks
ProtocolIPComp = 108 // IP Payload Compression Protocol
ProtocolSNP = 109 // Sitara Networks Protocol
ProtocolCompaqPeer = 110 // Compaq Peer Protocol
ProtocolIPXinIP = 111 // IPX in IP
ProtocolVRRP = 112 // Virtual Router Redundancy Protocol
ProtocolPGM = 113 // PGM Reliable Transport Protocol
ProtocolL2TP = 115 // Layer Two Tunneling Protocol
ProtocolDDX = 116 // D-II Data Exchange (DDX)
ProtocolIATP = 117 // Interactive Agent Transfer Protocol
ProtocolSTP = 118 // Schedule Transfer Protocol
ProtocolSRP = 119 // SpectraLink Radio Protocol
ProtocolUTI = 120 // UTI
ProtocolSMP = 121 // Simple Message Protocol
ProtocolPTP = 123 // Performance Transparency Protocol
ProtocolISIS = 124 // ISIS over IPv4
ProtocolFIRE = 125 // FIRE
ProtocolCRTP = 126 // Combat Radio Transport Protocol
ProtocolCRUDP = 127 // Combat Radio User Datagram
ProtocolSSCOPMCE = 128 // SSCOPMCE
ProtocolIPLT = 129 // IPLT
ProtocolSPS = 130 // Secure Packet Shield
ProtocolPIPE = 131 // Private IP Encapsulation within IP
ProtocolSCTP = 132 // Stream Control Transmission Protocol
ProtocolFC = 133 // Fibre Channel
ProtocolRSVPE2EIGNORE = 134 // RSVP-E2E-IGNORE
ProtocolMobilityHeader = 135 // Mobility Header
ProtocolUDPLite = 136 // UDPLite
ProtocolMPLSinIP = 137 // MPLS-in-IP
ProtocolMANET = 138 // MANET Protocols
ProtocolHIP = 139 // Host Identity Protocol
ProtocolShim6 = 140 // Shim6 Protocol
ProtocolWESP = 141 // Wrapped Encapsulating Security Payload
ProtocolROHC = 142 // Robust Header Compression
ProtocolReserved = 255 // Reserved
)
// Address Family Numbers, Updated: 2018-04-02
const (
	// Each constant's trailing comment gives the IANA-registered name
	// for the value; values are assigned by IANA and must not be
	// renumbered or reordered here.
	AddrFamilyIPv4                          = 1     // IP (IP version 4)
	AddrFamilyIPv6                          = 2     // IP6 (IP version 6)
	AddrFamilyNSAP                          = 3     // NSAP
	AddrFamilyHDLC                          = 4     // HDLC (8-bit multidrop)
	AddrFamilyBBN1822                       = 5     // BBN 1822
	AddrFamily802                           = 6     // 802 (includes all 802 media plus Ethernet "canonical format")
	AddrFamilyE163                          = 7     // E.163
	AddrFamilyE164                          = 8     // E.164 (SMDS, Frame Relay, ATM)
	AddrFamilyF69                           = 9     // F.69 (Telex)
	AddrFamilyX121                          = 10    // X.121 (X.25, Frame Relay)
	AddrFamilyIPX                           = 11    // IPX
	AddrFamilyAppletalk                     = 12    // Appletalk
	AddrFamilyDecnetIV                      = 13    // Decnet IV
	AddrFamilyBanyanVines                   = 14    // Banyan Vines
	AddrFamilyE164withSubaddress            = 15    // E.164 with NSAP format subaddress
	AddrFamilyDNS                           = 16    // DNS (Domain Name System)
	AddrFamilyDistinguishedName             = 17    // Distinguished Name
	AddrFamilyASNumber                      = 18    // AS Number
	AddrFamilyXTPoverIPv4                   = 19    // XTP over IP version 4
	AddrFamilyXTPoverIPv6                   = 20    // XTP over IP version 6
	AddrFamilyXTPnativemodeXTP              = 21    // XTP native mode XTP
	AddrFamilyFibreChannelWorldWidePortName = 22    // Fibre Channel World-Wide Port Name
	AddrFamilyFibreChannelWorldWideNodeName = 23    // Fibre Channel World-Wide Node Name
	AddrFamilyGWID                          = 24    // GWID
	AddrFamilyL2VPN                         = 25    // AFI for L2VPN information
	AddrFamilyMPLSTPSectionEndpointID       = 26    // MPLS-TP Section Endpoint Identifier
	AddrFamilyMPLSTPLSPEndpointID           = 27    // MPLS-TP LSP Endpoint Identifier
	AddrFamilyMPLSTPPseudowireEndpointID    = 28    // MPLS-TP Pseudowire Endpoint Identifier
	AddrFamilyMTIPv4                        = 29    // MT IP: Multi-Topology IP version 4
	AddrFamilyMTIPv6                        = 30    // MT IPv6: Multi-Topology IP version 6
	// Values from 16384 up are a separate IANA assignment range.
	AddrFamilyEIGRPCommonServiceFamily   = 16384 // EIGRP Common Service Family
	AddrFamilyEIGRPIPv4ServiceFamily     = 16385 // EIGRP IPv4 Service Family
	AddrFamilyEIGRPIPv6ServiceFamily     = 16386 // EIGRP IPv6 Service Family
	AddrFamilyLISPCanonicalAddressFormat = 16387 // LISP Canonical Address Format (LCAF)
	AddrFamilyBGPLS                      = 16388 // BGP-LS
	AddrFamily48bitMAC                   = 16389 // 48-bit MAC
	AddrFamily64bitMAC                   = 16390 // 64-bit MAC
	AddrFamilyOUI                        = 16391 // OUI
	AddrFamilyMACFinal24bits             = 16392 // MAC/24
	AddrFamilyMACFinal40bits             = 16393 // MAC/40
	AddrFamilyIPv6Initial64bits          = 16394 // IPv6/64
	AddrFamilyRBridgePortID              = 16395 // RBridge Port ID
	AddrFamilyTRILLNickname              = 16396 // TRILL Nickname
)
| net/internal/iana/const.go/0 | {
"file_path": "net/internal/iana/const.go",
"repo_id": "net",
"token_count": 5697
} | 634 |
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build aix || windows || zos
package socket
import (
"syscall"
)
// ioComplete checks the flags and result of a syscall, to be used as return
// value in a syscall.RawConn.Read or Write callback.
func ioComplete(flags int, operr error) bool {
if operr == syscall.EAGAIN || operr == syscall.EWOULDBLOCK {
// No data available, block for I/O and try again.
return false
}
return true
}
| net/internal/socket/complete_nodontwait.go/0 | {
"file_path": "net/internal/socket/complete_nodontwait.go",
"repo_id": "net",
"token_count": 178
} | 635 |
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !aix && !linux && !netbsd
package socket
import "net"
// mmsghdr is an empty placeholder on platforms excluded by the build
// constraint above (no batched-message syscall support).
type mmsghdr struct{}

// mmsghdrs is the batch form of mmsghdr; always empty on this platform.
type mmsghdrs []mmsghdr

// pack is a no-op stub on this platform; it always reports success.
func (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error {
	return nil
}

// unpack is a no-op stub on this platform; it always reports success.
func (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error {
	return nil
}
| net/internal/socket/mmsghdr_stub.go/0 | {
"file_path": "net/internal/socket/mmsghdr_stub.go",
"repo_id": "net",
"token_count": 194
} | 636 |
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !linux
package socket
// recvMsgs is a stub: per the build constraint above, batched receive is
// only implemented on Linux, so other platforms report errNotImplemented.
func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) {
	return 0, errNotImplemented
}

// sendMsgs is a stub: per the build constraint above, batched send is
// only implemented on Linux, so other platforms report errNotImplemented.
func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) {
	return 0, errNotImplemented
}
| net/internal/socket/rawconn_nommsg.go/0 | {
"file_path": "net/internal/socket/rawconn_nommsg.go",
"repo_id": "net",
"token_count": 127
} | 637 |
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs defs_aix.go
// Added for go1.11 compatibility
//go:build aix
package socket
// The declarations below are generated by cgo -godefs and mirror the AIX
// kernel's C struct layouts; field order, sizes, and padding are
// ABI-significant and must not be edited by hand.
type iovec struct {
	Base *byte
	Len  uint64
}

type msghdr struct {
	Name       *byte
	Namelen    uint32
	Iov        *iovec
	Iovlen     int32
	Control    *byte
	Controllen uint32
	Flags      int32
}

type mmsghdr struct {
	Hdr       msghdr
	Len       uint32
	Pad_cgo_0 [4]byte
}

type cmsghdr struct {
	Len   uint32
	Level int32
	Type  int32
}

const (
	sizeofIovec  = 0x10
	sizeofMsghdr = 0x30
)
| net/internal/socket/zsys_aix_ppc64.go/0 | {
"file_path": "net/internal/socket/zsys_aix_ppc64.go",
"repo_id": "net",
"token_count": 262
} | 638 |
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ipv4_test
import (
"bytes"
"fmt"
"net"
"os"
"runtime"
"testing"
"golang.org/x/net/icmp"
"golang.org/x/net/internal/iana"
"golang.org/x/net/ipv4"
"golang.org/x/net/nettest"
)
// packetConnReadWriteMulticastUDPTests enumerates the multicast targets for
// TestPacketConnReadWriteMulticastUDP: an any-source group (src nil) and a
// source-specific group (src non-nil).
var packetConnReadWriteMulticastUDPTests = []struct {
	addr     string
	grp, src *net.IPAddr
}{
	{"224.0.0.0:0", &net.IPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil},                                     // see RFC 4727
	{"232.0.1.0:0", &net.IPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771
}
// TestPacketConnReadWriteMulticastUDP joins a multicast UDP group (any-source
// or, when tt.src is set, source-specific) on a multicast-capable loopback
// interface, sends a datagram to the group, and verifies the loopbacked copy
// arrives intact.
func TestPacketConnReadWriteMulticastUDP(t *testing.T) {
	switch runtime.GOOS {
	case "fuchsia", "hurd", "illumos", "js", "nacl", "plan9", "solaris", "wasip1", "windows", "zos":
		t.Skipf("not supported on %s", runtime.GOOS)
	}
	ifi, err := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback)
	if err != nil {
		t.Skip(err)
	}
	for _, tt := range packetConnReadWriteMulticastUDPTests {
		t.Run(fmt.Sprintf("addr=%s/grp=%s/src=%s", tt.addr, tt.grp, tt.src), func(t *testing.T) {
			c, err := net.ListenPacket("udp4", tt.addr)
			if err != nil {
				t.Fatal(err)
			}
			p := ipv4.NewPacketConn(c)
			defer func() {
				if err := p.Close(); err != nil {
					t.Error(err)
				}
			}()
			// Address the group, but at the port the listener actually bound.
			grp := *p.LocalAddr().(*net.UDPAddr)
			grp.IP = tt.grp.IP
			if tt.src == nil {
				if err := p.JoinGroup(ifi, &grp); err != nil {
					t.Fatal(err)
				}
				defer func() {
					if err := p.LeaveGroup(ifi, &grp); err != nil {
						t.Error(err)
					}
				}()
			} else {
				if err := p.JoinSourceSpecificGroup(ifi, &grp, tt.src); err != nil {
					switch runtime.GOOS {
					case "freebsd", "linux":
					default: // platforms that don't support IGMPv2/3 fail here
						t.Skipf("not supported on %s", runtime.GOOS)
					}
					t.Fatal(err)
				}
				defer func() {
					if err := p.LeaveSourceSpecificGroup(ifi, &grp, tt.src); err != nil {
						t.Error(err)
					}
				}()
			}
			if err := p.SetMulticastInterface(ifi); err != nil {
				t.Fatal(err)
			}
			if _, err := p.MulticastInterface(); err != nil {
				t.Fatal(err)
			}
			if err := p.SetMulticastLoopback(true); err != nil {
				t.Fatal(err)
			}
			if _, err := p.MulticastLoopback(); err != nil {
				t.Fatal(err)
			}
			cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface
			wb := []byte("HELLO-R-U-THERE")
			// Toggle control-message delivery on/off/on to exercise both
			// paths of SetControlMessage before the actual write below.
			for i, toggle := range []bool{true, false, true} {
				if err := p.SetControlMessage(cf, toggle); err != nil {
					if protocolNotSupported(err) {
						t.Logf("not supported on %s", runtime.GOOS)
						continue
					}
					t.Fatal(err)
				}
				if err := p.SetMulticastTTL(i + 1); err != nil {
					t.Fatal(err)
				}
			}
			if n, err := p.WriteTo(wb, nil, &grp); err != nil {
				t.Fatal(err)
			} else if n != len(wb) {
				t.Fatalf("got %v; want %v", n, len(wb))
			}
			rb := make([]byte, 128)
			if n, _, _, err := p.ReadFrom(rb); err != nil {
				t.Fatal(err)
			} else if !bytes.Equal(rb[:n], wb) {
				t.Fatalf("got %v; want %v", rb[:n], wb)
			}
		})
	}
}
// packetConnReadWriteMulticastICMPTests enumerates the ICMP multicast
// targets: an any-source group (src nil) and a source-specific group.
var packetConnReadWriteMulticastICMPTests = []struct {
	grp, src *net.IPAddr
}{
	{&net.IPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil},                                     // see RFC 4727
	{&net.IPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771
}
// TestPacketConnReadWriteMulticastICMP sends ICMP echo requests to a
// multicast group over a raw ipv4:icmp PacketConn and checks that the
// looped-back message parses as either an echo or an echo reply (which of
// the two depends on the host's bmcastecho setting).
func TestPacketConnReadWriteMulticastICMP(t *testing.T) {
	if !nettest.SupportsRawSocket() {
		t.Skipf("not supported on %s/%s", runtime.GOOS, runtime.GOARCH)
	}
	ifi, err := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback)
	// Unable to obtain loopback interface on z/OS, so instead we test on any multicast
	// capable interface.
	if runtime.GOOS == "zos" {
		ifi, err = nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast)
	}
	if err != nil {
		t.Skip(err)
	}
	for _, tt := range packetConnReadWriteMulticastICMPTests {
		t.Run(fmt.Sprintf("grp=%s/src=%s", tt.grp, tt.src), func(t *testing.T) {
			c, err := net.ListenPacket("ip4:icmp", "0.0.0.0")
			if err != nil {
				t.Fatal(err)
			}
			p := ipv4.NewPacketConn(c)
			defer func() {
				if err := p.Close(); err != nil {
					t.Error(err)
				}
			}()
			if tt.src == nil {
				if err := p.JoinGroup(ifi, tt.grp); err != nil {
					t.Fatal(err)
				}
				defer func() {
					if err := p.LeaveGroup(ifi, tt.grp); err != nil {
						t.Error(err)
					}
				}()
			} else {
				if err := p.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil {
					switch runtime.GOOS {
					case "freebsd", "linux":
					default: // platforms that don't support IGMPv2/3 fail here
						t.Skipf("not supported on %s", runtime.GOOS)
					}
					t.Fatal(err)
				}
				defer func() {
					if err := p.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil {
						t.Error(err)
					}
				}()
			}
			if err := p.SetMulticastInterface(ifi); err != nil {
				t.Fatal(err)
			}
			if _, err := p.MulticastInterface(); err != nil {
				t.Fatal(err)
			}
			if err := p.SetMulticastLoopback(true); err != nil {
				t.Fatal(err)
			}
			if _, err := p.MulticastLoopback(); err != nil {
				t.Fatal(err)
			}
			cf := ipv4.FlagDst | ipv4.FlagInterface
			if runtime.GOOS != "illumos" && runtime.GOOS != "solaris" {
				// Illumos and Solaris never allow modification of ICMP properties.
				cf |= ipv4.FlagTTL
			}
			// Each iteration builds a fresh echo request (Seq = i+1) and
			// round-trips it with control messages toggled on/off/on.
			for i, toggle := range []bool{true, false, true} {
				wb, err := (&icmp.Message{
					Type: ipv4.ICMPTypeEcho, Code: 0,
					Body: &icmp.Echo{
						ID: os.Getpid() & 0xffff, Seq: i + 1,
						Data: []byte("HELLO-R-U-THERE"),
					},
				}).Marshal(nil)
				if err != nil {
					t.Fatal(err)
				}
				if err := p.SetControlMessage(cf, toggle); err != nil {
					if protocolNotSupported(err) {
						t.Logf("not supported on %s", runtime.GOOS)
						continue
					}
					t.Fatal(err)
				}
				if err := p.SetMulticastTTL(i + 1); err != nil {
					t.Fatal(err)
				}
				if n, err := p.WriteTo(wb, nil, tt.grp); err != nil {
					t.Fatal(err)
				} else if n != len(wb) {
					t.Fatalf("got %v; want %v", n, len(wb))
				}
				rb := make([]byte, 128)
				if n, _, _, err := p.ReadFrom(rb); err != nil {
					t.Fatal(err)
				} else {
					m, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n])
					if err != nil {
						t.Fatal(err)
					}
					switch {
					case m.Type == ipv4.ICMPTypeEchoReply && m.Code == 0: // net.inet.icmp.bmcastecho=1
					case m.Type == ipv4.ICMPTypeEcho && m.Code == 0: // net.inet.icmp.bmcastecho=0
					default:
						t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0)
					}
				}
			}
		})
	}
}
// rawConnReadWriteMulticastICMPTests enumerates the multicast targets for
// the RawConn variant of the ICMP round-trip test.
var rawConnReadWriteMulticastICMPTests = []struct {
	grp, src *net.IPAddr
}{
	{&net.IPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil},                                     // see RFC 4727
	{&net.IPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771
}
// TestRawConnReadWriteMulticastICMP is the RawConn analogue of
// TestPacketConnReadWriteMulticastICMP: it builds the IPv4 header by hand,
// sends echo requests to a multicast group, and validates the looped-back
// header/payload pair.
//
// NOTE(review): unlike the PacketConn tests above, this one does not use
// t.Run subtests, so the defers below accumulate inside the range loop and
// only fire when the whole function returns. Fine for two table entries,
// but worth converting to subtests if the table grows.
func TestRawConnReadWriteMulticastICMP(t *testing.T) {
	if testing.Short() {
		t.Skip("to avoid external network")
	}
	if !nettest.SupportsRawSocket() {
		t.Skipf("not supported on %s/%s", runtime.GOOS, runtime.GOARCH)
	}
	ifi, err := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback)
	if err != nil {
		t.Skipf("not available on %s", runtime.GOOS)
	}
	for _, tt := range rawConnReadWriteMulticastICMPTests {
		c, err := net.ListenPacket("ip4:icmp", "0.0.0.0")
		if err != nil {
			t.Fatal(err)
		}
		defer c.Close()
		r, err := ipv4.NewRawConn(c)
		if err != nil {
			t.Fatal(err)
		}
		defer r.Close()
		if tt.src == nil {
			if err := r.JoinGroup(ifi, tt.grp); err != nil {
				t.Fatal(err)
			}
			defer r.LeaveGroup(ifi, tt.grp)
		} else {
			if err := r.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil {
				switch runtime.GOOS {
				case "freebsd", "linux":
				default: // platforms that don't support IGMPv2/3 fail here
					t.Logf("not supported on %s", runtime.GOOS)
					continue
				}
				t.Fatal(err)
			}
			defer r.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src)
		}
		if err := r.SetMulticastInterface(ifi); err != nil {
			t.Fatal(err)
		}
		if _, err := r.MulticastInterface(); err != nil {
			t.Fatal(err)
		}
		if err := r.SetMulticastLoopback(true); err != nil {
			t.Fatal(err)
		}
		if _, err := r.MulticastLoopback(); err != nil {
			t.Fatal(err)
		}
		cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface
		for i, toggle := range []bool{true, false, true} {
			wb, err := (&icmp.Message{
				Type: ipv4.ICMPTypeEcho, Code: 0,
				Body: &icmp.Echo{
					ID: os.Getpid() & 0xffff, Seq: i + 1,
					Data: []byte("HELLO-R-U-THERE"),
				},
			}).Marshal(nil)
			if err != nil {
				t.Fatal(err)
			}
			// Hand-built IPv4 header; Protocol 1 is ICMP.
			wh := &ipv4.Header{
				Version:  ipv4.Version,
				Len:      ipv4.HeaderLen,
				TOS:      i + 1,
				TotalLen: ipv4.HeaderLen + len(wb),
				Protocol: 1,
				Dst:      tt.grp.IP,
			}
			if err := r.SetControlMessage(cf, toggle); err != nil {
				if protocolNotSupported(err) {
					t.Logf("not supported on %s", runtime.GOOS)
					continue
				}
				t.Fatal(err)
			}
			r.SetMulticastTTL(i + 1)
			if err := r.WriteTo(wh, wb, nil); err != nil {
				t.Fatal(err)
			}
			rb := make([]byte, ipv4.HeaderLen+128)
			if rh, b, _, err := r.ReadFrom(rb); err != nil {
				t.Fatal(err)
			} else {
				m, err := icmp.ParseMessage(iana.ProtocolICMP, b)
				if err != nil {
					t.Fatal(err)
				}
				switch {
				case (rh.Dst.IsLoopback() || rh.Dst.IsLinkLocalUnicast() || rh.Dst.IsGlobalUnicast()) && m.Type == ipv4.ICMPTypeEchoReply && m.Code == 0: // net.inet.icmp.bmcastecho=1
				case rh.Dst.IsMulticast() && m.Type == ipv4.ICMPTypeEcho && m.Code == 0: // net.inet.icmp.bmcastecho=0
				default:
					t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0)
				}
			}
		}
	}
}
| net/ipv4/multicast_test.go/0 | {
"file_path": "net/ipv4/multicast_test.go",
"repo_id": "net",
"token_count": 4882
} | 639 |
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs defs_darwin.go
package ipv4
// The declarations below are generated by cgo -godefs and mirror Darwin's
// C struct layouts; sizes, field order, and padding are ABI-significant and
// must not be edited by hand.
const (
	sizeofSockaddrStorage = 0x80
	sizeofSockaddrInet    = 0x10
	sizeofInetPktinfo     = 0xc

	sizeofIPMreq         = 0x8
	sizeofIPMreqSource   = 0xc
	sizeofGroupReq       = 0x84
	sizeofGroupSourceReq = 0x104
)

type sockaddrStorage struct {
	Len         uint8
	Family      uint8
	X__ss_pad1  [6]int8
	X__ss_align int64
	X__ss_pad2  [112]int8
}

type sockaddrInet struct {
	Len    uint8
	Family uint8
	Port   uint16
	Addr   [4]byte /* in_addr */
	Zero   [8]int8
}

type inetPktinfo struct {
	Ifindex  uint32
	Spec_dst [4]byte /* in_addr */
	Addr     [4]byte /* in_addr */
}

type ipMreq struct {
	Multiaddr [4]byte /* in_addr */
	Interface [4]byte /* in_addr */
}

type ipMreqSource struct {
	Multiaddr  [4]byte /* in_addr */
	Sourceaddr [4]byte /* in_addr */
	Interface  [4]byte /* in_addr */
}

type groupReq struct {
	Interface uint32
	Pad_cgo_0 [128]byte
}

type groupSourceReq struct {
	Interface uint32
	Pad_cgo_0 [128]byte
	Pad_cgo_1 [128]byte
}
| net/ipv4/zsys_darwin.go/0 | {
"file_path": "net/ipv4/zsys_darwin.go",
"repo_id": "net",
"token_count": 480
} | 640 |
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
package ipv6
import "golang.org/x/net/internal/socket"
// setControlMessage enables (on=true) or disables delivery of the ancillary
// data items selected by cf, pushing each setting to the kernel via the
// platform's socket-option table and then mirroring the result in opt's
// cached flag set. Flags whose option is absent from sockOpts on this
// platform are silently skipped. opt's lock is held for the whole update so
// the cached flags stay consistent with the kernel state.
//
// Each stanza below follows the same pattern: set the socket option first,
// and only update the cached flags if the kernel accepted it.
func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error {
	opt.Lock()
	defer opt.Unlock()
	if so, ok := sockOpts[ssoReceiveTrafficClass]; ok && cf&FlagTrafficClass != 0 {
		if err := so.SetInt(c, boolint(on)); err != nil {
			return err
		}
		if on {
			opt.set(FlagTrafficClass)
		} else {
			opt.clear(FlagTrafficClass)
		}
	}
	if so, ok := sockOpts[ssoReceiveHopLimit]; ok && cf&FlagHopLimit != 0 {
		if err := so.SetInt(c, boolint(on)); err != nil {
			return err
		}
		if on {
			opt.set(FlagHopLimit)
		} else {
			opt.clear(FlagHopLimit)
		}
	}
	if so, ok := sockOpts[ssoReceivePacketInfo]; ok && cf&flagPacketInfo != 0 {
		if err := so.SetInt(c, boolint(on)); err != nil {
			return err
		}
		// flagPacketInfo may cover several ControlFlags bits, so only the
		// bits the caller actually requested are set or cleared here.
		if on {
			opt.set(cf & flagPacketInfo)
		} else {
			opt.clear(cf & flagPacketInfo)
		}
	}
	if so, ok := sockOpts[ssoReceivePathMTU]; ok && cf&FlagPathMTU != 0 {
		if err := so.SetInt(c, boolint(on)); err != nil {
			return err
		}
		if on {
			opt.set(FlagPathMTU)
		} else {
			opt.clear(FlagPathMTU)
		}
	}
	return nil
}
| net/ipv6/control_unix.go/0 | {
"file_path": "net/ipv6/control_unix.go",
"repo_id": "net",
"token_count": 575
} | 641 |
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ipv6_test
import (
"errors"
"net"
"reflect"
"runtime"
"testing"
"golang.org/x/net/ipv6"
"golang.org/x/net/nettest"
)
// icmpStringTests pairs ICMPType values with their expected String output;
// the out-of-range value 256 must stringify as "<nil>".
var icmpStringTests = []struct {
	in  ipv6.ICMPType
	out string
}{
	{ipv6.ICMPTypeDestinationUnreachable, "destination unreachable"},

	{256, "<nil>"},
}
// TestICMPString verifies that ICMPType values format as expected,
// including an out-of-range value.
func TestICMPString(t *testing.T) {
	for _, tt := range icmpStringTests {
		if got := tt.in.String(); got != tt.out {
			t.Errorf("got %s; want %s", got, tt.out)
		}
	}
}
// TestICMPFilter exercises the in-memory ICMPFilter bitmap: starting from
// both SetAll states, Accept must make WillBlock report false and Block
// must make it report true for a sample of ICMPv6 types.
func TestICMPFilter(t *testing.T) {
	switch runtime.GOOS {
	case "fuchsia", "hurd", "js", "nacl", "plan9", "wasip1", "windows":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	var f ipv6.ICMPFilter
	for _, toggle := range []bool{false, true} {
		f.SetAll(toggle)
		for _, typ := range []ipv6.ICMPType{
			ipv6.ICMPTypeDestinationUnreachable,
			ipv6.ICMPTypeEchoReply,
			ipv6.ICMPTypeNeighborSolicitation,
			ipv6.ICMPTypeDuplicateAddressConfirmation,
		} {
			f.Accept(typ)
			if f.WillBlock(typ) {
				t.Errorf("ipv6.ICMPFilter.Set(%v, false) failed", typ)
			}
			f.Block(typ)
			if !f.WillBlock(typ) {
				t.Errorf("ipv6.ICMPFilter.Set(%v, true) failed", typ)
			}
		}
	}
}
// TestSetICMPFilter pushes an ICMP filter (accept only echo request/reply)
// to the kernel via a raw ipv6-icmp socket and verifies that reading it
// back yields exactly the filter that was set.
func TestSetICMPFilter(t *testing.T) {
	if !nettest.SupportsIPv6() {
		t.Skip("ipv6 is not supported")
	}
	if !nettest.SupportsRawSocket() {
		t.Skipf("not supported on %s/%s", runtime.GOOS, runtime.GOARCH)
	}

	c, err := net.ListenPacket("ip6:ipv6-icmp", "::1")
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	p := ipv6.NewPacketConn(c)

	var f ipv6.ICMPFilter
	f.SetAll(true)
	f.Accept(ipv6.ICMPTypeEchoRequest)
	f.Accept(ipv6.ICMPTypeEchoReply)
	if err := p.SetICMPFilter(&f); errors.Is(err, ipv6.ErrNotImplemented) {
		t.Skipf("setting ICMP filter not supported: %v", err)
	} else if err != nil {
		t.Fatal(err)
	}
	kf, err := p.ICMPFilter()
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(kf, &f) {
		t.Fatalf("got %#v; want %#v", kf, f)
	}
}
| net/ipv6/icmp_test.go/0 | {
"file_path": "net/ipv6/icmp_test.go",
"repo_id": "net",
"token_count": 955
} | 642 |
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs defs_aix.go
// Added for go1.11 compatibility
//go:build aix
package ipv6
// The declarations below are generated by cgo -godefs and mirror the AIX
// kernel's C struct layouts; sizes, field order, and padding are
// ABI-significant and must not be edited by hand.
const (
	sizeofSockaddrStorage = 0x508
	sizeofSockaddrInet6   = 0x1c
	sizeofInet6Pktinfo    = 0x14
	sizeofIPv6Mtuinfo     = 0x20

	sizeofIPv6Mreq       = 0x14
	sizeofGroupReq       = 0x510
	sizeofGroupSourceReq = 0xa18

	sizeofICMPv6Filter = 0x20
)

type sockaddrStorage struct {
	X__ss_len   uint8
	Family      uint8
	X__ss_pad1  [6]uint8
	X__ss_align int64
	X__ss_pad2  [1265]uint8
	Pad_cgo_0   [7]byte
}

type sockaddrInet6 struct {
	Len      uint8
	Family   uint8
	Port     uint16
	Flowinfo uint32
	Addr     [16]byte /* in6_addr */
	Scope_id uint32
}

type inet6Pktinfo struct {
	Addr    [16]byte /* in6_addr */
	Ifindex int32
}

type ipv6Mtuinfo struct {
	Addr sockaddrInet6
	Mtu  uint32
}

type ipv6Mreq struct {
	Multiaddr [16]byte /* in6_addr */
	Interface uint32
}

type icmpv6Filter struct {
	Filt [8]uint32
}

type groupReq struct {
	Interface uint32
	Group     sockaddrStorage
}

type groupSourceReq struct {
	Interface uint32
	Group     sockaddrStorage
	Source    sockaddrStorage
}
| net/ipv6/zsys_aix_ppc64.go/0 | {
"file_path": "net/ipv6/zsys_aix_ppc64.go",
"repo_id": "net",
"token_count": 516
} | 643 |
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build solaris
package lif
import (
"fmt"
"syscall"
"testing"
)
// String implements fmt.Stringer, rendering the link's fields in key=value
// form for test logs.
func (ll *Link) String() string {
	return fmt.Sprintf("name=%s index=%d type=%d flags=%#x mtu=%d addr=%v", ll.Name, ll.Index, ll.Type, ll.Flags, ll.MTU, llAddr(ll.Addr))
}
// linkPack pairs an address family with the links reported for it.
type linkPack struct {
	af  int    // syscall.AF_* value queried
	lls []Link // links returned by Links for af
}
// linkPacks queries Links for each supported address family and collects
// the results. It returns whatever packs succeeded together with the last
// error encountered, so callers can distinguish "nothing worked" from a
// partial result.
func linkPacks() ([]linkPack, error) {
	var (
		packs   []linkPack
		lastErr error
	)
	for _, family := range []int{syscall.AF_UNSPEC, syscall.AF_INET, syscall.AF_INET6} {
		links, err := Links(family, "")
		if err != nil {
			lastErr = err
			continue
		}
		packs = append(packs, linkPack{af: family, lls: links})
	}
	return packs, lastErr
}
// TestLinks checks that querying Links by an individual link name returns
// entries consistent (same name and index) with the per-family bulk query,
// and that the per-name queries cover every link the bulk query reported.
func TestLinks(t *testing.T) {
	lps, err := linkPacks()
	if len(lps) == 0 && err != nil {
		t.Fatal(err)
	}
	for _, lp := range lps {
		n := 0
		for _, sll := range lp.lls {
			lls, err := Links(lp.af, sll.Name)
			if err != nil {
				t.Fatal(lp.af, sll.Name, err)
			}
			for _, ll := range lls {
				if ll.Name != sll.Name || ll.Index != sll.Index {
					t.Errorf("af=%s got %v; want %v", addrFamily(lp.af), &ll, &sll)
					continue
				}
				t.Logf("af=%s name=%s %v", addrFamily(lp.af), sll.Name, &ll)
				n++
			}
		}
		if n != len(lp.lls) {
			t.Errorf("af=%s got %d; want %d", addrFamily(lp.af), n, len(lp.lls))
			continue
		}
	}
}
| net/lif/link_test.go/0 | {
"file_path": "net/lif/link_test.go",
"repo_id": "net",
"token_count": 666
} | 644 |
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proxy
import (
	"context"
	"net"
	"net/netip"
	"strings"
)
// A PerHost directs connections to a default Dialer unless the host name
// requested matches one of a number of exceptions.
type PerHost struct {
	// def handles hosts that match no exception; bypass handles the rest.
	def, bypass Dialer

	bypassNetworks []*net.IPNet // CIDR ranges matched against literal IP dials
	bypassIPs      []net.IP     // individual IPs matched against literal IP dials
	bypassZones    []string     // DNS suffixes, each stored with a leading "."
	bypassHosts    []string     // exact host names, stored without a trailing "."
}
// NewPerHost returns a PerHost Dialer that directs connections to either
// defaultDialer or bypass, depending on whether the connection matches one
// of the configured rules.
func NewPerHost(defaultDialer, bypass Dialer) *PerHost {
	p := new(PerHost)
	p.def = defaultDialer
	p.bypass = bypass
	return p
}
// Dial connects to the address addr on the given network through either
// defaultDialer or bypass.
func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) {
	var host string
	if host, _, err = net.SplitHostPort(addr); err != nil {
		return nil, err
	}
	d := p.dialerForRequest(host)
	return d.Dial(network, addr)
}
// DialContext connects to the address addr on the given network through
// either defaultDialer or bypass. Dialers that do not implement
// ContextDialer are driven through the dialContext adapter.
func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.Conn, err error) {
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		return nil, err
	}
	d := p.dialerForRequest(host)
	cd, ok := d.(ContextDialer)
	if !ok {
		return dialContext(ctx, d, network, addr)
	}
	return cd.DialContext(ctx, network, addr)
}
// dialerForRequest returns the Dialer (def or bypass) that should handle a
// connection to host, which is either a literal IP address or a host name.
func (p *PerHost) dialerForRequest(host string) Dialer {
	if nip, err := netip.ParseAddr(host); err == nil {
		// The host is a literal IP address, possibly carrying an IPv6
		// zone identifier ("fe80::1%eth0"). Parsing with netip rather
		// than net.ParseIP matters: net.ParseIP rejects zoned literals,
		// which previously let a dial target such as "::1%.example.com"
		// fall through to the host-name rules below and spuriously
		// suffix-match a bypass zone (CVE-2025-22870). Any string that
		// parses as an IP is matched only against the IP rules.
		ip := net.IP(nip.AsSlice())
		for _, ipNet := range p.bypassNetworks {
			if ipNet.Contains(ip) {
				return p.bypass
			}
		}
		for _, bypassIP := range p.bypassIPs {
			if bypassIP.Equal(ip) {
				return p.bypass
			}
		}
		return p.def
	}

	for _, zone := range p.bypassZones {
		if strings.HasSuffix(host, zone) {
			return p.bypass
		}
		if host == zone[1:] {
			// For a zone ".example.com", we match "example.com"
			// too.
			return p.bypass
		}
	}
	for _, bypassHost := range p.bypassHosts {
		if bypassHost == host {
			return p.bypass
		}
	}
	return p.def
}
// AddFromString parses a string that contains comma-separated values
// specifying hosts that should use the bypass proxy. Each value is either an
// IP address, a CIDR range, a zone (*.example.com) or a host name
// (localhost). A best effort is made to parse the string and errors are
// ignored.
func (p *PerHost) AddFromString(s string) {
	for _, host := range strings.Split(s, ",") {
		host = strings.TrimSpace(host)
		if host == "" {
			continue
		}
		if strings.Contains(host, "/") {
			// We assume that it's a CIDR address like 127.0.0.0/8
			if _, ipNet, err := net.ParseCIDR(host); err == nil {
				p.AddNetwork(ipNet)
			}
			continue
		}
		if ip := net.ParseIP(host); ip != nil {
			p.AddIP(ip)
			continue
		}
		if strings.HasPrefix(host, "*.") {
			p.AddZone(host[1:])
			continue
		}
		p.AddHost(host)
	}
}
// AddIP specifies an IP address that will use the bypass proxy. Note that
// this will only take effect if a literal IP address is dialed. A connection
// to a named host will never match an IP.
func (p *PerHost) AddIP(ip net.IP) {
	p.bypassIPs = append(p.bypassIPs, ip)
}

// AddNetwork specifies an IP range that will use the bypass proxy. Note that
// this will only take effect if a literal IP address is dialed. A connection
// to a named host will never match.
//
// NOTE(review): the parameter name shadows the net package inside this
// function; consider renaming it (e.g. ipNet) on a future cleanup.
func (p *PerHost) AddNetwork(net *net.IPNet) {
	p.bypassNetworks = append(p.bypassNetworks, net)
}

// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
// "example.com" matches "example.com" and all of its subdomains.
// The zone is normalized before storage — trailing dot removed, leading dot
// ensured — so dialerForRequest can suffix-match it directly.
func (p *PerHost) AddZone(zone string) {
	zone = strings.TrimSuffix(zone, ".")
	if !strings.HasPrefix(zone, ".") {
		zone = "." + zone
	}
	p.bypassZones = append(p.bypassZones, zone)
}

// AddHost specifies a host name that will use the bypass proxy.
// A trailing dot (fully-qualified form) is stripped so "example.com." and
// "example.com" behave identically.
func (p *PerHost) AddHost(host string) {
	host = strings.TrimSuffix(host, ".")
	p.bypassHosts = append(p.bypassHosts, host)
}
| net/proxy/per_host.go/0 | {
"file_path": "net/proxy/per_host.go",
"repo_id": "net",
"token_count": 1484
} | 645 |
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.21
package quic
import (
"time"
)
// ackState tracks packets received from a peer within a number space.
// It handles packet deduplication (don't process the same packet twice) and
// determines the timing and content of ACK frames.
type ackState struct {
	// seen holds the received packet numbers as disjoint ranges; the oldest
	// ranges are pruned over time (see receive and handleAck).
	seen rangeset[packetNumber]

	// The time at which we must send an ACK frame, even if we have no other data to send.
	// A zero value means no ACK is currently scheduled.
	nextAck time.Time

	// The time we received the largest-numbered packet in seen.
	maxRecvTime time.Time

	// The largest-numbered ack-eliciting packet in seen.
	maxAckEliciting packetNumber

	// The number of ack-eliciting packets in seen that we have not yet acknowledged.
	unackedAckEliciting int
}
// shouldProcess reports whether a packet should be handled or discarded.
func (acks *ackState) shouldProcess(num packetNumber) bool {
	switch {
	case packetNumber(acks.seen.min()) > num:
		// We've discarded the state for this range of packet numbers.
		// Discard the packet rather than potentially processing a duplicate.
		// https://www.rfc-editor.org/rfc/rfc9000.html#section-13.2.3-5
		return false
	case acks.seen.contains(num):
		// Discard duplicate packets.
		return false
	default:
		return true
	}
}
// receive records receipt of a packet.
// space is the packet's number space, num its packet number, and
// ackEliciting whether it requires acknowledgement. Receiving an
// ack-eliciting packet may schedule an ACK (acks.nextAck), either
// immediately or delayed by up to max_ack_delay.
func (acks *ackState) receive(now time.Time, space numberSpace, num packetNumber, ackEliciting bool) {
	if ackEliciting {
		acks.unackedAckEliciting++
		if acks.mustAckImmediately(space, num) {
			acks.nextAck = now
		} else if acks.nextAck.IsZero() {
			// This packet does not need to be acknowledged immediately,
			// but the ack must not be intentionally delayed by more than
			// the max_ack_delay transport parameter we sent to the peer.
			//
			// We always delay acks by the maximum allowed, less the timer
			// granularity. ("[max_ack_delay] SHOULD include the receiver's
			// expected delays in alarms firing.")
			//
			// https://www.rfc-editor.org/rfc/rfc9000#section-18.2-4.28.1
			acks.nextAck = now.Add(maxAckDelay - timerGranularity)
		}
		if num > acks.maxAckEliciting {
			acks.maxAckEliciting = num
		}
	}

	acks.seen.add(num, num+1)
	if num == acks.seen.max() {
		acks.maxRecvTime = now
	}

	// Limit the total number of ACK ranges by dropping older ranges.
	//
	// Remembering more ranges results in larger ACK frames.
	//
	// Remembering a large number of ranges could result in ACK frames becoming
	// too large to fit in a packet, in which case we will silently drop older
	// ranges during packet construction.
	//
	// Remembering fewer ranges can result in unnecessary retransmissions,
	// since we cannot accept packets older than the oldest remembered range.
	//
	// The limit here is completely arbitrary. If it seems wrong, it probably is.
	//
	// https://www.rfc-editor.org/rfc/rfc9000#section-13.2.3
	const maxAckRanges = 8
	if overflow := acks.seen.numRanges() - maxAckRanges; overflow > 0 {
		acks.seen.removeranges(0, overflow)
	}
}
// mustAckImmediately reports whether an ack-eliciting packet must be acknowledged immediately,
// or whether the ack may be deferred.
// Only Application Data packets may ever be deferred; in that space a packet
// still forces an immediate ack when it is out of order or when enough
// ack-eliciting packets have accumulated.
func (acks *ackState) mustAckImmediately(space numberSpace, num packetNumber) bool {
	// https://www.rfc-editor.org/rfc/rfc9000.html#section-13.2.1
	if space != appDataSpace {
		// "[...] all ack-eliciting Initial and Handshake packets [...]"
		// https://www.rfc-editor.org/rfc/rfc9000.html#section-13.2.1-2
		return true
	}
	if num < acks.maxAckEliciting {
		// "[...] when the received packet has a packet number less than another
		// ack-eliciting packet that has been received [...]"
		// https://www.rfc-editor.org/rfc/rfc9000.html#section-13.2.1-8.1
		return true
	}
	if acks.seen.rangeContaining(acks.maxAckEliciting).end != num {
		// "[...] when the packet has a packet number larger than the highest-numbered
		// ack-eliciting packet that has been received and there are missing packets
		// between that packet and this packet."
		// https://www.rfc-editor.org/rfc/rfc9000.html#section-13.2.1-8.2
		//
		// This case is a bit tricky. Let's say we've received:
		//   0, ack-eliciting
		//   1, ack-eliciting
		//   3, NOT ack eliciting
		//
		// We have sent ACKs for 0 and 1. If we receive ack-eliciting packet 2,
		// we do not need to send an immediate ACK, because there are no missing
		// packets between it and the highest-numbered ack-eliciting packet (1).
		// If we receive ack-eliciting packet 4, we do need to send an immediate ACK,
		// because there's a gap (the missing packet 2).
		//
		// We check for this by looking up the ACK range which contains the
		// highest-numbered ack-eliciting packet: [0, 1) in the above example.
		// If the range ends just before the packet we are now processing,
		// there are no gaps. If it does not, there must be a gap.
		return true
	}
	// "[...] SHOULD send an ACK frame after receiving at least two ack-eliciting packets."
	// https://www.rfc-editor.org/rfc/rfc9000.html#section-13.2.2
	//
	// This ack frequency takes a substantial toll on performance, however.
	// Follow the behavior of Google QUICHE:
	// Ack every other packet for the first 100 packets, and then ack every 10th packet.
	// This keeps ack frequency high during the beginning of slow start when CWND is
	// increasing rapidly.
	packetsBeforeAck := 2
	if acks.seen.max() > 100 {
		packetsBeforeAck = 10
	}
	return acks.unackedAckEliciting >= packetsBeforeAck
}
// shouldSendAck reports whether the connection should send an ACK frame at
// this time, in an ACK-only packet if necessary. That is the case when an
// ACK deadline is scheduled and has already been reached.
func (acks *ackState) shouldSendAck(now time.Time) bool {
	if acks.nextAck.IsZero() {
		return false // no ACK is scheduled
	}
	return !acks.nextAck.After(now)
}
// acksToSend returns the set of packet numbers to ACK at this time, and the current ack delay.
// It may return acks even if shouldSendAck returns false, when there are unacked
// ack-eliciting packets whose ack is being delayed.
// The returned rangeset is acks.seen itself, not a copy.
func (acks *ackState) acksToSend(now time.Time) (nums rangeset[packetNumber], ackDelay time.Duration) {
	if acks.nextAck.IsZero() && acks.unackedAckEliciting == 0 {
		return nil, 0
	}
	// "[...] the delays intentionally introduced between the time the packet with the
	// largest packet number is received and the time an acknowledgement is sent."
	// https://www.rfc-editor.org/rfc/rfc9000#section-13.2.5-1
	delay := now.Sub(acks.maxRecvTime)
	if delay < 0 {
		// Guard against clock adjustments; never report a negative delay.
		delay = 0
	}
	return acks.seen, delay
}
// sentAck records that an ACK frame has been sent.
func (acks *ackState) sentAck() {
	acks.nextAck = time.Time{}   // no ACK deadline pending anymore
	acks.unackedAckEliciting = 0 // everything seen so far has been acknowledged
}
// handleAck records that an ack has been received for a ACK frame we sent
// containing the given Largest Acknowledged field.
func (acks *ackState) handleAck(largestAcked packetNumber) {
	// We can stop acking packets less or equal to largestAcked.
	// https://www.rfc-editor.org/rfc/rfc9000.html#section-13.2.4-1
	//
	// We rely on acks.seen containing the largest packet number that has been
	// successfully processed, so we retain the range containing largestAcked
	// and discard previous ones.
	keepFrom := acks.seen.rangeContaining(largestAcked).start
	acks.seen.sub(0, keepFrom)
}
// largestSeen reports the largest seen packet.
// NOTE(review): presumably requires seen to be non-empty — rangeset.max's
// behavior on an empty set is not visible here; confirm at call sites.
func (acks *ackState) largestSeen() packetNumber {
	return acks.seen.max()
}
| net/quic/acks.go/0 | {
"file_path": "net/quic/acks.go",
"repo_id": "net",
"token_count": 2406
} | 646 |
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.21
package quic
import "fmt"
// handleAckOrLoss deals with the final fate of a packet we sent:
// Either the peer acknowledges it, or we declare it lost.
//
// In order to handle packet loss, we must retain any information sent to the peer
// until the peer has acknowledged it.
//
// When information is acknowledged, we can discard it.
//
// When information is lost, we mark it for retransmission.
// See RFC 9000, Section 13.3 for a complete list of information which is retransmitted on loss.
// https://www.rfc-editor.org/rfc/rfc9000#section-13.3
func (c *Conn) handleAckOrLoss(space numberSpace, sent *sentPacket, fate packetFate) {
	if fate == packetLost && c.logEnabled(QLogLevelPacket) {
		c.logPacketLost(space, sent)
	}
	// The list of frames in a sent packet is marshaled into a buffer in the sentPacket
	// by the packetWriter. Unmarshal that buffer here. This code must be kept in sync with
	// packetWriter.append*.
	//
	// A sent packet meets its fate (acked or lost) only once, so it's okay to consume
	// the sentPacket's buffer here.
	//
	// Fields after each frame type byte (ints, ranges) must be consumed in
	// exactly the order packetWriter.append* recorded them.
	for !sent.done() {
		switch f := sent.next(); f {
		default:
			panic(fmt.Sprintf("BUG: unhandled acked/lost frame type %x", f))
		case frameTypeAck:
			// Unlike most information, loss of an ACK frame does not trigger
			// retransmission. ACKs are sent in response to ack-eliciting packets,
			// and always contain the latest information available.
			//
			// Acknowledgement of an ACK frame may allow us to discard information
			// about older packets.
			largest := packetNumber(sent.nextInt())
			if fate == packetAcked {
				c.acks[space].handleAck(largest)
			}
		case frameTypeCrypto:
			start, end := sent.nextRange()
			c.crypto[space].ackOrLoss(start, end, fate)
		case frameTypeMaxData:
			c.ackOrLossMaxData(sent.num, fate)
		case frameTypeResetStream,
			frameTypeStopSending,
			frameTypeMaxStreamData,
			frameTypeStreamDataBlocked:
			// Stream-scoped control frames: the stream may have been
			// fully closed and removed since this packet was sent.
			id := streamID(sent.nextInt())
			s := c.streamForID(id)
			if s == nil {
				continue
			}
			s.ackOrLoss(sent.num, f, fate)
		case frameTypeStreamBase,
			frameTypeStreamBase | streamFinBit:
			// STREAM frame: id, data range, and whether FIN was set.
			id := streamID(sent.nextInt())
			start, end := sent.nextRange()
			s := c.streamForID(id)
			if s == nil {
				continue
			}
			fin := f&streamFinBit != 0
			s.ackOrLossData(sent.num, start, end, fin, fate)
		case frameTypeMaxStreamsBidi:
			c.streams.remoteLimit[bidiStream].sendMax.ackLatestOrLoss(sent.num, fate)
		case frameTypeMaxStreamsUni:
			c.streams.remoteLimit[uniStream].sendMax.ackLatestOrLoss(sent.num, fate)
		case frameTypeNewConnectionID:
			seq := int64(sent.nextInt())
			c.connIDState.ackOrLossNewConnectionID(sent.num, seq, fate)
		case frameTypeRetireConnectionID:
			seq := int64(sent.nextInt())
			c.connIDState.ackOrLossRetireConnectionID(sent.num, seq, fate)
		case frameTypeHandshakeDone:
			c.handshakeConfirmed.ackOrLoss(sent.num, fate)
		}
	}
}
| net/quic/conn_loss.go/0 | {
"file_path": "net/quic/conn_loss.go",
"repo_id": "net",
"token_count": 1093
} | 647 |
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.21
package quic
import (
"fmt"
"log/slog"
"strconv"
"time"
)
// A debugFrame is a representation of the contents of a QUIC frame,
// used for debug logs and testing but not the primary serving path.
type debugFrame interface {
	// String returns a compact one-line rendering of the frame.
	String() string
	// write appends the frame to a packet under construction,
	// reporting whether it could be appended.
	write(w *packetWriter) bool
	// LogValue renders the frame for structured (slog/qlog) logging.
	LogValue() slog.Value
}
// parseDebugFrame parses the first frame in b, dispatching on the frame type
// byte. It returns the parsed frame and the number of bytes consumed, or
// (nil, -1) if b is empty or begins with an unrecognized frame type.
func parseDebugFrame(b []byte) (f debugFrame, n int) {
	if len(b) == 0 {
		return nil, -1
	}
	switch b[0] {
	case frameTypePadding:
		f, n = parseDebugFramePadding(b)
	case frameTypePing:
		f, n = parseDebugFramePing(b)
	case frameTypeAck, frameTypeAckECN:
		f, n = parseDebugFrameAck(b)
	case frameTypeResetStream:
		f, n = parseDebugFrameResetStream(b)
	case frameTypeStopSending:
		f, n = parseDebugFrameStopSending(b)
	case frameTypeCrypto:
		f, n = parseDebugFrameCrypto(b)
	case frameTypeNewToken:
		f, n = parseDebugFrameNewToken(b)
	case frameTypeStreamBase, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f:
		// STREAM frames occupy types 0x08-0x0f; the low bits carry
		// per-frame flags (e.g. streamFinBit).
		f, n = parseDebugFrameStream(b)
	case frameTypeMaxData:
		f, n = parseDebugFrameMaxData(b)
	case frameTypeMaxStreamData:
		f, n = parseDebugFrameMaxStreamData(b)
	case frameTypeMaxStreamsBidi, frameTypeMaxStreamsUni:
		f, n = parseDebugFrameMaxStreams(b)
	case frameTypeDataBlocked:
		f, n = parseDebugFrameDataBlocked(b)
	case frameTypeStreamDataBlocked:
		f, n = parseDebugFrameStreamDataBlocked(b)
	case frameTypeStreamsBlockedBidi, frameTypeStreamsBlockedUni:
		f, n = parseDebugFrameStreamsBlocked(b)
	case frameTypeNewConnectionID:
		f, n = parseDebugFrameNewConnectionID(b)
	case frameTypeRetireConnectionID:
		f, n = parseDebugFrameRetireConnectionID(b)
	case frameTypePathChallenge:
		f, n = parseDebugFramePathChallenge(b)
	case frameTypePathResponse:
		f, n = parseDebugFramePathResponse(b)
	case frameTypeConnectionCloseTransport:
		f, n = parseDebugFrameConnectionCloseTransport(b)
	case frameTypeConnectionCloseApplication:
		f, n = parseDebugFrameConnectionCloseApplication(b)
	case frameTypeHandshakeDone:
		f, n = parseDebugFrameHandshakeDone(b)
	default:
		return nil, -1
	}
	return f, n
}
// debugFramePadding is a sequence of PADDING frames.
type debugFramePadding struct {
	size int
	to   int // alternate for writing packets: pad to
}

func parseDebugFramePadding(b []byte) (f debugFramePadding, n int) {
	// Coalesce a run of consecutive PADDING bytes into a single frame.
	for n < len(b) && b[n] == frameTypePadding {
		n++
	}
	f.size = n
	return f, n
}

func (f debugFramePadding) String() string {
	return fmt.Sprintf("PADDING*%v", f.size)
}

func (f debugFramePadding) write(w *packetWriter) bool {
	if w.avail() == 0 {
		return false
	}
	if f.to > 0 {
		// Pad-to-size mode: let the packetWriter fill up to the target length.
		w.appendPaddingTo(f.to)
		return true
	}
	// Otherwise emit size padding bytes, truncated to the space available.
	for i := 0; i < f.size && w.avail() > 0; i++ {
		w.b = append(w.b, frameTypePadding)
	}
	return true
}

func (f debugFramePadding) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("frame_type", "padding"),
		slog.Int("length", f.size),
	)
}
// debugFramePing is a PING frame.
type debugFramePing struct{}

func parseDebugFramePing(b []byte) (f debugFramePing, n int) {
	// PING is a single type byte with no payload.
	return f, 1
}

func (f debugFramePing) String() string {
	return "PING"
}

func (f debugFramePing) write(w *packetWriter) bool {
	return w.appendPingFrame()
}

func (f debugFramePing) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("frame_type", "ping"),
	)
}
// debugFrameAck is an ACK frame.
type debugFrameAck struct {
	ackDelay unscaledAckDelay
	ranges   []i64range[packetNumber]
}

func parseDebugFrameAck(b []byte) (f debugFrameAck, n int) {
	f.ranges = nil
	_, f.ackDelay, n = consumeAckFrame(b, func(_ int, start, end packetNumber) {
		f.ranges = append(f.ranges, i64range[packetNumber]{
			start: start,
			end:   end,
		})
	})
	// Ranges are parsed high to low; reverse ranges slice to order them low to high.
	//
	// Fix: the swap partner must shrink as i advances (len-1-i). The previous
	// fixed j == len-1 rotated rather than reversed slices of length >= 4
	// (e.g. [a b c d] became [d a c b] instead of [d c b a]).
	for i := 0; i < len(f.ranges)/2; i++ {
		j := len(f.ranges) - 1 - i
		f.ranges[i], f.ranges[j] = f.ranges[j], f.ranges[i]
	}
	return f, n
}

func (f debugFrameAck) String() string {
	s := fmt.Sprintf("ACK Delay=%v", f.ackDelay)
	for _, r := range f.ranges {
		s += fmt.Sprintf(" [%v,%v)", r.start, r.end)
	}
	return s
}

func (f debugFrameAck) write(w *packetWriter) bool {
	return w.appendAckFrame(rangeset[packetNumber](f.ranges), f.ackDelay)
}

func (f debugFrameAck) LogValue() slog.Value {
	// ACK frames are logged via debugFrameScaledAck, which carries the
	// delay as a time.Duration; reaching this method is a programming error.
	return slog.StringValue("error: debugFrameAck should not appear as a slog Value")
}
// debugFrameScaledAck is an ACK frame with scaled ACK Delay.
//
// This type is used in qlog events, which need access to the delay as a duration.
type debugFrameScaledAck struct {
	ackDelay time.Duration
	ranges   []i64range[packetNumber]
}

func (f debugFrameScaledAck) LogValue() slog.Value {
	var ackDelay slog.Attr
	if f.ackDelay >= 0 {
		// A negative delay is left as a zero Attr, which the qlog writer
		// serializes to nothing (see TestJSONWriterAttrEmpty).
		ackDelay = slog.Duration("ack_delay", f.ackDelay)
	}
	return slog.GroupValue(
		slog.String("frame_type", "ack"),
		// Rather than trying to convert the ack ranges into the slog data model,
		// pass a value that can JSON-encode itself.
		slog.Any("acked_ranges", debugAckRanges(f.ranges)),
		ackDelay,
	)
}
// debugAckRanges is a list of acked packet-number ranges that knows how to
// JSON-encode itself for qlog output.
type debugAckRanges []i64range[packetNumber]

// AppendJSON appends a JSON encoding of the ack ranges to b, and returns it.
// This is different than the standard json.Marshaler, but more efficient.
// Since we only use this in cooperation with the qlog package,
// encoding/json compatibility is irrelevant.
func (r debugAckRanges) AppendJSON(b []byte) []byte {
	b = append(b, '[')
	for i, ar := range r {
		start, end := ar.start, ar.end-1 // qlog ranges are closed-closed
		if i != 0 {
			b = append(b, ',')
		}
		b = append(b, '[')
		b = strconv.AppendInt(b, int64(start), 10)
		if start != end {
			// A single-packet range collapses to [n] rather than [n,n].
			b = append(b, ',')
			b = strconv.AppendInt(b, int64(end), 10)
		}
		b = append(b, ']')
	}
	b = append(b, ']')
	return b
}

// String returns the same JSON text as AppendJSON, for debugging.
func (r debugAckRanges) String() string {
	return string(r.AppendJSON(nil))
}
// debugFrameResetStream is a RESET_STREAM frame.
type debugFrameResetStream struct {
	id        streamID
	code      uint64
	finalSize int64
}

func parseDebugFrameResetStream(b []byte) (f debugFrameResetStream, n int) {
	f.id, f.code, f.finalSize, n = consumeResetStreamFrame(b)
	return f, n
}

func (f debugFrameResetStream) String() string {
	return fmt.Sprintf("RESET_STREAM ID=%v Code=%v FinalSize=%v", f.id, f.code, f.finalSize)
}

func (f debugFrameResetStream) write(w *packetWriter) bool {
	return w.appendResetStreamFrame(f.id, f.code, f.finalSize)
}

func (f debugFrameResetStream) LogValue() slog.Value {
	// Consistency fix: sibling frames carrying an error code (STOP_SENDING,
	// CONNECTION_CLOSE) log it, and the qlog schema defines an error_code
	// field for reset_stream; previously the code was silently dropped here.
	return slog.GroupValue(
		slog.String("frame_type", "reset_stream"),
		slog.Uint64("stream_id", uint64(f.id)),
		slog.Uint64("error_code", uint64(f.code)),
		slog.Uint64("final_size", uint64(f.finalSize)),
	)
}
// debugFrameStopSending is a STOP_SENDING frame.
type debugFrameStopSending struct {
	id   streamID
	code uint64 // application error code
}

func parseDebugFrameStopSending(b []byte) (f debugFrameStopSending, n int) {
	f.id, f.code, n = consumeStopSendingFrame(b)
	return f, n
}

func (f debugFrameStopSending) String() string {
	return fmt.Sprintf("STOP_SENDING ID=%v Code=%v", f.id, f.code)
}

func (f debugFrameStopSending) write(w *packetWriter) bool {
	return w.appendStopSendingFrame(f.id, f.code)
}

func (f debugFrameStopSending) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("frame_type", "stop_sending"),
		slog.Uint64("stream_id", uint64(f.id)),
		slog.Uint64("error_code", uint64(f.code)),
	)
}
// debugFrameCrypto is a CRYPTO frame.
type debugFrameCrypto struct {
	off  int64
	data []byte
}

func parseDebugFrameCrypto(b []byte) (f debugFrameCrypto, n int) {
	f.off, f.data, n = consumeCryptoFrame(b)
	return f, n
}

func (f debugFrameCrypto) String() string {
	return fmt.Sprintf("CRYPTO Offset=%v Length=%v", f.off, len(f.data))
}

func (f debugFrameCrypto) write(w *packetWriter) bool {
	// The writer reserves space for the payload; copy it in afterwards.
	b, added := w.appendCryptoFrame(f.off, len(f.data))
	copy(b, f.data)
	return added
}

func (f debugFrameCrypto) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("frame_type", "crypto"),
		slog.Int64("offset", f.off),
		slog.Int("length", len(f.data)),
	)
}
// debugFrameNewToken is a NEW_TOKEN frame.
type debugFrameNewToken struct {
	token []byte
}

func parseDebugFrameNewToken(b []byte) (f debugFrameNewToken, n int) {
	f.token, n = consumeNewTokenFrame(b)
	return f, n
}

func (f debugFrameNewToken) String() string {
	// Token bytes are rendered in hex.
	return fmt.Sprintf("NEW_TOKEN Token=%x", f.token)
}

func (f debugFrameNewToken) write(w *packetWriter) bool {
	return w.appendNewTokenFrame(f.token)
}

func (f debugFrameNewToken) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("frame_type", "new_token"),
		slogHexstring("token", f.token),
	)
}
// debugFrameStream is a STREAM frame.
type debugFrameStream struct {
	id   streamID
	fin  bool
	off  int64
	data []byte
}

func parseDebugFrameStream(b []byte) (f debugFrameStream, n int) {
	f.id, f.off, f.fin, f.data, n = consumeStreamFrame(b)
	return f, n
}

func (f debugFrameStream) String() string {
	fin := ""
	if f.fin {
		fin = " FIN"
	}
	return fmt.Sprintf("STREAM ID=%v%v Offset=%v Length=%v", f.id, fin, f.off, len(f.data))
}

func (f debugFrameStream) write(w *packetWriter) bool {
	// The writer reserves space for the payload; copy it in afterwards.
	b, added := w.appendStreamFrame(f.id, f.off, len(f.data), f.fin)
	copy(b, f.data)
	return added
}

func (f debugFrameStream) LogValue() slog.Value {
	var fin slog.Attr
	if f.fin {
		// A zero Attr serializes to nothing in the qlog writer,
		// so "fin" only appears when the bit is set.
		fin = slog.Bool("fin", true)
	}
	return slog.GroupValue(
		slog.String("frame_type", "stream"),
		slog.Uint64("stream_id", uint64(f.id)),
		slog.Int64("offset", f.off),
		slog.Int("length", len(f.data)),
		fin,
	)
}
// debugFrameMaxData is a MAX_DATA frame.
type debugFrameMaxData struct {
	max int64 // connection-level flow control limit
}

func parseDebugFrameMaxData(b []byte) (f debugFrameMaxData, n int) {
	f.max, n = consumeMaxDataFrame(b)
	return f, n
}

func (f debugFrameMaxData) String() string {
	return fmt.Sprintf("MAX_DATA Max=%v", f.max)
}

func (f debugFrameMaxData) write(w *packetWriter) bool {
	return w.appendMaxDataFrame(f.max)
}

func (f debugFrameMaxData) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("frame_type", "max_data"),
		slog.Int64("maximum", f.max),
	)
}
// debugFrameMaxStreamData is a MAX_STREAM_DATA frame.
type debugFrameMaxStreamData struct {
	id  streamID
	max int64 // per-stream flow control limit
}

func parseDebugFrameMaxStreamData(b []byte) (f debugFrameMaxStreamData, n int) {
	f.id, f.max, n = consumeMaxStreamDataFrame(b)
	return f, n
}

func (f debugFrameMaxStreamData) String() string {
	return fmt.Sprintf("MAX_STREAM_DATA ID=%v Max=%v", f.id, f.max)
}

func (f debugFrameMaxStreamData) write(w *packetWriter) bool {
	return w.appendMaxStreamDataFrame(f.id, f.max)
}

func (f debugFrameMaxStreamData) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("frame_type", "max_stream_data"),
		slog.Uint64("stream_id", uint64(f.id)),
		slog.Int64("maximum", f.max),
	)
}
// debugFrameMaxStreams is a MAX_STREAMS frame.
type debugFrameMaxStreams struct {
	streamType streamType // bidirectional or unidirectional
	max        int64
}

func parseDebugFrameMaxStreams(b []byte) (f debugFrameMaxStreams, n int) {
	f.streamType, f.max, n = consumeMaxStreamsFrame(b)
	return f, n
}

func (f debugFrameMaxStreams) String() string {
	return fmt.Sprintf("MAX_STREAMS Type=%v Max=%v", f.streamType, f.max)
}

func (f debugFrameMaxStreams) write(w *packetWriter) bool {
	return w.appendMaxStreamsFrame(f.streamType, f.max)
}

func (f debugFrameMaxStreams) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("frame_type", "max_streams"),
		slog.String("stream_type", f.streamType.qlogString()),
		slog.Int64("maximum", f.max),
	)
}
// debugFrameDataBlocked is a DATA_BLOCKED frame.
type debugFrameDataBlocked struct {
	max int64 // the connection-level limit at which sending was blocked
}

func parseDebugFrameDataBlocked(b []byte) (f debugFrameDataBlocked, n int) {
	f.max, n = consumeDataBlockedFrame(b)
	return f, n
}

func (f debugFrameDataBlocked) String() string {
	return fmt.Sprintf("DATA_BLOCKED Max=%v", f.max)
}

func (f debugFrameDataBlocked) write(w *packetWriter) bool {
	return w.appendDataBlockedFrame(f.max)
}

func (f debugFrameDataBlocked) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("frame_type", "data_blocked"),
		slog.Int64("limit", f.max),
	)
}
// debugFrameStreamDataBlocked is a STREAM_DATA_BLOCKED frame.
type debugFrameStreamDataBlocked struct {
	id  streamID
	max int64 // the per-stream limit at which sending was blocked
}

func parseDebugFrameStreamDataBlocked(b []byte) (f debugFrameStreamDataBlocked, n int) {
	f.id, f.max, n = consumeStreamDataBlockedFrame(b)
	return f, n
}

func (f debugFrameStreamDataBlocked) String() string {
	return fmt.Sprintf("STREAM_DATA_BLOCKED ID=%v Max=%v", f.id, f.max)
}

func (f debugFrameStreamDataBlocked) write(w *packetWriter) bool {
	return w.appendStreamDataBlockedFrame(f.id, f.max)
}

func (f debugFrameStreamDataBlocked) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("frame_type", "stream_data_blocked"),
		slog.Uint64("stream_id", uint64(f.id)),
		slog.Int64("limit", f.max),
	)
}
// debugFrameStreamsBlocked is a STREAMS_BLOCKED frame.
type debugFrameStreamsBlocked struct {
	streamType streamType // bidirectional or unidirectional
	max        int64
}

func parseDebugFrameStreamsBlocked(b []byte) (f debugFrameStreamsBlocked, n int) {
	f.streamType, f.max, n = consumeStreamsBlockedFrame(b)
	return f, n
}

func (f debugFrameStreamsBlocked) String() string {
	return fmt.Sprintf("STREAMS_BLOCKED Type=%v Max=%v", f.streamType, f.max)
}

func (f debugFrameStreamsBlocked) write(w *packetWriter) bool {
	return w.appendStreamsBlockedFrame(f.streamType, f.max)
}

func (f debugFrameStreamsBlocked) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("frame_type", "streams_blocked"),
		slog.String("stream_type", f.streamType.qlogString()),
		slog.Int64("limit", f.max),
	)
}
// debugFrameNewConnectionID is a NEW_CONNECTION_ID frame.
type debugFrameNewConnectionID struct {
	seq           int64
	retirePriorTo int64
	connID        []byte
	token         statelessResetToken
}

func parseDebugFrameNewConnectionID(b []byte) (f debugFrameNewConnectionID, n int) {
	f.seq, f.retirePriorTo, f.connID, f.token, n = consumeNewConnectionIDFrame(b)
	return f, n
}

func (f debugFrameNewConnectionID) String() string {
	return fmt.Sprintf("NEW_CONNECTION_ID Seq=%v Retire=%v ID=%x Token=%x", f.seq, f.retirePriorTo, f.connID, f.token[:])
}

func (f debugFrameNewConnectionID) write(w *packetWriter) bool {
	return w.appendNewConnectionIDFrame(f.seq, f.retirePriorTo, f.connID, f.token)
}

func (f debugFrameNewConnectionID) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("frame_type", "new_connection_id"),
		slog.Int64("sequence_number", f.seq),
		slog.Int64("retire_prior_to", f.retirePriorTo),
		slogHexstring("connection_id", f.connID),
		slogHexstring("stateless_reset_token", f.token[:]),
	)
}
// debugFrameRetireConnectionID is a RETIRE_CONNECTION_ID frame.
// (Comment previously said NEW_CONNECTION_ID — copy-paste error.)
type debugFrameRetireConnectionID struct {
	seq int64
}

func parseDebugFrameRetireConnectionID(b []byte) (f debugFrameRetireConnectionID, n int) {
	f.seq, n = consumeRetireConnectionIDFrame(b)
	return f, n
}

func (f debugFrameRetireConnectionID) String() string {
	return fmt.Sprintf("RETIRE_CONNECTION_ID Seq=%v", f.seq)
}

func (f debugFrameRetireConnectionID) write(w *packetWriter) bool {
	return w.appendRetireConnectionIDFrame(f.seq)
}

func (f debugFrameRetireConnectionID) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("frame_type", "retire_connection_id"),
		slog.Int64("sequence_number", f.seq),
	)
}
// debugFramePathChallenge is a PATH_CHALLENGE frame.
type debugFramePathChallenge struct {
	data pathChallengeData
}

func parseDebugFramePathChallenge(b []byte) (f debugFramePathChallenge, n int) {
	f.data, n = consumePathChallengeFrame(b)
	return f, n
}

func (f debugFramePathChallenge) String() string {
	return fmt.Sprintf("PATH_CHALLENGE Data=%x", f.data)
}

func (f debugFramePathChallenge) write(w *packetWriter) bool {
	return w.appendPathChallengeFrame(f.data)
}

func (f debugFramePathChallenge) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("frame_type", "path_challenge"),
		// Challenge data is logged as a hex string.
		slog.String("data", fmt.Sprintf("%x", f.data)),
	)
}
// debugFramePathResponse is a PATH_RESPONSE frame.
type debugFramePathResponse struct {
	data pathChallengeData // echoes the data of the PATH_CHALLENGE being answered
}

func parseDebugFramePathResponse(b []byte) (f debugFramePathResponse, n int) {
	f.data, n = consumePathResponseFrame(b)
	return f, n
}

func (f debugFramePathResponse) String() string {
	return fmt.Sprintf("PATH_RESPONSE Data=%x", f.data)
}

func (f debugFramePathResponse) write(w *packetWriter) bool {
	return w.appendPathResponseFrame(f.data)
}

func (f debugFramePathResponse) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("frame_type", "path_response"),
		slog.String("data", fmt.Sprintf("%x", f.data)),
	)
}
// debugFrameConnectionCloseTransport is a CONNECTION_CLOSE frame carrying a transport error.
type debugFrameConnectionCloseTransport struct {
	code      transportError
	frameType uint64 // type of the frame that triggered the error, if any
	reason    string
}

func parseDebugFrameConnectionCloseTransport(b []byte) (f debugFrameConnectionCloseTransport, n int) {
	f.code, f.frameType, f.reason, n = consumeConnectionCloseTransportFrame(b)
	return f, n
}

func (f debugFrameConnectionCloseTransport) String() string {
	// FrameType and Reason are only shown when present.
	s := fmt.Sprintf("CONNECTION_CLOSE Code=%v", f.code)
	if f.frameType != 0 {
		s += fmt.Sprintf(" FrameType=%v", f.frameType)
	}
	if f.reason != "" {
		s += fmt.Sprintf(" Reason=%q", f.reason)
	}
	return s
}

func (f debugFrameConnectionCloseTransport) write(w *packetWriter) bool {
	return w.appendConnectionCloseTransportFrame(f.code, f.frameType, f.reason)
}

func (f debugFrameConnectionCloseTransport) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("frame_type", "connection_close"),
		slog.String("error_space", "transport"),
		slog.Uint64("error_code_value", uint64(f.code)),
		slog.String("reason", f.reason),
	)
}
// debugFrameConnectionCloseApplication is a CONNECTION_CLOSE frame carrying an application error.
type debugFrameConnectionCloseApplication struct {
	code   uint64
	reason string
}

func parseDebugFrameConnectionCloseApplication(b []byte) (f debugFrameConnectionCloseApplication, n int) {
	f.code, f.reason, n = consumeConnectionCloseApplicationFrame(b)
	return f, n
}

func (f debugFrameConnectionCloseApplication) String() string {
	// Reason is only shown when non-empty.
	s := fmt.Sprintf("CONNECTION_CLOSE AppCode=%v", f.code)
	if f.reason != "" {
		s += fmt.Sprintf(" Reason=%q", f.reason)
	}
	return s
}

func (f debugFrameConnectionCloseApplication) write(w *packetWriter) bool {
	return w.appendConnectionCloseApplicationFrame(f.code, f.reason)
}

func (f debugFrameConnectionCloseApplication) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("frame_type", "connection_close"),
		slog.String("error_space", "application"),
		slog.Uint64("error_code_value", uint64(f.code)),
		slog.String("reason", f.reason),
	)
}
// debugFrameHandshakeDone is a HANDSHAKE_DONE frame.
type debugFrameHandshakeDone struct{}

func parseDebugFrameHandshakeDone(b []byte) (f debugFrameHandshakeDone, n int) {
	// HANDSHAKE_DONE is a single type byte with no payload.
	return f, 1
}

func (f debugFrameHandshakeDone) String() string {
	return "HANDSHAKE_DONE"
}

func (f debugFrameHandshakeDone) write(w *packetWriter) bool {
	return w.appendHandshakeDoneFrame()
}

func (f debugFrameHandshakeDone) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("frame_type", "handshake_done"),
	)
}
| net/quic/frame_debug.go/0 | {
"file_path": "net/quic/frame_debug.go",
"repo_id": "net",
"token_count": 7237
} | 648 |
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.21
package quic
// A packetNumber is a QUIC packet number.
// Packet numbers are integers in the range [0, 2^62-1].
//
// https://www.rfc-editor.org/rfc/rfc9000.html#section-12.3
type packetNumber int64

const maxPacketNumber = 1<<62 - 1 // https://www.rfc-editor.org/rfc/rfc9000.html#section-17.1-1

// decodePacketNumber decodes a truncated packet number, given
// the largest acknowledged packet number in this number space,
// the truncated number received in a packet, and the size of the
// number received in bytes.
//
// https://www.rfc-editor.org/rfc/rfc9000.html#section-17.1
// https://www.rfc-editor.org/rfc/rfc9000.html#section-a.3
func decodePacketNumber(largest, truncated packetNumber, numLenInBytes int) packetNumber {
	// Reconstruction per the sample algorithm in RFC 9000, Appendix A.3:
	// choose the value matching truncated in its low bits that lies
	// closest to the next expected packet number.
	var (
		expected = largest + 1
		win      = packetNumber(1) << (8 * uint(numLenInBytes))
		half     = win / 2
	)
	candidate := (expected &^ (win - 1)) | truncated
	switch {
	case candidate <= expected-half && candidate < (1<<62)-win:
		return candidate + win
	case candidate > expected+half && candidate >= win:
		return candidate - win
	}
	return candidate
}

// appendPacketNumber appends an encoded packet number to b.
// The packet number must be larger than the largest acknowledged packet number.
// When no packets have been acknowledged yet, largestAck is -1.
//
// https://www.rfc-editor.org/rfc/rfc9000.html#section-17.1-5
func appendPacketNumber(b []byte, pnum, largestAck packetNumber) []byte {
	// Emit the minimal big-endian truncation, most significant byte first.
	n := packetNumberLength(pnum, largestAck)
	for shift := 8 * (n - 1); shift >= 0; shift -= 8 {
		b = append(b, byte(pnum>>uint(shift)))
	}
	return b
}

// packetNumberLength returns the minimum length, in bytes, needed to encode
// a packet number given the largest acknowledged packet number.
// The packet number must be larger than the largest acknowledged packet number.
//
// https://www.rfc-editor.org/rfc/rfc9000.html#section-17.1-5
func packetNumberLength(pnum, largestAck packetNumber) int {
	// One byte covers a delta below 2^7, two below 2^15, three below 2^23;
	// anything larger takes the full four bytes.
	switch d := pnum - largestAck; {
	case d < 0x80:
		return 1
	case d < 0x8000:
		return 2
	case d < 0x800000:
		return 3
	default:
		return 4
	}
}
| net/quic/packet_number.go/0 | {
"file_path": "net/quic/packet_number.go",
"repo_id": "net",
"token_count": 814
} | 649 |
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.21
package qlog
import (
"bytes"
"errors"
"fmt"
"log/slog"
"strings"
"sync"
"testing"
"time"
)
// testJSONOut is an in-memory io.WriteCloser capturing jsonWriter output.
type testJSONOut struct {
	bytes.Buffer
}

func (o *testJSONOut) Close() error { return nil }

// newTestJSONWriter returns a jsonWriter writing into a testJSONOut buffer.
func newTestJSONWriter() *jsonWriter {
	return &jsonWriter{w: &testJSONOut{}}
}

// wantJSONRecord asserts that w's captured output equals want wrapped in
// JSON text sequence framing (RFC 7464): a leading RS (0x1e) and trailing LF.
func wantJSONRecord(t *testing.T, w *jsonWriter, want string) {
	t.Helper()
	want = "\x1e" + want + "\n"
	got := w.w.(*testJSONOut).String()
	if got != want {
		t.Errorf("jsonWriter contains unexpected output\ngot:  %q\nwant: %q", got, want)
	}
}
// TestJSONWriterWriteConcurrentRecords checks that records written from
// concurrent goroutines come out whole (no interleaving within a record).
func TestJSONWriterWriteConcurrentRecords(t *testing.T) {
	w := newTestJSONWriter()
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			w.writeRecordStart()
			w.writeInt64Field("field", 0)
			w.writeRecordEnd()
		}()
	}
	wg.Wait()
	// All three records are identical, so any serialization order is valid.
	wantJSONRecord(t, w, strings.Join([]string{
		`{"field":0}`,
		`{"field":0}`,
		`{"field":0}`,
	}, "\n\x1e"))
}
// TestJSONWriterAttrs exercises every slog attribute kind. Note the encoding
// conventions visible in the expected output: durations and times serialize
// as fractional milliseconds, errors as their message string.
func TestJSONWriterAttrs(t *testing.T) {
	w := newTestJSONWriter()
	w.writeRecordStart()
	w.writeAttrsField("field", []slog.Attr{
		slog.Any("any", errors.New("value")),
		slog.Bool("bool", true),
		slog.Duration("duration", 1*time.Second),
		slog.Float64("float64", 1),
		slog.Int64("int64", 1),
		slog.String("string", "value"),
		slog.Time("time", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)),
		slog.Uint64("uint64", 1),
		slog.Group("group", "a", 1),
	})
	w.writeRecordEnd()
	wantJSONRecord(t, w,
		`{"field":{`+
			`"any":"value",`+
			`"bool":true,`+
			`"duration":1000.000000,`+
			`"float64":1,`+
			`"int64":1,`+
			`"string":"value",`+
			`"time":946684800000.000000,`+
			`"uint64":1,`+
			`"group":{"a":1}`+
			`}}`)
}
// TestJSONWriterAttrEmpty checks that a zero-value slog.Attr emits nothing.
func TestJSONWriterAttrEmpty(t *testing.T) {
	w := newTestJSONWriter()
	w.writeRecordStart()
	var a slog.Attr
	w.writeAttr(a)
	w.writeRecordEnd()
	wantJSONRecord(t, w, `{}`)
}

// TestJSONWriterObjectEmpty checks the encoding of an empty nested object.
func TestJSONWriterObjectEmpty(t *testing.T) {
	w := newTestJSONWriter()
	w.writeRecordStart()
	w.writeObjectField("field", func() {})
	w.writeRecordEnd()
	wantJSONRecord(t, w, `{"field":{}}`)
}

// TestJSONWriterObjectFields checks nested objects populated via a callback.
func TestJSONWriterObjectFields(t *testing.T) {
	w := newTestJSONWriter()
	w.writeRecordStart()
	w.writeObjectField("field", func() {
		w.writeStringField("a", "value")
		w.writeInt64Field("b", 10)
	})
	w.writeRecordEnd()
	wantJSONRecord(t, w, `{"field":{"a":"value","b":10}}`)
}

// TestJSONWriterRawField checks that raw values are emitted verbatim.
func TestJSONWriterRawField(t *testing.T) {
	w := newTestJSONWriter()
	w.writeRecordStart()
	w.writeRawField("field", `[1]`)
	w.writeRecordEnd()
	wantJSONRecord(t, w, `{"field":[1]}`)
}
// The following tests cover the scalar field writers one type at a time.

func TestJSONWriterBoolField(t *testing.T) {
	w := newTestJSONWriter()
	w.writeRecordStart()
	w.writeBoolField("true", true)
	w.writeBoolField("false", false)
	w.writeRecordEnd()
	wantJSONRecord(t, w, `{"true":true,"false":false}`)
}

func TestJSONWriterDurationField(t *testing.T) {
	// Durations are encoded as signed fractional milliseconds.
	w := newTestJSONWriter()
	w.writeRecordStart()
	w.writeDurationField("field1", (10*time.Millisecond)+(2*time.Nanosecond))
	w.writeDurationField("field2", -((10 * time.Millisecond) + (2 * time.Nanosecond)))
	w.writeRecordEnd()
	wantJSONRecord(t, w, `{"field1":10.000002,"field2":-10.000002}`)
}

func TestJSONWriterFloat64Field(t *testing.T) {
	w := newTestJSONWriter()
	w.writeRecordStart()
	w.writeFloat64Field("field", 1.1)
	w.writeRecordEnd()
	wantJSONRecord(t, w, `{"field":1.1}`)
}

func TestJSONWriterInt64Field(t *testing.T) {
	w := newTestJSONWriter()
	w.writeRecordStart()
	w.writeInt64Field("field", 1234)
	w.writeRecordEnd()
	wantJSONRecord(t, w, `{"field":1234}`)
}

func TestJSONWriterUint64Field(t *testing.T) {
	w := newTestJSONWriter()
	w.writeRecordStart()
	w.writeUint64Field("field", 1234)
	w.writeRecordEnd()
	wantJSONRecord(t, w, `{"field":1234}`)
}

func TestJSONWriterStringField(t *testing.T) {
	w := newTestJSONWriter()
	w.writeRecordStart()
	w.writeStringField("field", "value")
	w.writeRecordEnd()
	wantJSONRecord(t, w, `{"field":"value"}`)
}
// TestJSONWriterStringFieldEscaped checks a control character is \u-escaped.
func TestJSONWriterStringFieldEscaped(t *testing.T) {
	w := newTestJSONWriter()
	w.writeRecordStart()
	w.writeStringField("field", "va\x00ue")
	w.writeRecordEnd()
	wantJSONRecord(t, w, `{"field":"va\u0000ue"}`)
}

// TestJSONWriterStringEscaping sweeps all byte values: printable ASCII except
// '"' (0x22) and '\' (0x5c) passes through verbatim; everything else must be
// emitted as a \uXXXX escape.
func TestJSONWriterStringEscaping(t *testing.T) {
	for c := 0; c <= 0xff; c++ {
		w := newTestJSONWriter()
		w.writeRecordStart()
		w.writeStringField("field", string([]byte{byte(c)}))
		w.writeRecordEnd()
		var want string
		if (c >= 0x20 && c <= 0x21) || (c >= 0x23 && c <= 0x5b) || (c >= 0x5d && c <= 0x7e) {
			want = fmt.Sprintf(`%c`, c)
		} else {
			want = fmt.Sprintf(`\u%04x`, c)
		}
		wantJSONRecord(t, w, `{"field":"`+want+`"}`)
	}
}
| net/quic/qlog/json_writer_test.go/0 | {
"file_path": "net/quic/qlog/json_writer_test.go",
"repo_id": "net",
"token_count": 1989
} | 650 |
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.21
package quic
import "testing"
func TestSentPacketListSlidingWindow(t *testing.T) {
	// Record 1000 sent packets, acking everything outside the most recent 10.
	list := &sentPacketList{}
	const window = 10
	for i := packetNumber(0); i < 1000; i++ {
		list.add(&sentPacket{num: i})
		if i < window {
			continue
		}
		// The packet falling out of the window must still be retrievable,
		// both by number and by position.
		prev := i - window
		sent := list.num(prev)
		if sent == nil {
			t.Fatalf("packet %v not in list", prev)
		}
		if sent.num != prev {
			t.Fatalf("list.num(%v) = packet %v", prev, sent.num)
		}
		if got := list.nth(0); got != sent {
			t.Fatalf("list.nth(0) != list.num(%v)", prev)
		}
		// Acking the oldest packet and cleaning must drop it and advance
		// the window start, keeping the list at a constant size.
		sent.acked = true
		list.clean()
		if got := list.num(prev); got != nil {
			t.Fatalf("list.num(%v) = packet %v, expected it to be discarded", prev, got.num)
		}
		if got, want := list.start(), prev+1; got != want {
			t.Fatalf("list.start() = %v, want %v", got, want)
		}
		if got, want := list.end(), i+1; got != want {
			t.Fatalf("list.end() = %v, want %v", got, want)
		}
		if got, want := list.size, window; got != want {
			t.Fatalf("list.size = %v, want %v", got, want)
		}
	}
}
func TestSentPacketListGrows(t *testing.T) {
	// Record 1000 sent packets with no acks, forcing the list to grow,
	// then verify every packet is still reachable by number and position.
	list := &sentPacketList{}
	const count = 1000
	for i := packetNumber(0); i < count; i++ {
		list.add(&sentPacket{num: i})
	}
	if got, want := list.start(), packetNumber(0); got != want {
		t.Fatalf("list.start() = %v, want %v", got, want)
	}
	if got, want := list.end(), packetNumber(count); got != want {
		t.Fatalf("list.end() = %v, want %v", got, want)
	}
	if got, want := list.size, count; got != want {
		t.Fatalf("list.size = %v, want %v", got, want)
	}
	for i := packetNumber(0); i < count; i++ {
		sent := list.num(i)
		if sent == nil {
			t.Fatalf("packet %v not in list", i)
		}
		if sent.num != i {
			t.Fatalf("list.num(%v) = packet %v", i, sent.num)
		}
		if got := list.nth(int(i)); got != sent {
			t.Fatalf("list.nth(%v) != list.num(%v)", int(i), i)
		}
	}
}
func TestSentPacketListCleanAll(t *testing.T) {
	// Ack every packet and clean: the list must empty out completely,
	// then keep working for subsequently added packets.
	list := &sentPacketList{}
	// Record 10 sent packets.
	const count = 10
	for i := packetNumber(0); i < count; i++ {
		list.add(&sentPacket{num: i})
	}
	// Mark all the packets as acked.
	for i := packetNumber(0); i < count; i++ {
		list.num(i).acked = true
	}
	list.clean()
	if got, want := list.size, 0; got != want {
		t.Fatalf("list.size = %v, want %v", got, want)
	}
	list.add(&sentPacket{num: 10})
	if got, want := list.size, 1; got != want {
		t.Fatalf("list.size = %v, want %v", got, want)
	}
	sent := list.num(10)
	if sent == nil {
		t.Fatalf("packet %v not in list", 10)
	}
	if sent.num != 10 {
		t.Fatalf("list.num(10) = %v", sent.num)
	}
	if got := list.nth(0); got != sent {
		t.Fatalf("list.nth(0) != list.num(10)")
	}
}
| net/quic/sent_packet_list_test.go/0 | {
"file_path": "net/quic/sent_packet_list_test.go",
"repo_id": "net",
"token_count": 1237
} | 651 |
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.21 && darwin
package quic
import (
"encoding/binary"
"golang.org/x/sys/unix"
)
// See udp.go.
//
// Both capabilities are enabled on Darwin: the platform supports
// per-packet ECN via IP_TOS control messages, and (per the flag name,
// as consumed by udp.go) sending from an invalid local address is
// reported as an error rather than ignored.
const (
	udpECNSupport = true
	udpInvalidLocalAddrIsError = true
)
// Confusingly, on Darwin the contents of the IP_TOS option differ depending on whether
// it is used as an inbound or outbound cmsg.
func parseIPTOS(b []byte) (ecnBits, bool) {
	// Inbound IP_TOS data is a single byte whose low two bits hold
	// the ECN field; any other length is malformed.
	if len(b) == 1 {
		return ecnBits(b[0] & ecnMask), true
	}
	return 0, false
}
// appendCmsgECNv4 appends an IPv4 IP_TOS control message carrying the
// given ECN bits to b, returning the extended buffer.
func appendCmsgECNv4(b []byte, ecn ecnBits) []byte {
	// Outbound IP_TOS on Darwin is a 32-bit integer (unlike the single
	// inbound byte handled by parseIPTOS).
	// https://github.com/apple/darwin-xnu/blob/2ff845c2e033bd0ff64b5b6aa6063a1f8f65aa32/bsd/netinet/in_tclass.c#L1062-L1073
	b, data := appendCmsg(b, unix.IPPROTO_IP, unix.IP_TOS, 4)
	binary.NativeEndian.PutUint32(data, uint32(ecn))
	return b
}
| net/quic/udp_darwin.go/0 | {
"file_path": "net/quic/udp_darwin.go",
"repo_id": "net",
"token_count": 411
} | 652 |
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package trace
import (
"bytes"
"fmt"
"html/template"
"io"
"log"
"net/http"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"text/tabwriter"
"time"
)
// maxEventsPerLog caps the number of events retained per event log;
// once full, older events are coalesced into a "(N events discarded)"
// entry by printf.
const maxEventsPerLog = 100
// bucket describes one column of the /debug/events page: event logs
// whose most recent error is younger than MaxErrAge.
type bucket struct {
	MaxErrAge time.Duration // zero means no age filter (all logs)
	String string // column label shown in the UI
}
// buckets is the fixed set of error-age columns rendered by the page.
var buckets = []bucket{
	{0, "total"},
	{10 * time.Second, "errs<10s"},
	{1 * time.Minute, "errs<1m"},
	{10 * time.Minute, "errs<10m"},
	{1 * time.Hour, "errs<1h"},
	{10 * time.Hour, "errs<10h"},
	{24000 * time.Hour, "errors"},
}
// RenderEvents renders the HTML page typically served at /debug/events.
// It does not do any auth checking. The request may be nil.
//
// Most users will use the Events handler.
//
// The sensitive parameter is currently unused by this renderer.
func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
	now := time.Now()
	data := &struct {
		Families []string // family names
		Buckets  []bucket
		Counts   [][]int // eventLog count per family/bucket

		// Set when a bucket has been selected.
		Family    string
		Bucket    int
		EventLogs eventLogs
		Expanded  bool
	}{
		Buckets: buckets,
	}

	// Snapshot the family names under the read lock, then sort them
	// for stable page ordering.
	data.Families = make([]string, 0, len(families))
	famMu.RLock()
	for name := range families {
		data.Families = append(data.Families, name)
	}
	famMu.RUnlock()
	sort.Strings(data.Families)

	// Count the number of eventLogs in each family for each error age.
	data.Counts = make([][]int, len(data.Families))
	for i, name := range data.Families {
		// TODO(sameer): move this loop under the family lock.
		f := getEventFamily(name)
		data.Counts[i] = make([]int, len(data.Buckets))
		for j, b := range data.Buckets {
			data.Counts[i][j] = f.Count(now, b.MaxErrAge)
		}
	}

	if req != nil {
		var ok bool
		data.Family, data.Bucket, ok = parseEventsArgs(req)
		// Previously written as `if !ok { /* no-op */ } else { ... }`;
		// inverted to the direct form.
		if ok {
			data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge)
		}
		if data.EventLogs != nil {
			defer data.EventLogs.Free()
			sort.Sort(data.EventLogs)
		}
		if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
			data.Expanded = exp
		}
	}

	famMu.RLock()
	defer famMu.RUnlock()
	if err := eventsTmpl().Execute(w, data); err != nil {
		log.Printf("net/trace: Failed executing template: %v", err)
	}
}
// parseEventsArgs extracts the family name and bucket index from the
// request's "fam" and "b" form values. ok is false when either value
// is missing or the bucket index is out of range.
func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) {
	fam = req.FormValue("fam")
	bStr := req.FormValue("b")
	if fam == "" || bStr == "" {
		return "", 0, false
	}
	var err error
	if b, err = strconv.Atoi(bStr); err != nil || b < 0 || b >= len(buckets) {
		return "", 0, false
	}
	return fam, b, true
}
// An EventLog provides a log of events associated with a specific object.
// Logs are grouped by family and rendered on the /debug/events page.
type EventLog interface {
	// Printf formats its arguments with fmt.Sprintf and adds the
	// result to the event log.
	Printf(format string, a ...interface{})
	// Errorf is like Printf, but it marks this event as an error.
	Errorf(format string, a ...interface{})
	// Finish declares that this event log is complete.
	// The event log should not be used after calling this method.
	Finish()
}
// NewEventLog returns a new EventLog with the specified family name
// and title.
func NewEventLog(family, title string) EventLog {
	el := newEventLog()
	el.ref()
	el.Family = family
	el.Title = title
	el.Start = time.Now()
	el.events = make([]logEntry, 0, maxEventsPerLog)
	// Capture the caller's stack (skipping runtime.Callers and
	// NewEventLog itself) for display on the events page.
	stk := make([]uintptr, 32)
	depth := runtime.Callers(2, stk)
	el.stack = stk[:depth]
	getEventFamily(family).add(el)
	return el
}
// Finish removes the event log from its family and releases the
// reference taken by NewEventLog, allowing the log to be recycled
// once all other holders release it.
func (el *eventLog) Finish() {
	getEventFamily(el.Family).remove(el)
	el.unref() // matches ref in New
}
var (
	famMu sync.RWMutex // guards families
	families = make(map[string]*eventFamily) // family name => family
)
// getEventFamily returns the family with the given name, creating and
// registering it on first use.
func getEventFamily(fam string) *eventFamily {
	famMu.Lock()
	defer famMu.Unlock()
	if f, ok := families[fam]; ok {
		return f
	}
	f := &eventFamily{}
	families[fam] = f
	return f
}
// An eventFamily groups the event logs that share a family name.
type eventFamily struct {
	mu sync.RWMutex // guards eventLogs
	eventLogs eventLogs
}
// add registers el as a member of the family.
func (f *eventFamily) add(el *eventLog) {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.eventLogs = append(f.eventLogs, el)
}
// remove deletes el from the family, preserving the order of the
// remaining logs. It is a no-op if el is not present.
func (f *eventFamily) remove(el *eventLog) {
	f.mu.Lock()
	defer f.mu.Unlock()
	for i := range f.eventLogs {
		if f.eventLogs[i] != el {
			continue
		}
		f.eventLogs = append(f.eventLogs[:i], f.eventLogs[i+1:]...)
		return
	}
}
// Count reports how many of the family's event logs recorded an error
// within maxErrAge of now (all of them when maxErrAge is zero).
func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) {
	f.mu.RLock()
	defer f.mu.RUnlock()
	for i := range f.eventLogs {
		if f.eventLogs[i].hasRecentError(now, maxErrAge) {
			n++
		}
	}
	return n
}
// Copy returns the family's event logs that have a recent error,
// taking a reference on each; callers must release them via Free.
func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) {
	f.mu.RLock()
	defer f.mu.RUnlock()
	els = make(eventLogs, 0, len(f.eventLogs))
	for _, el := range f.eventLogs {
		if !el.hasRecentError(now, maxErrAge) {
			continue
		}
		el.ref()
		els = append(els, el)
	}
	return els
}
// eventLogs is a collection of event logs; it implements
// sort.Interface, ordered most-recently-started first.
type eventLogs []*eventLog
// Free calls unref on each element of the list.
func (els eventLogs) Free() {
	for _, el := range els {
		el.unref()
	}
}
// eventLogs may be sorted in reverse chronological order.
func (els eventLogs) Len() int { return len(els) }
func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) }
func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] }
// A logEntry is a timestamped log entry in an event log.
type logEntry struct {
	When time.Time
	Elapsed time.Duration // since previous event in log
	NewDay bool // whether this event is on a different day to the previous event
	What string
	IsErr bool
}

// WhenString returns a string representation of the elapsed time of the event.
// It will include the date if midnight was crossed.
func (e logEntry) WhenString() string {
	const (
		dayFormat  = "2006/01/02 15:04:05.000000"
		timeFormat = "15:04:05.000000"
	)
	if e.NewDay {
		return e.When.Format(dayFormat)
	}
	return e.When.Format(timeFormat)
}
// An eventLog represents an active event log.
type eventLog struct {
	// Family is the top-level grouping of event logs to which this belongs.
	Family string
	// Title is the title of this event log.
	Title string
	// Timing information.
	Start time.Time
	// Call stack where this event log was created.
	stack []uintptr
	// Append-only sequence of events.
	//
	// TODO(sameer): change this to a ring buffer to avoid the array copy
	// when we hit maxEventsPerLog.
	// mu guards the mutable state below (events, LastErrorTime, discarded).
	mu sync.RWMutex
	events []logEntry
	// LastErrorTime is when the most recent Errorf event was recorded.
	LastErrorTime time.Time
	// discarded counts events dropped once the log filled up; see printf.
	discarded int
	refs int32 // how many buckets this is in
}
// reset returns el to its zero state so it can be recycled via the
// freelist. The fields are cleared one by one rather than by assigning
// a fresh struct value because the embedded mutex must not be copied.
func (el *eventLog) reset() {
	// Clear all but the mutex. Mutexes may not be copied, even when unlocked.
	el.Family = ""
	el.Title = ""
	el.Start = time.Time{}
	el.stack = nil
	el.events = nil
	el.LastErrorTime = time.Time{}
	el.discarded = 0
	el.refs = 0
}
// hasRecentError reports whether el recorded an error within maxErrAge
// of now. A zero maxErrAge disables the age filter and always reports true.
func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool {
	if maxErrAge == 0 {
		return true
	}
	el.mu.RLock()
	last := el.LastErrorTime
	el.mu.RUnlock()
	return now.Sub(last) < maxErrAge
}
// delta returns the elapsed time since the last event or the log start,
// and whether it spans midnight.
// L >= el.mu
func (el *eventLog) delta(t time.Time) (time.Duration, bool) {
	n := len(el.events)
	if n == 0 {
		return t.Sub(el.Start), false
	}
	prev := el.events[n-1].When
	return t.Sub(prev), prev.Day() != t.Day()
}
// Printf formats its arguments with fmt.Sprintf and appends the result
// to the event log.
func (el *eventLog) Printf(format string, a ...interface{}) {
	el.printf(false, format, a...)
}
// Errorf is like Printf, but it marks this event as an error.
func (el *eventLog) Errorf(format string, a ...interface{}) {
	el.printf(true, format, a...)
}
// printf formats the message and records it as a new event, updating
// LastErrorTime when isErr is set. Once the log holds maxEventsPerLog
// entries, slot 0 becomes a synthetic "(N events discarded)" entry and
// the oldest real event is shifted out to make room for the new one.
func (el *eventLog) printf(isErr bool, format string, a ...interface{}) {
	// Format outside the lock to keep the critical section short.
	e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)}
	el.mu.Lock()
	e.Elapsed, e.NewDay = el.delta(e.When)
	if len(el.events) < maxEventsPerLog {
		el.events = append(el.events, e)
	} else {
		// Discard the oldest event.
		if el.discarded == 0 {
			// el.discarded starts at two to count for the event it
			// is replacing, plus the next one that we are about to
			// drop.
			el.discarded = 2
		} else {
			el.discarded++
		}
		// TODO(sameer): if this causes allocations on a critical path,
		// change eventLog.What to be a fmt.Stringer, as in trace.go.
		el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded)
		// The timestamp of the discarded meta-event should be
		// the time of the last event it is representing.
		el.events[0].When = el.events[1].When
		copy(el.events[1:], el.events[2:])
		el.events[maxEventsPerLog-1] = e
	}
	if e.IsErr {
		el.LastErrorTime = e.When
	}
	el.mu.Unlock()
}
// ref increments the log's reference count.
func (el *eventLog) ref() {
	atomic.AddInt32(&el.refs, 1)
}
// unref decrements the reference count and recycles the log onto the
// freelist when it reaches zero.
func (el *eventLog) unref() {
	if atomic.AddInt32(&el.refs, -1) == 0 {
		freeEventLog(el)
	}
}
// When formats the log's start time for display on the events page.
func (el *eventLog) When() string {
	return el.Start.Format("2006/01/02 15:04:05.000000")
}
// ElapsedTime returns the seconds elapsed since the log started,
// formatted with microsecond precision.
func (el *eventLog) ElapsedTime() string {
	elapsed := time.Since(el.Start)
	return fmt.Sprintf("%.6f", elapsed.Seconds())
}
// Stack renders the creation-time call stack captured by NewEventLog
// as tab-aligned text.
func (el *eventLog) Stack() string {
	buf := new(bytes.Buffer)
	tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
	printStackRecord(tw, el.stack)
	tw.Flush()
	return buf.String()
}
// printStackRecord prints the function + source line information
// for a single stack trace.
// Adapted from runtime/pprof/pprof.go.
func printStackRecord(w io.Writer, stk []uintptr) {
for _, pc := range stk {
f := runtime.FuncForPC(pc)
if f == nil {
continue
}
file, line := f.FileLine(pc)
name := f.Name()
// Hide runtime.goexit and any runtime functions at the beginning.
if strings.HasPrefix(name, "runtime.") {
continue
}
fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line)
}
}
// Events returns the log's current event slice.
//
// NOTE(review): the returned slice aliases internal storage that printf
// may later overwrite in place once the log fills; callers should treat
// it as read-only and short-lived.
func (el *eventLog) Events() []logEntry {
	el.mu.RLock()
	defer el.mu.RUnlock()
	return el.events
}
// freeEventLogs is a freelist of *eventLog
var freeEventLogs = make(chan *eventLog, 1000)
// newEventLog returns an event log ready to use, reusing a recycled
// log from the freelist when one is available.
func newEventLog() *eventLog {
	select {
	case el := <-freeEventLogs:
		return el
	default:
		return new(eventLog)
	}
}
// freeEventLog adds el to freeEventLogs if there's room.
// This is non-blocking.
func freeEventLog(el *eventLog) {
	el.reset()
	select {
	case freeEventLogs <- el:
	default: // freelist full; let the GC collect el
	}
}
// eventsTmplCache holds the parsed events page template; populated
// once by eventsTmpl via eventsTmplOnce.
var eventsTmplCache *template.Template
var eventsTmplOnce sync.Once
// eventsTmpl lazily parses eventsHTML exactly once and returns the
// cached template, with the "elapsed" and "trimSpace" helpers attached.
func eventsTmpl() *template.Template {
	eventsTmplOnce.Do(func() {
		eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{
			"elapsed": elapsed,
			"trimSpace": strings.TrimSpace,
		}).Parse(eventsHTML))
	})
	return eventsTmplCache
}
const eventsHTML = `
<html>
<head>
<title>events</title>
</head>
<style type="text/css">
body {
font-family: sans-serif;
}
table#req-status td.family {
padding-right: 2em;
}
table#req-status td.active {
padding-right: 1em;
}
table#req-status td.empty {
color: #aaa;
}
table#reqs {
margin-top: 1em;
}
table#reqs tr.first {
{{if $.Expanded}}font-weight: bold;{{end}}
}
table#reqs td {
font-family: monospace;
}
table#reqs td.when {
text-align: right;
white-space: nowrap;
}
table#reqs td.elapsed {
padding: 0 0.5em;
text-align: right;
white-space: pre;
width: 10em;
}
address {
font-size: smaller;
margin-top: 5em;
}
</style>
<body>
<h1>/debug/events</h1>
<table id="req-status">
{{range $i, $fam := .Families}}
<tr>
<td class="family">{{$fam}}</td>
{{range $j, $bucket := $.Buckets}}
{{$n := index $.Counts $i $j}}
<td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
{{if $n}}<a href="?fam={{$fam}}&b={{$j}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
[{{$n}} {{$bucket.String}}]
{{if $n}}</a>{{end}}
</td>
{{end}}
</tr>{{end}}
</table>
{{if $.EventLogs}}
<hr />
<h3>Family: {{$.Family}}</h3>
{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
[Summary]{{if $.Expanded}}</a>{{end}}
{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
[Expanded]{{if not $.Expanded}}</a>{{end}}
<table id="reqs">
<tr><th>When</th><th>Elapsed</th></tr>
{{range $el := $.EventLogs}}
<tr class="first">
<td class="when">{{$el.When}}</td>
<td class="elapsed">{{$el.ElapsedTime}}</td>
<td>{{$el.Title}}
</tr>
{{if $.Expanded}}
<tr>
<td class="when"></td>
<td class="elapsed"></td>
<td><pre>{{$el.Stack|trimSpace}}</pre></td>
</tr>
{{range $el.Events}}
<tr>
<td class="when">{{.WhenString}}</td>
<td class="elapsed">{{elapsed .Elapsed}}</td>
<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
</tr>
{{end}}
{{end}}
{{end}}
</table>
{{end}}
</body>
</html>
`
| net/trace/events.go/0 | {
"file_path": "net/trace/events.go",
"repo_id": "net",
"token_count": 5114
} | 653 |
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml
import (
"fmt"
"reflect"
"strings"
"sync"
)
// typeInfo holds details for the xml representation of a type.
type typeInfo struct {
	xmlname *fieldInfo // the XMLName field, if any
	fields []fieldInfo // one entry per serializable field
}
// fieldInfo holds details for the xml representation of a single field.
type fieldInfo struct {
	idx []int // index chain for reflect FieldByIndex-style access
	name string // XML element or attribute name
	xmlns string // XML namespace, if any
	flags fieldFlags // mode and modifier flags parsed from the struct tag
	parents []string // intermediate element names from an "a>b>c" tag
}
// fieldFlags records how a struct field maps to XML, as parsed from
// its "xml" struct tag.
type fieldFlags int
const (
	fElement fieldFlags = 1 << iota
	fAttr
	fCharData
	fInnerXml
	fComment
	fAny
	fOmitEmpty
	// fMode masks the mutually exclusive mode flags; a valid field has
	// at most one mode set (fAny is combined with fElement after validation).
	fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny
)
// tinfoMap caches computed typeInfo values per type; tinfoLock guards it.
var tinfoMap = make(map[reflect.Type]*typeInfo)
var tinfoLock sync.RWMutex
// nameType is the reflect.Type of Name; getTypeInfo skips walking its
// fields because the Name struct is handled specially.
var nameType = reflect.TypeOf(Name{})
// getTypeInfo returns the typeInfo structure with details necessary
// for marshalling and unmarshalling typ.
//
// Results are memoized in tinfoMap. Two goroutines may compute the
// same typeInfo concurrently; the last store wins, which is harmless
// because the computed results are identical.
func getTypeInfo(typ reflect.Type) (*typeInfo, error) {
	tinfoLock.RLock()
	tinfo, ok := tinfoMap[typ]
	tinfoLock.RUnlock()
	if ok {
		return tinfo, nil
	}
	tinfo = &typeInfo{}
	// Only structs have fields to walk; the Name type is special-cased.
	if typ.Kind() == reflect.Struct && typ != nameType {
		n := typ.NumField()
		for i := 0; i < n; i++ {
			f := typ.Field(i)
			if f.PkgPath != "" || f.Tag.Get("xml") == "-" {
				continue // Private field
			}
			// For embedded structs, embed its fields.
			if f.Anonymous {
				t := f.Type
				if t.Kind() == reflect.Ptr {
					t = t.Elem()
				}
				if t.Kind() == reflect.Struct {
					inner, err := getTypeInfo(t)
					if err != nil {
						return nil, err
					}
					if tinfo.xmlname == nil {
						tinfo.xmlname = inner.xmlname
					}
					for _, finfo := range inner.fields {
						// Prepend the embedded struct's index so the
						// field remains reachable from the outer type.
						finfo.idx = append([]int{i}, finfo.idx...)
						if err := addFieldInfo(typ, tinfo, &finfo); err != nil {
							return nil, err
						}
					}
					continue
				}
			}
			finfo, err := structFieldInfo(typ, &f)
			if err != nil {
				return nil, err
			}
			if f.Name == "XMLName" {
				tinfo.xmlname = finfo
				continue
			}
			// Add the field if it doesn't conflict with other fields.
			if err := addFieldInfo(typ, tinfo, finfo); err != nil {
				return nil, err
			}
		}
	}
	tinfoLock.Lock()
	tinfoMap[typ] = tinfo
	tinfoLock.Unlock()
	return tinfo, nil
}
// structFieldInfo builds and returns a fieldInfo for f.
//
// The xml tag has the form `xml:"xmlns name,flag,flag"` where each part
// is optional, and "a>b>c" names describe nested parent elements.
func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) {
	finfo := &fieldInfo{idx: f.Index}
	// Split the tag from the xml namespace if necessary.
	tag := f.Tag.Get("xml")
	if i := strings.Index(tag, " "); i >= 0 {
		finfo.xmlns, tag = tag[:i], tag[i+1:]
	}
	// Parse flags.
	tokens := strings.Split(tag, ",")
	if len(tokens) == 1 {
		finfo.flags = fElement
	} else {
		tag = tokens[0]
		for _, flag := range tokens[1:] {
			switch flag {
			case "attr":
				finfo.flags |= fAttr
			case "chardata":
				finfo.flags |= fCharData
			case "innerxml":
				finfo.flags |= fInnerXml
			case "comment":
				finfo.flags |= fComment
			case "any":
				finfo.flags |= fAny
			case "omitempty":
				finfo.flags |= fOmitEmpty
			}
		}
		// Validate the flags used.
		valid := true
		switch mode := finfo.flags & fMode; mode {
		case 0:
			// No explicit mode; default to an element.
			finfo.flags |= fElement
		case fAttr, fCharData, fInnerXml, fComment, fAny:
			// XMLName may not carry a mode flag; non-attr modes may
			// not also name the field.
			if f.Name == "XMLName" || tag != "" && mode != fAttr {
				valid = false
			}
		default:
			// This will also catch multiple modes in a single field.
			valid = false
		}
		if finfo.flags&fMode == fAny {
			finfo.flags |= fElement
		}
		if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 {
			valid = false
		}
		if !valid {
			return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q",
				f.Name, typ, f.Tag.Get("xml"))
		}
	}
	// Use of xmlns without a name is not allowed.
	if finfo.xmlns != "" && tag == "" {
		return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q",
			f.Name, typ, f.Tag.Get("xml"))
	}
	if f.Name == "XMLName" {
		// The XMLName field records the XML element name. Don't
		// process it as usual because its name should default to
		// empty rather than to the field name.
		finfo.name = tag
		return finfo, nil
	}
	if tag == "" {
		// If the name part of the tag is completely empty, get
		// default from XMLName of underlying struct if feasible,
		// or field name otherwise.
		if xmlname := lookupXMLName(f.Type); xmlname != nil {
			finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name
		} else {
			finfo.name = f.Name
		}
		return finfo, nil
	}
	if finfo.xmlns == "" && finfo.flags&fAttr == 0 {
		// If it's an element no namespace specified, get the default
		// from the XMLName of enclosing struct if possible.
		if xmlname := lookupXMLName(typ); xmlname != nil {
			finfo.xmlns = xmlname.xmlns
		}
	}
	// Prepare field name and parents.
	parents := strings.Split(tag, ">")
	if parents[0] == "" {
		parents[0] = f.Name
	}
	if parents[len(parents)-1] == "" {
		return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ)
	}
	finfo.name = parents[len(parents)-1]
	if len(parents) > 1 {
		// Parent chains are only meaningful for element fields.
		if (finfo.flags & fElement) == 0 {
			return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ","))
		}
		finfo.parents = parents[:len(parents)-1]
	}
	// If the field type has an XMLName field, the names must match
	// so that the behavior of both marshalling and unmarshalling
	// is straightforward and unambiguous.
	if finfo.flags&fElement != 0 {
		ftyp := f.Type
		xmlname := lookupXMLName(ftyp)
		if xmlname != nil && xmlname.name != finfo.name {
			return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName",
				finfo.name, typ, f.Name, xmlname.name, ftyp)
		}
	}
	return finfo, nil
}
// lookupXMLName returns the fieldInfo for typ's XMLName field
// in case it exists and has a valid xml field tag, otherwise
// it returns nil.
func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) {
	for typ.Kind() == reflect.Ptr {
		typ = typ.Elem()
	}
	if typ.Kind() != reflect.Struct {
		return nil
	}
	for i, n := 0, typ.NumField(); i < n; i++ {
		f := typ.Field(i)
		if f.Name != "XMLName" {
			continue
		}
		finfo, err := structFieldInfo(typ, &f)
		// Check err before touching finfo: structFieldInfo returns a
		// nil fieldInfo alongside an error, so the previous ordering
		// (finfo.name first) could dereference nil on an invalid tag.
		if err == nil && finfo.name != "" {
			return finfo
		}
		// Also consider errors as a non-existent field tag
		// and let getTypeInfo itself report the error.
		break
	}
	return nil
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if a > b {
		return b
	}
	return a
}
// addFieldInfo adds finfo to tinfo.fields if there are no
// conflicts, or if conflicts arise from previous fields that were
// obtained from deeper embedded structures than finfo. In the latter
// case, the conflicting entries are dropped.
// A conflict occurs when the path (parent + name) to a field is
// itself a prefix of another path, or when two paths match exactly.
// It is okay for field paths to share a common, shorter prefix.
func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error {
	var conflicts []int
Loop:
	// First, figure all conflicts. Most working code will have none.
	for i := range tinfo.fields {
		oldf := &tinfo.fields[i]
		// Fields in different modes (element vs attr etc.) or in
		// different explicit namespaces never conflict.
		if oldf.flags&fMode != newf.flags&fMode {
			continue
		}
		if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns {
			continue
		}
		// Paths that diverge within the common parent prefix are fine.
		minl := min(len(newf.parents), len(oldf.parents))
		for p := 0; p < minl; p++ {
			if oldf.parents[p] != newf.parents[p] {
				continue Loop
			}
		}
		// One path being a prefix of the other (or an exact match)
		// is a conflict.
		if len(oldf.parents) > len(newf.parents) {
			if oldf.parents[len(newf.parents)] == newf.name {
				conflicts = append(conflicts, i)
			}
		} else if len(oldf.parents) < len(newf.parents) {
			if newf.parents[len(oldf.parents)] == oldf.name {
				conflicts = append(conflicts, i)
			}
		} else {
			if newf.name == oldf.name {
				conflicts = append(conflicts, i)
			}
		}
	}
	// Without conflicts, add the new field and return.
	if conflicts == nil {
		tinfo.fields = append(tinfo.fields, *newf)
		return nil
	}
	// If any conflict is shallower, ignore the new field.
	// This matches the Go field resolution on embedding.
	for _, i := range conflicts {
		if len(tinfo.fields[i].idx) < len(newf.idx) {
			return nil
		}
	}
	// Otherwise, if any of them is at the same depth level, it's an error.
	for _, i := range conflicts {
		oldf := &tinfo.fields[i]
		if len(oldf.idx) == len(newf.idx) {
			f1 := typ.FieldByIndex(oldf.idx)
			f2 := typ.FieldByIndex(newf.idx)
			return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")}
		}
	}
	// Otherwise, the new field is shallower, and thus takes precedence,
	// so drop the conflicting fields from tinfo and append the new one.
	// Iterate in reverse so earlier recorded indices stay valid.
	for c := len(conflicts) - 1; c >= 0; c-- {
		i := conflicts[c]
		copy(tinfo.fields[i:], tinfo.fields[i+1:])
		tinfo.fields = tinfo.fields[:len(tinfo.fields)-1]
	}
	tinfo.fields = append(tinfo.fields, *newf)
	return nil
}
// A TagPathError represents an error in the unmarshalling process
// caused by the use of field tags with conflicting paths.
type TagPathError struct {
Struct reflect.Type
Field1, Tag1 string
Field2, Tag2 string
}
func (e *TagPathError) Error() string {
return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2)
}
// value returns v's field value corresponding to finfo.
// It's equivalent to v.FieldByIndex(finfo.idx), but initializes
// and dereferences pointers as necessary.
func (finfo *fieldInfo) value(v reflect.Value) reflect.Value {
	for i, x := range finfo.idx {
		if i > 0 {
			t := v.Type()
			// Allocate nil intermediate struct pointers so the walk
			// (and a subsequent Set on the result) can proceed.
			if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
				if v.IsNil() {
					v.Set(reflect.New(v.Type().Elem()))
				}
				v = v.Elem()
			}
		}
		v = v.Field(x)
	}
	return v
}
| net/webdav/internal/xml/typeinfo.go/0 | {
"file_path": "net/webdav/internal/xml/typeinfo.go",
"repo_id": "net",
"token_count": 3846
} | 654 |
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket_test
import (
"io"
"net/http"
"golang.org/x/net/websocket"
)
// EchoServer echoes all data received on the WebSocket back to the
// client, returning when the connection is closed.
func EchoServer(ws *websocket.Conn) {
	io.Copy(ws, ws)
}
// This example demonstrates a trivial echo server.
func ExampleHandler() {
	http.Handle("/echo", websocket.Handler(EchoServer))
	// ListenAndServe blocks; it only returns on error.
	err := http.ListenAndServe(":12345", nil)
	if err != nil {
		panic("ListenAndServe: " + err.Error())
	}
}
| net/websocket/examplehandler_test.go/0 | {
"file_path": "net/websocket/examplehandler_test.go",
"repo_id": "net",
"token_count": 201
} | 655 |
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package endpoints provides constants for using OAuth2 to access various services.
package endpoints
import (
"strings"
"golang.org/x/oauth2"
)
// Amazon is the endpoint for Amazon.
var Amazon = oauth2.Endpoint{
AuthURL: "https://www.amazon.com/ap/oa",
TokenURL: "https://api.amazon.com/auth/o2/token",
}
// Battlenet is the endpoint for Battlenet.
var Battlenet = oauth2.Endpoint{
AuthURL: "https://battle.net/oauth/authorize",
TokenURL: "https://battle.net/oauth/token",
}
// Bitbucket is the endpoint for Bitbucket.
var Bitbucket = oauth2.Endpoint{
AuthURL: "https://bitbucket.org/site/oauth2/authorize",
TokenURL: "https://bitbucket.org/site/oauth2/access_token",
}
// Cern is the endpoint for CERN.
var Cern = oauth2.Endpoint{
AuthURL: "https://oauth.web.cern.ch/OAuth/Authorize",
TokenURL: "https://oauth.web.cern.ch/OAuth/Token",
}
// Facebook is the endpoint for Facebook.
var Facebook = oauth2.Endpoint{
AuthURL: "https://www.facebook.com/v3.2/dialog/oauth",
TokenURL: "https://graph.facebook.com/v3.2/oauth/access_token",
}
// Foursquare is the endpoint for Foursquare.
var Foursquare = oauth2.Endpoint{
AuthURL: "https://foursquare.com/oauth2/authorize",
TokenURL: "https://foursquare.com/oauth2/access_token",
}
// Fitbit is the endpoint for Fitbit.
var Fitbit = oauth2.Endpoint{
AuthURL: "https://www.fitbit.com/oauth2/authorize",
TokenURL: "https://api.fitbit.com/oauth2/token",
}
// GitHub is the endpoint for GitHub.
var GitHub = oauth2.Endpoint{
AuthURL: "https://github.com/login/oauth/authorize",
TokenURL: "https://github.com/login/oauth/access_token",
DeviceAuthURL: "https://github.com/login/device/code",
}
// GitLab is the endpoint for GitLab.
var GitLab = oauth2.Endpoint{
AuthURL: "https://gitlab.com/oauth/authorize",
TokenURL: "https://gitlab.com/oauth/token",
}
// Google is the endpoint for Google.
var Google = oauth2.Endpoint{
AuthURL: "https://accounts.google.com/o/oauth2/auth",
TokenURL: "https://oauth2.googleapis.com/token",
DeviceAuthURL: "https://oauth2.googleapis.com/device/code",
}
// Heroku is the endpoint for Heroku.
var Heroku = oauth2.Endpoint{
AuthURL: "https://id.heroku.com/oauth/authorize",
TokenURL: "https://id.heroku.com/oauth/token",
}
// HipChat is the endpoint for HipChat.
var HipChat = oauth2.Endpoint{
AuthURL: "https://www.hipchat.com/users/authorize",
TokenURL: "https://api.hipchat.com/v2/oauth/token",
}
// Instagram is the endpoint for Instagram.
var Instagram = oauth2.Endpoint{
AuthURL: "https://api.instagram.com/oauth/authorize",
TokenURL: "https://api.instagram.com/oauth/access_token",
}
// KaKao is the endpoint for KaKao.
var KaKao = oauth2.Endpoint{
AuthURL: "https://kauth.kakao.com/oauth/authorize",
TokenURL: "https://kauth.kakao.com/oauth/token",
}
// LinkedIn is the endpoint for LinkedIn.
var LinkedIn = oauth2.Endpoint{
AuthURL: "https://www.linkedin.com/oauth/v2/authorization",
TokenURL: "https://www.linkedin.com/oauth/v2/accessToken",
}
// Mailchimp is the endpoint for Mailchimp.
var Mailchimp = oauth2.Endpoint{
AuthURL: "https://login.mailchimp.com/oauth2/authorize",
TokenURL: "https://login.mailchimp.com/oauth2/token",
}
// Mailru is the endpoint for Mail.Ru.
var Mailru = oauth2.Endpoint{
AuthURL: "https://o2.mail.ru/login",
TokenURL: "https://o2.mail.ru/token",
}
// MediaMath is the endpoint for MediaMath.
var MediaMath = oauth2.Endpoint{
AuthURL: "https://api.mediamath.com/oauth2/v1.0/authorize",
TokenURL: "https://api.mediamath.com/oauth2/v1.0/token",
}
// MediaMathSandbox is the endpoint for MediaMath Sandbox.
var MediaMathSandbox = oauth2.Endpoint{
AuthURL: "https://t1sandbox.mediamath.com/oauth2/v1.0/authorize",
TokenURL: "https://t1sandbox.mediamath.com/oauth2/v1.0/token",
}
// Microsoft is the endpoint for Microsoft.
var Microsoft = oauth2.Endpoint{
AuthURL: "https://login.live.com/oauth20_authorize.srf",
TokenURL: "https://login.live.com/oauth20_token.srf",
}
// NokiaHealth is the endpoint for Nokia Health.
var NokiaHealth = oauth2.Endpoint{
AuthURL: "https://account.health.nokia.com/oauth2_user/authorize2",
TokenURL: "https://account.health.nokia.com/oauth2/token",
}
// Odnoklassniki is the endpoint for Odnoklassniki.
var Odnoklassniki = oauth2.Endpoint{
AuthURL: "https://www.odnoklassniki.ru/oauth/authorize",
TokenURL: "https://api.odnoklassniki.ru/oauth/token.do",
}
// PayPal is the endpoint for PayPal.
var PayPal = oauth2.Endpoint{
AuthURL: "https://www.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize",
TokenURL: "https://api.paypal.com/v1/identity/openidconnect/tokenservice",
}
// PayPalSandbox is the endpoint for PayPal Sandbox.
var PayPalSandbox = oauth2.Endpoint{
AuthURL: "https://www.sandbox.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize",
TokenURL: "https://api.sandbox.paypal.com/v1/identity/openidconnect/tokenservice",
}
// Slack is the endpoint for Slack.
var Slack = oauth2.Endpoint{
AuthURL: "https://slack.com/oauth/authorize",
TokenURL: "https://slack.com/api/oauth.access",
}
// Spotify is the endpoint for Spotify.
var Spotify = oauth2.Endpoint{
AuthURL: "https://accounts.spotify.com/authorize",
TokenURL: "https://accounts.spotify.com/api/token",
}
// StackOverflow is the endpoint for Stack Overflow.
var StackOverflow = oauth2.Endpoint{
AuthURL: "https://stackoverflow.com/oauth",
TokenURL: "https://stackoverflow.com/oauth/access_token",
}
// Strava is the endpoint for Strava.
var Strava = oauth2.Endpoint{
AuthURL: "https://www.strava.com/oauth/authorize",
TokenURL: "https://www.strava.com/oauth/token",
}
// Twitch is the endpoint for Twitch.
var Twitch = oauth2.Endpoint{
AuthURL: "https://id.twitch.tv/oauth2/authorize",
TokenURL: "https://id.twitch.tv/oauth2/token",
}
// Uber is the endpoint for Uber.
var Uber = oauth2.Endpoint{
AuthURL: "https://login.uber.com/oauth/v2/authorize",
TokenURL: "https://login.uber.com/oauth/v2/token",
}
// Vk is the endpoint for Vk.
var Vk = oauth2.Endpoint{
AuthURL: "https://oauth.vk.com/authorize",
TokenURL: "https://oauth.vk.com/access_token",
}
// Yahoo is the endpoint for Yahoo.
var Yahoo = oauth2.Endpoint{
AuthURL: "https://api.login.yahoo.com/oauth2/request_auth",
TokenURL: "https://api.login.yahoo.com/oauth2/get_token",
}
// Yandex is the endpoint for Yandex.
var Yandex = oauth2.Endpoint{
AuthURL: "https://oauth.yandex.com/authorize",
TokenURL: "https://oauth.yandex.com/token",
}
// Zoom is the endpoint for Zoom.
var Zoom = oauth2.Endpoint{
AuthURL: "https://zoom.us/oauth/authorize",
TokenURL: "https://zoom.us/oauth/token",
}
// AzureAD returns a new oauth2.Endpoint for the given tenant at Azure Active Directory.
// If tenant is empty, it uses the tenant called `common`.
//
// For more information see:
// https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-v2-protocols#endpoints
func AzureAD(tenant string) oauth2.Endpoint {
if tenant == "" {
tenant = "common"
}
return oauth2.Endpoint{
AuthURL: "https://login.microsoftonline.com/" + tenant + "/oauth2/v2.0/authorize",
TokenURL: "https://login.microsoftonline.com/" + tenant + "/oauth2/v2.0/token",
DeviceAuthURL: "https://login.microsoftonline.com/" + tenant + "/oauth2/v2.0/devicecode",
}
}
// HipChatServer returns a new oauth2.Endpoint for a HipChat Server instance
// running on the given domain or host.
func HipChatServer(host string) oauth2.Endpoint {
	base := "https://" + host
	return oauth2.Endpoint{
		AuthURL:  base + "/users/authorize",
		TokenURL: base + "/v2/oauth/token",
	}
}
// AWSCognito returns a new oauth2.Endpoint for the supplied AWS Cognito domain which is
// linked to your Cognito User Pool.
//
// Example domain: https://testing.auth.us-east-1.amazoncognito.com
//
// For more information see:
// https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-assign-domain.html
// https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-userpools-server-contract-reference.html
func AWSCognito(domain string) oauth2.Endpoint {
	root := strings.TrimRight(domain, "/")
	return oauth2.Endpoint{
		AuthURL:  root + "/oauth2/authorize",
		TokenURL: root + "/oauth2/token",
	}
}
| oauth2/endpoints/endpoints.go/0 | {
"file_path": "oauth2/endpoints/endpoints.go",
"repo_id": "oauth2",
"token_count": 3171
} | 656 |
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package downscope_test
import (
"context"
"fmt"
"golang.org/x/oauth2/google"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google/downscope"
)
func ExampleNewTokenSource() {
	// This shows how to generate a downscoped token. This code would be run on the
	// token broker, which holds the root token used to generate the downscoped token.
	ctx := context.Background()

	// Initializes an accessBoundary with one Rule which restricts the downscoped
	// token to only be able to access the bucket "foo" and only grants it the
	// permission "storage.objectViewer".
	accessBoundary := []downscope.AccessBoundaryRule{
		{
			AvailableResource:    "//storage.googleapis.com/projects/_/buckets/foo",
			AvailablePermissions: []string{"inRole:roles/storage.objectViewer"},
		},
	}

	var rootSource oauth2.TokenSource
	// This Source can be initialized in multiple ways; the following example uses
	// Application Default Credentials.
	rootSource, err := google.DefaultTokenSource(ctx, "https://www.googleapis.com/auth/cloud-platform")
	// The error from DefaultTokenSource was previously ignored and
	// immediately overwritten; check it before using rootSource.
	if err != nil {
		fmt.Printf("failed to generate root token source: %v", err)
		return
	}
	dts, err := downscope.NewTokenSource(ctx, downscope.DownscopingConfig{RootSource: rootSource, Rules: accessBoundary})
	if err != nil {
		fmt.Printf("failed to generate downscoped token source: %v", err)
		return
	}
	tok, err := dts.Token()
	if err != nil {
		fmt.Printf("failed to generate token: %v", err)
		return
	}
	_ = tok
	// You can now pass tok to a token consumer however you wish, such as exposing
	// a REST API and sending it over HTTP.
	// You can instead use the token held in dts to make
	// Google Cloud Storage calls, as follows:
	// storageClient, err := storage.NewClient(ctx, option.WithTokenSource(dts))
}
| oauth2/google/downscope/tokenbroker_test.go/0 | {
"file_path": "oauth2/google/downscope/tokenbroker_test.go",
"repo_id": "oauth2",
"token_count": 577
} | 657 |
{
"SubjToken": "321road"
}
| oauth2/google/externalaccount/testdata/3pi_cred.json/0 | {
"file_path": "oauth2/google/externalaccount/testdata/3pi_cred.json",
"repo_id": "oauth2",
"token_count": 15
} | 658 |
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package google
import (
"reflect"
"strings"
"testing"
)
func TestSDKConfig(t *testing.T) {
	sdkConfigPath = func() (string, error) {
		return "testdata/gcloud", nil
	}
	cases := []struct {
		account     string
		accessToken string
		err         bool
	}{
		{"", "bar_access_token", false},
		{"foo@example.com", "foo_access_token", false},
		{"bar@example.com", "bar_access_token", false},
		{"baz@serviceaccount.example.com", "", true},
	}
	for _, tc := range cases {
		c, err := NewSDKConfig(tc.account)
		// An error is expected for exactly the cases with tc.err set.
		if tc.err {
			if err == nil {
				t.Errorf("got nil, want error")
			}
			continue
		}
		if err != nil {
			t.Errorf("got %v, want nil", err)
			continue
		}
		tok := c.initialToken
		if tok == nil {
			t.Errorf("got nil, want %q", tc.accessToken)
			continue
		}
		if got := tok.AccessToken; got != tc.accessToken {
			t.Errorf("got %q, want %q", got, tc.accessToken)
		}
	}
}
func TestParseINI(t *testing.T) {
	// Each case pairs raw INI input with the section map parseINI
	// should produce; the "" key holds settings outside any section.
	cases := []struct {
		ini  string
		want map[string]map[string]string
	}{
		{
			`root = toor
[foo]
bar = hop
ini = nin
`,
			map[string]map[string]string{
				"":    {"root": "toor"},
				"foo": {"bar": "hop", "ini": "nin"},
			},
		},
		{
			"\t extra \t = whitespace \t\r\n \t [everywhere] \t \r\n here \t = \t there \t \r\n",
			map[string]map[string]string{
				"":           {"extra": "whitespace"},
				"everywhere": {"here": "there"},
			},
		},
		{
			`[empty]
[section]
empty=
`,
			map[string]map[string]string{
				"":        {},
				"empty":   {},
				"section": {"empty": ""},
			},
		},
		{
			`ignore
[invalid
=stuff
;comment=true
`,
			map[string]map[string]string{
				"": {},
			},
		},
	}
	for _, tc := range cases {
		got, err := parseINI(strings.NewReader(tc.ini))
		if err != nil {
			t.Errorf("parseINI(%q) error %v, want: no error", tc.ini, err)
			continue
		}
		if !reflect.DeepEqual(got, tc.want) {
			t.Errorf("parseINI(%q) = %#v, want: %#v", tc.ini, got, tc.want)
		}
	}
}
| oauth2/google/sdk_test.go/0 | {
"file_path": "oauth2/google/sdk_test.go",
"repo_id": "oauth2",
"token_count": 997
} | 659 |
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly
// known as "two-legged OAuth 2.0".
//
// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12
package jwt
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
"golang.org/x/oauth2"
"golang.org/x/oauth2/internal"
"golang.org/x/oauth2/jws"
)
var (
	// defaultGrantType is the grant_type value identifying the JWT
	// bearer flow at the token endpoint.
	defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
	// defaultHeader is the template JWS header for signing assertions;
	// Token copies it and fills in the key ID from Config.PrivateKeyID.
	defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"}
)
// Config is the configuration for using JWT to fetch tokens,
// commonly known as "two-legged OAuth 2.0".
type Config struct {
	// Email is the OAuth client identifier used when communicating with
	// the configured OAuth provider. It is sent as the issuer ("iss")
	// claim of the signed JWT.
	Email string

	// PrivateKey contains the contents of an RSA private key or the
	// contents of a PEM file that contains a private key. The provided
	// private key is used to sign JWT payloads.
	// PEM containers with a passphrase are not supported.
	// Use the following command to convert a PKCS 12 file into a PEM.
	//
	//    $ openssl pkcs12 -in key.p12 -out key.pem -nodes
	//
	PrivateKey []byte

	// PrivateKeyID contains an optional hint indicating which key is being
	// used. It is set as the key ID of the JWS header when signing.
	PrivateKeyID string

	// Subject is the optional user to impersonate. When set, it is sent
	// as both the "sub" claim and the legacy "prn" claim.
	Subject string

	// Scopes optionally specifies a list of requested permission scopes.
	// They are joined with spaces into the "scope" claim.
	Scopes []string

	// TokenURL is the endpoint required to complete the 2-legged JWT flow.
	// Unless Audience is set, it also serves as the "aud" claim.
	TokenURL string

	// Expires optionally specifies how long the token is valid for.
	// If not positive, no expiration ("exp") claim is included.
	Expires time.Duration

	// Audience optionally specifies the intended audience of the
	// request.  If empty, the value of TokenURL is used as the
	// intended audience.
	Audience string

	// PrivateClaims optionally specifies custom private claims in the JWT.
	// See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
	PrivateClaims map[string]interface{}

	// UseIDToken optionally specifies whether ID token should be used instead
	// of access token when the server returns both. Token returns an error
	// if this is set and the server's response contains no ID token.
	UseIDToken bool
}
// TokenSource returns a JWT TokenSource using the configuration
// in c and the HTTP client from the provided context.
func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
	src := jwtSource{ctx, c}
	return oauth2.ReuseTokenSource(nil, src)
}
// Client returns an HTTP client wrapping the context's
// HTTP transport and adding Authorization headers with tokens
// obtained from c.
//
// The returned client and its Transport should not be modified.
func (c *Config) Client(ctx context.Context) *http.Client {
	ts := c.TokenSource(ctx)
	return oauth2.NewClient(ctx, ts)
}
// jwtSource is a source that always does a signed JWT request for a token.
// It should typically be wrapped with a reuseTokenSource.
type jwtSource struct {
	// ctx supplies the HTTP transport used for the token request.
	ctx context.Context
	// conf holds the settings used to build and sign the JWT assertion.
	conf *Config
}
// Token implements oauth2.TokenSource. It signs a JWT assertion built
// from the configuration, exchanges it at the token endpoint via the
// JWT bearer grant, and parses the response into an oauth2.Token.
// Every call makes a network request; TokenSource wraps this in a
// reuseTokenSource so cached tokens are reused until expiry.
func (js jwtSource) Token() (*oauth2.Token, error) {
	pk, err := internal.ParseKey(js.conf.PrivateKey)
	if err != nil {
		return nil, err
	}
	hc := oauth2.NewClient(js.ctx, nil)
	// Assemble the claim set for the assertion from the config.
	claimSet := &jws.ClaimSet{
		Iss:           js.conf.Email,
		Scope:         strings.Join(js.conf.Scopes, " "),
		Aud:           js.conf.TokenURL,
		PrivateClaims: js.conf.PrivateClaims,
	}
	if subject := js.conf.Subject; subject != "" {
		claimSet.Sub = subject
		// prn is the old name of sub. Keep setting it
		// to be compatible with legacy OAuth 2.0 providers.
		claimSet.Prn = subject
	}
	if t := js.conf.Expires; t > 0 {
		claimSet.Exp = time.Now().Add(t).Unix()
	}
	if aud := js.conf.Audience; aud != "" {
		claimSet.Aud = aud
	}
	// Copy the shared header template so the package-level value is
	// never mutated, then attach the per-config key ID.
	h := *defaultHeader
	h.KeyID = js.conf.PrivateKeyID
	payload, err := jws.Encode(&h, claimSet, pk)
	if err != nil {
		return nil, err
	}
	// Exchange the signed assertion for a token.
	v := url.Values{}
	v.Set("grant_type", defaultGrantType)
	v.Set("assertion", payload)
	resp, err := hc.PostForm(js.conf.TokenURL, v)
	if err != nil {
		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
	}
	defer resp.Body.Close()
	// Cap the response body read at 1 MiB to bound memory use on a
	// misbehaving server.
	body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
	if err != nil {
		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
	}
	if c := resp.StatusCode; c < 200 || c > 299 {
		return nil, &oauth2.RetrieveError{
			Response: resp,
			Body:     body,
		}
	}
	// tokenRes is the JSON response body.
	var tokenRes struct {
		AccessToken string `json:"access_token"`
		TokenType   string `json:"token_type"`
		IDToken     string `json:"id_token"`
		ExpiresIn   int64  `json:"expires_in"` // relative seconds from now
	}
	if err := json.Unmarshal(body, &tokenRes); err != nil {
		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
	}
	token := &oauth2.Token{
		AccessToken: tokenRes.AccessToken,
		TokenType:   tokenRes.TokenType,
	}
	// Preserve any provider-specific fields on the token.
	raw := make(map[string]interface{})
	json.Unmarshal(body, &raw) // no error checks for optional fields
	token = token.WithExtra(raw)

	if secs := tokenRes.ExpiresIn; secs > 0 {
		token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
	}
	if v := tokenRes.IDToken; v != "" {
		// decode returned id token to get expiry
		claimSet, err := jws.Decode(v)
		if err != nil {
			return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err)
		}
		token.Expiry = time.Unix(claimSet.Exp, 0)
	}
	if js.conf.UseIDToken {
		if tokenRes.IDToken == "" {
			return nil, fmt.Errorf("oauth2: response doesn't have JWT token")
		}
		token.AccessToken = tokenRes.IDToken
	}
	return token, nil
}
| oauth2/jwt/jwt.go/0 | {
"file_path": "oauth2/jwt/jwt.go",
"repo_id": "oauth2",
"token_count": 1966
} | 660 |
# Proposal: Dense mark bits and sweep-free allocation
or, *How I learned to stop worrying and love the bitmap*
Author: Austin Clements
Last updated: 2015-09-30
Discussion at https://golang.org/issue/12800.
## Abstract
This document proposes a change to memory allocation to eliminate the
need for the sweeper and a new representation for the mark bitmap that
enables this. This reduces the cost of allocation, significantly
improves the locality of the mark bitmap, and paves the way for future
advances to the Go GC that require multiple mark bits.
## Background
All current releases of Go up to and including Go 1.5 use a
*mark-sweep* garbage collector. As of Go 1.5, this works by
alternating between a mostly-concurrent mark phase and a concurrent
sweep phase. The mark phase finds all reachable objects and *marks*
them, leaving all unreachable objects *unmarked*. The sweep phase
walks through the entire heap, finds all unmarked objects, and adds
them to free lists, which the allocator in turn uses to allocate new
objects.
However, this sweep phase is, in a sense, redundant. It primarily
transforms one representation of the free heap—the mark bits—into
another representation of the free heap—the free lists. Not only does
this take time, but the free list representation is unfriendly to
modern CPUs since it is not very cacheable and accesses to it are hard
to predict. Furthermore, the current mark representation is also
cache-unfriendly, which adds even more to the cost of sweeping.
This document proposes a design for eliminating the sweeper. The key
idea is to allocate directly using the mark bitmap, foregoing the free
list representation entirely. Doing this efficiently requires a new,
dense representation for mark bits that enables fast scanning and
clearing. This representation also makes it easy to maintain multiple
mark bitmaps simultaneously. We introduce the dense bitmap
representation first. We then present a simple system for allocation
based on two mark bitmaps that eliminates the free list and hence the
need for the sweeper.
## Motivation
Typical Go programs spend about 5% of their CPU in the sweeper or in
cache misses induced by the free list representation. Hence, if we can
significantly reduce or eliminate the cost of the sweeping from
allocation and improve the free set representation, we can improve
overall program performance.
To measure this, we ran the go1 benchmark suite, just the BinaryTree17
benchmark with `-benchtime 5s`, and the x/benchmarks garbage benchmark
with `-benchmem 1024`. These represent a range of CPU-intensive and
allocation-intensive benchmarks. In all cases, GOMAXPROCS is 4. The
CPU time breaks down as follows:
| | go1 (all) | BinaryTree17 | garbage 1GB |
| --- | ---------:| ------------:| -----------:|
| CPU in mark | 2.8% | 4.0% | 34.7% |
| CPU in sweep | 3.3% | 20.6% | 5.2% |
| CPU in mallocgc (excl. sweep, GC) | 6.8% | 39.0% | 15.8% |
|   % of mallocgc spent walking free list | 19.2% | 17.5% | 14.3% |
(Times were collected using pprof. mark shows samples matching
`\.gc$|gcBgMarkWorker|gcAssistAlloc|gchelper`. sweep shows
`mSpan_Sweep`. mallocgc shows `mallocgc -gcAssistAlloc -mSpan_Sweep`.)
This proposal replaces sweepone with a scan that should require
roughly 1ms of CPU time per heap GB per GC cycle. For BinaryTree17,
that’s less than 0.1% of its CPU time, versus 21% for the current
sweep. It replaces the cost of walking the free list in mallocgc with
what is likely to be a smaller cost of sequentially scanning a bitmap.
It’s likely to have negligible effect on mark performance. Finally, it
should increase the heap footprint by roughly 0.02%.
## Dense mark bitmaps
Currently, the mark bits are stored in the *heap bitmap*, which is a
structure that stores two bits for every word of the heap, using a
simple formula to map between a heap address and a bitmap address. The
mark bit is stored in one of the bits for the first word of every
object. Because mark bits are "on the side," spans can be efficiently
subdivided into smaller object sizes (especially power-of-two sizes).
However, this sparse bitmap is expensive to scan and clear, as it
requires a strided, irregular traversal of memory, as shown in
figure 1. It also makes it difficult (albeit not impossible) to
maintain two sets of mark bits on word-sized objects, which is
necessary for sweep-free allocation.
![](12800/sparse.png)
**Figure 1.** In Go 1.5, mark bits are sparse and irregularly strided.
Therefore, this proposal separates the mark bits from the heap bitmap
into a dedicated mark bitmap structure. The difficulty here is,
because objects are different sizes and a given span can be freed and
reused for a different size class any number of times, a dense mark
bitmap cannot be addressed solely based on an object’s address. It
must be indirected through the object’s span.
One solution is to store the mark bits for the objects in each span as
a dense bit array in the span itself, either before or after the
objects. This dense representation is more efficient to scan and clear
than the sparse representation, but still requires visiting every span
to do so. Furthermore, it increases memory fragmentation, especially
for power-of-two allocations, which currently have zero external
fragmentation.
We propose maintaining a dense mark bitmap, but placing it outside of
the spans and in mostly contiguous memory by allocating the mark
bitmap anew *for every GC cycle*. In both the current sparse bitmap
and the strawman dense bitmap above, an object’s mark bit is in the
same location for the lifetime of that object. However, an object’s
mark only needs to persist for two GC cycles. By allocating the mark
bitmap anew for each GC cycle, we can avoid impacting span
fragmentation and use contiguous memory to enable bulk clearing of the
bitmap. Furthermore, discarding and recreating the bitmap on every
cycle lets us use a trivial scheme for allocating mark bitmaps to
spans while simultaneously dealing with changing heap layouts: even
though spans are reused for different size classes, any given span can
change size class at most once per GC cycle, so there’s no need for
sophisticated management of a mark bitmap that will only last two
cycles.
Between GC cycles, the runtime will prepare a fresh mark bitmap for
the upcoming mark phase, as shown in figure 2. It will traverse the
list of in-use spans and use a simple arena-style linear allocator to
assign each span a mark bitmap sized for the number of objects in that
span. The arena allocator will obtain memory from the system in
reasonably large chunks (e.g., 64K) and bulk zero it. Likewise, any
span that transitions from free to in-use during this time will also
be allocated a mark bitmap.
![](12800/dense.png)
**Figure 2.** Proposed arena allocation of dense mark bitmaps. For
illustrative purposes, bitmaps are shown allocated without alignment
constraints.
When the mark phase begins, all in-use spans will have zeroed mark
bitmaps. The mark phase will set the mark bit for every reachable
object. Then, during mark termination, the garbage collector will
transfer this bitmap to the allocator, which can use it to find free
objects in spans that were in-use at the beginning of the mark phase.
Any spans that are allocated after the mark phase (including after
mark termination) will have a nil allocation bitmap, which is
equivalent to all objects in that span being unmarked and allows for
bump-pointer allocation within that span. Finally, when the allocator
is done with the mark bitmap, the whole arena can be bulk freed.
## Dense mark bitmap performance
The entire process of allocating and clearing the new mark bitmap will
require only about 1 ms of CPU time per heap GB. Walking the list of
in-use spans requires about 1 ms per heap GB and, thanks to the arena
allocation, zeroing the bitmap should add only 40 µs per heap GB,
assuming 50 GB/sec sequential memory bandwidth and an average object
size of 64 bytes.
Furthermore, the memory overhead of the mark bitmap is minimal. The
instantaneous average object size of "go build std" and "go test
-short std" is 136 bytes and 222 bytes, respectively. At these sizes,
and assuming two bitmaps, the mark bitmaps will have an overhead of
only 0.18% (1.9 MB per heap GB) and 0.11% (1.2 MB per heap GB),
respectively. Even given a very conservative average object size of 16
bytes, the overhead is only 1.6% (16 MB per heap GB).
Dense mark bits should have negligible impact on mark phase
performance. Because of interior pointers, marking an object already
requires looking up that object’s span and dividing by the span’s
object size to compute the object index. With the sparse mark bitmap,
it requires multiplying and adding to compute the object’s base
address; three subtractions, three shifts, and a mask to compute the
location and value of the mark bit; and a random atomic memory write
to set the mark bit. With the dense mark bitmap, it requires reading
the mark bitmap pointer from the span (which can be placed on the same
cache line as the metadata already read); an addition, two shifts, and
a mask to compute the location and value of the mark bit; and a random
atomic memory write to set the mark bit.
Dense mark bits should simplify some parts of mark that currently
require checks and branches to treat the heap bitmap for the first two
object words differently from the heap bitmap for the remaining words.
This may improve branch predictor behavior and hence performance of
object scanning.
Finally, dense mark bits may slightly improve the performance of
unrolling the heap bitmap during allocation. Currently, small objects
require atomic writes to the heap bitmap because they may race with
the garbage collector marking objects. By separating out the mark
bits, the sole writer to any word of the heap bitmap is the P
allocating from that span, so all bitmap writes can be non-atomic.
## Sweep-free allocation
The key idea behind eliminating the sweeper is to use the mark bitmap
directly during allocation to find free objects that can be
reallocated, rather than transforming this bitmap into a free list and
then allocating using the free list. However, in a concurrent garbage
collector some second copy of the heap free set is necessary for the
simple reason that the mutator continues to allocate objects from the
free set at the same time the concurrent mark phase is constructing
the new free set.
In the current design, this second copy is the free list, which is
fully constructed from the mark bits by the time the next mark phase
begins. This requires essentially no space because the free list can
be threaded through the free objects. It also gives the system a
chance to clear all mark bits in preparation for the next GC cycle,
which is expensive in the sparse mark bitmap representation, so it
needs to be done incrementally and simultaneously with sweeping the
marks. The flow of information in the current sweeper is shown in
figure 3.
![](12800/sweep-flow.png)
**Figure 3.** Go 1.5 flow of free object information.
We propose simply using two sets of mark bits. At the end of the mark
phase, the object allocator switches to using the mark bitmap
constructed by the mark phase and the object allocator’s current mark
bitmap is discarded. During the time between mark phases, the runtime
allocates and bulk zeroes the mark bitmap for the next mark phase. The
flow of information about free objects in this design is shown in
figure 4.
![](12800/mark-flow.png)
**Figure 4.** Proposed flow of free object information.
To allocate an object, the object allocator obtains a cached span or
allocates a new span from the span allocator as it does now. The span
allocator works much like it does now, with the exception that where
the span allocator currently sweeps the span, it will now simply reset
its bitmap pointer to the beginning of the span’s bitmap. With a span
in hand, the object allocator scans its bitmap for the next unmarked
object, updates the span’s bitmap pointer, and initializes the object.
If there are no more unmarked objects in the span, the object
allocator acquires another span. Note that this may happen repeatedly
if the allocator obtains spans that are fully marked (in contrast,
this is currently handled by the sweeper, so span allocation will
never return a fully marked span).
Most likely, it makes sense to cache an inverted copy of the current
word of the bitmap in the span. Allocation can then find the next set
bit using processor ctz intrinsics or efficient software ctz and bit
shifts to maintain its position in the word. This also simplifies the
handling of fresh spans that have nil allocation bitmaps.
### Finalizers
One complication of this approach is that sweeping is currently also
responsible for queuing finalizers for unmarked objects. One solution
is to simply check the mark bits of all finalized objects between GC
cycles. This could be done in the same loop that allocates new mark
bits to all spans after mark termination, and would add very little
cost. In order to do this concurrently, if the allocator obtained a
span before the garbage collector was able to check it for finalizers,
the allocator would be responsible for queuing finalizers for objects
on that span.
## Compatibility
This proposal only affects the performance of the runtime. It does not
change any user-facing Go APIs, and hence it satisfies Go 1
compatibility.
## Implementation
This work will be carried out by Austin Clements and Rick Hudson
during the Go 1.6 development cycle.
Figure 5 shows the components of this proposal and the dependencies
between implementing them.
![](12800/plan.png)
**Figure 5.** Implementation dependency diagram.
We will implement dense mark bitmaps first because it should be fairly
easy to update the current sweeper to use the dense bitmaps and this
will enable multiple mark bitmaps. We will then build sweep-free
allocation on top of this. Sweep-free allocation makes it difficult to
detect completely unmarked spans and return them to the span
allocator, so we will most likely want to implement eager freeing of
unmarked spans first, as discussed in issue
[#11979](https://golang.org/issue/11979), though this is not strictly
necessary. At any point after dense mark bitmaps are in place, we can
implement the optimizations to the heap bitmap discussed in "Dense
mark bitmap performance."
## Related work
Dense mark bitmaps have a long history in garbage collectors that use
segregated-fits allocation. The Boehm conservative collector
[Boehm, 1988] used dense mark bitmaps, but stored each span’s bitmap
along with that span’s objects, rather than as part of large,
contiguous allocations. Similarly, Garner [2007] explores a mark-sweep
collector that supports both mark bits in object headers and dense
"side bitmaps." Garner observes the advantages of dense mark bitmaps
for bulk zeroing, and concludes that both approaches have similar
marking performance, which supports our prediction that switching to
dense mark bitmaps will have negligible impact on mark phase
performance.
Traditionally, mark-sweep garbage collectors alternate between marking
and sweeping. However, there have various approaches to enabling
simultaneous mark and sweep in a concurrent garbage collector that
closely resemble our approach of allowing simultaneous mark and
allocation. Lamport [1976] introduced a "purple" extension to the
traditional tri-color abstraction that made it possible for the
sweeper to distinguish objects that were not marked in the previous
mark phase (and hence should be swept) from objects that are not yet
marked in the current mark phase, but may be marked later in the
phase. To reduce the cost of resetting these colors, Lamport’s
algorithm cycles through three different interpretations of the color
encoding. In contrast, our approach adheres to the tri-color
abstraction and simply alternates between two different bitmaps. This
means we have to reset the colors for every mark phase, but we arrange
the bitmap such that this cost is negligible. Queinnec’s "mark during
sweep" algorithm [Queinnec, 1989] alternates between two bitmaps like
our approach. However, unlike our approach, both Queinnec and Lamport
still depend on a sweeper to transform the mark bits into a free list
and to reset the mark bits back to white.
## Possible extensions
### 1-bit heap bitmap
With the mark bits no longer part of the heap bitmap, it’s possible we
could pack the heap bitmap more tightly, which would reduce its memory
footprint, improve cache locality, and may improve the performance of
the heap bitmap unroller (the most expensive step of malloc). One of
the two bits encoded in the heap bitmap for every word is a "dead"
bit, which forms a unary representation of the index of the last
pointer word of the object. Furthermore, it’s always safe to increase
this number (this is how we currently steal a bit for the mark bit).
We could store this information in base 2 in an object-indexed
structure and reduce overhead by only storing it for spans with a
sufficiently large size class (where the dead bit optimization
matters). Alternatively, we could continue storing it in unary, but at
lower fidelity, such as one dead bit per eight heap words.
### Reducing atomic access to mark bitmap
If the cost of atomically setting bits in the mark bitmap turns out to
be high, we could instead dedicate a byte per object for the mark.
This idea is mentioned in GC literature [Garner, 2007]. Obviously,
this involves an 8× increase in memory overhead. It’s likely that on
modern hardware, the cost of the atomic bit operation is small, while
the cost of increasing the cache footprint of the mark structure is
probably large.
Another way to reduce atomic access to the mark bitmap is to keep an
additional mark bitmap per P. When the garbage collector checks if an
object is marked, it first consults the shared bitmap. If it is not
marked there, it updates the shared bitmap by reading the entire word
(or cache line) containing the mark bit from each per-P mark bitmap,
combining these words using bitwise-or, and writing the entire word to
the shared bitmap. It can then re-check the bit. When the garbage
collector marks an object, it simply sets the bit in its per-P bitmap.
## References
Hans-Juergen Boehm and Mark Weiser. 1988. Garbage collection in an
uncooperative environment. Software Practice and Experience 18, 9
(September 1988), 807–820.
Robin Garner, Stephen M. Blackburn, and Daniel Frampton. 2007.
Effective prefetch for mark-sweep garbage collection. In Proceedings
of the 6th international symposium on Memory management (ISMM '07).
ACM, New York, NY, USA, 43–54.
Leslie Lamport. 1976. Garbage collection with multiple processes: An
exercise in parallelism. In International Conference on Parallel
Processing (ICPP). 50–54.
Christian Queinnec, Barbara Beaudoing, and Jean-Pierre Queille. 1989.
Mark DURING Sweep rather than Mark THEN Sweep. In Proceedings of the
Parallel Architectures and Languages Europe, Volume I: Parallel
Architectures (PARLE '89), Eddy Odijk, Martin Rem, and Jean-Claude
Syre (Eds.). Springer-Verlag, London, UK, UK, 224–237.
| proposal/design/12800-sweep-free-alloc.md/0 | {
"file_path": "proposal/design/12800-sweep-free-alloc.md",
"repo_id": "proposal",
"token_count": 4783
} | 661 |
# Proposal: Go Benchmark Data Format
Authors: Russ Cox, Austin Clements
Last updated: February 2016
Discussion at [golang.org/issue/14313](https://golang.org/issue/14313).
## Abstract
We propose to make the current output of `go test -bench` the defined format for recording all Go benchmark data.
Having a defined format allows benchmark measurement programs
and benchmark analysis programs to interoperate while
evolving independently.
## Background
### Benchmark data formats
We are unaware of any standard formats for recording raw benchmark data,
and we've been unable to find any using web searches.
One might expect that a standard benchmark suite such as SPEC CPU2006 would have
defined a format for raw results, but that appears not to be the case.
The [collection of published results](https://www.spec.org/cpu2006/results/)
includes only analyzed data ([example](https://www.spec.org/cpu2006/results/res2011q3/cpu2006-20110620-17230.txt)), not raw data.
Go has a de facto standard format for benchmark data:
the lines generated by the testing package when using `go test -bench`.
For example, running compress/flate's benchmarks produces this output:
BenchmarkDecodeDigitsSpeed1e4-8 100 154125 ns/op 64.88 MB/s 40418 B/op 7 allocs/op
BenchmarkDecodeDigitsSpeed1e5-8 10 1367632 ns/op 73.12 MB/s 41356 B/op 14 allocs/op
BenchmarkDecodeDigitsSpeed1e6-8 1 13879794 ns/op 72.05 MB/s 52056 B/op 94 allocs/op
BenchmarkDecodeDigitsDefault1e4-8 100 147551 ns/op 67.77 MB/s 40418 B/op 8 allocs/op
BenchmarkDecodeDigitsDefault1e5-8 10 1197672 ns/op 83.50 MB/s 41508 B/op 13 allocs/op
BenchmarkDecodeDigitsDefault1e6-8 1 11808775 ns/op 84.68 MB/s 53800 B/op 80 allocs/op
BenchmarkDecodeDigitsCompress1e4-8 100 143348 ns/op 69.76 MB/s 40417 B/op 8 allocs/op
BenchmarkDecodeDigitsCompress1e5-8 10 1185527 ns/op 84.35 MB/s 41508 B/op 13 allocs/op
BenchmarkDecodeDigitsCompress1e6-8 1 11740304 ns/op 85.18 MB/s 53800 B/op 80 allocs/op
BenchmarkDecodeTwainSpeed1e4-8 100 143665 ns/op 69.61 MB/s 40849 B/op 15 allocs/op
BenchmarkDecodeTwainSpeed1e5-8 10 1390359 ns/op 71.92 MB/s 45700 B/op 31 allocs/op
BenchmarkDecodeTwainSpeed1e6-8 1 12128469 ns/op 82.45 MB/s 89336 B/op 221 allocs/op
BenchmarkDecodeTwainDefault1e4-8 100 141916 ns/op 70.46 MB/s 40849 B/op 15 allocs/op
BenchmarkDecodeTwainDefault1e5-8 10 1076669 ns/op 92.88 MB/s 43820 B/op 28 allocs/op
BenchmarkDecodeTwainDefault1e6-8 1 10106485 ns/op 98.95 MB/s 71096 B/op 172 allocs/op
BenchmarkDecodeTwainCompress1e4-8 100 138516 ns/op 72.19 MB/s 40849 B/op 15 allocs/op
BenchmarkDecodeTwainCompress1e5-8 10 1227964 ns/op 81.44 MB/s 43316 B/op 25 allocs/op
BenchmarkDecodeTwainCompress1e6-8 1 10040347 ns/op 99.60 MB/s 72120 B/op 173 allocs/op
BenchmarkEncodeDigitsSpeed1e4-8 30 482808 ns/op 20.71 MB/s
BenchmarkEncodeDigitsSpeed1e5-8 5 2685455 ns/op 37.24 MB/s
BenchmarkEncodeDigitsSpeed1e6-8 1 24966055 ns/op 40.05 MB/s
BenchmarkEncodeDigitsDefault1e4-8 20 655592 ns/op 15.25 MB/s
BenchmarkEncodeDigitsDefault1e5-8 1 13000839 ns/op 7.69 MB/s
BenchmarkEncodeDigitsDefault1e6-8 1 136341747 ns/op 7.33 MB/s
BenchmarkEncodeDigitsCompress1e4-8 20 668083 ns/op 14.97 MB/s
BenchmarkEncodeDigitsCompress1e5-8 1 12301511 ns/op 8.13 MB/s
BenchmarkEncodeDigitsCompress1e6-8 1 137962041 ns/op 7.25 MB/s
The testing package always reports ns/op, and each benchmark can request the addition of MB/s (throughput) and also B/op and allocs/op (allocation rates).
### Benchmark processors
Multiple tools have been written that process this format,
most notably [benchcmp](https://godoc.org/golang.org/x/tools/cmd/benchcmp)
and its more statistically valid successor [benchstat](https://godoc.org/rsc.io/benchstat).
There is also [benchmany](https://godoc.org/github.com/aclements/go-misc/benchmany)'s plot subcommand
and likely more unpublished programs.
### Benchmark runners
Multiple tools have also been written that generate this format.
In addition to the standard Go testing package,
[compilebench](https://godoc.org/rsc.io/compilebench)
generates this data format based on runs of the Go compiler,
and Austin's unpublished shellbench generates this data format
after running an arbitrary shell command.
The [golang.org/x/benchmarks/bench](https://golang.org/x/benchmarks/bench) benchmarks
are notable for _not_ generating this format,
which has made all analysis of those results
more complex than we believe it should be.
We intend to update those benchmarks to generate the standard format,
once a standard format is defined.
Part of the motivation for the proposal is to avoid
the need to process custom output formats in future benchmarks.
## Proposal
A Go benchmark data file is a UTF-8 textual file consisting of a sequence of lines.
Configuration lines and benchmark result lines, described below,
have semantic meaning in the reporting of benchmark results.
All other lines in the data file, including but not limited to
blank lines and lines beginning with a # character, are ignored.
For example, the testing package prints test results above benchmark data,
usually the text `PASS`. That line is neither a configuration line nor a benchmark
result line, so it is ignored.
### Configuration Lines
A configuration line is a key-value pair of the form
key: value
where key begins with a lower case character (as defined by `unicode.IsLower`),
contains no space characters (as defined by `unicode.IsSpace`)
nor upper case characters (as defined by `unicode.IsUpper`),
and one or more ASCII space or tab characters separate “key:” from “value.”
Conventionally, multiword keys are written with the words
separated by hyphens, as in cpu-speed.
There are no restrictions on value, except that it cannot contain a newline character.
Value can be omitted entirely, in which case the colon must still be
present, but need not be followed by a space.
The interpretation of a key/value pair is up to tooling, but the key/value pair
is considered to describe all benchmark results that follow,
until overwritten by a configuration line with the same key.
### Benchmark Results
A benchmark result line has the general form
<name> <iterations> <value> <unit> [<value> <unit>...]
The fields are separated by runs of space characters (as defined by `unicode.IsSpace`),
so the line can be parsed with `strings.Fields`.
The line must have an even number of fields, and at least four.
The first field is the benchmark name, which must begin with `Benchmark`
followed by an upper case character (as defined by `unicode.IsUpper`)
or the end of the field,
as in `BenchmarkReverseString` or just `Benchmark`.
Tools displaying benchmark data conventionally omit the `Benchmark` prefix.
The same benchmark name can appear on multiple result lines,
indicating that the benchmark was run multiple times.
The second field gives the number of iterations run.
For most processing this number can be ignored, although
it may give some indication of the expected accuracy
of the measurements that follow.
The remaining fields report value/unit pairs in which the value
is a float64 that can be parsed by `strconv.ParseFloat`
and the unit explains the value, as in “64.88 MB/s”.
The units reported are typically normalized so that they can be
interpreted without considering the number of iterations.
In the example, the CPU cost is reported per-operation and the
throughput is reported per-second; neither is a total that
depends on the number of iterations.
### Value Units
A value's unit string is expected to specify not only the measurement unit
but also, as needed, a description of what is being measured.
For example, a benchmark might report its overall execution time
as well as cache miss times with three units “ns/op,” “L1-miss-ns/op,” and “L2-miss-ns/op.”
Tooling can expect that the unit strings are identical for all runs to be compared;
for example, a result reporting “ns/op” need not be considered comparable
to one reporting “µs/op.”
However, tooling may assume that the measurement unit is the final
of the hyphen-separated words in the unit string and may recognize
and rescale known measurement units.
For example, consistently large “ns/op” or “L1-miss-ns/op”
might be rescaled to “ms/op” or “L1-miss-ms/op” for display.
### Benchmark Name Configuration
In the current testing package, benchmark names correspond to Go identifiers:
each benchmark must be written as a different Go function.
[Work targeted for Go 1.7](https://github.com/golang/proposal/blob/master/design/12166-subtests.md) will allow tests and benchmarks
to define sub-tests and sub-benchmarks programmatically,
in particular to vary interesting parameters both when
testing and when benchmarking.
That work uses a slash to separate the name of a benchmark
collection from the description of a sub-benchmark.
We propose that sub-benchmarks adopt the convention of
choosing names that are key=value pairs;
that slash-prefixed key=value pairs in the benchmark name are
treated by benchmark data processors as per-benchmark
configuration values.
### Example
The benchmark output given in the background section above
is already in the format proposed here.
That is a key feature of the proposal.
However, a future run of the benchmark might add configuration lines,
and the benchmark might be rewritten to use sub-benchmarks,
producing this output:
commit: 7cd9055
commit-time: 2016-02-11T13:25:45-0500
goos: darwin
goarch: amd64
cpu: Intel(R) Core(TM) i7-4980HQ CPU @ 2.80GHz
cpu-count: 8
cpu-physical-count: 4
os: Mac OS X 10.11.3
mem: 16 GB
BenchmarkDecode/text=digits/level=speed/size=1e4-8 100 154125 ns/op 64.88 MB/s 40418 B/op 7 allocs/op
BenchmarkDecode/text=digits/level=speed/size=1e5-8 10 1367632 ns/op 73.12 MB/s 41356 B/op 14 allocs/op
BenchmarkDecode/text=digits/level=speed/size=1e6-8 1 13879794 ns/op 72.05 MB/s 52056 B/op 94 allocs/op
BenchmarkDecode/text=digits/level=default/size=1e4-8 100 147551 ns/op 67.77 MB/s 40418 B/op 8 allocs/op
BenchmarkDecode/text=digits/level=default/size=1e5-8 10 1197672 ns/op 83.50 MB/s 41508 B/op 13 allocs/op
BenchmarkDecode/text=digits/level=default/size=1e6-8 1 11808775 ns/op 84.68 MB/s 53800 B/op 80 allocs/op
BenchmarkDecode/text=digits/level=best/size=1e4-8 100 143348 ns/op 69.76 MB/s 40417 B/op 8 allocs/op
BenchmarkDecode/text=digits/level=best/size=1e5-8 10 1185527 ns/op 84.35 MB/s 41508 B/op 13 allocs/op
BenchmarkDecode/text=digits/level=best/size=1e6-8 1 11740304 ns/op 85.18 MB/s 53800 B/op 80 allocs/op
BenchmarkDecode/text=twain/level=speed/size=1e4-8 100 143665 ns/op 69.61 MB/s 40849 B/op 15 allocs/op
BenchmarkDecode/text=twain/level=speed/size=1e5-8 10 1390359 ns/op 71.92 MB/s 45700 B/op 31 allocs/op
BenchmarkDecode/text=twain/level=speed/size=1e6-8 1 12128469 ns/op 82.45 MB/s 89336 B/op 221 allocs/op
BenchmarkDecode/text=twain/level=default/size=1e4-8 100 141916 ns/op 70.46 MB/s 40849 B/op 15 allocs/op
BenchmarkDecode/text=twain/level=default/size=1e5-8 10 1076669 ns/op 92.88 MB/s 43820 B/op 28 allocs/op
BenchmarkDecode/text=twain/level=default/size=1e6-8 1 10106485 ns/op 98.95 MB/s 71096 B/op 172 allocs/op
BenchmarkDecode/text=twain/level=best/size=1e4-8 100 138516 ns/op 72.19 MB/s 40849 B/op 15 allocs/op
BenchmarkDecode/text=twain/level=best/size=1e5-8 10 1227964 ns/op 81.44 MB/s 43316 B/op 25 allocs/op
BenchmarkDecode/text=twain/level=best/size=1e6-8 1 10040347 ns/op 99.60 MB/s 72120 B/op 173 allocs/op
BenchmarkEncode/text=digits/level=speed/size=1e4-8 30 482808 ns/op 20.71 MB/s
BenchmarkEncode/text=digits/level=speed/size=1e5-8 5 2685455 ns/op 37.24 MB/s
BenchmarkEncode/text=digits/level=speed/size=1e6-8 1 24966055 ns/op 40.05 MB/s
BenchmarkEncode/text=digits/level=default/size=1e4-8 20 655592 ns/op 15.25 MB/s
BenchmarkEncode/text=digits/level=default/size=1e5-8 1 13000839 ns/op 7.69 MB/s
BenchmarkEncode/text=digits/level=default/size=1e6-8 1 136341747 ns/op 7.33 MB/s
BenchmarkEncode/text=digits/level=best/size=1e4-8 20 668083 ns/op 14.97 MB/s
BenchmarkEncode/text=digits/level=best/size=1e5-8 1 12301511 ns/op 8.13 MB/s
BenchmarkEncode/text=digits/level=best/size=1e6-8 1 137962041 ns/op 7.25 MB/s
Using sub-benchmarks has benefits beyond this proposal, namely that it would
avoid the current repetitive code:
func BenchmarkDecodeDigitsSpeed1e4(b *testing.B) { benchmarkDecode(b, digits, speed, 1e4) }
func BenchmarkDecodeDigitsSpeed1e5(b *testing.B) { benchmarkDecode(b, digits, speed, 1e5) }
func BenchmarkDecodeDigitsSpeed1e6(b *testing.B) { benchmarkDecode(b, digits, speed, 1e6) }
func BenchmarkDecodeDigitsDefault1e4(b *testing.B) { benchmarkDecode(b, digits, default_, 1e4) }
func BenchmarkDecodeDigitsDefault1e5(b *testing.B) { benchmarkDecode(b, digits, default_, 1e5) }
func BenchmarkDecodeDigitsDefault1e6(b *testing.B) { benchmarkDecode(b, digits, default_, 1e6) }
func BenchmarkDecodeDigitsCompress1e4(b *testing.B) { benchmarkDecode(b, digits, compress, 1e4) }
func BenchmarkDecodeDigitsCompress1e5(b *testing.B) { benchmarkDecode(b, digits, compress, 1e5) }
func BenchmarkDecodeDigitsCompress1e6(b *testing.B) { benchmarkDecode(b, digits, compress, 1e6) }
func BenchmarkDecodeTwainSpeed1e4(b *testing.B) { benchmarkDecode(b, twain, speed, 1e4) }
func BenchmarkDecodeTwainSpeed1e5(b *testing.B) { benchmarkDecode(b, twain, speed, 1e5) }
func BenchmarkDecodeTwainSpeed1e6(b *testing.B) { benchmarkDecode(b, twain, speed, 1e6) }
func BenchmarkDecodeTwainDefault1e4(b *testing.B) { benchmarkDecode(b, twain, default_, 1e4) }
func BenchmarkDecodeTwainDefault1e5(b *testing.B) { benchmarkDecode(b, twain, default_, 1e5) }
func BenchmarkDecodeTwainDefault1e6(b *testing.B) { benchmarkDecode(b, twain, default_, 1e6) }
func BenchmarkDecodeTwainCompress1e4(b *testing.B) { benchmarkDecode(b, twain, compress, 1e4) }
func BenchmarkDecodeTwainCompress1e5(b *testing.B) { benchmarkDecode(b, twain, compress, 1e5) }
func BenchmarkDecodeTwainCompress1e6(b *testing.B) { benchmarkDecode(b, twain, compress, 1e6) }
More importantly for this proposal, using sub-benchmarks also makes the possible
comparison axes clear: digits vs twain, speed vs default vs best, size 1e4 vs 1e5 vs 1e6.
## Rationale
As discussed in the background section,
we have already developed a number of analysis programs
that assume this proposal's format,
as well as a number of programs that generate this format.
Standardizing the format should encourage additional work
on both kinds of programs.
[Issue 12826](https://golang.org/issue/12826) suggests a different approach,
namely the addition of a new `go test` option `-benchformat`, to control
the format of benchmark output. In fact it gives the lack of standardization
as the main justification for a new option:
> Currently `go test -bench .` prints out benchmark results in a
> certain format, but there is no guarantee that this format will not
> change. Thus a tool that parses go test output may break if an
> incompatible change to the output format is made.
Our approach is instead to guarantee that the format will not change,
or rather that it will only change in ways allowed by this design.
An analysis tool that parses the output specified here will not break
in future versions of Go,
and a tool that generates the output specified here will work
with all such analysis tools.
Having one agreed-upon format enables broad interoperation;
the ability for one tool to generate arbitrarily many different formats
does not achieve the same result.
The proposed format also seems to be extensible enough to accommodate
anticipated future work on benchmark reporting.
The main known issue with the current `go test -bench` is that
we'd like to emit finer-grained detail about runs, for linearity testing
and more robust statistics (see [issue 10669](https://golang.org/issue/10669)).
This proposal allows that by simply printing more result lines.
Another known issue is that we may want to add custom outputs
such as garbage collector statistics to certain benchmark runs.
This proposal allows that by adding more value-unit pairs.
## Compatibility
Tools consuming existing benchmark format may need trivial changes
to ignore non-benchmark result lines or to cope with additional value-unit pairs
in benchmark results.
## Implementation
The benchmark format described here is already generated by `go test -bench`
and expected by tools like `benchcmp` and `benchstat`.
The format is trivial to generate, and it is
straightforward but not quite trivial to parse.
We anticipate that the [new x/perf subrepo](https://github.com/golang/go/issues/14304) will include a library for loading
benchmark data from files, although the format is also simple enough that
tools that want a different in-memory representation might reasonably
write separate parsers.
| proposal/design/14313-benchmark-format.md/0 | {
"file_path": "proposal/design/14313-benchmark-format.md",
"repo_id": "proposal",
"token_count": 6436
} | 662 |
# Generalized Types
This is a proposal for adding generics to Go, written by Ian Lance
Taylor in March, 2011.
This proposal will not be adopted.
It is being presented as an example for what a complete generics
proposal must cover.
## Introduction
This document describes a possible implementation of generalized types
in Go.
We introduce a new keyword, `gen`, which declares one or more type
parameters: types that are not known at compile time.
These type parameters may then be used in other declarations,
producing generalized types and functions.
Some goals, borrowed from [Garcia et al](https://web.archive.org/web/20170812055356/http://www.crest.iu.edu/publications/prints/2003/comparing_generic_programming03.pdf):
* Do not require an explicit relationship between a definition of a generalized function and its use. The function should be callable with any type that fits the required form.
* Permit interfaces to express relationships between types of methods, as in a comparison function that takes two parameters of the same unknown type.
* Given a generalized type, make it possible to use related types, such as a slice of that type.
* Do not require explicit instantiation of generalized functions.
* Permit type aliasing of generalized types.
The type parameter introduced by a `gen` declaration is a concept that
exists at compile time.
Any actual value that exists at runtime has a specific concrete type:
an ordinary non-generalized type, or a generalized type that has been
instantiated as a concrete type.
Generalized functions will be compiled to handle values whose types
are supplied at runtime.
This is what changes in the language:
* There is a new syntax for declaring a type parameter (or parameters) for the scope of one or more declarations.
* There is a new syntax for specifying the concrete type(s) to use when using something declared with a type parameter.
* There is a new syntax for converting values of concrete type, and untyped constants, to generalized types. Also values of generalized type are permitted in type assertions.
* Within a function, we define the operations permitted on values with a generalized type.
## Syntax
Any package-scope type or function declaration may be preceded with
the new keyword `gen` followed by a list of type parameter names in
square brackets:
```
gen [T] type Vector []T
```
This defines `T` as a type parameter for the generalized type `Vector`.
The scope of `Vector` is the same as it would be if `gen` did not appear.
A use of a generalized type will normally provide specific types to
use for the type parameters.
This is done using square brackets following the generalized type.
```
type VectorInt Vector[int]
var v1 Vector[int]
var v2 Vector[float32]
gen [T1, T2] type Pair struct { first T1; second T2 }
var v3 Pair[int, string]
```
Type parameters may also be used with functions.
```
gen [T] func SetHead(v Vector[T], e T) T {
v[0] = e
return e
}
```
For convenience, we permit a modified version of the factoring syntax
used with `var`, `type`, and `const` to permit a series of
declarations to share the same type parameters.
```
gen [T1, T2] (
type Pair struct { first T1; second T2 }
func MakePair(first T1, second T2) Pair {
return &Pair{first, second}
}
)
```
References to other names declared within the same gen block do not
have to specify the type parameters.
When the type parameters are omitted, they are assumed to simply be
the parameters declared for the block.
In the above example, `Pair` when used as the result type of
`MakePair` is equivalent to `Pair[T1, T2]`.
As with generalized types, we must specify the types when we refer to
a generalized function (but see the section on type deduction, below).
```
var MakeIntPair = MakePair[int, int]
var IntPairZero = MakeIntPair(0, 0)
```
A generalized type can have methods.
```
gen [T] func (v *Vector[T]) SetHeadMethod(e T) T {
v[0] = e
return e
}
```
Of course a method of a generalized type may itself be a generalized function.
```
gen [T, T2] func (v *Vector[T]) Transform(f func(T) T2) Vector[T2]
```
The `gen` keyword may only be used with a type or function.
It may only appear in package scope, not within a function.
One `gen` keyword may appear within the scope of another.
In that case, any use of the generalized type or function must specify
all the type parameters, starting with the outermost ones.
A different way of writing the last example would be:
```
gen [T] (
type Vector []T
gen [T2] func (v *Vector[T]) Transform(f func(T) T2) Vector[T2]
)
var v Vector[int]
var v2 = v.Transform[int, string](f)
```
Type deduction, described below, would permit omitting the
`[int, string]` in the last line, based on the types of `v` and `f`.
### A note on syntax
While the use of the `gen` keyword fits reasonably well into the
existing Go language, the use of square brackets to denote the
specific types is new to Go.
We have considered a number of different approaches:
* Use angle brackets, as in `Pair<int, string>`. This has the advantage of being familiar to C++ and Java programmers. Unfortunately, it means that `f<T>(true)` can be parsed as either a call to function `f<T>` or a comparison of `f<T` (an expression that tests whether `f` is less than `T`) with `(true)`. While it may be possible to construct complex resolution rules, the Go syntax avoids that sort of ambiguity for good reason.
* Overload the dot operator again, as in `Vector.int` or `Pair.(int, string)`. This becomes confusing when we see `Vector.(int)`, which could be a type assertion.
* We considered using dot but putting the type first, as in `int.Vector` or `(int, string).Pair`. It might be possible to make that work without ambiguity, but putting the types first seems to make the code harder to read.
* An earlier version of this proposal used parentheses for names after types, as in `Vector(int)`. However, that proposal was flawed because there was no way to specify types for generalized functions, and extending the parentheses syntax led to `MakePair(int, string)(1, "")` which seems less than ideal.
* We considered various different characters, such as backslash, dollar sign, at-sign or sharp. The square brackets grouped the parameters nicely and provide an acceptable visual appearance.
## Type Deduction
When calling a function, as opposed to referring to it without calling
it, the type parameters may be omitted in some cases.
A function call may omit the type parameters when every type parameter
is used for a regular parameter, or, in other words, there are no type
parameters that are used only for results.
In that case the compiler will compare the actual type of the
argument (`A`) with the type of the generalized parameter (`P`), examining
the arguments from left to right.
`A` and `P` must be identical.
The first time we see a type parameter in `P`, it will be set to the
appropriate portion of `A`.
If the type parameter appears again, it must be identical to the
actual type at that point.
Note that at compile time the argument type may itself be a
generalized type.
The type deduction algorithm is the same.
A type parameter of `P` may match a type parameter of `A`.
Once this match is made, then every subsequent instance of the `P` type
parameter must match the same `A` type parameter.
When doing type deduction with an untyped numeric constant, the
constant is given the type `int`, `float64`, or `complex128` as usual.
Type deduction does not support passing an untyped `nil` constant;
`nil` may only be used with an explicit type conversion (or, of course,
the type parameters may be written explicitly).
For example, these two variables will have the same type and value.
```
var v1 = MakePair[int, int](0, 0)
var v2 = MakePair(0, 0) // [int, int] deduced.
```
## Constraints
The only things that a generalized function can do with a value of
generalized type are the operations inherent to the type—e.g., the
`Vector` type can be indexed or sliced.
But sometimes we want to be able to say something about the types that
are used as part of a larger type.
Specifically, we want to say that they must implement a particular
interface.
So when listing the identifiers following `gen` we permit an optional
interface type following the name.
```
gen [T Stringer] type PrintableVector []T
```
Now `PrintableVector` may only be used with types that implement the
`Stringer` interface.
The interface may itself be a generalized type.
The scope of each type parameter starts at the `[`, and so we permit
using the type identifier just named with the generalized interface
type.
```
// Types that may be compared with some other type.
gen [T] type Comparer interface {
Compare(T) int // <0, ==0, >0
}
// Vector of elements that may be compared with themselves.
gen [T Comparer[T]] type SortableVector []T
```
## Example
```
package hashmap
gen [Keytype, Valtype] (
type bucket struct {
next *bucket
key Keytype
val Valtype
}
type Hashfn func(Keytype) uint
type Eqfn func(Keytype, Keytype) bool
type Hashmap struct {
hashfn Hashfn
eqfn Eqfn
buckets []bucket
entries int
}
// This function must be called with explicit type parameters, as
// there is no way to deduce the value type. For example,
// h := hashmap.New[int, string](hashfn, eqfn)
func New(hashfn Hashfn, eqfn Eqfn) *Hashmap {
return &Hashmap{hashfn, eqfn, make([]bucket, 16), 0}
}
func (p *Hashmap) Lookup(key Keytype) (val Valtype, found bool) {
h := p.hashfn(key) % len(p.buckets)
for b := p.buckets[h]; b != nil; b = b.next {
if p.eqfn(key, b.key) {
return b.val, true
}
}
return
}
func (p *Hashmap) Insert(key Keytype, val Valtype) (inserted bool) {
// Implementation omitted.
}
) // Ends gen.
package sample
import (
“fmt”
“hashmap”
“os”
)
func hashint(i int) uint {
return uint(i)
}
func eqint(i, j int) bool {
return i == j
}
var v = hashmap.New[int, string](hashint, eqint)
func Add(id int, name string) {
if !v.Insert(id, name) {
fmt.Println(“duplicate id”, id)
os.Exit(1)
}
}
func Find(id int) string {
val, found := v.Lookup(id)
if !found {
fmt.Println("missing id", id)
os.Exit(1)
}
return val
}
```
## Language spec changes
This is an outline of the changes required to the language spec.
### Types
A few paragraphs will be added to discuss generalized types.
### Struct types
While a struct may use a type parameter as an anonymous field, within
generalized code only the generalized definition is considered when
resolving field references.
That is, given
```
gen [T] type MyGenStruct struct { T }
type MyRealStruct { i int }
type MyInstStruct MyGenStruct[MyRealStruct]
gen [T] func GetI(p *MyGenStruct[T]) int {
return p.i // INVALID
}
func MyGetI(p *MyInstStruct) int {
return GetI(p)
}
```
the function `GetI` may not refer to the field `i` even though the
field exists when called from `MyGetI`.
(This restriction is fairly obvious if you think about it, but is
explicitly stated for clarity.)
### Type Identity
We define type identity for generalized types.
Two generalized types are identical if they have the same name and the
type parameters are identical.
### Assignability
We define assignability for generalized types.
A value `x` of generalized type `T1` is assignable to a variable of
type `T2` if `T1` and `T2` are identical.
A value `x` of concrete type is never assignable to a variable of
generalized type: a generalized type coercion is required (see below).
Similarly, a value `x` of generalized type is never assignable to a
variable of concrete type: a type assertion is required.
For example (more details given below):
```
gen [T] func Zero() (z T) {
z = 0 // INVALID: concrete to generalized.
z = int(0) // INVALID: concrete to generalized.
z = 0.[T] // Valid: generalized type coercion.
}
gen [T] func ToInt(v T) (r int) {
r = v // INVALID: generalized to concrete
r = int(v) // INVALID: no conversions for gen types
r, ok := v.(int) // Valid: generalized type assertion.
if !ok {
panic(“not int”)
}
}
```
### Declarations and scope
A new section Generalized declarations is added, consisting of a few
paragraphs that describe generalized declarations and the gen syntax.
### Indexes
The new syntax `x[T]` for a generalized type or function is defined,
where `T` is a type and `x` is the name of some type or function
declared within a `gen` scope.
### Type assertions
We define type assertions using generalized types.
Given `x.(T)` where `x` is a value with generalized type and `T` is a
concrete type, the type assertion succeeds if the concrete type of `x`
is identical to `T`, or, if `T` is an interface type, the concrete
type implements the interface `T`.
In other words, pretty much the same as doing a type assertion of a
value of interface type.
If `x` and `T` are both generalized types, we do the same test using
the concrete types of `x` and `T`.
In general these assertions must be checked at runtime.
### Generalized type coercions
We introduce a new syntax for coercing a value of concrete type to a
generalized type.
Where `x` is a value with concrete type and `T` is a generalized type,
the expression `x.[T]` coerces `x` to the generalized type `T`.
The generalized type coercion may succeed or fail, just as with a type
assertion.
However, it is not a pure type assertion, as we permit `x` to be an
untyped constant.
The generalized type coercion succeeds if the concrete type matches
the generalized type, where any parameters of the generalized type
match the appropriate portion of the concrete type.
If the same parameter appears more than once in the generalized type,
it must match identical types in the concrete type.
If the value is an untyped constant, the coercion succeeds if an
assignment of that constant to the concrete type would succeed at
compile time.
### Calls
This section is extended to describe the type deduction algorithm used
to avoid explicit type parameters when possible.
An implicit generalized type conversion is applied to convert the
arguments to the expected generalized type, even though normally
values of concrete type are not assignable to variables of generalized
type.
Type checking ensures that the arguments must be assignable to the
concrete type which is either specified or deduced, and so this
implicit generalized type conversion will always succeed.
When a result parameter has a generalized type, an implicit type
assertion is applied to convert back to the type that the caller
expects, which may be a concrete type.
The type expected by the caller is determined by the type parameters
passed to the function, whether determined via type deduction or not.
This implicit type assertion will always succeed.
For example, in
```
gen [T] func Identity(v T) T { return v }
func Call(i int) { j := Identity(i) }
```
the variable `j` gets the type `int`, and an implicit type assertion
converts the return value of `Identity[int]` to `int`.
### Conversions
Nothing needs to change in this section.
I just want to note explicitly that there are no type conversions for
generalized types other than the standard conversions that apply to
all types.
### Type switches
A type switch may be used on a value of generalized type.
Type switch cases may include generalized types.
The rules are the same as for type assertions.
### For statements
A range clause may be used with a value of generalized type, if the
generalized type is known to be a slice, array, map or channel.
## Implementation
Any actual value in Go will have a concrete type.
The implementation issue that arises is how to compile a function that
has parameters with generalized type.
### Representation
When calling a function that uses type parameters, the type parameters
are passed first, as pointers to a runtime type descriptor.
The type parameters are thus literally additional parameters to the
functions.
### Types
In some cases it will be necessary to create a new type at runtime,
which means creating a new runtime type descriptor.
It will be necessary to ensure that type descriptor comparisons
continue to work correctly.
For example, the hashmap example above will require creating a new
type for each call to `hashmap.New` for the concrete types that are used
in the call.
The reflect package already creates new runtime type descriptors in
the functions `PtrTo`, `ChanOf`, `FuncOf`, etc.
Type reflection on a generalized type will return the appropriate
runtime type descriptor, which may have been newly created.
Calling `Name()` on such a type descriptor will return a name with the
appropriate type parameters: e.g, `“Vector[int]”`.
### Variable declarations
A local variable in a function may be declared with a generalized
type.
In the general case, the size of the variable is unknown, and must be
retrieved from the type descriptor.
Declaring a local variable of unknown size will dynamically allocate
zeroed memory of the appropriate size.
As an optimization the memory may be allocated on the stack when there
is sufficient room.
### Composite literals
A generalized type that is defined to be a struct, array, slice, or
map type may be used to create a composite literal.
The expression has the same generalized type.
The elements of the composite literal must follow the assignability
rules.
### Selectors
When `x` is a value of generalized type that is a struct, `x.f` can
refer to a field of that struct.
Whether `f` is a field of `x` is known at compile time.
The exact offset of the field in the struct value may not be known.
When it is not known, the field offset is retrieved from the type
descriptor at runtime.
Similarly, `x.f` may refer to a method of the type.
In this case the method is always known at compile time.
As noted above under struct types, if a generalized struct type uses a
type parameter as an anonymous field, the compiler does not attempt to
look up a field name in the concrete type of the field at runtime.
### Indexes
A value of a generalized type that is an array, slice or map may be indexed.
Note that when indexing into a map type, the type of the value must be
assignable to the map’s key type;
in practice this means that if the map’s key type is generalized, the
value must itself have the same generalized type.
Indexing into a generalized array or slice may require multiplying by
the element size found in the type descriptor.
Indexing into a generalized map may require a new runtime function.
### Slices
A value of a generalized type that is an array or slice may itself be
sliced.
This operation is essentially the same as a slice of a value of
concrete type.
### Type Assertions
A type assertion generally requires a runtime check, and in the
general case requires comparing two concrete types at runtime, where
one of the types is known to instantiate some generalized type.
The complexity of the runtime check is linear in the number of tokens
in the generalized type, and requires storage space to store type
parameters during the check.
This check could be inlined into the code, or it could use a general
purpose runtime check that compares the concrete type descriptor to a
similar representation of the generalized type.
### Calls
Function calls can require converting normal values to generalized
values.
This operation depends on the representation chosen for the
generalized value.
In the worst case it will be similar to passing a normal value to a
function that takes an interface type.
When calling a function with type parameters, the type parameters will
be passed first, as a pointer to a runtime type descriptor.
Function calls can also require converting generalized return values
to normal values.
This is done via an implicitly inserted type assertion.
Depending on the representation, this may not require any actual code
to be generated.
### Communication operators
We have to implement sending and receiving generalized values for
channels of generalized type.
### Assignments
We have to implement assignment of generalized values.
This will be based on the runtime type descriptor.
### Type switches
We have to implement type switches using generalized types.
This will mostly likely devolve into a series of if statements using
type assertions.
### For statements
We have to implement for statements with range clauses over
generalized types.
This is similar to the indexing and communication operators.
### Select statements
We have to implement select on channels of generalized type.
### Return statements
We have to implement returning a value of generalized type.
### Specialization of functions
This proposal is intended to support compiling a generalized function
into code that operates on generalized values.
In fact, it requires that this work.
```
package p1
gen [T] func Call(f func (T) T, T) T {
return f(T)
}
package p2
func SliceIdentity(a []int) []int {
return a
}
package p3
var v = p1.Call(p2.SliceIdentity, make([]int, 10))
```
Here `Call` has to support calling a generalized function.
There is no straightforward specialization process that can implement
this case.
(It could be done if the full source code of p1 and p2 are available either when compiling p3 or at link time;
that is how C++ does it, but it is not an approach that fits well with Go.)
However, for many cases, this proposal can be implemented using
function specialization.
Whenever the compiler can use type deduction for a function call, and
the types are known concrete types, and the body of the function is
available, the compiler can generate a version of the function
specialized for those types.
This is, therefore, an optional optimization, in effect a form of
cross-package inlining, which costs compilation time but improves
runtime.
## Methods on builtin types
This is an optional addendum to the proposal described above.
The proposal does not provide a convenient way to write a function
that works on any numeric type.
For example, there is no convenient way to write this:
```
gen [T] func SliceAverage(a []T) T {
s := T(0)
for _, v = range a {
s += v
}
return s / len(a)
}
```
It would be nice if that function worked for any numeric type.
However, it is not permitted under the proposal described above,
because of the use of `+=` and `/`.
These operators are not available for every type and therefore are not
available for a generalized type.
This approach does work:
```
gen [T] type Number interface {
Plus(T) T
Divide(T) T
}
gen [T Number[T]] func SliceAverage(a []T) T {
s := 0.[T]
for _, v = range a {
s = s.Plus(v)
}
return s.Divide(len(a))
}
```
However, this requires writing explicit `Plus` and `Divide` methods for
each type you want to use.
These methods are themselves boilerplate:
```
func (i MyNum) Plus(v MyNum) MyNum { return i + v }
func (i MyNum) Divide(v MyNum) MyNum { return i / v }
```
This proposal does not help with this kind of boilerplate function,
because there is no way to use operators with generalized values.
There are a few ways to solve this.
One way that seems to fit well with Go as extended by this proposal is
to declare that for all types that support some language operator, the
type has a corresponding method.
That is, we say that if the type can be used with `+`, the language
defines a method `Plus` (or `Binary+` or whatever) for the type that
implements the operation.
This method can then be picked up by an interface such as the above,
and the standard library can define convenient aggregate interfaces,
such as an interface listing all the methods supported by an integer
type.
Note that it would not help for the standard library to define a
`Plus` method for every integer type, as those methods would not carry
over to user defined types.
## Operator methods
It is of course a smallish step from those language-defined methods to
having operator methods, which would permit writing generalized code
using operators rather than method calls. For the purposes of using
generalized types, however, this is less important than having
language defined methods for operators.
## Summary
This proposal will not be adopted.
It has significant flaws.
The factored `gen` syntax is convenient but looks awkward on the page.
You wind up with a trailing close parenthesis after a set of function
definitions.
Indenting all the function definitions looks silly.
This proposal doesn't let me write a trivial generalized `Max`
function, unless we include operator methods.
Even when we include operator methods, `Max` has to be written in
terms of a `Less` method.
The handling of untyped constants in generalized functions is
extremely awkward.
They must always use a generalized type coercion.
While this proposal is more or less palatable for data structures,
it is much weaker for functions.
You basically can't do anything with a generalized type,
except assign it and call a method on it.
Writing standardized algorithms will require developing a whole
vocabulary of quasi-standard methods.
The proposal doesn't help write functions that work on either `[]byte`
or `string`, unless those types get additional operator methods like
`Index` and `Len`.
Even operator methods don't help with using `range`.
| proposal/design/15292/2011-03-gen.md/0 | {
"file_path": "proposal/design/15292/2011-03-gen.md",
"repo_id": "proposal",
"token_count": 6633
} | 663 |
# Proposal: XML Stream
Author(s): Sam Whited <sam@samwhited.com>
Last updated: 2017-03-09
Discussion at https://golang.org/issue/19480
## Abstract
The `encoding/xml` package contains an API for tokenizing an XML stream, but no
API exists for processing or manipulating the resulting token stream.
This proposal describes such an API.
## Background
The [`encoding/xml`][encoding/xml] package contains APIs for tokenizing an XML
stream and decoding that token stream into native data types.
Once unmarshaled, the data can then be manipulated and transformed.
However, this is not always ideal.
If we cannot change the type we are unmarshaling into and it does not match the
XML format we are attempting to deserialize, eg. if the type is defined in a
separate package or cannot be modified for API compatibility reasons, we may
have to first unmarshal into a type we control, then copy each field over to the
original type; this is cumbersome and verbose.
Unmarshaling into a struct is also lossy.
As stated in the XML package:
> Mapping between XML elements and data structures is inherently flawed:
> an XML element is an order-dependent collection of anonymous values, while a
> data structure is an order-independent collection of named values.
This means that transforming the XML stream itself cannot necessarily be
accomplished by deserializing into a struct and then reserializing the struct
back to XML; instead it requires manipulating the XML tokens directly.
This may require re-implementing parts of the XML package, for instance, when
renaming an element the start and end tags would have to be matched in user code
so that they can both be transformed to the new name.
To address these issues, an API for manipulating the token stream itself, before
marshaling or unmarshaling occurs, is necessary.
Ideally, such an API should allow for the composition of complex XML
transformations from simple, well understood building blocks.
The transducer pattern, widely available in functional languages, matches these
requirements perfectly.
Transducers (also called, transformers, adapters, etc.) are iterators that
provide a set of operations for manipulating the data being iterated over.
Common transducer operations include Map, Reduce, Filter, etc. and these
operations are already widely known and understood.
## Proposal
The proposed API introduces two concepts that do not already exist in the
`encoding/xml` package:
```go
// A Tokenizer is anything that can decode a stream of XML tokens, including an
// xml.Decoder.
type Tokenizer interface {
Token() (xml.Token, error)
Skip() error
}
// A Transformer is a function that takes a Tokenizer and returns a new
// Tokenizer which outputs a transformed token stream.
type Transformer func(src Tokenizer) Tokenizer
```
Common transducer operations will also be included:
```go
// Inspect performs an operation for each token in the stream without
// transforming the stream in any way.
// It is often injected into the middle of a transformer pipeline for debugging.
func Inspect(f func(t xml.Token)) Transformer {}
// Map transforms the tokens in the input using the given mapping function.
func Map(mapping func(t xml.Token) xml.Token) Transformer {}
// Remove returns a Transformer that removes tokens for which f matches.
func Remove(f func(t xml.Token) bool) Transformer {}
```
Because Go does not provide a generic iterator concept, this (and all
transducers in the Go libraries) are domain specific, meaning operations that
only make sense when discussing XML tokens can also be included:
```go
// RemoveElement returns a Transformer that removes entire elements (and their
// children) if f matches the elements start token.
func RemoveElement(f func(start xml.StartElement) bool) Transformer {}
```
## Rationale
Transducers are commonly used in functional programming and in languages that
take inspiration from functional programming languages, including Go.
Examples include [Clojure transducers][clojure/transducer], [Rust
adapters][rust/adapter], and the various "Transformer" types used throughout Go,
such as in the [`golang.org/x/text/transform`][transform] package.
Because transducers are so widely used (and already used elsewhere in Go), they
are well understood.
## Compatibility
This proposal introduces two new exported types and 4 exported functions that
would be covered by the compatibility promise.
A minimal set of Transformers is proposed, but others can be added at a later
date without breaking backwards compatibility.
## Implementation
A version of this API is already implemented in the
[`mellium.im/xmlstream`][xmlstream] package.
If this proposal is accepted, the author volunteers to copy the relevant parts
to the correct location before the 1.9 (or 1.10, depending on the length of this
proposal process) planning cycle closes.
## Open issues
- Where does this API live?
It could live in the `encoding/xml` package itself, in another package (eg.
`encoding/xml/stream`) or, temporarily or permanently, in the subrepos:
`golang.org/x/xml/stream`.
- A Transformer for removing attributes from `xml.StartElement`'s was originally
proposed as part of this API, but its implementation is more difficult to do
efficiently since each use of `RemoveAttr` in a pipeline would need to iterate
over the `xml.Attr` slice separately.
- Existing APIs in the XML package such as `DecodeElement` require an
`xml.Decoder` to function and could not be used with the Tokenizer interface
used in this package.
A compatibility API may be needed to create a new Decoder with an underlying
tokenizer.
This would require that the new functionality reside in the `encoding/xml`
package.
Alternatively, general Decoder methods could be reimplemented in a new package
with the Tokenizer API.
[encoding/xml]: https://golang.org/pkg/encoding/xml/
[clojure/transducer]: https://clojure.org/reference/transducers
[rust/adapter]: https://doc.rust-lang.org/std/iter/#adapters
[transform]: https://godoc.org/golang.org/x/text/transform
[xmlstream]: https://godoc.org/mellium.im/xmlstream
| proposal/design/19480-xml-stream.md/0 | {
"file_path": "proposal/design/19480-xml-stream.md",
"repo_id": "proposal",
"token_count": 1524
} | 664 |
# Proposal: Raw XML Token
Author(s): Sam Whited <sam@samwhited.com>
Last updated: 2018-09-01
Discussion at https://golang.org/issue/26756
CL at https://golang.org/cl/127435
## Abstract
This proposal defines a mechanism by which users can emulate the `,innerxml`
struct tag using XML tokens.
## Background
When using the `"*Encoder".EncodeToken` API to write tokens to an XML stream,
it is currently not possible to fully emulate the behavior of `Marshal`.
Specifically, there is no functionality that lets users output XML equivalent to
the `,innerxml` struct tag which inserts raw, unescaped, XML into the output.
For example, consider the following:
e := xml.NewEncoder(os.Stdout)
e.Encode(struct {
XMLName xml.Name `xml:"raw"`
Inner string `xml:",innerxml"`
}{
Inner: `<test:test xmlns:test="urn:example:golang"/>`,
})
// Output: <raw><test:test xmlns:test="urn:example:golang"/></raw>
This cannot be done with the token based output because all token types are
currently escaped.
For example, attempting to output the raw XML as character data results in the
following:
e.EncodeToken(xml.CharData(rawOut))
e.Flush()
// <test:test xmlns:test="urn:example:golang">
## Proposal
The proposed API introduces an XML pseudo-token: `RawXML`.
```go
// RawXML represents some data that should be passed through without escaping.
// Like a struct field with the ",innerxml" tag, RawXML is written to the
// stream verbatim and is not subject to the usual escaping rules.
type RawXML []byte
// Copy creates a new copy of RawXML.
func (r RawXML) Copy() RawXML { … }
```
## Rationale
When attempting to match the output of legacy XML encoders which may produce
broken escaping, or match the output of XML encoders that support features that
are not currently supported by the [`encoding/xml`] package such as namespace
prefixes it is often desirable to use `,rawxml`.
However, if the user is primarily using the token stream API, it may not be
desirable to switch between encoding tokens and encoding native structures which
is cumbersome and forces a call to `Flush`.
Being able to generate the same output from both the SAX-like and DOM-like APIs
would also allow future proposals the option of fully unifying the two APIs by
creating an encoder equivalent to the `NewTokenDecoder` function.
## Compatibility
This proposal introduces one new exported type that would be covered by the
compatibility promise.
## Implementation
Implementation of this proposal is trivial, comprising some 5 lines of code
(excluding tests and comments).
[CL 127435] has been created to demonstrate the concept.
## Open issues
None.
[`encoding/xml`]: https://golang.org/pkg/encoding/xml/
[CL 127435]: https://golang.org/cl/127435
| proposal/design/26756-rawxml-token.md/0 | {
"file_path": "proposal/design/26756-rawxml-token.md",
"repo_id": "proposal",
"token_count": 835
} | 665 |
# Go 1.2 Field Selectors and Nil Checks
Author: Russ Cox
Last updated: July 2013
Discussion at https://go.dev/issue/4238.
Originally at https://go.dev/s/go12nil.
Implemented in Go 1.2 release.
## Abstract
For Go 1.2, we need to define that, if `x` is a pointer to a struct
type and `x == nil`, `&x.Field` causes a runtime panic rather than
silently producing an unusable pointer.
## Background
Today, if you have:
```Go
package main
type T struct {
Field1 int32
Field2 int32
}
type T2 struct {
X [1<<24]byte
Field int32
}
func main() {
var x *T
p1 := &x.Field1
p2 := &x.Field2
var x2 *T2
p3 := &x2.Field
}
```
then:
* `p1 == nil`; dereferencing it causes a panic
* `p2 != nil` (it has pointer value 4); but dereferencing it still
causes a panic
* p3 is not computed: `&x2.Field` panics to avoid producing a pointer
that might point into mapped memory.
The spec does not define what should happen when `&x.Field` is evaluated
for `x == nil`.
The answer probably should not depend on `Field`’s offset within the
struct.
The current behavior is at best merely historical accident; it was
definitely not thought through or discussed.
Those three behaviors are three possible definitions.
The behavior for `p2` is clearly undesirable, since it creates
unusable pointers that cannot be detected as unusable.
hat leaves `p1` (`&x.Field` is `nil` if `x` is `nil`) and `p3`
(`&x.Field` panics if `x` is `nil`).
An analogous form of the question concerns `&x[i]` where `x` is a
`nil` pointer to an array.
The current behaviors match those of the struct exactly, depending in
the same way on both the offset of the field and the overall size of
the array.
A related question is how `&*x` should evaluate when `x` is `nil`.
In C, `&*x == x` even when `x` is `nil`.
The spec again is silent.
The gc compilers go out of their way to implement the C rule (it
seemed like a good idea at the time).
A simplified version of a recent example is:
```Go
type T struct {
f int64
sync.Mutex
}
var x *T
x.Lock()
```
The method call turns into `(&x.Mutex).Lock()`, which today is passed
a receiver with pointer value `8` and panics inside the method,
accessing a `sync.Mutex` field.
## Proposed Definition
If `x` is a `nil` pointer to a struct, then evaluating `&x.Field`
always panics.
If `x` is a `nil` pointer to an array, then evaluating `&x[i]`
or `x[i:j]` panics.
If `x` is a `nil` pointer, then evaluating `&*x` panics.
In general, the result of an evaluation of `&expr` either panics or
returns a non-nil pointer.
## Rationale
The alternative, defining `&x.Field == nil` when `x` is `nil`, delays
the error check.
That feels more like something that belongs in a dynamically typed
language like Python or JavaScript than in Go.
Put another way, it pushes the panic farther away from the problem.
We have not seen a compelling use case for allowing `&x.Field == nil`.
Panicking during `&x.Field` is no more expensive (perhaps less) than
defining `&x.Field == nil`.
It is difficult to justify allowing `&*x` but not `&x.Field`.
They are different expressions of the same computation.
The guarantee that `&expr`—when it evaluates successfully—is always a
non-nil pointer makes intuitive sense and avoids a surprise: how can
you take the address of something and get `nil`?
## Implementation
The addressable expressions are: “a variable, pointer indirection, or
slice indexing operation; or a field selector of an addressable struct
operand; or an array indexing operation of an addressable array.”
The address of a variable can never be `nil`; the address of a slice
indexing operation is already checked because a `nil` slice will have
`0` length, so any index is invalid.
That leaves pointer indirections, field selector of struct, and index
of array, confirming at least that we’re considering the complete set
of cases.
Assuming `x` is in register AX, the current x86 implementation of case
`p3` is to read from the memory `x` points at:
```
TEST 0(AX), AX
```
That causes a fault when `x` is nil.
Unfortunately, it also causes a read from the memory location `x`,
even if the actual field being addressed is later in memory.
This can cause unnecessary cache conflicts if different goroutines own
different sections of a large array and one is writing to the first
entry.
(It is tempting to use a conditional move instruction:
```
TEST AX, AX
CMOVZ 0, AX
```
Unfortunately, the definition of the conditional move is that the load
is unconditional and only the assignment is conditional, so the fault
at address `0` would happen always.)
An alternate implementation would be to test `x` itself and use a
conditional jump:
```
TEST AX, AX
JNZ ok (branch hint: likely)
MOV $0, 0
ok:
```
This is more code (something like 7 bytes instead of 3) but may run
more efficiently, as it avoids spurious memory references and will be
predicted easily.
(Note that defining `&x.Field == nil` would require at least that much
code, if not a little more, except when the offset is `0`.)
It will probably be important to have a basic flow analysis for
variables, so that the compiler can avoid re-testing the same pointer
over and over in a given function.
I started on that general topic a year ago and got a prototype working
but then put it aside (the goal then was index bounds check
elimination).
It could be adapted easily for nil check elimination.
| proposal/design/4238-go12nil.md/0 | {
"file_path": "proposal/design/4238-go12nil.md",
"repo_id": "proposal",
"token_count": 1698
} | 666 |
# User-configurable memory target
Author: Michael Knyszek
Updated: 16 February 2021
## Background
Issue [#23044](https://golang.org/issue/23044) proposed the addition of some
kind of API to provide a "minimum heap" size; that is, the minimum heap goal
that the GC would ever set.
The purpose of a minimum heap size, as explored in that proposal, is as a
performance optimization: by preventing the heap from shrinking, each GC cycle
will get longer as the live heap shrinks further beyond the minimum.
While `GOGC` already provides a way for Go users to trade off GC CPU time and
heap memory use, the argument against setting `GOGC` higher is that a live heap
spike is potentially dangerous, since the Go GC will use proportionally more
memory with a high proportional constant.
Instead, users (including a [high-profile account by
Twitch](https://blog.twitch.tv/en/2019/04/10/go-memory-ballast-how-i-learnt-to-stop-worrying-and-love-the-heap-26c2462549a2/))
have resorted to using a heap ballast: a large memory allocation that the Go GC
includes in its live heap size, but does not actually take up any resident
pages, according to the OS.
This technique thus effectively sets a minimum heap size in the runtime.
The main disadvantage of this technique is portability.
It relies on implementation-specific behavior, namely that the runtime will not
touch that new allocation, thereby preventing the OS from backing that space
with RAM on Unix-like systems.
It also relies on the Go GC never scanning that allocation.
This technique is also platform-specific, because on Windows such an allocation
would always count as committed.
Today, the Go GC already has a fixed minimum heap size of 4 MiB.
The reasons around this minimum heap size stem largely from a failure to account
for alternative GC work sources.
See [the GC pacer problems meta-issue](https://golang.org/issue/42430) for more
details.
The problems are resolved by a [proposed GC pacer
redesign](https://golang.org/issue/44167).
## Design
I propose the addition of the following API to the `runtime/debug` package:
```go
// SetMemoryTarget provides a hint to the runtime that it can use at least
// amount bytes of memory. amount is the sum total of in-use Go-related memory
// that the Go runtime can measure.
//
// That explictly includes:
// - Space and fragmentation for goroutine stacks.
// - Space for runtime structures.
// - The size of the heap, with fragmentation.
// - Space for global variables (including dynamically-loaded plugins).
//
// And it explicitly excludes:
// - Read-only parts of the Go program, such as machine instructions.
// - Any non-Go memory present in the process, such as from C or another
// language runtime.
// - Memory required to maintain OS kernel resources that this process has a
// handle to.
// - Memory allocated via low-level functions in the syscall package, like Mmap.
//
// The intuition and goal with this definition is the ability to treat the Go
// part of any system as a black box: runtime overheads and fragmentation that
// are otherwise difficult to account for are explicitly included.
// Anything that is difficult or impossible for the runtime to measure portably
// is excluded. For these cases, the user is encouraged to monitor these
// sources for their particular system and update the memory target as
// necessary.
//
// The runtime is free to ignore the hint at any time.
//
// In practice, the runtime will use this hint to run the garbage collector
// less frequently by using up any additional memory up-front. Any memory used
// beyond that will obey the GOGC trade-off.
//
// If the GOGC mechanism is turned off, the hint is always ignored.
//
// Note that if the memory target is set higher than the amount of memory
// available on the system, the Go runtime may attempt to use all that memory,
// and trigger an out-of-memory condition.
//
// An amount of 0 will retract the hint. A negative amount will always be
// ignored.
//
// Returns the old memory target, or -1 if none was previously set.
func SetMemoryTarget(amount int) int
```
The design of this feature builds off of the [proposed GC pacer
redesign](https://golang.org/issue/44167).
I propose we move forward with almost exactly what issue
[#23044](https://golang.org/issue/23044) proposed, namely exposing the heap
minimum and making it configurable via a runtime API.
The behavior of `SetMemoryTarget` is thus analogous to the common (but
non-standard) Java runtime flag `-Xms` (with Adaptive Size Policy disabled).
With the GC pacer redesign, smooth behavior here should be straightforward to
ensure, as the troubles here basically boil down to the "high `GOGC`" issue
mentioned in that design.
There's one missing piece and that's how to turn the hint (which is memory use)
into a heap goal.
Because the heap goal includes both stacks and globals, I propose that we
compute the heap goal as follows:
```
Heap goal = amount
// These are runtime overheads.
- MemStats.GCSys
- Memstats.MSpanSys
- MemStats.MCacheSys
- MemStats.BuckHashSys
- MemStats.OtherSys
- MemStats.StackSys
// Fragmentation.
- (MemStats.HeapSys-MemStats.HeapInuse)
- (MemStats.StackInuse-(unused portions of stack spans))
```
What this formula leaves us with is a value that should include:
1. Stack space that is actually allocated for goroutine stacks,
1. Global variables (so the part of the binary mapped read-write), and
1. Heap space allocated for objects.
These are the three factors that go into determining the `GOGC`-based heap goal
according to the GC pacer redesign.
Note that while at first it might appear that this definition of the heap goal
will cause significant jitter in what the heap goal is actually set to, runtime
overheads and fragmentation tend to be remarkably stable over the lifetime of a
Go process.
In an ideal world, that would be it, but as the API documentation points out,
there are a number of sources of memory that are unaccounted for that deserve
more explanation.
Firstly, there are the read-only parts of the binary, like the instructions
themselves, but these parts' impact on memory use is murkier since the
operating system tends to de-duplicate this memory between processes.
Furthermore, on platforms like Linux, this memory is always evictable, down to
the last available page.
As a result, I intentionally ignore that factor here.
If the size of the binary is a factor, unfortunately it will be up to the user
to subtract out that size from the amount they pass to `SetMemoryTarget`.
The second source of memory is anything non-Go, such as C code (or, say, a
Python VM) running in the same process.
These sources also need to be accounted for by the user because this could be
absolutely anything, and portably interacting with the large number of different
possibilities is infeasible.
Luckily, `SetMemoryTarget` is a run-time API that can be made to respond to
changes in external memory sources that Go could not possibly be aware of, so
the API recommends updating the target on-line if need be.
Another source of memory use is kernel memory.
If the Go process holds onto kernel resources that use memory within the kernel
itself, those are unaccounted for.
Unfortunately, while this API tries to avoid situations where the user needs to
make conservative estimates, this is one such case.
As far as I know, most systems do not associate kernel memory with a process, so
querying and reacting to this information is just impossible.
The final source of memory is memory that's created by the Go program, but that
the runtime isn't necessarily aware of, like explicitly `Mmap`'d memory.
Theoretically the Go runtime could be aware of this specific case, but this is
tricky to account for in general given the wide range of options that can be
passed to `mmap`-like functionality on various platforms.
Sometimes it's worth accounting for it, sometimes not.
I believe it's best to leave that up to the user.
To validate the design, I ran several [simulations](#simulations) of this
implementation.
In general, the runtime is resilient to a changing heap target (even one that
changes wildly) but shrinking the heap target significantly has the potential to
cause GC CPU utilization spikes.
This is by design: the runtime suddenly has much less runway than it thought
before the change, so it needs to make that up to reach its goal.
The only issue I found with this formulation is the potential for consistent
undershoot in the case where the heap size is very small, mostly because we
place a limit on how late a GC cycle can start.
I think this is OK, and I don't think we should alter our current setting.
This choice means that in extreme cases, there may be some missed performance.
But I don't think it's enough to justify the additional complexity.
### Simulations
These simulations were produced by the same tool as those for the [GC pacer
redesign](https://github.com/golang/go/issues/44167).
That is,
[github.com/mknyszek/pacer-model](https://github.com/mknyszek/pacer-model).
See the GC pacer design document for a list of caveats and assumptions, as well
as a description of each subplot, though the plots are mostly straightforward.
**Small heap target.**
In this scenario, we set a fairly small target (around 64 MiB) as a baseline.
This target is fairly close to what `GOGC` would have picked.
Mid-way through the scenario, the live heap grows a bit.
![](44309/low-heap-target.png)
Notes:
- There's a small amount of overshoot when the live heap size changes, which is
expected.
- The pacer is otherwise resilient to changes in the live heap size.
**Very small heap target.**
In this scenario, we set a fairly small target (around 64 MiB) as a baseline.
This target is much smaller than what `GOGC` would have picked, since the live
heap grows to around 5 GiB.
![](44309/very-low-heap-target.png)
Notes:
- `GOGC` takes over very quickly.
**Large heap target.**
In this scenario, we set a fairly large target (around 2 GiB).
This target is fairly far from what `GOGC` would have picked.
Mid-way through the scenario, the live heap grows a lot.
![](44309/high-heap-target.png)
Notes:
- There's a medium amount of overshoot when the live heap size changes, which is
expected.
- The pacer is otherwise resilient to changes in the live heap size.
**Exceed heap target.**
In this scenario, we set a fairly small target (around 64 MiB) as a baseline.
This target is fairly close to what `GOGC` would have picked.
Mid-way through the scenario, the live heap grows enough such that we exit the
memory target regime and enter the `GOGC` regime.
![](44309/exceed-heap-target.png)
Notes:
- There's a small amount of overshoot when the live heap size changes, which is
expected.
- The pacer is otherwise resilient to changes in the live heap size.
- The pacer smoothly transitions between regimes.
**Exceed heap target with a high GOGC.**
In this scenario, we set a fairly small target (around 64 MiB) as a baseline.
This target is fairly close to what `GOGC` would have picked.
Mid-way through the scenario, the live heap grows enough such that we exit the
memory target regime and enter the `GOGC` regime.
The `GOGC` value is set very high.
![](44309/exceed-heap-target-high-GOGC.png)
Notes:
- There's a small amount of overshoot when the live heap size changes, which is
expected.
- The pacer is otherwise resilient to changes in the live heap size.
- The pacer smoothly transitions between regimes.
**Change in heap target.**
In this scenario, the heap target is set mid-way through execution, to around
256 MiB.
This target is fairly far from what `GOGC` would have picked.
The live heap stays constant, meanwhile.
![](44309/step-heap-target.png)
Notes:
- The pacer is resilient to changes in the heap target.
- There's no overshoot.
**Noisy heap target.**
In this scenario, the heap target is set once per GC and is somewhat noisy.
It swings at most 3% around 2 GiB.
This target is fairly far from what `GOGC` would have picked.
Mid-way through the live heap increases.
![](44309/low-noise-heap-target.png)
Notes:
- The pacer is otherwise resilient to a noisy heap target.
- There's expected overshoot when the live heap size changes.
- GC CPU utilization bounces around slightly.
**Very noisy heap target.**
In this scenario, the heap target is set once per GC and is very noisy.
It swings at most 50% around 2 GiB.
This target is fairly far from what `GOGC` would have picked.
Mid-way through the live heap increases.
![](44309/high-noise-heap-target.png)
Notes:
- The pacer is otherwise resilient to a noisy heap target.
- There's expected overshoot when the live heap size changes.
- GC CPU utilization bounces around, but not much.
**Large heap target with a change in allocation rate.**
In this scenario, we set a fairly large target (around 2 GiB).
This target is fairly far from what `GOGC` would have picked.
Mid-way through the simulation, the application begins to suddenly allocate much
more aggressively.
![](44309/heavy-step-alloc-high-heap-target.png)
Notes:
- The pacer is otherwise resilient to changes in the live heap size.
- There's no overshoot.
- There's a spike in utilization that's consistent with other simulations of the
GC pacer.
- The live heap grows due to floating garbage from the high allocation rate
causing each GC cycle to start earlier.
### Interactions with other GC mechanisms
Although listed already in the API documentation, there are a few additional
details I want to consider.
#### GOGC
The design of the new pacer means that switching between the "memory target"
regime and the `GOGC` regime (the regimes being defined as the mechanism that
determines the heap goal) is very smooth.
While the live heap times `1+GOGC/100` is less than the heap goal set by the
memory target, we are in the memory target regime.
Otherwise, we are in the `GOGC` regime.
Notice that as `GOGC` rises to higher and higher values, the range of the memory
target regime shrinks.
At infinity, meaning `GOGC=off`, the memory target regime no longer exists.
Therefore, it's very clear to me that the memory target should be completely
ignored if `GOGC` is set to "off" or a negative value.
#### Memory limit
If we choose to also adopt an API for setting a memory limit in the runtime, it
would necessarily always need to override a memory target, though both could
plausibly be active simultaneously.
If that memory limit interacts with `GOGC` being set to "off," then the rule of
the memory target being ignored holds; the memory limit effectively acts like a
target in that circumstance.
If the two are set to an equal value, that behavior is virtually identical to
`GOGC` being set to "off" and *only* a memory limit being set.
Therefore, we need only check that these two cases behave identically.
Note however that otherwise that the memory target and the memory limit define
different regimes, so they're otherwise orthogonal.
While there's a fairly large gap between the two (relative to `GOGC`), the two
are easy to separate.
Where it gets tricky is when they're relatively close, and this case would need
to be tested extensively.
## Risks
The primary risk with this proposal is adding another "knob" to the garbage
collector, with `GOGC` famously being the only one.
Lots of language runtimes provide flags and options that alter the behavior of
the garbage collector, but when the number of flags gets large, maintaining
every possible configuration becomes a daunting, if not impossible task, because
the space of possible configurations explodes with each knob.
This risk is a strong reason to be judicious.
The bar for new knobs is high.
But there are a few good reasons why this might still be important.
The truth is, this API already exists, but is considered unsupported and is
otherwise unmaintained.
The API exists in the form of heap ballasts, a concept we can thank Hyrum's Law
for.
It's already possible for an application to "fool" the garbage collector into
thinking there's more live memory than there actually is.
The downside is resizing the ballast is never going to be nearly as reactive as
the garbage collector itself, because it is at the mercy of the runtime
managing the user application.
The simple fact is performance-sensitive Go users are going to write this code
anyway.
It is worth noting that unlike a memory maximum, for instance, a memory target
is purely an optimization.
On the whole, I suspect it's better for the Go ecosystem for there to be a
single solution to this problem in the standard library, rather than solutions
that *by construction* will never be as good.
And I believe we can mitigate some of the risks with "knob explosion."
The memory target, as defined above, has very carefully specified and limited
interactions with other (potential) GC knobs.
Going forward I believe a good criterion for the addition of new knobs should be
that a knob should only be added if it is *only* fully orthogonal with `GOGC`,
and nothing else.
## Monitoring
I propose adding a new metric to the `runtime/metrics` package to enable
monitoring of the memory target, since that is a new value that could change at
runtime.
I propose the metric name `/memory/config/target:bytes` for this purpose.
Otherwise, it could be useful for an operator to understand which regime the Go
application is operating in at any given time.
We currently expose the `/gc/heap/goal:bytes` metric which could theoretically
be used to determine this, but because of the dynamic nature of the heap goal in
this regime, it won't be clear which regime the application is in at-a-glance.
Therefore, I propose adding another metric `/memory/goal:bytes`.
This metric is analogous to `/gc/heap/goal:bytes` but is directly comparable
with `/memory/config/target:bytes` (that is, it includes additional overheads
beyond just what goes into the heap goal, it "converts back").
When this metric "bottoms out" at a flat line, that should serve as a clear
indicator that the pacer is in the "target" regime.
This same metric could be reused for a memory limit in the future, where it will
"top out" at the limit.
## Documentation
This API has an inherent complexity as it directly influences the behavior of
the Go garbage collector.
It also deals with memory accounting, a process that is infamously (and
unfortunately) difficult to wrap one's head around and get right.
Effective use of this API will come down to having good documentation.
The documentation will have two target audiences: software developers, and
systems administrators (referred to as "developers" and "operators,"
respectively).
For both audiences, it's incredibly important to understand exactly what's
included and excluded in the memory target.
That is why it is explicitly broken down in the most visible possible place for
a developer: the documentation for the API itself.
For the operator, the `runtime/metrics` metric definition should either
duplicate this documentation, or point to the API.
This documentation is important for immediate use and understanding, but API
documentation is never going to be expressive enough.
I propose also introducing a new document to the `doc` directory in the Go
source tree that explains common use-cases, extreme scenarios, and what to
expect in monitoring in these various situations.
This document should include a list of known bugs and how they might appear in
monitoring.
In other words, it should include a more formal and living version of the [GC
pacer meta-issue](https://golang.org/issues/42430).
The hard truth is that memory accounting and GC behavior are always going to
fall short in some cases, and it's immensely useful to be honest and up-front
about those cases where they're known, while always striving to do better.
As every other document in this directory, it would be a living document that
will grow as new scenarios are discovered, bugs are fixed, and new functionality
is made available.
## Alternatives considered
Since this is a performance optimization, it's possible to do nothing.
But as I mentioned in [the risks section](#risks), I think there's a solid
justification for doing *something*.
Another alternative I considered was to provide better hooks into the runtime to
allow users to implement equivalent functionality themselves.
Today, we provide `debug.SetGCPercent` as well as access to a number of runtime
statistics.
Thanks to work done for the `runtime/metrics` package, that information is now
much more efficiently accessible.
By exposing just the right metric, one could imagine a background goroutine that
calls `debug.SetGCPercent` in response to polling for metrics.
The reason why I ended up discarding this alternative, however, is this then
forces the user writing the code that relies on the implementation details of
garbage collector.
For instance, a reasonable implementation of a memory target using the above
mechanism would be to make an adjustment each time the heap goal changes.
What if future GC implementations don't have a heap goal? Furthermore, the heap
goal needs to be sampled; what if GCs are occurring rapidly? Should the runtime
expose when a GC ends? What if the new GC design is fully incremental, and there
is no well-defined notion of "GC end"? It suffices to say that in order to keep
Go implementations open to new possibilities, we should avoid any behavior that
exposes implementation details.
## Go 1 backwards compatibility
This change only adds to the Go standard library's API surface, and is therefore
Go 1 backwards compatible.
## Implementation
Michael Knyszek will implement this.
1. Implement in the runtime.
1. Extend the pacer simulation test suite with this use-case in a variety of
configurations.
| proposal/design/44309-user-configurable-memory-target.md/0 | {
"file_path": "proposal/design/44309-user-configurable-memory-target.md",
"repo_id": "proposal",
"token_count": 5442
} | 667 |
# Proposal: Extended backwards compatibility for Go
Russ Cox \
December 2022
Earlier discussion at https://go.dev/issue/55090.
Proposal at https://go.dev/issue/56986.
## Abstract
Go's emphasis on backwards compatibility is one of its key strengths.
There are, however, times when we cannot maintain strict compatibility,
such as when changing sort algorithms or fixing clear bugs,
when existing code depends on the old algorithm or the buggy behavior.
This proposal aims to address many such situations by keeping older Go programs
executing the same way even when built with newer Go distributions.
## Background
This proposal is about backward compatibility, meaning
**new versions of Go compiling older Go code**.
Old versions of Go compiling newer Go code is a separate problem,
with a different solution.
There is not a proposal yet.
For now, see
[the discussion about forward compatibility](https://github.com/golang/go/discussions/55092).
Go 1 introduced Go's [compatibility promise](https://go.dev/doc/go1compat),
which says that old programs will by and large continue to run correctly in new versions of Go.
There is an exception for security problems and certain other implementation overfitting.
For example, code that depends on a given type _not_ implementing a particular interface
may change behavior when the type adds a new method, which we are allowed to do.
We now have about ten years of experience with Go 1 compatibility.
In general it works very well for the Go team and for developers.
However, there are also practices we've developed since then
that it doesn't capture (specifically GODEBUG settings),
and there are still times when developers' programs break.
I think it is worth extending our approach to try to break programs even less often,
as well as to explicitly codify GODEBUG settings
and clarify when they are and are not appropriate.
As background, I've been talking to the Kubernetes team
about their experiences with Go.
It turns out that Go's been averaging about one Kubernetes-breaking
change per year for the past few years.
I don't think Kubernetes is an outlier here:
I expect most large projects have similar experiences.
Once per year is not high, but it's not zero either,
and our goal with Go 1 compatibility is zero.
Here are some examples of Kubernetes-breaking changes that we've made:
- [Go 1.17 changed net.ParseIP](https://go.dev/doc/go1.17#net)
to reject addresses with leading zeros, like 0127.0000.0000.0001.
Go interpreted them as decimal, following some RFCs,
while all BSD-derived systems interpret them as octal.
Rejecting them avoids taking part in parser misalignment bugs.
(Here is an [arguably exaggerated security report](https://github.com/sickcodes/security/blob/master/advisories/SICK-2021-016.md).)
Kubernetes clusters may have stored configs using such addresses,
so this bug [required them to make a copy of the parsers](https://github.com/kubernetes/kubernetes/issues/100895)
in order to keep accessing old data.
In the interim, they were blocked from updating to Go 1.17.
- [Go 1.15 changed crypto/x509](https://go.dev/doc/go1.15#commonname)
not to fall back to a certificate's CN field to find a host name when the SAN field was omitted.
The old behavior was preserved when using `GODEBUG=x509ignoreCN=0`.
[Go 1.17 removed support for that setting](https://go.dev/doc/go1.17#crypto/x509).
The Go 1.15 change [broke a Kubernetes test](https://github.com/kubernetes/kubernetes/pull/93426)
and [required a warning to users in Kubernetes 1.19 release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.19.md#api-change-4).
The [Kubernetes 1.23 release notes](https://github.com/kubernetes/kubernetes/blob/776cff391524478b61212dbb6ea48c58ab4359e1/CHANGELOG/CHANGELOG-1.23.md#no-really-you-must-read-this-before-you-upgrade)
warned users who were using the GODEBUG override that it was gone.
- [Go 1.18 dropped support for SHA1 certificates](https://go.dev/doc/go1.18#sha1),
with a `GODEBUG=x509sha1=1` override.
We announced removal of that setting for Go 1.19
but changed plans on request from Kubernetes.
SHA1 certificates are apparently still used by some enterprise CAs
for on-prem Kubernetes installations.
- [Go 1.19 changed LookPath behavior](https://go.dev/doc/go1.19#os-exec-path)
to remove an important class of security bugs,
but the change may also break existing programs,
so we included a `GODEBUG=execerrdot=0` override.
The impact of this change on Kubernetes is still uncertain:
the Kubernetes developers flagged it as risky enough to warrant further investigation.
These kinds of behavioral changes don't only cause pain for Kubernetes developers and users.
They also make it impossible to update older, long-term-supported versions
of Kubernetes to a newer version of Go.
Those older versions don't have the same access to performance improvements and bug fixes.
Again, this is not specific to Kubernetes.
I am sure lots of projects are in similar situations.
As the examples show, over time we've adopted a practice
of being able to opt out of these risky changes using `GODEBUG` settings.
The examples also show that we have probably been too aggressive
about removing those settings.
But the settings themselves have clearly become an important part of Go's compatibility story.
Other important compatibility-related GODEBUG settings include:
- `GODEBUG=asyncpreemptoff=1` disables signal-based goroutine preemption, which occasionally uncovers operating system bugs.
- `GODEBUG=cgocheck=0` disables the runtime's cgo pointer checks.
- `GODEBUG=cpu.<extension>=off` disables use of a particular CPU extension at run time.
- `GODEBUG=http2client=0` disables client-side HTTP/2.
- `GODEBUG=http2server=0` disables server-side HTTP/2.
- `GODEBUG=netdns=cgo` forces use of the cgo resolver.
- `GODEBUG=netdns=go` forces use of the Go DNS resolver
Programs that need one to use these can usually set
the GODEBUG variable in `func init` of package main,
but for runtime variables, that's too late:
the runtime reads the variable early in Go program startup,
before any of the user program has run yet.
For those programs, the environment variable must be set in the execution environment.
It cannot be “carried with” the program.
Another problem with the GODEBUGs is that you have to know they exist.
If you have a large system written for Go 1.17 and want to update to Go 1.18's toolchain,
you need to know which settings to flip to keep as close to Go 1.17 semantics as possible.
I believe that we should make it even easier and safer
for large projects like Kubernetes to update to new Go releases.
See also my [talk on this topic at GopherCon](https://www.youtube.com/watch?v=v24wrd3RwGo).
## Proposal
I propose that we formalize and expand our use of GODEBUG to provide
compatibility beyond what is guaranteed by the current
[compatibility guidelines](https://go.dev/doc/go1compat).
Specifically, I propose that we:
1. Commit to always adding a GODEBUG setting for changes
allowed by the compatibility guidelines but that
nonetheless are likely to break a significant number of real programs.
2. Guarantee that GODEBUG settings last for at least 2 years (4 releases).
That is only a minimum; some, like `http2server`, will likely last forever.
3. Provide a runtime/metrics counter `/godebug/non-default-behavior/<name>:events`
to observe non-default-behavior due to GODEBUG settings.
4. Set the default GODEBUG settings based on the `go` line the main module's go.mod,
so that updating to a new Go toolchain with an unmodified go.mod
mimics the older release.
5. Allow overriding specific default GODEBUG settings in the source code for package main
using one or more lines of the form
//go:debug <name>=<value>
The GODEBUG environment variable set when a programs runs
would continue to override both these lines
and the default inferred from the go.mod `go` line.
An unrecognized //go:debug setting is a build error.
6. Adjust the `go/build` API to report these new `//go:debug` lines. Specifically, add this type:
type Comment struct {
Pos token.Position
Text string
}
and then in type `Package` we would add a new field
Directives []Comment
This field would collect all `//go:*` directives before the package line, not just `//go:debug`,
in the hopes of supporting any future need for directives.
7. Adjust `go list` output to have a new field `DefaultGODEBUG string` set for main packages,
reporting the combination of the go.mod-based defaults and the source code overrides,
as well as adding to `Package` new fields `Directives`, `TestDirectives,` and `XTestDirectives`, all of type `[]string`.
8. Add a new `DefaultGODEBUG` setting to `debug.BuildInfo.Settings`,
to be reported by `go version -m` and other tools
that inspect build details.
9. Document these commitments as well as how to use GODEBUG in
the [compatibility guidelines](https://golang.org/doc/go1compat).
## Rationale
The main alternate approach is to keep on doing what we are doing,
without these additions.
That makes it difficult for Kubernetes and other large projects
to update in a timely fashion, which cuts them off from performance improvements
and eventually security fixes.
An alternative way to provide these improvements and fixes would be to
extend Go's release support window to two or more years,
but that would require significantly more work
and would be a serious drag on the Go project overall.
It is better to focus our energy as well as the energy of Go developers
on the latest release.
Making it safer to update to the latest release does just that.
The rest of this section gives the affirmative case for each of the enumerated items
in the previous section.
1. Building on the rest of the compatibility guidelines, this commitment will
give developers added confidence that they can update to a new Go toolchain
safely with minimal disruption to their programs.
2. In the past we have planned to remove a GODEBUG after only a single release.
A single release cycle - six months - may well be too short for some developers,
especially where the GODEBUGs are adjusting settings that affect external
systems, like which protocols are used. For example, Go 1.14 (Feb 2020) removed
NPN support in crypto/tls,
but we patched it back into Google's internal Go toolchain
for almost three years while we waited for updates to
network devices that used NPN.
Today that would probably be a GODEBUG setting, and it would be
an example of something that takes a large company more than
six months to resolve.
3. When a developer is using a GODEBUG override, they need to be able to find out
whether it is safe to remove the override. Obviously testing is a good first step,
but production metrics can confirm what testing seems to show.
If the production systems are reporting zeros for `/godebug/non-default-behavior/<name>`,
that is strong evidence for the safety of removing that override.
4. Having the GODEBUG settings is not enough. Developers need to be able to determine
which ones to use when updating to a new Go toolchain.
Instead of forcing developers to look up what is new from one toolchain to the next,
setting the default to match the `go` line in `go.mod` keeps the program behavior
as close to the old toolchain as possible.
5. When developers do update the `go` line to a new Go version, they may still need to
keep a specific GODEBUG set to mimic an older toolchain.
There needs to be some way to bake that into the build:
it's not okay to make end users set an environment variable to run a program,
and setting the variable in main.main or even main's init can be too late.
The `//go:debug` lines provide a clear way to set those specific GODEBUGs,
presumably alongside comments explaining why they are needed and
when they can be removed.
6. This API is needed for the go command and other tools to scan source files
and find the new `//go:debug` lines.
7. This provides an easy way for developers to understand which default GODEBUG
their programs will be compiled with. It will be particularly useful when switching
from one `go` line to another.
8. This provides an easy way for developers to understand which default GODEBUG
their existing programs have been compiled with.
9. The compatibility documentation should explain all this so developers know about it.
## Compatibility
This entire proposal is about compatibility.
It does not violate any existing compatibility requirements.
It is worth pointing out that the GODEBUG mechanism is appropriate for security deprecations,
such as the SHA1 retirement, but not security fixes, like changing the version of LookPath
used by tools in the Go distribution. Security fixes need to always apply when building with
a new toolchain, not just when the `go` line has been moved forward.
One of the hard rules of point releases is it really must not break anyone,
because we never want someone to be unable to add an urgent security fix
due to some breakage in that same point release or an earlier one in the sequence.
That applies to the security fixes themselves too.
This means it is up to the authors of the security fix to find a fix
that does not require a GODEBUG.
LookPath is a good example.
There was a reported bug affecting go toolchain programs,
and we fixed the bug by making the LookPath change
in a forked copy of os/exec specifically for those programs.
We left the toolchain-wide fix for a major Go release precisely
because of the compatibility issue.
The same is true of net.ParseIP.
We decided it was an important security-hardening fix but on balance
inappropriate for a point release because of the potential for breakage.
It's hard for me to think of a security problem that would be so critical
that it must be fixed in a point release and simultaneously so broad
that the fix fundamentally must break unaffected user programs as collateral damage.
To date I believe we've always found a way to avoid such a fix,
and I think the onus is on those of us preparing security releases to continue to do that.
If this change is made in Go 1.N, then only GODEBUG settings introduced in Go 1.N
will be the first ones that are defaulted differently for earlier go.mod go lines.
Settings introduced in earlier Go versions will be accessible using `//go:debug`
but will not change their defaults based on the go.mod line.
The reason for this is compatibility: we want Go 1.N to behave as close as possible to Go 1.(N-1),
which did not change defaults based on the go.mod line.
To make this concrete, consider the GODEBUG `randautoseed=0`, which is supported in Go 1.20
to simulate Go 1.19 behavior.
When Go 1.20 builds a module that says `go 1.19`, it gets `randautoseed=1` behavior,
because Go 1.20 does not implement this GODEBUG proposal.
It would be strange for Go 1.21 to build the same code and turn on `randautoseed=1` behavior.
Updating from Go 1.19 to Go 1.20 has already incurred the behavior change
and potential breakage.
Updating from Go 1.20 to Go 1.21 should not revert the behavior change
and cause more potential breakage.
Continuing the concrete examples, Go 1.20 introduces a new GODEBUG
zipinsecurepath, which defaults to 1 in Go 1.20 to preserve old behavior
and allow insecure paths (for example absolute paths or paths starting with `../`).
Go 1.21 may change the default to 0, to start rejecting insecure paths in archive/zip.
If so, and if Go 1.21 also implements this GODEBUG proposal,
then modules with `go 1.20` lines compiled with Go 1.21 would keep allowing insecure paths.
Only when those modules update to `go 1.21` would they start rejecting insecure paths.
Of course, they could stay on Go 1.20 and add `//go:debug zipinsecurepath=0` to main
to get just the new behavior early,
and they could also update to Go 1.21 and add `//go:debug zipinsecurepath=1` to main
to opt out of the new behavior.
## Implementation
Overall the implementation is fairly short and straightforward.
Documentation probably outweighs new code.
Russ Cox, Michael Matloob, and Bryan Mills will do the work.
A complete sketch of the implementation is in
[CL 453618](https://go.dev/cl/453618),
[CL 453619](https://go.dev/cl/453619),
[CL 453603](https://go.dev/cl/453603),
[CL 453604](https://go.dev/cl/453604), and
[CL 453605](https://go.dev/cl/453605).
The sketch does not include tests and documentation.
| proposal/design/56986-godebug.md/0 | {
"file_path": "proposal/design/56986-godebug.md",
"repo_id": "proposal",
"token_count": 4440
} | 668 |
# Design Draft: Go Vulnerability Database
Authors: Roland Shoemaker, Filippo Valsorda
[golang.org/design/draft-vulndb](https://golang.org/design/draft-vulndb)
This is a Draft Design, not a formal Go proposal, since it is a
large change that is still flexible.
The goal of circulating this draft design is to collect feedback
to shape an intended eventual proposal.
## Goal
We want to provide a low-noise, reliable way for Go developers to
be alerted of known security vulnerabilities that affect their
applications.
We aim to build a first-party, curated, consistent database of
security vulnerabilities open to community submissions, and
static analysis tooling to surface only the vulnerabilities that
are likely to affect an application, minimizing false positives.
## The database
The vulnerability database will provide entries for known
vulnerabilities in importable (non-main) Go packages in public
modules.
**Curated dataset.**
The database will be actively maintained by the Go Security team,
and will provide consistent metadata and uniform analysis of the
tracked vulnerabilities, with a focus on enabling not just
detection, but also precise impact assessment.
**Basic metadata.**
Entries will include a database-specific unique identifier for
the vulnerability, affected package and version ranges, a coarse
severity grade, and `GOOS`/`GOARCH` if applicable.
If missing, we will also assign a CVE number.
**Targeting metadata.**
Each database entry will include metadata sufficient to enable
detection of impacted downstream applications with low false
positives.
For example, it will include affected symbols (functions,
methods, types, variables…) so that unaffected consumers can be
identified with static analysis.
**Web pages.**
Each vulnerability will link to a web page with the description
of the vulnerability, remediation instructions, and additional
links.
**Source of truth.**
The database will be maintained as a public git repository,
similar to other Go repositories.
The database entries will be available via a stable protocol (see
“The protocol”).
The contents of the repository itself will be in an internal
format which can change without notice.
**Triage process.**
Candidate entries will be sourced from existing streams (such as
the CVE database, and security mailing lists) as well as
community submissions.
Both will be processed by the team to ensure consistent metadata
and analysis.
*We want to specifically encourage maintainers to report
vulnerabilities in their own modules.*
**Not a disclosure process.**
Note that the goal of this database is tracking known, public
vulnerabilities, not coordinating the disclosure of new findings.
## The protocol
The vulnerability database will be served through a simple,
stable HTTPS and JSON-based protocol.
Vulnerabilities will be grouped by module, and an index file will
list the modules with known vulnerabilities and the last time
each entry has been updated.
The protocol will be designed to be served as a collection of
static files, and cacheable by simple HTTP proxies.
The index allows downloading and hosting a full mirror of the
database to avoid leaking module usage information.
Multiple databases can be fetched in parallel, and their entries
are combined, enabling private and commercial databases.
We’ll aim to use an interoperable format.
## The tooling
The primary consumer of the database and the protocol will be a
Go tool, tentatively `go audit`, which will analyze a module and
report what vulnerabilities it’s affected by.
The tool will analyze what vulnerabilities are likely to affect
the current module not only based on the versions of the
dependencies, but also based on the packages and code paths that
are reachable from a configured set of entry points (functions
and methods).
The precision of this analysis will be configurable.
When available, the tool will provide sample traces of how the
vulnerable code is reachable, to aid in assessing impact and
remediation.
The tool accepts a list of packages and reports the
vulnerabilities that affect them (considering as entry points the
`main` and `init` functions for main packages, and exported
functions and methods for non-main packages).
The tool will also support a `-json` output mode, to integrate
reports in other tools, processes such as CI, and UIs, like how
golang.org/x/tools/go/packages tools use `go list -json`.
### Integrations
Besides direct invocations on the CLI and in CI, we want to make
vulnerability entries and audit reports widely available.
The details of each integration involve some open questions.
**vscode-go** will surface reports for vulnerabilities affecting
the workspace and offer easy version bumps.
*Open question*: can vscode-go invoke `go audit`, or do we need a
tighter integration into `gopls`?
**pkg.go.dev** will show vulnerabilities in the displayed
package, and possibly vulnerabilities in its dependencies.
*Open question*: if we analyze transitive dependencies, what
versions should we consider?
At **runtime**, programs will be able to query reports affecting
the dependencies they were built with through `debug.BuildInfo`.
*Open question*: how should applications handle the fact that
runtime reports will have higher false positives due to lack of
source code access?
In the future, we'll also consider integration into other `go`
tool commands, like `go get` and/or `go test`.
Finally, we hope the entries in the database will flow into other
existing systems that provide vulnerability tracking, with their
own integrations.
| proposal/design/draft-vulndb.md/0 | {
"file_path": "proposal/design/draft-vulndb.md",
"repo_id": "proposal",
"token_count": 1276
} | 669 |
# Generics — Problem Overview
Russ Cox\
August 27, 2018
## Introduction
This overview and the accompanying
[detailed draft design](go2draft-contracts.md)
are part of a collection of [Go 2 draft design documents](go2draft.md).
The overall goal of the Go 2 effort is to address
the most significant ways that Go fails to scale
to large code bases and large developer efforts.
The Go team, and in particular Ian Lance Taylor,
has been investigating and discussing possible designs for "generics"
(that is, parametric polymorphism; see note below)
since before Go’s first open source release.
We understood from experience with C++ and Java
that the topic was rich and complex and would take
a long time to understand well enough to design a good solution.
Instead of attempting that at the start,
we spent our time on features more directly applicable to Go’s initial target
of networked system software (now "cloud software"),
such as concurrency, scalable builds, and low-latency garbage collection.
After the release of Go 1, we continued to explore various possible
designs for generics, and in April 2016 we
[released those early designs](https://go.googlesource.com/proposal/+/master/design/15292-generics.md#),
discussed in detail below.
As part of re-entering "design mode" for the Go 2 effort, we are again
attempting to find a design for generics that we feel fits well into
the language while providing enough of the flexibility and
expressivity that users want.
Some form of generics was one of the top two requested features in both the
[2016](https://blog.golang.org/survey2016-results) and
[2017](https://blog.golang.org/survey2017-results)
Go user surveys (the other was package management).
The Go community maintains a
"[Summary of Go Generics Discussions](https://docs.google.com/document/d/1vrAy9gMpMoS3uaVphB32uVXX4pi-HnNjkMEgyAHX4N4/view#heading=h.vuko0u3txoew)"
document.
Many people have concluded (incorrectly) that the Go team’s position
is "Go will never have generics." On the contrary, we understand the
potential generics have, both to make Go far more flexible and
powerful and to make Go far more complicated.
If we are to add generics, we want to do it in a way that gets as much
flexibility and power with as little added complexity as possible.
_Note on terminology_: Generalization based on type parameters was
called parametric polymorphism when it was
[first identified in 1967](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.332.3161&rep=rep1&type=pdf)
and for decades thereafter in the functional programming community.
The [GJ proposal](http://homepages.inf.ed.ac.uk/wadler/papers/gj-oopsla/gj-oopsla-letter.pdf),
which led to adding parametric polymorphism in Java 5, changed the
terminology first to "genericity" and eventually to "generics".
All imperative languages since Java that have added support for
parametric polymorphism have called it "generics." We make no
distinction between the terms, but it is important to emphasize that
"generics" means more than just generic data containers.
## Problem
To scale Go to large code bases and developer efforts, it is important that code reuse work well.
Indeed, one early focus for Go was simply to make sure that programs consisting of many independent packages built quickly, so that code reuse was not too expensive.
One of Go’s key distinguishing features is its approach to interfaces, which are also targeted directly at code reuse.
Specifically, interfaces make it possible to write abstract implementations of algorithms that elide unnecessary detail.
For example,
[container/heap](https://godoc.org/container/heap)
provides heap-maintenance algorithms as ordinary functions that operate on a
[heap.Interface](https://godoc.org/container/heap#Interface),
making them applicable to any backing storage, not just a slice of values.
This can be very powerful.
At the same time, most programmers who want a priority queue
don’t want to implement the underlying storage for it and then invoke the heap algorithms.
They would prefer to let the implementation manage its own array,
but Go does not permit expressing that in a type-safe way.
The closest one can come is to make a priority queue of `interface{}` values
and use type assertions after fetching each element.
(The standard [`container/list`](https://golang.org/pkg/container/list)
and [`container/ring`](https://golang.org/pkg/container/ring) implementations take this approach.)
Polymorphic programming is about more than data containers.
There are many general algorithms we might want to implement
as plain functions that would apply to a variety of types,
but every function we write in Go today must apply to only a single type.
Examples of generic functions we’d like to write include:
// Keys returns the keys from a map.
func Keys(m map[K]V) []K
// Uniq filters repeated elements from a channel,
// returning a channel of the filtered data.
func Uniq(<-chan T) <-chan T
// Merge merges all data received on any of the channels,
// returning a channel of the merged data.
func Merge(chans ...<-chan T) <-chan T
// SortSlice sorts a slice of data using the given comparison function.
func SortSlice(data []T, less func(x, y T) bool)
[Doug McIlroy has suggested](https://golang.org/issue/26282) that Go add two new
channel primitives `splice` and `clone`.
These could be implemented as polymorphic functions instead.
The
"[Go should have generics](https://go.googlesource.com/proposal/+/master/design/15292-generics.md#)" proposal
and the "[Summary of Go Generics Discussions](https://docs.google.com/document/d/1vrAy9gMpMoS3uaVphB32uVXX4pi-HnNjkMEgyAHX4N4/view#heading=h.vuko0u3txoew)"
contain additional discussion of the problem.
## Goals
Our goal is to address the problem of writing Go libraries that
abstract away needless type detail, such as the examples in the
previous section, by allowing parametric polymorphism with type
parameters.
In particular, in addition to the expected container types, we aim to
make it possible to write useful libraries for manipulating arbitrary
map and channel values, and ideally to write polymorphic functions
that can operate on both `[]byte` and `string` values.
It is not a goal to enable other kinds of parameterization, such as
parameterization by constant values.
It is also not a goal to enable specialized implementations of
polymorphic definitions, such as defining a general `vector<T>` and a
special-case `vector<bool>` using bit-packing.
We want to learn from and avoid the problems that generics have caused
for C++ and in Java (described in detail in the section about other
languages, below).
To support
[software engineering over time](https://research.swtch.com/vgo-eng),
generics for Go must record constraints on type parameters explicitly,
to serve as a clear, enforced agreement between caller and
implementation.
It is also critical that the compiler report clear errors when a
caller does not meet those constraints or an implementation exceeds
them.
Polymorphism in Go must fit smoothly into the surrounding language,
without awkward special cases and without exposing implementation
details.
For example, it would not be acceptable to limit type parameters to
those whose machine representation is a single pointer or single word.
As another example, once the general `Keys(map[K]V) []K` function
contemplated above has been instantiated with `K` = `int` and `V` = `string`,
it must be treated semantically as equivalent to a hand-written
non-generic function.
In particular it must be assignable to a variable of type `func(map[int]string) []int`.
Polymorphism in Go should be implementable both at compile time (by
repeated specialized compilation, as in C++) and at run time, so that
the decision about implementation strategy can be left as a decision
for the compiler and treated like any other compiler optimization.
This flexibility would address the
[generic dilemma](https://research.swtch.com/generic) we’ve discussed
in the past.
Go is in large part a language that is straightforward and
understandable for its users.
If we add polymorphism, we must preserve that.
## Draft Design
This section quickly summarizes the draft design, as a basis for
high-level discussion and comparison with other approaches.
The draft design adds a new syntax for introducing a type parameter
list in a type or function declaration: `(type` <_list of type names_>`)`.
For example:
type List(type T) []T
func Keys(type K, V)(m map[K]V) []K
Uses of a parameterized declaration supply the type arguments using ordinary call syntax:
var ints List(int)
keys := Keys(int, string)(map[int]string{1:"one", 2: "two"})
The generalizations in these examples require nothing of the types `T`,
`K`, and `V`: any type will do.
In general an implementation may need to constrain the possible types
that can be used.
For example, we might want to define a `Set(T)`, implemented as a list
or map, in which case values of type `T` must be able to be compared for
equality.
To express that, the draft design introduces the idea of a named
**_contract_**.
A contract is like a function body illustrating the operations the
type must support.
For example, to declare that values of type `T` must be comparable:
contract Equal(t T) {
t == t
}
To require a contract, we give its name after the list of type parameters:
type Set(type T Equal) []T
// Find returns the index of x in the set s,
// or -1 if x is not contained in s.
func (s Set(T)) Find(x T) int {
for i, v := range s {
if v == x {
return i
}
}
return -1
}
As another example, here is a generalized `Sum` function:
contract Addable(t T) {
t + t
}
func Sum(type T Addable)(x []T) T {
var total T
for _, v := range x {
total += v
}
return total
}
Generalized functions are invoked with type arguments
to select a specialized function and then invoked again with their value arguments:
var x []int
total := Sum(int)(x)
As you might expect, the two invocations can be split:
var x []int
intSum := Sum(int) // intSum has type func([]int) int
total := intSum(x)
The call with type arguments can be omitted, leaving only the call with values,
when the necessary type arguments can be inferred from the values:
var x []int
total := Sum(x) // shorthand for Sum(int)(x)
More than one type parameter is also allowed in types, functions, and contracts:
contract Graph(n Node, e Edge) {
var edges []Edge = n.Edges()
var nodes []Node = e.Nodes()
}
func ShortestPath(type N, E Graph)(src, dst N) []E
The contract is applied by default to the list of type parameters, so that `(type T Equal)` is shorthand for `(type T Equal(T))`,
and `(type N, E Graph)` is shorthand for `(type N, E Graph(N, E))`.
For details, see the [draft design](go2draft-contracts.md).
## Discussion and Open Questions
This draft design is meant only as a starting point for community discussion.
We fully expect the details to be revised based on feedback and especially experience reports.
This section outlines some of the questions that remain to be answered.
Our previous four designs for generics in Go all had significant problems, which we identified very quickly.
The current draft design appears to avoid the problems in the earlier ones: we’ve spent about half a year discussing and refining it so far and still believe it could work.
While we are not formally proposing it today, we think it is at least a good enough starting point for a community discussion with the potential to lead to a formal proposal.
Even after six months of (not full time) discussion, the design is still in its earliest stages.
We have written a parser but no type checker and no implementation.
It will be revised as we learn more about it.
Here we identify a few important things we are unsure about, but there are certainly more.
**Implied constraints**.
One of the examples above applies to maps of arbitrary key and value type:
func Keys(type K, V)(m map[K]V) []K {
...
}
But not all types can be used as key types,
so this function should more precisely be written as:
func Keys(type K, V Equal(K))(m map[K]V) []K {
...
}
It is unclear whether that precision about
`K` should be required of the user or inferred
from the use of `map[K]V` in the function signature.
**Dual implementation**.
We are hopeful that the draft design satisfies the
"dual-implementation" constraint mentioned above,
that every parameterized type or function can be implemented
either by compile-time or run-time type substitution,
so that the decision becomes purely a compiler optimization, not one of semantic significance.
But we have not yet confirmed that.
One consequence of the dual-implementation constraint
is that we have not included support for type parameters in method declarations.
The most common place where these arise is in modeling functional operations on general containers.
It is tempting to allow:
// A Set is a set of values of type T.
type Set(type T) ...
// Apply applies the function f to each value in the set s,
// returning a set of the results.
func (s Set(T)) Apply(type U)(f func(T) U) Set(U) // NOT ALLOWED!
The problem here is that a value of type `Set(int)`
would require an infinite number of `Apply` methods to be available at runtime,
one for every possible type `U`, all discoverable by reflection and type assertions.
They could not all be compiled ahead of time.
An earlier version of the design allowed generic methods but then disallowed their visibility in reflection and interface satisfaction, to avoid forcing the run-time implementation of generics.
Disallowing generalized methods entirely seemed cleaner than allowing them with these awkward special cases.
Note that it is still possible to write `Apply` as a top-level function:
func Apply(type T, U)(s Set(T), f func(T) U) Set(U)
Working within the intersection of compile-time and run-time implementations also requires being able to reject parameterized functions or types that cause generation of an arbitrary (or perhaps just very large) number of additional types.
For example, here are a few unfortunate programs:
// OK
type List(type T) struct {
elem T
next *List(T)
}
// NOT OK - Implies an infinite sequence of types as you follow .next pointers.
type Infinite(type T) struct {
next *Infinite(Infinite(T))
}
// BigArray(T)(n) returns a nil n-dimensional slice of T.
// BigArray(int)(1) returns []int
// BigArray(int)(2) returns [][]int
// ...
func BigArray(type T)(n int) interface{} {
if n <= 1 || n >= 1000000000 {
return []T(nil)
}
return BigArray([]T)(n-1)
}
It is unclear what the algorithm is for deciding which programs to accept and which to reject.
**Contract bodies**.
Contracts are meant to look like little functions.
They use a subset of function body syntax,
but the actual syntax is much more limited than just "any Go code" (see the full design for details).
We would like to understand better if it is feasible to allow any valid function body as a contract body.
The hard part is defining precisely which generic function bodies are allowed by a given contract body.
There are parallels with the C++ concepts design (discussed in detail below): the definition of a C++ concept started out being exactly a function body illustrating the necessary requirements, but over time the design changed to use a more limited list of requirements of a specific form.
Clearly it was not workable in C++ to support arbitrary function bodies.
But Go is a simpler language than C++ and it may be possible here.
We would like to explore whether it is possible to implement contract body syntax as exactly function body syntax and whether that would be simpler for users to understand.
**Feedback**.
The most useful general feedback would be examples of interesting uses that are enabled or disallowed by the draft design.
We’d also welcome feedback about the points above, especially based on experience type-checking or implementing generics in other languages.
We are most uncertain about exactly what to allow in contract bodies, to make them as easy to read and write for users while still being sure the compiler can enforce them as limits on the implementation.
That is, we are unsure about the exact algorithm to deduce the properties required for type-checking a generic function from a corresponding contract.
After that we are unsure about the details of a run-time-based (as opposed to compile-time-based) implementation.
Feedback on semantics and implementation details is far more useful and important than feedback about syntax.
We are collecting links to feedback at
[golang.org/wiki/Go2GenericsFeedback](https://golang.org/wiki/Go2GenericsFeedback).
## Designs in Other Languages
It is worth comparing the draft design with those in real-world use, either now or in the past.
We are not fluent programmers in many of these languages.
This is our best attempt to piece together the history, including links to references, but we would welcome corrections about the syntax, semantics, or history of any of these.
The discussion of other language designs in this section focuses on the specification of type constraints and also implementation details and problems, because those ended up being the two most difficult parts of the Go draft design for us to work out.
They are likely the two most difficult parts of any design for parametric polymorphism.
In retrospect, we were biased too much by experience with C++ without concepts and Java generics. We would have been well-served to spend more time with CLU and C++ concepts earlier.
We’ll use the `Set`, `Sum`, and `ShortestPath` examples above as points of comparison throughout this section.
### ML, 1975
ML was the first typed language to incorporate polymorphism.
Christopher Strachey is usually given credit for introducing the term parametric polymorphism in his 1967 survey, "[Fundamental Concepts in Programming Languages](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.332.3161&rep=rep1&type=pdf)."
Robin Milner’s 1978 paper "[A Theory of Type Polymorphism in Programming](https://courses.engr.illinois.edu/cs421/sp2013/project/milner-polymorphism.pdf)" introduced an algorithm to infer the most general types of polymorphic function bodies, instead of forcing the use of concrete types.
Milner had already implemented his algorithm for the ML language as part of the Edinburgh LCF system.
He wanted to be able to write the kinds of general functions possible in LISP, but in a typed language.
ML inferred constraints and for that matter the types themselves from the untyped function body.
But the inference was limited - there were no objects, classes, methods, or operators, just values (including function values).
There was not even equality checking.
Milner
[suggested adding "equality types"](http://www.lfcs.inf.ed.ac.uk/reports/87/ECS-LFCS-87-33/ECS-LFCS-87-33.pdf) in 1987, distinguishing a type variable with no constraints (`'t`) from a type variable that must represent a type allowing equality checks (`''t`).
The
[Standard ML of New Jersey compiler](https://www.cs.princeton.edu/research/techreps/TR-097-87) (1987) implements polymorphic functions by arranging that every value is
[represented as a single machine word](https://www.cs.princeton.edu/research/techreps/TR-142-88).
That uniformity of representation, combined with the near-complete lack of type constraints, made it possible to use one compiled body for all invocations.
Of course, boxing has its own allocation time and space overheads.
The
[MLton whole-program optimizing compiler](http://mlton.org/History) (1997) specializes polymorphic functions at compile time.
### CLU, 1977
The research language CLU, developed by Barbara Liskov’s group at MIT, was the first to introduce what we would now recognize as modern generics.
(CLU also introduced iterators and abstract data types.)
[CLU circa 1975](http://csg.csail.mit.edu/CSGArchives/memos/Memo-112-1.pdf) allowed defining parameterized types without constraints, much like in ML.
To enable implementing a generic set despite the lack of constraints, all types were required to implement an equal method.
By 1977,
[CLU had introduced "where clauses"](https://web.eecs.umich.edu/~weimerw/2008-615/reading/liskov-clu-abstraction.pdf) to constrain parameterized types, allowing the set implementation to make its need for `equal` explicit.
CLU also had operator methods, so that `x == y` was syntactic sugar for `t$equal(x, y)` where `t` is the type of both `x` and `y`.
set = cluster [t: type] is create, member, size, insert, delete, elements
where t has equal: proctype (t, t) returns (bool)
rep = array[t]
% implementation of methods here, using == on values of type t
end set
The more complex graph example is still simple in CLU:
shortestpath = proc[node, edge: type] (src, dst: node) returns array[edge]
where node has edges: proctype(node) returns array[edge],
edge has nodes: proctype(edge) returns array[node]
...
end shortestpath
The 1978 paper "[Aspects of Implementing CLU](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.3516&rep=rep1&type=pdf)" discusses the compile-time versus run-time implementations of parameterized generics and details CLU's run-time-only approach.
The "BigArray" function shown earlier is also taken from this paper (translated to Go, of course).
All the ingredients for modern generics are here: syntax for declaring generalized types and functions, syntax for invoking them, a simple constraint syntax, and a well thought-out implementation.
There was no inference of type parameters.
The CLU designers found it helpful to see all substitutions made explicitly.
In her 1992 retrospective "[A History of CLU](http://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=F5D7C821199F22C5D30A51F155DB9D23?doi=10.1.1.46.9499&rep=rep1&type=pdf)," Liskov observed, "CLU was way ahead of its time in its solution for parameterized modules.
Even today, most languages do not support parametric polymorphism, although there is growing recognition of the need for it."
### Ada, 1983
Ada clearly lifted many ideas from CLU, including the approach for exceptions and parametric polymorphism, although not the elegant syntax.
Here is an example generic squaring function from the
[Ada 1983 spec](https://swtch.com/ada-mil-std-1815a.pdf), assembled from pages 197, 202, and 204 of the PDF.
The generic declaration introduces a parameterized "unit" and then the function declaration appears to come separately:
generic
type ITEM is private;
with function "*"(U, V : ITEM) return ITEM is <>;
function SQUARING(X : ITEM) return ITEM;
function SQUARING(X : ITEM) return ITEM is
begin
return X*X;
end;
Interestingly, this definition introduces a function SQUARING parameterized by both the type ITEM and the * operation.
If instantiated using type INTEGER, the * operation is taken from that type:
function SQUARE is new SQUARING (INTEGER);
But the * operation can also be substituted directly, allowing definition of a matrix squarer using the MATRIX-PRODUCT function.
These two instantiations are equivalent:
function SQUARE is new SQUARING (ITEM => MATRIX, "*" => MATRIX-PRODUCT);
function SQUARE is new SQUARING (MATRIX, MATRIX-PRODUCT);
We have not looked into how Ada generics were implemented.
The initial Ada design contest
[ran from 1975-1980 or so](https://www.red-gate.com/simple-talk/opinion/geek-of-the-week/tucker-taft-geek-of-the-week/), resulting eventually in the Ada 83 standard in 1983.
We are not sure exactly when generics were added.
### C++, 1991
[C++ introduced templates](http://www.stroustrup.com/hopl-almost-final.pdf) in 1991, in the Cfront 3.0 release.
The implementation was always by compile-time macro expansion, and there were no "where clauses" or other explicit constraints.
template<typename T>
class Set {
...
void Add(T item) {
...
}
};
template<typename T>
T Sum(vector<T> x) {
T s;
for(int i = 0; i < x.size(); i++) {
s += x[i];
}
return s;
}
Instead, if a template was invoked with an inappropriate type, such as a Sum<char*>, the compiler reported a type-checking error in the middle of the invoked function’s body.
This was not terribly user-friendly and soured many developers on the idea of parametric polymorphism.
The lack of type constraints enabled the creation of the STL and transformed C++ into a wholly different language than it had been.
Then the problem became how to add explicit type constraints sufficiently expressive to allow all the tricks used in the STL.
Programmers worked around the lack of constraints by establishing conventions for expressing them.
Stroustrup’s 1994 book
[The Design and Evolution of C++](http://www.stroustrup.com/dne.html) gives some examples.
The first option is to define constraints as classes:
template <class T> class Comparable {
T& operator=(const T&);
int operator==(const T&, const T&);
int operator<=(const T&, const T&);
int operator<(const T&, const T&);
};
template <class T : Comparable>
class vector {
// ...
};
Unfortunately, this requires the original type `T` to explicitly derive from `Comparable`.
Instead, Stroustrup suggested writing a function, conventionally named `constraints`, illustrating the requirements:
template<class T> class X {
// ...
void constraints(T* tp)
{ // T must have:
B* bp = tp; // an accessible base B
tp->f(); // a member function f
T a(1); // a constructor from int
a = *tp; // assignment
// ...
}
};
Compiler errors would at least be simple, targeted, and reported as a problem with `X<T>::constraints`.
Of course, nothing checked that other templates used only the features of T illustrated in the constraints.
In 2003, Stroustrup proposed formalizing this convention as
[C++ concepts](http://www.stroustrup.com/N1522-concept-criteria.pdf).
The feature was intended for C++0x (eventually C++11 (2011)) but
[removed in 2009](http://www.drdobbs.com/cpp/the-c0x-remove-concepts-decision/218600111).
Concepts were published as a
[separate ISO standard in 2015](https://www.iso.org/standard/64031.html), shipped in GCC, and were intended for C++17 (2017)
[but removed in 2016](http://honermann.net/blog/2016/03/06/why-concepts-didnt-make-cxx17/).
They are now intended for C++20 (2020).
The 2003 proposal gives this syntax:
concept Element {
constraints(Element e1, Element e2) {
bool b = e1<e2; // Elements can be compared using <
swap(e1,e2); // Elements can be swapped
}
};
By 2015, the syntax had changed a bit but the underlying idea was still the same.
Stroustrup’s 2015 paper "[Concepts: The Future of Generic Programming, or How to design good concepts and use them well](http://www.stroustrup.com/good_concepts.pdf)" presents as an example a concept for having equality checking.
(In C++, `==` and `!=` are unrelated operations so both must be specified.)
template<typename T>
concept bool Equality_comparable =
requires (T a, T b) {
{ a == b } -> bool; // compare Ts with ==
{ a != b } -> bool; // compare Ts with !=
};
A requires expression evaluates to true if each of the listed requirements is satisfied, false otherwise.
Thus `Equality_comparable<T>` is a boolean constant whose value depends on `T`.
Having defined the predicate, we can define our parameterized set:
template<Equality_comparable T>
class Set {
...
};
Set<int> set;
set.Add(1);
Here the `<Equality_comparable T>` introduces a type variable `T` with the constraint that `Equality_comparable<T> == true`.
The class declaration above is shorthand for:
template<typename T>
requires Equality_comparable<T>
class Set {
...
};
By allowing a single concept to constrain a group of related types, the C++ concept proposal makes it easy to define our shortest path example:
template<typename Node, typename Edge>
concept bool Graph =
requires(Node n, Edge e) {
{ n.Edges() } -> vector<Edge>;
{ e.Nodes() } -> vector<Node>;
};
	template<typename Node, typename Edge>
	requires Graph<Node, Edge>
vector<Edge> ShortestPath(Node src, Node dst) {
...
}
### Java, 1997-2004
In 1997, Martin Odersky and Philip Wadler introduced
[Pizza](http://pizzacompiler.sourceforge.net/doc/pizza-language-spec.pdf), a strict superset of Java, compiled to Java bytecodes, adding three features from functional programming: parametric polymorphism, higher-order functions, and algebraic data types.
In 1998, Odersky and Wadler, now joined by Gilad Bracha and David Stoutamire, introduced
[GJ](http://homepages.inf.ed.ac.uk/wadler/papers/gj-oopsla/gj-oopsla-letter.pdf), a Pizza-derived Java superset targeted solely at parametric polymorphism, now called generics.
The GJ design was adopted with minor changes in Java 5, released in 2004.
As seen in the example, this design uses interfaces to express type constraints, with the result that parameterized interfaces must be used to create common self-referential constraints such as having an equal method that checks two items of the same type for equality.
In CLU this constraint was written directly:
set = cluster[t: type] ...
where t has equal: proctype(t, t) returns bool
In Java 5, the same constraint is written indirectly, by first defining `Equal<T>`:
interface Equal<T> {
boolean equal(T o);
}
Then the constraint is `T implements Equal<T>` as in:
class Set<T implements Equal<T>> {
...
public void add(T o) {
...
}
}
Set<int> set;
set.add(1);
This is Java’s variant of the C++ "[curiously recurring template pattern](https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern)" and is a common source of confusion (or at least rote memorization) among Java programmers first learning generics.
The graph example is even more complex:
interface Node<Edge> {
List<Edge> Edges()
}
interface Edge<Node> {
List<Node> Nodes()
}
class ShortestPath<N implements Node<E>, E implements Edge<N>> {
	    static public List<E> Find(N src, N dst) {
...
}
}
Java 4 and earlier had provided untyped, heterogeneous container classes like `List` and `Set` that used the non-specific element type `Object`.
Java 5 generics aimed to provide type parameterization for those legacy containers.
The originals became `List<Object>` and `Set<Object>`, but now programmers could also write `List<String>`, `List<Set<String>>`, and so on.
The implementation was by "type erasure," converting to the original untyped containers, so that at runtime there were only the unparameterized implementations `List` and `Set` (of `Object`).
Because the implementation needed to be memory-compatible with `List<Object>`, which is to say a list of pointers, Java value types like `int` and `boolean` could not be used as type parameters: no `List<int>`.
Instead there is `List<Integer>`, in which each element becomes a class object instead of a plain `int`, with all the associated memory and allocation overhead.
Because of the erasure, reflection on these values, including dynamic type checks using `instanceof`, has no information about the expected type of elements.
Reflection and code written using untyped collections like `List` or `Set` therefore served as back doors to bypass the new type system.
The inability to use `instanceof` with generics introduced other rough edges, such as not being able to define parameterized exception classes, or more precisely being able to throw an instance of a parameterized class but not catch one.
Angelika Langer has written an
[extensive FAQ](http://www.angelikalanger.com/GenericsFAQ/JavaGenericsFAQ.html), the size of which gives a sense of the complexity of Java generics.
Java 10 may add runtime access to type parameter information.
Experience watching the Java generics story unfold, combined with discussions with some of the main players, was the primary reason we avoided tackling any sort of generics in the first version of Go.
Since much of the complexity arose from the design being boxed in by pre-existing container types, we mostly avoided adding container types to the standard library ([`container/list`](https://golang.org/pkg/container/list)
and [`container/ring`](https://golang.org/pkg/container/ring) are the exceptions, but they are not widely used).
Many developers associate Java generics first with the complexity around container types.
That complexity, combined with the fact that Java lacks the concept of a plain function (such as `Sum`) as opposed to methods bound to a class, led to the common belief that generics means parameterized data structures, or containers, ignoring parameterized functions.
This is particularly ironic given the original inspiration from functional programming.
### C#, 1999-2005
C#, and more broadly the .NET Common Language Runtime (CLR), added
[support for generics](https://msdn.microsoft.com/en-us/library/ms379564%28v=vs.80%29.aspx) in C# 2.0, released in 2005 and the culmination of
[research beginning in 1999](http://mattwarren.org/2018/03/02/How-generics-were-added-to-.NET/).
The syntax and definition of type constraints mostly follows Java’s, using parameterized interfaces.
Learning from the Java generics implementation experience, C# removes many of the rough edges.
It makes parameterization information available at runtime, so that reflection can distinguish `List<string>` from `List<List<string>>`.
It also allows parameterization to use basic types like int, so that `List<int>` is valid and efficient.
### D, 2002
D
[added templates in D 0.40](https://wiki.dlang.org/Language_History_and_Future), released in September 2002.
We have not tracked down the original design to see how similar it was to the current templates.
The current D template mechanism allows parameterizing a block of arbitrary code:
template Template(T1, T2) {
... code using T1, T2 ...
}
The block is instantiated using `Template!` followed by actual types, as in `Template!(int, float64)`.
It appears that instantiation is always at compile-time, like in C++.
If a template contains a single declaration of the same name, the usage is shortened:
template Sum(T) {
T Sum(T[] x) {
...
}
}
int[] x = ...
int sum = Sum!(int)(x) // short for Sum!(int).Sum(x)
This code compiles and runs, but it can be made clearer by adding an
[explicit constraint on `T`](https://dlang.org/concepts.html) to say that it must support equality:
template hasEquals(T) {
const hasEquals = __traits(compiles, (T t) {
return t == t;
});
}
template Sum(T) if (hasEquals!(T)) {
T Sum(T []x) {
...
}
}
The `__traits(compiles, ...)` construct is a variant of the C++ concepts idea (see C++ discussion above).
As in C++, because the constraints can be applied to a group of types, defining `Graph` does not require mutually-recursive gymnastics:
template isGraph(Node, Edge) {
const isGraph = __traits(compiles, (Node n, Edge e) {
Edge[] edges = n.Edges();
Node[] nodes = e.Nodes();
});
}
template ShortestPath(Node, Edge)
if (isGraph!(Node, Edge)) {
Edge[] ShortestPath(Node src, Node dst) {
...
}
}
### Rust, 2012
Rust
[included generics in version 0.1](https://github.com/rust-lang/rust/blob/master/RELEASES.md#version-01--2012-01-20), released in 2012.
Rust defines generics with syntax similar to C#, using traits (Rust’s interfaces) as type constraints.
Rust avoids Java’s and C#'s curiously-recurring interface pattern for direct self-reference by introducing a `Self` type.
For example, the protocol for having an `Equals` method can be written:
pub trait Equals {
fn eq(&self, other: &Self) -> bool;
fn ne(&self, other: &Self) -> bool;
}
(In Rust, `&self` denotes the method's receiver variable, written without an explicit type; elsewhere in the function signature, `&Self` can be used to denote the receiver type.)
And then our `Set` type can be written:
struct Set<T: Equals> {
...
}
This is shorthand for
struct Set<T> where T: Equals {
...
}
The graph example still needs explicitly mutually-recursive traits:
pub trait Node<Edge> {
fn edges(&self) -> Vec<Edge>;
}
pub trait Edge<Node> {
fn nodes(&self) -> Vec<Node>;
}
pub fn shortest_path<N, E>(src: N, dst: N) -> Vec<E>
where N: Node<E>, E: Edge<N> {
...
}
In keeping with its "no runtime" philosophy, Rust implements generics by compile-time expansion, like C++ templates.
### Swift, 2017
Swift added generics in Swift 4, released in 2017.
The
[Swift language guide](https://docs.swift.org/swift-book/LanguageGuide/Generics.html) gives an example of sequential search through an array, which requires that the type parameter `T` support equality checking. (This is a popular example; it dates back to CLU.)
func findIndex<T: Equatable>(of valueToFind: T, in array:[T]) -> Int? {
for (index, value) in array.enumerated() {
if value == valueToFind {
return index
}
}
return nil
}
Declaring that `T` satisfies the
[`Equatable`](https://developer.apple.com/documentation/swift/equatable) protocol makes the use of `==` in the function body valid.
`Equatable` appears to be a built-in in Swift, not possible to define otherwise.
Like Rust, Swift avoids Java’s and C#'s curiously recurring interface pattern for direct self-reference by introducing a `Self` type.
For example, the protocol for having an `Equals` method is:
protocol EqualsMethod {
func Equals(other: Self) -> Bool
}
Protocols cannot be parameterized, but declaring "associated types" can be used for the same effect:
protocol Node {
associatedtype Edge;
func Edges() -> [Edge];
}
protocol Edge {
associatedtype Node;
func Nodes() -> [Node];
}
func ShortestPath<N: Node, E: Edge>(src: N, dst: N) -> [E]
where N.Edge == E, E.Node == N {
...
}
Swift’s default implementation of generic code is by single compilation with run-time substitution, via "[witness tables](https://www.reddit.com/r/swift/comments/3r4gpt/how_is_swift_generics_implemented/cwlo64w/?st=jkwrobje&sh=6741ba8b)".
The compiler is allowed to compile specialized versions of generic code as an optimization, just as we would like to do for Go.
## Earlier Go Designs
As noted above, the Go team, and in particular Ian Lance Taylor, has been investigating and discussing possible designs for "generics" since before the open source release.
In April 2016, we
[published the four main designs](https://go.googlesource.com/proposal/+/master/design/15292-generics.md) we most seriously considered (before the current one).
Looking back over the designs and comparing them to the current draft design, it is helpful to focus on four features that varied in the designs over time: syntax, type constraints, type inference, and implementation strategy.
**Syntax**.
How are generic types, funcs, or methods declared? How are generic types, funcs, or methods used?
**Type Constraints**.
How are type constraints defined?
**Type Inference**.
When can explicit function call type instantiations be omitted (inferred by the compiler)?
**Implementation**.
Is compile-time substitution required? Is run-time substitution required? Are both required? Can the compiler choose one or the other as it sees fit?
### [Type Functions](https://go.googlesource.com/proposal/+/master/design/15292/2010-06-type-functions.md), June 2010
The first design we explored was based on the idea of a "type function."
**Syntax.** "Type function" was the name for the syntax for a parameterized type.
type Vector(T) []T
Every use of a type function had to specify concrete instantiations for the type variables, as in
type VectorInt Vector(int)
Func definitions introduced type parameters implicitly by use of a type function or explicitly by use of an argument of type "`<name> type`", as in:
func Sum(x Vector(T type)) T
func Sum(x []T type) T
**Constraints.**
Type constraints were specified by optional interface names following the type parameter:
type PrintableVector(T fmt.Stringer) []T
func Print(x T type fmt.Stringer)
To allow use of operators like addition in generic code, this proposal relied upon a separate proposal to introduce "operator methods" (as in CLU), which would in turn make them available in interface definitions.
**Inference.** There were no function call type instantiations.
Instead there was an algorithm for determining the type instantiations, with no explicit fallback when the algorithm failed.
**Implementation.** Overall the goal was to enable writing complex type-independent code once, at a run-time cost: the implementation would always compile only a generic version of the code, which would be passed a type descriptor to supply necessary details.
This would make generics unsuitable for high-performance uses or even trivial uses like `Min` and `Max`.
If type `Vector(T)` defined a method `Read(b []T) (int, error)`, it was unclear how the generic `Read` implementation specialized to byte would necessarily be compatible in calling convention with `io.Reader`.
The proposal permitted the idea of unbound type parameters
that seemed to depend on unspecified runtime support, producing "generic values".
The doc uses as an example:
func Unknown() T type
x := Unknown()
It was not clear exactly what this meant or how it would be implemented.
Overall it seemed that the need for the concept of a "generic value" was an indicator that something was not quite right.
### [Generalized Types](https://go.googlesource.com/proposal/+/master/design/15292/2011-03-gen.md), March 2011
The next design we explored was called "generalized types," although type parameters applied equally to types and functions.
**Syntax.** A type variable was introduced by the syntax `gen [T]` before a declaration and instantiated by listing the types in square brackets after the declared name.
gen[T] type Vector []T
type VectorInt Vector[int]
gen[T] func Sum(x []T) T
gen[T] func Sum(x Vector[T]) T
sum := Sum[int]([]int{1,2,3})
	gen[T1, T2] func MakePair(x T1, y T2) Pair[T1, T2]
As an aside, we discussed but ultimately rejected reserving `gen` or `generic` as keywords for Go 1 in anticipation of adopting some proposal like this.
It is interesting to note that the current design avoids the need for any such keyword and does not seem to suffer for it.
**Constraints.** The type variable could be followed by an interface name:
gen [T Stringer] type PrintableVector []T
gen [T Stringer] func Print(x T)
The proposal suggested adding language-defined method names for operators, so that `Sum` could be written:
gen [T] type Number interface {
Plus(T) T
}
gen [T Number[T]] func Sum(x []T) T {
var total T
for _, v := range x {
total = total.Plus(v)
}
return total
}
**Inference.** This proposal defined a simple left-to-right greedy unification of the types of the function call arguments with the types of the generic parameter list.
The current proposal is non-greedy: it unifies the types, and then verifies that all type parameters were unified to the same type.
The reason the earlier proposal used a greedy algorithm was to handle untyped constants; in the current proposal untyped constants are handled by ignoring them in the first pass and doing a second pass if required.
**Implementation.** This proposal noted that every actual value in a running Go program would have a concrete type.
It eliminated the "generic values" of the previous proposal.
This was the first proposal that aimed to support both generic and specialized compilation, with an appropriate choice made by the compiler.
(Because the proposal was never implemented, it is unclear whether it would have achieved that goal.)
### [Generalized Types II](https://go.googlesource.com/proposal/+/master/design/15292/2013-10-gen.md), October 2013
This design was an adaptation of the previous design, at that point two years old, with only one significant change.
Instead of getting bogged down in specifying interfaces, especially interfaces for operators, the design discarded type constraints entirely.
This allowed writing `Sum` with the usual `+` operator instead of a new `.Plus` method:
gen[T] func Sum(x []T) T {
s := T(0)
for _, v := range x {
s += v
}
return s
}
As such, it was the first generics design that did not call for operator methods as well.
Unfortunately, the design did not explain exactly how constraints could be inferred and whether that was even feasible.
Worse, if contracts are not written down, there’s no way to ensure that an API does not change its requirements accidentally and therefore break clients unexpectedly.
### [Type Parameters](https://go.googlesource.com/proposal/+/master/design/15292/2013-12-type-params.md), December 2013
This design kept most of the semantics of the previous design but introduced new syntax.
It dropped the gen keyword and moved the type-variable-introducing brackets after the func or type keyword, as in:
type [T] Vector []T
type VectorInt Vector[int]
func [T] Sum(x []T) T
func [T] Sum(x Vector[T]) T
sum := Sum[int]([]int{1,2,3})
func [T1, T2] MakePair(x T1, y T2) Pair[T1, T2]
This design retained the implicit constraints of the previous one, but now with a much longer discussion of exactly how to infer restrictions from function bodies.
It was still unclear if the approach was workable in practice, and it seemed clearly incomplete.
The design noted ominously:
> The goal of the restrictions listed above is not to try to handle every possible case.
> It is to provide a reasonable and consistent approach to type checking of parameterized functions and preliminary type checking of types used to instantiate those functions.
>
> It’s possible that future compilers will become more restrictive; a parameterized function that can not be instantiated by any type argument is invalid even if it is never instantiated, but we do not require that every compiler diagnose it.
> In other words, it’s possible that even if a package compiles successfully today, it may fail to compile in the future if it defines an invalid parameterized function.
Still, after many years of struggling with explicit enumerations of type constraints, "just look at the function body" seemed quite attractive.
| proposal/design/go2draft-generics-overview.md/0 | {
"file_path": "proposal/design/go2draft-generics-overview.md",
"repo_id": "proposal",
"token_count": 12666
} | 670 |
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: jsonpb_proto/test2.proto
package jsonpb_proto
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
anypb "google.golang.org/protobuf/types/known/anypb"
durationpb "google.golang.org/protobuf/types/known/durationpb"
structpb "google.golang.org/protobuf/types/known/structpb"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
math "math"
)
// NOTE(review): this file is generated by protoc-gen-go from
// jsonpb_proto/test2.proto. Do not hand-edit; change the .proto and
// regenerate instead.

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// Widget_Color is the generated Go type for the Widget.Color proto enum.
type Widget_Color int32

const (
	Widget_RED   Widget_Color = 0
	Widget_GREEN Widget_Color = 1
	Widget_BLUE  Widget_Color = 2
)

// Widget_Color_name maps enum numbers to their proto names (used by String).
var Widget_Color_name = map[int32]string{
	0: "RED",
	1: "GREEN",
	2: "BLUE",
}

// Widget_Color_value maps proto names back to enum numbers (used when
// unmarshaling JSON).
var Widget_Color_value = map[string]int32{
	"RED":   0,
	"GREEN": 1,
	"BLUE":  2,
}

// Enum returns a pointer to a copy of x; proto2 optional enum fields are
// represented as pointers, so this is the idiomatic way to set them.
func (x Widget_Color) Enum() *Widget_Color {
	p := new(Widget_Color)
	*p = x
	return p
}

// String returns the proto name of the enum value.
func (x Widget_Color) String() string {
	return proto.EnumName(Widget_Color_name, int32(x))
}

// UnmarshalJSON decodes a JSON enum representation into x via the
// Widget_Color_value table.
func (x *Widget_Color) UnmarshalJSON(data []byte) error {
	value, err := proto.UnmarshalJSONEnum(Widget_Color_value, data, "Widget_Color")
	if err != nil {
		return err
	}
	*x = Widget_Color(value)
	return nil
}

// EnumDescriptor returns the serialized file descriptor and the index path
// of this enum within it (message 3, enum 0).
func (Widget_Color) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_50cab1d8463dea41, []int{3, 0}
}
// Test message for holding primitive types.
// Scalar fields are pointers because this is proto2 syntax: nil means the
// field is unset, which the getters translate to the Go zero value.
type Simple struct {
	OBool                *bool    `protobuf:"varint,1,opt,name=o_bool,json=oBool" json:"o_bool,omitempty"`
	OInt32               *int32   `protobuf:"varint,2,opt,name=o_int32,json=oInt32" json:"o_int32,omitempty"`
	OInt32Str            *int32   `protobuf:"varint,3,opt,name=o_int32_str,json=oInt32Str" json:"o_int32_str,omitempty"`
	OInt64               *int64   `protobuf:"varint,4,opt,name=o_int64,json=oInt64" json:"o_int64,omitempty"`
	OInt64Str            *int64   `protobuf:"varint,5,opt,name=o_int64_str,json=oInt64Str" json:"o_int64_str,omitempty"`
	OUint32              *uint32  `protobuf:"varint,6,opt,name=o_uint32,json=oUint32" json:"o_uint32,omitempty"`
	OUint32Str           *uint32  `protobuf:"varint,7,opt,name=o_uint32_str,json=oUint32Str" json:"o_uint32_str,omitempty"`
	OUint64              *uint64  `protobuf:"varint,8,opt,name=o_uint64,json=oUint64" json:"o_uint64,omitempty"`
	OUint64Str           *uint64  `protobuf:"varint,9,opt,name=o_uint64_str,json=oUint64Str" json:"o_uint64_str,omitempty"`
	OSint32              *int32   `protobuf:"zigzag32,10,opt,name=o_sint32,json=oSint32" json:"o_sint32,omitempty"`
	OSint32Str           *int32   `protobuf:"zigzag32,11,opt,name=o_sint32_str,json=oSint32Str" json:"o_sint32_str,omitempty"`
	OSint64              *int64   `protobuf:"zigzag64,12,opt,name=o_sint64,json=oSint64" json:"o_sint64,omitempty"`
	OSint64Str           *int64   `protobuf:"zigzag64,13,opt,name=o_sint64_str,json=oSint64Str" json:"o_sint64_str,omitempty"`
	OFloat               *float32 `protobuf:"fixed32,14,opt,name=o_float,json=oFloat" json:"o_float,omitempty"`
	OFloatStr            *float32 `protobuf:"fixed32,15,opt,name=o_float_str,json=oFloatStr" json:"o_float_str,omitempty"`
	ODouble              *float64 `protobuf:"fixed64,16,opt,name=o_double,json=oDouble" json:"o_double,omitempty"`
	ODoubleStr           *float64 `protobuf:"fixed64,17,opt,name=o_double_str,json=oDoubleStr" json:"o_double_str,omitempty"`
	OString              *string  `protobuf:"bytes,18,opt,name=o_string,json=oString" json:"o_string,omitempty"`
	OBytes               []byte   `protobuf:"bytes,19,opt,name=o_bytes,json=oBytes" json:"o_bytes,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message boilerplate: Reset/String/ProtoMessage
// satisfy the proto.Message interface; the XXX_* methods delegate wire
// marshaling to the reflection-based implementation in xxx_messageInfo_Simple.
func (m *Simple) Reset()         { *m = Simple{} }
func (m *Simple) String() string { return proto.CompactTextString(m) }
func (*Simple) ProtoMessage()    {}

// Descriptor returns the serialized file descriptor and the index of this
// message within it.
func (*Simple) Descriptor() ([]byte, []int) {
	return fileDescriptor_50cab1d8463dea41, []int{0}
}

func (m *Simple) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Simple.Unmarshal(m, b)
}
func (m *Simple) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Simple.Marshal(b, m, deterministic)
}
func (m *Simple) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Simple.Merge(m, src)
}
func (m *Simple) XXX_Size() int {
	return xxx_messageInfo_Simple.Size(m)
}
func (m *Simple) XXX_DiscardUnknown() {
	xxx_messageInfo_Simple.DiscardUnknown(m)
}

var xxx_messageInfo_Simple proto.InternalMessageInfo

// Generated getters: each is nil-safe (callable on a nil receiver) and
// returns the field's value, or the zero value when the field is unset.
func (m *Simple) GetOBool() bool {
	if m != nil && m.OBool != nil {
		return *m.OBool
	}
	return false
}

func (m *Simple) GetOInt32() int32 {
	if m != nil && m.OInt32 != nil {
		return *m.OInt32
	}
	return 0
}

func (m *Simple) GetOInt32Str() int32 {
	if m != nil && m.OInt32Str != nil {
		return *m.OInt32Str
	}
	return 0
}

func (m *Simple) GetOInt64() int64 {
	if m != nil && m.OInt64 != nil {
		return *m.OInt64
	}
	return 0
}

func (m *Simple) GetOInt64Str() int64 {
	if m != nil && m.OInt64Str != nil {
		return *m.OInt64Str
	}
	return 0
}

func (m *Simple) GetOUint32() uint32 {
	if m != nil && m.OUint32 != nil {
		return *m.OUint32
	}
	return 0
}

func (m *Simple) GetOUint32Str() uint32 {
	if m != nil && m.OUint32Str != nil {
		return *m.OUint32Str
	}
	return 0
}

func (m *Simple) GetOUint64() uint64 {
	if m != nil && m.OUint64 != nil {
		return *m.OUint64
	}
	return 0
}

func (m *Simple) GetOUint64Str() uint64 {
	if m != nil && m.OUint64Str != nil {
		return *m.OUint64Str
	}
	return 0
}

func (m *Simple) GetOSint32() int32 {
	if m != nil && m.OSint32 != nil {
		return *m.OSint32
	}
	return 0
}

func (m *Simple) GetOSint32Str() int32 {
	if m != nil && m.OSint32Str != nil {
		return *m.OSint32Str
	}
	return 0
}

func (m *Simple) GetOSint64() int64 {
	if m != nil && m.OSint64 != nil {
		return *m.OSint64
	}
	return 0
}

func (m *Simple) GetOSint64Str() int64 {
	if m != nil && m.OSint64Str != nil {
		return *m.OSint64Str
	}
	return 0
}

func (m *Simple) GetOFloat() float32 {
	if m != nil && m.OFloat != nil {
		return *m.OFloat
	}
	return 0
}

func (m *Simple) GetOFloatStr() float32 {
	if m != nil && m.OFloatStr != nil {
		return *m.OFloatStr
	}
	return 0
}

func (m *Simple) GetODouble() float64 {
	if m != nil && m.ODouble != nil {
		return *m.ODouble
	}
	return 0
}

func (m *Simple) GetODoubleStr() float64 {
	if m != nil && m.ODoubleStr != nil {
		return *m.ODoubleStr
	}
	return 0
}

func (m *Simple) GetOString() string {
	if m != nil && m.OString != nil {
		return *m.OString
	}
	return ""
}

// GetOBytes returns the bytes field directly ([]byte is already nilable,
// so no pointer indirection is needed).
func (m *Simple) GetOBytes() []byte {
	if m != nil {
		return m.OBytes
	}
	return nil
}
// Test message for holding special non-finites primitives.
// The f_*/d_* fields carry float32/float64 NaN and +/-infinity values,
// which JSON cannot represent as numbers and jsonpb must special-case.
type NonFinites struct {
	FNan                 *float32 `protobuf:"fixed32,1,opt,name=f_nan,json=fNan" json:"f_nan,omitempty"`
	FPinf                *float32 `protobuf:"fixed32,2,opt,name=f_pinf,json=fPinf" json:"f_pinf,omitempty"`
	FNinf                *float32 `protobuf:"fixed32,3,opt,name=f_ninf,json=fNinf" json:"f_ninf,omitempty"`
	DNan                 *float64 `protobuf:"fixed64,4,opt,name=d_nan,json=dNan" json:"d_nan,omitempty"`
	DPinf                *float64 `protobuf:"fixed64,5,opt,name=d_pinf,json=dPinf" json:"d_pinf,omitempty"`
	DNinf                *float64 `protobuf:"fixed64,6,opt,name=d_ninf,json=dNinf" json:"d_ninf,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message boilerplate (see Simple for details).
func (m *NonFinites) Reset()         { *m = NonFinites{} }
func (m *NonFinites) String() string { return proto.CompactTextString(m) }
func (*NonFinites) ProtoMessage()    {}
func (*NonFinites) Descriptor() ([]byte, []int) {
	return fileDescriptor_50cab1d8463dea41, []int{1}
}

func (m *NonFinites) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_NonFinites.Unmarshal(m, b)
}
func (m *NonFinites) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_NonFinites.Marshal(b, m, deterministic)
}
func (m *NonFinites) XXX_Merge(src proto.Message) {
	xxx_messageInfo_NonFinites.Merge(m, src)
}
func (m *NonFinites) XXX_Size() int {
	return xxx_messageInfo_NonFinites.Size(m)
}
func (m *NonFinites) XXX_DiscardUnknown() {
	xxx_messageInfo_NonFinites.DiscardUnknown(m)
}

var xxx_messageInfo_NonFinites proto.InternalMessageInfo

// Generated nil-safe getters; each returns 0 when the field is unset.
func (m *NonFinites) GetFNan() float32 {
	if m != nil && m.FNan != nil {
		return *m.FNan
	}
	return 0
}

func (m *NonFinites) GetFPinf() float32 {
	if m != nil && m.FPinf != nil {
		return *m.FPinf
	}
	return 0
}

func (m *NonFinites) GetFNinf() float32 {
	if m != nil && m.FNinf != nil {
		return *m.FNinf
	}
	return 0
}

func (m *NonFinites) GetDNan() float64 {
	if m != nil && m.DNan != nil {
		return *m.DNan
	}
	return 0
}

func (m *NonFinites) GetDPinf() float64 {
	if m != nil && m.DPinf != nil {
		return *m.DPinf
	}
	return 0
}

func (m *NonFinites) GetDNinf() float64 {
	if m != nil && m.DNinf != nil {
		return *m.DNinf
	}
	return 0
}
// Test message for holding repeated primitives.
// Repeated fields are plain slices (no pointer indirection); nil and empty
// both mean "no elements" on the wire.
type Repeats struct {
	RBool                []bool    `protobuf:"varint,1,rep,name=r_bool,json=rBool" json:"r_bool,omitempty"`
	RInt32               []int32   `protobuf:"varint,2,rep,name=r_int32,json=rInt32" json:"r_int32,omitempty"`
	RInt64               []int64   `protobuf:"varint,3,rep,name=r_int64,json=rInt64" json:"r_int64,omitempty"`
	RUint32              []uint32  `protobuf:"varint,4,rep,name=r_uint32,json=rUint32" json:"r_uint32,omitempty"`
	RUint64              []uint64  `protobuf:"varint,5,rep,name=r_uint64,json=rUint64" json:"r_uint64,omitempty"`
	RSint32              []int32   `protobuf:"zigzag32,6,rep,name=r_sint32,json=rSint32" json:"r_sint32,omitempty"`
	RSint64              []int64   `protobuf:"zigzag64,7,rep,name=r_sint64,json=rSint64" json:"r_sint64,omitempty"`
	RFloat               []float32 `protobuf:"fixed32,8,rep,name=r_float,json=rFloat" json:"r_float,omitempty"`
	RDouble              []float64 `protobuf:"fixed64,9,rep,name=r_double,json=rDouble" json:"r_double,omitempty"`
	RString              []string  `protobuf:"bytes,10,rep,name=r_string,json=rString" json:"r_string,omitempty"`
	RBytes               [][]byte  `protobuf:"bytes,11,rep,name=r_bytes,json=rBytes" json:"r_bytes,omitempty"`
	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
	XXX_unrecognized     []byte    `json:"-"`
	XXX_sizecache        int32     `json:"-"`
}

// Standard generated proto.Message boilerplate (see Simple for details).
func (m *Repeats) Reset()         { *m = Repeats{} }
func (m *Repeats) String() string { return proto.CompactTextString(m) }
func (*Repeats) ProtoMessage()    {}
func (*Repeats) Descriptor() ([]byte, []int) {
	return fileDescriptor_50cab1d8463dea41, []int{2}
}

func (m *Repeats) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Repeats.Unmarshal(m, b)
}
func (m *Repeats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Repeats.Marshal(b, m, deterministic)
}
func (m *Repeats) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Repeats.Merge(m, src)
}
func (m *Repeats) XXX_Size() int {
	return xxx_messageInfo_Repeats.Size(m)
}
func (m *Repeats) XXX_DiscardUnknown() {
	xxx_messageInfo_Repeats.DiscardUnknown(m)
}

var xxx_messageInfo_Repeats proto.InternalMessageInfo

// Generated nil-safe getters; each returns the slice field, or nil when the
// receiver is nil.
func (m *Repeats) GetRBool() []bool {
	if m != nil {
		return m.RBool
	}
	return nil
}

func (m *Repeats) GetRInt32() []int32 {
	if m != nil {
		return m.RInt32
	}
	return nil
}

func (m *Repeats) GetRInt64() []int64 {
	if m != nil {
		return m.RInt64
	}
	return nil
}

func (m *Repeats) GetRUint32() []uint32 {
	if m != nil {
		return m.RUint32
	}
	return nil
}

func (m *Repeats) GetRUint64() []uint64 {
	if m != nil {
		return m.RUint64
	}
	return nil
}

func (m *Repeats) GetRSint32() []int32 {
	if m != nil {
		return m.RSint32
	}
	return nil
}

func (m *Repeats) GetRSint64() []int64 {
	if m != nil {
		return m.RSint64
	}
	return nil
}

func (m *Repeats) GetRFloat() []float32 {
	if m != nil {
		return m.RFloat
	}
	return nil
}

func (m *Repeats) GetRDouble() []float64 {
	if m != nil {
		return m.RDouble
	}
	return nil
}

func (m *Repeats) GetRString() []string {
	if m != nil {
		return m.RString
	}
	return nil
}

func (m *Repeats) GetRBytes() [][]byte {
	if m != nil {
		return m.RBytes
	}
	return nil
}
// Test message for holding enums and nested messages.
type Widget struct {
	Color                *Widget_Color  `protobuf:"varint,1,opt,name=color,enum=jsonpb_test.Widget_Color" json:"color,omitempty"`
	RColor               []Widget_Color `protobuf:"varint,2,rep,name=r_color,json=rColor,enum=jsonpb_test.Widget_Color" json:"r_color,omitempty"`
	Simple               *Simple        `protobuf:"bytes,10,opt,name=simple" json:"simple,omitempty"`
	RSimple              []*Simple      `protobuf:"bytes,11,rep,name=r_simple,json=rSimple" json:"r_simple,omitempty"`
	Repeats              *Repeats       `protobuf:"bytes,20,opt,name=repeats" json:"repeats,omitempty"`
	RRepeats             []*Repeats     `protobuf:"bytes,21,rep,name=r_repeats,json=rRepeats" json:"r_repeats,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

// Standard generated proto.Message boilerplate (see Simple for details).
func (m *Widget) Reset()         { *m = Widget{} }
func (m *Widget) String() string { return proto.CompactTextString(m) }
func (*Widget) ProtoMessage()    {}
func (*Widget) Descriptor() ([]byte, []int) {
	return fileDescriptor_50cab1d8463dea41, []int{3}
}

func (m *Widget) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Widget.Unmarshal(m, b)
}
func (m *Widget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Widget.Marshal(b, m, deterministic)
}
func (m *Widget) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Widget.Merge(m, src)
}
func (m *Widget) XXX_Size() int {
	return xxx_messageInfo_Widget.Size(m)
}
func (m *Widget) XXX_DiscardUnknown() {
	xxx_messageInfo_Widget.DiscardUnknown(m)
}

var xxx_messageInfo_Widget proto.InternalMessageInfo

// GetColor returns the enum field, defaulting to Widget_RED (the zero enum
// value) when unset.
func (m *Widget) GetColor() Widget_Color {
	if m != nil && m.Color != nil {
		return *m.Color
	}
	return Widget_RED
}

func (m *Widget) GetRColor() []Widget_Color {
	if m != nil {
		return m.RColor
	}
	return nil
}

// Message-typed getters return nil when the field is unset, so callers can
// chain getters safely without nil checks.
func (m *Widget) GetSimple() *Simple {
	if m != nil {
		return m.Simple
	}
	return nil
}

func (m *Widget) GetRSimple() []*Simple {
	if m != nil {
		return m.RSimple
	}
	return nil
}

func (m *Widget) GetRepeats() *Repeats {
	if m != nil {
		return m.Repeats
	}
	return nil
}

func (m *Widget) GetRRepeats() []*Repeats {
	if m != nil {
		return m.RRepeats
	}
	return nil
}
type Maps struct {
MInt64Str map[int64]string `protobuf:"bytes,1,rep,name=m_int64_str,json=mInt64Str" json:"m_int64_str,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
MBoolSimple map[bool]*Simple `protobuf:"bytes,2,rep,name=m_bool_simple,json=mBoolSimple" json:"m_bool_simple,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Maps) Reset() { *m = Maps{} }
func (m *Maps) String() string { return proto.CompactTextString(m) }
func (*Maps) ProtoMessage() {}
func (*Maps) Descriptor() ([]byte, []int) {
return fileDescriptor_50cab1d8463dea41, []int{4}
}
func (m *Maps) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Maps.Unmarshal(m, b)
}
func (m *Maps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Maps.Marshal(b, m, deterministic)
}
func (m *Maps) XXX_Merge(src proto.Message) {
xxx_messageInfo_Maps.Merge(m, src)
}
func (m *Maps) XXX_Size() int {
return xxx_messageInfo_Maps.Size(m)
}
func (m *Maps) XXX_DiscardUnknown() {
xxx_messageInfo_Maps.DiscardUnknown(m)
}
var xxx_messageInfo_Maps proto.InternalMessageInfo
func (m *Maps) GetMInt64Str() map[int64]string {
if m != nil {
return m.MInt64Str
}
return nil
}
func (m *Maps) GetMBoolSimple() map[bool]*Simple {
if m != nil {
return m.MBoolSimple
}
return nil
}
type MsgWithOneof struct {
// Types that are valid to be assigned to Union:
// *MsgWithOneof_Title
// *MsgWithOneof_Salary
// *MsgWithOneof_Country
// *MsgWithOneof_HomeAddress
// *MsgWithOneof_MsgWithRequired
// *MsgWithOneof_NullValue
Union isMsgWithOneof_Union `protobuf_oneof:"union"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *MsgWithOneof) Reset() { *m = MsgWithOneof{} }
func (m *MsgWithOneof) String() string { return proto.CompactTextString(m) }
func (*MsgWithOneof) ProtoMessage() {}
func (*MsgWithOneof) Descriptor() ([]byte, []int) {
return fileDescriptor_50cab1d8463dea41, []int{5}
}
func (m *MsgWithOneof) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MsgWithOneof.Unmarshal(m, b)
}
func (m *MsgWithOneof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_MsgWithOneof.Marshal(b, m, deterministic)
}
func (m *MsgWithOneof) XXX_Merge(src proto.Message) {
xxx_messageInfo_MsgWithOneof.Merge(m, src)
}
func (m *MsgWithOneof) XXX_Size() int {
return xxx_messageInfo_MsgWithOneof.Size(m)
}
func (m *MsgWithOneof) XXX_DiscardUnknown() {
xxx_messageInfo_MsgWithOneof.DiscardUnknown(m)
}
var xxx_messageInfo_MsgWithOneof proto.InternalMessageInfo
type isMsgWithOneof_Union interface {
isMsgWithOneof_Union()
}
type MsgWithOneof_Title struct {
Title string `protobuf:"bytes,1,opt,name=title,oneof"`
}
type MsgWithOneof_Salary struct {
Salary int64 `protobuf:"varint,2,opt,name=salary,oneof"`
}
type MsgWithOneof_Country struct {
Country string `protobuf:"bytes,3,opt,name=Country,oneof"`
}
type MsgWithOneof_HomeAddress struct {
HomeAddress string `protobuf:"bytes,4,opt,name=home_address,json=homeAddress,oneof"`
}
type MsgWithOneof_MsgWithRequired struct {
MsgWithRequired *MsgWithRequired `protobuf:"bytes,5,opt,name=msg_with_required,json=msgWithRequired,oneof"`
}
type MsgWithOneof_NullValue struct {
NullValue structpb.NullValue `protobuf:"varint,6,opt,name=null_value,json=nullValue,enum=google.protobuf.NullValue,oneof"`
}
func (*MsgWithOneof_Title) isMsgWithOneof_Union() {}
func (*MsgWithOneof_Salary) isMsgWithOneof_Union() {}
func (*MsgWithOneof_Country) isMsgWithOneof_Union() {}
func (*MsgWithOneof_HomeAddress) isMsgWithOneof_Union() {}
func (*MsgWithOneof_MsgWithRequired) isMsgWithOneof_Union() {}
func (*MsgWithOneof_NullValue) isMsgWithOneof_Union() {}
func (m *MsgWithOneof) GetUnion() isMsgWithOneof_Union {
if m != nil {
return m.Union
}
return nil
}
func (m *MsgWithOneof) GetTitle() string {
if x, ok := m.GetUnion().(*MsgWithOneof_Title); ok {
return x.Title
}
return ""
}
func (m *MsgWithOneof) GetSalary() int64 {
if x, ok := m.GetUnion().(*MsgWithOneof_Salary); ok {
return x.Salary
}
return 0
}
func (m *MsgWithOneof) GetCountry() string {
if x, ok := m.GetUnion().(*MsgWithOneof_Country); ok {
return x.Country
}
return ""
}
func (m *MsgWithOneof) GetHomeAddress() string {
if x, ok := m.GetUnion().(*MsgWithOneof_HomeAddress); ok {
return x.HomeAddress
}
return ""
}
func (m *MsgWithOneof) GetMsgWithRequired() *MsgWithRequired {
if x, ok := m.GetUnion().(*MsgWithOneof_MsgWithRequired); ok {
return x.MsgWithRequired
}
return nil
}
func (m *MsgWithOneof) GetNullValue() structpb.NullValue {
if x, ok := m.GetUnion().(*MsgWithOneof_NullValue); ok {
return x.NullValue
}
return structpb.NullValue_NULL_VALUE
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*MsgWithOneof) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*MsgWithOneof_Title)(nil),
(*MsgWithOneof_Salary)(nil),
(*MsgWithOneof_Country)(nil),
(*MsgWithOneof_HomeAddress)(nil),
(*MsgWithOneof_MsgWithRequired)(nil),
(*MsgWithOneof_NullValue)(nil),
}
}
type Real struct {
Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
proto.XXX_InternalExtensions `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Real) Reset() { *m = Real{} }
func (m *Real) String() string { return proto.CompactTextString(m) }
func (*Real) ProtoMessage() {}
func (*Real) Descriptor() ([]byte, []int) {
return fileDescriptor_50cab1d8463dea41, []int{6}
}
var extRange_Real = []proto.ExtensionRange{
{Start: 100, End: 536870911},
}
func (*Real) ExtensionRangeArray() []proto.ExtensionRange {
return extRange_Real
}
func (m *Real) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Real.Unmarshal(m, b)
}
func (m *Real) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Real.Marshal(b, m, deterministic)
}
func (m *Real) XXX_Merge(src proto.Message) {
xxx_messageInfo_Real.Merge(m, src)
}
func (m *Real) XXX_Size() int {
return xxx_messageInfo_Real.Size(m)
}
func (m *Real) XXX_DiscardUnknown() {
xxx_messageInfo_Real.DiscardUnknown(m)
}
var xxx_messageInfo_Real proto.InternalMessageInfo
func (m *Real) GetValue() float64 {
if m != nil && m.Value != nil {
return *m.Value
}
return 0
}
type Complex struct {
Imaginary *float64 `protobuf:"fixed64,1,opt,name=imaginary" json:"imaginary,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
proto.XXX_InternalExtensions `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Complex) Reset() { *m = Complex{} }
func (m *Complex) String() string { return proto.CompactTextString(m) }
func (*Complex) ProtoMessage() {}
func (*Complex) Descriptor() ([]byte, []int) {
return fileDescriptor_50cab1d8463dea41, []int{7}
}
var extRange_Complex = []proto.ExtensionRange{
{Start: 100, End: 536870911},
}
func (*Complex) ExtensionRangeArray() []proto.ExtensionRange {
return extRange_Complex
}
func (m *Complex) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Complex.Unmarshal(m, b)
}
func (m *Complex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Complex.Marshal(b, m, deterministic)
}
func (m *Complex) XXX_Merge(src proto.Message) {
xxx_messageInfo_Complex.Merge(m, src)
}
func (m *Complex) XXX_Size() int {
return xxx_messageInfo_Complex.Size(m)
}
func (m *Complex) XXX_DiscardUnknown() {
xxx_messageInfo_Complex.DiscardUnknown(m)
}
var xxx_messageInfo_Complex proto.InternalMessageInfo
func (m *Complex) GetImaginary() float64 {
if m != nil && m.Imaginary != nil {
return *m.Imaginary
}
return 0
}
var E_Complex_RealExtension = &proto.ExtensionDesc{
ExtendedType: (*Real)(nil),
ExtensionType: (*Complex)(nil),
Field: 123,
Name: "jsonpb_test.Complex.real_extension",
Tag: "bytes,123,opt,name=real_extension",
Filename: "jsonpb_proto/test2.proto",
}
type KnownTypes struct {
An *anypb.Any `protobuf:"bytes,14,opt,name=an" json:"an,omitempty"`
Dur *durationpb.Duration `protobuf:"bytes,1,opt,name=dur" json:"dur,omitempty"`
St *structpb.Struct `protobuf:"bytes,12,opt,name=st" json:"st,omitempty"`
Ts *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=ts" json:"ts,omitempty"`
Lv *structpb.ListValue `protobuf:"bytes,15,opt,name=lv" json:"lv,omitempty"`
Val *structpb.Value `protobuf:"bytes,16,opt,name=val" json:"val,omitempty"`
Dbl *wrapperspb.DoubleValue `protobuf:"bytes,3,opt,name=dbl" json:"dbl,omitempty"`
Flt *wrapperspb.FloatValue `protobuf:"bytes,4,opt,name=flt" json:"flt,omitempty"`
I64 *wrapperspb.Int64Value `protobuf:"bytes,5,opt,name=i64" json:"i64,omitempty"`
U64 *wrapperspb.UInt64Value `protobuf:"bytes,6,opt,name=u64" json:"u64,omitempty"`
I32 *wrapperspb.Int32Value `protobuf:"bytes,7,opt,name=i32" json:"i32,omitempty"`
U32 *wrapperspb.UInt32Value `protobuf:"bytes,8,opt,name=u32" json:"u32,omitempty"`
Bool *wrapperspb.BoolValue `protobuf:"bytes,9,opt,name=bool" json:"bool,omitempty"`
Str *wrapperspb.StringValue `protobuf:"bytes,10,opt,name=str" json:"str,omitempty"`
Bytes *wrapperspb.BytesValue `protobuf:"bytes,11,opt,name=bytes" json:"bytes,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *KnownTypes) Reset() { *m = KnownTypes{} }
func (m *KnownTypes) String() string { return proto.CompactTextString(m) }
func (*KnownTypes) ProtoMessage() {}
func (*KnownTypes) Descriptor() ([]byte, []int) {
return fileDescriptor_50cab1d8463dea41, []int{8}
}
func (m *KnownTypes) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_KnownTypes.Unmarshal(m, b)
}
func (m *KnownTypes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_KnownTypes.Marshal(b, m, deterministic)
}
func (m *KnownTypes) XXX_Merge(src proto.Message) {
xxx_messageInfo_KnownTypes.Merge(m, src)
}
func (m *KnownTypes) XXX_Size() int {
return xxx_messageInfo_KnownTypes.Size(m)
}
func (m *KnownTypes) XXX_DiscardUnknown() {
xxx_messageInfo_KnownTypes.DiscardUnknown(m)
}
var xxx_messageInfo_KnownTypes proto.InternalMessageInfo
func (m *KnownTypes) GetAn() *anypb.Any {
if m != nil {
return m.An
}
return nil
}
func (m *KnownTypes) GetDur() *durationpb.Duration {
if m != nil {
return m.Dur
}
return nil
}
func (m *KnownTypes) GetSt() *structpb.Struct {
if m != nil {
return m.St
}
return nil
}
func (m *KnownTypes) GetTs() *timestamppb.Timestamp {
if m != nil {
return m.Ts
}
return nil
}
func (m *KnownTypes) GetLv() *structpb.ListValue {
if m != nil {
return m.Lv
}
return nil
}
func (m *KnownTypes) GetVal() *structpb.Value {
if m != nil {
return m.Val
}
return nil
}
func (m *KnownTypes) GetDbl() *wrapperspb.DoubleValue {
if m != nil {
return m.Dbl
}
return nil
}
func (m *KnownTypes) GetFlt() *wrapperspb.FloatValue {
if m != nil {
return m.Flt
}
return nil
}
func (m *KnownTypes) GetI64() *wrapperspb.Int64Value {
if m != nil {
return m.I64
}
return nil
}
func (m *KnownTypes) GetU64() *wrapperspb.UInt64Value {
if m != nil {
return m.U64
}
return nil
}
func (m *KnownTypes) GetI32() *wrapperspb.Int32Value {
if m != nil {
return m.I32
}
return nil
}
func (m *KnownTypes) GetU32() *wrapperspb.UInt32Value {
if m != nil {
return m.U32
}
return nil
}
func (m *KnownTypes) GetBool() *wrapperspb.BoolValue {
if m != nil {
return m.Bool
}
return nil
}
func (m *KnownTypes) GetStr() *wrapperspb.StringValue {
if m != nil {
return m.Str
}
return nil
}
func (m *KnownTypes) GetBytes() *wrapperspb.BytesValue {
if m != nil {
return m.Bytes
}
return nil
}
// Test messages for marshaling/unmarshaling required fields.
type MsgWithRequired struct {
Str *string `protobuf:"bytes,1,req,name=str" json:"str,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *MsgWithRequired) Reset() { *m = MsgWithRequired{} }
func (m *MsgWithRequired) String() string { return proto.CompactTextString(m) }
func (*MsgWithRequired) ProtoMessage() {}
func (*MsgWithRequired) Descriptor() ([]byte, []int) {
return fileDescriptor_50cab1d8463dea41, []int{9}
}
func (m *MsgWithRequired) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MsgWithRequired.Unmarshal(m, b)
}
func (m *MsgWithRequired) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_MsgWithRequired.Marshal(b, m, deterministic)
}
func (m *MsgWithRequired) XXX_Merge(src proto.Message) {
xxx_messageInfo_MsgWithRequired.Merge(m, src)
}
func (m *MsgWithRequired) XXX_Size() int {
return xxx_messageInfo_MsgWithRequired.Size(m)
}
func (m *MsgWithRequired) XXX_DiscardUnknown() {
xxx_messageInfo_MsgWithRequired.DiscardUnknown(m)
}
var xxx_messageInfo_MsgWithRequired proto.InternalMessageInfo
func (m *MsgWithRequired) GetStr() string {
if m != nil && m.Str != nil {
return *m.Str
}
return ""
}
type MsgWithIndirectRequired struct {
Subm *MsgWithRequired `protobuf:"bytes,1,opt,name=subm" json:"subm,omitempty"`
MapField map[string]*MsgWithRequired `protobuf:"bytes,2,rep,name=map_field,json=mapField" json:"map_field,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
SliceField []*MsgWithRequired `protobuf:"bytes,3,rep,name=slice_field,json=sliceField" json:"slice_field,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *MsgWithIndirectRequired) Reset() { *m = MsgWithIndirectRequired{} }
func (m *MsgWithIndirectRequired) String() string { return proto.CompactTextString(m) }
func (*MsgWithIndirectRequired) ProtoMessage() {}
func (*MsgWithIndirectRequired) Descriptor() ([]byte, []int) {
return fileDescriptor_50cab1d8463dea41, []int{10}
}
func (m *MsgWithIndirectRequired) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MsgWithIndirectRequired.Unmarshal(m, b)
}
func (m *MsgWithIndirectRequired) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_MsgWithIndirectRequired.Marshal(b, m, deterministic)
}
func (m *MsgWithIndirectRequired) XXX_Merge(src proto.Message) {
xxx_messageInfo_MsgWithIndirectRequired.Merge(m, src)
}
func (m *MsgWithIndirectRequired) XXX_Size() int {
return xxx_messageInfo_MsgWithIndirectRequired.Size(m)
}
func (m *MsgWithIndirectRequired) XXX_DiscardUnknown() {
xxx_messageInfo_MsgWithIndirectRequired.DiscardUnknown(m)
}
var xxx_messageInfo_MsgWithIndirectRequired proto.InternalMessageInfo
func (m *MsgWithIndirectRequired) GetSubm() *MsgWithRequired {
if m != nil {
return m.Subm
}
return nil
}
func (m *MsgWithIndirectRequired) GetMapField() map[string]*MsgWithRequired {
if m != nil {
return m.MapField
}
return nil
}
func (m *MsgWithIndirectRequired) GetSliceField() []*MsgWithRequired {
if m != nil {
return m.SliceField
}
return nil
}
type MsgWithRequiredBytes struct {
Byts []byte `protobuf:"bytes,1,req,name=byts" json:"byts,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *MsgWithRequiredBytes) Reset() { *m = MsgWithRequiredBytes{} }
func (m *MsgWithRequiredBytes) String() string { return proto.CompactTextString(m) }
func (*MsgWithRequiredBytes) ProtoMessage() {}
func (*MsgWithRequiredBytes) Descriptor() ([]byte, []int) {
return fileDescriptor_50cab1d8463dea41, []int{11}
}
func (m *MsgWithRequiredBytes) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MsgWithRequiredBytes.Unmarshal(m, b)
}
func (m *MsgWithRequiredBytes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_MsgWithRequiredBytes.Marshal(b, m, deterministic)
}
func (m *MsgWithRequiredBytes) XXX_Merge(src proto.Message) {
xxx_messageInfo_MsgWithRequiredBytes.Merge(m, src)
}
func (m *MsgWithRequiredBytes) XXX_Size() int {
return xxx_messageInfo_MsgWithRequiredBytes.Size(m)
}
func (m *MsgWithRequiredBytes) XXX_DiscardUnknown() {
xxx_messageInfo_MsgWithRequiredBytes.DiscardUnknown(m)
}
var xxx_messageInfo_MsgWithRequiredBytes proto.InternalMessageInfo
func (m *MsgWithRequiredBytes) GetByts() []byte {
if m != nil {
return m.Byts
}
return nil
}
type MsgWithRequiredWKT struct {
Str *wrapperspb.StringValue `protobuf:"bytes,1,req,name=str" json:"str,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *MsgWithRequiredWKT) Reset() { *m = MsgWithRequiredWKT{} }
func (m *MsgWithRequiredWKT) String() string { return proto.CompactTextString(m) }
func (*MsgWithRequiredWKT) ProtoMessage() {}
func (*MsgWithRequiredWKT) Descriptor() ([]byte, []int) {
return fileDescriptor_50cab1d8463dea41, []int{12}
}
func (m *MsgWithRequiredWKT) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MsgWithRequiredWKT.Unmarshal(m, b)
}
func (m *MsgWithRequiredWKT) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_MsgWithRequiredWKT.Marshal(b, m, deterministic)
}
func (m *MsgWithRequiredWKT) XXX_Merge(src proto.Message) {
xxx_messageInfo_MsgWithRequiredWKT.Merge(m, src)
}
func (m *MsgWithRequiredWKT) XXX_Size() int {
return xxx_messageInfo_MsgWithRequiredWKT.Size(m)
}
func (m *MsgWithRequiredWKT) XXX_DiscardUnknown() {
xxx_messageInfo_MsgWithRequiredWKT.DiscardUnknown(m)
}
var xxx_messageInfo_MsgWithRequiredWKT proto.InternalMessageInfo
func (m *MsgWithRequiredWKT) GetStr() *wrapperspb.StringValue {
if m != nil {
return m.Str
}
return nil
}
var E_Name = &proto.ExtensionDesc{
ExtendedType: (*Real)(nil),
ExtensionType: (*string)(nil),
Field: 124,
Name: "jsonpb_test.name",
Tag: "bytes,124,opt,name=name",
Filename: "jsonpb_proto/test2.proto",
}
var E_Extm = &proto.ExtensionDesc{
ExtendedType: (*Real)(nil),
ExtensionType: (*MsgWithRequired)(nil),
Field: 125,
Name: "jsonpb_test.extm",
Tag: "bytes,125,opt,name=extm",
Filename: "jsonpb_proto/test2.proto",
}
func init() {
proto.RegisterEnum("jsonpb_test.Widget_Color", Widget_Color_name, Widget_Color_value)
proto.RegisterType((*Simple)(nil), "jsonpb_test.Simple")
proto.RegisterType((*NonFinites)(nil), "jsonpb_test.NonFinites")
proto.RegisterType((*Repeats)(nil), "jsonpb_test.Repeats")
proto.RegisterType((*Widget)(nil), "jsonpb_test.Widget")
proto.RegisterType((*Maps)(nil), "jsonpb_test.Maps")
proto.RegisterMapType((map[bool]*Simple)(nil), "jsonpb_test.Maps.MBoolSimpleEntry")
proto.RegisterMapType((map[int64]string)(nil), "jsonpb_test.Maps.MInt64StrEntry")
proto.RegisterType((*MsgWithOneof)(nil), "jsonpb_test.MsgWithOneof")
proto.RegisterType((*Real)(nil), "jsonpb_test.Real")
proto.RegisterExtension(E_Complex_RealExtension)
proto.RegisterType((*Complex)(nil), "jsonpb_test.Complex")
proto.RegisterType((*KnownTypes)(nil), "jsonpb_test.KnownTypes")
proto.RegisterType((*MsgWithRequired)(nil), "jsonpb_test.MsgWithRequired")
proto.RegisterType((*MsgWithIndirectRequired)(nil), "jsonpb_test.MsgWithIndirectRequired")
proto.RegisterMapType((map[string]*MsgWithRequired)(nil), "jsonpb_test.MsgWithIndirectRequired.MapFieldEntry")
proto.RegisterType((*MsgWithRequiredBytes)(nil), "jsonpb_test.MsgWithRequiredBytes")
proto.RegisterType((*MsgWithRequiredWKT)(nil), "jsonpb_test.MsgWithRequiredWKT")
proto.RegisterExtension(E_Name)
proto.RegisterExtension(E_Extm)
}
func init() { proto.RegisterFile("jsonpb_proto/test2.proto", fileDescriptor_50cab1d8463dea41) }
var fileDescriptor_50cab1d8463dea41 = []byte{
// 1537 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x57, 0xdd, 0x6e, 0xdb, 0xc8,
0x15, 0x36, 0x49, 0x51, 0x12, 0x8f, 0xec, 0xc4, 0x99, 0x64, 0x37, 0xb4, 0x1b, 0x6c, 0x09, 0x6d,
0xb7, 0x55, 0xb3, 0xa8, 0xdc, 0xa5, 0x05, 0xa1, 0xc8, 0x76, 0x81, 0xae, 0x13, 0xa7, 0xd9, 0xee,
0xc6, 0x5b, 0x8c, 0x93, 0x06, 0xed, 0x8d, 0x40, 0x99, 0x94, 0xc2, 0x96, 0x9c, 0x51, 0x67, 0x86,
0x4e, 0x84, 0xb6, 0x80, 0xfb, 0x0a, 0xed, 0x23, 0x14, 0xe8, 0x6d, 0xef, 0x7a, 0xd1, 0xe7, 0xe8,
0x03, 0x2d, 0xe6, 0xcc, 0x50, 0x7f, 0x96, 0x8d, 0xbd, 0xb2, 0x66, 0xbe, 0x9f, 0x19, 0xce, 0xf9,
0x78, 0x86, 0x86, 0xf0, 0x8f, 0x92, 0xb3, 0xd9, 0x78, 0x34, 0x13, 0x5c, 0xf1, 0x23, 0x95, 0x49,
0x15, 0xf7, 0xf1, 0x37, 0xe9, 0x58, 0x44, 0xcf, 0x1d, 0x1e, 0x4c, 0x39, 0x9f, 0x16, 0xd9, 0x11,
0x42, 0xe3, 0x6a, 0x72, 0x94, 0xb0, 0xb9, 0xe1, 0x1d, 0x7e, 0xb4, 0x09, 0xa5, 0x95, 0x48, 0x54,
0xce, 0x99, 0xc5, 0x1f, 0x6d, 0xe2, 0x52, 0x89, 0xea, 0x42, 0x59, 0xf4, 0x87, 0x9b, 0xa8, 0xca,
0xcb, 0x4c, 0xaa, 0xa4, 0x9c, 0xdd, 0x64, 0xff, 0x4e, 0x24, 0xb3, 0x59, 0x26, 0xa4, 0xc1, 0xbb,
0xff, 0x69, 0x40, 0xf3, 0x3c, 0x2f, 0x67, 0x45, 0x46, 0x3e, 0x80, 0x26, 0x1f, 0x8d, 0x39, 0x2f,
0x42, 0x27, 0x72, 0x7a, 0x6d, 0xea, 0xf3, 0x13, 0xce, 0x0b, 0xf2, 0x10, 0x5a, 0x7c, 0x94, 0x33,
0x75, 0x1c, 0x87, 0x6e, 0xe4, 0xf4, 0x7c, 0xda, 0xe4, 0x5f, 0xe9, 0x11, 0xf9, 0x08, 0x3a, 0x16,
0x18, 0x49, 0x25, 0x42, 0x0f, 0xc1, 0xc0, 0x80, 0xe7, 0x4a, 0x2c, 0x84, 0xc3, 0x41, 0xd8, 0x88,
0x9c, 0x9e, 0x67, 0x84, 0xc3, 0xc1, 0x42, 0x38, 0x1c, 0xa0, 0xd0, 0x47, 0x30, 0x30, 0xa0, 0x16,
0x1e, 0x40, 0x9b, 0x8f, 0x2a, 0xb3, 0x64, 0x33, 0x72, 0x7a, 0x7b, 0xb4, 0xc5, 0x5f, 0xe3, 0x90,
0x44, 0xb0, 0x5b, 0x43, 0xa8, 0x6d, 0x21, 0x0c, 0x16, 0x5e, 0x13, 0x0f, 0x07, 0x61, 0x3b, 0x72,
0x7a, 0x0d, 0x2b, 0x1e, 0x0e, 0x96, 0x62, 0xbb, 0x70, 0x80, 0x30, 0x58, 0x78, 0x21, 0x96, 0x66,
0x65, 0x88, 0x9c, 0xde, 0x3d, 0xda, 0xe2, 0xe7, 0x2b, 0x2b, 0xcb, 0xe5, 0xca, 0x1d, 0x84, 0xc1,
0xc2, 0x6b, 0xe2, 0xe1, 0x20, 0xdc, 0x8d, 0x9c, 0x1e, 0xb1, 0xe2, 0x7a, 0x65, 0xb9, 0x5c, 0x79,
0x0f, 0x61, 0xb0, 0xf0, 0xe2, 0xb0, 0x26, 0x05, 0x4f, 0x54, 0x78, 0x27, 0x72, 0x7a, 0x2e, 0x6d,
0xf2, 0xe7, 0x7a, 0x64, 0x0e, 0x0b, 0x01, 0x54, 0xde, 0x45, 0x30, 0x30, 0xe0, 0x62, 0xd5, 0x94,
0x57, 0xe3, 0x22, 0x0b, 0xf7, 0x23, 0xa7, 0xe7, 0xd0, 0x16, 0x7f, 0x86, 0x43, 0xb3, 0xaa, 0x81,
0x50, 0x7b, 0x0f, 0x61, 0xb0, 0xf0, 0x72, 0xcb, 0x4a, 0xe4, 0x6c, 0x1a, 0x92, 0xc8, 0xe9, 0x05,
0x7a, 0xcb, 0x38, 0x34, 0x1b, 0x1a, 0xcf, 0x55, 0x26, 0xc3, 0xfb, 0x91, 0xd3, 0xdb, 0xa5, 0x4d,
0x7e, 0xa2, 0x47, 0xdd, 0x7f, 0x38, 0x00, 0x67, 0x9c, 0x3d, 0xcf, 0x59, 0xae, 0x32, 0x49, 0xee,
0x83, 0x3f, 0x19, 0xb1, 0x84, 0x61, 0x68, 0x5c, 0xda, 0x98, 0x9c, 0x25, 0x4c, 0x47, 0x69, 0x32,
0x9a, 0xe5, 0x6c, 0x82, 0x91, 0x71, 0xa9, 0x3f, 0xf9, 0x6d, 0xce, 0x26, 0x66, 0x9a, 0xe9, 0x69,
0xcf, 0x4e, 0x9f, 0xe9, 0xe9, 0xfb, 0xe0, 0xa7, 0x68, 0xd1, 0xc0, 0x0d, 0x36, 0x52, 0x6b, 0x91,
0x1a, 0x0b, 0x1f, 0x67, 0xfd, 0xb4, 0xb6, 0x48, 0x8d, 0x45, 0xd3, 0x4e, 0x6b, 0x8b, 0xee, 0xbf,
0x5d, 0x68, 0xd1, 0x6c, 0x96, 0x25, 0x4a, 0x6a, 0x8a, 0xa8, 0x73, 0xec, 0xe9, 0x1c, 0x8b, 0x3a,
0xc7, 0x62, 0x91, 0x63, 0x4f, 0xe7, 0x58, 0x98, 0x1c, 0xd7, 0xc0, 0x70, 0x10, 0x7a, 0x91, 0xa7,
0x73, 0x2a, 0x4c, 0x4e, 0x0f, 0xa0, 0x2d, 0xea, 0x1c, 0x36, 0x22, 0x4f, 0xe7, 0x50, 0xd8, 0x1c,
0x2e, 0xa0, 0xe1, 0x20, 0xf4, 0x23, 0x4f, 0xa7, 0x4c, 0xd8, 0x94, 0x21, 0x24, 0xeb, 0xf4, 0x7a,
0x3a, 0x43, 0xe2, 0x7c, 0x45, 0x65, 0x13, 0xd2, 0x8a, 0x3c, 0x9d, 0x10, 0x61, 0x13, 0x82, 0x9b,
0x30, 0xf5, 0x6f, 0x47, 0x9e, 0xae, 0xbf, 0x30, 0xf5, 0x47, 0x8d, 0xad, 0x6f, 0x10, 0x79, 0xba,
0xbe, 0xc2, 0xd6, 0xd7, 0xd8, 0x99, 0xea, 0x41, 0xe4, 0xe9, 0xea, 0x89, 0x65, 0xf5, 0x84, 0xad,
0x5e, 0x27, 0xf2, 0x74, 0xf5, 0x84, 0xa9, 0xde, 0xff, 0x5d, 0x68, 0xbe, 0xc9, 0xd3, 0x69, 0xa6,
0xc8, 0x11, 0xf8, 0x17, 0xbc, 0xe0, 0x02, 0x2b, 0x77, 0x27, 0x3e, 0xe8, 0xaf, 0x74, 0xac, 0xbe,
0xe1, 0xf4, 0x9f, 0x6a, 0x02, 0x35, 0x3c, 0x12, 0x6b, 0x53, 0x23, 0xd1, 0x27, 0x78, 0xab, 0xa4,
0x29, 0xf0, 0x2f, 0xf9, 0x14, 0x9a, 0x12, 0xdb, 0x0b, 0xbe, 0x4f, 0x9d, 0xf8, 0xfe, 0x9a, 0xc4,
0x74, 0x1e, 0x6a, 0x29, 0xa4, 0x6f, 0xce, 0x07, 0xe9, 0x7a, 0xdb, 0x37, 0xd0, 0xf5, 0xa1, 0x59,
0x7e, 0x4b, 0x98, 0xa2, 0x87, 0x0f, 0xd0, 0xfd, 0xc1, 0x1a, 0xdd, 0x06, 0x82, 0xd6, 0x24, 0xf2,
0x19, 0x04, 0x62, 0x54, 0x2b, 0x3e, 0xc0, 0x05, 0xb6, 0x2b, 0xda, 0xc2, 0xfe, 0xea, 0x7e, 0x02,
0xbe, 0x79, 0x90, 0x16, 0x78, 0xf4, 0xf4, 0xd9, 0xfe, 0x0e, 0x09, 0xc0, 0xff, 0x35, 0x3d, 0x3d,
0x3d, 0xdb, 0x77, 0x48, 0x1b, 0x1a, 0x27, 0xdf, 0xbc, 0x3e, 0xdd, 0x77, 0xbb, 0xff, 0x72, 0xa1,
0xf1, 0x32, 0x99, 0x49, 0xf2, 0x2b, 0xe8, 0x94, 0x2b, 0xbd, 0xcd, 0xc1, 0x45, 0xa2, 0xb5, 0x45,
0x34, 0xaf, 0xff, 0xb2, 0xee, 0x76, 0xa7, 0x4c, 0x89, 0x39, 0x0d, 0xca, 0x45, 0xf7, 0x7b, 0x0e,
0x7b, 0x25, 0xc6, 0xb7, 0x3e, 0x09, 0x17, 0x3d, 0xba, 0x5b, 0x3c, 0x74, 0xae, 0xcd, 0x51, 0x18,
0x97, 0x4e, 0xb9, 0x9c, 0x39, 0xfc, 0x25, 0xdc, 0x59, 0x5f, 0x84, 0xec, 0x83, 0xf7, 0xa7, 0x6c,
0x8e, 0xe5, 0xf6, 0xa8, 0xfe, 0x49, 0x1e, 0x80, 0x7f, 0x99, 0x14, 0x55, 0x86, 0xaf, 0x69, 0x40,
0xcd, 0xe0, 0x89, 0xfb, 0x0b, 0xe7, 0xf0, 0x1c, 0xf6, 0x37, 0xed, 0x57, 0xf5, 0x6d, 0xa3, 0xff,
0xe9, 0xaa, 0xfe, 0x86, 0x6a, 0x2d, 0x4d, 0xbb, 0xff, 0x74, 0x61, 0xf7, 0xa5, 0x9c, 0xbe, 0xc9,
0xd5, 0xdb, 0x6f, 0x59, 0xc6, 0x27, 0xe4, 0x43, 0xf0, 0x55, 0xae, 0x8a, 0x0c, 0x3d, 0x83, 0x17,
0x3b, 0xd4, 0x0c, 0x49, 0x08, 0x4d, 0x99, 0x14, 0x89, 0x98, 0xa3, 0xb1, 0xf7, 0x62, 0x87, 0xda,
0x31, 0x39, 0x84, 0xd6, 0x53, 0x5e, 0xe9, 0xed, 0x60, 0x0f, 0xd1, 0x9a, 0x7a, 0x82, 0x7c, 0x0c,
0xbb, 0x6f, 0x79, 0x99, 0x8d, 0x92, 0x34, 0x15, 0x99, 0x94, 0xd8, 0x4e, 0x34, 0xa1, 0xa3, 0x67,
0xbf, 0x34, 0x93, 0xe4, 0x37, 0x70, 0xaf, 0x94, 0xd3, 0xd1, 0xbb, 0x5c, 0xbd, 0x1d, 0x89, 0xec,
0xcf, 0x55, 0x2e, 0xb2, 0x14, 0x5b, 0x4c, 0x27, 0x7e, 0xb4, 0x7e, 0xc4, 0x66, 0xa3, 0xd4, 0x72,
0x5e, 0xec, 0xd0, 0xbb, 0xe5, 0xfa, 0x14, 0xf9, 0x1c, 0x80, 0x55, 0x45, 0x31, 0x32, 0x67, 0xd0,
0xc4, 0xd7, 0xe8, 0xb0, 0x6f, 0x6e, 0xdc, 0x7e, 0x7d, 0xe3, 0xf6, 0xcf, 0xaa, 0xa2, 0xf8, 0x9d,
0x66, 0xbc, 0xd8, 0xa1, 0x01, 0xab, 0x07, 0x27, 0x2d, 0xf0, 0x2b, 0x96, 0x73, 0xd6, 0xfd, 0x31,
0x34, 0x68, 0x96, 0x14, 0xcb, 0x62, 0x38, 0xa6, 0xb3, 0xe1, 0xe0, 0x71, 0xbb, 0x9d, 0xee, 0x5f,
0x5d, 0x5d, 0x5d, 0xb9, 0xdd, 0xbf, 0x3b, 0xfa, 0xd9, 0xf5, 0x99, 0xbe, 0x27, 0x8f, 0x20, 0xc8,
0xcb, 0x64, 0x9a, 0x33, 0x7d, 0x46, 0x86, 0xbf, 0x9c, 0x58, 0x6a, 0xe2, 0x33, 0xb8, 0x23, 0xb2,
0xa4, 0x18, 0x65, 0xef, 0x55, 0xc6, 0x64, 0xce, 0x19, 0xb9, 0xb7, 0x11, 0xf8, 0xa4, 0x08, 0xff,
0xb2, 0xe5, 0xdd, 0xb1, 0x0b, 0xd1, 0x3d, 0x2d, 0x3f, 0xad, 0xd5, 0xdd, 0xff, 0xf9, 0x00, 0x5f,
0x33, 0xfe, 0x8e, 0xbd, 0x9a, 0xcf, 0x32, 0x49, 0x7e, 0x04, 0x6e, 0xc2, 0xf0, 0xc2, 0xd2, 0xfa,
0xcd, 0x07, 0xff, 0x92, 0xcd, 0xa9, 0x9b, 0x30, 0xf2, 0x29, 0x78, 0x69, 0x65, 0xda, 0x4c, 0x27,
0x3e, 0xb8, 0x46, 0x7b, 0x66, 0x3f, 0x78, 0xa8, 0x66, 0x91, 0x9f, 0x80, 0x2b, 0x15, 0xde, 0x9f,
0x9d, 0xf8, 0xe1, 0x35, 0xee, 0x39, 0x7e, 0xfc, 0x50, 0x57, 0x2a, 0xf2, 0x18, 0x5c, 0x25, 0x6d,
0xf0, 0xae, 0x1f, 0xfa, 0xab, 0xfa, 0x3b, 0x88, 0xba, 0x4a, 0x6a, 0x6e, 0x71, 0x89, 0x77, 0xe7,
0x36, 0xee, 0x37, 0xb9, 0x54, 0x58, 0x13, 0xea, 0x16, 0x97, 0xa4, 0x07, 0xde, 0x65, 0x52, 0xe0,
0x5d, 0xda, 0x89, 0x3f, 0xbc, 0x46, 0x36, 0x44, 0x4d, 0x21, 0x7d, 0xf0, 0xd2, 0x71, 0x81, 0x39,
0xd4, 0xe1, 0xb9, 0xf6, 0x5c, 0xd8, 0xa5, 0x2d, 0x3f, 0x1d, 0x17, 0xe4, 0x67, 0xe0, 0x4d, 0x0a,
0x85, 0xb1, 0xec, 0xc4, 0x3f, 0xb8, 0xc6, 0xc7, 0x7e, 0x6f, 0xe9, 0x93, 0x42, 0x69, 0x7a, 0x8e,
0xd7, 0xcb, 0x76, 0x3a, 0xbe, 0xdb, 0x96, 0x9e, 0x0f, 0x07, 0x7a, 0x37, 0xd5, 0x70, 0x80, 0x29,
0xdc, 0xb6, 0x9b, 0xd7, 0xab, 0xfc, 0x6a, 0x38, 0x40, 0xfb, 0xe3, 0x18, 0xbf, 0xa0, 0x6e, 0xb0,
0x3f, 0x8e, 0x6b, 0xfb, 0xe3, 0x18, 0xed, 0x8f, 0x63, 0xfc, 0xa4, 0xba, 0xc9, 0x7e, 0xc1, 0xaf,
0x90, 0xdf, 0xc0, 0x3b, 0x38, 0xb8, 0xe1, 0xd0, 0x75, 0x73, 0x31, 0x74, 0xe4, 0x69, 0x7f, 0xdd,
0x30, 0xe1, 0x06, 0x7f, 0x73, 0xaf, 0x59, 0x7f, 0xa9, 0x04, 0xf9, 0x0c, 0xfc, 0xfa, 0x7e, 0xdb,
0xfe, 0x00, 0x78, 0xdf, 0x19, 0x81, 0x61, 0x76, 0x3f, 0x86, 0xbb, 0x1b, 0x2f, 0xb5, 0x6e, 0x69,
0xa6, 0x4d, 0xbb, 0xbd, 0x00, 0x7d, 0xbb, 0xff, 0x75, 0xe1, 0xa1, 0x65, 0x7d, 0xc5, 0xd2, 0x5c,
0x64, 0x17, 0x6a, 0xc1, 0xfe, 0x39, 0x34, 0x64, 0x35, 0x2e, 0x6d, 0x92, 0x6f, 0x6d, 0x17, 0x14,
0x99, 0xe4, 0x5b, 0x08, 0xca, 0x64, 0x36, 0x9a, 0xe4, 0x59, 0x91, 0xda, 0x46, 0x1e, 0x6f, 0x93,
0x6d, 0x2e, 0xa5, 0x1b, 0xfc, 0x73, 0x2d, 0x32, 0x8d, 0xbd, 0x5d, 0xda, 0x21, 0xf9, 0x02, 0x3a,
0xb2, 0xc8, 0x2f, 0x32, 0x6b, 0xe9, 0xa1, 0xe5, 0xed, 0x3b, 0x01, 0x14, 0xa0, 0xfc, 0xf0, 0xf7,
0xb0, 0xb7, 0xe6, 0xbc, 0xda, 0xd3, 0x03, 0xd3, 0xd3, 0xe3, 0xf5, 0x9e, 0x7e, 0xbb, 0xf7, 0x4a,
0x73, 0x7f, 0x0c, 0x0f, 0x36, 0x50, 0xac, 0x00, 0x21, 0xd0, 0x18, 0xcf, 0x95, 0xc4, 0x33, 0xde,
0xa5, 0xf8, 0xbb, 0xfb, 0x0c, 0xc8, 0x06, 0xf7, 0xcd, 0xd7, 0xaf, 0xea, 0x08, 0x68, 0xe2, 0xf7,
0x89, 0xc0, 0x93, 0x4f, 0xa0, 0xc1, 0x92, 0x32, 0xdb, 0xd6, 0xd2, 0xfe, 0x8a, 0xcf, 0x83, 0xf0,
0x93, 0xa7, 0xd0, 0xc8, 0xde, 0xab, 0x72, 0x1b, 0xed, 0x6f, 0xdf, 0xa7, 0x90, 0x5a, 0x7c, 0xf2,
0xc5, 0x1f, 0x3e, 0x9f, 0xe6, 0xea, 0x6d, 0x35, 0xee, 0x5f, 0xf0, 0xf2, 0x68, 0xca, 0x8b, 0x84,
0x4d, 0x97, 0xff, 0x54, 0xe5, 0x4c, 0x65, 0x82, 0x25, 0x05, 0xfe, 0x07, 0x88, 0xb3, 0xf2, 0x68,
0xf5, 0x3f, 0xc3, 0xef, 0x02, 0x00, 0x00, 0xff, 0xff, 0x5b, 0xac, 0xa6, 0xa5, 0x28, 0x0e, 0x00,
0x00,
}
| protobuf/internal/testprotos/jsonpb_proto/test2.pb.go/0 | {
"file_path": "protobuf/internal/testprotos/jsonpb_proto/test2.pb.go",
"repo_id": "protobuf",
"token_count": 22263
} | 671 |
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"google.golang.org/protobuf/reflect/protoreflect"
)
// DiscardUnknown recursively discards all unknown fields from this message
// and all embedded messages.
//
// When unmarshaling a message with unrecognized fields, the tags and values
// of such fields are preserved in the Message. This allows a later call to
// marshal to be able to produce a message that continues to have those
// unrecognized fields. To avoid this, DiscardUnknown is used to
// explicitly clear the unknown fields after unmarshaling.
// DiscardUnknown recursively discards all unknown fields from this message
// and all embedded messages.
//
// When unmarshaling a message with unrecognized fields, the tags and values
// of such fields are preserved in the Message. This allows a later call to
// marshal to be able to produce a message that continues to have those
// unrecognized fields. To avoid this, DiscardUnknown is used to
// explicitly clear the unknown fields after unmarshaling.
func DiscardUnknown(m Message) {
	if m == nil {
		// Nothing to discard on a nil message.
		return
	}
	discardUnknown(MessageReflect(m))
}
// discardUnknown clears the unknown-field buffer on m and, recursively, on
// every message value reachable through m's populated fields (singular
// messages, repeated messages, and message-valued map entries).
func discardUnknown(m protoreflect.Message) {
	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		switch {
		case fd.Cardinality() != protoreflect.Repeated:
			// Singular field: recurse only if it holds a message.
			if fd.Message() != nil {
				discardUnknown(v.Message())
			}
		case fd.IsList():
			// Repeated field: recurse into each element if the
			// element type is a message.
			if fd.Message() != nil {
				list := v.List()
				for i, n := 0, list.Len(); i < n; i++ {
					discardUnknown(list.Get(i).Message())
				}
			}
		case fd.IsMap():
			// Map field: recurse into each value if the map's
			// value type is a message.
			if fd.MapValue().Message() != nil {
				v.Map().Range(func(_ protoreflect.MapKey, mv protoreflect.Value) bool {
					discardUnknown(mv.Message())
					return true
				})
			}
		}
		return true
	})

	// Finally, drop this message's own unknown fields, if any.
	if len(m.GetUnknown()) > 0 {
		m.SetUnknown(nil)
	}
}
| protobuf/proto/discard.go/0 | {
"file_path": "protobuf/proto/discard.go",
"repo_id": "protobuf",
"token_count": 578
} | 672 |
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
package descriptor
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
descriptorpb "google.golang.org/protobuf/types/descriptorpb"
reflect "reflect"
)
// Symbols defined in public import of google/protobuf/descriptor.proto.
type Edition = descriptorpb.Edition
const Edition_EDITION_UNKNOWN = descriptorpb.Edition_EDITION_UNKNOWN
const Edition_EDITION_PROTO2 = descriptorpb.Edition_EDITION_PROTO2
const Edition_EDITION_PROTO3 = descriptorpb.Edition_EDITION_PROTO3
const Edition_EDITION_2023 = descriptorpb.Edition_EDITION_2023
const Edition_EDITION_2024 = descriptorpb.Edition_EDITION_2024
const Edition_EDITION_1_TEST_ONLY = descriptorpb.Edition_EDITION_1_TEST_ONLY
const Edition_EDITION_2_TEST_ONLY = descriptorpb.Edition_EDITION_2_TEST_ONLY
const Edition_EDITION_99997_TEST_ONLY = descriptorpb.Edition_EDITION_99997_TEST_ONLY
const Edition_EDITION_99998_TEST_ONLY = descriptorpb.Edition_EDITION_99998_TEST_ONLY
const Edition_EDITION_99999_TEST_ONLY = descriptorpb.Edition_EDITION_99999_TEST_ONLY
const Edition_EDITION_MAX = descriptorpb.Edition_EDITION_MAX
var Edition_name = descriptorpb.Edition_name
var Edition_value = descriptorpb.Edition_value
type ExtensionRangeOptions_VerificationState = descriptorpb.ExtensionRangeOptions_VerificationState
const ExtensionRangeOptions_DECLARATION = descriptorpb.ExtensionRangeOptions_DECLARATION
const ExtensionRangeOptions_UNVERIFIED = descriptorpb.ExtensionRangeOptions_UNVERIFIED
var ExtensionRangeOptions_VerificationState_name = descriptorpb.ExtensionRangeOptions_VerificationState_name
var ExtensionRangeOptions_VerificationState_value = descriptorpb.ExtensionRangeOptions_VerificationState_value
type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type
const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE
const FieldDescriptorProto_TYPE_FLOAT = descriptorpb.FieldDescriptorProto_TYPE_FLOAT
const FieldDescriptorProto_TYPE_INT64 = descriptorpb.FieldDescriptorProto_TYPE_INT64
const FieldDescriptorProto_TYPE_UINT64 = descriptorpb.FieldDescriptorProto_TYPE_UINT64
const FieldDescriptorProto_TYPE_INT32 = descriptorpb.FieldDescriptorProto_TYPE_INT32
const FieldDescriptorProto_TYPE_FIXED64 = descriptorpb.FieldDescriptorProto_TYPE_FIXED64
const FieldDescriptorProto_TYPE_FIXED32 = descriptorpb.FieldDescriptorProto_TYPE_FIXED32
const FieldDescriptorProto_TYPE_BOOL = descriptorpb.FieldDescriptorProto_TYPE_BOOL
const FieldDescriptorProto_TYPE_STRING = descriptorpb.FieldDescriptorProto_TYPE_STRING
const FieldDescriptorProto_TYPE_GROUP = descriptorpb.FieldDescriptorProto_TYPE_GROUP
const FieldDescriptorProto_TYPE_MESSAGE = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE
const FieldDescriptorProto_TYPE_BYTES = descriptorpb.FieldDescriptorProto_TYPE_BYTES
const FieldDescriptorProto_TYPE_UINT32 = descriptorpb.FieldDescriptorProto_TYPE_UINT32
const FieldDescriptorProto_TYPE_ENUM = descriptorpb.FieldDescriptorProto_TYPE_ENUM
const FieldDescriptorProto_TYPE_SFIXED32 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED32
const FieldDescriptorProto_TYPE_SFIXED64 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED64
const FieldDescriptorProto_TYPE_SINT32 = descriptorpb.FieldDescriptorProto_TYPE_SINT32
const FieldDescriptorProto_TYPE_SINT64 = descriptorpb.FieldDescriptorProto_TYPE_SINT64
var FieldDescriptorProto_Type_name = descriptorpb.FieldDescriptorProto_Type_name
var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_value
type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label
const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL
const FieldDescriptorProto_LABEL_REPEATED = descriptorpb.FieldDescriptorProto_LABEL_REPEATED
const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED
var FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name
var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value
type FileOptions_OptimizeMode = descriptorpb.FileOptions_OptimizeMode
const FileOptions_SPEED = descriptorpb.FileOptions_SPEED
const FileOptions_CODE_SIZE = descriptorpb.FileOptions_CODE_SIZE
const FileOptions_LITE_RUNTIME = descriptorpb.FileOptions_LITE_RUNTIME
var FileOptions_OptimizeMode_name = descriptorpb.FileOptions_OptimizeMode_name
var FileOptions_OptimizeMode_value = descriptorpb.FileOptions_OptimizeMode_value
type FieldOptions_CType = descriptorpb.FieldOptions_CType
const FieldOptions_STRING = descriptorpb.FieldOptions_STRING
const FieldOptions_CORD = descriptorpb.FieldOptions_CORD
const FieldOptions_STRING_PIECE = descriptorpb.FieldOptions_STRING_PIECE
var FieldOptions_CType_name = descriptorpb.FieldOptions_CType_name
var FieldOptions_CType_value = descriptorpb.FieldOptions_CType_value
type FieldOptions_JSType = descriptorpb.FieldOptions_JSType
const FieldOptions_JS_NORMAL = descriptorpb.FieldOptions_JS_NORMAL
const FieldOptions_JS_STRING = descriptorpb.FieldOptions_JS_STRING
const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER
var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name
var FieldOptions_JSType_value = descriptorpb.FieldOptions_JSType_value
type FieldOptions_OptionRetention = descriptorpb.FieldOptions_OptionRetention
const FieldOptions_RETENTION_UNKNOWN = descriptorpb.FieldOptions_RETENTION_UNKNOWN
const FieldOptions_RETENTION_RUNTIME = descriptorpb.FieldOptions_RETENTION_RUNTIME
const FieldOptions_RETENTION_SOURCE = descriptorpb.FieldOptions_RETENTION_SOURCE
var FieldOptions_OptionRetention_name = descriptorpb.FieldOptions_OptionRetention_name
var FieldOptions_OptionRetention_value = descriptorpb.FieldOptions_OptionRetention_value
type FieldOptions_OptionTargetType = descriptorpb.FieldOptions_OptionTargetType
const FieldOptions_TARGET_TYPE_UNKNOWN = descriptorpb.FieldOptions_TARGET_TYPE_UNKNOWN
const FieldOptions_TARGET_TYPE_FILE = descriptorpb.FieldOptions_TARGET_TYPE_FILE
const FieldOptions_TARGET_TYPE_EXTENSION_RANGE = descriptorpb.FieldOptions_TARGET_TYPE_EXTENSION_RANGE
const FieldOptions_TARGET_TYPE_MESSAGE = descriptorpb.FieldOptions_TARGET_TYPE_MESSAGE
const FieldOptions_TARGET_TYPE_FIELD = descriptorpb.FieldOptions_TARGET_TYPE_FIELD
const FieldOptions_TARGET_TYPE_ONEOF = descriptorpb.FieldOptions_TARGET_TYPE_ONEOF
const FieldOptions_TARGET_TYPE_ENUM = descriptorpb.FieldOptions_TARGET_TYPE_ENUM
const FieldOptions_TARGET_TYPE_ENUM_ENTRY = descriptorpb.FieldOptions_TARGET_TYPE_ENUM_ENTRY
const FieldOptions_TARGET_TYPE_SERVICE = descriptorpb.FieldOptions_TARGET_TYPE_SERVICE
const FieldOptions_TARGET_TYPE_METHOD = descriptorpb.FieldOptions_TARGET_TYPE_METHOD
var FieldOptions_OptionTargetType_name = descriptorpb.FieldOptions_OptionTargetType_name
var FieldOptions_OptionTargetType_value = descriptorpb.FieldOptions_OptionTargetType_value
type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel
const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN
const MethodOptions_NO_SIDE_EFFECTS = descriptorpb.MethodOptions_NO_SIDE_EFFECTS
const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT
var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name
var MethodOptions_IdempotencyLevel_value = descriptorpb.MethodOptions_IdempotencyLevel_value
type FeatureSet_FieldPresence = descriptorpb.FeatureSet_FieldPresence
const FeatureSet_FIELD_PRESENCE_UNKNOWN = descriptorpb.FeatureSet_FIELD_PRESENCE_UNKNOWN
const FeatureSet_EXPLICIT = descriptorpb.FeatureSet_EXPLICIT
const FeatureSet_IMPLICIT = descriptorpb.FeatureSet_IMPLICIT
const FeatureSet_LEGACY_REQUIRED = descriptorpb.FeatureSet_LEGACY_REQUIRED
var FeatureSet_FieldPresence_name = descriptorpb.FeatureSet_FieldPresence_name
var FeatureSet_FieldPresence_value = descriptorpb.FeatureSet_FieldPresence_value
type FeatureSet_EnumType = descriptorpb.FeatureSet_EnumType
const FeatureSet_ENUM_TYPE_UNKNOWN = descriptorpb.FeatureSet_ENUM_TYPE_UNKNOWN
const FeatureSet_OPEN = descriptorpb.FeatureSet_OPEN
const FeatureSet_CLOSED = descriptorpb.FeatureSet_CLOSED
var FeatureSet_EnumType_name = descriptorpb.FeatureSet_EnumType_name
var FeatureSet_EnumType_value = descriptorpb.FeatureSet_EnumType_value
type FeatureSet_RepeatedFieldEncoding = descriptorpb.FeatureSet_RepeatedFieldEncoding
const FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN = descriptorpb.FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN
const FeatureSet_PACKED = descriptorpb.FeatureSet_PACKED
const FeatureSet_EXPANDED = descriptorpb.FeatureSet_EXPANDED
var FeatureSet_RepeatedFieldEncoding_name = descriptorpb.FeatureSet_RepeatedFieldEncoding_name
var FeatureSet_RepeatedFieldEncoding_value = descriptorpb.FeatureSet_RepeatedFieldEncoding_value
type FeatureSet_Utf8Validation = descriptorpb.FeatureSet_Utf8Validation
const FeatureSet_UTF8_VALIDATION_UNKNOWN = descriptorpb.FeatureSet_UTF8_VALIDATION_UNKNOWN
const FeatureSet_VERIFY = descriptorpb.FeatureSet_VERIFY
const FeatureSet_NONE = descriptorpb.FeatureSet_NONE
var FeatureSet_Utf8Validation_name = descriptorpb.FeatureSet_Utf8Validation_name
var FeatureSet_Utf8Validation_value = descriptorpb.FeatureSet_Utf8Validation_value
type FeatureSet_MessageEncoding = descriptorpb.FeatureSet_MessageEncoding
const FeatureSet_MESSAGE_ENCODING_UNKNOWN = descriptorpb.FeatureSet_MESSAGE_ENCODING_UNKNOWN
const FeatureSet_LENGTH_PREFIXED = descriptorpb.FeatureSet_LENGTH_PREFIXED
const FeatureSet_DELIMITED = descriptorpb.FeatureSet_DELIMITED
var FeatureSet_MessageEncoding_name = descriptorpb.FeatureSet_MessageEncoding_name
var FeatureSet_MessageEncoding_value = descriptorpb.FeatureSet_MessageEncoding_value
type FeatureSet_JsonFormat = descriptorpb.FeatureSet_JsonFormat
const FeatureSet_JSON_FORMAT_UNKNOWN = descriptorpb.FeatureSet_JSON_FORMAT_UNKNOWN
const FeatureSet_ALLOW = descriptorpb.FeatureSet_ALLOW
const FeatureSet_LEGACY_BEST_EFFORT = descriptorpb.FeatureSet_LEGACY_BEST_EFFORT
var FeatureSet_JsonFormat_name = descriptorpb.FeatureSet_JsonFormat_name
var FeatureSet_JsonFormat_value = descriptorpb.FeatureSet_JsonFormat_value
type GeneratedCodeInfo_Annotation_Semantic = descriptorpb.GeneratedCodeInfo_Annotation_Semantic
const GeneratedCodeInfo_Annotation_NONE = descriptorpb.GeneratedCodeInfo_Annotation_NONE
const GeneratedCodeInfo_Annotation_SET = descriptorpb.GeneratedCodeInfo_Annotation_SET
const GeneratedCodeInfo_Annotation_ALIAS = descriptorpb.GeneratedCodeInfo_Annotation_ALIAS
var GeneratedCodeInfo_Annotation_Semantic_name = descriptorpb.GeneratedCodeInfo_Annotation_Semantic_name
var GeneratedCodeInfo_Annotation_Semantic_value = descriptorpb.GeneratedCodeInfo_Annotation_Semantic_value
type FileDescriptorSet = descriptorpb.FileDescriptorSet
type FileDescriptorProto = descriptorpb.FileDescriptorProto
type DescriptorProto = descriptorpb.DescriptorProto
type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions
const Default_ExtensionRangeOptions_Verification = descriptorpb.Default_ExtensionRangeOptions_Verification
type FieldDescriptorProto = descriptorpb.FieldDescriptorProto
type OneofDescriptorProto = descriptorpb.OneofDescriptorProto
type EnumDescriptorProto = descriptorpb.EnumDescriptorProto
type EnumValueDescriptorProto = descriptorpb.EnumValueDescriptorProto
type ServiceDescriptorProto = descriptorpb.ServiceDescriptorProto
type MethodDescriptorProto = descriptorpb.MethodDescriptorProto
const Default_MethodDescriptorProto_ClientStreaming = descriptorpb.Default_MethodDescriptorProto_ClientStreaming
const Default_MethodDescriptorProto_ServerStreaming = descriptorpb.Default_MethodDescriptorProto_ServerStreaming
type FileOptions = descriptorpb.FileOptions
const Default_FileOptions_JavaMultipleFiles = descriptorpb.Default_FileOptions_JavaMultipleFiles
const Default_FileOptions_JavaStringCheckUtf8 = descriptorpb.Default_FileOptions_JavaStringCheckUtf8
const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_OptimizeFor
const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices
const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices
const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices
const Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated
const Default_FileOptions_CcEnableArenas = descriptorpb.Default_FileOptions_CcEnableArenas
type MessageOptions = descriptorpb.MessageOptions
const Default_MessageOptions_MessageSetWireFormat = descriptorpb.Default_MessageOptions_MessageSetWireFormat
const Default_MessageOptions_NoStandardDescriptorAccessor = descriptorpb.Default_MessageOptions_NoStandardDescriptorAccessor
const Default_MessageOptions_Deprecated = descriptorpb.Default_MessageOptions_Deprecated
type FieldOptions = descriptorpb.FieldOptions
const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype
const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype
const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy
const Default_FieldOptions_UnverifiedLazy = descriptorpb.Default_FieldOptions_UnverifiedLazy
const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated
const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak
const Default_FieldOptions_DebugRedact = descriptorpb.Default_FieldOptions_DebugRedact
type OneofOptions = descriptorpb.OneofOptions
type EnumOptions = descriptorpb.EnumOptions
const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecated
type EnumValueOptions = descriptorpb.EnumValueOptions
const Default_EnumValueOptions_Deprecated = descriptorpb.Default_EnumValueOptions_Deprecated
const Default_EnumValueOptions_DebugRedact = descriptorpb.Default_EnumValueOptions_DebugRedact
type ServiceOptions = descriptorpb.ServiceOptions
const Default_ServiceOptions_Deprecated = descriptorpb.Default_ServiceOptions_Deprecated
type MethodOptions = descriptorpb.MethodOptions
const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Deprecated
const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel
type UninterpretedOption = descriptorpb.UninterpretedOption
type FeatureSet = descriptorpb.FeatureSet
type FeatureSetDefaults = descriptorpb.FeatureSetDefaults
type SourceCodeInfo = descriptorpb.SourceCodeInfo
type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo
type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange
type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange
type ExtensionRangeOptions_Declaration = descriptorpb.ExtensionRangeOptions_Declaration
type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange
type FieldOptions_EditionDefault = descriptorpb.FieldOptions_EditionDefault
type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart
type FeatureSetDefaults_FeatureSetEditionDefault = descriptorpb.FeatureSetDefaults_FeatureSetEditionDefault
type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location
type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation
var File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto protoreflect.FileDescriptor
var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = []byte{
0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
0x69, 0x70, 0x74, 0x6f, 0x72, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68,
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65,
0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3b,
0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x32,
}
var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() }
func file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() {
if File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes,
DependencyIndexes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs,
}.Build()
File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto = out.File
file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = nil
file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = nil
file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = nil
}
| protobuf/protoc-gen-go/descriptor/descriptor.pb.go/0 | {
"file_path": "protobuf/protoc-gen-go/descriptor/descriptor.pb.go",
"repo_id": "protobuf",
"token_count": 6358
} | 673 |
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ptypes
import (
"errors"
"fmt"
"time"
timestamppb "github.com/golang/protobuf/ptypes/timestamp"
)
// Range of google.protobuf.Duration as specified in timestamp.proto.
const (
// Seconds field of the earliest valid Timestamp.
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
minValidSeconds = -62135596800
// Seconds field just after the latest valid Timestamp.
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
maxValidSeconds = 253402300800
)
// Timestamp converts a timestamppb.Timestamp to a time.Time.
// It returns an error if the argument is invalid.
//
// Unlike most Go functions, if Timestamp returns an error, the first return
// value is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
//
// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
	// Validate first; even on error we still return the time.Unix
	// interpretation of the contents (never the zero time.Time), since
	// the zero value corresponds to a valid timestamp.
	err := validateTimestamp(ts)
	if ts == nil {
		// A nil Timestamp is treated like the empty Timestamp.
		return time.Unix(0, 0).UTC(), err
	}
	return time.Unix(ts.Seconds, int64(ts.Nanos)).UTC(), err
}
// TimestampNow returns a google.protobuf.Timestamp for the current time.
//
// Deprecated: Call the timestamppb.Now function instead.
func TimestampNow() *timestamppb.Timestamp {
	now, err := TimestampProto(time.Now())
	if err != nil {
		// The current clock reading is always representable; reaching
		// this branch indicates a broken system clock.
		panic("ptypes: time.Now() out of Timestamp range")
	}
	return now
}
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
//
// Deprecated: Call the timestamppb.New function instead.
func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
ts := ×tamppb.Timestamp{
Seconds: t.Unix(),
Nanos: int32(t.Nanosecond()),
}
if err := validateTimestamp(ts); err != nil {
return nil, err
}
return ts, nil
}
// TimestampString returns the RFC 3339 string for valid Timestamps.
// For invalid Timestamps, it returns an error message in parentheses.
//
// Deprecated: Call the ts.AsTime method instead,
// followed by a call to the Format method on the time.Time value.
func TimestampString(ts *timestamppb.Timestamp) string {
	t, err := Timestamp(ts)
	if err == nil {
		return t.Format(time.RFC3339Nano)
	}
	// Invalid timestamps render as the error message in parentheses.
	return fmt.Sprintf("(%v)", err)
}
// validateTimestamp determines whether a Timestamp is valid.
// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
// and has a Nanos field in the range [0, 1e9).
//
// If the Timestamp is valid, validateTimestamp returns nil.
// Otherwise, it returns an error that describes the problem.
//
// Every valid Timestamp can be represented by a time.Time,
// but the converse is not true.
func validateTimestamp(ts *timestamppb.Timestamp) error {
	// Check nil-ness, then the seconds range, then the nanos range; the
	// first failing check determines the reported error.
	switch {
	case ts == nil:
		return errors.New("timestamp: nil Timestamp")
	case ts.Seconds < minValidSeconds:
		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
	case ts.Seconds >= maxValidSeconds:
		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
	case ts.Nanos < 0 || ts.Nanos >= 1e9:
		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
	}
	return nil
}
| protobuf/ptypes/timestamp.go/0 | {
"file_path": "protobuf/ptypes/timestamp.go",
"repo_id": "protobuf",
"token_count": 1225
} | 674 |
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// cookieauth uses a “Netscape cookie file” to implement the GOAUTH protocol
// described in https://golang.org/issue/26232.
// It expects the location of the file as the first command-line argument.
//
// Example GOAUTH usage:
//
// export GOAUTH="cookieauth $(git config --get http.cookieFile)"
//
// See http://www.cookiecentral.com/faq/#3.5 for a description of the Netscape
// cookie file format.
package main
import (
"bufio"
"fmt"
"io"
"log"
"net/http"
"net/http/cookiejar"
"net/url"
"os"
"strconv"
"strings"
"time"
"unicode"
)
// main implements the GOAUTH protocol (see golang.org/issue/26232) over a
// Netscape-format cookie file: it loads the cookies, optionally restricts
// output to the single request URL given as the second argument, and prints
// a "URL\n\nheaders\n" stanza per target URL on stdout.
func main() {
	if len(os.Args) < 2 {
		fmt.Fprintf(os.Stderr, "usage: %s COOKIEFILE [URL]\n", os.Args[0])
		os.Exit(2)
	}
	log.SetPrefix("cookieauth: ")
	f, err := os.Open(os.Args[1])
	if err != nil {
		// NOTE(review): the open error itself is discarded here; only
		// the file name is reported via %v.
		log.Fatalf("failed to read cookie file: %v\n", os.Args[1])
	}
	defer f.Close()
	var (
		// targetURL is non-nil only when an explicit URL argument was given.
		targetURL *url.URL
		// targetURLs collects the URLs to print cookies for, keyed by String().
		targetURLs = map[string]*url.URL{}
	)
	if len(os.Args) == 3 {
		targetURL, err = url.ParseRequestURI(os.Args[2])
		if err != nil {
			log.Fatalf("invalid request URI (%v): %q\n", err, os.Args[2])
		}
		targetURLs[targetURL.String()] = targetURL
	} else if len(os.Args) > 3 {
		// Extra arguments were passed: maybe the protocol was expanded?
		// We don't know how to interpret the request, so ignore it.
		return
	}
	entries, err := parseCookieFile(f.Name(), f)
	if err != nil {
		// NOTE(review): as above, the underlying read error is dropped
		// and only the file name is printed.
		log.Fatalf("error reading cookie file: %v\n", f.Name())
	}
	jar, err := cookiejar.New(nil)
	if err != nil {
		log.Fatalf("failed to initialize cookie jar: %v\n", err)
	}
	for _, e := range entries {
		// Register each cookie under an https URL built from its host and
		// path so the jar can later match it against request URLs.
		u := &url.URL{
			Scheme: "https",
			Host:   e.Host,
			Path:   e.Cookie.Path,
		}
		// With no explicit URL argument, every host in the file becomes a target.
		if targetURL == nil {
			targetURLs[u.String()] = u
		}
		jar.SetCookies(u, []*http.Cookie{&e.Cookie})
	}
	for _, u := range targetURLs {
		// Build a throwaway request so the jar selects the applicable
		// cookies, then emit the URL followed by the resulting headers.
		req := &http.Request{URL: u, Header: make(http.Header)}
		for _, c := range jar.Cookies(req.URL) {
			req.AddCookie(c)
		}
		fmt.Printf("%s\n\n", u)
		req.Header.Write(os.Stdout)
		fmt.Println()
	}
}
type Entry struct {
Host string
Cookie http.Cookie
}
// parseCookieFile parses a Netscape cookie file as described in
// http://www.cookiecentral.com/faq/#3.5.
func parseCookieFile(name string, r io.Reader) ([]*Entry, error) {
var entries []*Entry
s := bufio.NewScanner(r)
line := 0
for s.Scan() {
line++
text := strings.TrimSpace(s.Text())
if len(text) < 2 || (text[0] == '#' && unicode.IsSpace(rune(text[1]))) {
continue
}
e, err := parseCookieLine(text)
if err != nil {
log.Printf("%s:%d: %v\n", name, line, err)
continue
}
entries = append(entries, e)
}
return entries, s.Err()
}
func parseCookieLine(line string) (*Entry, error) {
f := strings.Fields(line)
if len(f) < 7 {
return nil, fmt.Errorf("found %d columns; want 7", len(f))
}
e := new(Entry)
c := &e.Cookie
if domain := f[0]; strings.HasPrefix(domain, "#HttpOnly_") {
c.HttpOnly = true
e.Host = strings.TrimPrefix(domain[10:], ".")
} else {
e.Host = strings.TrimPrefix(domain, ".")
}
isDomain, err := strconv.ParseBool(f[1])
if err != nil {
return nil, fmt.Errorf("non-boolean domain flag: %v", err)
}
if isDomain {
c.Domain = e.Host
}
c.Path = f[2]
c.Secure, err = strconv.ParseBool(f[3])
if err != nil {
return nil, fmt.Errorf("non-boolean secure flag: %v", err)
}
expiration, err := strconv.ParseInt(f[4], 10, 64)
if err != nil {
return nil, fmt.Errorf("malformed expiration: %v", err)
}
c.Expires = time.Unix(expiration, 0)
c.Name = f[5]
c.Value = f[6]
return e, nil
}
| tools/cmd/auth/cookieauth/cookieauth.go/0 | {
"file_path": "tools/cmd/auth/cookieauth/cookieauth.go",
"repo_id": "tools",
"token_count": 1539
} | 675 |
// The package doc comment
package initial
import (
"fmt"
"domain.name/importdecl"
)
type t int // type1
// const1
const c = 1 // const2
func foo() {
fmt.Println(importdecl.F())
}
// zinit
const (
z1 = iota // z1
z2 // z2
) // zend
| tools/cmd/bundle/testdata/src/initial/b.go/0 | {
"file_path": "tools/cmd/bundle/testdata/src/initial/b.go",
"repo_id": "tools",
"token_count": 110
} | 676 |
# Test of line-oriented output.
deadcode `-f={{range .Funcs}}{{printf "%s: %s.%s\n" .Position $.Path .Name}}{{end}}` -filter= example.com
want "main.go:13:10: example.com.T.Goodbye"
!want "example.com.T.Hello"
want "main.go:15:6: example.com.unreferenced"
want "fmt.Scanf"
want "fmt.Printf"
!want "fmt.Println"
-- go.mod --
module example.com
go 1.18
-- main.go --
package main
import "fmt"
type T int
func main() {
var x T
x.Hello()
}
func (T) Hello() { fmt.Println("hello") }
func (T) Goodbye() { fmt.Println("goodbye") }
func unreferenced() {} | tools/cmd/deadcode/testdata/lineflag.txtar/0 | {
"file_path": "tools/cmd/deadcode/testdata/lineflag.txtar",
"repo_id": "tools",
"token_count": 233
} | 677 |
package one // import "new.com/one"
| tools/cmd/fiximports/testdata/src/old.com/one/one.go/0 | {
"file_path": "tools/cmd/fiximports/testdata/src/old.com/one/one.go",
"repo_id": "tools",
"token_count": 12
} | 678 |
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
)
// findGOROOT reports the Go root to use: $GOROOT when set, otherwise the
// value reported by the go command, falling back to the compiled-in
// runtime.GOROOT() (always, for gccgo, which has no real GOROOT).
func findGOROOT() string {
	if env := os.Getenv("GOROOT"); env != "" {
		return filepath.Clean(env)
	}
	compiledIn := filepath.Clean(runtime.GOROOT())
	if runtime.Compiler != "gccgo" {
		// Ask the go command, which knows the installed location even
		// when this binary was moved.
		if out, err := exec.Command("go", "env", "GOROOT").Output(); err == nil {
			return strings.TrimSpace(string(out))
		}
	}
	return compiledIn
}
| tools/cmd/godoc/goroot.go/0 | {
"file_path": "tools/cmd/godoc/goroot.go",
"repo_id": "tools",
"token_count": 245
} | 679 |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Goyacc is a version of yacc for Go.
It is written in Go and generates parsers written in Go.
Usage:
goyacc args...
It is largely transliterated from the Inferno version written in Limbo
which in turn was largely transliterated from the Plan 9 version
written in C and documented at
https://9p.io/magic/man2html/1/yacc
Adepts of the original yacc will have no trouble adapting to this
form of the tool.
The directory $GOPATH/src/golang.org/x/tools/cmd/goyacc/testdata/expr
is a yacc program for a very simple expression parser. See expr.y and
main.go in that directory for examples of how to write and build
goyacc programs.
The generated parser is reentrant. The parsing function yyParse expects
to be given an argument that conforms to the following interface:
type yyLexer interface {
Lex(lval *yySymType) int
Error(e string)
}
Lex should return the token identifier, and place other token
information in lval (which replaces the usual yylval).
Error is equivalent to yyerror in the original yacc.
Code inside the grammar actions may refer to the variable yylex,
which holds the yyLexer passed to yyParse.
Clients that need to understand more about the parser state can
create the parser separately from invoking it. The function yyNewParser
returns a yyParser conforming to the following interface:
type yyParser interface {
Parse(yyLex) int
Lookahead() int
}
Parse runs the parser; the top-level call yyParse(yylex) is equivalent
to yyNewParser().Parse(yylex).
Lookahead can be called during grammar actions to read (but not consume)
the value of the current lookahead token, as returned by yylex.Lex.
If there is no current lookahead token (because the parser has not called Lex
or has consumed the token returned by the most recent call to Lex),
Lookahead returns -1. Calling Lookahead is equivalent to reading
yychar from within in a grammar action.
Multiple grammars compiled into a single program should be placed in
distinct packages. If that is impossible, the "-p prefix" flag to
goyacc sets the prefix, by default yy, that begins the names of
symbols, including types, the parser, and the lexer, generated and
referenced by yacc's generated code. Setting it to distinct values
allows multiple grammars to be placed in a single package.
*/
package main
| tools/cmd/goyacc/doc.go/0 | {
"file_path": "tools/cmd/goyacc/doc.go",
"repo_id": "tools",
"token_count": 672
} | 680 |
/* Styles for the presenter-notes window. */
p {
  margin: 10px;
}
/* Live copy of the slides pinned to the top-left and scaled to 70%
   (vendor-prefixed fallbacks included). The negative margins and the
   146% width compensate for the scaling so the frame still fills the
   pane — presumably tuned by eye; verify before changing any value. */
#presenter-slides {
  display: block;
  margin-top: -10px;
  margin-left: -17px;
  position: fixed;
  border: 0;
  width: 146%;
  height: 750px;
  transform: scale(0.7, 0.7);
  transform-origin: top left;
  -moz-transform: scale(0.7);
  -moz-transform-origin: top left;
  -o-transform: scale(0.7);
  -o-transform-origin: top left;
  -webkit-transform: scale(0.7);
  -webkit-transform-origin: top left;
}
/* Scrollable notes text fixed below the slide preview. */
#presenter-notes {
  margin-top: -180px;
  font-family: 'Open Sans', Arial, sans-serif;
  height: 30%;
  width: 100%;
  overflow: scroll;
  position: fixed;
  top: 706px;
}
| tools/cmd/present/static/notes.css/0 | {
"file_path": "tools/cmd/present/static/notes.css",
"repo_id": "tools",
"token_count": 244
} | 681 |
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Stringer is a tool to automate the creation of methods that satisfy the fmt.Stringer
// interface. Given the name of a (signed or unsigned) integer type T that has constants
// defined, stringer will create a new self-contained Go source file implementing
//
// func (t T) String() string
//
// The file is created in the same package and directory as the package that defines T.
// It has helpful defaults designed for use with go generate.
//
// Stringer works best with constants that are consecutive values such as created using iota,
// but creates good code regardless. In the future it might also provide custom support for
// constant sets that are bit patterns.
//
// For example, given this snippet,
//
// package painkiller
//
// type Pill int
//
// const (
// Placebo Pill = iota
// Aspirin
// Ibuprofen
// Paracetamol
// Acetaminophen = Paracetamol
// )
//
// running this command
//
// stringer -type=Pill
//
// in the same directory will create the file pill_string.go, in package painkiller,
// containing a definition of
//
// func (Pill) String() string
//
// That method will translate the value of a Pill constant to the string representation
// of the respective constant name, so that the call fmt.Print(painkiller.Aspirin) will
// print the string "Aspirin".
//
// Typically this process would be run using go generate, like this:
//
// //go:generate stringer -type=Pill
//
// If multiple constants have the same value, the lexically first matching name will
// be used (in the example, Acetaminophen will print as "Paracetamol").
//
// With no arguments, it processes the package in the current directory.
// Otherwise, the arguments must name a single directory holding a Go package
// or a set of Go source files that represent a single Go package.
//
// The -type flag accepts a comma-separated list of types so a single run can
// generate methods for multiple types. The default output file is t_string.go,
// where t is the lower-cased name of the first type listed. It can be overridden
// with the -output flag.
//
// The -linecomment flag tells stringer to generate the text of any line comment, trimmed
// of leading spaces, instead of the constant name. For instance, if the constants above had a
// Pill prefix, one could write
//
// PillAspirin // Aspirin
//
// to suppress it in the output.
package main // import "golang.org/x/tools/cmd/stringer"
import (
"bytes"
"flag"
"fmt"
"go/ast"
"go/constant"
"go/format"
"go/token"
"go/types"
"log"
"os"
"path/filepath"
"sort"
"strings"
"golang.org/x/tools/go/packages"
)
// Command-line flags controlling which types are processed and how the
// generated file is named and rendered.
var (
	typeNames   = flag.String("type", "", "comma-separated list of type names; must be set")
	output      = flag.String("output", "", "output file name; default srcdir/<type>_string.go")
	trimprefix  = flag.String("trimprefix", "", "trim the `prefix` from the generated constant names")
	linecomment = flag.Bool("linecomment", false, "use line comment text as printed text when present")
	buildTags   = flag.String("tags", "", "comma-separated list of build tags to apply")
)
// Usage is a replacement usage function for the flags package.
// It prints the command synopsis and flag defaults to standard error.
func Usage() {
	const header = "Usage of stringer:\n" +
		"\tstringer [flags] -type T [directory]\n" +
		"\tstringer [flags] -type T files... # Must be a single package\n" +
		"For more information, see:\n" +
		"\thttps://pkg.go.dev/golang.org/x/tools/cmd/stringer\n" +
		"Flags:\n"
	fmt.Fprint(os.Stderr, header)
	flag.PrintDefaults()
}
// main drives the generator: it parses flags, loads the target package,
// generates a String method for each requested type, and writes the
// gofmt-ed result to a single output file.
func main() {
	log.SetFlags(0)
	log.SetPrefix("stringer: ")
	flag.Usage = Usage
	flag.Parse()
	if len(*typeNames) == 0 {
		flag.Usage()
		os.Exit(2)
	}
	types := strings.Split(*typeNames, ",")
	var tags []string
	if len(*buildTags) > 0 {
		tags = strings.Split(*buildTags, ",")
	}

	// We accept either one directory or a list of files. Which do we have?
	args := flag.Args()
	if len(args) == 0 {
		// Default: process whole package in current directory.
		args = []string{"."}
	}

	// Parse the package once.
	var dir string
	g := Generator{
		trimPrefix:  *trimprefix,
		lineComment: *linecomment,
	}
	// TODO(suzmue): accept other patterns for packages (directories, list of files, import paths, etc).
	if len(args) == 1 && isDirectory(args[0]) {
		dir = args[0]
	} else {
		if len(tags) != 0 {
			log.Fatal("-tags option applies only to directories, not when files are specified")
		}
		// The output file is placed beside the first named source file.
		dir = filepath.Dir(args[0])
	}

	g.parsePackage(args, tags)

	// Print the header and package clause.
	g.Printf("// Code generated by \"stringer %s\"; DO NOT EDIT.\n", strings.Join(os.Args[1:], " "))
	g.Printf("\n")
	g.Printf("package %s", g.pkg.name)
	g.Printf("\n")
	g.Printf("import \"strconv\"\n") // Used by all methods.

	// Run generate for each type.
	for _, typeName := range types {
		g.generate(typeName)
	}

	// Format the output.
	src := g.format()

	// Write to file. Default name: <first type>_string.go, lower-cased.
	outputName := *output
	if outputName == "" {
		baseName := fmt.Sprintf("%s_string.go", types[0])
		outputName = filepath.Join(dir, strings.ToLower(baseName))
	}
	err := os.WriteFile(outputName, src, 0644)
	if err != nil {
		log.Fatalf("writing output: %s", err)
	}
}
// isDirectory reports whether the named file is a directory.
// It terminates the program if the file cannot be stat'ed.
func isDirectory(name string) bool {
	fi, err := os.Stat(name)
	if err != nil {
		log.Fatal(err)
	}
	return fi.IsDir()
}
// Generator holds the state of the analysis. Primarily used to buffer
// the output for format.Source.
type Generator struct {
	buf bytes.Buffer // Accumulated output.
	pkg *Package     // Package we are scanning.

	trimPrefix  string // Prefix to strip from constant names (-trimprefix flag).
	lineComment bool   // Use trailing line comments as names (-linecomment flag).

	logf func(format string, args ...interface{}) // test logging hook; nil when not testing
}
// Printf appends a formatted string to the generator's output buffer.
func (g *Generator) Printf(format string, args ...interface{}) {
	g.buf.WriteString(fmt.Sprintf(format, args...))
}
// File holds a single parsed file and associated data.
type File struct {
	pkg  *Package  // Package to which this file belongs.
	file *ast.File // Parsed AST.
	// These fields are reset for each type being generated.
	typeName string  // Name of the constant type.
	values   []Value // Accumulator for constant values of that type.

	trimPrefix  string // Prefix to strip from names (-trimprefix flag).
	lineComment bool   // Use trailing line comments as names (-linecomment flag).
}
// Package holds the subset of type-checked package information the
// generator needs.
type Package struct {
	name  string                      // Package name, used in the output's package clause.
	defs  map[*ast.Ident]types.Object // go/types definitions, used to resolve constant values.
	files []*File                     // One entry per parsed source file.
}
// parsePackage analyzes the single package constructed from the patterns and tags.
// parsePackage exits if there is an error.
func (g *Generator) parsePackage(patterns []string, tags []string) {
	cfg := &packages.Config{
		// Names, types, type info, and syntax are all that is needed to
		// resolve constant names and values.
		Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax,
		// TODO: Need to think about constants in test files. Maybe write type_string_test.go
		// in a separate pass? For later.
		Tests:      false,
		BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(tags, " "))},
		Logf:       g.logf,
	}
	pkgs, err := packages.Load(cfg, patterns...)
	if err != nil {
		log.Fatal(err)
	}
	// Stringer writes one file into one package, so the patterns must
	// resolve to exactly one package.
	if len(pkgs) != 1 {
		log.Fatalf("error: %d packages matching %v", len(pkgs), strings.Join(patterns, " "))
	}
	g.addPackage(pkgs[0])
}
// addPackage adds a type checked Package and its syntax files to the generator.
func (g *Generator) addPackage(pkg *packages.Package) {
	p := &Package{
		name: pkg.Name,
		defs: pkg.TypesInfo.Defs,
	}
	files := make([]*File, 0, len(pkg.Syntax))
	for _, syntax := range pkg.Syntax {
		files = append(files, &File{
			file:        syntax,
			pkg:         p,
			trimPrefix:  g.trimPrefix,
			lineComment: g.lineComment,
		})
	}
	p.files = files
	g.pkg = p
}
// generate produces the String method for the named type.
// It exits the program if no constants of that type are found.
func (g *Generator) generate(typeName string) {
	values := make([]Value, 0, 100)
	for _, file := range g.pkg.files {
		// Set the state for this run of the walker.
		file.typeName = typeName
		file.values = nil
		if file.file != nil {
			ast.Inspect(file.file, file.genDecl)
			values = append(values, file.values...)
		}
	}

	if len(values) == 0 {
		log.Fatalf("no values defined for type %s", typeName)
	}
	// Generate code that will fail if the constants change value.
	g.Printf("func _() {\n")
	g.Printf("\t// An \"invalid array index\" compiler error signifies that the constant values have changed.\n")
	g.Printf("\t// Re-run the stringer command to generate them again.\n")
	g.Printf("\tvar x [1]struct{}\n")
	for _, v := range values {
		g.Printf("\t_ = x[%s - %s]\n", v.originalName, v.str)
	}
	g.Printf("}\n")
	runs := splitIntoRuns(values)
	// The decision of which pattern to use depends on the number of
	// runs in the numbers. If there's only one, it's easy. For more than
	// one, there's a tradeoff between complexity and size of the data
	// and code vs. the simplicity of a map. A map takes more space,
	// but so does the code. The decision here (crossover at 10) is
	// arbitrary, but considers that for large numbers of runs the cost
	// of the linear scan in the switch might become important, and
	// rather than use yet another algorithm such as binary search,
	// we punt and use a map. In any case, the likelihood of a map
	// being necessary for any realistic example other than bitmasks
	// is very low. And bitmasks probably deserve their own analysis,
	// to be done some other day.
	switch {
	case len(runs) == 1:
		g.buildOneRun(runs, typeName)
	case len(runs) <= 10:
		g.buildMultipleRuns(runs, typeName)
	default:
		g.buildMap(runs, typeName)
	}
}
// splitIntoRuns breaks the values into runs of contiguous sequences.
// For example, given 1,2,3,5,6,7 it returns {1,2,3},{5,6,7}.
// The input slice is known to be non-empty.
func splitIntoRuns(values []Value) [][]Value {
	// Stable sort keeps the lexically first name for equal values.
	sort.Stable(byValue(values))
	// Drop duplicate values in place, keeping the first (preferred) name
	// for each. The String method won't care which named constant was the
	// argument, and identical values would make the generated switch or
	// map fail to compile.
	unique := values[:1]
	for _, v := range values[1:] {
		if v.value != unique[len(unique)-1].value {
			unique = append(unique, v)
		}
	}
	// Slice the deduplicated values into maximal contiguous runs.
	var runs [][]Value
	for len(unique) > 0 {
		end := 1
		for end < len(unique) && unique[end].value == unique[end-1].value+1 {
			end++
		}
		runs = append(runs, unique[:end])
		unique = unique[end:]
	}
	return runs
}
// format returns the gofmt-ed contents of the Generator's buffer.
func (g *Generator) format() []byte {
	formatted, err := format.Source(g.buf.Bytes())
	if err == nil {
		return formatted
	}
	// Should never happen, but can arise when developing this code.
	// The user can compile the output to see the error.
	log.Printf("warning: internal error: invalid Go generated: %s", err)
	log.Printf("warning: compile the package to analyze the error")
	return g.buf.Bytes()
}
// Value represents a declared constant.
type Value struct {
	originalName string // The name of the constant.
	name         string // The name with trimmed prefix.
	// The value is stored as a bit pattern alone. The boolean tells us
	// whether to interpret it as an int64 or a uint64; the only place
	// this matters is when sorting.
	// Much of the time the str field is all we need; it is printed
	// by Value.String.
	value  uint64 // Will be converted to int64 when needed.
	signed bool   // Whether the constant is a signed type.
	str    string // The string representation given by the "go/constant" package.
}

// String returns the constant's value as rendered by go/constant,
// suitable for splicing directly into generated source.
func (v *Value) String() string {
	return v.str
}
// byValue lets us sort the constants into increasing order.
// We take care in the Less method to sort in signed or unsigned order,
// as appropriate.
type byValue []Value

func (b byValue) Len() int      { return len(b) }
func (b byValue) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byValue) Less(i, j int) bool {
	x, y := b[i], b[j]
	if x.signed {
		return int64(x.value) < int64(y.value)
	}
	return x.value < y.value
}
// genDecl processes one declaration clause.
// It returns true to continue the walk into non-const declarations and
// false once a const declaration has been fully processed.
func (f *File) genDecl(node ast.Node) bool {
	decl, ok := node.(*ast.GenDecl)
	if !ok || decl.Tok != token.CONST {
		// We only care about const declarations.
		return true
	}
	// The name of the type of the constants we are declaring.
	// Can change if this is a multi-element declaration.
	typ := ""
	// Loop over the elements of the declaration. Each element is a ValueSpec:
	// a list of names possibly followed by a type, possibly followed by values.
	// If the type and value are both missing, we carry down the type (and value,
	// but the "go/types" package takes care of that).
	for _, spec := range decl.Specs {
		vspec := spec.(*ast.ValueSpec) // Guaranteed to succeed as this is CONST.
		if vspec.Type == nil && len(vspec.Values) > 0 {
			// "X = 1". With no type but a value. If the constant is untyped,
			// skip this vspec and reset the remembered type.
			typ = ""

			// If this is a simple type conversion, remember the type.
			// We don't mind if this is actually a call; a qualified call won't
			// be matched (that will be SelectorExpr, not Ident), and only unusual
			// situations will result in a function call that appears to be
			// a type conversion.
			ce, ok := vspec.Values[0].(*ast.CallExpr)
			if !ok {
				continue
			}
			id, ok := ce.Fun.(*ast.Ident)
			if !ok {
				continue
			}
			typ = id.Name
		}
		if vspec.Type != nil {
			// "X T". We have a type. Remember it.
			ident, ok := vspec.Type.(*ast.Ident)
			if !ok {
				continue
			}
			typ = ident.Name
		}
		if typ != f.typeName {
			// This is not the type we're looking for.
			continue
		}
		// We now have a list of names (from one line of source code) all being
		// declared with the desired type.
		// Grab their names and actual values and store them in f.values.
		for _, name := range vspec.Names {
			if name.Name == "_" {
				// The blank identifier needs no String representation.
				continue
			}
			// This dance lets the type checker find the values for us. It's a
			// bit tricky: look up the object declared by the name, find its
			// types.Const, and extract its value.
			obj, ok := f.pkg.defs[name]
			if !ok {
				log.Fatalf("no value for constant %s", name)
			}
			info := obj.Type().Underlying().(*types.Basic).Info()
			if info&types.IsInteger == 0 {
				log.Fatalf("can't handle non-integer constant type %s", typ)
			}
			value := obj.(*types.Const).Val() // Guaranteed to succeed as this is CONST.
			if value.Kind() != constant.Int {
				log.Fatalf("can't happen: constant is not an integer %s", name)
			}
			i64, isInt := constant.Int64Val(value)
			u64, isUint := constant.Uint64Val(value)
			if !isInt && !isUint {
				log.Fatalf("internal error: value of %s is not an integer: %s", name, value.String())
			}
			if !isInt {
				u64 = uint64(i64)
			}
			v := Value{
				originalName: name.Name,
				value:        u64,
				signed:       info&types.IsUnsigned == 0,
				str:          value.String(),
			}
			// With -linecomment, a single trailing line comment overrides the
			// printed name; otherwise strip the -trimprefix prefix.
			if c := vspec.Comment; f.lineComment && c != nil && len(c.List) == 1 {
				v.name = strings.TrimSpace(c.Text())
			} else {
				v.name = strings.TrimPrefix(v.originalName, f.trimPrefix)
			}
			f.values = append(f.values, v)
		}
	}
	return false
}
// Helpers

// usize returns the number of bits of the smallest unsigned integer
// type that will hold n. Used to create the smallest possible slice of
// integers to use as indexes into the concatenated strings.
func usize(n int) int {
	if n < 1<<8 {
		return 8
	}
	if n < 1<<16 {
		return 16
	}
	// 2^32 is enough constants for anyone.
	return 32
}
// declareIndexAndNameVars declares the index slices and concatenated names
// strings representing the runs of values. Single-value runs get only a
// name constant; longer runs also get an index table.
func (g *Generator) declareIndexAndNameVars(runs [][]Value, typeName string) {
	var indexes, names []string
	for i, run := range runs {
		index, name := g.createIndexAndNameDecl(run, typeName, fmt.Sprintf("_%d", i))
		if len(run) != 1 {
			indexes = append(indexes, index)
		}
		names = append(names, name)
	}
	g.Printf("const (\n")
	for _, name := range names {
		g.Printf("\t%s\n", name)
	}
	g.Printf(")\n\n")

	if len(indexes) > 0 {
		// Fix: emit a newline after "var (" to match the "const (\n" above.
		// Previously the first index declaration was printed on the same
		// line and only format.Source repaired the layout.
		g.Printf("var (\n")
		for _, index := range indexes {
			g.Printf("\t%s\n", index)
		}
		g.Printf(")\n\n")
	}
}
// declareIndexAndNameVar is the single-run version of declareIndexAndNameVars:
// it emits one unsuffixed name constant and one unsuffixed index variable.
func (g *Generator) declareIndexAndNameVar(run []Value, typeName string) {
	index, name := g.createIndexAndNameDecl(run, typeName, "")
	g.Printf("const %s\n", name)
	g.Printf("var %s\n", index)
}
// createIndexAndNameDecl returns the pair of declarations for the run. The caller will add "const" and "var".
// The first return value is the index declaration, the second the name constant.
func (g *Generator) createIndexAndNameDecl(run []Value, typeName string, suffix string) (string, string) {
	// Concatenate the names, recording the end offset of each.
	var names strings.Builder
	offsets := make([]int, len(run))
	for i := range run {
		names.WriteString(run[i].name)
		offsets[i] = names.Len()
	}
	nameConst := fmt.Sprintf("_%s_name%s = %q", typeName, suffix, names.String())

	// The index table has one leading zero plus one end offset per name;
	// its element width is the smallest that can hold the total length.
	var index strings.Builder
	fmt.Fprintf(&index, "_%s_index%s = [...]uint%d{0", typeName, suffix, usize(names.Len()))
	for _, off := range offsets {
		fmt.Fprintf(&index, ", %d", off)
	}
	index.WriteString("}")
	return index.String(), nameConst
}
// declareNameVars declares the concatenated names string representing all the values in the runs.
func (g *Generator) declareNameVars(runs [][]Value, typeName string, suffix string) {
	var all strings.Builder
	for _, run := range runs {
		for i := range run {
			all.WriteString(run[i].name)
		}
	}
	// Fix: use %q instead of splicing the raw names between hand-written
	// quotes, so names containing quotes or backslashes (possible with
	// -linecomment) still produce a valid Go string literal. The literal's
	// runtime value is byte-identical to the raw concatenation, so the
	// [n:m] offsets computed by buildMap remain correct; for ordinary
	// names the emitted text is unchanged.
	g.Printf("const _%s_name%s = %q\n", typeName, suffix, all.String())
}
// buildOneRun generates the variables and String method for a single run of contiguous values.
func (g *Generator) buildOneRun(runs [][]Value, typeName string) {
	values := runs[0]
	g.Printf("\n")
	g.declareIndexAndNameVar(values, typeName)
	// The generated code is simple enough to write as a Printf format.
	lessThanZero := ""
	if values[0].signed {
		lessThanZero = "i < 0 || "
	}
	if values[0].value == 0 { // Signed or unsigned, 0 is still 0.
		g.Printf(stringOneRun, typeName, usize(len(values)), lessThanZero)
	} else {
		// Non-zero lower bound: the template rebases i before indexing.
		g.Printf(stringOneRunWithOffset, typeName, values[0].String(), usize(len(values)), lessThanZero)
	}
}
// Arguments to format are:
//
//	[1]: type name
//	[2]: size of index element (8 for uint8 etc.)
//	[3]: less than zero check (for signed types)
const stringOneRun = `func (i %[1]s) String() string {
	if %[3]si >= %[1]s(len(_%[1]s_index)-1) {
		return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _%[1]s_name[_%[1]s_index[i]:_%[1]s_index[i+1]]
}
`

// Arguments to format are:
//
//	[1]: type name
//	[2]: lowest defined value for type, as a string
//	[3]: size of index element (8 for uint8 etc.)
//	[4]: less than zero check (for signed types)
const stringOneRunWithOffset = `func (i %[1]s) String() string {
	i -= %[2]s
	if %[4]si >= %[1]s(len(_%[1]s_index)-1) {
		return "%[1]s(" + strconv.FormatInt(int64(i + %[2]s), 10) + ")"
	}
	return _%[1]s_name[_%[1]s_index[i] : _%[1]s_index[i+1]]
}
`
// buildMultipleRuns generates the variables and String method for multiple runs of contiguous values.
// For this pattern, a single Printf format won't do.
func (g *Generator) buildMultipleRuns(runs [][]Value, typeName string) {
	g.Printf("\n")
	g.declareIndexAndNameVars(runs, typeName)
	g.Printf("func (i %s) String() string {\n", typeName)
	g.Printf("\tswitch {\n")
	for i, values := range runs {
		if len(values) == 1 {
			// A single value needs no index table: compare directly.
			g.Printf("\tcase i == %s:\n", &values[0])
			g.Printf("\t\treturn _%s_name_%d\n", typeName, i)
			continue
		}
		if values[0].value == 0 && !values[0].signed {
			// For an unsigned lower bound of 0, "0 <= i" would be redundant.
			g.Printf("\tcase i <= %s:\n", &values[len(values)-1])
		} else {
			g.Printf("\tcase %s <= i && i <= %s:\n", &values[0], &values[len(values)-1])
		}
		if values[0].value != 0 {
			// Rebase i so it indexes this run's table from zero.
			g.Printf("\t\ti -= %s\n", &values[0])
		}
		g.Printf("\t\treturn _%s_name_%d[_%s_index_%d[i]:_%s_index_%d[i+1]]\n",
			typeName, i, typeName, i, typeName, i)
	}
	g.Printf("\tdefault:\n")
	g.Printf("\t\treturn \"%s(\" + strconv.FormatInt(int64(i), 10) + \")\"\n", typeName)
	g.Printf("\t}\n")
	g.Printf("}\n")
}
// buildMap handles the case where the space is so sparse a map is a reasonable fallback.
// It's a rare situation but has simple code.
func (g *Generator) buildMap(runs [][]Value, typeName string) {
	g.Printf("\n")
	g.declareNameVars(runs, typeName, "")
	g.Printf("\nvar _%s_map = map[%s]string{\n", typeName, typeName)
	n := 0 // Running byte offset into the concatenated names string.
	for _, values := range runs {
		for _, value := range values {
			g.Printf("\t%s: _%s_name[%d:%d],\n", &value, typeName, n, n+len(value.name))
			n += len(value.name)
		}
	}
	g.Printf("}\n\n")
	g.Printf(stringMap, typeName)
}

// Argument to format is the type name.
const stringMap = `func (i %[1]s) String() string {
	if str, ok := _%[1]s_map[i]; ok {
		return str
	}
	return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")"
}
`
| tools/cmd/stringer/stringer.go/0 | {
"file_path": "tools/cmd/stringer/stringer.go",
"repo_id": "tools",
"token_count": 7605
} | 682 |
#!/bin/bash
# Usage: buildall [-e] [-nocmp] [-work]
#
# Builds everything (std) for every GOOS/GOARCH combination but installs nothing.
#
# By default, runs the builds with -toolexec 'toolstash -cmp', to test that the
# toolchain is producing bit identical output to a previous known good toolchain.
#
# Options:
#   -e: stop at first failure
#   -nocmp: turn off toolstash -cmp; just check that ordinary builds succeed
#   -work: pass -work to go command

sete=false
if [ "$1" = "-e" ]; then
	sete=true
	shift
fi

cmp=true
if [ "$1" = "-nocmp" ]; then
	cmp=false
	shift
fi

work=""
if [ "$1" = "-work" ]; then
	work="-work"
	shift
fi

# Fix: quote the GOROOT path (it may contain spaces) and abort if the
# cd fails; previously a failed cd let the builds run in the wrong
# directory.
cd "$(go env GOROOT)/src" || exit 1
go install cmd/compile cmd/link cmd/asm || exit 1

pattern="$1"
if [ "$pattern" = "" ]; then
	pattern=.
fi

targets="$(go tool dist list; echo linux/386/softfloat)"
targets="$(echo "$targets" | tr '/' '-' | sort | grep -E "$pattern" | grep -E -v 'android-arm|darwin-arm')"

# put linux first in the target list to get all the architectures up front.
targets="$(echo "$targets" | grep -E 'linux') $(echo "$targets" | grep -E -v 'linux')"

if [ "$sete" = true ]; then
	set -e
fi

for target in $targets
do
	echo $target
	export GOOS=$(echo $target | sed 's/-.*//')
	export GOARCH=$(echo $target | sed 's/.*-//')
	unset GO386
	if [ "$GOARCH" = "softfloat" ]; then
		export GOARCH=386
		export GO386=softfloat
	fi
	if $cmp; then
		if [ "$GOOS" = "android" ]; then
			go build $work -a -toolexec 'toolstash -cmp' std
		else
			go build $work -a -toolexec 'toolstash -cmp' std cmd
		fi
	else
		go build $work -a std
	fi
done
| tools/cmd/toolstash/buildall/0 | {
"file_path": "tools/cmd/toolstash/buildall",
"repo_id": "tools",
"token_count": 605
} | 683 |
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package analysis
import "go/token"
// A Diagnostic is a message associated with a source location or range.
//
// An Analyzer may return a variety of diagnostics; the optional Category,
// which should be a constant, may be used to classify them.
// It is primarily intended to make it easy to look up documentation.
//
// All Pos values are interpreted relative to Pass.Fset. If End is
// provided, the diagnostic is specified to apply to the range between
// Pos and End.
type Diagnostic struct {
	Pos      token.Pos // Start of the range the message applies to.
	End      token.Pos // optional
	Category string    // optional
	Message  string    // Human-readable description of the problem.

	// URL is the optional location of a web page that provides
	// additional documentation for this diagnostic.
	//
	// If URL is empty but a Category is specified, then the
	// Analysis driver should treat the URL as "#"+Category.
	//
	// The URL may be relative. If so, the base URL is that of the
	// Analyzer that produced the diagnostic;
	// see https://pkg.go.dev/net/url#URL.ResolveReference.
	URL string

	// SuggestedFixes is an optional list of fixes to address the
	// problem described by the diagnostic. Each one represents
	// an alternative strategy; at most one may be applied.
	//
	// Fixes for different diagnostics should be treated as
	// independent changes to the same baseline file state,
	// analogous to a set of git commits all with the same parent.
	// Combining fixes requires resolving any conflicts that
	// arise, analogous to a git merge.
	// Any conflicts that remain may be dealt with, depending on
	// the tool, by discarding fixes, consulting the user, or
	// aborting the operation.
	SuggestedFixes []SuggestedFix

	// Related contains optional secondary positions and messages
	// related to the primary diagnostic.
	Related []RelatedInformation
}
// RelatedInformation contains information related to a diagnostic.
// For example, a diagnostic that flags duplicated declarations of a
// variable may include one RelatedInformation per existing
// declaration.
type RelatedInformation struct {
	Pos     token.Pos // Start of the related source range.
	End     token.Pos // optional
	Message string    // Description of the relationship.
}

// A SuggestedFix is a code change associated with a Diagnostic that a
// user can choose to apply to their code. Usually the SuggestedFix is
// meant to fix the issue flagged by the diagnostic.
//
// The TextEdits must not overlap, nor contain edits for other packages.
type SuggestedFix struct {
	// A verb phrase describing the fix, to be shown to
	// a user trying to decide whether to accept it.
	//
	// Example: "Remove the surplus argument"
	Message string

	// TextEdits describe the changes to apply; they must not overlap.
	TextEdits []TextEdit
}

// A TextEdit represents the replacement of the code between Pos and End with the new text.
// Each TextEdit should apply to a single file. End should not be earlier in the file than Pos.
type TextEdit struct {
	// For a pure insertion, End can either be set to Pos or token.NoPos.
	Pos     token.Pos
	End     token.Pos
	NewText []byte // Replacement text; empty means deletion.
}
| tools/go/analysis/diagnostic.go/0 | {
"file_path": "tools/go/analysis/diagnostic.go",
"repo_id": "tools",
"token_count": 800
} | 684 |
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains tests for the cgo checker.
package a
// void f(void *ptr) {}
import "C"
import "unsafe"
// CgoTest exercises the cgocall checker with type parameters. The
// "want" comments are analysistest expectations: passing pointers to
// values that contain Go pointers (chan, or S instantiated with chan)
// must be diagnosed, while a value of unresolved type parameter T or a
// pointer-free instantiation like S[int] must not.
func CgoTest[T any]() {
	var c chan bool
	C.f(*(*unsafe.Pointer)(unsafe.Pointer(&c))) // want "embedded pointer"
	C.f(unsafe.Pointer(&c))                     // want "embedded pointer"

	var schan S[chan bool]
	C.f(*(*unsafe.Pointer)(unsafe.Pointer(&schan))) // want "embedded pointer"
	C.f(unsafe.Pointer(&schan))                     // want "embedded pointer"

	var x T
	C.f(*(*unsafe.Pointer)(unsafe.Pointer(&x))) // no findings as T is not known compile-time
	C.f(unsafe.Pointer(&x))

	// instantiating CgoTest should not yield any warnings
	CgoTest[chan bool]()

	var sint S[int]
	C.f(*(*unsafe.Pointer)(unsafe.Pointer(&sint)))
	C.f(unsafe.Pointer(&sint))
}
// S is a generic wrapper used above: whether its single field contains
// a Go pointer depends on the instantiation (S[chan bool] vs S[int]).
type S[X any] struct {
	val X
}
| tools/go/analysis/passes/cgocall/testdata/src/typeparams/typeparams.go/0 | {
"file_path": "tools/go/analysis/passes/cgocall/testdata/src/typeparams/typeparams.go",
"repo_id": "tools",
"token_count": 388
} | 685 |
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains tests for the copylock checker's
// range statement analysis.
package a
import "sync"
// rangeMutex exercises the copylock checker's range-statement analysis:
// binding an element of a slice, array, map, channel (or a map key) of
// sync.Mutex to a range variable copies the lock and must be reported.
// The "want" comments are analysistest expectations and must match the
// emitted diagnostics exactly.
func rangeMutex() {
	var mu sync.Mutex
	var i int

	var s []sync.Mutex
	for range s {
	}
	for i = range s {
	}
	for i := range s {
	}
	for i, _ = range s {
	}
	for i, _ := range s {
	}
	for _, mu = range s { // want "range var mu copies lock: sync.Mutex"
	}
	for _, m := range s { // want "range var m copies lock: sync.Mutex"
	}
	for i, mu = range s { // want "range var mu copies lock: sync.Mutex"
	}
	for i, m := range s { // want "range var m copies lock: sync.Mutex"
	}

	var a [3]sync.Mutex
	for _, m := range a { // want "range var m copies lock: sync.Mutex"
	}

	var m map[sync.Mutex]sync.Mutex
	for k := range m { // want "range var k copies lock: sync.Mutex"
	}
	for mu, _ = range m { // want "range var mu copies lock: sync.Mutex"
	}
	for k, _ := range m { // want "range var k copies lock: sync.Mutex"
	}
	for _, mu = range m { // want "range var mu copies lock: sync.Mutex"
	}
	for _, v := range m { // want "range var v copies lock: sync.Mutex"
	}

	var c chan sync.Mutex
	for range c {
	}
	for mu = range c { // want "range var mu copies lock: sync.Mutex"
	}
	for v := range c { // want "range var v copies lock: sync.Mutex"
	}

	// Test non-idents in range variables
	var t struct {
		i  int
		mu sync.Mutex
	}
	for t.i, t.mu = range s { // want "range var t.mu copies lock: sync.Mutex"
	}
}
| tools/go/analysis/passes/copylock/testdata/src/a/copylock_range.go/0 | {
"file_path": "tools/go/analysis/passes/copylock/testdata/src/a/copylock_range.go",
"repo_id": "tools",
"token_count": 589
} | 686 |
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fieldalignment_test
import (
"testing"
"golang.org/x/tools/go/analysis/analysistest"
"golang.org/x/tools/go/analysis/passes/fieldalignment"
)
// TestTest runs the fieldalignment analyzer over the fixtures in
// testdata/src/a and verifies both diagnostics and suggested fixes.
func TestTest(t *testing.T) {
	analysistest.RunWithSuggestedFixes(t, analysistest.TestData(), fieldalignment.Analyzer, "a")
}
| tools/go/analysis/passes/fieldalignment/fieldalignment_test.go/0 | {
"file_path": "tools/go/analysis/passes/fieldalignment/fieldalignment_test.go",
"repo_id": "tools",
"token_count": 161
} | 687 |
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains tests for the ifaceassert checker.
package a
import "io"
// InterfaceAssertionTest exercises the ifaceassert checker: type
// assertions and type switches whose target interface has a method
// signature that conflicts with the asserted value's interface are
// impossible and must be reported. The "want" comments are
// analysistest expectations (regular expressions) and must match the
// diagnostics exactly.
func InterfaceAssertionTest() {
	var (
		a io.ReadWriteSeeker
		b interface {
			Read()
			Write()
		}
	)
	_ = a.(io.Reader)
	_ = a.(io.ReadWriter)
	_ = b.(io.Reader) // want `^impossible type assertion: no type can implement both interface{Read\(\); Write\(\)} and io.Reader \(conflicting types for Read method\)$`
	_ = b.(interface { // want `^impossible type assertion: no type can implement both interface{Read\(\); Write\(\)} and interface{Read\(p \[\]byte\) \(n int, err error\)} \(conflicting types for Read method\)$`
		Read(p []byte) (n int, err error)
	})

	switch a.(type) {
	case io.ReadWriter:
	case interface { // want `^impossible type assertion: no type can implement both io.ReadWriteSeeker and interface{Write\(\)} \(conflicting types for Write method\)$`
		Write()
	}:
	default:
	}

	switch b := b.(type) {
	case io.ReadWriter, interface{ Read() }: // want `^impossible type assertion: no type can implement both interface{Read\(\); Write\(\)} and io.ReadWriter \(conflicting types for Read method\)$`
	case io.Writer: // want `^impossible type assertion: no type can implement both interface{Read\(\); Write\(\)} and io.Writer \(conflicting types for Write method\)$`
	default:
		_ = b
	}
}
| tools/go/analysis/passes/ifaceassert/testdata/src/a/a.go/0 | {
"file_path": "tools/go/analysis/passes/ifaceassert/testdata/src/a/a.go",
"repo_id": "tools",
"token_count": 472
} | 688 |
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package lostcancel defines an Analyzer that checks for failure to
// call a context cancellation function.
//
// # Analyzer lostcancel
//
// lostcancel: check cancel func returned by context.WithCancel is called
//
// The cancellation function returned by context.WithCancel, WithTimeout,
// and WithDeadline must be called or the new context will remain live
// until its parent context is cancelled.
// (The background context is never cancelled.)
package lostcancel
| tools/go/analysis/passes/lostcancel/doc.go/0 | {
"file_path": "tools/go/analysis/passes/lostcancel/doc.go",
"repo_id": "tools",
"token_count": 153
} | 689 |
package b
// f converts a nil slice to array pointers: length-0 conversions yield
// a nil pointer (dereference is reported), while length>0 conversions
// always panic. The "want" comments are analysistest expectations.
func f() {
	var s []int
	t := (*[0]int)(s)
	_ = *t // want "nil dereference in load"
	_ = (*[0]int)(s)
	_ = *(*[0]int)(s) // want "nil dereference in load"
	// these operations always panic
	_ = (*[1]int)(s)  // want "nil slice being cast to an array of len > 0 will always panic"
	_ = *(*[1]int)(s) // want "nil slice being cast to an array of len > 0 will always panic"
}

// g shows the non-nil case: a made slice of length 0 converts to a
// valid *[0]int, so no diagnostic is expected.
func g() {
	var s = make([]int, 0)
	t := (*[0]int)(s)
	println(*t)
}

// h shows a made slice of length 1 converting to a valid *[1]int.
func h() {
	var s = make([]int, 1)
	t := (*[1]int)(s)
	println(*t)
}

// i checks that a successful len>0 conversion makes the pointer
// provably non-nil, so the nil comparison is tautological.
func i(x []int) {
	a := (*[1]int)(x)
	if a != nil { // want "tautological condition: non-nil != nil"
		_ = *a
	}
}

// The anonymous function checks calls through a nil interface value:
// both the dynamic method call and the method-value type assertion
// are reported.
func _(err error) {
	if err == nil {
		err.Error() // want "nil dereference in dynamic method call"
		// SSA uses TypeAssert for the nil check in a method value:
		_ = err.Error // want "nil dereference in type assertion"
	}
}
| tools/go/analysis/passes/nilness/testdata/src/b/b.go/0 | {
"file_path": "tools/go/analysis/passes/nilness/testdata/src/b/b.go",
"repo_id": "tools",
"token_count": 354
} | 690 |
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package structtag defines an Analyzer that checks struct field tags
// are well formed.
package structtag
import (
"errors"
"go/ast"
"go/token"
"go/types"
"path/filepath"
"reflect"
"strconv"
"strings"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
)
// Doc is the analyzer's user-facing documentation.
const Doc = `check that struct field tags conform to reflect.StructTag.Get

Also report certain struct tags (json, xml) used with unexported fields.`

// Analyzer is the structtag analysis pass. It runs even in packages
// with type errors (RunDespiteErrors) since tags can be checked from
// partial type information.
var Analyzer = &analysis.Analyzer{
	Name:             "structtag",
	Doc:              Doc,
	URL:              "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/structtag",
	Requires:         []*analysis.Analyzer{inspect.Analyzer},
	RunDespiteErrors: true,
	Run:              run,
}
// run walks every struct type in the package and checks each field's
// tag for syntax errors, duplicates, and misuse on unexported fields.
func run(pass *analysis.Pass) (interface{}, error) {
	insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)

	insp.Preorder([]ast.Node{(*ast.StructType)(nil)}, func(n ast.Node) {
		styp, ok := pass.TypesInfo.Types[n.(*ast.StructType)].Type.(*types.Struct)
		if !ok {
			// Type information may be incomplete.
			return
		}
		var seen namesSeen
		for i := 0; i < styp.NumFields(); i++ {
			checkCanonicalFieldTag(pass, styp.Field(i), styp.Tag(i), &seen)
		}
	})
	return nil, nil
}
// namesSeen keeps track of encoding tags by their key, name, and nested level
// from the initial struct. The level is taken into account because equal
// encoding key names only conflict when at the same level; otherwise, the lower
// level shadows the higher level.
type namesSeen map[uniqueName]token.Pos

// uniqueName identifies one encoding name at one nesting depth.
type uniqueName struct {
	key   string // "xml" or "json"
	name  string // the encoding name
	level int    // anonymous struct nesting level
}
// Get reports whether the (key, name, level) triple has been recorded,
// and if so, at which position.
func (s *namesSeen) Get(key, name string, level int) (token.Pos, bool) {
	// Reading from a nil map is safe and yields the zero value, so
	// unlike Set there is no need to allocate the map here.
	pos, ok := (*s)[uniqueName{key, name, level}]
	return pos, ok
}
func (s *namesSeen) Set(key, name string, level int, pos token.Pos) {
if *s == nil {
*s = make(map[uniqueName]token.Pos)
}
(*s)[uniqueName{key, name, level}] = pos
}
// checkTagDups lists the tag keys checked for duplicate encoding names.
var checkTagDups = []string{"json", "xml"}

// checkTagSpaces lists the tag keys whose values get suspicious-space checks.
var checkTagSpaces = map[string]bool{"json": true, "xml": true, "asn1": true}
// checkCanonicalFieldTag checks a single struct field tag: it reports tags
// that reflect.StructTag.Get cannot parse, duplicate json/xml names
// (tracked across fields in seen), and json/xml tags on unexported fields.
func checkCanonicalFieldTag(pass *analysis.Pass, field *types.Var, tag string, seen *namesSeen) {
	switch pass.Pkg.Path() {
	case "encoding/json", "encoding/xml":
		// These packages know how to use their own APIs.
		// Sometimes they are testing what happens to incorrect programs.
		return
	}

	// Duplicate-name detection, one pass per checked encoding key.
	for _, key := range checkTagDups {
		checkTagDuplicates(pass, tag, key, field, field, seen, 1)
	}

	if err := validateStructTag(tag); err != nil {
		pass.Reportf(field.Pos(), "struct field tag %#q not compatible with reflect.StructTag.Get: %s", tag, err)
	}

	// Check for use of json or xml tags with unexported fields.

	// Embedded struct. Nothing to do for now, but that
	// may change, depending on what happens with issue 7363.
	// TODO(adonovan): investigate, now that that issue is fixed.
	if field.Anonymous() {
		return
	}

	if field.Exported() {
		return
	}

	for _, enc := range [...]string{"json", "xml"} {
		switch reflect.StructTag(tag).Get(enc) {
		// Ignore warning if the field not exported and the tag is marked as
		// ignored.
		case "", "-":
		default:
			pass.Reportf(field.Pos(), "struct field %s has %s tag but is not exported", field.Name(), enc)
			return
		}
	}
}
// checkTagDuplicates checks a single struct field tag to see if any tags are
// duplicated. nearest is the field that's closest to the field being checked,
// while still being part of the top-level struct type (diagnostics are
// reported at nearest so they point into the struct under inspection even
// when the duplicate lives in a promoted anonymous field).
func checkTagDuplicates(pass *analysis.Pass, tag, key string, nearest, field *types.Var, seen *namesSeen, level int) {
	val := reflect.StructTag(tag).Get(key)
	if val == "-" {
		// Ignored, even if the field is anonymous.
		return
	}
	if val == "" || val[0] == ',' {
		if !field.Anonymous() {
			// Ignored if the field isn't anonymous.
			return
		}
		// Untagged anonymous struct: recurse into its exported fields,
		// which are promoted into the enclosing struct one level deeper.
		typ, ok := field.Type().Underlying().(*types.Struct)
		if !ok {
			return
		}
		for i := 0; i < typ.NumFields(); i++ {
			field := typ.Field(i)
			if !field.Exported() {
				continue
			}
			tag := typ.Tag(i)
			checkTagDuplicates(pass, tag, key, nearest, field, seen, level+1)
		}
		return
	}
	if key == "xml" && field.Name() == "XMLName" {
		// XMLName defines the XML element name of the struct being
		// checked. That name cannot collide with element or attribute
		// names defined on other fields of the struct. Vet does not have a
		// check for untagged fields of type struct defining their own name
		// by containing a field named XMLName; see issue 18256.
		return
	}
	if i := strings.Index(val, ","); i >= 0 {
		if key == "xml" {
			// Use a separate namespace for XML attributes.
			for _, opt := range strings.Split(val[i:], ",") {
				if opt == "attr" {
					key += " attribute" // Key is part of the error message.
					break
				}
			}
		}
		// Strip options; only the name takes part in duplicate detection.
		val = val[:i]
	}
	if pos, ok := seen.Get(key, val, level); ok {
		alsoPos := pass.Fset.Position(pos)
		alsoPos.Column = 0

		// Make the "also at" position relative to the current position,
		// to ensure that all warnings are unambiguous and correct. For
		// example, via anonymous struct fields, it's possible for the
		// two fields to be in different packages and directories.
		thisPos := pass.Fset.Position(field.Pos())
		rel, err := filepath.Rel(filepath.Dir(thisPos.Filename), alsoPos.Filename)
		if err != nil {
			// Possibly because the paths are relative; leave the
			// filename alone.
		} else {
			alsoPos.Filename = rel
		}

		pass.Reportf(nearest.Pos(), "struct field %s repeats %s tag %q also at %s", field.Name(), key, val, alsoPos)
	} else {
		seen.Set(key, val, level, field.Pos())
	}
}
var (
	errTagSyntax      = errors.New("bad syntax for struct tag pair")
	errTagKeySyntax   = errors.New("bad syntax for struct tag key")
	errTagValueSyntax = errors.New("bad syntax for struct tag value")
	errTagValueSpace  = errors.New("suspicious space in struct tag value")
	errTagSpace       = errors.New("key:\"value\" pairs not separated by spaces")
)

// validateStructTag parses the struct tag and returns an error if it is not
// in the canonical format, which is a space-separated list of key:"value"
// settings. The value may contain spaces.
func validateStructTag(tag string) error {
	// This code is based on the StructTag.Get code in package reflect.

	for first := true; tag != ""; first = false {
		if !first && tag[0] != ' ' {
			// More restrictive than reflect, but catches likely mistakes
			// like `x:"foo",y:"bar"`, which parses as `x:"foo" ,y:"bar"`
			// with second key ",y".
			return errTagSpace
		}

		// Skip leading spaces; a trailing run of spaces ends the tag.
		tag = strings.TrimLeft(tag, " ")
		if tag == "" {
			break
		}

		// Scan to colon. A space, a quote or a control character is a syntax
		// error. Strictly speaking, control chars include the range
		// [0x7f, 0x9f], not just [0x00, 0x1f], but in practice, we ignore the
		// multi-byte control characters as it is simpler to inspect the tag's
		// bytes than the tag's runes.
		keyEnd := 0
		for keyEnd < len(tag) {
			c := tag[keyEnd]
			if c <= ' ' || c == ':' || c == '"' || c == 0x7f {
				break
			}
			keyEnd++
		}
		switch {
		case keyEnd == 0:
			return errTagKeySyntax
		case keyEnd+1 >= len(tag) || tag[keyEnd] != ':':
			return errTagSyntax
		case tag[keyEnd+1] != '"':
			return errTagValueSyntax
		}
		key := tag[:keyEnd]
		tag = tag[keyEnd+1:]

		// Scan the quoted value, honoring backslash escapes.
		valEnd := 1
		for valEnd < len(tag) && tag[valEnd] != '"' {
			if tag[valEnd] == '\\' {
				valEnd++
			}
			valEnd++
		}
		if valEnd >= len(tag) {
			return errTagValueSyntax
		}
		qvalue := tag[:valEnd+1]
		tag = tag[valEnd+1:]

		value, err := strconv.Unquote(qvalue)
		if err != nil {
			return errTagValueSyntax
		}

		if !checkTagSpaces[key] {
			continue
		}

		switch key {
		case "xml":
			// If the first or last character in the XML tag is a space, it is
			// suspicious.
			if strings.Trim(value, " ") != value {
				return errTagValueSpace
			}
			// If there are multiple spaces, they are suspicious.
			if strings.Count(value, " ") > 1 {
				return errTagValueSpace
			}
			// If there is no comma, skip the rest of the checks.
			comma := strings.IndexRune(value, ',')
			if comma < 0 {
				continue
			}
			// If the character before a comma is a space, this is suspicious.
			if comma > 0 && value[comma-1] == ' ' {
				return errTagValueSpace
			}
			value = value[comma+1:]
		case "json":
			// JSON allows using spaces in the name, so skip it.
			comma := strings.IndexRune(value, ',')
			if comma < 0 {
				continue
			}
			value = value[comma+1:]
		}

		// Any space remaining in the options portion is suspicious.
		if strings.IndexByte(value, ' ') >= 0 {
			return errTagValueSpace
		}
	}
	return nil
}
| tools/go/analysis/passes/structtag/structtag.go/0 | {
"file_path": "tools/go/analysis/passes/structtag/structtag.go",
"repo_id": "tools",
"token_count": 3324
} | 691 |
// Package b is a minimal fixture (appears to be testdata for an analysis
// pass, per its directory — see the surrounding test suite).
package b

// Foo is an exported type referenced by the test expectations.
type Foo struct {
}

// F is a deliberate no-op; only its existence matters to the tests.
func (f *Foo) F() {
}
| tools/go/analysis/passes/tests/testdata/src/b/b.go/0 | {
"file_path": "tools/go/analysis/passes/tests/testdata/src/b/b.go",
"repo_id": "tools",
"token_count": 26
} | 692 |
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains tests for the unmarshal checker.
package testdata
import (
"encoding/asn1"
"encoding/gob"
"encoding/json"
"encoding/xml"
"io"
)
// _ feeds each standard-library decoder a mix of pointer and non-pointer
// destinations; every non-pointer (and untyped nil) destination is expected
// to be diagnosed, as marked by the inline "want" comments.
func _() {
	type t struct {
		a int
	}
	var v t
	var r io.Reader

	json.Unmarshal([]byte{}, v) // want "call of Unmarshal passes non-pointer as second argument"
	json.Unmarshal([]byte{}, &v)
	json.NewDecoder(r).Decode(v) // want "call of Decode passes non-pointer"
	json.NewDecoder(r).Decode(&v)
	gob.NewDecoder(r).Decode(v) // want "call of Decode passes non-pointer"
	gob.NewDecoder(r).Decode(&v)
	xml.Unmarshal([]byte{}, v) // want "call of Unmarshal passes non-pointer as second argument"
	xml.Unmarshal([]byte{}, &v)
	xml.NewDecoder(r).Decode(v) // want "call of Decode passes non-pointer"
	xml.NewDecoder(r).Decode(&v)
	asn1.Unmarshal([]byte{}, v) // want "call of Unmarshal passes non-pointer as second argument"
	asn1.Unmarshal([]byte{}, &v)

	// A pointer is fine; dereferencing it back to a value is not.
	var p *t
	json.Unmarshal([]byte{}, p)
	json.Unmarshal([]byte{}, *p) // want "call of Unmarshal passes non-pointer as second argument"
	json.NewDecoder(r).Decode(p)
	json.NewDecoder(r).Decode(*p) // want "call of Decode passes non-pointer"
	gob.NewDecoder(r).Decode(p)
	gob.NewDecoder(r).Decode(*p) // want "call of Decode passes non-pointer"
	xml.Unmarshal([]byte{}, p)
	xml.Unmarshal([]byte{}, *p) // want "call of Unmarshal passes non-pointer as second argument"
	xml.NewDecoder(r).Decode(p)
	xml.NewDecoder(r).Decode(*p) // want "call of Decode passes non-pointer"
	asn1.Unmarshal([]byte{}, p)
	asn1.Unmarshal([]byte{}, *p) // want "call of Unmarshal passes non-pointer as second argument"

	// Interface-typed destinations are not reported.
	var i interface{}
	json.Unmarshal([]byte{}, i)
	json.NewDecoder(r).Decode(i)

	json.Unmarshal([]byte{}, nil)               // want "call of Unmarshal passes non-pointer as second argument"
	json.Unmarshal([]byte{}, []t{})             // want "call of Unmarshal passes non-pointer as second argument"
	json.Unmarshal([]byte{}, map[string]int{})  // want "call of Unmarshal passes non-pointer as second argument"
	json.NewDecoder(r).Decode(nil)              // want "call of Decode passes non-pointer"
	json.NewDecoder(r).Decode([]t{})            // want "call of Decode passes non-pointer"
	json.NewDecoder(r).Decode(map[string]int{}) // want "call of Decode passes non-pointer"
	json.Unmarshal(func() ([]byte, interface{}) { return []byte{}, v }())
}
| tools/go/analysis/passes/unmarshal/testdata/src/a/a.go/0 | {
"file_path": "tools/go/analysis/passes/unmarshal/testdata/src/a/a.go",
"repo_id": "tools",
"token_count": 999
} | 693 |
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package unusedresult defines an analyzer that checks for unused
// results of calls to certain pure functions.
//
// # Analyzer unusedresult
//
// unusedresult: check for unused results of calls to some functions
//
// Some functions like fmt.Errorf return a result and have no side
// effects, so it is always a mistake to discard the result. Other
// functions may return an error that must not be ignored, or a cleanup
// operation that must be called. This analyzer reports calls to
// functions like these when the result of the call is ignored.
//
// The set of functions may be controlled using flags.
package unusedresult
| tools/go/analysis/passes/unusedresult/doc.go/0 | {
"file_path": "tools/go/analysis/passes/unusedresult/doc.go",
"repo_id": "tools",
"token_count": 186
} | 694 |
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package usesgenerics
import (
_ "embed"
"reflect"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/internal/typeparams/genericfeatures"
)
// doc holds this file's package documentation, extracted at init time.
//go:embed doc.go
var doc string

// Analyzer reports which generic language features a package uses, both
// directly and (via per-package facts) transitively through its imports.
var Analyzer = &analysis.Analyzer{
	Name:       "usesgenerics",
	Doc:        analysisutil.MustExtractDoc(doc, "usesgenerics"),
	URL:        "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/usesgenerics",
	Requires:   []*analysis.Analyzer{inspect.Analyzer},
	Run:        run,
	ResultType: reflect.TypeOf((*Result)(nil)),
	FactTypes:  []analysis.Fact{new(featuresFact)},
}
// Features is a set of flags describing generic language features used by a
// package; it aliases the internal genericfeatures representation.
type Features = genericfeatures.Features

// Feature flags re-exported so clients need not import the internal package.
const (
	GenericTypeDecls  = genericfeatures.GenericTypeDecls
	GenericFuncDecls  = genericfeatures.GenericFuncDecls
	EmbeddedTypeSets  = genericfeatures.EmbeddedTypeSets
	TypeInstantiation = genericfeatures.TypeInstantiation
	FuncInstantiation = genericfeatures.FuncInstantiation
)
// Result is the usesgenerics analyzer result type. The Direct field records
// features used directly by the package being analyzed (i.e. contained in the
// package source code). The Transitive field records any features used by the
// package or any of its transitive imports.
type Result struct {
	Direct, Transitive Features
}

// featuresFact is the fact exported per package, carrying its transitive
// feature set so importing packages can accumulate it.
type featuresFact struct {
	Features Features
}

// AFact marks featuresFact as an analysis.Fact.
func (f *featuresFact) AFact() {}

// String renders the fact for diagnostics and fact-export tests.
func (f *featuresFact) String() string { return f.Features.String() }
// run computes the generic features used directly by the package, ORs in the
// features reported by its imports, and exports a fact only when the combined
// set is nonempty.
func run(pass *analysis.Pass) (interface{}, error) {
	insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)

	direct := genericfeatures.ForPackage(insp, pass.TypesInfo)
	transitive := direct | importedTransitiveFeatures(pass)
	if transitive != 0 {
		pass.ExportPackageFact(&featuresFact{transitive})
	}
	return &Result{Direct: direct, Transitive: transitive}, nil
}
// importedTransitiveFeatures returns the union of the feature sets exported
// as facts by the package's direct imports (each of which already includes
// its own transitive imports).
func importedTransitiveFeatures(pass *analysis.Pass) Features {
	var combined Features
	for _, imported := range pass.Pkg.Imports() {
		var fact featuresFact
		if pass.ImportPackageFact(imported, &fact) {
			combined |= fact.Features
		}
	}
	return combined
}
| tools/go/analysis/passes/usesgenerics/usesgenerics.go/0 | {
"file_path": "tools/go/analysis/passes/usesgenerics/usesgenerics.go",
"repo_id": "tools",
"token_count": 806
} | 695 |
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package astutil_test
import (
"bytes"
"go/ast"
"go/format"
"go/parser"
"go/token"
"testing"
"golang.org/x/tools/go/ast/astutil"
)
// rewriteTest describes one astutil.Apply scenario: parse orig, apply the
// pre/post callbacks, and expect the formatted result to equal want.
type rewriteTest struct {
	name       string
	orig, want string
	pre, post  astutil.ApplyFunc
}
// rewriteTests is the scenario table shared by TestRewrite and
// BenchmarkRewrite. The want strings reflect go/format output for the
// rewritten AST (note: the printer separates the package clause from the
// first declaration with a blank line).
var rewriteTests = []rewriteTest{
	{name: "nop", orig: "package p\n", want: "package p\n"},

	{name: "replace",
		orig: `package p

var x int
`,
		want: `package p

var t T
`,
		post: func(c *astutil.Cursor) bool {
			if _, ok := c.Node().(*ast.ValueSpec); ok {
				c.Replace(valspec("t", "T"))
				return false
			}
			return true
		},
	},

	{name: "set doc strings",
		orig: `package p

const z = 0

type T struct{}

var x int
`,
		want: `package p

// a foo is a foo
const z = 0

// a foo is a foo
type T struct{}

// a foo is a foo
var x int
`,
		post: func(c *astutil.Cursor) bool {
			if _, ok := c.Parent().(*ast.GenDecl); ok && c.Name() == "Doc" && c.Node() == nil {
				c.Replace(&ast.CommentGroup{List: []*ast.Comment{{Text: "// a foo is a foo"}}})
			}
			return true
		},
	},

	{name: "insert names",
		orig: `package p

const a = 1
`,
		want: `package p

const a, b, c = 1, 2, 3
`,
		pre: func(c *astutil.Cursor) bool {
			if _, ok := c.Parent().(*ast.ValueSpec); ok {
				switch c.Name() {
				case "Names":
					c.InsertAfter(ast.NewIdent("c"))
					c.InsertAfter(ast.NewIdent("b"))
				case "Values":
					c.InsertAfter(&ast.BasicLit{Kind: token.INT, Value: "3"})
					c.InsertAfter(&ast.BasicLit{Kind: token.INT, Value: "2"})
				}
			}
			return true
		},
	},

	{name: "insert",
		orig: `package p

var (
	x int
	y int
)
`,
		want: `package p

var before1 int
var before2 int

var (
	x int
	y int
)
var after2 int
var after1 int
`,
		pre: func(c *astutil.Cursor) bool {
			if _, ok := c.Node().(*ast.GenDecl); ok {
				c.InsertBefore(vardecl("before1", "int"))
				c.InsertAfter(vardecl("after1", "int"))
				c.InsertAfter(vardecl("after2", "int"))
				c.InsertBefore(vardecl("before2", "int"))
			}
			return true
		},
	},

	{name: "delete",
		orig: `package p

var x int
var y int
var z int
`,
		want: `package p

var y int
var z int
`,
		pre: func(c *astutil.Cursor) bool {
			n := c.Node()
			if d, ok := n.(*ast.GenDecl); ok && d.Specs[0].(*ast.ValueSpec).Names[0].Name == "x" {
				c.Delete()
			}
			return true
		},
	},

	{name: "insertafter-delete",
		orig: `package p

var x int
var y int
var z int
`,
		want: `package p

var x1 int
var y int
var z int
`,
		pre: func(c *astutil.Cursor) bool {
			n := c.Node()
			if d, ok := n.(*ast.GenDecl); ok && d.Specs[0].(*ast.ValueSpec).Names[0].Name == "x" {
				c.InsertAfter(vardecl("x1", "int"))
				c.Delete()
			}
			return true
		},
	},

	{name: "delete-insertafter",
		orig: `package p

var x int
var y int
var z int
`,
		want: `package p

var y int
var x1 int
var z int
`,
		pre: func(c *astutil.Cursor) bool {
			n := c.Node()
			if d, ok := n.(*ast.GenDecl); ok && d.Specs[0].(*ast.ValueSpec).Names[0].Name == "x" {
				c.Delete()
				// The cursor is now effectively atop the 'var y int' node.
				c.InsertAfter(vardecl("x1", "int"))
			}
			return true
		},
	},

	{
		name: "replace",
		orig: `package p

type T[P1, P2 any] int

type R T[int, string]

func F[Q1 any](q Q1) {}
`,
		// TODO: note how the rewrite adds a trailing comma in "func F".
		// Is that a bug in the test, or in astutil.Apply?
		want: `package p

type S[R1, P2 any] int32

type R S[int32, string]

func F[X1 any](q X1,) {}
`,
		post: func(c *astutil.Cursor) bool {
			if ident, ok := c.Node().(*ast.Ident); ok {
				switch ident.Name {
				case "int":
					c.Replace(ast.NewIdent("int32"))
				case "T":
					c.Replace(ast.NewIdent("S"))
				case "P1":
					c.Replace(ast.NewIdent("R1"))
				case "Q1":
					c.Replace(ast.NewIdent("X1"))
				}
			}
			return true
		},
	},
}
func valspec(name, typ string) *ast.ValueSpec {
return &ast.ValueSpec{Names: []*ast.Ident{ast.NewIdent(name)},
Type: ast.NewIdent(typ),
}
}
func vardecl(name, typ string) *ast.GenDecl {
return &ast.GenDecl{
Tok: token.VAR,
Specs: []ast.Spec{valspec(name, typ)},
}
}
// TestRewrite runs every rewriteTests case as a parallel subtest: parse
// orig, run astutil.Apply with the case's callbacks, format the result, and
// compare against want.
func TestRewrite(t *testing.T) {
	t.Run("*", func(t *testing.T) {
		for _, tc := range rewriteTests {
			tc := tc // capture range variable for the parallel subtest
			t.Run(tc.name, func(t *testing.T) {
				t.Parallel()

				fset := token.NewFileSet()
				file, err := parser.ParseFile(fset, tc.name, tc.orig, parser.ParseComments)
				if err != nil {
					t.Fatal(err)
				}
				rewritten := astutil.Apply(file, tc.pre, tc.post)

				var out bytes.Buffer
				if err := format.Node(&out, fset, rewritten); err != nil {
					t.Fatal(err)
				}
				if got := out.String(); got != tc.want {
					t.Errorf("got:\n\n%s\nwant:\n\n%s\n", got, tc.want)
				}
			})
		}
	})
}
// sink keeps the Apply result alive so the compiler cannot discard the call.
var sink ast.Node

// BenchmarkRewrite times astutil.Apply on each rewrite scenario; per-iteration
// parsing is excluded from the measurement via StopTimer/StartTimer.
func BenchmarkRewrite(b *testing.B) {
	for _, test := range rewriteTests {
		b.Run(test.name, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				b.StopTimer()
				fset := token.NewFileSet()
				f, err := parser.ParseFile(fset, test.name, test.orig, parser.ParseComments)
				if err != nil {
					b.Fatal(err)
				}
				b.StartTimer()
				sink = astutil.Apply(f, test.pre, test.post)
			}
		})
	}
}
| tools/go/ast/astutil/rewrite_test.go/0 | {
"file_path": "tools/go/ast/astutil/rewrite_test.go",
"repo_id": "tools",
"token_count": 2426
} | 696 |
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package callgraph_test
import (
"log"
"sync"
"testing"
"golang.org/x/tools/go/callgraph"
"golang.org/x/tools/go/callgraph/cha"
"golang.org/x/tools/go/callgraph/rta"
"golang.org/x/tools/go/callgraph/static"
"golang.org/x/tools/go/callgraph/vta"
"golang.org/x/tools/go/loader"
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/ssa/ssautil"
)
// Benchmarks comparing different callgraph algorithms implemented in
// x/tools/go/callgraph. Comparison is on both speed, memory and precision.
// Fewer edges and fewer reachable nodes implies a more precise result.
// Comparison is done on a hello world http server using net/http.
//
// Current results were on an i7 macbook on go version devel go1.20-2730.
// Number of nodes, edges, and reachable function are expected to vary between
// go versions. Timing results are expected to vary between machines.
// BenchmarkStatic-12 53 ms/op 6 MB/op 12113 nodes 37355 edges 1522 reachable
// BenchmarkCHA-12 86 ms/op 16 MB/op 12113 nodes 131717 edges 7640 reachable
// BenchmarkRTA-12 110 ms/op 12 MB/op 6566 nodes 42291 edges 5099 reachable
// BenchmarkPTA-12 1427 ms/op 600 MB/op 8714 nodes 28244 edges 4184 reachable
// BenchmarkVTA-12 600 ms/op 78 MB/op 12114 nodes 44861 edges 4919 reachable
// BenchmarkVTA2-12 793 ms/op 104 MB/op 5450 nodes 22208 edges 4042 reachable
// BenchmarkVTA3-12 977 ms/op 124 MB/op 4621 nodes 19331 edges 3700 reachable
// BenchmarkVTAAlt-12 372 ms/op 57 MB/op 7763 nodes 29912 edges 4258 reachable
// BenchmarkVTAAlt2-12 570 ms/op 78 MB/op 4838 nodes 20169 edges 3737 reachable
//
// Note:
// * Static is unsound and may miss real edges.
// * RTA starts from a main function and only includes reachable functions.
// * CHA starts from all functions.
// * VTA, VTA2, and VTA3 are starting from all functions and the CHA callgraph.
// VTA2 and VTA3 are the result of re-applying VTA to the functions reachable
// from main() via the callgraph of the previous stage.
// * VTAAlt, and VTAAlt2 start from the functions reachable from main via the
// CHA callgraph.
// * All algorithms are unsound w.r.t. reflection.
// httpEx is the program under analysis: a minimal net/http hello-world
// server, chosen so the benchmarks exercise a realistically large
// transitive dependency graph.
const httpEx = `package main

import (
	"fmt"
	"net/http"
)

func hello(w http.ResponseWriter, req *http.Request) {
	fmt.Fprintf(w, "hello world\n")
}

func main() {
	http.HandleFunc("/hello", hello)
	http.ListenAndServe(":8090", nil)
}
`

// Memoized SSA build of httpEx, shared by all benchmarks via example().
var (
	once sync.Once
	prog *ssa.Program
	main *ssa.Function
)
// example parses, loads, and builds SSA for httpEx exactly once (guarded by
// once), memoizing the program and its main function in package-level vars
// so every benchmark shares a single build.
func example() (*ssa.Program, *ssa.Function) {
	once.Do(func() {
		var conf loader.Config
		f, err := conf.ParseFile("<input>", httpEx)
		if err != nil {
			log.Fatal(err)
		}
		conf.CreateFromFiles(f.Name.Name, f)

		lprog, err := conf.Load()
		if err != nil {
			log.Fatalf("test 'package %s': Load: %s", f.Name.Name, err)
		}
		// Generics are instantiated so callgraph algorithms see concrete
		// instantiations rather than parameterized bodies.
		prog = ssautil.CreateProgram(lprog, ssa.InstantiateGenerics)
		prog.Build()
		main = prog.Package(lprog.Created[0].Pkg).Members["main"].(*ssa.Function)
	})
	return prog, main
}
var stats bool = false // print stats?

// logStats logs the node count, total edge count, and number of functions
// reachable from main for callgraph cg — but only when both cnd (typically
// "first benchmark iteration") and the stats toggle are true.
func logStats(b *testing.B, cnd bool, name string, cg *callgraph.Graph, main *ssa.Function) {
	if cnd && stats {
		e := 0 // total outgoing edges over all nodes
		for _, n := range cg.Nodes {
			e += len(n.Out)
		}
		r := len(reaches(main, cg, false))
		b.Logf("%s:\t%d nodes\t%d edges\t%d reachable", name, len(cg.Nodes), e, r)
	}
}
// BenchmarkStatic builds the static (direct calls only) callgraph.
func BenchmarkStatic(b *testing.B) {
	b.StopTimer()
	prog, main := example()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		cg := static.CallGraph(prog)
		logStats(b, i == 0, "static", cg, main)
	}
}

// BenchmarkCHA builds a Class Hierarchy Analysis callgraph over all functions.
func BenchmarkCHA(b *testing.B) {
	b.StopTimer()
	prog, main := example()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		cg := cha.CallGraph(prog)
		logStats(b, i == 0, "cha", cg, main)
	}
}

// BenchmarkRTA builds a Rapid Type Analysis callgraph rooted at main.
func BenchmarkRTA(b *testing.B) {
	b.StopTimer()
	_, main := example()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		res := rta.Analyze([]*ssa.Function{main}, true)
		cg := res.CallGraph
		logStats(b, i == 0, "rta", cg, main)
	}
}

// BenchmarkVTA runs Variable Type Analysis seeded with the CHA callgraph.
func BenchmarkVTA(b *testing.B) {
	b.StopTimer()
	prog, main := example()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		cg := vta.CallGraph(ssautil.AllFunctions(prog), cha.CallGraph(prog))
		logStats(b, i == 0, "vta", cg, main)
	}
}

// BenchmarkVTA2 re-applies VTA to the functions reachable from main in the
// first VTA result (see the precision/cost table in the file comment).
func BenchmarkVTA2(b *testing.B) {
	b.StopTimer()
	prog, main := example()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		vta1 := vta.CallGraph(ssautil.AllFunctions(prog), cha.CallGraph(prog))
		cg := vta.CallGraph(reaches(main, vta1, true), vta1)
		logStats(b, i == 0, "vta2", cg, main)
	}
}

// BenchmarkVTA3 applies a third VTA refinement round.
func BenchmarkVTA3(b *testing.B) {
	b.StopTimer()
	prog, main := example()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		vta1 := vta.CallGraph(ssautil.AllFunctions(prog), cha.CallGraph(prog))
		vta2 := vta.CallGraph(reaches(main, vta1, true), vta1)
		cg := vta.CallGraph(reaches(main, vta2, true), vta2)
		logStats(b, i == 0, "vta3", cg, main)
	}
}

// BenchmarkVTAAlt seeds VTA with only the CHA-reachable functions.
func BenchmarkVTAAlt(b *testing.B) {
	b.StopTimer()
	prog, main := example()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		cha := cha.CallGraph(prog)
		cg := vta.CallGraph(reaches(main, cha, true), cha) // start from only functions reachable by CHA.
		logStats(b, i == 0, "vta-alt", cg, main)
	}
}

// BenchmarkVTAAlt2 adds a second refinement round to the VTAAlt scheme.
func BenchmarkVTAAlt2(b *testing.B) {
	b.StopTimer()
	prog, main := example()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		cha := cha.CallGraph(prog)
		vta1 := vta.CallGraph(reaches(main, cha, true), cha)
		cg := vta.CallGraph(reaches(main, vta1, true), vta1)
		logStats(b, i == 0, "vta-alt2", cg, main)
	}
}
// reaches computes the transitive closure of functions forward reachable
// via calls in cg starting from source. If refs is true, also include
// *ssa.Function values referenced as instruction operands (e.g. function
// values that are stored or passed), not just resolved call edges.
func reaches(source *ssa.Function, cg *callgraph.Graph, refs bool) map[*ssa.Function]bool {
	seen := make(map[*ssa.Function]bool)
	var visit func(f *ssa.Function)
	visit = func(f *ssa.Function) {
		if seen[f] {
			return
		}
		seen[f] = true

		// Follow resolved call edges (those with a call site).
		if n := cg.Nodes[f]; n != nil {
			for _, e := range n.Out {
				if e.Site != nil {
					visit(e.Callee.Func)
				}
			}
		}

		// Optionally follow function values appearing as operands.
		if refs {
			var buf [10]*ssa.Value // avoid alloc in common case
			for _, b := range f.Blocks {
				for _, instr := range b.Instrs {
					for _, op := range instr.Operands(buf[:0]) {
						if fn, ok := (*op).(*ssa.Function); ok {
							visit(fn)
						}
					}
				}
			}
		}
	}
	visit(source)
	return seen
}
| tools/go/callgraph/callgraph_test.go/0 | {
"file_path": "tools/go/callgraph/callgraph_test.go",
"repo_id": "tools",
"token_count": 2707
} | 697 |
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package static_test
import (
"fmt"
"go/parser"
"reflect"
"sort"
"testing"
"golang.org/x/tools/go/callgraph"
"golang.org/x/tools/go/callgraph/static"
"golang.org/x/tools/go/loader"
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/ssa/ssautil"
)
// input is a program mixing statically resolvable calls (direct calls, a
// constant-propagated function value, a concrete method call) with dynamic
// ones (a reassigned function value, an interface call); only the static
// edges appear in TestStatic's expectations.
const input = `package P

type C int
func (C) f()

type I interface{f()}

func f() {
	p := func() {}
	g()
	p() // SSA constant propagation => static
	if unknown {
		p = h
	}
	p() // dynamic
	C(0).f()
}

func g() {
	var i I = C(0)
	i.f()
}

func h()

var unknown bool
`

// genericsInput exercises calls through an instantiated generic function;
// expected edges name the instantiations (cf. ssa.InstantiateGenerics).
const genericsInput = `package P

type I interface {
	F()
}

type A struct{}

func (a A) F() {}

type B struct{}

func (b B) F() {}

func instantiated[X I](x X) {
	x.F()
}

func Bar() {}

func f(h func(), a A, b B) {
	h()
	instantiated[A](a)
	instantiated[B](b)
}
`
// TestStatic checks the exact edge set produced by static.CallGraph on a
// plain program and on one using generics (instantiated by the SSA builder).
func TestStatic(t *testing.T) {
	for _, e := range []struct {
		input string
		want  []string
		// typeparams must be true if input uses type parameters
		typeparams bool
	}{
		{input, []string{
			"(*C).f -> (C).f",
			"f -> (C).f",
			"f -> f$1",
			"f -> g",
		}, false},
		{genericsInput, []string{
			"(*A).F -> (A).F",
			"(*B).F -> (B).F",
			"f -> instantiated[P.A]",
			"f -> instantiated[P.B]",
			"instantiated[P.A] -> (A).F",
			"instantiated[P.B] -> (B).F",
		}, true},
	} {
		conf := loader.Config{ParserMode: parser.ParseComments}
		f, err := conf.ParseFile("P.go", e.input)
		if err != nil {
			t.Error(err)
			continue
		}
		conf.CreateFromFiles("P", f)
		iprog, err := conf.Load()
		if err != nil {
			t.Error(err)
			continue
		}
		P := iprog.Created[0].Pkg

		prog := ssautil.CreateProgram(iprog, ssa.InstantiateGenerics)
		prog.Build()

		cg := static.CallGraph(prog)

		// Render each edge as "caller -> callee" (names relative to P) and
		// compare the sorted list against the expected set.
		var edges []string
		callgraph.GraphVisitEdges(cg, func(e *callgraph.Edge) error {
			edges = append(edges, fmt.Sprintf("%s -> %s",
				e.Caller.Func.RelString(P),
				e.Callee.Func.RelString(P)))
			return nil
		})
		sort.Strings(edges)

		if !reflect.DeepEqual(edges, e.want) {
			t.Errorf("Got edges %v, want %v", edges, e.want)
		}
	}
}
| tools/go/callgraph/static/static_test.go/0 | {
"file_path": "tools/go/callgraph/static/static_test.go",
"repo_id": "tools",
"token_count": 1007
} | 698 |
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// go:build ignore
package testdata

// I is the interface whose dynamic Foo calls the WANT section below tracks;
// each exercise function resolves an implementation out of the map `is`.
type I interface {
	Name() string
	Foo()
}

var is = make(map[string]I)

func init() {
	register(A{})
	register(B{})
}

func register(i I) {
	is[i.Name()] = i
}

type A struct{}

func (a A) Foo() {}

func (a A) Name() string { return "a" }

type B struct{}

func (b B) Foo() {}

func (b B) Name() string { return "b" }

// Do uses an explicit comma-ok map lookup before the interface call.
func Do(n string) {
	i, ok := is[n]
	if !ok {
		return
	}
	i.Foo()
}

// Go performs the comma-ok lookup inside an if initializer.
func Go(n string) {
	if i, ok := is[n]; !ok {
		return
	} else {
		i.Foo()
	}
}

// To assigns the comma-ok results to predeclared variables.
func To(n string) {
	var i I
	var ok bool
	if i, ok = is[n]; !ok {
		return
	}
	i.Foo()
}

// Ro uses a plain single-result map lookup.
func Ro(n string) {
	i := is[n]
	i.Foo()
}
// Relevant SSA:
// func Do(n string):
// t0 = *is
// t1 = t0[n],ok
// t2 = extract t1 #0
// t3 = extract t1 #1
// if t3 goto 2 else 1
// 1:
// return
// 2:
// t4 = invoke t2.Foo()
// return
// WANT:
// register: invoke i.Name() -> A.Name, B.Name
// Do: invoke t2.Foo() -> A.Foo, B.Foo
// Go: invoke t2.Foo() -> A.Foo, B.Foo
// To: invoke t2.Foo() -> A.Foo, B.Foo
// Ro: invoke t1.Foo() -> A.Foo, B.Foo
| tools/go/callgraph/vta/testdata/src/callgraph_comma_maps.go/0 | {
"file_path": "tools/go/callgraph/vta/testdata/src/callgraph_comma_maps.go",
"repo_id": "tools",
"token_count": 620
} | 699 |