text: string (length 2 to 1.1M)
id: string (length 11 to 117)
metadata: dict
__index_level_0__: int64 (0 to 885)
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build linux && (mips || mipsle)

package runtime

func archauxv(tag, val uintptr) {
}

func osArchInit() {}

//go:nosplit
func cputicks() int64 {
	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
	return nanotime()
}

const (
	_SS_DISABLE  = 2
	_NSIG        = 128 + 1
	_SIG_BLOCK   = 1
	_SIG_UNBLOCK = 2
	_SIG_SETMASK = 3
)

type sigset [4]uint32

var sigset_all = sigset{^uint32(0), ^uint32(0), ^uint32(0), ^uint32(0)}

//go:nosplit
//go:nowritebarrierrec
func sigaddset(mask *sigset, i int) {
	(*mask)[(i-1)/32] |= 1 << ((uint32(i) - 1) & 31)
}

func sigdelset(mask *sigset, i int) {
	(*mask)[(i-1)/32] &^= 1 << ((uint32(i) - 1) & 31)
}

//go:nosplit
func sigfillset(mask *[4]uint32) {
	(*mask)[0], (*mask)[1], (*mask)[2], (*mask)[3] = ^uint32(0), ^uint32(0), ^uint32(0), ^uint32(0)
}
go/src/runtime/os_linux_mipsx.go/0
{ "file_path": "go/src/runtime/os_linux_mipsx.go", "repo_id": "go", "token_count": 428 }
400
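The sigset helpers in os_linux_mipsx.go pack the 128 Linux/MIPS signal numbers into a [4]uint32 bitmask, one bit per signal, with numbering starting at 1. The standalone sketch below (hypothetical names, not runtime code) mirrors that word/bit indexing so the arithmetic can be checked in isolation.

package main

import "fmt"

// mask mirrors the runtime's sigset: 4 x 32 bits = 128 signal slots.
type mask [4]uint32

// set marks signal i (1-based, as on Linux) in the mask, like sigaddset.
func (m *mask) set(i int) {
	(*m)[(i-1)/32] |= 1 << ((uint32(i) - 1) & 31)
}

// clear removes signal i from the mask, matching sigdelset's &^=.
func (m *mask) clear(i int) {
	(*m)[(i-1)/32] &^= 1 << ((uint32(i) - 1) & 31)
}

// has reports whether signal i is present in the mask.
func (m *mask) has(i int) bool {
	return (*m)[(i-1)/32]&(1<<((uint32(i)-1)&31)) != 0
}

func main() {
	var m mask
	m.set(1)  // signal 1 lands in word 0, bit 0
	m.set(64) // signal 64 lands in word 1, bit 31
	m.set(65) // signal 65 lands in word 2, bit 0
	fmt.Println(m.has(64), m.has(65), m.has(2)) // true true false
	m.clear(64)
	fmt.Println(m.has(64)) // false
}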
// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package runtime import ( "internal/abi" "internal/goarch" "internal/runtime/atomic" "internal/runtime/sys" "internal/stringslite" "unsafe" ) // throwType indicates the current type of ongoing throw, which affects the // amount of detail printed to stderr. Higher values include more detail. type throwType uint32 const ( // throwTypeNone means that we are not throwing. throwTypeNone throwType = iota // throwTypeUser is a throw due to a problem with the application. // // These throws do not include runtime frames, system goroutines, or // frame metadata. throwTypeUser // throwTypeRuntime is a throw due to a problem with Go itself. // // These throws include as much information as possible to aid in // debugging the runtime, including runtime frames, system goroutines, // and frame metadata. throwTypeRuntime ) // We have two different ways of doing defers. The older way involves creating a // defer record at the time that a defer statement is executing and adding it to a // defer chain. This chain is inspected by the deferreturn call at all function // exits in order to run the appropriate defer calls. A cheaper way (which we call // open-coded defers) is used for functions in which no defer statements occur in // loops. In that case, we simply store the defer function/arg information into // specific stack slots at the point of each defer statement, as well as setting a // bit in a bitmask. At each function exit, we add inline code to directly make // the appropriate defer calls based on the bitmask and fn/arg information stored // on the stack. During panic/Goexit processing, the appropriate defer calls are // made using extra funcdata info that indicates the exact stack slots that // contain the bitmask and defer fn/args. // Check to make sure we can really generate a panic. If the panic // was generated from the runtime, or from inside malloc, then convert // to a throw of msg. // pc should be the program counter of the compiler-generated code that // triggered this panic. func panicCheck1(pc uintptr, msg string) { if goarch.IsWasm == 0 && stringslite.HasPrefix(funcname(findfunc(pc)), "runtime.") { // Note: wasm can't tail call, so we can't get the original caller's pc. throw(msg) } // TODO: is this redundant? How could we be in malloc // but not in the runtime? runtime/internal/*, maybe? gp := getg() if gp != nil && gp.m != nil && gp.m.mallocing != 0 { throw(msg) } } // Same as above, but calling from the runtime is allowed. // // Using this function is necessary for any panic that may be // generated by runtime.sigpanic, since those are always called by the // runtime. func panicCheck2(err string) { // panic allocates, so to avoid recursive malloc, turn panics // during malloc into throws. gp := getg() if gp != nil && gp.m != nil && gp.m.mallocing != 0 { throw(err) } } // Many of the following panic entry-points turn into throws when they // happen in various runtime contexts. These should never happen in // the runtime, and if they do, they indicate a serious issue and // should not be caught by user code. // // The panic{Index,Slice,divide,shift} functions are called by // code generated by the compiler for out of bounds index expressions, // out of bounds slice expressions, division by zero, and shift by negative. 
// The panicdivide (again), panicoverflow, panicfloat, and panicmem // functions are called by the signal handler when a signal occurs // indicating the respective problem. // // Since panic{Index,Slice,shift} are never called directly, and // since the runtime package should never have an out of bounds slice // or array reference or negative shift, if we see those functions called from the // runtime package we turn the panic into a throw. That will dump the // entire runtime stack for easier debugging. // // The entry points called by the signal handler will be called from // runtime.sigpanic, so we can't disallow calls from the runtime to // these (they always look like they're called from the runtime). // Hence, for these, we just check for clearly bad runtime conditions. // // The panic{Index,Slice} functions are implemented in assembly and tail call // to the goPanic{Index,Slice} functions below. This is done so we can use // a space-minimal register calling convention. // failures in the comparisons for s[x], 0 <= x < y (y == len(s)) // //go:yeswritebarrierrec func goPanicIndex(x int, y int) { panicCheck1(getcallerpc(), "index out of range") panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex}) } //go:yeswritebarrierrec func goPanicIndexU(x uint, y int) { panicCheck1(getcallerpc(), "index out of range") panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex}) } // failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s)) // //go:yeswritebarrierrec func goPanicSliceAlen(x int, y int) { panicCheck1(getcallerpc(), "slice bounds out of range") panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen}) } //go:yeswritebarrierrec func goPanicSliceAlenU(x uint, y int) { panicCheck1(getcallerpc(), "slice bounds out of range") panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen}) } //go:yeswritebarrierrec func goPanicSliceAcap(x int, y int) { panicCheck1(getcallerpc(), "slice bounds out of range") panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap}) } //go:yeswritebarrierrec func goPanicSliceAcapU(x uint, y int) { panicCheck1(getcallerpc(), "slice bounds out of range") panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap}) } // failures in the comparisons for s[x:y], 0 <= x <= y // //go:yeswritebarrierrec func goPanicSliceB(x int, y int) { panicCheck1(getcallerpc(), "slice bounds out of range") panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB}) } //go:yeswritebarrierrec func goPanicSliceBU(x uint, y int) { panicCheck1(getcallerpc(), "slice bounds out of range") panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB}) } // failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s)) func goPanicSlice3Alen(x int, y int) { panicCheck1(getcallerpc(), "slice bounds out of range") panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen}) } func goPanicSlice3AlenU(x uint, y int) { panicCheck1(getcallerpc(), "slice bounds out of range") panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen}) } func goPanicSlice3Acap(x int, y int) { panicCheck1(getcallerpc(), "slice bounds out of range") panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap}) } func goPanicSlice3AcapU(x uint, y int) { panicCheck1(getcallerpc(), "slice bounds out of range") panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap}) } // failures in the comparisons for s[:x:y], 0 <= 
x <= y func goPanicSlice3B(x int, y int) { panicCheck1(getcallerpc(), "slice bounds out of range") panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B}) } func goPanicSlice3BU(x uint, y int) { panicCheck1(getcallerpc(), "slice bounds out of range") panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B}) } // failures in the comparisons for s[x:y:], 0 <= x <= y func goPanicSlice3C(x int, y int) { panicCheck1(getcallerpc(), "slice bounds out of range") panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C}) } func goPanicSlice3CU(x uint, y int) { panicCheck1(getcallerpc(), "slice bounds out of range") panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C}) } // failures in the conversion ([x]T)(s) or (*[x]T)(s), 0 <= x <= y, y == len(s) func goPanicSliceConvert(x int, y int) { panicCheck1(getcallerpc(), "slice length too short to convert to array or pointer to array") panic(boundsError{x: int64(x), signed: true, y: y, code: boundsConvert}) } // Implemented in assembly, as they take arguments in registers. // Declared here to mark them as ABIInternal. func panicIndex(x int, y int) func panicIndexU(x uint, y int) func panicSliceAlen(x int, y int) func panicSliceAlenU(x uint, y int) func panicSliceAcap(x int, y int) func panicSliceAcapU(x uint, y int) func panicSliceB(x int, y int) func panicSliceBU(x uint, y int) func panicSlice3Alen(x int, y int) func panicSlice3AlenU(x uint, y int) func panicSlice3Acap(x int, y int) func panicSlice3AcapU(x uint, y int) func panicSlice3B(x int, y int) func panicSlice3BU(x uint, y int) func panicSlice3C(x int, y int) func panicSlice3CU(x uint, y int) func panicSliceConvert(x int, y int) var shiftError = error(errorString("negative shift amount")) //go:yeswritebarrierrec func panicshift() { panicCheck1(getcallerpc(), "negative shift amount") panic(shiftError) } var divideError = error(errorString("integer divide by zero")) //go:yeswritebarrierrec func panicdivide() { panicCheck2("integer divide by zero") panic(divideError) } var overflowError = error(errorString("integer overflow")) func panicoverflow() { panicCheck2("integer overflow") panic(overflowError) } var floatError = error(errorString("floating point error")) func panicfloat() { panicCheck2("floating point error") panic(floatError) } var memoryError = error(errorString("invalid memory address or nil pointer dereference")) func panicmem() { panicCheck2("invalid memory address or nil pointer dereference") panic(memoryError) } func panicmemAddr(addr uintptr) { panicCheck2("invalid memory address or nil pointer dereference") panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr}) } // Create a new deferred function fn, which has no arguments and results. // The compiler turns a defer statement into a call to this. func deferproc(fn func()) { gp := getg() if gp.m.curg != gp { // go code on the system stack can't defer throw("defer on system stack") } d := newdefer() d.link = gp._defer gp._defer = d d.fn = fn d.pc = getcallerpc() // We must not be preempted between calling getcallersp and // storing it to d.sp because getcallersp's result is a // uintptr stack pointer. d.sp = getcallersp() // deferproc returns 0 normally. // a deferred func that stops a panic // makes the deferproc return 1. // the code the compiler generates always // checks the return value and jumps to the // end of the function if deferproc returns != 0. 
return0() // No code can go here - the C return register has // been set and must not be clobbered. } var rangeDoneError = error(errorString("range function continued iteration after function for loop body returned false")) var rangePanicError = error(errorString("range function continued iteration after loop body panic")) var rangeExhaustedError = error(errorString("range function continued iteration after whole loop exit")) var rangeMissingPanicError = error(errorString("range function recovered a loop body panic and did not resume panicking")) //go:noinline func panicrangestate(state int) { switch abi.RF_State(state) { case abi.RF_DONE: panic(rangeDoneError) case abi.RF_PANIC: panic(rangePanicError) case abi.RF_EXHAUSTED: panic(rangeExhaustedError) case abi.RF_MISSING_PANIC: panic(rangeMissingPanicError) } throw("unexpected state passed to panicrangestate") } // deferrangefunc is called by functions that are about to // execute a range-over-function loop in which the loop body // may execute a defer statement. That defer needs to add to // the chain for the current function, not the func literal synthesized // to represent the loop body. To do that, the original function // calls deferrangefunc to obtain an opaque token representing // the current frame, and then the loop body uses deferprocat // instead of deferproc to add to that frame's defer lists. // // The token is an 'any' with underlying type *atomic.Pointer[_defer]. // It is the atomically-updated head of a linked list of _defer structs // representing deferred calls. At the same time, we create a _defer // struct on the main g._defer list with d.head set to this head pointer. // // The g._defer list is now a linked list of deferred calls, // but an atomic list hanging off: // // g._defer => d4 -> d3 -> drangefunc -> d2 -> d1 -> nil // | .head // | // +--> dY -> dX -> nil // // with each -> indicating a d.link pointer, and where drangefunc // has the d.rangefunc = true bit set. // Note that the function being ranged over may have added // its own defers (d4 and d3), so drangefunc need not be at the // top of the list when deferprocat is used. This is why we pass // the atomic head explicitly. // // To keep misbehaving programs from crashing the runtime, // deferprocat pushes new defers onto the .head list atomically. // The fact that it is a separate list from the main goroutine // defer list means that the main goroutine's defers can still // be handled non-atomically. // // In the diagram, dY and dX are meant to be processed when // drangefunc would be processed, which is to say the defer order // should be d4, d3, dY, dX, d2, d1. To make that happen, // when defer processing reaches a d with rangefunc=true, // it calls deferconvert to atomically take the extras // away from d.head and then adds them to the main list. // // That is, deferconvert changes this list: // // g._defer => drangefunc -> d2 -> d1 -> nil // | .head // | // +--> dY -> dX -> nil // // into this list: // // g._defer => dY -> dX -> d2 -> d1 -> nil // // It also poisons *drangefunc.head so that any future // deferprocat using that head will throw. // (The atomic head is ordinary garbage collected memory so that // it's not a problem if user code holds onto it beyond // the lifetime of drangefunc.) // // TODO: We could arrange for the compiler to call into the // runtime after the loop finishes normally, to do an eager // deferconvert, which would catch calling the loop body // and having it defer after the loop is done. 
If we have a // more general catch of loop body misuse, though, this // might not be worth worrying about in addition. // // See also ../cmd/compile/internal/rangefunc/rewrite.go. func deferrangefunc() any { gp := getg() if gp.m.curg != gp { // go code on the system stack can't defer throw("defer on system stack") } d := newdefer() d.link = gp._defer gp._defer = d d.pc = getcallerpc() // We must not be preempted between calling getcallersp and // storing it to d.sp because getcallersp's result is a // uintptr stack pointer. d.sp = getcallersp() d.rangefunc = true d.head = new(atomic.Pointer[_defer]) return d.head } // badDefer returns a fixed bad defer pointer for poisoning an atomic defer list head. func badDefer() *_defer { return (*_defer)(unsafe.Pointer(uintptr(1))) } // deferprocat is like deferproc but adds to the atomic list represented by frame. // See the doc comment for deferrangefunc for details. func deferprocat(fn func(), frame any) { head := frame.(*atomic.Pointer[_defer]) if raceenabled { racewritepc(unsafe.Pointer(head), getcallerpc(), abi.FuncPCABIInternal(deferprocat)) } d1 := newdefer() d1.fn = fn for { d1.link = head.Load() if d1.link == badDefer() { throw("defer after range func returned") } if head.CompareAndSwap(d1.link, d1) { break } } // Must be last - see deferproc above. return0() } // deferconvert converts the rangefunc defer list of d0 into an ordinary list // following d0. // See the doc comment for deferrangefunc for details. func deferconvert(d0 *_defer) { head := d0.head if raceenabled { racereadpc(unsafe.Pointer(head), getcallerpc(), abi.FuncPCABIInternal(deferconvert)) } tail := d0.link d0.rangefunc = false var d *_defer for { d = head.Load() if head.CompareAndSwap(d, badDefer()) { break } } if d == nil { return } for d1 := d; ; d1 = d1.link { d1.sp = d0.sp d1.pc = d0.pc if d1.link == nil { d1.link = tail break } } d0.link = d return } // deferprocStack queues a new deferred function with a defer record on the stack. // The defer record must have its fn field initialized. // All other fields can contain junk. // Nosplit because of the uninitialized pointer fields on the stack. // //go:nosplit func deferprocStack(d *_defer) { gp := getg() if gp.m.curg != gp { // go code on the system stack can't defer throw("defer on system stack") } // fn is already set. // The other fields are junk on entry to deferprocStack and // are initialized here. d.heap = false d.rangefunc = false d.sp = getcallersp() d.pc = getcallerpc() // The lines below implement: // d.panic = nil // d.fd = nil // d.link = gp._defer // d.head = nil // gp._defer = d // But without write barriers. The first three are writes to // the stack so they don't need a write barrier, and furthermore // are to uninitialized memory, so they must not use a write barrier. // The fourth write does not require a write barrier because we // explicitly mark all the defer structures, so we don't need to // keep track of pointers to them with a write barrier. *(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer)) *(*uintptr)(unsafe.Pointer(&d.head)) = 0 *(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d)) return0() // No code can go here - the C return register has // been set and must not be clobbered. } // Each P holds a pool for defers. // Allocate a Defer, usually using per-P pool. // Each defer must be released with freedefer. The defer is not // added to any defer chain yet. 
func newdefer() *_defer { var d *_defer mp := acquirem() pp := mp.p.ptr() if len(pp.deferpool) == 0 && sched.deferpool != nil { lock(&sched.deferlock) for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil { d := sched.deferpool sched.deferpool = d.link d.link = nil pp.deferpool = append(pp.deferpool, d) } unlock(&sched.deferlock) } if n := len(pp.deferpool); n > 0 { d = pp.deferpool[n-1] pp.deferpool[n-1] = nil pp.deferpool = pp.deferpool[:n-1] } releasem(mp) mp, pp = nil, nil if d == nil { // Allocate new defer. d = new(_defer) } d.heap = true return d } // popDefer pops the head of gp's defer list and frees it. func popDefer(gp *g) { d := gp._defer d.fn = nil // Can in theory point to the stack // We must not copy the stack between the updating gp._defer and setting // d.link to nil. Between these two steps, d is not on any defer list, so // stack copying won't adjust stack pointers in it (namely, d.link). Hence, // if we were to copy the stack, d could then contain a stale pointer. gp._defer = d.link d.link = nil // After this point we can copy the stack. if !d.heap { return } mp := acquirem() pp := mp.p.ptr() if len(pp.deferpool) == cap(pp.deferpool) { // Transfer half of local cache to the central cache. var first, last *_defer for len(pp.deferpool) > cap(pp.deferpool)/2 { n := len(pp.deferpool) d := pp.deferpool[n-1] pp.deferpool[n-1] = nil pp.deferpool = pp.deferpool[:n-1] if first == nil { first = d } else { last.link = d } last = d } lock(&sched.deferlock) last.link = sched.deferpool sched.deferpool = first unlock(&sched.deferlock) } *d = _defer{} pp.deferpool = append(pp.deferpool, d) releasem(mp) mp, pp = nil, nil } // deferreturn runs deferred functions for the caller's frame. // The compiler inserts a call to this at the end of any // function which calls defer. func deferreturn() { var p _panic p.deferreturn = true p.start(getcallerpc(), unsafe.Pointer(getcallersp())) for { fn, ok := p.nextDefer() if !ok { break } fn() } } // Goexit terminates the goroutine that calls it. No other goroutine is affected. // Goexit runs all deferred calls before terminating the goroutine. Because Goexit // is not a panic, any recover calls in those deferred functions will return nil. // // Calling Goexit from the main goroutine terminates that goroutine // without func main returning. Since func main has not returned, // the program continues execution of other goroutines. // If all other goroutines exit, the program crashes. func Goexit() { // Create a panic object for Goexit, so we can recognize when it might be // bypassed by a recover(). var p _panic p.goexit = true p.start(getcallerpc(), unsafe.Pointer(getcallersp())) for { fn, ok := p.nextDefer() if !ok { break } fn() } goexit1() } // Call all Error and String methods before freezing the world. // Used when crashing with panicking. func preprintpanics(p *_panic) { defer func() { text := "panic while printing panic value" switch r := recover().(type) { case nil: // nothing to do case string: throw(text + ": " + r) default: throw(text + ": type " + toRType(efaceOf(&r)._type).string()) } }() for p != nil { switch v := p.arg.(type) { case error: p.arg = v.Error() case stringer: p.arg = v.String() } p = p.link } } // Print all currently active panics. Used when crashing. // Should only be called after preprintpanics. 
func printpanics(p *_panic) { if p.link != nil { printpanics(p.link) if !p.link.goexit { print("\t") } } if p.goexit { return } print("panic: ") printpanicval(p.arg) if p.recovered { print(" [recovered]") } print("\n") } // readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the // uint32 and a pointer to the byte following the varint. // // The implementation is the same with runtime.readvarint, except that this function // uses unsafe.Pointer for speed. func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) { var r uint32 var shift int for { b := *(*uint8)(fd) fd = add(fd, unsafe.Sizeof(b)) if b < 128 { return r + uint32(b)<<shift, fd } r += uint32(b&0x7F) << (shift & 31) shift += 7 if shift > 28 { panic("Bad varint") } } } // A PanicNilError happens when code calls panic(nil). // // Before Go 1.21, programs that called panic(nil) observed recover returning nil. // Starting in Go 1.21, programs that call panic(nil) observe recover returning a *PanicNilError. // Programs can change back to the old behavior by setting GODEBUG=panicnil=1. type PanicNilError struct { // This field makes PanicNilError structurally different from // any other struct in this package, and the _ makes it different // from any struct in other packages too. // This avoids any accidental conversions being possible // between this struct and some other struct sharing the same fields, // like happened in go.dev/issue/56603. _ [0]*PanicNilError } func (*PanicNilError) Error() string { return "panic called with nil argument" } func (*PanicNilError) RuntimeError() {} var panicnil = &godebugInc{name: "panicnil"} // The implementation of the predeclared function panic. // The compiler emits calls to this function. // // gopanic should be an internal detail, // but widely used packages access it using linkname. // Notable members of the hall of shame include: // - go.undefinedlabs.com/scopeagent // - github.com/goplus/igop // // Do not remove or change the type signature. // See go.dev/issue/67401. // //go:linkname gopanic func gopanic(e any) { if e == nil { if debug.panicnil.Load() != 1 { e = new(PanicNilError) } else { panicnil.IncNonDefault() } } gp := getg() if gp.m.curg != gp { print("panic: ") printpanicval(e) print("\n") throw("panic on system stack") } if gp.m.mallocing != 0 { print("panic: ") printpanicval(e) print("\n") throw("panic during malloc") } if gp.m.preemptoff != "" { print("panic: ") printpanicval(e) print("\n") print("preempt off reason: ") print(gp.m.preemptoff) print("\n") throw("panic during preemptoff") } if gp.m.locks != 0 { print("panic: ") printpanicval(e) print("\n") throw("panic holding locks") } var p _panic p.arg = e runningPanicDefers.Add(1) p.start(getcallerpc(), unsafe.Pointer(getcallersp())) for { fn, ok := p.nextDefer() if !ok { break } fn() } // If we're tracing, flush the current generation to make the trace more // readable. // // TODO(aktau): Handle a panic from within traceAdvance more gracefully. // Currently it would hang. Not handled now because it is very unlikely, and // already unrecoverable. if traceEnabled() { traceAdvance(false) } // ran out of deferred calls - old-school panic now // Because it is unsafe to call arbitrary user code after freezing // the world, we call preprintpanics to invoke all necessary Error // and String methods to prepare the panic strings before startpanic. preprintpanics(&p) fatalpanic(&p) // should not return *(*int)(nil) = 0 // not reached } // start initializes a panic to start unwinding the stack. 
// // If p.goexit is true, then start may return multiple times. func (p *_panic) start(pc uintptr, sp unsafe.Pointer) { gp := getg() // Record the caller's PC and SP, so recovery can identify panics // that have been recovered. Also, so that if p is from Goexit, we // can restart its defer processing loop if a recovered panic tries // to jump past it. p.startPC = getcallerpc() p.startSP = unsafe.Pointer(getcallersp()) if p.deferreturn { p.sp = sp if s := (*savedOpenDeferState)(gp.param); s != nil { // recovery saved some state for us, so that we can resume // calling open-coded defers without unwinding the stack. gp.param = nil p.retpc = s.retpc p.deferBitsPtr = (*byte)(add(sp, s.deferBitsOffset)) p.slotsPtr = add(sp, s.slotsOffset) } return } p.link = gp._panic gp._panic = (*_panic)(noescape(unsafe.Pointer(p))) // Initialize state machine, and find the first frame with a defer. // // Note: We could use startPC and startSP here, but callers will // never have defer statements themselves. By starting at their // caller instead, we avoid needing to unwind through an extra // frame. It also somewhat simplifies the terminating condition for // deferreturn. p.lr, p.fp = pc, sp p.nextFrame() } // nextDefer returns the next deferred function to invoke, if any. // // Note: The "ok bool" result is necessary to correctly handle when // the deferred function itself was nil (e.g., "defer (func())(nil)"). func (p *_panic) nextDefer() (func(), bool) { gp := getg() if !p.deferreturn { if gp._panic != p { throw("bad panic stack") } if p.recovered { mcall(recovery) // does not return throw("recovery failed") } } // The assembler adjusts p.argp in wrapper functions that shouldn't // be visible to recover(), so we need to restore it each iteration. p.argp = add(p.startSP, sys.MinFrameSize) for { for p.deferBitsPtr != nil { bits := *p.deferBitsPtr // Check whether any open-coded defers are still pending. // // Note: We need to check this upfront (rather than after // clearing the top bit) because it's possible that Goexit // invokes a deferred call, and there were still more pending // open-coded defers in the frame; but then the deferred call // panic and invoked the remaining defers in the frame, before // recovering and restarting the Goexit loop. if bits == 0 { p.deferBitsPtr = nil break } // Find index of top bit set. i := 7 - uintptr(sys.LeadingZeros8(bits)) // Clear bit and store it back. bits &^= 1 << i *p.deferBitsPtr = bits return *(*func())(add(p.slotsPtr, i*goarch.PtrSize)), true } Recheck: if d := gp._defer; d != nil && d.sp == uintptr(p.sp) { if d.rangefunc { deferconvert(d) popDefer(gp) goto Recheck } fn := d.fn // TODO(mdempsky): Instead of having each deferproc call have // its own "deferreturn(); return" sequence, we should just make // them reuse the one we emit for open-coded defers. p.retpc = d.pc // Unlink and free. popDefer(gp) return fn, true } if !p.nextFrame() { return nil, false } } } // nextFrame finds the next frame that contains deferred calls, if any. func (p *_panic) nextFrame() (ok bool) { if p.lr == 0 { return false } gp := getg() systemstack(func() { var limit uintptr if d := gp._defer; d != nil { limit = d.sp } var u unwinder u.initAt(p.lr, uintptr(p.fp), 0, gp, 0) for { if !u.valid() { p.lr = 0 return // ok == false } // TODO(mdempsky): If we populate u.frame.fn.deferreturn for // every frame containing a defer (not just open-coded defers), // then we can simply loop until we find the next frame where // it's non-zero. 
if u.frame.sp == limit { break // found a frame with linked defers } if p.initOpenCodedDefers(u.frame.fn, unsafe.Pointer(u.frame.varp)) { break // found a frame with open-coded defers } u.next() } p.lr = u.frame.lr p.sp = unsafe.Pointer(u.frame.sp) p.fp = unsafe.Pointer(u.frame.fp) ok = true }) return } func (p *_panic) initOpenCodedDefers(fn funcInfo, varp unsafe.Pointer) bool { fd := funcdata(fn, abi.FUNCDATA_OpenCodedDeferInfo) if fd == nil { return false } if fn.deferreturn == 0 { throw("missing deferreturn") } deferBitsOffset, fd := readvarintUnsafe(fd) deferBitsPtr := (*uint8)(add(varp, -uintptr(deferBitsOffset))) if *deferBitsPtr == 0 { return false // has open-coded defers, but none pending } slotsOffset, fd := readvarintUnsafe(fd) p.retpc = fn.entry() + uintptr(fn.deferreturn) p.deferBitsPtr = deferBitsPtr p.slotsPtr = add(varp, -uintptr(slotsOffset)) return true } // The implementation of the predeclared function recover. // Cannot split the stack because it needs to reliably // find the stack segment of its caller. // // TODO(rsc): Once we commit to CopyStackAlways, // this doesn't need to be nosplit. // //go:nosplit func gorecover(argp uintptr) any { // Must be in a function running as part of a deferred call during the panic. // Must be called from the topmost function of the call // (the function used in the defer statement). // p.argp is the argument pointer of that topmost deferred function call. // Compare against argp reported by caller. // If they match, the caller is the one who can recover. gp := getg() p := gp._panic if p != nil && !p.goexit && !p.recovered && argp == uintptr(p.argp) { p.recovered = true return p.arg } return nil } //go:linkname sync_throw sync.throw func sync_throw(s string) { throw(s) } //go:linkname sync_fatal sync.fatal func sync_fatal(s string) { fatal(s) } // throw triggers a fatal error that dumps a stack trace and exits. // // throw should be used for runtime-internal fatal errors where Go itself, // rather than user code, may be at fault for the failure. // // NOTE: temporarily marked "go:noinline" pending investigation/fix of // issue #67274, so as to fix longtest builders. // // throw should be an internal detail, // but widely used packages access it using linkname. // Notable members of the hall of shame include: // - github.com/bytedance/sonic // - github.com/cockroachdb/pebble // - github.com/dgraph-io/ristretto // - github.com/outcaste-io/ristretto // - github.com/pingcap/br // - gvisor.dev/gvisor // - github.com/sagernet/gvisor // // Do not remove or change the type signature. // See go.dev/issue/67401. // //go:linkname throw //go:nosplit func throw(s string) { // Everything throw does should be recursively nosplit so it // can be called even when it's unsafe to grow the stack. systemstack(func() { print("fatal error: ") printindented(s) // logically printpanicval(s), but avoids convTstring write barrier print("\n") }) fatalthrow(throwTypeRuntime) } // fatal triggers a fatal error that dumps a stack trace and exits. // // fatal is equivalent to throw, but is used when user code is expected to be // at fault for the failure, such as racing map writes. // // fatal does not include runtime frames, system goroutines, or frame metadata // (fp, sp, pc) in the stack trace unless GOTRACEBACK=system or higher. // //go:nosplit func fatal(s string) { // Everything fatal does should be recursively nosplit so it // can be called even when it's unsafe to grow the stack. 
systemstack(func() { print("fatal error: ") printindented(s) // logically printpanicval(s), but avoids convTstring write barrier print("\n") }) fatalthrow(throwTypeUser) } // runningPanicDefers is non-zero while running deferred functions for panic. // This is used to try hard to get a panic stack trace out when exiting. var runningPanicDefers atomic.Uint32 // panicking is non-zero when crashing the program for an unrecovered panic. var panicking atomic.Uint32 // paniclk is held while printing the panic information and stack trace, // so that two concurrent panics don't overlap their output. var paniclk mutex // Unwind the stack after a deferred function calls recover // after a panic. Then arrange to continue running as though // the caller of the deferred function returned normally. // // However, if unwinding the stack would skip over a Goexit call, we // return into the Goexit loop instead, so it can continue processing // defers instead. func recovery(gp *g) { p := gp._panic pc, sp, fp := p.retpc, uintptr(p.sp), uintptr(p.fp) p0, saveOpenDeferState := p, p.deferBitsPtr != nil && *p.deferBitsPtr != 0 // Unwind the panic stack. for ; p != nil && uintptr(p.startSP) < sp; p = p.link { // Don't allow jumping past a pending Goexit. // Instead, have its _panic.start() call return again. // // TODO(mdempsky): In this case, Goexit will resume walking the // stack where it left off, which means it will need to rewalk // frames that we've already processed. // // There's a similar issue with nested panics, when the inner // panic supercedes the outer panic. Again, we end up needing to // walk the same stack frames. // // These are probably pretty rare occurrences in practice, and // they don't seem any worse than the existing logic. But if we // move the unwinding state into _panic, we could detect when we // run into where the last panic started, and then just pick up // where it left off instead. // // With how subtle defer handling is, this might not actually be // worthwhile though. if p.goexit { pc, sp = p.startPC, uintptr(p.startSP) saveOpenDeferState = false // goexit is unwinding the stack anyway break } runningPanicDefers.Add(-1) } gp._panic = p if p == nil { // must be done with signal gp.sig = 0 } if gp.param != nil { throw("unexpected gp.param") } if saveOpenDeferState { // If we're returning to deferreturn and there are more open-coded // defers for it to call, save enough state for it to be able to // pick up where p0 left off. gp.param = unsafe.Pointer(&savedOpenDeferState{ retpc: p0.retpc, // We need to save deferBitsPtr and slotsPtr too, but those are // stack pointers. To avoid issues around heap objects pointing // to the stack, save them as offsets from SP. deferBitsOffset: uintptr(unsafe.Pointer(p0.deferBitsPtr)) - uintptr(p0.sp), slotsOffset: uintptr(p0.slotsPtr) - uintptr(p0.sp), }) } // TODO(mdempsky): Currently, we rely on frames containing "defer" // to end with "CALL deferreturn; RET". This allows deferreturn to // finish running any pending defers in the frame. // // But we should be able to tell whether there are still pending // defers here. If there aren't, we can just jump directly to the // "RET" instruction. And if there are, we don't need an actual // "CALL deferreturn" instruction; we can simulate it with something // like: // // if usesLR { // lr = pc // } else { // sp -= sizeof(pc) // *(*uintptr)(sp) = pc // } // pc = funcPC(deferreturn) // // So that we effectively tail call into deferreturn, such that it // then returns to the simple "RET" epilogue. 
That would save the // overhead of the "deferreturn" call when there aren't actually any // pending defers left, and shrink the TEXT size of compiled // binaries. (Admittedly, both of these are modest savings.) // Ensure we're recovering within the appropriate stack. if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) { print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n") throw("bad recovery") } // Make the deferproc for this d return again, // this time returning 1. The calling function will // jump to the standard return epilogue. gp.sched.sp = sp gp.sched.pc = pc gp.sched.lr = 0 // Restore the bp on platforms that support frame pointers. // N.B. It's fine to not set anything for platforms that don't // support frame pointers, since nothing consumes them. switch { case goarch.IsAmd64 != 0: // on x86, fp actually points one word higher than the top of // the frame since the return address is saved on the stack by // the caller gp.sched.bp = fp - 2*goarch.PtrSize case goarch.IsArm64 != 0: // on arm64, the architectural bp points one word higher // than the sp. fp is totally useless to us here, because it // only gets us to the caller's fp. gp.sched.bp = sp - goarch.PtrSize } gp.sched.ret = 1 gogo(&gp.sched) } // fatalthrow implements an unrecoverable runtime throw. It freezes the // system, prints stack traces starting from its caller, and terminates the // process. // //go:nosplit func fatalthrow(t throwType) { pc := getcallerpc() sp := getcallersp() gp := getg() if gp.m.throwing == throwTypeNone { gp.m.throwing = t } // Switch to the system stack to avoid any stack growth, which may make // things worse if the runtime is in a bad state. systemstack(func() { if isSecureMode() { exit(2) } startpanic_m() if dopanic_m(gp, pc, sp) { // crash uses a decent amount of nosplit stack and we're already // low on stack in throw, so crash on the system stack (unlike // fatalpanic). crash() } exit(2) }) *(*int)(nil) = 0 // not reached } // fatalpanic implements an unrecoverable panic. It is like fatalthrow, except // that if msgs != nil, fatalpanic also prints panic messages and decrements // runningPanicDefers once main is blocked from exiting. // //go:nosplit func fatalpanic(msgs *_panic) { pc := getcallerpc() sp := getcallersp() gp := getg() var docrash bool // Switch to the system stack to avoid any stack growth, which // may make things worse if the runtime is in a bad state. systemstack(func() { if startpanic_m() && msgs != nil { // There were panic messages and startpanic_m // says it's okay to try to print them. // startpanic_m set panicking, which will // block main from exiting, so now OK to // decrement runningPanicDefers. runningPanicDefers.Add(-1) printpanics(msgs) } docrash = dopanic_m(gp, pc, sp) }) if docrash { // By crashing outside the above systemstack call, debuggers // will not be confused when generating a backtrace. // Function crash is marked nosplit to avoid stack growth. crash() } systemstack(func() { exit(2) }) *(*int)(nil) = 0 // not reached } // startpanic_m prepares for an unrecoverable panic. // // It returns true if panic messages should be printed, or false if // the runtime is in bad shape and should just print stacks. // // It must not have write barriers even though the write barrier // explicitly ignores writes once dying > 0. Write barriers still // assume that g.m.p != nil, and this function may not have P // in some contexts (e.g. a panic in a signal handler for a signal // sent to an M with no P). 
// //go:nowritebarrierrec func startpanic_m() bool { gp := getg() if mheap_.cachealloc.size == 0 { // very early print("runtime: panic before malloc heap initialized\n") } // Disallow malloc during an unrecoverable panic. A panic // could happen in a signal handler, or in a throw, or inside // malloc itself. We want to catch if an allocation ever does // happen (even if we're not in one of these situations). gp.m.mallocing++ // If we're dying because of a bad lock count, set it to a // good lock count so we don't recursively panic below. if gp.m.locks < 0 { gp.m.locks = 1 } switch gp.m.dying { case 0: // Setting dying >0 has the side-effect of disabling this G's writebuf. gp.m.dying = 1 panicking.Add(1) lock(&paniclk) if debug.schedtrace > 0 || debug.scheddetail > 0 { schedtrace(true) } freezetheworld() return true case 1: // Something failed while panicking. // Just print a stack trace and exit. gp.m.dying = 2 print("panic during panic\n") return false case 2: // This is a genuine bug in the runtime, we couldn't even // print the stack trace successfully. gp.m.dying = 3 print("stack trace unavailable\n") exit(4) fallthrough default: // Can't even print! Just exit. exit(5) return false // Need to return something. } } var didothers bool var deadlock mutex // gp is the crashing g running on this M, but may be a user G, while getg() is // always g0. func dopanic_m(gp *g, pc, sp uintptr) bool { if gp.sig != 0 { signame := signame(gp.sig) if signame != "" { print("[signal ", signame) } else { print("[signal ", hex(gp.sig)) } print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n") } level, all, docrash := gotraceback() if level > 0 { if gp != gp.m.curg { all = true } if gp != gp.m.g0 { print("\n") goroutineheader(gp) traceback(pc, sp, 0, gp) } else if level >= 2 || gp.m.throwing >= throwTypeRuntime { print("\nruntime stack:\n") traceback(pc, sp, 0, gp) } if !didothers && all { didothers = true tracebackothers(gp) } } unlock(&paniclk) if panicking.Add(-1) != 0 { // Some other m is panicking too. // Let it print what it needs to print. // Wait forever without chewing up cpu. // It will exit when it's done. lock(&deadlock) lock(&deadlock) } printDebugLog() return docrash } // canpanic returns false if a signal should throw instead of // panicking. // //go:nosplit func canpanic() bool { gp := getg() mp := acquirem() // Is it okay for gp to panic instead of crashing the program? // Yes, as long as it is running Go code, not runtime code, // and not stuck in a system call. if gp != mp.curg { releasem(mp) return false } // N.B. mp.locks != 1 instead of 0 to account for acquirem. if mp.locks != 1 || mp.mallocing != 0 || mp.throwing != throwTypeNone || mp.preemptoff != "" || mp.dying != 0 { releasem(mp) return false } status := readgstatus(gp) if status&^_Gscan != _Grunning || gp.syscallsp != 0 { releasem(mp) return false } if GOOS == "windows" && mp.libcallsp != 0 { releasem(mp) return false } releasem(mp) return true } // shouldPushSigpanic reports whether pc should be used as sigpanic's // return PC (pushing a frame for the call). Otherwise, it should be // left alone so that LR is used as sigpanic's return PC, effectively // replacing the top-most frame with sigpanic. This is used by // preparePanic. func shouldPushSigpanic(gp *g, pc, lr uintptr) bool { if pc == 0 { // Probably a call to a nil func. The old LR is more // useful in the stack trace. Not pushing the frame // will make the trace look like a call to sigpanic // instead. 
(Otherwise the trace will end at sigpanic // and we won't get to see who faulted.) return false } // If we don't recognize the PC as code, but we do recognize // the link register as code, then this assumes the panic was // caused by a call to non-code. In this case, we want to // ignore this call to make unwinding show the context. // // If we running C code, we're not going to recognize pc as a // Go function, so just assume it's good. Otherwise, traceback // may try to read a stale LR that looks like a Go code // pointer and wander into the woods. if gp.m.incgo || findfunc(pc).valid() { // This wasn't a bad call, so use PC as sigpanic's // return PC. return true } if findfunc(lr).valid() { // This was a bad call, but the LR is good, so use the // LR as sigpanic's return PC. return false } // Neither the PC or LR is good. Hopefully pushing a frame // will work. return true } // isAbortPC reports whether pc is the program counter at which // runtime.abort raises a signal. // // It is nosplit because it's part of the isgoexception // implementation. // //go:nosplit func isAbortPC(pc uintptr) bool { f := findfunc(pc) if !f.valid() { return false } return f.funcID == abi.FuncID_abort }
go/src/runtime/panic.go/0
{ "file_path": "go/src/runtime/panic.go", "repo_id": "go", "token_count": 15392 }
401
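panic.go above implements the machinery behind defer, panic, recover, and Goexit (deferproc, deferreturn, open-coded defers, gopanic, gorecover). The user-visible behavior it provides can be exercised with a small program like the following; this is an illustrative sketch of the language semantics, not runtime code.

package main

import "fmt"

// mightFail panics partway through; its deferred function recovers,
// which is serviced by gopanic/gorecover in runtime/panic.go.
func mightFail() (err error) {
	defer func() {
		if r := recover(); r != nil {
			// recover returns the value passed to panic and
			// stops the unwinding started by gopanic.
			err = fmt.Errorf("recovered: %v", r)
		}
	}()
	panic("boom")
}

func main() {
	if err := mightFail(); err != nil {
		fmt.Println(err) // recovered: boom
	}

	// As of Go 1.21, panic(nil) is observed by recover as a
	// *runtime.PanicNilError unless GODEBUG=panicnil=1 restores
	// the old behavior, as documented in panic.go above.
	defer func() {
		fmt.Printf("%T\n", recover()) // *runtime.PanicNilError
	}()
	panic(nil)
}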
// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package pprof writes runtime profiling data in the format expected // by the pprof visualization tool. // // # Profiling a Go program // // The first step to profiling a Go program is to enable profiling. // Support for profiling benchmarks built with the standard testing // package is built into go test. For example, the following command // runs benchmarks in the current directory and writes the CPU and // memory profiles to cpu.prof and mem.prof: // // go test -cpuprofile cpu.prof -memprofile mem.prof -bench . // // To add equivalent profiling support to a standalone program, add // code like the following to your main function: // // var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`") // var memprofile = flag.String("memprofile", "", "write memory profile to `file`") // // func main() { // flag.Parse() // if *cpuprofile != "" { // f, err := os.Create(*cpuprofile) // if err != nil { // log.Fatal("could not create CPU profile: ", err) // } // defer f.Close() // error handling omitted for example // if err := pprof.StartCPUProfile(f); err != nil { // log.Fatal("could not start CPU profile: ", err) // } // defer pprof.StopCPUProfile() // } // // // ... rest of the program ... // // if *memprofile != "" { // f, err := os.Create(*memprofile) // if err != nil { // log.Fatal("could not create memory profile: ", err) // } // defer f.Close() // error handling omitted for example // runtime.GC() // get up-to-date statistics // if err := pprof.WriteHeapProfile(f); err != nil { // log.Fatal("could not write memory profile: ", err) // } // } // } // // There is also a standard HTTP interface to profiling data. Adding // the following line will install handlers under the /debug/pprof/ // URL to download live profiles: // // import _ "net/http/pprof" // // See the net/http/pprof package for more details. // // Profiles can then be visualized with the pprof tool: // // go tool pprof cpu.prof // // There are many commands available from the pprof command line. // Commonly used commands include "top", which prints a summary of the // top program hot-spots, and "web", which opens an interactive graph // of hot-spots and their call graphs. Use "help" for information on // all pprof commands. // // For more information about pprof, see // https://github.com/google/pprof/blob/main/doc/README.md. package pprof import ( "bufio" "cmp" "fmt" "internal/abi" "internal/profilerecord" "io" "runtime" "slices" "sort" "strings" "sync" "text/tabwriter" "time" "unsafe" ) // BUG(rsc): Profiles are only as good as the kernel support used to generate them. // See https://golang.org/issue/13841 for details about known problems. // A Profile is a collection of stack traces showing the call sequences // that led to instances of a particular event, such as allocation. // Packages can create and maintain their own profiles; the most common // use is for tracking resources that must be explicitly closed, such as files // or network connections. // // A Profile's methods can be called from multiple goroutines simultaneously. // // Each Profile has a unique name. 
A few profiles are predefined: // // goroutine - stack traces of all current goroutines // heap - a sampling of memory allocations of live objects // allocs - a sampling of all past memory allocations // threadcreate - stack traces that led to the creation of new OS threads // block - stack traces that led to blocking on synchronization primitives // mutex - stack traces of holders of contended mutexes // // These predefined profiles maintain themselves and panic on an explicit // [Profile.Add] or [Profile.Remove] method call. // // The CPU profile is not available as a Profile. It has a special API, // the [StartCPUProfile] and [StopCPUProfile] functions, because it streams // output to a writer during profiling. // // # Heap profile // // The heap profile reports statistics as of the most recently completed // garbage collection; it elides more recent allocation to avoid skewing // the profile away from live data and toward garbage. // If there has been no garbage collection at all, the heap profile reports // all known allocations. This exception helps mainly in programs running // without garbage collection enabled, usually for debugging purposes. // // The heap profile tracks both the allocation sites for all live objects in // the application memory and for all objects allocated since the program start. // Pprof's -inuse_space, -inuse_objects, -alloc_space, and -alloc_objects // flags select which to display, defaulting to -inuse_space (live objects, // scaled by size). // // # Allocs profile // // The allocs profile is the same as the heap profile but changes the default // pprof display to -alloc_space, the total number of bytes allocated since // the program began (including garbage-collected bytes). // // # Block profile // // The block profile tracks time spent blocked on synchronization primitives, // such as [sync.Mutex], [sync.RWMutex], [sync.WaitGroup], [sync.Cond], and // channel send/receive/select. // // Stack traces correspond to the location that blocked (for example, // [sync.Mutex.Lock]). // // Sample values correspond to cumulative time spent blocked at that stack // trace, subject to time-based sampling specified by // [runtime.SetBlockProfileRate]. // // # Mutex profile // // The mutex profile tracks contention on mutexes, such as [sync.Mutex], // [sync.RWMutex], and runtime-internal locks. // // Stack traces correspond to the end of the critical section causing // contention. For example, a lock held for a long time while other goroutines // are waiting to acquire the lock will report contention when the lock is // finally unlocked (that is, at [sync.Mutex.Unlock]). // // Sample values correspond to the approximate cumulative time other goroutines // spent blocked waiting for the lock, subject to event-based sampling // specified by [runtime.SetMutexProfileFraction]. For example, if a caller // holds a lock for 1s while 5 other goroutines are waiting for the entire // second to acquire the lock, its unlock call stack will report 5s of // contention. // // Runtime-internal locks are always reported at the location // "runtime._LostContendedRuntimeLock". More detailed stack traces for // runtime-internal locks can be obtained by setting // `GODEBUG=runtimecontentionstacks=1` (see package [runtime] docs for // caveats). type Profile struct { name string mu sync.Mutex m map[any][]uintptr count func() int write func(io.Writer, int) error } // profiles records all registered profiles. 
var profiles struct { mu sync.Mutex m map[string]*Profile } var goroutineProfile = &Profile{ name: "goroutine", count: countGoroutine, write: writeGoroutine, } var threadcreateProfile = &Profile{ name: "threadcreate", count: countThreadCreate, write: writeThreadCreate, } var heapProfile = &Profile{ name: "heap", count: countHeap, write: writeHeap, } var allocsProfile = &Profile{ name: "allocs", count: countHeap, // identical to heap profile write: writeAlloc, } var blockProfile = &Profile{ name: "block", count: countBlock, write: writeBlock, } var mutexProfile = &Profile{ name: "mutex", count: countMutex, write: writeMutex, } func lockProfiles() { profiles.mu.Lock() if profiles.m == nil { // Initial built-in profiles. profiles.m = map[string]*Profile{ "goroutine": goroutineProfile, "threadcreate": threadcreateProfile, "heap": heapProfile, "allocs": allocsProfile, "block": blockProfile, "mutex": mutexProfile, } } } func unlockProfiles() { profiles.mu.Unlock() } // NewProfile creates a new profile with the given name. // If a profile with that name already exists, NewProfile panics. // The convention is to use a 'import/path.' prefix to create // separate name spaces for each package. // For compatibility with various tools that read pprof data, // profile names should not contain spaces. func NewProfile(name string) *Profile { lockProfiles() defer unlockProfiles() if name == "" { panic("pprof: NewProfile with empty name") } if profiles.m[name] != nil { panic("pprof: NewProfile name already in use: " + name) } p := &Profile{ name: name, m: map[any][]uintptr{}, } profiles.m[name] = p return p } // Lookup returns the profile with the given name, or nil if no such profile exists. func Lookup(name string) *Profile { lockProfiles() defer unlockProfiles() return profiles.m[name] } // Profiles returns a slice of all the known profiles, sorted by name. func Profiles() []*Profile { lockProfiles() defer unlockProfiles() all := make([]*Profile, 0, len(profiles.m)) for _, p := range profiles.m { all = append(all, p) } slices.SortFunc(all, func(a, b *Profile) int { return strings.Compare(a.name, b.name) }) return all } // Name returns this profile's name, which can be passed to [Lookup] to reobtain the profile. func (p *Profile) Name() string { return p.name } // Count returns the number of execution stacks currently in the profile. func (p *Profile) Count() int { p.mu.Lock() defer p.mu.Unlock() if p.count != nil { return p.count() } return len(p.m) } // Add adds the current execution stack to the profile, associated with value. // Add stores value in an internal map, so value must be suitable for use as // a map key and will not be garbage collected until the corresponding // call to [Profile.Remove]. Add panics if the profile already contains a stack for value. // // The skip parameter has the same meaning as [runtime.Caller]'s skip // and controls where the stack trace begins. Passing skip=0 begins the // trace in the function calling Add. For example, given this // execution stack: // // Add // called from rpc.NewClient // called from mypkg.Run // called from main.main // // Passing skip=0 begins the stack trace at the call to Add inside rpc.NewClient. // Passing skip=1 begins the stack trace at the call to NewClient inside mypkg.Run. 
func (p *Profile) Add(value any, skip int) { if p.name == "" { panic("pprof: use of uninitialized Profile") } if p.write != nil { panic("pprof: Add called on built-in Profile " + p.name) } stk := make([]uintptr, 32) n := runtime.Callers(skip+1, stk[:]) stk = stk[:n] if len(stk) == 0 { // The value for skip is too large, and there's no stack trace to record. stk = []uintptr{abi.FuncPCABIInternal(lostProfileEvent)} } p.mu.Lock() defer p.mu.Unlock() if p.m[value] != nil { panic("pprof: Profile.Add of duplicate value") } p.m[value] = stk } // Remove removes the execution stack associated with value from the profile. // It is a no-op if the value is not in the profile. func (p *Profile) Remove(value any) { p.mu.Lock() defer p.mu.Unlock() delete(p.m, value) } // WriteTo writes a pprof-formatted snapshot of the profile to w. // If a write to w returns an error, WriteTo returns that error. // Otherwise, WriteTo returns nil. // // The debug parameter enables additional output. // Passing debug=0 writes the gzip-compressed protocol buffer described // in https://github.com/google/pprof/tree/main/proto#overview. // Passing debug=1 writes the legacy text format with comments // translating addresses to function names and line numbers, so that a // programmer can read the profile without tools. // // The predefined profiles may assign meaning to other debug values; // for example, when printing the "goroutine" profile, debug=2 means to // print the goroutine stacks in the same form that a Go program uses // when dying due to an unrecovered panic. func (p *Profile) WriteTo(w io.Writer, debug int) error { if p.name == "" { panic("pprof: use of zero Profile") } if p.write != nil { return p.write(w, debug) } // Obtain consistent snapshot under lock; then process without lock. p.mu.Lock() all := make([][]uintptr, 0, len(p.m)) for _, stk := range p.m { all = append(all, stk) } p.mu.Unlock() // Map order is non-deterministic; make output deterministic. slices.SortFunc(all, slices.Compare) return printCountProfile(w, debug, p.name, stackProfile(all)) } type stackProfile [][]uintptr func (x stackProfile) Len() int { return len(x) } func (x stackProfile) Stack(i int) []uintptr { return x[i] } func (x stackProfile) Label(i int) *labelMap { return nil } // A countProfile is a set of stack traces to be printed as counts // grouped by stack trace. There are multiple implementations: // all that matters is that we can find out how many traces there are // and obtain each trace in turn. type countProfile interface { Len() int Stack(i int) []uintptr Label(i int) *labelMap } // expandInlinedFrames copies the call stack from pcs into dst, expanding any // PCs corresponding to inlined calls into the corresponding PCs for the inlined // functions. Returns the number of frames copied to dst. func expandInlinedFrames(dst, pcs []uintptr) int { cf := runtime.CallersFrames(pcs) var n int for n < len(dst) { f, more := cf.Next() // f.PC is a "call PC", but later consumers will expect // "return PCs" dst[n] = f.PC + 1 n++ if !more { break } } return n } // printCountCycleProfile outputs block profile records (for block or mutex profiles) // as the pprof-proto format output. Translations from cycle count to time duration // are done because The proto expects count and time (nanoseconds) instead of count // and the number of cycles for block, contention profiles. func printCountCycleProfile(w io.Writer, countName, cycleName string, records []profilerecord.BlockProfileRecord) error { // Output profile in protobuf form. 
b := newProfileBuilder(w) b.pbValueType(tagProfile_PeriodType, countName, "count") b.pb.int64Opt(tagProfile_Period, 1) b.pbValueType(tagProfile_SampleType, countName, "count") b.pbValueType(tagProfile_SampleType, cycleName, "nanoseconds") cpuGHz := float64(pprof_cyclesPerSecond()) / 1e9 values := []int64{0, 0} var locs []uint64 expandedStack := pprof_makeProfStack() for _, r := range records { values[0] = r.Count values[1] = int64(float64(r.Cycles) / cpuGHz) // For count profiles, all stack addresses are // return PCs, which is what appendLocsForStack expects. n := expandInlinedFrames(expandedStack, r.Stack) locs = b.appendLocsForStack(locs[:0], expandedStack[:n]) b.pbSample(values, locs, nil) } b.build() return nil } // printCountProfile prints a countProfile at the specified debug level. // The profile will be in compressed proto format unless debug is nonzero. func printCountProfile(w io.Writer, debug int, name string, p countProfile) error { // Build count of each stack. var buf strings.Builder key := func(stk []uintptr, lbls *labelMap) string { buf.Reset() fmt.Fprintf(&buf, "@") for _, pc := range stk { fmt.Fprintf(&buf, " %#x", pc) } if lbls != nil { buf.WriteString("\n# labels: ") buf.WriteString(lbls.String()) } return buf.String() } count := map[string]int{} index := map[string]int{} var keys []string n := p.Len() for i := 0; i < n; i++ { k := key(p.Stack(i), p.Label(i)) if count[k] == 0 { index[k] = i keys = append(keys, k) } count[k]++ } sort.Sort(&keysByCount{keys, count}) if debug > 0 { // Print debug profile in legacy format tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0) fmt.Fprintf(tw, "%s profile: total %d\n", name, p.Len()) for _, k := range keys { fmt.Fprintf(tw, "%d %s\n", count[k], k) printStackRecord(tw, p.Stack(index[k]), false) } return tw.Flush() } // Output profile in protobuf form. b := newProfileBuilder(w) b.pbValueType(tagProfile_PeriodType, name, "count") b.pb.int64Opt(tagProfile_Period, 1) b.pbValueType(tagProfile_SampleType, name, "count") values := []int64{0} var locs []uint64 for _, k := range keys { values[0] = int64(count[k]) // For count profiles, all stack addresses are // return PCs, which is what appendLocsForStack expects. locs = b.appendLocsForStack(locs[:0], p.Stack(index[k])) idx := index[k] var labels func() if p.Label(idx) != nil { labels = func() { for k, v := range *p.Label(idx) { b.pbLabel(tagSample_Label, k, v, 0) } } } b.pbSample(values, locs, labels) } b.build() return nil } // keysByCount sorts keys with higher counts first, breaking ties by key string order. type keysByCount struct { keys []string count map[string]int } func (x *keysByCount) Len() int { return len(x.keys) } func (x *keysByCount) Swap(i, j int) { x.keys[i], x.keys[j] = x.keys[j], x.keys[i] } func (x *keysByCount) Less(i, j int) bool { ki, kj := x.keys[i], x.keys[j] ci, cj := x.count[ki], x.count[kj] if ci != cj { return ci > cj } return ki < kj } // printStackRecord prints the function + source line information // for a single stack trace. func printStackRecord(w io.Writer, stk []uintptr, allFrames bool) { show := allFrames frames := runtime.CallersFrames(stk) for { frame, more := frames.Next() name := frame.Function if name == "" { show = true fmt.Fprintf(w, "#\t%#x\n", frame.PC) } else if name != "runtime.goexit" && (show || !strings.HasPrefix(name, "runtime.")) { // Hide runtime.goexit and any runtime functions at the beginning. // This is useful mainly for allocation traces. 
show = true fmt.Fprintf(w, "#\t%#x\t%s+%#x\t%s:%d\n", frame.PC, name, frame.PC-frame.Entry, frame.File, frame.Line) } if !more { break } } if !show { // We didn't print anything; do it again, // and this time include runtime functions. printStackRecord(w, stk, true) return } fmt.Fprintf(w, "\n") } // Interface to system profiles. // WriteHeapProfile is shorthand for [Lookup]("heap").WriteTo(w, 0). // It is preserved for backwards compatibility. func WriteHeapProfile(w io.Writer) error { return writeHeap(w, 0) } // countHeap returns the number of records in the heap profile. func countHeap() int { n, _ := runtime.MemProfile(nil, true) return n } // writeHeap writes the current runtime heap profile to w. func writeHeap(w io.Writer, debug int) error { return writeHeapInternal(w, debug, "") } // writeAlloc writes the current runtime heap profile to w // with the total allocation space as the default sample type. func writeAlloc(w io.Writer, debug int) error { return writeHeapInternal(w, debug, "alloc_space") } func writeHeapInternal(w io.Writer, debug int, defaultSampleType string) error { var memStats *runtime.MemStats if debug != 0 { // Read mem stats first, so that our other allocations // do not appear in the statistics. memStats = new(runtime.MemStats) runtime.ReadMemStats(memStats) } // Find out how many records there are (the call // pprof_memProfileInternal(nil, true) below), // allocate that many records, and get the data. // There's a race—more records might be added between // the two calls—so allocate a few extra records for safety // and also try again if we're very unlucky. // The loop should only execute one iteration in the common case. var p []profilerecord.MemProfileRecord n, ok := pprof_memProfileInternal(nil, true) for { // Allocate room for a slightly bigger profile, // in case a few more entries have been added // since the call to MemProfile. p = make([]profilerecord.MemProfileRecord, n+50) n, ok = pprof_memProfileInternal(p, true) if ok { p = p[0:n] break } // Profile grew; try again. } if debug == 0 { return writeHeapProto(w, p, int64(runtime.MemProfileRate), defaultSampleType) } slices.SortFunc(p, func(a, b profilerecord.MemProfileRecord) int { return cmp.Compare(a.InUseBytes(), b.InUseBytes()) }) b := bufio.NewWriter(w) tw := tabwriter.NewWriter(b, 1, 8, 1, '\t', 0) w = tw var total runtime.MemProfileRecord for i := range p { r := &p[i] total.AllocBytes += r.AllocBytes total.AllocObjects += r.AllocObjects total.FreeBytes += r.FreeBytes total.FreeObjects += r.FreeObjects } // Technically the rate is MemProfileRate not 2*MemProfileRate, // but early versions of the C++ heap profiler reported 2*MemProfileRate, // so that's what pprof has come to expect. rate := 2 * runtime.MemProfileRate // pprof reads a profile with alloc == inuse as being a "2-column" profile // (objects and bytes, not distinguishing alloc from inuse), // but then such a profile can't be merged using pprof *.prof with // other 4-column profiles where alloc != inuse. // The easiest way to avoid this bug is to adjust allocBytes so it's never == inuseBytes. // pprof doesn't use these header values anymore except for checking equality. 
inUseBytes := total.InUseBytes() allocBytes := total.AllocBytes if inUseBytes == allocBytes { allocBytes++ } fmt.Fprintf(w, "heap profile: %d: %d [%d: %d] @ heap/%d\n", total.InUseObjects(), inUseBytes, total.AllocObjects, allocBytes, rate) for i := range p { r := &p[i] fmt.Fprintf(w, "%d: %d [%d: %d] @", r.InUseObjects(), r.InUseBytes(), r.AllocObjects, r.AllocBytes) for _, pc := range r.Stack { fmt.Fprintf(w, " %#x", pc) } fmt.Fprintf(w, "\n") printStackRecord(w, r.Stack, false) } // Print memstats information too. // Pprof will ignore, but useful for people s := memStats fmt.Fprintf(w, "\n# runtime.MemStats\n") fmt.Fprintf(w, "# Alloc = %d\n", s.Alloc) fmt.Fprintf(w, "# TotalAlloc = %d\n", s.TotalAlloc) fmt.Fprintf(w, "# Sys = %d\n", s.Sys) fmt.Fprintf(w, "# Lookups = %d\n", s.Lookups) fmt.Fprintf(w, "# Mallocs = %d\n", s.Mallocs) fmt.Fprintf(w, "# Frees = %d\n", s.Frees) fmt.Fprintf(w, "# HeapAlloc = %d\n", s.HeapAlloc) fmt.Fprintf(w, "# HeapSys = %d\n", s.HeapSys) fmt.Fprintf(w, "# HeapIdle = %d\n", s.HeapIdle) fmt.Fprintf(w, "# HeapInuse = %d\n", s.HeapInuse) fmt.Fprintf(w, "# HeapReleased = %d\n", s.HeapReleased) fmt.Fprintf(w, "# HeapObjects = %d\n", s.HeapObjects) fmt.Fprintf(w, "# Stack = %d / %d\n", s.StackInuse, s.StackSys) fmt.Fprintf(w, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys) fmt.Fprintf(w, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys) fmt.Fprintf(w, "# BuckHashSys = %d\n", s.BuckHashSys) fmt.Fprintf(w, "# GCSys = %d\n", s.GCSys) fmt.Fprintf(w, "# OtherSys = %d\n", s.OtherSys) fmt.Fprintf(w, "# NextGC = %d\n", s.NextGC) fmt.Fprintf(w, "# LastGC = %d\n", s.LastGC) fmt.Fprintf(w, "# PauseNs = %d\n", s.PauseNs) fmt.Fprintf(w, "# PauseEnd = %d\n", s.PauseEnd) fmt.Fprintf(w, "# NumGC = %d\n", s.NumGC) fmt.Fprintf(w, "# NumForcedGC = %d\n", s.NumForcedGC) fmt.Fprintf(w, "# GCCPUFraction = %v\n", s.GCCPUFraction) fmt.Fprintf(w, "# DebugGC = %v\n", s.DebugGC) // Also flush out MaxRSS on supported platforms. addMaxRSS(w) tw.Flush() return b.Flush() } // countThreadCreate returns the size of the current ThreadCreateProfile. func countThreadCreate() int { n, _ := runtime.ThreadCreateProfile(nil) return n } // writeThreadCreate writes the current runtime ThreadCreateProfile to w. func writeThreadCreate(w io.Writer, debug int) error { // Until https://golang.org/issues/6104 is addressed, wrap // ThreadCreateProfile because there's no point in tracking labels when we // don't get any stack-traces. return writeRuntimeProfile(w, debug, "threadcreate", func(p []profilerecord.StackRecord, _ []unsafe.Pointer) (n int, ok bool) { return pprof_threadCreateInternal(p) }) } // countGoroutine returns the number of goroutines. func countGoroutine() int { return runtime.NumGoroutine() } // writeGoroutine writes the current runtime GoroutineProfile to w. func writeGoroutine(w io.Writer, debug int) error { if debug >= 2 { return writeGoroutineStacks(w) } return writeRuntimeProfile(w, debug, "goroutine", pprof_goroutineProfileWithLabels) } func writeGoroutineStacks(w io.Writer) error { // We don't know how big the buffer needs to be to collect // all the goroutines. Start with 1 MB and try a few times, doubling each time. // Give up and use a truncated trace if 64 MB is not enough. buf := make([]byte, 1<<20) for i := 0; ; i++ { n := runtime.Stack(buf, true) if n < len(buf) { buf = buf[:n] break } if len(buf) >= 64<<20 { // Filled 64 MB - stop there. 
break } buf = make([]byte, 2*len(buf)) } _, err := w.Write(buf) return err } func writeRuntimeProfile(w io.Writer, debug int, name string, fetch func([]profilerecord.StackRecord, []unsafe.Pointer) (int, bool)) error { // Find out how many records there are (fetch(nil)), // allocate that many records, and get the data. // There's a race—more records might be added between // the two calls—so allocate a few extra records for safety // and also try again if we're very unlucky. // The loop should only execute one iteration in the common case. var p []profilerecord.StackRecord var labels []unsafe.Pointer n, ok := fetch(nil, nil) for { // Allocate room for a slightly bigger profile, // in case a few more entries have been added // since the call to ThreadProfile. p = make([]profilerecord.StackRecord, n+10) labels = make([]unsafe.Pointer, n+10) n, ok = fetch(p, labels) if ok { p = p[0:n] break } // Profile grew; try again. } return printCountProfile(w, debug, name, &runtimeProfile{p, labels}) } type runtimeProfile struct { stk []profilerecord.StackRecord labels []unsafe.Pointer } func (p *runtimeProfile) Len() int { return len(p.stk) } func (p *runtimeProfile) Stack(i int) []uintptr { return p.stk[i].Stack } func (p *runtimeProfile) Label(i int) *labelMap { return (*labelMap)(p.labels[i]) } var cpu struct { sync.Mutex profiling bool done chan bool } // StartCPUProfile enables CPU profiling for the current process. // While profiling, the profile will be buffered and written to w. // StartCPUProfile returns an error if profiling is already enabled. // // On Unix-like systems, StartCPUProfile does not work by default for // Go code built with -buildmode=c-archive or -buildmode=c-shared. // StartCPUProfile relies on the SIGPROF signal, but that signal will // be delivered to the main program's SIGPROF signal handler (if any) // not to the one used by Go. To make it work, call [os/signal.Notify] // for [syscall.SIGPROF], but note that doing so may break any profiling // being done by the main program. func StartCPUProfile(w io.Writer) error { // The runtime routines allow a variable profiling rate, // but in practice operating systems cannot trigger signals // at more than about 500 Hz, and our processing of the // signal is not cheap (mostly getting the stack trace). // 100 Hz is a reasonable choice: it is frequent enough to // produce useful data, rare enough not to bog down the // system, and a nice round number to make it easy to // convert sample counts to seconds. Instead of requiring // each client to specify the frequency, we hard code it. const hz = 100 cpu.Lock() defer cpu.Unlock() if cpu.done == nil { cpu.done = make(chan bool) } // Double-check. if cpu.profiling { return fmt.Errorf("cpu profiling already in use") } cpu.profiling = true runtime.SetCPUProfileRate(hz) go profileWriter(w) return nil } // readProfile, provided by the runtime, returns the next chunk of // binary CPU profiling stack trace data, blocking until data is available. // If profiling is turned off and all the profile data accumulated while it was // on has been returned, readProfile returns eof=true. // The caller must save the returned data and tags before calling readProfile again. 
func readProfile() (data []uint64, tags []unsafe.Pointer, eof bool) func profileWriter(w io.Writer) { b := newProfileBuilder(w) var err error for { time.Sleep(100 * time.Millisecond) data, tags, eof := readProfile() if e := b.addCPUData(data, tags); e != nil && err == nil { err = e } if eof { break } } if err != nil { // The runtime should never produce an invalid or truncated profile. // It drops records that can't fit into its log buffers. panic("runtime/pprof: converting profile: " + err.Error()) } b.build() cpu.done <- true } // StopCPUProfile stops the current CPU profile, if any. // StopCPUProfile only returns after all the writes for the // profile have completed. func StopCPUProfile() { cpu.Lock() defer cpu.Unlock() if !cpu.profiling { return } cpu.profiling = false runtime.SetCPUProfileRate(0) <-cpu.done } // countBlock returns the number of records in the blocking profile. func countBlock() int { n, _ := runtime.BlockProfile(nil) return n } // countMutex returns the number of records in the mutex profile. func countMutex() int { n, _ := runtime.MutexProfile(nil) return n } // writeBlock writes the current blocking profile to w. func writeBlock(w io.Writer, debug int) error { return writeProfileInternal(w, debug, "contention", pprof_blockProfileInternal) } // writeMutex writes the current mutex profile to w. func writeMutex(w io.Writer, debug int) error { return writeProfileInternal(w, debug, "mutex", pprof_mutexProfileInternal) } // writeProfileInternal writes the current blocking or mutex profile depending on the passed parameters. func writeProfileInternal(w io.Writer, debug int, name string, runtimeProfile func([]profilerecord.BlockProfileRecord) (int, bool)) error { var p []profilerecord.BlockProfileRecord n, ok := runtimeProfile(nil) for { p = make([]profilerecord.BlockProfileRecord, n+50) n, ok = runtimeProfile(p) if ok { p = p[:n] break } } slices.SortFunc(p, func(a, b profilerecord.BlockProfileRecord) int { return cmp.Compare(b.Cycles, a.Cycles) }) if debug <= 0 { return printCountCycleProfile(w, "contentions", "delay", p) } b := bufio.NewWriter(w) tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0) w = tw fmt.Fprintf(w, "--- %v:\n", name) fmt.Fprintf(w, "cycles/second=%v\n", pprof_cyclesPerSecond()) if name == "mutex" { fmt.Fprintf(w, "sampling period=%d\n", runtime.SetMutexProfileFraction(-1)) } expandedStack := pprof_makeProfStack() for i := range p { r := &p[i] fmt.Fprintf(w, "%v %v @", r.Cycles, r.Count) n := expandInlinedFrames(expandedStack, r.Stack) stack := expandedStack[:n] for _, pc := range stack { fmt.Fprintf(w, " %#x", pc) } fmt.Fprint(w, "\n") if debug > 0 { printStackRecord(w, stack, true) } } if tw != nil { tw.Flush() } return b.Flush() } //go:linkname pprof_goroutineProfileWithLabels runtime.pprof_goroutineProfileWithLabels func pprof_goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) //go:linkname pprof_cyclesPerSecond runtime/pprof.runtime_cyclesPerSecond func pprof_cyclesPerSecond() int64 //go:linkname pprof_memProfileInternal runtime.pprof_memProfileInternal func pprof_memProfileInternal(p []profilerecord.MemProfileRecord, inuseZero bool) (n int, ok bool) //go:linkname pprof_blockProfileInternal runtime.pprof_blockProfileInternal func pprof_blockProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool) //go:linkname pprof_mutexProfileInternal runtime.pprof_mutexProfileInternal func pprof_mutexProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool) //go:linkname 
pprof_threadCreateInternal runtime.pprof_threadCreateInternal func pprof_threadCreateInternal(p []profilerecord.StackRecord) (n int, ok bool) //go:linkname pprof_fpunwindExpand runtime.pprof_fpunwindExpand func pprof_fpunwindExpand(dst, src []uintptr) int //go:linkname pprof_makeProfStack runtime.pprof_makeProfStack func pprof_makeProfStack() []uintptr
go/src/runtime/pprof/pprof.go/0
{ "file_path": "go/src/runtime/pprof/pprof.go", "repo_id": "go", "token_count": 10897 }
402
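As a usage sketch for the Profile and CPU-profiling APIs implemented in pprof.go above: the program below registers a custom profile, records and removes entries, and brackets the work with StartCPUProfile/StopCPUProfile. The profile name, the conn type, and the output file names are invented for illustration and are not part of the package.

package main

import (
	"os"
	"runtime/pprof"
)

// connProfile records the stack that opened each live connection.
// The name is arbitrary for this sketch; the "example.com/" prefix
// follows the import-path convention mentioned in NewProfile's doc.
var connProfile = pprof.NewProfile("example.com/conns")

type conn struct{ id int }

func open(id int) *conn {
	c := &conn{id: id}
	// skip=1 starts the recorded stack at open's caller.
	connProfile.Add(c, 1)
	return c
}

func (c *conn) close() {
	connProfile.Remove(c)
}

func main() {
	// CPU samples are buffered and written to cpu.out when StopCPUProfile runs.
	f, err := os.Create("cpu.out")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := pprof.StartCPUProfile(f); err != nil {
		panic(err)
	}
	defer pprof.StopCPUProfile()

	c := open(1)
	defer c.close()

	// debug=1 selects the legacy text format described in WriteTo.
	connProfile.WriteTo(os.Stdout, 1)
}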
These binaries were generated by: $ cat empty.s .global _start _start: $ as --32 -o empty.o empty.s && ld --build-id -m elf_i386 -o test32 empty.o $ as --64 -o empty.o empty.s && ld --build-id -o test64 empty.o $ powerpc-linux-gnu-as -o empty.o empty.s && powerpc-linux-gnu-ld --build-id -o test32be empty.o $ powerpc64-linux-gnu-as -o empty.o empty.s && powerpc64-linux-gnu-ld --build-id -o test64be empty.o
go/src/runtime/pprof/testdata/README/0
{ "file_path": "go/src/runtime/pprof/testdata/README", "repo_id": "go", "token_count": 164 }
403
// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package race_test import "unsafe" // golang.org/issue/12225 // The test is that this compiles at all. //go:noinline func convert(s string) []byte { return []byte(s) } func issue12225() { println(*(*int)(unsafe.Pointer(&convert("")[0]))) println(*(*int)(unsafe.Pointer(&[]byte("")[0]))) }
go/src/runtime/race/testdata/issue12225_test.go/0
{ "file_path": "go/src/runtime/race/testdata/issue12225_test.go", "repo_id": "go", "token_count": 156 }
404
// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build race package race_test import ( "sync" "testing" "time" ) func TestTimers(t *testing.T) { const goroutines = 8 var wg sync.WaitGroup wg.Add(goroutines) var mu sync.Mutex for i := 0; i < goroutines; i++ { go func() { defer wg.Done() ticker := time.NewTicker(1) defer ticker.Stop() for c := 0; c < 1000; c++ { <-ticker.C mu.Lock() mu.Unlock() } }() } wg.Wait() }
go/src/runtime/race/timer_test.go/0
{ "file_path": "go/src/runtime/race/timer_test.go", "repo_id": "go", "token_count": 252 }
405
// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. #include "textflag.h" #include "cgo/abi_arm64.h" TEXT _rt0_arm64_darwin(SB),NOSPLIT|NOFRAME,$0 MOVD $runtime·rt0_go(SB), R2 BL (R2) exit: MOVD $0, R0 MOVD $1, R16 // sys_exit SVC $0x80 B exit // When linking with -buildmode=c-archive or -buildmode=c-shared, // this symbol is called from a global initialization function. // // Note that all currently shipping darwin/arm64 platforms require // cgo and do not support c-shared. TEXT _rt0_arm64_darwin_lib(SB),NOSPLIT,$152 // Preserve callee-save registers. SAVE_R19_TO_R28(8) SAVE_F8_TO_F15(88) MOVD R0, _rt0_arm64_darwin_lib_argc<>(SB) MOVD R1, _rt0_arm64_darwin_lib_argv<>(SB) MOVD $0, g // initialize g to nil // Synchronous initialization. MOVD $runtime·libpreinit(SB), R4 BL (R4) // Create a new thread to do the runtime initialization and return. MOVD _cgo_sys_thread_create(SB), R4 MOVD $_rt0_arm64_darwin_lib_go(SB), R0 MOVD $0, R1 SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved. BL (R4) ADD $16, RSP // Restore callee-save registers. RESTORE_R19_TO_R28(8) RESTORE_F8_TO_F15(88) RET TEXT _rt0_arm64_darwin_lib_go(SB),NOSPLIT,$0 MOVD _rt0_arm64_darwin_lib_argc<>(SB), R0 MOVD _rt0_arm64_darwin_lib_argv<>(SB), R1 MOVD $runtime·rt0_go(SB), R4 B (R4) DATA _rt0_arm64_darwin_lib_argc<>(SB)/8, $0 GLOBL _rt0_arm64_darwin_lib_argc<>(SB),NOPTR, $8 DATA _rt0_arm64_darwin_lib_argv<>(SB)/8, $0 GLOBL _rt0_arm64_darwin_lib_argv<>(SB),NOPTR, $8 // external linking entry point. TEXT main(SB),NOSPLIT|NOFRAME,$0 JMP _rt0_arm64_darwin(SB)
go/src/runtime/rt0_darwin_arm64.s/0
{ "file_path": "go/src/runtime/rt0_darwin_arm64.s", "repo_id": "go", "token_count": 808 }
406
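The _rt0_arm64_darwin_lib entry point above only runs when Go code is linked into a C program. A minimal way to produce such an archive, shown in the same command style as the testdata README in this tree (the file name and exported symbol are arbitrary choices for the example), is:

$ cat hello.go
package main

import "C"

//export Hello
func Hello() { println("hello from Go") }

func main() {}
$ go build -buildmode=c-archive -o hello.a hello.go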
// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. #include "textflag.h" TEXT _rt0_riscv64_openbsd(SB),NOSPLIT|NOFRAME,$0 MOV 0(X2), A0 // argc ADD $8, X2, A1 // argv JMP main(SB) TEXT main(SB),NOSPLIT|NOFRAME,$0 MOV $runtime·rt0_go(SB), T0 JALR ZERO, T0
go/src/runtime/rt0_openbsd_riscv64.s/0
{ "file_path": "go/src/runtime/rt0_openbsd_riscv64.s", "repo_id": "go", "token_count": 166 }
407
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package runtime import ( "internal/bytealg" "internal/goarch" "internal/runtime/atomic" "unsafe" ) // Keep a cached value to make gotraceback fast, // since we call it on every call to gentraceback. // The cached value is a uint32 in which the low bits // are the "crash" and "all" settings and the remaining // bits are the traceback value (0 off, 1 on, 2 include system). const ( tracebackCrash = 1 << iota tracebackAll tracebackShift = iota ) var traceback_cache uint32 = 2 << tracebackShift var traceback_env uint32 // gotraceback returns the current traceback settings. // // If level is 0, suppress all tracebacks. // If level is 1, show tracebacks, but exclude runtime frames. // If level is 2, show tracebacks including runtime frames. // If all is set, print all goroutine stacks. Otherwise, print just the current goroutine. // If crash is set, crash (core dump, etc) after tracebacking. // //go:nosplit func gotraceback() (level int32, all, crash bool) { gp := getg() t := atomic.Load(&traceback_cache) crash = t&tracebackCrash != 0 all = gp.m.throwing >= throwTypeUser || t&tracebackAll != 0 if gp.m.traceback != 0 { level = int32(gp.m.traceback) } else if gp.m.throwing >= throwTypeRuntime { // Always include runtime frames in runtime throws unless // otherwise overridden by m.traceback. level = 2 } else { level = int32(t >> tracebackShift) } return } var ( argc int32 argv **byte ) // nosplit for use in linux startup sysargs. // //go:nosplit func argv_index(argv **byte, i int32) *byte { return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize)) } func args(c int32, v **byte) { argc = c argv = v sysargs(c, v) } func goargs() { if GOOS == "windows" { return } argslice = make([]string, argc) for i := int32(0); i < argc; i++ { argslice[i] = gostringnocopy(argv_index(argv, i)) } } func goenvs_unix() { // TODO(austin): ppc64 in dynamic linking mode doesn't // guarantee env[] will immediately follow argv. Might cause // problems. n := int32(0) for argv_index(argv, argc+1+n) != nil { n++ } envs = make([]string, n) for i := int32(0); i < n; i++ { envs[i] = gostring(argv_index(argv, argc+1+i)) } } func environ() []string { return envs } // TODO: These should be locals in testAtomic64, but we don't 8-byte // align stack variables on 386. 
var test_z64, test_x64 uint64 func testAtomic64() { test_z64 = 42 test_x64 = 0 if atomic.Cas64(&test_z64, test_x64, 1) { throw("cas64 failed") } if test_x64 != 0 { throw("cas64 failed") } test_x64 = 42 if !atomic.Cas64(&test_z64, test_x64, 1) { throw("cas64 failed") } if test_x64 != 42 || test_z64 != 1 { throw("cas64 failed") } if atomic.Load64(&test_z64) != 1 { throw("load64 failed") } atomic.Store64(&test_z64, (1<<40)+1) if atomic.Load64(&test_z64) != (1<<40)+1 { throw("store64 failed") } if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 { throw("xadd64 failed") } if atomic.Load64(&test_z64) != (2<<40)+2 { throw("xadd64 failed") } if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 { throw("xchg64 failed") } if atomic.Load64(&test_z64) != (3<<40)+3 { throw("xchg64 failed") } } func check() { var ( a int8 b uint8 c int16 d uint16 e int32 f uint32 g int64 h uint64 i, i1 float32 j, j1 float64 k unsafe.Pointer l *uint16 m [4]byte ) type x1t struct { x uint8 } type y1t struct { x1 x1t y uint8 } var x1 x1t var y1 y1t if unsafe.Sizeof(a) != 1 { throw("bad a") } if unsafe.Sizeof(b) != 1 { throw("bad b") } if unsafe.Sizeof(c) != 2 { throw("bad c") } if unsafe.Sizeof(d) != 2 { throw("bad d") } if unsafe.Sizeof(e) != 4 { throw("bad e") } if unsafe.Sizeof(f) != 4 { throw("bad f") } if unsafe.Sizeof(g) != 8 { throw("bad g") } if unsafe.Sizeof(h) != 8 { throw("bad h") } if unsafe.Sizeof(i) != 4 { throw("bad i") } if unsafe.Sizeof(j) != 8 { throw("bad j") } if unsafe.Sizeof(k) != goarch.PtrSize { throw("bad k") } if unsafe.Sizeof(l) != goarch.PtrSize { throw("bad l") } if unsafe.Sizeof(x1) != 1 { throw("bad unsafe.Sizeof x1") } if unsafe.Offsetof(y1.y) != 1 { throw("bad offsetof y1.y") } if unsafe.Sizeof(y1) != 2 { throw("bad unsafe.Sizeof y1") } if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 { throw("bad timediv") } var z uint32 z = 1 if !atomic.Cas(&z, 1, 2) { throw("cas1") } if z != 2 { throw("cas2") } z = 4 if atomic.Cas(&z, 5, 6) { throw("cas3") } if z != 4 { throw("cas4") } z = 0xffffffff if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) { throw("cas5") } if z != 0xfffffffe { throw("cas6") } m = [4]byte{1, 1, 1, 1} atomic.Or8(&m[1], 0xf0) if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 { throw("atomicor8") } m = [4]byte{0xff, 0xff, 0xff, 0xff} atomic.And8(&m[1], 0x1) if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff { throw("atomicand8") } *(*uint64)(unsafe.Pointer(&j)) = ^uint64(0) if j == j { throw("float64nan") } if !(j != j) { throw("float64nan1") } *(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1) if j == j1 { throw("float64nan2") } if !(j != j1) { throw("float64nan3") } *(*uint32)(unsafe.Pointer(&i)) = ^uint32(0) if i == i { throw("float32nan") } if i == i { throw("float32nan1") } *(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1) if i == i1 { throw("float32nan2") } if i == i1 { throw("float32nan3") } testAtomic64() if fixedStack != round2(fixedStack) { throw("FixedStack is not power-of-2") } if !checkASM() { throw("assembly checks failed") } } type dbgVar struct { name string value *int32 // for variables that can only be set at startup atomic *atomic.Int32 // for variables that can be changed during execution def int32 // default value (ideally zero) } // Holds variables parsed from GODEBUG env var, // except for "memprofilerate" since there is an // existing int var for that value, which may // already have an initial value. 
var debug struct { cgocheck int32 clobberfree int32 disablethp int32 dontfreezetheworld int32 efence int32 gccheckmark int32 gcpacertrace int32 gcshrinkstackoff int32 gcstoptheworld int32 gctrace int32 invalidptr int32 madvdontneed int32 // for Linux; issue 28466 runtimeContentionStacks atomic.Int32 scavtrace int32 scheddetail int32 schedtrace int32 tracebackancestors int32 asyncpreemptoff int32 harddecommit int32 adaptivestackstart int32 tracefpunwindoff int32 traceadvanceperiod int32 traceCheckStackOwnership int32 profstackdepth int32 // debug.malloc is used as a combined debug check // in the malloc function and should be set // if any of the below debug options is != 0. malloc bool inittrace int32 sbrk int32 // traceallocfree controls whether execution traces contain // detailed trace data about memory allocation. This value // affects debug.malloc only if it is != 0 and the execution // tracer is enabled, in which case debug.malloc will be // set to "true" if it isn't already while tracing is enabled. // It will be set while the world is stopped, so it's safe. // The value of traceallocfree can be changed any time in response // to os.Setenv("GODEBUG"). traceallocfree atomic.Int32 panicnil atomic.Int32 // asynctimerchan controls whether timer channels // behave asynchronously (as in Go 1.22 and earlier) // instead of their Go 1.23+ synchronous behavior. // The value can change at any time (in response to os.Setenv("GODEBUG")) // and affects all extant timer channels immediately. // Programs wouldn't normally change over an execution, // but allowing it is convenient for testing and for programs // that do an os.Setenv in main.init or main.main. asynctimerchan atomic.Int32 } var dbgvars = []*dbgVar{ {name: "adaptivestackstart", value: &debug.adaptivestackstart}, {name: "asyncpreemptoff", value: &debug.asyncpreemptoff}, {name: "asynctimerchan", atomic: &debug.asynctimerchan}, {name: "cgocheck", value: &debug.cgocheck}, {name: "clobberfree", value: &debug.clobberfree}, {name: "disablethp", value: &debug.disablethp}, {name: "dontfreezetheworld", value: &debug.dontfreezetheworld}, {name: "efence", value: &debug.efence}, {name: "gccheckmark", value: &debug.gccheckmark}, {name: "gcpacertrace", value: &debug.gcpacertrace}, {name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff}, {name: "gcstoptheworld", value: &debug.gcstoptheworld}, {name: "gctrace", value: &debug.gctrace}, {name: "harddecommit", value: &debug.harddecommit}, {name: "inittrace", value: &debug.inittrace}, {name: "invalidptr", value: &debug.invalidptr}, {name: "madvdontneed", value: &debug.madvdontneed}, {name: "panicnil", atomic: &debug.panicnil}, {name: "profstackdepth", value: &debug.profstackdepth, def: 128}, {name: "runtimecontentionstacks", atomic: &debug.runtimeContentionStacks}, {name: "sbrk", value: &debug.sbrk}, {name: "scavtrace", value: &debug.scavtrace}, {name: "scheddetail", value: &debug.scheddetail}, {name: "schedtrace", value: &debug.schedtrace}, {name: "traceadvanceperiod", value: &debug.traceadvanceperiod}, {name: "traceallocfree", atomic: &debug.traceallocfree}, {name: "tracecheckstackownership", value: &debug.traceCheckStackOwnership}, {name: "tracebackancestors", value: &debug.tracebackancestors}, {name: "tracefpunwindoff", value: &debug.tracefpunwindoff}, } func parsedebugvars() { // defaults debug.cgocheck = 1 debug.invalidptr = 1 debug.adaptivestackstart = 1 // set this to 0 to turn larger initial goroutine stacks off if GOOS == "linux" { // On Linux, MADV_FREE is faster than MADV_DONTNEED, // but 
doesn't affect many of the statistics that // MADV_DONTNEED does until the memory is actually // reclaimed. This generally leads to poor user // experience, like confusing stats in top and other // monitoring tools; and bad integration with // management systems that respond to memory usage. // Hence, default to MADV_DONTNEED. debug.madvdontneed = 1 } debug.traceadvanceperiod = defaultTraceAdvancePeriod godebug := gogetenv("GODEBUG") p := new(string) *p = godebug godebugEnv.Store(p) // apply runtime defaults, if any for _, v := range dbgvars { if v.def != 0 { // Every var should have either v.value or v.atomic set. if v.value != nil { *v.value = v.def } else if v.atomic != nil { v.atomic.Store(v.def) } } } // apply compile-time GODEBUG settings parsegodebug(godebugDefault, nil) // apply environment settings parsegodebug(godebug, nil) debug.malloc = (debug.inittrace | debug.sbrk) != 0 debug.profstackdepth = min(debug.profstackdepth, maxProfStackDepth) setTraceback(gogetenv("GOTRACEBACK")) traceback_env = traceback_cache } // reparsedebugvars reparses the runtime's debug variables // because the environment variable has been changed to env. func reparsedebugvars(env string) { seen := make(map[string]bool) // apply environment settings parsegodebug(env, seen) // apply compile-time GODEBUG settings for as-yet-unseen variables parsegodebug(godebugDefault, seen) // apply defaults for as-yet-unseen variables for _, v := range dbgvars { if v.atomic != nil && !seen[v.name] { v.atomic.Store(0) } } } // parsegodebug parses the godebug string, updating variables listed in dbgvars. // If seen == nil, this is startup time and we process the string left to right // overwriting older settings with newer ones. // If seen != nil, $GODEBUG has changed and we are doing an // incremental update. To avoid flapping in the case where a value is // set multiple times (perhaps in the default and the environment, // or perhaps twice in the environment), we process the string right-to-left // and only change values not already seen. After doing this for both // the environment and the default settings, the caller must also call // cleargodebug(seen) to reset any now-unset values back to their defaults. func parsegodebug(godebug string, seen map[string]bool) { for p := godebug; p != ""; { var field string if seen == nil { // startup: process left to right, overwriting older settings with newer i := bytealg.IndexByteString(p, ',') if i < 0 { field, p = p, "" } else { field, p = p[:i], p[i+1:] } } else { // incremental update: process right to left, updating and skipping seen i := len(p) - 1 for i >= 0 && p[i] != ',' { i-- } if i < 0 { p, field = "", p } else { p, field = p[:i], p[i+1:] } } i := bytealg.IndexByteString(field, '=') if i < 0 { continue } key, value := field[:i], field[i+1:] if seen[key] { continue } if seen != nil { seen[key] = true } // Update MemProfileRate directly here since it // is int, not int32, and should only be updated // if specified in GODEBUG. if seen == nil && key == "memprofilerate" { if n, ok := atoi(value); ok { MemProfileRate = n } } else { for _, v := range dbgvars { if v.name == key { if n, ok := atoi32(value); ok { if seen == nil && v.value != nil { *v.value = n } else if v.atomic != nil { v.atomic.Store(n) } } } } } } if debug.cgocheck > 1 { throw("cgocheck > 1 mode is no longer supported at runtime. 
Use GOEXPERIMENT=cgocheck2 at build time instead.") } } //go:linkname setTraceback runtime/debug.SetTraceback func setTraceback(level string) { var t uint32 switch level { case "none": t = 0 case "single", "": t = 1 << tracebackShift case "all": t = 1<<tracebackShift | tracebackAll case "system": t = 2<<tracebackShift | tracebackAll case "crash": t = 2<<tracebackShift | tracebackAll | tracebackCrash case "wer": if GOOS == "windows" { t = 2<<tracebackShift | tracebackAll | tracebackCrash enableWER() break } fallthrough default: t = tracebackAll if n, ok := atoi(level); ok && n == int(uint32(n)) { t |= uint32(n) << tracebackShift } } // when C owns the process, simply exit'ing the process on fatal errors // and panics is surprising. Be louder and abort instead. if islibrary || isarchive { t |= tracebackCrash } t |= traceback_env atomic.Store(&traceback_cache, t) } // Poor mans 64-bit division. // This is a very special function, do not use it if you are not sure what you are doing. // int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions. // Handles overflow in a time-specific manner. // This keeps us within no-split stack limits on 32-bit processors. // //go:nosplit func timediv(v int64, div int32, rem *int32) int32 { res := int32(0) for bit := 30; bit >= 0; bit-- { if v >= int64(div)<<uint(bit) { v = v - (int64(div) << uint(bit)) // Before this for loop, res was 0, thus all these // power of 2 increments are now just bitsets. res |= 1 << uint(bit) } } if v >= int64(div) { if rem != nil { *rem = 0 } return 0x7fffffff } if rem != nil { *rem = int32(v) } return res } // Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block. //go:nosplit func acquirem() *m { gp := getg() gp.m.locks++ return gp.m } //go:nosplit func releasem(mp *m) { gp := getg() mp.locks-- if mp.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack gp.stackguard0 = stackPreempt } } // reflect_typelinks is meant for package reflect, // but widely used packages access it using linkname. // Notable members of the hall of shame include: // - gitee.com/quant1x/gox // - github.com/goccy/json // - github.com/modern-go/reflect2 // - github.com/vmware/govmomi // - github.com/pinpoint-apm/pinpoint-go-agent // - github.com/timandy/routine // - github.com/v2pro/plz // // Do not remove or change the type signature. // See go.dev/issue/67401. // //go:linkname reflect_typelinks reflect.typelinks func reflect_typelinks() ([]unsafe.Pointer, [][]int32) { modules := activeModules() sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)} ret := [][]int32{modules[0].typelinks} for _, md := range modules[1:] { sections = append(sections, unsafe.Pointer(md.types)) ret = append(ret, md.typelinks) } return sections, ret } // reflect_resolveNameOff resolves a name offset from a base pointer. // // reflect_resolveNameOff is for package reflect, // but widely used packages access it using linkname. // Notable members of the hall of shame include: // - github.com/agiledragon/gomonkey/v2 // // Do not remove or change the type signature. // See go.dev/issue/67401. // //go:linkname reflect_resolveNameOff reflect.resolveNameOff func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer { return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes) } // reflect_resolveTypeOff resolves an *rtype offset from a base type. 
// // reflect_resolveTypeOff is meant for package reflect, // but widely used packages access it using linkname. // Notable members of the hall of shame include: // - gitee.com/quant1x/gox // - github.com/modern-go/reflect2 // - github.com/v2pro/plz // - github.com/timandy/routine // // Do not remove or change the type signature. // See go.dev/issue/67401. // //go:linkname reflect_resolveTypeOff reflect.resolveTypeOff func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer { return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off))) } // reflect_resolveTextOff resolves a function pointer offset from a base type. // // reflect_resolveTextOff is for package reflect, // but widely used packages access it using linkname. // Notable members of the hall of shame include: // - github.com/cloudwego/frugal // - github.com/agiledragon/gomonkey/v2 // // Do not remove or change the type signature. // See go.dev/issue/67401. // //go:linkname reflect_resolveTextOff reflect.resolveTextOff func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer { return toRType((*_type)(rtype)).textOff(textOff(off)) } // reflectlite_resolveNameOff resolves a name offset from a base pointer. // //go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer { return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes) } // reflectlite_resolveTypeOff resolves an *rtype offset from a base type. // //go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer { return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off))) } // reflect_addReflectOff adds a pointer to the reflection offset lookup map. // //go:linkname reflect_addReflectOff reflect.addReflectOff func reflect_addReflectOff(ptr unsafe.Pointer) int32 { reflectOffsLock() if reflectOffs.m == nil { reflectOffs.m = make(map[int32]unsafe.Pointer) reflectOffs.minv = make(map[unsafe.Pointer]int32) reflectOffs.next = -1 } id, found := reflectOffs.minv[ptr] if !found { id = reflectOffs.next reflectOffs.next-- // use negative offsets as IDs to aid debugging reflectOffs.m[id] = ptr reflectOffs.minv[ptr] = id } reflectOffsUnlock() return id }
go/src/runtime/runtime1.go/0
{ "file_path": "go/src/runtime/runtime1.go", "repo_id": "go", "token_count": 7715 }
408
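To make the shift-and-subtract loop in timediv concrete, here is a standalone sketch of the same algorithm that can be cross-checked against ordinary 64-bit division; the function and variable names are invented for the example and are not the runtime's.

package main

import "fmt"

// div64by32 mirrors runtime.timediv: divide v by div using only shifts
// and subtractions, saturating the quotient at 0x7fffffff on overflow.
func div64by32(v int64, div int32) (q, rem int32) {
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v -= int64(div) << uint(bit)
			q |= 1 << uint(bit) // q started at 0, so these are pure bit sets
		}
	}
	if v >= int64(div) {
		return 0x7fffffff, 0 // quotient does not fit in 31 bits
	}
	return q, int32(v)
}

func main() {
	v := int64(12345)*1000000000 + 54321 // same value runtime's check() uses
	q, rem := div64by32(v, 1000000000)
	fmt.Println(q, rem)                     // 12345 54321
	fmt.Println(v/1000000000, v%1000000000) // agrees with ordinary division
}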
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Semaphore implementation exposed to Go. // Intended use is provide a sleep and wakeup // primitive that can be used in the contended case // of other synchronization primitives. // Thus it targets the same goal as Linux's futex, // but it has much simpler semantics. // // That is, don't think of these as semaphores. // Think of them as a way to implement sleep and wakeup // such that every sleep is paired with a single wakeup, // even if, due to races, the wakeup happens before the sleep. // // See Mullender and Cox, ``Semaphores in Plan 9,'' // https://swtch.com/semaphore.pdf package runtime import ( "internal/cpu" "internal/runtime/atomic" "unsafe" ) // Asynchronous semaphore for sync.Mutex. // A semaRoot holds a balanced tree of sudog with distinct addresses (s.elem). // Each of those sudog may in turn point (through s.waitlink) to a list // of other sudogs waiting on the same address. // The operations on the inner lists of sudogs with the same address // are all O(1). The scanning of the top-level semaRoot list is O(log n), // where n is the number of distinct addresses with goroutines blocked // on them that hash to the given semaRoot. // See golang.org/issue/17953 for a program that worked badly // before we introduced the second level of list, and // BenchmarkSemTable/OneAddrCollision/* for a benchmark that exercises this. type semaRoot struct { lock mutex treap *sudog // root of balanced tree of unique waiters. nwait atomic.Uint32 // Number of waiters. Read w/o the lock. } var semtable semTable // Prime to not correlate with any user patterns. const semTabSize = 251 type semTable [semTabSize]struct { root semaRoot pad [cpu.CacheLinePadSize - unsafe.Sizeof(semaRoot{})]byte } func (t *semTable) rootFor(addr *uint32) *semaRoot { return &t[(uintptr(unsafe.Pointer(addr))>>3)%semTabSize].root } // sync_runtime_Semacquire should be an internal detail, // but widely used packages access it using linkname. // Notable members of the hall of shame include: // - gvisor.dev/gvisor // - github.com/sagernet/gvisor // // Do not remove or change the type signature. // See go.dev/issue/67401. // //go:linkname sync_runtime_Semacquire sync.runtime_Semacquire func sync_runtime_Semacquire(addr *uint32) { semacquire1(addr, false, semaBlockProfile, 0, waitReasonSemacquire) } //go:linkname poll_runtime_Semacquire internal/poll.runtime_Semacquire func poll_runtime_Semacquire(addr *uint32) { semacquire1(addr, false, semaBlockProfile, 0, waitReasonSemacquire) } // sync_runtime_Semrelease should be an internal detail, // but widely used packages access it using linkname. // Notable members of the hall of shame include: // - gvisor.dev/gvisor // - github.com/sagernet/gvisor // // Do not remove or change the type signature. // See go.dev/issue/67401. 
// //go:linkname sync_runtime_Semrelease sync.runtime_Semrelease func sync_runtime_Semrelease(addr *uint32, handoff bool, skipframes int) { semrelease1(addr, handoff, skipframes) } //go:linkname sync_runtime_SemacquireMutex sync.runtime_SemacquireMutex func sync_runtime_SemacquireMutex(addr *uint32, lifo bool, skipframes int) { semacquire1(addr, lifo, semaBlockProfile|semaMutexProfile, skipframes, waitReasonSyncMutexLock) } //go:linkname sync_runtime_SemacquireRWMutexR sync.runtime_SemacquireRWMutexR func sync_runtime_SemacquireRWMutexR(addr *uint32, lifo bool, skipframes int) { semacquire1(addr, lifo, semaBlockProfile|semaMutexProfile, skipframes, waitReasonSyncRWMutexRLock) } //go:linkname sync_runtime_SemacquireRWMutex sync.runtime_SemacquireRWMutex func sync_runtime_SemacquireRWMutex(addr *uint32, lifo bool, skipframes int) { semacquire1(addr, lifo, semaBlockProfile|semaMutexProfile, skipframes, waitReasonSyncRWMutexLock) } //go:linkname poll_runtime_Semrelease internal/poll.runtime_Semrelease func poll_runtime_Semrelease(addr *uint32) { semrelease(addr) } func readyWithTime(s *sudog, traceskip int) { if s.releasetime != 0 { s.releasetime = cputicks() } goready(s.g, traceskip) } type semaProfileFlags int const ( semaBlockProfile semaProfileFlags = 1 << iota semaMutexProfile ) // Called from runtime. func semacquire(addr *uint32) { semacquire1(addr, false, 0, 0, waitReasonSemacquire) } func semacquire1(addr *uint32, lifo bool, profile semaProfileFlags, skipframes int, reason waitReason) { gp := getg() if gp != gp.m.curg { throw("semacquire not on the G stack") } // Easy case. if cansemacquire(addr) { return } // Harder case: // increment waiter count // try cansemacquire one more time, return if succeeded // enqueue itself as a waiter // sleep // (waiter descriptor is dequeued by signaler) s := acquireSudog() root := semtable.rootFor(addr) t0 := int64(0) s.releasetime = 0 s.acquiretime = 0 s.ticket = 0 if profile&semaBlockProfile != 0 && blockprofilerate > 0 { t0 = cputicks() s.releasetime = -1 } if profile&semaMutexProfile != 0 && mutexprofilerate > 0 { if t0 == 0 { t0 = cputicks() } s.acquiretime = t0 } for { lockWithRank(&root.lock, lockRankRoot) // Add ourselves to nwait to disable "easy case" in semrelease. root.nwait.Add(1) // Check cansemacquire to avoid missed wakeup. if cansemacquire(addr) { root.nwait.Add(-1) unlock(&root.lock) break } // Any semrelease after the cansemacquire knows we're waiting // (we set nwait above), so go to sleep. root.queue(addr, s, lifo) goparkunlock(&root.lock, reason, traceBlockSync, 4+skipframes) if s.ticket != 0 || cansemacquire(addr) { break } } if s.releasetime > 0 { blockevent(s.releasetime-t0, 3+skipframes) } releaseSudog(s) } func semrelease(addr *uint32) { semrelease1(addr, false, 0) } func semrelease1(addr *uint32, handoff bool, skipframes int) { root := semtable.rootFor(addr) atomic.Xadd(addr, 1) // Easy case: no waiters? // This check must happen after the xadd, to avoid a missed wakeup // (see loop in semacquire). if root.nwait.Load() == 0 { return } // Harder case: search for a waiter and wake it. lockWithRank(&root.lock, lockRankRoot) if root.nwait.Load() == 0 { // The count is already consumed by another goroutine, // so no need to wake up another goroutine. 
unlock(&root.lock) return } s, t0, tailtime := root.dequeue(addr) if s != nil { root.nwait.Add(-1) } unlock(&root.lock) if s != nil { // May be slow or even yield, so unlock first acquiretime := s.acquiretime if acquiretime != 0 { // Charge contention that this (delayed) unlock caused. // If there are N more goroutines waiting beyond the // one that's waking up, charge their delay as well, so that // contention holding up many goroutines shows up as // more costly than contention holding up a single goroutine. // It would take O(N) time to calculate how long each goroutine // has been waiting, so instead we charge avg(head-wait, tail-wait)*N. // head-wait is the longest wait and tail-wait is the shortest. // (When we do a lifo insertion, we preserve this property by // copying the old head's acquiretime into the inserted new head. // In that case the overall average may be slightly high, but that's fine: // the average of the ends is only an approximation to the actual // average anyway.) // The root.dequeue above changed the head and tail acquiretime // to the current time, so the next unlock will not re-count this contention. dt0 := t0 - acquiretime dt := dt0 if s.waiters != 0 { dtail := t0 - tailtime dt += (dtail + dt0) / 2 * int64(s.waiters) } mutexevent(dt, 3+skipframes) } if s.ticket != 0 { throw("corrupted semaphore ticket") } if handoff && cansemacquire(addr) { s.ticket = 1 } readyWithTime(s, 5+skipframes) if s.ticket == 1 && getg().m.locks == 0 { // Direct G handoff // readyWithTime has added the waiter G as runnext in the // current P; we now call the scheduler so that we start running // the waiter G immediately. // Note that waiter inherits our time slice: this is desirable // to avoid having a highly contended semaphore hog the P // indefinitely. goyield is like Gosched, but it emits a // "preempted" trace event instead and, more importantly, puts // the current G on the local runq instead of the global one. // We only do this in the starving regime (handoff=true), as in // the non-starving case it is possible for a different waiter // to acquire the semaphore while we are yielding/scheduling, // and this would be wasteful. We wait instead to enter starving // regime, and then we start to do direct handoffs of ticket and // P. // See issue 33747 for discussion. goyield() } } } func cansemacquire(addr *uint32) bool { for { v := atomic.Load(addr) if v == 0 { return false } if atomic.Cas(addr, v, v-1) { return true } } } // queue adds s to the blocked goroutines in semaRoot. func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) { s.g = getg() s.elem = unsafe.Pointer(addr) s.next = nil s.prev = nil s.waiters = 0 var last *sudog pt := &root.treap for t := *pt; t != nil; t = *pt { if t.elem == unsafe.Pointer(addr) { // Already have addr in list. if lifo { // Substitute s in t's place in treap. *pt = s s.ticket = t.ticket s.acquiretime = t.acquiretime // preserve head acquiretime as oldest time s.parent = t.parent s.prev = t.prev s.next = t.next if s.prev != nil { s.prev.parent = s } if s.next != nil { s.next.parent = s } // Add t first in s's wait list. s.waitlink = t s.waittail = t.waittail if s.waittail == nil { s.waittail = t } s.waiters = t.waiters if s.waiters+1 != 0 { s.waiters++ } t.parent = nil t.prev = nil t.next = nil t.waittail = nil } else { // Add s to end of t's wait list. 
if t.waittail == nil { t.waitlink = s } else { t.waittail.waitlink = s } t.waittail = s s.waitlink = nil if t.waiters+1 != 0 { t.waiters++ } } return } last = t if uintptr(unsafe.Pointer(addr)) < uintptr(t.elem) { pt = &t.prev } else { pt = &t.next } } // Add s as new leaf in tree of unique addrs. // The balanced tree is a treap using ticket as the random heap priority. // That is, it is a binary tree ordered according to the elem addresses, // but then among the space of possible binary trees respecting those // addresses, it is kept balanced on average by maintaining a heap ordering // on the ticket: s.ticket <= both s.prev.ticket and s.next.ticket. // https://en.wikipedia.org/wiki/Treap // https://faculty.washington.edu/aragon/pubs/rst89.pdf // // s.ticket compared with zero in couple of places, therefore set lowest bit. // It will not affect treap's quality noticeably. s.ticket = cheaprand() | 1 s.parent = last *pt = s // Rotate up into tree according to ticket (priority). for s.parent != nil && s.parent.ticket > s.ticket { if s.parent.prev == s { root.rotateRight(s.parent) } else { if s.parent.next != s { panic("semaRoot queue") } root.rotateLeft(s.parent) } } } // dequeue searches for and finds the first goroutine // in semaRoot blocked on addr. // If the sudog was being profiled, dequeue returns the time // at which it was woken up as now. Otherwise now is 0. // If there are additional entries in the wait list, dequeue // returns tailtime set to the last entry's acquiretime. // Otherwise tailtime is found.acquiretime. func (root *semaRoot) dequeue(addr *uint32) (found *sudog, now, tailtime int64) { ps := &root.treap s := *ps for ; s != nil; s = *ps { if s.elem == unsafe.Pointer(addr) { goto Found } if uintptr(unsafe.Pointer(addr)) < uintptr(s.elem) { ps = &s.prev } else { ps = &s.next } } return nil, 0, 0 Found: now = int64(0) if s.acquiretime != 0 { now = cputicks() } if t := s.waitlink; t != nil { // Substitute t, also waiting on addr, for s in root tree of unique addrs. *ps = t t.ticket = s.ticket t.parent = s.parent t.prev = s.prev if t.prev != nil { t.prev.parent = t } t.next = s.next if t.next != nil { t.next.parent = t } if t.waitlink != nil { t.waittail = s.waittail } else { t.waittail = nil } t.waiters = s.waiters if t.waiters > 1 { t.waiters-- } // Set head and tail acquire time to 'now', // because the caller will take care of charging // the delays before now for all entries in the list. t.acquiretime = now tailtime = s.waittail.acquiretime s.waittail.acquiretime = now s.waitlink = nil s.waittail = nil } else { // Rotate s down to be leaf of tree for removal, respecting priorities. for s.next != nil || s.prev != nil { if s.next == nil || s.prev != nil && s.prev.ticket < s.next.ticket { root.rotateRight(s) } else { root.rotateLeft(s) } } // Remove s, now a leaf. if s.parent != nil { if s.parent.prev == s { s.parent.prev = nil } else { s.parent.next = nil } } else { root.treap = nil } tailtime = s.acquiretime } s.parent = nil s.elem = nil s.next = nil s.prev = nil s.ticket = 0 return s, now, tailtime } // rotateLeft rotates the tree rooted at node x. // turning (x a (y b c)) into (y (x a b) c). func (root *semaRoot) rotateLeft(x *sudog) { // p -> (x a (y b c)) p := x.parent y := x.next b := y.prev y.prev = x x.parent = y x.next = b if b != nil { b.parent = x } y.parent = p if p == nil { root.treap = y } else if p.prev == x { p.prev = y } else { if p.next != x { throw("semaRoot rotateLeft") } p.next = y } } // rotateRight rotates the tree rooted at node y. 
// turning (y (x a b) c) into (x a (y b c)). func (root *semaRoot) rotateRight(y *sudog) { // p -> (y (x a b) c) p := y.parent x := y.prev b := x.next x.next = y y.parent = x y.prev = b if b != nil { b.parent = y } x.parent = p if p == nil { root.treap = x } else if p.prev == y { p.prev = x } else { if p.next != y { throw("semaRoot rotateRight") } p.next = x } } // notifyList is a ticket-based notification list used to implement sync.Cond. // // It must be kept in sync with the sync package. type notifyList struct { // wait is the ticket number of the next waiter. It is atomically // incremented outside the lock. wait atomic.Uint32 // notify is the ticket number of the next waiter to be notified. It can // be read outside the lock, but is only written to with lock held. // // Both wait & notify can wrap around, and such cases will be correctly // handled as long as their "unwrapped" difference is bounded by 2^31. // For this not to be the case, we'd need to have 2^31+ goroutines // blocked on the same condvar, which is currently not possible. notify uint32 // List of parked waiters. lock mutex head *sudog tail *sudog } // less checks if a < b, considering a & b running counts that may overflow the // 32-bit range, and that their "unwrapped" difference is always less than 2^31. func less(a, b uint32) bool { return int32(a-b) < 0 } // notifyListAdd adds the caller to a notify list such that it can receive // notifications. The caller must eventually call notifyListWait to wait for // such a notification, passing the returned ticket number. // //go:linkname notifyListAdd sync.runtime_notifyListAdd func notifyListAdd(l *notifyList) uint32 { // This may be called concurrently, for example, when called from // sync.Cond.Wait while holding a RWMutex in read mode. return l.wait.Add(1) - 1 } // notifyListWait waits for a notification. If one has been sent since // notifyListAdd was called, it returns immediately. Otherwise, it blocks. // //go:linkname notifyListWait sync.runtime_notifyListWait func notifyListWait(l *notifyList, t uint32) { lockWithRank(&l.lock, lockRankNotifyList) // Return right away if this ticket has already been notified. if less(t, l.notify) { unlock(&l.lock) return } // Enqueue itself. s := acquireSudog() s.g = getg() s.ticket = t s.releasetime = 0 t0 := int64(0) if blockprofilerate > 0 { t0 = cputicks() s.releasetime = -1 } if l.tail == nil { l.head = s } else { l.tail.next = s } l.tail = s goparkunlock(&l.lock, waitReasonSyncCondWait, traceBlockCondWait, 3) if t0 != 0 { blockevent(s.releasetime-t0, 2) } releaseSudog(s) } // notifyListNotifyAll notifies all entries in the list. // //go:linkname notifyListNotifyAll sync.runtime_notifyListNotifyAll func notifyListNotifyAll(l *notifyList) { // Fast-path: if there are no new waiters since the last notification // we don't need to acquire the lock. if l.wait.Load() == atomic.Load(&l.notify) { return } // Pull the list out into a local variable, waiters will be readied // outside the lock. lockWithRank(&l.lock, lockRankNotifyList) s := l.head l.head = nil l.tail = nil // Update the next ticket to be notified. We can set it to the current // value of wait because any previous waiters are already in the list // or will notice that they have already been notified when trying to // add themselves to the list. atomic.Store(&l.notify, l.wait.Load()) unlock(&l.lock) // Go through the local list and ready all waiters. 
for s != nil { next := s.next s.next = nil readyWithTime(s, 4) s = next } } // notifyListNotifyOne notifies one entry in the list. // //go:linkname notifyListNotifyOne sync.runtime_notifyListNotifyOne func notifyListNotifyOne(l *notifyList) { // Fast-path: if there are no new waiters since the last notification // we don't need to acquire the lock at all. if l.wait.Load() == atomic.Load(&l.notify) { return } lockWithRank(&l.lock, lockRankNotifyList) // Re-check under the lock if we need to do anything. t := l.notify if t == l.wait.Load() { unlock(&l.lock) return } // Update the next notify ticket number. atomic.Store(&l.notify, t+1) // Try to find the g that needs to be notified. // If it hasn't made it to the list yet we won't find it, // but it won't park itself once it sees the new notify number. // // This scan looks linear but essentially always stops quickly. // Because g's queue separately from taking numbers, // there may be minor reorderings in the list, but we // expect the g we're looking for to be near the front. // The g has others in front of it on the list only to the // extent that it lost the race, so the iteration will not // be too long. This applies even when the g is missing: // it hasn't yet gotten to sleep and has lost the race to // the (few) other g's that we find on the list. for p, s := (*sudog)(nil), l.head; s != nil; p, s = s, s.next { if s.ticket == t { n := s.next if p != nil { p.next = n } else { l.head = n } if n == nil { l.tail = p } unlock(&l.lock) s.next = nil readyWithTime(s, 4) return } } unlock(&l.lock) } //go:linkname notifyListCheck sync.runtime_notifyListCheck func notifyListCheck(sz uintptr) { if sz != unsafe.Sizeof(notifyList{}) { print("runtime: bad notifyList size - sync=", sz, " runtime=", unsafe.Sizeof(notifyList{}), "\n") throw("bad notifyList size") } } //go:linkname sync_nanotime sync.runtime_nanotime func sync_nanotime() int64 { return nanotime() }
go/src/runtime/sema.go/0
{ "file_path": "go/src/runtime/sema.go", "repo_id": "go", "token_count": 7208 }
409
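The ticket comparison used by notifyList in sema.go above tolerates uint32 wraparound because the subtraction is reinterpreted as a signed int32. The following standalone sketch (illustrative only; the less helper is copied here for demonstration and is not an export of the runtime) shows the ordering surviving wraparound.

// Illustrative sketch, not part of the runtime sources.
package main

import "fmt"

// less mirrors the runtime helper: reports whether ticket a was issued before ticket b,
// assuming their unwrapped difference is below 2^31.
func less(a, b uint32) bool { return int32(a-b) < 0 }

func main() {
	fmt.Println(less(1, 2))          // true: ticket 1 precedes ticket 2
	fmt.Println(less(^uint32(0), 0)) // true: 0xffffffff precedes 0 after wraparound
	fmt.Println(less(0, ^uint32(0))) // false: the reverse ordering
}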
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build (linux || freebsd || openbsd) && riscv64 package runtime import ( "internal/abi" "internal/goarch" "unsafe" ) func dumpregs(c *sigctxt) { print("ra ", hex(c.ra()), "\t") print("sp ", hex(c.sp()), "\n") print("gp ", hex(c.gp()), "\t") print("tp ", hex(c.tp()), "\n") print("t0 ", hex(c.t0()), "\t") print("t1 ", hex(c.t1()), "\n") print("t2 ", hex(c.t2()), "\t") print("s0 ", hex(c.s0()), "\n") print("s1 ", hex(c.s1()), "\t") print("a0 ", hex(c.a0()), "\n") print("a1 ", hex(c.a1()), "\t") print("a2 ", hex(c.a2()), "\n") print("a3 ", hex(c.a3()), "\t") print("a4 ", hex(c.a4()), "\n") print("a5 ", hex(c.a5()), "\t") print("a6 ", hex(c.a6()), "\n") print("a7 ", hex(c.a7()), "\t") print("s2 ", hex(c.s2()), "\n") print("s3 ", hex(c.s3()), "\t") print("s4 ", hex(c.s4()), "\n") print("s5 ", hex(c.s5()), "\t") print("s6 ", hex(c.s6()), "\n") print("s7 ", hex(c.s7()), "\t") print("s8 ", hex(c.s8()), "\n") print("s9 ", hex(c.s9()), "\t") print("s10 ", hex(c.s10()), "\n") print("s11 ", hex(c.s11()), "\t") print("t3 ", hex(c.t3()), "\n") print("t4 ", hex(c.t4()), "\t") print("t5 ", hex(c.t5()), "\n") print("t6 ", hex(c.t6()), "\t") print("pc ", hex(c.pc()), "\n") } //go:nosplit //go:nowritebarrierrec func (c *sigctxt) sigpc() uintptr { return uintptr(c.pc()) } func (c *sigctxt) sigsp() uintptr { return uintptr(c.sp()) } func (c *sigctxt) siglr() uintptr { return uintptr(c.ra()) } func (c *sigctxt) fault() uintptr { return uintptr(c.sigaddr()) } // preparePanic sets up the stack to look like a call to sigpanic. func (c *sigctxt) preparePanic(sig uint32, gp *g) { // We arrange RA, and pc to pretend the panicking // function calls sigpanic directly. // Always save RA to stack so that panics in leaf // functions are correctly handled. This smashes // the stack frame but we're not going back there // anyway. sp := c.sp() - goarch.PtrSize c.set_sp(sp) *(*uint64)(unsafe.Pointer(uintptr(sp))) = c.ra() pc := gp.sigpc if shouldPushSigpanic(gp, pc, uintptr(c.ra())) { // Make it look the like faulting PC called sigpanic. c.set_ra(uint64(pc)) } // In case we are panicking from external C code c.set_gp(uint64(uintptr(unsafe.Pointer(gp)))) c.set_pc(uint64(abi.FuncPCABIInternal(sigpanic))) } func (c *sigctxt) pushCall(targetPC, resumePC uintptr) { // Push the LR to stack, as we'll clobber it in order to // push the call. The function being pushed is responsible // for restoring the LR and setting the SP back. // This extra slot is known to gentraceback. sp := c.sp() - goarch.PtrSize c.set_sp(sp) *(*uint64)(unsafe.Pointer(uintptr(sp))) = c.ra() // Set up PC and LR to pretend the function being signaled // calls targetPC at resumePC. c.set_ra(uint64(resumePC)) c.set_pc(uint64(targetPC)) }
go/src/runtime/signal_riscv64.go/0
{ "file_path": "go/src/runtime/signal_riscv64.go", "repo_id": "go", "token_count": 1289 }
410
// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Software IEEE754 64-bit floating point. // Only referred to (and thus linked in) by softfloat targets // and by tests in this directory. package runtime const ( mantbits64 uint = 52 expbits64 uint = 11 bias64 = -1<<(expbits64-1) + 1 nan64 uint64 = (1<<expbits64-1)<<mantbits64 + 1<<(mantbits64-1) // quiet NaN, 0 payload inf64 uint64 = (1<<expbits64 - 1) << mantbits64 neg64 uint64 = 1 << (expbits64 + mantbits64) mantbits32 uint = 23 expbits32 uint = 8 bias32 = -1<<(expbits32-1) + 1 nan32 uint32 = (1<<expbits32-1)<<mantbits32 + 1<<(mantbits32-1) // quiet NaN, 0 payload inf32 uint32 = (1<<expbits32 - 1) << mantbits32 neg32 uint32 = 1 << (expbits32 + mantbits32) ) func funpack64(f uint64) (sign, mant uint64, exp int, inf, nan bool) { sign = f & (1 << (mantbits64 + expbits64)) mant = f & (1<<mantbits64 - 1) exp = int(f>>mantbits64) & (1<<expbits64 - 1) switch exp { case 1<<expbits64 - 1: if mant != 0 { nan = true return } inf = true return case 0: // denormalized if mant != 0 { exp += bias64 + 1 for mant < 1<<mantbits64 { mant <<= 1 exp-- } } default: // add implicit top bit mant |= 1 << mantbits64 exp += bias64 } return } func funpack32(f uint32) (sign, mant uint32, exp int, inf, nan bool) { sign = f & (1 << (mantbits32 + expbits32)) mant = f & (1<<mantbits32 - 1) exp = int(f>>mantbits32) & (1<<expbits32 - 1) switch exp { case 1<<expbits32 - 1: if mant != 0 { nan = true return } inf = true return case 0: // denormalized if mant != 0 { exp += bias32 + 1 for mant < 1<<mantbits32 { mant <<= 1 exp-- } } default: // add implicit top bit mant |= 1 << mantbits32 exp += bias32 } return } func fpack64(sign, mant uint64, exp int, trunc uint64) uint64 { mant0, exp0, trunc0 := mant, exp, trunc if mant == 0 { return sign } for mant < 1<<mantbits64 { mant <<= 1 exp-- } for mant >= 4<<mantbits64 { trunc |= mant & 1 mant >>= 1 exp++ } if mant >= 2<<mantbits64 { if mant&1 != 0 && (trunc != 0 || mant&2 != 0) { mant++ if mant >= 4<<mantbits64 { mant >>= 1 exp++ } } mant >>= 1 exp++ } if exp >= 1<<expbits64-1+bias64 { return sign ^ inf64 } if exp < bias64+1 { if exp < bias64-int(mantbits64) { return sign | 0 } // repeat expecting denormal mant, exp, trunc = mant0, exp0, trunc0 for exp < bias64 { trunc |= mant & 1 mant >>= 1 exp++ } if mant&1 != 0 && (trunc != 0 || mant&2 != 0) { mant++ } mant >>= 1 exp++ if mant < 1<<mantbits64 { return sign | mant } } return sign | uint64(exp-bias64)<<mantbits64 | mant&(1<<mantbits64-1) } func fpack32(sign, mant uint32, exp int, trunc uint32) uint32 { mant0, exp0, trunc0 := mant, exp, trunc if mant == 0 { return sign } for mant < 1<<mantbits32 { mant <<= 1 exp-- } for mant >= 4<<mantbits32 { trunc |= mant & 1 mant >>= 1 exp++ } if mant >= 2<<mantbits32 { if mant&1 != 0 && (trunc != 0 || mant&2 != 0) { mant++ if mant >= 4<<mantbits32 { mant >>= 1 exp++ } } mant >>= 1 exp++ } if exp >= 1<<expbits32-1+bias32 { return sign ^ inf32 } if exp < bias32+1 { if exp < bias32-int(mantbits32) { return sign | 0 } // repeat expecting denormal mant, exp, trunc = mant0, exp0, trunc0 for exp < bias32 { trunc |= mant & 1 mant >>= 1 exp++ } if mant&1 != 0 && (trunc != 0 || mant&2 != 0) { mant++ } mant >>= 1 exp++ if mant < 1<<mantbits32 { return sign | mant } } return sign | uint32(exp-bias32)<<mantbits32 | mant&(1<<mantbits32-1) } func fadd64(f, g uint64) uint64 { fs, fm, fe, fi, fn := funpack64(f) gs, gm, ge, gi, gn := funpack64(g) // 
Special cases.
	switch {
	case fn || gn: // NaN + x or x + NaN = NaN
		return nan64
	case fi && gi && fs != gs: // +Inf + -Inf or -Inf + +Inf = NaN
		return nan64
	case fi: // ±Inf + g = ±Inf
		return f
	case gi: // f + ±Inf = ±Inf
		return g
	case fm == 0 && gm == 0 && fs != 0 && gs != 0: // -0 + -0 = -0
		return f
	case fm == 0: // 0 + g = g but 0 + -0 = +0
		if gm == 0 {
			g ^= gs
		}
		return g
	case gm == 0: // f + 0 = f
		return f
	}

	if fe < ge || fe == ge && fm < gm {
		f, g, fs, fm, fe, gs, gm, ge = g, f, gs, gm, ge, fs, fm, fe
	}

	shift := uint(fe - ge)
	fm <<= 2
	gm <<= 2
	trunc := gm & (1<<shift - 1)
	gm >>= shift
	if fs == gs {
		fm += gm
	} else {
		fm -= gm
		if trunc != 0 {
			fm--
		}
	}
	if fm == 0 {
		fs = 0
	}
	return fpack64(fs, fm, fe-2, trunc)
}

func fsub64(f, g uint64) uint64 {
	return fadd64(f, fneg64(g))
}

func fneg64(f uint64) uint64 {
	return f ^ (1 << (mantbits64 + expbits64))
}

func fmul64(f, g uint64) uint64 {
	fs, fm, fe, fi, fn := funpack64(f)
	gs, gm, ge, gi, gn := funpack64(g)

	// Special cases.
	switch {
	case fn || gn: // NaN * g or f * NaN = NaN
		return nan64
	case fi && gi: // Inf * Inf = Inf (with sign adjusted)
		return f ^ gs
	case fi && gm == 0, fm == 0 && gi: // 0 * Inf = Inf * 0 = NaN
		return nan64
	case fm == 0: // 0 * x = 0 (with sign adjusted)
		return f ^ gs
	case gm == 0: // x * 0 = 0 (with sign adjusted)
		return g ^ fs
	}

	// 53-bit * 53-bit = 107- or 108-bit
	lo, hi := mullu(fm, gm)
	shift := mantbits64 - 1
	trunc := lo & (1<<shift - 1)
	mant := hi<<(64-shift) | lo>>shift
	return fpack64(fs^gs, mant, fe+ge-1, trunc)
}

func fdiv64(f, g uint64) uint64 {
	fs, fm, fe, fi, fn := funpack64(f)
	gs, gm, ge, gi, gn := funpack64(g)

	// Special cases.
	switch {
	case fn || gn: // NaN / g = f / NaN = NaN
		return nan64
	case fi && gi: // ±Inf / ±Inf = NaN
		return nan64
	case !fi && !gi && fm == 0 && gm == 0: // 0 / 0 = NaN
		return nan64
	case fi, !gi && gm == 0: // Inf / g = f / 0 = Inf
		return fs ^ gs ^ inf64
	case gi, fm == 0: // f / Inf = 0 / g = 0
		return fs ^ gs ^ 0
	}
	_, _, _, _ = fi, fn, gi, gn

	// 53-bit<<54 / 53-bit = 53- or 54-bit.
	shift := mantbits64 + 2
	q, r := divlu(fm>>(64-shift), fm<<shift, gm)
	return fpack64(fs^gs, q, fe-ge-2, r)
}

func f64to32(f uint64) uint32 {
	fs, fm, fe, fi, fn := funpack64(f)
	if fn {
		return nan32
	}
	fs32 := uint32(fs >> 32)
	if fi {
		return fs32 ^ inf32
	}
	const d = mantbits64 - mantbits32 - 1
	return fpack32(fs32, uint32(fm>>d), fe-1, uint32(fm&(1<<d-1)))
}

func f32to64(f uint32) uint64 {
	const d = mantbits64 - mantbits32
	fs, fm, fe, fi, fn := funpack32(f)
	if fn {
		return nan64
	}
	fs64 := uint64(fs) << 32
	if fi {
		return fs64 ^ inf64
	}
	return fpack64(fs64, uint64(fm)<<d, fe, 0)
}

func fcmp64(f, g uint64) (cmp int32, isnan bool) {
	fs, fm, _, fi, fn := funpack64(f)
	gs, gm, _, gi, gn := funpack64(g)

	switch {
	case fn, gn: // flag NaN
		return 0, true
	case !fi && !gi && fm == 0 && gm == 0: // ±0 == ±0
		return 0, false
	case fs > gs: // f < 0, g > 0
		return -1, false
	case fs < gs: // f > 0, g < 0
		return +1, false
	// Same sign, not NaN.
	// Can compare encodings directly now.
	// Reverse for sign.
case fs == 0 && f < g, fs != 0 && f > g: return -1, false case fs == 0 && f > g, fs != 0 && f < g: return +1, false } // f == g return 0, false } func f64toint(f uint64) (val int64, ok bool) { fs, fm, fe, fi, fn := funpack64(f) switch { case fi, fn: // NaN return 0, false case fe < -1: // f < 0.5 return 0, false case fe > 63: // f >= 2^63 if fs != 0 && fm == 0 { // f == -2^63 return -1 << 63, true } if fs != 0 { return 0, false } return 0, false } for fe > int(mantbits64) { fe-- fm <<= 1 } for fe < int(mantbits64) { fe++ fm >>= 1 } val = int64(fm) if fs != 0 { val = -val } return val, true } func fintto64(val int64) (f uint64) { fs := uint64(val) & (1 << 63) mant := uint64(val) if fs != 0 { mant = -mant } return fpack64(fs, mant, int(mantbits64), 0) } func fintto32(val int64) (f uint32) { fs := uint64(val) & (1 << 63) mant := uint64(val) if fs != 0 { mant = -mant } // Reduce mantissa size until it fits into a uint32. // Keep track of the bits we throw away, and if any are // nonzero or them into the lowest bit. exp := int(mantbits32) var trunc uint32 for mant >= 1<<32 { trunc |= uint32(mant) & 1 mant >>= 1 exp++ } return fpack32(uint32(fs>>32), uint32(mant), exp, trunc) } // 64x64 -> 128 multiply. // adapted from hacker's delight. func mullu(u, v uint64) (lo, hi uint64) { const ( s = 32 mask = 1<<s - 1 ) u0 := u & mask u1 := u >> s v0 := v & mask v1 := v >> s w0 := u0 * v0 t := u1*v0 + w0>>s w1 := t & mask w2 := t >> s w1 += u0 * v1 return u * v, u1*v1 + w2 + w1>>s } // 128/64 -> 64 quotient, 64 remainder. // adapted from hacker's delight func divlu(u1, u0, v uint64) (q, r uint64) { const b = 1 << 32 if u1 >= v { return 1<<64 - 1, 1<<64 - 1 } // s = nlz(v); v <<= s s := uint(0) for v&(1<<63) == 0 { s++ v <<= 1 } vn1 := v >> 32 vn0 := v & (1<<32 - 1) un32 := u1<<s | u0>>(64-s) un10 := u0 << s un1 := un10 >> 32 un0 := un10 & (1<<32 - 1) q1 := un32 / vn1 rhat := un32 - q1*vn1 again1: if q1 >= b || q1*vn0 > b*rhat+un1 { q1-- rhat += vn1 if rhat < b { goto again1 } } un21 := un32*b + un1 - q1*v q0 := un21 / vn1 rhat = un21 - q0*vn1 again2: if q0 >= b || q0*vn0 > b*rhat+un0 { q0-- rhat += vn1 if rhat < b { goto again2 } } return q1*b + q0, (un21*b + un0 - q0*v) >> s } func fadd32(x, y uint32) uint32 { return f64to32(fadd64(f32to64(x), f32to64(y))) } func fmul32(x, y uint32) uint32 { return f64to32(fmul64(f32to64(x), f32to64(y))) } func fdiv32(x, y uint32) uint32 { // TODO: are there double-rounding problems here? See issue 48807. 
return f64to32(fdiv64(f32to64(x), f32to64(y))) } func feq32(x, y uint32) bool { cmp, nan := fcmp64(f32to64(x), f32to64(y)) return cmp == 0 && !nan } func fgt32(x, y uint32) bool { cmp, nan := fcmp64(f32to64(x), f32to64(y)) return cmp >= 1 && !nan } func fge32(x, y uint32) bool { cmp, nan := fcmp64(f32to64(x), f32to64(y)) return cmp >= 0 && !nan } func feq64(x, y uint64) bool { cmp, nan := fcmp64(x, y) return cmp == 0 && !nan } func fgt64(x, y uint64) bool { cmp, nan := fcmp64(x, y) return cmp >= 1 && !nan } func fge64(x, y uint64) bool { cmp, nan := fcmp64(x, y) return cmp >= 0 && !nan } func fint32to32(x int32) uint32 { return fintto32(int64(x)) } func fint32to64(x int32) uint64 { return fintto64(int64(x)) } func fint64to32(x int64) uint32 { return fintto32(x) } func fint64to64(x int64) uint64 { return fintto64(x) } func f32toint32(x uint32) int32 { val, _ := f64toint(f32to64(x)) return int32(val) } func f32toint64(x uint32) int64 { val, _ := f64toint(f32to64(x)) return val } func f64toint32(x uint64) int32 { val, _ := f64toint(x) return int32(val) } func f64toint64(x uint64) int64 { val, _ := f64toint(x) return val } func f64touint64(x uint64) uint64 { var m uint64 = 0x43e0000000000000 // float64 1<<63 if fgt64(m, x) { return uint64(f64toint64(x)) } y := fadd64(x, -m) z := uint64(f64toint64(y)) return z | (1 << 63) } func f32touint64(x uint32) uint64 { var m uint32 = 0x5f000000 // float32 1<<63 if fgt32(m, x) { return uint64(f32toint64(x)) } y := fadd32(x, -m) z := uint64(f32toint64(y)) return z | (1 << 63) } func fuint64to64(x uint64) uint64 { if int64(x) >= 0 { return fint64to64(int64(x)) } // See ../cmd/compile/internal/ssagen/ssa.go:uint64Tofloat y := x & 1 z := x >> 1 z = z | y r := fint64to64(int64(z)) return fadd64(r, r) } func fuint64to32(x uint64) uint32 { if int64(x) >= 0 { return fint64to32(int64(x)) } // See ../cmd/compile/internal/ssagen/ssa.go:uint64Tofloat y := x & 1 z := x >> 1 z = z | y r := fint64to32(int64(z)) return fadd32(r, r) }
go/src/runtime/softfloat64.go/0
{ "file_path": "go/src/runtime/softfloat64.go", "repo_id": "go", "token_count": 5657 }
411
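The softfloat code above relies on the IEEE-754 field widths mantbits64=52, expbits64=11 and the bias constant. The standalone sketch below (an assumed example, not taken from the runtime) decomposes a float64 with the same field layout and checks it against math.Float64bits.

// Standalone sketch using the same field widths as the software float code.
package main

import (
	"fmt"
	"math"
)

const (
	mantbits64 = 52
	expbits64  = 11
	bias64     = -1<<(expbits64-1) + 1 // -1023
)

func main() {
	b := math.Float64bits(6.5) // 6.5 = 1.625 * 2^2
	sign := b >> (mantbits64 + expbits64)
	exp := int(b>>mantbits64)&(1<<expbits64-1) + bias64
	mant := b&(1<<mantbits64-1) | 1<<mantbits64 // restore the implicit leading bit

	fmt.Println(sign)                            // 0
	fmt.Println(exp)                             // 2
	fmt.Println(float64(mant) / (1 << mantbits64)) // 1.625, the mantissa scaled to [1, 2)
}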
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build linux package runtime import "unsafe" func sbrk0() uintptr // Called from write_err_android.go only, but defined in sys_linux_*.s; // declared here (instead of in write_err_android.go) for go vet on non-android builds. // The return value is the raw syscall result, which may encode an error number. // //go:noescape func access(name *byte, mode int32) int32 func connect(fd int32, addr unsafe.Pointer, len int32) int32 func socket(domain int32, typ int32, prot int32) int32
go/src/runtime/stubs_linux.go/0
{ "file_path": "go/src/runtime/stubs_linux.go", "repo_id": "go", "token_count": 198 }
412
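The comment in stubs_linux.go above notes that the raw syscall result may encode an error number. On Linux the usual convention (an assumption here, not stated in the file) is that results in the signed range [-4095, -1] carry a negated errno; errnoFromRaw below is a hypothetical helper illustrating that decoding.

// Hypothetical helper based on the common Linux raw-syscall convention.
package main

import "fmt"

func errnoFromRaw(r uintptr) (errno int, failed bool) {
	if r > ^uintptr(4095) { // value falls in [-4095, -1] when viewed as signed
		return int(-r), true
	}
	return 0, false
}

func main() {
	raw := uintptr(0) - 13         // what a failed call returning -EACCES looks like
	fmt.Println(errnoFromRaw(raw)) // 13 true
}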
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package runtime import ( "internal/abi" "internal/runtime/atomic" "unsafe" ) // The X versions of syscall expect the libc call to return a 64-bit result. // Otherwise (the non-X version) expects a 32-bit result. // This distinction is required because an error is indicated by returning -1, // and we need to know whether to check 32 or 64 bits of the result. // (Some libc functions that return 32 bits put junk in the upper 32 bits of AX.) // golang.org/x/sys linknames syscall_syscall // (in addition to standard package syscall). // Do not remove or change the type signature. // //go:linkname syscall_syscall syscall.syscall //go:nosplit func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err} entersyscall() libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&args)) exitsyscall() return args.r1, args.r2, args.err } func syscall() //go:linkname syscall_syscallX syscall.syscallX //go:nosplit func syscall_syscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err} entersyscall() libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallX)), unsafe.Pointer(&args)) exitsyscall() return args.r1, args.r2, args.err } func syscallX() // golang.org/x/sys linknames syscall.syscall6 // (in addition to standard package syscall). // Do not remove or change the type signature. // // syscall.syscall6 is meant for package syscall (and x/sys), // but widely used packages access it using linkname. // Notable members of the hall of shame include: // - github.com/tetratelabs/wazero // // See go.dev/issue/67401. // //go:linkname syscall_syscall6 syscall.syscall6 //go:nosplit func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { args := struct{ fn, a1, a2, a3, a4, a5, a6, r1, r2, err uintptr }{fn, a1, a2, a3, a4, a5, a6, r1, r2, err} entersyscall() libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&args)) exitsyscall() return args.r1, args.r2, args.err } func syscall6() // golang.org/x/sys linknames syscall.syscall9 // (in addition to standard package syscall). // Do not remove or change the type signature. // //go:linkname syscall_syscall9 syscall.syscall9 //go:nosplit //go:cgo_unsafe_args func syscall_syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) { entersyscall() libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall9)), unsafe.Pointer(&fn)) exitsyscall() return } func syscall9() //go:linkname syscall_syscall6X syscall.syscall6X //go:nosplit func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { args := struct{ fn, a1, a2, a3, a4, a5, a6, r1, r2, err uintptr }{fn, a1, a2, a3, a4, a5, a6, r1, r2, err} entersyscall() libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&args)) exitsyscall() return args.r1, args.r2, args.err } func syscall6X() // golang.org/x/sys linknames syscall.syscallPtr // (in addition to standard package syscall). // Do not remove or change the type signature. 
// //go:linkname syscall_syscallPtr syscall.syscallPtr //go:nosplit func syscall_syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err} entersyscall() libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallPtr)), unsafe.Pointer(&args)) exitsyscall() return args.r1, args.r2, args.err } func syscallPtr() // golang.org/x/sys linknames syscall_rawSyscall // (in addition to standard package syscall). // Do not remove or change the type signature. // //go:linkname syscall_rawSyscall syscall.rawSyscall //go:nosplit func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err} libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&args)) return args.r1, args.r2, args.err } // golang.org/x/sys linknames syscall_rawSyscall6 // (in addition to standard package syscall). // Do not remove or change the type signature. // //go:linkname syscall_rawSyscall6 syscall.rawSyscall6 //go:nosplit func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { args := struct{ fn, a1, a2, a3, a4, a5, a6, r1, r2, err uintptr }{fn, a1, a2, a3, a4, a5, a6, r1, r2, err} libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&args)) return args.r1, args.r2, args.err } // crypto_x509_syscall is used in crypto/x509/internal/macos to call into Security.framework and CF. //go:linkname crypto_x509_syscall crypto/x509/internal/macos.syscall //go:nosplit func crypto_x509_syscall(fn, a1, a2, a3, a4, a5 uintptr, f1 float64) (r1 uintptr) { args := struct { fn, a1, a2, a3, a4, a5 uintptr f1 float64 r1 uintptr }{fn, a1, a2, a3, a4, a5, f1, r1} entersyscall() libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall_x509)), unsafe.Pointer(&args)) exitsyscall() return args.r1 } func syscall_x509() // The *_trampoline functions convert from the Go calling convention to the C calling convention // and then call the underlying libc function. They are defined in sys_darwin_$ARCH.s. //go:nosplit //go:cgo_unsafe_args func pthread_attr_init(attr *pthreadattr) int32 { ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_attr_init_trampoline)), unsafe.Pointer(&attr)) KeepAlive(attr) return ret } func pthread_attr_init_trampoline() //go:nosplit //go:cgo_unsafe_args func pthread_attr_getstacksize(attr *pthreadattr, size *uintptr) int32 { ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_attr_getstacksize_trampoline)), unsafe.Pointer(&attr)) KeepAlive(attr) KeepAlive(size) return ret } func pthread_attr_getstacksize_trampoline() //go:nosplit //go:cgo_unsafe_args func pthread_attr_setdetachstate(attr *pthreadattr, state int) int32 { ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_attr_setdetachstate_trampoline)), unsafe.Pointer(&attr)) KeepAlive(attr) return ret } func pthread_attr_setdetachstate_trampoline() //go:nosplit //go:cgo_unsafe_args func pthread_create(attr *pthreadattr, start uintptr, arg unsafe.Pointer) int32 { ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_create_trampoline)), unsafe.Pointer(&attr)) KeepAlive(attr) KeepAlive(arg) // Just for consistency. Arg of course needs to be kept alive for the start function. 
return ret } func pthread_create_trampoline() //go:nosplit //go:cgo_unsafe_args func raise(sig uint32) { libcCall(unsafe.Pointer(abi.FuncPCABI0(raise_trampoline)), unsafe.Pointer(&sig)) } func raise_trampoline() //go:nosplit //go:cgo_unsafe_args func pthread_self() (t pthread) { libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_self_trampoline)), unsafe.Pointer(&t)) return } func pthread_self_trampoline() //go:nosplit //go:cgo_unsafe_args func pthread_kill(t pthread, sig uint32) { libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_kill_trampoline)), unsafe.Pointer(&t)) return } func pthread_kill_trampoline() // osinit_hack is a clumsy hack to work around Apple libc bugs // causing fork+exec to hang in the child process intermittently. // See go.dev/issue/33565 and go.dev/issue/56784 for a few reports. // // The stacks obtained from the hung child processes are in // libSystem_atfork_child, which is supposed to reinitialize various // parts of the C library in the new process. // // One common stack dies in _notify_fork_child calling _notify_globals // (inlined) calling _os_alloc_once, because _os_alloc_once detects that // the once lock is held by the parent process and then calls // _os_once_gate_corruption_abort. The allocation is setting up the // globals for the notification subsystem. See the source code at [1]. // To work around this, we can allocate the globals earlier in the Go // program's lifetime, before any execs are involved, by calling any // notify routine that is exported, calls _notify_globals, and doesn't do // anything too expensive otherwise. notify_is_valid_token(0) fits the bill. // // The other common stack dies in xpc_atfork_child calling // _objc_msgSend_uncached which ends up in // WAITING_FOR_ANOTHER_THREAD_TO_FINISH_CALLING_+initialize. Of course, // whatever thread the child is waiting for is in the parent process and // is not going to finish anything in the child process. There is no // public source code for these routines, so it is unclear exactly what // the problem is. An Apple engineer suggests using xpc_date_create_from_current, // which empirically does fix the problem. // // So osinit_hack_trampoline (in sys_darwin_$GOARCH.s) calls // notify_is_valid_token(0) and xpc_date_create_from_current(), which makes the // fork+exec hangs stop happening. If Apple fixes the libc bug in // some future version of macOS, then we can remove this awful code. // //go:nosplit func osinit_hack() { if GOOS == "darwin" { // not ios libcCall(unsafe.Pointer(abi.FuncPCABI0(osinit_hack_trampoline)), nil) } return } func osinit_hack_trampoline() // mmap is used to do low-level memory allocation via mmap. Don't allow stack // splits, since this function (used by sysAlloc) is called in a lot of low-level // parts of the runtime and callers often assume it won't acquire any locks. // //go:nosplit func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (unsafe.Pointer, int) { args := struct { addr unsafe.Pointer n uintptr prot, flags, fd int32 off uint32 ret1 unsafe.Pointer ret2 int }{addr, n, prot, flags, fd, off, nil, 0} libcCall(unsafe.Pointer(abi.FuncPCABI0(mmap_trampoline)), unsafe.Pointer(&args)) return args.ret1, args.ret2 } func mmap_trampoline() //go:nosplit //go:cgo_unsafe_args func munmap(addr unsafe.Pointer, n uintptr) { libcCall(unsafe.Pointer(abi.FuncPCABI0(munmap_trampoline)), unsafe.Pointer(&addr)) KeepAlive(addr) // Just for consistency. Hopefully addr is not a Go address. 
} func munmap_trampoline() //go:nosplit //go:cgo_unsafe_args func madvise(addr unsafe.Pointer, n uintptr, flags int32) { libcCall(unsafe.Pointer(abi.FuncPCABI0(madvise_trampoline)), unsafe.Pointer(&addr)) KeepAlive(addr) // Just for consistency. Hopefully addr is not a Go address. } func madvise_trampoline() //go:nosplit //go:cgo_unsafe_args func mlock(addr unsafe.Pointer, n uintptr) { libcCall(unsafe.Pointer(abi.FuncPCABI0(mlock_trampoline)), unsafe.Pointer(&addr)) KeepAlive(addr) // Just for consistency. Hopefully addr is not a Go address. } func mlock_trampoline() //go:nosplit //go:cgo_unsafe_args func read(fd int32, p unsafe.Pointer, n int32) int32 { ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(read_trampoline)), unsafe.Pointer(&fd)) KeepAlive(p) return ret } func read_trampoline() func pipe() (r, w int32, errno int32) { var p [2]int32 errno = libcCall(unsafe.Pointer(abi.FuncPCABI0(pipe_trampoline)), noescape(unsafe.Pointer(&p))) return p[0], p[1], errno } func pipe_trampoline() //go:nosplit //go:cgo_unsafe_args func closefd(fd int32) int32 { return libcCall(unsafe.Pointer(abi.FuncPCABI0(close_trampoline)), unsafe.Pointer(&fd)) } func close_trampoline() // This is exported via linkname to assembly in runtime/cgo. // //go:nosplit //go:cgo_unsafe_args //go:linkname exit func exit(code int32) { libcCall(unsafe.Pointer(abi.FuncPCABI0(exit_trampoline)), unsafe.Pointer(&code)) } func exit_trampoline() //go:nosplit //go:cgo_unsafe_args func usleep(usec uint32) { libcCall(unsafe.Pointer(abi.FuncPCABI0(usleep_trampoline)), unsafe.Pointer(&usec)) } func usleep_trampoline() //go:nosplit //go:cgo_unsafe_args func usleep_no_g(usec uint32) { asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(usleep_trampoline)), unsafe.Pointer(&usec)) } //go:nosplit //go:cgo_unsafe_args func write1(fd uintptr, p unsafe.Pointer, n int32) int32 { ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(write_trampoline)), unsafe.Pointer(&fd)) KeepAlive(p) return ret } func write_trampoline() //go:nosplit //go:cgo_unsafe_args func open(name *byte, mode, perm int32) (ret int32) { ret = libcCall(unsafe.Pointer(abi.FuncPCABI0(open_trampoline)), unsafe.Pointer(&name)) KeepAlive(name) return } func open_trampoline() //go:nosplit //go:cgo_unsafe_args func nanotime1() int64 { var r struct { t int64 // raw timer numer, denom uint32 // conversion factors. nanoseconds = t * numer / denom. } libcCall(unsafe.Pointer(abi.FuncPCABI0(nanotime_trampoline)), unsafe.Pointer(&r)) // Note: Apple seems unconcerned about overflow here. See // https://developer.apple.com/library/content/qa/qa1398/_index.html // Note also, numer == denom == 1 is common. t := r.t if r.numer != 1 { t *= int64(r.numer) } if r.denom != 1 { t /= int64(r.denom) } return t } func nanotime_trampoline() // walltime should be an internal detail, // but widely used packages access it using linkname. // Notable members of the hall of shame include: // - gitee.com/quant1x/gox // // Do not remove or change the type signature. // See go.dev/issue/67401. 
// //go:linkname walltime //go:nosplit //go:cgo_unsafe_args func walltime() (int64, int32) { var t timespec libcCall(unsafe.Pointer(abi.FuncPCABI0(walltime_trampoline)), unsafe.Pointer(&t)) return t.tv_sec, int32(t.tv_nsec) } func walltime_trampoline() //go:nosplit //go:cgo_unsafe_args func sigaction(sig uint32, new *usigactiont, old *usigactiont) { libcCall(unsafe.Pointer(abi.FuncPCABI0(sigaction_trampoline)), unsafe.Pointer(&sig)) KeepAlive(new) KeepAlive(old) } func sigaction_trampoline() //go:nosplit //go:cgo_unsafe_args func sigprocmask(how uint32, new *sigset, old *sigset) { libcCall(unsafe.Pointer(abi.FuncPCABI0(sigprocmask_trampoline)), unsafe.Pointer(&how)) KeepAlive(new) KeepAlive(old) } func sigprocmask_trampoline() //go:nosplit //go:cgo_unsafe_args func sigaltstack(new *stackt, old *stackt) { if new != nil && new.ss_flags&_SS_DISABLE != 0 && new.ss_size == 0 { // Despite the fact that Darwin's sigaltstack man page says it ignores the size // when SS_DISABLE is set, it doesn't. sigaltstack returns ENOMEM // if we don't give it a reasonable size. // ref: http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20140421/214296.html new.ss_size = 32768 } libcCall(unsafe.Pointer(abi.FuncPCABI0(sigaltstack_trampoline)), unsafe.Pointer(&new)) KeepAlive(new) KeepAlive(old) } func sigaltstack_trampoline() //go:nosplit //go:cgo_unsafe_args func raiseproc(sig uint32) { libcCall(unsafe.Pointer(abi.FuncPCABI0(raiseproc_trampoline)), unsafe.Pointer(&sig)) } func raiseproc_trampoline() //go:nosplit //go:cgo_unsafe_args func setitimer(mode int32, new, old *itimerval) { libcCall(unsafe.Pointer(abi.FuncPCABI0(setitimer_trampoline)), unsafe.Pointer(&mode)) KeepAlive(new) KeepAlive(old) } func setitimer_trampoline() //go:nosplit //go:cgo_unsafe_args func sysctl(mib *uint32, miblen uint32, oldp *byte, oldlenp *uintptr, newp *byte, newlen uintptr) int32 { ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(sysctl_trampoline)), unsafe.Pointer(&mib)) KeepAlive(mib) KeepAlive(oldp) KeepAlive(oldlenp) KeepAlive(newp) return ret } func sysctl_trampoline() //go:nosplit //go:cgo_unsafe_args func sysctlbyname(name *byte, oldp *byte, oldlenp *uintptr, newp *byte, newlen uintptr) int32 { ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(sysctlbyname_trampoline)), unsafe.Pointer(&name)) KeepAlive(name) KeepAlive(oldp) KeepAlive(oldlenp) KeepAlive(newp) return ret } func sysctlbyname_trampoline() //go:nosplit //go:cgo_unsafe_args func fcntl(fd, cmd, arg int32) (ret int32, errno int32) { args := struct { fd, cmd, arg int32 ret, errno int32 }{fd, cmd, arg, 0, 0} libcCall(unsafe.Pointer(abi.FuncPCABI0(fcntl_trampoline)), unsafe.Pointer(&args)) return args.ret, args.errno } func fcntl_trampoline() //go:nosplit //go:cgo_unsafe_args func kqueue() int32 { v := libcCall(unsafe.Pointer(abi.FuncPCABI0(kqueue_trampoline)), nil) return v } func kqueue_trampoline() //go:nosplit //go:cgo_unsafe_args func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32 { ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(kevent_trampoline)), unsafe.Pointer(&kq)) KeepAlive(ch) KeepAlive(ev) KeepAlive(ts) return ret } func kevent_trampoline() //go:nosplit //go:cgo_unsafe_args func pthread_mutex_init(m *pthreadmutex, attr *pthreadmutexattr) int32 { ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_mutex_init_trampoline)), unsafe.Pointer(&m)) KeepAlive(m) KeepAlive(attr) return ret } func pthread_mutex_init_trampoline() //go:nosplit //go:cgo_unsafe_args func pthread_mutex_lock(m *pthreadmutex) int32 { ret := 
libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_mutex_lock_trampoline)), unsafe.Pointer(&m)) KeepAlive(m) return ret } func pthread_mutex_lock_trampoline() //go:nosplit //go:cgo_unsafe_args func pthread_mutex_unlock(m *pthreadmutex) int32 { ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_mutex_unlock_trampoline)), unsafe.Pointer(&m)) KeepAlive(m) return ret } func pthread_mutex_unlock_trampoline() //go:nosplit //go:cgo_unsafe_args func pthread_cond_init(c *pthreadcond, attr *pthreadcondattr) int32 { ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_cond_init_trampoline)), unsafe.Pointer(&c)) KeepAlive(c) KeepAlive(attr) return ret } func pthread_cond_init_trampoline() //go:nosplit //go:cgo_unsafe_args func pthread_cond_wait(c *pthreadcond, m *pthreadmutex) int32 { ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_cond_wait_trampoline)), unsafe.Pointer(&c)) KeepAlive(c) KeepAlive(m) return ret } func pthread_cond_wait_trampoline() //go:nosplit //go:cgo_unsafe_args func pthread_cond_timedwait_relative_np(c *pthreadcond, m *pthreadmutex, t *timespec) int32 { ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_cond_timedwait_relative_np_trampoline)), unsafe.Pointer(&c)) KeepAlive(c) KeepAlive(m) KeepAlive(t) return ret } func pthread_cond_timedwait_relative_np_trampoline() //go:nosplit //go:cgo_unsafe_args func pthread_cond_signal(c *pthreadcond) int32 { ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_cond_signal_trampoline)), unsafe.Pointer(&c)) KeepAlive(c) return ret } func pthread_cond_signal_trampoline() // Not used on Darwin, but must be defined. func exitThread(wait *atomic.Uint32) { throw("exitThread") } //go:nosplit func setNonblock(fd int32) { flags, _ := fcntl(fd, _F_GETFL, 0) if flags != -1 { fcntl(fd, _F_SETFL, flags|_O_NONBLOCK) } } func issetugid() int32 { return libcCall(unsafe.Pointer(abi.FuncPCABI0(issetugid_trampoline)), nil) } func issetugid_trampoline() // mach_vm_region is used to obtain virtual memory mappings for use by the // profiling system and is only exported to runtime/pprof. It is restricted // to obtaining mappings for the current process. // //go:linkname mach_vm_region runtime/pprof.mach_vm_region func mach_vm_region(address, region_size *uint64, info unsafe.Pointer) int32 { // kern_return_t mach_vm_region( // vm_map_read_t target_task, // mach_vm_address_t *address, // mach_vm_size_t *size, // vm_region_flavor_t flavor, // vm_region_info_t info, // mach_msg_type_number_t *infoCnt, // mach_port_t *object_name); var count machMsgTypeNumber = _VM_REGION_BASIC_INFO_COUNT_64 var object_name machPort args := struct { address *uint64 size *uint64 flavor machVMRegionFlavour info unsafe.Pointer count *machMsgTypeNumber object_name *machPort }{ address: address, size: region_size, flavor: _VM_REGION_BASIC_INFO_64, info: info, count: &count, object_name: &object_name, } return libcCall(unsafe.Pointer(abi.FuncPCABI0(mach_vm_region_trampoline)), unsafe.Pointer(&args)) } func mach_vm_region_trampoline() //go:linkname proc_regionfilename runtime/pprof.proc_regionfilename func proc_regionfilename(pid int, address uint64, buf *byte, buflen int64) int32 { args := struct { pid int address uint64 buf *byte bufSize int64 }{ pid: pid, address: address, buf: buf, bufSize: buflen, } return libcCall(unsafe.Pointer(abi.FuncPCABI0(proc_regionfilename_trampoline)), unsafe.Pointer(&args)) } func proc_regionfilename_trampoline() // Tell the linker that the libc_* functions are to be found // in a system library, with the libc_ prefix missing. 
//go:cgo_import_dynamic libc_pthread_attr_init pthread_attr_init "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_pthread_attr_getstacksize pthread_attr_getstacksize "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_pthread_attr_setdetachstate pthread_attr_setdetachstate "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_pthread_create pthread_create "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_pthread_self pthread_self "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_pthread_kill pthread_kill "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_exit _exit "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_raise raise "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_madvise madvise "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_error __error "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_usleep usleep "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_proc_regionfilename proc_regionfilename "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_mach_task_self_ mach_task_self_ "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_mach_vm_region mach_vm_region "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_mach_timebase_info mach_timebase_info "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_mach_absolute_time mach_absolute_time "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_sigaction sigaction "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_pthread_sigmask pthread_sigmask "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_sigaltstack sigaltstack "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_setitimer setitimer "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_sysctlbyname sysctlbyname "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_pthread_mutex_init pthread_mutex_init "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_pthread_mutex_lock pthread_mutex_lock "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_pthread_mutex_unlock pthread_mutex_unlock "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_pthread_cond_init pthread_cond_init "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_pthread_cond_wait pthread_cond_wait "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_pthread_cond_timedwait_relative_np pthread_cond_timedwait_relative_np "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_pthread_cond_signal pthread_cond_signal "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_notify_is_valid_token notify_is_valid_token "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_xpc_date_create_from_current xpc_date_create_from_current "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib"
go/src/runtime/sys_darwin.go/0
{ "file_path": "go/src/runtime/sys_darwin.go", "repo_id": "go", "token_count": 9999 }
413
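The runtime's setNonblock above does an F_GETFL followed by F_SETFL with O_NONBLOCK through fcntl. Outside the runtime the same effect is available through the standard syscall package on Unix-like systems; the sketch below is a user-level analogue, not a substitute for the runtime code.

// User-level analogue of setNonblock; assumes a Unix-like platform.
package main

import (
	"fmt"
	"syscall"
)

func main() {
	fds := make([]int, 2)
	if err := syscall.Pipe(fds); err != nil {
		panic(err)
	}
	// Equivalent in spirit to the runtime's setNonblock(fd).
	if err := syscall.SetNonblock(fds[0], true); err != nil {
		panic(err)
	}
	fmt.Println("pipe read end is now non-blocking")
}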
// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build linux && (mips64 || mips64le) // // System calls and other sys.stuff for mips64, Linux // #include "go_asm.h" #include "go_tls.h" #include "textflag.h" #define AT_FDCWD -100 #define SYS_exit 5058 #define SYS_read 5000 #define SYS_write 5001 #define SYS_close 5003 #define SYS_getpid 5038 #define SYS_kill 5060 #define SYS_mmap 5009 #define SYS_munmap 5011 #define SYS_setitimer 5036 #define SYS_clone 5055 #define SYS_nanosleep 5034 #define SYS_sched_yield 5023 #define SYS_rt_sigreturn 5211 #define SYS_rt_sigaction 5013 #define SYS_rt_sigprocmask 5014 #define SYS_sigaltstack 5129 #define SYS_madvise 5027 #define SYS_mincore 5026 #define SYS_gettid 5178 #define SYS_futex 5194 #define SYS_sched_getaffinity 5196 #define SYS_exit_group 5205 #define SYS_timer_create 5216 #define SYS_timer_settime 5217 #define SYS_timer_delete 5220 #define SYS_tgkill 5225 #define SYS_openat 5247 #define SYS_clock_gettime 5222 #define SYS_brk 5012 #define SYS_pipe2 5287 TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0-4 MOVW code+0(FP), R4 MOVV $SYS_exit_group, R2 SYSCALL RET // func exitThread(wait *atomic.Uint32) TEXT runtime·exitThread(SB),NOSPLIT|NOFRAME,$0-8 MOVV wait+0(FP), R1 // We're done using the stack. MOVW $0, R2 SYNC MOVW R2, (R1) SYNC MOVW $0, R4 // exit code MOVV $SYS_exit, R2 SYSCALL JMP 0(PC) TEXT runtime·open(SB),NOSPLIT|NOFRAME,$0-20 // This uses openat instead of open, because Android O blocks open. MOVW $AT_FDCWD, R4 // AT_FDCWD, so this acts like open MOVV name+0(FP), R5 MOVW mode+8(FP), R6 MOVW perm+12(FP), R7 MOVV $SYS_openat, R2 SYSCALL BEQ R7, 2(PC) MOVW $-1, R2 MOVW R2, ret+16(FP) RET TEXT runtime·closefd(SB),NOSPLIT|NOFRAME,$0-12 MOVW fd+0(FP), R4 MOVV $SYS_close, R2 SYSCALL BEQ R7, 2(PC) MOVW $-1, R2 MOVW R2, ret+8(FP) RET TEXT runtime·write1(SB),NOSPLIT|NOFRAME,$0-28 MOVV fd+0(FP), R4 MOVV p+8(FP), R5 MOVW n+16(FP), R6 MOVV $SYS_write, R2 SYSCALL BEQ R7, 2(PC) SUBVU R2, R0, R2 // caller expects negative errno MOVW R2, ret+24(FP) RET TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0-28 MOVW fd+0(FP), R4 MOVV p+8(FP), R5 MOVW n+16(FP), R6 MOVV $SYS_read, R2 SYSCALL BEQ R7, 2(PC) SUBVU R2, R0, R2 // caller expects negative errno MOVW R2, ret+24(FP) RET // func pipe2(flags int32) (r, w int32, errno int32) TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20 MOVV $r+8(FP), R4 MOVW flags+0(FP), R5 MOVV $SYS_pipe2, R2 SYSCALL BEQ R7, 2(PC) SUBVU R2, R0, R2 // caller expects negative errno MOVW R2, errno+16(FP) RET TEXT runtime·usleep(SB),NOSPLIT,$16-4 MOVWU usec+0(FP), R3 MOVV R3, R5 MOVW $1000000, R4 DIVVU R4, R3 MOVV LO, R3 MOVV R3, 8(R29) MOVW $1000, R4 MULVU R3, R4 MOVV LO, R4 SUBVU R4, R5 MOVV R5, 16(R29) // nanosleep(&ts, 0) ADDV $8, R29, R4 MOVW $0, R5 MOVV $SYS_nanosleep, R2 SYSCALL RET TEXT runtime·gettid(SB),NOSPLIT,$0-4 MOVV $SYS_gettid, R2 SYSCALL MOVW R2, ret+0(FP) RET TEXT runtime·raise(SB),NOSPLIT|NOFRAME,$0 MOVV $SYS_getpid, R2 SYSCALL MOVW R2, R16 MOVV $SYS_gettid, R2 SYSCALL MOVW R2, R5 // arg 2 tid MOVW R16, R4 // arg 1 pid MOVW sig+0(FP), R6 // arg 3 MOVV $SYS_tgkill, R2 SYSCALL RET TEXT runtime·raiseproc(SB),NOSPLIT|NOFRAME,$0 MOVV $SYS_getpid, R2 SYSCALL MOVW R2, R4 // arg 1 pid MOVW sig+0(FP), R5 // arg 2 MOVV $SYS_kill, R2 SYSCALL RET TEXT ·getpid(SB),NOSPLIT|NOFRAME,$0-8 MOVV $SYS_getpid, R2 SYSCALL MOVV R2, ret+0(FP) RET TEXT ·tgkill(SB),NOSPLIT|NOFRAME,$0-24 MOVV tgid+0(FP), R4 MOVV tid+8(FP), R5 MOVV sig+16(FP), R6 MOVV 
$SYS_tgkill, R2 SYSCALL RET TEXT runtime·setitimer(SB),NOSPLIT|NOFRAME,$0-24 MOVW mode+0(FP), R4 MOVV new+8(FP), R5 MOVV old+16(FP), R6 MOVV $SYS_setitimer, R2 SYSCALL RET TEXT runtime·timer_create(SB),NOSPLIT,$0-28 MOVW clockid+0(FP), R4 MOVV sevp+8(FP), R5 MOVV timerid+16(FP), R6 MOVV $SYS_timer_create, R2 SYSCALL MOVW R2, ret+24(FP) RET TEXT runtime·timer_settime(SB),NOSPLIT,$0-28 MOVW timerid+0(FP), R4 MOVW flags+4(FP), R5 MOVV new+8(FP), R6 MOVV old+16(FP), R7 MOVV $SYS_timer_settime, R2 SYSCALL MOVW R2, ret+24(FP) RET TEXT runtime·timer_delete(SB),NOSPLIT,$0-12 MOVW timerid+0(FP), R4 MOVV $SYS_timer_delete, R2 SYSCALL MOVW R2, ret+8(FP) RET TEXT runtime·mincore(SB),NOSPLIT|NOFRAME,$0-28 MOVV addr+0(FP), R4 MOVV n+8(FP), R5 MOVV dst+16(FP), R6 MOVV $SYS_mincore, R2 SYSCALL SUBVU R2, R0, R2 // caller expects negative errno MOVW R2, ret+24(FP) RET // func walltime() (sec int64, nsec int32) TEXT runtime·walltime(SB),NOSPLIT,$16-12 MOVV R29, R16 // R16 is unchanged by C code MOVV R29, R1 MOVV g_m(g), R17 // R17 = m // Set vdsoPC and vdsoSP for SIGPROF traceback. // Save the old values on stack and restore them on exit, // so this function is reentrant. MOVV m_vdsoPC(R17), R2 MOVV m_vdsoSP(R17), R3 MOVV R2, 8(R29) MOVV R3, 16(R29) MOVV $ret-8(FP), R2 // caller's SP MOVV R31, m_vdsoPC(R17) MOVV R2, m_vdsoSP(R17) MOVV m_curg(R17), R4 MOVV g, R5 BNE R4, R5, noswitch MOVV m_g0(R17), R4 MOVV (g_sched+gobuf_sp)(R4), R1 // Set SP to g0 stack noswitch: SUBV $16, R1 AND $~15, R1 // Align for C code MOVV R1, R29 MOVW $0, R4 // CLOCK_REALTIME MOVV $0(R29), R5 MOVV runtime·vdsoClockgettimeSym(SB), R25 BEQ R25, fallback JAL (R25) // check on vdso call return for kernel compatibility // see https://golang.org/issues/39046 // if we get any error make fallback permanent. BEQ R2, R0, finish MOVV R0, runtime·vdsoClockgettimeSym(SB) MOVW $0, R4 // CLOCK_REALTIME MOVV $0(R29), R5 JMP fallback finish: MOVV 0(R29), R3 // sec MOVV 8(R29), R5 // nsec MOVV R16, R29 // restore SP // Restore vdsoPC, vdsoSP // We don't worry about being signaled between the two stores. // If we are not in a signal handler, we'll restore vdsoSP to 0, // and no one will care about vdsoPC. If we are in a signal handler, // we cannot receive another signal. MOVV 16(R29), R1 MOVV R1, m_vdsoSP(R17) MOVV 8(R29), R1 MOVV R1, m_vdsoPC(R17) MOVV R3, sec+0(FP) MOVW R5, nsec+8(FP) RET fallback: MOVV $SYS_clock_gettime, R2 SYSCALL JMP finish TEXT runtime·nanotime1(SB),NOSPLIT,$16-8 MOVV R29, R16 // R16 is unchanged by C code MOVV R29, R1 MOVV g_m(g), R17 // R17 = m // Set vdsoPC and vdsoSP for SIGPROF traceback. // Save the old values on stack and restore them on exit, // so this function is reentrant. MOVV m_vdsoPC(R17), R2 MOVV m_vdsoSP(R17), R3 MOVV R2, 8(R29) MOVV R3, 16(R29) MOVV $ret-8(FP), R2 // caller's SP MOVV R31, m_vdsoPC(R17) MOVV R2, m_vdsoSP(R17) MOVV m_curg(R17), R4 MOVV g, R5 BNE R4, R5, noswitch MOVV m_g0(R17), R4 MOVV (g_sched+gobuf_sp)(R4), R1 // Set SP to g0 stack noswitch: SUBV $16, R1 AND $~15, R1 // Align for C code MOVV R1, R29 MOVW $1, R4 // CLOCK_MONOTONIC MOVV $0(R29), R5 MOVV runtime·vdsoClockgettimeSym(SB), R25 BEQ R25, fallback JAL (R25) // see walltime for detail BEQ R2, R0, finish MOVV R0, runtime·vdsoClockgettimeSym(SB) MOVW $1, R4 // CLOCK_MONOTONIC MOVV $0(R29), R5 JMP fallback finish: MOVV 0(R29), R3 // sec MOVV 8(R29), R5 // nsec MOVV R16, R29 // restore SP // Restore vdsoPC, vdsoSP // We don't worry about being signaled between the two stores. 
// If we are not in a signal handler, we'll restore vdsoSP to 0, // and no one will care about vdsoPC. If we are in a signal handler, // we cannot receive another signal. MOVV 16(R29), R1 MOVV R1, m_vdsoSP(R17) MOVV 8(R29), R1 MOVV R1, m_vdsoPC(R17) // sec is in R3, nsec in R5 // return nsec in R3 MOVV $1000000000, R4 MULVU R4, R3 MOVV LO, R3 ADDVU R5, R3 MOVV R3, ret+0(FP) RET fallback: MOVV $SYS_clock_gettime, R2 SYSCALL JMP finish TEXT runtime·rtsigprocmask(SB),NOSPLIT|NOFRAME,$0-28 MOVW how+0(FP), R4 MOVV new+8(FP), R5 MOVV old+16(FP), R6 MOVW size+24(FP), R7 MOVV $SYS_rt_sigprocmask, R2 SYSCALL BEQ R7, 2(PC) MOVV R0, 0xf1(R0) // crash RET TEXT runtime·rt_sigaction(SB),NOSPLIT|NOFRAME,$0-36 MOVV sig+0(FP), R4 MOVV new+8(FP), R5 MOVV old+16(FP), R6 MOVV size+24(FP), R7 MOVV $SYS_rt_sigaction, R2 SYSCALL BEQ R7, 2(PC) SUBVU R2, R0, R2 // caller expects negative errno MOVW R2, ret+32(FP) RET TEXT runtime·sigfwd(SB),NOSPLIT,$0-32 MOVW sig+8(FP), R4 MOVV info+16(FP), R5 MOVV ctx+24(FP), R6 MOVV fn+0(FP), R25 JAL (R25) RET TEXT runtime·sigtramp(SB),NOSPLIT|TOPFRAME,$64 // initialize REGSB = PC&0xffffffff00000000 BGEZAL R0, 1(PC) SRLV $32, R31, RSB SLLV $32, RSB // this might be called in external code context, // where g is not set. MOVB runtime·iscgo(SB), R1 BEQ R1, 2(PC) JAL runtime·load_g(SB) MOVW R4, 8(R29) MOVV R5, 16(R29) MOVV R6, 24(R29) MOVV $runtime·sigtrampgo(SB), R1 JAL (R1) RET TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0 JMP runtime·sigtramp(SB) TEXT runtime·mmap(SB),NOSPLIT|NOFRAME,$0 MOVV addr+0(FP), R4 MOVV n+8(FP), R5 MOVW prot+16(FP), R6 MOVW flags+20(FP), R7 MOVW fd+24(FP), R8 MOVW off+28(FP), R9 MOVV $SYS_mmap, R2 SYSCALL BEQ R7, ok MOVV $0, p+32(FP) MOVV R2, err+40(FP) RET ok: MOVV R2, p+32(FP) MOVV $0, err+40(FP) RET TEXT runtime·munmap(SB),NOSPLIT|NOFRAME,$0 MOVV addr+0(FP), R4 MOVV n+8(FP), R5 MOVV $SYS_munmap, R2 SYSCALL BEQ R7, 2(PC) MOVV R0, 0xf3(R0) // crash RET TEXT runtime·madvise(SB),NOSPLIT|NOFRAME,$0 MOVV addr+0(FP), R4 MOVV n+8(FP), R5 MOVW flags+16(FP), R6 MOVV $SYS_madvise, R2 SYSCALL MOVW R2, ret+24(FP) RET // int64 futex(int32 *uaddr, int32 op, int32 val, // struct timespec *timeout, int32 *uaddr2, int32 val2); TEXT runtime·futex(SB),NOSPLIT|NOFRAME,$0 MOVV addr+0(FP), R4 MOVW op+8(FP), R5 MOVW val+12(FP), R6 MOVV ts+16(FP), R7 MOVV addr2+24(FP), R8 MOVW val3+32(FP), R9 MOVV $SYS_futex, R2 SYSCALL BEQ R7, 2(PC) SUBVU R2, R0, R2 // caller expects negative errno MOVW R2, ret+40(FP) RET // int64 clone(int32 flags, void *stk, M *mp, G *gp, void (*fn)(void)); TEXT runtime·clone(SB),NOSPLIT|NOFRAME,$0 MOVW flags+0(FP), R4 MOVV stk+8(FP), R5 // Copy mp, gp, fn off parent stack for use by child. // Careful: Linux system call clobbers ???. MOVV mp+16(FP), R16 MOVV gp+24(FP), R17 MOVV fn+32(FP), R18 MOVV R16, -8(R5) MOVV R17, -16(R5) MOVV R18, -24(R5) MOVV $1234, R16 MOVV R16, -32(R5) MOVV $SYS_clone, R2 SYSCALL BEQ R7, 2(PC) SUBVU R2, R0, R2 // caller expects negative errno // In parent, return. BEQ R2, 3(PC) MOVW R2, ret+40(FP) RET // In child, on new stack. MOVV -32(R29), R16 MOVV $1234, R1 BEQ R16, R1, 2(PC) MOVV R0, 0(R0) // Initialize m->procid to Linux tid MOVV $SYS_gettid, R2 SYSCALL MOVV -24(R29), R18 // fn MOVV -16(R29), R17 // g MOVV -8(R29), R16 // m BEQ R16, nog BEQ R17, nog MOVV R2, m_procid(R16) // TODO: setup TLS. // In child, set up new stack MOVV R16, g_m(R17) MOVV R17, g //CALL runtime·stackcheck(SB) nog: // Call fn JAL (R18) // It shouldn't return. If it does, exit that thread. 
MOVW $111, R4 MOVV $SYS_exit, R2 SYSCALL JMP -3(PC) // keep exiting TEXT runtime·sigaltstack(SB),NOSPLIT|NOFRAME,$0 MOVV new+0(FP), R4 MOVV old+8(FP), R5 MOVV $SYS_sigaltstack, R2 SYSCALL BEQ R7, 2(PC) MOVV R0, 0xf1(R0) // crash RET TEXT runtime·osyield(SB),NOSPLIT|NOFRAME,$0 MOVV $SYS_sched_yield, R2 SYSCALL RET TEXT runtime·sched_getaffinity(SB),NOSPLIT|NOFRAME,$0 MOVV pid+0(FP), R4 MOVV len+8(FP), R5 MOVV buf+16(FP), R6 MOVV $SYS_sched_getaffinity, R2 SYSCALL BEQ R7, 2(PC) SUBVU R2, R0, R2 // caller expects negative errno MOVW R2, ret+24(FP) RET // func sbrk0() uintptr TEXT runtime·sbrk0(SB),NOSPLIT|NOFRAME,$0-8 // Implemented as brk(NULL). MOVV $0, R4 MOVV $SYS_brk, R2 SYSCALL MOVV R2, ret+0(FP) RET TEXT runtime·access(SB),$0-20 MOVV R0, 2(R0) // unimplemented, only needed for android; declared in stubs_linux.go MOVW R0, ret+16(FP) // for vet RET TEXT runtime·connect(SB),$0-28 MOVV R0, 2(R0) // unimplemented, only needed for android; declared in stubs_linux.go MOVW R0, ret+24(FP) // for vet RET TEXT runtime·socket(SB),$0-20 MOVV R0, 2(R0) // unimplemented, only needed for android; declared in stubs_linux.go MOVW R0, ret+16(FP) // for vet RET
go/src/runtime/sys_linux_mips64x.s/0
{ "file_path": "go/src/runtime/sys_linux_mips64x.s", "repo_id": "go", "token_count": 6715 }
414
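The nanotime1 stub above multiplies the seconds field returned by clock_gettime by 1e9 and adds the nanoseconds field. The tiny sketch below (illustrative only, not runtime code) shows that combination in Go.

// Illustrative sketch: fold a timespec into a single nanosecond count.
package main

import "fmt"

func combine(sec, nsec int64) int64 {
	return sec*1_000_000_000 + nsec
}

func main() {
	fmt.Println(combine(2, 500_000_000)) // 2500000000
}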
// Copyright 2020 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build openbsd && !mips64 package runtime import ( "internal/abi" "unsafe" ) // The X versions of syscall expect the libc call to return a 64-bit result. // Otherwise (the non-X version) expects a 32-bit result. // This distinction is required because an error is indicated by returning -1, // and we need to know whether to check 32 or 64 bits of the result. // (Some libc functions that return 32 bits put junk in the upper 32 bits of AX.) // golang.org/x/sys linknames syscall_syscall // (in addition to standard package syscall). // Do not remove or change the type signature. // //go:linkname syscall_syscall syscall.syscall //go:nosplit //go:cgo_unsafe_args func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { entersyscall() libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn)) exitsyscall() return } func syscall() //go:linkname syscall_syscallX syscall.syscallX //go:nosplit //go:cgo_unsafe_args func syscall_syscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { entersyscall() libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallX)), unsafe.Pointer(&fn)) exitsyscall() return } func syscallX() // golang.org/x/sys linknames syscall.syscall6 // (in addition to standard package syscall). // Do not remove or change the type signature. // //go:linkname syscall_syscall6 syscall.syscall6 //go:nosplit //go:cgo_unsafe_args func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { entersyscall() libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn)) exitsyscall() return } func syscall6() //go:linkname syscall_syscall6X syscall.syscall6X //go:nosplit //go:cgo_unsafe_args func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { entersyscall() libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&fn)) exitsyscall() return } func syscall6X() // golang.org/x/sys linknames syscall.syscall10 // (in addition to standard package syscall). // Do not remove or change the type signature. // //go:linkname syscall_syscall10 syscall.syscall10 //go:nosplit //go:cgo_unsafe_args func syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) { entersyscall() libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10)), unsafe.Pointer(&fn)) exitsyscall() return } func syscall10() //go:linkname syscall_syscall10X syscall.syscall10X //go:nosplit //go:cgo_unsafe_args func syscall_syscall10X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) { entersyscall() libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10X)), unsafe.Pointer(&fn)) exitsyscall() return } func syscall10X() // golang.org/x/sys linknames syscall_rawSyscall // (in addition to standard package syscall). // Do not remove or change the type signature. // //go:linkname syscall_rawSyscall syscall.rawSyscall //go:nosplit //go:cgo_unsafe_args func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn)) return } // golang.org/x/sys linknames syscall_rawSyscall6 // (in addition to standard package syscall). // Do not remove or change the type signature. 
// //go:linkname syscall_rawSyscall6 syscall.rawSyscall6 //go:nosplit //go:cgo_unsafe_args func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn)) return } //go:linkname syscall_rawSyscall6X syscall.rawSyscall6X //go:nosplit //go:cgo_unsafe_args func syscall_rawSyscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&fn)) return } //go:linkname syscall_rawSyscall10X syscall.rawSyscall10X //go:nosplit //go:cgo_unsafe_args func syscall_rawSyscall10X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) { libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10X)), unsafe.Pointer(&fn)) return }
go/src/runtime/sys_openbsd3.go/0
{ "file_path": "go/src/runtime/sys_openbsd3.go", "repo_id": "go", "token_count": 1748 }
415
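The header comment above explains why the X and non-X wrappers differ: some libc routines return a 32-bit result and may leave junk in the upper half of the register, so only the low 32 bits can be tested against -1. The sketch below uses a hypothetical register value to show the difference.

// Illustrative sketch with a made-up raw register value.
package main

import "fmt"

func main() {
	raw := uint64(0xdeadbeef_ffffffff) // junk in the upper half, 32-bit -1 below

	fmt.Println(int32(raw) == -1) // true: the 32-bit view signals an error
	fmt.Println(int64(raw) == -1) // false: testing all 64 bits would miss it
}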
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. #include "textflag.h" TEXT runtime·wasmDiv(SB), NOSPLIT, $0-0 Get R0 I64Const $-0x8000000000000000 I64Eq If Get R1 I64Const $-1 I64Eq If I64Const $-0x8000000000000000 Return End End Get R0 Get R1 I64DivS Return TEXT runtime·wasmTruncS(SB), NOSPLIT, $0-0 Get R0 Get R0 F64Ne // NaN If I64Const $0x8000000000000000 Return End Get R0 F64Const $0x7ffffffffffffc00p0 // Maximum truncated representation of 0x7fffffffffffffff F64Gt If I64Const $0x8000000000000000 Return End Get R0 F64Const $-0x7ffffffffffffc00p0 // Minimum truncated representation of -0x8000000000000000 F64Lt If I64Const $0x8000000000000000 Return End Get R0 I64TruncF64S Return TEXT runtime·wasmTruncU(SB), NOSPLIT, $0-0 Get R0 Get R0 F64Ne // NaN If I64Const $0x8000000000000000 Return End Get R0 F64Const $0xfffffffffffff800p0 // Maximum truncated representation of 0xffffffffffffffff F64Gt If I64Const $0x8000000000000000 Return End Get R0 F64Const $0. F64Lt If I64Const $0x8000000000000000 Return End Get R0 I64TruncF64U Return TEXT runtime·exitThread(SB), NOSPLIT, $0-0 UNDEF TEXT runtime·osyield(SB), NOSPLIT, $0-0 UNDEF TEXT runtime·growMemory(SB), NOSPLIT, $0 Get SP I32Load pages+0(FP) GrowMemory I32Store ret+8(FP) RET
go/src/runtime/sys_wasm.s/0
{ "file_path": "go/src/runtime/sys_wasm.s", "repo_id": "go", "token_count": 646 }
416
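wasmTruncS above clamps against 0x7ffffffffffffc00, described as the maximum truncated representation of 0x7fffffffffffffff. The standalone check below (not part of the runtime) confirms that this is the largest float64 that still fits in an int64, since float64 cannot represent MaxInt64 exactly and rounds it up to 2^63.

// Standalone check of the truncation clamp constant.
package main

import (
	"fmt"
	"math"
)

func main() {
	limit := math.Nextafter(float64(math.MaxInt64), 0) // largest float64 below 2^63

	fmt.Printf("%x\n", int64(limit))             // 7ffffffffffffc00
	fmt.Println(float64(math.MaxInt64) == 1<<63) // true: MaxInt64 rounds up to 2^63
}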
// Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Create a large frame to force stack growth. See #62326. TEXT ·testSPWrite(SB),0,$16384-0 // Write to SP MOVQ SP, AX ANDQ $~0xf, SP MOVQ AX, SP RET
go/src/runtime/test_amd64.s/0
{ "file_path": "go/src/runtime/test_amd64.s", "repo_id": "go", "token_count": 109 }
417
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main import ( "os" "runtime" "sync" "time" ) var mainTID int func init() { registerInit("LockOSThreadMain", func() { // init is guaranteed to run on the main thread. mainTID = gettid() }) register("LockOSThreadMain", LockOSThreadMain) registerInit("LockOSThreadAlt", func() { // Lock the OS thread now so main runs on the main thread. runtime.LockOSThread() }) register("LockOSThreadAlt", LockOSThreadAlt) registerInit("LockOSThreadAvoidsStatePropagation", func() { // Lock the OS thread now so main runs on the main thread. runtime.LockOSThread() }) register("LockOSThreadAvoidsStatePropagation", LockOSThreadAvoidsStatePropagation) register("LockOSThreadTemplateThreadRace", LockOSThreadTemplateThreadRace) } func LockOSThreadMain() { // gettid only works on Linux, so on other platforms this just // checks that the runtime doesn't do anything terrible. // This requires GOMAXPROCS=1 from the beginning to reliably // start a goroutine on the main thread. if runtime.GOMAXPROCS(-1) != 1 { println("requires GOMAXPROCS=1") os.Exit(1) } ready := make(chan bool, 1) go func() { // Because GOMAXPROCS=1, this *should* be on the main // thread. Stay there. runtime.LockOSThread() if mainTID != 0 && gettid() != mainTID { println("failed to start goroutine on main thread") os.Exit(1) } // Exit with the thread locked, which should exit the // main thread. ready <- true }() <-ready time.Sleep(1 * time.Millisecond) // Check that this goroutine is still running on a different // thread. if mainTID != 0 && gettid() == mainTID { println("goroutine migrated to locked thread") os.Exit(1) } println("OK") } func LockOSThreadAlt() { // This is running locked to the main OS thread. var subTID int ready := make(chan bool, 1) go func() { // This goroutine must be running on a new thread. runtime.LockOSThread() subTID = gettid() ready <- true // Exit with the thread locked. }() <-ready runtime.UnlockOSThread() for i := 0; i < 100; i++ { time.Sleep(1 * time.Millisecond) // Check that this goroutine is running on a different thread. if subTID != 0 && gettid() == subTID { println("locked thread reused") os.Exit(1) } exists, supported, err := tidExists(subTID) if err != nil { println("error:", err.Error()) return } if !supported || !exists { goto ok } } println("sub thread", subTID, "still running") return ok: println("OK") } func LockOSThreadAvoidsStatePropagation() { // This test is similar to LockOSThreadAlt in that it will detect if a thread // which should have died is still running. However, rather than do this with // thread IDs, it does this by unsharing state on that thread. This way, it // also detects whether new threads were cloned from the dead thread, and not // from a clean thread. Cloning from a locked thread is undesirable since // cloned threads will inherit potentially unwanted OS state. // // unshareFs, getcwd, and chdir("/tmp") are only guaranteed to work on // Linux, so on other platforms this just checks that the runtime doesn't // do anything terrible. // // This is running locked to the main OS thread. // GOMAXPROCS=1 makes this fail much more reliably if a tainted thread is // cloned from. if runtime.GOMAXPROCS(-1) != 1 { println("requires GOMAXPROCS=1") os.Exit(1) } if err := chdir("/"); err != nil { println("failed to chdir:", err.Error()) os.Exit(1) } // On systems other than Linux, cwd == "". 
cwd, err := getcwd() if err != nil { println("failed to get cwd:", err.Error()) os.Exit(1) } if cwd != "" && cwd != "/" { println("unexpected cwd", cwd, " wanted /") os.Exit(1) } ready := make(chan bool, 1) go func() { // This goroutine must be running on a new thread. runtime.LockOSThread() // Unshare details about the FS, like the CWD, with // the rest of the process on this thread. // On systems other than Linux, this is a no-op. if err := unshareFs(); err != nil { if err == errNotPermitted { println("unshare not permitted") os.Exit(0) } println("failed to unshare fs:", err.Error()) os.Exit(1) } // Chdir to somewhere else on this thread. // On systems other than Linux, this is a no-op. if err := chdir(os.TempDir()); err != nil { println("failed to chdir:", err.Error()) os.Exit(1) } // The state on this thread is now considered "tainted", but it // should no longer be observable in any other context. ready <- true // Exit with the thread locked. }() <-ready // Spawn yet another goroutine and lock it. Since GOMAXPROCS=1, if // for some reason state from the (hopefully dead) locked thread above // propagated into a newly created thread (via clone), or that thread // is actually being re-used, then we should get scheduled on such a // thread with high likelihood. done := make(chan bool) go func() { runtime.LockOSThread() // Get the CWD and check if this is the same as the main thread's // CWD. Every thread should share the same CWD. // On systems other than Linux, wd == "". wd, err := getcwd() if err != nil { println("failed to get cwd:", err.Error()) os.Exit(1) } if wd != cwd { println("bad state from old thread propagated after it should have died") os.Exit(1) } <-done runtime.UnlockOSThread() }() done <- true runtime.UnlockOSThread() println("OK") } func LockOSThreadTemplateThreadRace() { // This test attempts to reproduce the race described in // golang.org/issue/38931. To do so, we must have a stop-the-world // (achieved via ReadMemStats) racing with two LockOSThread calls. // // While this test attempts to line up the timing, it is only expected // to fail (and thus hang) around 2% of the time if the race is // present. // Ensure enough Ps to actually run everything in parallel. Though on // <4 core machines, we are still at the whim of the kernel scheduler. runtime.GOMAXPROCS(4) go func() { // Stop the world; race with LockOSThread below. var m runtime.MemStats for { runtime.ReadMemStats(&m) } }() // Try to synchronize both LockOSThreads. start := time.Now().Add(10 * time.Millisecond) var wg sync.WaitGroup wg.Add(2) for i := 0; i < 2; i++ { go func() { for time.Now().Before(start) { } // Add work to the local runq to trigger early startm // in handoffp. go func() {}() runtime.LockOSThread() runtime.Gosched() // add a preemption point. wg.Done() }() } wg.Wait() // If both LockOSThreads completed then we did not hit the race. println("OK") }
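A minimal user-level sketch, separate from the test above, of the guarantee this test exercises: a goroutine that locks its OS thread and exits without unlocking terminates that thread, so per-thread OS state it changed cannot be inherited by other goroutines.

package main

import "runtime"

func main() {
	done := make(chan struct{})
	go func() {
		defer close(done)
		runtime.LockOSThread()
		// ... mutate per-thread OS state here (signal mask, namespaces,
		// cwd via unshare, ...) ...
		// Returning without UnlockOSThread: the runtime discards this
		// thread, so the tainted state cannot leak to other goroutines.
	}()
	<-done
}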
go/src/runtime/testdata/testprog/lockosthread.go/0
{ "file_path": "go/src/runtime/testdata/testprog/lockosthread.go", "repo_id": "go", "token_count": 2351 }
418
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main import ( "bytes" "fmt" "internal/testenv" "io" "os" "syscall" ) func gettid() int { return syscall.Gettid() } func tidExists(tid int) (exists, supported bool, err error) { // Open the magic proc status file for reading with the syscall package. // We want to identify certain valid errors very precisely. statusFile := fmt.Sprintf("/proc/self/task/%d/status", tid) fd, err := syscall.Open(statusFile, syscall.O_RDONLY, 0) if errno, ok := err.(syscall.Errno); ok { if errno == syscall.ENOENT || errno == syscall.ESRCH { return false, true, nil } } if err != nil { return false, false, err } status, err := io.ReadAll(os.NewFile(uintptr(fd), statusFile)) if err != nil { return false, false, err } lines := bytes.Split(status, []byte{'\n'}) // Find the State line. stateLineIdx := -1 for i, line := range lines { if bytes.HasPrefix(line, []byte("State:")) { stateLineIdx = i break } } if stateLineIdx < 0 { // Malformed status file? return false, false, fmt.Errorf("unexpected status file format: %s:\n%s", statusFile, status) } stateLine := bytes.SplitN(lines[stateLineIdx], []byte{':'}, 2) if len(stateLine) != 2 { // Malformed status file? return false, false, fmt.Errorf("unexpected status file format: %s:\n%s", statusFile, status) } // Check if it's a zombie thread. return !bytes.Contains(stateLine[1], []byte{'Z'}), true, nil } func getcwd() (string, error) { if !syscall.ImplementsGetwd { return "", nil } // Use the syscall to get the current working directory. // This is imperative for checking for OS thread state // after an unshare since os.Getwd might just check the // environment, or use some other mechanism. var buf [4096]byte n, err := syscall.Getcwd(buf[:]) if err != nil { return "", err } // Subtract one for null terminator. return string(buf[:n-1]), nil } func unshareFs() error { err := syscall.Unshare(syscall.CLONE_FS) if testenv.SyscallIsNotSupported(err) { return errNotPermitted } return err } func chdir(path string) error { return syscall.Chdir(path) }
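The State parsing in tidExists follows the proc(5) status layout. A standalone sketch of the same check applied to a hypothetical /proc/<pid>/task/<tid>/status excerpt:

package main

import (
	"bytes"
	"fmt"
)

// aliveFromStatus reports whether the status text describes a live (non-zombie) thread.
func aliveFromStatus(status []byte) bool {
	for _, line := range bytes.Split(status, []byte{'\n'}) {
		if bytes.HasPrefix(line, []byte("State:")) {
			return !bytes.Contains(line, []byte{'Z'}) // Z means zombie
		}
	}
	return false // malformed: no State line
}

func main() {
	sample := []byte("Name:\tfoo\nState:\tZ (zombie)\nTgid:\t4242\n") // invented excerpt
	fmt.Println(aliveFromStatus(sample))                              // false
}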
go/src/runtime/testdata/testprog/syscalls_linux.go/0
{ "file_path": "go/src/runtime/testdata/testprog/syscalls_linux.go", "repo_id": "go", "token_count": 837 }
419
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build !plan9 && !windows // +build !plan9,!windows package main import ( "os" "runtime" "sync/atomic" "time" "unsafe" ) /* #include <pthread.h> #include <stdint.h> extern uint32_t threadExited; void setExited(void *x); */ import "C" var mainThread C.pthread_t func init() { registerInit("LockOSThreadMain", func() { // init is guaranteed to run on the main thread. mainThread = C.pthread_self() }) register("LockOSThreadMain", LockOSThreadMain) registerInit("LockOSThreadAlt", func() { // Lock the OS thread now so main runs on the main thread. runtime.LockOSThread() }) register("LockOSThreadAlt", LockOSThreadAlt) } func LockOSThreadMain() { // This requires GOMAXPROCS=1 from the beginning to reliably // start a goroutine on the main thread. if runtime.GOMAXPROCS(-1) != 1 { println("requires GOMAXPROCS=1") os.Exit(1) } ready := make(chan bool, 1) go func() { // Because GOMAXPROCS=1, this *should* be on the main // thread. Stay there. runtime.LockOSThread() self := C.pthread_self() if C.pthread_equal(mainThread, self) == 0 { println("failed to start goroutine on main thread") os.Exit(1) } // Exit with the thread locked, which should exit the // main thread. ready <- true }() <-ready time.Sleep(1 * time.Millisecond) // Check that this goroutine is still running on a different // thread. self := C.pthread_self() if C.pthread_equal(mainThread, self) != 0 { println("goroutine migrated to locked thread") os.Exit(1) } println("OK") } func LockOSThreadAlt() { // This is running locked to the main OS thread. var subThread C.pthread_t ready := make(chan bool, 1) C.threadExited = 0 go func() { // This goroutine must be running on a new thread. runtime.LockOSThread() subThread = C.pthread_self() // Register a pthread destructor so we can tell this // thread has exited. var key C.pthread_key_t C.pthread_key_create(&key, (*[0]byte)(unsafe.Pointer(C.setExited))) C.pthread_setspecific(key, unsafe.Pointer(new(int))) ready <- true // Exit with the thread locked. }() <-ready for { time.Sleep(1 * time.Millisecond) // Check that this goroutine is running on a different thread. self := C.pthread_self() if C.pthread_equal(subThread, self) != 0 { println("locked thread reused") os.Exit(1) } if atomic.LoadUint32((*uint32)(&C.threadExited)) != 0 { println("OK") return } } }
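A compressed standalone sketch of the pthread-destructor trick used above (assumes cgo and a C11 toolchain; the fixed sleep is illustrative only, a real test polls as the file above does): the key destructor is the hook that still runs on the locked thread as it exits.

package main

/*
#include <pthread.h>
#include <stdatomic.h>

static atomic_int exited;
static void markExited(void *p) { atomic_store(&exited, 1); }
static void registerDestructor(void) {
	pthread_key_t key;
	pthread_key_create(&key, markExited);
	pthread_setspecific(key, (void *)1); // any non-NULL value arms the destructor
}
static int hasExited(void) { return atomic_load(&exited); }
*/
import "C"

import (
	"fmt"
	"runtime"
	"time"
)

func main() {
	go func() {
		runtime.LockOSThread()
		C.registerDestructor()
		// Return while locked: the thread exits and its TSD destructor runs.
	}()
	time.Sleep(100 * time.Millisecond)
	fmt.Println("locked thread exited:", C.hasExited() != 0)
}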
go/src/runtime/testdata/testprogcgo/lockosthread.go/0
{ "file_path": "go/src/runtime/testdata/testprogcgo/lockosthread.go", "repo_id": "go", "token_count": 962 }
420
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import "os"

var cmds = map[string]func(){}

func register(name string, f func()) {
	if cmds[name] != nil {
		panic("duplicate registration: " + name)
	}
	cmds[name] = f
}

func registerInit(name string, f func()) {
	if len(os.Args) >= 2 && os.Args[1] == name {
		f()
	}
}

func main() {
	if len(os.Args) < 2 {
		println("usage: " + os.Args[0] + " name-of-test")
		return
	}
	f := cmds[os.Args[1]]
	if f == nil {
		println("unknown function: " + os.Args[1])
		return
	}
	f()
}
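A hypothetical usage sketch of this harness (the HelloWorld name is invented): another file in the same package registers a command, and the test is then run as "./testprognet HelloWorld".

package main

func init() {
	registerInit("HelloWorld", func() {
		// Runs before main, and only when this command was requested;
		// useful for locking state very early (e.g. LockOSThread).
	})
	register("HelloWorld", HelloWorld)
}

func HelloWorld() {
	println("hello")
}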
go/src/runtime/testdata/testprognet/main.go/0
{ "file_path": "go/src/runtime/testdata/testprognet/main.go", "repo_id": "go", "token_count": 255 }
421
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Time-related runtime and pieces of package time. package runtime import ( "internal/abi" "internal/runtime/atomic" "internal/runtime/sys" "unsafe" ) // A timer is a potentially repeating trigger for calling t.f(t.arg, t.seq). // Timers are allocated by client code, often as part of other data structures. // Each P has a heap of pointers to timers that it manages. // // A timer is expected to be used by only one client goroutine at a time, // but there will be concurrent access by the P managing that timer. // Timer accesses are protected by the lock t.mu, with a snapshot of // t's state bits published in t.astate to enable certain fast paths to make // decisions about a timer without acquiring the lock. type timer struct { // mu protects reads and writes to all fields, with exceptions noted below. mu mutex astate atomic.Uint8 // atomic copy of state bits at last unlock state uint8 // state bits isChan bool // timer has a channel; immutable; can be read without lock blocked uint32 // number of goroutines blocked on timer's channel // Timer wakes up at when, and then at when+period, ... (period > 0 only) // each time calling f(arg, seq, delay) in the timer goroutine, so f must be // a well-behaved function and not block. // // The arg and seq are client-specified opaque arguments passed back to f. // When used from netpoll, arg and seq have meanings defined by netpoll // and are completely opaque to this code; in that context, seq is a sequence // number to recognize and squech stale function invocations. // When used from package time, arg is a channel (for After, NewTicker) // or the function to call (for AfterFunc) and seq is unused (0). // // Package time does not know about seq, but if this is a channel timer (t.isChan == true), // this file uses t.seq as a sequence number to recognize and squelch // sends that correspond to an earlier (stale) timer configuration, // similar to its use in netpoll. In this usage (that is, when t.isChan == true), // writes to seq are protected by both t.mu and t.sendLock, // so reads are allowed when holding either of the two mutexes. // // The delay argument is nanotime() - t.when, meaning the delay in ns between // when the timer should have gone off and now. Normally that amount is // small enough not to matter, but for channel timers that are fed lazily, // the delay can be arbitrarily long; package time subtracts it out to make // it look like the send happened earlier than it actually did. // (No one looked at the channel since then, or the send would have // not happened so late, so no one can tell the difference.) when int64 period int64 f func(arg any, seq uintptr, delay int64) arg any seq uintptr // If non-nil, the timers containing t. ts *timers // sendLock protects sends on the timer's channel. // Not used for async (pre-Go 1.23) behavior when debug.asynctimerchan.Load() != 0. sendLock mutex } // init initializes a newly allocated timer t. // Any code that allocates a timer must call t.init before using it. // The arg and f can be set during init, or they can be nil in init // and set by a future call to t.modify. func (t *timer) init(f func(arg any, seq uintptr, delay int64), arg any) { lockInit(&t.mu, lockRankTimer) t.f = f t.arg = arg } // A timers is a per-P set of timers. 
type timers struct { // mu protects timers; timers are per-P, but the scheduler can // access the timers of another P, so we have to lock. mu mutex // heap is the set of timers, ordered by heap[i].when. // Must hold lock to access. heap []timerWhen // len is an atomic copy of len(heap). len atomic.Uint32 // zombies is the number of timers in the heap // that are marked for removal. zombies atomic.Int32 // raceCtx is the race context used while executing timer functions. raceCtx uintptr // minWhenHeap is the minimum heap[i].when value (= heap[0].when). // The wakeTime method uses minWhenHeap and minWhenModified // to determine the next wake time. // If minWhenHeap = 0, it means there are no timers in the heap. minWhenHeap atomic.Int64 // minWhenModified is a lower bound on the minimum // heap[i].when over timers with the timerModified bit set. // If minWhenModified = 0, it means there are no timerModified timers in the heap. minWhenModified atomic.Int64 } type timerWhen struct { timer *timer when int64 } func (ts *timers) lock() { lock(&ts.mu) } func (ts *timers) unlock() { // Update atomic copy of len(ts.heap). // We only update at unlock so that the len is always // the most recent unlocked length, not an ephemeral length. // This matters if we lock ts, delete the only timer from the heap, // add it back, and unlock. We want ts.len.Load to return 1 the // entire time, never 0. This is important for pidleput deciding // whether ts is empty. ts.len.Store(uint32(len(ts.heap))) unlock(&ts.mu) } // Timer state field. const ( // timerHeaped is set when the timer is stored in some P's heap. timerHeaped uint8 = 1 << iota // timerModified is set when t.when has been modified // but the heap's heap[i].when entry still needs to be updated. // That change waits until the heap in which // the timer appears can be locked and rearranged. // timerModified is only set when timerHeaped is also set. timerModified // timerZombie is set when the timer has been stopped // but is still present in some P's heap. // Only set when timerHeaped is also set. // It is possible for timerModified and timerZombie to both // be set, meaning that the timer was modified and then stopped. // A timer sending to a channel may be placed in timerZombie // to take it out of the heap even though the timer is not stopped, // as long as nothing is reading from the channel. timerZombie ) // timerDebug enables printing a textual debug trace of all timer operations to stderr. const timerDebug = false func (t *timer) trace(op string) { if timerDebug { t.trace1(op) } } func (t *timer) trace1(op string) { if !timerDebug { return } bits := [4]string{"h", "m", "z", "c"} for i := range 3 { if t.state&(1<<i) == 0 { bits[i] = "-" } } if !t.isChan { bits[3] = "-" } print("T ", t, " ", bits[0], bits[1], bits[2], bits[3], " b=", t.blocked, " ", op, "\n") } func (ts *timers) trace(op string) { if timerDebug { println("TS", ts, op) } } // lock locks the timer, allowing reading or writing any of the timer fields. func (t *timer) lock() { lock(&t.mu) t.trace("lock") } // unlock updates t.astate and unlocks the timer. func (t *timer) unlock() { t.trace("unlock") // Let heap fast paths know whether heap[i].when is accurate. // Also let maybeRunChan know whether channel is in heap. t.astate.Store(t.state) unlock(&t.mu) } // hchan returns the channel in t.arg. // t must be a timer with a channel. 
func (t *timer) hchan() *hchan { if !t.isChan { badTimer() } // Note: t.arg is a chan time.Time, // and runtime cannot refer to that type, // so we cannot use a type assertion. return (*hchan)(efaceOf(&t.arg).data) } // updateHeap updates t as directed by t.state, updating t.state // and returning a bool indicating whether the state (and ts.heap[0].when) changed. // The caller must hold t's lock, or the world can be stopped instead. // The timer set t.ts must be non-nil and locked, t must be t.ts.heap[0], and updateHeap // takes care of moving t within the timers heap to preserve the heap invariants. // If ts == nil, then t must not be in a heap (or is in a heap that is // temporarily not maintaining its invariant, such as during timers.adjust). func (t *timer) updateHeap() (updated bool) { assertWorldStoppedOrLockHeld(&t.mu) t.trace("updateHeap") ts := t.ts if ts == nil || t != ts.heap[0].timer { badTimer() } assertLockHeld(&ts.mu) if t.state&timerZombie != 0 { // Take timer out of heap. t.state &^= timerHeaped | timerZombie | timerModified ts.zombies.Add(-1) ts.deleteMin() return true } if t.state&timerModified != 0 { // Update ts.heap[0].when and move within heap. t.state &^= timerModified ts.heap[0].when = t.when ts.siftDown(0) ts.updateMinWhenHeap() return true } return false } // maxWhen is the maximum value for timer's when field. const maxWhen = 1<<63 - 1 // verifyTimers can be set to true to add debugging checks that the // timer heaps are valid. const verifyTimers = false // Package time APIs. // Godoc uses the comments in package time, not these. // time.now is implemented in assembly. // timeSleep puts the current goroutine to sleep for at least ns nanoseconds. // //go:linkname timeSleep time.Sleep func timeSleep(ns int64) { if ns <= 0 { return } gp := getg() t := gp.timer if t == nil { t = new(timer) t.init(goroutineReady, gp) gp.timer = t } when := nanotime() + ns if when < 0 { // check for overflow. when = maxWhen } gp.sleepWhen = when gopark(resetForSleep, nil, waitReasonSleep, traceBlockSleep, 1) } // resetForSleep is called after the goroutine is parked for timeSleep. // We can't call timer.reset in timeSleep itself because if this is a short // sleep and there are many goroutines then the P can wind up running the // timer function, goroutineReady, before the goroutine has been parked. func resetForSleep(gp *g, _ unsafe.Pointer) bool { gp.timer.reset(gp.sleepWhen, 0) return true } // A timeTimer is a runtime-allocated time.Timer or time.Ticker // with the additional runtime state following it. // The runtime state is inaccessible to package time. type timeTimer struct { c unsafe.Pointer // <-chan time.Time init bool timer } // newTimer allocates and returns a new time.Timer or time.Ticker (same layout) // with the given parameters. // //go:linkname newTimer time.newTimer func newTimer(when, period int64, f func(arg any, seq uintptr, delay int64), arg any, c *hchan) *timeTimer { t := new(timeTimer) t.timer.init(nil, nil) t.trace("new") if raceenabled { racerelease(unsafe.Pointer(&t.timer)) } if c != nil { lockInit(&t.sendLock, lockRankTimerSend) t.isChan = true c.timer = &t.timer if c.dataqsiz == 0 { throw("invalid timer channel: no capacity") } } t.modify(when, period, f, arg, 0) t.init = true return t } // stopTimer stops a timer. // It reports whether t was stopped before being run. // //go:linkname stopTimer time.stopTimer func stopTimer(t *timeTimer) bool { return t.stop() } // resetTimer resets an inactive timer, adding it to the timer heap. 
// // Reports whether the timer was modified before it was run. // //go:linkname resetTimer time.resetTimer func resetTimer(t *timeTimer, when, period int64) bool { if raceenabled { racerelease(unsafe.Pointer(&t.timer)) } return t.reset(when, period) } // Go runtime. // Ready the goroutine arg. func goroutineReady(arg any, _ uintptr, _ int64) { goready(arg.(*g), 0) } // addHeap adds t to the timers heap. // The caller must hold ts.lock or the world must be stopped. // The caller must also have checked that t belongs in the heap. // Callers that are not sure can call t.maybeAdd instead, // but note that maybeAdd has different locking requirements. func (ts *timers) addHeap(t *timer) { assertWorldStoppedOrLockHeld(&ts.mu) // Timers rely on the network poller, so make sure the poller // has started. if netpollInited.Load() == 0 { netpollGenericInit() } if t.ts != nil { throw("ts set in timer") } t.ts = ts ts.heap = append(ts.heap, timerWhen{t, t.when}) ts.siftUp(len(ts.heap) - 1) if t == ts.heap[0].timer { ts.updateMinWhenHeap() } } // maybeRunAsync checks whether t needs to be triggered and runs it if so. // The caller is responsible for locking the timer and for checking that we // are running timers in async mode. If the timer needs to be run, // maybeRunAsync will unlock and re-lock it. // The timer is always locked on return. func (t *timer) maybeRunAsync() { assertLockHeld(&t.mu) if t.state&timerHeaped == 0 && t.isChan && t.when > 0 { // If timer should have triggered already (but nothing looked at it yet), // trigger now, so that a receive after the stop sees the "old" value // that should be there. // (It is possible to have t.blocked > 0 if there is a racing receive // in blockTimerChan, but timerHeaped not being set means // it hasn't run t.maybeAdd yet; in that case, running the // timer ourselves now is fine.) if now := nanotime(); t.when <= now { systemstack(func() { t.unlockAndRun(now) // resets t.when }) t.lock() } } } // stop stops the timer t. It may be on some other P, so we can't // actually remove it from the timers heap. We can only mark it as stopped. // It will be removed in due course by the P whose heap it is on. // Reports whether the timer was stopped before it was run. func (t *timer) stop() bool { async := debug.asynctimerchan.Load() != 0 if !async && t.isChan { lock(&t.sendLock) } t.lock() t.trace("stop") if async { t.maybeRunAsync() } if t.state&timerHeaped != 0 { t.state |= timerModified if t.state&timerZombie == 0 { t.state |= timerZombie t.ts.zombies.Add(1) } } pending := t.when > 0 t.when = 0 if !async && t.isChan { // Stop any future sends with stale values. // See timer.unlockAndRun. t.seq++ } t.unlock() if !async && t.isChan { unlock(&t.sendLock) if timerchandrain(t.hchan()) { pending = true } } return pending } // deleteMin removes timer 0 from ts. // ts must be locked. func (ts *timers) deleteMin() { assertLockHeld(&ts.mu) t := ts.heap[0].timer if t.ts != ts { throw("wrong timers") } t.ts = nil last := len(ts.heap) - 1 if last > 0 { ts.heap[0] = ts.heap[last] } ts.heap[last] = timerWhen{} ts.heap = ts.heap[:last] if last > 0 { ts.siftDown(0) } ts.updateMinWhenHeap() if last == 0 { // If there are no timers, then clearly there are no timerModified timers. ts.minWhenModified.Store(0) } } // modify modifies an existing timer. // This is called by the netpoll code or time.Ticker.Reset or time.Timer.Reset. // Reports whether the timer was modified before it was run. // If f == nil, then t.f, t.arg, and t.seq are not modified. 
func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay int64), arg any, seq uintptr) bool { if when <= 0 { throw("timer when must be positive") } if period < 0 { throw("timer period must be non-negative") } async := debug.asynctimerchan.Load() != 0 if !async && t.isChan { lock(&t.sendLock) } t.lock() if async { t.maybeRunAsync() } t.trace("modify") t.period = period if f != nil { t.f = f t.arg = arg t.seq = seq } wake := false pending := t.when > 0 t.when = when if t.state&timerHeaped != 0 { t.state |= timerModified if t.state&timerZombie != 0 { // In the heap but marked for removal (by a Stop). // Unmark it, since it has been Reset and will be running again. t.ts.zombies.Add(-1) t.state &^= timerZombie } // The corresponding heap[i].when is updated later. // See comment in type timer above and in timers.adjust below. if min := t.ts.minWhenModified.Load(); min == 0 || when < min { wake = true // Force timerModified bit out to t.astate before updating t.minWhenModified, // to synchronize with t.ts.adjust. See comment in adjust. t.astate.Store(t.state) t.ts.updateMinWhenModified(when) } } add := t.needsAdd() if !async && t.isChan { // Stop any future sends with stale values. // See timer.unlockAndRun. t.seq++ } t.unlock() if !async && t.isChan { if timerchandrain(t.hchan()) { pending = true } unlock(&t.sendLock) } if add { t.maybeAdd() } if wake { wakeNetPoller(when) } return pending } // needsAdd reports whether t needs to be added to a timers heap. // t must be locked. func (t *timer) needsAdd() bool { assertLockHeld(&t.mu) need := t.state&timerHeaped == 0 && t.when > 0 && (!t.isChan || t.blocked > 0) if need { t.trace("needsAdd+") } else { t.trace("needsAdd-") } return need } // maybeAdd adds t to the local timers heap if it needs to be in a heap. // The caller must not hold t's lock nor any timers heap lock. // The caller probably just unlocked t, but that lock must be dropped // in order to acquire a ts.lock, to avoid lock inversions. // (timers.adjust holds ts.lock while acquiring each t's lock, // so we cannot hold any t's lock while acquiring ts.lock). // // Strictly speaking it *might* be okay to hold t.lock and // acquire ts.lock at the same time, because we know that // t is not in any ts.heap, so nothing holding a ts.lock would // be acquiring the t.lock at the same time, meaning there // isn't a possible deadlock. But it is easier and safer not to be // too clever and respect the static ordering. // (If we don't, we have to change the static lock checking of t and ts.) // // Concurrent calls to time.Timer.Reset or blockTimerChan // may result in concurrent calls to t.maybeAdd, // so we cannot assume that t is not in a heap on entry to t.maybeAdd. func (t *timer) maybeAdd() { // Note: Not holding any locks on entry to t.maybeAdd, // so the current g can be rescheduled to a different M and P // at any time, including between the ts := assignment and the // call to ts.lock. If a reschedule happened then, we would be // adding t to some other P's timers, perhaps even a P that the scheduler // has marked as idle with no timers, in which case the timer could // go unnoticed until long after t.when. // Calling acquirem instead of using getg().m makes sure that // we end up locking and inserting into the current P's timers. 
mp := acquirem() ts := &mp.p.ptr().timers ts.lock() ts.cleanHead() t.lock() t.trace("maybeAdd") when := int64(0) wake := false if t.needsAdd() { t.state |= timerHeaped when = t.when wakeTime := ts.wakeTime() wake = wakeTime == 0 || when < wakeTime ts.addHeap(t) } t.unlock() ts.unlock() releasem(mp) if wake { wakeNetPoller(when) } } // reset resets the time when a timer should fire. // If used for an inactive timer, the timer will become active. // Reports whether the timer was active and was stopped. func (t *timer) reset(when, period int64) bool { return t.modify(when, period, nil, nil, 0) } // cleanHead cleans up the head of the timer queue. This speeds up // programs that create and delete timers; leaving them in the heap // slows down heap operations. // The caller must have locked ts. func (ts *timers) cleanHead() { ts.trace("cleanHead") assertLockHeld(&ts.mu) gp := getg() for { if len(ts.heap) == 0 { return } // This loop can theoretically run for a while, and because // it is holding timersLock it cannot be preempted. // If someone is trying to preempt us, just return. // We can clean the timers later. if gp.preemptStop { return } // Delete zombies from tail of heap. It requires no heap adjustments at all, // and doing so increases the chances that when we swap out a zombie // in heap[0] for the tail of the heap, we'll get a non-zombie timer, // shortening this loop. n := len(ts.heap) if t := ts.heap[n-1].timer; t.astate.Load()&timerZombie != 0 { t.lock() if t.state&timerZombie != 0 { t.state &^= timerHeaped | timerZombie | timerModified t.ts = nil ts.zombies.Add(-1) ts.heap[n-1] = timerWhen{} ts.heap = ts.heap[:n-1] } t.unlock() continue } t := ts.heap[0].timer if t.ts != ts { throw("bad ts") } if t.astate.Load()&(timerModified|timerZombie) == 0 { // Fast path: head of timers does not need adjustment. return } t.lock() updated := t.updateHeap() t.unlock() if !updated { // Head of timers does not need adjustment. return } } } // take moves any timers from src into ts // and then clears the timer state from src, // because src is being destroyed. // The caller must not have locked either timers. // For now this is only called when the world is stopped. func (ts *timers) take(src *timers) { ts.trace("take") assertWorldStopped() if len(src.heap) > 0 { // The world is stopped, so we ignore the locking of ts and src here. // That would introduce a sched < timers lock ordering, // which we'd rather avoid in the static ranking. for _, tw := range src.heap { t := tw.timer t.ts = nil if t.state&timerZombie != 0 { t.state &^= timerHeaped | timerZombie | timerModified } else { t.state &^= timerModified ts.addHeap(t) } } src.heap = nil src.zombies.Store(0) src.minWhenHeap.Store(0) src.minWhenModified.Store(0) src.len.Store(0) ts.len.Store(uint32(len(ts.heap))) } } // adjust looks through the timers in ts.heap for // any timers that have been modified to run earlier, and puts them in // the correct place in the heap. While looking for those timers, // it also moves timers that have been modified to run later, // and removes deleted timers. The caller must have locked ts. func (ts *timers) adjust(now int64, force bool) { ts.trace("adjust") assertLockHeld(&ts.mu) // If we haven't yet reached the time of the earliest modified // timer, don't do anything. This speeds up programs that adjust // a lot of timers back and forth if the timers rarely expire. // We'll postpone looking through all the adjusted timers until // one would actually expire. 
if !force { first := ts.minWhenModified.Load() if first == 0 || first > now { if verifyTimers { ts.verify() } return } } // minWhenModified is a lower bound on the earliest t.when // among the timerModified timers. We want to make it more precise: // we are going to scan the heap and clean out all the timerModified bits, // at which point minWhenModified can be set to 0 (indicating none at all). // // Other P's can be calling ts.wakeTime concurrently, and we'd like to // keep ts.wakeTime returning an accurate value throughout this entire process. // // Setting minWhenModified = 0 *before* the scan could make wakeTime // return an incorrect value: if minWhenModified < minWhenHeap, then clearing // it to 0 will make wakeTime return minWhenHeap (too late) until the scan finishes. // To avoid that, we want to set minWhenModified to 0 *after* the scan. // // Setting minWhenModified = 0 *after* the scan could result in missing // concurrent timer modifications in other goroutines; those will lock // the specific timer, set the timerModified bit, and set t.when. // To avoid that, we want to set minWhenModified to 0 *before* the scan. // // The way out of this dilemma is to preserve wakeTime a different way. // wakeTime is min(minWhenHeap, minWhenModified), and minWhenHeap // is protected by ts.lock, which we hold, so we can modify it however we like // in service of keeping wakeTime accurate. // // So we can: // // 1. Set minWhenHeap = min(minWhenHeap, minWhenModified) // 2. Set minWhenModified = 0 // (Other goroutines may modify timers and update minWhenModified now.) // 3. Scan timers // 4. Set minWhenHeap = heap[0].when // // That order preserves a correct value of wakeTime throughout the entire // operation: // Step 1 “locks in” an accurate wakeTime even with minWhenModified cleared. // Step 2 makes sure concurrent t.when updates are not lost during the scan. // Step 3 processes all modified timer values, justifying minWhenModified = 0. // Step 4 corrects minWhenHeap to a precise value. // // The wakeTime method implementation reads minWhenModified *before* minWhenHeap, // so that if the minWhenModified is observed to be 0, that means the minWhenHeap that // follows will include the information that was zeroed out of it. // // Originally Step 3 locked every timer, which made sure any timer update that was // already in progress during Steps 1+2 completed and was observed by Step 3. // All that locking was too expensive, so now we do an atomic load of t.astate to // decide whether we need to do a full lock. To make sure that we still observe any // timer update already in progress during Steps 1+2, t.modify sets timerModified // in t.astate *before* calling t.updateMinWhenModified. That ensures that the // overwrite in Step 2 cannot lose an update: if it does overwrite an update, Step 3 // will see the timerModified and do a full lock. ts.minWhenHeap.Store(ts.wakeTime()) ts.minWhenModified.Store(0) changed := false for i := 0; i < len(ts.heap); i++ { tw := &ts.heap[i] t := tw.timer if t.ts != ts { throw("bad ts") } if t.astate.Load()&(timerModified|timerZombie) == 0 { // Does not need adjustment. 
continue } t.lock() switch { case t.state&timerHeaped == 0: badTimer() case t.state&timerZombie != 0: ts.zombies.Add(-1) t.state &^= timerHeaped | timerZombie | timerModified n := len(ts.heap) ts.heap[i] = ts.heap[n-1] ts.heap[n-1] = timerWhen{} ts.heap = ts.heap[:n-1] t.ts = nil i-- changed = true case t.state&timerModified != 0: tw.when = t.when t.state &^= timerModified changed = true } t.unlock() } if changed { ts.initHeap() } ts.updateMinWhenHeap() if verifyTimers { ts.verify() } } // wakeTime looks at ts's timers and returns the time when we // should wake up the netpoller. It returns 0 if there are no timers. // This function is invoked when dropping a P, so it must run without // any write barriers. // //go:nowritebarrierrec func (ts *timers) wakeTime() int64 { // Note that the order of these two loads matters: // adjust updates minWhen to make it safe to clear minNextWhen. // We read minWhen after reading minNextWhen so that // if we see a cleared minNextWhen, we are guaranteed to see // the updated minWhen. nextWhen := ts.minWhenModified.Load() when := ts.minWhenHeap.Load() if when == 0 || (nextWhen != 0 && nextWhen < when) { when = nextWhen } return when } // check runs any timers in ts that are ready. // If now is not 0 it is the current time. // It returns the passed time or the current time if now was passed as 0. // and the time when the next timer should run or 0 if there is no next timer, // and reports whether it ran any timers. // If the time when the next timer should run is not 0, // it is always larger than the returned time. // We pass now in and out to avoid extra calls of nanotime. // //go:yeswritebarrierrec func (ts *timers) check(now int64) (rnow, pollUntil int64, ran bool) { ts.trace("check") // If it's not yet time for the first timer, or the first adjusted // timer, then there is nothing to do. next := ts.wakeTime() if next == 0 { // No timers to run or adjust. return now, 0, false } if now == 0 { now = nanotime() } // If this is the local P, and there are a lot of deleted timers, // clear them out. We only do this for the local P to reduce // lock contention on timersLock. zombies := ts.zombies.Load() if zombies < 0 { badTimer() } force := ts == &getg().m.p.ptr().timers && int(zombies) > int(ts.len.Load())/4 if now < next && !force { // Next timer is not ready to run, and we don't need to clear deleted timers. return now, next, false } ts.lock() if len(ts.heap) > 0 { ts.adjust(now, false) for len(ts.heap) > 0 { // Note that runtimer may temporarily unlock ts. if tw := ts.run(now); tw != 0 { if tw > 0 { pollUntil = tw } break } ran = true } // Note: Delaying the forced adjustment until after the ts.run // (as opposed to calling ts.adjust(now, force) above) // is significantly faster under contention, such as in // package time's BenchmarkTimerAdjust10000, // though we do not fully understand why. force = ts == &getg().m.p.ptr().timers && int(ts.zombies.Load()) > int(ts.len.Load())/4 if force { ts.adjust(now, true) } } ts.unlock() return now, pollUntil, ran } // run examines the first timer in ts. If it is ready based on now, // it runs the timer and removes or updates it. // Returns 0 if it ran a timer, -1 if there are no more timers, or the time // when the first timer should run. // The caller must have locked ts. // If a timer is run, this will temporarily unlock ts. 
// //go:systemstack func (ts *timers) run(now int64) int64 { ts.trace("run") assertLockHeld(&ts.mu) Redo: if len(ts.heap) == 0 { return -1 } tw := ts.heap[0] t := tw.timer if t.ts != ts { throw("bad ts") } if t.astate.Load()&(timerModified|timerZombie) == 0 && tw.when > now { // Fast path: not ready to run. return tw.when } t.lock() if t.updateHeap() { t.unlock() goto Redo } if t.state&timerHeaped == 0 || t.state&timerModified != 0 { badTimer() } if t.when > now { // Not ready to run. t.unlock() return t.when } t.unlockAndRun(now) assertLockHeld(&ts.mu) // t is unlocked now, but not ts return 0 } // unlockAndRun unlocks and runs the timer t (which must be locked). // If t is in a timer set (t.ts != nil), the caller must also have locked the timer set, // and this call will temporarily unlock the timer set while running the timer function. // unlockAndRun returns with t unlocked and t.ts (re-)locked. // //go:systemstack func (t *timer) unlockAndRun(now int64) { t.trace("unlockAndRun") assertLockHeld(&t.mu) if t.ts != nil { assertLockHeld(&t.ts.mu) } if raceenabled { // Note that we are running on a system stack, // so there is no chance of getg().m being reassigned // out from under us while this function executes. tsLocal := &getg().m.p.ptr().timers if tsLocal.raceCtx == 0 { tsLocal.raceCtx = racegostart(abi.FuncPCABIInternal((*timers).run) + sys.PCQuantum) } raceacquirectx(tsLocal.raceCtx, unsafe.Pointer(t)) } if t.state&(timerModified|timerZombie) != 0 { badTimer() } f := t.f arg := t.arg seq := t.seq var next int64 delay := now - t.when if t.period > 0 { // Leave in heap but adjust next time to fire. next = t.when + t.period*(1+delay/t.period) if next < 0 { // check for overflow. next = maxWhen } } else { next = 0 } ts := t.ts t.when = next if t.state&timerHeaped != 0 { t.state |= timerModified if next == 0 { t.state |= timerZombie t.ts.zombies.Add(1) } t.updateHeap() } t.unlock() if raceenabled { // Temporarily use the current P's racectx for g0. gp := getg() if gp.racectx != 0 { throw("unexpected racectx") } gp.racectx = gp.m.p.ptr().timers.raceCtx } if ts != nil { ts.unlock() } async := debug.asynctimerchan.Load() != 0 if !async && t.isChan { // For a timer channel, we want to make sure that no stale sends // happen after a t.stop or t.modify, but we cannot hold t.mu // during the actual send (which f does) due to lock ordering. // It can happen that we are holding t's lock above, we decide // it's time to send a time value (by calling f), grab the parameters, // unlock above, and then a t.stop or t.modify changes the timer // and returns. At that point, the send needs not to happen after all. // The way we arrange for it not to happen is that t.stop and t.modify // both increment t.seq while holding both t.mu and t.sendLock. // We copied the seq value above while holding t.mu. // Now we can acquire t.sendLock (which will be held across the send) // and double-check that t.seq is still the seq value we saw above. // If not, the timer has been updated and we should skip the send. // We skip the send by reassigning f to a no-op function. lock(&t.sendLock) if t.seq != seq { f = func(any, uintptr, int64) {} } } f(arg, seq, delay) if !async && t.isChan { unlock(&t.sendLock) } if ts != nil { ts.lock() } if raceenabled { gp := getg() gp.racectx = 0 } } // verifyTimerHeap verifies that the timers is in a valid state. // This is only for debugging, and is only called if verifyTimers is true. // The caller must have locked ts. 
func (ts *timers) verify() { assertLockHeld(&ts.mu) for i, tw := range ts.heap { if i == 0 { // First timer has no parent. continue } // The heap is timerHeapN-ary. See siftupTimer and siftdownTimer. p := int(uint(i-1) / timerHeapN) if tw.when < ts.heap[p].when { print("bad timer heap at ", i, ": ", p, ": ", ts.heap[p].when, ", ", i, ": ", tw.when, "\n") throw("bad timer heap") } } if n := int(ts.len.Load()); len(ts.heap) != n { println("timer heap len", len(ts.heap), "!= atomic len", n) throw("bad timer heap len") } } // updateMinWhenHeap sets ts.minWhenHeap to ts.heap[0].when. // The caller must have locked ts or the world must be stopped. func (ts *timers) updateMinWhenHeap() { assertWorldStoppedOrLockHeld(&ts.mu) if len(ts.heap) == 0 { ts.minWhenHeap.Store(0) } else { ts.minWhenHeap.Store(ts.heap[0].when) } } // updateMinWhenModified updates ts.minWhenModified to be <= when. // ts need not be (and usually is not) locked. func (ts *timers) updateMinWhenModified(when int64) { for { old := ts.minWhenModified.Load() if old != 0 && old < when { return } if ts.minWhenModified.CompareAndSwap(old, when) { return } } } // timeSleepUntil returns the time when the next timer should fire. Returns // maxWhen if there are no timers. // This is only called by sysmon and checkdead. func timeSleepUntil() int64 { next := int64(maxWhen) // Prevent allp slice changes. This is like retake. lock(&allpLock) for _, pp := range allp { if pp == nil { // This can happen if procresize has grown // allp but not yet created new Ps. continue } if w := pp.timers.wakeTime(); w != 0 { next = min(next, w) } } unlock(&allpLock) return next } const timerHeapN = 4 // Heap maintenance algorithms. // These algorithms check for slice index errors manually. // Slice index error can happen if the program is using racy // access to timers. We don't want to panic here, because // it will cause the program to crash with a mysterious // "panic holding locks" message. Instead, we panic while not // holding a lock. // siftUp puts the timer at position i in the right place // in the heap by moving it up toward the top of the heap. func (ts *timers) siftUp(i int) { heap := ts.heap if i >= len(heap) { badTimer() } tw := heap[i] when := tw.when if when <= 0 { badTimer() } for i > 0 { p := int(uint(i-1) / timerHeapN) // parent if when >= heap[p].when { break } heap[i] = heap[p] i = p } if heap[i].timer != tw.timer { heap[i] = tw } } // siftDown puts the timer at position i in the right place // in the heap by moving it down toward the bottom of the heap. func (ts *timers) siftDown(i int) { heap := ts.heap n := len(heap) if i >= n { badTimer() } if i*timerHeapN+1 >= n { return } tw := heap[i] when := tw.when if when <= 0 { badTimer() } for { leftChild := i*timerHeapN + 1 if leftChild >= n { break } w := when c := -1 for j, tw := range heap[leftChild:min(leftChild+timerHeapN, n)] { if tw.when < w { w = tw.when c = leftChild + j } } if c < 0 { break } heap[i] = heap[c] i = c } if heap[i].timer != tw.timer { heap[i] = tw } } // initHeap reestablishes the heap order in the slice ts.heap. // It takes O(n) time for n=len(ts.heap), not the O(n log n) of n repeated add operations. func (ts *timers) initHeap() { // Last possible element that needs sifting down is parent of last element; // last element is len(t)-1; parent of last element is (len(t)-1-1)/timerHeapN. 
if len(ts.heap) <= 1 { return } for i := int(uint(len(ts.heap)-1-1) / timerHeapN); i >= 0; i-- { ts.siftDown(i) } } // badTimer is called if the timer data structures have been corrupted, // presumably due to racy use by the program. We panic here rather than // panicking due to invalid slice access while holding locks. // See issue #25686. func badTimer() { throw("timer data corruption") } // Timer channels. // maybeRunChan checks whether the timer needs to run // to send a value to its associated channel. If so, it does. // The timer must not be locked. func (t *timer) maybeRunChan() { if t.astate.Load()&timerHeaped != 0 { // If the timer is in the heap, the ordinary timer code // is in charge of sending when appropriate. return } t.lock() now := nanotime() if t.state&timerHeaped != 0 || t.when == 0 || t.when > now { t.trace("maybeRunChan-") // Timer in the heap, or not running at all, or not triggered. t.unlock() return } t.trace("maybeRunChan+") systemstack(func() { t.unlockAndRun(now) }) } // blockTimerChan is called when a channel op has decided to block on c. // The caller holds the channel lock for c and possibly other channels. // blockTimerChan makes sure that c is in a timer heap, // adding it if needed. func blockTimerChan(c *hchan) { t := c.timer t.lock() t.trace("blockTimerChan") if !t.isChan { badTimer() } t.blocked++ // If this is the first enqueue after a recent dequeue, // the timer may still be in the heap but marked as a zombie. // Unmark it in this case, if the timer is still pending. if t.state&timerHeaped != 0 && t.state&timerZombie != 0 && t.when > 0 { t.state &^= timerZombie t.ts.zombies.Add(-1) } // t.maybeAdd must be called with t unlocked, // because it needs to lock t.ts before t. // Then it will do nothing if t.needsAdd(state) is false. // Check that now before the unlock, // avoiding the extra lock-lock-unlock-unlock // inside maybeAdd when t does not need to be added. add := t.needsAdd() t.unlock() if add { t.maybeAdd() } } // unblockTimerChan is called when a channel op that was blocked on c // is no longer blocked. Every call to blockTimerChan must be paired with // a call to unblockTimerChan. // The caller holds the channel lock for c and possibly other channels. // unblockTimerChan removes c from the timer heap when nothing is // blocked on it anymore. func unblockTimerChan(c *hchan) { t := c.timer t.lock() t.trace("unblockTimerChan") if !t.isChan || t.blocked == 0 { badTimer() } t.blocked-- if t.blocked == 0 && t.state&timerHeaped != 0 && t.state&timerZombie == 0 { // Last goroutine that was blocked on this timer. // Mark for removal from heap but do not clear t.when, // so that we know what time it is still meant to trigger. t.state |= timerZombie t.ts.zombies.Add(1) } t.unlock() }
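Two pieces of arithmetic above are easy to miss. A standalone illustration (not runtime code): with timerHeapN = 4, siftUp/siftDown use parent(i) = (i-1)/4 and children 4i+1 .. 4i+4, and unlockAndRun reschedules a late periodic timer on its original phase via next = when + period*(1+delay/period), delay = now - when.

package main

import "fmt"

const heapN = 4 // mirrors timerHeapN above

func parent(i int) int     { return (i - 1) / heapN }
func firstChild(i int) int { return i*heapN + 1 }

func nextFire(when, period, now int64) int64 {
	delay := now - when
	return when + period*(1+delay/period)
}

func main() {
	fmt.Println(parent(7), firstChild(1)) // 1 5
	// A timer with period 10 due at t=100 but only noticed at t=137
	// fires next at t=140, not t=147: the original phase is preserved.
	fmt.Println(nextFire(100, 10, 137)) // 140
}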
go/src/runtime/time.go/0
{ "file_path": "go/src/runtime/time.go", "repo_id": "go", "token_count": 13406 }
422
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "go_asm.h"
#include "go_tls.h"
#include "funcdata.h"
#include "textflag.h"

// If !iscgo, this is a no-op.
//
// NOTE: mcall() assumes this clobbers only R30 (REGTMP).
TEXT runtime·save_g(SB),NOSPLIT|NOFRAME,$0-0
	MOVB	runtime·iscgo(SB), R30
	BEQ	R30, nocgo
	MOVV	g, runtime·tls_g(SB)
nocgo:
	RET

TEXT runtime·load_g(SB),NOSPLIT|NOFRAME,$0-0
	MOVV	runtime·tls_g(SB), g
	RET

GLOBL runtime·tls_g(SB), TLSBSS, $8
go/src/runtime/tls_loong64.s/0
{ "file_path": "go/src/runtime/tls_loong64.s", "repo_id": "go", "token_count": 255 }
423
// Copyright 2024 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Runtime -> tracer API for memory events. package runtime import ( "internal/abi" "internal/runtime/sys" ) // Batch type values for the alloc/free experiment. const ( traceAllocFreeTypesBatch = iota // Contains types. [{id, address, size, ptrspan, name length, name string} ...] traceAllocFreeInfoBatch // Contains info for interpreting events. [min heap addr, page size, min heap align, min stack align] ) // traceSnapshotMemory takes a snapshot of all runtime memory that there are events for // (heap spans, heap objects, goroutine stacks, etc.) and writes out events for them. // // The world must be stopped and tracing must be enabled when this function is called. func traceSnapshotMemory(gen uintptr) { assertWorldStopped() // Write a batch containing information that'll be necessary to // interpret the events. var flushed bool w := unsafeTraceExpWriter(gen, nil, traceExperimentAllocFree) w, flushed = w.ensure(1 + 4*traceBytesPerNumber) if flushed { // Annotate the batch as containing additional info. w.byte(byte(traceAllocFreeInfoBatch)) } // Emit info. w.varint(uint64(trace.minPageHeapAddr)) w.varint(uint64(pageSize)) w.varint(uint64(minHeapAlign)) w.varint(uint64(fixedStack)) // Finish writing the batch. w.flush().end() // Start tracing. trace := traceAcquire() if !trace.ok() { throw("traceSnapshotMemory: tracing is not enabled") } // Write out all the heap spans and heap objects. for _, s := range mheap_.allspans { if s.state.get() == mSpanDead { continue } // It's some kind of span, so trace that it exists. trace.SpanExists(s) // Write out allocated objects if it's a heap span. if s.state.get() != mSpanInUse { continue } // Find all allocated objects. abits := s.allocBitsForIndex(0) for i := uintptr(0); i < uintptr(s.nelems); i++ { if abits.index < uintptr(s.freeindex) || abits.isMarked() { x := s.base() + i*s.elemsize trace.HeapObjectExists(x, s.typePointersOfUnchecked(x).typ) } abits.advance() } } // Write out all the goroutine stacks. forEachGRace(func(gp *g) { trace.GoroutineStackExists(gp.stack.lo, gp.stack.hi-gp.stack.lo) }) traceRelease(trace) } func traceSpanTypeAndClass(s *mspan) traceArg { if s.state.get() == mSpanInUse { return traceArg(s.spanclass) << 1 } return traceArg(1) } // SpanExists records an event indicating that the span exists. func (tl traceLocker) SpanExists(s *mspan) { tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvSpan, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s)) } // SpanAlloc records an event indicating that the span has just been allocated. func (tl traceLocker) SpanAlloc(s *mspan) { tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvSpanAlloc, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s)) } // SpanFree records an event indicating that the span is about to be freed. func (tl traceLocker) SpanFree(s *mspan) { tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvSpanFree, traceSpanID(s)) } // traceSpanID creates a trace ID for the span s for the trace. func traceSpanID(s *mspan) traceArg { return traceArg(uint64(s.base())-trace.minPageHeapAddr) / pageSize } // HeapObjectExists records that an object already exists at addr with the provided type. // The type is optional, and the size of the slot occupied the object is inferred from the // span containing it. 
func (tl traceLocker) HeapObjectExists(addr uintptr, typ *abi.Type) { tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvHeapObject, traceHeapObjectID(addr), tl.rtype(typ)) } // HeapObjectAlloc records that an object was newly allocated at addr with the provided type. // The type is optional, and the size of the slot occupied the object is inferred from the // span containing it. func (tl traceLocker) HeapObjectAlloc(addr uintptr, typ *abi.Type) { tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvHeapObjectAlloc, traceHeapObjectID(addr), tl.rtype(typ)) } // HeapObjectFree records that an object at addr is about to be freed. func (tl traceLocker) HeapObjectFree(addr uintptr) { tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvHeapObjectFree, traceHeapObjectID(addr)) } // traceHeapObjectID creates a trace ID for a heap object at address addr. func traceHeapObjectID(addr uintptr) traceArg { return traceArg(uint64(addr)-trace.minPageHeapAddr) / minHeapAlign } // GoroutineStackExists records that a goroutine stack already exists at address base with the provided size. func (tl traceLocker) GoroutineStackExists(base, size uintptr) { order := traceCompressStackSize(size) tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGoroutineStack, traceGoroutineStackID(base), order) } // GoroutineStackAlloc records that a goroutine stack was newly allocated at address base with the provided size.. func (tl traceLocker) GoroutineStackAlloc(base, size uintptr) { order := traceCompressStackSize(size) tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGoroutineStackAlloc, traceGoroutineStackID(base), order) } // GoroutineStackFree records that a goroutine stack at address base is about to be freed. func (tl traceLocker) GoroutineStackFree(base uintptr) { tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGoroutineStackFree, traceGoroutineStackID(base)) } // traceGoroutineStackID creates a trace ID for the goroutine stack from its base address. func traceGoroutineStackID(base uintptr) traceArg { return traceArg(uint64(base)-trace.minPageHeapAddr) / fixedStack } // traceCompressStackSize assumes size is a power of 2 and returns log2(size). func traceCompressStackSize(size uintptr) traceArg { if size&(size-1) != 0 { throw("goroutine stack size is not a power of 2") } return traceArg(sys.Len64(uint64(size))) }
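The ID helpers above all follow the same scheme. A standalone sketch (the constants are chosen only for illustration): an address becomes a small, position-independent integer by subtracting trace.minPageHeapAddr and dividing by the alignment for that kind of object (pageSize for spans, minHeapAlign for heap objects, fixedStack for goroutine stacks).

package main

import "fmt"

func traceID(addr, minHeapAddr, align uint64) uint64 {
	return (addr - minHeapAddr) / align
}

func main() {
	const minHeap = 0xc000000000 // illustrative heap base
	const pageSize = 8192
	fmt.Println(traceID(0xc000004000, minHeap, pageSize)) // 2: the third page-sized slot
}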
go/src/runtime/traceallocfree.go/0
{ "file_path": "go/src/runtime/traceallocfree.go", "repo_id": "go", "token_count": 1966 }
424
// Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Trace stack table and acquisition. package runtime import ( "internal/abi" "internal/goarch" "unsafe" ) // traceTypeTable maps stack traces (arrays of PC's) to unique uint32 ids. // It is lock-free for reading. type traceTypeTable struct { tab traceMap } // put returns a unique id for the type typ and caches it in the table, // if it's seeing it for the first time. // // N.B. typ must be kept alive forever for this to work correctly. func (t *traceTypeTable) put(typ *abi.Type) uint64 { if typ == nil { return 0 } // Insert the pointer to the type itself. id, _ := t.tab.put(noescape(unsafe.Pointer(&typ)), goarch.PtrSize) return id } // dump writes all previously cached types to trace buffers and // releases all memory and resets state. It must only be called once the caller // can guarantee that there are no more writers to the table. func (t *traceTypeTable) dump(gen uintptr) { w := unsafeTraceExpWriter(gen, nil, traceExperimentAllocFree) if root := (*traceMapNode)(t.tab.root.Load()); root != nil { w = dumpTypesRec(root, w) } w.flush().end() t.tab.reset() } func dumpTypesRec(node *traceMapNode, w traceWriter) traceWriter { typ := (*abi.Type)(*(*unsafe.Pointer)(unsafe.Pointer(&node.data[0]))) typName := toRType(typ).string() // The maximum number of bytes required to hold the encoded type. maxBytes := 1 + 5*traceBytesPerNumber + len(typName) // Estimate the size of this record. This // bound is pretty loose, but avoids counting // lots of varint sizes. // // Add 1 because we might also write a traceAllocFreeTypesBatch byte. var flushed bool w, flushed = w.ensure(1 + maxBytes) if flushed { // Annotate the batch as containing types. w.byte(byte(traceAllocFreeTypesBatch)) } // Emit type. w.varint(uint64(node.id)) w.varint(uint64(uintptr(unsafe.Pointer(typ)))) w.varint(uint64(typ.Size())) w.varint(uint64(typ.PtrBytes)) w.varint(uint64(len(typName))) w.stringData(typName) // Recursively walk all child nodes. for i := range node.children { child := node.children[i].Load() if child == nil { continue } w = dumpTypesRec((*traceMapNode)(child), w) } return w }
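A standalone sketch of the per-type record layout emitted by dumpTypesRec, using encoding/binary uvarints in place of the trace writer (values are invented; the real writer additionally batches and tags its output): {id, type address, size, ptr bytes, name length, name bytes}.

package main

import (
	"encoding/binary"
	"fmt"
)

func encodeTypeRecord(id, addr, size, ptrBytes uint64, name string) []byte {
	buf := make([]byte, 0, 5*binary.MaxVarintLen64+len(name))
	for _, v := range []uint64{id, addr, size, ptrBytes, uint64(len(name))} {
		buf = binary.AppendUvarint(buf, v)
	}
	return append(buf, name...)
}

func main() {
	rec := encodeTypeRecord(1, 0x4a1b20, 24, 8, "[]int") // invented values
	fmt.Printf("% x\n", rec)
}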
go/src/runtime/tracetype.go/0
{ "file_path": "go/src/runtime/tracetype.go", "repo_id": "go", "token_count": 785 }
425
// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package runtime import _ "unsafe" // for linkname const ( // vdsoArrayMax is the byte-size of a maximally sized array on this architecture. // See cmd/compile/internal/amd64/galign.go arch.MAXWIDTH initialization. vdsoArrayMax = 1<<50 - 1 ) var vdsoLinuxVersion = vdsoVersionKey{"LINUX_2.6", 0x3ae75f6} var vdsoSymbolKeys = []vdsoSymbolKey{ {"__vdso_gettimeofday", 0x315ca59, 0xb01bca00, &vdsoGettimeofdaySym}, {"__vdso_clock_gettime", 0xd35ec75, 0x6e43a318, &vdsoClockgettimeSym}, } var ( vdsoGettimeofdaySym uintptr vdsoClockgettimeSym uintptr ) // vdsoGettimeofdaySym is accessed from the syscall package. //go:linkname vdsoGettimeofdaySym
go/src/runtime/vdso_linux_amd64.go/0
{ "file_path": "go/src/runtime/vdso_linux_amd64.go", "repo_id": "go", "token_count": 301 }
426
// Code generated by wincallback.go using 'go generate'. DO NOT EDIT. package runtime const cb_max = 2000 // maximum number of windows callbacks allowed
go/src/runtime/zcallback_windows.go/0
{ "file_path": "go/src/runtime/zcallback_windows.go", "repo_id": "go", "token_count": 39 }
427
// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package sort_test import ( "fmt" "sort" ) // A Change is a record of source code changes, recording user, language, and delta size. type Change struct { user string language string lines int } type lessFunc func(p1, p2 *Change) bool // multiSorter implements the Sort interface, sorting the changes within. type multiSorter struct { changes []Change less []lessFunc } // Sort sorts the argument slice according to the less functions passed to OrderedBy. func (ms *multiSorter) Sort(changes []Change) { ms.changes = changes sort.Sort(ms) } // OrderedBy returns a Sorter that sorts using the less functions, in order. // Call its Sort method to sort the data. func OrderedBy(less ...lessFunc) *multiSorter { return &multiSorter{ less: less, } } // Len is part of sort.Interface. func (ms *multiSorter) Len() int { return len(ms.changes) } // Swap is part of sort.Interface. func (ms *multiSorter) Swap(i, j int) { ms.changes[i], ms.changes[j] = ms.changes[j], ms.changes[i] } // Less is part of sort.Interface. It is implemented by looping along the // less functions until it finds a comparison that discriminates between // the two items (one is less than the other). Note that it can call the // less functions twice per call. We could change the functions to return // -1, 0, 1 and reduce the number of calls for greater efficiency: an // exercise for the reader. func (ms *multiSorter) Less(i, j int) bool { p, q := &ms.changes[i], &ms.changes[j] // Try all but the last comparison. var k int for k = 0; k < len(ms.less)-1; k++ { less := ms.less[k] switch { case less(p, q): // p < q, so we have a decision. return true case less(q, p): // p > q, so we have a decision. return false } // p == q; try the next comparison. } // All comparisons to here said "equal", so just return whatever // the final comparison reports. return ms.less[k](p, q) } var changes = []Change{ {"gri", "Go", 100}, {"ken", "C", 150}, {"glenda", "Go", 200}, {"rsc", "Go", 200}, {"r", "Go", 100}, {"ken", "Go", 200}, {"dmr", "C", 100}, {"r", "C", 150}, {"gri", "Smalltalk", 80}, } // ExampleMultiKeys demonstrates a technique for sorting a struct type using different // sets of multiple fields in the comparison. We chain together "Less" functions, each of // which compares a single field. func Example_sortMultiKeys() { // Closures that order the Change structure. user := func(c1, c2 *Change) bool { return c1.user < c2.user } language := func(c1, c2 *Change) bool { return c1.language < c2.language } increasingLines := func(c1, c2 *Change) bool { return c1.lines < c2.lines } decreasingLines := func(c1, c2 *Change) bool { return c1.lines > c2.lines // Note: > orders downwards. } // Simple use: Sort by user. OrderedBy(user).Sort(changes) fmt.Println("By user:", changes) // More examples. 
OrderedBy(user, increasingLines).Sort(changes) fmt.Println("By user,<lines:", changes) OrderedBy(user, decreasingLines).Sort(changes) fmt.Println("By user,>lines:", changes) OrderedBy(language, increasingLines).Sort(changes) fmt.Println("By language,<lines:", changes) OrderedBy(language, increasingLines, user).Sort(changes) fmt.Println("By language,<lines,user:", changes) // Output: // By user: [{dmr C 100} {glenda Go 200} {gri Go 100} {gri Smalltalk 80} {ken C 150} {ken Go 200} {r Go 100} {r C 150} {rsc Go 200}] // By user,<lines: [{dmr C 100} {glenda Go 200} {gri Smalltalk 80} {gri Go 100} {ken C 150} {ken Go 200} {r Go 100} {r C 150} {rsc Go 200}] // By user,>lines: [{dmr C 100} {glenda Go 200} {gri Go 100} {gri Smalltalk 80} {ken Go 200} {ken C 150} {r C 150} {r Go 100} {rsc Go 200}] // By language,<lines: [{dmr C 100} {ken C 150} {r C 150} {gri Go 100} {r Go 100} {glenda Go 200} {ken Go 200} {rsc Go 200} {gri Smalltalk 80}] // By language,<lines,user: [{dmr C 100} {ken C 150} {r C 150} {gri Go 100} {r Go 100} {glenda Go 200} {ken Go 200} {rsc Go 200} {gri Smalltalk 80}] }
go/src/sort/example_multi_test.go/0
{ "file_path": "go/src/sort/example_multi_test.go", "repo_id": "go", "token_count": 1487 }
428
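For comparison, the multi-key ordering that multiSorter implements above can be written much more compactly on recent Go (slices.SortFunc is Go 1.21+, cmp.Or is Go 1.22+). This is a minimal illustrative sketch, not part of the original example file:

package main

import (
	"cmp"
	"fmt"
	"slices"
)

type Change struct {
	user     string
	language string
	lines    int
}

func main() {
	changes := []Change{
		{"gri", "Go", 100},
		{"ken", "C", 150},
		{"glenda", "Go", 200},
	}
	// Order by language, then increasing lines, then user.
	// cmp.Or returns the first non-zero comparison result, chaining the
	// keys in priority order exactly like multiSorter.Less does above.
	slices.SortFunc(changes, func(a, b Change) int {
		return cmp.Or(
			cmp.Compare(a.language, b.language),
			cmp.Compare(a.lines, b.lines),
			cmp.Compare(a.user, b.user),
		)
	})
	fmt.Println(changes)
}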
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package strconv // ParseBool returns the boolean value represented by the string. // It accepts 1, t, T, TRUE, true, True, 0, f, F, FALSE, false, False. // Any other value returns an error. func ParseBool(str string) (bool, error) { switch str { case "1", "t", "T", "true", "TRUE", "True": return true, nil case "0", "f", "F", "false", "FALSE", "False": return false, nil } return false, syntaxError("ParseBool", str) } // FormatBool returns "true" or "false" according to the value of b. func FormatBool(b bool) string { if b { return "true" } return "false" } // AppendBool appends "true" or "false", according to the value of b, // to dst and returns the extended buffer. func AppendBool(dst []byte, b bool) []byte { if b { return append(dst, "true"...) } return append(dst, "false"...) }
go/src/strconv/atob.go/0
{ "file_path": "go/src/strconv/atob.go", "repo_id": "go", "token_count": 338 }
429
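A minimal usage sketch for the three functions in atob.go above (illustrative only; the error text follows from syntaxError/NumError in this package):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	b, err := strconv.ParseBool("TRUE") // one of the accepted spellings
	fmt.Println(b, err)                 // true <nil>

	_, err = strconv.ParseBool("yes") // anything else is a syntax error
	fmt.Println(err) // strconv.ParseBool: parsing "yes": invalid syntax

	fmt.Println(strconv.FormatBool(false))                       // false
	fmt.Println(string(strconv.AppendBool([]byte("ok="), true))) // ok=true
}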
// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package strconv_test import ( "fmt" "log" "strconv" ) func ExampleAppendBool() { b := []byte("bool:") b = strconv.AppendBool(b, true) fmt.Println(string(b)) // Output: // bool:true } func ExampleAppendFloat() { b32 := []byte("float32:") b32 = strconv.AppendFloat(b32, 3.1415926535, 'E', -1, 32) fmt.Println(string(b32)) b64 := []byte("float64:") b64 = strconv.AppendFloat(b64, 3.1415926535, 'E', -1, 64) fmt.Println(string(b64)) // Output: // float32:3.1415927E+00 // float64:3.1415926535E+00 } func ExampleAppendInt() { b10 := []byte("int (base 10):") b10 = strconv.AppendInt(b10, -42, 10) fmt.Println(string(b10)) b16 := []byte("int (base 16):") b16 = strconv.AppendInt(b16, -42, 16) fmt.Println(string(b16)) // Output: // int (base 10):-42 // int (base 16):-2a } func ExampleAppendQuote() { b := []byte("quote:") b = strconv.AppendQuote(b, `"Fran & Freddie's Diner"`) fmt.Println(string(b)) // Output: // quote:"\"Fran & Freddie's Diner\"" } func ExampleAppendQuoteRune() { b := []byte("rune:") b = strconv.AppendQuoteRune(b, '☺') fmt.Println(string(b)) // Output: // rune:'☺' } func ExampleAppendQuoteRuneToASCII() { b := []byte("rune (ascii):") b = strconv.AppendQuoteRuneToASCII(b, '☺') fmt.Println(string(b)) // Output: // rune (ascii):'\u263a' } func ExampleAppendQuoteToASCII() { b := []byte("quote (ascii):") b = strconv.AppendQuoteToASCII(b, `"Fran & Freddie's Diner"`) fmt.Println(string(b)) // Output: // quote (ascii):"\"Fran & Freddie's Diner\"" } func ExampleAppendUint() { b10 := []byte("uint (base 10):") b10 = strconv.AppendUint(b10, 42, 10) fmt.Println(string(b10)) b16 := []byte("uint (base 16):") b16 = strconv.AppendUint(b16, 42, 16) fmt.Println(string(b16)) // Output: // uint (base 10):42 // uint (base 16):2a } func ExampleAtoi() { v := "10" if s, err := strconv.Atoi(v); err == nil { fmt.Printf("%T, %v", s, s) } // Output: // int, 10 } func ExampleCanBackquote() { fmt.Println(strconv.CanBackquote("Fran & Freddie's Diner ☺")) fmt.Println(strconv.CanBackquote("`can't backquote this`")) // Output: // true // false } func ExampleFormatBool() { v := true s := strconv.FormatBool(v) fmt.Printf("%T, %v\n", s, s) // Output: // string, true } func ExampleFormatFloat() { v := 3.1415926535 s32 := strconv.FormatFloat(v, 'E', -1, 32) fmt.Printf("%T, %v\n", s32, s32) s64 := strconv.FormatFloat(v, 'E', -1, 64) fmt.Printf("%T, %v\n", s64, s64) // fmt.Println uses these arguments to print floats fmt64 := strconv.FormatFloat(v, 'g', -1, 64) fmt.Printf("%T, %v\n", fmt64, fmt64) // Output: // string, 3.1415927E+00 // string, 3.1415926535E+00 // string, 3.1415926535 } func ExampleFormatInt() { v := int64(-42) s10 := strconv.FormatInt(v, 10) fmt.Printf("%T, %v\n", s10, s10) s16 := strconv.FormatInt(v, 16) fmt.Printf("%T, %v\n", s16, s16) // Output: // string, -42 // string, -2a } func ExampleFormatUint() { v := uint64(42) s10 := strconv.FormatUint(v, 10) fmt.Printf("%T, %v\n", s10, s10) s16 := strconv.FormatUint(v, 16) fmt.Printf("%T, %v\n", s16, s16) // Output: // string, 42 // string, 2a } func ExampleIsGraphic() { shamrock := strconv.IsGraphic('☘') fmt.Println(shamrock) a := strconv.IsGraphic('a') fmt.Println(a) bel := strconv.IsGraphic('\007') fmt.Println(bel) // Output: // true // true // false } func ExampleIsPrint() { c := strconv.IsPrint('\u263a') fmt.Println(c) bel := strconv.IsPrint('\007') fmt.Println(bel) // Output: // true // false } func 
ExampleItoa() { i := 10 s := strconv.Itoa(i) fmt.Printf("%T, %v\n", s, s) // Output: // string, 10 } func ExampleParseBool() { v := "true" if s, err := strconv.ParseBool(v); err == nil { fmt.Printf("%T, %v\n", s, s) } // Output: // bool, true } func ExampleParseFloat() { v := "3.1415926535" if s, err := strconv.ParseFloat(v, 32); err == nil { fmt.Printf("%T, %v\n", s, s) } if s, err := strconv.ParseFloat(v, 64); err == nil { fmt.Printf("%T, %v\n", s, s) } if s, err := strconv.ParseFloat("NaN", 32); err == nil { fmt.Printf("%T, %v\n", s, s) } // ParseFloat is case insensitive if s, err := strconv.ParseFloat("nan", 32); err == nil { fmt.Printf("%T, %v\n", s, s) } if s, err := strconv.ParseFloat("inf", 32); err == nil { fmt.Printf("%T, %v\n", s, s) } if s, err := strconv.ParseFloat("+Inf", 32); err == nil { fmt.Printf("%T, %v\n", s, s) } if s, err := strconv.ParseFloat("-Inf", 32); err == nil { fmt.Printf("%T, %v\n", s, s) } if s, err := strconv.ParseFloat("-0", 32); err == nil { fmt.Printf("%T, %v\n", s, s) } if s, err := strconv.ParseFloat("+0", 32); err == nil { fmt.Printf("%T, %v\n", s, s) } // Output: // float64, 3.1415927410125732 // float64, 3.1415926535 // float64, NaN // float64, NaN // float64, +Inf // float64, +Inf // float64, -Inf // float64, -0 // float64, 0 } func ExampleParseInt() { v32 := "-354634382" if s, err := strconv.ParseInt(v32, 10, 32); err == nil { fmt.Printf("%T, %v\n", s, s) } if s, err := strconv.ParseInt(v32, 16, 32); err == nil { fmt.Printf("%T, %v\n", s, s) } v64 := "-3546343826724305832" if s, err := strconv.ParseInt(v64, 10, 64); err == nil { fmt.Printf("%T, %v\n", s, s) } if s, err := strconv.ParseInt(v64, 16, 64); err == nil { fmt.Printf("%T, %v\n", s, s) } // Output: // int64, -354634382 // int64, -3546343826724305832 } func ExampleParseUint() { v := "42" if s, err := strconv.ParseUint(v, 10, 32); err == nil { fmt.Printf("%T, %v\n", s, s) } if s, err := strconv.ParseUint(v, 10, 64); err == nil { fmt.Printf("%T, %v\n", s, s) } // Output: // uint64, 42 // uint64, 42 } func ExampleQuote() { // This string literal contains a tab character. s := strconv.Quote(`"Fran & Freddie's Diner ☺"`) fmt.Println(s) // Output: // "\"Fran & Freddie's Diner\t☺\"" } func ExampleQuoteRune() { s := strconv.QuoteRune('☺') fmt.Println(s) // Output: // '☺' } func ExampleQuoteRuneToASCII() { s := strconv.QuoteRuneToASCII('☺') fmt.Println(s) // Output: // '\u263a' } func ExampleQuoteRuneToGraphic() { s := strconv.QuoteRuneToGraphic('☺') fmt.Println(s) s = strconv.QuoteRuneToGraphic('\u263a') fmt.Println(s) s = strconv.QuoteRuneToGraphic('\u000a') fmt.Println(s) s = strconv.QuoteRuneToGraphic(' ') // tab character fmt.Println(s) // Output: // '☺' // '☺' // '\n' // '\t' } func ExampleQuoteToASCII() { // This string literal contains a tab character. s := strconv.QuoteToASCII(`"Fran & Freddie's Diner ☺"`) fmt.Println(s) // Output: // "\"Fran & Freddie's Diner\t\u263a\"" } func ExampleQuoteToGraphic() { s := strconv.QuoteToGraphic("☺") fmt.Println(s) // This string literal contains a tab character. 
s = strconv.QuoteToGraphic("This is a \u263a \u000a") fmt.Println(s) s = strconv.QuoteToGraphic(`" This is a ☺ \n "`) fmt.Println(s) // Output: // "☺" // "This is a ☺\t\n" // "\" This is a ☺ \\n \"" } func ExampleQuotedPrefix() { s, err := strconv.QuotedPrefix("not a quoted string") fmt.Printf("%q, %v\n", s, err) s, err = strconv.QuotedPrefix("\"double-quoted string\" with trailing text") fmt.Printf("%q, %v\n", s, err) s, err = strconv.QuotedPrefix("`or backquoted` with more trailing text") fmt.Printf("%q, %v\n", s, err) s, err = strconv.QuotedPrefix("'\u263a' is also okay") fmt.Printf("%q, %v\n", s, err) // Output: // "", invalid syntax // "\"double-quoted string\"", <nil> // "`or backquoted`", <nil> // "'☺'", <nil> } func ExampleUnquote() { s, err := strconv.Unquote("You can't unquote a string without quotes") fmt.Printf("%q, %v\n", s, err) s, err = strconv.Unquote("\"The string must be either double-quoted\"") fmt.Printf("%q, %v\n", s, err) s, err = strconv.Unquote("`or backquoted.`") fmt.Printf("%q, %v\n", s, err) s, err = strconv.Unquote("'\u263a'") // single character only allowed in single quotes fmt.Printf("%q, %v\n", s, err) s, err = strconv.Unquote("'\u2639\u2639'") fmt.Printf("%q, %v\n", s, err) // Output: // "", invalid syntax // "The string must be either double-quoted", <nil> // "or backquoted.", <nil> // "☺", <nil> // "", invalid syntax } func ExampleUnquoteChar() { v, mb, t, err := strconv.UnquoteChar(`\"Fran & Freddie's Diner\"`, '"') if err != nil { log.Fatal(err) } fmt.Println("value:", string(v)) fmt.Println("multibyte:", mb) fmt.Println("tail:", t) // Output: // value: " // multibyte: false // tail: Fran & Freddie's Diner\" } func ExampleNumError() { str := "Not a number" if _, err := strconv.ParseFloat(str, 64); err != nil { e := err.(*strconv.NumError) fmt.Println("Func:", e.Func) fmt.Println("Num:", e.Num) fmt.Println("Err:", e.Err) fmt.Println(err) } // Output: // Func: ParseFloat // Num: Not a number // Err: invalid syntax // strconv.ParseFloat: parsing "Not a number": invalid syntax }
go/src/strconv/example_test.go/0
{ "file_path": "go/src/strconv/example_test.go", "repo_id": "go", "token_count": 4155 }
430
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package strings import ( "internal/abi" "internal/bytealg" "unicode/utf8" "unsafe" ) // A Builder is used to efficiently build a string using [Builder.Write] methods. // It minimizes memory copying. The zero value is ready to use. // Do not copy a non-zero Builder. type Builder struct { addr *Builder // of receiver, to detect copies by value // External users should never get direct access to this buffer, since // the slice at some point will be converted to a string using unsafe, also // data between len(buf) and cap(buf) might be uninitialized. buf []byte } func (b *Builder) copyCheck() { if b.addr == nil { // This hack works around a failing of Go's escape analysis // that was causing b to escape and be heap allocated. // See issue 23382. // TODO: once issue 7921 is fixed, this should be reverted to // just "b.addr = b". b.addr = (*Builder)(abi.NoEscape(unsafe.Pointer(b))) } else if b.addr != b { panic("strings: illegal use of non-zero Builder copied by value") } } // String returns the accumulated string. func (b *Builder) String() string { return unsafe.String(unsafe.SliceData(b.buf), len(b.buf)) } // Len returns the number of accumulated bytes; b.Len() == len(b.String()). func (b *Builder) Len() int { return len(b.buf) } // Cap returns the capacity of the builder's underlying byte slice. It is the // total space allocated for the string being built and includes any bytes // already written. func (b *Builder) Cap() int { return cap(b.buf) } // Reset resets the [Builder] to be empty. func (b *Builder) Reset() { b.addr = nil b.buf = nil } // grow copies the buffer to a new, larger buffer so that there are at least n // bytes of capacity beyond len(b.buf). func (b *Builder) grow(n int) { buf := bytealg.MakeNoZero(2*cap(b.buf) + n)[:len(b.buf)] copy(buf, b.buf) b.buf = buf } // Grow grows b's capacity, if necessary, to guarantee space for // another n bytes. After Grow(n), at least n bytes can be written to b // without another allocation. If n is negative, Grow panics. func (b *Builder) Grow(n int) { b.copyCheck() if n < 0 { panic("strings.Builder.Grow: negative count") } if cap(b.buf)-len(b.buf) < n { b.grow(n) } } // Write appends the contents of p to b's buffer. // Write always returns len(p), nil. func (b *Builder) Write(p []byte) (int, error) { b.copyCheck() b.buf = append(b.buf, p...) return len(p), nil } // WriteByte appends the byte c to b's buffer. // The returned error is always nil. func (b *Builder) WriteByte(c byte) error { b.copyCheck() b.buf = append(b.buf, c) return nil } // WriteRune appends the UTF-8 encoding of Unicode code point r to b's buffer. // It returns the length of r and a nil error. func (b *Builder) WriteRune(r rune) (int, error) { b.copyCheck() n := len(b.buf) b.buf = utf8.AppendRune(b.buf, r) return len(b.buf) - n, nil } // WriteString appends the contents of s to b's buffer. // It returns the length of s and a nil error. func (b *Builder) WriteString(s string) (int, error) { b.copyCheck() b.buf = append(b.buf, s...) return len(s), nil }
go/src/strings/builder.go/0
{ "file_path": "go/src/strings/builder.go", "repo_id": "go", "token_count": 1089 }
431
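A short usage sketch for the Builder API above, mirroring the intended pattern (grow once if the final size is roughly known, append, then read the result with String); illustrative only:

package main

import (
	"fmt"
	"strings"
)

func main() {
	var b strings.Builder // the zero value is ready to use

	b.Grow(32) // optional: reserve capacity up front to avoid regrowth
	for i := 3; i >= 1; i-- {
		fmt.Fprintf(&b, "%d...", i) // Builder implements io.Writer
	}
	b.WriteString("ignition")

	fmt.Println(b.String()) // 3...2...1...ignition
	fmt.Println(b.Len())    // 20
}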
// Copyright 2024 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package structs defines marker types that can be used as struct fields // to modify the properties of a struct. // // By convention, a marker type should be used as the type of a field // named "_", placed at the beginning of a struct type definition. package structs
go/src/structs/doc.go/0
{ "file_path": "go/src/structs/doc.go", "repo_id": "go", "token_count": 104 }
432
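The doc comment above describes the convention without naming a marker; as of Go 1.23 the one marker type this package defines is structs.HostLayout (that name is an assumption relative to the file shown here, which does not mention it). A hedged sketch of the convention, with the marker field named "_" and placed first:

package main

import "structs"

// Registers is laid out using the host platform's rules rather than Go's own,
// which matters when the struct is shared with non-Go code (cgo, syscalls).
type Registers struct {
	_       structs.HostLayout // marker field: from package structs, named "_", first in the struct
	Control uint32
	Status  uint32
}

func main() {
	var r Registers
	r.Control = 1
	_ = r
}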
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package sync_test import ( "fmt" "reflect" "sync" "sync/atomic" "testing" ) type bench struct { setup func(*testing.B, mapInterface) perG func(b *testing.B, pb *testing.PB, i int, m mapInterface) } func benchMap(b *testing.B, bench bench) { for _, m := range [...]mapInterface{&DeepCopyMap{}, &RWMutexMap{}, &sync.Map{}} { b.Run(fmt.Sprintf("%T", m), func(b *testing.B) { m = reflect.New(reflect.TypeOf(m).Elem()).Interface().(mapInterface) if bench.setup != nil { bench.setup(b, m) } b.ResetTimer() var i int64 b.RunParallel(func(pb *testing.PB) { id := int(atomic.AddInt64(&i, 1) - 1) bench.perG(b, pb, id*b.N, m) }) }) } } func BenchmarkLoadMostlyHits(b *testing.B) { const hits, misses = 1023, 1 benchMap(b, bench{ setup: func(_ *testing.B, m mapInterface) { for i := 0; i < hits; i++ { m.LoadOrStore(i, i) } // Prime the map to get it into a steady state. for i := 0; i < hits*2; i++ { m.Load(i % hits) } }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { m.Load(i % (hits + misses)) } }, }) } func BenchmarkLoadMostlyMisses(b *testing.B) { const hits, misses = 1, 1023 benchMap(b, bench{ setup: func(_ *testing.B, m mapInterface) { for i := 0; i < hits; i++ { m.LoadOrStore(i, i) } // Prime the map to get it into a steady state. for i := 0; i < hits*2; i++ { m.Load(i % hits) } }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { m.Load(i % (hits + misses)) } }, }) } func BenchmarkLoadOrStoreBalanced(b *testing.B) { const hits, misses = 128, 128 benchMap(b, bench{ setup: func(b *testing.B, m mapInterface) { if _, ok := m.(*DeepCopyMap); ok { b.Skip("DeepCopyMap has quadratic running time.") } for i := 0; i < hits; i++ { m.LoadOrStore(i, i) } // Prime the map to get it into a steady state. for i := 0; i < hits*2; i++ { m.Load(i % hits) } }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { j := i % (hits + misses) if j < hits { if _, ok := m.LoadOrStore(j, i); !ok { b.Fatalf("unexpected miss for %v", j) } } else { if v, loaded := m.LoadOrStore(i, i); loaded { b.Fatalf("failed to store %v: existing value %v", i, v) } } } }, }) } func BenchmarkLoadOrStoreUnique(b *testing.B) { benchMap(b, bench{ setup: func(b *testing.B, m mapInterface) { if _, ok := m.(*DeepCopyMap); ok { b.Skip("DeepCopyMap has quadratic running time.") } }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { m.LoadOrStore(i, i) } }, }) } func BenchmarkLoadOrStoreCollision(b *testing.B) { benchMap(b, bench{ setup: func(_ *testing.B, m mapInterface) { m.LoadOrStore(0, 0) }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { m.LoadOrStore(0, 0) } }, }) } func BenchmarkLoadAndDeleteBalanced(b *testing.B) { const hits, misses = 128, 128 benchMap(b, bench{ setup: func(b *testing.B, m mapInterface) { if _, ok := m.(*DeepCopyMap); ok { b.Skip("DeepCopyMap has quadratic running time.") } for i := 0; i < hits; i++ { m.LoadOrStore(i, i) } // Prime the map to get it into a steady state. 
for i := 0; i < hits*2; i++ { m.Load(i % hits) } }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { j := i % (hits + misses) if j < hits { m.LoadAndDelete(j) } else { m.LoadAndDelete(i) } } }, }) } func BenchmarkLoadAndDeleteUnique(b *testing.B) { benchMap(b, bench{ setup: func(b *testing.B, m mapInterface) { if _, ok := m.(*DeepCopyMap); ok { b.Skip("DeepCopyMap has quadratic running time.") } }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { m.LoadAndDelete(i) } }, }) } func BenchmarkLoadAndDeleteCollision(b *testing.B) { benchMap(b, bench{ setup: func(_ *testing.B, m mapInterface) { m.LoadOrStore(0, 0) }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { if _, loaded := m.LoadAndDelete(0); loaded { m.Store(0, 0) } } }, }) } func BenchmarkRange(b *testing.B) { const mapSize = 1 << 10 benchMap(b, bench{ setup: func(_ *testing.B, m mapInterface) { for i := 0; i < mapSize; i++ { m.Store(i, i) } }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { m.Range(func(_, _ any) bool { return true }) } }, }) } // BenchmarkAdversarialAlloc tests performance when we store a new value // immediately whenever the map is promoted to clean and otherwise load a // unique, missing key. // // This forces the Load calls to always acquire the map's mutex. func BenchmarkAdversarialAlloc(b *testing.B) { benchMap(b, bench{ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { var stores, loadsSinceStore int64 for ; pb.Next(); i++ { m.Load(i) if loadsSinceStore++; loadsSinceStore > stores { m.LoadOrStore(i, stores) loadsSinceStore = 0 stores++ } } }, }) } // BenchmarkAdversarialDelete tests performance when we periodically delete // one key and add a different one in a large map. // // This forces the Load calls to always acquire the map's mutex and periodically // makes a full copy of the map despite changing only one entry. func BenchmarkAdversarialDelete(b *testing.B) { const mapSize = 1 << 10 benchMap(b, bench{ setup: func(_ *testing.B, m mapInterface) { for i := 0; i < mapSize; i++ { m.Store(i, i) } }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { m.Load(i) if i%mapSize == 0 { m.Range(func(k, _ any) bool { m.Delete(k) return false }) m.Store(i, i) } } }, }) } func BenchmarkDeleteCollision(b *testing.B) { benchMap(b, bench{ setup: func(_ *testing.B, m mapInterface) { m.LoadOrStore(0, 0) }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { m.Delete(0) } }, }) } func BenchmarkSwapCollision(b *testing.B) { benchMap(b, bench{ setup: func(_ *testing.B, m mapInterface) { m.LoadOrStore(0, 0) }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { m.Swap(0, 0) } }, }) } func BenchmarkSwapMostlyHits(b *testing.B) { const hits, misses = 1023, 1 benchMap(b, bench{ setup: func(_ *testing.B, m mapInterface) { for i := 0; i < hits; i++ { m.LoadOrStore(i, i) } // Prime the map to get it into a steady state. 
for i := 0; i < hits*2; i++ { m.Load(i % hits) } }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { if i%(hits+misses) < hits { v := i % (hits + misses) m.Swap(v, v) } else { m.Swap(i, i) m.Delete(i) } } }, }) } func BenchmarkSwapMostlyMisses(b *testing.B) { const hits, misses = 1, 1023 benchMap(b, bench{ setup: func(_ *testing.B, m mapInterface) { for i := 0; i < hits; i++ { m.LoadOrStore(i, i) } // Prime the map to get it into a steady state. for i := 0; i < hits*2; i++ { m.Load(i % hits) } }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { if i%(hits+misses) < hits { v := i % (hits + misses) m.Swap(v, v) } else { m.Swap(i, i) m.Delete(i) } } }, }) } func BenchmarkCompareAndSwapCollision(b *testing.B) { benchMap(b, bench{ setup: func(_ *testing.B, m mapInterface) { m.LoadOrStore(0, 0) }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for pb.Next() { if m.CompareAndSwap(0, 0, 42) { m.CompareAndSwap(0, 42, 0) } } }, }) } func BenchmarkCompareAndSwapNoExistingKey(b *testing.B) { benchMap(b, bench{ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { if m.CompareAndSwap(i, 0, 0) { m.Delete(i) } } }, }) } func BenchmarkCompareAndSwapValueNotEqual(b *testing.B) { benchMap(b, bench{ setup: func(_ *testing.B, m mapInterface) { m.Store(0, 0) }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { m.CompareAndSwap(0, 1, 2) } }, }) } func BenchmarkCompareAndSwapMostlyHits(b *testing.B) { const hits, misses = 1023, 1 benchMap(b, bench{ setup: func(b *testing.B, m mapInterface) { if _, ok := m.(*DeepCopyMap); ok { b.Skip("DeepCopyMap has quadratic running time.") } for i := 0; i < hits; i++ { m.LoadOrStore(i, i) } // Prime the map to get it into a steady state. for i := 0; i < hits*2; i++ { m.Load(i % hits) } }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { v := i if i%(hits+misses) < hits { v = i % (hits + misses) } m.CompareAndSwap(v, v, v) } }, }) } func BenchmarkCompareAndSwapMostlyMisses(b *testing.B) { const hits, misses = 1, 1023 benchMap(b, bench{ setup: func(_ *testing.B, m mapInterface) { for i := 0; i < hits; i++ { m.LoadOrStore(i, i) } // Prime the map to get it into a steady state. for i := 0; i < hits*2; i++ { m.Load(i % hits) } }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { v := i if i%(hits+misses) < hits { v = i % (hits + misses) } m.CompareAndSwap(v, v, v) } }, }) } func BenchmarkCompareAndDeleteCollision(b *testing.B) { benchMap(b, bench{ setup: func(_ *testing.B, m mapInterface) { m.LoadOrStore(0, 0) }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { if m.CompareAndDelete(0, 0) { m.Store(0, 0) } } }, }) } func BenchmarkCompareAndDeleteMostlyHits(b *testing.B) { const hits, misses = 1023, 1 benchMap(b, bench{ setup: func(b *testing.B, m mapInterface) { if _, ok := m.(*DeepCopyMap); ok { b.Skip("DeepCopyMap has quadratic running time.") } for i := 0; i < hits; i++ { m.LoadOrStore(i, i) } // Prime the map to get it into a steady state. 
for i := 0; i < hits*2; i++ { m.Load(i % hits) } }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { v := i if i%(hits+misses) < hits { v = i % (hits + misses) } if m.CompareAndDelete(v, v) { m.Store(v, v) } } }, }) } func BenchmarkCompareAndDeleteMostlyMisses(b *testing.B) { const hits, misses = 1, 1023 benchMap(b, bench{ setup: func(_ *testing.B, m mapInterface) { for i := 0; i < hits; i++ { m.LoadOrStore(i, i) } // Prime the map to get it into a steady state. for i := 0; i < hits*2; i++ { m.Load(i % hits) } }, perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { v := i if i%(hits+misses) < hits { v = i % (hits + misses) } if m.CompareAndDelete(v, v) { m.Store(v, v) } } }, }) } func BenchmarkClear(b *testing.B) { benchMap(b, bench{ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { for ; pb.Next(); i++ { k, v := i%256, i%256 m.Clear() m.Store(k, v) } }, }) }
go/src/sync/map_bench_test.go/0
{ "file_path": "go/src/sync/map_bench_test.go", "repo_id": "go", "token_count": 5569 }
433
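The benchmarks above drive sync.Map through the mapInterface defined in the package's reference-map test file (not shown here). For orientation, a minimal sketch of the sync.Map API itself:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var m sync.Map // the zero value is empty and ready to use

	m.Store("alice", 1)
	if v, ok := m.Load("alice"); ok {
		fmt.Println("alice =", v) // alice = 1
	}

	// LoadOrStore returns the existing value (and loaded=true) if the key is present.
	actual, loaded := m.LoadOrStore("alice", 99)
	fmt.Println(actual, loaded) // 1 true

	m.Range(func(k, v any) bool {
		fmt.Println(k, v)
		return true // continue iteration
	})

	m.Delete("alice")
}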
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package sync import ( "internal/race" "sync/atomic" "unsafe" ) // There is a modified copy of this file in runtime/rwmutex.go. // If you make any changes here, see if you should make them there. // A RWMutex is a reader/writer mutual exclusion lock. // The lock can be held by an arbitrary number of readers or a single writer. // The zero value for a RWMutex is an unlocked mutex. // // A RWMutex must not be copied after first use. // // If any goroutine calls [RWMutex.Lock] while the lock is already held by // one or more readers, concurrent calls to [RWMutex.RLock] will block until // the writer has acquired (and released) the lock, to ensure that // the lock eventually becomes available to the writer. // Note that this prohibits recursive read-locking. // // In the terminology of [the Go memory model], // the n'th call to [RWMutex.Unlock] “synchronizes before” the m'th call to Lock // for any n < m, just as for [Mutex]. // For any call to RLock, there exists an n such that // the n'th call to Unlock “synchronizes before” that call to RLock, // and the corresponding call to [RWMutex.RUnlock] “synchronizes before” // the n+1'th call to Lock. // // [the Go memory model]: https://go.dev/ref/mem type RWMutex struct { w Mutex // held if there are pending writers writerSem uint32 // semaphore for writers to wait for completing readers readerSem uint32 // semaphore for readers to wait for completing writers readerCount atomic.Int32 // number of pending readers readerWait atomic.Int32 // number of departing readers } const rwmutexMaxReaders = 1 << 30 // Happens-before relationships are indicated to the race detector via: // - Unlock -> Lock: readerSem // - Unlock -> RLock: readerSem // - RUnlock -> Lock: writerSem // // The methods below temporarily disable handling of race synchronization // events in order to provide the more precise model above to the race // detector. // // For example, atomic.AddInt32 in RLock should not appear to provide // acquire-release semantics, which would incorrectly synchronize racing // readers, thus potentially missing races. // RLock locks rw for reading. // // It should not be used for recursive read locking; a blocked Lock // call excludes new readers from acquiring the lock. See the // documentation on the [RWMutex] type. func (rw *RWMutex) RLock() { if race.Enabled { _ = rw.w.state race.Disable() } if rw.readerCount.Add(1) < 0 { // A writer is pending, wait for it. runtime_SemacquireRWMutexR(&rw.readerSem, false, 0) } if race.Enabled { race.Enable() race.Acquire(unsafe.Pointer(&rw.readerSem)) } } // TryRLock tries to lock rw for reading and reports whether it succeeded. // // Note that while correct uses of TryRLock do exist, they are rare, // and use of TryRLock is often a sign of a deeper problem // in a particular use of mutexes. func (rw *RWMutex) TryRLock() bool { if race.Enabled { _ = rw.w.state race.Disable() } for { c := rw.readerCount.Load() if c < 0 { if race.Enabled { race.Enable() } return false } if rw.readerCount.CompareAndSwap(c, c+1) { if race.Enabled { race.Enable() race.Acquire(unsafe.Pointer(&rw.readerSem)) } return true } } } // RUnlock undoes a single [RWMutex.RLock] call; // it does not affect other simultaneous readers. // It is a run-time error if rw is not locked for reading // on entry to RUnlock. 
func (rw *RWMutex) RUnlock() { if race.Enabled { _ = rw.w.state race.ReleaseMerge(unsafe.Pointer(&rw.writerSem)) race.Disable() } if r := rw.readerCount.Add(-1); r < 0 { // Outlined slow-path to allow the fast-path to be inlined rw.rUnlockSlow(r) } if race.Enabled { race.Enable() } } func (rw *RWMutex) rUnlockSlow(r int32) { if r+1 == 0 || r+1 == -rwmutexMaxReaders { race.Enable() fatal("sync: RUnlock of unlocked RWMutex") } // A writer is pending. if rw.readerWait.Add(-1) == 0 { // The last reader unblocks the writer. runtime_Semrelease(&rw.writerSem, false, 1) } } // Lock locks rw for writing. // If the lock is already locked for reading or writing, // Lock blocks until the lock is available. func (rw *RWMutex) Lock() { if race.Enabled { _ = rw.w.state race.Disable() } // First, resolve competition with other writers. rw.w.Lock() // Announce to readers there is a pending writer. r := rw.readerCount.Add(-rwmutexMaxReaders) + rwmutexMaxReaders // Wait for active readers. if r != 0 && rw.readerWait.Add(r) != 0 { runtime_SemacquireRWMutex(&rw.writerSem, false, 0) } if race.Enabled { race.Enable() race.Acquire(unsafe.Pointer(&rw.readerSem)) race.Acquire(unsafe.Pointer(&rw.writerSem)) } } // TryLock tries to lock rw for writing and reports whether it succeeded. // // Note that while correct uses of TryLock do exist, they are rare, // and use of TryLock is often a sign of a deeper problem // in a particular use of mutexes. func (rw *RWMutex) TryLock() bool { if race.Enabled { _ = rw.w.state race.Disable() } if !rw.w.TryLock() { if race.Enabled { race.Enable() } return false } if !rw.readerCount.CompareAndSwap(0, -rwmutexMaxReaders) { rw.w.Unlock() if race.Enabled { race.Enable() } return false } if race.Enabled { race.Enable() race.Acquire(unsafe.Pointer(&rw.readerSem)) race.Acquire(unsafe.Pointer(&rw.writerSem)) } return true } // Unlock unlocks rw for writing. It is a run-time error if rw is // not locked for writing on entry to Unlock. // // As with Mutexes, a locked [RWMutex] is not associated with a particular // goroutine. One goroutine may [RWMutex.RLock] ([RWMutex.Lock]) a RWMutex and then // arrange for another goroutine to [RWMutex.RUnlock] ([RWMutex.Unlock]) it. func (rw *RWMutex) Unlock() { if race.Enabled { _ = rw.w.state race.Release(unsafe.Pointer(&rw.readerSem)) race.Disable() } // Announce to readers there is no active writer. r := rw.readerCount.Add(rwmutexMaxReaders) if r >= rwmutexMaxReaders { race.Enable() fatal("sync: Unlock of unlocked RWMutex") } // Unblock blocked readers, if any. for i := 0; i < int(r); i++ { runtime_Semrelease(&rw.readerSem, false, 0) } // Allow other writers to proceed. rw.w.Unlock() if race.Enabled { race.Enable() } } // syscall_hasWaitingReaders reports whether any goroutine is waiting // to acquire a read lock on rw. This exists because syscall.ForkLock // is an RWMutex, and we can't change that without breaking compatibility. // We don't need or want RWMutex semantics for ForkLock, and we use // this private API to avoid having to change the type of ForkLock. // For more details see the syscall package. // //go:linkname syscall_hasWaitingReaders syscall.hasWaitingReaders func syscall_hasWaitingReaders(rw *RWMutex) bool { r := rw.readerCount.Load() return r < 0 && r+rwmutexMaxReaders > 0 } // RLocker returns a [Locker] interface that implements // the [Locker.Lock] and [Locker.Unlock] methods by calling rw.RLock and rw.RUnlock. 
func (rw *RWMutex) RLocker() Locker { return (*rlocker)(rw) } type rlocker RWMutex func (r *rlocker) Lock() { (*RWMutex)(r).RLock() } func (r *rlocker) Unlock() { (*RWMutex)(r).RUnlock() }
go/src/sync/rwmutex.go/0
{ "file_path": "go/src/sync/rwmutex.go", "repo_id": "go", "token_count": 2592 }
434
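A minimal sketch of the read-mostly pattern RWMutex is meant for: many concurrent RLock readers and an occasional exclusive Lock writer. The counter type is illustrative only, not part of the package:

package main

import (
	"fmt"
	"sync"
)

type counters struct {
	mu sync.RWMutex
	m  map[string]int
}

func (c *counters) get(key string) int {
	c.mu.RLock() // shared: any number of readers may hold this at once
	defer c.mu.RUnlock()
	return c.m[key]
}

func (c *counters) inc(key string) {
	c.mu.Lock() // exclusive: blocks new readers and other writers
	defer c.mu.Unlock()
	c.m[key]++
}

func main() {
	c := &counters{m: make(map[string]int)}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.inc("hits")
			_ = c.get("hits")
		}()
	}
	wg.Wait()
	fmt.Println(c.get("hits")) // 4
}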
// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build linux && (mips64 || mips64le) #include "textflag.h" // // System calls for mips64, Linux // // func rawVforkSyscall(trap, a1, a2, a3 uintptr) (r1, err uintptr) TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-48 MOVV a1+8(FP), R4 MOVV a2+16(FP), R5 MOVV a3+24(FP), R6 MOVV R0, R7 MOVV R0, R8 MOVV R0, R9 MOVV trap+0(FP), R2 // syscall entry SYSCALL BEQ R7, ok MOVV $-1, R1 MOVV R1, r1+32(FP) // r1 MOVV R2, err+40(FP) // errno RET ok: MOVV R2, r1+32(FP) // r1 MOVV R0, err+40(FP) // errno RET TEXT ·rawSyscallNoError(SB),NOSPLIT,$0-48 MOVV a1+8(FP), R4 MOVV a2+16(FP), R5 MOVV a3+24(FP), R6 MOVV R0, R7 MOVV R0, R8 MOVV R0, R9 MOVV trap+0(FP), R2 // syscall entry MOVV R0, R3 // reset R3 to zero as 1-ret SYSCALL keeps it SYSCALL MOVV R2, r1+32(FP) // r1 MOVV R3, r2+40(FP) // r2 RET
go/src/syscall/asm_linux_mips64x.s/0
{ "file_path": "go/src/syscall/asm_linux_mips64x.s", "repo_id": "go", "token_count": 543 }
435
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. #include "textflag.h" #include "funcdata.h" #define SYS_ERRSTR 41 /* from zsysnum_plan9.go */ #define SYS_SEEK 39 /* from zsysnum_plan9.go */ // System call support for plan9 on arm //func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err ErrorString) TEXT ·Syscall(SB),NOSPLIT,$144-32 NO_LOCAL_POINTERS BL runtime·entersyscall(SB) MOVW $a1+4(FP), R0 // move syscall args MOVW $sysargs-144(SP), R1 MOVM.IA (R0), [R2-R4] MOVM.IA [R2-R4], (R1) MOVW trap+0(FP), R0 // syscall num SWI $0 MOVW $0, R2 MOVW $r1+16(FP), R3 MOVM.IA [R0,R2], (R3) CMP $-1, R0 B.EQ syscallerr BL runtime·exitsyscall(SB) MOVW $·emptystring+0(SB), R2 B syscallok syscallerr: MOVW $errbuf-128(SP), R2 MOVW $128, R3 MOVM.IA [R2,R3], (R1) MOVW $SYS_ERRSTR, R0 SWI $0 BL runtime·exitsyscall(SB) BL runtime·gostring(SB) MOVW $str-140(SP), R2 syscallok: MOVW $err+24(FP), R1 MOVM.IA (R2), [R3-R4] MOVM.IA [R3-R4], (R1) RET //func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err ErrorString) // Actually Syscall5 but the rest of the code expects it to be named Syscall6. TEXT ·Syscall6(SB),NOSPLIT,$144-44 NO_LOCAL_POINTERS BL runtime·entersyscall(SB) MOVW $a1+4(FP), R0 // move syscall args MOVW $sysargs-144(SP), R1 MOVM.IA (R0), [R2-R6] MOVM.IA [R2-R6], (R1) MOVW trap+0(FP), R0 // syscall num SWI $0 MOVW $0, R2 MOVW $r1+28(FP), R3 MOVM.IA.W [R0,R2], (R3) CMP $-1, R0 B.EQ syscall6err BL runtime·exitsyscall(SB) MOVW $·emptystring+0(SB), R2 B syscall6ok syscall6err: MOVW $errbuf-128(SP), R2 MOVW $128, R3 MOVM.IA [R2,R3], (R1) MOVW $SYS_ERRSTR, R0 SWI $0 BL runtime·exitsyscall(SB) BL runtime·gostring(SB) MOVW $str-140(SP), R2 syscall6ok: MOVW $err+36(FP), R1 MOVM.IA (R2), [R3-R4] MOVM.IA [R3-R4], (R1) RET //func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) TEXT ·RawSyscall(SB),NOSPLIT,$12-28 MOVW $a1+4(FP), R0 // move syscall args MOVW $sysargs-12(SP), R1 MOVM.IA (R0), [R2-R4] MOVM.IA [R2-R4], (R1) MOVW trap+0(FP), R0 // syscall num SWI $0 MOVW R0, r1+16(FP) MOVW R0, r2+20(FP) MOVW R0, err+24(FP) RET //func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) // Actually RawSyscall5 but the rest of the code expects it to be named RawSyscall6. TEXT ·RawSyscall6(SB),NOSPLIT,$20-40 MOVW $a1+4(FP), R0 // move syscall args MOVW $sysargs-20(SP), R1 MOVM.IA (R0), [R2-R6] MOVM.IA [R2-R6], (R1) MOVW trap+0(FP), R0 // syscall num SWI $0 MOVW R0, r1+28(FP) MOVW R0, r2+32(FP) MOVW R0, err+36(FP) RET //func seek(placeholder uintptr, fd int, offset int64, whence int) (newoffset int64, err string) TEXT ·seek(SB),NOSPLIT,$20-36 NO_LOCAL_POINTERS MOVW $newoffset_lo+20(FP), R6 MOVW R6, sysargs-20(SP) // dest for return value MOVW $fd+4(FP), R0 // move syscall args MOVW $sysarg1-16(SP), R1 MOVM.IA (R0), [R2-R5] MOVM.IA [R2-R5], (R1) MOVW $SYS_SEEK, R0 // syscall num SWI $0 CMP $-1, R0 B.EQ seekerr MOVW $·emptystring+0(SB), R2 B seekok seekerr: MOVW R0, 0(R6) MOVW R0, 4(R6) BL ·errstr(SB) MOVW $ret-20(SP), R2 seekok: MOVW $err+28(FP), R1 MOVM.IA (R2), [R3-R4] MOVM.IA [R3-R4], (R1) RET
go/src/syscall/asm_plan9_arm.s/0
{ "file_path": "go/src/syscall/asm_plan9_arm.s", "repo_id": "go", "token_count": 1899 }
436
// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build dragonfly || netbsd || (openbsd && mips64) package syscall import ( "runtime" "unsafe" ) type SysProcAttr struct { Chroot string // Chroot. Credential *Credential // Credential. Ptrace bool // Enable tracing. Setsid bool // Create session. // Setpgid sets the process group ID of the child to Pgid, // or, if Pgid == 0, to the new child's process ID. Setpgid bool // Setctty sets the controlling terminal of the child to // file descriptor Ctty. Ctty must be a descriptor number // in the child process: an index into ProcAttr.Files. // This is only meaningful if Setsid is true. Setctty bool Noctty bool // Detach fd 0 from controlling terminal Ctty int // Controlling TTY fd // Foreground places the child process group in the foreground. // This implies Setpgid. The Ctty field must be set to // the descriptor of the controlling TTY. // Unlike Setctty, in this case Ctty must be a descriptor // number in the parent process. Foreground bool Pgid int // Child's process group ID if Setpgid. } // Implemented in runtime package. func runtime_BeforeFork() func runtime_AfterFork() func runtime_AfterForkInChild() // Fork, dup fd onto 0..len(fd), and exec(argv0, argvv, envv) in child. // If a dup or exec fails, write the errno error to pipe. // (Pipe is close-on-exec so if exec succeeds, it will be closed.) // In the child, this function must not acquire any locks, because // they might have been locked at the time of the fork. This means // no rescheduling, no malloc calls, and no new stack segments. // For the same reason compiler does not race instrument it. // The calls to RawSyscall are okay because they are assembly // functions that do not grow the stack. // //go:norace func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr *ProcAttr, sys *SysProcAttr, pipe int) (pid int, err Errno) { // Declare all variables at top in case any // declarations require heap allocation (e.g., err1). var ( r1 uintptr err1 Errno nextfd int i int pgrp _C_int cred *Credential ngroups, groups uintptr ) rlim := origRlimitNofile.Load() // guard against side effects of shuffling fds below. // Make sure that nextfd is beyond any currently open files so // that we can't run the risk of overwriting any of them. fd := make([]int, len(attr.Files)) nextfd = len(attr.Files) for i, ufd := range attr.Files { if nextfd < int(ufd) { nextfd = int(ufd) } fd[i] = int(ufd) } nextfd++ // About to call fork. // No more allocation or calls of non-assembly functions. runtime_BeforeFork() r1, _, err1 = RawSyscall(SYS_FORK, 0, 0, 0) if err1 != 0 { runtime_AfterFork() return 0, err1 } if r1 != 0 { // parent; return PID runtime_AfterFork() return int(r1), 0 } // Fork succeeded, now in child. // Enable tracing if requested. if sys.Ptrace { _, _, err1 = RawSyscall(SYS_PTRACE, uintptr(PTRACE_TRACEME), 0, 0) if err1 != 0 { goto childerror } } // Session ID if sys.Setsid { _, _, err1 = RawSyscall(SYS_SETSID, 0, 0, 0) if err1 != 0 { goto childerror } } // Set process group if sys.Setpgid || sys.Foreground { // Place child in process group. _, _, err1 = RawSyscall(SYS_SETPGID, 0, uintptr(sys.Pgid), 0) if err1 != 0 { goto childerror } } if sys.Foreground { // This should really be pid_t, however _C_int (aka int32) is // generally equivalent. 
pgrp = _C_int(sys.Pgid) if pgrp == 0 { r1, _, err1 = RawSyscall(SYS_GETPID, 0, 0, 0) if err1 != 0 { goto childerror } pgrp = _C_int(r1) } // Place process group in foreground. _, _, err1 = RawSyscall(SYS_IOCTL, uintptr(sys.Ctty), uintptr(TIOCSPGRP), uintptr(unsafe.Pointer(&pgrp))) if err1 != 0 { goto childerror } } // Restore the signal mask. We do this after TIOCSPGRP to avoid // having the kernel send a SIGTTOU signal to the process group. runtime_AfterForkInChild() // Chroot if chroot != nil { _, _, err1 = RawSyscall(SYS_CHROOT, uintptr(unsafe.Pointer(chroot)), 0, 0) if err1 != 0 { goto childerror } } // User and groups if cred = sys.Credential; cred != nil { ngroups = uintptr(len(cred.Groups)) groups = uintptr(0) if ngroups > 0 { groups = uintptr(unsafe.Pointer(&cred.Groups[0])) } if !cred.NoSetGroups { _, _, err1 = RawSyscall(SYS_SETGROUPS, ngroups, groups, 0) if err1 != 0 { goto childerror } } _, _, err1 = RawSyscall(SYS_SETGID, uintptr(cred.Gid), 0, 0) if err1 != 0 { goto childerror } _, _, err1 = RawSyscall(SYS_SETUID, uintptr(cred.Uid), 0, 0) if err1 != 0 { goto childerror } } // Chdir if dir != nil { _, _, err1 = RawSyscall(SYS_CHDIR, uintptr(unsafe.Pointer(dir)), 0, 0) if err1 != 0 { goto childerror } } // Pass 1: look for fd[i] < i and move those up above len(fd) // so that pass 2 won't stomp on an fd it needs later. if pipe < nextfd { if runtime.GOOS == "netbsd" || (runtime.GOOS == "openbsd" && runtime.GOARCH == "mips64") { _, _, err1 = RawSyscall(_SYS_DUP3, uintptr(pipe), uintptr(nextfd), O_CLOEXEC) } else if runtime.GOOS == "dragonfly" { _, _, err1 = RawSyscall(SYS_FCNTL, uintptr(pipe), _F_DUP2FD_CLOEXEC, uintptr(nextfd)) } else { _, _, err1 = RawSyscall(SYS_DUP2, uintptr(pipe), uintptr(nextfd), 0) if err1 != 0 { goto childerror } _, _, err1 = RawSyscall(SYS_FCNTL, uintptr(nextfd), F_SETFD, FD_CLOEXEC) } if err1 != 0 { goto childerror } pipe = nextfd nextfd++ } for i = 0; i < len(fd); i++ { if fd[i] >= 0 && fd[i] < i { if nextfd == pipe { // don't stomp on pipe nextfd++ } if runtime.GOOS == "netbsd" || (runtime.GOOS == "openbsd" && runtime.GOARCH == "mips64") { _, _, err1 = RawSyscall(_SYS_DUP3, uintptr(fd[i]), uintptr(nextfd), O_CLOEXEC) } else if runtime.GOOS == "dragonfly" { _, _, err1 = RawSyscall(SYS_FCNTL, uintptr(fd[i]), _F_DUP2FD_CLOEXEC, uintptr(nextfd)) } else { _, _, err1 = RawSyscall(SYS_DUP2, uintptr(fd[i]), uintptr(nextfd), 0) if err1 != 0 { goto childerror } _, _, err1 = RawSyscall(SYS_FCNTL, uintptr(nextfd), F_SETFD, FD_CLOEXEC) } if err1 != 0 { goto childerror } fd[i] = nextfd nextfd++ } } // Pass 2: dup fd[i] down onto i. for i = 0; i < len(fd); i++ { if fd[i] == -1 { RawSyscall(SYS_CLOSE, uintptr(i), 0, 0) continue } if fd[i] == i { // dup2(i, i) won't clear close-on-exec flag on Linux, // probably not elsewhere either. _, _, err1 = RawSyscall(SYS_FCNTL, uintptr(fd[i]), F_SETFD, 0) if err1 != 0 { goto childerror } continue } // The new fd is created NOT close-on-exec, // which is exactly what we want. _, _, err1 = RawSyscall(SYS_DUP2, uintptr(fd[i]), uintptr(i), 0) if err1 != 0 { goto childerror } } // By convention, we don't close-on-exec the fds we are // started with, so if len(fd) < 3, close 0, 1, 2 as needed. // Programs that know they inherit fds >= 3 will need // to set them close-on-exec. 
for i = len(fd); i < 3; i++ { RawSyscall(SYS_CLOSE, uintptr(i), 0, 0) } // Detach fd 0 from tty if sys.Noctty { _, _, err1 = RawSyscall(SYS_IOCTL, 0, uintptr(TIOCNOTTY), 0) if err1 != 0 { goto childerror } } // Set the controlling TTY to Ctty if sys.Setctty { _, _, err1 = RawSyscall(SYS_IOCTL, uintptr(sys.Ctty), uintptr(TIOCSCTTY), 0) if err1 != 0 { goto childerror } } // Restore original rlimit. if rlim != nil { RawSyscall(SYS_SETRLIMIT, uintptr(RLIMIT_NOFILE), uintptr(unsafe.Pointer(rlim)), 0) } // Time to exec. _, _, err1 = RawSyscall(SYS_EXECVE, uintptr(unsafe.Pointer(argv0)), uintptr(unsafe.Pointer(&argv[0])), uintptr(unsafe.Pointer(&envv[0]))) childerror: // send error code on pipe RawSyscall(SYS_WRITE, uintptr(pipe), uintptr(unsafe.Pointer(&err1)), unsafe.Sizeof(err1)) for { RawSyscall(SYS_EXIT, 253, 0, 0) } }
go/src/syscall/exec_bsd.go/0
{ "file_path": "go/src/syscall/exec_bsd.go", "repo_id": "go", "token_count": 3497 }
437
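forkAndExecInChild is internal plumbing; user code normally reaches the SysProcAttr options above through os/exec or os.StartProcess. A hedged sketch for a Unix-like system (the struct fields vary by platform, but Setpgid and Pgid are present on the BSDs and Linux), placing the child in its own process group, which corresponds to the SYS_SETPGID step above:

package main

import (
	"log"
	"os/exec"
	"syscall"
)

func main() {
	cmd := exec.Command("sleep", "1")
	// Pgid is left 0, so the child becomes the leader of a new process group.
	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}
}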
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // The runtime package uses //go:linkname to push the setEventHandler to this // package. To prevent the go tool from passing -complete to the compile tool, // this file must remain stubbed out.
go/src/syscall/js/js_js.s/0
{ "file_path": "go/src/syscall/js/js_js.s", "repo_id": "go", "token_count": 89 }
438
#!/usr/bin/env perl # Copyright 2009 The Go Authors. All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. # # Generate system call table for DragonFly from master list # (for example, /usr/src/sys/kern/syscalls.master). use strict; my $command = "mksysnum_dragonfly.pl " . join(' ', @ARGV); print <<EOF; // $command // Code generated by the command above; DO NOT EDIT. package syscall const ( EOF while(<>){ if(/^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$/){ my $num = $1; my $proto = $2; my $name = "SYS_$3"; $name =~ y/a-z/A-Z/; # There are multiple entries for enosys and nosys, so comment them out. if($name =~ /^SYS_E?NOSYS$/){ $name = "// $name"; } if($name eq 'SYS_SYS_EXIT'){ $name = 'SYS_EXIT'; } print " $name = $num; // $proto\n"; } } print <<EOF; ) EOF
go/src/syscall/mksysnum_dragonfly.pl/0
{ "file_path": "go/src/syscall/mksysnum_dragonfly.pl", "repo_id": "go", "token_count": 377 }
439
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package syscall import "unsafe" const ( _SYS_setgroups = SYS_SETGROUPS _SYS_clone3 = 435 _SYS_faccessat2 = 439 _SYS_fchmodat2 = 452 ) //sys Dup2(oldfd int, newfd int) (err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) //sys fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_NEWFSTATAT //sys Fstatfs(fd int, buf *Statfs_t) (err error) //sys Ftruncate(fd int, length int64) (err error) //sysnb Getegid() (egid int) //sysnb Geteuid() (euid int) //sysnb Getgid() (gid int) //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = SYS_GETRLIMIT //sysnb Getuid() (uid int) //sysnb InotifyInit() (fd int, err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) //sys Pause() (err error) //sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys Setfsgid(gid int) (err error) //sys Setfsuid(uid int) (err error) //sysnb setrlimit(resource int, rlim *Rlimit) (err error) = SYS_SETRLIMIT //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, buf *Statfs_t) (err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) = SYS_SYNC_FILE_RANGE //sys Truncate(path string, length int64) (err error) //sys Ustat(dev int, ubuf *Ustat_t) (err error) //sysnb getgroups(n int, list *_Gid_t) (nn int, err error) //sys futimesat(dirfd int, path string, times *[2]Timeval) (err error) //sysnb Gettimeofday(tv *Timeval) (err error) func Time(t *Time_t) (tt Time_t, err error) { var tv Timeval err = Gettimeofday(&tv) if err != nil { return 0, err } if t != nil { *t = Time_t(tv.Sec) } return Time_t(tv.Sec), nil } //sys Utime(path string, buf *Utimbuf) (err error) //sys utimes(path string, times *[2]Timeval) (err error) func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} } func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: sec, Usec: usec} } // Linux on s390x uses the old mmap interface, which requires arguments to be passed in a struct. // mmap2 also requires arguments to be passed in a struct; it is currently not exposed in <asm/unistd.h>. func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { mmap_args := [6]uintptr{addr, length, uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)} r0, _, e1 := Syscall(SYS_MMAP, uintptr(unsafe.Pointer(&mmap_args[0])), 0, 0) xaddr = uintptr(r0) if e1 != 0 { err = errnoErr(e1) } return } // On s390x Linux, all the socket calls go through an extra indirection. // The arguments to the underlying system call are the number below // and a pointer to an array of uintptr. 
We hide the pointer in the // socketcall assembly to avoid allocation on every system call. const ( // see linux/net.h _SOCKET = 1 _BIND = 2 _CONNECT = 3 _LISTEN = 4 _ACCEPT = 5 _GETSOCKNAME = 6 _GETPEERNAME = 7 _SOCKETPAIR = 8 _SEND = 9 _RECV = 10 _SENDTO = 11 _RECVFROM = 12 _SHUTDOWN = 13 _SETSOCKOPT = 14 _GETSOCKOPT = 15 _SENDMSG = 16 _RECVMSG = 17 _ACCEPT4 = 18 _RECVMMSG = 19 _SENDMMSG = 20 ) func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err Errno) func rawsocketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err Errno) func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { fd, e := socketcall(_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) if e != 0 { err = e } return } func getsockname(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { _, e := rawsocketcall(_GETSOCKNAME, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) if e != 0 { err = e } return } func getpeername(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { _, e := rawsocketcall(_GETPEERNAME, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) if e != 0 { err = e } return } func socketpair(domain int, typ int, flags int, fd *[2]int32) (err error) { _, e := rawsocketcall(_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(flags), uintptr(unsafe.Pointer(fd)), 0, 0) if e != 0 { err = e } return } func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { _, e := socketcall(_BIND, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) if e != 0 { err = e } return } func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { _, e := socketcall(_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) if e != 0 { err = e } return } func socket(domain int, typ int, proto int) (fd int, err error) { fd, e := rawsocketcall(_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0) if e != 0 { err = e } return } func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { _, e := socketcall(_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e != 0 { err = e } return } func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { _, e := socketcall(_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), vallen, 0) if e != 0 { err = e } return } func recvfrom(s int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { var base uintptr if len(p) > 0 { base = uintptr(unsafe.Pointer(&p[0])) } n, e := socketcall(_RECVFROM, uintptr(s), base, uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) if e != 0 { err = e } return } func sendto(s int, p []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { var base uintptr if len(p) > 0 { base = uintptr(unsafe.Pointer(&p[0])) } _, e := socketcall(_SENDTO, uintptr(s), base, uintptr(len(p)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e != 0 { err = e } return } func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { n, e := socketcall(_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) if e != 0 { err = e } return } func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { n, e := socketcall(_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) if e != 0 { err = e 
} return } func Listen(s int, n int) (err error) { _, e := socketcall(_LISTEN, uintptr(s), uintptr(n), 0, 0, 0, 0) if e != 0 { err = e } return } func Shutdown(s, how int) (err error) { _, e := socketcall(_SHUTDOWN, uintptr(s), uintptr(how), 0, 0, 0, 0) if e != 0 { err = e } return } //go:nosplit func rawSetrlimit(resource int, rlim *Rlimit) Errno { _, _, errno := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) return errno } func (r *PtraceRegs) PC() uint64 { return r.Psw.Addr } func (r *PtraceRegs) SetPC(pc uint64) { r.Psw.Addr = pc } func (iov *Iovec) SetLen(length int) { iov.Len = uint64(length) } func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) }
go/src/syscall/syscall_linux_s390x.go/0
{ "file_path": "go/src/syscall/syscall_linux_s390x.go", "repo_id": "go", "token_count": 3430 }
440
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !faketime

package syscall

const faketime = false

func faketimeWrite(fd int, p []byte) int {
	// This should never be called since faketime is false.
	panic("not implemented")
}
go/src/syscall/time_nofake.go/0
{ "file_path": "go/src/syscall/time_nofake.go", "repo_id": "go", "token_count": 105 }
441
// Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Windows UTF-16 strings can contain unpaired surrogates, which can't be // decoded into a valid UTF-8 string. This file defines a set of functions // that can be used to encode and decode potentially ill-formed UTF-16 strings // by using the [the WTF-8 encoding](https://simonsapin.github.io/wtf-8/). // // WTF-8 is a strict superset of UTF-8, i.e. any string that is // well-formed in UTF-8 is also well-formed in WTF-8 and the content // is unchanged. Also, the conversion never fails and is lossless. // // The benefit of using WTF-8 instead of UTF-8 when decoding a UTF-16 string // is that the conversion is lossless even for ill-formed UTF-16 strings. // This property allows to read an ill-formed UTF-16 string, convert it // to a Go string, and convert it back to the same original UTF-16 string. // // See go.dev/issues/59971 for more info. package syscall import ( "unicode/utf16" "unicode/utf8" ) const ( surr1 = 0xd800 surr2 = 0xdc00 surr3 = 0xe000 tx = 0b10000000 t3 = 0b11100000 maskx = 0b00111111 mask3 = 0b00001111 rune1Max = 1<<7 - 1 rune2Max = 1<<11 - 1 ) // encodeWTF16 returns the potentially ill-formed // UTF-16 encoding of s. func encodeWTF16(s string, buf []uint16) []uint16 { for i := 0; i < len(s); { // Cannot use 'for range s' because it expects valid // UTF-8 runes. r, size := utf8.DecodeRuneInString(s[i:]) if r == utf8.RuneError { // Check if s[i:] contains a valid WTF-8 encoded surrogate. if sc := s[i:]; len(sc) >= 3 && sc[0] == 0xED && 0xA0 <= sc[1] && sc[1] <= 0xBF && 0x80 <= sc[2] && sc[2] <= 0xBF { r = rune(sc[0]&mask3)<<12 + rune(sc[1]&maskx)<<6 + rune(sc[2]&maskx) buf = append(buf, uint16(r)) i += 3 continue } } i += size buf = utf16.AppendRune(buf, r) } return buf } // decodeWTF16 returns the WTF-8 encoding of // the potentially ill-formed UTF-16 s. func decodeWTF16(s []uint16, buf []byte) []byte { for i := 0; i < len(s); i++ { var ar rune switch r := s[i]; { case r < surr1, surr3 <= r: // normal rune ar = rune(r) case surr1 <= r && r < surr2 && i+1 < len(s) && surr2 <= s[i+1] && s[i+1] < surr3: // valid surrogate sequence ar = utf16.DecodeRune(rune(r), rune(s[i+1])) i++ default: // WTF-8 fallback. // This only handles the 3-byte case of utf8.AppendRune, // as surrogates always fall in that case. ar = rune(r) if ar > utf8.MaxRune { ar = utf8.RuneError } buf = append(buf, t3|byte(ar>>12), tx|byte(ar>>6)&maskx, tx|byte(ar)&maskx) continue } buf = utf8.AppendRune(buf, ar) } return buf }
go/src/syscall/wtf8_windows.go/0
{ "file_path": "go/src/syscall/wtf8_windows.go", "repo_id": "go", "token_count": 1118 }
442
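The WTF-8 helpers above are unexported, so the round-trip property is easiest to see from inside the package. A minimal sketch, assuming it lives in a _test.go file compiled into package syscall; the test name and the sample words are illustrative only:

func TestWTF8RoundTrip(t *testing.T) {
	// 0xD800 is an unpaired high surrogate: not valid UTF-16, but the
	// WTF-8 round trip preserves it bit-for-bit.
	illFormed := []uint16{'a', 0xD800, 'z'}

	wtf8 := decodeWTF16(illFormed, nil)    // UTF-16 -> WTF-8 bytes
	back := encodeWTF16(string(wtf8), nil) // WTF-8 -> UTF-16 again

	if len(back) != len(illFormed) {
		t.Fatalf("length changed: %d != %d", len(back), len(illFormed))
	}
	for i := range illFormed {
		if back[i] != illFormed[i] {
			t.Fatalf("word %d: got %#x, want %#x", i, back[i], illFormed[i])
		}
	}
}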
// Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_openbsd.go //go:build 386 && openbsd package syscall const ( sizeofPtr = 0x4 sizeofShort = 0x2 sizeofInt = 0x4 sizeofLong = 0x4 sizeofLongLong = 0x8 ) type ( _C_short int16 _C_int int32 _C_long int32 _C_long_long int64 ) type Timespec struct { Sec int64 Nsec int32 } type Timeval struct { Sec int64 Usec int32 } type Rusage struct { Utime Timeval Stime Timeval Maxrss int32 Ixrss int32 Idrss int32 Isrss int32 Minflt int32 Majflt int32 Nswap int32 Inblock int32 Oublock int32 Msgsnd int32 Msgrcv int32 Nsignals int32 Nvcsw int32 Nivcsw int32 } type Rlimit struct { Cur uint64 Max uint64 } type _Gid_t uint32 const ( S_IFMT = 0xf000 S_IFIFO = 0x1000 S_IFCHR = 0x2000 S_IFDIR = 0x4000 S_IFBLK = 0x6000 S_IFREG = 0x8000 S_IFLNK = 0xa000 S_IFSOCK = 0xc000 S_ISUID = 0x800 S_ISGID = 0x400 S_ISVTX = 0x200 S_IRUSR = 0x100 S_IWUSR = 0x80 S_IXUSR = 0x40 S_IRWXG = 0x38 S_IRWXO = 0x7 ) type Stat_t struct { Mode uint32 Dev int32 Ino uint64 Nlink uint32 Uid uint32 Gid uint32 Rdev int32 Atim Timespec Mtim Timespec Ctim Timespec Size int64 Blocks int64 Blksize uint32 Flags uint32 Gen uint32 X__st_birthtim Timespec } type Statfs_t struct { F_flags uint32 F_bsize uint32 F_iosize uint32 F_blocks uint64 F_bfree uint64 F_bavail int64 F_files uint64 F_ffree uint64 F_favail int64 F_syncwrites uint64 F_syncreads uint64 F_asyncwrites uint64 F_asyncreads uint64 F_fsid Fsid F_namemax uint32 F_owner uint32 F_ctime uint64 F_fstypename [16]int8 F_mntonname [90]int8 F_mntfromname [90]int8 F_mntfromspec [90]int8 Pad_cgo_0 [2]byte Mount_info [160]byte } type Flock_t struct { Start int64 Len int64 Pid int32 Type int16 Whence int16 } type Dirent struct { Fileno uint64 Off int64 Reclen uint16 Type uint8 Namlen uint8 X__d_padding [4]uint8 Name [256]int8 } type Fsid struct { Val [2]int32 } const ( pathMax = 0x400 ) type RawSockaddrInet4 struct { Len uint8 Family uint8 Port uint16 Addr [4]byte /* in_addr */ Zero [8]int8 } type RawSockaddrInet6 struct { Len uint8 Family uint8 Port uint16 Flowinfo uint32 Addr [16]byte /* in6_addr */ Scope_id uint32 } type RawSockaddrUnix struct { Len uint8 Family uint8 Path [104]int8 } type RawSockaddrDatalink struct { Len uint8 Family uint8 Index uint16 Type uint8 Nlen uint8 Alen uint8 Slen uint8 Data [24]int8 } type RawSockaddr struct { Len uint8 Family uint8 Data [14]int8 } type RawSockaddrAny struct { Addr RawSockaddr Pad [92]int8 } type _Socklen uint32 type Linger struct { Onoff int32 Linger int32 } type Iovec struct { Base *byte Len uint32 } type IPMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } type IPv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 } type Msghdr struct { Name *byte Namelen uint32 Iov *Iovec Iovlen uint32 Control *byte Controllen uint32 Flags int32 } type Cmsghdr struct { Len uint32 Level int32 Type int32 } type Inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex uint32 } type IPv6MTUInfo struct { Addr RawSockaddrInet6 Mtu uint32 } type ICMPv6Filter struct { Filt [8]uint32 } const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c SizeofSockaddrAny = 0x6c SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x20 SizeofLinger = 0x8 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x1c SizeofCmsghdr = 0xc SizeofInet6Pktinfo = 0x14 SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 ) const ( PTRACE_TRACEME = 0x0 PTRACE_CONT = 0x7 PTRACE_KILL = 0x8 ) type Kevent_t struct { Ident uint32 Filter int16 Flags uint16 Fflags uint32 Data int64 Udata *byte } type FdSet 
struct { Bits [32]uint32 } const ( SizeofIfMsghdr = 0xec SizeofIfData = 0xd4 SizeofIfaMsghdr = 0x18 SizeofIfAnnounceMsghdr = 0x1a SizeofRtMsghdr = 0x60 SizeofRtMetrics = 0x38 ) type IfMsghdr struct { Msglen uint16 Version uint8 Type uint8 Hdrlen uint16 Index uint16 Tableid uint16 Pad1 uint8 Pad2 uint8 Addrs int32 Flags int32 Xflags int32 Data IfData } type IfData struct { Type uint8 Addrlen uint8 Hdrlen uint8 Link_state uint8 Mtu uint32 Metric uint32 Pad uint32 Baudrate uint64 Ipackets uint64 Ierrors uint64 Opackets uint64 Oerrors uint64 Collisions uint64 Ibytes uint64 Obytes uint64 Imcasts uint64 Omcasts uint64 Iqdrops uint64 Noproto uint64 Capabilities uint32 Lastchange Timeval Mclpool [7]Mclpool } type IfaMsghdr struct { Msglen uint16 Version uint8 Type uint8 Hdrlen uint16 Index uint16 Tableid uint16 Pad1 uint8 Pad2 uint8 Addrs int32 Flags int32 Metric int32 } type IfAnnounceMsghdr struct { Msglen uint16 Version uint8 Type uint8 Hdrlen uint16 Index uint16 What uint16 Name [16]int8 } type RtMsghdr struct { Msglen uint16 Version uint8 Type uint8 Hdrlen uint16 Index uint16 Tableid uint16 Priority uint8 Mpls uint8 Addrs int32 Flags int32 Fmask int32 Pid int32 Seq int32 Errno int32 Inits uint32 Rmx RtMetrics } type RtMetrics struct { Pksent uint64 Expire int64 Locks uint32 Mtu uint32 Refcnt uint32 Hopcount uint32 Recvpipe uint32 Sendpipe uint32 Ssthresh uint32 Rtt uint32 Rttvar uint32 Pad uint32 } type Mclpool struct { Grown int32 Alive uint16 Hwm uint16 Cwm uint16 Lwm uint16 } const ( SizeofBpfVersion = 0x4 SizeofBpfStat = 0x8 SizeofBpfProgram = 0x8 SizeofBpfInsn = 0x8 SizeofBpfHdr = 0x14 ) type BpfVersion struct { Major uint16 Minor uint16 } type BpfStat struct { Recv uint32 Drop uint32 } type BpfProgram struct { Len uint32 Insns *BpfInsn } type BpfInsn struct { Code uint16 Jt uint8 Jf uint8 K uint32 } type BpfHdr struct { Tstamp BpfTimeval Caplen uint32 Datalen uint32 Hdrlen uint16 Pad_cgo_0 [2]byte } type BpfTimeval struct { Sec uint32 Usec uint32 } const ( _AT_FDCWD = -0x64 ) type Termios struct { Iflag uint32 Oflag uint32 Cflag uint32 Lflag uint32 Cc [20]uint8 Ispeed int32 Ospeed int32 }
go/src/syscall/ztypes_openbsd_386.go/0
{ "file_path": "go/src/syscall/ztypes_openbsd_386.go", "repo_id": "go", "token_count": 3635 }
443
// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package testing_test import ( "flag" "internal/testenv" "os" "os/exec" "testing" ) var testFlagArg = flag.String("test_flag_arg", "", "TestFlag: passing -v option") const flagTestEnv = "GO_WANT_FLAG_HELPER_PROCESS" func TestFlag(t *testing.T) { if os.Getenv(flagTestEnv) == "1" { testFlagHelper(t) return } testenv.MustHaveExec(t) for _, flag := range []string{"", "-test.v", "-test.v=test2json"} { flag := flag t.Run(flag, func(t *testing.T) { t.Parallel() exe, err := os.Executable() if err != nil { exe = os.Args[0] } cmd := exec.Command(exe, "-test.run=^TestFlag$", "-test_flag_arg="+flag) if flag != "" { cmd.Args = append(cmd.Args, flag) } cmd.Env = append(cmd.Environ(), flagTestEnv+"=1") b, err := cmd.CombinedOutput() if len(b) > 0 { // When we set -test.v=test2json, we need to escape the ^V control // character used for JSON framing so that the JSON parser doesn't // misinterpret the subprocess output as output from the parent test. t.Logf("%q", b) } if err != nil { t.Error(err) } }) } } // testFlagHelper is called by the TestFlagHelper subprocess. func testFlagHelper(t *testing.T) { f := flag.Lookup("test.v") if f == nil { t.Fatal(`flag.Lookup("test.v") failed`) } bf, ok := f.Value.(interface{ IsBoolFlag() bool }) if !ok { t.Errorf("test.v flag (type %T) does not have IsBoolFlag method", f) } else if !bf.IsBoolFlag() { t.Error("test.v IsBoolFlag() returned false") } gf, ok := f.Value.(flag.Getter) if !ok { t.Fatalf("test.v flag (type %T) does not have Get method", f) } v := gf.Get() var want any switch *testFlagArg { case "": want = false case "-test.v": want = true case "-test.v=test2json": want = "test2json" default: t.Fatalf("unexpected test_flag_arg %q", *testFlagArg) } if v != want { t.Errorf("test.v is %v want %v", v, want) } }
go/src/testing/flag_test.go/0
{ "file_path": "go/src/testing/flag_test.go", "repo_id": "go", "token_count": 866 }
444
// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package testing import ( "fmt" "os" "strconv" "strings" "sync" ) // matcher sanitizes, uniques, and filters names of subtests and subbenchmarks. type matcher struct { filter filterMatch skip filterMatch matchFunc func(pat, str string) (bool, error) mu sync.Mutex // subNames is used to deduplicate subtest names. // Each key is the subtest name joined to the deduplicated name of the parent test. // Each value is the count of the number of occurrences of the given subtest name // already seen. subNames map[string]int32 } type filterMatch interface { // matches checks the name against the receiver's pattern strings using the // given match function. matches(name []string, matchString func(pat, str string) (bool, error)) (ok, partial bool) // verify checks that the receiver's pattern strings are valid filters by // calling the given match function. verify(name string, matchString func(pat, str string) (bool, error)) error } // simpleMatch matches a test name if all of the pattern strings match in // sequence. type simpleMatch []string // alternationMatch matches a test name if one of the alternations match. type alternationMatch []filterMatch // TODO: fix test_main to avoid race and improve caching, also allowing to // eliminate this Mutex. var matchMutex sync.Mutex func allMatcher() *matcher { return newMatcher(nil, "", "", "") } func newMatcher(matchString func(pat, str string) (bool, error), patterns, name, skips string) *matcher { var filter, skip filterMatch if patterns == "" { filter = simpleMatch{} // always partial true } else { filter = splitRegexp(patterns) if err := filter.verify(name, matchString); err != nil { fmt.Fprintf(os.Stderr, "testing: invalid regexp for %s\n", err) os.Exit(1) } } if skips == "" { skip = alternationMatch{} // always false } else { skip = splitRegexp(skips) if err := skip.verify("-test.skip", matchString); err != nil { fmt.Fprintf(os.Stderr, "testing: invalid regexp for %v\n", err) os.Exit(1) } } return &matcher{ filter: filter, skip: skip, matchFunc: matchString, subNames: map[string]int32{}, } } func (m *matcher) fullName(c *common, subname string) (name string, ok, partial bool) { name = subname m.mu.Lock() defer m.mu.Unlock() if c != nil && c.level > 0 { name = m.unique(c.name, rewrite(subname)) } matchMutex.Lock() defer matchMutex.Unlock() // We check the full array of paths each time to allow for the case that a pattern contains a '/'. elem := strings.Split(name, "/") // filter must match. // accept partial match that may produce full match later. ok, partial = m.filter.matches(elem, m.matchFunc) if !ok { return name, false, false } // skip must not match. // ignore partial match so we can get to more precise match later. skip, partialSkip := m.skip.matches(elem, m.matchFunc) if skip && !partialSkip { return name, false, false } return name, ok, partial } // clearSubNames clears the matcher's internal state, potentially freeing // memory. After this is called, T.Name may return the same strings as it did // for earlier subtests. 
func (m *matcher) clearSubNames() { m.mu.Lock() defer m.mu.Unlock() clear(m.subNames) } func (m simpleMatch) matches(name []string, matchString func(pat, str string) (bool, error)) (ok, partial bool) { for i, s := range name { if i >= len(m) { break } if ok, _ := matchString(m[i], s); !ok { return false, false } } return true, len(name) < len(m) } func (m simpleMatch) verify(name string, matchString func(pat, str string) (bool, error)) error { for i, s := range m { m[i] = rewrite(s) } // Verify filters before doing any processing. for i, s := range m { if _, err := matchString(s, "non-empty"); err != nil { return fmt.Errorf("element %d of %s (%q): %s", i, name, s, err) } } return nil } func (m alternationMatch) matches(name []string, matchString func(pat, str string) (bool, error)) (ok, partial bool) { for _, m := range m { if ok, partial = m.matches(name, matchString); ok { return ok, partial } } return false, false } func (m alternationMatch) verify(name string, matchString func(pat, str string) (bool, error)) error { for i, m := range m { if err := m.verify(name, matchString); err != nil { return fmt.Errorf("alternation %d of %s", i, err) } } return nil } func splitRegexp(s string) filterMatch { a := make(simpleMatch, 0, strings.Count(s, "/")) b := make(alternationMatch, 0, strings.Count(s, "|")) cs := 0 cp := 0 for i := 0; i < len(s); { switch s[i] { case '[': cs++ case ']': if cs--; cs < 0 { // An unmatched ']' is legal. cs = 0 } case '(': if cs == 0 { cp++ } case ')': if cs == 0 { cp-- } case '\\': i++ case '/': if cs == 0 && cp == 0 { a = append(a, s[:i]) s = s[i+1:] i = 0 continue } case '|': if cs == 0 && cp == 0 { a = append(a, s[:i]) s = s[i+1:] i = 0 b = append(b, a) a = make(simpleMatch, 0, len(a)) continue } } i++ } a = append(a, s) if len(b) == 0 { return a } return append(b, a) } // unique creates a unique name for the given parent and subname by affixing it // with one or more counts, if necessary. func (m *matcher) unique(parent, subname string) string { base := parent + "/" + subname for { n := m.subNames[base] if n < 0 { panic("subtest count overflow") } m.subNames[base] = n + 1 if n == 0 && subname != "" { prefix, nn := parseSubtestNumber(base) if len(prefix) < len(base) && nn < m.subNames[prefix] { // This test is explicitly named like "parent/subname#NN", // and #NN was already used for the NNth occurrence of "parent/subname". // Loop to add a disambiguating suffix. continue } return base } name := fmt.Sprintf("%s#%02d", base, n) if m.subNames[name] != 0 { // This is the nth occurrence of base, but the name "parent/subname#NN" // collides with the first occurrence of a subtest *explicitly* named // "parent/subname#NN". Try the next number. continue } return name } } // parseSubtestNumber splits a subtest name into a "#%02d"-formatted int32 // suffix (if present), and a prefix preceding that suffix (always). func parseSubtestNumber(s string) (prefix string, nn int32) { i := strings.LastIndex(s, "#") if i < 0 { return s, 0 } prefix, suffix := s[:i], s[i+1:] if len(suffix) < 2 || (len(suffix) > 2 && suffix[0] == '0') { // Even if suffix is numeric, it is not a possible output of a "%02" format // string: it has either too few digits or too many leading zeroes. return s, 0 } if suffix == "00" { if !strings.HasSuffix(prefix, "/") { // We only use "#00" as a suffix for subtests named with the empty // string — it isn't a valid suffix if the subtest name is non-empty. 
return s, 0 } } n, err := strconv.ParseInt(suffix, 10, 32) if err != nil || n < 0 { return s, 0 } return prefix, int32(n) } // rewrite rewrites a subname to having only printable characters and no white // space. func rewrite(s string) string { b := []byte{} for _, r := range s { switch { case isSpace(r): b = append(b, '_') case !strconv.IsPrint(r): s := strconv.QuoteRune(r) b = append(b, s[1:len(s)-1]...) default: b = append(b, string(r)...) } } return string(b) } func isSpace(r rune) bool { if r < 0x2000 { switch r { // Note: not the same as Unicode Z class. case '\t', '\n', '\v', '\f', '\r', ' ', 0x85, 0xA0, 0x1680: return true } } else { if r <= 0x200a { return true } switch r { case 0x2028, 0x2029, 0x202f, 0x205f, 0x3000: return true } } return false }
go/src/testing/match.go/0
{ "file_path": "go/src/testing/match.go", "repo_id": "go", "token_count": 3018 }
445
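The deduplication that matcher.unique implements is observable through the public testing API: repeated t.Run names pick up "#NN" suffixes. A small sketch, assuming an ordinary test file (the package and test names are placeholders):

package example_test

import "testing"

func TestDuplicateSubtestNames(t *testing.T) {
	for i := 0; i < 3; i++ {
		t.Run("dup", func(t *testing.T) {
			t.Log(t.Name())
			// Logs: TestDuplicateSubtestNames/dup,
			//       TestDuplicateSubtestNames/dup#01,
			//       TestDuplicateSubtestNames/dup#02
		})
	}
}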
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package testing_test

import (
	"testing"
	"time"
)

var sink time.Time
var sinkHPT testing.HighPrecisionTime

func BenchmarkTimeNow(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sink = time.Now()
	}
}

func BenchmarkHighPrecisionTimeNow(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sinkHPT = testing.HighPrecisionTimeNow()
	}
}
go/src/testing/testing_windows_test.go/0
{ "file_path": "go/src/testing/testing_windows_test.go", "repo_id": "go", "token_count": 181 }
446
// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package template // Tests for multiple-template parsing and execution. import ( "fmt" "os" "strings" "testing" "text/template/parse" ) const ( noError = true hasError = false ) type multiParseTest struct { name string input string ok bool names []string results []string } var multiParseTests = []multiParseTest{ {"empty", "", noError, nil, nil}, {"one", `{{define "foo"}} FOO {{end}}`, noError, []string{"foo"}, []string{" FOO "}}, {"two", `{{define "foo"}} FOO {{end}}{{define "bar"}} BAR {{end}}`, noError, []string{"foo", "bar"}, []string{" FOO ", " BAR "}}, // errors {"missing end", `{{define "foo"}} FOO `, hasError, nil, nil}, {"malformed name", `{{define "foo}} FOO `, hasError, nil, nil}, } func TestMultiParse(t *testing.T) { for _, test := range multiParseTests { template, err := New("root").Parse(test.input) switch { case err == nil && !test.ok: t.Errorf("%q: expected error; got none", test.name) continue case err != nil && test.ok: t.Errorf("%q: unexpected error: %v", test.name, err) continue case err != nil && !test.ok: // expected error, got one if *debug { fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err) } continue } if template == nil { continue } if len(template.tmpl) != len(test.names)+1 { // +1 for root t.Errorf("%s: wrong number of templates; wanted %d got %d", test.name, len(test.names), len(template.tmpl)) continue } for i, name := range test.names { tmpl, ok := template.tmpl[name] if !ok { t.Errorf("%s: can't find template %q", test.name, name) continue } result := tmpl.Root.String() if result != test.results[i] { t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.results[i]) } } } } var multiExecTests = []execTest{ {"empty", "", "", nil, true}, {"text", "some text", "some text", nil, true}, {"invoke x", `{{template "x" .SI}}`, "TEXT", tVal, true}, {"invoke x no args", `{{template "x"}}`, "TEXT", tVal, true}, {"invoke dot int", `{{template "dot" .I}}`, "17", tVal, true}, {"invoke dot []int", `{{template "dot" .SI}}`, "[3 4 5]", tVal, true}, {"invoke dotV", `{{template "dotV" .U}}`, "v", tVal, true}, {"invoke nested int", `{{template "nested" .I}}`, "17", tVal, true}, {"variable declared by template", `{{template "nested" $x:=.SI}},{{index $x 1}}`, "[3 4 5],4", tVal, true}, // User-defined function: test argument evaluator. {"testFunc literal", `{{oneArg "joe"}}`, "oneArg=joe", tVal, true}, {"testFunc .", `{{oneArg .}}`, "oneArg=joe", "joe", true}, } // These strings are also in testdata/*. const multiText1 = ` {{define "x"}}TEXT{{end}} {{define "dotV"}}{{.V}}{{end}} ` const multiText2 = ` {{define "dot"}}{{.}}{{end}} {{define "nested"}}{{template "dot" .}}{{end}} ` func TestMultiExecute(t *testing.T) { // Declare a couple of templates first. 
template, err := New("root").Parse(multiText1) if err != nil { t.Fatalf("parse error for 1: %s", err) } _, err = template.Parse(multiText2) if err != nil { t.Fatalf("parse error for 2: %s", err) } testExecute(multiExecTests, template, t) } func TestParseFiles(t *testing.T) { _, err := ParseFiles("DOES NOT EXIST") if err == nil { t.Error("expected error for non-existent file; got none") } template := New("root") _, err = template.ParseFiles("testdata/file1.tmpl", "testdata/file2.tmpl") if err != nil { t.Fatalf("error parsing files: %v", err) } testExecute(multiExecTests, template, t) } func TestParseGlob(t *testing.T) { _, err := ParseGlob("DOES NOT EXIST") if err == nil { t.Error("expected error for non-existent file; got none") } _, err = New("error").ParseGlob("[x") if err == nil { t.Error("expected error for bad pattern; got none") } template := New("root") _, err = template.ParseGlob("testdata/file*.tmpl") if err != nil { t.Fatalf("error parsing files: %v", err) } testExecute(multiExecTests, template, t) } func TestParseFS(t *testing.T) { fs := os.DirFS("testdata") { _, err := ParseFS(fs, "DOES NOT EXIST") if err == nil { t.Error("expected error for non-existent file; got none") } } { template := New("root") _, err := template.ParseFS(fs, "file1.tmpl", "file2.tmpl") if err != nil { t.Fatalf("error parsing files: %v", err) } testExecute(multiExecTests, template, t) } { template := New("root") _, err := template.ParseFS(fs, "file*.tmpl") if err != nil { t.Fatalf("error parsing files: %v", err) } testExecute(multiExecTests, template, t) } } // In these tests, actual content (not just template definitions) comes from the parsed files. var templateFileExecTests = []execTest{ {"test", `{{template "tmpl1.tmpl"}}{{template "tmpl2.tmpl"}}`, "template1\n\ny\ntemplate2\n\nx\n", 0, true}, } func TestParseFilesWithData(t *testing.T) { template, err := New("root").ParseFiles("testdata/tmpl1.tmpl", "testdata/tmpl2.tmpl") if err != nil { t.Fatalf("error parsing files: %v", err) } testExecute(templateFileExecTests, template, t) } func TestParseGlobWithData(t *testing.T) { template, err := New("root").ParseGlob("testdata/tmpl*.tmpl") if err != nil { t.Fatalf("error parsing files: %v", err) } testExecute(templateFileExecTests, template, t) } const ( cloneText1 = `{{define "a"}}{{template "b"}}{{template "c"}}{{end}}` cloneText2 = `{{define "b"}}b{{end}}` cloneText3 = `{{define "c"}}root{{end}}` cloneText4 = `{{define "c"}}clone{{end}}` ) func TestClone(t *testing.T) { // Create some templates and clone the root. root, err := New("root").Parse(cloneText1) if err != nil { t.Fatal(err) } _, err = root.Parse(cloneText2) if err != nil { t.Fatal(err) } clone := Must(root.Clone()) // Add variants to both. _, err = root.Parse(cloneText3) if err != nil { t.Fatal(err) } _, err = clone.Parse(cloneText4) if err != nil { t.Fatal(err) } // Verify that the clone is self-consistent. for k, v := range clone.tmpl { if k == clone.name && v.tmpl[k] != clone { t.Error("clone does not contain root") } if v != v.tmpl[v.name] { t.Errorf("clone does not contain self for %q", k) } } // Execute root. var b strings.Builder err = root.ExecuteTemplate(&b, "a", 0) if err != nil { t.Fatal(err) } if b.String() != "broot" { t.Errorf("expected %q got %q", "broot", b.String()) } // Execute copy. b.Reset() err = clone.ExecuteTemplate(&b, "a", 0) if err != nil { t.Fatal(err) } if b.String() != "bclone" { t.Errorf("expected %q got %q", "bclone", b.String()) } } func TestAddParseTree(t *testing.T) { // Create some templates. 
root, err := New("root").Parse(cloneText1) if err != nil { t.Fatal(err) } _, err = root.Parse(cloneText2) if err != nil { t.Fatal(err) } // Add a new parse tree. tree, err := parse.Parse("cloneText3", cloneText3, "", "", nil, builtins()) if err != nil { t.Fatal(err) } added, err := root.AddParseTree("c", tree["c"]) if err != nil { t.Fatal(err) } // Execute. var b strings.Builder err = added.ExecuteTemplate(&b, "a", 0) if err != nil { t.Fatal(err) } if b.String() != "broot" { t.Errorf("expected %q got %q", "broot", b.String()) } } // Issue 7032 func TestAddParseTreeToUnparsedTemplate(t *testing.T) { master := "{{define \"master\"}}{{end}}" tmpl := New("master") tree, err := parse.Parse("master", master, "", "", nil) if err != nil { t.Fatalf("unexpected parse err: %v", err) } masterTree := tree["master"] tmpl.AddParseTree("master", masterTree) // used to panic } func TestRedefinition(t *testing.T) { var tmpl *Template var err error if tmpl, err = New("tmpl1").Parse(`{{define "test"}}foo{{end}}`); err != nil { t.Fatalf("parse 1: %v", err) } if _, err = tmpl.Parse(`{{define "test"}}bar{{end}}`); err != nil { t.Fatalf("got error %v, expected nil", err) } if _, err = tmpl.New("tmpl2").Parse(`{{define "test"}}bar{{end}}`); err != nil { t.Fatalf("got error %v, expected nil", err) } } // Issue 10879 func TestEmptyTemplateCloneCrash(t *testing.T) { t1 := New("base") t1.Clone() // used to panic } // Issue 10910, 10926 func TestTemplateLookUp(t *testing.T) { t1 := New("foo") if t1.Lookup("foo") != nil { t.Error("Lookup returned non-nil value for undefined template foo") } t1.New("bar") if t1.Lookup("bar") != nil { t.Error("Lookup returned non-nil value for undefined template bar") } t1.Parse(`{{define "foo"}}test{{end}}`) if t1.Lookup("foo") == nil { t.Error("Lookup returned nil value for defined template") } } func TestNew(t *testing.T) { // template with same name already exists t1, _ := New("test").Parse(`{{define "test"}}foo{{end}}`) t2 := t1.New("test") if t1.common != t2.common { t.Errorf("t1 & t2 didn't share common struct; got %v != %v", t1.common, t2.common) } if t1.Tree == nil { t.Error("defined template got nil Tree") } if t2.Tree != nil { t.Error("undefined template got non-nil Tree") } containsT1 := false for _, tmpl := range t1.Templates() { if tmpl == t2 { t.Error("Templates included undefined template") } if tmpl == t1 { containsT1 = true } } if !containsT1 { t.Error("Templates didn't include defined template") } } func TestParse(t *testing.T) { // In multiple calls to Parse with the same receiver template, only one call // can contain text other than space, comments, and template definitions t1 := New("test") if _, err := t1.Parse(`{{define "test"}}{{end}}`); err != nil { t.Fatalf("parsing test: %s", err) } if _, err := t1.Parse(`{{define "test"}}{{/* this is a comment */}}{{end}}`); err != nil { t.Fatalf("parsing test: %s", err) } if _, err := t1.Parse(`{{define "test"}}foo{{end}}`); err != nil { t.Fatalf("parsing test: %s", err) } } func TestEmptyTemplate(t *testing.T) { cases := []struct { defn []string in string want string }{ {[]string{"x", "y"}, "", "y"}, {[]string{""}, "once", ""}, {[]string{"", ""}, "twice", ""}, {[]string{"{{.}}", "{{.}}"}, "twice", "twice"}, {[]string{"{{/* a comment */}}", "{{/* a comment */}}"}, "comment", ""}, {[]string{"{{.}}", ""}, "twice", ""}, } for i, c := range cases { root := New("root") var ( m *Template err error ) for _, d := range c.defn { m, err = root.New(c.in).Parse(d) if err != nil { t.Fatal(err) } } buf := &strings.Builder{} if err := 
m.Execute(buf, c.in); err != nil { t.Error(i, err) continue } if buf.String() != c.want { t.Errorf("expected string %q: got %q", c.want, buf.String()) } } } // Issue 19249 was a regression in 1.8 caused by the handling of empty // templates added in that release, which got different answers depending // on the order templates appeared in the internal map. func TestIssue19294(t *testing.T) { // The empty block in "xhtml" should be replaced during execution // by the contents of "stylesheet", but if the internal map associating // names with templates is built in the wrong order, the empty block // looks non-empty and this doesn't happen. var inlined = map[string]string{ "stylesheet": `{{define "stylesheet"}}stylesheet{{end}}`, "xhtml": `{{block "stylesheet" .}}{{end}}`, } all := []string{"stylesheet", "xhtml"} for i := 0; i < 100; i++ { res, err := New("title.xhtml").Parse(`{{template "xhtml" .}}`) if err != nil { t.Fatal(err) } for _, name := range all { _, err := res.New(name).Parse(inlined[name]) if err != nil { t.Fatal(err) } } var buf strings.Builder res.Execute(&buf, 0) if buf.String() != "stylesheet" { t.Fatalf("iteration %d: got %q; expected %q", i, buf.String(), "stylesheet") } } } // Issue 48436 func TestAddToZeroTemplate(t *testing.T) { tree, err := parse.Parse("c", cloneText3, "", "", nil, builtins()) if err != nil { t.Fatal(err) } var tmpl Template tmpl.AddParseTree("x", tree["c"]) }
go/src/text/template/multi_test.go/0
{ "file_path": "go/src/text/template/multi_test.go", "repo_id": "go", "token_count": 4952 }
447
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package time

func ForceAusFromTZIForTesting() {
	ResetLocalOnceForTest()
	localOnce.Do(func() { initLocalFromTZI(&aus) })
}

func ForceUSPacificFromTZIForTesting() {
	ResetLocalOnceForTest()
	localOnce.Do(func() { initLocalFromTZI(&usPacific) })
}

func ToEnglishName(stdname, dstname string) (string, error) {
	return toEnglishName(stdname, dstname)
}
go/src/time/export_windows_test.go/0
{ "file_path": "go/src/time/export_windows_test.go", "repo_id": "go", "token_count": 166 }
448
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package time

import "unsafe"

// Note: The runtime knows the layout of struct Ticker, since newTimer allocates it.
// Note also that Ticker and Timer have the same layout, so that newTimer can handle both.
// The initTimer and initTicker fields are named differently so that
// users cannot convert between the two without unsafe.

// A Ticker holds a channel that delivers “ticks” of a clock
// at intervals.
type Ticker struct {
	C          <-chan Time // The channel on which the ticks are delivered.
	initTicker bool
}

// NewTicker returns a new [Ticker] containing a channel that will send
// the current time on the channel after each tick. The period of the
// ticks is specified by the duration argument. The ticker will adjust
// the time interval or drop ticks to make up for slow receivers.
// The duration d must be greater than zero; if not, NewTicker will
// panic.
//
// Before Go 1.23, the garbage collector did not recover
// tickers that had not yet expired or been stopped, so code often
// immediately deferred t.Stop after calling NewTicker, to make
// the ticker recoverable when it was no longer needed.
// As of Go 1.23, the garbage collector can recover unreferenced
// tickers, even if they haven't been stopped.
// The Stop method is no longer necessary to help the garbage collector.
// (Code may of course still want to call Stop to stop the ticker for other reasons.)
func NewTicker(d Duration) *Ticker {
	if d <= 0 {
		panic("non-positive interval for NewTicker")
	}
	// Give the channel a 1-element time buffer.
	// If the client falls behind while reading, we drop ticks
	// on the floor until the client catches up.
	c := make(chan Time, 1)
	t := (*Ticker)(unsafe.Pointer(newTimer(when(d), int64(d), sendTime, c, syncTimer(c))))
	t.C = c
	return t
}

// Stop turns off a ticker. After Stop, no more ticks will be sent.
// Stop does not close the channel, to prevent a concurrent goroutine
// reading from the channel from seeing an erroneous "tick".
func (t *Ticker) Stop() {
	if !t.initTicker {
		// This is misuse, and the same for time.Timer would panic,
		// but this didn't always panic, and we keep it not panicking
		// to avoid breaking old programs. See issue 21874.
		return
	}
	stopTimer((*Timer)(unsafe.Pointer(t)))
}

// Reset stops a ticker and resets its period to the specified duration.
// The next tick will arrive after the new period elapses. The duration d
// must be greater than zero; if not, Reset will panic.
func (t *Ticker) Reset(d Duration) {
	if d <= 0 {
		panic("non-positive interval for Ticker.Reset")
	}
	if !t.initTicker {
		panic("time: Reset called on uninitialized Ticker")
	}
	resetTimer((*Timer)(unsafe.Pointer(t)), when(d), int64(d))
}

// Tick is a convenience wrapper for [NewTicker] providing access to the ticking
// channel only. Unlike NewTicker, Tick will return nil if d <= 0.
//
// Before Go 1.23, this documentation warned that the underlying
// [Ticker] would never be recovered by the garbage collector, and that
// if efficiency was a concern, code should use NewTicker instead and
// call [Ticker.Stop] when the ticker is no longer needed.
// As of Go 1.23, the garbage collector can recover unreferenced
// tickers, even if they haven't been stopped.
// The Stop method is no longer necessary to help the garbage collector.
// There is no longer any reason to prefer NewTicker when Tick will do.
func Tick(d Duration) <-chan Time {
	if d <= 0 {
		return nil
	}
	return NewTicker(d).C
}
go/src/time/tick.go/0
{ "file_path": "go/src/time/tick.go", "repo_id": "go", "token_count": 1034 }
449
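A sketch of typical NewTicker use through the public time API; the intervals and printed strings are illustrative only:

package main

import (
	"fmt"
	"time"
)

func main() {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop() // no longer needed for GC as of Go 1.23, but stops further ticks

	timeout := time.After(350 * time.Millisecond)
	for {
		select {
		case <-timeout:
			fmt.Println("done")
			return
		case tm := <-ticker.C:
			fmt.Println("tick at", tm.Format(time.RFC3339Nano))
		}
	}
}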
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build unix && !ios && !android

// Parse "zoneinfo" time zone file.
// This is a fairly standard file format used on OS X, Linux, BSD, Sun, and others.
// See tzfile(5), https://en.wikipedia.org/wiki/Zoneinfo,
// and ftp://munnari.oz.au/pub/oldtz/

package time

import (
	"syscall"
)

// Many systems use /usr/share/zoneinfo, Solaris 2 has
// /usr/share/lib/zoneinfo, IRIX 6 has /usr/lib/locale/TZ,
// NixOS has /etc/zoneinfo.
var platformZoneSources = []string{
	"/usr/share/zoneinfo/",
	"/usr/share/lib/zoneinfo/",
	"/usr/lib/locale/TZ/",
	"/etc/zoneinfo",
}

func initLocal() {
	// consult $TZ to find the time zone to use.
	// no $TZ means use the system default /etc/localtime.
	// $TZ="" means use UTC.
	// $TZ="foo" or $TZ=":foo" if foo is an absolute path, then the file pointed
	// by foo will be used to initialize timezone; otherwise, file
	// /usr/share/zoneinfo/foo will be used.

	tz, ok := syscall.Getenv("TZ")
	switch {
	case !ok:
		z, err := loadLocation("localtime", []string{"/etc"})
		if err == nil {
			localLoc = *z
			localLoc.name = "Local"
			return
		}
	case tz != "":
		if tz[0] == ':' {
			tz = tz[1:]
		}
		if tz != "" && tz[0] == '/' {
			if z, err := loadLocation(tz, []string{""}); err == nil {
				localLoc = *z
				if tz == "/etc/localtime" {
					localLoc.name = "Local"
				} else {
					localLoc.name = tz
				}
				return
			}
		} else if tz != "" && tz != "UTC" {
			if z, err := loadLocation(tz, platformZoneSources); err == nil {
				localLoc = *z
				return
			}
		}
	}

	// Fall back to UTC.
	localLoc.name = "UTC"
}
go/src/time/zoneinfo_unix.go/0
{ "file_path": "go/src/time/zoneinfo_unix.go", "repo_id": "go", "token_count": 705 }
450
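How the $TZ handling above surfaces in user code, as a sketch (the zone names are arbitrary examples): setting TZ before the first use of time.Local selects the local zone, while LoadLocation searches the same platform zoneinfo directories independently of $TZ.

package main

import (
	"fmt"
	"os"
	"time"
)

func main() {
	// Must run before anything touches time.Local, since initLocal is
	// evaluated lazily exactly once.
	os.Setenv("TZ", "America/New_York")

	fmt.Println(time.Now().Location()) // "Local", using New York rules

	// LoadLocation is independent of $TZ.
	loc, err := time.LoadLocation("Europe/Paris")
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Date(2024, 7, 1, 12, 0, 0, 0, loc).Zone()) // CEST 7200
}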
// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package utf16 implements encoding and decoding of UTF-16 sequences. package utf16 // The conditions replacementChar==unicode.ReplacementChar and // maxRune==unicode.MaxRune are verified in the tests. // Defining them locally avoids this package depending on package unicode. const ( replacementChar = '\uFFFD' // Unicode replacement character maxRune = '\U0010FFFF' // Maximum valid Unicode code point. ) const ( // 0xd800-0xdc00 encodes the high 10 bits of a pair. // 0xdc00-0xe000 encodes the low 10 bits of a pair. // the value is those 20 bits plus 0x10000. surr1 = 0xd800 surr2 = 0xdc00 surr3 = 0xe000 surrSelf = 0x10000 ) // IsSurrogate reports whether the specified Unicode code point // can appear in a surrogate pair. func IsSurrogate(r rune) bool { return surr1 <= r && r < surr3 } // DecodeRune returns the UTF-16 decoding of a surrogate pair. // If the pair is not a valid UTF-16 surrogate pair, DecodeRune returns // the Unicode replacement code point U+FFFD. func DecodeRune(r1, r2 rune) rune { if surr1 <= r1 && r1 < surr2 && surr2 <= r2 && r2 < surr3 { return (r1-surr1)<<10 | (r2 - surr2) + surrSelf } return replacementChar } // EncodeRune returns the UTF-16 surrogate pair r1, r2 for the given rune. // If the rune is not a valid Unicode code point or does not need encoding, // EncodeRune returns U+FFFD, U+FFFD. func EncodeRune(r rune) (r1, r2 rune) { if r < surrSelf || r > maxRune { return replacementChar, replacementChar } r -= surrSelf return surr1 + (r>>10)&0x3ff, surr2 + r&0x3ff } // RuneLen returns the number of 16-bit words in the UTF-16 encoding of the rune. // It returns -1 if the rune is not a valid value to encode in UTF-16. func RuneLen(r rune) int { switch { case 0 <= r && r < surr1, surr3 <= r && r < surrSelf: return 1 case surrSelf <= r && r <= maxRune: return 2 default: return -1 } } // Encode returns the UTF-16 encoding of the Unicode code point sequence s. func Encode(s []rune) []uint16 { n := len(s) for _, v := range s { if v >= surrSelf { n++ } } a := make([]uint16, n) n = 0 for _, v := range s { switch RuneLen(v) { case 1: // normal rune a[n] = uint16(v) n++ case 2: // needs surrogate sequence r1, r2 := EncodeRune(v) a[n] = uint16(r1) a[n+1] = uint16(r2) n += 2 default: a[n] = uint16(replacementChar) n++ } } return a[:n] } // AppendRune appends the UTF-16 encoding of the Unicode code point r // to the end of p and returns the extended buffer. If the rune is not // a valid Unicode code point, it appends the encoding of U+FFFD. func AppendRune(a []uint16, r rune) []uint16 { // This function is inlineable for fast handling of ASCII. switch { case 0 <= r && r < surr1, surr3 <= r && r < surrSelf: // normal rune return append(a, uint16(r)) case surrSelf <= r && r <= maxRune: // needs surrogate sequence r1, r2 := EncodeRune(r) return append(a, uint16(r1), uint16(r2)) } return append(a, replacementChar) } // Decode returns the Unicode code point sequence represented // by the UTF-16 encoding s. func Decode(s []uint16) []rune { // Preallocate capacity to hold up to 64 runes. // Decode inlines, so the allocation can live on the stack. buf := make([]rune, 0, 64) return decode(s, buf) } // decode appends to buf the Unicode code point sequence represented // by the UTF-16 encoding s and return the extended buffer. 
func decode(s []uint16, buf []rune) []rune { for i := 0; i < len(s); i++ { var ar rune switch r := s[i]; { case r < surr1, surr3 <= r: // normal rune ar = rune(r) case surr1 <= r && r < surr2 && i+1 < len(s) && surr2 <= s[i+1] && s[i+1] < surr3: // valid surrogate sequence ar = DecodeRune(rune(r), rune(s[i+1])) i++ default: // invalid surrogate sequence ar = replacementChar } buf = append(buf, ar) } return buf }
go/src/unicode/utf16/utf16.go/0
{ "file_path": "go/src/unicode/utf16/utf16.go", "repo_id": "go", "token_count": 1513 }
451
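A short usage sketch of the exported unicode/utf16 API on a rune outside the Basic Multilingual Plane, which requires a surrogate pair; the values in the comments follow from the surr1/surr2 arithmetic above.

package main

import (
	"fmt"
	"unicode/utf16"
)

func main() {
	const r = '😀' // U+1F600, needs two uint16 code units
	r1, r2 := utf16.EncodeRune(r)
	fmt.Printf("%#x %#x\n", r1, r2)            // 0xd83d 0xde00
	fmt.Println(utf16.DecodeRune(r1, r2) == r) // true

	u := utf16.Encode([]rune("a😀"))
	fmt.Println(len(u), utf16.IsSurrogate(rune(u[1]))) // 3 true
	fmt.Println(string(utf16.Decode(u)))               // a😀
}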
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package cryptobyte contains types that help with parsing and constructing // length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage // contains useful ASN.1 constants.) // // The String type is for parsing. It wraps a []byte slice and provides helper // functions for consuming structures, value by value. // // The Builder type is for constructing messages. It providers helper functions // for appending values and also for appending length-prefixed submessages – // without having to worry about calculating the length prefix ahead of time. // // See the documentation and examples for the Builder and String types to get // started. package cryptobyte // String represents a string of bytes. It provides methods for parsing // fixed-length and length-prefixed values from it. type String []byte // read advances a String by n bytes and returns them. If less than n bytes // remain, it returns nil. func (s *String) read(n int) []byte { if len(*s) < n || n < 0 { return nil } v := (*s)[:n] *s = (*s)[n:] return v } // Skip advances the String by n byte and reports whether it was successful. func (s *String) Skip(n int) bool { return s.read(n) != nil } // ReadUint8 decodes an 8-bit value into out and advances over it. // It reports whether the read was successful. func (s *String) ReadUint8(out *uint8) bool { v := s.read(1) if v == nil { return false } *out = uint8(v[0]) return true } // ReadUint16 decodes a big-endian, 16-bit value into out and advances over it. // It reports whether the read was successful. func (s *String) ReadUint16(out *uint16) bool { v := s.read(2) if v == nil { return false } *out = uint16(v[0])<<8 | uint16(v[1]) return true } // ReadUint24 decodes a big-endian, 24-bit value into out and advances over it. // It reports whether the read was successful. func (s *String) ReadUint24(out *uint32) bool { v := s.read(3) if v == nil { return false } *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2]) return true } // ReadUint32 decodes a big-endian, 32-bit value into out and advances over it. // It reports whether the read was successful. func (s *String) ReadUint32(out *uint32) bool { v := s.read(4) if v == nil { return false } *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3]) return true } // ReadUint48 decodes a big-endian, 48-bit value into out and advances over it. // It reports whether the read was successful. func (s *String) ReadUint48(out *uint64) bool { v := s.read(6) if v == nil { return false } *out = uint64(v[0])<<40 | uint64(v[1])<<32 | uint64(v[2])<<24 | uint64(v[3])<<16 | uint64(v[4])<<8 | uint64(v[5]) return true } // ReadUint64 decodes a big-endian, 64-bit value into out and advances over it. // It reports whether the read was successful. 
func (s *String) ReadUint64(out *uint64) bool { v := s.read(8) if v == nil { return false } *out = uint64(v[0])<<56 | uint64(v[1])<<48 | uint64(v[2])<<40 | uint64(v[3])<<32 | uint64(v[4])<<24 | uint64(v[5])<<16 | uint64(v[6])<<8 | uint64(v[7]) return true } func (s *String) readUnsigned(out *uint32, length int) bool { v := s.read(length) if v == nil { return false } var result uint32 for i := 0; i < length; i++ { result <<= 8 result |= uint32(v[i]) } *out = result return true } func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { lenBytes := s.read(lenLen) if lenBytes == nil { return false } var length uint32 for _, b := range lenBytes { length = length << 8 length = length | uint32(b) } v := s.read(int(length)) if v == nil { return false } *outChild = v return true } // ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value // into out and advances over it. It reports whether the read was successful. func (s *String) ReadUint8LengthPrefixed(out *String) bool { return s.readLengthPrefixed(1, out) } // ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit // length-prefixed value into out and advances over it. It reports whether the // read was successful. func (s *String) ReadUint16LengthPrefixed(out *String) bool { return s.readLengthPrefixed(2, out) } // ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit // length-prefixed value into out and advances over it. It reports whether // the read was successful. func (s *String) ReadUint24LengthPrefixed(out *String) bool { return s.readLengthPrefixed(3, out) } // ReadBytes reads n bytes into out and advances over them. It reports // whether the read was successful. func (s *String) ReadBytes(out *[]byte, n int) bool { v := s.read(n) if v == nil { return false } *out = v return true } // CopyBytes copies len(out) bytes into out and advances over them. It reports // whether the copy operation was successful func (s *String) CopyBytes(out []byte) bool { n := len(out) v := s.read(n) if v == nil { return false } return copy(out, v) == n } // Empty reports whether the string does not contain any bytes. func (s String) Empty() bool { return len(s) == 0 }
go/src/vendor/golang.org/x/crypto/cryptobyte/string.go/0
{ "file_path": "go/src/vendor/golang.org/x/crypto/cryptobyte/string.go", "repo_id": "go", "token_count": 1814 }
452
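A sketch of parsing a length-prefixed message with the exported String helpers; user code would import the upstream path golang.org/x/crypto/cryptobyte rather than this vendored copy, and the byte layout here is an invented example.

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	// A 16-bit type field followed by an 8-bit length-prefixed payload.
	input := cryptobyte.String([]byte{0x00, 0x2a, 0x03, 'f', 'o', 'o'})

	var typ uint16
	var payload cryptobyte.String
	if !input.ReadUint16(&typ) ||
		!input.ReadUint8LengthPrefixed(&payload) ||
		!input.Empty() {
		panic("malformed input")
	}
	fmt.Println(typ, string(payload)) // 42 foo
}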
// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build !amd64 || purego || !gc package sha3 import "math/bits" // rc stores the round constants for use in the ι step. var rc = [24]uint64{ 0x0000000000000001, 0x0000000000008082, 0x800000000000808A, 0x8000000080008000, 0x000000000000808B, 0x0000000080000001, 0x8000000080008081, 0x8000000000008009, 0x000000000000008A, 0x0000000000000088, 0x0000000080008009, 0x000000008000000A, 0x000000008000808B, 0x800000000000008B, 0x8000000000008089, 0x8000000000008003, 0x8000000000008002, 0x8000000000000080, 0x000000000000800A, 0x800000008000000A, 0x8000000080008081, 0x8000000000008080, 0x0000000080000001, 0x8000000080008008, } // keccakF1600 applies the Keccak permutation to a 1600b-wide // state represented as a slice of 25 uint64s. func keccakF1600(a *[25]uint64) { // Implementation translated from Keccak-inplace.c // in the keccak reference code. var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64 for i := 0; i < 24; i += 4 { // Combines the 5 steps in each round into 2 steps. // Unrolls 4 rounds per loop and spreads some steps across rounds. // Round 1 bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] d0 = bc4 ^ (bc1<<1 | bc1>>63) d1 = bc0 ^ (bc2<<1 | bc2>>63) d2 = bc1 ^ (bc3<<1 | bc3>>63) d3 = bc2 ^ (bc4<<1 | bc4>>63) d4 = bc3 ^ (bc0<<1 | bc0>>63) bc0 = a[0] ^ d0 t = a[6] ^ d1 bc1 = bits.RotateLeft64(t, 44) t = a[12] ^ d2 bc2 = bits.RotateLeft64(t, 43) t = a[18] ^ d3 bc3 = bits.RotateLeft64(t, 21) t = a[24] ^ d4 bc4 = bits.RotateLeft64(t, 14) a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i] a[6] = bc1 ^ (bc3 &^ bc2) a[12] = bc2 ^ (bc4 &^ bc3) a[18] = bc3 ^ (bc0 &^ bc4) a[24] = bc4 ^ (bc1 &^ bc0) t = a[10] ^ d0 bc2 = bits.RotateLeft64(t, 3) t = a[16] ^ d1 bc3 = bits.RotateLeft64(t, 45) t = a[22] ^ d2 bc4 = bits.RotateLeft64(t, 61) t = a[3] ^ d3 bc0 = bits.RotateLeft64(t, 28) t = a[9] ^ d4 bc1 = bits.RotateLeft64(t, 20) a[10] = bc0 ^ (bc2 &^ bc1) a[16] = bc1 ^ (bc3 &^ bc2) a[22] = bc2 ^ (bc4 &^ bc3) a[3] = bc3 ^ (bc0 &^ bc4) a[9] = bc4 ^ (bc1 &^ bc0) t = a[20] ^ d0 bc4 = bits.RotateLeft64(t, 18) t = a[1] ^ d1 bc0 = bits.RotateLeft64(t, 1) t = a[7] ^ d2 bc1 = bits.RotateLeft64(t, 6) t = a[13] ^ d3 bc2 = bits.RotateLeft64(t, 25) t = a[19] ^ d4 bc3 = bits.RotateLeft64(t, 8) a[20] = bc0 ^ (bc2 &^ bc1) a[1] = bc1 ^ (bc3 &^ bc2) a[7] = bc2 ^ (bc4 &^ bc3) a[13] = bc3 ^ (bc0 &^ bc4) a[19] = bc4 ^ (bc1 &^ bc0) t = a[5] ^ d0 bc1 = bits.RotateLeft64(t, 36) t = a[11] ^ d1 bc2 = bits.RotateLeft64(t, 10) t = a[17] ^ d2 bc3 = bits.RotateLeft64(t, 15) t = a[23] ^ d3 bc4 = bits.RotateLeft64(t, 56) t = a[4] ^ d4 bc0 = bits.RotateLeft64(t, 27) a[5] = bc0 ^ (bc2 &^ bc1) a[11] = bc1 ^ (bc3 &^ bc2) a[17] = bc2 ^ (bc4 &^ bc3) a[23] = bc3 ^ (bc0 &^ bc4) a[4] = bc4 ^ (bc1 &^ bc0) t = a[15] ^ d0 bc3 = bits.RotateLeft64(t, 41) t = a[21] ^ d1 bc4 = bits.RotateLeft64(t, 2) t = a[2] ^ d2 bc0 = bits.RotateLeft64(t, 62) t = a[8] ^ d3 bc1 = bits.RotateLeft64(t, 55) t = a[14] ^ d4 bc2 = bits.RotateLeft64(t, 39) a[15] = bc0 ^ (bc2 &^ bc1) a[21] = bc1 ^ (bc3 &^ bc2) a[2] = bc2 ^ (bc4 &^ bc3) a[8] = bc3 ^ (bc0 &^ bc4) a[14] = bc4 ^ (bc1 &^ bc0) // Round 2 bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] bc4 = 
a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] d0 = bc4 ^ (bc1<<1 | bc1>>63) d1 = bc0 ^ (bc2<<1 | bc2>>63) d2 = bc1 ^ (bc3<<1 | bc3>>63) d3 = bc2 ^ (bc4<<1 | bc4>>63) d4 = bc3 ^ (bc0<<1 | bc0>>63) bc0 = a[0] ^ d0 t = a[16] ^ d1 bc1 = bits.RotateLeft64(t, 44) t = a[7] ^ d2 bc2 = bits.RotateLeft64(t, 43) t = a[23] ^ d3 bc3 = bits.RotateLeft64(t, 21) t = a[14] ^ d4 bc4 = bits.RotateLeft64(t, 14) a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1] a[16] = bc1 ^ (bc3 &^ bc2) a[7] = bc2 ^ (bc4 &^ bc3) a[23] = bc3 ^ (bc0 &^ bc4) a[14] = bc4 ^ (bc1 &^ bc0) t = a[20] ^ d0 bc2 = bits.RotateLeft64(t, 3) t = a[11] ^ d1 bc3 = bits.RotateLeft64(t, 45) t = a[2] ^ d2 bc4 = bits.RotateLeft64(t, 61) t = a[18] ^ d3 bc0 = bits.RotateLeft64(t, 28) t = a[9] ^ d4 bc1 = bits.RotateLeft64(t, 20) a[20] = bc0 ^ (bc2 &^ bc1) a[11] = bc1 ^ (bc3 &^ bc2) a[2] = bc2 ^ (bc4 &^ bc3) a[18] = bc3 ^ (bc0 &^ bc4) a[9] = bc4 ^ (bc1 &^ bc0) t = a[15] ^ d0 bc4 = bits.RotateLeft64(t, 18) t = a[6] ^ d1 bc0 = bits.RotateLeft64(t, 1) t = a[22] ^ d2 bc1 = bits.RotateLeft64(t, 6) t = a[13] ^ d3 bc2 = bits.RotateLeft64(t, 25) t = a[4] ^ d4 bc3 = bits.RotateLeft64(t, 8) a[15] = bc0 ^ (bc2 &^ bc1) a[6] = bc1 ^ (bc3 &^ bc2) a[22] = bc2 ^ (bc4 &^ bc3) a[13] = bc3 ^ (bc0 &^ bc4) a[4] = bc4 ^ (bc1 &^ bc0) t = a[10] ^ d0 bc1 = bits.RotateLeft64(t, 36) t = a[1] ^ d1 bc2 = bits.RotateLeft64(t, 10) t = a[17] ^ d2 bc3 = bits.RotateLeft64(t, 15) t = a[8] ^ d3 bc4 = bits.RotateLeft64(t, 56) t = a[24] ^ d4 bc0 = bits.RotateLeft64(t, 27) a[10] = bc0 ^ (bc2 &^ bc1) a[1] = bc1 ^ (bc3 &^ bc2) a[17] = bc2 ^ (bc4 &^ bc3) a[8] = bc3 ^ (bc0 &^ bc4) a[24] = bc4 ^ (bc1 &^ bc0) t = a[5] ^ d0 bc3 = bits.RotateLeft64(t, 41) t = a[21] ^ d1 bc4 = bits.RotateLeft64(t, 2) t = a[12] ^ d2 bc0 = bits.RotateLeft64(t, 62) t = a[3] ^ d3 bc1 = bits.RotateLeft64(t, 55) t = a[19] ^ d4 bc2 = bits.RotateLeft64(t, 39) a[5] = bc0 ^ (bc2 &^ bc1) a[21] = bc1 ^ (bc3 &^ bc2) a[12] = bc2 ^ (bc4 &^ bc3) a[3] = bc3 ^ (bc0 &^ bc4) a[19] = bc4 ^ (bc1 &^ bc0) // Round 3 bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] d0 = bc4 ^ (bc1<<1 | bc1>>63) d1 = bc0 ^ (bc2<<1 | bc2>>63) d2 = bc1 ^ (bc3<<1 | bc3>>63) d3 = bc2 ^ (bc4<<1 | bc4>>63) d4 = bc3 ^ (bc0<<1 | bc0>>63) bc0 = a[0] ^ d0 t = a[11] ^ d1 bc1 = bits.RotateLeft64(t, 44) t = a[22] ^ d2 bc2 = bits.RotateLeft64(t, 43) t = a[8] ^ d3 bc3 = bits.RotateLeft64(t, 21) t = a[19] ^ d4 bc4 = bits.RotateLeft64(t, 14) a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2] a[11] = bc1 ^ (bc3 &^ bc2) a[22] = bc2 ^ (bc4 &^ bc3) a[8] = bc3 ^ (bc0 &^ bc4) a[19] = bc4 ^ (bc1 &^ bc0) t = a[15] ^ d0 bc2 = bits.RotateLeft64(t, 3) t = a[1] ^ d1 bc3 = bits.RotateLeft64(t, 45) t = a[12] ^ d2 bc4 = bits.RotateLeft64(t, 61) t = a[23] ^ d3 bc0 = bits.RotateLeft64(t, 28) t = a[9] ^ d4 bc1 = bits.RotateLeft64(t, 20) a[15] = bc0 ^ (bc2 &^ bc1) a[1] = bc1 ^ (bc3 &^ bc2) a[12] = bc2 ^ (bc4 &^ bc3) a[23] = bc3 ^ (bc0 &^ bc4) a[9] = bc4 ^ (bc1 &^ bc0) t = a[5] ^ d0 bc4 = bits.RotateLeft64(t, 18) t = a[16] ^ d1 bc0 = bits.RotateLeft64(t, 1) t = a[2] ^ d2 bc1 = bits.RotateLeft64(t, 6) t = a[13] ^ d3 bc2 = bits.RotateLeft64(t, 25) t = a[24] ^ d4 bc3 = bits.RotateLeft64(t, 8) a[5] = bc0 ^ (bc2 &^ bc1) a[16] = bc1 ^ (bc3 &^ bc2) a[2] = bc2 ^ (bc4 &^ bc3) a[13] = bc3 ^ (bc0 &^ bc4) a[24] = bc4 ^ (bc1 &^ bc0) t = a[20] ^ d0 bc1 = bits.RotateLeft64(t, 36) t = a[6] ^ d1 bc2 = bits.RotateLeft64(t, 10) t = a[17] ^ d2 bc3 = bits.RotateLeft64(t, 15) t = a[3] ^ d3 bc4 = 
bits.RotateLeft64(t, 56) t = a[14] ^ d4 bc0 = bits.RotateLeft64(t, 27) a[20] = bc0 ^ (bc2 &^ bc1) a[6] = bc1 ^ (bc3 &^ bc2) a[17] = bc2 ^ (bc4 &^ bc3) a[3] = bc3 ^ (bc0 &^ bc4) a[14] = bc4 ^ (bc1 &^ bc0) t = a[10] ^ d0 bc3 = bits.RotateLeft64(t, 41) t = a[21] ^ d1 bc4 = bits.RotateLeft64(t, 2) t = a[7] ^ d2 bc0 = bits.RotateLeft64(t, 62) t = a[18] ^ d3 bc1 = bits.RotateLeft64(t, 55) t = a[4] ^ d4 bc2 = bits.RotateLeft64(t, 39) a[10] = bc0 ^ (bc2 &^ bc1) a[21] = bc1 ^ (bc3 &^ bc2) a[7] = bc2 ^ (bc4 &^ bc3) a[18] = bc3 ^ (bc0 &^ bc4) a[4] = bc4 ^ (bc1 &^ bc0) // Round 4 bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] d0 = bc4 ^ (bc1<<1 | bc1>>63) d1 = bc0 ^ (bc2<<1 | bc2>>63) d2 = bc1 ^ (bc3<<1 | bc3>>63) d3 = bc2 ^ (bc4<<1 | bc4>>63) d4 = bc3 ^ (bc0<<1 | bc0>>63) bc0 = a[0] ^ d0 t = a[1] ^ d1 bc1 = bits.RotateLeft64(t, 44) t = a[2] ^ d2 bc2 = bits.RotateLeft64(t, 43) t = a[3] ^ d3 bc3 = bits.RotateLeft64(t, 21) t = a[4] ^ d4 bc4 = bits.RotateLeft64(t, 14) a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3] a[1] = bc1 ^ (bc3 &^ bc2) a[2] = bc2 ^ (bc4 &^ bc3) a[3] = bc3 ^ (bc0 &^ bc4) a[4] = bc4 ^ (bc1 &^ bc0) t = a[5] ^ d0 bc2 = bits.RotateLeft64(t, 3) t = a[6] ^ d1 bc3 = bits.RotateLeft64(t, 45) t = a[7] ^ d2 bc4 = bits.RotateLeft64(t, 61) t = a[8] ^ d3 bc0 = bits.RotateLeft64(t, 28) t = a[9] ^ d4 bc1 = bits.RotateLeft64(t, 20) a[5] = bc0 ^ (bc2 &^ bc1) a[6] = bc1 ^ (bc3 &^ bc2) a[7] = bc2 ^ (bc4 &^ bc3) a[8] = bc3 ^ (bc0 &^ bc4) a[9] = bc4 ^ (bc1 &^ bc0) t = a[10] ^ d0 bc4 = bits.RotateLeft64(t, 18) t = a[11] ^ d1 bc0 = bits.RotateLeft64(t, 1) t = a[12] ^ d2 bc1 = bits.RotateLeft64(t, 6) t = a[13] ^ d3 bc2 = bits.RotateLeft64(t, 25) t = a[14] ^ d4 bc3 = bits.RotateLeft64(t, 8) a[10] = bc0 ^ (bc2 &^ bc1) a[11] = bc1 ^ (bc3 &^ bc2) a[12] = bc2 ^ (bc4 &^ bc3) a[13] = bc3 ^ (bc0 &^ bc4) a[14] = bc4 ^ (bc1 &^ bc0) t = a[15] ^ d0 bc1 = bits.RotateLeft64(t, 36) t = a[16] ^ d1 bc2 = bits.RotateLeft64(t, 10) t = a[17] ^ d2 bc3 = bits.RotateLeft64(t, 15) t = a[18] ^ d3 bc4 = bits.RotateLeft64(t, 56) t = a[19] ^ d4 bc0 = bits.RotateLeft64(t, 27) a[15] = bc0 ^ (bc2 &^ bc1) a[16] = bc1 ^ (bc3 &^ bc2) a[17] = bc2 ^ (bc4 &^ bc3) a[18] = bc3 ^ (bc0 &^ bc4) a[19] = bc4 ^ (bc1 &^ bc0) t = a[20] ^ d0 bc3 = bits.RotateLeft64(t, 41) t = a[21] ^ d1 bc4 = bits.RotateLeft64(t, 2) t = a[22] ^ d2 bc0 = bits.RotateLeft64(t, 62) t = a[23] ^ d3 bc1 = bits.RotateLeft64(t, 55) t = a[24] ^ d4 bc2 = bits.RotateLeft64(t, 39) a[20] = bc0 ^ (bc2 &^ bc1) a[21] = bc1 ^ (bc3 &^ bc2) a[22] = bc2 ^ (bc4 &^ bc3) a[23] = bc3 ^ (bc0 &^ bc4) a[24] = bc4 ^ (bc1 &^ bc0) } }
go/src/vendor/golang.org/x/crypto/sha3/keccakf.go/0
{ "file_path": "go/src/vendor/golang.org/x/crypto/sha3/keccakf.go", "repo_id": "go", "token_count": 6274 }
453
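
The unrolled rounds in keccakf.go above repeat one pattern per row of the 5x5 state: XOR in the column parities (theta), rotate each lane left by a fixed offset with bits.RotateLeft64 (rho/pi), then mix the row non-linearly with Go's &^ AND-NOT operator (chi). The stand-alone sketch below reproduces that row update for illustration only; the lane values are made up, the rotation offsets are the ones used for the first output row in the listing, and none of it is an excerpt from the vendored file.

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Five made-up lanes standing in for one row of the Keccak state.
	lanes := [5]uint64{0x0123456789abcdef, 0xfedcba9876543210, 0x0f0f0f0f0f0f0f0f, 0xaaaaaaaaaaaaaaaa, 0x5555555555555555}

	// rho/pi-style step: rotate each lane left by a fixed offset
	// (0, 44, 43, 21, 14 are the offsets used for the first output row above).
	rot := [5]int{0, 44, 43, 21, 14}
	var b [5]uint64
	for i, l := range lanes {
		b[i] = bits.RotateLeft64(l, rot[i])
	}

	// chi step: out[i] = b[i] ^ (b[i+2] &^ b[i+1]), the same shape as
	// a[0] = bc0 ^ (bc2 &^ bc1) in the round code above.
	var out [5]uint64
	for i := range b {
		out[i] = b[i] ^ (b[(i+2)%5] &^ b[(i+1)%5])
	}
	fmt.Printf("%#016x\n", out)
}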
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package route

import (
	"syscall"
	"unsafe"
)

func (typ RIBType) parseable() bool { return true }

// RouteMetrics represents route metrics.
type RouteMetrics struct {
	PathMTU int // path maximum transmission unit
}

// SysType implements the SysType method of Sys interface.
func (rmx *RouteMetrics) SysType() SysType { return SysMetrics }

// Sys implements the Sys method of Message interface.
func (m *RouteMessage) Sys() []Sys {
	if kernelAlign == 8 {
		return []Sys{
			&RouteMetrics{
				PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])),
			},
		}
	}
	return []Sys{
		&RouteMetrics{
			PathMTU: int(nativeEndian.Uint32(m.raw[m.extOff+4 : m.extOff+8])),
		},
	}
}

// InterfaceMetrics represents interface metrics.
type InterfaceMetrics struct {
	Type int // interface type
	MTU  int // maximum transmission unit
}

// SysType implements the SysType method of Sys interface.
func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics }

// Sys implements the Sys method of Message interface.
func (m *InterfaceMessage) Sys() []Sys {
	return []Sys{
		&InterfaceMetrics{
			Type: int(m.raw[m.extOff]),
			MTU:  int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])),
		},
	}
}

var compatFreeBSD32 bool // 386 emulation on amd64

func probeRoutingStack() (int, map[int]*wireFormat) {
	var p uintptr
	wordSize := int(unsafe.Sizeof(p))
	align := wordSize
	// In the case of kern.supported_archs="amd64 i386", we need
	// to know the underlying kernel's architecture because the
	// alignment for routing facilities are set at the build time
	// of the kernel.
	conf, _ := syscall.Sysctl("kern.conftxt")
	for i, j := 0, 0; j < len(conf); j++ {
		if conf[j] != '\n' {
			continue
		}
		s := conf[i:j]
		i = j + 1
		if len(s) > len("machine") && s[:len("machine")] == "machine" {
			s = s[len("machine"):]
			for k := 0; k < len(s); k++ {
				if s[k] == ' ' || s[k] == '\t' {
					s = s[1:]
				}
				break
			}
			if s == "amd64" {
				align = 8
			}
			break
		}
	}
	if align != wordSize {
		compatFreeBSD32 = true // 386 emulation on amd64
	}
	var rtm, ifm, ifam, ifmam, ifanm *wireFormat
	if compatFreeBSD32 {
		rtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10Emu - sizeofRtMetricsFreeBSD10Emu, bodyOff: sizeofRtMsghdrFreeBSD10Emu}
		ifm = &wireFormat{extOff: 16}
		ifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10Emu, bodyOff: sizeofIfaMsghdrFreeBSD10Emu}
		ifmam = &wireFormat{extOff: sizeofIfmaMsghdrFreeBSD10Emu, bodyOff: sizeofIfmaMsghdrFreeBSD10Emu}
		ifanm = &wireFormat{extOff: sizeofIfAnnouncemsghdrFreeBSD10Emu, bodyOff: sizeofIfAnnouncemsghdrFreeBSD10Emu}
	} else {
		rtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10 - sizeofRtMetricsFreeBSD10, bodyOff: sizeofRtMsghdrFreeBSD10}
		ifm = &wireFormat{extOff: 16}
		ifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10, bodyOff: sizeofIfaMsghdrFreeBSD10}
		ifmam = &wireFormat{extOff: sizeofIfmaMsghdrFreeBSD10, bodyOff: sizeofIfmaMsghdrFreeBSD10}
		ifanm = &wireFormat{extOff: sizeofIfAnnouncemsghdrFreeBSD10, bodyOff: sizeofIfAnnouncemsghdrFreeBSD10}
	}
	rel, _ := syscall.SysctlUint32("kern.osreldate")
	switch {
	case rel < 800000:
		if compatFreeBSD32 {
			ifm.bodyOff = sizeofIfMsghdrFreeBSD7Emu
		} else {
			ifm.bodyOff = sizeofIfMsghdrFreeBSD7
		}
	case 800000 <= rel && rel < 900000:
		if compatFreeBSD32 {
			ifm.bodyOff = sizeofIfMsghdrFreeBSD8Emu
		} else {
			ifm.bodyOff = sizeofIfMsghdrFreeBSD8
		}
	case 900000 <= rel && rel < 1000000:
		if compatFreeBSD32 {
			ifm.bodyOff = sizeofIfMsghdrFreeBSD9Emu
		} else {
			ifm.bodyOff = sizeofIfMsghdrFreeBSD9
		}
	case 1000000 <= rel && rel < 1100000:
		if compatFreeBSD32 {
			ifm.bodyOff = sizeofIfMsghdrFreeBSD10Emu
		} else {
			ifm.bodyOff = sizeofIfMsghdrFreeBSD10
		}
	default:
		if compatFreeBSD32 {
			ifm.bodyOff = sizeofIfMsghdrFreeBSD11Emu
		} else {
			ifm.bodyOff = sizeofIfMsghdrFreeBSD11
		}
	}
	rtm.parse = rtm.parseRouteMessage
	ifm.parse = ifm.parseInterfaceMessage
	ifam.parse = ifam.parseInterfaceAddrMessage
	ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage
	ifanm.parse = ifanm.parseInterfaceAnnounceMessage
	return align, map[int]*wireFormat{
		syscall.RTM_ADD:        rtm,
		syscall.RTM_DELETE:     rtm,
		syscall.RTM_CHANGE:     rtm,
		syscall.RTM_GET:        rtm,
		syscall.RTM_LOSING:     rtm,
		syscall.RTM_REDIRECT:   rtm,
		syscall.RTM_MISS:       rtm,
		syscall.RTM_LOCK:       rtm,
		syscall.RTM_RESOLVE:    rtm,
		syscall.RTM_NEWADDR:    ifam,
		syscall.RTM_DELADDR:    ifam,
		syscall.RTM_IFINFO:     ifm,
		syscall.RTM_NEWMADDR:   ifmam,
		syscall.RTM_DELMADDR:   ifmam,
		syscall.RTM_IFANNOUNCE: ifanm,
	}
}
go/src/vendor/golang.org/x/net/route/sys_freebsd.go/0
{ "file_path": "go/src/vendor/golang.org/x/net/route/sys_freebsd.go", "repo_id": "go", "token_count": 2025 }
454
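
probeRoutingStack above sizes routing messages off two sysctls: kern.conftxt to detect 386 emulation and kern.osreldate, which encodes the FreeBSD release as a decimal number (for example 1100000 for 11.0). The sketch below shows only the osreldate probe in isolation; it builds on FreeBSD where syscall.SysctlUint32 is available, and the printed labels are illustrative rather than copied from the package.

//go:build freebsd

package main

import (
	"fmt"
	"syscall"
)

func main() {
	rel, err := syscall.SysctlUint32("kern.osreldate")
	if err != nil {
		fmt.Println("sysctl kern.osreldate failed:", err)
		return
	}
	switch {
	case rel < 1000000:
		fmt.Println("pre-10 kernel, older if_msghdr layout:", rel)
	case rel < 1100000:
		fmt.Println("FreeBSD 10.x kernel, FreeBSD 10 if_msghdr layout:", rel)
	default:
		fmt.Println("FreeBSD 11 or newer, FreeBSD 11 if_msghdr layout:", rel)
	}
}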
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cpu

import (
	"runtime"
)

// byteOrder is a subset of encoding/binary.ByteOrder.
type byteOrder interface {
	Uint32([]byte) uint32
	Uint64([]byte) uint64
}

type littleEndian struct{}
type bigEndian struct{}

func (littleEndian) Uint32(b []byte) uint32 {
	_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}

func (littleEndian) Uint64(b []byte) uint64 {
	_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}

func (bigEndian) Uint32(b []byte) uint32 {
	_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
	return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
}

func (bigEndian) Uint64(b []byte) uint64 {
	_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
	return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
		uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
}

// hostByteOrder returns littleEndian on little-endian machines and
// bigEndian on big-endian machines.
func hostByteOrder() byteOrder {
	switch runtime.GOARCH {
	case "386", "amd64", "amd64p32",
		"alpha",
		"arm", "arm64",
		"loong64",
		"mipsle", "mips64le", "mips64p32le",
		"nios2",
		"ppc64le",
		"riscv", "riscv64",
		"sh":
		return littleEndian{}
	case "armbe", "arm64be",
		"m68k",
		"mips", "mips64", "mips64p32",
		"ppc", "ppc64",
		"s390", "s390x",
		"shbe",
		"sparc", "sparc64":
		return bigEndian{}
	}
	panic("unknown architecture")
}
go/src/vendor/golang.org/x/sys/cpu/byteorder.go/0
{ "file_path": "go/src/vendor/golang.org/x/sys/cpu/byteorder.go", "repo_id": "go", "token_count": 807 }
455
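
The littleEndian and bigEndian decoders above mirror encoding/binary.LittleEndian and BigEndian; the cpu package keeps private copies only to avoid the extra import. The stand-alone comparison below uses the standard library rather than the package's unexported types, and only illustrates why the hostByteOrder dispatch matters: the same four bytes decode to different words under the two byte orders.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := []byte{0x01, 0x02, 0x03, 0x04}
	fmt.Printf("little-endian: %#x\n", binary.LittleEndian.Uint32(b)) // 0x4030201
	fmt.Printf("big-endian:    %#x\n", binary.BigEndian.Uint32(b))    // 0x1020304
}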
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Minimal copy of x/sys/unix so the cpu package can make a
// system call on AIX without depending on x/sys/unix.
// (See golang.org/issue/32102)

//go:build aix && ppc64 && gc

package cpu

import (
	"syscall"
	"unsafe"
)

//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o"

//go:linkname libc_getsystemcfg libc_getsystemcfg

type syscallFunc uintptr

var libc_getsystemcfg syscallFunc

type errno = syscall.Errno

// Implemented in runtime/syscall_aix.go.
func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno)
func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno)

func callgetsystemcfg(label int) (r1 uintptr, e1 errno) {
	r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0)
	return
}
go/src/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go/0
{ "file_path": "go/src/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go", "repo_id": "go", "token_count": 400 }
456
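
callgetsystemcfg is the only entry point this AIX shim exports; a caller passes a getsystemcfg label and inspects bits in the returned word. The sketch below shows that shape only: the stub stands in for the real syscall6 path so the snippet runs anywhere, and the label and capability-bit constants are assumptions for illustration, not values copied from the repository.

package main

import "fmt"

// callgetsystemcfg is a stub standing in for the AIX shim above, which would
// reach getsystemcfg(2) through syscall6; here it just returns a canned word.
func callgetsystemcfg(label int) (uintptr, error) {
	return 0x30000, nil
}

const (
	scImpl     = 2       // assumed label selecting the "implementation" word
	implPOWER8 = 0x10000 // assumed POWER8 capability bit
	implPOWER9 = 0x20000 // assumed POWER9 capability bit
)

func main() {
	impl, err := callgetsystemcfg(scImpl)
	if err != nil {
		fmt.Println("getsystemcfg failed:", err)
		return
	}
	fmt.Println("POWER8 capable:", uint64(impl)&implPOWER8 != 0)
	fmt.Println("POWER9 capable:", uint64(impl)&implPOWER9 != 0)
}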
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 package bidi // UnicodeVersion is the Unicode version from which the tables in this package are derived. const UnicodeVersion = "9.0.0" // xorMasks contains masks to be xor-ed with brackets to get the reverse // version. var xorMasks = []int32{ // 8 elements 0, 1, 6, 7, 3, 15, 29, 63, } // Size: 56 bytes // lookup returns the trie value for the first UTF-8 encoding in s and // the width in bytes of this encoding. The size will be 0 if s does not // hold enough bytes to complete the encoding. len(s) must be greater than 0. func (t *bidiTrie) lookup(s []byte) (v uint8, sz int) { c0 := s[0] switch { case c0 < 0x80: // is ASCII return bidiValues[c0], 1 case c0 < 0xC2: return 0, 1 // Illegal UTF-8: not a starter, not ASCII. case c0 < 0xE0: // 2-byte UTF-8 if len(s) < 2 { return 0, 0 } i := bidiIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return 0, 1 // Illegal UTF-8: not a continuation byte. } return t.lookupValue(uint32(i), c1), 2 case c0 < 0xF0: // 3-byte UTF-8 if len(s) < 3 { return 0, 0 } i := bidiIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return 0, 1 // Illegal UTF-8: not a continuation byte. } o := uint32(i)<<6 + uint32(c1) i = bidiIndex[o] c2 := s[2] if c2 < 0x80 || 0xC0 <= c2 { return 0, 2 // Illegal UTF-8: not a continuation byte. } return t.lookupValue(uint32(i), c2), 3 case c0 < 0xF8: // 4-byte UTF-8 if len(s) < 4 { return 0, 0 } i := bidiIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return 0, 1 // Illegal UTF-8: not a continuation byte. } o := uint32(i)<<6 + uint32(c1) i = bidiIndex[o] c2 := s[2] if c2 < 0x80 || 0xC0 <= c2 { return 0, 2 // Illegal UTF-8: not a continuation byte. } o = uint32(i)<<6 + uint32(c2) i = bidiIndex[o] c3 := s[3] if c3 < 0x80 || 0xC0 <= c3 { return 0, 3 // Illegal UTF-8: not a continuation byte. } return t.lookupValue(uint32(i), c3), 4 } // Illegal rune return 0, 1 } // lookupUnsafe returns the trie value for the first UTF-8 encoding in s. // s must start with a full and valid UTF-8 encoded rune. func (t *bidiTrie) lookupUnsafe(s []byte) uint8 { c0 := s[0] if c0 < 0x80 { // is ASCII return bidiValues[c0] } i := bidiIndex[c0] if c0 < 0xE0 { // 2-byte UTF-8 return t.lookupValue(uint32(i), s[1]) } i = bidiIndex[uint32(i)<<6+uint32(s[1])] if c0 < 0xF0 { // 3-byte UTF-8 return t.lookupValue(uint32(i), s[2]) } i = bidiIndex[uint32(i)<<6+uint32(s[2])] if c0 < 0xF8 { // 4-byte UTF-8 return t.lookupValue(uint32(i), s[3]) } return 0 } // lookupString returns the trie value for the first UTF-8 encoding in s and // the width in bytes of this encoding. The size will be 0 if s does not // hold enough bytes to complete the encoding. len(s) must be greater than 0. func (t *bidiTrie) lookupString(s string) (v uint8, sz int) { c0 := s[0] switch { case c0 < 0x80: // is ASCII return bidiValues[c0], 1 case c0 < 0xC2: return 0, 1 // Illegal UTF-8: not a starter, not ASCII. case c0 < 0xE0: // 2-byte UTF-8 if len(s) < 2 { return 0, 0 } i := bidiIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return 0, 1 // Illegal UTF-8: not a continuation byte. } return t.lookupValue(uint32(i), c1), 2 case c0 < 0xF0: // 3-byte UTF-8 if len(s) < 3 { return 0, 0 } i := bidiIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return 0, 1 // Illegal UTF-8: not a continuation byte. } o := uint32(i)<<6 + uint32(c1) i = bidiIndex[o] c2 := s[2] if c2 < 0x80 || 0xC0 <= c2 { return 0, 2 // Illegal UTF-8: not a continuation byte. 
} return t.lookupValue(uint32(i), c2), 3 case c0 < 0xF8: // 4-byte UTF-8 if len(s) < 4 { return 0, 0 } i := bidiIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return 0, 1 // Illegal UTF-8: not a continuation byte. } o := uint32(i)<<6 + uint32(c1) i = bidiIndex[o] c2 := s[2] if c2 < 0x80 || 0xC0 <= c2 { return 0, 2 // Illegal UTF-8: not a continuation byte. } o = uint32(i)<<6 + uint32(c2) i = bidiIndex[o] c3 := s[3] if c3 < 0x80 || 0xC0 <= c3 { return 0, 3 // Illegal UTF-8: not a continuation byte. } return t.lookupValue(uint32(i), c3), 4 } // Illegal rune return 0, 1 } // lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. // s must start with a full and valid UTF-8 encoded rune. func (t *bidiTrie) lookupStringUnsafe(s string) uint8 { c0 := s[0] if c0 < 0x80 { // is ASCII return bidiValues[c0] } i := bidiIndex[c0] if c0 < 0xE0 { // 2-byte UTF-8 return t.lookupValue(uint32(i), s[1]) } i = bidiIndex[uint32(i)<<6+uint32(s[1])] if c0 < 0xF0 { // 3-byte UTF-8 return t.lookupValue(uint32(i), s[2]) } i = bidiIndex[uint32(i)<<6+uint32(s[2])] if c0 < 0xF8 { // 4-byte UTF-8 return t.lookupValue(uint32(i), s[3]) } return 0 } // bidiTrie. Total size: 15744 bytes (15.38 KiB). Checksum: b4c3b70954803b86. type bidiTrie struct{} func newBidiTrie(i int) *bidiTrie { return &bidiTrie{} } // lookupValue determines the type of block n and looks up the value for b. func (t *bidiTrie) lookupValue(n uint32, b byte) uint8 { switch { default: return uint8(bidiValues[n<<6+uint32(b)]) } } // bidiValues: 222 blocks, 14208 entries, 14208 bytes // The third block is the zero block. var bidiValues = [14208]uint8{ // Block 0x0, offset 0x0 0x00: 0x000b, 0x01: 0x000b, 0x02: 0x000b, 0x03: 0x000b, 0x04: 0x000b, 0x05: 0x000b, 0x06: 0x000b, 0x07: 0x000b, 0x08: 0x000b, 0x09: 0x0008, 0x0a: 0x0007, 0x0b: 0x0008, 0x0c: 0x0009, 0x0d: 0x0007, 0x0e: 0x000b, 0x0f: 0x000b, 0x10: 0x000b, 0x11: 0x000b, 0x12: 0x000b, 0x13: 0x000b, 0x14: 0x000b, 0x15: 0x000b, 0x16: 0x000b, 0x17: 0x000b, 0x18: 0x000b, 0x19: 0x000b, 0x1a: 0x000b, 0x1b: 0x000b, 0x1c: 0x0007, 0x1d: 0x0007, 0x1e: 0x0007, 0x1f: 0x0008, 0x20: 0x0009, 0x21: 0x000a, 0x22: 0x000a, 0x23: 0x0004, 0x24: 0x0004, 0x25: 0x0004, 0x26: 0x000a, 0x27: 0x000a, 0x28: 0x003a, 0x29: 0x002a, 0x2a: 0x000a, 0x2b: 0x0003, 0x2c: 0x0006, 0x2d: 0x0003, 0x2e: 0x0006, 0x2f: 0x0006, 0x30: 0x0002, 0x31: 0x0002, 0x32: 0x0002, 0x33: 0x0002, 0x34: 0x0002, 0x35: 0x0002, 0x36: 0x0002, 0x37: 0x0002, 0x38: 0x0002, 0x39: 0x0002, 0x3a: 0x0006, 0x3b: 0x000a, 0x3c: 0x000a, 0x3d: 0x000a, 0x3e: 0x000a, 0x3f: 0x000a, // Block 0x1, offset 0x40 0x40: 0x000a, 0x5b: 0x005a, 0x5c: 0x000a, 0x5d: 0x004a, 0x5e: 0x000a, 0x5f: 0x000a, 0x60: 0x000a, 0x7b: 0x005a, 0x7c: 0x000a, 0x7d: 0x004a, 0x7e: 0x000a, 0x7f: 0x000b, // Block 0x2, offset 0x80 // Block 0x3, offset 0xc0 0xc0: 0x000b, 0xc1: 0x000b, 0xc2: 0x000b, 0xc3: 0x000b, 0xc4: 0x000b, 0xc5: 0x0007, 0xc6: 0x000b, 0xc7: 0x000b, 0xc8: 0x000b, 0xc9: 0x000b, 0xca: 0x000b, 0xcb: 0x000b, 0xcc: 0x000b, 0xcd: 0x000b, 0xce: 0x000b, 0xcf: 0x000b, 0xd0: 0x000b, 0xd1: 0x000b, 0xd2: 0x000b, 0xd3: 0x000b, 0xd4: 0x000b, 0xd5: 0x000b, 0xd6: 0x000b, 0xd7: 0x000b, 0xd8: 0x000b, 0xd9: 0x000b, 0xda: 0x000b, 0xdb: 0x000b, 0xdc: 0x000b, 0xdd: 0x000b, 0xde: 0x000b, 0xdf: 0x000b, 0xe0: 0x0006, 0xe1: 0x000a, 0xe2: 0x0004, 0xe3: 0x0004, 0xe4: 0x0004, 0xe5: 0x0004, 0xe6: 0x000a, 0xe7: 0x000a, 0xe8: 0x000a, 0xe9: 0x000a, 0xeb: 0x000a, 0xec: 0x000a, 0xed: 0x000b, 0xee: 0x000a, 0xef: 0x000a, 0xf0: 0x0004, 0xf1: 0x0004, 0xf2: 0x0002, 0xf3: 0x0002, 0xf4: 0x000a, 0xf6: 0x000a, 0xf7: 
0x000a, 0xf8: 0x000a, 0xf9: 0x0002, 0xfb: 0x000a, 0xfc: 0x000a, 0xfd: 0x000a, 0xfe: 0x000a, 0xff: 0x000a, // Block 0x4, offset 0x100 0x117: 0x000a, 0x137: 0x000a, // Block 0x5, offset 0x140 0x179: 0x000a, 0x17a: 0x000a, // Block 0x6, offset 0x180 0x182: 0x000a, 0x183: 0x000a, 0x184: 0x000a, 0x185: 0x000a, 0x186: 0x000a, 0x187: 0x000a, 0x188: 0x000a, 0x189: 0x000a, 0x18a: 0x000a, 0x18b: 0x000a, 0x18c: 0x000a, 0x18d: 0x000a, 0x18e: 0x000a, 0x18f: 0x000a, 0x192: 0x000a, 0x193: 0x000a, 0x194: 0x000a, 0x195: 0x000a, 0x196: 0x000a, 0x197: 0x000a, 0x198: 0x000a, 0x199: 0x000a, 0x19a: 0x000a, 0x19b: 0x000a, 0x19c: 0x000a, 0x19d: 0x000a, 0x19e: 0x000a, 0x19f: 0x000a, 0x1a5: 0x000a, 0x1a6: 0x000a, 0x1a7: 0x000a, 0x1a8: 0x000a, 0x1a9: 0x000a, 0x1aa: 0x000a, 0x1ab: 0x000a, 0x1ac: 0x000a, 0x1ad: 0x000a, 0x1af: 0x000a, 0x1b0: 0x000a, 0x1b1: 0x000a, 0x1b2: 0x000a, 0x1b3: 0x000a, 0x1b4: 0x000a, 0x1b5: 0x000a, 0x1b6: 0x000a, 0x1b7: 0x000a, 0x1b8: 0x000a, 0x1b9: 0x000a, 0x1ba: 0x000a, 0x1bb: 0x000a, 0x1bc: 0x000a, 0x1bd: 0x000a, 0x1be: 0x000a, 0x1bf: 0x000a, // Block 0x7, offset 0x1c0 0x1c0: 0x000c, 0x1c1: 0x000c, 0x1c2: 0x000c, 0x1c3: 0x000c, 0x1c4: 0x000c, 0x1c5: 0x000c, 0x1c6: 0x000c, 0x1c7: 0x000c, 0x1c8: 0x000c, 0x1c9: 0x000c, 0x1ca: 0x000c, 0x1cb: 0x000c, 0x1cc: 0x000c, 0x1cd: 0x000c, 0x1ce: 0x000c, 0x1cf: 0x000c, 0x1d0: 0x000c, 0x1d1: 0x000c, 0x1d2: 0x000c, 0x1d3: 0x000c, 0x1d4: 0x000c, 0x1d5: 0x000c, 0x1d6: 0x000c, 0x1d7: 0x000c, 0x1d8: 0x000c, 0x1d9: 0x000c, 0x1da: 0x000c, 0x1db: 0x000c, 0x1dc: 0x000c, 0x1dd: 0x000c, 0x1de: 0x000c, 0x1df: 0x000c, 0x1e0: 0x000c, 0x1e1: 0x000c, 0x1e2: 0x000c, 0x1e3: 0x000c, 0x1e4: 0x000c, 0x1e5: 0x000c, 0x1e6: 0x000c, 0x1e7: 0x000c, 0x1e8: 0x000c, 0x1e9: 0x000c, 0x1ea: 0x000c, 0x1eb: 0x000c, 0x1ec: 0x000c, 0x1ed: 0x000c, 0x1ee: 0x000c, 0x1ef: 0x000c, 0x1f0: 0x000c, 0x1f1: 0x000c, 0x1f2: 0x000c, 0x1f3: 0x000c, 0x1f4: 0x000c, 0x1f5: 0x000c, 0x1f6: 0x000c, 0x1f7: 0x000c, 0x1f8: 0x000c, 0x1f9: 0x000c, 0x1fa: 0x000c, 0x1fb: 0x000c, 0x1fc: 0x000c, 0x1fd: 0x000c, 0x1fe: 0x000c, 0x1ff: 0x000c, // Block 0x8, offset 0x200 0x200: 0x000c, 0x201: 0x000c, 0x202: 0x000c, 0x203: 0x000c, 0x204: 0x000c, 0x205: 0x000c, 0x206: 0x000c, 0x207: 0x000c, 0x208: 0x000c, 0x209: 0x000c, 0x20a: 0x000c, 0x20b: 0x000c, 0x20c: 0x000c, 0x20d: 0x000c, 0x20e: 0x000c, 0x20f: 0x000c, 0x210: 0x000c, 0x211: 0x000c, 0x212: 0x000c, 0x213: 0x000c, 0x214: 0x000c, 0x215: 0x000c, 0x216: 0x000c, 0x217: 0x000c, 0x218: 0x000c, 0x219: 0x000c, 0x21a: 0x000c, 0x21b: 0x000c, 0x21c: 0x000c, 0x21d: 0x000c, 0x21e: 0x000c, 0x21f: 0x000c, 0x220: 0x000c, 0x221: 0x000c, 0x222: 0x000c, 0x223: 0x000c, 0x224: 0x000c, 0x225: 0x000c, 0x226: 0x000c, 0x227: 0x000c, 0x228: 0x000c, 0x229: 0x000c, 0x22a: 0x000c, 0x22b: 0x000c, 0x22c: 0x000c, 0x22d: 0x000c, 0x22e: 0x000c, 0x22f: 0x000c, 0x234: 0x000a, 0x235: 0x000a, 0x23e: 0x000a, // Block 0x9, offset 0x240 0x244: 0x000a, 0x245: 0x000a, 0x247: 0x000a, // Block 0xa, offset 0x280 0x2b6: 0x000a, // Block 0xb, offset 0x2c0 0x2c3: 0x000c, 0x2c4: 0x000c, 0x2c5: 0x000c, 0x2c6: 0x000c, 0x2c7: 0x000c, 0x2c8: 0x000c, 0x2c9: 0x000c, // Block 0xc, offset 0x300 0x30a: 0x000a, 0x30d: 0x000a, 0x30e: 0x000a, 0x30f: 0x0004, 0x310: 0x0001, 0x311: 0x000c, 0x312: 0x000c, 0x313: 0x000c, 0x314: 0x000c, 0x315: 0x000c, 0x316: 0x000c, 0x317: 0x000c, 0x318: 0x000c, 0x319: 0x000c, 0x31a: 0x000c, 0x31b: 0x000c, 0x31c: 0x000c, 0x31d: 0x000c, 0x31e: 0x000c, 0x31f: 0x000c, 0x320: 0x000c, 0x321: 0x000c, 0x322: 0x000c, 0x323: 0x000c, 0x324: 0x000c, 0x325: 0x000c, 0x326: 0x000c, 0x327: 0x000c, 0x328: 0x000c, 0x329: 
0x000c, 0x32a: 0x000c, 0x32b: 0x000c, 0x32c: 0x000c, 0x32d: 0x000c, 0x32e: 0x000c, 0x32f: 0x000c, 0x330: 0x000c, 0x331: 0x000c, 0x332: 0x000c, 0x333: 0x000c, 0x334: 0x000c, 0x335: 0x000c, 0x336: 0x000c, 0x337: 0x000c, 0x338: 0x000c, 0x339: 0x000c, 0x33a: 0x000c, 0x33b: 0x000c, 0x33c: 0x000c, 0x33d: 0x000c, 0x33e: 0x0001, 0x33f: 0x000c, // Block 0xd, offset 0x340 0x340: 0x0001, 0x341: 0x000c, 0x342: 0x000c, 0x343: 0x0001, 0x344: 0x000c, 0x345: 0x000c, 0x346: 0x0001, 0x347: 0x000c, 0x348: 0x0001, 0x349: 0x0001, 0x34a: 0x0001, 0x34b: 0x0001, 0x34c: 0x0001, 0x34d: 0x0001, 0x34e: 0x0001, 0x34f: 0x0001, 0x350: 0x0001, 0x351: 0x0001, 0x352: 0x0001, 0x353: 0x0001, 0x354: 0x0001, 0x355: 0x0001, 0x356: 0x0001, 0x357: 0x0001, 0x358: 0x0001, 0x359: 0x0001, 0x35a: 0x0001, 0x35b: 0x0001, 0x35c: 0x0001, 0x35d: 0x0001, 0x35e: 0x0001, 0x35f: 0x0001, 0x360: 0x0001, 0x361: 0x0001, 0x362: 0x0001, 0x363: 0x0001, 0x364: 0x0001, 0x365: 0x0001, 0x366: 0x0001, 0x367: 0x0001, 0x368: 0x0001, 0x369: 0x0001, 0x36a: 0x0001, 0x36b: 0x0001, 0x36c: 0x0001, 0x36d: 0x0001, 0x36e: 0x0001, 0x36f: 0x0001, 0x370: 0x0001, 0x371: 0x0001, 0x372: 0x0001, 0x373: 0x0001, 0x374: 0x0001, 0x375: 0x0001, 0x376: 0x0001, 0x377: 0x0001, 0x378: 0x0001, 0x379: 0x0001, 0x37a: 0x0001, 0x37b: 0x0001, 0x37c: 0x0001, 0x37d: 0x0001, 0x37e: 0x0001, 0x37f: 0x0001, // Block 0xe, offset 0x380 0x380: 0x0005, 0x381: 0x0005, 0x382: 0x0005, 0x383: 0x0005, 0x384: 0x0005, 0x385: 0x0005, 0x386: 0x000a, 0x387: 0x000a, 0x388: 0x000d, 0x389: 0x0004, 0x38a: 0x0004, 0x38b: 0x000d, 0x38c: 0x0006, 0x38d: 0x000d, 0x38e: 0x000a, 0x38f: 0x000a, 0x390: 0x000c, 0x391: 0x000c, 0x392: 0x000c, 0x393: 0x000c, 0x394: 0x000c, 0x395: 0x000c, 0x396: 0x000c, 0x397: 0x000c, 0x398: 0x000c, 0x399: 0x000c, 0x39a: 0x000c, 0x39b: 0x000d, 0x39c: 0x000d, 0x39d: 0x000d, 0x39e: 0x000d, 0x39f: 0x000d, 0x3a0: 0x000d, 0x3a1: 0x000d, 0x3a2: 0x000d, 0x3a3: 0x000d, 0x3a4: 0x000d, 0x3a5: 0x000d, 0x3a6: 0x000d, 0x3a7: 0x000d, 0x3a8: 0x000d, 0x3a9: 0x000d, 0x3aa: 0x000d, 0x3ab: 0x000d, 0x3ac: 0x000d, 0x3ad: 0x000d, 0x3ae: 0x000d, 0x3af: 0x000d, 0x3b0: 0x000d, 0x3b1: 0x000d, 0x3b2: 0x000d, 0x3b3: 0x000d, 0x3b4: 0x000d, 0x3b5: 0x000d, 0x3b6: 0x000d, 0x3b7: 0x000d, 0x3b8: 0x000d, 0x3b9: 0x000d, 0x3ba: 0x000d, 0x3bb: 0x000d, 0x3bc: 0x000d, 0x3bd: 0x000d, 0x3be: 0x000d, 0x3bf: 0x000d, // Block 0xf, offset 0x3c0 0x3c0: 0x000d, 0x3c1: 0x000d, 0x3c2: 0x000d, 0x3c3: 0x000d, 0x3c4: 0x000d, 0x3c5: 0x000d, 0x3c6: 0x000d, 0x3c7: 0x000d, 0x3c8: 0x000d, 0x3c9: 0x000d, 0x3ca: 0x000d, 0x3cb: 0x000c, 0x3cc: 0x000c, 0x3cd: 0x000c, 0x3ce: 0x000c, 0x3cf: 0x000c, 0x3d0: 0x000c, 0x3d1: 0x000c, 0x3d2: 0x000c, 0x3d3: 0x000c, 0x3d4: 0x000c, 0x3d5: 0x000c, 0x3d6: 0x000c, 0x3d7: 0x000c, 0x3d8: 0x000c, 0x3d9: 0x000c, 0x3da: 0x000c, 0x3db: 0x000c, 0x3dc: 0x000c, 0x3dd: 0x000c, 0x3de: 0x000c, 0x3df: 0x000c, 0x3e0: 0x0005, 0x3e1: 0x0005, 0x3e2: 0x0005, 0x3e3: 0x0005, 0x3e4: 0x0005, 0x3e5: 0x0005, 0x3e6: 0x0005, 0x3e7: 0x0005, 0x3e8: 0x0005, 0x3e9: 0x0005, 0x3ea: 0x0004, 0x3eb: 0x0005, 0x3ec: 0x0005, 0x3ed: 0x000d, 0x3ee: 0x000d, 0x3ef: 0x000d, 0x3f0: 0x000c, 0x3f1: 0x000d, 0x3f2: 0x000d, 0x3f3: 0x000d, 0x3f4: 0x000d, 0x3f5: 0x000d, 0x3f6: 0x000d, 0x3f7: 0x000d, 0x3f8: 0x000d, 0x3f9: 0x000d, 0x3fa: 0x000d, 0x3fb: 0x000d, 0x3fc: 0x000d, 0x3fd: 0x000d, 0x3fe: 0x000d, 0x3ff: 0x000d, // Block 0x10, offset 0x400 0x400: 0x000d, 0x401: 0x000d, 0x402: 0x000d, 0x403: 0x000d, 0x404: 0x000d, 0x405: 0x000d, 0x406: 0x000d, 0x407: 0x000d, 0x408: 0x000d, 0x409: 0x000d, 0x40a: 0x000d, 0x40b: 0x000d, 0x40c: 0x000d, 0x40d: 0x000d, 0x40e: 0x000d, 
0x40f: 0x000d, 0x410: 0x000d, 0x411: 0x000d, 0x412: 0x000d, 0x413: 0x000d, 0x414: 0x000d, 0x415: 0x000d, 0x416: 0x000d, 0x417: 0x000d, 0x418: 0x000d, 0x419: 0x000d, 0x41a: 0x000d, 0x41b: 0x000d, 0x41c: 0x000d, 0x41d: 0x000d, 0x41e: 0x000d, 0x41f: 0x000d, 0x420: 0x000d, 0x421: 0x000d, 0x422: 0x000d, 0x423: 0x000d, 0x424: 0x000d, 0x425: 0x000d, 0x426: 0x000d, 0x427: 0x000d, 0x428: 0x000d, 0x429: 0x000d, 0x42a: 0x000d, 0x42b: 0x000d, 0x42c: 0x000d, 0x42d: 0x000d, 0x42e: 0x000d, 0x42f: 0x000d, 0x430: 0x000d, 0x431: 0x000d, 0x432: 0x000d, 0x433: 0x000d, 0x434: 0x000d, 0x435: 0x000d, 0x436: 0x000d, 0x437: 0x000d, 0x438: 0x000d, 0x439: 0x000d, 0x43a: 0x000d, 0x43b: 0x000d, 0x43c: 0x000d, 0x43d: 0x000d, 0x43e: 0x000d, 0x43f: 0x000d, // Block 0x11, offset 0x440 0x440: 0x000d, 0x441: 0x000d, 0x442: 0x000d, 0x443: 0x000d, 0x444: 0x000d, 0x445: 0x000d, 0x446: 0x000d, 0x447: 0x000d, 0x448: 0x000d, 0x449: 0x000d, 0x44a: 0x000d, 0x44b: 0x000d, 0x44c: 0x000d, 0x44d: 0x000d, 0x44e: 0x000d, 0x44f: 0x000d, 0x450: 0x000d, 0x451: 0x000d, 0x452: 0x000d, 0x453: 0x000d, 0x454: 0x000d, 0x455: 0x000d, 0x456: 0x000c, 0x457: 0x000c, 0x458: 0x000c, 0x459: 0x000c, 0x45a: 0x000c, 0x45b: 0x000c, 0x45c: 0x000c, 0x45d: 0x0005, 0x45e: 0x000a, 0x45f: 0x000c, 0x460: 0x000c, 0x461: 0x000c, 0x462: 0x000c, 0x463: 0x000c, 0x464: 0x000c, 0x465: 0x000d, 0x466: 0x000d, 0x467: 0x000c, 0x468: 0x000c, 0x469: 0x000a, 0x46a: 0x000c, 0x46b: 0x000c, 0x46c: 0x000c, 0x46d: 0x000c, 0x46e: 0x000d, 0x46f: 0x000d, 0x470: 0x0002, 0x471: 0x0002, 0x472: 0x0002, 0x473: 0x0002, 0x474: 0x0002, 0x475: 0x0002, 0x476: 0x0002, 0x477: 0x0002, 0x478: 0x0002, 0x479: 0x0002, 0x47a: 0x000d, 0x47b: 0x000d, 0x47c: 0x000d, 0x47d: 0x000d, 0x47e: 0x000d, 0x47f: 0x000d, // Block 0x12, offset 0x480 0x480: 0x000d, 0x481: 0x000d, 0x482: 0x000d, 0x483: 0x000d, 0x484: 0x000d, 0x485: 0x000d, 0x486: 0x000d, 0x487: 0x000d, 0x488: 0x000d, 0x489: 0x000d, 0x48a: 0x000d, 0x48b: 0x000d, 0x48c: 0x000d, 0x48d: 0x000d, 0x48e: 0x000d, 0x48f: 0x000d, 0x490: 0x000d, 0x491: 0x000c, 0x492: 0x000d, 0x493: 0x000d, 0x494: 0x000d, 0x495: 0x000d, 0x496: 0x000d, 0x497: 0x000d, 0x498: 0x000d, 0x499: 0x000d, 0x49a: 0x000d, 0x49b: 0x000d, 0x49c: 0x000d, 0x49d: 0x000d, 0x49e: 0x000d, 0x49f: 0x000d, 0x4a0: 0x000d, 0x4a1: 0x000d, 0x4a2: 0x000d, 0x4a3: 0x000d, 0x4a4: 0x000d, 0x4a5: 0x000d, 0x4a6: 0x000d, 0x4a7: 0x000d, 0x4a8: 0x000d, 0x4a9: 0x000d, 0x4aa: 0x000d, 0x4ab: 0x000d, 0x4ac: 0x000d, 0x4ad: 0x000d, 0x4ae: 0x000d, 0x4af: 0x000d, 0x4b0: 0x000c, 0x4b1: 0x000c, 0x4b2: 0x000c, 0x4b3: 0x000c, 0x4b4: 0x000c, 0x4b5: 0x000c, 0x4b6: 0x000c, 0x4b7: 0x000c, 0x4b8: 0x000c, 0x4b9: 0x000c, 0x4ba: 0x000c, 0x4bb: 0x000c, 0x4bc: 0x000c, 0x4bd: 0x000c, 0x4be: 0x000c, 0x4bf: 0x000c, // Block 0x13, offset 0x4c0 0x4c0: 0x000c, 0x4c1: 0x000c, 0x4c2: 0x000c, 0x4c3: 0x000c, 0x4c4: 0x000c, 0x4c5: 0x000c, 0x4c6: 0x000c, 0x4c7: 0x000c, 0x4c8: 0x000c, 0x4c9: 0x000c, 0x4ca: 0x000c, 0x4cb: 0x000d, 0x4cc: 0x000d, 0x4cd: 0x000d, 0x4ce: 0x000d, 0x4cf: 0x000d, 0x4d0: 0x000d, 0x4d1: 0x000d, 0x4d2: 0x000d, 0x4d3: 0x000d, 0x4d4: 0x000d, 0x4d5: 0x000d, 0x4d6: 0x000d, 0x4d7: 0x000d, 0x4d8: 0x000d, 0x4d9: 0x000d, 0x4da: 0x000d, 0x4db: 0x000d, 0x4dc: 0x000d, 0x4dd: 0x000d, 0x4de: 0x000d, 0x4df: 0x000d, 0x4e0: 0x000d, 0x4e1: 0x000d, 0x4e2: 0x000d, 0x4e3: 0x000d, 0x4e4: 0x000d, 0x4e5: 0x000d, 0x4e6: 0x000d, 0x4e7: 0x000d, 0x4e8: 0x000d, 0x4e9: 0x000d, 0x4ea: 0x000d, 0x4eb: 0x000d, 0x4ec: 0x000d, 0x4ed: 0x000d, 0x4ee: 0x000d, 0x4ef: 0x000d, 0x4f0: 0x000d, 0x4f1: 0x000d, 0x4f2: 0x000d, 0x4f3: 0x000d, 0x4f4: 0x000d, 0x4f5: 0x000d, 
0x4f6: 0x000d, 0x4f7: 0x000d, 0x4f8: 0x000d, 0x4f9: 0x000d, 0x4fa: 0x000d, 0x4fb: 0x000d, 0x4fc: 0x000d, 0x4fd: 0x000d, 0x4fe: 0x000d, 0x4ff: 0x000d, // Block 0x14, offset 0x500 0x500: 0x000d, 0x501: 0x000d, 0x502: 0x000d, 0x503: 0x000d, 0x504: 0x000d, 0x505: 0x000d, 0x506: 0x000d, 0x507: 0x000d, 0x508: 0x000d, 0x509: 0x000d, 0x50a: 0x000d, 0x50b: 0x000d, 0x50c: 0x000d, 0x50d: 0x000d, 0x50e: 0x000d, 0x50f: 0x000d, 0x510: 0x000d, 0x511: 0x000d, 0x512: 0x000d, 0x513: 0x000d, 0x514: 0x000d, 0x515: 0x000d, 0x516: 0x000d, 0x517: 0x000d, 0x518: 0x000d, 0x519: 0x000d, 0x51a: 0x000d, 0x51b: 0x000d, 0x51c: 0x000d, 0x51d: 0x000d, 0x51e: 0x000d, 0x51f: 0x000d, 0x520: 0x000d, 0x521: 0x000d, 0x522: 0x000d, 0x523: 0x000d, 0x524: 0x000d, 0x525: 0x000d, 0x526: 0x000c, 0x527: 0x000c, 0x528: 0x000c, 0x529: 0x000c, 0x52a: 0x000c, 0x52b: 0x000c, 0x52c: 0x000c, 0x52d: 0x000c, 0x52e: 0x000c, 0x52f: 0x000c, 0x530: 0x000c, 0x531: 0x000d, 0x532: 0x000d, 0x533: 0x000d, 0x534: 0x000d, 0x535: 0x000d, 0x536: 0x000d, 0x537: 0x000d, 0x538: 0x000d, 0x539: 0x000d, 0x53a: 0x000d, 0x53b: 0x000d, 0x53c: 0x000d, 0x53d: 0x000d, 0x53e: 0x000d, 0x53f: 0x000d, // Block 0x15, offset 0x540 0x540: 0x0001, 0x541: 0x0001, 0x542: 0x0001, 0x543: 0x0001, 0x544: 0x0001, 0x545: 0x0001, 0x546: 0x0001, 0x547: 0x0001, 0x548: 0x0001, 0x549: 0x0001, 0x54a: 0x0001, 0x54b: 0x0001, 0x54c: 0x0001, 0x54d: 0x0001, 0x54e: 0x0001, 0x54f: 0x0001, 0x550: 0x0001, 0x551: 0x0001, 0x552: 0x0001, 0x553: 0x0001, 0x554: 0x0001, 0x555: 0x0001, 0x556: 0x0001, 0x557: 0x0001, 0x558: 0x0001, 0x559: 0x0001, 0x55a: 0x0001, 0x55b: 0x0001, 0x55c: 0x0001, 0x55d: 0x0001, 0x55e: 0x0001, 0x55f: 0x0001, 0x560: 0x0001, 0x561: 0x0001, 0x562: 0x0001, 0x563: 0x0001, 0x564: 0x0001, 0x565: 0x0001, 0x566: 0x0001, 0x567: 0x0001, 0x568: 0x0001, 0x569: 0x0001, 0x56a: 0x0001, 0x56b: 0x000c, 0x56c: 0x000c, 0x56d: 0x000c, 0x56e: 0x000c, 0x56f: 0x000c, 0x570: 0x000c, 0x571: 0x000c, 0x572: 0x000c, 0x573: 0x000c, 0x574: 0x0001, 0x575: 0x0001, 0x576: 0x000a, 0x577: 0x000a, 0x578: 0x000a, 0x579: 0x000a, 0x57a: 0x0001, 0x57b: 0x0001, 0x57c: 0x0001, 0x57d: 0x0001, 0x57e: 0x0001, 0x57f: 0x0001, // Block 0x16, offset 0x580 0x580: 0x0001, 0x581: 0x0001, 0x582: 0x0001, 0x583: 0x0001, 0x584: 0x0001, 0x585: 0x0001, 0x586: 0x0001, 0x587: 0x0001, 0x588: 0x0001, 0x589: 0x0001, 0x58a: 0x0001, 0x58b: 0x0001, 0x58c: 0x0001, 0x58d: 0x0001, 0x58e: 0x0001, 0x58f: 0x0001, 0x590: 0x0001, 0x591: 0x0001, 0x592: 0x0001, 0x593: 0x0001, 0x594: 0x0001, 0x595: 0x0001, 0x596: 0x000c, 0x597: 0x000c, 0x598: 0x000c, 0x599: 0x000c, 0x59a: 0x0001, 0x59b: 0x000c, 0x59c: 0x000c, 0x59d: 0x000c, 0x59e: 0x000c, 0x59f: 0x000c, 0x5a0: 0x000c, 0x5a1: 0x000c, 0x5a2: 0x000c, 0x5a3: 0x000c, 0x5a4: 0x0001, 0x5a5: 0x000c, 0x5a6: 0x000c, 0x5a7: 0x000c, 0x5a8: 0x0001, 0x5a9: 0x000c, 0x5aa: 0x000c, 0x5ab: 0x000c, 0x5ac: 0x000c, 0x5ad: 0x000c, 0x5ae: 0x0001, 0x5af: 0x0001, 0x5b0: 0x0001, 0x5b1: 0x0001, 0x5b2: 0x0001, 0x5b3: 0x0001, 0x5b4: 0x0001, 0x5b5: 0x0001, 0x5b6: 0x0001, 0x5b7: 0x0001, 0x5b8: 0x0001, 0x5b9: 0x0001, 0x5ba: 0x0001, 0x5bb: 0x0001, 0x5bc: 0x0001, 0x5bd: 0x0001, 0x5be: 0x0001, 0x5bf: 0x0001, // Block 0x17, offset 0x5c0 0x5c0: 0x0001, 0x5c1: 0x0001, 0x5c2: 0x0001, 0x5c3: 0x0001, 0x5c4: 0x0001, 0x5c5: 0x0001, 0x5c6: 0x0001, 0x5c7: 0x0001, 0x5c8: 0x0001, 0x5c9: 0x0001, 0x5ca: 0x0001, 0x5cb: 0x0001, 0x5cc: 0x0001, 0x5cd: 0x0001, 0x5ce: 0x0001, 0x5cf: 0x0001, 0x5d0: 0x0001, 0x5d1: 0x0001, 0x5d2: 0x0001, 0x5d3: 0x0001, 0x5d4: 0x0001, 0x5d5: 0x0001, 0x5d6: 0x0001, 0x5d7: 0x0001, 0x5d8: 0x0001, 0x5d9: 0x000c, 0x5da: 0x000c, 0x5db: 
0x000c, 0x5dc: 0x0001, 0x5dd: 0x0001, 0x5de: 0x0001, 0x5df: 0x0001, 0x5e0: 0x0001, 0x5e1: 0x0001, 0x5e2: 0x0001, 0x5e3: 0x0001, 0x5e4: 0x0001, 0x5e5: 0x0001, 0x5e6: 0x0001, 0x5e7: 0x0001, 0x5e8: 0x0001, 0x5e9: 0x0001, 0x5ea: 0x0001, 0x5eb: 0x0001, 0x5ec: 0x0001, 0x5ed: 0x0001, 0x5ee: 0x0001, 0x5ef: 0x0001, 0x5f0: 0x0001, 0x5f1: 0x0001, 0x5f2: 0x0001, 0x5f3: 0x0001, 0x5f4: 0x0001, 0x5f5: 0x0001, 0x5f6: 0x0001, 0x5f7: 0x0001, 0x5f8: 0x0001, 0x5f9: 0x0001, 0x5fa: 0x0001, 0x5fb: 0x0001, 0x5fc: 0x0001, 0x5fd: 0x0001, 0x5fe: 0x0001, 0x5ff: 0x0001, // Block 0x18, offset 0x600 0x600: 0x0001, 0x601: 0x0001, 0x602: 0x0001, 0x603: 0x0001, 0x604: 0x0001, 0x605: 0x0001, 0x606: 0x0001, 0x607: 0x0001, 0x608: 0x0001, 0x609: 0x0001, 0x60a: 0x0001, 0x60b: 0x0001, 0x60c: 0x0001, 0x60d: 0x0001, 0x60e: 0x0001, 0x60f: 0x0001, 0x610: 0x0001, 0x611: 0x0001, 0x612: 0x0001, 0x613: 0x0001, 0x614: 0x0001, 0x615: 0x0001, 0x616: 0x0001, 0x617: 0x0001, 0x618: 0x0001, 0x619: 0x0001, 0x61a: 0x0001, 0x61b: 0x0001, 0x61c: 0x0001, 0x61d: 0x0001, 0x61e: 0x0001, 0x61f: 0x0001, 0x620: 0x000d, 0x621: 0x000d, 0x622: 0x000d, 0x623: 0x000d, 0x624: 0x000d, 0x625: 0x000d, 0x626: 0x000d, 0x627: 0x000d, 0x628: 0x000d, 0x629: 0x000d, 0x62a: 0x000d, 0x62b: 0x000d, 0x62c: 0x000d, 0x62d: 0x000d, 0x62e: 0x000d, 0x62f: 0x000d, 0x630: 0x000d, 0x631: 0x000d, 0x632: 0x000d, 0x633: 0x000d, 0x634: 0x000d, 0x635: 0x000d, 0x636: 0x000d, 0x637: 0x000d, 0x638: 0x000d, 0x639: 0x000d, 0x63a: 0x000d, 0x63b: 0x000d, 0x63c: 0x000d, 0x63d: 0x000d, 0x63e: 0x000d, 0x63f: 0x000d, // Block 0x19, offset 0x640 0x640: 0x000d, 0x641: 0x000d, 0x642: 0x000d, 0x643: 0x000d, 0x644: 0x000d, 0x645: 0x000d, 0x646: 0x000d, 0x647: 0x000d, 0x648: 0x000d, 0x649: 0x000d, 0x64a: 0x000d, 0x64b: 0x000d, 0x64c: 0x000d, 0x64d: 0x000d, 0x64e: 0x000d, 0x64f: 0x000d, 0x650: 0x000d, 0x651: 0x000d, 0x652: 0x000d, 0x653: 0x000d, 0x654: 0x000c, 0x655: 0x000c, 0x656: 0x000c, 0x657: 0x000c, 0x658: 0x000c, 0x659: 0x000c, 0x65a: 0x000c, 0x65b: 0x000c, 0x65c: 0x000c, 0x65d: 0x000c, 0x65e: 0x000c, 0x65f: 0x000c, 0x660: 0x000c, 0x661: 0x000c, 0x662: 0x0005, 0x663: 0x000c, 0x664: 0x000c, 0x665: 0x000c, 0x666: 0x000c, 0x667: 0x000c, 0x668: 0x000c, 0x669: 0x000c, 0x66a: 0x000c, 0x66b: 0x000c, 0x66c: 0x000c, 0x66d: 0x000c, 0x66e: 0x000c, 0x66f: 0x000c, 0x670: 0x000c, 0x671: 0x000c, 0x672: 0x000c, 0x673: 0x000c, 0x674: 0x000c, 0x675: 0x000c, 0x676: 0x000c, 0x677: 0x000c, 0x678: 0x000c, 0x679: 0x000c, 0x67a: 0x000c, 0x67b: 0x000c, 0x67c: 0x000c, 0x67d: 0x000c, 0x67e: 0x000c, 0x67f: 0x000c, // Block 0x1a, offset 0x680 0x680: 0x000c, 0x681: 0x000c, 0x682: 0x000c, 0x6ba: 0x000c, 0x6bc: 0x000c, // Block 0x1b, offset 0x6c0 0x6c1: 0x000c, 0x6c2: 0x000c, 0x6c3: 0x000c, 0x6c4: 0x000c, 0x6c5: 0x000c, 0x6c6: 0x000c, 0x6c7: 0x000c, 0x6c8: 0x000c, 0x6cd: 0x000c, 0x6d1: 0x000c, 0x6d2: 0x000c, 0x6d3: 0x000c, 0x6d4: 0x000c, 0x6d5: 0x000c, 0x6d6: 0x000c, 0x6d7: 0x000c, 0x6e2: 0x000c, 0x6e3: 0x000c, // Block 0x1c, offset 0x700 0x701: 0x000c, 0x73c: 0x000c, // Block 0x1d, offset 0x740 0x741: 0x000c, 0x742: 0x000c, 0x743: 0x000c, 0x744: 0x000c, 0x74d: 0x000c, 0x762: 0x000c, 0x763: 0x000c, 0x772: 0x0004, 0x773: 0x0004, 0x77b: 0x0004, // Block 0x1e, offset 0x780 0x781: 0x000c, 0x782: 0x000c, 0x7bc: 0x000c, // Block 0x1f, offset 0x7c0 0x7c1: 0x000c, 0x7c2: 0x000c, 0x7c7: 0x000c, 0x7c8: 0x000c, 0x7cb: 0x000c, 0x7cc: 0x000c, 0x7cd: 0x000c, 0x7d1: 0x000c, 0x7f0: 0x000c, 0x7f1: 0x000c, 0x7f5: 0x000c, // Block 0x20, offset 0x800 0x801: 0x000c, 0x802: 0x000c, 0x803: 0x000c, 0x804: 0x000c, 0x805: 0x000c, 0x807: 0x000c, 0x808: 
0x000c, 0x80d: 0x000c, 0x822: 0x000c, 0x823: 0x000c, 0x831: 0x0004, // Block 0x21, offset 0x840 0x841: 0x000c, 0x87c: 0x000c, 0x87f: 0x000c, // Block 0x22, offset 0x880 0x881: 0x000c, 0x882: 0x000c, 0x883: 0x000c, 0x884: 0x000c, 0x88d: 0x000c, 0x896: 0x000c, 0x8a2: 0x000c, 0x8a3: 0x000c, // Block 0x23, offset 0x8c0 0x8c2: 0x000c, // Block 0x24, offset 0x900 0x900: 0x000c, 0x90d: 0x000c, 0x933: 0x000a, 0x934: 0x000a, 0x935: 0x000a, 0x936: 0x000a, 0x937: 0x000a, 0x938: 0x000a, 0x939: 0x0004, 0x93a: 0x000a, // Block 0x25, offset 0x940 0x940: 0x000c, 0x97e: 0x000c, 0x97f: 0x000c, // Block 0x26, offset 0x980 0x980: 0x000c, 0x986: 0x000c, 0x987: 0x000c, 0x988: 0x000c, 0x98a: 0x000c, 0x98b: 0x000c, 0x98c: 0x000c, 0x98d: 0x000c, 0x995: 0x000c, 0x996: 0x000c, 0x9a2: 0x000c, 0x9a3: 0x000c, 0x9b8: 0x000a, 0x9b9: 0x000a, 0x9ba: 0x000a, 0x9bb: 0x000a, 0x9bc: 0x000a, 0x9bd: 0x000a, 0x9be: 0x000a, // Block 0x27, offset 0x9c0 0x9cc: 0x000c, 0x9cd: 0x000c, 0x9e2: 0x000c, 0x9e3: 0x000c, // Block 0x28, offset 0xa00 0xa01: 0x000c, // Block 0x29, offset 0xa40 0xa41: 0x000c, 0xa42: 0x000c, 0xa43: 0x000c, 0xa44: 0x000c, 0xa4d: 0x000c, 0xa62: 0x000c, 0xa63: 0x000c, // Block 0x2a, offset 0xa80 0xa8a: 0x000c, 0xa92: 0x000c, 0xa93: 0x000c, 0xa94: 0x000c, 0xa96: 0x000c, // Block 0x2b, offset 0xac0 0xaf1: 0x000c, 0xaf4: 0x000c, 0xaf5: 0x000c, 0xaf6: 0x000c, 0xaf7: 0x000c, 0xaf8: 0x000c, 0xaf9: 0x000c, 0xafa: 0x000c, 0xaff: 0x0004, // Block 0x2c, offset 0xb00 0xb07: 0x000c, 0xb08: 0x000c, 0xb09: 0x000c, 0xb0a: 0x000c, 0xb0b: 0x000c, 0xb0c: 0x000c, 0xb0d: 0x000c, 0xb0e: 0x000c, // Block 0x2d, offset 0xb40 0xb71: 0x000c, 0xb74: 0x000c, 0xb75: 0x000c, 0xb76: 0x000c, 0xb77: 0x000c, 0xb78: 0x000c, 0xb79: 0x000c, 0xb7b: 0x000c, 0xb7c: 0x000c, // Block 0x2e, offset 0xb80 0xb88: 0x000c, 0xb89: 0x000c, 0xb8a: 0x000c, 0xb8b: 0x000c, 0xb8c: 0x000c, 0xb8d: 0x000c, // Block 0x2f, offset 0xbc0 0xbd8: 0x000c, 0xbd9: 0x000c, 0xbf5: 0x000c, 0xbf7: 0x000c, 0xbf9: 0x000c, 0xbfa: 0x003a, 0xbfb: 0x002a, 0xbfc: 0x003a, 0xbfd: 0x002a, // Block 0x30, offset 0xc00 0xc31: 0x000c, 0xc32: 0x000c, 0xc33: 0x000c, 0xc34: 0x000c, 0xc35: 0x000c, 0xc36: 0x000c, 0xc37: 0x000c, 0xc38: 0x000c, 0xc39: 0x000c, 0xc3a: 0x000c, 0xc3b: 0x000c, 0xc3c: 0x000c, 0xc3d: 0x000c, 0xc3e: 0x000c, // Block 0x31, offset 0xc40 0xc40: 0x000c, 0xc41: 0x000c, 0xc42: 0x000c, 0xc43: 0x000c, 0xc44: 0x000c, 0xc46: 0x000c, 0xc47: 0x000c, 0xc4d: 0x000c, 0xc4e: 0x000c, 0xc4f: 0x000c, 0xc50: 0x000c, 0xc51: 0x000c, 0xc52: 0x000c, 0xc53: 0x000c, 0xc54: 0x000c, 0xc55: 0x000c, 0xc56: 0x000c, 0xc57: 0x000c, 0xc59: 0x000c, 0xc5a: 0x000c, 0xc5b: 0x000c, 0xc5c: 0x000c, 0xc5d: 0x000c, 0xc5e: 0x000c, 0xc5f: 0x000c, 0xc60: 0x000c, 0xc61: 0x000c, 0xc62: 0x000c, 0xc63: 0x000c, 0xc64: 0x000c, 0xc65: 0x000c, 0xc66: 0x000c, 0xc67: 0x000c, 0xc68: 0x000c, 0xc69: 0x000c, 0xc6a: 0x000c, 0xc6b: 0x000c, 0xc6c: 0x000c, 0xc6d: 0x000c, 0xc6e: 0x000c, 0xc6f: 0x000c, 0xc70: 0x000c, 0xc71: 0x000c, 0xc72: 0x000c, 0xc73: 0x000c, 0xc74: 0x000c, 0xc75: 0x000c, 0xc76: 0x000c, 0xc77: 0x000c, 0xc78: 0x000c, 0xc79: 0x000c, 0xc7a: 0x000c, 0xc7b: 0x000c, 0xc7c: 0x000c, // Block 0x32, offset 0xc80 0xc86: 0x000c, // Block 0x33, offset 0xcc0 0xced: 0x000c, 0xcee: 0x000c, 0xcef: 0x000c, 0xcf0: 0x000c, 0xcf2: 0x000c, 0xcf3: 0x000c, 0xcf4: 0x000c, 0xcf5: 0x000c, 0xcf6: 0x000c, 0xcf7: 0x000c, 0xcf9: 0x000c, 0xcfa: 0x000c, 0xcfd: 0x000c, 0xcfe: 0x000c, // Block 0x34, offset 0xd00 0xd18: 0x000c, 0xd19: 0x000c, 0xd1e: 0x000c, 0xd1f: 0x000c, 0xd20: 0x000c, 0xd31: 0x000c, 0xd32: 0x000c, 0xd33: 0x000c, 0xd34: 0x000c, // Block 0x35, 
offset 0xd40 0xd42: 0x000c, 0xd45: 0x000c, 0xd46: 0x000c, 0xd4d: 0x000c, 0xd5d: 0x000c, // Block 0x36, offset 0xd80 0xd9d: 0x000c, 0xd9e: 0x000c, 0xd9f: 0x000c, // Block 0x37, offset 0xdc0 0xdd0: 0x000a, 0xdd1: 0x000a, 0xdd2: 0x000a, 0xdd3: 0x000a, 0xdd4: 0x000a, 0xdd5: 0x000a, 0xdd6: 0x000a, 0xdd7: 0x000a, 0xdd8: 0x000a, 0xdd9: 0x000a, // Block 0x38, offset 0xe00 0xe00: 0x000a, // Block 0x39, offset 0xe40 0xe40: 0x0009, 0xe5b: 0x007a, 0xe5c: 0x006a, // Block 0x3a, offset 0xe80 0xe92: 0x000c, 0xe93: 0x000c, 0xe94: 0x000c, 0xeb2: 0x000c, 0xeb3: 0x000c, 0xeb4: 0x000c, // Block 0x3b, offset 0xec0 0xed2: 0x000c, 0xed3: 0x000c, 0xef2: 0x000c, 0xef3: 0x000c, // Block 0x3c, offset 0xf00 0xf34: 0x000c, 0xf35: 0x000c, 0xf37: 0x000c, 0xf38: 0x000c, 0xf39: 0x000c, 0xf3a: 0x000c, 0xf3b: 0x000c, 0xf3c: 0x000c, 0xf3d: 0x000c, // Block 0x3d, offset 0xf40 0xf46: 0x000c, 0xf49: 0x000c, 0xf4a: 0x000c, 0xf4b: 0x000c, 0xf4c: 0x000c, 0xf4d: 0x000c, 0xf4e: 0x000c, 0xf4f: 0x000c, 0xf50: 0x000c, 0xf51: 0x000c, 0xf52: 0x000c, 0xf53: 0x000c, 0xf5b: 0x0004, 0xf5d: 0x000c, 0xf70: 0x000a, 0xf71: 0x000a, 0xf72: 0x000a, 0xf73: 0x000a, 0xf74: 0x000a, 0xf75: 0x000a, 0xf76: 0x000a, 0xf77: 0x000a, 0xf78: 0x000a, 0xf79: 0x000a, // Block 0x3e, offset 0xf80 0xf80: 0x000a, 0xf81: 0x000a, 0xf82: 0x000a, 0xf83: 0x000a, 0xf84: 0x000a, 0xf85: 0x000a, 0xf86: 0x000a, 0xf87: 0x000a, 0xf88: 0x000a, 0xf89: 0x000a, 0xf8a: 0x000a, 0xf8b: 0x000c, 0xf8c: 0x000c, 0xf8d: 0x000c, 0xf8e: 0x000b, // Block 0x3f, offset 0xfc0 0xfc5: 0x000c, 0xfc6: 0x000c, 0xfe9: 0x000c, // Block 0x40, offset 0x1000 0x1020: 0x000c, 0x1021: 0x000c, 0x1022: 0x000c, 0x1027: 0x000c, 0x1028: 0x000c, 0x1032: 0x000c, 0x1039: 0x000c, 0x103a: 0x000c, 0x103b: 0x000c, // Block 0x41, offset 0x1040 0x1040: 0x000a, 0x1044: 0x000a, 0x1045: 0x000a, // Block 0x42, offset 0x1080 0x109e: 0x000a, 0x109f: 0x000a, 0x10a0: 0x000a, 0x10a1: 0x000a, 0x10a2: 0x000a, 0x10a3: 0x000a, 0x10a4: 0x000a, 0x10a5: 0x000a, 0x10a6: 0x000a, 0x10a7: 0x000a, 0x10a8: 0x000a, 0x10a9: 0x000a, 0x10aa: 0x000a, 0x10ab: 0x000a, 0x10ac: 0x000a, 0x10ad: 0x000a, 0x10ae: 0x000a, 0x10af: 0x000a, 0x10b0: 0x000a, 0x10b1: 0x000a, 0x10b2: 0x000a, 0x10b3: 0x000a, 0x10b4: 0x000a, 0x10b5: 0x000a, 0x10b6: 0x000a, 0x10b7: 0x000a, 0x10b8: 0x000a, 0x10b9: 0x000a, 0x10ba: 0x000a, 0x10bb: 0x000a, 0x10bc: 0x000a, 0x10bd: 0x000a, 0x10be: 0x000a, 0x10bf: 0x000a, // Block 0x43, offset 0x10c0 0x10d7: 0x000c, 0x10d8: 0x000c, 0x10db: 0x000c, // Block 0x44, offset 0x1100 0x1116: 0x000c, 0x1118: 0x000c, 0x1119: 0x000c, 0x111a: 0x000c, 0x111b: 0x000c, 0x111c: 0x000c, 0x111d: 0x000c, 0x111e: 0x000c, 0x1120: 0x000c, 0x1122: 0x000c, 0x1125: 0x000c, 0x1126: 0x000c, 0x1127: 0x000c, 0x1128: 0x000c, 0x1129: 0x000c, 0x112a: 0x000c, 0x112b: 0x000c, 0x112c: 0x000c, 0x1133: 0x000c, 0x1134: 0x000c, 0x1135: 0x000c, 0x1136: 0x000c, 0x1137: 0x000c, 0x1138: 0x000c, 0x1139: 0x000c, 0x113a: 0x000c, 0x113b: 0x000c, 0x113c: 0x000c, 0x113f: 0x000c, // Block 0x45, offset 0x1140 0x1170: 0x000c, 0x1171: 0x000c, 0x1172: 0x000c, 0x1173: 0x000c, 0x1174: 0x000c, 0x1175: 0x000c, 0x1176: 0x000c, 0x1177: 0x000c, 0x1178: 0x000c, 0x1179: 0x000c, 0x117a: 0x000c, 0x117b: 0x000c, 0x117c: 0x000c, 0x117d: 0x000c, 0x117e: 0x000c, // Block 0x46, offset 0x1180 0x1180: 0x000c, 0x1181: 0x000c, 0x1182: 0x000c, 0x1183: 0x000c, 0x11b4: 0x000c, 0x11b6: 0x000c, 0x11b7: 0x000c, 0x11b8: 0x000c, 0x11b9: 0x000c, 0x11ba: 0x000c, 0x11bc: 0x000c, // Block 0x47, offset 0x11c0 0x11c2: 0x000c, 0x11eb: 0x000c, 0x11ec: 0x000c, 0x11ed: 0x000c, 0x11ee: 0x000c, 0x11ef: 0x000c, 0x11f0: 0x000c, 0x11f1: 
0x000c, 0x11f2: 0x000c, 0x11f3: 0x000c, // Block 0x48, offset 0x1200 0x1200: 0x000c, 0x1201: 0x000c, 0x1222: 0x000c, 0x1223: 0x000c, 0x1224: 0x000c, 0x1225: 0x000c, 0x1228: 0x000c, 0x1229: 0x000c, 0x122b: 0x000c, 0x122c: 0x000c, 0x122d: 0x000c, // Block 0x49, offset 0x1240 0x1266: 0x000c, 0x1268: 0x000c, 0x1269: 0x000c, 0x126d: 0x000c, 0x126f: 0x000c, 0x1270: 0x000c, 0x1271: 0x000c, // Block 0x4a, offset 0x1280 0x12ac: 0x000c, 0x12ad: 0x000c, 0x12ae: 0x000c, 0x12af: 0x000c, 0x12b0: 0x000c, 0x12b1: 0x000c, 0x12b2: 0x000c, 0x12b3: 0x000c, 0x12b6: 0x000c, 0x12b7: 0x000c, // Block 0x4b, offset 0x12c0 0x12d0: 0x000c, 0x12d1: 0x000c, 0x12d2: 0x000c, 0x12d4: 0x000c, 0x12d5: 0x000c, 0x12d6: 0x000c, 0x12d7: 0x000c, 0x12d8: 0x000c, 0x12d9: 0x000c, 0x12da: 0x000c, 0x12db: 0x000c, 0x12dc: 0x000c, 0x12dd: 0x000c, 0x12de: 0x000c, 0x12df: 0x000c, 0x12e0: 0x000c, 0x12e2: 0x000c, 0x12e3: 0x000c, 0x12e4: 0x000c, 0x12e5: 0x000c, 0x12e6: 0x000c, 0x12e7: 0x000c, 0x12e8: 0x000c, 0x12ed: 0x000c, 0x12f4: 0x000c, 0x12f8: 0x000c, 0x12f9: 0x000c, // Block 0x4c, offset 0x1300 0x1300: 0x000c, 0x1301: 0x000c, 0x1302: 0x000c, 0x1303: 0x000c, 0x1304: 0x000c, 0x1305: 0x000c, 0x1306: 0x000c, 0x1307: 0x000c, 0x1308: 0x000c, 0x1309: 0x000c, 0x130a: 0x000c, 0x130b: 0x000c, 0x130c: 0x000c, 0x130d: 0x000c, 0x130e: 0x000c, 0x130f: 0x000c, 0x1310: 0x000c, 0x1311: 0x000c, 0x1312: 0x000c, 0x1313: 0x000c, 0x1314: 0x000c, 0x1315: 0x000c, 0x1316: 0x000c, 0x1317: 0x000c, 0x1318: 0x000c, 0x1319: 0x000c, 0x131a: 0x000c, 0x131b: 0x000c, 0x131c: 0x000c, 0x131d: 0x000c, 0x131e: 0x000c, 0x131f: 0x000c, 0x1320: 0x000c, 0x1321: 0x000c, 0x1322: 0x000c, 0x1323: 0x000c, 0x1324: 0x000c, 0x1325: 0x000c, 0x1326: 0x000c, 0x1327: 0x000c, 0x1328: 0x000c, 0x1329: 0x000c, 0x132a: 0x000c, 0x132b: 0x000c, 0x132c: 0x000c, 0x132d: 0x000c, 0x132e: 0x000c, 0x132f: 0x000c, 0x1330: 0x000c, 0x1331: 0x000c, 0x1332: 0x000c, 0x1333: 0x000c, 0x1334: 0x000c, 0x1335: 0x000c, 0x133b: 0x000c, 0x133c: 0x000c, 0x133d: 0x000c, 0x133e: 0x000c, 0x133f: 0x000c, // Block 0x4d, offset 0x1340 0x137d: 0x000a, 0x137f: 0x000a, // Block 0x4e, offset 0x1380 0x1380: 0x000a, 0x1381: 0x000a, 0x138d: 0x000a, 0x138e: 0x000a, 0x138f: 0x000a, 0x139d: 0x000a, 0x139e: 0x000a, 0x139f: 0x000a, 0x13ad: 0x000a, 0x13ae: 0x000a, 0x13af: 0x000a, 0x13bd: 0x000a, 0x13be: 0x000a, // Block 0x4f, offset 0x13c0 0x13c0: 0x0009, 0x13c1: 0x0009, 0x13c2: 0x0009, 0x13c3: 0x0009, 0x13c4: 0x0009, 0x13c5: 0x0009, 0x13c6: 0x0009, 0x13c7: 0x0009, 0x13c8: 0x0009, 0x13c9: 0x0009, 0x13ca: 0x0009, 0x13cb: 0x000b, 0x13cc: 0x000b, 0x13cd: 0x000b, 0x13cf: 0x0001, 0x13d0: 0x000a, 0x13d1: 0x000a, 0x13d2: 0x000a, 0x13d3: 0x000a, 0x13d4: 0x000a, 0x13d5: 0x000a, 0x13d6: 0x000a, 0x13d7: 0x000a, 0x13d8: 0x000a, 0x13d9: 0x000a, 0x13da: 0x000a, 0x13db: 0x000a, 0x13dc: 0x000a, 0x13dd: 0x000a, 0x13de: 0x000a, 0x13df: 0x000a, 0x13e0: 0x000a, 0x13e1: 0x000a, 0x13e2: 0x000a, 0x13e3: 0x000a, 0x13e4: 0x000a, 0x13e5: 0x000a, 0x13e6: 0x000a, 0x13e7: 0x000a, 0x13e8: 0x0009, 0x13e9: 0x0007, 0x13ea: 0x000e, 0x13eb: 0x000e, 0x13ec: 0x000e, 0x13ed: 0x000e, 0x13ee: 0x000e, 0x13ef: 0x0006, 0x13f0: 0x0004, 0x13f1: 0x0004, 0x13f2: 0x0004, 0x13f3: 0x0004, 0x13f4: 0x0004, 0x13f5: 0x000a, 0x13f6: 0x000a, 0x13f7: 0x000a, 0x13f8: 0x000a, 0x13f9: 0x000a, 0x13fa: 0x000a, 0x13fb: 0x000a, 0x13fc: 0x000a, 0x13fd: 0x000a, 0x13fe: 0x000a, 0x13ff: 0x000a, // Block 0x50, offset 0x1400 0x1400: 0x000a, 0x1401: 0x000a, 0x1402: 0x000a, 0x1403: 0x000a, 0x1404: 0x0006, 0x1405: 0x009a, 0x1406: 0x008a, 0x1407: 0x000a, 0x1408: 0x000a, 0x1409: 0x000a, 0x140a: 0x000a, 
0x140b: 0x000a, 0x140c: 0x000a, 0x140d: 0x000a, 0x140e: 0x000a, 0x140f: 0x000a, 0x1410: 0x000a, 0x1411: 0x000a, 0x1412: 0x000a, 0x1413: 0x000a, 0x1414: 0x000a, 0x1415: 0x000a, 0x1416: 0x000a, 0x1417: 0x000a, 0x1418: 0x000a, 0x1419: 0x000a, 0x141a: 0x000a, 0x141b: 0x000a, 0x141c: 0x000a, 0x141d: 0x000a, 0x141e: 0x000a, 0x141f: 0x0009, 0x1420: 0x000b, 0x1421: 0x000b, 0x1422: 0x000b, 0x1423: 0x000b, 0x1424: 0x000b, 0x1425: 0x000b, 0x1426: 0x000e, 0x1427: 0x000e, 0x1428: 0x000e, 0x1429: 0x000e, 0x142a: 0x000b, 0x142b: 0x000b, 0x142c: 0x000b, 0x142d: 0x000b, 0x142e: 0x000b, 0x142f: 0x000b, 0x1430: 0x0002, 0x1434: 0x0002, 0x1435: 0x0002, 0x1436: 0x0002, 0x1437: 0x0002, 0x1438: 0x0002, 0x1439: 0x0002, 0x143a: 0x0003, 0x143b: 0x0003, 0x143c: 0x000a, 0x143d: 0x009a, 0x143e: 0x008a, // Block 0x51, offset 0x1440 0x1440: 0x0002, 0x1441: 0x0002, 0x1442: 0x0002, 0x1443: 0x0002, 0x1444: 0x0002, 0x1445: 0x0002, 0x1446: 0x0002, 0x1447: 0x0002, 0x1448: 0x0002, 0x1449: 0x0002, 0x144a: 0x0003, 0x144b: 0x0003, 0x144c: 0x000a, 0x144d: 0x009a, 0x144e: 0x008a, 0x1460: 0x0004, 0x1461: 0x0004, 0x1462: 0x0004, 0x1463: 0x0004, 0x1464: 0x0004, 0x1465: 0x0004, 0x1466: 0x0004, 0x1467: 0x0004, 0x1468: 0x0004, 0x1469: 0x0004, 0x146a: 0x0004, 0x146b: 0x0004, 0x146c: 0x0004, 0x146d: 0x0004, 0x146e: 0x0004, 0x146f: 0x0004, 0x1470: 0x0004, 0x1471: 0x0004, 0x1472: 0x0004, 0x1473: 0x0004, 0x1474: 0x0004, 0x1475: 0x0004, 0x1476: 0x0004, 0x1477: 0x0004, 0x1478: 0x0004, 0x1479: 0x0004, 0x147a: 0x0004, 0x147b: 0x0004, 0x147c: 0x0004, 0x147d: 0x0004, 0x147e: 0x0004, 0x147f: 0x0004, // Block 0x52, offset 0x1480 0x1480: 0x0004, 0x1481: 0x0004, 0x1482: 0x0004, 0x1483: 0x0004, 0x1484: 0x0004, 0x1485: 0x0004, 0x1486: 0x0004, 0x1487: 0x0004, 0x1488: 0x0004, 0x1489: 0x0004, 0x148a: 0x0004, 0x148b: 0x0004, 0x148c: 0x0004, 0x148d: 0x0004, 0x148e: 0x0004, 0x148f: 0x0004, 0x1490: 0x000c, 0x1491: 0x000c, 0x1492: 0x000c, 0x1493: 0x000c, 0x1494: 0x000c, 0x1495: 0x000c, 0x1496: 0x000c, 0x1497: 0x000c, 0x1498: 0x000c, 0x1499: 0x000c, 0x149a: 0x000c, 0x149b: 0x000c, 0x149c: 0x000c, 0x149d: 0x000c, 0x149e: 0x000c, 0x149f: 0x000c, 0x14a0: 0x000c, 0x14a1: 0x000c, 0x14a2: 0x000c, 0x14a3: 0x000c, 0x14a4: 0x000c, 0x14a5: 0x000c, 0x14a6: 0x000c, 0x14a7: 0x000c, 0x14a8: 0x000c, 0x14a9: 0x000c, 0x14aa: 0x000c, 0x14ab: 0x000c, 0x14ac: 0x000c, 0x14ad: 0x000c, 0x14ae: 0x000c, 0x14af: 0x000c, 0x14b0: 0x000c, // Block 0x53, offset 0x14c0 0x14c0: 0x000a, 0x14c1: 0x000a, 0x14c3: 0x000a, 0x14c4: 0x000a, 0x14c5: 0x000a, 0x14c6: 0x000a, 0x14c8: 0x000a, 0x14c9: 0x000a, 0x14d4: 0x000a, 0x14d6: 0x000a, 0x14d7: 0x000a, 0x14d8: 0x000a, 0x14de: 0x000a, 0x14df: 0x000a, 0x14e0: 0x000a, 0x14e1: 0x000a, 0x14e2: 0x000a, 0x14e3: 0x000a, 0x14e5: 0x000a, 0x14e7: 0x000a, 0x14e9: 0x000a, 0x14ee: 0x0004, 0x14fa: 0x000a, 0x14fb: 0x000a, // Block 0x54, offset 0x1500 0x1500: 0x000a, 0x1501: 0x000a, 0x1502: 0x000a, 0x1503: 0x000a, 0x1504: 0x000a, 0x150a: 0x000a, 0x150b: 0x000a, 0x150c: 0x000a, 0x150d: 0x000a, 0x1510: 0x000a, 0x1511: 0x000a, 0x1512: 0x000a, 0x1513: 0x000a, 0x1514: 0x000a, 0x1515: 0x000a, 0x1516: 0x000a, 0x1517: 0x000a, 0x1518: 0x000a, 0x1519: 0x000a, 0x151a: 0x000a, 0x151b: 0x000a, 0x151c: 0x000a, 0x151d: 0x000a, 0x151e: 0x000a, 0x151f: 0x000a, // Block 0x55, offset 0x1540 0x1549: 0x000a, 0x154a: 0x000a, 0x154b: 0x000a, 0x1550: 0x000a, 0x1551: 0x000a, 0x1552: 0x000a, 0x1553: 0x000a, 0x1554: 0x000a, 0x1555: 0x000a, 0x1556: 0x000a, 0x1557: 0x000a, 0x1558: 0x000a, 0x1559: 0x000a, 0x155a: 0x000a, 0x155b: 0x000a, 0x155c: 0x000a, 0x155d: 0x000a, 0x155e: 0x000a, 0x155f: 0x000a, 
0x1560: 0x000a, 0x1561: 0x000a, 0x1562: 0x000a, 0x1563: 0x000a, 0x1564: 0x000a, 0x1565: 0x000a, 0x1566: 0x000a, 0x1567: 0x000a, 0x1568: 0x000a, 0x1569: 0x000a, 0x156a: 0x000a, 0x156b: 0x000a, 0x156c: 0x000a, 0x156d: 0x000a, 0x156e: 0x000a, 0x156f: 0x000a, 0x1570: 0x000a, 0x1571: 0x000a, 0x1572: 0x000a, 0x1573: 0x000a, 0x1574: 0x000a, 0x1575: 0x000a, 0x1576: 0x000a, 0x1577: 0x000a, 0x1578: 0x000a, 0x1579: 0x000a, 0x157a: 0x000a, 0x157b: 0x000a, 0x157c: 0x000a, 0x157d: 0x000a, 0x157e: 0x000a, 0x157f: 0x000a, // Block 0x56, offset 0x1580 0x1580: 0x000a, 0x1581: 0x000a, 0x1582: 0x000a, 0x1583: 0x000a, 0x1584: 0x000a, 0x1585: 0x000a, 0x1586: 0x000a, 0x1587: 0x000a, 0x1588: 0x000a, 0x1589: 0x000a, 0x158a: 0x000a, 0x158b: 0x000a, 0x158c: 0x000a, 0x158d: 0x000a, 0x158e: 0x000a, 0x158f: 0x000a, 0x1590: 0x000a, 0x1591: 0x000a, 0x1592: 0x000a, 0x1593: 0x000a, 0x1594: 0x000a, 0x1595: 0x000a, 0x1596: 0x000a, 0x1597: 0x000a, 0x1598: 0x000a, 0x1599: 0x000a, 0x159a: 0x000a, 0x159b: 0x000a, 0x159c: 0x000a, 0x159d: 0x000a, 0x159e: 0x000a, 0x159f: 0x000a, 0x15a0: 0x000a, 0x15a1: 0x000a, 0x15a2: 0x000a, 0x15a3: 0x000a, 0x15a4: 0x000a, 0x15a5: 0x000a, 0x15a6: 0x000a, 0x15a7: 0x000a, 0x15a8: 0x000a, 0x15a9: 0x000a, 0x15aa: 0x000a, 0x15ab: 0x000a, 0x15ac: 0x000a, 0x15ad: 0x000a, 0x15ae: 0x000a, 0x15af: 0x000a, 0x15b0: 0x000a, 0x15b1: 0x000a, 0x15b2: 0x000a, 0x15b3: 0x000a, 0x15b4: 0x000a, 0x15b5: 0x000a, 0x15b6: 0x000a, 0x15b7: 0x000a, 0x15b8: 0x000a, 0x15b9: 0x000a, 0x15ba: 0x000a, 0x15bb: 0x000a, 0x15bc: 0x000a, 0x15bd: 0x000a, 0x15be: 0x000a, 0x15bf: 0x000a, // Block 0x57, offset 0x15c0 0x15c0: 0x000a, 0x15c1: 0x000a, 0x15c2: 0x000a, 0x15c3: 0x000a, 0x15c4: 0x000a, 0x15c5: 0x000a, 0x15c6: 0x000a, 0x15c7: 0x000a, 0x15c8: 0x000a, 0x15c9: 0x000a, 0x15ca: 0x000a, 0x15cb: 0x000a, 0x15cc: 0x000a, 0x15cd: 0x000a, 0x15ce: 0x000a, 0x15cf: 0x000a, 0x15d0: 0x000a, 0x15d1: 0x000a, 0x15d2: 0x0003, 0x15d3: 0x0004, 0x15d4: 0x000a, 0x15d5: 0x000a, 0x15d6: 0x000a, 0x15d7: 0x000a, 0x15d8: 0x000a, 0x15d9: 0x000a, 0x15da: 0x000a, 0x15db: 0x000a, 0x15dc: 0x000a, 0x15dd: 0x000a, 0x15de: 0x000a, 0x15df: 0x000a, 0x15e0: 0x000a, 0x15e1: 0x000a, 0x15e2: 0x000a, 0x15e3: 0x000a, 0x15e4: 0x000a, 0x15e5: 0x000a, 0x15e6: 0x000a, 0x15e7: 0x000a, 0x15e8: 0x000a, 0x15e9: 0x000a, 0x15ea: 0x000a, 0x15eb: 0x000a, 0x15ec: 0x000a, 0x15ed: 0x000a, 0x15ee: 0x000a, 0x15ef: 0x000a, 0x15f0: 0x000a, 0x15f1: 0x000a, 0x15f2: 0x000a, 0x15f3: 0x000a, 0x15f4: 0x000a, 0x15f5: 0x000a, 0x15f6: 0x000a, 0x15f7: 0x000a, 0x15f8: 0x000a, 0x15f9: 0x000a, 0x15fa: 0x000a, 0x15fb: 0x000a, 0x15fc: 0x000a, 0x15fd: 0x000a, 0x15fe: 0x000a, 0x15ff: 0x000a, // Block 0x58, offset 0x1600 0x1600: 0x000a, 0x1601: 0x000a, 0x1602: 0x000a, 0x1603: 0x000a, 0x1604: 0x000a, 0x1605: 0x000a, 0x1606: 0x000a, 0x1607: 0x000a, 0x1608: 0x003a, 0x1609: 0x002a, 0x160a: 0x003a, 0x160b: 0x002a, 0x160c: 0x000a, 0x160d: 0x000a, 0x160e: 0x000a, 0x160f: 0x000a, 0x1610: 0x000a, 0x1611: 0x000a, 0x1612: 0x000a, 0x1613: 0x000a, 0x1614: 0x000a, 0x1615: 0x000a, 0x1616: 0x000a, 0x1617: 0x000a, 0x1618: 0x000a, 0x1619: 0x000a, 0x161a: 0x000a, 0x161b: 0x000a, 0x161c: 0x000a, 0x161d: 0x000a, 0x161e: 0x000a, 0x161f: 0x000a, 0x1620: 0x000a, 0x1621: 0x000a, 0x1622: 0x000a, 0x1623: 0x000a, 0x1624: 0x000a, 0x1625: 0x000a, 0x1626: 0x000a, 0x1627: 0x000a, 0x1628: 0x000a, 0x1629: 0x009a, 0x162a: 0x008a, 0x162b: 0x000a, 0x162c: 0x000a, 0x162d: 0x000a, 0x162e: 0x000a, 0x162f: 0x000a, 0x1630: 0x000a, 0x1631: 0x000a, 0x1632: 0x000a, 0x1633: 0x000a, 0x1634: 0x000a, 0x1635: 0x000a, // Block 0x59, offset 0x1640 0x167b: 
0x000a, 0x167c: 0x000a, 0x167d: 0x000a, 0x167e: 0x000a, 0x167f: 0x000a, // Block 0x5a, offset 0x1680 0x1680: 0x000a, 0x1681: 0x000a, 0x1682: 0x000a, 0x1683: 0x000a, 0x1684: 0x000a, 0x1685: 0x000a, 0x1686: 0x000a, 0x1687: 0x000a, 0x1688: 0x000a, 0x1689: 0x000a, 0x168a: 0x000a, 0x168b: 0x000a, 0x168c: 0x000a, 0x168d: 0x000a, 0x168e: 0x000a, 0x168f: 0x000a, 0x1690: 0x000a, 0x1691: 0x000a, 0x1692: 0x000a, 0x1693: 0x000a, 0x1694: 0x000a, 0x1696: 0x000a, 0x1697: 0x000a, 0x1698: 0x000a, 0x1699: 0x000a, 0x169a: 0x000a, 0x169b: 0x000a, 0x169c: 0x000a, 0x169d: 0x000a, 0x169e: 0x000a, 0x169f: 0x000a, 0x16a0: 0x000a, 0x16a1: 0x000a, 0x16a2: 0x000a, 0x16a3: 0x000a, 0x16a4: 0x000a, 0x16a5: 0x000a, 0x16a6: 0x000a, 0x16a7: 0x000a, 0x16a8: 0x000a, 0x16a9: 0x000a, 0x16aa: 0x000a, 0x16ab: 0x000a, 0x16ac: 0x000a, 0x16ad: 0x000a, 0x16ae: 0x000a, 0x16af: 0x000a, 0x16b0: 0x000a, 0x16b1: 0x000a, 0x16b2: 0x000a, 0x16b3: 0x000a, 0x16b4: 0x000a, 0x16b5: 0x000a, 0x16b6: 0x000a, 0x16b7: 0x000a, 0x16b8: 0x000a, 0x16b9: 0x000a, 0x16ba: 0x000a, 0x16bb: 0x000a, 0x16bc: 0x000a, 0x16bd: 0x000a, 0x16be: 0x000a, 0x16bf: 0x000a, // Block 0x5b, offset 0x16c0 0x16c0: 0x000a, 0x16c1: 0x000a, 0x16c2: 0x000a, 0x16c3: 0x000a, 0x16c4: 0x000a, 0x16c5: 0x000a, 0x16c6: 0x000a, 0x16c7: 0x000a, 0x16c8: 0x000a, 0x16c9: 0x000a, 0x16ca: 0x000a, 0x16cb: 0x000a, 0x16cc: 0x000a, 0x16cd: 0x000a, 0x16ce: 0x000a, 0x16cf: 0x000a, 0x16d0: 0x000a, 0x16d1: 0x000a, 0x16d2: 0x000a, 0x16d3: 0x000a, 0x16d4: 0x000a, 0x16d5: 0x000a, 0x16d6: 0x000a, 0x16d7: 0x000a, 0x16d8: 0x000a, 0x16d9: 0x000a, 0x16da: 0x000a, 0x16db: 0x000a, 0x16dc: 0x000a, 0x16dd: 0x000a, 0x16de: 0x000a, 0x16df: 0x000a, 0x16e0: 0x000a, 0x16e1: 0x000a, 0x16e2: 0x000a, 0x16e3: 0x000a, 0x16e4: 0x000a, 0x16e5: 0x000a, 0x16e6: 0x000a, 0x16e7: 0x000a, 0x16e8: 0x000a, 0x16e9: 0x000a, 0x16ea: 0x000a, 0x16eb: 0x000a, 0x16ec: 0x000a, 0x16ed: 0x000a, 0x16ee: 0x000a, 0x16ef: 0x000a, 0x16f0: 0x000a, 0x16f1: 0x000a, 0x16f2: 0x000a, 0x16f3: 0x000a, 0x16f4: 0x000a, 0x16f5: 0x000a, 0x16f6: 0x000a, 0x16f7: 0x000a, 0x16f8: 0x000a, 0x16f9: 0x000a, 0x16fa: 0x000a, 0x16fb: 0x000a, 0x16fc: 0x000a, 0x16fd: 0x000a, 0x16fe: 0x000a, // Block 0x5c, offset 0x1700 0x1700: 0x000a, 0x1701: 0x000a, 0x1702: 0x000a, 0x1703: 0x000a, 0x1704: 0x000a, 0x1705: 0x000a, 0x1706: 0x000a, 0x1707: 0x000a, 0x1708: 0x000a, 0x1709: 0x000a, 0x170a: 0x000a, 0x170b: 0x000a, 0x170c: 0x000a, 0x170d: 0x000a, 0x170e: 0x000a, 0x170f: 0x000a, 0x1710: 0x000a, 0x1711: 0x000a, 0x1712: 0x000a, 0x1713: 0x000a, 0x1714: 0x000a, 0x1715: 0x000a, 0x1716: 0x000a, 0x1717: 0x000a, 0x1718: 0x000a, 0x1719: 0x000a, 0x171a: 0x000a, 0x171b: 0x000a, 0x171c: 0x000a, 0x171d: 0x000a, 0x171e: 0x000a, 0x171f: 0x000a, 0x1720: 0x000a, 0x1721: 0x000a, 0x1722: 0x000a, 0x1723: 0x000a, 0x1724: 0x000a, 0x1725: 0x000a, 0x1726: 0x000a, // Block 0x5d, offset 0x1740 0x1740: 0x000a, 0x1741: 0x000a, 0x1742: 0x000a, 0x1743: 0x000a, 0x1744: 0x000a, 0x1745: 0x000a, 0x1746: 0x000a, 0x1747: 0x000a, 0x1748: 0x000a, 0x1749: 0x000a, 0x174a: 0x000a, 0x1760: 0x000a, 0x1761: 0x000a, 0x1762: 0x000a, 0x1763: 0x000a, 0x1764: 0x000a, 0x1765: 0x000a, 0x1766: 0x000a, 0x1767: 0x000a, 0x1768: 0x000a, 0x1769: 0x000a, 0x176a: 0x000a, 0x176b: 0x000a, 0x176c: 0x000a, 0x176d: 0x000a, 0x176e: 0x000a, 0x176f: 0x000a, 0x1770: 0x000a, 0x1771: 0x000a, 0x1772: 0x000a, 0x1773: 0x000a, 0x1774: 0x000a, 0x1775: 0x000a, 0x1776: 0x000a, 0x1777: 0x000a, 0x1778: 0x000a, 0x1779: 0x000a, 0x177a: 0x000a, 0x177b: 0x000a, 0x177c: 0x000a, 0x177d: 0x000a, 0x177e: 0x000a, 0x177f: 0x000a, // Block 0x5e, offset 0x1780 0x1780: 
0x000a, 0x1781: 0x000a, 0x1782: 0x000a, 0x1783: 0x000a, 0x1784: 0x000a, 0x1785: 0x000a, 0x1786: 0x000a, 0x1787: 0x000a, 0x1788: 0x0002, 0x1789: 0x0002, 0x178a: 0x0002, 0x178b: 0x0002, 0x178c: 0x0002, 0x178d: 0x0002, 0x178e: 0x0002, 0x178f: 0x0002, 0x1790: 0x0002, 0x1791: 0x0002, 0x1792: 0x0002, 0x1793: 0x0002, 0x1794: 0x0002, 0x1795: 0x0002, 0x1796: 0x0002, 0x1797: 0x0002, 0x1798: 0x0002, 0x1799: 0x0002, 0x179a: 0x0002, 0x179b: 0x0002, // Block 0x5f, offset 0x17c0 0x17ea: 0x000a, 0x17eb: 0x000a, 0x17ec: 0x000a, 0x17ed: 0x000a, 0x17ee: 0x000a, 0x17ef: 0x000a, 0x17f0: 0x000a, 0x17f1: 0x000a, 0x17f2: 0x000a, 0x17f3: 0x000a, 0x17f4: 0x000a, 0x17f5: 0x000a, 0x17f6: 0x000a, 0x17f7: 0x000a, 0x17f8: 0x000a, 0x17f9: 0x000a, 0x17fa: 0x000a, 0x17fb: 0x000a, 0x17fc: 0x000a, 0x17fd: 0x000a, 0x17fe: 0x000a, 0x17ff: 0x000a, // Block 0x60, offset 0x1800 0x1800: 0x000a, 0x1801: 0x000a, 0x1802: 0x000a, 0x1803: 0x000a, 0x1804: 0x000a, 0x1805: 0x000a, 0x1806: 0x000a, 0x1807: 0x000a, 0x1808: 0x000a, 0x1809: 0x000a, 0x180a: 0x000a, 0x180b: 0x000a, 0x180c: 0x000a, 0x180d: 0x000a, 0x180e: 0x000a, 0x180f: 0x000a, 0x1810: 0x000a, 0x1811: 0x000a, 0x1812: 0x000a, 0x1813: 0x000a, 0x1814: 0x000a, 0x1815: 0x000a, 0x1816: 0x000a, 0x1817: 0x000a, 0x1818: 0x000a, 0x1819: 0x000a, 0x181a: 0x000a, 0x181b: 0x000a, 0x181c: 0x000a, 0x181d: 0x000a, 0x181e: 0x000a, 0x181f: 0x000a, 0x1820: 0x000a, 0x1821: 0x000a, 0x1822: 0x000a, 0x1823: 0x000a, 0x1824: 0x000a, 0x1825: 0x000a, 0x1826: 0x000a, 0x1827: 0x000a, 0x1828: 0x000a, 0x1829: 0x000a, 0x182a: 0x000a, 0x182b: 0x000a, 0x182d: 0x000a, 0x182e: 0x000a, 0x182f: 0x000a, 0x1830: 0x000a, 0x1831: 0x000a, 0x1832: 0x000a, 0x1833: 0x000a, 0x1834: 0x000a, 0x1835: 0x000a, 0x1836: 0x000a, 0x1837: 0x000a, 0x1838: 0x000a, 0x1839: 0x000a, 0x183a: 0x000a, 0x183b: 0x000a, 0x183c: 0x000a, 0x183d: 0x000a, 0x183e: 0x000a, 0x183f: 0x000a, // Block 0x61, offset 0x1840 0x1840: 0x000a, 0x1841: 0x000a, 0x1842: 0x000a, 0x1843: 0x000a, 0x1844: 0x000a, 0x1845: 0x000a, 0x1846: 0x000a, 0x1847: 0x000a, 0x1848: 0x000a, 0x1849: 0x000a, 0x184a: 0x000a, 0x184b: 0x000a, 0x184c: 0x000a, 0x184d: 0x000a, 0x184e: 0x000a, 0x184f: 0x000a, 0x1850: 0x000a, 0x1851: 0x000a, 0x1852: 0x000a, 0x1853: 0x000a, 0x1854: 0x000a, 0x1855: 0x000a, 0x1856: 0x000a, 0x1857: 0x000a, 0x1858: 0x000a, 0x1859: 0x000a, 0x185a: 0x000a, 0x185b: 0x000a, 0x185c: 0x000a, 0x185d: 0x000a, 0x185e: 0x000a, 0x185f: 0x000a, 0x1860: 0x000a, 0x1861: 0x000a, 0x1862: 0x000a, 0x1863: 0x000a, 0x1864: 0x000a, 0x1865: 0x000a, 0x1866: 0x000a, 0x1867: 0x000a, 0x1868: 0x003a, 0x1869: 0x002a, 0x186a: 0x003a, 0x186b: 0x002a, 0x186c: 0x003a, 0x186d: 0x002a, 0x186e: 0x003a, 0x186f: 0x002a, 0x1870: 0x003a, 0x1871: 0x002a, 0x1872: 0x003a, 0x1873: 0x002a, 0x1874: 0x003a, 0x1875: 0x002a, 0x1876: 0x000a, 0x1877: 0x000a, 0x1878: 0x000a, 0x1879: 0x000a, 0x187a: 0x000a, 0x187b: 0x000a, 0x187c: 0x000a, 0x187d: 0x000a, 0x187e: 0x000a, 0x187f: 0x000a, // Block 0x62, offset 0x1880 0x1880: 0x000a, 0x1881: 0x000a, 0x1882: 0x000a, 0x1883: 0x000a, 0x1884: 0x000a, 0x1885: 0x009a, 0x1886: 0x008a, 0x1887: 0x000a, 0x1888: 0x000a, 0x1889: 0x000a, 0x188a: 0x000a, 0x188b: 0x000a, 0x188c: 0x000a, 0x188d: 0x000a, 0x188e: 0x000a, 0x188f: 0x000a, 0x1890: 0x000a, 0x1891: 0x000a, 0x1892: 0x000a, 0x1893: 0x000a, 0x1894: 0x000a, 0x1895: 0x000a, 0x1896: 0x000a, 0x1897: 0x000a, 0x1898: 0x000a, 0x1899: 0x000a, 0x189a: 0x000a, 0x189b: 0x000a, 0x189c: 0x000a, 0x189d: 0x000a, 0x189e: 0x000a, 0x189f: 0x000a, 0x18a0: 0x000a, 0x18a1: 0x000a, 0x18a2: 0x000a, 0x18a3: 0x000a, 0x18a4: 0x000a, 0x18a5: 0x000a, 
0x18a6: 0x003a, 0x18a7: 0x002a, 0x18a8: 0x003a, 0x18a9: 0x002a, 0x18aa: 0x003a, 0x18ab: 0x002a, 0x18ac: 0x003a, 0x18ad: 0x002a, 0x18ae: 0x003a, 0x18af: 0x002a, 0x18b0: 0x000a, 0x18b1: 0x000a, 0x18b2: 0x000a, 0x18b3: 0x000a, 0x18b4: 0x000a, 0x18b5: 0x000a, 0x18b6: 0x000a, 0x18b7: 0x000a, 0x18b8: 0x000a, 0x18b9: 0x000a, 0x18ba: 0x000a, 0x18bb: 0x000a, 0x18bc: 0x000a, 0x18bd: 0x000a, 0x18be: 0x000a, 0x18bf: 0x000a, // Block 0x63, offset 0x18c0 0x18c0: 0x000a, 0x18c1: 0x000a, 0x18c2: 0x000a, 0x18c3: 0x007a, 0x18c4: 0x006a, 0x18c5: 0x009a, 0x18c6: 0x008a, 0x18c7: 0x00ba, 0x18c8: 0x00aa, 0x18c9: 0x009a, 0x18ca: 0x008a, 0x18cb: 0x007a, 0x18cc: 0x006a, 0x18cd: 0x00da, 0x18ce: 0x002a, 0x18cf: 0x003a, 0x18d0: 0x00ca, 0x18d1: 0x009a, 0x18d2: 0x008a, 0x18d3: 0x007a, 0x18d4: 0x006a, 0x18d5: 0x009a, 0x18d6: 0x008a, 0x18d7: 0x00ba, 0x18d8: 0x00aa, 0x18d9: 0x000a, 0x18da: 0x000a, 0x18db: 0x000a, 0x18dc: 0x000a, 0x18dd: 0x000a, 0x18de: 0x000a, 0x18df: 0x000a, 0x18e0: 0x000a, 0x18e1: 0x000a, 0x18e2: 0x000a, 0x18e3: 0x000a, 0x18e4: 0x000a, 0x18e5: 0x000a, 0x18e6: 0x000a, 0x18e7: 0x000a, 0x18e8: 0x000a, 0x18e9: 0x000a, 0x18ea: 0x000a, 0x18eb: 0x000a, 0x18ec: 0x000a, 0x18ed: 0x000a, 0x18ee: 0x000a, 0x18ef: 0x000a, 0x18f0: 0x000a, 0x18f1: 0x000a, 0x18f2: 0x000a, 0x18f3: 0x000a, 0x18f4: 0x000a, 0x18f5: 0x000a, 0x18f6: 0x000a, 0x18f7: 0x000a, 0x18f8: 0x000a, 0x18f9: 0x000a, 0x18fa: 0x000a, 0x18fb: 0x000a, 0x18fc: 0x000a, 0x18fd: 0x000a, 0x18fe: 0x000a, 0x18ff: 0x000a, // Block 0x64, offset 0x1900 0x1900: 0x000a, 0x1901: 0x000a, 0x1902: 0x000a, 0x1903: 0x000a, 0x1904: 0x000a, 0x1905: 0x000a, 0x1906: 0x000a, 0x1907: 0x000a, 0x1908: 0x000a, 0x1909: 0x000a, 0x190a: 0x000a, 0x190b: 0x000a, 0x190c: 0x000a, 0x190d: 0x000a, 0x190e: 0x000a, 0x190f: 0x000a, 0x1910: 0x000a, 0x1911: 0x000a, 0x1912: 0x000a, 0x1913: 0x000a, 0x1914: 0x000a, 0x1915: 0x000a, 0x1916: 0x000a, 0x1917: 0x000a, 0x1918: 0x003a, 0x1919: 0x002a, 0x191a: 0x003a, 0x191b: 0x002a, 0x191c: 0x000a, 0x191d: 0x000a, 0x191e: 0x000a, 0x191f: 0x000a, 0x1920: 0x000a, 0x1921: 0x000a, 0x1922: 0x000a, 0x1923: 0x000a, 0x1924: 0x000a, 0x1925: 0x000a, 0x1926: 0x000a, 0x1927: 0x000a, 0x1928: 0x000a, 0x1929: 0x000a, 0x192a: 0x000a, 0x192b: 0x000a, 0x192c: 0x000a, 0x192d: 0x000a, 0x192e: 0x000a, 0x192f: 0x000a, 0x1930: 0x000a, 0x1931: 0x000a, 0x1932: 0x000a, 0x1933: 0x000a, 0x1934: 0x000a, 0x1935: 0x000a, 0x1936: 0x000a, 0x1937: 0x000a, 0x1938: 0x000a, 0x1939: 0x000a, 0x193a: 0x000a, 0x193b: 0x000a, 0x193c: 0x003a, 0x193d: 0x002a, 0x193e: 0x000a, 0x193f: 0x000a, // Block 0x65, offset 0x1940 0x1940: 0x000a, 0x1941: 0x000a, 0x1942: 0x000a, 0x1943: 0x000a, 0x1944: 0x000a, 0x1945: 0x000a, 0x1946: 0x000a, 0x1947: 0x000a, 0x1948: 0x000a, 0x1949: 0x000a, 0x194a: 0x000a, 0x194b: 0x000a, 0x194c: 0x000a, 0x194d: 0x000a, 0x194e: 0x000a, 0x194f: 0x000a, 0x1950: 0x000a, 0x1951: 0x000a, 0x1952: 0x000a, 0x1953: 0x000a, 0x1954: 0x000a, 0x1955: 0x000a, 0x1956: 0x000a, 0x1957: 0x000a, 0x1958: 0x000a, 0x1959: 0x000a, 0x195a: 0x000a, 0x195b: 0x000a, 0x195c: 0x000a, 0x195d: 0x000a, 0x195e: 0x000a, 0x195f: 0x000a, 0x1960: 0x000a, 0x1961: 0x000a, 0x1962: 0x000a, 0x1963: 0x000a, 0x1964: 0x000a, 0x1965: 0x000a, 0x1966: 0x000a, 0x1967: 0x000a, 0x1968: 0x000a, 0x1969: 0x000a, 0x196a: 0x000a, 0x196b: 0x000a, 0x196c: 0x000a, 0x196d: 0x000a, 0x196e: 0x000a, 0x196f: 0x000a, 0x1970: 0x000a, 0x1971: 0x000a, 0x1972: 0x000a, 0x1973: 0x000a, 0x1976: 0x000a, 0x1977: 0x000a, 0x1978: 0x000a, 0x1979: 0x000a, 0x197a: 0x000a, 0x197b: 0x000a, 0x197c: 0x000a, 0x197d: 0x000a, 0x197e: 0x000a, 0x197f: 0x000a, // Block 
0x66, offset 0x1980 0x1980: 0x000a, 0x1981: 0x000a, 0x1982: 0x000a, 0x1983: 0x000a, 0x1984: 0x000a, 0x1985: 0x000a, 0x1986: 0x000a, 0x1987: 0x000a, 0x1988: 0x000a, 0x1989: 0x000a, 0x198a: 0x000a, 0x198b: 0x000a, 0x198c: 0x000a, 0x198d: 0x000a, 0x198e: 0x000a, 0x198f: 0x000a, 0x1990: 0x000a, 0x1991: 0x000a, 0x1992: 0x000a, 0x1993: 0x000a, 0x1994: 0x000a, 0x1995: 0x000a, 0x1998: 0x000a, 0x1999: 0x000a, 0x199a: 0x000a, 0x199b: 0x000a, 0x199c: 0x000a, 0x199d: 0x000a, 0x199e: 0x000a, 0x199f: 0x000a, 0x19a0: 0x000a, 0x19a1: 0x000a, 0x19a2: 0x000a, 0x19a3: 0x000a, 0x19a4: 0x000a, 0x19a5: 0x000a, 0x19a6: 0x000a, 0x19a7: 0x000a, 0x19a8: 0x000a, 0x19a9: 0x000a, 0x19aa: 0x000a, 0x19ab: 0x000a, 0x19ac: 0x000a, 0x19ad: 0x000a, 0x19ae: 0x000a, 0x19af: 0x000a, 0x19b0: 0x000a, 0x19b1: 0x000a, 0x19b2: 0x000a, 0x19b3: 0x000a, 0x19b4: 0x000a, 0x19b5: 0x000a, 0x19b6: 0x000a, 0x19b7: 0x000a, 0x19b8: 0x000a, 0x19b9: 0x000a, 0x19bd: 0x000a, 0x19be: 0x000a, 0x19bf: 0x000a, // Block 0x67, offset 0x19c0 0x19c0: 0x000a, 0x19c1: 0x000a, 0x19c2: 0x000a, 0x19c3: 0x000a, 0x19c4: 0x000a, 0x19c5: 0x000a, 0x19c6: 0x000a, 0x19c7: 0x000a, 0x19c8: 0x000a, 0x19ca: 0x000a, 0x19cb: 0x000a, 0x19cc: 0x000a, 0x19cd: 0x000a, 0x19ce: 0x000a, 0x19cf: 0x000a, 0x19d0: 0x000a, 0x19d1: 0x000a, 0x19ec: 0x000a, 0x19ed: 0x000a, 0x19ee: 0x000a, 0x19ef: 0x000a, // Block 0x68, offset 0x1a00 0x1a25: 0x000a, 0x1a26: 0x000a, 0x1a27: 0x000a, 0x1a28: 0x000a, 0x1a29: 0x000a, 0x1a2a: 0x000a, 0x1a2f: 0x000c, 0x1a30: 0x000c, 0x1a31: 0x000c, 0x1a39: 0x000a, 0x1a3a: 0x000a, 0x1a3b: 0x000a, 0x1a3c: 0x000a, 0x1a3d: 0x000a, 0x1a3e: 0x000a, 0x1a3f: 0x000a, // Block 0x69, offset 0x1a40 0x1a7f: 0x000c, // Block 0x6a, offset 0x1a80 0x1aa0: 0x000c, 0x1aa1: 0x000c, 0x1aa2: 0x000c, 0x1aa3: 0x000c, 0x1aa4: 0x000c, 0x1aa5: 0x000c, 0x1aa6: 0x000c, 0x1aa7: 0x000c, 0x1aa8: 0x000c, 0x1aa9: 0x000c, 0x1aaa: 0x000c, 0x1aab: 0x000c, 0x1aac: 0x000c, 0x1aad: 0x000c, 0x1aae: 0x000c, 0x1aaf: 0x000c, 0x1ab0: 0x000c, 0x1ab1: 0x000c, 0x1ab2: 0x000c, 0x1ab3: 0x000c, 0x1ab4: 0x000c, 0x1ab5: 0x000c, 0x1ab6: 0x000c, 0x1ab7: 0x000c, 0x1ab8: 0x000c, 0x1ab9: 0x000c, 0x1aba: 0x000c, 0x1abb: 0x000c, 0x1abc: 0x000c, 0x1abd: 0x000c, 0x1abe: 0x000c, 0x1abf: 0x000c, // Block 0x6b, offset 0x1ac0 0x1ac0: 0x000a, 0x1ac1: 0x000a, 0x1ac2: 0x000a, 0x1ac3: 0x000a, 0x1ac4: 0x000a, 0x1ac5: 0x000a, 0x1ac6: 0x000a, 0x1ac7: 0x000a, 0x1ac8: 0x000a, 0x1ac9: 0x000a, 0x1aca: 0x000a, 0x1acb: 0x000a, 0x1acc: 0x000a, 0x1acd: 0x000a, 0x1ace: 0x000a, 0x1acf: 0x000a, 0x1ad0: 0x000a, 0x1ad1: 0x000a, 0x1ad2: 0x000a, 0x1ad3: 0x000a, 0x1ad4: 0x000a, 0x1ad5: 0x000a, 0x1ad6: 0x000a, 0x1ad7: 0x000a, 0x1ad8: 0x000a, 0x1ad9: 0x000a, 0x1ada: 0x000a, 0x1adb: 0x000a, 0x1adc: 0x000a, 0x1add: 0x000a, 0x1ade: 0x000a, 0x1adf: 0x000a, 0x1ae0: 0x000a, 0x1ae1: 0x000a, 0x1ae2: 0x003a, 0x1ae3: 0x002a, 0x1ae4: 0x003a, 0x1ae5: 0x002a, 0x1ae6: 0x003a, 0x1ae7: 0x002a, 0x1ae8: 0x003a, 0x1ae9: 0x002a, 0x1aea: 0x000a, 0x1aeb: 0x000a, 0x1aec: 0x000a, 0x1aed: 0x000a, 0x1aee: 0x000a, 0x1aef: 0x000a, 0x1af0: 0x000a, 0x1af1: 0x000a, 0x1af2: 0x000a, 0x1af3: 0x000a, 0x1af4: 0x000a, 0x1af5: 0x000a, 0x1af6: 0x000a, 0x1af7: 0x000a, 0x1af8: 0x000a, 0x1af9: 0x000a, 0x1afa: 0x000a, 0x1afb: 0x000a, 0x1afc: 0x000a, 0x1afd: 0x000a, 0x1afe: 0x000a, 0x1aff: 0x000a, // Block 0x6c, offset 0x1b00 0x1b00: 0x000a, 0x1b01: 0x000a, 0x1b02: 0x000a, 0x1b03: 0x000a, 0x1b04: 0x000a, // Block 0x6d, offset 0x1b40 0x1b40: 0x000a, 0x1b41: 0x000a, 0x1b42: 0x000a, 0x1b43: 0x000a, 0x1b44: 0x000a, 0x1b45: 0x000a, 0x1b46: 0x000a, 0x1b47: 0x000a, 0x1b48: 0x000a, 0x1b49: 0x000a, 
0x1b4a: 0x000a, 0x1b4b: 0x000a, 0x1b4c: 0x000a, 0x1b4d: 0x000a, 0x1b4e: 0x000a, 0x1b4f: 0x000a, 0x1b50: 0x000a, 0x1b51: 0x000a, 0x1b52: 0x000a, 0x1b53: 0x000a, 0x1b54: 0x000a, 0x1b55: 0x000a, 0x1b56: 0x000a, 0x1b57: 0x000a, 0x1b58: 0x000a, 0x1b59: 0x000a, 0x1b5b: 0x000a, 0x1b5c: 0x000a, 0x1b5d: 0x000a, 0x1b5e: 0x000a, 0x1b5f: 0x000a, 0x1b60: 0x000a, 0x1b61: 0x000a, 0x1b62: 0x000a, 0x1b63: 0x000a, 0x1b64: 0x000a, 0x1b65: 0x000a, 0x1b66: 0x000a, 0x1b67: 0x000a, 0x1b68: 0x000a, 0x1b69: 0x000a, 0x1b6a: 0x000a, 0x1b6b: 0x000a, 0x1b6c: 0x000a, 0x1b6d: 0x000a, 0x1b6e: 0x000a, 0x1b6f: 0x000a, 0x1b70: 0x000a, 0x1b71: 0x000a, 0x1b72: 0x000a, 0x1b73: 0x000a, 0x1b74: 0x000a, 0x1b75: 0x000a, 0x1b76: 0x000a, 0x1b77: 0x000a, 0x1b78: 0x000a, 0x1b79: 0x000a, 0x1b7a: 0x000a, 0x1b7b: 0x000a, 0x1b7c: 0x000a, 0x1b7d: 0x000a, 0x1b7e: 0x000a, 0x1b7f: 0x000a, // Block 0x6e, offset 0x1b80 0x1b80: 0x000a, 0x1b81: 0x000a, 0x1b82: 0x000a, 0x1b83: 0x000a, 0x1b84: 0x000a, 0x1b85: 0x000a, 0x1b86: 0x000a, 0x1b87: 0x000a, 0x1b88: 0x000a, 0x1b89: 0x000a, 0x1b8a: 0x000a, 0x1b8b: 0x000a, 0x1b8c: 0x000a, 0x1b8d: 0x000a, 0x1b8e: 0x000a, 0x1b8f: 0x000a, 0x1b90: 0x000a, 0x1b91: 0x000a, 0x1b92: 0x000a, 0x1b93: 0x000a, 0x1b94: 0x000a, 0x1b95: 0x000a, 0x1b96: 0x000a, 0x1b97: 0x000a, 0x1b98: 0x000a, 0x1b99: 0x000a, 0x1b9a: 0x000a, 0x1b9b: 0x000a, 0x1b9c: 0x000a, 0x1b9d: 0x000a, 0x1b9e: 0x000a, 0x1b9f: 0x000a, 0x1ba0: 0x000a, 0x1ba1: 0x000a, 0x1ba2: 0x000a, 0x1ba3: 0x000a, 0x1ba4: 0x000a, 0x1ba5: 0x000a, 0x1ba6: 0x000a, 0x1ba7: 0x000a, 0x1ba8: 0x000a, 0x1ba9: 0x000a, 0x1baa: 0x000a, 0x1bab: 0x000a, 0x1bac: 0x000a, 0x1bad: 0x000a, 0x1bae: 0x000a, 0x1baf: 0x000a, 0x1bb0: 0x000a, 0x1bb1: 0x000a, 0x1bb2: 0x000a, 0x1bb3: 0x000a, // Block 0x6f, offset 0x1bc0 0x1bc0: 0x000a, 0x1bc1: 0x000a, 0x1bc2: 0x000a, 0x1bc3: 0x000a, 0x1bc4: 0x000a, 0x1bc5: 0x000a, 0x1bc6: 0x000a, 0x1bc7: 0x000a, 0x1bc8: 0x000a, 0x1bc9: 0x000a, 0x1bca: 0x000a, 0x1bcb: 0x000a, 0x1bcc: 0x000a, 0x1bcd: 0x000a, 0x1bce: 0x000a, 0x1bcf: 0x000a, 0x1bd0: 0x000a, 0x1bd1: 0x000a, 0x1bd2: 0x000a, 0x1bd3: 0x000a, 0x1bd4: 0x000a, 0x1bd5: 0x000a, 0x1bf0: 0x000a, 0x1bf1: 0x000a, 0x1bf2: 0x000a, 0x1bf3: 0x000a, 0x1bf4: 0x000a, 0x1bf5: 0x000a, 0x1bf6: 0x000a, 0x1bf7: 0x000a, 0x1bf8: 0x000a, 0x1bf9: 0x000a, 0x1bfa: 0x000a, 0x1bfb: 0x000a, // Block 0x70, offset 0x1c00 0x1c00: 0x0009, 0x1c01: 0x000a, 0x1c02: 0x000a, 0x1c03: 0x000a, 0x1c04: 0x000a, 0x1c08: 0x003a, 0x1c09: 0x002a, 0x1c0a: 0x003a, 0x1c0b: 0x002a, 0x1c0c: 0x003a, 0x1c0d: 0x002a, 0x1c0e: 0x003a, 0x1c0f: 0x002a, 0x1c10: 0x003a, 0x1c11: 0x002a, 0x1c12: 0x000a, 0x1c13: 0x000a, 0x1c14: 0x003a, 0x1c15: 0x002a, 0x1c16: 0x003a, 0x1c17: 0x002a, 0x1c18: 0x003a, 0x1c19: 0x002a, 0x1c1a: 0x003a, 0x1c1b: 0x002a, 0x1c1c: 0x000a, 0x1c1d: 0x000a, 0x1c1e: 0x000a, 0x1c1f: 0x000a, 0x1c20: 0x000a, 0x1c2a: 0x000c, 0x1c2b: 0x000c, 0x1c2c: 0x000c, 0x1c2d: 0x000c, 0x1c30: 0x000a, 0x1c36: 0x000a, 0x1c37: 0x000a, 0x1c3d: 0x000a, 0x1c3e: 0x000a, 0x1c3f: 0x000a, // Block 0x71, offset 0x1c40 0x1c59: 0x000c, 0x1c5a: 0x000c, 0x1c5b: 0x000a, 0x1c5c: 0x000a, 0x1c60: 0x000a, // Block 0x72, offset 0x1c80 0x1cbb: 0x000a, // Block 0x73, offset 0x1cc0 0x1cc0: 0x000a, 0x1cc1: 0x000a, 0x1cc2: 0x000a, 0x1cc3: 0x000a, 0x1cc4: 0x000a, 0x1cc5: 0x000a, 0x1cc6: 0x000a, 0x1cc7: 0x000a, 0x1cc8: 0x000a, 0x1cc9: 0x000a, 0x1cca: 0x000a, 0x1ccb: 0x000a, 0x1ccc: 0x000a, 0x1ccd: 0x000a, 0x1cce: 0x000a, 0x1ccf: 0x000a, 0x1cd0: 0x000a, 0x1cd1: 0x000a, 0x1cd2: 0x000a, 0x1cd3: 0x000a, 0x1cd4: 0x000a, 0x1cd5: 0x000a, 0x1cd6: 0x000a, 0x1cd7: 0x000a, 0x1cd8: 0x000a, 0x1cd9: 0x000a, 
0x1cda: 0x000a, 0x1cdb: 0x000a, 0x1cdc: 0x000a, 0x1cdd: 0x000a, 0x1cde: 0x000a, 0x1cdf: 0x000a, 0x1ce0: 0x000a, 0x1ce1: 0x000a, 0x1ce2: 0x000a, 0x1ce3: 0x000a, // Block 0x74, offset 0x1d00 0x1d1d: 0x000a, 0x1d1e: 0x000a, // Block 0x75, offset 0x1d40 0x1d50: 0x000a, 0x1d51: 0x000a, 0x1d52: 0x000a, 0x1d53: 0x000a, 0x1d54: 0x000a, 0x1d55: 0x000a, 0x1d56: 0x000a, 0x1d57: 0x000a, 0x1d58: 0x000a, 0x1d59: 0x000a, 0x1d5a: 0x000a, 0x1d5b: 0x000a, 0x1d5c: 0x000a, 0x1d5d: 0x000a, 0x1d5e: 0x000a, 0x1d5f: 0x000a, 0x1d7c: 0x000a, 0x1d7d: 0x000a, 0x1d7e: 0x000a, // Block 0x76, offset 0x1d80 0x1db1: 0x000a, 0x1db2: 0x000a, 0x1db3: 0x000a, 0x1db4: 0x000a, 0x1db5: 0x000a, 0x1db6: 0x000a, 0x1db7: 0x000a, 0x1db8: 0x000a, 0x1db9: 0x000a, 0x1dba: 0x000a, 0x1dbb: 0x000a, 0x1dbc: 0x000a, 0x1dbd: 0x000a, 0x1dbe: 0x000a, 0x1dbf: 0x000a, // Block 0x77, offset 0x1dc0 0x1dcc: 0x000a, 0x1dcd: 0x000a, 0x1dce: 0x000a, 0x1dcf: 0x000a, // Block 0x78, offset 0x1e00 0x1e37: 0x000a, 0x1e38: 0x000a, 0x1e39: 0x000a, 0x1e3a: 0x000a, // Block 0x79, offset 0x1e40 0x1e5e: 0x000a, 0x1e5f: 0x000a, 0x1e7f: 0x000a, // Block 0x7a, offset 0x1e80 0x1e90: 0x000a, 0x1e91: 0x000a, 0x1e92: 0x000a, 0x1e93: 0x000a, 0x1e94: 0x000a, 0x1e95: 0x000a, 0x1e96: 0x000a, 0x1e97: 0x000a, 0x1e98: 0x000a, 0x1e99: 0x000a, 0x1e9a: 0x000a, 0x1e9b: 0x000a, 0x1e9c: 0x000a, 0x1e9d: 0x000a, 0x1e9e: 0x000a, 0x1e9f: 0x000a, 0x1ea0: 0x000a, 0x1ea1: 0x000a, 0x1ea2: 0x000a, 0x1ea3: 0x000a, 0x1ea4: 0x000a, 0x1ea5: 0x000a, 0x1ea6: 0x000a, 0x1ea7: 0x000a, 0x1ea8: 0x000a, 0x1ea9: 0x000a, 0x1eaa: 0x000a, 0x1eab: 0x000a, 0x1eac: 0x000a, 0x1ead: 0x000a, 0x1eae: 0x000a, 0x1eaf: 0x000a, 0x1eb0: 0x000a, 0x1eb1: 0x000a, 0x1eb2: 0x000a, 0x1eb3: 0x000a, 0x1eb4: 0x000a, 0x1eb5: 0x000a, 0x1eb6: 0x000a, 0x1eb7: 0x000a, 0x1eb8: 0x000a, 0x1eb9: 0x000a, 0x1eba: 0x000a, 0x1ebb: 0x000a, 0x1ebc: 0x000a, 0x1ebd: 0x000a, 0x1ebe: 0x000a, 0x1ebf: 0x000a, // Block 0x7b, offset 0x1ec0 0x1ec0: 0x000a, 0x1ec1: 0x000a, 0x1ec2: 0x000a, 0x1ec3: 0x000a, 0x1ec4: 0x000a, 0x1ec5: 0x000a, 0x1ec6: 0x000a, // Block 0x7c, offset 0x1f00 0x1f0d: 0x000a, 0x1f0e: 0x000a, 0x1f0f: 0x000a, // Block 0x7d, offset 0x1f40 0x1f6f: 0x000c, 0x1f70: 0x000c, 0x1f71: 0x000c, 0x1f72: 0x000c, 0x1f73: 0x000a, 0x1f74: 0x000c, 0x1f75: 0x000c, 0x1f76: 0x000c, 0x1f77: 0x000c, 0x1f78: 0x000c, 0x1f79: 0x000c, 0x1f7a: 0x000c, 0x1f7b: 0x000c, 0x1f7c: 0x000c, 0x1f7d: 0x000c, 0x1f7e: 0x000a, 0x1f7f: 0x000a, // Block 0x7e, offset 0x1f80 0x1f9e: 0x000c, 0x1f9f: 0x000c, // Block 0x7f, offset 0x1fc0 0x1ff0: 0x000c, 0x1ff1: 0x000c, // Block 0x80, offset 0x2000 0x2000: 0x000a, 0x2001: 0x000a, 0x2002: 0x000a, 0x2003: 0x000a, 0x2004: 0x000a, 0x2005: 0x000a, 0x2006: 0x000a, 0x2007: 0x000a, 0x2008: 0x000a, 0x2009: 0x000a, 0x200a: 0x000a, 0x200b: 0x000a, 0x200c: 0x000a, 0x200d: 0x000a, 0x200e: 0x000a, 0x200f: 0x000a, 0x2010: 0x000a, 0x2011: 0x000a, 0x2012: 0x000a, 0x2013: 0x000a, 0x2014: 0x000a, 0x2015: 0x000a, 0x2016: 0x000a, 0x2017: 0x000a, 0x2018: 0x000a, 0x2019: 0x000a, 0x201a: 0x000a, 0x201b: 0x000a, 0x201c: 0x000a, 0x201d: 0x000a, 0x201e: 0x000a, 0x201f: 0x000a, 0x2020: 0x000a, 0x2021: 0x000a, // Block 0x81, offset 0x2040 0x2048: 0x000a, // Block 0x82, offset 0x2080 0x2082: 0x000c, 0x2086: 0x000c, 0x208b: 0x000c, 0x20a5: 0x000c, 0x20a6: 0x000c, 0x20a8: 0x000a, 0x20a9: 0x000a, 0x20aa: 0x000a, 0x20ab: 0x000a, 0x20b8: 0x0004, 0x20b9: 0x0004, // Block 0x83, offset 0x20c0 0x20f4: 0x000a, 0x20f5: 0x000a, 0x20f6: 0x000a, 0x20f7: 0x000a, // Block 0x84, offset 0x2100 0x2104: 0x000c, 0x2105: 0x000c, 0x2120: 0x000c, 0x2121: 0x000c, 0x2122: 0x000c, 
0x2123: 0x000c, 0x2124: 0x000c, 0x2125: 0x000c, 0x2126: 0x000c, 0x2127: 0x000c, 0x2128: 0x000c, 0x2129: 0x000c, 0x212a: 0x000c, 0x212b: 0x000c, 0x212c: 0x000c, 0x212d: 0x000c, 0x212e: 0x000c, 0x212f: 0x000c, 0x2130: 0x000c, 0x2131: 0x000c, // Block 0x85, offset 0x2140 0x2166: 0x000c, 0x2167: 0x000c, 0x2168: 0x000c, 0x2169: 0x000c, 0x216a: 0x000c, 0x216b: 0x000c, 0x216c: 0x000c, 0x216d: 0x000c, // Block 0x86, offset 0x2180 0x2187: 0x000c, 0x2188: 0x000c, 0x2189: 0x000c, 0x218a: 0x000c, 0x218b: 0x000c, 0x218c: 0x000c, 0x218d: 0x000c, 0x218e: 0x000c, 0x218f: 0x000c, 0x2190: 0x000c, 0x2191: 0x000c, // Block 0x87, offset 0x21c0 0x21c0: 0x000c, 0x21c1: 0x000c, 0x21c2: 0x000c, 0x21f3: 0x000c, 0x21f6: 0x000c, 0x21f7: 0x000c, 0x21f8: 0x000c, 0x21f9: 0x000c, 0x21fc: 0x000c, // Block 0x88, offset 0x2200 0x2225: 0x000c, // Block 0x89, offset 0x2240 0x2269: 0x000c, 0x226a: 0x000c, 0x226b: 0x000c, 0x226c: 0x000c, 0x226d: 0x000c, 0x226e: 0x000c, 0x2271: 0x000c, 0x2272: 0x000c, 0x2275: 0x000c, 0x2276: 0x000c, // Block 0x8a, offset 0x2280 0x2283: 0x000c, 0x228c: 0x000c, 0x22bc: 0x000c, // Block 0x8b, offset 0x22c0 0x22f0: 0x000c, 0x22f2: 0x000c, 0x22f3: 0x000c, 0x22f4: 0x000c, 0x22f7: 0x000c, 0x22f8: 0x000c, 0x22fe: 0x000c, 0x22ff: 0x000c, // Block 0x8c, offset 0x2300 0x2301: 0x000c, 0x232c: 0x000c, 0x232d: 0x000c, 0x2336: 0x000c, // Block 0x8d, offset 0x2340 0x2365: 0x000c, 0x2368: 0x000c, 0x236d: 0x000c, // Block 0x8e, offset 0x2380 0x239d: 0x0001, 0x239e: 0x000c, 0x239f: 0x0001, 0x23a0: 0x0001, 0x23a1: 0x0001, 0x23a2: 0x0001, 0x23a3: 0x0001, 0x23a4: 0x0001, 0x23a5: 0x0001, 0x23a6: 0x0001, 0x23a7: 0x0001, 0x23a8: 0x0001, 0x23a9: 0x0003, 0x23aa: 0x0001, 0x23ab: 0x0001, 0x23ac: 0x0001, 0x23ad: 0x0001, 0x23ae: 0x0001, 0x23af: 0x0001, 0x23b0: 0x0001, 0x23b1: 0x0001, 0x23b2: 0x0001, 0x23b3: 0x0001, 0x23b4: 0x0001, 0x23b5: 0x0001, 0x23b6: 0x0001, 0x23b7: 0x0001, 0x23b8: 0x0001, 0x23b9: 0x0001, 0x23ba: 0x0001, 0x23bb: 0x0001, 0x23bc: 0x0001, 0x23bd: 0x0001, 0x23be: 0x0001, 0x23bf: 0x0001, // Block 0x8f, offset 0x23c0 0x23c0: 0x0001, 0x23c1: 0x0001, 0x23c2: 0x0001, 0x23c3: 0x0001, 0x23c4: 0x0001, 0x23c5: 0x0001, 0x23c6: 0x0001, 0x23c7: 0x0001, 0x23c8: 0x0001, 0x23c9: 0x0001, 0x23ca: 0x0001, 0x23cb: 0x0001, 0x23cc: 0x0001, 0x23cd: 0x0001, 0x23ce: 0x0001, 0x23cf: 0x0001, 0x23d0: 0x000d, 0x23d1: 0x000d, 0x23d2: 0x000d, 0x23d3: 0x000d, 0x23d4: 0x000d, 0x23d5: 0x000d, 0x23d6: 0x000d, 0x23d7: 0x000d, 0x23d8: 0x000d, 0x23d9: 0x000d, 0x23da: 0x000d, 0x23db: 0x000d, 0x23dc: 0x000d, 0x23dd: 0x000d, 0x23de: 0x000d, 0x23df: 0x000d, 0x23e0: 0x000d, 0x23e1: 0x000d, 0x23e2: 0x000d, 0x23e3: 0x000d, 0x23e4: 0x000d, 0x23e5: 0x000d, 0x23e6: 0x000d, 0x23e7: 0x000d, 0x23e8: 0x000d, 0x23e9: 0x000d, 0x23ea: 0x000d, 0x23eb: 0x000d, 0x23ec: 0x000d, 0x23ed: 0x000d, 0x23ee: 0x000d, 0x23ef: 0x000d, 0x23f0: 0x000d, 0x23f1: 0x000d, 0x23f2: 0x000d, 0x23f3: 0x000d, 0x23f4: 0x000d, 0x23f5: 0x000d, 0x23f6: 0x000d, 0x23f7: 0x000d, 0x23f8: 0x000d, 0x23f9: 0x000d, 0x23fa: 0x000d, 0x23fb: 0x000d, 0x23fc: 0x000d, 0x23fd: 0x000d, 0x23fe: 0x000d, 0x23ff: 0x000d, // Block 0x90, offset 0x2400 0x2400: 0x000d, 0x2401: 0x000d, 0x2402: 0x000d, 0x2403: 0x000d, 0x2404: 0x000d, 0x2405: 0x000d, 0x2406: 0x000d, 0x2407: 0x000d, 0x2408: 0x000d, 0x2409: 0x000d, 0x240a: 0x000d, 0x240b: 0x000d, 0x240c: 0x000d, 0x240d: 0x000d, 0x240e: 0x000d, 0x240f: 0x000d, 0x2410: 0x000d, 0x2411: 0x000d, 0x2412: 0x000d, 0x2413: 0x000d, 0x2414: 0x000d, 0x2415: 0x000d, 0x2416: 0x000d, 0x2417: 0x000d, 0x2418: 0x000d, 0x2419: 0x000d, 0x241a: 0x000d, 0x241b: 0x000d, 0x241c: 0x000d, 
0x241d: 0x000d, 0x241e: 0x000d, 0x241f: 0x000d, 0x2420: 0x000d, 0x2421: 0x000d, 0x2422: 0x000d, 0x2423: 0x000d, 0x2424: 0x000d, 0x2425: 0x000d, 0x2426: 0x000d, 0x2427: 0x000d, 0x2428: 0x000d, 0x2429: 0x000d, 0x242a: 0x000d, 0x242b: 0x000d, 0x242c: 0x000d, 0x242d: 0x000d, 0x242e: 0x000d, 0x242f: 0x000d, 0x2430: 0x000d, 0x2431: 0x000d, 0x2432: 0x000d, 0x2433: 0x000d, 0x2434: 0x000d, 0x2435: 0x000d, 0x2436: 0x000d, 0x2437: 0x000d, 0x2438: 0x000d, 0x2439: 0x000d, 0x243a: 0x000d, 0x243b: 0x000d, 0x243c: 0x000d, 0x243d: 0x000d, 0x243e: 0x000a, 0x243f: 0x000a, // Block 0x91, offset 0x2440 0x2440: 0x000d, 0x2441: 0x000d, 0x2442: 0x000d, 0x2443: 0x000d, 0x2444: 0x000d, 0x2445: 0x000d, 0x2446: 0x000d, 0x2447: 0x000d, 0x2448: 0x000d, 0x2449: 0x000d, 0x244a: 0x000d, 0x244b: 0x000d, 0x244c: 0x000d, 0x244d: 0x000d, 0x244e: 0x000d, 0x244f: 0x000d, 0x2450: 0x000b, 0x2451: 0x000b, 0x2452: 0x000b, 0x2453: 0x000b, 0x2454: 0x000b, 0x2455: 0x000b, 0x2456: 0x000b, 0x2457: 0x000b, 0x2458: 0x000b, 0x2459: 0x000b, 0x245a: 0x000b, 0x245b: 0x000b, 0x245c: 0x000b, 0x245d: 0x000b, 0x245e: 0x000b, 0x245f: 0x000b, 0x2460: 0x000b, 0x2461: 0x000b, 0x2462: 0x000b, 0x2463: 0x000b, 0x2464: 0x000b, 0x2465: 0x000b, 0x2466: 0x000b, 0x2467: 0x000b, 0x2468: 0x000b, 0x2469: 0x000b, 0x246a: 0x000b, 0x246b: 0x000b, 0x246c: 0x000b, 0x246d: 0x000b, 0x246e: 0x000b, 0x246f: 0x000b, 0x2470: 0x000d, 0x2471: 0x000d, 0x2472: 0x000d, 0x2473: 0x000d, 0x2474: 0x000d, 0x2475: 0x000d, 0x2476: 0x000d, 0x2477: 0x000d, 0x2478: 0x000d, 0x2479: 0x000d, 0x247a: 0x000d, 0x247b: 0x000d, 0x247c: 0x000d, 0x247d: 0x000a, 0x247e: 0x000d, 0x247f: 0x000d, // Block 0x92, offset 0x2480 0x2480: 0x000c, 0x2481: 0x000c, 0x2482: 0x000c, 0x2483: 0x000c, 0x2484: 0x000c, 0x2485: 0x000c, 0x2486: 0x000c, 0x2487: 0x000c, 0x2488: 0x000c, 0x2489: 0x000c, 0x248a: 0x000c, 0x248b: 0x000c, 0x248c: 0x000c, 0x248d: 0x000c, 0x248e: 0x000c, 0x248f: 0x000c, 0x2490: 0x000a, 0x2491: 0x000a, 0x2492: 0x000a, 0x2493: 0x000a, 0x2494: 0x000a, 0x2495: 0x000a, 0x2496: 0x000a, 0x2497: 0x000a, 0x2498: 0x000a, 0x2499: 0x000a, 0x24a0: 0x000c, 0x24a1: 0x000c, 0x24a2: 0x000c, 0x24a3: 0x000c, 0x24a4: 0x000c, 0x24a5: 0x000c, 0x24a6: 0x000c, 0x24a7: 0x000c, 0x24a8: 0x000c, 0x24a9: 0x000c, 0x24aa: 0x000c, 0x24ab: 0x000c, 0x24ac: 0x000c, 0x24ad: 0x000c, 0x24ae: 0x000c, 0x24af: 0x000c, 0x24b0: 0x000a, 0x24b1: 0x000a, 0x24b2: 0x000a, 0x24b3: 0x000a, 0x24b4: 0x000a, 0x24b5: 0x000a, 0x24b6: 0x000a, 0x24b7: 0x000a, 0x24b8: 0x000a, 0x24b9: 0x000a, 0x24ba: 0x000a, 0x24bb: 0x000a, 0x24bc: 0x000a, 0x24bd: 0x000a, 0x24be: 0x000a, 0x24bf: 0x000a, // Block 0x93, offset 0x24c0 0x24c0: 0x000a, 0x24c1: 0x000a, 0x24c2: 0x000a, 0x24c3: 0x000a, 0x24c4: 0x000a, 0x24c5: 0x000a, 0x24c6: 0x000a, 0x24c7: 0x000a, 0x24c8: 0x000a, 0x24c9: 0x000a, 0x24ca: 0x000a, 0x24cb: 0x000a, 0x24cc: 0x000a, 0x24cd: 0x000a, 0x24ce: 0x000a, 0x24cf: 0x000a, 0x24d0: 0x0006, 0x24d1: 0x000a, 0x24d2: 0x0006, 0x24d4: 0x000a, 0x24d5: 0x0006, 0x24d6: 0x000a, 0x24d7: 0x000a, 0x24d8: 0x000a, 0x24d9: 0x009a, 0x24da: 0x008a, 0x24db: 0x007a, 0x24dc: 0x006a, 0x24dd: 0x009a, 0x24de: 0x008a, 0x24df: 0x0004, 0x24e0: 0x000a, 0x24e1: 0x000a, 0x24e2: 0x0003, 0x24e3: 0x0003, 0x24e4: 0x000a, 0x24e5: 0x000a, 0x24e6: 0x000a, 0x24e8: 0x000a, 0x24e9: 0x0004, 0x24ea: 0x0004, 0x24eb: 0x000a, 0x24f0: 0x000d, 0x24f1: 0x000d, 0x24f2: 0x000d, 0x24f3: 0x000d, 0x24f4: 0x000d, 0x24f5: 0x000d, 0x24f6: 0x000d, 0x24f7: 0x000d, 0x24f8: 0x000d, 0x24f9: 0x000d, 0x24fa: 0x000d, 0x24fb: 0x000d, 0x24fc: 0x000d, 0x24fd: 0x000d, 0x24fe: 0x000d, 0x24ff: 0x000d, // Block 0x94, offset 
0x2500 0x2500: 0x000d, 0x2501: 0x000d, 0x2502: 0x000d, 0x2503: 0x000d, 0x2504: 0x000d, 0x2505: 0x000d, 0x2506: 0x000d, 0x2507: 0x000d, 0x2508: 0x000d, 0x2509: 0x000d, 0x250a: 0x000d, 0x250b: 0x000d, 0x250c: 0x000d, 0x250d: 0x000d, 0x250e: 0x000d, 0x250f: 0x000d, 0x2510: 0x000d, 0x2511: 0x000d, 0x2512: 0x000d, 0x2513: 0x000d, 0x2514: 0x000d, 0x2515: 0x000d, 0x2516: 0x000d, 0x2517: 0x000d, 0x2518: 0x000d, 0x2519: 0x000d, 0x251a: 0x000d, 0x251b: 0x000d, 0x251c: 0x000d, 0x251d: 0x000d, 0x251e: 0x000d, 0x251f: 0x000d, 0x2520: 0x000d, 0x2521: 0x000d, 0x2522: 0x000d, 0x2523: 0x000d, 0x2524: 0x000d, 0x2525: 0x000d, 0x2526: 0x000d, 0x2527: 0x000d, 0x2528: 0x000d, 0x2529: 0x000d, 0x252a: 0x000d, 0x252b: 0x000d, 0x252c: 0x000d, 0x252d: 0x000d, 0x252e: 0x000d, 0x252f: 0x000d, 0x2530: 0x000d, 0x2531: 0x000d, 0x2532: 0x000d, 0x2533: 0x000d, 0x2534: 0x000d, 0x2535: 0x000d, 0x2536: 0x000d, 0x2537: 0x000d, 0x2538: 0x000d, 0x2539: 0x000d, 0x253a: 0x000d, 0x253b: 0x000d, 0x253c: 0x000d, 0x253d: 0x000d, 0x253e: 0x000d, 0x253f: 0x000b, // Block 0x95, offset 0x2540 0x2541: 0x000a, 0x2542: 0x000a, 0x2543: 0x0004, 0x2544: 0x0004, 0x2545: 0x0004, 0x2546: 0x000a, 0x2547: 0x000a, 0x2548: 0x003a, 0x2549: 0x002a, 0x254a: 0x000a, 0x254b: 0x0003, 0x254c: 0x0006, 0x254d: 0x0003, 0x254e: 0x0006, 0x254f: 0x0006, 0x2550: 0x0002, 0x2551: 0x0002, 0x2552: 0x0002, 0x2553: 0x0002, 0x2554: 0x0002, 0x2555: 0x0002, 0x2556: 0x0002, 0x2557: 0x0002, 0x2558: 0x0002, 0x2559: 0x0002, 0x255a: 0x0006, 0x255b: 0x000a, 0x255c: 0x000a, 0x255d: 0x000a, 0x255e: 0x000a, 0x255f: 0x000a, 0x2560: 0x000a, 0x257b: 0x005a, 0x257c: 0x000a, 0x257d: 0x004a, 0x257e: 0x000a, 0x257f: 0x000a, // Block 0x96, offset 0x2580 0x2580: 0x000a, 0x259b: 0x005a, 0x259c: 0x000a, 0x259d: 0x004a, 0x259e: 0x000a, 0x259f: 0x00fa, 0x25a0: 0x00ea, 0x25a1: 0x000a, 0x25a2: 0x003a, 0x25a3: 0x002a, 0x25a4: 0x000a, 0x25a5: 0x000a, // Block 0x97, offset 0x25c0 0x25e0: 0x0004, 0x25e1: 0x0004, 0x25e2: 0x000a, 0x25e3: 0x000a, 0x25e4: 0x000a, 0x25e5: 0x0004, 0x25e6: 0x0004, 0x25e8: 0x000a, 0x25e9: 0x000a, 0x25ea: 0x000a, 0x25eb: 0x000a, 0x25ec: 0x000a, 0x25ed: 0x000a, 0x25ee: 0x000a, 0x25f0: 0x000b, 0x25f1: 0x000b, 0x25f2: 0x000b, 0x25f3: 0x000b, 0x25f4: 0x000b, 0x25f5: 0x000b, 0x25f6: 0x000b, 0x25f7: 0x000b, 0x25f8: 0x000b, 0x25f9: 0x000a, 0x25fa: 0x000a, 0x25fb: 0x000a, 0x25fc: 0x000a, 0x25fd: 0x000a, 0x25fe: 0x000b, 0x25ff: 0x000b, // Block 0x98, offset 0x2600 0x2601: 0x000a, // Block 0x99, offset 0x2640 0x2640: 0x000a, 0x2641: 0x000a, 0x2642: 0x000a, 0x2643: 0x000a, 0x2644: 0x000a, 0x2645: 0x000a, 0x2646: 0x000a, 0x2647: 0x000a, 0x2648: 0x000a, 0x2649: 0x000a, 0x264a: 0x000a, 0x264b: 0x000a, 0x264c: 0x000a, 0x2650: 0x000a, 0x2651: 0x000a, 0x2652: 0x000a, 0x2653: 0x000a, 0x2654: 0x000a, 0x2655: 0x000a, 0x2656: 0x000a, 0x2657: 0x000a, 0x2658: 0x000a, 0x2659: 0x000a, 0x265a: 0x000a, 0x265b: 0x000a, 0x2660: 0x000a, // Block 0x9a, offset 0x2680 0x26bd: 0x000c, // Block 0x9b, offset 0x26c0 0x26e0: 0x000c, 0x26e1: 0x0002, 0x26e2: 0x0002, 0x26e3: 0x0002, 0x26e4: 0x0002, 0x26e5: 0x0002, 0x26e6: 0x0002, 0x26e7: 0x0002, 0x26e8: 0x0002, 0x26e9: 0x0002, 0x26ea: 0x0002, 0x26eb: 0x0002, 0x26ec: 0x0002, 0x26ed: 0x0002, 0x26ee: 0x0002, 0x26ef: 0x0002, 0x26f0: 0x0002, 0x26f1: 0x0002, 0x26f2: 0x0002, 0x26f3: 0x0002, 0x26f4: 0x0002, 0x26f5: 0x0002, 0x26f6: 0x0002, 0x26f7: 0x0002, 0x26f8: 0x0002, 0x26f9: 0x0002, 0x26fa: 0x0002, 0x26fb: 0x0002, // Block 0x9c, offset 0x2700 0x2736: 0x000c, 0x2737: 0x000c, 0x2738: 0x000c, 0x2739: 0x000c, 0x273a: 0x000c, // Block 0x9d, offset 0x2740 0x2740: 0x0001, 
0x2741: 0x0001, 0x2742: 0x0001, 0x2743: 0x0001, 0x2744: 0x0001, 0x2745: 0x0001, 0x2746: 0x0001, 0x2747: 0x0001, 0x2748: 0x0001, 0x2749: 0x0001, 0x274a: 0x0001, 0x274b: 0x0001, 0x274c: 0x0001, 0x274d: 0x0001, 0x274e: 0x0001, 0x274f: 0x0001, 0x2750: 0x0001, 0x2751: 0x0001, 0x2752: 0x0001, 0x2753: 0x0001, 0x2754: 0x0001, 0x2755: 0x0001, 0x2756: 0x0001, 0x2757: 0x0001, 0x2758: 0x0001, 0x2759: 0x0001, 0x275a: 0x0001, 0x275b: 0x0001, 0x275c: 0x0001, 0x275d: 0x0001, 0x275e: 0x0001, 0x275f: 0x0001, 0x2760: 0x0001, 0x2761: 0x0001, 0x2762: 0x0001, 0x2763: 0x0001, 0x2764: 0x0001, 0x2765: 0x0001, 0x2766: 0x0001, 0x2767: 0x0001, 0x2768: 0x0001, 0x2769: 0x0001, 0x276a: 0x0001, 0x276b: 0x0001, 0x276c: 0x0001, 0x276d: 0x0001, 0x276e: 0x0001, 0x276f: 0x0001, 0x2770: 0x0001, 0x2771: 0x0001, 0x2772: 0x0001, 0x2773: 0x0001, 0x2774: 0x0001, 0x2775: 0x0001, 0x2776: 0x0001, 0x2777: 0x0001, 0x2778: 0x0001, 0x2779: 0x0001, 0x277a: 0x0001, 0x277b: 0x0001, 0x277c: 0x0001, 0x277d: 0x0001, 0x277e: 0x0001, 0x277f: 0x0001, // Block 0x9e, offset 0x2780 0x2780: 0x0001, 0x2781: 0x0001, 0x2782: 0x0001, 0x2783: 0x0001, 0x2784: 0x0001, 0x2785: 0x0001, 0x2786: 0x0001, 0x2787: 0x0001, 0x2788: 0x0001, 0x2789: 0x0001, 0x278a: 0x0001, 0x278b: 0x0001, 0x278c: 0x0001, 0x278d: 0x0001, 0x278e: 0x0001, 0x278f: 0x0001, 0x2790: 0x0001, 0x2791: 0x0001, 0x2792: 0x0001, 0x2793: 0x0001, 0x2794: 0x0001, 0x2795: 0x0001, 0x2796: 0x0001, 0x2797: 0x0001, 0x2798: 0x0001, 0x2799: 0x0001, 0x279a: 0x0001, 0x279b: 0x0001, 0x279c: 0x0001, 0x279d: 0x0001, 0x279e: 0x0001, 0x279f: 0x000a, 0x27a0: 0x0001, 0x27a1: 0x0001, 0x27a2: 0x0001, 0x27a3: 0x0001, 0x27a4: 0x0001, 0x27a5: 0x0001, 0x27a6: 0x0001, 0x27a7: 0x0001, 0x27a8: 0x0001, 0x27a9: 0x0001, 0x27aa: 0x0001, 0x27ab: 0x0001, 0x27ac: 0x0001, 0x27ad: 0x0001, 0x27ae: 0x0001, 0x27af: 0x0001, 0x27b0: 0x0001, 0x27b1: 0x0001, 0x27b2: 0x0001, 0x27b3: 0x0001, 0x27b4: 0x0001, 0x27b5: 0x0001, 0x27b6: 0x0001, 0x27b7: 0x0001, 0x27b8: 0x0001, 0x27b9: 0x0001, 0x27ba: 0x0001, 0x27bb: 0x0001, 0x27bc: 0x0001, 0x27bd: 0x0001, 0x27be: 0x0001, 0x27bf: 0x0001, // Block 0x9f, offset 0x27c0 0x27c0: 0x0001, 0x27c1: 0x000c, 0x27c2: 0x000c, 0x27c3: 0x000c, 0x27c4: 0x0001, 0x27c5: 0x000c, 0x27c6: 0x000c, 0x27c7: 0x0001, 0x27c8: 0x0001, 0x27c9: 0x0001, 0x27ca: 0x0001, 0x27cb: 0x0001, 0x27cc: 0x000c, 0x27cd: 0x000c, 0x27ce: 0x000c, 0x27cf: 0x000c, 0x27d0: 0x0001, 0x27d1: 0x0001, 0x27d2: 0x0001, 0x27d3: 0x0001, 0x27d4: 0x0001, 0x27d5: 0x0001, 0x27d6: 0x0001, 0x27d7: 0x0001, 0x27d8: 0x0001, 0x27d9: 0x0001, 0x27da: 0x0001, 0x27db: 0x0001, 0x27dc: 0x0001, 0x27dd: 0x0001, 0x27de: 0x0001, 0x27df: 0x0001, 0x27e0: 0x0001, 0x27e1: 0x0001, 0x27e2: 0x0001, 0x27e3: 0x0001, 0x27e4: 0x0001, 0x27e5: 0x0001, 0x27e6: 0x0001, 0x27e7: 0x0001, 0x27e8: 0x0001, 0x27e9: 0x0001, 0x27ea: 0x0001, 0x27eb: 0x0001, 0x27ec: 0x0001, 0x27ed: 0x0001, 0x27ee: 0x0001, 0x27ef: 0x0001, 0x27f0: 0x0001, 0x27f1: 0x0001, 0x27f2: 0x0001, 0x27f3: 0x0001, 0x27f4: 0x0001, 0x27f5: 0x0001, 0x27f6: 0x0001, 0x27f7: 0x0001, 0x27f8: 0x000c, 0x27f9: 0x000c, 0x27fa: 0x000c, 0x27fb: 0x0001, 0x27fc: 0x0001, 0x27fd: 0x0001, 0x27fe: 0x0001, 0x27ff: 0x000c, // Block 0xa0, offset 0x2800 0x2800: 0x0001, 0x2801: 0x0001, 0x2802: 0x0001, 0x2803: 0x0001, 0x2804: 0x0001, 0x2805: 0x0001, 0x2806: 0x0001, 0x2807: 0x0001, 0x2808: 0x0001, 0x2809: 0x0001, 0x280a: 0x0001, 0x280b: 0x0001, 0x280c: 0x0001, 0x280d: 0x0001, 0x280e: 0x0001, 0x280f: 0x0001, 0x2810: 0x0001, 0x2811: 0x0001, 0x2812: 0x0001, 0x2813: 0x0001, 0x2814: 0x0001, 0x2815: 0x0001, 0x2816: 0x0001, 0x2817: 0x0001, 0x2818: 0x0001, 0x2819: 
0x0001, 0x281a: 0x0001, 0x281b: 0x0001, 0x281c: 0x0001, 0x281d: 0x0001, 0x281e: 0x0001, 0x281f: 0x0001, 0x2820: 0x0001, 0x2821: 0x0001, 0x2822: 0x0001, 0x2823: 0x0001, 0x2824: 0x0001, 0x2825: 0x000c, 0x2826: 0x000c, 0x2827: 0x0001, 0x2828: 0x0001, 0x2829: 0x0001, 0x282a: 0x0001, 0x282b: 0x0001, 0x282c: 0x0001, 0x282d: 0x0001, 0x282e: 0x0001, 0x282f: 0x0001, 0x2830: 0x0001, 0x2831: 0x0001, 0x2832: 0x0001, 0x2833: 0x0001, 0x2834: 0x0001, 0x2835: 0x0001, 0x2836: 0x0001, 0x2837: 0x0001, 0x2838: 0x0001, 0x2839: 0x0001, 0x283a: 0x0001, 0x283b: 0x0001, 0x283c: 0x0001, 0x283d: 0x0001, 0x283e: 0x0001, 0x283f: 0x0001, // Block 0xa1, offset 0x2840 0x2840: 0x0001, 0x2841: 0x0001, 0x2842: 0x0001, 0x2843: 0x0001, 0x2844: 0x0001, 0x2845: 0x0001, 0x2846: 0x0001, 0x2847: 0x0001, 0x2848: 0x0001, 0x2849: 0x0001, 0x284a: 0x0001, 0x284b: 0x0001, 0x284c: 0x0001, 0x284d: 0x0001, 0x284e: 0x0001, 0x284f: 0x0001, 0x2850: 0x0001, 0x2851: 0x0001, 0x2852: 0x0001, 0x2853: 0x0001, 0x2854: 0x0001, 0x2855: 0x0001, 0x2856: 0x0001, 0x2857: 0x0001, 0x2858: 0x0001, 0x2859: 0x0001, 0x285a: 0x0001, 0x285b: 0x0001, 0x285c: 0x0001, 0x285d: 0x0001, 0x285e: 0x0001, 0x285f: 0x0001, 0x2860: 0x0001, 0x2861: 0x0001, 0x2862: 0x0001, 0x2863: 0x0001, 0x2864: 0x0001, 0x2865: 0x0001, 0x2866: 0x0001, 0x2867: 0x0001, 0x2868: 0x0001, 0x2869: 0x0001, 0x286a: 0x0001, 0x286b: 0x0001, 0x286c: 0x0001, 0x286d: 0x0001, 0x286e: 0x0001, 0x286f: 0x0001, 0x2870: 0x0001, 0x2871: 0x0001, 0x2872: 0x0001, 0x2873: 0x0001, 0x2874: 0x0001, 0x2875: 0x0001, 0x2876: 0x0001, 0x2877: 0x0001, 0x2878: 0x0001, 0x2879: 0x000a, 0x287a: 0x000a, 0x287b: 0x000a, 0x287c: 0x000a, 0x287d: 0x000a, 0x287e: 0x000a, 0x287f: 0x000a, // Block 0xa2, offset 0x2880 0x2880: 0x0001, 0x2881: 0x0001, 0x2882: 0x0001, 0x2883: 0x0001, 0x2884: 0x0001, 0x2885: 0x0001, 0x2886: 0x0001, 0x2887: 0x0001, 0x2888: 0x0001, 0x2889: 0x0001, 0x288a: 0x0001, 0x288b: 0x0001, 0x288c: 0x0001, 0x288d: 0x0001, 0x288e: 0x0001, 0x288f: 0x0001, 0x2890: 0x0001, 0x2891: 0x0001, 0x2892: 0x0001, 0x2893: 0x0001, 0x2894: 0x0001, 0x2895: 0x0001, 0x2896: 0x0001, 0x2897: 0x0001, 0x2898: 0x0001, 0x2899: 0x0001, 0x289a: 0x0001, 0x289b: 0x0001, 0x289c: 0x0001, 0x289d: 0x0001, 0x289e: 0x0001, 0x289f: 0x0001, 0x28a0: 0x0005, 0x28a1: 0x0005, 0x28a2: 0x0005, 0x28a3: 0x0005, 0x28a4: 0x0005, 0x28a5: 0x0005, 0x28a6: 0x0005, 0x28a7: 0x0005, 0x28a8: 0x0005, 0x28a9: 0x0005, 0x28aa: 0x0005, 0x28ab: 0x0005, 0x28ac: 0x0005, 0x28ad: 0x0005, 0x28ae: 0x0005, 0x28af: 0x0005, 0x28b0: 0x0005, 0x28b1: 0x0005, 0x28b2: 0x0005, 0x28b3: 0x0005, 0x28b4: 0x0005, 0x28b5: 0x0005, 0x28b6: 0x0005, 0x28b7: 0x0005, 0x28b8: 0x0005, 0x28b9: 0x0005, 0x28ba: 0x0005, 0x28bb: 0x0005, 0x28bc: 0x0005, 0x28bd: 0x0005, 0x28be: 0x0005, 0x28bf: 0x0001, // Block 0xa3, offset 0x28c0 0x28c1: 0x000c, 0x28f8: 0x000c, 0x28f9: 0x000c, 0x28fa: 0x000c, 0x28fb: 0x000c, 0x28fc: 0x000c, 0x28fd: 0x000c, 0x28fe: 0x000c, 0x28ff: 0x000c, // Block 0xa4, offset 0x2900 0x2900: 0x000c, 0x2901: 0x000c, 0x2902: 0x000c, 0x2903: 0x000c, 0x2904: 0x000c, 0x2905: 0x000c, 0x2906: 0x000c, 0x2912: 0x000a, 0x2913: 0x000a, 0x2914: 0x000a, 0x2915: 0x000a, 0x2916: 0x000a, 0x2917: 0x000a, 0x2918: 0x000a, 0x2919: 0x000a, 0x291a: 0x000a, 0x291b: 0x000a, 0x291c: 0x000a, 0x291d: 0x000a, 0x291e: 0x000a, 0x291f: 0x000a, 0x2920: 0x000a, 0x2921: 0x000a, 0x2922: 0x000a, 0x2923: 0x000a, 0x2924: 0x000a, 0x2925: 0x000a, 0x293f: 0x000c, // Block 0xa5, offset 0x2940 0x2940: 0x000c, 0x2941: 0x000c, 0x2973: 0x000c, 0x2974: 0x000c, 0x2975: 0x000c, 0x2976: 0x000c, 0x2979: 0x000c, 0x297a: 0x000c, // Block 0xa6, offset 
0x2980 0x2980: 0x000c, 0x2981: 0x000c, 0x2982: 0x000c, 0x29a7: 0x000c, 0x29a8: 0x000c, 0x29a9: 0x000c, 0x29aa: 0x000c, 0x29ab: 0x000c, 0x29ad: 0x000c, 0x29ae: 0x000c, 0x29af: 0x000c, 0x29b0: 0x000c, 0x29b1: 0x000c, 0x29b2: 0x000c, 0x29b3: 0x000c, 0x29b4: 0x000c, // Block 0xa7, offset 0x29c0 0x29f3: 0x000c, // Block 0xa8, offset 0x2a00 0x2a00: 0x000c, 0x2a01: 0x000c, 0x2a36: 0x000c, 0x2a37: 0x000c, 0x2a38: 0x000c, 0x2a39: 0x000c, 0x2a3a: 0x000c, 0x2a3b: 0x000c, 0x2a3c: 0x000c, 0x2a3d: 0x000c, 0x2a3e: 0x000c, // Block 0xa9, offset 0x2a40 0x2a4a: 0x000c, 0x2a4b: 0x000c, 0x2a4c: 0x000c, // Block 0xaa, offset 0x2a80 0x2aaf: 0x000c, 0x2ab0: 0x000c, 0x2ab1: 0x000c, 0x2ab4: 0x000c, 0x2ab6: 0x000c, 0x2ab7: 0x000c, 0x2abe: 0x000c, // Block 0xab, offset 0x2ac0 0x2adf: 0x000c, 0x2ae3: 0x000c, 0x2ae4: 0x000c, 0x2ae5: 0x000c, 0x2ae6: 0x000c, 0x2ae7: 0x000c, 0x2ae8: 0x000c, 0x2ae9: 0x000c, 0x2aea: 0x000c, // Block 0xac, offset 0x2b00 0x2b00: 0x000c, 0x2b01: 0x000c, 0x2b3c: 0x000c, // Block 0xad, offset 0x2b40 0x2b40: 0x000c, 0x2b66: 0x000c, 0x2b67: 0x000c, 0x2b68: 0x000c, 0x2b69: 0x000c, 0x2b6a: 0x000c, 0x2b6b: 0x000c, 0x2b6c: 0x000c, 0x2b70: 0x000c, 0x2b71: 0x000c, 0x2b72: 0x000c, 0x2b73: 0x000c, 0x2b74: 0x000c, // Block 0xae, offset 0x2b80 0x2bb8: 0x000c, 0x2bb9: 0x000c, 0x2bba: 0x000c, 0x2bbb: 0x000c, 0x2bbc: 0x000c, 0x2bbd: 0x000c, 0x2bbe: 0x000c, 0x2bbf: 0x000c, // Block 0xaf, offset 0x2bc0 0x2bc2: 0x000c, 0x2bc3: 0x000c, 0x2bc4: 0x000c, 0x2bc6: 0x000c, // Block 0xb0, offset 0x2c00 0x2c33: 0x000c, 0x2c34: 0x000c, 0x2c35: 0x000c, 0x2c36: 0x000c, 0x2c37: 0x000c, 0x2c38: 0x000c, 0x2c3a: 0x000c, 0x2c3f: 0x000c, // Block 0xb1, offset 0x2c40 0x2c40: 0x000c, 0x2c42: 0x000c, 0x2c43: 0x000c, // Block 0xb2, offset 0x2c80 0x2cb2: 0x000c, 0x2cb3: 0x000c, 0x2cb4: 0x000c, 0x2cb5: 0x000c, 0x2cbc: 0x000c, 0x2cbd: 0x000c, 0x2cbf: 0x000c, // Block 0xb3, offset 0x2cc0 0x2cc0: 0x000c, 0x2cdc: 0x000c, 0x2cdd: 0x000c, // Block 0xb4, offset 0x2d00 0x2d33: 0x000c, 0x2d34: 0x000c, 0x2d35: 0x000c, 0x2d36: 0x000c, 0x2d37: 0x000c, 0x2d38: 0x000c, 0x2d39: 0x000c, 0x2d3a: 0x000c, 0x2d3d: 0x000c, 0x2d3f: 0x000c, // Block 0xb5, offset 0x2d40 0x2d40: 0x000c, 0x2d60: 0x000a, 0x2d61: 0x000a, 0x2d62: 0x000a, 0x2d63: 0x000a, 0x2d64: 0x000a, 0x2d65: 0x000a, 0x2d66: 0x000a, 0x2d67: 0x000a, 0x2d68: 0x000a, 0x2d69: 0x000a, 0x2d6a: 0x000a, 0x2d6b: 0x000a, 0x2d6c: 0x000a, // Block 0xb6, offset 0x2d80 0x2dab: 0x000c, 0x2dad: 0x000c, 0x2db0: 0x000c, 0x2db1: 0x000c, 0x2db2: 0x000c, 0x2db3: 0x000c, 0x2db4: 0x000c, 0x2db5: 0x000c, 0x2db7: 0x000c, // Block 0xb7, offset 0x2dc0 0x2ddd: 0x000c, 0x2dde: 0x000c, 0x2ddf: 0x000c, 0x2de2: 0x000c, 0x2de3: 0x000c, 0x2de4: 0x000c, 0x2de5: 0x000c, 0x2de7: 0x000c, 0x2de8: 0x000c, 0x2de9: 0x000c, 0x2dea: 0x000c, 0x2deb: 0x000c, // Block 0xb8, offset 0x2e00 0x2e30: 0x000c, 0x2e31: 0x000c, 0x2e32: 0x000c, 0x2e33: 0x000c, 0x2e34: 0x000c, 0x2e35: 0x000c, 0x2e36: 0x000c, 0x2e38: 0x000c, 0x2e39: 0x000c, 0x2e3a: 0x000c, 0x2e3b: 0x000c, 0x2e3c: 0x000c, 0x2e3d: 0x000c, // Block 0xb9, offset 0x2e40 0x2e52: 0x000c, 0x2e53: 0x000c, 0x2e54: 0x000c, 0x2e55: 0x000c, 0x2e56: 0x000c, 0x2e57: 0x000c, 0x2e58: 0x000c, 0x2e59: 0x000c, 0x2e5a: 0x000c, 0x2e5b: 0x000c, 0x2e5c: 0x000c, 0x2e5d: 0x000c, 0x2e5e: 0x000c, 0x2e5f: 0x000c, 0x2e60: 0x000c, 0x2e61: 0x000c, 0x2e62: 0x000c, 0x2e63: 0x000c, 0x2e64: 0x000c, 0x2e65: 0x000c, 0x2e66: 0x000c, 0x2e67: 0x000c, 0x2e6a: 0x000c, 0x2e6b: 0x000c, 0x2e6c: 0x000c, 0x2e6d: 0x000c, 0x2e6e: 0x000c, 0x2e6f: 0x000c, 0x2e70: 0x000c, 0x2e72: 0x000c, 0x2e73: 0x000c, 0x2e75: 0x000c, 0x2e76: 0x000c, // 
Block 0xba, offset 0x2e80 0x2eb0: 0x000c, 0x2eb1: 0x000c, 0x2eb2: 0x000c, 0x2eb3: 0x000c, 0x2eb4: 0x000c, // Block 0xbb, offset 0x2ec0 0x2ef0: 0x000c, 0x2ef1: 0x000c, 0x2ef2: 0x000c, 0x2ef3: 0x000c, 0x2ef4: 0x000c, 0x2ef5: 0x000c, 0x2ef6: 0x000c, // Block 0xbc, offset 0x2f00 0x2f0f: 0x000c, 0x2f10: 0x000c, 0x2f11: 0x000c, 0x2f12: 0x000c, // Block 0xbd, offset 0x2f40 0x2f5d: 0x000c, 0x2f5e: 0x000c, 0x2f60: 0x000b, 0x2f61: 0x000b, 0x2f62: 0x000b, 0x2f63: 0x000b, // Block 0xbe, offset 0x2f80 0x2fa7: 0x000c, 0x2fa8: 0x000c, 0x2fa9: 0x000c, 0x2fb3: 0x000b, 0x2fb4: 0x000b, 0x2fb5: 0x000b, 0x2fb6: 0x000b, 0x2fb7: 0x000b, 0x2fb8: 0x000b, 0x2fb9: 0x000b, 0x2fba: 0x000b, 0x2fbb: 0x000c, 0x2fbc: 0x000c, 0x2fbd: 0x000c, 0x2fbe: 0x000c, 0x2fbf: 0x000c, // Block 0xbf, offset 0x2fc0 0x2fc0: 0x000c, 0x2fc1: 0x000c, 0x2fc2: 0x000c, 0x2fc5: 0x000c, 0x2fc6: 0x000c, 0x2fc7: 0x000c, 0x2fc8: 0x000c, 0x2fc9: 0x000c, 0x2fca: 0x000c, 0x2fcb: 0x000c, 0x2fea: 0x000c, 0x2feb: 0x000c, 0x2fec: 0x000c, 0x2fed: 0x000c, // Block 0xc0, offset 0x3000 0x3000: 0x000a, 0x3001: 0x000a, 0x3002: 0x000c, 0x3003: 0x000c, 0x3004: 0x000c, 0x3005: 0x000a, // Block 0xc1, offset 0x3040 0x3040: 0x000a, 0x3041: 0x000a, 0x3042: 0x000a, 0x3043: 0x000a, 0x3044: 0x000a, 0x3045: 0x000a, 0x3046: 0x000a, 0x3047: 0x000a, 0x3048: 0x000a, 0x3049: 0x000a, 0x304a: 0x000a, 0x304b: 0x000a, 0x304c: 0x000a, 0x304d: 0x000a, 0x304e: 0x000a, 0x304f: 0x000a, 0x3050: 0x000a, 0x3051: 0x000a, 0x3052: 0x000a, 0x3053: 0x000a, 0x3054: 0x000a, 0x3055: 0x000a, 0x3056: 0x000a, // Block 0xc2, offset 0x3080 0x309b: 0x000a, // Block 0xc3, offset 0x30c0 0x30d5: 0x000a, // Block 0xc4, offset 0x3100 0x310f: 0x000a, // Block 0xc5, offset 0x3140 0x3149: 0x000a, // Block 0xc6, offset 0x3180 0x3183: 0x000a, 0x318e: 0x0002, 0x318f: 0x0002, 0x3190: 0x0002, 0x3191: 0x0002, 0x3192: 0x0002, 0x3193: 0x0002, 0x3194: 0x0002, 0x3195: 0x0002, 0x3196: 0x0002, 0x3197: 0x0002, 0x3198: 0x0002, 0x3199: 0x0002, 0x319a: 0x0002, 0x319b: 0x0002, 0x319c: 0x0002, 0x319d: 0x0002, 0x319e: 0x0002, 0x319f: 0x0002, 0x31a0: 0x0002, 0x31a1: 0x0002, 0x31a2: 0x0002, 0x31a3: 0x0002, 0x31a4: 0x0002, 0x31a5: 0x0002, 0x31a6: 0x0002, 0x31a7: 0x0002, 0x31a8: 0x0002, 0x31a9: 0x0002, 0x31aa: 0x0002, 0x31ab: 0x0002, 0x31ac: 0x0002, 0x31ad: 0x0002, 0x31ae: 0x0002, 0x31af: 0x0002, 0x31b0: 0x0002, 0x31b1: 0x0002, 0x31b2: 0x0002, 0x31b3: 0x0002, 0x31b4: 0x0002, 0x31b5: 0x0002, 0x31b6: 0x0002, 0x31b7: 0x0002, 0x31b8: 0x0002, 0x31b9: 0x0002, 0x31ba: 0x0002, 0x31bb: 0x0002, 0x31bc: 0x0002, 0x31bd: 0x0002, 0x31be: 0x0002, 0x31bf: 0x0002, // Block 0xc7, offset 0x31c0 0x31c0: 0x000c, 0x31c1: 0x000c, 0x31c2: 0x000c, 0x31c3: 0x000c, 0x31c4: 0x000c, 0x31c5: 0x000c, 0x31c6: 0x000c, 0x31c7: 0x000c, 0x31c8: 0x000c, 0x31c9: 0x000c, 0x31ca: 0x000c, 0x31cb: 0x000c, 0x31cc: 0x000c, 0x31cd: 0x000c, 0x31ce: 0x000c, 0x31cf: 0x000c, 0x31d0: 0x000c, 0x31d1: 0x000c, 0x31d2: 0x000c, 0x31d3: 0x000c, 0x31d4: 0x000c, 0x31d5: 0x000c, 0x31d6: 0x000c, 0x31d7: 0x000c, 0x31d8: 0x000c, 0x31d9: 0x000c, 0x31da: 0x000c, 0x31db: 0x000c, 0x31dc: 0x000c, 0x31dd: 0x000c, 0x31de: 0x000c, 0x31df: 0x000c, 0x31e0: 0x000c, 0x31e1: 0x000c, 0x31e2: 0x000c, 0x31e3: 0x000c, 0x31e4: 0x000c, 0x31e5: 0x000c, 0x31e6: 0x000c, 0x31e7: 0x000c, 0x31e8: 0x000c, 0x31e9: 0x000c, 0x31ea: 0x000c, 0x31eb: 0x000c, 0x31ec: 0x000c, 0x31ed: 0x000c, 0x31ee: 0x000c, 0x31ef: 0x000c, 0x31f0: 0x000c, 0x31f1: 0x000c, 0x31f2: 0x000c, 0x31f3: 0x000c, 0x31f4: 0x000c, 0x31f5: 0x000c, 0x31f6: 0x000c, 0x31fb: 0x000c, 0x31fc: 0x000c, 0x31fd: 0x000c, 0x31fe: 0x000c, 0x31ff: 0x000c, // Block 0xc8, 
offset 0x3200 0x3200: 0x000c, 0x3201: 0x000c, 0x3202: 0x000c, 0x3203: 0x000c, 0x3204: 0x000c, 0x3205: 0x000c, 0x3206: 0x000c, 0x3207: 0x000c, 0x3208: 0x000c, 0x3209: 0x000c, 0x320a: 0x000c, 0x320b: 0x000c, 0x320c: 0x000c, 0x320d: 0x000c, 0x320e: 0x000c, 0x320f: 0x000c, 0x3210: 0x000c, 0x3211: 0x000c, 0x3212: 0x000c, 0x3213: 0x000c, 0x3214: 0x000c, 0x3215: 0x000c, 0x3216: 0x000c, 0x3217: 0x000c, 0x3218: 0x000c, 0x3219: 0x000c, 0x321a: 0x000c, 0x321b: 0x000c, 0x321c: 0x000c, 0x321d: 0x000c, 0x321e: 0x000c, 0x321f: 0x000c, 0x3220: 0x000c, 0x3221: 0x000c, 0x3222: 0x000c, 0x3223: 0x000c, 0x3224: 0x000c, 0x3225: 0x000c, 0x3226: 0x000c, 0x3227: 0x000c, 0x3228: 0x000c, 0x3229: 0x000c, 0x322a: 0x000c, 0x322b: 0x000c, 0x322c: 0x000c, 0x3235: 0x000c, // Block 0xc9, offset 0x3240 0x3244: 0x000c, 0x325b: 0x000c, 0x325c: 0x000c, 0x325d: 0x000c, 0x325e: 0x000c, 0x325f: 0x000c, 0x3261: 0x000c, 0x3262: 0x000c, 0x3263: 0x000c, 0x3264: 0x000c, 0x3265: 0x000c, 0x3266: 0x000c, 0x3267: 0x000c, 0x3268: 0x000c, 0x3269: 0x000c, 0x326a: 0x000c, 0x326b: 0x000c, 0x326c: 0x000c, 0x326d: 0x000c, 0x326e: 0x000c, 0x326f: 0x000c, // Block 0xca, offset 0x3280 0x3280: 0x000c, 0x3281: 0x000c, 0x3282: 0x000c, 0x3283: 0x000c, 0x3284: 0x000c, 0x3285: 0x000c, 0x3286: 0x000c, 0x3288: 0x000c, 0x3289: 0x000c, 0x328a: 0x000c, 0x328b: 0x000c, 0x328c: 0x000c, 0x328d: 0x000c, 0x328e: 0x000c, 0x328f: 0x000c, 0x3290: 0x000c, 0x3291: 0x000c, 0x3292: 0x000c, 0x3293: 0x000c, 0x3294: 0x000c, 0x3295: 0x000c, 0x3296: 0x000c, 0x3297: 0x000c, 0x3298: 0x000c, 0x329b: 0x000c, 0x329c: 0x000c, 0x329d: 0x000c, 0x329e: 0x000c, 0x329f: 0x000c, 0x32a0: 0x000c, 0x32a1: 0x000c, 0x32a3: 0x000c, 0x32a4: 0x000c, 0x32a6: 0x000c, 0x32a7: 0x000c, 0x32a8: 0x000c, 0x32a9: 0x000c, 0x32aa: 0x000c, // Block 0xcb, offset 0x32c0 0x32c0: 0x0001, 0x32c1: 0x0001, 0x32c2: 0x0001, 0x32c3: 0x0001, 0x32c4: 0x0001, 0x32c5: 0x0001, 0x32c6: 0x0001, 0x32c7: 0x0001, 0x32c8: 0x0001, 0x32c9: 0x0001, 0x32ca: 0x0001, 0x32cb: 0x0001, 0x32cc: 0x0001, 0x32cd: 0x0001, 0x32ce: 0x0001, 0x32cf: 0x0001, 0x32d0: 0x000c, 0x32d1: 0x000c, 0x32d2: 0x000c, 0x32d3: 0x000c, 0x32d4: 0x000c, 0x32d5: 0x000c, 0x32d6: 0x000c, 0x32d7: 0x0001, 0x32d8: 0x0001, 0x32d9: 0x0001, 0x32da: 0x0001, 0x32db: 0x0001, 0x32dc: 0x0001, 0x32dd: 0x0001, 0x32de: 0x0001, 0x32df: 0x0001, 0x32e0: 0x0001, 0x32e1: 0x0001, 0x32e2: 0x0001, 0x32e3: 0x0001, 0x32e4: 0x0001, 0x32e5: 0x0001, 0x32e6: 0x0001, 0x32e7: 0x0001, 0x32e8: 0x0001, 0x32e9: 0x0001, 0x32ea: 0x0001, 0x32eb: 0x0001, 0x32ec: 0x0001, 0x32ed: 0x0001, 0x32ee: 0x0001, 0x32ef: 0x0001, 0x32f0: 0x0001, 0x32f1: 0x0001, 0x32f2: 0x0001, 0x32f3: 0x0001, 0x32f4: 0x0001, 0x32f5: 0x0001, 0x32f6: 0x0001, 0x32f7: 0x0001, 0x32f8: 0x0001, 0x32f9: 0x0001, 0x32fa: 0x0001, 0x32fb: 0x0001, 0x32fc: 0x0001, 0x32fd: 0x0001, 0x32fe: 0x0001, 0x32ff: 0x0001, // Block 0xcc, offset 0x3300 0x3300: 0x0001, 0x3301: 0x0001, 0x3302: 0x0001, 0x3303: 0x0001, 0x3304: 0x000c, 0x3305: 0x000c, 0x3306: 0x000c, 0x3307: 0x000c, 0x3308: 0x000c, 0x3309: 0x000c, 0x330a: 0x000c, 0x330b: 0x0001, 0x330c: 0x0001, 0x330d: 0x0001, 0x330e: 0x0001, 0x330f: 0x0001, 0x3310: 0x0001, 0x3311: 0x0001, 0x3312: 0x0001, 0x3313: 0x0001, 0x3314: 0x0001, 0x3315: 0x0001, 0x3316: 0x0001, 0x3317: 0x0001, 0x3318: 0x0001, 0x3319: 0x0001, 0x331a: 0x0001, 0x331b: 0x0001, 0x331c: 0x0001, 0x331d: 0x0001, 0x331e: 0x0001, 0x331f: 0x0001, 0x3320: 0x0001, 0x3321: 0x0001, 0x3322: 0x0001, 0x3323: 0x0001, 0x3324: 0x0001, 0x3325: 0x0001, 0x3326: 0x0001, 0x3327: 0x0001, 0x3328: 0x0001, 0x3329: 0x0001, 0x332a: 0x0001, 0x332b: 0x0001, 0x332c: 0x0001, 
0x332d: 0x0001, 0x332e: 0x0001, 0x332f: 0x0001, 0x3330: 0x0001, 0x3331: 0x0001, 0x3332: 0x0001, 0x3333: 0x0001, 0x3334: 0x0001, 0x3335: 0x0001, 0x3336: 0x0001, 0x3337: 0x0001, 0x3338: 0x0001, 0x3339: 0x0001, 0x333a: 0x0001, 0x333b: 0x0001, 0x333c: 0x0001, 0x333d: 0x0001, 0x333e: 0x0001, 0x333f: 0x0001, // Block 0xcd, offset 0x3340 0x3340: 0x000d, 0x3341: 0x000d, 0x3342: 0x000d, 0x3343: 0x000d, 0x3344: 0x000d, 0x3345: 0x000d, 0x3346: 0x000d, 0x3347: 0x000d, 0x3348: 0x000d, 0x3349: 0x000d, 0x334a: 0x000d, 0x334b: 0x000d, 0x334c: 0x000d, 0x334d: 0x000d, 0x334e: 0x000d, 0x334f: 0x000d, 0x3350: 0x000d, 0x3351: 0x000d, 0x3352: 0x000d, 0x3353: 0x000d, 0x3354: 0x000d, 0x3355: 0x000d, 0x3356: 0x000d, 0x3357: 0x000d, 0x3358: 0x000d, 0x3359: 0x000d, 0x335a: 0x000d, 0x335b: 0x000d, 0x335c: 0x000d, 0x335d: 0x000d, 0x335e: 0x000d, 0x335f: 0x000d, 0x3360: 0x000d, 0x3361: 0x000d, 0x3362: 0x000d, 0x3363: 0x000d, 0x3364: 0x000d, 0x3365: 0x000d, 0x3366: 0x000d, 0x3367: 0x000d, 0x3368: 0x000d, 0x3369: 0x000d, 0x336a: 0x000d, 0x336b: 0x000d, 0x336c: 0x000d, 0x336d: 0x000d, 0x336e: 0x000d, 0x336f: 0x000d, 0x3370: 0x000a, 0x3371: 0x000a, 0x3372: 0x000d, 0x3373: 0x000d, 0x3374: 0x000d, 0x3375: 0x000d, 0x3376: 0x000d, 0x3377: 0x000d, 0x3378: 0x000d, 0x3379: 0x000d, 0x337a: 0x000d, 0x337b: 0x000d, 0x337c: 0x000d, 0x337d: 0x000d, 0x337e: 0x000d, 0x337f: 0x000d, // Block 0xce, offset 0x3380 0x3380: 0x000a, 0x3381: 0x000a, 0x3382: 0x000a, 0x3383: 0x000a, 0x3384: 0x000a, 0x3385: 0x000a, 0x3386: 0x000a, 0x3387: 0x000a, 0x3388: 0x000a, 0x3389: 0x000a, 0x338a: 0x000a, 0x338b: 0x000a, 0x338c: 0x000a, 0x338d: 0x000a, 0x338e: 0x000a, 0x338f: 0x000a, 0x3390: 0x000a, 0x3391: 0x000a, 0x3392: 0x000a, 0x3393: 0x000a, 0x3394: 0x000a, 0x3395: 0x000a, 0x3396: 0x000a, 0x3397: 0x000a, 0x3398: 0x000a, 0x3399: 0x000a, 0x339a: 0x000a, 0x339b: 0x000a, 0x339c: 0x000a, 0x339d: 0x000a, 0x339e: 0x000a, 0x339f: 0x000a, 0x33a0: 0x000a, 0x33a1: 0x000a, 0x33a2: 0x000a, 0x33a3: 0x000a, 0x33a4: 0x000a, 0x33a5: 0x000a, 0x33a6: 0x000a, 0x33a7: 0x000a, 0x33a8: 0x000a, 0x33a9: 0x000a, 0x33aa: 0x000a, 0x33ab: 0x000a, 0x33b0: 0x000a, 0x33b1: 0x000a, 0x33b2: 0x000a, 0x33b3: 0x000a, 0x33b4: 0x000a, 0x33b5: 0x000a, 0x33b6: 0x000a, 0x33b7: 0x000a, 0x33b8: 0x000a, 0x33b9: 0x000a, 0x33ba: 0x000a, 0x33bb: 0x000a, 0x33bc: 0x000a, 0x33bd: 0x000a, 0x33be: 0x000a, 0x33bf: 0x000a, // Block 0xcf, offset 0x33c0 0x33c0: 0x000a, 0x33c1: 0x000a, 0x33c2: 0x000a, 0x33c3: 0x000a, 0x33c4: 0x000a, 0x33c5: 0x000a, 0x33c6: 0x000a, 0x33c7: 0x000a, 0x33c8: 0x000a, 0x33c9: 0x000a, 0x33ca: 0x000a, 0x33cb: 0x000a, 0x33cc: 0x000a, 0x33cd: 0x000a, 0x33ce: 0x000a, 0x33cf: 0x000a, 0x33d0: 0x000a, 0x33d1: 0x000a, 0x33d2: 0x000a, 0x33d3: 0x000a, 0x33e0: 0x000a, 0x33e1: 0x000a, 0x33e2: 0x000a, 0x33e3: 0x000a, 0x33e4: 0x000a, 0x33e5: 0x000a, 0x33e6: 0x000a, 0x33e7: 0x000a, 0x33e8: 0x000a, 0x33e9: 0x000a, 0x33ea: 0x000a, 0x33eb: 0x000a, 0x33ec: 0x000a, 0x33ed: 0x000a, 0x33ee: 0x000a, 0x33f1: 0x000a, 0x33f2: 0x000a, 0x33f3: 0x000a, 0x33f4: 0x000a, 0x33f5: 0x000a, 0x33f6: 0x000a, 0x33f7: 0x000a, 0x33f8: 0x000a, 0x33f9: 0x000a, 0x33fa: 0x000a, 0x33fb: 0x000a, 0x33fc: 0x000a, 0x33fd: 0x000a, 0x33fe: 0x000a, 0x33ff: 0x000a, // Block 0xd0, offset 0x3400 0x3401: 0x000a, 0x3402: 0x000a, 0x3403: 0x000a, 0x3404: 0x000a, 0x3405: 0x000a, 0x3406: 0x000a, 0x3407: 0x000a, 0x3408: 0x000a, 0x3409: 0x000a, 0x340a: 0x000a, 0x340b: 0x000a, 0x340c: 0x000a, 0x340d: 0x000a, 0x340e: 0x000a, 0x340f: 0x000a, 0x3411: 0x000a, 0x3412: 0x000a, 0x3413: 0x000a, 0x3414: 0x000a, 0x3415: 0x000a, 0x3416: 0x000a, 0x3417: 
0x000a, 0x3418: 0x000a, 0x3419: 0x000a, 0x341a: 0x000a, 0x341b: 0x000a, 0x341c: 0x000a, 0x341d: 0x000a, 0x341e: 0x000a, 0x341f: 0x000a, 0x3420: 0x000a, 0x3421: 0x000a, 0x3422: 0x000a, 0x3423: 0x000a, 0x3424: 0x000a, 0x3425: 0x000a, 0x3426: 0x000a, 0x3427: 0x000a, 0x3428: 0x000a, 0x3429: 0x000a, 0x342a: 0x000a, 0x342b: 0x000a, 0x342c: 0x000a, 0x342d: 0x000a, 0x342e: 0x000a, 0x342f: 0x000a, 0x3430: 0x000a, 0x3431: 0x000a, 0x3432: 0x000a, 0x3433: 0x000a, 0x3434: 0x000a, 0x3435: 0x000a, // Block 0xd1, offset 0x3440 0x3440: 0x0002, 0x3441: 0x0002, 0x3442: 0x0002, 0x3443: 0x0002, 0x3444: 0x0002, 0x3445: 0x0002, 0x3446: 0x0002, 0x3447: 0x0002, 0x3448: 0x0002, 0x3449: 0x0002, 0x344a: 0x0002, 0x344b: 0x000a, 0x344c: 0x000a, // Block 0xd2, offset 0x3480 0x34aa: 0x000a, 0x34ab: 0x000a, // Block 0xd3, offset 0x34c0 0x34c0: 0x000a, 0x34c1: 0x000a, 0x34c2: 0x000a, 0x34c3: 0x000a, 0x34c4: 0x000a, 0x34c5: 0x000a, 0x34c6: 0x000a, 0x34c7: 0x000a, 0x34c8: 0x000a, 0x34c9: 0x000a, 0x34ca: 0x000a, 0x34cb: 0x000a, 0x34cc: 0x000a, 0x34cd: 0x000a, 0x34ce: 0x000a, 0x34cf: 0x000a, 0x34d0: 0x000a, 0x34d1: 0x000a, 0x34d2: 0x000a, 0x34e0: 0x000a, 0x34e1: 0x000a, 0x34e2: 0x000a, 0x34e3: 0x000a, 0x34e4: 0x000a, 0x34e5: 0x000a, 0x34e6: 0x000a, 0x34e7: 0x000a, 0x34e8: 0x000a, 0x34e9: 0x000a, 0x34ea: 0x000a, 0x34eb: 0x000a, 0x34ec: 0x000a, 0x34f0: 0x000a, 0x34f1: 0x000a, 0x34f2: 0x000a, 0x34f3: 0x000a, 0x34f4: 0x000a, 0x34f5: 0x000a, 0x34f6: 0x000a, // Block 0xd4, offset 0x3500 0x3500: 0x000a, 0x3501: 0x000a, 0x3502: 0x000a, 0x3503: 0x000a, 0x3504: 0x000a, 0x3505: 0x000a, 0x3506: 0x000a, 0x3507: 0x000a, 0x3508: 0x000a, 0x3509: 0x000a, 0x350a: 0x000a, 0x350b: 0x000a, 0x350c: 0x000a, 0x350d: 0x000a, 0x350e: 0x000a, 0x350f: 0x000a, 0x3510: 0x000a, 0x3511: 0x000a, 0x3512: 0x000a, 0x3513: 0x000a, 0x3514: 0x000a, // Block 0xd5, offset 0x3540 0x3540: 0x000a, 0x3541: 0x000a, 0x3542: 0x000a, 0x3543: 0x000a, 0x3544: 0x000a, 0x3545: 0x000a, 0x3546: 0x000a, 0x3547: 0x000a, 0x3548: 0x000a, 0x3549: 0x000a, 0x354a: 0x000a, 0x354b: 0x000a, 0x3550: 0x000a, 0x3551: 0x000a, 0x3552: 0x000a, 0x3553: 0x000a, 0x3554: 0x000a, 0x3555: 0x000a, 0x3556: 0x000a, 0x3557: 0x000a, 0x3558: 0x000a, 0x3559: 0x000a, 0x355a: 0x000a, 0x355b: 0x000a, 0x355c: 0x000a, 0x355d: 0x000a, 0x355e: 0x000a, 0x355f: 0x000a, 0x3560: 0x000a, 0x3561: 0x000a, 0x3562: 0x000a, 0x3563: 0x000a, 0x3564: 0x000a, 0x3565: 0x000a, 0x3566: 0x000a, 0x3567: 0x000a, 0x3568: 0x000a, 0x3569: 0x000a, 0x356a: 0x000a, 0x356b: 0x000a, 0x356c: 0x000a, 0x356d: 0x000a, 0x356e: 0x000a, 0x356f: 0x000a, 0x3570: 0x000a, 0x3571: 0x000a, 0x3572: 0x000a, 0x3573: 0x000a, 0x3574: 0x000a, 0x3575: 0x000a, 0x3576: 0x000a, 0x3577: 0x000a, 0x3578: 0x000a, 0x3579: 0x000a, 0x357a: 0x000a, 0x357b: 0x000a, 0x357c: 0x000a, 0x357d: 0x000a, 0x357e: 0x000a, 0x357f: 0x000a, // Block 0xd6, offset 0x3580 0x3580: 0x000a, 0x3581: 0x000a, 0x3582: 0x000a, 0x3583: 0x000a, 0x3584: 0x000a, 0x3585: 0x000a, 0x3586: 0x000a, 0x3587: 0x000a, 0x3590: 0x000a, 0x3591: 0x000a, 0x3592: 0x000a, 0x3593: 0x000a, 0x3594: 0x000a, 0x3595: 0x000a, 0x3596: 0x000a, 0x3597: 0x000a, 0x3598: 0x000a, 0x3599: 0x000a, 0x35a0: 0x000a, 0x35a1: 0x000a, 0x35a2: 0x000a, 0x35a3: 0x000a, 0x35a4: 0x000a, 0x35a5: 0x000a, 0x35a6: 0x000a, 0x35a7: 0x000a, 0x35a8: 0x000a, 0x35a9: 0x000a, 0x35aa: 0x000a, 0x35ab: 0x000a, 0x35ac: 0x000a, 0x35ad: 0x000a, 0x35ae: 0x000a, 0x35af: 0x000a, 0x35b0: 0x000a, 0x35b1: 0x000a, 0x35b2: 0x000a, 0x35b3: 0x000a, 0x35b4: 0x000a, 0x35b5: 0x000a, 0x35b6: 0x000a, 0x35b7: 0x000a, 0x35b8: 0x000a, 0x35b9: 0x000a, 0x35ba: 0x000a, 0x35bb: 
0x000a, 0x35bc: 0x000a, 0x35bd: 0x000a, 0x35be: 0x000a, 0x35bf: 0x000a, // Block 0xd7, offset 0x35c0 0x35c0: 0x000a, 0x35c1: 0x000a, 0x35c2: 0x000a, 0x35c3: 0x000a, 0x35c4: 0x000a, 0x35c5: 0x000a, 0x35c6: 0x000a, 0x35c7: 0x000a, 0x35d0: 0x000a, 0x35d1: 0x000a, 0x35d2: 0x000a, 0x35d3: 0x000a, 0x35d4: 0x000a, 0x35d5: 0x000a, 0x35d6: 0x000a, 0x35d7: 0x000a, 0x35d8: 0x000a, 0x35d9: 0x000a, 0x35da: 0x000a, 0x35db: 0x000a, 0x35dc: 0x000a, 0x35dd: 0x000a, 0x35de: 0x000a, 0x35df: 0x000a, 0x35e0: 0x000a, 0x35e1: 0x000a, 0x35e2: 0x000a, 0x35e3: 0x000a, 0x35e4: 0x000a, 0x35e5: 0x000a, 0x35e6: 0x000a, 0x35e7: 0x000a, 0x35e8: 0x000a, 0x35e9: 0x000a, 0x35ea: 0x000a, 0x35eb: 0x000a, 0x35ec: 0x000a, 0x35ed: 0x000a, // Block 0xd8, offset 0x3600 0x3610: 0x000a, 0x3611: 0x000a, 0x3612: 0x000a, 0x3613: 0x000a, 0x3614: 0x000a, 0x3615: 0x000a, 0x3616: 0x000a, 0x3617: 0x000a, 0x3618: 0x000a, 0x3619: 0x000a, 0x361a: 0x000a, 0x361b: 0x000a, 0x361c: 0x000a, 0x361d: 0x000a, 0x361e: 0x000a, 0x3620: 0x000a, 0x3621: 0x000a, 0x3622: 0x000a, 0x3623: 0x000a, 0x3624: 0x000a, 0x3625: 0x000a, 0x3626: 0x000a, 0x3627: 0x000a, 0x3630: 0x000a, 0x3633: 0x000a, 0x3634: 0x000a, 0x3635: 0x000a, 0x3636: 0x000a, 0x3637: 0x000a, 0x3638: 0x000a, 0x3639: 0x000a, 0x363a: 0x000a, 0x363b: 0x000a, 0x363c: 0x000a, 0x363d: 0x000a, 0x363e: 0x000a, // Block 0xd9, offset 0x3640 0x3640: 0x000a, 0x3641: 0x000a, 0x3642: 0x000a, 0x3643: 0x000a, 0x3644: 0x000a, 0x3645: 0x000a, 0x3646: 0x000a, 0x3647: 0x000a, 0x3648: 0x000a, 0x3649: 0x000a, 0x364a: 0x000a, 0x364b: 0x000a, 0x3650: 0x000a, 0x3651: 0x000a, 0x3652: 0x000a, 0x3653: 0x000a, 0x3654: 0x000a, 0x3655: 0x000a, 0x3656: 0x000a, 0x3657: 0x000a, 0x3658: 0x000a, 0x3659: 0x000a, 0x365a: 0x000a, 0x365b: 0x000a, 0x365c: 0x000a, 0x365d: 0x000a, 0x365e: 0x000a, // Block 0xda, offset 0x3680 0x3680: 0x000a, 0x3681: 0x000a, 0x3682: 0x000a, 0x3683: 0x000a, 0x3684: 0x000a, 0x3685: 0x000a, 0x3686: 0x000a, 0x3687: 0x000a, 0x3688: 0x000a, 0x3689: 0x000a, 0x368a: 0x000a, 0x368b: 0x000a, 0x368c: 0x000a, 0x368d: 0x000a, 0x368e: 0x000a, 0x368f: 0x000a, 0x3690: 0x000a, 0x3691: 0x000a, // Block 0xdb, offset 0x36c0 0x36fe: 0x000b, 0x36ff: 0x000b, // Block 0xdc, offset 0x3700 0x3700: 0x000b, 0x3701: 0x000b, 0x3702: 0x000b, 0x3703: 0x000b, 0x3704: 0x000b, 0x3705: 0x000b, 0x3706: 0x000b, 0x3707: 0x000b, 0x3708: 0x000b, 0x3709: 0x000b, 0x370a: 0x000b, 0x370b: 0x000b, 0x370c: 0x000b, 0x370d: 0x000b, 0x370e: 0x000b, 0x370f: 0x000b, 0x3710: 0x000b, 0x3711: 0x000b, 0x3712: 0x000b, 0x3713: 0x000b, 0x3714: 0x000b, 0x3715: 0x000b, 0x3716: 0x000b, 0x3717: 0x000b, 0x3718: 0x000b, 0x3719: 0x000b, 0x371a: 0x000b, 0x371b: 0x000b, 0x371c: 0x000b, 0x371d: 0x000b, 0x371e: 0x000b, 0x371f: 0x000b, 0x3720: 0x000b, 0x3721: 0x000b, 0x3722: 0x000b, 0x3723: 0x000b, 0x3724: 0x000b, 0x3725: 0x000b, 0x3726: 0x000b, 0x3727: 0x000b, 0x3728: 0x000b, 0x3729: 0x000b, 0x372a: 0x000b, 0x372b: 0x000b, 0x372c: 0x000b, 0x372d: 0x000b, 0x372e: 0x000b, 0x372f: 0x000b, 0x3730: 0x000b, 0x3731: 0x000b, 0x3732: 0x000b, 0x3733: 0x000b, 0x3734: 0x000b, 0x3735: 0x000b, 0x3736: 0x000b, 0x3737: 0x000b, 0x3738: 0x000b, 0x3739: 0x000b, 0x373a: 0x000b, 0x373b: 0x000b, 0x373c: 0x000b, 0x373d: 0x000b, 0x373e: 0x000b, 0x373f: 0x000b, // Block 0xdd, offset 0x3740 0x3740: 0x000c, 0x3741: 0x000c, 0x3742: 0x000c, 0x3743: 0x000c, 0x3744: 0x000c, 0x3745: 0x000c, 0x3746: 0x000c, 0x3747: 0x000c, 0x3748: 0x000c, 0x3749: 0x000c, 0x374a: 0x000c, 0x374b: 0x000c, 0x374c: 0x000c, 0x374d: 0x000c, 0x374e: 0x000c, 0x374f: 0x000c, 0x3750: 0x000c, 0x3751: 0x000c, 0x3752: 0x000c, 0x3753: 0x000c, 
0x3754: 0x000c, 0x3755: 0x000c, 0x3756: 0x000c, 0x3757: 0x000c, 0x3758: 0x000c, 0x3759: 0x000c, 0x375a: 0x000c, 0x375b: 0x000c, 0x375c: 0x000c, 0x375d: 0x000c, 0x375e: 0x000c, 0x375f: 0x000c, 0x3760: 0x000c, 0x3761: 0x000c, 0x3762: 0x000c, 0x3763: 0x000c, 0x3764: 0x000c, 0x3765: 0x000c, 0x3766: 0x000c, 0x3767: 0x000c, 0x3768: 0x000c, 0x3769: 0x000c, 0x376a: 0x000c, 0x376b: 0x000c, 0x376c: 0x000c, 0x376d: 0x000c, 0x376e: 0x000c, 0x376f: 0x000c, 0x3770: 0x000b, 0x3771: 0x000b, 0x3772: 0x000b, 0x3773: 0x000b, 0x3774: 0x000b, 0x3775: 0x000b, 0x3776: 0x000b, 0x3777: 0x000b, 0x3778: 0x000b, 0x3779: 0x000b, 0x377a: 0x000b, 0x377b: 0x000b, 0x377c: 0x000b, 0x377d: 0x000b, 0x377e: 0x000b, 0x377f: 0x000b, } // bidiIndex: 24 blocks, 1536 entries, 1536 bytes // Block 0 is the zero block. var bidiIndex = [1536]uint8{ // Block 0x0, offset 0x0 // Block 0x1, offset 0x40 // Block 0x2, offset 0x80 // Block 0x3, offset 0xc0 0xc2: 0x01, 0xc3: 0x02, 0xca: 0x03, 0xcb: 0x04, 0xcc: 0x05, 0xcd: 0x06, 0xce: 0x07, 0xcf: 0x08, 0xd2: 0x09, 0xd6: 0x0a, 0xd7: 0x0b, 0xd8: 0x0c, 0xd9: 0x0d, 0xda: 0x0e, 0xdb: 0x0f, 0xdc: 0x10, 0xdd: 0x11, 0xde: 0x12, 0xdf: 0x13, 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xea: 0x07, 0xef: 0x08, 0xf0: 0x11, 0xf1: 0x12, 0xf2: 0x12, 0xf3: 0x14, 0xf4: 0x15, // Block 0x4, offset 0x100 0x120: 0x14, 0x121: 0x15, 0x122: 0x16, 0x123: 0x17, 0x124: 0x18, 0x125: 0x19, 0x126: 0x1a, 0x127: 0x1b, 0x128: 0x1c, 0x129: 0x1d, 0x12a: 0x1c, 0x12b: 0x1e, 0x12c: 0x1f, 0x12d: 0x20, 0x12e: 0x21, 0x12f: 0x22, 0x130: 0x23, 0x131: 0x24, 0x132: 0x1a, 0x133: 0x25, 0x134: 0x26, 0x135: 0x27, 0x137: 0x28, 0x138: 0x29, 0x139: 0x2a, 0x13a: 0x2b, 0x13b: 0x2c, 0x13c: 0x2d, 0x13d: 0x2e, 0x13e: 0x2f, 0x13f: 0x30, // Block 0x5, offset 0x140 0x140: 0x31, 0x141: 0x32, 0x142: 0x33, 0x14d: 0x34, 0x14e: 0x35, 0x150: 0x36, 0x15a: 0x37, 0x15c: 0x38, 0x15d: 0x39, 0x15e: 0x3a, 0x15f: 0x3b, 0x160: 0x3c, 0x162: 0x3d, 0x164: 0x3e, 0x165: 0x3f, 0x167: 0x40, 0x168: 0x41, 0x169: 0x42, 0x16a: 0x43, 0x16c: 0x44, 0x16d: 0x45, 0x16e: 0x46, 0x16f: 0x47, 0x170: 0x48, 0x173: 0x49, 0x177: 0x4a, 0x17e: 0x4b, 0x17f: 0x4c, // Block 0x6, offset 0x180 0x180: 0x4d, 0x181: 0x4e, 0x182: 0x4f, 0x183: 0x50, 0x184: 0x51, 0x185: 0x52, 0x186: 0x53, 0x187: 0x54, 0x188: 0x55, 0x189: 0x54, 0x18a: 0x54, 0x18b: 0x54, 0x18c: 0x56, 0x18d: 0x57, 0x18e: 0x58, 0x18f: 0x59, 0x190: 0x5a, 0x191: 0x5b, 0x192: 0x5c, 0x193: 0x5d, 0x194: 0x54, 0x195: 0x54, 0x196: 0x54, 0x197: 0x54, 0x198: 0x54, 0x199: 0x54, 0x19a: 0x5e, 0x19b: 0x54, 0x19c: 0x54, 0x19d: 0x5f, 0x19e: 0x54, 0x19f: 0x60, 0x1a4: 0x54, 0x1a5: 0x54, 0x1a6: 0x61, 0x1a7: 0x62, 0x1a8: 0x54, 0x1a9: 0x54, 0x1aa: 0x54, 0x1ab: 0x54, 0x1ac: 0x54, 0x1ad: 0x63, 0x1ae: 0x64, 0x1af: 0x65, 0x1b3: 0x66, 0x1b5: 0x67, 0x1b7: 0x68, 0x1b8: 0x69, 0x1b9: 0x6a, 0x1ba: 0x6b, 0x1bb: 0x6c, 0x1bc: 0x54, 0x1bd: 0x54, 0x1be: 0x54, 0x1bf: 0x6d, // Block 0x7, offset 0x1c0 0x1c0: 0x6e, 0x1c2: 0x6f, 0x1c3: 0x70, 0x1c7: 0x71, 0x1c8: 0x72, 0x1c9: 0x73, 0x1ca: 0x74, 0x1cb: 0x75, 0x1cd: 0x76, 0x1cf: 0x77, // Block 0x8, offset 0x200 0x237: 0x54, // Block 0x9, offset 0x240 0x252: 0x78, 0x253: 0x79, 0x258: 0x7a, 0x259: 0x7b, 0x25a: 0x7c, 0x25b: 0x7d, 0x25c: 0x7e, 0x25e: 0x7f, 0x260: 0x80, 0x261: 0x81, 0x263: 0x82, 0x264: 0x83, 0x265: 0x84, 0x266: 0x85, 0x267: 0x86, 0x268: 0x87, 0x269: 0x88, 0x26a: 0x89, 0x26b: 0x8a, 0x26f: 0x8b, // Block 0xa, offset 0x280 0x2ac: 0x8c, 0x2ad: 0x8d, 0x2ae: 0x0e, 0x2af: 0x0e, 0x2b0: 0x0e, 0x2b1: 0x0e, 0x2b2: 0x0e, 0x2b3: 0x0e, 0x2b4: 0x8e, 0x2b5: 0x0e, 0x2b6: 0x0e, 0x2b7: 0x8f, 0x2b8: 0x90, 0x2b9: 0x91, 0x2ba: 
0x0e, 0x2bb: 0x92, 0x2bc: 0x93, 0x2bd: 0x94, 0x2bf: 0x95, // Block 0xb, offset 0x2c0 0x2c4: 0x96, 0x2c5: 0x54, 0x2c6: 0x97, 0x2c7: 0x98, 0x2cb: 0x99, 0x2cd: 0x9a, 0x2e0: 0x9b, 0x2e1: 0x9b, 0x2e2: 0x9b, 0x2e3: 0x9b, 0x2e4: 0x9c, 0x2e5: 0x9b, 0x2e6: 0x9b, 0x2e7: 0x9b, 0x2e8: 0x9d, 0x2e9: 0x9b, 0x2ea: 0x9b, 0x2eb: 0x9e, 0x2ec: 0x9f, 0x2ed: 0x9b, 0x2ee: 0x9b, 0x2ef: 0x9b, 0x2f0: 0x9b, 0x2f1: 0x9b, 0x2f2: 0x9b, 0x2f3: 0x9b, 0x2f4: 0x9b, 0x2f5: 0x9b, 0x2f6: 0x9b, 0x2f7: 0x9b, 0x2f8: 0x9b, 0x2f9: 0xa0, 0x2fa: 0x9b, 0x2fb: 0x9b, 0x2fc: 0x9b, 0x2fd: 0x9b, 0x2fe: 0x9b, 0x2ff: 0x9b, // Block 0xc, offset 0x300 0x300: 0xa1, 0x301: 0xa2, 0x302: 0xa3, 0x304: 0xa4, 0x305: 0xa5, 0x306: 0xa6, 0x307: 0xa7, 0x308: 0xa8, 0x30b: 0xa9, 0x30c: 0xaa, 0x30d: 0xab, 0x310: 0xac, 0x311: 0xad, 0x312: 0xae, 0x313: 0xaf, 0x316: 0xb0, 0x317: 0xb1, 0x318: 0xb2, 0x319: 0xb3, 0x31a: 0xb4, 0x31c: 0xb5, 0x330: 0xb6, 0x332: 0xb7, // Block 0xd, offset 0x340 0x36b: 0xb8, 0x36c: 0xb9, 0x37e: 0xba, // Block 0xe, offset 0x380 0x3b2: 0xbb, // Block 0xf, offset 0x3c0 0x3c5: 0xbc, 0x3c6: 0xbd, 0x3c8: 0x54, 0x3c9: 0xbe, 0x3cc: 0x54, 0x3cd: 0xbf, 0x3db: 0xc0, 0x3dc: 0xc1, 0x3dd: 0xc2, 0x3de: 0xc3, 0x3df: 0xc4, 0x3e8: 0xc5, 0x3e9: 0xc6, 0x3ea: 0xc7, // Block 0x10, offset 0x400 0x400: 0xc8, 0x420: 0x9b, 0x421: 0x9b, 0x422: 0x9b, 0x423: 0xc9, 0x424: 0x9b, 0x425: 0xca, 0x426: 0x9b, 0x427: 0x9b, 0x428: 0x9b, 0x429: 0x9b, 0x42a: 0x9b, 0x42b: 0x9b, 0x42c: 0x9b, 0x42d: 0x9b, 0x42e: 0x9b, 0x42f: 0x9b, 0x430: 0x9b, 0x431: 0x9b, 0x432: 0x9b, 0x433: 0x9b, 0x434: 0x9b, 0x435: 0x9b, 0x436: 0x9b, 0x437: 0x9b, 0x438: 0x0e, 0x439: 0x0e, 0x43a: 0x0e, 0x43b: 0xcb, 0x43c: 0x9b, 0x43d: 0x9b, 0x43e: 0x9b, 0x43f: 0x9b, // Block 0x11, offset 0x440 0x440: 0xcc, 0x441: 0x54, 0x442: 0xcd, 0x443: 0xce, 0x444: 0xcf, 0x445: 0xd0, 0x44c: 0x54, 0x44d: 0x54, 0x44e: 0x54, 0x44f: 0x54, 0x450: 0x54, 0x451: 0x54, 0x452: 0x54, 0x453: 0x54, 0x454: 0x54, 0x455: 0x54, 0x456: 0x54, 0x457: 0x54, 0x458: 0x54, 0x459: 0x54, 0x45a: 0x54, 0x45b: 0xd1, 0x45c: 0x54, 0x45d: 0x6c, 0x45e: 0x54, 0x45f: 0xd2, 0x460: 0xd3, 0x461: 0xd4, 0x462: 0xd5, 0x464: 0xd6, 0x465: 0xd7, 0x466: 0xd8, 0x467: 0x36, 0x47f: 0xd9, // Block 0x12, offset 0x480 0x4bf: 0xd9, // Block 0x13, offset 0x4c0 0x4d0: 0x09, 0x4d1: 0x0a, 0x4d6: 0x0b, 0x4db: 0x0c, 0x4dd: 0x0d, 0x4de: 0x0e, 0x4df: 0x0f, 0x4ef: 0x10, 0x4ff: 0x10, // Block 0x14, offset 0x500 0x50f: 0x10, 0x51f: 0x10, 0x52f: 0x10, 0x53f: 0x10, // Block 0x15, offset 0x540 0x540: 0xda, 0x541: 0xda, 0x542: 0xda, 0x543: 0xda, 0x544: 0x05, 0x545: 0x05, 0x546: 0x05, 0x547: 0xdb, 0x548: 0xda, 0x549: 0xda, 0x54a: 0xda, 0x54b: 0xda, 0x54c: 0xda, 0x54d: 0xda, 0x54e: 0xda, 0x54f: 0xda, 0x550: 0xda, 0x551: 0xda, 0x552: 0xda, 0x553: 0xda, 0x554: 0xda, 0x555: 0xda, 0x556: 0xda, 0x557: 0xda, 0x558: 0xda, 0x559: 0xda, 0x55a: 0xda, 0x55b: 0xda, 0x55c: 0xda, 0x55d: 0xda, 0x55e: 0xda, 0x55f: 0xda, 0x560: 0xda, 0x561: 0xda, 0x562: 0xda, 0x563: 0xda, 0x564: 0xda, 0x565: 0xda, 0x566: 0xda, 0x567: 0xda, 0x568: 0xda, 0x569: 0xda, 0x56a: 0xda, 0x56b: 0xda, 0x56c: 0xda, 0x56d: 0xda, 0x56e: 0xda, 0x56f: 0xda, 0x570: 0xda, 0x571: 0xda, 0x572: 0xda, 0x573: 0xda, 0x574: 0xda, 0x575: 0xda, 0x576: 0xda, 0x577: 0xda, 0x578: 0xda, 0x579: 0xda, 0x57a: 0xda, 0x57b: 0xda, 0x57c: 0xda, 0x57d: 0xda, 0x57e: 0xda, 0x57f: 0xda, // Block 0x16, offset 0x580 0x58f: 0x10, 0x59f: 0x10, 0x5a0: 0x13, 0x5af: 0x10, 0x5bf: 0x10, // Block 0x17, offset 0x5c0 0x5cf: 0x10, } // Total table size 15800 bytes (15KiB); checksum: F50EF68C
go/src/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go/0
{ "file_path": "go/src/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go", "repo_id": "go", "token_count": 71225 }
457
// run

//go:build !wasm

// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import "fmt"

// Test that register results are correctly returned (and passed)

//go:registerparams
//go:noinline
func f(x int) (int, int) {
	if x < 3 {
		return 0, x
	}
	a, b := f(x - 2)
	c, d := f(x - 1)
	return a + d, b + c
}

func main() {
	x := 40
	a, b := f(x)
	fmt.Printf("f(%d)=%d,%d\n", x, a, b)
}
go/test/abi/fibish.go/0
{ "file_path": "go/test/abi/fibish.go", "repo_id": "go", "token_count": 207 }
458
Got this far!
Sink = 7
go/test/abi/more_intstar_input.out/0
{ "file_path": "go/test/abi/more_intstar_input.out", "repo_id": "go", "token_count": 10 }
459
// run

// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// When the function Store an Arg and also use it in another place,
// be sure not to generate duplicated OpArgXXXReg values, which confuses
// the register allocator.

package main

//go:noinline
//go:registerparams
func F(x, y float32) {
	if x < 0 {
		panic("FAIL")
	}
	g = [4]float32{x, y, x, y}
}

var g [4]float32

func main() {
	F(1, 2)
	if g[0] != 1 || g[1] != 2 || g[2] != 1 || g[3] != 2 {
		panic("FAIL")
	}
}
go/test/abi/store_reg_args.go/0
{ "file_path": "go/test/abi/store_reg_args.go", "repo_id": "go", "token_count": 219 }
460
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package a

import "go/build"

type (
	Float64 = float64
	Rune    = rune
)

type (
	Int       int
	IntAlias  = Int
	IntAlias2 = IntAlias

	S struct {
		Int
		IntAlias
		IntAlias2
	}
)

type (
	Context = build.Context
)

type (
	I1 interface {
		M1(IntAlias2) Float64
		M2() Context
	}

	I2 = interface {
		M1(Int) float64
		M2() build.Context
	}
)

var i1 I1
var i2 I2 = i1
go/test/alias3.dir/a.go/0
{ "file_path": "go/test/alias3.dir/a.go", "repo_id": "go", "token_count": 228 }
461
// run

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Test the internal "algorithms" for objects larger than a word: hashing, equality etc.

package main

type T struct {
	a float64
	b int64
	c string
	d byte
}

var a = []int{1, 2, 3}
var NIL []int

func arraycmptest() {
	if NIL != nil {
		println("fail1:", NIL, "!= nil")
		panic("bigalg")
	}
	if nil != NIL {
		println("fail2: nil !=", NIL)
		panic("bigalg")
	}
	if a == nil || nil == a {
		println("fail3:", a, "== nil")
		panic("bigalg")
	}
}

func SameArray(a, b []int) bool {
	if len(a) != len(b) || cap(a) != cap(b) {
		return false
	}
	if len(a) > 0 && &a[0] != &b[0] {
		return false
	}
	return true
}

var t = T{1.5, 123, "hello", 255}
var mt = make(map[int]T)
var ma = make(map[int][]int)

func maptest() {
	mt[0] = t
	t1 := mt[0]
	if t1.a != t.a || t1.b != t.b || t1.c != t.c || t1.d != t.d {
		println("fail: map val struct", t1.a, t1.b, t1.c, t1.d)
		panic("bigalg")
	}

	ma[1] = a
	a1 := ma[1]
	if !SameArray(a, a1) {
		println("fail: map val array", a, a1)
		panic("bigalg")
	}
}

var ct = make(chan T)
var ca = make(chan []int)

func send() {
	ct <- t
	ca <- a
}

func chantest() {
	go send()

	t1 := <-ct
	if t1.a != t.a || t1.b != t.b || t1.c != t.c || t1.d != t.d {
		println("fail: map val struct", t1.a, t1.b, t1.c, t1.d)
		panic("bigalg")
	}

	a1 := <-ca
	if !SameArray(a, a1) {
		println("fail: map val array", a, a1)
		panic("bigalg")
	}
}

type E struct{}

var e E

func interfacetest() {
	var i interface{}

	i = a
	a1 := i.([]int)
	if !SameArray(a, a1) {
		println("interface <-> []int", a, a1)
		panic("bigalg")
	}
	pa := new([]int)
	*pa = a
	i = pa
	a1 = *i.(*[]int)
	if !SameArray(a, a1) {
		println("interface <-> *[]int", a, a1)
		panic("bigalg")
	}

	i = t
	t1 := i.(T)
	if t1.a != t.a || t1.b != t.b || t1.c != t.c || t1.d != t.d {
		println("interface <-> struct", t1.a, t1.b, t1.c, t1.d)
		panic("bigalg")
	}

	i = e
	e1 := i.(E)
	// nothing to check; just verify it doesn't crash
	_ = e1
}

func main() {
	arraycmptest()
	maptest()
	chantest()
	interfacetest()
}
go/test/bigalg.go/0
{ "file_path": "go/test/bigalg.go", "repo_id": "go", "token_count": 1028 }
462
// run

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Test that selects do not consume undue memory.

package main

import "runtime"

func sender(c chan int, n int) {
	for i := 0; i < n; i++ {
		c <- 1
	}
}

func receiver(c, dummy chan int, n int) {
	for i := 0; i < n; i++ {
		select {
		case <-c:
			// nothing
		case <-dummy:
			panic("dummy")
		}
	}
}

func main() {
	runtime.MemProfileRate = 0

	c := make(chan int)
	dummy := make(chan int)

	// warm up
	go sender(c, 100000)
	receiver(c, dummy, 100000)
	runtime.GC()
	memstats := new(runtime.MemStats)
	runtime.ReadMemStats(memstats)
	alloc := memstats.Alloc

	// second time shouldn't increase footprint by much
	go sender(c, 100000)
	receiver(c, dummy, 100000)
	runtime.GC()
	runtime.ReadMemStats(memstats)

	// Be careful to avoid wraparound.
	if memstats.Alloc > alloc && memstats.Alloc-alloc > 1.1e5 {
		println("BUG: too much memory for 100,000 selects:", memstats.Alloc-alloc)
	}
}
go/test/chan/select2.go/0
{ "file_path": "go/test/chan/select2.go", "repo_id": "go", "token_count": 394 }
463
// run

// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import "math"

func checkClearSlice() {
	s := []int{1, 2, 3}
	clear(s)
	for i := range s {
		if s[i] != 0 {
			panic("clear not zeroing slice elem")
		}
	}

	clear([]int{})
}

func checkClearMap() {
	m1 := make(map[int]int)
	m1[0] = 0
	m1[1] = 1
	clear(m1)
	if len(m1) != 0 {
		panic("m1 is not cleared")
	}

	// map contains NaN keys is also cleared.
	m2 := make(map[float64]int)
	m2[math.NaN()] = 1
	m2[math.NaN()] = 1
	clear(m2)
	if len(m2) != 0 {
		panic("m2 is not cleared")
	}

	clear(map[int]int{})
}

func main() {
	checkClearSlice()
	checkClearMap()
}
go/test/clear.go/0
{ "file_path": "go/test/clear.go", "repo_id": "go", "token_count": 321 }
464
// errorcheck

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Verify that incorrect invocations of the complex predeclared function are detected.
// Does not compile.

package main

type (
	Float32    float32
	Float64    float64
	Complex64  complex64
	Complex128 complex128
)

var (
	f32 float32
	f64 float64
	F32 Float32
	F64 Float64

	c64  complex64
	c128 complex128
	C64  Complex64
	C128 Complex128
)

func F1() int {
	return 1
}

func F3() (int, int, int) {
	return 1, 2, 3
}

func main() {
	// ok
	c64 = complex(f32, f32)
	c128 = complex(f64, f64)
	_ = complex128(0) // ok

	_ = complex(f32, f64) // ERROR "complex"
	_ = complex(f64, f32) // ERROR "complex"
	_ = complex(f32, F32) // ERROR "complex"
	_ = complex(F32, f32) // ERROR "complex"
	_ = complex(f64, F64) // ERROR "complex"
	_ = complex(F64, f64) // ERROR "complex"

	_ = complex(F1()) // ERROR "not enough arguments"
	_ = complex(F3()) // ERROR "too many arguments"

	_ = complex() // ERROR "not enough arguments"

	c128 = complex(f32, f32) // ERROR "cannot use"
	c64 = complex(f64, f64)  // ERROR "cannot use"

	c64 = complex(1.0, 2.0) // ok, constant is untyped
	c128 = complex(1.0, 2.0)
	C64 = complex(1.0, 2.0)
	C128 = complex(1.0, 2.0)

	C64 = complex(f32, f32)  // ERROR "cannot use"
	C128 = complex(f64, f64) // ERROR "cannot use"
}
go/test/cmplx.go/0
{ "file_path": "go/test/cmplx.go", "repo_id": "go", "token_count": 558 }
465
// asmcheck // Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package codegen import ( "cmp" "unsafe" ) // This file contains code generation tests related to the comparison // operators. // -------------- // // Equality // // -------------- // // Check that compare to constant string use 2/4/8 byte compares func CompareString1(s string) bool { // amd64:`CMPW\t\(.*\), [$]` // arm64:`MOVHU\t\(.*\), [R]`,`MOVD\t[$]`,`CMPW\tR` // ppc64le:`MOVHZ\t\(.*\), [R]`,`CMPW\t.*, [$]` // s390x:`MOVHBR\t\(.*\), [R]`,`CMPW\t.*, [$]` return s == "xx" } func CompareString2(s string) bool { // amd64:`CMPL\t\(.*\), [$]` // arm64:`MOVWU\t\(.*\), [R]`,`CMPW\t.*, [R]` // ppc64le:`MOVWZ\t\(.*\), [R]`,`CMPW\t.*, [R]` // s390x:`MOVWBR\t\(.*\), [R]`,`CMPW\t.*, [$]` return s == "xxxx" } func CompareString3(s string) bool { // amd64:`CMPQ\t\(.*\), [A-Z]` // arm64:-`CMPW\t` // ppc64x:-`CMPW\t` // s390x:-`CMPW\t` return s == "xxxxxxxx" } // Check that arrays compare use 2/4/8 byte compares func CompareArray1(a, b [2]byte) bool { // amd64:`CMPW\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` // arm64:-`MOVBU\t` // ppc64le:-`MOVBZ\t` // s390x:-`MOVBZ\t` return a == b } func CompareArray2(a, b [3]uint16) bool { // amd64:`CMPL\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` // amd64:`CMPW\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` return a == b } func CompareArray3(a, b [3]int16) bool { // amd64:`CMPL\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` // amd64:`CMPW\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` return a == b } func CompareArray4(a, b [12]int8) bool { // amd64:`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` // amd64:`CMPL\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` return a == b } func CompareArray5(a, b [15]byte) bool { // amd64:`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` return a == b } // This was a TODO in mapaccess1_faststr func CompareArray6(a, b unsafe.Pointer) bool { // amd64:`CMPL\t\(.*\), [A-Z]` // arm64:`MOVWU\t\(.*\), [R]`,`CMPW\t.*, [R]` // ppc64le:`MOVWZ\t\(.*\), [R]`,`CMPW\t.*, [R]` // s390x:`MOVWBR\t\(.*\), [R]`,`CMPW\t.*, [R]` return *((*[4]byte)(a)) != *((*[4]byte)(b)) } // Check that some structs generate 2/4/8 byte compares. type T1 struct { a [8]byte } func CompareStruct1(s1, s2 T1) bool { // amd64:`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` // amd64:-`CALL` return s1 == s2 } type T2 struct { a [16]byte } func CompareStruct2(s1, s2 T2) bool { // amd64:`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` // amd64:-`CALL` return s1 == s2 } // Assert that a memequal call is still generated when // inlining would increase binary size too much. 
type T3 struct { a [24]byte } func CompareStruct3(s1, s2 T3) bool { // amd64:-`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` // amd64:`CALL` return s1 == s2 } type T4 struct { a [32]byte } func CompareStruct4(s1, s2 T4) bool { // amd64:-`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` // amd64:`CALL` return s1 == s2 } // -------------- // // Ordering // // -------------- // // Test that LEAQ/ADDQconst are folded into SETx ops var r bool func CmpFold(x uint32) { // amd64:`SETHI\t.*\(SB\)` r = x > 4 } // Test that direct comparisons with memory are generated when // possible func CmpMem1(p int, q *int) bool { // amd64:`CMPQ\t\(.*\), [A-Z]` return p < *q } func CmpMem2(p *int, q int) bool { // amd64:`CMPQ\t\(.*\), [A-Z]` return *p < q } func CmpMem3(p *int) bool { // amd64:`CMPQ\t\(.*\), [$]7` return *p < 7 } func CmpMem4(p *int) bool { // amd64:`CMPQ\t\(.*\), [$]7` return 7 < *p } func CmpMem5(p **int) { // amd64:`CMPL\truntime.writeBarrier\(SB\), [$]0` *p = nil } func CmpMem6(a []int) int { // 386:`CMPL\s8\([A-Z]+\),` // amd64:`CMPQ\s16\([A-Z]+\),` if a[1] > a[2] { return 1 } else { return 2 } } // Check tbz/tbnz are generated when comparing against zero on arm64 func CmpZero1(a int32, ptr *int) { if a < 0 { // arm64:"TBZ" *ptr = 0 } } func CmpZero2(a int64, ptr *int) { if a < 0 { // arm64:"TBZ" *ptr = 0 } } func CmpZero3(a int32, ptr *int) { if a >= 0 { // arm64:"TBNZ" *ptr = 0 } } func CmpZero4(a int64, ptr *int) { if a >= 0 { // arm64:"TBNZ" *ptr = 0 } } func CmpToZero(a, b, d int32, e, f int64, deOptC0, deOptC1 bool) int32 { // arm:`TST`,-`AND` // arm64:`TSTW`,-`AND` // 386:`TESTL`,-`ANDL` // amd64:`TESTL`,-`ANDL` c0 := a&b < 0 // arm:`CMN`,-`ADD` // arm64:`CMNW`,-`ADD` c1 := a+b < 0 // arm:`TEQ`,-`XOR` c2 := a^b < 0 // arm64:`TST`,-`AND` // amd64:`TESTQ`,-`ANDQ` c3 := e&f < 0 // arm64:`CMN`,-`ADD` c4 := e+f < 0 // not optimized to single CMNW/CMN due to further use of b+d // arm64:`ADD`,-`CMNW` // arm:`ADD`,-`CMN` c5 := b+d == 0 // not optimized to single TSTW/TST due to further use of a&d // arm64:`AND`,-`TSTW` // arm:`AND`,-`TST` // 386:`ANDL` c6 := a&d >= 0 // arm64:`TST\sR[0-9]+<<3,\sR[0-9]+` c7 := e&(f<<3) < 0 // arm64:`CMN\sR[0-9]+<<3,\sR[0-9]+` c8 := e+(f<<3) < 0 // arm64:`TST\sR[0-9],\sR[0-9]+` c9 := e&(-19) < 0 if c0 { return 1 } else if c1 { return 2 } else if c2 { return 3 } else if c3 { return 4 } else if c4 { return 5 } else if c5 { return 6 } else if c6 { return 7 } else if c7 { return 9 } else if c8 { return 10 } else if c9 { return 11 } else if deOptC0 { return b + d } else if deOptC1 { return a & d } else { return 0 } } func CmpLogicalToZero(a, b, c uint32, d, e uint64) uint64 { // ppc64x:"ANDCC",-"CMPW" // wasm:"I64Eqz",-"I32Eqz",-"I64ExtendI32U",-"I32WrapI64" if a&63 == 0 { return 1 } // ppc64x:"ANDCC",-"CMP" // wasm:"I64Eqz",-"I32Eqz",-"I64ExtendI32U",-"I32WrapI64" if d&255 == 0 { return 1 } // ppc64x:"ANDCC",-"CMP" // wasm:"I64Eqz",-"I32Eqz",-"I64ExtendI32U",-"I32WrapI64" if d&e == 0 { return 1 } // ppc64x:"ORCC",-"CMP" // wasm:"I64Eqz",-"I32Eqz",-"I64ExtendI32U",-"I32WrapI64" if d|e == 0 { return 1 } // ppc64x:"XORCC",-"CMP" // wasm:"I64Eqz","I32Eqz",-"I64ExtendI32U",-"I32WrapI64" if e^d == 0 { return 1 } return 0 } // The following CmpToZero_ex* check that cmp|cmn with bmi|bpl are generated for // 'comparing to zero' expressions // var + const // 'x-const' might be canonicalized to 'x+(-const)', so we check both // CMN and CMP for subtraction expressions to make the pattern robust. 
func CmpToZero_ex1(a int64, e int32) int { // arm64:`CMN`,-`ADD`,`(BMI|BPL)` if a+3 < 0 { return 1 } // arm64:`CMN`,-`ADD`,`BEQ`,`(BMI|BPL)` if a+5 <= 0 { return 1 } // arm64:`CMN`,-`ADD`,`(BMI|BPL)` if a+13 >= 0 { return 2 } // arm64:`CMP|CMN`,-`(ADD|SUB)`,`(BMI|BPL)` if a-7 < 0 { return 3 } // arm64:`SUB`,`TBZ` if a-11 >= 0 { return 4 } // arm64:`SUB`,`CMP`,`BGT` if a-19 > 0 { return 4 } // arm64:`CMNW`,-`ADDW`,`(BMI|BPL)` // arm:`CMN`,-`ADD`,`(BMI|BPL)` if e+3 < 0 { return 5 } // arm64:`CMNW`,-`ADDW`,`(BMI|BPL)` // arm:`CMN`,-`ADD`,`(BMI|BPL)` if e+13 >= 0 { return 6 } // arm64:`CMPW|CMNW`,`(BMI|BPL)` // arm:`CMP|CMN`, -`(ADD|SUB)`, `(BMI|BPL)` if e-7 < 0 { return 7 } // arm64:`SUB`,`TBNZ` // arm:`CMP|CMN`, -`(ADD|SUB)`, `(BMI|BPL)` if e-11 >= 0 { return 8 } return 0 } // var + var // TODO: optimize 'var - var' func CmpToZero_ex2(a, b, c int64, e, f, g int32) int { // arm64:`CMN`,-`ADD`,`(BMI|BPL)` if a+b < 0 { return 1 } // arm64:`CMN`,-`ADD`,`BEQ`,`(BMI|BPL)` if a+c <= 0 { return 1 } // arm64:`CMN`,-`ADD`,`(BMI|BPL)` if b+c >= 0 { return 2 } // arm64:`CMNW`,-`ADDW`,`(BMI|BPL)` // arm:`CMN`,-`ADD`,`(BMI|BPL)` if e+f < 0 { return 5 } // arm64:`CMNW`,-`ADDW`,`(BMI|BPL)` // arm:`CMN`,-`ADD`,`(BMI|BPL)` if f+g >= 0 { return 6 } return 0 } // var + var*var func CmpToZero_ex3(a, b, c, d int64, e, f, g, h int32) int { // arm64:`CMN`,-`MADD`,`MUL`,`(BMI|BPL)` if a+b*c < 0 { return 1 } // arm64:`CMN`,-`MADD`,`MUL`,`(BMI|BPL)` if b+c*d >= 0 { return 2 } // arm64:`CMNW`,-`MADDW`,`MULW`,`BEQ`,`(BMI|BPL)` // arm:`CMN`,-`MULA`,`MUL`,`BEQ`,`(BMI|BPL)` if e+f*g > 0 { return 5 } // arm64:`CMNW`,-`MADDW`,`MULW`,`BEQ`,`(BMI|BPL)` // arm:`CMN`,-`MULA`,`MUL`,`BEQ`,`(BMI|BPL)` if f+g*h <= 0 { return 6 } return 0 } // var - var*var func CmpToZero_ex4(a, b, c, d int64, e, f, g, h int32) int { // arm64:`CMP`,-`MSUB`,`MUL`,`BEQ`,`(BMI|BPL)` if a-b*c > 0 { return 1 } // arm64:`CMP`,-`MSUB`,`MUL`,`(BMI|BPL)` if b-c*d >= 0 { return 2 } // arm64:`CMPW`,-`MSUBW`,`MULW`,`(BMI|BPL)` if e-f*g < 0 { return 5 } // arm64:`CMPW`,-`MSUBW`,`MULW`,`(BMI|BPL)` if f-g*h >= 0 { return 6 } return 0 } func CmpToZero_ex5(e, f int32, u uint32) int { // arm:`CMN`,-`ADD`,`BEQ`,`(BMI|BPL)` if e+f<<1 > 0 { return 1 } // arm:`CMP`,-`SUB`,`(BMI|BPL)` if f-int32(u>>2) >= 0 { return 2 } return 0 } func UintLtZero(a uint8, b uint16, c uint32, d uint64) int { // amd64: -`(TESTB|TESTW|TESTL|TESTQ|JCC|JCS)` // arm64: -`(CMPW|CMP|BHS|BLO)` if a < 0 || b < 0 || c < 0 || d < 0 { return 1 } return 0 } func UintGeqZero(a uint8, b uint16, c uint32, d uint64) int { // amd64: -`(TESTB|TESTW|TESTL|TESTQ|JCS|JCC)` // arm64: -`(CMPW|CMP|BLO|BHS)` if a >= 0 || b >= 0 || c >= 0 || d >= 0 { return 1 } return 0 } func UintGtZero(a uint8, b uint16, c uint32, d uint64) int { // arm64: `(CBN?ZW)`, `(CBN?Z[^W])`, -`(CMPW|CMP|BLS|BHI)` if a > 0 || b > 0 || c > 0 || d > 0 { return 1 } return 0 } func UintLeqZero(a uint8, b uint16, c uint32, d uint64) int { // arm64: `(CBN?ZW)`, `(CBN?Z[^W])`, -`(CMPW|CMP|BHI|BLS)` if a <= 0 || b <= 0 || c <= 0 || d <= 0 { return 1 } return 0 } func UintLtOne(a uint8, b uint16, c uint32, d uint64) int { // arm64: `(CBN?ZW)`, `(CBN?Z[^W])`, -`(CMPW|CMP|BHS|BLO)` if a < 1 || b < 1 || c < 1 || d < 1 { return 1 } return 0 } func UintGeqOne(a uint8, b uint16, c uint32, d uint64) int { // arm64: `(CBN?ZW)`, `(CBN?Z[^W])`, -`(CMPW|CMP|BLO|BHS)` if a >= 1 || b >= 1 || c >= 1 || d >= 1 { return 1 } return 0 } func CmpToZeroU_ex1(a uint8, b uint16, c uint32, d uint64) int { // wasm:"I64Eqz"-"I64LtU" if 0 < a { return 1 } // 
wasm:"I64Eqz"-"I64LtU" if 0 < b { return 1 } // wasm:"I64Eqz"-"I64LtU" if 0 < c { return 1 } // wasm:"I64Eqz"-"I64LtU" if 0 < d { return 1 } return 0 } func CmpToZeroU_ex2(a uint8, b uint16, c uint32, d uint64) int { // wasm:"I64Eqz"-"I64LeU" if a <= 0 { return 1 } // wasm:"I64Eqz"-"I64LeU" if b <= 0 { return 1 } // wasm:"I64Eqz"-"I64LeU" if c <= 0 { return 1 } // wasm:"I64Eqz"-"I64LeU" if d <= 0 { return 1 } return 0 } func CmpToOneU_ex1(a uint8, b uint16, c uint32, d uint64) int { // wasm:"I64Eqz"-"I64LtU" if a < 1 { return 1 } // wasm:"I64Eqz"-"I64LtU" if b < 1 { return 1 } // wasm:"I64Eqz"-"I64LtU" if c < 1 { return 1 } // wasm:"I64Eqz"-"I64LtU" if d < 1 { return 1 } return 0 } func CmpToOneU_ex2(a uint8, b uint16, c uint32, d uint64) int { // wasm:"I64Eqz"-"I64LeU" if 1 <= a { return 1 } // wasm:"I64Eqz"-"I64LeU" if 1 <= b { return 1 } // wasm:"I64Eqz"-"I64LeU" if 1 <= c { return 1 } // wasm:"I64Eqz"-"I64LeU" if 1 <= d { return 1 } return 0 } // Check that small memequals are replaced with eq instructions func equalConstString1() bool { a := string("A") b := string("Z") // amd64:-".*memequal" // arm64:-".*memequal" // ppc64x:-".*memequal" return a == b } func equalVarString1(a string) bool { b := string("Z") // amd64:-".*memequal" // arm64:-".*memequal" // ppc64x:-".*memequal" return a[:1] == b } func equalConstString2() bool { a := string("AA") b := string("ZZ") // amd64:-".*memequal" // arm64:-".*memequal" // ppc64x:-".*memequal" return a == b } func equalVarString2(a string) bool { b := string("ZZ") // amd64:-".*memequal" // arm64:-".*memequal" // ppc64x:-".*memequal" return a[:2] == b } func equalConstString4() bool { a := string("AAAA") b := string("ZZZZ") // amd64:-".*memequal" // arm64:-".*memequal" // ppc64x:-".*memequal" return a == b } func equalVarString4(a string) bool { b := string("ZZZZ") // amd64:-".*memequal" // arm64:-".*memequal" // ppc64x:-".*memequal" return a[:4] == b } func equalConstString8() bool { a := string("AAAAAAAA") b := string("ZZZZZZZZ") // amd64:-".*memequal" // arm64:-".*memequal" // ppc64x:-".*memequal" return a == b } func equalVarString8(a string) bool { b := string("ZZZZZZZZ") // amd64:-".*memequal" // arm64:-".*memequal" // ppc64x:-".*memequal" return a[:8] == b } func cmpToCmn(a, b, c, d int) int { var c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11 int // arm64:`CMN`,-`CMP` if a < -8 { c1 = 1 } // arm64:`CMN`,-`CMP` if a+1 == 0 { c2 = 1 } // arm64:`CMN`,-`CMP` if a+3 != 0 { c3 = 1 } // arm64:`CMN`,-`CMP` if a+b == 0 { c4 = 1 } // arm64:`CMN`,-`CMP` if b+c != 0 { c5 = 1 } // arm64:`CMN`,-`CMP` if a == -c { c6 = 1 } // arm64:`CMN`,-`CMP` if b != -d { c7 = 1 } // arm64:`CMN`,-`CMP` if a*b+c == 0 { c8 = 1 } // arm64:`CMN`,-`CMP` if a*c+b != 0 { c9 = 1 } // arm64:`CMP`,-`CMN` if b*c-a == 0 { c10 = 1 } // arm64:`CMP`,-`CMN` if a*d-b != 0 { c11 = 1 } return c1 + c2 + c3 + c4 + c5 + c6 + c7 + c8 + c9 + c10 + c11 } func cmpToCmnLessThan(a, b, c, d int) int { var c1, c2, c3, c4 int // arm64:`CMN`,`CSET\tMI`,-`CMP` if a+1 < 0 { c1 = 1 } // arm64:`CMN`,`CSET\tMI`,-`CMP` if a+b < 0 { c2 = 1 } // arm64:`CMN`,`CSET\tMI`,-`CMP` if a*b+c < 0 { c3 = 1 } // arm64:`CMP`,`CSET\tMI`,-`CMN` if a-b*c < 0 { c4 = 1 } return c1 + c2 + c3 + c4 } func cmpToCmnGreaterThanEqual(a, b, c, d int) int { var c1, c2, c3, c4 int // arm64:`CMN`,`CSET\tPL`,-`CMP` if a+1 >= 0 { c1 = 1 } // arm64:`CMN`,`CSET\tPL`,-`CMP` if a+b >= 0 { c2 = 1 } // arm64:`CMN`,`CSET\tPL`,-`CMP` if a*b+c >= 0 { c3 = 1 } // arm64:`CMP`,`CSET\tPL`,-`CMN` if a-b*c >= 0 { c4 = 1 } return c1 + c2 + c3 + c4 } func 
cmp1(val string) bool { var z string // amd64:-".*memequal" return z == val } func cmp2(val string) bool { var z string // amd64:-".*memequal" return val == z } func cmp3(val string) bool { z := "food" // amd64:-".*memequal" return z == val } func cmp4(val string) bool { z := "food" // amd64:-".*memequal" return val == z } func cmp5[T comparable](val T) bool { var z T // amd64:-".*memequal" return z == val } func cmp6[T comparable](val T) bool { var z T // amd64:-".*memequal" return val == z } func cmp7() { cmp5[string]("") // force instantiation cmp6[string]("") // force instantiation } type Point struct { X, Y int } // invertLessThanNoov checks (LessThanNoov (InvertFlags x)) is lowered as // CMP, CSET, CSEL instruction sequence. InvertFlags are only generated under // certain conditions, see canonLessThan, so if the code below does not // generate an InvertFlags OP, this check may fail. func invertLessThanNoov(p1, p2, p3 Point) bool { // arm64:`CMP`,`CSET`,`CSEL` return (p1.X-p3.X)*(p2.Y-p3.Y)-(p2.X-p3.X)*(p1.Y-p3.Y) < 0 } func cmpstring1(x, y string) int { // amd64:".*cmpstring" if x < y { return -1 } // amd64:-".*cmpstring" if x > y { return +1 } return 0 } func cmpstring2(x, y string) int { // We want to fail if there are two calls to cmpstring. // They will both have the same line number, so a test // like in cmpstring1 will not work. Instead, we // look for spill/restore instructions, which only // need to exist if there are 2 calls. //amd64:-`MOVQ\t.*\(SP\)` return cmp.Compare(x, y) }
go/test/codegen/comparisons.go/0
{ "file_path": "go/test/codegen/comparisons.go", "repo_id": "go", "token_count": 8281 }
466
// asmcheck

// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Check to make sure that we recognize when the length of an append
// is constant. We check this by making sure that the constant length
// is folded into a load offset.

package p

func f(x []int) int {
	s := make([]int, 3)
	s = append(s, 4, 5)
	// amd64:`MOVQ\t40\(.*\),`
	return x[len(s)]
}

func g(x []int, p *bool) int {
	s := make([]int, 3)
	for {
		s = s[:3]
		if cap(s) < 5 {
			s = make([]int, 3, 5)
		}
		s = append(s, 4, 5)
		if *p {
			// amd64:`MOVQ\t40\(.*\),`
			return x[len(s)]
		}
	}
	return 0
}
go/test/codegen/issue56440.go/0
{ "file_path": "go/test/codegen/issue56440.go", "repo_id": "go", "token_count": 289 }
467
// asmcheck -race

// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package codegen

// Check that we elide racefuncenter/racefuncexit for
// functions with no calls (but which might panic
// in various ways). See issue 31219.
// amd64:-"CALL.*racefuncenter.*"
// arm64:-"CALL.*racefuncenter.*"
// ppc64le:-"CALL.*racefuncenter.*"
func RaceMightPanic(a []int, i, j, k, s int) {
	var b [4]int
	_ = b[i]     // panicIndex
	_ = a[i:j]   // panicSlice
	_ = a[i:j:k] // also panicSlice
	_ = i << s   // panicShift
	_ = i / j    // panicDivide
}
go/test/codegen/race.go/0
{ "file_path": "go/test/codegen/race.go", "repo_id": "go", "token_count": 242 }
468
// run

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Test composite literals.

package main

type T struct {
	i    int
	f    float64
	s    string
	next *T
}

type R struct {
	num int
}

func itor(a int) *R {
	r := new(R)
	r.num = a
	return r
}

func eq(a []*R) {
	for i := 0; i < len(a); i++ {
		if a[i].num != i {
			panic("bad")
		}
	}
}

func teq(t *T, n int) {
	for i := 0; i < n; i++ {
		if t == nil || t.i != i {
			panic("bad")
		}
		t = t.next
	}
	if t != nil {
		panic("bad")
	}
}

type P struct {
	a, b int
}

func NewP(a, b int) *P {
	return &P{a, b}
}

func main() {
	var t T
	t = T{0, 7.2, "hi", &t}

	var tp *T
	tp = &T{0, 7.2, "hi", &t}

	tl := &T{i: 0, next: &T{i: 1, next: &T{i: 2, next: &T{i: 3, next: &T{i: 4}}}}}
	teq(tl, 5)

	a1 := []int{1, 2, 3}
	if len(a1) != 3 {
		panic("a1")
	}
	a2 := [10]int{1, 2, 3}
	if len(a2) != 10 || cap(a2) != 10 {
		panic("a2")
	}

	a3 := [10]int{1, 2, 3}
	if len(a3) != 10 || a2[3] != 0 {
		panic("a3")
	}

	var oai []int
	oai = []int{1, 2, 3}
	if len(oai) != 3 {
		panic("oai")
	}

	at := [...]*T{&t, tp, &t}
	if len(at) != 3 {
		panic("at")
	}

	c := make(chan int)
	ac := []chan int{c, c, c}
	if len(ac) != 3 {
		panic("ac")
	}

	aat := [][len(at)]*T{at, at}
	if len(aat) != 2 || len(aat[1]) != 3 {
		panic("aat")
	}

	s := string([]byte{'h', 'e', 'l', 'l', 'o'})
	if s != "hello" {
		panic("s")
	}

	m := map[string]float64{"one": 1.0, "two": 2.0, "pi": 22. / 7.}
	if len(m) != 3 {
		panic("m")
	}

	eq([]*R{itor(0), itor(1), itor(2), itor(3), itor(4), itor(5)})
	eq([]*R{{0}, {1}, {2}, {3}, {4}, {5}})

	p1 := NewP(1, 2)
	p2 := NewP(1, 2)
	if p1 == p2 {
		panic("NewP")
	}
}
go/test/complit.go/0
{ "file_path": "go/test/complit.go", "repo_id": "go", "token_count": 935 }
469
// errorcheck

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Verify allowed and disallowed conversions.
// Does not compile.

package main

// everything here is legal except the ERROR line

var c chan int
var d1 chan<- int = c
var d2 = (chan<- int)(c)

var e *[4]int
var f1 []int = e[0:]
var f2 = []int(e[0:])

var g = []int(nil)

type H []int
type J []int

var h H
var j1 J = h // ERROR "compat|illegal|cannot"
var j2 = J(h)
go/test/convert3.go/0
{ "file_path": "go/test/convert3.go", "repo_id": "go", "token_count": 193 }
470
// errorcheck -0 -l -d=defer

// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// check that open-coded defers are used in expected situations

package main

import "fmt"

var glob = 3

func f1() {
	for i := 0; i < 10; i++ {
		fmt.Println("loop")
	}
	defer func() { // ERROR "open-coded defer"
		fmt.Println("defer")
	}()
}

func f2() {
	for {
		defer func() { // ERROR "heap-allocated defer"
			fmt.Println("defer1")
		}()
		if glob > 2 {
			break
		}
	}
	defer func() { // ERROR "stack-allocated defer"
		fmt.Println("defer2")
	}()
}

func f3() {
	defer func() { // ERROR "stack-allocated defer"
		fmt.Println("defer2")
	}()
	for {
		defer func() { // ERROR "heap-allocated defer"
			fmt.Println("defer1")
		}()
		if glob > 2 {
			break
		}
	}
}

func f4() {
	defer func() { // ERROR "open-coded defer"
		fmt.Println("defer")
	}()
label:
	fmt.Println("goto loop")
	if glob > 2 {
		goto label
	}
}

func f5() {
label:
	fmt.Println("goto loop")
	defer func() { // ERROR "heap-allocated defer"
		fmt.Println("defer")
	}()
	if glob > 2 {
		goto label
	}
}

func f6() {
label:
	fmt.Println("goto loop")
	if glob > 2 {
		goto label
	}
	// The current analysis doesn't end a backward goto loop, so this defer is
	// considered to be inside a loop
	defer func() { // ERROR "heap-allocated defer"
		fmt.Println("defer")
	}()
}
go/test/defererrcheck.go/0
{ "file_path": "go/test/defererrcheck.go", "repo_id": "go", "token_count": 604 }
471
// run

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//line foo/bar.y:4
package main

//line foo/bar.y:60
func main() {
//line foo/bar.y:297
	f, l := 0, 0
//line yacctab:1
	f, l = 1, 1
//line yaccpar:1
	f, l = 2, 1
//line foo/bar.y:82
	f, l = 3, 82
//line foo/bar.y:90
	f, l = 3, 90
//line foo/bar.y:92
	f, l = 3, 92
//line foo/bar.y:100
	f, l = 3, 100
//line foo/bar.y:104
	l = 104
//line foo/bar.y:112
	l = 112
//line foo/bar.y:117
	l = 117
//line foo/bar.y:121
	l = 121
//line foo/bar.y:125
	l = 125
//line foo/bar.y:133
	l = 133
//line foo/bar.y:146
	l = 146
//line foo/bar.y:148
//line foo/bar.y:153
//line foo/bar.y:155
	l = 155
//line foo/bar.y:160
//line foo/bar.y:164
//line foo/bar.y:173
//line foo/bar.y:178
//line foo/bar.y:180
//line foo/bar.y:185
//line foo/bar.y:195
//line foo/bar.y:197
//line foo/bar.y:202
//line foo/bar.y:204
//line foo/bar.y:208
//line foo/bar.y:211
//line foo/bar.y:213
//line foo/bar.y:215
//line foo/bar.y:217
//line foo/bar.y:221
//line foo/bar.y:229
//line foo/bar.y:236
//line foo/bar.y:238
//line foo/bar.y:240
//line foo/bar.y:244
//line foo/bar.y:249
//line foo/bar.y:253
//line foo/bar.y:257
//line foo/bar.y:262
//line foo/bar.y:267
//line foo/bar.y:272
	if l == f {
//line foo/bar.y:277
		panic("aie!")
//line foo/bar.y:281
	}
//line foo/bar.y:285
	return
//line foo/bar.y:288
//line foo/bar.y:290
}

//line foo/bar.y:293
//line foo/bar.y:295
go/test/dwarf/linedirectives.go/0
{ "file_path": "go/test/dwarf/linedirectives.go", "repo_id": "go", "token_count": 709 }
472
// errorcheck -0 -m -l

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Test escape analysis with respect to field assignments.

package escape

var sink interface{}

type X struct {
	p1 *int
	p2 *int
	a  [2]*int
}

type Y struct {
	x X
}

func field0() {
	i := 0 // ERROR "moved to heap: i$"
	var x X
	x.p1 = &i
	sink = x.p1
}

func field1() {
	i := 0 // ERROR "moved to heap: i$"
	var x X
	// BAD: &i should not escape
	x.p1 = &i
	sink = x.p2
}

func field3() {
	i := 0 // ERROR "moved to heap: i$"
	var x X
	x.p1 = &i
	sink = x // ERROR "x escapes to heap"
}

func field4() {
	i := 0 // ERROR "moved to heap: i$"
	var y Y
	y.x.p1 = &i
	x := y.x
	sink = x // ERROR "x escapes to heap"
}

func field5() {
	i := 0 // ERROR "moved to heap: i$"
	var x X
	// BAD: &i should not escape here
	x.a[0] = &i
	sink = x.a[1]
}

// BAD: we are not leaking param x, only x.p2
func field6(x *X) { // ERROR "leaking param content: x$"
	sink = x.p2
}

func field6a() {
	i := 0 // ERROR "moved to heap: i$"
	var x X
	// BAD: &i should not escape
	x.p1 = &i
	field6(&x)
}

func field7() {
	i := 0
	var y Y
	y.x.p1 = &i
	x := y.x
	var y1 Y
	y1.x = x
	_ = y1.x.p1
}

func field8() {
	i := 0 // ERROR "moved to heap: i$"
	var y Y
	y.x.p1 = &i
	x := y.x
	var y1 Y
	y1.x = x
	sink = y1.x.p1
}

func field9() {
	i := 0 // ERROR "moved to heap: i$"
	var y Y
	y.x.p1 = &i
	x := y.x
	var y1 Y
	y1.x = x
	sink = y1.x // ERROR "y1\.x escapes to heap"
}

func field10() {
	i := 0 // ERROR "moved to heap: i$"
	var y Y
	// BAD: &i should not escape
	y.x.p1 = &i
	x := y.x
	var y1 Y
	y1.x = x
	sink = y1.x.p2
}

func field11() {
	i := 0 // ERROR "moved to heap: i$"
	x := X{p1: &i}
	sink = x.p1
}

func field12() {
	i := 0 // ERROR "moved to heap: i$"
	// BAD: &i should not escape
	x := X{p1: &i}
	sink = x.p2
}

func field13() {
	i := 0          // ERROR "moved to heap: i$"
	x := &X{p1: &i} // ERROR "&X{...} does not escape$"
	sink = x.p1
}

func field14() {
	i := 0 // ERROR "moved to heap: i$"
	// BAD: &i should not escape
	x := &X{p1: &i} // ERROR "&X{...} does not escape$"
	sink = x.p2
}

func field15() {
	i := 0          // ERROR "moved to heap: i$"
	x := &X{p1: &i} // ERROR "&X{...} escapes to heap$"
	sink = x
}

func field16() {
	i := 0 // ERROR "moved to heap: i$"
	var x X
	// BAD: &i should not escape
	x.p1 = &i
	var iface interface{} = x // ERROR "x does not escape"
	x1 := iface.(X)
	sink = x1.p2
}

func field17() {
	i := 0 // ERROR "moved to heap: i$"
	var x X
	x.p1 = &i
	var iface interface{} = x // ERROR "x does not escape"
	x1 := iface.(X)
	sink = x1.p1
}

func field18() {
	i := 0 // ERROR "moved to heap: i$"
	var x X
	// BAD: &i should not escape
	x.p1 = &i
	var iface interface{} = x // ERROR "x does not escape"
	y, _ := iface.(Y)         // Put X, but extracted Y. The cast will fail, so y is zero initialized.
	sink = y                  // ERROR "y escapes to heap"
}
go/test/escape_field.go/0
{ "file_path": "go/test/escape_field.go", "repo_id": "go", "token_count": 1414 }
473
// errorcheck -0 -m -l

// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Test escape analysis for sync/atomic.

package escape

import (
	"sync/atomic"
	"unsafe"
)

// BAD: should be "leaking param: addr to result ~r1 level=1$".
func LoadPointer(addr *unsafe.Pointer) unsafe.Pointer { // ERROR "leaking param: addr$"
	return atomic.LoadPointer(addr)
}

var ptr unsafe.Pointer

func StorePointer() {
	var x int // ERROR "moved to heap: x"
	atomic.StorePointer(&ptr, unsafe.Pointer(&x))
}

func SwapPointer() {
	var x int // ERROR "moved to heap: x"
	atomic.SwapPointer(&ptr, unsafe.Pointer(&x))
}

func CompareAndSwapPointer() {
	// BAD: x doesn't need to be heap allocated
	var x int // ERROR "moved to heap: x"
	var y int // ERROR "moved to heap: y"
	atomic.CompareAndSwapPointer(&ptr, unsafe.Pointer(&x), unsafe.Pointer(&y))
}
go/test/escape_sync_atomic.go/0
{ "file_path": "go/test/escape_sync_atomic.go", "repo_id": "go", "token_count": 326 }
474
// run

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

func main() {
	var cu0 uint16 = '\u1234';
	var cU1 uint32 = '\U00101234';
	_, _ = cu0, cU1;
}

/*
bug13.go:4: missing '
bug13.go:4: syntax error
bug13.go:5: newline in string
bug13.go:5: missing '
bug13.go:6: newline in string
*/
go/test/fixedbugs/bug013.go/0
{ "file_path": "go/test/fixedbugs/bug013.go", "repo_id": "go", "token_count": 157 }
475
// run

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

type Box struct {};
var m map[string] *Box;

func main() {
	m := make(map[string] *Box);
	s := "foo";
	var x *Box = nil;
	m[s] = x;
}

/*
bug058.go:9: illegal types for operand: INDEX
	(MAP[<string>*STRING]*<Box>{})
	(<string>*STRING)
*/
go/test/fixedbugs/bug058.go/0
{ "file_path": "go/test/fixedbugs/bug058.go", "repo_id": "go", "token_count": 161 }
476
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import P "./bug0"

func main() {
	a0 := P.V0();  // works
	a1 := P.V1();  // works
	a2, b2 := P.V2();  // doesn't work
	_, _, _, _ = a0, a1, a2, b2;
}

/*
uetli:~/Source/go1/test/bugs/bug088.dir gri$ 6g bug0.go && 6g bug1.go
bug1.go:8: shape error across :=
bug1.go:8: a2: undefined
bug1.go:8: b2: undefined
bug1.go:8: illegal types for operand: AS
	(<(bug0)P.int32>INT32)
*/
go/test/fixedbugs/bug088.dir/bug1.go/0
{ "file_path": "go/test/fixedbugs/bug088.dir/bug1.go", "repo_id": "go", "token_count": 234 }
477
// run

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import "os"
import "strconv"

type Test struct {
	f   float64
	in  string
	out string
}

var tests = []Test{
	Test{123.5, "123.5", "123.5"},
	Test{456.7, "456.7", "456.7"},
	Test{1e23 + 8.5e6, "1e23+8.5e6", "1.0000000000000001e+23"},
	Test{100000000000000008388608, "100000000000000008388608", "1.0000000000000001e+23"},
	Test{1e23 + 8388609, "1e23+8388609", "1.0000000000000001e+23"},

	// "x" = the floating point value from converting the string x.
	// These are exactly representable in 64-bit floating point:
	//	1e23-8388608
	//	1e23+8388608
	// The former has an even mantissa, so "1e23" rounds to 1e23-8388608.
	// If "1e23+8388608" is implemented as "1e23" + "8388608",
	// that ends up computing 1e23-8388608 + 8388608 = 1e23,
	// which rounds back to 1e23-8388608.
	// The correct answer, of course, would be "1e23+8388608" = 1e23+8388608.
	// This is not going to be correct until 6g has multiprecision floating point.
	// A simpler case is "1e23+1", which should also round to 1e23+8388608.
	Test{1e23 + 8.388608e6, "1e23+8.388608e6", "1.0000000000000001e+23"},
	Test{1e23 + 1, "1e23+1", "1.0000000000000001e+23"},
}

func main() {
	ok := true
	for i := 0; i < len(tests); i++ {
		t := tests[i]
		v := strconv.FormatFloat(t.f, 'g', -1, 64)
		if v != t.out {
			println("Bad float64 const:", t.in, "want", t.out, "got", v)
			x, err := strconv.ParseFloat(t.out, 64)
			if err != nil {
				println("bug120: strconv.Atof64", t.out)
				panic("fail")
			}
			println("\twant exact:", strconv.FormatFloat(x, 'g', 1000, 64))
			println("\tgot exact: ", strconv.FormatFloat(t.f, 'g', 1000, 64))
			ok = false
		}
	}
	if !ok {
		os.Exit(1)
	}
}
go/test/fixedbugs/bug120.go/0
{ "file_path": "go/test/fixedbugs/bug120.go", "repo_id": "go", "token_count": 763 }
478
// compile

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

type Foo interface { }

type T struct {}

func (t *T) foo() {}

func main() {
	t := new(T);
	var i interface {};
	f, ok := i.(Foo);
	_, _, _ = t, f, ok;
}
go/test/fixedbugs/bug135.go/0
{ "file_path": "go/test/fixedbugs/bug135.go", "repo_id": "go", "token_count": 119 }
479
// run

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

const (
	joao = "João"
	jose = "José"
)

func main() {
	s1 := joao
	s2 := jose
	if (s1 < s2) != (joao < jose) {
		panic("unequal")
	}
}
go/test/fixedbugs/bug1515.go/0
{ "file_path": "go/test/fixedbugs/bug1515.go", "repo_id": "go", "token_count": 125 }
480
// run

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

var g byte = 123
var f *byte = &g
var b = make([]byte, 5)

func main() {
	b[0:1][0] = *f
	if b[0] != 123 {
		println("want 123 got", b[0])
		panic("fail")
	}
}
go/test/fixedbugs/bug168.go/0
{ "file_path": "go/test/fixedbugs/bug168.go", "repo_id": "go", "token_count": 128 }
481
// run

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import "fmt"

type Buffer int

func (*Buffer) Read() {}

type Reader interface {
	Read()
}

func f() *Buffer { return nil }

func g() Reader {
	// implicit interface conversion in assignment during return
	return f()
}

func h() (b *Buffer, ok bool) { return }

func i() (r Reader, ok bool) {
	// implicit interface conversion in multi-assignment during return
	return h()
}

func fmter() (s string, i int, t string) { return "%#x %q", 100, "hello" }

func main() {
	b := g()
	bb, ok := b.(*Buffer)
	_, _, _ = b, bb, ok

	b, ok = i()
	bb, ok = b.(*Buffer)
	_, _, _ = b, bb, ok

	s := fmt.Sprintf(fmter())
	if s != "0x64 \"hello\"" {
		println(s)
		panic("fail")
	}
}
go/test/fixedbugs/bug184.go/0
{ "file_path": "go/test/fixedbugs/bug184.go", "repo_id": "go", "token_count": 311 }
482
// run

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

var m = map[int]int{0: 0, 1: 0}

var nf = 0
var i int

func multi() (int, int) {
	return 1, 2
}

func xxx() {
	var c chan int
	x, ok := <-c

	var m map[int]int
	x, ok = m[1]

	var i interface{}
	var xx int
	xx, ok = i.(int)

	a, b := multi()

	_, _, _, _, _ = x, ok, xx, a, b
}

func f() map[int]int {
	nf++
	return m
}

func g() *int {
	nf++
	return &i
}

func main() {
	f()[0]++
	f()[1] += 2
	*g() %= 2
	if nf != 3 {
		println("too many calls:", nf)
		panic("fail")
	}
}
go/test/fixedbugs/bug196.go/0
{ "file_path": "go/test/fixedbugs/bug196.go", "repo_id": "go", "token_count": 294 }
483
// errorcheck

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

func f(x int, y ...int) // ok

func h(x, y ...int) // ERROR "[.][.][.]"

func i(x int, y ...int, z float32) // ERROR "[.][.][.]"
go/test/fixedbugs/bug228a.go/0
{ "file_path": "go/test/fixedbugs/bug228a.go", "repo_id": "go", "token_count": 108 }
484
// run

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

var nf int
var ng int

func f() (int, int, int) {
	nf++
	return 1, 2, 3
}

func g() int {
	ng++
	return 4
}

var x, y, z = f()
var m = make(map[int]int)
var v, ok = m[g()]

func main() {
	if x != 1 || y != 2 || z != 3 || nf != 1 || v != 0 || ok != false || ng != 1 {
		println("x=", x, " y=", y, " z=", z, " nf=", nf, " v=", v, " ok=", ok, " ng=", ng)
		panic("fail")
	}
}
go/test/fixedbugs/bug244.go/0
{ "file_path": "go/test/fixedbugs/bug244.go", "repo_id": "go", "token_count": 235 }
485
// errorcheck

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

type T U // bogus "invalid recursive type T" from 6g
type U int

const x T = 123

type V V // ERROR "invalid recursive type"
go/test/fixedbugs/bug256.go/0
{ "file_path": "go/test/fixedbugs/bug256.go", "repo_id": "go", "token_count": 93 }
486
// errorcheck

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// 6g accepts the program below even though it is syntactically incorrect:
// Each statement in the list of statements for each case clause must be
// terminated with a semicolon. No semicolon is present for the labeled
// statements and because the last token is a colon ":", no semicolon is
// inserted automatically.
//
// Both gccgo and gofmt correctly refuse this program as is and accept it
// when the semicolons are present.
//
// This is a test case for issue 777 ( https://golang.org/issue/777 ).

package main

func main() {
	switch 0 {
	case 0:
		L0: // ERROR "statement"
	case 1:
		L1: // ERROR "statement"
	default:
		// correct since no semicolon is required before a '}'
		goto L2
		L2:
	}
}
go/test/fixedbugs/bug274.go/0
{ "file_path": "go/test/fixedbugs/bug274.go", "repo_id": "go", "token_count": 266 }
487
// compile

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Used to run out of registers on 8g. Issue 868.

package main

func main() {
	var r uint32
	var buf [4]byte
	a := buf[0:4]
	r = (((((uint32(a[3]) << 8) | uint32(a[2])) << 8) | uint32(a[1])) << 8) | uint32(a[0])
	_ = r
}
go/test/fixedbugs/bug288.go/0
{ "file_path": "go/test/fixedbugs/bug288.go", "repo_id": "go", "token_count": 155 }
488
// errorcheck

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Use //line to set the line number of the next line to 20.
//line fixedbugs/bug305.go:20

package p

// Introduce an error which should be reported on line 24.
var a int = "bogus"

// Line 15 of file.
// 16
// 17
// 18
// 19
// 20
// 21
// 22
// 23
// ERROR "cannot|incompatible"
go/test/fixedbugs/bug305.go/0
{ "file_path": "go/test/fixedbugs/bug305.go", "repo_id": "go", "token_count": 146 }
489
// run

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

type Value struct {
	X interface{}
	Y int
}

type Struct struct {
	X complex128
}

const magic = 1 + 2i

func (Value) Complex(x complex128) {
	if x != magic {
		println(x)
		panic("bad complex magic")
	}
}

func f(x *byte, y, z int) complex128 {
	return magic
}

func (Value) Struct(x Struct) {
	if x.X != magic {
		println(x.X)
		panic("bad struct magic")
	}
}

func f1(x *byte, y, z int) Struct {
	return Struct{magic}
}

func main() {
	var v Value
	v.Struct(f1(nil, 0, 0)) // ok
	v.Complex(f(nil, 0, 0)) // used to fail
}
go/test/fixedbugs/bug329.go/0
{ "file_path": "go/test/fixedbugs/bug329.go", "repo_id": "go", "token_count": 271 }
490
// run

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// issue 1900

package main

func getArgs(data map[string]interface{}, keys ...string) map[string]string {
	ret := map[string]string{}
	var ok bool
	for _, k := range keys {
		ret[k], ok = data[k].(string)
		if !ok {}
	}
	return ret
}

func main() {
	x := getArgs(map[string]interface{}{"x":"y"}, "x")
	if x["x"] != "y" {
		println("BUG bug343", x)
	}
}

/*
typecheck [1008592b0]
.   INDREG a(1) l(15) x(24) tc(2) runtime.ret G0 string
bug343.go:15: internal compiler error: typecheck INDREG
*/
go/test/fixedbugs/bug343.go/0
{ "file_path": "go/test/fixedbugs/bug343.go", "repo_id": "go", "token_count": 301 }
491
// errorcheck

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// issue 1993.
// error used to have last line number in file

package main

func bla1() bool {
	return false
}

func bla5() bool {
	_ = 1
	false // ERROR "false evaluated but not used|value computed is not used|is not used"
	_ = 2
	return false
}

func main() {
	x := bla1()
	_ = x
}
go/test/fixedbugs/bug357.go/0
{ "file_path": "go/test/fixedbugs/bug357.go", "repo_id": "go", "token_count": 150 }
492
// errorcheck

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Issue 2520

package main

func main() {
	if 2e9 { }      // ERROR "2e.09|expected bool|non-boolean condition in if statement"
	if 3.14+1i { }  // ERROR "3.14 . 1i|expected bool|non-boolean condition in if statement"
}
go/test/fixedbugs/bug383.go/0
{ "file_path": "go/test/fixedbugs/bug383.go", "repo_id": "go", "token_count": 129 }
493
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Use the functions in one.go so that the inlined
// forms get type-checked.

package two

import "./one"

func use() {
	var r one.T
	r.F()
}
go/test/fixedbugs/bug407.dir/two.go/0
{ "file_path": "go/test/fixedbugs/bug407.dir/two.go", "repo_id": "go", "token_count": 95 }
494
// errorcheck

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Issue 3044.
// Multiple valued expressions in return lists.

package p

func Two() (a, b int)

// F used to compile.
func F() (x interface{}, y int) {
	return Two(), 0 // ERROR "single-value context|2\-valued"
}

// Recursive used to trigger an internal compiler error.
func Recursive() (x interface{}, y int) {
	return Recursive(), 0 // ERROR "single-value context|2\-valued"
}
go/test/fixedbugs/bug418.go/0
{ "file_path": "go/test/fixedbugs/bug418.go", "repo_id": "go", "token_count": 165 }
495
// run

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Test that initializing struct fields out of order still runs
// functions in the right order. This failed with gccgo.

package main

type S struct {
	i1, i2, i3 int
}

var G int

func v(i int) int {
	if i != G {
		panic(i)
	}
	G = i + 1
	return G
}

func F() S {
	return S{
		i1: v(0),
		i3: v(1),
		i2: v(2),
	}
}

func main() {
	s := F()
	if s != (S{1, 3, 2}) {
		panic(s)
	}
}
go/test/fixedbugs/bug433.go/0
{ "file_path": "go/test/fixedbugs/bug433.go", "repo_id": "go", "token_count": 227 }
496
// errorcheck

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Issue 3890: missing detection of init cycle involving
// method calls in function bodies.

package flag

var commandLine = NewFlagSet() // ERROR "initialization cycle|depends upon itself"

type FlagSet struct {
}

func (f *FlagSet) failf(format string, a ...interface{}) {
	f.usage()
}

func (f *FlagSet) usage() {
	if f == commandLine {
		panic(3)
	}
}

func NewFlagSet() *FlagSet {
	f := &FlagSet{}
	f.setErrorHandling(true)
	return f
}

func (f *FlagSet) setErrorHandling(b bool) {
	f.failf("DIE")
}
go/test/fixedbugs/bug459.go/0
{ "file_path": "go/test/fixedbugs/bug459.go", "repo_id": "go", "token_count": 229 }
497
package main

import "./p2"

func main() {
	_ = p2.SockUnix()
}
go/test/fixedbugs/bug467.dir/p3.go/0
{ "file_path": "go/test/fixedbugs/bug467.dir/p3.go", "repo_id": "go", "token_count": 30 }
498
// run

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Gccgo generated incorrect GC info when a global variable was
// initialized to a slice of a value containing pointers. The initial
// backing array for the slice was allocated in the .data section,
// which is fine, but the backing array was not registered as a GC
// root.

package main

import (
	"runtime"
)

type s struct {
	str string
}

var a = []struct {
	str string
}{
	{""},
}

var b = "b"
var c = "c"

func init() {
	a[0].str = b + c
}

func main() {
	runtime.GC()
	if a[0].str != b + c {
		panic(a[0].str)
	}
}
go/test/fixedbugs/bug500.go/0
{ "file_path": "go/test/fixedbugs/bug500.go", "repo_id": "go", "token_count": 231 }
499